trace-mcp 1.21.2 → 1.22.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -137
- package/README.md +9 -1
- package/dist/cli.js +1186 -649
- package/dist/cli.js.map +1 -1
- package/dist/index.js +724 -311
- package/dist/index.js.map +1 -1
- package/package.json +2 -2
package/dist/cli.js
CHANGED
|
@@ -11,10 +11,10 @@ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require
|
|
|
11
11
|
if (typeof require !== "undefined") return require.apply(this, arguments);
|
|
12
12
|
throw Error('Dynamic require of "' + x + '" is not supported');
|
|
13
13
|
});
|
|
14
|
-
var __glob = (map) => (
|
|
15
|
-
var fn2 = map[
|
|
14
|
+
var __glob = (map) => (path134) => {
|
|
15
|
+
var fn2 = map[path134];
|
|
16
16
|
if (fn2) return fn2();
|
|
17
|
-
throw new Error("Module not found in bundle: " +
|
|
17
|
+
throw new Error("Module not found in bundle: " + path134);
|
|
18
18
|
};
|
|
19
19
|
var __esm = (fn2, res) => function __init() {
|
|
20
20
|
return fn2 && (res = (0, fn2[__getOwnPropNames(fn2)[0]])(fn2 = 0)), res;
|
|
@@ -1197,14 +1197,14 @@ var init_file_repository = __esm({
|
|
|
1197
1197
|
}
|
|
1198
1198
|
db;
|
|
1199
1199
|
_stmts;
|
|
1200
|
-
insertFile(
|
|
1201
|
-
const result = this._stmts.insertFile.run(
|
|
1200
|
+
insertFile(path134, language, contentHash, byteLength, workspace, mtimeMs, createNode) {
|
|
1201
|
+
const result = this._stmts.insertFile.run(path134, language, contentHash, byteLength, workspace, mtimeMs);
|
|
1202
1202
|
const fileId = Number(result.lastInsertRowid);
|
|
1203
1203
|
createNode("file", fileId);
|
|
1204
1204
|
return fileId;
|
|
1205
1205
|
}
|
|
1206
|
-
getFile(
|
|
1207
|
-
return this._stmts.getFile.get(
|
|
1206
|
+
getFile(path134) {
|
|
1207
|
+
return this._stmts.getFile.get(path134);
|
|
1208
1208
|
}
|
|
1209
1209
|
getFileById(id) {
|
|
1210
1210
|
return this._stmts.getFileById.get(id);
|
|
@@ -2143,9 +2143,9 @@ var init_store = __esm({
|
|
|
2143
2143
|
domain;
|
|
2144
2144
|
analytics;
|
|
2145
2145
|
// --- Files (delegates to FileRepository) ---
|
|
2146
|
-
insertFile(
|
|
2146
|
+
insertFile(path134, language, contentHash, byteLength, workspace, mtimeMs) {
|
|
2147
2147
|
return this.files.insertFile(
|
|
2148
|
-
|
|
2148
|
+
path134,
|
|
2149
2149
|
language,
|
|
2150
2150
|
contentHash,
|
|
2151
2151
|
byteLength,
|
|
@@ -2154,8 +2154,8 @@ var init_store = __esm({
|
|
|
2154
2154
|
(nodeType, refId) => this.graph.createNode(nodeType, refId)
|
|
2155
2155
|
);
|
|
2156
2156
|
}
|
|
2157
|
-
getFile(
|
|
2158
|
-
return this.files.getFile(
|
|
2157
|
+
getFile(path134) {
|
|
2158
|
+
return this.files.getFile(path134);
|
|
2159
2159
|
}
|
|
2160
2160
|
getFileById(id) {
|
|
2161
2161
|
return this.files.getFileById(id);
|
|
@@ -27818,8 +27818,8 @@ var init_rails = __esm({
|
|
|
27818
27818
|
|
|
27819
27819
|
// src/indexer/plugins/integration/framework/spring/index.ts
|
|
27820
27820
|
import { ok as ok42 } from "neverthrow";
|
|
27821
|
-
function normalizePath(
|
|
27822
|
-
return "/" +
|
|
27821
|
+
function normalizePath(path134) {
|
|
27822
|
+
return "/" + path134.replace(/\/+/g, "/").replace(/^\/|\/$/g, "");
|
|
27823
27823
|
}
|
|
27824
27824
|
var SpringPlugin;
|
|
27825
27825
|
var init_spring = __esm({
|
|
@@ -27886,8 +27886,8 @@ var init_spring = __esm({
|
|
|
27886
27886
|
const re = new RegExp(`@${annotation}\\s*(?:\\(\\s*(?:value\\s*=\\s*)?["']([^"']*)["']\\s*\\))?`, "g");
|
|
27887
27887
|
let m;
|
|
27888
27888
|
while ((m = re.exec(source)) !== null) {
|
|
27889
|
-
const
|
|
27890
|
-
const uri = normalizePath(classPrefix + "/" +
|
|
27889
|
+
const path134 = m[1] ?? "";
|
|
27890
|
+
const uri = normalizePath(classPrefix + "/" + path134);
|
|
27891
27891
|
result.routes.push({ method, uri, line: source.substring(0, m.index).split("\n").length });
|
|
27892
27892
|
}
|
|
27893
27893
|
}
|
|
@@ -32660,8 +32660,8 @@ function extractExpoNavigationCalls(source) {
|
|
|
32660
32660
|
}
|
|
32661
32661
|
const templateRegex = /router\.(push|replace|navigate)\s*\(\s*`([^`]+)`/g;
|
|
32662
32662
|
while ((match = templateRegex.exec(source)) !== null) {
|
|
32663
|
-
const
|
|
32664
|
-
paths.push(
|
|
32663
|
+
const path134 = match[2].replace(/\$\{[^}]+\}/g, ":param");
|
|
32664
|
+
paths.push(path134);
|
|
32665
32665
|
}
|
|
32666
32666
|
const linkRegex = /<Link\s+[^>]*href\s*=\s*(?:\{?\s*)?['"]([^'"]+)['"]/g;
|
|
32667
32667
|
while ((match = linkRegex.exec(source)) !== null) {
|
|
@@ -32673,9 +32673,9 @@ function extractExpoNavigationCalls(source) {
|
|
|
32673
32673
|
}
|
|
32674
32674
|
return [...new Set(paths)];
|
|
32675
32675
|
}
|
|
32676
|
-
function matchExpoRoute(
|
|
32677
|
-
if (
|
|
32678
|
-
const pathParts =
|
|
32676
|
+
function matchExpoRoute(path134, routePattern) {
|
|
32677
|
+
if (path134 === routePattern) return true;
|
|
32678
|
+
const pathParts = path134.split("/").filter(Boolean);
|
|
32679
32679
|
const routeParts = routePattern.split("/").filter(Boolean);
|
|
32680
32680
|
if (pathParts.length !== routeParts.length) {
|
|
32681
32681
|
if (routeParts[routeParts.length - 1] === "*" && pathParts.length >= routeParts.length - 1) {
|
|
@@ -46089,9 +46089,9 @@ var init_client = __esm({
|
|
|
46089
46089
|
this.proc.stderr.on("data", (chunk2) => {
|
|
46090
46090
|
logger.debug({ lsp: this.command, stderr: chunk2.toString().trim() }, "LSP stderr");
|
|
46091
46091
|
});
|
|
46092
|
-
this.proc.on("error", (
|
|
46093
|
-
logger.warn({ lsp: this.command, error:
|
|
46094
|
-
this.rejectAll(new Error(`LSP process error: ${
|
|
46092
|
+
this.proc.on("error", (err49) => {
|
|
46093
|
+
logger.warn({ lsp: this.command, error: err49.message }, "LSP process error");
|
|
46094
|
+
this.rejectAll(new Error(`LSP process error: ${err49.message}`));
|
|
46095
46095
|
});
|
|
46096
46096
|
this.proc.on("exit", (code, signal) => {
|
|
46097
46097
|
logger.debug({ lsp: this.command, code, signal }, "LSP process exited");
|
|
@@ -47264,9 +47264,9 @@ var require_backend_impl = __commonJS({
|
|
|
47264
47264
|
if (!backend) {
|
|
47265
47265
|
throw new Error(`no available backend found. ERR: ${errors.map((e) => `[${e.name}] ${e.err}`).join(", ")}`);
|
|
47266
47266
|
}
|
|
47267
|
-
for (const { name, err:
|
|
47267
|
+
for (const { name, err: err49 } of errors) {
|
|
47268
47268
|
if (backendHints.includes(name)) {
|
|
47269
|
-
console.warn(`removing requested execution provider "${name}" from session options because it is not available: ${
|
|
47269
|
+
console.warn(`removing requested execution provider "${name}" from session options because it is not available: ${err49}`);
|
|
47270
47270
|
}
|
|
47271
47271
|
}
|
|
47272
47272
|
const filteredEps = eps.filter((i) => availableBackendNames.has(typeof i === "string" ? i : i.name));
|
|
@@ -49752,27 +49752,27 @@ var require_process = __commonJS({
|
|
|
49752
49752
|
var require_filesystem = __commonJS({
|
|
49753
49753
|
"node_modules/detect-libc/lib/filesystem.js"(exports, module) {
|
|
49754
49754
|
"use strict";
|
|
49755
|
-
var
|
|
49755
|
+
var fs123 = __require("fs");
|
|
49756
49756
|
var LDD_PATH = "/usr/bin/ldd";
|
|
49757
49757
|
var SELF_PATH = "/proc/self/exe";
|
|
49758
49758
|
var MAX_LENGTH = 2048;
|
|
49759
|
-
var
|
|
49760
|
-
const fd =
|
|
49759
|
+
var readFileSync12 = (path134) => {
|
|
49760
|
+
const fd = fs123.openSync(path134, "r");
|
|
49761
49761
|
const buffer = Buffer.alloc(MAX_LENGTH);
|
|
49762
|
-
const bytesRead =
|
|
49763
|
-
|
|
49762
|
+
const bytesRead = fs123.readSync(fd, buffer, 0, MAX_LENGTH, 0);
|
|
49763
|
+
fs123.close(fd, () => {
|
|
49764
49764
|
});
|
|
49765
49765
|
return buffer.subarray(0, bytesRead);
|
|
49766
49766
|
};
|
|
49767
|
-
var readFile = (
|
|
49768
|
-
|
|
49769
|
-
if (
|
|
49770
|
-
reject(
|
|
49767
|
+
var readFile = (path134) => new Promise((resolve4, reject) => {
|
|
49768
|
+
fs123.open(path134, "r", (err49, fd) => {
|
|
49769
|
+
if (err49) {
|
|
49770
|
+
reject(err49);
|
|
49771
49771
|
} else {
|
|
49772
49772
|
const buffer = Buffer.alloc(MAX_LENGTH);
|
|
49773
|
-
|
|
49773
|
+
fs123.read(fd, buffer, 0, MAX_LENGTH, 0, (_, bytesRead) => {
|
|
49774
49774
|
resolve4(buffer.subarray(0, bytesRead));
|
|
49775
|
-
|
|
49775
|
+
fs123.close(fd, () => {
|
|
49776
49776
|
});
|
|
49777
49777
|
});
|
|
49778
49778
|
}
|
|
@@ -49781,7 +49781,7 @@ var require_filesystem = __commonJS({
|
|
|
49781
49781
|
module.exports = {
|
|
49782
49782
|
LDD_PATH,
|
|
49783
49783
|
SELF_PATH,
|
|
49784
|
-
readFileSync:
|
|
49784
|
+
readFileSync: readFileSync12,
|
|
49785
49785
|
readFile
|
|
49786
49786
|
};
|
|
49787
49787
|
}
|
|
@@ -49830,7 +49830,7 @@ var require_detect_libc = __commonJS({
|
|
|
49830
49830
|
"use strict";
|
|
49831
49831
|
var childProcess = __require("child_process");
|
|
49832
49832
|
var { isLinux, getReport } = require_process();
|
|
49833
|
-
var { LDD_PATH, SELF_PATH, readFile, readFileSync:
|
|
49833
|
+
var { LDD_PATH, SELF_PATH, readFile, readFileSync: readFileSync12 } = require_filesystem();
|
|
49834
49834
|
var { interpreterPath } = require_elf();
|
|
49835
49835
|
var cachedFamilyInterpreter;
|
|
49836
49836
|
var cachedFamilyFilesystem;
|
|
@@ -49840,8 +49840,8 @@ var require_detect_libc = __commonJS({
|
|
|
49840
49840
|
var safeCommand = () => {
|
|
49841
49841
|
if (!commandOut) {
|
|
49842
49842
|
return new Promise((resolve4) => {
|
|
49843
|
-
childProcess.exec(command, (
|
|
49844
|
-
commandOut =
|
|
49843
|
+
childProcess.exec(command, (err49, out) => {
|
|
49844
|
+
commandOut = err49 ? " " : out;
|
|
49845
49845
|
resolve4(commandOut);
|
|
49846
49846
|
});
|
|
49847
49847
|
});
|
|
@@ -49884,11 +49884,11 @@ var require_detect_libc = __commonJS({
|
|
|
49884
49884
|
}
|
|
49885
49885
|
return null;
|
|
49886
49886
|
};
|
|
49887
|
-
var familyFromInterpreterPath = (
|
|
49888
|
-
if (
|
|
49889
|
-
if (
|
|
49887
|
+
var familyFromInterpreterPath = (path134) => {
|
|
49888
|
+
if (path134) {
|
|
49889
|
+
if (path134.includes("/ld-musl-")) {
|
|
49890
49890
|
return MUSL;
|
|
49891
|
-
} else if (
|
|
49891
|
+
} else if (path134.includes("/ld-linux-")) {
|
|
49892
49892
|
return GLIBC;
|
|
49893
49893
|
}
|
|
49894
49894
|
}
|
|
@@ -49922,7 +49922,7 @@ var require_detect_libc = __commonJS({
|
|
|
49922
49922
|
}
|
|
49923
49923
|
cachedFamilyFilesystem = null;
|
|
49924
49924
|
try {
|
|
49925
|
-
const lddContent =
|
|
49925
|
+
const lddContent = readFileSync12(LDD_PATH);
|
|
49926
49926
|
cachedFamilyFilesystem = getFamilyFromLddContent(lddContent);
|
|
49927
49927
|
} catch (e) {
|
|
49928
49928
|
}
|
|
@@ -49935,8 +49935,8 @@ var require_detect_libc = __commonJS({
|
|
|
49935
49935
|
cachedFamilyInterpreter = null;
|
|
49936
49936
|
try {
|
|
49937
49937
|
const selfContent = await readFile(SELF_PATH);
|
|
49938
|
-
const
|
|
49939
|
-
cachedFamilyInterpreter = familyFromInterpreterPath(
|
|
49938
|
+
const path134 = interpreterPath(selfContent);
|
|
49939
|
+
cachedFamilyInterpreter = familyFromInterpreterPath(path134);
|
|
49940
49940
|
} catch (e) {
|
|
49941
49941
|
}
|
|
49942
49942
|
return cachedFamilyInterpreter;
|
|
@@ -49947,9 +49947,9 @@ var require_detect_libc = __commonJS({
|
|
|
49947
49947
|
}
|
|
49948
49948
|
cachedFamilyInterpreter = null;
|
|
49949
49949
|
try {
|
|
49950
|
-
const selfContent =
|
|
49951
|
-
const
|
|
49952
|
-
cachedFamilyInterpreter = familyFromInterpreterPath(
|
|
49950
|
+
const selfContent = readFileSync12(SELF_PATH);
|
|
49951
|
+
const path134 = interpreterPath(selfContent);
|
|
49952
|
+
cachedFamilyInterpreter = familyFromInterpreterPath(path134);
|
|
49953
49953
|
} catch (e) {
|
|
49954
49954
|
}
|
|
49955
49955
|
return cachedFamilyInterpreter;
|
|
@@ -50011,7 +50011,7 @@ var require_detect_libc = __commonJS({
|
|
|
50011
50011
|
}
|
|
50012
50012
|
cachedVersionFilesystem = null;
|
|
50013
50013
|
try {
|
|
50014
|
-
const lddContent =
|
|
50014
|
+
const lddContent = readFileSync12(LDD_PATH);
|
|
50015
50015
|
const versionMatch = lddContent.match(RE_GLIBC_VERSION);
|
|
50016
50016
|
if (versionMatch) {
|
|
50017
50017
|
cachedVersionFilesystem = versionMatch[1];
|
|
@@ -51668,21 +51668,21 @@ var require_sharp = __commonJS({
|
|
|
51668
51668
|
`@img/sharp-${runtimePlatform}/sharp.node`,
|
|
51669
51669
|
"@img/sharp-wasm32/sharp.node"
|
|
51670
51670
|
];
|
|
51671
|
-
var
|
|
51671
|
+
var path134;
|
|
51672
51672
|
var sharp2;
|
|
51673
51673
|
var errors = [];
|
|
51674
|
-
for (
|
|
51674
|
+
for (path134 of paths) {
|
|
51675
51675
|
try {
|
|
51676
|
-
sharp2 = __require(
|
|
51676
|
+
sharp2 = __require(path134);
|
|
51677
51677
|
break;
|
|
51678
|
-
} catch (
|
|
51679
|
-
errors.push(
|
|
51678
|
+
} catch (err49) {
|
|
51679
|
+
errors.push(err49);
|
|
51680
51680
|
}
|
|
51681
51681
|
}
|
|
51682
|
-
if (sharp2 &&
|
|
51683
|
-
const
|
|
51684
|
-
|
|
51685
|
-
errors.push(
|
|
51682
|
+
if (sharp2 && path134.startsWith("@img/sharp-linux-x64") && !sharp2._isUsingX64V2()) {
|
|
51683
|
+
const err49 = new Error("Prebuilt binaries for linux-x64 require v2 microarchitecture");
|
|
51684
|
+
err49.code = "Unsupported CPU";
|
|
51685
|
+
errors.push(err49);
|
|
51686
51686
|
sharp2 = null;
|
|
51687
51687
|
}
|
|
51688
51688
|
if (sharp2) {
|
|
@@ -51690,12 +51690,12 @@ var require_sharp = __commonJS({
|
|
|
51690
51690
|
} else {
|
|
51691
51691
|
const [isLinux, isMacOs, isWindows] = ["linux", "darwin", "win32"].map((os16) => runtimePlatform.startsWith(os16));
|
|
51692
51692
|
const help = [`Could not load the "sharp" module using the ${runtimePlatform} runtime`];
|
|
51693
|
-
errors.forEach((
|
|
51694
|
-
if (
|
|
51695
|
-
help.push(`${
|
|
51693
|
+
errors.forEach((err49) => {
|
|
51694
|
+
if (err49.code !== "MODULE_NOT_FOUND") {
|
|
51695
|
+
help.push(`${err49.code}: ${err49.message}`);
|
|
51696
51696
|
}
|
|
51697
51697
|
});
|
|
51698
|
-
const messages = errors.map((
|
|
51698
|
+
const messages = errors.map((err49) => err49.message).join(" ");
|
|
51699
51699
|
help.push("Possible solutions:");
|
|
51700
51700
|
if (isUnsupportedNodeRuntime()) {
|
|
51701
51701
|
const { found, expected } = isUnsupportedNodeRuntime();
|
|
@@ -51748,7 +51748,7 @@ var require_sharp = __commonJS({
|
|
|
51748
51748
|
" brew update && brew upgrade vips"
|
|
51749
51749
|
);
|
|
51750
51750
|
}
|
|
51751
|
-
if (errors.some((
|
|
51751
|
+
if (errors.some((err49) => err49.code === "ERR_DLOPEN_DISABLED")) {
|
|
51752
51752
|
help.push("- Run Node.js without using the --no-addons flag");
|
|
51753
51753
|
}
|
|
51754
51754
|
if (isWindows && /The specified procedure could not be found/.test(messages)) {
|
|
@@ -52502,18 +52502,18 @@ var require_input = __commonJS({
|
|
|
52502
52502
|
if (this._isStreamInput()) {
|
|
52503
52503
|
this.on("finish", () => {
|
|
52504
52504
|
this._flattenBufferIn();
|
|
52505
|
-
sharp2.metadata(this.options, (
|
|
52506
|
-
if (
|
|
52507
|
-
callback(is2.nativeError(
|
|
52505
|
+
sharp2.metadata(this.options, (err49, metadata2) => {
|
|
52506
|
+
if (err49) {
|
|
52507
|
+
callback(is2.nativeError(err49, stack2));
|
|
52508
52508
|
} else {
|
|
52509
52509
|
callback(null, metadata2);
|
|
52510
52510
|
}
|
|
52511
52511
|
});
|
|
52512
52512
|
});
|
|
52513
52513
|
} else {
|
|
52514
|
-
sharp2.metadata(this.options, (
|
|
52515
|
-
if (
|
|
52516
|
-
callback(is2.nativeError(
|
|
52514
|
+
sharp2.metadata(this.options, (err49, metadata2) => {
|
|
52515
|
+
if (err49) {
|
|
52516
|
+
callback(is2.nativeError(err49, stack2));
|
|
52517
52517
|
} else {
|
|
52518
52518
|
callback(null, metadata2);
|
|
52519
52519
|
}
|
|
@@ -52525,9 +52525,9 @@ var require_input = __commonJS({
|
|
|
52525
52525
|
return new Promise((resolve4, reject) => {
|
|
52526
52526
|
const finished = () => {
|
|
52527
52527
|
this._flattenBufferIn();
|
|
52528
|
-
sharp2.metadata(this.options, (
|
|
52529
|
-
if (
|
|
52530
|
-
reject(is2.nativeError(
|
|
52528
|
+
sharp2.metadata(this.options, (err49, metadata2) => {
|
|
52529
|
+
if (err49) {
|
|
52530
|
+
reject(is2.nativeError(err49, stack2));
|
|
52531
52531
|
} else {
|
|
52532
52532
|
resolve4(metadata2);
|
|
52533
52533
|
}
|
|
@@ -52541,9 +52541,9 @@ var require_input = __commonJS({
|
|
|
52541
52541
|
});
|
|
52542
52542
|
} else {
|
|
52543
52543
|
return new Promise((resolve4, reject) => {
|
|
52544
|
-
sharp2.metadata(this.options, (
|
|
52545
|
-
if (
|
|
52546
|
-
reject(is2.nativeError(
|
|
52544
|
+
sharp2.metadata(this.options, (err49, metadata2) => {
|
|
52545
|
+
if (err49) {
|
|
52546
|
+
reject(is2.nativeError(err49, stack2));
|
|
52547
52547
|
} else {
|
|
52548
52548
|
resolve4(metadata2);
|
|
52549
52549
|
}
|
|
@@ -52558,18 +52558,18 @@ var require_input = __commonJS({
|
|
|
52558
52558
|
if (this._isStreamInput()) {
|
|
52559
52559
|
this.on("finish", () => {
|
|
52560
52560
|
this._flattenBufferIn();
|
|
52561
|
-
sharp2.stats(this.options, (
|
|
52562
|
-
if (
|
|
52563
|
-
callback(is2.nativeError(
|
|
52561
|
+
sharp2.stats(this.options, (err49, stats2) => {
|
|
52562
|
+
if (err49) {
|
|
52563
|
+
callback(is2.nativeError(err49, stack2));
|
|
52564
52564
|
} else {
|
|
52565
52565
|
callback(null, stats2);
|
|
52566
52566
|
}
|
|
52567
52567
|
});
|
|
52568
52568
|
});
|
|
52569
52569
|
} else {
|
|
52570
|
-
sharp2.stats(this.options, (
|
|
52571
|
-
if (
|
|
52572
|
-
callback(is2.nativeError(
|
|
52570
|
+
sharp2.stats(this.options, (err49, stats2) => {
|
|
52571
|
+
if (err49) {
|
|
52572
|
+
callback(is2.nativeError(err49, stack2));
|
|
52573
52573
|
} else {
|
|
52574
52574
|
callback(null, stats2);
|
|
52575
52575
|
}
|
|
@@ -52581,9 +52581,9 @@ var require_input = __commonJS({
|
|
|
52581
52581
|
return new Promise((resolve4, reject) => {
|
|
52582
52582
|
this.on("finish", function() {
|
|
52583
52583
|
this._flattenBufferIn();
|
|
52584
|
-
sharp2.stats(this.options, (
|
|
52585
|
-
if (
|
|
52586
|
-
reject(is2.nativeError(
|
|
52584
|
+
sharp2.stats(this.options, (err49, stats2) => {
|
|
52585
|
+
if (err49) {
|
|
52586
|
+
reject(is2.nativeError(err49, stack2));
|
|
52587
52587
|
} else {
|
|
52588
52588
|
resolve4(stats2);
|
|
52589
52589
|
}
|
|
@@ -52592,9 +52592,9 @@ var require_input = __commonJS({
|
|
|
52592
52592
|
});
|
|
52593
52593
|
} else {
|
|
52594
52594
|
return new Promise((resolve4, reject) => {
|
|
52595
|
-
sharp2.stats(this.options, (
|
|
52596
|
-
if (
|
|
52597
|
-
reject(is2.nativeError(
|
|
52595
|
+
sharp2.stats(this.options, (err49, stats2) => {
|
|
52596
|
+
if (err49) {
|
|
52597
|
+
reject(is2.nativeError(err49, stack2));
|
|
52598
52598
|
} else {
|
|
52599
52599
|
resolve4(stats2);
|
|
52600
52600
|
}
|
|
@@ -54590,15 +54590,15 @@ var require_color = __commonJS({
|
|
|
54590
54590
|
};
|
|
54591
54591
|
}
|
|
54592
54592
|
function wrapConversion(toModel, graph) {
|
|
54593
|
-
const
|
|
54593
|
+
const path134 = [graph[toModel].parent, toModel];
|
|
54594
54594
|
let fn2 = conversions_default[graph[toModel].parent][toModel];
|
|
54595
54595
|
let cur = graph[toModel].parent;
|
|
54596
54596
|
while (graph[cur].parent) {
|
|
54597
|
-
|
|
54597
|
+
path134.unshift(graph[cur].parent);
|
|
54598
54598
|
fn2 = link(conversions_default[graph[cur].parent][cur], fn2);
|
|
54599
54599
|
cur = graph[cur].parent;
|
|
54600
54600
|
}
|
|
54601
|
-
fn2.conversion =
|
|
54601
|
+
fn2.conversion = path134;
|
|
54602
54602
|
return fn2;
|
|
54603
54603
|
}
|
|
54604
54604
|
function route(fromModel) {
|
|
@@ -55215,7 +55215,7 @@ var require_channel = __commonJS({
|
|
|
55215
55215
|
var require_output = __commonJS({
|
|
55216
55216
|
"node_modules/sharp/lib/output.js"(exports, module) {
|
|
55217
55217
|
"use strict";
|
|
55218
|
-
var
|
|
55218
|
+
var path134 = __require("path");
|
|
55219
55219
|
var is2 = require_is();
|
|
55220
55220
|
var sharp2 = require_sharp();
|
|
55221
55221
|
var formats = /* @__PURE__ */ new Map([
|
|
@@ -55243,19 +55243,19 @@ var require_output = __commonJS({
|
|
|
55243
55243
|
var errJp2Save = () => new Error("JP2 output requires libvips with support for OpenJPEG");
|
|
55244
55244
|
var bitdepthFromColourCount = (colours) => 1 << 31 - Math.clz32(Math.ceil(Math.log2(colours)));
|
|
55245
55245
|
function toFile(fileOut, callback) {
|
|
55246
|
-
let
|
|
55246
|
+
let err49;
|
|
55247
55247
|
if (!is2.string(fileOut)) {
|
|
55248
|
-
|
|
55249
|
-
} else if (is2.string(this.options.input.file) &&
|
|
55250
|
-
|
|
55251
|
-
} else if (jp2Regex.test(
|
|
55252
|
-
|
|
55248
|
+
err49 = new Error("Missing output file path");
|
|
55249
|
+
} else if (is2.string(this.options.input.file) && path134.resolve(this.options.input.file) === path134.resolve(fileOut)) {
|
|
55250
|
+
err49 = new Error("Cannot use same file for input and output");
|
|
55251
|
+
} else if (jp2Regex.test(path134.extname(fileOut)) && !this.constructor.format.jp2k.output.file) {
|
|
55252
|
+
err49 = errJp2Save();
|
|
55253
55253
|
}
|
|
55254
|
-
if (
|
|
55254
|
+
if (err49) {
|
|
55255
55255
|
if (is2.fn(callback)) {
|
|
55256
|
-
callback(
|
|
55256
|
+
callback(err49);
|
|
55257
55257
|
} else {
|
|
55258
|
-
return Promise.reject(
|
|
55258
|
+
return Promise.reject(err49);
|
|
55259
55259
|
}
|
|
55260
55260
|
} else {
|
|
55261
55261
|
this.options.fileOut = fileOut;
|
|
@@ -55969,18 +55969,18 @@ var require_output = __commonJS({
|
|
|
55969
55969
|
if (this._isStreamInput()) {
|
|
55970
55970
|
this.on("finish", () => {
|
|
55971
55971
|
this._flattenBufferIn();
|
|
55972
|
-
sharp2.pipeline(this.options, (
|
|
55973
|
-
if (
|
|
55974
|
-
callback(is2.nativeError(
|
|
55972
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
55973
|
+
if (err49) {
|
|
55974
|
+
callback(is2.nativeError(err49, stack2));
|
|
55975
55975
|
} else {
|
|
55976
55976
|
callback(null, data, info);
|
|
55977
55977
|
}
|
|
55978
55978
|
});
|
|
55979
55979
|
});
|
|
55980
55980
|
} else {
|
|
55981
|
-
sharp2.pipeline(this.options, (
|
|
55982
|
-
if (
|
|
55983
|
-
callback(is2.nativeError(
|
|
55981
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
55982
|
+
if (err49) {
|
|
55983
|
+
callback(is2.nativeError(err49, stack2));
|
|
55984
55984
|
} else {
|
|
55985
55985
|
callback(null, data, info);
|
|
55986
55986
|
}
|
|
@@ -55991,9 +55991,9 @@ var require_output = __commonJS({
|
|
|
55991
55991
|
if (this._isStreamInput()) {
|
|
55992
55992
|
this.once("finish", () => {
|
|
55993
55993
|
this._flattenBufferIn();
|
|
55994
|
-
sharp2.pipeline(this.options, (
|
|
55995
|
-
if (
|
|
55996
|
-
this.emit("error", is2.nativeError(
|
|
55994
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
55995
|
+
if (err49) {
|
|
55996
|
+
this.emit("error", is2.nativeError(err49, stack2));
|
|
55997
55997
|
} else {
|
|
55998
55998
|
this.emit("info", info);
|
|
55999
55999
|
this.push(data);
|
|
@@ -56006,9 +56006,9 @@ var require_output = __commonJS({
|
|
|
56006
56006
|
this.emit("finish");
|
|
56007
56007
|
}
|
|
56008
56008
|
} else {
|
|
56009
|
-
sharp2.pipeline(this.options, (
|
|
56010
|
-
if (
|
|
56011
|
-
this.emit("error", is2.nativeError(
|
|
56009
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
56010
|
+
if (err49) {
|
|
56011
|
+
this.emit("error", is2.nativeError(err49, stack2));
|
|
56012
56012
|
} else {
|
|
56013
56013
|
this.emit("info", info);
|
|
56014
56014
|
this.push(data);
|
|
@@ -56023,9 +56023,9 @@ var require_output = __commonJS({
|
|
|
56023
56023
|
return new Promise((resolve4, reject) => {
|
|
56024
56024
|
this.once("finish", () => {
|
|
56025
56025
|
this._flattenBufferIn();
|
|
56026
|
-
sharp2.pipeline(this.options, (
|
|
56027
|
-
if (
|
|
56028
|
-
reject(is2.nativeError(
|
|
56026
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
56027
|
+
if (err49) {
|
|
56028
|
+
reject(is2.nativeError(err49, stack2));
|
|
56029
56029
|
} else {
|
|
56030
56030
|
if (this.options.resolveWithObject) {
|
|
56031
56031
|
resolve4({ data, info });
|
|
@@ -56038,9 +56038,9 @@ var require_output = __commonJS({
|
|
|
56038
56038
|
});
|
|
56039
56039
|
} else {
|
|
56040
56040
|
return new Promise((resolve4, reject) => {
|
|
56041
|
-
sharp2.pipeline(this.options, (
|
|
56042
|
-
if (
|
|
56043
|
-
reject(is2.nativeError(
|
|
56041
|
+
sharp2.pipeline(this.options, (err49, data, info) => {
|
|
56042
|
+
if (err49) {
|
|
56043
|
+
reject(is2.nativeError(err49, stack2));
|
|
56044
56044
|
} else {
|
|
56045
56045
|
if (this.options.resolveWithObject) {
|
|
56046
56046
|
resolve4({ data, info });
|
|
@@ -58676,9 +58676,9 @@ function memoizePromise(key, factory) {
|
|
|
58676
58676
|
}
|
|
58677
58677
|
const promise = factory().then(
|
|
58678
58678
|
(value) => value,
|
|
58679
|
-
(
|
|
58679
|
+
(err49) => {
|
|
58680
58680
|
cache.delete(key);
|
|
58681
|
-
return Promise.reject(
|
|
58681
|
+
return Promise.reject(err49);
|
|
58682
58682
|
}
|
|
58683
58683
|
);
|
|
58684
58684
|
cache.put(key, promise);
|
|
@@ -58865,8 +58865,8 @@ async function storeCachedResource(path_or_repo_id, filename, cache2, cacheKey,
|
|
|
58865
58865
|
headers
|
|
58866
58866
|
}
|
|
58867
58867
|
)
|
|
58868
|
-
).catch((
|
|
58869
|
-
logger2.warn(`Unable to add response to browser cache: ${
|
|
58868
|
+
).catch((err49) => {
|
|
58869
|
+
logger2.warn(`Unable to add response to browser cache: ${err49}.`);
|
|
58870
58870
|
});
|
|
58871
58871
|
}
|
|
58872
58872
|
}
|
|
@@ -59049,9 +59049,9 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
|
|
|
59049
59049
|
INFLIGHT_LOADS.delete(key);
|
|
59050
59050
|
return result;
|
|
59051
59051
|
},
|
|
59052
|
-
(
|
|
59052
|
+
(err49) => {
|
|
59053
59053
|
INFLIGHT_LOADS.delete(key);
|
|
59054
|
-
throw
|
|
59054
|
+
throw err49;
|
|
59055
59055
|
}
|
|
59056
59056
|
);
|
|
59057
59057
|
INFLIGHT_LOADS.set(key, pending);
|
|
@@ -61126,8 +61126,8 @@ async function ensureWasmLoaded() {
|
|
|
61126
61126
|
ONNX_ENV.wasm.wasmBinary = wasmBinary;
|
|
61127
61127
|
wasmBinaryLoaded = true;
|
|
61128
61128
|
}
|
|
61129
|
-
} catch (
|
|
61130
|
-
logger2.warn("Failed to pre-load WASM binary:",
|
|
61129
|
+
} catch (err49) {
|
|
61130
|
+
logger2.warn("Failed to pre-load WASM binary:", err49);
|
|
61131
61131
|
}
|
|
61132
61132
|
})() : Promise.resolve(),
|
|
61133
61133
|
// Load and cache the WASM factory as a blob URL
|
|
@@ -61137,8 +61137,8 @@ async function ensureWasmLoaded() {
|
|
|
61137
61137
|
if (wasmFactoryBlob) {
|
|
61138
61138
|
ONNX_ENV.wasm.wasmPaths.mjs = wasmFactoryBlob;
|
|
61139
61139
|
}
|
|
61140
|
-
} catch (
|
|
61141
|
-
logger2.warn("Failed to pre-load WASM factory:",
|
|
61140
|
+
} catch (err49) {
|
|
61141
|
+
logger2.warn("Failed to pre-load WASM factory:", err49);
|
|
61142
61142
|
}
|
|
61143
61143
|
})() : Promise.resolve()
|
|
61144
61144
|
]);
|
|
@@ -68281,14 +68281,14 @@ var init_transformers_node = __esm({
|
|
|
68281
68281
|
try {
|
|
68282
68282
|
const evaluated = this.evaluateBlock(node.body, scope8);
|
|
68283
68283
|
result += evaluated.value;
|
|
68284
|
-
} catch (
|
|
68285
|
-
if (
|
|
68284
|
+
} catch (err49) {
|
|
68285
|
+
if (err49 instanceof ContinueControl) {
|
|
68286
68286
|
continue;
|
|
68287
68287
|
}
|
|
68288
|
-
if (
|
|
68288
|
+
if (err49 instanceof BreakControl) {
|
|
68289
68289
|
break;
|
|
68290
68290
|
}
|
|
68291
|
-
throw
|
|
68291
|
+
throw err49;
|
|
68292
68292
|
}
|
|
68293
68293
|
noIteration = false;
|
|
68294
68294
|
}
|
|
@@ -68493,7 +68493,7 @@ var init_transformers_node = __esm({
|
|
|
68493
68493
|
start(controller) {
|
|
68494
68494
|
stream.on("data", (chunk2) => controller.enqueue(chunk2));
|
|
68495
68495
|
stream.on("end", () => controller.close());
|
|
68496
|
-
stream.on("error", (
|
|
68496
|
+
stream.on("error", (err49) => controller.error(err49));
|
|
68497
68497
|
},
|
|
68498
68498
|
cancel() {
|
|
68499
68499
|
stream.destroy();
|
|
@@ -68770,9 +68770,9 @@ var init_transformers_node = __esm({
|
|
|
68770
68770
|
break;
|
|
68771
68771
|
}
|
|
68772
68772
|
await new Promise((resolve4, reject) => {
|
|
68773
|
-
fileStream.write(value, (
|
|
68774
|
-
if (
|
|
68775
|
-
reject(
|
|
68773
|
+
fileStream.write(value, (err49) => {
|
|
68774
|
+
if (err49) {
|
|
68775
|
+
reject(err49);
|
|
68776
68776
|
return;
|
|
68777
68777
|
}
|
|
68778
68778
|
resolve4();
|
|
@@ -68783,7 +68783,7 @@ var init_transformers_node = __esm({
|
|
|
68783
68783
|
progress_callback?.({ progress, loaded, total });
|
|
68784
68784
|
}
|
|
68785
68785
|
await new Promise((resolve4, reject) => {
|
|
68786
|
-
fileStream.close((
|
|
68786
|
+
fileStream.close((err49) => err49 ? reject(err49) : resolve4());
|
|
68787
68787
|
});
|
|
68788
68788
|
await fs310.promises.rename(tmpPath, filePath);
|
|
68789
68789
|
} catch (error) {
|
|
@@ -90763,7 +90763,7 @@ var require_package2 = __commonJS({
|
|
|
90763
90763
|
"package.json"(exports, module) {
|
|
90764
90764
|
module.exports = {
|
|
90765
90765
|
name: "trace-mcp",
|
|
90766
|
-
version: "1.
|
|
90766
|
+
version: "1.22.0",
|
|
90767
90767
|
mcpName: "io.github.nikolai-vysotskyi/trace-mcp",
|
|
90768
90768
|
description: "Framework-aware code intelligence MCP server \u2014 48+ frameworks, 68 languages",
|
|
90769
90769
|
type: "module",
|
|
@@ -90785,7 +90785,7 @@ var require_package2 = __commonJS({
|
|
|
90785
90785
|
engines: {
|
|
90786
90786
|
node: ">=20.0.0"
|
|
90787
90787
|
},
|
|
90788
|
-
license: "
|
|
90788
|
+
license: "MIT",
|
|
90789
90789
|
author: "Nikolai Vysotskyi",
|
|
90790
90790
|
repository: {
|
|
90791
90791
|
type: "git",
|
|
@@ -90855,9 +90855,9 @@ init_schema();
|
|
|
90855
90855
|
init_store();
|
|
90856
90856
|
init_registry();
|
|
90857
90857
|
init_config();
|
|
90858
|
-
import { Command as
|
|
90859
|
-
import
|
|
90860
|
-
import
|
|
90858
|
+
import { Command as Command18 } from "commander";
|
|
90859
|
+
import path133 from "path";
|
|
90860
|
+
import fs122 from "fs";
|
|
90861
90861
|
import { randomUUID } from "crypto";
|
|
90862
90862
|
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
90863
90863
|
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
|
|
@@ -90869,7 +90869,7 @@ import https from "https";
|
|
|
90869
90869
|
import { spawnSync } from "child_process";
|
|
90870
90870
|
import path65 from "path";
|
|
90871
90871
|
import fs66 from "fs";
|
|
90872
|
-
var CURRENT_VERSION = true ? "1.
|
|
90872
|
+
var CURRENT_VERSION = true ? "1.22.0" : "0.0.0-dev";
|
|
90873
90873
|
var UPDATE_CACHE_PATH = path65.join(TRACE_MCP_HOME, "update-check.json");
|
|
90874
90874
|
function readCache() {
|
|
90875
90875
|
try {
|
|
@@ -91007,15 +91007,15 @@ async function runPostUpdateMigrations() {
|
|
|
91007
91007
|
installPrecompactHook2({ global: true });
|
|
91008
91008
|
installWorktreeHook2({ global: true });
|
|
91009
91009
|
logger.info("Post-update: hooks upgraded");
|
|
91010
|
-
} catch (
|
|
91011
|
-
logger.warn({ error:
|
|
91010
|
+
} catch (err49) {
|
|
91011
|
+
logger.warn({ error: err49 }, "Post-update: hook upgrade failed (non-fatal)");
|
|
91012
91012
|
}
|
|
91013
91013
|
}
|
|
91014
91014
|
try {
|
|
91015
91015
|
updateClaudeMd2(process.cwd(), { scope: "global" });
|
|
91016
91016
|
logger.info("Post-update: CLAUDE.md updated");
|
|
91017
|
-
} catch (
|
|
91018
|
-
logger.warn({ error:
|
|
91017
|
+
} catch (err49) {
|
|
91018
|
+
logger.warn({ error: err49 }, "Post-update: CLAUDE.md update failed (non-fatal)");
|
|
91019
91019
|
}
|
|
91020
91020
|
const projects = listProjects3();
|
|
91021
91021
|
if (projects.length > 0) {
|
|
@@ -91054,8 +91054,8 @@ async function runPostUpdateMigrations() {
|
|
|
91054
91054
|
{ root: proj.root, indexed: result.indexed, skipped: result.skipped, errors: result.errors },
|
|
91055
91055
|
"Post-update: project reindexed"
|
|
91056
91056
|
);
|
|
91057
|
-
} catch (
|
|
91058
|
-
logger.warn({ root: proj.root, error:
|
|
91057
|
+
} catch (err49) {
|
|
91058
|
+
logger.warn({ root: proj.root, error: err49 }, "Post-update: project reindex failed (non-fatal)");
|
|
91059
91059
|
}
|
|
91060
91060
|
}
|
|
91061
91061
|
}
|
|
@@ -92883,8 +92883,8 @@ var SessionJournal = class _SessionJournal {
|
|
|
92883
92883
|
};
|
|
92884
92884
|
this.entries.push(entry);
|
|
92885
92885
|
if (tool === "get_symbol" || tool === "get_outline") {
|
|
92886
|
-
const
|
|
92887
|
-
if (
|
|
92886
|
+
const path134 = params.path ?? params.file_path ?? "";
|
|
92887
|
+
if (path134) this.filesRead.add(path134);
|
|
92888
92888
|
}
|
|
92889
92889
|
if (resultCount === 0 && this.isSearchTool(tool)) {
|
|
92890
92890
|
this.zeroResultQueries.set(hash, summary);
|
|
@@ -93625,6 +93625,7 @@ var COMPACT_CORE_PARAMS = {
|
|
|
93625
93625
|
scan_security: ["file_pattern", "rules"],
|
|
93626
93626
|
check_quality_gates: ["scope"],
|
|
93627
93627
|
taint_analysis: ["source_symbol_id", "file_pattern"],
|
|
93628
|
+
export_security_context: ["scope", "depth"],
|
|
93628
93629
|
audit_config: [],
|
|
93629
93630
|
// Framework
|
|
93630
93631
|
get_request_flow: ["route", "method"],
|
|
@@ -93650,6 +93651,86 @@ var COMPACT_CORE_PARAMS = {
|
|
|
93650
93651
|
batch: ["calls"]
|
|
93651
93652
|
};
|
|
93652
93653
|
|
|
93654
|
+
// src/server/tool-annotations.ts
|
|
93655
|
+
var READ_ONLY = {
|
|
93656
|
+
readOnlyHint: true,
|
|
93657
|
+
destructiveHint: false,
|
|
93658
|
+
idempotentHint: true,
|
|
93659
|
+
openWorldHint: false
|
|
93660
|
+
};
|
|
93661
|
+
var INDEX_MUTATING = {
|
|
93662
|
+
readOnlyHint: false,
|
|
93663
|
+
destructiveHint: false,
|
|
93664
|
+
idempotentHint: true,
|
|
93665
|
+
openWorldHint: false
|
|
93666
|
+
};
|
|
93667
|
+
var FILE_WRITING = {
|
|
93668
|
+
readOnlyHint: false,
|
|
93669
|
+
destructiveHint: false,
|
|
93670
|
+
idempotentHint: false,
|
|
93671
|
+
openWorldHint: false
|
|
93672
|
+
};
|
|
93673
|
+
var FILE_DESTRUCTIVE = {
|
|
93674
|
+
readOnlyHint: false,
|
|
93675
|
+
destructiveHint: true,
|
|
93676
|
+
idempotentHint: false,
|
|
93677
|
+
openWorldHint: false
|
|
93678
|
+
};
|
|
93679
|
+
var OUTPUT_WRITING = {
|
|
93680
|
+
readOnlyHint: false,
|
|
93681
|
+
destructiveHint: false,
|
|
93682
|
+
idempotentHint: true,
|
|
93683
|
+
openWorldHint: false
|
|
93684
|
+
};
|
|
93685
|
+
var RUNTIME_READ = {
|
|
93686
|
+
readOnlyHint: true,
|
|
93687
|
+
destructiveHint: false,
|
|
93688
|
+
idempotentHint: true,
|
|
93689
|
+
openWorldHint: true
|
|
93690
|
+
};
|
|
93691
|
+
var OVERRIDES = {
|
|
93692
|
+
// ── Refactoring: file-destructive ──
|
|
93693
|
+
apply_codemod: FILE_DESTRUCTIVE,
|
|
93694
|
+
remove_dead_code: FILE_DESTRUCTIVE,
|
|
93695
|
+
// ── Refactoring: file-writing (non-destructive) ──
|
|
93696
|
+
apply_rename: FILE_WRITING,
|
|
93697
|
+
apply_move: FILE_WRITING,
|
|
93698
|
+
change_signature: FILE_WRITING,
|
|
93699
|
+
extract_function: FILE_WRITING,
|
|
93700
|
+
// ── Output generation (writes files but doesn't modify source) ──
|
|
93701
|
+
generate_docs: OUTPUT_WRITING,
|
|
93702
|
+
generate_sbom: OUTPUT_WRITING,
|
|
93703
|
+
visualize_graph: OUTPUT_WRITING,
|
|
93704
|
+
visualize_subproject_topology: OUTPUT_WRITING,
|
|
93705
|
+
// ── Index / store mutation (idempotent) ──
|
|
93706
|
+
reindex: INDEX_MUTATING,
|
|
93707
|
+
register_edit: INDEX_MUTATING,
|
|
93708
|
+
embed_repo: INDEX_MUTATING,
|
|
93709
|
+
subproject_add_repo: INDEX_MUTATING,
|
|
93710
|
+
subproject_sync: INDEX_MUTATING,
|
|
93711
|
+
invalidate_decision: INDEX_MUTATING,
|
|
93712
|
+
index_sessions: INDEX_MUTATING,
|
|
93713
|
+
mine_sessions: INDEX_MUTATING,
|
|
93714
|
+
refresh_co_changes: INDEX_MUTATING,
|
|
93715
|
+
detect_communities: INDEX_MUTATING,
|
|
93716
|
+
// ── Store mutation (not idempotent — creates new records) ──
|
|
93717
|
+
add_decision: {
|
|
93718
|
+
readOnlyHint: false,
|
|
93719
|
+
destructiveHint: false,
|
|
93720
|
+
idempotentHint: false,
|
|
93721
|
+
openWorldHint: false
|
|
93722
|
+
},
|
|
93723
|
+
// ── Runtime intelligence (reads external OTLP data) ──
|
|
93724
|
+
get_runtime_profile: RUNTIME_READ,
|
|
93725
|
+
get_runtime_call_graph: RUNTIME_READ,
|
|
93726
|
+
get_endpoint_analytics: RUNTIME_READ,
|
|
93727
|
+
get_runtime_deps: RUNTIME_READ
|
|
93728
|
+
};
|
|
93729
|
+
var DEFAULT_ANNOTATIONS = READ_ONLY;
|
|
93730
|
+
function getToolAnnotations(toolName) {
|
|
93731
|
+
return OVERRIDES[toolName] ?? DEFAULT_ANNOTATIONS;
|
|
93732
|
+
}
|
|
93733
|
+
|
|
93653
93734
|
// src/server/tool-gate.ts
|
|
93654
93735
|
function applyParamOverrides(schema, toolOverrides, sharedOverrides) {
|
|
93655
93736
|
for (const paramName of Object.keys(schema)) {
|
|
@@ -93811,9 +93892,23 @@ function installToolGate(server, config, activePreset, savings, journal, j3, ext
|
|
|
93811
93892
|
return result;
|
|
93812
93893
|
};
|
|
93813
93894
|
}
|
|
93895
|
+
const annotations = getToolAnnotations(name);
|
|
93896
|
+
const lastIdx = args.length - 1;
|
|
93897
|
+
if (typeof args[lastIdx] === "function") {
|
|
93898
|
+
args.splice(lastIdx, 0, annotations);
|
|
93899
|
+
}
|
|
93814
93900
|
return _originalTool(...args);
|
|
93815
93901
|
});
|
|
93816
|
-
|
|
93902
|
+
const annotatedOriginalTool = ((...oArgs) => {
|
|
93903
|
+
const oName = oArgs[0];
|
|
93904
|
+
const ann = getToolAnnotations(oName);
|
|
93905
|
+
const oLastIdx = oArgs.length - 1;
|
|
93906
|
+
if (typeof oArgs[oLastIdx] === "function") {
|
|
93907
|
+
oArgs.splice(oLastIdx, 0, ann);
|
|
93908
|
+
}
|
|
93909
|
+
return _originalTool(...oArgs);
|
|
93910
|
+
});
|
|
93911
|
+
return { _originalTool: annotatedOriginalTool, registeredToolNames, toolHandlers };
|
|
93817
93912
|
}
|
|
93818
93913
|
|
|
93819
93914
|
// src/tools/register/core.ts
|
|
@@ -93964,7 +94059,7 @@ function registerCoreTools(server, ctx) {
|
|
|
93964
94059
|
const { store, registry, config, projectRoot, guardPath, j: j3, jh, journal, vectorStore, embeddingService, progress } = ctx;
|
|
93965
94060
|
server.tool(
|
|
93966
94061
|
"get_index_health",
|
|
93967
|
-
"Get index status, statistics, health information, and pipeline progress (indexing, summarization, embedding)",
|
|
94062
|
+
"Get index status, statistics, health information, and pipeline progress (indexing, summarization, embedding). Read-only, no side effects. Use to verify the index is ready before running queries. Returns JSON: { totalFiles, totalSymbols, languages, frameworks, pipelineProgress }.",
|
|
93968
94063
|
{},
|
|
93969
94064
|
async () => {
|
|
93970
94065
|
const result = getIndexHealth(store, config);
|
|
@@ -93976,7 +94071,7 @@ function registerCoreTools(server, ctx) {
|
|
|
93976
94071
|
);
|
|
93977
94072
|
server.tool(
|
|
93978
94073
|
"reindex",
|
|
93979
|
-
"Trigger (re)indexing of the project or a subdirectory",
|
|
94074
|
+
"Trigger (re)indexing of the project or a subdirectory. Mutates the local index (SQLite). Use after major file changes; for single-file updates prefer register_edit instead. Idempotent \u2014 safe to re-run. Returns JSON: { status, totalFiles, indexed, skipped, errors, durationMs }.",
|
|
93980
94075
|
{
|
|
93981
94076
|
path: z3.string().max(512).optional().describe("Subdirectory to index (default: project root)"),
|
|
93982
94077
|
force: z3.boolean().optional().describe("Skip hash check and reindex all files")
|
|
@@ -93994,7 +94089,7 @@ function registerCoreTools(server, ctx) {
|
|
|
93994
94089
|
);
|
|
93995
94090
|
server.tool(
|
|
93996
94091
|
"embed_repo",
|
|
93997
|
-
"Precompute and cache symbol embeddings for semantic / hybrid search. Embeddings are also computed lazily on first semantic query, but calling this once after a fresh index avoids the first-query latency spike. Requires AI provider to be enabled in config (ollama/openai). Set force=true to drop and recompute all existing embeddings.",
|
|
94092
|
+
"Precompute and cache symbol embeddings for semantic / hybrid search. Embeddings are also computed lazily on first semantic query, but calling this once after a fresh index avoids the first-query latency spike. Requires AI provider to be enabled in config (ollama/openai). Set force=true to drop and recompute all existing embeddings. Mutates the vector store; idempotent. Use after reindex when you plan to use semantic search. Returns JSON: { status, indexed_this_run, total_embedded, coverage_pct, duration_ms }.",
|
|
93998
94093
|
{
|
|
93999
94094
|
batch_size: z3.number().int().min(1).max(500).optional().describe("Symbols per embedding API batch (default 50)"),
|
|
94000
94095
|
force: z3.boolean().optional().describe("Drop existing embeddings and re-embed everything (default false \u2014 incremental)")
|
|
@@ -94049,7 +94144,7 @@ function registerCoreTools(server, ctx) {
|
|
|
94049
94144
|
);
|
|
94050
94145
|
server.tool(
|
|
94051
94146
|
"register_edit",
|
|
94052
|
-
"Notify trace-mcp that a file was edited. Reindexes the single file and invalidates search caches. Call after Edit/Write to keep index fresh. Also checks for duplicate symbols \u2014 if `_duplication_warnings` appears in the response, you may be recreating existing logic; review the referenced symbols before continuing.",
|
|
94147
|
+
"Notify trace-mcp that a file was edited. Reindexes the single file and invalidates search caches. Call after Edit/Write to keep index fresh \u2014 much lighter than full reindex. Also checks for duplicate symbols \u2014 if `_duplication_warnings` appears in the response, you may be recreating existing logic; review the referenced symbols before continuing. Mutates the index; idempotent. Returns JSON: { status, file, totalFiles, indexed, _duplication_warnings? }.",
|
|
94053
94148
|
{
|
|
94054
94149
|
file_path: z3.string().min(1).max(512).describe("Relative path to the edited file")
|
|
94055
94150
|
},
|
|
@@ -94085,7 +94180,7 @@ function registerCoreTools(server, ctx) {
|
|
|
94085
94180
|
);
|
|
94086
94181
|
server.tool(
|
|
94087
94182
|
"get_project_map",
|
|
94088
|
-
"Get project overview: detected frameworks, languages, file counts, structure. Call with summary_only=true at session start to orient yourself before diving into code.",
|
|
94183
|
+
"Get project overview: detected frameworks, languages, file counts, structure. Read-only, no side effects. Call with summary_only=true at session start to orient yourself before diving into code. Use instead of manual ls/find. Returns JSON: { frameworks, languages, fileCount, symbolCount, structure }.",
|
|
94089
94184
|
{
|
|
94090
94185
|
summary_only: z3.boolean().optional().describe("Return only framework list + counts (default false)")
|
|
94091
94186
|
},
|
|
@@ -94097,7 +94192,7 @@ function registerCoreTools(server, ctx) {
|
|
|
94097
94192
|
);
|
|
94098
94193
|
server.tool(
|
|
94099
94194
|
"get_env_vars",
|
|
94100
|
-
"List environment variable keys from .env files with inferred value types/formats. Never exposes actual values \u2014 only keys, types (string/number/boolean/empty), and formats (url/email/ip/path/uuid/json/base64/csv/dsn/etc). Use to understand project configuration without accessing
|
|
94195
|
+
"List environment variable keys from .env files with inferred value types/formats. Never exposes actual values \u2014 only keys, types (string/number/boolean/empty), and formats (url/email/ip/path/uuid/json/base64/csv/dsn/etc). Read-only, no side effects, safe for secrets. Use to understand project configuration without accessing actual values. Returns JSON grouped by file: { [file]: [{ key, type, format, comment }] }.",
|
|
94101
94196
|
{
|
|
94102
94197
|
pattern: z3.string().max(256).optional().describe('Filter keys by pattern (e.g. "DB_" or "REDIS")'),
|
|
94103
94198
|
file: z3.string().max(512).optional().describe("Filter by specific .env file path")
|
|
@@ -95200,9 +95295,9 @@ function deduplicateByFile(rawDeps) {
|
|
|
95200
95295
|
}
|
|
95201
95296
|
}
|
|
95202
95297
|
const result = [];
|
|
95203
|
-
for (const [
|
|
95298
|
+
for (const [path134, entry] of fileMap) {
|
|
95204
95299
|
const dep = {
|
|
95205
|
-
path:
|
|
95300
|
+
path: path134,
|
|
95206
95301
|
edgeTypes: [...entry.edgeTypes],
|
|
95207
95302
|
depth: entry.depth
|
|
95208
95303
|
};
|
|
@@ -95781,8 +95876,8 @@ var FileReadCache = class {
|
|
|
95781
95876
|
if (buf === void 0) {
|
|
95782
95877
|
try {
|
|
95783
95878
|
const absPath = path77.resolve(this.rootPath, file.path);
|
|
95784
|
-
const
|
|
95785
|
-
buf =
|
|
95879
|
+
const fs123 = __require("fs");
|
|
95880
|
+
buf = fs123.readFileSync(absPath);
|
|
95786
95881
|
} catch {
|
|
95787
95882
|
buf = null;
|
|
95788
95883
|
}
|
|
@@ -98594,7 +98689,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98594
98689
|
const { store, projectRoot, guardPath, j: j3, jh, savings, vectorStore, embeddingService, reranker, markExplored, decisionStore } = ctx;
|
|
98595
98690
|
server.tool(
|
|
98596
98691
|
"get_symbol",
|
|
98597
|
-
"Look up a symbol by symbol_id or FQN and return its source code. Use instead of Read when you need one specific function/class/method \u2014 returns only the symbol, not the whole file.",
|
|
98692
|
+
"Look up a symbol by symbol_id or FQN and return its source code. Use instead of Read when you need one specific function/class/method \u2014 returns only the symbol, not the whole file. For multiple symbols at once, prefer get_context_bundle. Read-only. Returns JSON: { symbol_id, name, kind, fqn, signature, file, line_start, line_end, source }.",
|
|
98598
98693
|
{
|
|
98599
98694
|
symbol_id: z4.string().max(512).optional().describe("The symbol_id to look up"),
|
|
98600
98695
|
fqn: z4.string().max(512).optional().describe("The fully qualified name to look up"),
|
|
@@ -98629,7 +98724,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98629
98724
|
);
|
|
98630
98725
|
server.tool(
|
|
98631
98726
|
"search",
|
|
98632
|
-
'Search symbols by name, kind, or text. Use instead of Grep when looking for functions, classes, methods, or variables in source code. Supports kind/language/file_pattern filters. Set fuzzy=true for typo-tolerant search (trigram + Levenshtein). For natural-language / conceptual queries set semantic="on" (requires an AI provider configured + embed_repo run once). Set fusion=true for Signal Fusion \u2014 multi-channel ranking (BM25 + PageRank + embeddings + identity match) via Weighted Reciprocal Rank fusion.',
|
|
98727
|
+
'Search symbols by name, kind, or text. Use instead of Grep when looking for functions, classes, methods, or variables in source code. For raw text/string/comment search use search_text instead. For finding who references a known symbol use find_usages instead. Supports kind/language/file_pattern filters. Set fuzzy=true for typo-tolerant search (trigram + Levenshtein). For natural-language / conceptual queries set semantic="on" (requires an AI provider configured + embed_repo run once). Set fusion=true for Signal Fusion \u2014 multi-channel ranking (BM25 + PageRank + embeddings + identity match) via Weighted Reciprocal Rank fusion. Read-only. Returns JSON: { items: [{ symbol_id, name, kind, fqn, signature, file, line, score }], total, search_mode }.',
|
|
98633
98728
|
{
|
|
98634
98729
|
query: z4.string().min(1).max(500).describe("Search query"),
|
|
98635
98730
|
kind: z4.string().max(64).optional().describe("Filter by symbol kind (class, method, function, etc.)"),
|
|
@@ -98744,7 +98839,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98744
98839
|
);
|
|
98745
98840
|
server.tool(
|
|
98746
98841
|
"get_outline",
|
|
98747
|
-
"Get all symbols for a file (signatures only, no bodies). Use instead of Read to understand a file before editing \u2014 much cheaper in tokens.",
|
|
98842
|
+
"Get all symbols for a file (signatures only, no bodies). Use instead of Read to understand a file before editing \u2014 much cheaper in tokens. For reading one symbol's source, follow up with get_symbol. Read-only. Returns JSON: { path, language, symbols: [{ symbolId, name, kind, signature, lineStart, lineEnd }] }.",
|
|
98748
98843
|
{
|
|
98749
98844
|
path: z4.string().max(512).describe("Relative file path")
|
|
98750
98845
|
},
|
|
@@ -98770,7 +98865,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98770
98865
|
);
|
|
98771
98866
|
server.tool(
|
|
98772
98867
|
"get_change_impact",
|
|
98773
|
-
"Full change impact report: risk score + mitigations, breaking change detection, enriched dependents (complexity, coverage, exports), module groups, affected tests, co-change hidden couplings. Supports diff-aware mode via symbol_ids to scope analysis to only changed symbols.",
|
|
98868
|
+
"Full change impact report: risk score + mitigations, breaking change detection, enriched dependents (complexity, coverage, exports), module groups, affected tests, co-change hidden couplings. Supports diff-aware mode via symbol_ids to scope analysis to only changed symbols. Use before modifying code to understand blast radius. For quick risk assessment without full report, use assess_change_risk instead. Read-only. Returns JSON: { risk, dependents, affectedTests, breakingChanges, totalAffected }.",
|
|
98774
98869
|
{
|
|
98775
98870
|
file_path: z4.string().max(512).optional().describe("Relative file path to analyze"),
|
|
98776
98871
|
symbol_id: z4.string().max(512).optional().describe("Symbol ID to analyze"),
|
|
@@ -98813,7 +98908,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98813
98908
|
);
|
|
98814
98909
|
server.tool(
|
|
98815
98910
|
"get_feature_context",
|
|
98816
|
-
"Search code by keyword/topic \u2192 returns ranked source code snippets within a token budget. Use when you need to READ actual code for a concept or feature.",
|
|
98911
|
+
"Search code by keyword/topic \u2192 returns ranked source code snippets within a token budget. Use when you need to READ actual code for a concept or feature. For structured task context with tests and entry points, use get_task_context instead. For symbol metadata without source, use search. Read-only. Returns JSON: { items: [{ symbol_id, name, file, source, score }], token_usage }.",
|
|
98817
98912
|
{
|
|
98818
98913
|
description: z4.string().min(1).max(2e3).describe("Natural language description of the feature to find context for"),
|
|
98819
98914
|
token_budget: z4.number().int().min(100).max(1e5).optional().describe("Max tokens for assembled context (default 4000)")
|
|
@@ -98832,7 +98927,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98832
98927
|
);
|
|
98833
98928
|
server.tool(
|
|
98834
98929
|
"suggest_queries",
|
|
98835
|
-
"Onboarding helper: shows top imported files, most connected symbols (PageRank), language stats, and example tool calls. Call this first when exploring an unfamiliar project.",
|
|
98930
|
+
"Onboarding helper: shows top imported files, most connected symbols (PageRank), language stats, and example tool calls. Call this first when exploring an unfamiliar project. For a structured project map use get_project_map instead. Read-only. Returns JSON: { topFiles, topSymbols, languageStats, exampleQueries }.",
|
|
98836
98931
|
{},
|
|
98837
98932
|
async () => {
|
|
98838
98933
|
const result = suggestQueries(store);
|
|
@@ -98841,7 +98936,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98841
98936
|
);
|
|
98842
98937
|
server.tool(
|
|
98843
98938
|
"get_related_symbols",
|
|
98844
|
-
"Find symbols related via co-location (same file), shared importers, and name similarity.
|
|
98939
|
+
"Find symbols related via co-location (same file), shared importers, and name similarity. Use when exploring a symbol to discover sibling code. For call-graph relationships use get_call_graph instead; for all usages use find_usages. Read-only. Returns JSON: { related: [{ symbol_id, name, kind, file, relation_type, score }] }.",
|
|
98845
98940
|
{
|
|
98846
98941
|
symbol_id: z4.string().max(512).describe("Symbol ID to find related symbols for"),
|
|
98847
98942
|
max_results: z4.number().int().min(1).max(100).optional().describe("Max results (default 20)")
|
|
@@ -98856,7 +98951,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98856
98951
|
);
|
|
98857
98952
|
server.tool(
|
|
98858
98953
|
"get_context_bundle",
|
|
98859
|
-
"Get a symbol's source code + its import dependencies + optional callers, packed within a token budget. Supports batch queries with shared-import deduplication.",
|
|
98954
|
+
"Get a symbol's source code + its import dependencies + optional callers, packed within a token budget. Supports batch queries with shared-import deduplication. Use instead of chaining get_symbol calls \u2014 deduplicates shared imports across symbols. For a single symbol without imports, get_symbol is lighter. Read-only. Returns JSON: { primary: [{ symbol_id, file, source }], imports: [{ file, source }], token_usage }.",
|
|
98860
98955
|
{
|
|
98861
98956
|
symbol_id: z4.string().max(512).optional().describe("Single symbol ID"),
|
|
98862
98957
|
symbol_ids: z4.array(z4.string().max(512)).max(20).optional().describe("Batch: multiple symbol IDs"),
|
|
@@ -98887,7 +98982,7 @@ function registerNavigationTools(server, ctx) {
|
|
|
98887
98982
|
);
|
|
98888
98983
|
server.tool(
|
|
98889
98984
|
"get_task_context",
|
|
98890
|
-
"All-in-one context for starting a dev task: execution paths, tests, entry points, adapted by task type. Use as your FIRST call when beginning any new task.",
|
|
98985
|
+
"All-in-one context for starting a dev task: execution paths, tests, entry points, adapted by task type. Use as your FIRST call when beginning any new task \u2014 replaces manual chaining of search \u2192 get_symbol \u2192 Read. For narrower feature-code lookup use get_feature_context instead. Read-only. Returns JSON: { symbols: [{ symbol_id, name, file, source }], tests, entryPoints, taskType, token_usage }.",
|
|
98891
98986
|
{
|
|
98892
98987
|
task: z4.string().min(1).max(2e3).describe("Natural language description of the task"),
|
|
98893
98988
|
token_budget: z4.number().int().min(100).max(1e5).optional().describe("Max tokens (default 8000)"),
|
|
@@ -100556,7 +100651,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100556
100651
|
if (has("vue", "nuxt", "inertia")) {
|
|
100557
100652
|
server.tool(
|
|
100558
100653
|
"get_component_tree",
|
|
100559
|
-
"Build a component render tree starting from a given .vue file",
|
|
100654
|
+
"Build a component render tree starting from a given .vue file. Use to visualize parent-child component hierarchy. Read-only. Returns JSON: { root, children: [{ component, props, slots, depth }], totalComponents }.",
|
|
100560
100655
|
{
|
|
100561
100656
|
component_path: z5.string().max(512).describe("Relative path to the root .vue file"),
|
|
100562
100657
|
depth: z5.number().int().min(1).max(20).optional().describe("Max tree depth (default 3)"),
|
|
@@ -100576,7 +100671,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100576
100671
|
if (has("express", "nestjs", "laravel", "fastapi", "flask", "drf", "spring", "rails", "fastify", "hono", "trpc")) {
|
|
100577
100672
|
server.tool(
|
|
100578
100673
|
"get_request_flow",
|
|
100579
|
-
"Trace request flow for a URL+method: route \u2192 middleware \u2192 controller \u2192 service (Laravel/Express/NestJS/Fastify/Hono/tRPC/FastAPI/Flask/DRF)",
|
|
100674
|
+
"Trace request flow for a URL+method: route \u2192 middleware \u2192 controller \u2192 service (Laravel/Express/NestJS/Fastify/Hono/tRPC/FastAPI/Flask/DRF). Use to understand how a request is handled end-to-end. For middleware-only analysis use get_middleware_chain instead. Read-only. Returns JSON: { route, steps: [{ type, symbol_id, name, file }] }.",
|
|
100580
100675
|
{
|
|
100581
100676
|
url: z5.string().max(512).describe("Route URL (e.g. /api/users)"),
|
|
100582
100677
|
method: z5.string().max(64).optional().describe("HTTP method (default GET)")
|
|
@@ -100593,7 +100688,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100593
100688
|
if (has("express", "nestjs", "fastapi", "flask", "spring")) {
|
|
100594
100689
|
server.tool(
|
|
100595
100690
|
"get_middleware_chain",
|
|
100596
|
-
"Trace middleware chain for a route URL (Express/NestJS/FastAPI/Flask)",
|
|
100691
|
+
"Trace middleware chain for a route URL (Express/NestJS/FastAPI/Flask). Use when you only need the middleware stack, not the full request flow. For full route\u2192controller\u2192service flow use get_request_flow instead. Read-only. Returns JSON: { url, middlewares: [{ name, file, order }] }.",
|
|
100597
100692
|
{
|
|
100598
100693
|
url: z5.string().max(512).describe("Route URL to trace middleware for")
|
|
100599
100694
|
},
|
|
@@ -100609,7 +100704,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100609
100704
|
if (has("nestjs")) {
|
|
100610
100705
|
server.tool(
|
|
100611
100706
|
"get_module_graph",
|
|
100612
|
-
"Build NestJS module dependency graph (module -> imports -> controllers -> providers -> exports)",
|
|
100707
|
+
"Build NestJS module dependency graph (module -> imports -> controllers -> providers -> exports). Use to understand NestJS module structure and DI wiring. For provider-level DI tree use get_di_tree instead. Read-only. Returns JSON: { module, imports, controllers, providers, exports, edges }.",
|
|
100613
100708
|
{
|
|
100614
100709
|
module_name: z5.string().max(256).describe("NestJS module class name (e.g. AppModule)")
|
|
100615
100710
|
},
|
|
@@ -100623,7 +100718,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100623
100718
|
);
|
|
100624
100719
|
server.tool(
|
|
100625
100720
|
"get_di_tree",
|
|
100626
|
-
"Trace NestJS dependency injection tree (what a service injects + who injects it)",
|
|
100721
|
+
"Trace NestJS dependency injection tree (what a service injects + who injects it). Use to understand DI wiring for a specific provider. For module-level graph use get_module_graph instead. Read-only. Returns JSON: { service, injects: [{ name, kind }], injected_by: [{ name, kind }] }.",
|
|
100627
100722
|
{
|
|
100628
100723
|
service_name: z5.string().max(256).describe("NestJS service/provider class name")
|
|
100629
100724
|
},
|
|
@@ -100639,7 +100734,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100639
100734
|
if (has("react-native")) {
|
|
100640
100735
|
server.tool(
|
|
100641
100736
|
"get_navigation_graph",
|
|
100642
|
-
"Build React Native navigation tree from screens, navigators, and deep links",
|
|
100737
|
+
"Build React Native navigation tree from screens, navigators, and deep links. Use to understand app navigation structure. For details on a specific screen use get_screen_context instead. Read-only. Returns JSON: { navigators, screens, deepLinks, edges }.",
|
|
100643
100738
|
{},
|
|
100644
100739
|
async () => {
|
|
100645
100740
|
const result = getNavigationGraph(store);
|
|
@@ -100651,7 +100746,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100651
100746
|
);
|
|
100652
100747
|
server.tool(
|
|
100653
100748
|
"get_screen_context",
|
|
100654
|
-
"Get full context for a React Native screen: navigator, navigation edges, deep link, platform variants, native modules",
|
|
100749
|
+
"Get full context for a React Native screen: navigator, navigation edges, deep link, platform variants, native modules. Use to understand a specific screen before modifying it. For the full navigation tree use get_navigation_graph instead. Read-only. Returns JSON: { screen, navigator, deepLink, platformVariants, nativeModules, navigationEdges }.",
|
|
100655
100750
|
{
|
|
100656
100751
|
screen_name: z5.string().max(256).describe("Screen name (e.g. ProfileScreen or Profile)")
|
|
100657
100752
|
},
|
|
@@ -100667,7 +100762,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100667
100762
|
if (has("laravel", "mongoose", "sequelize", "prisma", "typeorm", "drizzle", "sqlalchemy")) {
|
|
100668
100763
|
server.tool(
|
|
100669
100764
|
"get_model_context",
|
|
100670
|
-
"Get full model context: relationships, schema, and metadata (Eloquent/Mongoose/Sequelize/SQLAlchemy/Prisma/TypeORM/Drizzle)",
|
|
100765
|
+
"Get full model context: relationships, schema, and metadata (Eloquent/Mongoose/Sequelize/SQLAlchemy/Prisma/TypeORM/Drizzle). Use to understand a specific ORM model. For raw table schema without ORM context use get_schema instead. Read-only. Returns JSON: { model, table, relationships: [{ type, related, foreignKey }], fields, metadata }.",
|
|
100671
100766
|
{
|
|
100672
100767
|
model_name: z5.string().max(256).describe("Model class name (e.g. User, Post)")
|
|
100673
100768
|
},
|
|
@@ -100681,7 +100776,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100681
100776
|
);
|
|
100682
100777
|
server.tool(
|
|
100683
100778
|
"get_schema",
|
|
100684
|
-
"Get database schema reconstructed from migrations or ORM model definitions",
|
|
100779
|
+
"Get database schema reconstructed from migrations or ORM model definitions. Use to understand table structure. For ORM-level context with relationships use get_model_context instead. Read-only. Returns JSON: { tables: [{ name, columns: [{ name, type, nullable, default }], indexes }] }.",
|
|
100685
100780
|
{
|
|
100686
100781
|
table_name: z5.string().max(256).optional().describe("Table/collection/model name (omit for all)")
|
|
100687
100782
|
},
|
|
@@ -100697,7 +100792,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100697
100792
|
if (has("laravel", "nestjs", "celery", "django", "socketio")) {
|
|
100698
100793
|
server.tool(
|
|
100699
100794
|
"get_event_graph",
|
|
100700
|
-
"Get event/signal/task dispatch graph (Laravel events, Django signals, NestJS events, Celery tasks, Socket.io events)",
|
|
100795
|
+
"Get event/signal/task dispatch graph (Laravel events, Django signals, NestJS events, Celery tasks, Socket.io events). Use to understand event-driven architecture and trace event producers/consumers. Read-only. Returns JSON: { events: [{ name, dispatchers, listeners, file }] }.",
|
|
100701
100796
|
{
|
|
100702
100797
|
event_name: z5.string().max(256).optional().describe("Filter to a specific event class name")
|
|
100703
100798
|
},
|
|
@@ -100712,7 +100807,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100712
100807
|
}
|
|
100713
100808
|
server.tool(
|
|
100714
100809
|
"find_usages",
|
|
100715
|
-
"Find all places that reference a symbol or file (imports, calls, renders, dispatches). Use instead of Grep for symbol usages \u2014 understands semantic relationships, not just text matches.",
|
|
100810
|
+
"Find all places that reference a symbol or file (imports, calls, renders, dispatches). Use instead of Grep for symbol usages \u2014 understands semantic relationships, not just text matches. For bidirectional call graph use get_call_graph instead. Read-only. Returns JSON: { references: [{ file, line, kind, context }], total }.",
|
|
100716
100811
|
{
|
|
100717
100812
|
symbol_id: z5.string().max(512).optional().describe("Symbol ID to find references for"),
|
|
100718
100813
|
fqn: z5.string().max(512).optional().describe("Fully qualified name to find references for"),
|
|
@@ -100746,7 +100841,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100746
100841
|
);
|
|
100747
100842
|
server.tool(
|
|
100748
100843
|
"get_call_graph",
|
|
100749
|
-
"Build a bidirectional call graph centered on a symbol (who calls it + what it calls). Use to understand control flow through a function.",
|
|
100844
|
+
"Build a bidirectional call graph centered on a symbol (who calls it + what it calls). Use to understand control flow through a function. For flat list of all references use find_usages instead. Read-only. Returns JSON: { root: { symbol_id, name, calls: [...], called_by: [...] } }.",
|
|
100750
100845
|
{
|
|
100751
100846
|
symbol_id: z5.string().max(512).optional().describe("Symbol ID to center the graph on"),
|
|
100752
100847
|
fqn: z5.string().max(512).optional().describe("Fully qualified name to center the graph on"),
|
|
@@ -100779,7 +100874,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100779
100874
|
);
|
|
100780
100875
|
server.tool(
|
|
100781
100876
|
"get_tests_for",
|
|
100782
|
-
"Find test files and test functions that cover a given symbol or file. Use instead of Glob/Grep \u2014 understands test-to-source mapping, not just filename conventions.",
|
|
100877
|
+
"Find test files and test functions that cover a given symbol or file. Use instead of Glob/Grep \u2014 understands test-to-source mapping, not just filename conventions. For project-wide test coverage gaps use get_untested_symbols instead. Read-only. Returns JSON: { tests: [{ file, testName, symbol_id }], total }.",
|
|
100783
100878
|
{
|
|
100784
100879
|
symbol_id: z5.string().max(512).optional().describe("Symbol ID to find tests for"),
|
|
100785
100880
|
fqn: z5.string().max(512).optional().describe("Fully qualified name to find tests for"),
|
|
@@ -100814,7 +100909,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100814
100909
|
if (has("laravel")) {
|
|
100815
100910
|
server.tool(
|
|
100816
100911
|
"get_livewire_context",
|
|
100817
|
-
"Get full context for a Livewire component: properties, actions, events, view, child components",
|
|
100912
|
+
"Get full context for a Livewire component: properties, actions, events, view, child components. Use to understand a specific Livewire component before modifying it. Read-only. Returns JSON: { component, properties, actions, events, view, children }.",
|
|
100818
100913
|
{
|
|
100819
100914
|
component_name: z5.string().max(256).describe("Livewire component class name or FQN (e.g. UserProfile or App\\Livewire\\UserProfile)")
|
|
100820
100915
|
},
|
|
@@ -100828,7 +100923,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100828
100923
|
);
|
|
100829
100924
|
server.tool(
|
|
100830
100925
|
"get_nova_resource",
|
|
100831
|
-
"Get full context for a Laravel Nova resource: model, fields, actions, filters, lenses, metrics",
|
|
100926
|
+
"Get full context for a Laravel Nova resource: model, fields, actions, filters, lenses, metrics. Use to understand a Nova admin resource before modifying it. Read-only. Returns JSON: { resource, model, fields, actions, filters, lenses, metrics }.",
|
|
100832
100927
|
{
|
|
100833
100928
|
resource_name: z5.string().max(256).describe("Nova resource class name or FQN (e.g. User or App\\Nova\\User)")
|
|
100834
100929
|
},
|
|
@@ -100844,7 +100939,7 @@ function registerFrameworkTools(server, ctx) {
|
|
|
100844
100939
|
if (has("zustand-redux")) {
|
|
100845
100940
|
server.tool(
|
|
100846
100941
|
"get_state_stores",
|
|
100847
|
-
"List all Zustand stores and Redux Toolkit slices with their state fields, actions/reducers, and dispatch sites",
|
|
100942
|
+
"List all Zustand stores and Redux Toolkit slices with their state fields, actions/reducers, and dispatch sites. Use to understand state management architecture. Read-only. Returns JSON: { stores: [{ type, name, handler, metadata }], dispatches, totalStores, totalDispatches }.",
|
|
100848
100943
|
{},
|
|
100849
100944
|
async () => {
|
|
100850
100945
|
const routes = store.getAllRoutes();
|
|
@@ -101697,7 +101792,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101697
101792
|
);
|
|
101698
101793
|
server.tool(
|
|
101699
101794
|
"get_implementations",
|
|
101700
|
-
"Find all classes that implement or extend a given interface or base class",
|
|
101795
|
+
"Find all classes that implement or extend a given interface or base class. Use when you know the interface name. For full hierarchy tree (ancestors + descendants) use get_type_hierarchy instead. Read-only. Returns JSON: { implementations: [{ symbol_id, name, kind, file, line }], total }.",
|
|
101701
101796
|
{
|
|
101702
101797
|
name: z6.string().max(256).describe("Interface or base class name (e.g. UserRepositoryInterface)")
|
|
101703
101798
|
},
|
|
@@ -101723,7 +101818,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101723
101818
|
);
|
|
101724
101819
|
server.tool(
|
|
101725
101820
|
"get_api_surface",
|
|
101726
|
-
"List all exported symbols (public API) of a file or matching files",
|
|
101821
|
+
"List all exported symbols (public API) of a file or matching files. Use to understand what a module exposes. For finding unused exports use get_dead_exports instead. Read-only. Returns JSON: { files: [{ path, exports: [{ name, kind, signature }] }] }.",
|
|
101727
101822
|
{
|
|
101728
101823
|
file_pattern: z6.string().max(512).optional().describe("Glob-style pattern to filter files (e.g. src/services/*.ts)")
|
|
101729
101824
|
},
|
|
@@ -101734,7 +101829,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101734
101829
|
);
|
|
101735
101830
|
server.tool(
|
|
101736
101831
|
"get_plugin_registry",
|
|
101737
|
-
"List all registered indexer plugins and the edge types they emit",
|
|
101832
|
+
"List all registered indexer plugins and the edge types they emit. Use for debugging indexer behavior or understanding which frameworks are supported. Read-only. Returns JSON: { languagePlugins, frameworkPlugins, edgeTypes }.",
|
|
101738
101833
|
{},
|
|
101739
101834
|
async () => {
|
|
101740
101835
|
const result = getPluginRegistry(store, registry, frameworkNames);
|
|
@@ -101743,7 +101838,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101743
101838
|
);
|
|
101744
101839
|
server.tool(
|
|
101745
101840
|
"get_type_hierarchy",
|
|
101746
|
-
"Walk TypeScript class/interface hierarchy: ancestors (what it extends/implements) and descendants (what extends/implements it)",
|
|
101841
|
+
"Walk TypeScript class/interface hierarchy: ancestors (what it extends/implements) and descendants (what extends/implements it). Use to understand inheritance trees. For a flat list of implementations only use get_implementations instead. Read-only. Returns JSON: { name, ancestors: [...], descendants: [...] }.",
|
|
101747
101842
|
{
|
|
101748
101843
|
name: z6.string().max(256).describe('Class or interface name (e.g. "LanguagePlugin", "Store")'),
|
|
101749
101844
|
max_depth: z6.number().int().min(1).max(20).optional().describe("Max traversal depth (default 10)")
|
|
@@ -101770,7 +101865,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101770
101865
|
);
|
|
101771
101866
|
server.tool(
|
|
101772
101867
|
"get_dead_exports",
|
|
101773
|
-
"Find exported symbols never imported by any other file \u2014 dead code candidates",
|
|
101868
|
+
"Find exported symbols never imported by any other file \u2014 dead code candidates. Use for quick export-level dead code scan. For deeper multi-signal dead code detection (including call graph) use get_dead_code instead. Read-only. Returns JSON: { deadExports: [{ symbol_id, name, kind, file }], total }.",
|
|
101774
101869
|
{
|
|
101775
101870
|
file_pattern: z6.string().max(512).optional().describe('Filter files by glob pattern (e.g. "src/tools/*.ts")')
|
|
101776
101871
|
},
|
|
@@ -101781,7 +101876,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101781
101876
|
);
|
|
101782
101877
|
server.tool(
|
|
101783
101878
|
"get_import_graph",
|
|
101784
|
-
"Show file-level dependency graph: what a file imports and what imports it (requires reindex for ESM edge resolution)",
|
|
101879
|
+
"Show file-level dependency graph: what a file imports and what imports it (requires reindex for ESM edge resolution). Use to understand module dependencies for a specific file. For project-wide coupling analysis use get_coupling; for visual diagram use get_dependency_diagram. Read-only. Returns JSON: { file, imports: [{ path }], importedBy: [{ path }] }.",
|
|
101785
101880
|
{
|
|
101786
101881
|
file_path: z6.string().max(512).describe('Relative file path to analyze (e.g. "src/server.ts")')
|
|
101787
101882
|
},
|
|
@@ -101794,7 +101889,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101794
101889
|
);
|
|
101795
101890
|
server.tool(
|
|
101796
101891
|
"get_untested_exports",
|
|
101797
|
-
"Find exported public symbols with no matching test file \u2014 test coverage gaps",
|
|
101892
|
+
"Find exported public symbols with no matching test file \u2014 test coverage gaps. For deeper analysis including non-exported symbols use get_untested_symbols instead. Read-only. Returns JSON: { untested: [{ symbol_id, name, kind, file }], total }.",
|
|
101798
101893
|
{
|
|
101799
101894
|
file_pattern: z6.string().max(512).optional().describe('Filter by file glob pattern (e.g. "src/tools/%")')
|
|
101800
101895
|
},
|
|
@@ -101805,7 +101900,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101805
101900
|
);
|
|
101806
101901
|
server.tool(
|
|
101807
101902
|
"get_untested_symbols",
|
|
101808
|
-
'Find ALL symbols (not just exports) lacking test coverage. Classifies as "unreached" (no test file imports the source) or "imported_not_called" (test imports file but never references this symbol). Use for thorough coverage gap analysis.',
|
|
101903
|
+
'Find ALL symbols (not just exports) lacking test coverage. Classifies as "unreached" (no test file imports the source) or "imported_not_called" (test imports file but never references this symbol). Use for thorough coverage gap analysis. For exports-only quick scan use get_untested_exports instead. Read-only. Returns JSON: { untested: [{ symbol_id, name, kind, file, classification }], total }.',
|
|
101809
101904
|
{
|
|
101810
101905
|
file_pattern: z6.string().max(512).optional().describe('Filter by file glob pattern (e.g. "src/tools/%")'),
|
|
101811
101906
|
max_results: z6.number().int().min(1).max(500).optional().describe("Cap on returned items (default: all)")
|
|
@@ -101815,12 +101910,12 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101815
101910
|
return { content: [{ type: "text", text: jh("get_untested_symbols", result) }] };
|
|
101816
101911
|
}
|
|
101817
101912
|
);
|
|
101818
|
-
server.tool("self_audit", "Dead code & coverage audit: dead exports, untested public symbols, heritage debt. Use
|
|
101913
|
+
server.tool("self_audit", "Dead code & coverage audit: dead exports, untested public symbols, heritage debt. Use as a one-shot health check combining dead exports + untested symbols + heritage debt. For individual checks use get_dead_exports, get_untested_symbols, or get_dead_code separately. Read-only. Returns JSON: { deadExports, untestedSymbols, heritageDebt, summary }.", {}, async () => {
|
|
101819
101914
|
return { content: [{ type: "text", text: j3(selfAudit(store)) }] };
|
|
101820
101915
|
});
|
|
101821
101916
|
server.tool(
|
|
101822
101917
|
"get_coupling",
|
|
101823
|
-
"Coupling analysis: afferent (Ca), efferent (Ce), instability index per file. Shows which modules are stable vs unstable",
|
|
101918
|
+
"Coupling analysis: afferent (Ca), efferent (Ce), instability index per file. Shows which modules are stable vs unstable. Use to identify fragile or overly-depended-on modules. For coupling changes over time use get_coupling_trend instead. Read-only. Returns JSON: [{ file, ca, ce, instability, assessment }].",
|
|
101824
101919
|
{
|
|
101825
101920
|
limit: z6.number().int().min(1).max(500).optional().describe("Max results (default: all)"),
|
|
101826
101921
|
assessment: z6.enum(["stable", "neutral", "unstable", "isolated"]).optional().describe("Filter by stability assessment")
|
|
@@ -101834,7 +101929,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101834
101929
|
);
|
|
101835
101930
|
server.tool(
|
|
101836
101931
|
"get_circular_imports",
|
|
101837
|
-
"Find circular dependency chains in the import graph (Kosaraju SCC algorithm)",
|
|
101932
|
+
"Find circular dependency chains in the import graph (Kosaraju SCC algorithm). Use to detect and break dependency cycles. Read-only. Returns JSON: { total_cycles, cycles: [[file1, file2, ...]] }.",
|
|
101838
101933
|
{},
|
|
101839
101934
|
async () => {
|
|
101840
101935
|
const cycles = getDependencyCycles(store);
|
|
@@ -101853,7 +101948,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101853
101948
|
);
|
|
101854
101949
|
server.tool(
|
|
101855
101950
|
"get_pagerank",
|
|
101856
|
-
"File importance ranking via PageRank on the import graph. Shows most central/important files",
|
|
101951
|
+
"File importance ranking via PageRank on the import graph. Shows most central/important files. Use to identify architecturally critical files. For combined health metrics use get_project_health instead. Read-only. Returns JSON: [{ file, score }].",
|
|
101857
101952
|
{
|
|
101858
101953
|
limit: z6.number().int().min(1).max(200).optional().describe("Max results (default: 50)")
|
|
101859
101954
|
},
|
|
@@ -101864,7 +101959,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101864
101959
|
);
|
|
101865
101960
|
server.tool(
|
|
101866
101961
|
"get_refactor_candidates",
|
|
101867
|
-
"Find functions with high complexity called from many files \u2014 candidates for extraction to shared modules",
|
|
101962
|
+
"Find functions with high complexity called from many files \u2014 candidates for extraction to shared modules. Use during architecture review to identify hotspots worth refactoring. Read-only. Returns JSON: [{ symbol_id, name, file, cyclomatic, callerCount }].",
|
|
101868
101963
|
{
|
|
101869
101964
|
min_cyclomatic: z6.number().int().min(1).optional().describe("Min cyclomatic complexity (default: 5)"),
|
|
101870
101965
|
min_callers: z6.number().int().min(1).optional().describe("Min distinct caller files (default: 2)"),
|
|
@@ -101881,7 +101976,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101881
101976
|
);
|
|
101882
101977
|
server.tool(
|
|
101883
101978
|
"get_project_health",
|
|
101884
|
-
"Structural health: coupling instability, dependency cycles, PageRank rankings, refactor candidates. Use for architecture review.",
|
|
101979
|
+
"Structural health: coupling instability, dependency cycles, PageRank rankings, refactor candidates. Use for architecture review as a single aggregated report. For individual metrics use get_coupling, get_circular_imports, or get_pagerank separately. Read-only. Returns JSON: { coupling, cycles, pagerank, refactorCandidates, hotspots }.",
|
|
101885
101980
|
{},
|
|
101886
101981
|
async () => {
|
|
101887
101982
|
const result = getRepoHealth(store);
|
|
@@ -101891,7 +101986,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101891
101986
|
);
|
|
101892
101987
|
server.tool(
|
|
101893
101988
|
"check_architecture",
|
|
101894
|
-
"Check architectural layer rules: detect forbidden imports between layers (e.g. domain importing infrastructure). Supports auto-detected presets (clean-architecture, hexagonal) or custom layers.",
|
|
101989
|
+
"Check architectural layer rules: detect forbidden imports between layers (e.g. domain importing infrastructure). Supports auto-detected presets (clean-architecture, hexagonal) or custom layers. Use to enforce architectural boundaries. Read-only. Returns JSON: { violations: [{ from, to, rule, file, line }], total, preset }.",
|
|
101895
101990
|
{
|
|
101896
101991
|
preset: z6.enum(["clean-architecture", "hexagonal"]).optional().describe("Use a built-in layer preset (auto-detected if omitted)"),
|
|
101897
101992
|
layers: z6.array(z6.object({
|
|
@@ -101920,7 +102015,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101920
102015
|
);
|
|
101921
102016
|
server.tool(
|
|
101922
102017
|
"get_code_owners",
|
|
101923
|
-
"Git-based code ownership: who contributed most to specific files (git shortlog). Requires git.",
|
|
102018
|
+
"Git-based code ownership: who contributed most to specific files (git shortlog). Requires git. Use to identify who to ask about specific files. For symbol-level ownership use get_symbol_owners instead. Read-only. Returns JSON: [{ file, owners: [{ author, commits, percentage }] }].",
|
|
101924
102019
|
{
|
|
101925
102020
|
file_paths: z6.array(z6.string().max(512)).min(1).max(20).describe("File paths to check ownership for")
|
|
101926
102021
|
},
|
|
@@ -101938,7 +102033,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101938
102033
|
);
|
|
101939
102034
|
server.tool(
|
|
101940
102035
|
"get_symbol_owners",
|
|
101941
|
-
"Git blame-based symbol ownership: who wrote which lines of a specific symbol. Requires git.",
|
|
102036
|
+
"Git blame-based symbol ownership: who wrote which lines of a specific symbol. Requires git. Use for fine-grained ownership of a specific function/class. For file-level ownership use get_code_owners instead. Read-only. Returns JSON: { symbol_id, owners: [{ author, lines, percentage }] }.",
|
|
101942
102037
|
{
|
|
101943
102038
|
symbol_id: z6.string().max(512).describe("Symbol ID to check ownership for")
|
|
101944
102039
|
},
|
|
@@ -101952,7 +102047,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101952
102047
|
);
|
|
101953
102048
|
server.tool(
|
|
101954
102049
|
"get_complexity_trend",
|
|
101955
|
-
"File complexity over git history: cyclomatic complexity at past commits. Shows if a file is getting more or less complex.",
|
|
102050
|
+
"File complexity over git history: cyclomatic complexity at past commits. Shows if a file is getting more or less complex. Requires git. Use to track whether a file is improving or degrading. For current snapshot use get_complexity_report; for symbol-level trends use get_symbol_complexity_trend. Read-only. Returns JSON: { file, snapshots: [{ commit, date, complexity }] }.",
|
|
101956
102051
|
{
|
|
101957
102052
|
file_path: z6.string().max(512).describe("File path to analyze"),
|
|
101958
102053
|
snapshots: z6.number().int().min(2).max(20).optional().describe("Number of historical snapshots (default: 5)")
|
|
@@ -101969,7 +102064,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101969
102064
|
);
|
|
101970
102065
|
server.tool(
|
|
101971
102066
|
"get_coupling_trend",
|
|
101972
|
-
"File coupling over git history: Ca/Ce/instability at past commits. Shows if a module is stabilizing or destabilizing.",
|
|
102067
|
+
"File coupling over git history: Ca/Ce/instability at past commits. Shows if a module is stabilizing or destabilizing. Requires git. Use to track module stability over time. For current coupling snapshot use get_coupling instead. Read-only. Returns JSON: { file, snapshots: [{ commit, date, ca, ce, instability }] }.",
|
|
101973
102068
|
{
|
|
101974
102069
|
file_path: z6.string().max(512).describe("File path to analyze"),
|
|
101975
102070
|
since_days: z6.number().int().min(1).optional().describe("Analyze last N days (default: 90)"),
|
|
@@ -101990,7 +102085,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
101990
102085
|
);
|
|
101991
102086
|
server.tool(
|
|
101992
102087
|
"get_symbol_complexity_trend",
|
|
101993
|
-
"Single symbol complexity over git history: cyclomatic, nesting, params, lines at past commits.",
|
|
102088
|
+
"Single symbol complexity over git history: cyclomatic, nesting, params, lines at past commits. Requires git. Use to track a specific function's complexity evolution. For file-level trends use get_complexity_trend instead. Read-only. Returns JSON: { symbol_id, snapshots: [{ commit, date, cyclomatic, nesting, params, lines }] }.",
|
|
101994
102089
|
{
|
|
101995
102090
|
symbol_id: z6.string().min(1).max(512).describe("Symbol ID to analyze (from search or outline)"),
|
|
101996
102091
|
since_days: z6.number().int().min(1).optional().describe("Analyze last N days (default: all history)"),
|
|
@@ -102009,7 +102104,7 @@ function registerAnalysisTools(server, ctx) {
|
|
|
102009
102104
|
);
|
|
102010
102105
|
server.tool(
|
|
102011
102106
|
"check_duplication",
|
|
102012
|
-
"Check if a function/class name already exists elsewhere in the codebase before creating it. Prevents duplicating existing logic. Call with just a name when planning new code, or symbol_id to check an existing symbol. Returns scored matches \u2014 score \u22650.7 means high likelihood of duplication, review the existing symbol before proceeding.",
|
|
102107
|
+
"Check if a function/class name already exists elsewhere in the codebase before creating it. Prevents duplicating existing logic. Call with just a name when planning new code, or symbol_id to check an existing symbol. Returns scored matches \u2014 score \u22650.7 means high likelihood of duplication, review the existing symbol before proceeding. Read-only. Returns JSON: { duplicates: [{ symbol_id, name, file, score }], hasDuplication }.",
|
|
102013
102108
|
{
|
|
102014
102109
|
symbol_id: z6.string().max(512).optional().describe("Existing symbol ID to check for duplicates"),
|
|
102015
102110
|
name: z6.string().max(256).optional().describe("Function/class name to check (when symbol_id not available)"),
|
|
@@ -105178,7 +105273,7 @@ function registerGitTools(server, ctx) {
|
|
|
105178
105273
|
const detectedFrameworks = registry.getAllFrameworkPlugins().map((p5) => p5.manifest.name);
|
|
105179
105274
|
server.tool(
|
|
105180
105275
|
"get_git_churn",
|
|
105181
|
-
"Per-file git churn: commits, unique authors, frequency, volatility assessment. Requires git.",
|
|
105276
|
+
"Per-file git churn: commits, unique authors, frequency, volatility assessment. Requires git. Use to identify frequently-changed files. For combined churn+complexity hotspots use get_risk_hotspots instead. Read-only. Returns JSON: { results: [{ file, commits, authors, frequency, volatility }], total }.",
|
|
105182
105277
|
{
|
|
105183
105278
|
since_days: z7.number().int().min(1).optional().describe("Analyze commits from last N days (default: all history)"),
|
|
105184
105279
|
limit: z7.number().int().min(1).max(500).optional().describe("Max results (default: 50)"),
|
|
@@ -105198,7 +105293,7 @@ function registerGitTools(server, ctx) {
|
|
|
105198
105293
|
);
|
|
105199
105294
|
server.tool(
|
|
105200
105295
|
"get_risk_hotspots",
|
|
105201
|
-
"Code hotspots: files with both high complexity AND high git churn (Adam Tornhill methodology). Score = complexity \xD7 log(1 + commits). Each entry includes a confidence_level (low/medium/multi_signal) counting how many of the two independent signals fired strongly. Result envelope includes _methodology disclosure and _warnings when git is unavailable.",
|
|
105296
|
+
"Code hotspots: files with both high complexity AND high git churn (Adam Tornhill methodology). Score = complexity \xD7 log(1 + commits). Each entry includes a confidence_level (low/medium/multi_signal) counting how many of the two independent signals fired strongly. Result envelope includes _methodology disclosure and _warnings when git is unavailable. Requires git. Use to prioritize refactoring. For per-file bug prediction use predict_bugs instead. Read-only. Returns JSON: { hotspots: [{ file, score, complexity, commits, confidence_level }], total }.",
|
|
105202
105297
|
{
|
|
105203
105298
|
since_days: z7.number().int().min(1).optional().describe("Git churn window in days (default: 90)"),
|
|
105204
105299
|
limit: z7.number().int().min(1).max(100).optional().describe("Max results (default: 20)"),
|
|
@@ -105229,7 +105324,7 @@ function registerGitTools(server, ctx) {
|
|
|
105229
105324
|
);
|
|
105230
105325
|
server.tool(
|
|
105231
105326
|
"get_dead_code",
|
|
105232
|
-
'Dead code detection. Two modes: (1) "multi-signal" (default) combines import graph, call graph, and barrel export analysis with confidence scores. (2) "reachability" runs forward BFS from auto-detected entry points (tests, package.json main/bin, src/{cli,main,index}, routes, framework-tagged controllers) \u2014 stricter but more accurate when entry points are enumerable. Pass entry_points to add custom roots. Both modes emit _methodology and _warnings.',
|
|
105327
|
+
'Dead code detection. Two modes: (1) "multi-signal" (default) combines import graph, call graph, and barrel export analysis with confidence scores. (2) "reachability" runs forward BFS from auto-detected entry points (tests, package.json main/bin, src/{cli,main,index}, routes, framework-tagged controllers) \u2014 stricter but more accurate when entry points are enumerable. Pass entry_points to add custom roots. Both modes emit _methodology and _warnings. Use for comprehensive dead code analysis. For quick export-only scan use get_dead_exports; to safely remove detected dead code use remove_dead_code. Read-only. Returns JSON: { dead_symbols: [{ symbol_id, name, file, confidence, signals }], total }.',
|
|
105233
105328
|
{
|
|
105234
105329
|
file_pattern: z7.string().max(512).optional().describe('Filter by file glob pattern (e.g. "src/tools/%")'),
|
|
105235
105330
|
threshold: z7.number().min(0).max(1).optional().describe("[multi-signal mode] Min confidence to report (default: 0.5 = at least 2 of 3 signals)"),
|
|
@@ -105259,7 +105354,7 @@ function registerGitTools(server, ctx) {
|
|
|
105259
105354
|
);
|
|
105260
105355
|
server.tool(
|
|
105261
105356
|
"scan_security",
|
|
105262
|
-
"Scan project files for OWASP Top-10 security vulnerabilities using pattern matching. Detects SQL injection (CWE-89), XSS (CWE-79), command injection (CWE-78), path traversal (CWE-22), hardcoded secrets (CWE-798), insecure crypto (CWE-327), open redirects (CWE-601), and SSRF (CWE-918). Skips test files.",
|
|
105357
|
+
"Scan project files for OWASP Top-10 security vulnerabilities using pattern matching. Detects SQL injection (CWE-89), XSS (CWE-79), command injection (CWE-78), path traversal (CWE-22), hardcoded secrets (CWE-798), insecure crypto (CWE-327), open redirects (CWE-601), and SSRF (CWE-918). Skips test files. Use for pattern-based security audit. For data-flow-aware analysis use taint_analysis instead. Read-only. Returns JSON: { findings: [{ rule, severity, cwe, file, line, message }], total, summary }.",
|
|
105263
105358
|
{
|
|
105264
105359
|
scope: z7.string().max(512).optional().describe("Directory to scan (default: whole project)"),
|
|
105265
105360
|
rules: z7.array(z7.enum([
|
|
@@ -105293,7 +105388,7 @@ function registerGitTools(server, ctx) {
|
|
|
105293
105388
|
);
|
|
105294
105389
|
server.tool(
|
|
105295
105390
|
"detect_antipatterns",
|
|
105296
|
-
"Detect performance antipatterns: N+1 query risks, missing eager loading, unbounded queries, event listener leaks, circular model dependencies, missing indexes, memory leaks (unbounded caches, closure leaks). Static analysis across all indexed ORMs (Eloquent, Sequelize, Mongoose, Django, Prisma, TypeORM, Drizzle).",
|
|
105391
|
+
"Detect performance antipatterns: N+1 query risks, missing eager loading, unbounded queries, event listener leaks, circular model dependencies, missing indexes, memory leaks (unbounded caches, closure leaks). Static analysis across all indexed ORMs (Eloquent, Sequelize, Mongoose, Django, Prisma, TypeORM, Drizzle). Use to find performance issues. For code quality issues (TODOs, empty functions) use scan_code_smells instead. Read-only. Returns JSON: { findings: [{ category, severity, file, line, message, suggestion }], total }.",
|
|
105297
105392
|
{
|
|
105298
105393
|
category: z7.array(z7.enum([
|
|
105299
105394
|
"n_plus_one_risk",
|
|
@@ -105323,7 +105418,7 @@ function registerGitTools(server, ctx) {
|
|
|
105323
105418
|
);
|
|
105324
105419
|
server.tool(
|
|
105325
105420
|
"scan_code_smells",
|
|
105326
|
-
"Find deferred work and shortcuts: TODO/FIXME/HACK/XXX comments, empty functions & stubs, hardcoded values (IPs, URLs, credentials, magic numbers, feature flags). Surfaces technical debt that grep alone misses by combining comment scanning, symbol body analysis, and context-aware false-positive filtering.",
|
|
105421
|
+
"Find deferred work and shortcuts: TODO/FIXME/HACK/XXX comments, empty functions & stubs, hardcoded values (IPs, URLs, credentials, magic numbers, feature flags). Surfaces technical debt that grep alone misses by combining comment scanning, symbol body analysis, and context-aware false-positive filtering. Use for code quality audit. For performance-specific antipatterns use detect_antipatterns; for security issues use scan_security. Read-only. Returns JSON: { findings: [{ category, priority, file, line, message }], total, summary }.",
|
|
105327
105422
|
{
|
|
105328
105423
|
category: z7.array(z7.enum([
|
|
105329
105424
|
"todo_comment",
|
|
@@ -105357,7 +105452,7 @@ function registerGitTools(server, ctx) {
|
|
|
105357
105452
|
);
|
|
105358
105453
|
server.tool(
|
|
105359
105454
|
"taint_analysis",
|
|
105360
|
-
"Track flow of untrusted data from sources (HTTP params, env vars, file reads) to dangerous sinks (SQL queries, exec, innerHTML, redirects). Framework-aware: knows Express req.params, Laravel $request->input, Django request.GET, FastAPI Query(), etc. Reports unsanitized flows with CWE IDs and fix suggestions. More accurate than pattern-based scanning \u2014 traces actual data flow paths.",
|
|
105455
|
+
"Track flow of untrusted data from sources (HTTP params, env vars, file reads) to dangerous sinks (SQL queries, exec, innerHTML, redirects). Framework-aware: knows Express req.params, Laravel $request->input, Django request.GET, FastAPI Query(), etc. Reports unsanitized flows with CWE IDs and fix suggestions. More accurate than pattern-based scanning \u2014 traces actual data flow paths. Use for data-flow security analysis. For pattern-based OWASP scanning use scan_security instead. Read-only. Returns JSON: { flows: [{ source, sink, path, sanitized, cwe, suggestion }], total }.",
|
|
105361
105456
|
{
|
|
105362
105457
|
scope: z7.string().max(512).optional().describe("Directory to scan (default: whole project)"),
|
|
105363
105458
|
sources: z7.array(z7.enum([
|
|
@@ -105403,7 +105498,7 @@ function registerGitTools(server, ctx) {
|
|
|
105403
105498
|
);
|
|
105404
105499
|
server.tool(
|
|
105405
105500
|
"generate_sbom",
|
|
105406
|
-
"Generate a Software Bill of Materials (SBOM) from package manifests and lockfiles. Supports npm, Composer, pip, Go, Cargo, Bundler, Maven. Outputs CycloneDX, SPDX, or plain JSON. Includes license compliance warnings for copyleft licenses.",
|
|
105501
|
+
"Generate a Software Bill of Materials (SBOM) from package manifests and lockfiles. Supports npm, Composer, pip, Go, Cargo, Bundler, Maven. Outputs CycloneDX, SPDX, or plain JSON. Includes license compliance warnings for copyleft licenses. Use for supply chain audits or compliance reports. Returns JSON/CycloneDX/SPDX: { components: [{ name, version, license, type }], warnings }.",
|
|
105407
105502
|
{
|
|
105408
105503
|
format: z7.enum(["cyclonedx", "spdx", "json"]).optional().describe("Output format (default: json)"),
|
|
105409
105504
|
include_dev: z7.boolean().optional().describe("Include devDependencies (default: false)"),
|
|
@@ -105423,7 +105518,7 @@ function registerGitTools(server, ctx) {
|
|
|
105423
105518
|
);
|
|
105424
105519
|
server.tool(
|
|
105425
105520
|
"get_artifacts",
|
|
105426
|
-
"Surface non-code knowledge from the index: DB schemas (migrations, ORM models), API specs (routes, OpenAPI endpoints), infrastructure (docker-compose services, K8s resources), CI pipelines (jobs, stages), and config (env vars). All data from the existing index \u2014 no extra I/O.",
|
|
105521
|
+
"Surface non-code knowledge from the index: DB schemas (migrations, ORM models), API specs (routes, OpenAPI endpoints), infrastructure (docker-compose services, K8s resources), CI pipelines (jobs, stages), and config (env vars). All data from the existing index \u2014 no extra I/O. Use to discover infrastructure and config artifacts without reading files. Read-only. Returns JSON: { artifacts: [{ category, kind, name, file }], total }.",
|
|
105427
105522
|
{
|
|
105428
105523
|
category: z7.enum(["database", "api", "infra", "ci", "config", "all"]).optional().describe("Filter by artifact category (default: all)"),
|
|
105429
105524
|
query: z7.string().max(256).optional().describe("Text filter on name/kind/file"),
|
|
@@ -105440,7 +105535,7 @@ function registerGitTools(server, ctx) {
|
|
|
105440
105535
|
);
|
|
105441
105536
|
server.tool(
|
|
105442
105537
|
"plan_batch_change",
|
|
105443
|
-
"Analyze the impact of updating a package/dependency. Shows all affected files, import references, and generates a PR template with checklist. Use before upgrading a dependency to understand blast radius.",
|
|
105538
|
+
"Analyze the impact of updating a package/dependency. Shows all affected files, import references, and generates a PR template with checklist. Use before upgrading a dependency to understand blast radius. Read-only (analysis only, does not modify files). Returns JSON: { package, affectedFiles, importReferences, prTemplate, checklist }.",
|
|
105444
105539
|
{
|
|
105445
105540
|
package: z7.string().min(1).max(256).describe('Package name (e.g. "express", "laravel/framework", "react")'),
|
|
105446
105541
|
from_version: z7.string().max(64).optional().describe("Current version"),
|
|
@@ -105462,7 +105557,7 @@ function registerGitTools(server, ctx) {
|
|
|
105462
105557
|
);
|
|
105463
105558
|
server.tool(
|
|
105464
105559
|
"get_complexity_report",
|
|
105465
|
-
"Get complexity metrics (cyclomatic, max nesting, param count) for symbols in a file or across the project.
|
|
105560
|
+
"Get complexity metrics (cyclomatic, max nesting, param count) for symbols in a file or across the project. Use to identify complex code before refactoring. For historical trends use get_complexity_trend instead. Read-only. Returns JSON: { symbols: [{ symbol_id, name, kind, file, line, cyclomatic, max_nesting, param_count }], total }.",
|
|
105466
105561
|
{
|
|
105467
105562
|
file_path: z7.string().max(512).optional().describe("File path to report on (omit for project-wide top complex symbols)"),
|
|
105468
105563
|
min_cyclomatic: z7.number().int().min(1).optional().describe("Min cyclomatic complexity to include (default: 1 for file, 5 for project)"),
|
|
@@ -105497,7 +105592,7 @@ function registerGitTools(server, ctx) {
|
|
|
105497
105592
|
);
|
|
105498
105593
|
server.tool(
|
|
105499
105594
|
"check_rename",
|
|
105500
|
-
"Pre-rename collision detection: checks the symbol's own file and all importing files for existing symbols with the target name",
|
|
105595
|
+
"Pre-rename collision detection: checks the symbol's own file and all importing files for existing symbols with the target name. Use before apply_rename to verify safety. Read-only (does not modify files). Returns JSON: { safe, conflicts: [{ symbol_id, name, file }] }.",
|
|
105501
105596
|
{
|
|
105502
105597
|
symbol_id: z7.string().max(512).describe("Symbol ID to rename"),
|
|
105503
105598
|
target_name: z7.string().min(1).max(256).describe("Proposed new name")
|
|
@@ -107363,7 +107458,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107363
107458
|
const { store, projectRoot, guardPath, j: j3 } = ctx;
|
|
107364
107459
|
server.tool(
|
|
107365
107460
|
"apply_rename",
|
|
107366
|
-
|
|
107461
|
+
'Rename a symbol across all usages (definition + all importing files). Runs collision detection first and aborts on conflicts. Returns the list of edits applied. Modifies source files. Use check_rename first to verify safety; use plan_refactoring with type="rename" to preview edits. Returns JSON: { success, edits: [{ file, old_text, new_text }], filesModified }.',
|
|
107367
107462
|
{
|
|
107368
107463
|
symbol_id: z8.string().max(512).describe("Symbol ID to rename (from search or outline)"),
|
|
107369
107464
|
new_name: z8.string().min(1).max(256).describe("New name for the symbol"),
|
|
@@ -107379,7 +107474,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107379
107474
|
);
|
|
107380
107475
|
server.tool(
|
|
107381
107476
|
"remove_dead_code",
|
|
107382
|
-
"Safely remove a dead symbol from its file. Verifies the symbol is actually dead (multi-signal detection or zero incoming edges) before removal. Warns about orphaned imports in other files.",
|
|
107477
|
+
"Safely remove a dead symbol from its file. Verifies the symbol is actually dead (multi-signal detection or zero incoming edges) before removal. Warns about orphaned imports in other files. Destructive \u2014 deletes code from source files. Use get_dead_code first to identify candidates. Returns JSON: { success, removed: { symbol_id, file }, orphanedImports }.",
|
|
107383
107478
|
{
|
|
107384
107479
|
symbol_id: z8.string().max(512).describe("Symbol ID to remove (from get_dead_code results)"),
|
|
107385
107480
|
dry_run: z8.boolean().default(false).describe("Preview changes without applying (default: false)")
|
|
@@ -107394,7 +107489,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107394
107489
|
);
|
|
107395
107490
|
server.tool(
|
|
107396
107491
|
"extract_function",
|
|
107397
|
-
|
|
107492
|
+
'Extract a range of lines into a new named function. Detects parameters (variables from outer scope) and return values (variables used after the range). Supports TypeScript/JavaScript, Python, and Go. Modifies source files. Use plan_refactoring with type="extract" to preview first. Returns JSON: { success, edits: [{ file, old_text, new_text }], extractedFunction }.',
|
|
107398
107493
|
{
|
|
107399
107494
|
file_path: z8.string().max(512).describe("File path (relative to project root)"),
|
|
107400
107495
|
start_line: z8.number().int().min(1).describe("First line to extract (1-indexed, inclusive)"),
|
|
@@ -107414,7 +107509,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107414
107509
|
);
|
|
107415
107510
|
server.tool(
|
|
107416
107511
|
"apply_codemod",
|
|
107417
|
-
"Bulk regex find-and-replace across files. Dry-run by default \u2014 first call shows preview, second call with dry_run=false applies. Use for mechanical changes like adding async/await, renaming patterns, updating imports across many files.",
|
|
107512
|
+
"Bulk regex find-and-replace across files. Dry-run by default \u2014 first call shows preview, second call with dry_run=false applies. Use for mechanical changes like adding async/await, renaming patterns, updating imports across many files. Potentially destructive \u2014 can modify or delete code. Always preview with dry_run=true first. Returns JSON: { success, matchedFiles, changes: [{ file, matches }], applied }.",
|
|
107418
107513
|
{
|
|
107419
107514
|
pattern: z8.string().min(1).max(1e3).describe("Regex pattern to match (JavaScript regex syntax)"),
|
|
107420
107515
|
replacement: z8.string().max(1e3).describe("Replacement string ($1, $2 for capture groups)"),
|
|
@@ -107439,7 +107534,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107439
107534
|
);
|
|
107440
107535
|
server.tool(
|
|
107441
107536
|
"apply_move",
|
|
107442
|
-
|
|
107537
|
+
'Move a symbol to a different file or rename/move a file, updating all import paths across the codebase. Dry-run by default (safe preview). Modifies source files. Use plan_refactoring with type="move" to preview first. Returns JSON: { success, edits: [{ file, old_text, new_text }], filesModified }.',
|
|
107443
107538
|
{
|
|
107444
107539
|
symbol_id: z8.string().max(512).optional().describe("Symbol ID to move (mode: symbol)"),
|
|
107445
107540
|
target_file: z8.string().max(512).optional().describe("Target file path for the symbol (mode: symbol)"),
|
|
@@ -107503,7 +107598,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107503
107598
|
});
|
|
107504
107599
|
server.tool(
|
|
107505
107600
|
"change_signature",
|
|
107506
|
-
|
|
107601
|
+
'Change a function/method signature (add/remove/rename/reorder parameters) and update all call sites. Dry-run by default (safe preview). Modifies source files. Use plan_refactoring with type="signature" to preview first. Returns JSON: { success, edits: [{ file, old_text, new_text }], callSitesUpdated }.',
|
|
107507
107602
|
{
|
|
107508
107603
|
symbol_id: z8.string().max(512).describe("Symbol ID of the function/method to modify"),
|
|
107509
107604
|
changes: z8.array(signatureChangeSchema).min(1).max(20).describe("Array of changes to apply"),
|
|
@@ -107533,7 +107628,7 @@ function registerRefactoringTools(server, ctx) {
|
|
|
107533
107628
|
});
|
|
107534
107629
|
server.tool(
|
|
107535
107630
|
"plan_refactoring",
|
|
107536
|
-
"Preview any refactoring (rename, move, extract, signature) without applying. Returns all edits as {old_text, new_text} pairs. Use to review
|
|
107631
|
+
"Preview any refactoring (rename, move, extract, signature) without applying. Returns all edits as {old_text, new_text} pairs. Read-only (does not modify files). Use to review the blast radius before calling apply_rename, apply_move, change_signature, or extract_function. Returns JSON: { success, type, edits: [{ file, old_text, new_text }], filesAffected }.",
|
|
107537
107632
|
{
|
|
107538
107633
|
type: z8.enum(["rename", "move", "extract", "signature"]).describe("Type of refactoring to preview"),
|
|
107539
107634
|
symbol_id: z8.string().max(512).optional().describe("Symbol ID (for rename, move symbol, signature)"),
|
|
@@ -108060,9 +108155,9 @@ var OtlpReceiver = class {
|
|
|
108060
108155
|
if (this.options.port === 0) return;
|
|
108061
108156
|
return new Promise((resolve4, reject) => {
|
|
108062
108157
|
this.server = createServer((req, res) => this.handleRequest(req, res));
|
|
108063
|
-
this.server.on("error", (
|
|
108064
|
-
logger.error({ error:
|
|
108065
|
-
reject(
|
|
108158
|
+
this.server.on("error", (err49) => {
|
|
108159
|
+
logger.error({ error: err49 }, "OTLP receiver error");
|
|
108160
|
+
reject(err49);
|
|
108066
108161
|
});
|
|
108067
108162
|
this.server.listen(this.options.port, this.options.host, () => {
|
|
108068
108163
|
logger.info(
|
|
@@ -109682,16 +109777,16 @@ function findShortestPath(store, startNodeId, endNodeId, maxDepth) {
|
|
|
109682
109777
|
parent.set(nodeId, { from, edgeType: edge.edge_type_name });
|
|
109683
109778
|
nextFrontier.push(nodeId);
|
|
109684
109779
|
if (nodeId === endNodeId) {
|
|
109685
|
-
const
|
|
109780
|
+
const path134 = [endNodeId];
|
|
109686
109781
|
const edgeTypes = [];
|
|
109687
109782
|
let cur = endNodeId;
|
|
109688
109783
|
while (cur !== startNodeId) {
|
|
109689
109784
|
const p5 = parent.get(cur);
|
|
109690
|
-
|
|
109785
|
+
path134.unshift(p5.from);
|
|
109691
109786
|
edgeTypes.unshift(p5.edgeType);
|
|
109692
109787
|
cur = p5.from;
|
|
109693
109788
|
}
|
|
109694
|
-
return { path:
|
|
109789
|
+
return { path: path134, edgeTypes };
|
|
109695
109790
|
}
|
|
109696
109791
|
}
|
|
109697
109792
|
}
|
|
@@ -110802,6 +110897,7 @@ const preTicks = Math.min(300, Math.max(50, Math.ceil(Math.log(N + 1) * 40)));
|
|
|
110802
110897
|
let ticksDone = 0;
|
|
110803
110898
|
const TICK_BATCH = N > 5000 ? 5 : 15;
|
|
110804
110899
|
let layoutDone = false;
|
|
110900
|
+
let frameRequested = false;
|
|
110805
110901
|
(function tickBatch() {
|
|
110806
110902
|
const t0 = performance.now();
|
|
110807
110903
|
while (ticksDone < preTicks && performance.now() - t0 < 12) { sim.tick(); ticksDone++; }
|
|
@@ -110943,7 +111039,6 @@ let highlightSet = null;
|
|
|
110943
111039
|
let hoveredNode = null;
|
|
110944
111040
|
let searchQ = '';
|
|
110945
111041
|
let animating = false;
|
|
110946
|
-
let frameRequested = false;
|
|
110947
111042
|
|
|
110948
111043
|
function scheduleFrame() {
|
|
110949
111044
|
if (!frameRequested) { frameRequested = true; requestAnimationFrame(frame); }
|
|
@@ -112632,7 +112727,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112632
112727
|
const additionalRepos = config.topology.repos ?? [];
|
|
112633
112728
|
server.tool(
|
|
112634
112729
|
"get_service_map",
|
|
112635
|
-
"Get map of all services, their APIs, and inter-service dependencies. Auto-detects services from Docker Compose or treats each repo as a service.",
|
|
112730
|
+
"Get map of all services, their APIs, and inter-service dependencies. Auto-detects services from Docker Compose or treats each repo as a service. Use to understand microservice topology. For subproject-level graph use get_subproject_graph instead. Read-only. Returns JSON: { services: [{ name, endpoints, dependencies }], total }.",
|
|
112636
112731
|
{
|
|
112637
112732
|
include_endpoints: z9.boolean().optional().describe("Include full endpoint list per service (default false)")
|
|
112638
112733
|
},
|
|
@@ -112644,7 +112739,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112644
112739
|
);
|
|
112645
112740
|
server.tool(
|
|
112646
112741
|
"get_cross_service_impact",
|
|
112647
|
-
"Analyze cross-service impact of changing an endpoint or event. Shows which services would be affected.",
|
|
112742
|
+
"Analyze cross-service impact of changing an endpoint or event. Shows which services would be affected. Use before modifying a shared endpoint. For within-codebase impact use get_change_impact instead. Read-only. Returns JSON: { service, affectedServices: [{ name, reason }], total }.",
|
|
112648
112743
|
{
|
|
112649
112744
|
service: z9.string().min(1).max(256).describe("Service name"),
|
|
112650
112745
|
endpoint: z9.string().max(512).optional().describe("Endpoint path (e.g. /api/users/{id})"),
|
|
@@ -112658,7 +112753,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112658
112753
|
);
|
|
112659
112754
|
server.tool(
|
|
112660
112755
|
"get_api_contract",
|
|
112661
|
-
"Get API contract (OpenAPI/gRPC/GraphQL) for a service. Parses spec files found in the service repo.",
|
|
112756
|
+
"Get API contract (OpenAPI/gRPC/GraphQL) for a service. Parses spec files found in the service repo. Use to inspect a service's public API. For detecting spec-vs-code mismatches use get_contract_drift instead. Read-only. Returns JSON: { service, contract_type, endpoints, schemas }.",
|
|
112662
112757
|
{
|
|
112663
112758
|
service: z9.string().min(1).max(256).describe("Service name"),
|
|
112664
112759
|
contract_type: z9.enum(["openapi", "grpc", "graphql"]).optional().describe("Filter by contract type")
|
|
@@ -112671,7 +112766,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112671
112766
|
);
|
|
112672
112767
|
server.tool(
|
|
112673
112768
|
"get_service_deps",
|
|
112674
|
-
"Get external service dependencies: which services this one calls (outgoing) and which call it (incoming).",
|
|
112769
|
+
"Get external service dependencies: which services this one calls (outgoing) and which call it (incoming). Use to understand a single service's dependency profile. For full topology use get_service_map instead. Read-only. Returns JSON: { service, outgoing, incoming }.",
|
|
112675
112770
|
{
|
|
112676
112771
|
service: z9.string().min(1).max(256).describe("Service name"),
|
|
112677
112772
|
direction: z9.enum(["outgoing", "incoming", "both"]).optional().describe("Dependency direction (default both)")
|
|
@@ -112684,7 +112779,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112684
112779
|
);
|
|
112685
112780
|
server.tool(
|
|
112686
112781
|
"get_contract_drift",
|
|
112687
|
-
"Detect mismatches between API spec and implementation: endpoints in spec but not in code, or in code but not in spec.",
|
|
112782
|
+
"Detect mismatches between API spec and implementation: endpoints in spec but not in code, or in code but not in spec. Use to verify API contract accuracy. For reading the contract itself use get_api_contract instead. Read-only. Returns JSON: { service, missingInCode, missingInSpec, total }.",
|
|
112688
112783
|
{
|
|
112689
112784
|
service: z9.string().min(1).max(256).describe("Service name")
|
|
112690
112785
|
},
|
|
@@ -112696,7 +112791,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112696
112791
|
);
|
|
112697
112792
|
server.tool(
|
|
112698
112793
|
"get_subproject_graph",
|
|
112699
|
-
"Show all subprojects and their cross-repo connections. A subproject is any working repository in your project ecosystem (microservices, frontends, backends, shared libraries, CLI tools, etc.). Displays repos, endpoints, client calls, and inter-repo dependency edges.",
|
|
112794
|
+
"Show all subprojects and their cross-repo connections. A subproject is any working repository in your project ecosystem (microservices, frontends, backends, shared libraries, CLI tools, etc.). Displays repos, endpoints, client calls, and inter-repo dependency edges. Use to understand multi-repo topology. Register repos first with subproject_add_repo. Read-only. Returns JSON: { repos, endpoints, clientCalls, edges }.",
|
|
112700
112795
|
{},
|
|
112701
112796
|
async () => {
|
|
112702
112797
|
const result = getSubprojectGraph(topoStore);
|
|
@@ -112706,7 +112801,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112706
112801
|
);
|
|
112707
112802
|
server.tool(
|
|
112708
112803
|
"get_subproject_impact",
|
|
112709
|
-
"Cross-repo impact analysis: find all client code across subprojects that would break if an endpoint changes. Resolves down to symbol level when per-repo indexes exist.",
|
|
112804
|
+
"Cross-repo impact analysis: find all client code across subprojects that would break if an endpoint changes. Resolves down to symbol level when per-repo indexes exist. Use before modifying a shared API endpoint. Read-only. Returns JSON: { endpoint, affectedClients: [{ repo, file, line, callType }], total }.",
|
|
112710
112805
|
{
|
|
112711
112806
|
endpoint: z9.string().max(512).optional().describe("Endpoint path pattern (e.g. /api/users)"),
|
|
112712
112807
|
method: z9.string().max(10).optional().describe("HTTP method filter (e.g. GET, POST)"),
|
|
@@ -112720,7 +112815,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112720
112815
|
);
|
|
112721
112816
|
server.tool(
|
|
112722
112817
|
"subproject_add_repo",
|
|
112723
|
-
"Add a repository as a subproject of the current project. A subproject is any working repository in your ecosystem: microservices, frontends, backends, shared libraries, CLI tools. Discovers services, parses API contracts (OpenAPI/gRPC/GraphQL), scans for HTTP client calls, and links them to known endpoints.",
|
|
112818
|
+
"Add a repository as a subproject of the current project. A subproject is any working repository in your ecosystem: microservices, frontends, backends, shared libraries, CLI tools. Discovers services, parses API contracts (OpenAPI/gRPC/GraphQL), scans for HTTP client calls, and links them to known endpoints. Mutates the topology store; idempotent. Use to build multi-repo intelligence. Returns JSON: { added, services, contracts, clientCalls }.",
|
|
112724
112819
|
{
|
|
112725
112820
|
repo_path: z9.string().min(1).max(1024).describe("Absolute or relative path to the repository/service"),
|
|
112726
112821
|
name: z9.string().max(256).optional().describe("Display name for the repo (default: directory basename)"),
|
|
@@ -112736,7 +112831,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112736
112831
|
);
|
|
112737
112832
|
server.tool(
|
|
112738
112833
|
"subproject_sync",
|
|
112739
|
-
"Re-scan all subprojects: re-discover services, re-parse contracts, re-scan client calls, and re-link everything.",
|
|
112834
|
+
"Re-scan all subprojects: re-discover services, re-parse contracts, re-scan client calls, and re-link everything. Mutates the topology store; idempotent. Use after code changes in subproject repos. Returns JSON: { synced, services, contracts, clientCalls }.",
|
|
112740
112835
|
{},
|
|
112741
112836
|
async () => {
|
|
112742
112837
|
const result = subprojectSync(topoStore);
|
|
@@ -112746,7 +112841,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112746
112841
|
);
|
|
112747
112842
|
server.tool(
|
|
112748
112843
|
"get_subproject_clients",
|
|
112749
|
-
"Find all client calls across subprojects that call a specific endpoint. Shows file, line, call type, and confidence.",
|
|
112844
|
+
"Find all client calls across subprojects that call a specific endpoint. Shows file, line, call type, and confidence. Use to find all consumers of an endpoint before modifying it. Read-only. Returns JSON: { endpoint, clients: [{ repo, file, line, callType, confidence }], total }.",
|
|
112750
112845
|
{
|
|
112751
112846
|
endpoint: z9.string().min(1).max(512).describe("Endpoint path to search for (e.g. /api/users)"),
|
|
112752
112847
|
method: z9.string().max(10).optional().describe("HTTP method filter")
|
|
@@ -112759,7 +112854,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112759
112854
|
);
|
|
112760
112855
|
server.tool(
|
|
112761
112856
|
"get_contract_versions",
|
|
112762
|
-
"Show version history for a service API contract with breaking change detection between versions. Compares request/response schemas across snapshots to flag removed fields, type changes, and renames.",
|
|
112857
|
+
"Show version history for a service API contract with breaking change detection between versions. Compares request/response schemas across snapshots to flag removed fields, type changes, and renames. Use to review API evolution. For current spec-vs-code drift use get_contract_drift instead. Read-only. Returns JSON: { service, versions: [{ version, date, breakingChanges }] }.",
|
|
112763
112858
|
{
|
|
112764
112859
|
service: z9.string().min(1).max(256).describe("Service name"),
|
|
112765
112860
|
limit: z9.number().int().min(1).max(50).optional().describe("Max versions to show (default 10)")
|
|
@@ -112772,7 +112867,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112772
112867
|
);
|
|
112773
112868
|
server.tool(
|
|
112774
112869
|
"discover_claude_sessions",
|
|
112775
|
-
"Scan ~/.claude/projects for projects Claude Code has touched on this machine, decode each directory name back to its absolute path, and report which ones still exist plus session-file count and last activity. With add_as_subprojects=true, every existing project is registered as a subproject in one call \u2014 useful for spinning up multi-repo intelligence after a fresh clone.",
|
|
112870
|
+
"Scan ~/.claude/projects for projects Claude Code has touched on this machine, decode each directory name back to its absolute path, and report which ones still exist plus session-file count and last activity. With add_as_subprojects=true, every existing project is registered as a subproject in one call \u2014 useful for spinning up multi-repo intelligence after a fresh clone. Reads local filesystem; with add_as_subprojects=true also mutates topology store. Returns JSON: { projects: [{ path, sessions, lastActivity }], total }.",
|
|
112776
112871
|
{
|
|
112777
112872
|
scan_root: z9.string().max(1024).optional().describe("Override the scan root (default: ~/.claude/projects)"),
|
|
112778
112873
|
exclude_current: z9.boolean().optional().describe("Exclude the current project from results (default: true)"),
|
|
@@ -112794,7 +112889,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112794
112889
|
);
|
|
112795
112890
|
server.tool(
|
|
112796
112891
|
"visualize_subproject_topology",
|
|
112797
|
-
"Open interactive HTML visualization of the subproject topology: services as nodes, API calls as edges, health/risk indicators per service. Node size = endpoint count, color = health (green/yellow/red).",
|
|
112892
|
+
"Open interactive HTML visualization of the subproject topology: services as nodes, API calls as edges, health/risk indicators per service. Node size = endpoint count, color = health (green/yellow/red). Writes an HTML file to disk. Use for visual architecture review. Returns JSON: { outputPath, services, edges }.",
|
|
112798
112893
|
{
|
|
112799
112894
|
output: z9.string().max(512).optional().describe("Output file path (default: /tmp/trace-mcp-subproject-topology.html)"),
|
|
112800
112895
|
layout: z9.enum(["force", "hierarchical", "radial"]).optional().describe("Graph layout (default force)")
|
|
@@ -112816,7 +112911,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112816
112911
|
runtimeIntelligence.start().catch((e) => logger.error({ error: e }, "Failed to start Runtime Intelligence"));
|
|
112817
112912
|
server.tool(
|
|
112818
112913
|
"get_runtime_profile",
|
|
112819
|
-
"Runtime profile for a symbol or route: call count, latency percentiles (p50/p95/p99), error rate, calls per hour. Requires OTLP trace ingestion.",
|
|
112914
|
+
"Runtime profile for a symbol or route: call count, latency percentiles (p50/p95/p99), error rate, calls per hour. Requires OTLP trace ingestion. Read-only, queries external runtime data. Use for performance analysis of specific endpoints. Returns JSON: { symbol_id, callCount, latency: { p50, p95, p99 }, errorRate, callsPerHour }.",
|
|
112820
112915
|
{
|
|
112821
112916
|
symbol_id: z9.string().max(512).optional().describe("Symbol ID to profile"),
|
|
112822
112917
|
fqn: z9.string().max(512).optional().describe("Fully qualified name"),
|
|
@@ -112831,7 +112926,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112831
112926
|
);
|
|
112832
112927
|
server.tool(
|
|
112833
112928
|
"get_runtime_call_graph",
|
|
112834
|
-
"Actual call graph from runtime traces (vs static analysis). Shows observed call paths with call counts and latency.",
|
|
112929
|
+
"Actual call graph from runtime traces (vs static analysis). Shows observed call paths with call counts and latency. Requires OTLP trace ingestion. Read-only, queries external runtime data. For static call graph use get_call_graph instead. Returns JSON: { root, calls: [{ symbol, count, latency }] }.",
|
|
112835
112930
|
{
|
|
112836
112931
|
symbol_id: z9.string().max(512).optional().describe("Symbol ID as root"),
|
|
112837
112932
|
fqn: z9.string().max(512).optional().describe("Fully qualified name as root"),
|
|
@@ -112846,7 +112941,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112846
112941
|
);
|
|
112847
112942
|
server.tool(
|
|
112848
112943
|
"get_endpoint_analytics",
|
|
112849
|
-
"Per-route analytics: request count, error rate, latency, caller services. Requires OTLP trace ingestion.",
|
|
112944
|
+
"Per-route analytics: request count, error rate, latency, caller services. Requires OTLP trace ingestion. Read-only, queries external runtime data. Use to understand endpoint performance and traffic. Returns JSON: { uri, method, requestCount, errorRate, latency, callerServices }.",
|
|
112850
112945
|
{
|
|
112851
112946
|
uri: z9.string().max(512).describe('Route URI (e.g. "/api/users/{id}")'),
|
|
112852
112947
|
method: z9.string().max(10).optional().describe("HTTP method filter"),
|
|
@@ -112860,7 +112955,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112860
112955
|
);
|
|
112861
112956
|
server.tool(
|
|
112862
112957
|
"get_runtime_deps",
|
|
112863
|
-
"Which external services (databases, caches, APIs, queues) does this code actually call at runtime. Based on OTLP traces.",
|
|
112958
|
+
"Which external services (databases, caches, APIs, queues) does this code actually call at runtime. Based on OTLP traces. Read-only, queries external runtime data. Use to discover actual runtime dependencies vs static analysis. Returns JSON: { dependencies: [{ type, name, callCount }] }.",
|
|
112864
112959
|
{
|
|
112865
112960
|
symbol_id: z9.string().max(512).optional().describe("Symbol ID"),
|
|
112866
112961
|
fqn: z9.string().max(512).optional().describe("Fully qualified name"),
|
|
@@ -112879,7 +112974,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112879
112974
|
}
|
|
112880
112975
|
server.tool(
|
|
112881
112976
|
"query_by_intent",
|
|
112882
|
-
"Map a business question to domain taxonomy \u2192 returns domain ownership and relevance scores (no source code). Use when you need to know WHICH DOMAIN owns specific functionality.",
|
|
112977
|
+
"Map a business question to domain taxonomy \u2192 returns domain ownership and relevance scores (no source code). Use when you need to know WHICH DOMAIN owns specific functionality. For actual source code use get_feature_context instead. Read-only. Returns JSON: { symbols: [{ symbol_id, domain, relevance }] }.",
|
|
112883
112978
|
{
|
|
112884
112979
|
query: z9.string().min(1).max(500).describe("Business-level question about the codebase"),
|
|
112885
112980
|
limit: z9.number().int().min(1).max(50).optional().describe("Max symbols to return (default 15)")
|
|
@@ -112897,7 +112992,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112897
112992
|
);
|
|
112898
112993
|
server.tool(
|
|
112899
112994
|
"get_domain_map",
|
|
112900
|
-
"Get hierarchical map of business domains with key symbols per domain. Auto-builds domain taxonomy on first call using heuristic classification.",
|
|
112995
|
+
"Get hierarchical map of business domains with key symbols per domain. Auto-builds domain taxonomy on first call using heuristic classification. Use to understand business domain boundaries. For specific domain code use get_domain_context instead. Read-only. Returns JSON: { domains: [{ name, children, symbols }] }.",
|
|
112901
112996
|
{
|
|
112902
112997
|
depth: z9.number().int().min(1).max(5).optional().describe("Max taxonomy depth (default 3)"),
|
|
112903
112998
|
include_symbols: z9.boolean().optional().describe("Include top symbols per domain (default true)"),
|
|
@@ -112911,7 +113006,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112911
113006
|
);
|
|
112912
113007
|
server.tool(
|
|
112913
113008
|
"get_domain_context",
|
|
112914
|
-
'Get all code related to a specific business domain. Supports "parent/child" notation (e.g. "payments/refunds").',
|
|
113009
|
+
'Get all code related to a specific business domain. Supports "parent/child" notation (e.g. "payments/refunds"). Use to explore code within a domain boundary. For the full domain taxonomy use get_domain_map instead. Read-only. Returns JSON: { domain, symbols: [{ symbol_id, name, file, source }], relatedDomains }.',
|
|
112915
113010
|
{
|
|
112916
113011
|
domain: z9.string().min(1).max(256).describe('Domain name (e.g. "payments" or "payments/refunds")'),
|
|
112917
113012
|
include_related: z9.boolean().optional().describe("Include symbols from related domains (default false)"),
|
|
@@ -112925,7 +113020,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112925
113020
|
);
|
|
112926
113021
|
server.tool(
|
|
112927
113022
|
"get_cross_domain_deps",
|
|
112928
|
-
"Show which business domains depend on which. Based on edges between symbols in different domains.",
|
|
113023
|
+
"Show which business domains depend on which. Based on edges between symbols in different domains. Use to understand domain coupling. Read-only. Returns JSON: { dependencies: [{ from, to, edgeCount }] }.",
|
|
112929
113024
|
{
|
|
112930
113025
|
domain: z9.string().max(256).optional().describe("Focus on a specific domain (default: all)")
|
|
112931
113026
|
},
|
|
@@ -112937,7 +113032,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112937
113032
|
);
|
|
112938
113033
|
server.tool(
|
|
112939
113034
|
"graph_query",
|
|
112940
|
-
'Trace how named symbols relate in the dependency graph \u2192 returns subgraph + Mermaid diagram. Input must contain symbol/class names (e.g. "How does AuthService reach Database?", "What depends on UserModel?").',
|
|
113035
|
+
'Trace how named symbols relate in the dependency graph \u2192 returns subgraph + Mermaid diagram. Input must contain symbol/class names (e.g. "How does AuthService reach Database?", "What depends on UserModel?"). Use for ad-hoc graph exploration. For structured call graph use get_call_graph instead. Read-only. Returns JSON: { nodes, edges, mermaid }.',
|
|
112941
113036
|
{
|
|
112942
113037
|
query: z9.string().min(1).max(500).describe("Natural language question about code relationships"),
|
|
112943
113038
|
depth: z9.number().int().min(1).max(6).optional().describe("Max traversal depth (default 3)"),
|
|
@@ -112951,7 +113046,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112951
113046
|
);
|
|
112952
113047
|
server.tool(
|
|
112953
113048
|
"get_dataflow",
|
|
112954
|
-
"Intra-function dataflow analysis: track how each parameter flows through the function body \u2014 into which calls, where it gets mutated, and what is returned. Phase 1: single function scope.",
|
|
113049
|
+
"Intra-function dataflow analysis: track how each parameter flows through the function body \u2014 into which calls, where it gets mutated, and what is returned. Phase 1: single function scope. Use to understand data transformations within a function. For security-focused data flow use taint_analysis instead. Read-only. Returns JSON: { symbol_id, params: [{ name, flows: [{ target, mutated }] }], returnPaths }.",
|
|
112955
113050
|
{
|
|
112956
113051
|
symbol_id: z9.string().max(512).optional().describe("Symbol ID of the function/method to analyze"),
|
|
112957
113052
|
fqn: z9.string().max(512).optional().describe("Fully qualified name of the function/method"),
|
|
@@ -112966,7 +113061,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
112966
113061
|
);
|
|
112967
113062
|
server.tool(
|
|
112968
113063
|
"visualize_graph",
|
|
112969
|
-
"Open interactive HTML graph in browser showing file/symbol dependencies. Supports force/hierarchical/radial layouts, community coloring. Use granularity=symbol to see individual functions/classes/methods as nodes instead of files.",
|
|
113064
|
+
"Open interactive HTML graph in browser showing file/symbol dependencies. Supports force/hierarchical/radial layouts, community coloring. Use granularity=symbol to see individual functions/classes/methods as nodes instead of files. Writes an HTML file to disk. For static Mermaid/DOT output use get_dependency_diagram instead. Returns JSON: { outputPath, nodes, edges }.",
|
|
112970
113065
|
{
|
|
112971
113066
|
scope: z9.string().min(1).max(512).describe('Scope: file path, directory (e.g. "src/"), or "project"'),
|
|
112972
113067
|
depth: z9.number().int().min(1).max(5).optional().describe("Max hops from scope (default 2)"),
|
|
@@ -113002,7 +113097,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113002
113097
|
);
|
|
113003
113098
|
server.tool(
|
|
113004
113099
|
"get_dependency_diagram",
|
|
113005
|
-
'Render dependency diagram for a file/directory path as Mermaid or DOT. Input: a path like "src/tools/" \u2014 not a question. Trims to max_nodes most important nodes.',
|
|
113100
|
+
'Render dependency diagram for a file/directory path as Mermaid or DOT. Input: a path like "src/tools/" \u2014 not a question. Trims to max_nodes most important nodes. Read-only. For interactive HTML visualization use visualize_graph instead. Returns JSON: { format, diagram, nodes, edges }.',
|
|
113006
113101
|
{
|
|
113007
113102
|
scope: z9.string().min(1).max(512).describe('Scope: file path, directory, or "project"'),
|
|
113008
113103
|
depth: z9.number().int().min(1).max(5).optional().describe("Max hops from scope (default 2)"),
|
|
@@ -113017,7 +113112,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113017
113112
|
);
|
|
113018
113113
|
server.tool(
|
|
113019
113114
|
"search_text",
|
|
113020
|
-
"Full-text search across all indexed files. Supports regex, glob file patterns, language filter. Use for finding strings, comments, TODOs, config values, error messages \u2014 anything not captured as a symbol.",
|
|
113115
|
+
"Full-text search across all indexed files. Supports regex, glob file patterns, language filter. Use for finding strings, comments, TODOs, config values, error messages \u2014 anything not captured as a symbol. For symbol search (functions, classes) use search instead. Read-only. Returns JSON: { matches: [{ file, line, text, context }], total_matches }.",
|
|
113021
113116
|
{
|
|
113022
113117
|
query: z9.string().min(1).max(1e3).describe("Search string or regex pattern"),
|
|
113023
113118
|
is_regex: z9.boolean().optional().describe("Treat query as regex (default false)"),
|
|
@@ -113048,7 +113143,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113048
113143
|
);
|
|
113049
113144
|
server.tool(
|
|
113050
113145
|
"predict_bugs",
|
|
113051
|
-
"Predict which files are most likely to contain bugs. Multi-signal scoring: git churn, fix-commit ratio, complexity, coupling, PageRank importance, author count. Each prediction includes a numeric score, risk bucket (low/medium/high/critical) AND a confidence_level (low/medium/high/multi_signal) counting how many independent signals actually fired. Result envelope includes _methodology disclosure. Cached for 1 hour; use refresh=true to recompute.",
|
|
113146
|
+
"Predict which files are most likely to contain bugs. Multi-signal scoring: git churn, fix-commit ratio, complexity, coupling, PageRank importance, author count. Each prediction includes a numeric score, risk bucket (low/medium/high/critical) AND a confidence_level (low/medium/high/multi_signal) counting how many independent signals actually fired. Result envelope includes _methodology disclosure. Cached for 1 hour; use refresh=true to recompute. Requires git. Use for proactive bug hunting. For complexity+churn hotspots only use get_risk_hotspots instead. Read-only. Returns JSON: { predictions: [{ file, score, risk, confidence_level, signals }], total }.",
|
|
113052
113147
|
{
|
|
113053
113148
|
limit: z9.number().int().min(1).max(200).optional().describe("Max results (default: 50)"),
|
|
113054
113149
|
min_score: z9.number().min(0).max(1).optional().describe("Min bug probability score to include (default: 0)"),
|
|
@@ -113071,7 +113166,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113071
113166
|
);
|
|
113072
113167
|
server.tool(
|
|
113073
113168
|
"detect_drift",
|
|
113074
|
-
"Detect architectural drift: cross-module co-change anomalies (files in different modules that always change together) and shotgun surgery patterns (commits touching 3+ modules). Requires git.",
|
|
113169
|
+
"Detect architectural drift: cross-module co-change anomalies (files in different modules that always change together) and shotgun surgery patterns (commits touching 3+ modules). Requires git. Use to identify hidden coupling across modules. For file-pair co-changes use get_co_changes instead. Read-only. Returns JSON: { anomalies, shotgunSurgery, total }.",
|
|
113075
113170
|
{
|
|
113076
113171
|
since_days: z9.number().int().min(1).optional().describe("Analyze commits from last N days (default: 180)"),
|
|
113077
113172
|
min_confidence: z9.number().min(0).max(1).optional().describe("Min Jaccard confidence for co-change anomalies (default: 0.3)")
|
|
@@ -113088,7 +113183,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113088
113183
|
);
|
|
113089
113184
|
server.tool(
|
|
113090
113185
|
"get_tech_debt",
|
|
113091
|
-
"Per-module tech debt score (A\u2013F grade) combining: complexity, coupling instability, test coverage gaps, and git churn. Includes actionable recommendations.",
|
|
113186
|
+
"Per-module tech debt score (A\u2013F grade) combining: complexity, coupling instability, test coverage gaps, and git churn. Includes actionable recommendations. Use for architecture review and prioritizing cleanup. Read-only. Returns JSON: { modules: [{ module, grade, score, factors, recommendations }] }.",
|
|
113092
113187
|
{
|
|
113093
113188
|
module: z9.string().max(256).optional().describe('Focus on a specific module path (e.g. "src/tools")'),
|
|
113094
113189
|
refresh: z9.boolean().optional().describe("Force recomputation (default: false)")
|
|
@@ -113107,7 +113202,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113107
113202
|
);
|
|
113108
113203
|
server.tool(
|
|
113109
113204
|
"assess_change_risk",
|
|
113110
|
-
"Before modifying a file or symbol, predict risk level (low/medium/high/critical) with contributing factors and recommended mitigations. Combines blast radius, complexity, git churn, test coverage, and coupling.",
|
|
113205
|
+
"Before modifying a file or symbol, predict risk level (low/medium/high/critical) with contributing factors and recommended mitigations. Combines blast radius, complexity, git churn, test coverage, and coupling. Use as a quick risk check. For full impact report with affected tests and dependents use get_change_impact instead. Read-only. Returns JSON: { risk, level, factors: [{ name, value }], mitigations }.",
|
|
113111
113206
|
{
|
|
113112
113207
|
file_path: z9.string().max(512).optional().describe("File path to assess"),
|
|
113113
113208
|
symbol_id: z9.string().max(512).optional().describe("Symbol ID to assess")
|
|
@@ -113129,7 +113224,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113129
113224
|
);
|
|
113130
113225
|
server.tool(
|
|
113131
113226
|
"get_health_trends",
|
|
113132
|
-
"Time-series health metrics for a file or module: bug score, complexity, coupling, churn over time. Populated by predict_bugs runs.",
|
|
113227
|
+
"Time-series health metrics for a file or module: bug score, complexity, coupling, churn over time. Populated by predict_bugs runs. Use to track if a module is improving or degrading. Read-only. Returns JSON: { dataPoints: [{ date, bugScore, complexity, coupling, churn }] }.",
|
|
113133
113228
|
{
|
|
113134
113229
|
file_path: z9.string().max(512).optional().describe("File path to check"),
|
|
113135
113230
|
module: z9.string().max(256).optional().describe("Module path prefix to check"),
|
|
@@ -113147,7 +113242,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113147
113242
|
);
|
|
113148
113243
|
server.tool(
|
|
113149
113244
|
"get_workspace_map",
|
|
113150
|
-
"List all detected monorepo workspaces with file counts, symbol counts, and languages. Returns dependency graph between workspaces showing cross-workspace imports.",
|
|
113245
|
+
"List all detected monorepo workspaces with file counts, symbol counts, and languages. Returns dependency graph between workspaces showing cross-workspace imports. Use for monorepo structure overview. For impact of changes on other workspaces use get_cross_workspace_impact instead. Read-only. Returns JSON: { workspaces: [{ name, files, symbols, languages }], dependencies }.",
|
|
113151
113246
|
{
|
|
113152
113247
|
include_dependencies: z9.boolean().optional().describe("Include cross-workspace dependency graph (default: true)")
|
|
113153
113248
|
},
|
|
@@ -113178,7 +113273,7 @@ function registerAdvancedTools(server, ctx) {
|
|
|
113178
113273
|
);
|
|
113179
113274
|
server.tool(
|
|
113180
113275
|
"get_cross_workspace_impact",
|
|
113181
|
-
"Show which workspaces are affected by changes in a given workspace. Lists all cross-workspace edges, affected symbols, and the public API surface consumed by other workspaces.",
|
|
113276
|
+
"Show which workspaces are affected by changes in a given workspace. Lists all cross-workspace edges, affected symbols, and the public API surface consumed by other workspaces. Use before modifying shared code in a monorepo. Read-only. Returns JSON: { workspace, public_api, consumed_by, depends_on, cross_workspace_edges }.",
|
|
113182
113277
|
{
|
|
113183
113278
|
workspace: z9.string().max(256).describe("Workspace name to analyze")
|
|
113184
113279
|
},
|
|
@@ -115038,28 +115133,331 @@ function formatGateReport(report) {
|
|
|
115038
115133
|
return lines.join("\n");
|
|
115039
115134
|
}
|
|
115040
115135
|
|
|
115136
|
+
// src/tools/quality/security-context-export.ts
|
|
115137
|
+
import { readFileSync as readFileSync9 } from "fs";
|
|
115138
|
+
import path107 from "path";
|
|
115139
|
+
import { ok as ok72 } from "neverthrow";
|
|
115140
|
+
// Ordered lookup table mapping callee names to security capability categories.
// classifyFunction() scans this top-to-bottom and the FIRST matching pattern
// wins, so more specific entries must stay above broader prefix matchers.
// Patterns are tested against bare callee names (e.g. "readFileSync") and
// dotted member expressions (e.g. "fs.readFile") as captured by the call-site
// regexes elsewhere in this module.
var CATEGORY_PATTERNS = [
  // file_read: fs read APIs, both bare imports and fs./fs.promises. members
  { pattern: /^(?:readFile|readFileSync|readdir|readdirSync|createReadStream|promises\.readFile)$/, category: "file_read" },
  { pattern: /^(?:fs\.read|fs\.promises\.read|fsPromises\.read)/, category: "file_read" },
  // file_write: write/append/stream plus destructive ops (unlink, rm, rename)
  { pattern: /^(?:writeFile|writeFileSync|appendFile|appendFileSync|createWriteStream|promises\.writeFile)$/, category: "file_write" },
  { pattern: /^(?:unlink|unlinkSync|rm|rmSync|rmdir|rmdirSync|rename|renameSync|copyFile|copyFileSync)$/, category: "file_write" },
  { pattern: /^(?:fs\.write|fs\.promises\.write|fs\.unlink|fs\.rm|fsPromises\.write)/, category: "file_write" },
  { pattern: /^(?:mkdir|mkdirSync|promises\.mkdir)$/, category: "file_write" },
  // network_outbound: fetch, common HTTP client libs, node http/net/tls, browser APIs
  { pattern: /^(?:fetch|request|got|axios)$/, category: "network_outbound" },
  { pattern: /^(?:http\.request|https\.request|http\.get|https\.get)$/, category: "network_outbound" },
  { pattern: /^(?:net\.connect|net\.createConnection|tls\.connect)$/, category: "network_outbound" },
  { pattern: /^(?:XMLHttpRequest|WebSocket)$/, category: "network_outbound" },
  // env_read: process.env and common cross-language env accessors
  { pattern: /^(?:process\.env|env\.)/, category: "env_read" },
  { pattern: /^(?:getenv|os\.environ|dotenv)/, category: "env_read" },
  // shell_exec: child_process spawning in all its forms
  { pattern: /^(?:exec|execSync|execFile|execFileSync|spawn|spawnSync|fork)$/, category: "shell_exec" },
  { pattern: /^(?:child_process\.|cp\.)/, category: "shell_exec" },
  // crypto: node crypto primitives (hashing, ciphers, signing, HMAC)
  { pattern: /^(?:createHash|createCipher|createCipheriv|createDecipher|createDecipheriv|createSign|createVerify|createHmac)$/, category: "crypto" },
  { pattern: /^(?:crypto\.)/, category: "crypto" },
  // serialization: dynamic code evaluation and unsafe deserializers
  { pattern: /^(?:eval|Function)$/, category: "serialization" },
  { pattern: /^(?:deserialize|unserialize|pickle\.loads|yaml\.load)$/, category: "serialization" }
];
|
|
115167
|
+
/**
 * Classify a callee name into a security capability category.
 *
 * Scans CATEGORY_PATTERNS in declaration order; the first pattern that
 * matches wins. Returns the category string, or null when the name matches
 * no known security-relevant pattern.
 */
function classifyFunction(name) {
  const hit = CATEGORY_PATTERNS.find((entry) => entry.pattern.test(name));
  return hit ? hit.category : null;
}
|
|
115173
|
+
// Quick presence check for any MCP annotation hint key followed by a colon.
var ANNOTATION_RE = /\{\s*(?:readOnlyHint|destructiveHint|idempotentHint|openWorldHint)\s*:/;
/**
 * Strict-order fallback parser for an MCP tool annotations object literal.
 *
 * Searches the 2000 characters after `toolCallIndex` for an object of the
 * form `{ readOnlyHint: bool, destructiveHint: bool, idempotentHint: bool,
 * openWorldHint: bool }` where each clause is optional but the order is
 * fixed. Returns a plain object with only the hints that were present, or
 * null when nothing parseable is found.
 *
 * Fix: previously the openWorldHint clause was the only mandatory group, so
 * annotation objects without it (e.g. `{ readOnlyHint: true }`) were never
 * matched by this fallback. All four clauses are now optional; a lookahead
 * requires at least one hint key inside the braces so a plain `{}` (such as
 * an empty schema object) can never match spuriously.
 */
function parseAnnotations(source, toolCallIndex) {
  const searchRegion = source.slice(toolCallIndex, toolCallIndex + 2e3);
  const annotationMatch = searchRegion.match(
    /\{\s*(?=[^}]*(?:readOnlyHint|destructiveHint|idempotentHint|openWorldHint))(readOnlyHint\s*:\s*(true|false)\s*,?\s*)?(destructiveHint\s*:\s*(true|false)\s*,?\s*)?(idempotentHint\s*:\s*(true|false)\s*,?\s*)?(openWorldHint\s*:\s*(true|false)\s*,?\s*)?\}/
  );
  if (!annotationMatch) return null;
  const annotations = {};
  // Even-numbered groups capture the literal "true"/"false" of each clause.
  if (annotationMatch[2]) annotations.readOnlyHint = annotationMatch[2] === "true";
  if (annotationMatch[4]) annotations.destructiveHint = annotationMatch[4] === "true";
  if (annotationMatch[6]) annotations.idempotentHint = annotationMatch[6] === "true";
  if (annotationMatch[8]) annotations.openWorldHint = annotationMatch[8] === "true";
  return Object.keys(annotations).length > 0 ? annotations : null;
}
|
|
115187
|
+
/**
 * Order-insensitive parser for MCP tool annotation hints.
 *
 * Looks at the 3000 characters following `toolCallIndex`; after a cheap
 * presence check via ANNOTATION_RE, each of the four hint keys is matched
 * independently, so any subset in any order is recognized. Returns an
 * object with the hints found, or null when none are present.
 */
function parseAnnotationsFlexible(source, toolCallIndex) {
  const window = source.slice(toolCallIndex, toolCallIndex + 3e3);
  if (!ANNOTATION_RE.test(window)) return null;
  const hintNames = ["readOnlyHint", "destructiveHint", "idempotentHint", "openWorldHint"];
  const entries = [];
  for (const hintName of hintNames) {
    const valueMatch = window.match(new RegExp(`${hintName}\\s*:\\s*(true|false)`));
    if (valueMatch) entries.push([hintName, valueMatch[1] === "true"]);
  }
  return entries.length > 0 ? Object.fromEntries(entries) : null;
}
|
|
115199
|
+
/**
 * Depth-first walk over a call-graph node, appending every reachable call
 * whose name classifies as security-relevant to `results` (in place).
 *
 * `visited` holds symbol_ids already processed and guards against cycles in
 * the call graph. Nodes lacking a file or line are classified but not
 * reported, matching the requirement that findings be locatable.
 */
function collectCallsFromGraph(node, visited, results) {
  if (visited.has(node.symbol_id)) return;
  visited.add(node.symbol_id);
  const category = classifyFunction(node.name);
  if (category && node.file && node.line) {
    results.push({ function: node.name, file: node.file, line: node.line, category });
  }
  if (!node.calls) return;
  for (const child of node.calls) {
    collectCallsFromGraph(child, visited, results);
  }
}
|
|
115217
|
+
// Call-site matcher for well-known security-sensitive APIs (fs/http/net/
// crypto/child_process members, plus bare fetch/exec/spawn/eval/require and
// common HTTP client libs). Group 1 captures the callee name.
// NOTE: these /g regexes are never exec'd directly; consumers clone them via
// `new RegExp(re.source, "g")` so the stateful lastIndex is always fresh.
var SECURITY_CALL_RE = /\b((?:fs|http|https|net|crypto|child_process|cp)\.\w+|fetch|exec|execSync|spawn|spawnSync|eval|require|writeFile\w*|readFile\w*|unlink\w*|request|axios|got)\s*\(/g;
// Any textual access to process.env counts as an environment read.
var ENV_ACCESS_RE = /process\.env\b/g;
// Generic `identifier(` matcher used to resolve project-local symbols by name.
var GENERIC_CALL_RE = /\b([a-zA-Z_$]\w*)\s*\(/g;
// Language keywords that GENERIC_CALL_RE would otherwise mistake for calls
// (e.g. "if (", "for (", "return ("). Consumers skip these names.
var SKIP_KEYWORDS = /* @__PURE__ */ new Set([
  "if",
  "for",
  "while",
  "switch",
  "catch",
  "return",
  "typeof",
  "new",
  "async",
  "await",
  "function",
  "const",
  "let",
  "var",
  "class",
  "import",
  "export",
  "throw",
  "delete",
  "void",
  "yield",
  "as",
  "from"
]);
|
|
115245
|
+
/**
 * Locate the `{ ... }` body of an async tool handler near a tool
 * registration call site.
 *
 * Searches the 10k characters after `toolCallIndex` for either an async
 * arrow function or an async function expression, then brace-counts from
 * the opening body brace to find the matching close. Brace counting is
 * textual (braces inside string literals are counted too), which is
 * accepted as a heuristic here. If no balanced close is found within the
 * window, the body is capped at 5000 characters past the opening brace.
 *
 * Returns absolute `{ start, end }` offsets into `source` (start points at
 * the opening brace, end is exclusive past the closing brace), or null when
 * no async handler is found.
 */
function findHandlerBounds(source, toolCallIndex) {
  const region = source.slice(toolCallIndex, toolCallIndex + 1e4);
  // Arrow handlers take precedence over classic async function expressions.
  const match =
    region.match(/async\s+(?:\([^)]*\)|[^=]*?)\s*=>\s*\{/) ??
    region.match(/async\s+function\s*\([^)]*\)\s*\{/);
  if (!match || match.index === void 0) return null;
  const openBraceOffset = match.index + match[0].length - 1;
  const tail = region.slice(openBraceOffset);
  let nesting = 1;
  let closeOffset = -1;
  for (let i = 1; i < tail.length; i++) {
    const ch = tail[i];
    if (ch === "{") {
      nesting++;
    } else if (ch === "}") {
      nesting--;
      if (nesting === 0) {
        closeOffset = i + 1;
        break;
      }
    }
  }
  if (closeOffset === -1) closeOffset = Math.min(tail.length, 5e3);
  return {
    start: toolCallIndex + openBraceOffset,
    end: toolCallIndex + openBraceOffset + closeOffset
  };
}
|
|
115270
|
+
/**
 * Scan the inline handler of a tool registration for security-relevant
 * activity, following project-local callees transitively.
 *
 * Three passes over the handler body located by findHandlerBounds:
 *   1. SECURITY_CALL_RE hits classified via classifyFunction;
 *   2. process.env accesses (ENV_ACCESS_RE), always reported as env_read;
 *   3. generic `identifier(` calls resolved against the symbol store, each
 *      resolved symbol expanded through its call graph (up to `depth`) and
 *      its own source re-scanned for security calls.
 *
 * @param source        Full source text of the file containing the tool call.
 * @param toolCallIndex Offset of the `.tool(` registration within `source`.
 * @param filePath      Store-relative path of the file (used in findings).
 * @param store         Symbol store (getSymbolByName / getFileById).
 * @param projectRoot   Absolute project root used to resolve symbol files.
 * @param depth         Max call-graph traversal depth for resolved callees.
 * @returns { calls, calledSymbolIds } — findings plus the symbol_ids of
 *          project functions the handler invokes. Both empty when no handler
 *          body is found.
 */
function scanInlineHandler(source, toolCallIndex, filePath, store, projectRoot, depth) {
  const bounds = findHandlerBounds(source, toolCallIndex);
  if (!bounds) return { calls: [], calledSymbolIds: [] };
  const handlerBody = source.slice(bounds.start, bounds.end);
  const results = [];
  const calledSymbolIds = [];
  let m;
  // Clone the module-level /g regexes so their lastIndex state is fresh.
  const secRe = new RegExp(SECURITY_CALL_RE.source, "g");
  while ((m = secRe.exec(handlerBody)) !== null) {
    const funcName = m[1];
    const category = classifyFunction(funcName);
    if (category) {
      // 1-based line number of the match start within the whole file.
      const lineOffset = source.slice(0, bounds.start + m.index).split("\n").length;
      results.push({ function: funcName, file: filePath, line: lineOffset, category });
    }
  }
  const envRe = new RegExp(ENV_ACCESS_RE.source, "g");
  while ((m = envRe.exec(handlerBody)) !== null) {
    const lineOffset = source.slice(0, bounds.start + m.index).split("\n").length;
    results.push({ function: "process.env", file: filePath, line: lineOffset, category: "env_read" });
  }
  const genericRe = new RegExp(GENERIC_CALL_RE.source, "g");
  const seenFuncs = /* @__PURE__ */ new Set();
  while ((m = genericRe.exec(handlerBody)) !== null) {
    const funcName = m[1];
    // Skip language keywords and names already resolved in this handler.
    if (SKIP_KEYWORDS.has(funcName) || seenFuncs.has(funcName)) continue;
    seenFuncs.add(funcName);
    // Try function first, then method; unresolved names are ignored.
    const sym = store.getSymbolByName(funcName, "function") ?? store.getSymbolByName(funcName, "method");
    if (!sym) continue;
    calledSymbolIds.push(sym.symbol_id);
    // Expand the callee's own call graph and harvest security-relevant calls.
    const cgResult = getCallGraph(store, { symbolId: sym.symbol_id }, depth);
    if (cgResult.isOk() && cgResult.value.root) {
      const visited = /* @__PURE__ */ new Set();
      collectCallsFromGraph(cgResult.value.root, visited, results);
    }
    // Re-scan the callee's source text directly, since the call graph may
    // not surface raw fs/net/env usage. Read failures are deliberately
    // best-effort: a missing or unreadable file just skips this step.
    const file = sym.file_id ? store.getFileById(sym.file_id) : null;
    if (file && sym.line_start && sym.line_end) {
      // NOTE(review): assumes line_start/line_end are 1-based inclusive —
      // the slice below is consistent with that; confirm against the store.
      const symAbsPath = path107.resolve(projectRoot, file.path);
      try {
        const symSource = readFileSync9(symAbsPath, "utf-8");
        const lines = symSource.split("\n");
        const symBody = lines.slice(sym.line_start - 1, sym.line_end).join("\n");
        scanSourceForSecurityCalls(symBody, file.path, sym.line_start, results);
      } catch {
      }
    }
  }
  return { calls: results, calledSymbolIds };
}
|
|
115319
|
+
/**
 * Scan a function body for security-relevant calls and process.env reads,
 * appending findings to `results` in place.
 *
 * @param body      Source text of the function body to scan.
 * @param filePath  Path reported in each finding.
 * @param startLine 1-based line number of the body's first line in its file;
 *                  used to convert match offsets into absolute line numbers.
 * @param results   Output array of { function, file, line, category }.
 */
function scanSourceForSecurityCalls(body, filePath, startLine, results) {
  // Clone the module-level /g regexes so lastIndex state is never shared.
  const callMatches = body.matchAll(new RegExp(SECURITY_CALL_RE.source, "g"));
  for (const match of callMatches) {
    const callee = match[1];
    const category = classifyFunction(callee);
    if (!category) continue;
    const line = startLine + body.slice(0, match.index).split("\n").length - 1;
    results.push({ function: callee, file: filePath, line, category });
  }
  const envMatches = body.matchAll(new RegExp(ENV_ACCESS_RE.source, "g"));
  for (const match of envMatches) {
    const line = startLine + body.slice(0, match.index).split("\n").length - 1;
    results.push({ function: "process.env", file: filePath, line, category: "env_read" });
  }
}
|
|
115336
|
+
// Bundler-inlined version switch: the `true` branch was baked in at build
// time, so the dev fallback string is dead code in this dist artifact.
var PKG_VERSION = true ? "1.22.0" : "0.0.0-dev";
|
|
115337
|
+
/**
 * Build a security-context enrichment document for all MCP tool
 * registrations indexed in the store.
 *
 * For every route with method "TOOL": locate the `.tool("name"...)` call in
 * its source file, parse annotation hints, scan the inline handler (and its
 * resolved callees) for security-relevant calls, and aggregate a per-file
 * capability map. Taint analysis is then run once per directory containing
 * a tool registration to collect env/file-read flows into sinks.
 *
 * @param store       Symbol/route store (getAllRoutes, getFileById, ...).
 * @param projectRoot Absolute root used to resolve file paths for reading.
 * @param opts        { depth?, scope? } — depth caps call-graph traversal at
 *                    5; scope filters files by path prefix.
 * @returns ok(...) envelope with $schema, generator, tool_registrations,
 *          sensitive_flows, capability_map and warnings. Always ok: missing
 *          files and failed analyses degrade to skips/warnings, never errors.
 */
function exportSecurityContext(store, projectRoot, opts = {}) {
  // Cap traversal depth defensively regardless of caller input.
  const depth = Math.min(opts.depth ?? 3, 5);
  const warnings = [];
  const allRoutes = store.getAllRoutes();
  // Tool registrations are modeled as routes with the synthetic method "TOOL".
  const toolRoutes = allRoutes.filter((r) => r.method === "TOOL");
  if (toolRoutes.length === 0) {
    warnings.push("No MCP tool registrations found in the index. Ensure the project uses @modelcontextprotocol/sdk and has been indexed.");
  }
  const toolRegistrations = [];
  const capabilityMap = {};
  for (const route of toolRoutes) {
    const fileRow = route.file_id ? store.getFileById(route.file_id) : null;
    if (!fileRow) continue;
    // Optional scope filter: simple path-prefix match.
    if (opts.scope && !fileRow.path.startsWith(opts.scope)) continue;
    const absPath = path107.resolve(projectRoot, fileRow.path);
    let source;
    try {
      source = readFileSync9(absPath, "utf-8");
    } catch {
      // Unreadable file: skip this registration rather than failing the export.
      continue;
    }
    // Escape the tool name so it can be embedded in a regex literal, then
    // find the exact `.tool('name'` / `.tool("name"` call site.
    const toolNameEscaped = route.uri.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    const toolCallRe = new RegExp(`\\.tool\\(\\s*['"]${toolNameEscaped}['"]`);
    const toolCallMatch = toolCallRe.exec(source);
    // NOTE(review): a genuine match at offset 0 is indistinguishable from
    // "not found" here; later `toolCallIndex > 0` checks treat it as absent.
    const toolCallIndex = toolCallMatch?.index ?? 0;
    const toolLine = toolCallIndex > 0 ? source.slice(0, toolCallIndex).split("\n").length : route.line ?? 0;
    // Prefer the order-insensitive parser; fall back to the strict one.
    const annotations = parseAnnotationsFlexible(source, toolCallIndex) ?? parseAnnotations(source, toolCallIndex);
    let handlerCalls = [];
    let handlerResolved = false;
    if (toolCallIndex > 0) {
      const scanResult = scanInlineHandler(source, toolCallIndex, fileRow.path, store, projectRoot, depth);
      handlerCalls = scanResult.calls;
      // "Resolved" means we either found security calls or at least mapped
      // the handler's callees to indexed symbols.
      handlerResolved = handlerCalls.length > 0 || scanResult.calledSymbolIds.length > 0;
    }
    // De-duplicate findings by (function, file, line).
    const seen = /* @__PURE__ */ new Set();
    handlerCalls = handlerCalls.filter((c) => {
      const key = `${c.function}:${c.file}:${c.line}`;
      if (seen.has(key)) return false;
      seen.add(key);
      return true;
    });
    // Aggregate which capability categories each file exercises.
    for (const call of handlerCalls) {
      if (!capabilityMap[call.file]) capabilityMap[call.file] = /* @__PURE__ */ new Set();
      capabilityMap[call.file].add(call.category);
    }
    toolRegistrations.push({
      name: route.uri,
      description: route.name ?? null,
      file: fileRow.path,
      line: toolLine,
      annotations,
      handler_resolved: handlerResolved,
      handler_calls: handlerCalls
    });
  }
  // Run taint analysis once per directory that hosts a tool registration.
  const sensitiveFlows = [];
  const mcpServerFiles = new Set(toolRegistrations.map((t) => t.file));
  if (mcpServerFiles.size > 0) {
    const mcpDirs = [...new Set([...mcpServerFiles].map((f) => path107.dirname(f)))];
    for (const dir of mcpDirs) {
      const taintResult = taintAnalysis(store, projectRoot, {
        scope: dir,
        sources: ["env", "file_read"],
        includeSanitized: false,
        limit: 50
      });
      // Failed taint runs are silently skipped; the export stays best-effort.
      if (taintResult.isOk()) {
        for (const flow of taintResult.value.flows) {
          sensitiveFlows.push({
            source: {
              kind: flow.source.kind,
              name: flow.source.variable,
              file: flow.file,
              line: flow.source.line
            },
            sink: {
              kind: flow.sink.kind,
              file: flow.file,
              line: flow.sink.line
            },
            // NOTE(review): hops reuse flow.file for every step — assumes
            // all path steps stay within one file; confirm in taintAnalysis.
            hops: flow.path.map((step) => `${flow.file}:${step.line}`)
          });
        }
      }
    }
  }
  // Serialize the capability Sets into sorted arrays for stable JSON output.
  const capabilityMapOutput = {};
  for (const [file, categories] of Object.entries(capabilityMap)) {
    capabilityMapOutput[file] = [...categories].sort();
  }
  return ok72({
    $schema: "https://skill-scan.dev/schemas/enrichment/v1.json",
    version: "1",
    generator: `trace-mcp/${PKG_VERSION}`,
    generated_at: (/* @__PURE__ */ new Date()).toISOString(),
    tool_registrations: toolRegistrations,
    sensitive_flows: sensitiveFlows,
    capability_map: capabilityMapOutput,
    warnings
  });
}
|
|
115438
|
+
|
|
115041
115439
|
// src/tools/register/quality.ts
|
|
115042
115440
|
function registerQualityTools(server, ctx) {
|
|
115043
115441
|
const { store, registry, config, projectRoot, j: j3 } = ctx;
|
|
115044
|
-
server.tool("get_co_changes", "Find files that frequently change together in git history (temporal coupling).", { file: z11.string().min(1).max(512).describe("File path to analyze"), min_confidence: z11.number().min(0).max(1).optional().describe("Minimum confidence threshold (default 0.3)"), min_count: z11.number().int().min(1).optional().describe("Minimum co-change count (default 3)"), window_days: z11.number().int().min(1).max(730).optional().describe("Git history window in days (default 180)"), limit: z11.number().int().min(1).max(100).optional().describe("Max results (default 20)") }, async ({ file, min_confidence, min_count, window_days, limit: lim }) => {
|
|
115442
|
+
server.tool("get_co_changes", "Find files that frequently change together in git history (temporal coupling). Requires git. Use to discover hidden dependencies between files. For cross-module co-change anomalies use detect_drift instead. Read-only. Returns JSON: { file, coChanges: [{ file, confidence, count }] }.", { file: z11.string().min(1).max(512).describe("File path to analyze"), min_confidence: z11.number().min(0).max(1).optional().describe("Minimum confidence threshold (default 0.3)"), min_count: z11.number().int().min(1).optional().describe("Minimum co-change count (default 3)"), window_days: z11.number().int().min(1).max(730).optional().describe("Git history window in days (default 180)"), limit: z11.number().int().min(1).max(100).optional().describe("Max results (default 20)") }, async ({ file, min_confidence, min_count, window_days, limit: lim }) => {
|
|
115045
115443
|
const result = getCoChanges(store, { file, minConfidence: min_confidence, minCount: min_count, windowDays: window_days, limit: lim });
|
|
115046
115444
|
if (result.isErr()) return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115047
115445
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115048
115446
|
});
|
|
115049
|
-
server.tool("refresh_co_changes", "Rebuild co-change index from git history.", { window_days: z11.number().int().min(1).max(730).optional().describe("Git history window in days (default 180)") }, async ({ window_days }) => {
|
|
115447
|
+
server.tool("refresh_co_changes", "Rebuild co-change index from git history. Mutates the co-change index; idempotent. Use after significant git history changes. Returns JSON: { status, pairs_stored, window_days }.", { window_days: z11.number().int().min(1).max(730).optional().describe("Git history window in days (default 180)") }, async ({ window_days }) => {
|
|
115050
115448
|
const days = window_days ?? 180;
|
|
115051
115449
|
const pairs = collectCoChanges(projectRoot, days);
|
|
115052
115450
|
const count2 = persistCoChanges(store, pairs, projectRoot, days);
|
|
115053
115451
|
return { content: [{ type: "text", text: j3({ status: "completed", pairs_stored: count2, window_days: days }) }] };
|
|
115054
115452
|
});
|
|
115055
|
-
server.tool("get_changed_symbols", 'Map a git diff to affected symbols (functions, classes, methods). For PR review. If "since" is omitted, auto-detects main/master as the base.', { since: z11.string().min(1).max(256).optional().describe("Git ref to compare from (SHA, branch, tag). If omitted, auto-detects main/master merge-base"), until: z11.string().max(256).optional().describe("Git ref to compare to (default: HEAD)"), include_blast_radius: z11.boolean().optional().describe("Include blast radius for each changed symbol (default false)"), max_blast_depth: z11.number().int().min(1).max(10).optional().describe("Max blast radius traversal depth (default 3)") }, async ({ since, until, include_blast_radius, max_blast_depth }) => {
|
|
115453
|
+
server.tool("get_changed_symbols", 'Map a git diff to affected symbols (functions, classes, methods). For PR review. If "since" is omitted, auto-detects main/master as the base. Requires git. Use for PR review to see which symbols changed. For full branch comparison with risk assessment use compare_branches instead. Read-only. Returns JSON: { changes: [{ symbol_id, name, kind, file, changeType }], total }.', { since: z11.string().min(1).max(256).optional().describe("Git ref to compare from (SHA, branch, tag). If omitted, auto-detects main/master merge-base"), until: z11.string().max(256).optional().describe("Git ref to compare to (default: HEAD)"), include_blast_radius: z11.boolean().optional().describe("Include blast radius for each changed symbol (default false)"), max_blast_depth: z11.number().int().min(1).max(10).optional().describe("Max blast radius traversal depth (default 3)") }, async ({ since, until, include_blast_radius, max_blast_depth }) => {
|
|
115056
115454
|
const result = getChangedSymbols(store, projectRoot, { since, until, includeBlastRadius: include_blast_radius, maxBlastDepth: max_blast_depth, defaultBaseBranch: config.git?.defaultBaseBranch });
|
|
115057
115455
|
if (result.isErr()) return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115058
115456
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115059
115457
|
});
|
|
115060
115458
|
server.tool(
|
|
115061
115459
|
"compare_branches",
|
|
115062
|
-
"Compare two branches at symbol level: what was added, modified, removed. Resolves merge-base automatically, groups by category/file/risk, includes blast radius and risk assessment.",
|
|
115460
|
+
"Compare two branches at symbol level: what was added, modified, removed. Resolves merge-base automatically, groups by category/file/risk, includes blast radius and risk assessment. Requires git. Use for comprehensive PR comparison. For a quick list of changed symbols without risk analysis use get_changed_symbols instead. Read-only. Returns JSON: { branch, base, mergeBase, changes: [{ symbol_id, category, risk }], summary }.",
|
|
115063
115461
|
{
|
|
115064
115462
|
branch: z11.string().min(1).max(256).describe('Branch to compare (e.g. "feature/payments")'),
|
|
115065
115463
|
base: z11.string().max(256).optional().describe('Base branch (default: "main")'),
|
|
@@ -115080,28 +115478,28 @@ function registerQualityTools(server, ctx) {
|
|
|
115080
115478
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115081
115479
|
}
|
|
115082
115480
|
);
|
|
115083
|
-
server.tool("detect_communities", "Run Leiden community detection on the file dependency graph. Identifies tightly-coupled file clusters (modules).", { resolution: z11.number().min(0.1).max(5).optional().describe("Resolution parameter \u2014 higher values produce more communities (default 1.0)") }, async ({ resolution }) => {
|
|
115481
|
+
server.tool("detect_communities", "Run Leiden community detection on the file dependency graph. Identifies tightly-coupled file clusters (modules). Mutates the community index (stores results); idempotent. Use before get_communities or get_community. Returns JSON: { communities: [{ id, files, size }], modularity }.", { resolution: z11.number().min(0.1).max(5).optional().describe("Resolution parameter \u2014 higher values produce more communities (default 1.0)") }, async ({ resolution }) => {
|
|
115084
115482
|
const result = detectCommunities2(store, resolution ?? 1);
|
|
115085
115483
|
if (result.isErr()) return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115086
115484
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115087
115485
|
});
|
|
115088
|
-
server.tool("get_communities", "Get previously detected communities (file clusters). Run detect_communities first.", {}, async () => {
|
|
115486
|
+
server.tool("get_communities", "Get previously detected communities (file clusters). Run detect_communities first. Read-only. Returns JSON: { communities: [{ id, files, size }], total }.", {}, async () => {
|
|
115089
115487
|
const result = getCommunities(store);
|
|
115090
115488
|
if (result.isErr()) return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115091
115489
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115092
115490
|
});
|
|
115093
|
-
server.tool("get_community", "Get details for a specific community: files, inter-community dependencies.", { id: z11.number().int().min(0).describe("Community ID") }, async ({ id }) => {
|
|
115491
|
+
server.tool("get_community", "Get details for a specific community: files, inter-community dependencies. Read-only. Use after detect_communities to drill into a specific cluster. Returns JSON: { id, files, interCommunityDeps }.", { id: z11.number().int().min(0).describe("Community ID") }, async ({ id }) => {
|
|
115094
115492
|
const result = getCommunityDetail(store, id);
|
|
115095
115493
|
if (result.isErr()) return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115096
115494
|
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115097
115495
|
});
|
|
115098
|
-
server.tool("audit_config", "Scan AI agent config files for stale references, dead paths, and token bloat.", { config_files: z11.array(z11.string().max(512)).optional().describe("Specific config files to audit (default: auto-detect)"), fix_suggestions: z11.boolean().optional().describe("Include fix suggestions (default true)") }, async ({ config_files, fix_suggestions }) => {
|
|
115496
|
+
server.tool("audit_config", "Scan AI agent config files for stale references, dead paths, and token bloat. Read-only. Use periodically to clean up CLAUDE.md and settings. Returns JSON: { issues: [{ file, type, message, suggestion }], total }.", { config_files: z11.array(z11.string().max(512)).optional().describe("Specific config files to audit (default: auto-detect)"), fix_suggestions: z11.boolean().optional().describe("Include fix suggestions (default true)") }, async ({ config_files, fix_suggestions }) => {
|
|
115099
115497
|
const result = auditConfig(store, projectRoot, { configFiles: config_files, fixSuggestions: fix_suggestions ?? true });
|
|
115100
115498
|
return { content: [{ type: "text", text: j3(result) }] };
|
|
115101
115499
|
});
|
|
115102
115500
|
server.tool(
|
|
115103
115501
|
"get_control_flow",
|
|
115104
|
-
"Build a Control Flow Graph (CFG) for a function/method: if/else branches, loops, try/catch, returns, throws. Shows logical paths through the code. Outputs Mermaid diagram, ASCII, or JSON.",
|
|
115502
|
+
"Build a Control Flow Graph (CFG) for a function/method: if/else branches, loops, try/catch, returns, throws. Shows logical paths through the code. Outputs Mermaid diagram, ASCII, or JSON. Use to understand branching logic before modifying complex functions. For call-level graph (who calls whom) use get_call_graph instead. Read-only. Returns Mermaid/ASCII/JSON: { nodes, edges, entryPoint, exitPoints }.",
|
|
115105
115503
|
{
|
|
115106
115504
|
symbol_id: z11.string().max(512).optional().describe("Symbol ID of the function/method"),
|
|
115107
115505
|
fqn: z11.string().max(512).optional().describe("Fully qualified name of the function/method"),
|
|
@@ -115123,7 +115521,7 @@ function registerQualityTools(server, ctx) {
|
|
|
115123
115521
|
);
|
|
115124
115522
|
server.tool(
|
|
115125
115523
|
"get_package_deps",
|
|
115126
|
-
"Cross-repo package dependency analysis: find which registered projects depend on a package, or what packages a project publishes. Scans package.json/composer.json/pyproject.toml across all repos in the registry.",
|
|
115524
|
+
"Cross-repo package dependency analysis: find which registered projects depend on a package, or what packages a project publishes. Scans package.json/composer.json/pyproject.toml across all repos in the registry. Use for cross-project dependency mapping. For impact of upgrading a specific package use plan_batch_change instead. Read-only. Returns JSON: { dependents, dependencies, package }.",
|
|
115127
115525
|
{
|
|
115128
115526
|
package: z11.string().max(256).optional().describe('Package name to analyze (e.g. "@myorg/shared-utils")'),
|
|
115129
115527
|
project: z11.string().max(256).optional().describe("Project name \u2014 analyze all packages it publishes"),
|
|
@@ -115140,7 +115538,7 @@ function registerQualityTools(server, ctx) {
|
|
|
115140
115538
|
);
|
|
115141
115539
|
server.tool(
|
|
115142
115540
|
"generate_docs",
|
|
115143
|
-
"Auto-generate project documentation from the code graph. Produces structured docs with architecture, API surface, data models, components, and dependency analysis.",
|
|
115541
|
+
"Auto-generate project documentation from the code graph. Produces structured docs with architecture, API surface, data models, components, and dependency analysis. Writes output file (markdown or HTML). Use when you need a comprehensive documentation snapshot. Returns JSON: { format, sections, outputPath }.",
|
|
115144
115542
|
{
|
|
115145
115543
|
scope: z11.enum(["project", "module", "directory"]).optional().describe("Scope (default: project)"),
|
|
115146
115544
|
path: z11.string().max(512).optional().describe("Path for module/directory scope"),
|
|
@@ -115160,7 +115558,7 @@ function registerQualityTools(server, ctx) {
|
|
|
115160
115558
|
);
|
|
115161
115559
|
server.tool(
|
|
115162
115560
|
"pack_context",
|
|
115163
|
-
"Pack project context into a single document for external LLMs. Intelligent selection by graph importance, fits within token budget. Better than Repomix for focused context. Strategies: most_relevant (default \u2014 feature/PageRank ranked), core_first (PageRank always wins, surfaces architecturally central code), compact (signatures only \u2014 drops source bodies, lets outlines cover much more of the repo per token).",
|
|
115561
|
+
"Pack project context into a single document for external LLMs. Intelligent selection by graph importance, fits within token budget. Better than Repomix for focused context. Strategies: most_relevant (default \u2014 feature/PageRank ranked), core_first (PageRank always wins, surfaces architecturally central code), compact (signatures only \u2014 drops source bodies, lets outlines cover much more of the repo per token). Read-only. Use when sharing project context with external tools. Returns XML/Markdown/JSON with selected code within budget.",
|
|
115164
115562
|
{
|
|
115165
115563
|
scope: z11.enum(["project", "module", "feature"]).describe("Scope: project (whole repo), module (subdirectory), feature (NL query)"),
|
|
115166
115564
|
path: z11.string().max(512).optional().describe("Subdirectory path (for module scope)"),
|
|
@@ -115190,7 +115588,7 @@ function registerQualityTools(server, ctx) {
|
|
|
115190
115588
|
);
|
|
115191
115589
|
server.tool(
|
|
115192
115590
|
"check_quality_gates",
|
|
115193
|
-
"Run configurable quality gate checks against the project. Returns pass/fail for each gate (complexity, coupling, circular imports, dead exports, tech debt, security, antipatterns, code smells). Designed for CI integration \u2014 AI can verify gates pass before committing.",
|
|
115591
|
+
"Run configurable quality gate checks against the project. Returns pass/fail for each gate (complexity, coupling, circular imports, dead exports, tech debt, security, antipatterns, code smells). Designed for CI integration \u2014 AI can verify gates pass before committing. Use before PR/commit to ensure quality standards. Read-only. Returns JSON: { passed, gates: [{ name, status, value, threshold }], summary }.",
|
|
115194
115592
|
{
|
|
115195
115593
|
scope: z11.enum(["project", "changed"]).optional().describe('Scope: "project" (all) or "changed" (git diff). Default: project'),
|
|
115196
115594
|
since: z11.string().max(128).optional().describe('Git ref for "changed" scope (e.g. "main")'),
|
|
@@ -115235,6 +115633,21 @@ function registerQualityTools(server, ctx) {
|
|
|
115235
115633
|
return { content: [{ type: "text", text: j3(report) }] };
|
|
115236
115634
|
}
|
|
115237
115635
|
);
|
|
115636
|
+
server.tool(
|
|
115637
|
+
"export_security_context",
|
|
115638
|
+
"Export security context for MCP server analysis. Generates enrichment JSON for skill-scan: tool registrations with annotations, transitive call graphs classified by security category (file_read, file_write, network_outbound, env_read, shell_exec, crypto, serialization), sensitive data flows, and per-file capability maps. Use to analyze MCP server security before installation. Read-only. Returns JSON: { tool_registrations, sensitive_flows, capability_map, warnings }.",
|
|
115639
|
+
{
|
|
115640
|
+
scope: z11.string().max(512).optional().describe("Limit analysis to directory (relative to project root)"),
|
|
115641
|
+
depth: z11.number().int().min(1).max(5).optional().describe("Call graph traversal depth (default: 3)")
|
|
115642
|
+
},
|
|
115643
|
+
async ({ scope: scope8, depth }) => {
|
|
115644
|
+
const result = exportSecurityContext(store, projectRoot, { scope: scope8, depth });
|
|
115645
|
+
if (result.isErr()) {
|
|
115646
|
+
return { content: [{ type: "text", text: j3(formatToolError(result.error)) }], isError: true };
|
|
115647
|
+
}
|
|
115648
|
+
return { content: [{ type: "text", text: j3(result.value) }] };
|
|
115649
|
+
}
|
|
115650
|
+
);
|
|
115238
115651
|
}
|
|
115239
115652
|
|
|
115240
115653
|
// src/tools/register/session.ts
|
|
@@ -115243,7 +115656,7 @@ init_project_context();
|
|
|
115243
115656
|
|
|
115244
115657
|
// src/tools/ai/ai-tools.ts
|
|
115245
115658
|
import { z as z12 } from "zod";
|
|
115246
|
-
import
|
|
115659
|
+
import path108 from "path";
|
|
115247
115660
|
function j(value) {
|
|
115248
115661
|
return JSON.stringify(value);
|
|
115249
115662
|
}
|
|
@@ -115259,7 +115672,7 @@ function symbolToContextItem(sym, file, projectRoot, score = 1) {
|
|
|
115259
115672
|
}
|
|
115260
115673
|
function readSourceSafe(filePath, byteStart, byteEnd, projectRoot, gitignored) {
|
|
115261
115674
|
try {
|
|
115262
|
-
const absPath =
|
|
115675
|
+
const absPath = path108.resolve(projectRoot, filePath);
|
|
115263
115676
|
return readByteRange(absPath, byteStart, byteEnd, gitignored);
|
|
115264
115677
|
} catch {
|
|
115265
115678
|
return null;
|
|
@@ -115576,19 +115989,19 @@ function registerAITools(server, ctx) {
|
|
|
115576
115989
|
init_logger();
|
|
115577
115990
|
init_global();
|
|
115578
115991
|
import Database6 from "better-sqlite3";
|
|
115579
|
-
import
|
|
115992
|
+
import path109 from "path";
|
|
115580
115993
|
import fs99 from "fs";
|
|
115581
115994
|
import crypto6 from "crypto";
|
|
115582
|
-
var BUNDLES_DIR =
|
|
115995
|
+
var BUNDLES_DIR = path109.join(TRACE_MCP_HOME, "bundles");
|
|
115583
115996
|
function ensureBundlesDir() {
|
|
115584
115997
|
fs99.mkdirSync(BUNDLES_DIR, { recursive: true });
|
|
115585
115998
|
}
|
|
115586
115999
|
function getBundlePath(packageName, version2) {
|
|
115587
116000
|
const safeName = packageName.replace(/[^a-zA-Z0-9._-]/g, "_");
|
|
115588
|
-
return
|
|
116001
|
+
return path109.join(BUNDLES_DIR, `${safeName}-${version2}.bundle.db`);
|
|
115589
116002
|
}
|
|
115590
116003
|
function getManifestPath() {
|
|
115591
|
-
return
|
|
116004
|
+
return path109.join(BUNDLES_DIR, "manifest.json");
|
|
115592
116005
|
}
|
|
115593
116006
|
function loadManifest() {
|
|
115594
116007
|
const p5 = getManifestPath();
|
|
@@ -115757,7 +116170,7 @@ function exportBundle(sourceDbPath, packageName, version2) {
|
|
|
115757
116170
|
const entry = {
|
|
115758
116171
|
package: packageName,
|
|
115759
116172
|
version: version2,
|
|
115760
|
-
file:
|
|
116173
|
+
file: path109.basename(bundlePath),
|
|
115761
116174
|
symbols: srcSymbols.length,
|
|
115762
116175
|
edges: srcEdges.length,
|
|
115763
116176
|
size_bytes: sizeBytes,
|
|
@@ -115782,7 +116195,7 @@ function removeBundle(packageName, version2) {
|
|
|
115782
116195
|
(b) => b.package === packageName && (!version2 || b.version === version2)
|
|
115783
116196
|
);
|
|
115784
116197
|
for (const entry of toRemove) {
|
|
115785
|
-
const fp =
|
|
116198
|
+
const fp = path109.join(BUNDLES_DIR, entry.file);
|
|
115786
116199
|
if (fs99.existsSync(fp)) fs99.unlinkSync(fp);
|
|
115787
116200
|
}
|
|
115788
116201
|
manifest.bundles = manifest.bundles.filter(
|
|
@@ -115848,8 +116261,8 @@ function searchBundles(bundles, query, opts = {}) {
|
|
|
115848
116261
|
// src/analytics/analytics-store.ts
|
|
115849
116262
|
init_global();
|
|
115850
116263
|
import Database7 from "better-sqlite3";
|
|
115851
|
-
import
|
|
115852
|
-
var ANALYTICS_DB_PATH =
|
|
116264
|
+
import path110 from "path";
|
|
116265
|
+
var ANALYTICS_DB_PATH = path110.join(TRACE_MCP_HOME, "analytics.db");
|
|
115853
116266
|
var SCHEMA_SQL = `
|
|
115854
116267
|
CREATE TABLE IF NOT EXISTS sessions (
|
|
115855
116268
|
id TEXT PRIMARY KEY,
|
|
@@ -116082,9 +116495,9 @@ var AnalyticsStore = class {
|
|
|
116082
116495
|
// src/analytics/log-parser.ts
|
|
116083
116496
|
init_logger();
|
|
116084
116497
|
import fs100 from "fs";
|
|
116085
|
-
import
|
|
116498
|
+
import path111 from "path";
|
|
116086
116499
|
import os9 from "os";
|
|
116087
|
-
var CLAUDE_PROJECTS_DIR =
|
|
116500
|
+
var CLAUDE_PROJECTS_DIR = path111.join(os9.homedir(), ".claude", "projects");
|
|
116088
116501
|
var CLAW_SESSIONS_DIR_NAME = ".claw/sessions";
|
|
116089
116502
|
function parseToolName(fullName) {
|
|
116090
116503
|
const match = fullName.match(/^mcp__([^_]+)__(.+)$/);
|
|
@@ -116161,7 +116574,7 @@ function parseSessionFile(filePath, projectPath) {
|
|
|
116161
116574
|
try {
|
|
116162
116575
|
const content = fs100.readFileSync(filePath, "utf-8");
|
|
116163
116576
|
const lines = content.split("\n").filter((l) => l.trim());
|
|
116164
|
-
const sessionId =
|
|
116577
|
+
const sessionId = path111.basename(filePath, ".jsonl");
|
|
116165
116578
|
const toolCalls = [];
|
|
116166
116579
|
const toolResults = /* @__PURE__ */ new Map();
|
|
116167
116580
|
let model = "";
|
|
@@ -116282,11 +116695,11 @@ function listProjectDirs() {
|
|
|
116282
116695
|
}));
|
|
116283
116696
|
}
|
|
116284
116697
|
function listSessionFiles(projectDirName) {
|
|
116285
|
-
const dir =
|
|
116698
|
+
const dir = path111.join(CLAUDE_PROJECTS_DIR, projectDirName);
|
|
116286
116699
|
if (!fs100.existsSync(dir)) return [];
|
|
116287
116700
|
const entries = fs100.readdirSync(dir, { withFileTypes: true });
|
|
116288
116701
|
return entries.filter((e) => e.isFile() && e.name.endsWith(".jsonl")).map((e) => {
|
|
116289
|
-
const filePath =
|
|
116702
|
+
const filePath = path111.join(dir, e.name);
|
|
116290
116703
|
const stat = fs100.statSync(filePath);
|
|
116291
116704
|
return { filePath, mtime: stat.mtimeMs };
|
|
116292
116705
|
});
|
|
@@ -116300,13 +116713,13 @@ function listAllSessions() {
|
|
|
116300
116713
|
}
|
|
116301
116714
|
const clawProjectPaths = discoverClawProjects();
|
|
116302
116715
|
for (const projectPath of clawProjectPaths) {
|
|
116303
|
-
const sessionsDir =
|
|
116716
|
+
const sessionsDir = path111.join(projectPath, CLAW_SESSIONS_DIR_NAME);
|
|
116304
116717
|
if (!fs100.existsSync(sessionsDir)) continue;
|
|
116305
116718
|
try {
|
|
116306
116719
|
const entries = fs100.readdirSync(sessionsDir, { withFileTypes: true });
|
|
116307
116720
|
for (const e of entries) {
|
|
116308
116721
|
if (!e.isFile() || !e.name.endsWith(".jsonl")) continue;
|
|
116309
|
-
const filePath =
|
|
116722
|
+
const filePath = path111.join(sessionsDir, e.name);
|
|
116310
116723
|
try {
|
|
116311
116724
|
const stat = fs100.statSync(filePath);
|
|
116312
116725
|
results.push({ filePath, projectPath, client: "claw-code", mtime: stat.mtimeMs });
|
|
@@ -116321,25 +116734,25 @@ function listAllSessions() {
|
|
|
116321
116734
|
function discoverClawProjects() {
|
|
116322
116735
|
const paths = /* @__PURE__ */ new Set();
|
|
116323
116736
|
for (const { projectPath } of listProjectDirs()) {
|
|
116324
|
-
if (fs100.existsSync(
|
|
116737
|
+
if (fs100.existsSync(path111.join(projectPath, CLAW_SESSIONS_DIR_NAME))) {
|
|
116325
116738
|
paths.add(projectPath);
|
|
116326
116739
|
}
|
|
116327
116740
|
}
|
|
116328
116741
|
const cwd = process.cwd();
|
|
116329
|
-
if (fs100.existsSync(
|
|
116742
|
+
if (fs100.existsSync(path111.join(cwd, CLAW_SESSIONS_DIR_NAME))) {
|
|
116330
116743
|
paths.add(cwd);
|
|
116331
116744
|
}
|
|
116332
116745
|
const home = os9.homedir();
|
|
116333
116746
|
const commonRoots = ["Projects", "projects", "dev", "workspace", "code", "PhpstormProjects", "WebstormProjects", "src"];
|
|
116334
116747
|
for (const root of commonRoots) {
|
|
116335
|
-
const rootDir =
|
|
116748
|
+
const rootDir = path111.join(home, root);
|
|
116336
116749
|
if (!fs100.existsSync(rootDir)) continue;
|
|
116337
116750
|
try {
|
|
116338
116751
|
const entries = fs100.readdirSync(rootDir, { withFileTypes: true });
|
|
116339
116752
|
for (const e of entries) {
|
|
116340
116753
|
if (!e.isDirectory()) continue;
|
|
116341
|
-
const projectDir =
|
|
116342
|
-
if (fs100.existsSync(
|
|
116754
|
+
const projectDir = path111.join(rootDir, e.name);
|
|
116755
|
+
if (fs100.existsSync(path111.join(projectDir, CLAW_SESSIONS_DIR_NAME))) {
|
|
116343
116756
|
paths.add(projectDir);
|
|
116344
116757
|
}
|
|
116345
116758
|
}
|
|
@@ -117045,7 +117458,7 @@ function formatBenchmarkMarkdown(result) {
|
|
|
117045
117458
|
|
|
117046
117459
|
// src/analytics/tech-detector.ts
|
|
117047
117460
|
import fs101 from "fs";
|
|
117048
|
-
import
|
|
117461
|
+
import path112 from "path";
|
|
117049
117462
|
|
|
117050
117463
|
// src/analytics/known-packages.ts
|
|
117051
117464
|
var KNOWN_PACKAGES = {
|
|
@@ -118709,7 +119122,7 @@ function detectCoverage(projectRoot, opts = {}) {
|
|
|
118709
119122
|
const manifestsFound = [];
|
|
118710
119123
|
const allDeps = [];
|
|
118711
119124
|
for (const { file, ecosystem, parser } of MANIFEST_PARSERS) {
|
|
118712
|
-
const filePath =
|
|
119125
|
+
const filePath = path112.join(projectRoot, file);
|
|
118713
119126
|
if (!fs101.existsSync(filePath)) continue;
|
|
118714
119127
|
manifestsFound.push(file);
|
|
118715
119128
|
const rawDeps = parser(filePath);
|
|
@@ -119916,7 +120329,7 @@ function registerSessionTools(server, ctx) {
|
|
|
119916
120329
|
}
|
|
119917
120330
|
server.tool(
|
|
119918
120331
|
"search_bundles",
|
|
119919
|
-
"Search pre-indexed bundles for symbols from popular libraries (React, Express, etc.). Returns symbol definitions from dependency bundles \u2014 useful for go-to-definition into node_modules/vendor. Install bundles via CLI: `trace-mcp bundles export`.",
|
|
120332
|
+
"Search pre-indexed bundles for symbols from popular libraries (React, Express, etc.). Returns symbol definitions from dependency bundles \u2014 useful for go-to-definition into node_modules/vendor. Install bundles via CLI: `trace-mcp bundles export`. For project source code search use search instead. Read-only. Returns JSON: { results: [{ name, kind, signature, bundle }], bundles_searched }.",
|
|
119920
120333
|
{
|
|
119921
120334
|
query: z14.string().min(1).max(256).describe("Symbol name or FQN to search"),
|
|
119922
120335
|
kind: z14.string().max(64).optional().describe("Filter by symbol kind (function, class, interface, etc.)"),
|
|
@@ -119934,7 +120347,7 @@ function registerSessionTools(server, ctx) {
|
|
|
119934
120347
|
);
|
|
119935
120348
|
server.tool(
|
|
119936
120349
|
"list_bundles",
|
|
119937
|
-
"List installed pre-indexed bundles for dependency libraries. Shows package name, version, symbol/edge counts, and size.",
|
|
120350
|
+
"List installed pre-indexed bundles for dependency libraries. Shows package name, version, symbol/edge counts, and size. Read-only. Returns JSON: { bundles: [{ name, version, symbols, edges, size }], total }.",
|
|
119938
120351
|
{},
|
|
119939
120352
|
async () => {
|
|
119940
120353
|
const bundles = listBundles();
|
|
@@ -119943,7 +120356,7 @@ function registerSessionTools(server, ctx) {
|
|
|
119943
120356
|
);
|
|
119944
120357
|
_originalTool(
|
|
119945
120358
|
"get_preset_info",
|
|
119946
|
-
"Show active tool preset, available presets, and which tools are registered in this session",
|
|
120359
|
+
"Show active tool preset, available presets, and which tools are registered in this session. Read-only. Returns JSON: { active_preset, registered_tools, tool_names, available_presets }.",
|
|
119947
120360
|
{},
|
|
119948
120361
|
async () => {
|
|
119949
120362
|
const presets = listPresets();
|
|
@@ -119962,7 +120375,7 @@ function registerSessionTools(server, ctx) {
|
|
|
119962
120375
|
);
|
|
119963
120376
|
_originalTool(
|
|
119964
120377
|
"get_session_analytics",
|
|
119965
|
-
"Analyze AI agent session logs: token usage, cost breakdown by tool/server, top files, models used. Parses Claude Code JSONL logs automatically.",
|
|
120378
|
+
"Analyze AI agent session logs: token usage, cost breakdown by tool/server, top files, models used. Parses Claude Code JSONL logs automatically. Read-only. For waste detection use get_optimization_report; for cost trends use get_usage_trends. Returns JSON: { sessions, tokens, cost_usd, tools, models, topFiles }.",
|
|
119966
120379
|
{
|
|
119967
120380
|
period: z14.enum(["today", "week", "month", "all"]).optional().describe("Time period (default: week)"),
|
|
119968
120381
|
session_id: z14.string().max(128).optional().describe("Specific session ID to analyze")
|
|
@@ -119983,7 +120396,7 @@ function registerSessionTools(server, ctx) {
|
|
|
119983
120396
|
);
|
|
119984
120397
|
_originalTool(
|
|
119985
120398
|
"get_optimization_report",
|
|
119986
|
-
"Detect token waste patterns in AI agent sessions: repeated file reads, Bash grep instead of search, large file reads, unused trace-mcp tools. Provides savings estimates.",
|
|
120399
|
+
"Detect token waste patterns in AI agent sessions: repeated file reads, Bash grep instead of search, large file reads, unused trace-mcp tools. Provides savings estimates. Read-only. For usage/cost overview use get_session_analytics; for A/B savings comparison use get_real_savings. Returns JSON: { patterns: [{ type, description, savings_estimate }], total_waste }.",
|
|
119987
120400
|
{
|
|
119988
120401
|
period: z14.enum(["today", "week", "month", "all"]).optional().describe("Time period (default: week)")
|
|
119989
120402
|
},
|
|
@@ -120003,7 +120416,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120003
120416
|
);
|
|
120004
120417
|
server.tool(
|
|
120005
120418
|
"benchmark_project",
|
|
120006
|
-
"Synthetic token efficiency benchmark: compare raw file reads vs trace-mcp compact responses across symbol lookup, file exploration, search, and impact analysis scenarios.",
|
|
120419
|
+
"Synthetic token efficiency benchmark: compare raw file reads vs trace-mcp compact responses across symbol lookup, file exploration, search, and impact analysis scenarios. Read-only, no side effects. Use to quantify token savings. Returns JSON: { scenarios: [{ name, raw_tokens, compact_tokens, savings_pct }], summary }.",
|
|
120007
120420
|
{
|
|
120008
120421
|
queries: z14.number().int().min(1).max(50).optional().describe("Queries per scenario (default 10)"),
|
|
120009
120422
|
seed: z14.number().int().optional().describe("Random seed for reproducibility (default 42)"),
|
|
@@ -120019,7 +120432,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120019
120432
|
);
|
|
120020
120433
|
_originalTool(
|
|
120021
120434
|
"get_coverage_report",
|
|
120022
|
-
"Technology profile of the project: detected frameworks/ORMs/UI libs from manifests (package.json, composer.json, etc.), which are covered by trace-mcp plugins, and coverage gaps.",
|
|
120435
|
+
"Technology profile of the project: detected frameworks/ORMs/UI libs from manifests (package.json, composer.json, etc.), which are covered by trace-mcp plugins, and coverage gaps. Read-only. Returns JSON: { detected, covered, gaps }.",
|
|
120023
120436
|
{},
|
|
120024
120437
|
async () => {
|
|
120025
120438
|
try {
|
|
@@ -120032,7 +120445,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120032
120445
|
);
|
|
120033
120446
|
_originalTool(
|
|
120034
120447
|
"get_real_savings",
|
|
120035
|
-
"A/B comparison: how many tokens could be saved by using trace-mcp instead of raw Read/Bash file reads. Per-file breakdown.",
|
|
120448
|
+
"A/B comparison: how many tokens could be saved by using trace-mcp instead of raw Read/Bash file reads. Per-file breakdown. Read-only. For pattern-based waste detection use get_optimization_report instead. Returns JSON: { files: [{ file, raw_tokens, compact_tokens, savings }], total_savings }.",
|
|
120036
120449
|
{
|
|
120037
120450
|
period: z14.enum(["today", "week", "month", "all"]).optional().describe("Time period (default: week)")
|
|
120038
120451
|
},
|
|
@@ -120054,7 +120467,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120054
120467
|
);
|
|
120055
120468
|
_originalTool(
|
|
120056
120469
|
"get_usage_trends",
|
|
120057
|
-
"Daily token usage time-series: sessions, tokens, estimated cost, tool calls per day. For spotting cost spikes.",
|
|
120470
|
+
"Daily token usage time-series: sessions, tokens, estimated cost, tool calls per day. For spotting cost spikes. Read-only. For detailed session breakdown use get_session_analytics instead. Returns JSON: { days, daily: [{ date, sessions, tokens, cost_usd, tool_calls }], totals }.",
|
|
120058
120471
|
{
|
|
120059
120472
|
days: z14.number().int().min(1).max(365).optional().describe("Number of days to show (default: 30)")
|
|
120060
120473
|
},
|
|
@@ -120081,7 +120494,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120081
120494
|
);
|
|
120082
120495
|
_originalTool(
|
|
120083
120496
|
"get_session_stats",
|
|
120084
|
-
"Token savings stats for this session: per-tool call counts, estimated token savings, reduction percentage, dedup savings.",
|
|
120497
|
+
"Token savings stats for this session: per-tool call counts, estimated token savings, reduction percentage, dedup savings. Read-only. Returns JSON: { total_calls, total_raw_tokens, total_compact_tokens, savings_pct, dedup_saved_tokens, per_tool }.",
|
|
120085
120498
|
{},
|
|
120086
120499
|
async () => {
|
|
120087
120500
|
const stats = savings.getFullStats();
|
|
@@ -120099,7 +120512,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120099
120512
|
);
|
|
120100
120513
|
server.tool(
|
|
120101
120514
|
"get_session_journal",
|
|
120102
|
-
"Session history: all tool calls made, files read, zero-result searches, and duplicate queries. Use to avoid repeating work.",
|
|
120515
|
+
"Session history: all tool calls made, files read, zero-result searches, and duplicate queries. Use to avoid repeating work. For a compact snapshot use get_session_snapshot instead. Read-only. Returns JSON: { calls, filesRead, zeroResults, duplicates }.",
|
|
120103
120516
|
{},
|
|
120104
120517
|
async () => {
|
|
120105
120518
|
const summary = journal.getSummary();
|
|
@@ -120108,7 +120521,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120108
120521
|
);
|
|
120109
120522
|
server.tool(
|
|
120110
120523
|
"get_session_snapshot",
|
|
120111
|
-
"Compact session snapshot (~200 tokens) for context recovery after compaction. Returns focus files (by read count), edited files, key searches, and dead ends. Also used by the PreCompact hook to preserve session orientation automatically.",
|
|
120524
|
+
"Compact session snapshot (~200 tokens) for context recovery after compaction. Returns focus files (by read count), edited files, key searches, and dead ends. Also used by the PreCompact hook to preserve session orientation automatically. Read-only. For full journal use get_session_journal; for cross-session context use get_session_resume. Returns JSON: { focusFiles, editedFiles, keySearches, deadEnds }.",
|
|
120112
120525
|
{
|
|
120113
120526
|
max_files: z14.number().int().min(1).max(50).optional().describe("Max focus files to include (default: 10)"),
|
|
120114
120527
|
max_searches: z14.number().int().min(1).max(20).optional().describe("Max key searches to include (default: 5)"),
|
|
@@ -120127,7 +120540,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120127
120540
|
);
|
|
120128
120541
|
server.tool(
|
|
120129
120542
|
"get_session_resume",
|
|
120130
|
-
"Cross-session context carryover: shows what was explored in recent past sessions (files touched, tools used, dead-end searches). Call at session start to orient yourself without re-reading files. Much cheaper than re-exploring the codebase.",
|
|
120543
|
+
"Cross-session context carryover: shows what was explored in recent past sessions (files touched, tools used, dead-end searches). Call at session start to orient yourself without re-reading files. Much cheaper than re-exploring the codebase. Read-only. For decision-aware wake-up use get_wake_up instead. Returns JSON: { sessions: [{ files, tools, deadEnds }], active_decisions }.",
|
|
120131
120544
|
{
|
|
120132
120545
|
max_sessions: z14.number().int().min(1).max(20).optional().describe("Number of past sessions to include (default: 5)")
|
|
120133
120546
|
},
|
|
@@ -120146,7 +120559,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120146
120559
|
);
|
|
120147
120560
|
_originalTool(
|
|
120148
120561
|
"plan_turn",
|
|
120149
|
-
"Opening-move router for new tasks. Combines BM25/PageRank search + session journal (negative evidence + focus signals) + framework-aware insertion-point suggestions + change-risk + turn-budget advisor into ONE call. Returns verdict (exists/partial/missing/ambiguous), confidence, ranked targets with provenance, scaffold hints when missing, and recommended next tool calls. Call this FIRST on a new task to break the empty-result hallucination chain.",
|
|
120562
|
+
"Opening-move router for new tasks. Combines BM25/PageRank search + session journal (negative evidence + focus signals) + framework-aware insertion-point suggestions + change-risk + turn-budget advisor into ONE call. Returns verdict (exists/partial/missing/ambiguous), confidence, ranked targets with provenance, scaffold hints when missing, and recommended next tool calls. Call this FIRST on a new task to break the empty-result hallucination chain. Read-only. For broader task context with source code use get_task_context instead. Returns JSON: { verdict, confidence, targets, scaffoldHints, nextSteps }.",
|
|
120150
120563
|
{
|
|
120151
120564
|
task: z14.string().min(1).max(512).describe('Natural-language task description (e.g. "add a webhook endpoint for stripe payments")'),
|
|
120152
120565
|
intent: z14.enum(["bugfix", "new_feature", "refactor", "understand"]).optional().describe("Optional intent hint; auto-classified from task if omitted"),
|
|
@@ -120180,7 +120593,7 @@ function registerSessionTools(server, ctx) {
|
|
|
120180
120593
|
);
|
|
120181
120594
|
_originalTool(
|
|
120182
120595
|
"batch",
|
|
120183
|
-
"Execute multiple trace-mcp tools in a single MCP request. Returns results for all calls. Use to reduce round-trips when you need several independent queries (e.g., get_outline for 3 files, or search + get_symbol together).",
|
|
120596
|
+
"Execute multiple trace-mcp tools in a single MCP request. Returns results for all calls. Use to reduce round-trips when you need several independent queries (e.g., get_outline for 3 files, or search + get_symbol together). Read-only (delegates to other tools). Returns JSON: { batch_results: [{ tool, result }], total }.",
|
|
120184
120597
|
{
|
|
120185
120598
|
calls: z14.array(z14.object({
|
|
120186
120599
|
tool: z14.string().describe('Tool name (e.g., "get_outline", "get_symbol", "search")'),
|
|
@@ -120230,7 +120643,7 @@ import { z as z15 } from "zod";
|
|
|
120230
120643
|
|
|
120231
120644
|
// src/memory/conversation-miner.ts
|
|
120232
120645
|
import * as fs102 from "fs";
|
|
120233
|
-
import * as
|
|
120646
|
+
import * as path113 from "path";
|
|
120234
120647
|
init_logger();
|
|
120235
120648
|
var DECISION_PATTERNS = [
|
|
120236
120649
|
// Architecture decisions: "decided to", "we'll use", "going with", "chose X over Y"
|
|
@@ -120458,7 +120871,7 @@ function mineSessions(decisionStore, opts = {}) {
|
|
|
120458
120871
|
file_path: d.file_path,
|
|
120459
120872
|
tags: d.tags,
|
|
120460
120873
|
valid_from: d.timestamp,
|
|
120461
|
-
session_id:
|
|
120874
|
+
session_id: path113.basename(session.filePath, ".jsonl"),
|
|
120462
120875
|
source: "mined",
|
|
120463
120876
|
confidence: d.confidence
|
|
120464
120877
|
}));
|
|
@@ -120484,7 +120897,7 @@ function mineSessions(decisionStore, opts = {}) {
|
|
|
120484
120897
|
|
|
120485
120898
|
// src/memory/session-indexer.ts
|
|
120486
120899
|
import * as fs103 from "fs";
|
|
120487
|
-
import * as
|
|
120900
|
+
import * as path114 from "path";
|
|
120488
120901
|
init_logger();
|
|
120489
120902
|
var MAX_CHUNK_CHARS = 2e3;
|
|
120490
120903
|
var MIN_MESSAGE_CHARS = 50;
|
|
@@ -120525,7 +120938,7 @@ function truncateChunk(text) {
|
|
|
120525
120938
|
function indexSessionFile(filePath, projectPath, decisionStore) {
|
|
120526
120939
|
const content = fs103.readFileSync(filePath, "utf-8");
|
|
120527
120940
|
const lines = content.split("\n").filter((l) => l.trim());
|
|
120528
|
-
const sessionId =
|
|
120941
|
+
const sessionId = path114.basename(filePath, ".jsonl");
|
|
120529
120942
|
const chunks = [];
|
|
120530
120943
|
let chunkIndex = 0;
|
|
120531
120944
|
for (const line of lines) {
|
|
@@ -120576,7 +120989,7 @@ function indexSessions(decisionStore, opts = {}) {
|
|
|
120576
120989
|
skipped++;
|
|
120577
120990
|
continue;
|
|
120578
120991
|
}
|
|
120579
|
-
const sessionId =
|
|
120992
|
+
const sessionId = path114.basename(session.filePath, ".jsonl");
|
|
120580
120993
|
if (!opts.force && decisionStore.isSessionIndexed(sessionId)) {
|
|
120581
120994
|
skipped++;
|
|
120582
120995
|
continue;
|
|
@@ -120601,7 +121014,7 @@ function indexSessions(decisionStore, opts = {}) {
|
|
|
120601
121014
|
}
|
|
120602
121015
|
|
|
120603
121016
|
// src/memory/wake-up.ts
|
|
120604
|
-
import * as
|
|
121017
|
+
import * as path115 from "path";
|
|
120605
121018
|
function compactDecision(d) {
|
|
120606
121019
|
const entry = {
|
|
120607
121020
|
id: d.id,
|
|
@@ -120625,7 +121038,7 @@ function assembleWakeUp(decisionStore, projectRoot, opts = {}) {
|
|
|
120625
121038
|
const indexedSessions = decisionStore.getIndexedSessionIds(projectRoot);
|
|
120626
121039
|
const result = {
|
|
120627
121040
|
project: {
|
|
120628
|
-
name:
|
|
121041
|
+
name: path115.basename(projectRoot),
|
|
120629
121042
|
root: projectRoot
|
|
120630
121043
|
},
|
|
120631
121044
|
decisions: {
|
|
@@ -120669,7 +121082,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120669
121082
|
}
|
|
120670
121083
|
server.tool(
|
|
120671
121084
|
"mine_sessions",
|
|
120672
|
-
"Mine Claude Code / Claw Code session logs for architectural decisions, tech choices, bug root causes, and preferences. Extracts decision-like content using pattern matching (no LLM calls). Skips already-mined sessions unless force=true.",
|
|
121085
|
+
"Mine Claude Code / Claw Code session logs for architectural decisions, tech choices, bug root causes, and preferences. Extracts decision-like content using pattern matching (no LLM calls). Skips already-mined sessions unless force=true. Mutates the decision store; idempotent. Use to populate the decision knowledge graph. Returns JSON: { mined, decisions_extracted, sessions_processed }.",
|
|
120673
121086
|
{
|
|
120674
121087
|
project_root: z15.string().max(1024).optional().describe("Only mine sessions for this project path (default: all projects)"),
|
|
120675
121088
|
force: z15.boolean().optional().describe("Re-mine already processed sessions (default: false)"),
|
|
@@ -120686,7 +121099,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120686
121099
|
);
|
|
120687
121100
|
server.tool(
|
|
120688
121101
|
"add_decision",
|
|
120689
|
-
"Manually record an architectural decision, tech choice, preference, or convention. Links to code symbols/files and optionally to a specific subproject for code-aware memory. Decisions have temporal validity \u2014 they can be invalidated later when they become outdated.",
|
|
121102
|
+
"Manually record an architectural decision, tech choice, preference, or convention. Links to code symbols/files and optionally to a specific subproject for code-aware memory. Decisions have temporal validity \u2014 they can be invalidated later when they become outdated. Mutates the decision store (creates a new record). For automated extraction from session logs use mine_sessions instead. Returns JSON: { added: { id, title, type } }.",
|
|
120690
121103
|
{
|
|
120691
121104
|
title: z15.string().min(1).max(200).describe("Short summary of the decision"),
|
|
120692
121105
|
content: z15.string().min(1).max(5e3).describe("Full decision text \u2014 reasoning, context, tradeoffs"),
|
|
@@ -120714,7 +121127,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120714
121127
|
);
|
|
120715
121128
|
server.tool(
|
|
120716
121129
|
"query_decisions",
|
|
120717
|
-
'Query the decision knowledge graph. Filter by type, subproject, code symbol, file path, tag, or time. Returns decisions linked to code \u2014 "why was this architecture chosen?" answered with the actual decision record. Use service_name to filter by a specific subproject within the project.',
|
|
121130
|
+
'Query the decision knowledge graph. Filter by type, subproject, code symbol, file path, tag, or time. Returns decisions linked to code \u2014 "why was this architecture chosen?" answered with the actual decision record. Use service_name to filter by a specific subproject within the project. Read-only. Returns JSON: { decisions: [{ id, title, type, content, tags }], total_results }.',
|
|
120718
121131
|
{
|
|
120719
121132
|
type: z15.enum(DECISION_TYPES).optional().describe("Filter by decision type"),
|
|
120720
121133
|
service_name: z15.string().max(256).optional().describe('Filter by subproject name (e.g., "auth-api")'),
|
|
@@ -120750,14 +121163,14 @@ function registerMemoryTools(server, ctx) {
|
|
|
120750
121163
|
);
|
|
120751
121164
|
server.tool(
|
|
120752
121165
|
"invalidate_decision",
|
|
120753
|
-
"Mark a decision as no longer valid. The decision remains in the knowledge graph for historical queries but is excluded from active queries. Use when a decision is superseded or reversed.",
|
|
121166
|
+
"Mark a decision as no longer valid. The decision remains in the knowledge graph for historical queries but is excluded from active queries. Use when a decision is superseded or reversed. Mutates the decision store; idempotent. Returns JSON: { invalidated: { id, title, valid_until } }.",
|
|
120754
121167
|
{
|
|
120755
121168
|
id: z15.number().int().min(1).describe("Decision ID to invalidate"),
|
|
120756
121169
|
valid_until: z15.string().max(30).optional().describe("ISO timestamp when decision became invalid (default: now)")
|
|
120757
121170
|
},
|
|
120758
121171
|
async ({ id, valid_until }) => {
|
|
120759
|
-
const
|
|
120760
|
-
if (!
|
|
121172
|
+
const ok73 = decisionStore.invalidateDecision(id, valid_until);
|
|
121173
|
+
if (!ok73) {
|
|
120761
121174
|
return { content: [{ type: "text", text: j3({ error: `Decision ${id} not found or already invalidated` }) }], isError: true };
|
|
120762
121175
|
}
|
|
120763
121176
|
const updated = decisionStore.getDecision(id);
|
|
@@ -120766,7 +121179,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120766
121179
|
);
|
|
120767
121180
|
server.tool(
|
|
120768
121181
|
"get_decision_timeline",
|
|
120769
|
-
"Chronological timeline of decisions for a project, symbol, or file. Shows when decisions were made and invalidated \u2014 like git log but for architectural decisions.",
|
|
121182
|
+
"Chronological timeline of decisions for a project, symbol, or file. Shows when decisions were made and invalidated \u2014 like git log but for architectural decisions. Read-only. Use to review decision history. Returns JSON: { timeline: [{ id, title, type, created_at, valid_until }], count }.",
|
|
120770
121183
|
{
|
|
120771
121184
|
symbol_id: z15.string().max(512).optional().describe("Filter timeline to decisions about this symbol"),
|
|
120772
121185
|
file_path: z15.string().max(1024).optional().describe("Filter timeline to decisions about this file"),
|
|
@@ -120784,7 +121197,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120784
121197
|
);
|
|
120785
121198
|
server.tool(
|
|
120786
121199
|
"get_decision_stats",
|
|
120787
|
-
"Overview of the decision knowledge graph: total decisions, active/invalidated counts, breakdown by type and source. Shows how much institutional knowledge is captured.",
|
|
121200
|
+
"Overview of the decision knowledge graph: total decisions, active/invalidated counts, breakdown by type and source. Shows how much institutional knowledge is captured. Read-only. Returns JSON: { total, active, invalidated, by_type, by_source, sessions_mined }.",
|
|
120788
121201
|
{},
|
|
120789
121202
|
async () => {
|
|
120790
121203
|
const stats = decisionStore.getStats(projectRoot);
|
|
@@ -120796,7 +121209,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120796
121209
|
);
|
|
120797
121210
|
server.tool(
|
|
120798
121211
|
"index_sessions",
|
|
120799
|
-
'Index conversation content from Claude Code / Claw Code sessions for cross-session search. Stores chunked messages in FTS5 \u2014 enables "what did we discuss about X?" queries across all past sessions. Skips already-indexed sessions unless force=true.',
|
|
121212
|
+
'Index conversation content from Claude Code / Claw Code sessions for cross-session search. Stores chunked messages in FTS5 \u2014 enables "what did we discuss about X?" queries across all past sessions. Skips already-indexed sessions unless force=true. Mutates the session index; idempotent. Use before search_sessions. Returns JSON: { indexed, sessions_processed, chunks_stored }.',
|
|
120800
121213
|
{
|
|
120801
121214
|
project_root: z15.string().max(1024).optional().describe("Only index sessions for this project path (default: current project)"),
|
|
120802
121215
|
force: z15.boolean().optional().describe("Re-index already processed sessions (default: false)")
|
|
@@ -120811,7 +121224,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120811
121224
|
);
|
|
120812
121225
|
server.tool(
|
|
120813
121226
|
"search_sessions",
|
|
120814
|
-
'Search across all past session conversations. Finds what was discussed, decided, or debugged in previous sessions. Full-text search with porter stemming \u2014 e.g., "why did we switch to GraphQL", "auth middleware bug", "database migration approach".',
|
|
121227
|
+
'Search across all past session conversations. Finds what was discussed, decided, or debugged in previous sessions. Full-text search with porter stemming \u2014 e.g., "why did we switch to GraphQL", "auth middleware bug", "database migration approach". Requires index_sessions to be run first. Read-only. Returns JSON: { results: [{ session_id, text, score }], total_results }.',
|
|
120815
121228
|
{
|
|
120816
121229
|
query: z15.string().min(1).max(500).describe("Search query (FTS5 with porter stemming)"),
|
|
120817
121230
|
limit: z15.number().int().min(1).max(50).optional().describe("Max results (default: 20)")
|
|
@@ -120830,7 +121243,7 @@ function registerMemoryTools(server, ctx) {
|
|
|
120830
121243
|
);
|
|
120831
121244
|
server.tool(
|
|
120832
121245
|
"get_wake_up",
|
|
120833
|
-
"Compact orientation context (~300 tokens) for session start. Returns: project identity, active architectural decisions (linked to code symbols/files), and memory stats. Auto-mines sessions on first call if no decisions exist yet. Like MemPalace wake-up but code-aware \u2014 decisions are tied to the dependency graph.",
|
|
121246
|
+
"Compact orientation context (~300 tokens) for session start. Returns: project identity, active architectural decisions (linked to code symbols/files), and memory stats. Auto-mines sessions on first call if no decisions exist yet. Like MemPalace wake-up but code-aware \u2014 decisions are tied to the dependency graph. Use at session start for context recovery. For cross-session file/tool history use get_session_resume instead. Returns JSON: { project, decisions, stats }.",
|
|
120834
121247
|
{
|
|
120835
121248
|
max_decisions: z15.number().int().min(1).max(30).optional().describe("Max recent decisions to include (default: 10)"),
|
|
120836
121249
|
auto_mine: z15.boolean().optional().describe("Auto-mine sessions if decision store is empty (default: true)")
|
|
@@ -121823,7 +122236,7 @@ var DecisionStore = class {
|
|
|
121823
122236
|
};
|
|
121824
122237
|
|
|
121825
122238
|
// src/server/server.ts
|
|
121826
|
-
var
|
|
122239
|
+
var PKG_VERSION2 = true ? "1.22.0" : "0.0.0-dev";
|
|
121827
122240
|
function j2(value) {
|
|
121828
122241
|
return JSON.stringify(value, (_key, val) => val === null || val === void 0 ? void 0 : val);
|
|
121829
122242
|
}
|
|
@@ -121939,7 +122352,7 @@ function createServer2(store, registry, config, rootPath, progress) {
|
|
|
121939
122352
|
const detectedFrameworks = [...frameworkNames].join(", ") || "none";
|
|
121940
122353
|
const instructionsVerbosity = config.tools?.instructions_verbosity ?? "full";
|
|
121941
122354
|
const server = new McpServer(
|
|
121942
|
-
{ name: "trace-mcp", version:
|
|
122355
|
+
{ name: "trace-mcp", version: PKG_VERSION2 },
|
|
121943
122356
|
{ instructions: buildInstructions(detectedFrameworks, instructionsVerbosity) }
|
|
121944
122357
|
);
|
|
121945
122358
|
const savings = new SavingsTracker(projectRoot);
|
|
@@ -122168,7 +122581,7 @@ init_pipeline();
|
|
|
122168
122581
|
init_logger();
|
|
122169
122582
|
init_traceignore();
|
|
122170
122583
|
import * as parcelWatcher from "@parcel/watcher";
|
|
122171
|
-
import
|
|
122584
|
+
import path116 from "path";
|
|
122172
122585
|
var DEFAULT_DEBOUNCE_MS = 300;
|
|
122173
122586
|
var FileWatcher = class {
|
|
122174
122587
|
constructor(_setTimeout = setTimeout, _clearTimeout = clearTimeout) {
|
|
@@ -122182,17 +122595,17 @@ var FileWatcher = class {
|
|
|
122182
122595
|
pendingPaths = /* @__PURE__ */ new Set();
|
|
122183
122596
|
async start(rootPath, config, onChanges, debounceMs = DEFAULT_DEBOUNCE_MS, onDeletes) {
|
|
122184
122597
|
const traceignore = new TraceignoreMatcher(rootPath, config.ignore ?? {});
|
|
122185
|
-
const ignoreDirs = [...traceignore.getSkipDirs()].map((d) =>
|
|
122598
|
+
const ignoreDirs = [...traceignore.getSkipDirs()].map((d) => path116.join(rootPath, d));
|
|
122186
122599
|
this.subscription = await parcelWatcher.subscribe(
|
|
122187
122600
|
rootPath,
|
|
122188
|
-
async (
|
|
122189
|
-
if (
|
|
122190
|
-
logger.error({ error:
|
|
122601
|
+
async (err49, events) => {
|
|
122602
|
+
if (err49) {
|
|
122603
|
+
logger.error({ error: err49 }, "Watcher error");
|
|
122191
122604
|
return;
|
|
122192
122605
|
}
|
|
122193
122606
|
const notIgnored = (p5) => {
|
|
122194
122607
|
if (ignoreDirs.some((d) => p5.startsWith(d))) return false;
|
|
122195
|
-
const rel =
|
|
122608
|
+
const rel = path116.relative(rootPath, p5);
|
|
122196
122609
|
return !traceignore.isIgnored(rel);
|
|
122197
122610
|
};
|
|
122198
122611
|
const changed = events.filter((e) => e.type === "create" || e.type === "update").map((e) => e.path).filter(notIgnored);
|
|
@@ -122407,12 +122820,12 @@ import http from "http";
|
|
|
122407
122820
|
// src/cli/init.ts
|
|
122408
122821
|
import { Command as Command3 } from "commander";
|
|
122409
122822
|
import fs112 from "fs";
|
|
122410
|
-
import
|
|
122823
|
+
import path125 from "path";
|
|
122411
122824
|
import * as p from "@clack/prompts";
|
|
122412
122825
|
|
|
122413
122826
|
// src/init/mcp-client.ts
|
|
122414
122827
|
import fs104 from "fs";
|
|
122415
|
-
import
|
|
122828
|
+
import path117 from "path";
|
|
122416
122829
|
import os10 from "os";
|
|
122417
122830
|
var HOME3 = os10.homedir();
|
|
122418
122831
|
var GUI_CLIENTS = /* @__PURE__ */ new Set([
|
|
@@ -122426,17 +122839,17 @@ var GUI_CLIENTS = /* @__PURE__ */ new Set([
|
|
|
122426
122839
|
]);
|
|
122427
122840
|
function resolveGuiCommand() {
|
|
122428
122841
|
const SYSTEM_PATH = "/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin";
|
|
122429
|
-
const nodeBinDir =
|
|
122842
|
+
const nodeBinDir = path117.dirname(process.execPath);
|
|
122430
122843
|
const scriptPath = process.argv[1];
|
|
122431
|
-
if (scriptPath &&
|
|
122432
|
-
const scriptBinDir =
|
|
122844
|
+
if (scriptPath && path117.isAbsolute(scriptPath) && fs104.existsSync(scriptPath)) {
|
|
122845
|
+
const scriptBinDir = path117.dirname(scriptPath);
|
|
122433
122846
|
const dirs = /* @__PURE__ */ new Set([scriptBinDir, nodeBinDir]);
|
|
122434
122847
|
return {
|
|
122435
122848
|
command: scriptPath,
|
|
122436
122849
|
env: { PATH: [...dirs].join(":") + ":" + SYSTEM_PATH }
|
|
122437
122850
|
};
|
|
122438
122851
|
}
|
|
122439
|
-
const candidate =
|
|
122852
|
+
const candidate = path117.join(nodeBinDir, "trace-mcp");
|
|
122440
122853
|
if (fs104.existsSync(candidate)) {
|
|
122441
122854
|
return {
|
|
122442
122855
|
command: candidate,
|
|
@@ -122484,8 +122897,8 @@ function configureMcpClients(clientNames, projectRoot, opts) {
|
|
|
122484
122897
|
const resolved = resolveGuiCommand();
|
|
122485
122898
|
const action = writeCodexTomlEntry(configPath2, { ...resolved, args: ["serve"], cwd: projectRoot });
|
|
122486
122899
|
results.push({ target: configPath2, action, detail: `${name} (${opts.scope})` });
|
|
122487
|
-
} catch (
|
|
122488
|
-
results.push({ target: configPath2, action: "skipped", detail: `Error: ${
|
|
122900
|
+
} catch (err49) {
|
|
122901
|
+
results.push({ target: configPath2, action: "skipped", detail: `Error: ${err49.message}` });
|
|
122489
122902
|
}
|
|
122490
122903
|
continue;
|
|
122491
122904
|
}
|
|
@@ -122516,14 +122929,14 @@ function configureMcpClients(clientNames, projectRoot, opts) {
|
|
|
122516
122929
|
try {
|
|
122517
122930
|
const action = writeJsonEntry(configPath, entry);
|
|
122518
122931
|
results.push({ target: configPath, action, detail: `${name} (${opts.scope})` });
|
|
122519
|
-
} catch (
|
|
122520
|
-
results.push({ target: configPath, action: "skipped", detail: `Error: ${
|
|
122932
|
+
} catch (err49) {
|
|
122933
|
+
results.push({ target: configPath, action: "skipped", detail: `Error: ${err49.message}` });
|
|
122521
122934
|
}
|
|
122522
122935
|
}
|
|
122523
122936
|
return results;
|
|
122524
122937
|
}
|
|
122525
122938
|
function writeJsonEntry(configPath, entry) {
|
|
122526
|
-
const dir =
|
|
122939
|
+
const dir = path117.dirname(configPath);
|
|
122527
122940
|
if (!fs104.existsSync(dir)) fs104.mkdirSync(dir, { recursive: true });
|
|
122528
122941
|
let config = {};
|
|
122529
122942
|
let isNew = true;
|
|
@@ -122542,7 +122955,7 @@ function writeJsonEntry(configPath, entry) {
|
|
|
122542
122955
|
return isNew ? "created" : "updated";
|
|
122543
122956
|
}
|
|
122544
122957
|
function writeCodexTomlEntry(configPath, entry) {
|
|
122545
|
-
const dir =
|
|
122958
|
+
const dir = path117.dirname(configPath);
|
|
122546
122959
|
if (!fs104.existsSync(dir)) fs104.mkdirSync(dir, { recursive: true });
|
|
122547
122960
|
const argsToml = entry.args.map((a) => `"${a}"`).join(", ");
|
|
122548
122961
|
const section = [
|
|
@@ -122574,21 +122987,21 @@ function writeCodexTomlEntry(configPath, entry) {
|
|
|
122574
122987
|
function getConfigPath(name, projectRoot, scope8) {
|
|
122575
122988
|
switch (name) {
|
|
122576
122989
|
case "claude-code":
|
|
122577
|
-
return scope8 === "global" ?
|
|
122990
|
+
return scope8 === "global" ? path117.join(HOME3, ".claude.json") : path117.join(projectRoot, ".mcp.json");
|
|
122578
122991
|
case "claw-code":
|
|
122579
|
-
return scope8 === "global" ?
|
|
122992
|
+
return scope8 === "global" ? path117.join(HOME3, ".claw", "settings.json") : path117.join(projectRoot, ".claw.json");
|
|
122580
122993
|
case "claude-desktop":
|
|
122581
|
-
return process.platform === "darwin" ?
|
|
122994
|
+
return process.platform === "darwin" ? path117.join(HOME3, "Library", "Application Support", "Claude", "claude_desktop_config.json") : path117.join(process.env.APPDATA ?? path117.join(HOME3, "AppData", "Roaming"), "Claude", "claude_desktop_config.json");
|
|
122582
122995
|
case "cursor":
|
|
122583
|
-
return scope8 === "global" ?
|
|
122996
|
+
return scope8 === "global" ? path117.join(HOME3, ".cursor", "mcp.json") : path117.join(projectRoot, ".cursor", "mcp.json");
|
|
122584
122997
|
case "windsurf":
|
|
122585
|
-
return scope8 === "global" ?
|
|
122998
|
+
return scope8 === "global" ? path117.join(HOME3, ".windsurf", "mcp.json") : path117.join(projectRoot, ".windsurf", "mcp.json");
|
|
122586
122999
|
case "continue":
|
|
122587
|
-
return scope8 === "global" ?
|
|
123000
|
+
return scope8 === "global" ? path117.join(HOME3, ".continue", "mcpServers", "mcp.json") : path117.join(projectRoot, ".continue", "mcpServers", "mcp.json");
|
|
122588
123001
|
case "junie":
|
|
122589
|
-
return scope8 === "global" ?
|
|
123002
|
+
return scope8 === "global" ? path117.join(HOME3, ".junie", "mcp", "mcp.json") : path117.join(projectRoot, ".junie", "mcp", "mcp.json");
|
|
122590
123003
|
case "codex":
|
|
122591
|
-
return scope8 === "global" ?
|
|
123004
|
+
return scope8 === "global" ? path117.join(HOME3, ".codex", "config.toml") : path117.join(projectRoot, ".codex", "config.toml");
|
|
122592
123005
|
case "jetbrains-ai":
|
|
122593
123006
|
return null;
|
|
122594
123007
|
// Configured through IDE Settings UI, not a file we can write
|
|
@@ -122603,7 +123016,7 @@ init_hooks();
|
|
|
122603
123016
|
|
|
122604
123017
|
// src/init/ide-rules.ts
|
|
122605
123018
|
import fs105 from "fs";
|
|
122606
|
-
import
|
|
123019
|
+
import path118 from "path";
|
|
122607
123020
|
var START_MARKER2 = "<!-- trace-mcp:start -->";
|
|
122608
123021
|
var END_MARKER2 = "<!-- trace-mcp:end -->";
|
|
122609
123022
|
var TOOL_ROUTING_POLICY = `IMPORTANT: For ANY code exploration task, ALWAYS use trace-mcp tools first. NEVER use built-in search/grep/file listing for navigating source code.
|
|
@@ -122639,9 +123052,9 @@ alwaysApply: true
|
|
|
122639
123052
|
${TOOL_ROUTING_POLICY}
|
|
122640
123053
|
`;
|
|
122641
123054
|
function installCursorRules(projectRoot, opts) {
|
|
122642
|
-
const base = opts.global ?
|
|
122643
|
-
const rulesDir =
|
|
122644
|
-
const filePath =
|
|
123055
|
+
const base = opts.global ? path118.join(process.env.HOME ?? process.env.USERPROFILE ?? "", ".cursor") : path118.join(projectRoot, ".cursor");
|
|
123056
|
+
const rulesDir = path118.join(base, "rules");
|
|
123057
|
+
const filePath = path118.join(rulesDir, "trace-mcp.mdc");
|
|
122645
123058
|
if (opts.dryRun) {
|
|
122646
123059
|
if (fs105.existsSync(filePath)) {
|
|
122647
123060
|
const content = fs105.readFileSync(filePath, "utf-8");
|
|
@@ -122670,7 +123083,7 @@ var WINDSURF_BLOCK = `${START_MARKER2}
|
|
|
122670
123083
|
${TOOL_ROUTING_POLICY}
|
|
122671
123084
|
${END_MARKER2}`;
|
|
122672
123085
|
function installWindsurfRules(projectRoot, opts) {
|
|
122673
|
-
const filePath = opts.global ?
|
|
123086
|
+
const filePath = opts.global ? path118.join(process.env.HOME ?? process.env.USERPROFILE ?? "", ".windsurfrules") : path118.join(projectRoot, ".windsurfrules");
|
|
122674
123087
|
if (opts.dryRun) {
|
|
122675
123088
|
if (!fs105.existsSync(filePath)) {
|
|
122676
123089
|
return { target: filePath, action: "skipped", detail: "Would create .windsurfrules" };
|
|
@@ -122705,18 +123118,18 @@ function escapeRegex7(s) {
|
|
|
122705
123118
|
|
|
122706
123119
|
// src/init/tweakcc.ts
|
|
122707
123120
|
import fs106 from "fs";
|
|
122708
|
-
import
|
|
123121
|
+
import path119 from "path";
|
|
122709
123122
|
import os11 from "os";
|
|
122710
123123
|
import { execSync as execSync5 } from "child_process";
|
|
122711
123124
|
function getTweakccConfigDir() {
|
|
122712
123125
|
const envDir = process.env.TWEAKCC_CONFIG_DIR?.trim();
|
|
122713
123126
|
if (envDir) return envDir;
|
|
122714
123127
|
const candidates = [
|
|
122715
|
-
|
|
122716
|
-
|
|
123128
|
+
path119.join(os11.homedir(), ".tweakcc"),
|
|
123129
|
+
path119.join(os11.homedir(), ".claude", "tweakcc")
|
|
122717
123130
|
];
|
|
122718
123131
|
const xdg = process.env.XDG_CONFIG_HOME;
|
|
122719
|
-
if (xdg) candidates.push(
|
|
123132
|
+
if (xdg) candidates.push(path119.join(xdg, "tweakcc"));
|
|
122720
123133
|
for (const dir of candidates) {
|
|
122721
123134
|
if (fs106.existsSync(dir)) return dir;
|
|
122722
123135
|
}
|
|
@@ -122725,7 +123138,7 @@ function getTweakccConfigDir() {
|
|
|
122725
123138
|
function getTweakccSystemPromptsDir() {
|
|
122726
123139
|
const configDir = getTweakccConfigDir();
|
|
122727
123140
|
if (!configDir) return null;
|
|
122728
|
-
return
|
|
123141
|
+
return path119.join(configDir, "system-prompts");
|
|
122729
123142
|
}
|
|
122730
123143
|
function isTweakccInstalled() {
|
|
122731
123144
|
try {
|
|
@@ -122741,7 +123154,7 @@ function detectTweakccPrompts() {
|
|
|
122741
123154
|
return { installed: isTweakccInstalled(), promptsDir, hasOurPrompts: false };
|
|
122742
123155
|
}
|
|
122743
123156
|
const hasOurs = PROMPT_FILES.some(
|
|
122744
|
-
(pf) => fs106.existsSync(
|
|
123157
|
+
(pf) => fs106.existsSync(path119.join(promptsDir, pf.filename))
|
|
122745
123158
|
);
|
|
122746
123159
|
return { installed: true, promptsDir, hasOurPrompts: hasOurs };
|
|
122747
123160
|
}
|
|
@@ -122874,7 +123287,7 @@ function installTweakccPrompts(opts) {
|
|
|
122874
123287
|
if (opts.dryRun) {
|
|
122875
123288
|
for (const pf of PROMPT_FILES) {
|
|
122876
123289
|
results.push({
|
|
122877
|
-
target:
|
|
123290
|
+
target: path119.join(promptsDir, pf.filename),
|
|
122878
123291
|
action: "created",
|
|
122879
123292
|
detail: `Would write ${pf.id}`
|
|
122880
123293
|
});
|
|
@@ -122889,7 +123302,7 @@ function installTweakccPrompts(opts) {
|
|
|
122889
123302
|
fs106.mkdirSync(promptsDir, { recursive: true });
|
|
122890
123303
|
let written = 0;
|
|
122891
123304
|
for (const pf of PROMPT_FILES) {
|
|
122892
|
-
const filePath =
|
|
123305
|
+
const filePath = path119.join(promptsDir, pf.filename);
|
|
122893
123306
|
const content = generateMarkdown(pf);
|
|
122894
123307
|
const existed = fs106.existsSync(filePath);
|
|
122895
123308
|
fs106.writeFileSync(filePath, content, "utf-8");
|
|
@@ -122926,7 +123339,7 @@ init_detector();
|
|
|
122926
123339
|
|
|
122927
123340
|
// src/init/conflict-detector.ts
|
|
122928
123341
|
import fs107 from "fs";
|
|
122929
|
-
import
|
|
123342
|
+
import path120 from "path";
|
|
122930
123343
|
import os12 from "os";
|
|
122931
123344
|
var HOME4 = os12.homedir();
|
|
122932
123345
|
var COMPETING_MCP_SERVERS = {
|
|
@@ -123020,9 +123433,9 @@ var COMPETING_PROJECT_FILES = [
|
|
|
123020
123433
|
{ file: ".greptile.yaml", competitor: "greptile" }
|
|
123021
123434
|
];
|
|
123022
123435
|
var COMPETING_GLOBAL_DIRS = [
|
|
123023
|
-
{ dir:
|
|
123024
|
-
{ dir:
|
|
123025
|
-
{ dir:
|
|
123436
|
+
{ dir: path120.join(HOME4, ".code-index"), competitor: "jcodemunch-mcp" },
|
|
123437
|
+
{ dir: path120.join(HOME4, ".repomix"), competitor: "repomix" },
|
|
123438
|
+
{ dir: path120.join(HOME4, ".aider.tags.cache.v3"), competitor: "aider" }
|
|
123026
123439
|
];
|
|
123027
123440
|
function detectConflicts(projectRoot) {
|
|
123028
123441
|
const conflicts = [];
|
|
@@ -123081,55 +123494,55 @@ function getMcpConfigPaths(projectRoot) {
|
|
|
123081
123494
|
const paths = [];
|
|
123082
123495
|
const platform = os12.platform();
|
|
123083
123496
|
if (projectRoot) {
|
|
123084
|
-
paths.push({ clientName: "claude-code", configPath:
|
|
123497
|
+
paths.push({ clientName: "claude-code", configPath: path120.join(projectRoot, ".mcp.json") });
|
|
123085
123498
|
}
|
|
123086
|
-
paths.push({ clientName: "claude-code", configPath:
|
|
123087
|
-
paths.push({ clientName: "claude-code", configPath:
|
|
123499
|
+
paths.push({ clientName: "claude-code", configPath: path120.join(HOME4, ".claude.json") });
|
|
123500
|
+
paths.push({ clientName: "claude-code", configPath: path120.join(HOME4, ".claude", "settings.json") });
|
|
123088
123501
|
if (projectRoot) {
|
|
123089
|
-
paths.push({ clientName: "claw-code", configPath:
|
|
123502
|
+
paths.push({ clientName: "claw-code", configPath: path120.join(projectRoot, ".claw.json") });
|
|
123090
123503
|
}
|
|
123091
|
-
paths.push({ clientName: "claw-code", configPath:
|
|
123504
|
+
paths.push({ clientName: "claw-code", configPath: path120.join(HOME4, ".claw", "settings.json") });
|
|
123092
123505
|
if (platform === "darwin") {
|
|
123093
|
-
paths.push({ clientName: "claude-desktop", configPath:
|
|
123506
|
+
paths.push({ clientName: "claude-desktop", configPath: path120.join(HOME4, "Library", "Application Support", "Claude", "claude_desktop_config.json") });
|
|
123094
123507
|
} else if (platform === "win32") {
|
|
123095
|
-
const appData = process.env.APPDATA ??
|
|
123096
|
-
paths.push({ clientName: "claude-desktop", configPath:
|
|
123508
|
+
const appData = process.env.APPDATA ?? path120.join(HOME4, "AppData", "Roaming");
|
|
123509
|
+
paths.push({ clientName: "claude-desktop", configPath: path120.join(appData, "Claude", "claude_desktop_config.json") });
|
|
123097
123510
|
}
|
|
123098
|
-
paths.push({ clientName: "cursor", configPath:
|
|
123511
|
+
paths.push({ clientName: "cursor", configPath: path120.join(HOME4, ".cursor", "mcp.json") });
|
|
123099
123512
|
if (projectRoot) {
|
|
123100
|
-
paths.push({ clientName: "cursor", configPath:
|
|
123513
|
+
paths.push({ clientName: "cursor", configPath: path120.join(projectRoot, ".cursor", "mcp.json") });
|
|
123101
123514
|
}
|
|
123102
|
-
paths.push({ clientName: "windsurf", configPath:
|
|
123515
|
+
paths.push({ clientName: "windsurf", configPath: path120.join(HOME4, ".windsurf", "mcp.json") });
|
|
123103
123516
|
if (projectRoot) {
|
|
123104
|
-
paths.push({ clientName: "windsurf", configPath:
|
|
123517
|
+
paths.push({ clientName: "windsurf", configPath: path120.join(projectRoot, ".windsurf", "mcp.json") });
|
|
123105
123518
|
}
|
|
123106
|
-
paths.push({ clientName: "continue", configPath:
|
|
123107
|
-
paths.push({ clientName: "junie", configPath:
|
|
123519
|
+
paths.push({ clientName: "continue", configPath: path120.join(HOME4, ".continue", "mcpServers", "mcp.json") });
|
|
123520
|
+
paths.push({ clientName: "junie", configPath: path120.join(HOME4, ".junie", "mcp", "mcp.json") });
|
|
123108
123521
|
if (projectRoot) {
|
|
123109
|
-
paths.push({ clientName: "junie", configPath:
|
|
123522
|
+
paths.push({ clientName: "junie", configPath: path120.join(projectRoot, ".junie", "mcp", "mcp.json") });
|
|
123110
123523
|
}
|
|
123111
123524
|
return paths;
|
|
123112
123525
|
}
|
|
123113
123526
|
function scanHooksInSettings() {
|
|
123114
123527
|
const conflicts = [];
|
|
123115
123528
|
const settingsFiles = [
|
|
123116
|
-
|
|
123117
|
-
|
|
123118
|
-
|
|
123119
|
-
|
|
123529
|
+
path120.join(HOME4, ".claude", "settings.json"),
|
|
123530
|
+
path120.join(HOME4, ".claude", "settings.local.json"),
|
|
123531
|
+
path120.join(HOME4, ".claw", "settings.json"),
|
|
123532
|
+
path120.join(HOME4, ".claw", "settings.local.json")
|
|
123120
123533
|
];
|
|
123121
|
-
const projectsDir =
|
|
123534
|
+
const projectsDir = path120.join(HOME4, ".claude", "projects");
|
|
123122
123535
|
if (fs107.existsSync(projectsDir)) {
|
|
123123
123536
|
try {
|
|
123124
123537
|
for (const entry of fs107.readdirSync(projectsDir)) {
|
|
123125
|
-
const projDir =
|
|
123538
|
+
const projDir = path120.join(projectsDir, entry);
|
|
123126
123539
|
try {
|
|
123127
123540
|
if (!fs107.statSync(projDir).isDirectory()) continue;
|
|
123128
123541
|
} catch {
|
|
123129
123542
|
continue;
|
|
123130
123543
|
}
|
|
123131
|
-
settingsFiles.push(
|
|
123132
|
-
settingsFiles.push(
|
|
123544
|
+
settingsFiles.push(path120.join(projDir, "settings.json"));
|
|
123545
|
+
settingsFiles.push(path120.join(projDir, "settings.local.json"));
|
|
123133
123546
|
}
|
|
123134
123547
|
} catch {
|
|
123135
123548
|
}
|
|
@@ -123176,8 +123589,8 @@ function scanHooksInSettings() {
|
|
|
123176
123589
|
function scanHookScriptFiles() {
|
|
123177
123590
|
const conflicts = [];
|
|
123178
123591
|
const hooksDirs = [
|
|
123179
|
-
|
|
123180
|
-
|
|
123592
|
+
path120.join(HOME4, ".claude", "hooks"),
|
|
123593
|
+
path120.join(HOME4, ".claw", "hooks")
|
|
123181
123594
|
];
|
|
123182
123595
|
for (const hooksDir of hooksDirs) {
|
|
123183
123596
|
if (!fs107.existsSync(hooksDir)) continue;
|
|
@@ -123191,7 +123604,7 @@ function scanHookScriptFiles() {
|
|
|
123191
123604
|
if (file.startsWith("trace-mcp")) continue;
|
|
123192
123605
|
for (const { pattern, competitor } of COMPETING_HOOK_PATTERNS) {
|
|
123193
123606
|
if (pattern.test(file)) {
|
|
123194
|
-
const filePath =
|
|
123607
|
+
const filePath = path120.join(hooksDir, file);
|
|
123195
123608
|
conflicts.push({
|
|
123196
123609
|
id: `hook_script:${file}:${competitor}`,
|
|
123197
123610
|
category: "hook_script",
|
|
@@ -123212,23 +123625,23 @@ function scanHookScriptFiles() {
|
|
|
123212
123625
|
function scanClaudeMdFiles(projectRoot) {
|
|
123213
123626
|
const conflicts = [];
|
|
123214
123627
|
const files = [
|
|
123215
|
-
|
|
123216
|
-
|
|
123628
|
+
path120.join(HOME4, ".claude", "CLAUDE.md"),
|
|
123629
|
+
path120.join(HOME4, ".claude", "AGENTS.md")
|
|
123217
123630
|
];
|
|
123218
|
-
const projectsDir =
|
|
123631
|
+
const projectsDir = path120.join(HOME4, ".claude", "projects");
|
|
123219
123632
|
if (fs107.existsSync(projectsDir)) {
|
|
123220
123633
|
try {
|
|
123221
123634
|
for (const entry of fs107.readdirSync(projectsDir)) {
|
|
123222
|
-
const projDir =
|
|
123635
|
+
const projDir = path120.join(projectsDir, entry);
|
|
123223
123636
|
if (!fs107.statSync(projDir).isDirectory()) continue;
|
|
123224
|
-
files.push(
|
|
123225
|
-
files.push(
|
|
123226
|
-
const memDir =
|
|
123637
|
+
files.push(path120.join(projDir, "CLAUDE.md"));
|
|
123638
|
+
files.push(path120.join(projDir, "AGENTS.md"));
|
|
123639
|
+
const memDir = path120.join(projDir, "memory");
|
|
123227
123640
|
if (fs107.existsSync(memDir)) {
|
|
123228
123641
|
try {
|
|
123229
123642
|
for (const memFile of fs107.readdirSync(memDir)) {
|
|
123230
123643
|
if (memFile.endsWith(".md") && memFile !== "MEMORY.md") {
|
|
123231
|
-
files.push(
|
|
123644
|
+
files.push(path120.join(memDir, memFile));
|
|
123232
123645
|
}
|
|
123233
123646
|
}
|
|
123234
123647
|
} catch {
|
|
@@ -123240,8 +123653,8 @@ function scanClaudeMdFiles(projectRoot) {
|
|
|
123240
123653
|
}
|
|
123241
123654
|
if (projectRoot) {
|
|
123242
123655
|
files.push(
|
|
123243
|
-
|
|
123244
|
-
|
|
123656
|
+
path120.join(projectRoot, "CLAUDE.md"),
|
|
123657
|
+
path120.join(projectRoot, "AGENTS.md")
|
|
123245
123658
|
);
|
|
123246
123659
|
}
|
|
123247
123660
|
for (const filePath of files) {
|
|
@@ -123296,35 +123709,35 @@ var COMPETITOR_ALIASES = {
|
|
|
123296
123709
|
function scanIdeRuleFiles(projectRoot) {
|
|
123297
123710
|
const conflicts = [];
|
|
123298
123711
|
const ruleFiles = [];
|
|
123299
|
-
ruleFiles.push({ path:
|
|
123300
|
-
ruleFiles.push({ path:
|
|
123712
|
+
ruleFiles.push({ path: path120.join(HOME4, ".cursorrules"), type: ".cursorrules (global)" });
|
|
123713
|
+
ruleFiles.push({ path: path120.join(HOME4, ".windsurfrules"), type: ".windsurfrules (global)" });
|
|
123301
123714
|
if (projectRoot) {
|
|
123302
|
-
ruleFiles.push({ path:
|
|
123303
|
-
ruleFiles.push({ path:
|
|
123304
|
-
ruleFiles.push({ path:
|
|
123305
|
-
ruleFiles.push({ path:
|
|
123306
|
-
ruleFiles.push({ path:
|
|
123307
|
-
const clineRulesDir =
|
|
123715
|
+
ruleFiles.push({ path: path120.join(projectRoot, ".cursorrules"), type: ".cursorrules" });
|
|
123716
|
+
ruleFiles.push({ path: path120.join(projectRoot, ".windsurfrules"), type: ".windsurfrules" });
|
|
123717
|
+
ruleFiles.push({ path: path120.join(projectRoot, ".clinerules"), type: ".clinerules" });
|
|
123718
|
+
ruleFiles.push({ path: path120.join(projectRoot, ".continuerules"), type: ".continuerules" });
|
|
123719
|
+
ruleFiles.push({ path: path120.join(projectRoot, ".github", "copilot-instructions.md"), type: "copilot-instructions.md" });
|
|
123720
|
+
const clineRulesDir = path120.join(projectRoot, ".clinerules");
|
|
123308
123721
|
if (fs107.existsSync(clineRulesDir)) {
|
|
123309
123722
|
try {
|
|
123310
123723
|
const stat = fs107.statSync(clineRulesDir);
|
|
123311
123724
|
if (stat.isDirectory()) {
|
|
123312
123725
|
for (const file of fs107.readdirSync(clineRulesDir)) {
|
|
123313
|
-
ruleFiles.push({ path:
|
|
123726
|
+
ruleFiles.push({ path: path120.join(clineRulesDir, file), type: `.clinerules/${file}` });
|
|
123314
123727
|
}
|
|
123315
123728
|
}
|
|
123316
123729
|
} catch {
|
|
123317
123730
|
}
|
|
123318
123731
|
}
|
|
123319
123732
|
}
|
|
123320
|
-
const cursorRulesDirs = [
|
|
123321
|
-
if (projectRoot) cursorRulesDirs.push(
|
|
123733
|
+
const cursorRulesDirs = [path120.join(HOME4, ".cursor", "rules")];
|
|
123734
|
+
if (projectRoot) cursorRulesDirs.push(path120.join(projectRoot, ".cursor", "rules"));
|
|
123322
123735
|
for (const rulesDir of cursorRulesDirs) {
|
|
123323
123736
|
if (!fs107.existsSync(rulesDir)) continue;
|
|
123324
123737
|
try {
|
|
123325
123738
|
for (const file of fs107.readdirSync(rulesDir)) {
|
|
123326
123739
|
if (!file.endsWith(".mdc") || file === "trace-mcp.mdc") continue;
|
|
123327
|
-
ruleFiles.push({ path:
|
|
123740
|
+
ruleFiles.push({ path: path120.join(rulesDir, file), type: `.cursor/rules/${file}` });
|
|
123328
123741
|
}
|
|
123329
123742
|
} catch {
|
|
123330
123743
|
}
|
|
@@ -123358,7 +123771,7 @@ function scanIdeRuleFiles(projectRoot) {
|
|
|
123358
123771
|
function scanProjectConfigFiles(projectRoot) {
|
|
123359
123772
|
const conflicts = [];
|
|
123360
123773
|
for (const { file, competitor } of COMPETING_PROJECT_FILES) {
|
|
123361
|
-
const filePath =
|
|
123774
|
+
const filePath = path120.join(projectRoot, file);
|
|
123362
123775
|
if (!fs107.existsSync(filePath)) continue;
|
|
123363
123776
|
conflicts.push({
|
|
123364
123777
|
id: `config:${competitor}:${file}`,
|
|
@@ -123382,7 +123795,7 @@ function scanProjectConfigDirs(projectRoot) {
|
|
|
123382
123795
|
{ dir: ".continue", competitor: "continue.dev" }
|
|
123383
123796
|
];
|
|
123384
123797
|
for (const { dir, competitor } of dirs) {
|
|
123385
|
-
const fullPath =
|
|
123798
|
+
const fullPath = path120.join(projectRoot, dir);
|
|
123386
123799
|
if (!fs107.existsSync(fullPath)) continue;
|
|
123387
123800
|
let stat;
|
|
123388
123801
|
try {
|
|
@@ -123408,18 +123821,18 @@ function scanProjectConfigDirs(projectRoot) {
|
|
|
123408
123821
|
function scanContinueConfigs(projectRoot) {
|
|
123409
123822
|
const conflicts = [];
|
|
123410
123823
|
const configPaths = [
|
|
123411
|
-
|
|
123412
|
-
|
|
123824
|
+
path120.join(HOME4, ".continue", "config.yaml"),
|
|
123825
|
+
path120.join(HOME4, ".continue", "config.json")
|
|
123413
123826
|
];
|
|
123414
123827
|
if (projectRoot) {
|
|
123415
123828
|
configPaths.push(
|
|
123416
|
-
|
|
123417
|
-
|
|
123829
|
+
path120.join(projectRoot, ".continue", "config.yaml"),
|
|
123830
|
+
path120.join(projectRoot, ".continue", "config.json")
|
|
123418
123831
|
);
|
|
123419
123832
|
}
|
|
123420
|
-
const mcpServersDirs = [
|
|
123833
|
+
const mcpServersDirs = [path120.join(HOME4, ".continue", "mcpServers")];
|
|
123421
123834
|
if (projectRoot) {
|
|
123422
|
-
mcpServersDirs.push(
|
|
123835
|
+
mcpServersDirs.push(path120.join(projectRoot, ".continue", "mcpServers"));
|
|
123423
123836
|
}
|
|
123424
123837
|
for (const mcpDir of mcpServersDirs) {
|
|
123425
123838
|
if (!fs107.existsSync(mcpDir)) continue;
|
|
@@ -123431,7 +123844,7 @@ function scanContinueConfigs(projectRoot) {
|
|
|
123431
123844
|
}
|
|
123432
123845
|
for (const file of files) {
|
|
123433
123846
|
if (!file.endsWith(".json")) continue;
|
|
123434
|
-
const filePath =
|
|
123847
|
+
const filePath = path120.join(mcpDir, file);
|
|
123435
123848
|
let content;
|
|
123436
123849
|
try {
|
|
123437
123850
|
content = fs107.readFileSync(filePath, "utf-8");
|
|
@@ -123460,11 +123873,11 @@ function scanContinueConfigs(projectRoot) {
|
|
|
123460
123873
|
}
|
|
123461
123874
|
function scanGitHooks(projectRoot) {
|
|
123462
123875
|
const conflicts = [];
|
|
123463
|
-
const hooksDir =
|
|
123876
|
+
const hooksDir = path120.join(projectRoot, ".git", "hooks");
|
|
123464
123877
|
if (!fs107.existsSync(hooksDir)) return conflicts;
|
|
123465
123878
|
const hookFiles = ["pre-commit", "post-commit", "prepare-commit-msg"];
|
|
123466
123879
|
for (const hookFile of hookFiles) {
|
|
123467
|
-
const hookPath =
|
|
123880
|
+
const hookPath = path120.join(hooksDir, hookFile);
|
|
123468
123881
|
if (!fs107.existsSync(hookPath)) continue;
|
|
123469
123882
|
let content;
|
|
123470
123883
|
try {
|
|
@@ -123521,7 +123934,7 @@ function truncate(s, maxLen) {
|
|
|
123521
123934
|
|
|
123522
123935
|
// src/init/conflict-resolver.ts
|
|
123523
123936
|
import fs108 from "fs";
|
|
123524
|
-
import
|
|
123937
|
+
import path121 from "path";
|
|
123525
123938
|
function fixConflict(conflict, opts = {}) {
|
|
123526
123939
|
if (!conflict.fixable) {
|
|
123527
123940
|
return {
|
|
@@ -123568,8 +123981,8 @@ function fixMcpServer(conflict, opts) {
|
|
|
123568
123981
|
}
|
|
123569
123982
|
fs108.writeFileSync(configPath, result);
|
|
123570
123983
|
return { conflictId: conflict.id, action: "disabled", detail: `Commented out "${serverName}" in ${shortPath2(configPath)}`, target: configPath };
|
|
123571
|
-
} catch (
|
|
123572
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update config: ${
|
|
123984
|
+
} catch (err49) {
|
|
123985
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update config: ${err49.message}`, target: configPath };
|
|
123573
123986
|
}
|
|
123574
123987
|
}
|
|
123575
123988
|
function commentOutJsonKey(raw, key) {
|
|
@@ -123652,8 +124065,8 @@ function fixHookInSettings(conflict, opts) {
|
|
|
123652
124065
|
}
|
|
123653
124066
|
fs108.writeFileSync(settingsPath2, JSON.stringify(settings, null, 2) + "\n");
|
|
123654
124067
|
return { conflictId: conflict.id, action: "removed", detail: `Removed ${competitor} hooks from ${shortPath2(settingsPath2)}`, target: settingsPath2 };
|
|
123655
|
-
} catch (
|
|
123656
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update settings: ${
|
|
124068
|
+
} catch (err49) {
|
|
124069
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update settings: ${err49.message}`, target: settingsPath2 };
|
|
123657
124070
|
}
|
|
123658
124071
|
}
|
|
123659
124072
|
function fixHookScript(conflict, opts) {
|
|
@@ -123667,8 +124080,8 @@ function fixHookScript(conflict, opts) {
|
|
|
123667
124080
|
try {
|
|
123668
124081
|
fs108.unlinkSync(scriptPath);
|
|
123669
124082
|
return { conflictId: conflict.id, action: "removed", detail: `Deleted ${shortPath2(scriptPath)}`, target: scriptPath };
|
|
123670
|
-
} catch (
|
|
123671
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to delete: ${
|
|
124083
|
+
} catch (err49) {
|
|
124084
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to delete: ${err49.message}`, target: scriptPath };
|
|
123672
124085
|
}
|
|
123673
124086
|
}
|
|
123674
124087
|
function fixClaudeMdBlock(conflict, opts) {
|
|
@@ -123699,8 +124112,8 @@ function fixClaudeMdBlock(conflict, opts) {
|
|
|
123699
124112
|
}
|
|
123700
124113
|
fs108.writeFileSync(filePath, updated);
|
|
123701
124114
|
return { conflictId: conflict.id, action: "cleaned", detail: `Removed ${conflict.competitor} content from ${shortPath2(filePath)}`, target: filePath };
|
|
123702
|
-
} catch (
|
|
123703
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update: ${
|
|
124115
|
+
} catch (err49) {
|
|
124116
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to update: ${err49.message}`, target: filePath };
|
|
123704
124117
|
}
|
|
123705
124118
|
}
|
|
123706
124119
|
var COMPETITOR_HEADING_NAMES = {
|
|
@@ -123747,12 +124160,12 @@ function isEntirelyAboutCompetitor(content, competitor) {
|
|
|
123747
124160
|
return false;
|
|
123748
124161
|
}
|
|
123749
124162
|
function removeFromMemoryIndex(deletedFilePath) {
|
|
123750
|
-
const memoryDir =
|
|
123751
|
-
const indexPath =
|
|
124163
|
+
const memoryDir = path121.dirname(deletedFilePath);
|
|
124164
|
+
const indexPath = path121.join(memoryDir, "MEMORY.md");
|
|
123752
124165
|
if (!fs108.existsSync(indexPath)) return;
|
|
123753
124166
|
try {
|
|
123754
124167
|
const content = fs108.readFileSync(indexPath, "utf-8");
|
|
123755
|
-
const fileName =
|
|
124168
|
+
const fileName = path121.basename(deletedFilePath);
|
|
123756
124169
|
const escaped = fileName.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
|
123757
124170
|
const updated = content.split("\n").filter((line) => !new RegExp(`\\(${escaped}\\)`).test(line)).join("\n");
|
|
123758
124171
|
if (updated !== content) {
|
|
@@ -123777,8 +124190,8 @@ function fixConfigFile(conflict, opts) {
|
|
|
123777
124190
|
fs108.unlinkSync(filePath);
|
|
123778
124191
|
}
|
|
123779
124192
|
return { conflictId: conflict.id, action: "removed", detail: `Deleted ${shortPath2(filePath)}`, target: filePath };
|
|
123780
|
-
} catch (
|
|
123781
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to delete: ${
|
|
124193
|
+
} catch (err49) {
|
|
124194
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to delete: ${err49.message}`, target: filePath };
|
|
123782
124195
|
}
|
|
123783
124196
|
}
|
|
123784
124197
|
function fixGlobalArtifact(conflict, opts) {
|
|
@@ -123792,8 +124205,8 @@ function fixGlobalArtifact(conflict, opts) {
|
|
|
123792
124205
|
try {
|
|
123793
124206
|
fs108.rmSync(dirPath, { recursive: true, force: true });
|
|
123794
124207
|
return { conflictId: conflict.id, action: "removed", detail: `Removed ${shortPath2(dirPath)}`, target: dirPath };
|
|
123795
|
-
} catch (
|
|
123796
|
-
return { conflictId: conflict.id, action: "skipped", detail: `Failed to remove: ${
|
|
124208
|
+
} catch (err49) {
|
|
124209
|
+
return { conflictId: conflict.id, action: "skipped", detail: `Failed to remove: ${err49.message}`, target: dirPath };
|
|
123797
124210
|
}
|
|
123798
124211
|
}
|
|
123799
124212
|
function shortPath2(p5) {
|
|
@@ -124045,13 +124458,13 @@ init_schema();
|
|
|
124045
124458
|
// src/project-setup.ts
|
|
124046
124459
|
init_detector();
|
|
124047
124460
|
import fs109 from "fs";
|
|
124048
|
-
import
|
|
124461
|
+
import path122 from "path";
|
|
124049
124462
|
init_config();
|
|
124050
124463
|
init_registry2();
|
|
124051
124464
|
init_global();
|
|
124052
124465
|
init_schema();
|
|
124053
124466
|
function setupProject(projectRoot, opts) {
|
|
124054
|
-
const absRoot =
|
|
124467
|
+
const absRoot = path122.resolve(projectRoot);
|
|
124055
124468
|
const existing = getProject(absRoot);
|
|
124056
124469
|
if (existing && !opts?.force) {
|
|
124057
124470
|
return { entry: existing, detection: { languages: [], frameworks: [], packageManagers: [], rootMarkers: [] }, dbPath: existing.dbPath, migrated: false, isNew: false };
|
|
@@ -124067,7 +124480,7 @@ function setupProject(projectRoot, opts) {
|
|
|
124067
124480
|
const dbPath = getDbPath(absRoot);
|
|
124068
124481
|
let migrated = false;
|
|
124069
124482
|
if (opts?.migrateOldDb) {
|
|
124070
|
-
const oldDbPath =
|
|
124483
|
+
const oldDbPath = path122.join(absRoot, ".trace-mcp", "index.db");
|
|
124071
124484
|
if (fs109.existsSync(oldDbPath) && !fs109.existsSync(dbPath)) {
|
|
124072
124485
|
fs109.copyFileSync(oldDbPath, dbPath);
|
|
124073
124486
|
migrated = true;
|
|
@@ -124088,7 +124501,7 @@ init_pipeline();
|
|
|
124088
124501
|
|
|
124089
124502
|
// src/cli/install-app.ts
|
|
124090
124503
|
import fs110 from "fs";
|
|
124091
|
-
import
|
|
124504
|
+
import path123 from "path";
|
|
124092
124505
|
import os13 from "os";
|
|
124093
124506
|
import https2 from "https";
|
|
124094
124507
|
import { execSync as execSync6 } from "child_process";
|
|
@@ -124097,7 +124510,7 @@ var GITHUB_REPO = "nikolai-vysotskyi/trace-mcp";
|
|
|
124097
124510
|
var isMac = process.platform === "darwin";
|
|
124098
124511
|
var isWin = process.platform === "win32";
|
|
124099
124512
|
var APP_NAME = isMac ? "trace-mcp.app" : "trace-mcp";
|
|
124100
|
-
var INSTALL_DIR = isMac ?
|
|
124513
|
+
var INSTALL_DIR = isMac ? path123.join(os13.homedir(), "Applications") : path123.join(process.env.LOCALAPPDATA ?? path123.join(os13.homedir(), "AppData", "Local"), "Programs", "trace-mcp");
|
|
124101
124514
|
function sleep(ms2) {
|
|
124102
124515
|
return new Promise((resolve4) => setTimeout(resolve4, ms2));
|
|
124103
124516
|
}
|
|
@@ -124179,10 +124592,10 @@ function downloadFile(url2, dest, timeoutMs = 6e4) {
|
|
|
124179
124592
|
file.close();
|
|
124180
124593
|
resolve4();
|
|
124181
124594
|
});
|
|
124182
|
-
}).on("error", (
|
|
124595
|
+
}).on("error", (err49) => {
|
|
124183
124596
|
fs110.unlink(dest, () => {
|
|
124184
124597
|
});
|
|
124185
|
-
reject(
|
|
124598
|
+
reject(err49);
|
|
124186
124599
|
});
|
|
124187
124600
|
};
|
|
124188
124601
|
doGet(url2);
|
|
@@ -124206,20 +124619,20 @@ function pinToDock(appPath) {
|
|
|
124206
124619
|
}
|
|
124207
124620
|
function createStartMenuShortcut(exePath) {
|
|
124208
124621
|
try {
|
|
124209
|
-
const startMenuDir =
|
|
124210
|
-
process.env.APPDATA ??
|
|
124622
|
+
const startMenuDir = path123.join(
|
|
124623
|
+
process.env.APPDATA ?? path123.join(os13.homedir(), "AppData", "Roaming"),
|
|
124211
124624
|
"Microsoft",
|
|
124212
124625
|
"Windows",
|
|
124213
124626
|
"Start Menu",
|
|
124214
124627
|
"Programs"
|
|
124215
124628
|
);
|
|
124216
124629
|
fs110.mkdirSync(startMenuDir, { recursive: true });
|
|
124217
|
-
const shortcutPath =
|
|
124630
|
+
const shortcutPath = path123.join(startMenuDir, "trace-mcp.lnk");
|
|
124218
124631
|
const ps2 = `
|
|
124219
124632
|
$ws = New-Object -ComObject WScript.Shell;
|
|
124220
124633
|
$sc = $ws.CreateShortcut('${shortcutPath.replace(/'/g, "''")}');
|
|
124221
124634
|
$sc.TargetPath = '${exePath.replace(/'/g, "''")}';
|
|
124222
|
-
$sc.WorkingDirectory = '${
|
|
124635
|
+
$sc.WorkingDirectory = '${path123.dirname(exePath).replace(/'/g, "''")}';
|
|
124223
124636
|
$sc.Description = 'trace-mcp';
|
|
124224
124637
|
$sc.Save();
|
|
124225
124638
|
`.replace(/\n/g, " ");
|
|
@@ -124269,17 +124682,17 @@ async function installGuiApp(opts = {}) {
|
|
|
124269
124682
|
error: `No ${process.platform}/${arch} archive found in release ${release.tag} (${release.assets.length} assets available: ${release.assets.map((a) => a.name).join(", ") || "none"}). The build may still be in progress \u2014 try again in a few minutes with: trace-mcp install-app`
|
|
124270
124683
|
};
|
|
124271
124684
|
}
|
|
124272
|
-
const tmpDir = fs110.mkdtempSync(
|
|
124273
|
-
const archivePath =
|
|
124685
|
+
const tmpDir = fs110.mkdtempSync(path123.join(os13.tmpdir(), "trace-mcp-app-"));
|
|
124686
|
+
const archivePath = path123.join(tmpDir, asset.name);
|
|
124274
124687
|
await downloadFile(asset.url, archivePath);
|
|
124275
124688
|
fs110.mkdirSync(INSTALL_DIR, { recursive: true });
|
|
124276
124689
|
if (isMac) {
|
|
124277
|
-
const appPath =
|
|
124690
|
+
const appPath = path123.join(INSTALL_DIR, APP_NAME);
|
|
124278
124691
|
if (fs110.existsSync(appPath)) {
|
|
124279
124692
|
fs110.rmSync(appPath, { recursive: true, force: true });
|
|
124280
124693
|
}
|
|
124281
124694
|
execSync6(`unzip -q -o "${archivePath}" -d "${INSTALL_DIR}"`, { stdio: "pipe" });
|
|
124282
|
-
fs110.writeFileSync(
|
|
124695
|
+
fs110.writeFileSync(path123.join(INSTALL_DIR, ".trace-mcp-version"), release.tag, "utf-8");
|
|
124283
124696
|
fs110.rmSync(tmpDir, { recursive: true, force: true });
|
|
124284
124697
|
pinToDock(appPath);
|
|
124285
124698
|
return { installed: true, path: appPath };
|
|
@@ -124293,25 +124706,25 @@ async function installGuiApp(opts = {}) {
|
|
|
124293
124706
|
{ stdio: "pipe", timeout: 12e4 }
|
|
124294
124707
|
);
|
|
124295
124708
|
}
|
|
124296
|
-
fs110.writeFileSync(
|
|
124709
|
+
fs110.writeFileSync(path123.join(INSTALL_DIR, ".trace-mcp-version"), release.tag, "utf-8");
|
|
124297
124710
|
fs110.rmSync(tmpDir, { recursive: true, force: true });
|
|
124298
|
-
const exePath =
|
|
124711
|
+
const exePath = path123.join(INSTALL_DIR, "trace-mcp.exe");
|
|
124299
124712
|
if (fs110.existsSync(exePath)) {
|
|
124300
124713
|
createStartMenuShortcut(exePath);
|
|
124301
124714
|
}
|
|
124302
124715
|
return { installed: true, path: fs110.existsSync(exePath) ? exePath : INSTALL_DIR };
|
|
124303
|
-
} catch (
|
|
124304
|
-
return { installed: false, error:
|
|
124716
|
+
} catch (err49) {
|
|
124717
|
+
return { installed: false, error: err49.message };
|
|
124305
124718
|
}
|
|
124306
124719
|
}
|
|
124307
124720
|
function isAppInstalled() {
|
|
124308
124721
|
if (isMac) {
|
|
124309
|
-
return fs110.existsSync(
|
|
124722
|
+
return fs110.existsSync(path123.join(INSTALL_DIR, APP_NAME));
|
|
124310
124723
|
}
|
|
124311
|
-
return fs110.existsSync(
|
|
124724
|
+
return fs110.existsSync(path123.join(INSTALL_DIR, "trace-mcp.exe"));
|
|
124312
124725
|
}
|
|
124313
124726
|
function getInstalledAppVersion() {
|
|
124314
|
-
const markerPath =
|
|
124727
|
+
const markerPath = path123.join(INSTALL_DIR, ".trace-mcp-version");
|
|
124315
124728
|
if (!fs110.existsSync(markerPath)) return null;
|
|
124316
124729
|
return fs110.readFileSync(markerPath, "utf-8").trim().replace(/^v/, "");
|
|
124317
124730
|
}
|
|
@@ -124356,7 +124769,7 @@ var installAppCommand = new Command("install-app").description("Download and ins
|
|
|
124356
124769
|
init_global();
|
|
124357
124770
|
import { Command as Command2 } from "commander";
|
|
124358
124771
|
import fs111 from "fs";
|
|
124359
|
-
import
|
|
124772
|
+
import path124 from "path";
|
|
124360
124773
|
import { execSync as execSync7, spawn as spawn2 } from "child_process";
|
|
124361
124774
|
|
|
124362
124775
|
// src/daemon/client.ts
|
|
@@ -124382,7 +124795,7 @@ var PLIST_LABEL = "com.trace-mcp.server";
|
|
|
124382
124795
|
function getTraceMcpBinary() {
|
|
124383
124796
|
const argv1 = process.argv[1];
|
|
124384
124797
|
if (argv1 && fs111.existsSync(argv1)) {
|
|
124385
|
-
return
|
|
124798
|
+
return path124.resolve(argv1);
|
|
124386
124799
|
}
|
|
124387
124800
|
try {
|
|
124388
124801
|
return execSync7("which trace-mcp", { encoding: "utf-8" }).trim();
|
|
@@ -124391,7 +124804,7 @@ function getTraceMcpBinary() {
|
|
|
124391
124804
|
}
|
|
124392
124805
|
}
|
|
124393
124806
|
function resolveNodePath() {
|
|
124394
|
-
const nodeDir =
|
|
124807
|
+
const nodeDir = path124.dirname(process.execPath);
|
|
124395
124808
|
const fallback = "/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin:/usr/sbin:/sbin";
|
|
124396
124809
|
return `${nodeDir}:${fallback}`;
|
|
124397
124810
|
}
|
|
@@ -124432,7 +124845,7 @@ function generatePlist(binaryPath, port) {
|
|
|
124432
124845
|
function installPlist(port) {
|
|
124433
124846
|
const binaryPath = getTraceMcpBinary();
|
|
124434
124847
|
const plistContent = generatePlist(binaryPath, port);
|
|
124435
|
-
const plistDir =
|
|
124848
|
+
const plistDir = path124.dirname(LAUNCHD_PLIST_PATH);
|
|
124436
124849
|
if (!fs111.existsSync(plistDir)) {
|
|
124437
124850
|
fs111.mkdirSync(plistDir, { recursive: true });
|
|
124438
124851
|
}
|
|
@@ -124712,9 +125125,9 @@ var initCommand = new Command3("init").description("One-time global setup: confi
|
|
|
124712
125125
|
spin.start("Setting up trace-mcp");
|
|
124713
125126
|
try {
|
|
124714
125127
|
executeSteps(steps, { selectedClients, installHooks, installTweakcc, claudeMdScope, force: opts.force, dryRun: opts.dryRun });
|
|
124715
|
-
} catch (
|
|
125128
|
+
} catch (err49) {
|
|
124716
125129
|
spin.stop("Failed");
|
|
124717
|
-
p.log.error(`Setup failed: ${
|
|
125130
|
+
p.log.error(`Setup failed: ${err49.message}`);
|
|
124718
125131
|
process.exit(1);
|
|
124719
125132
|
}
|
|
124720
125133
|
spin.stop("Done");
|
|
@@ -124746,11 +125159,11 @@ var initCommand = new Command3("init").description("One-time global setup: confi
|
|
|
124746
125159
|
}
|
|
124747
125160
|
}
|
|
124748
125161
|
}
|
|
124749
|
-
} catch (
|
|
125162
|
+
} catch (err49) {
|
|
124750
125163
|
if (!nonInteractive) {
|
|
124751
|
-
p.log.error(`Conflict resolution failed: ${
|
|
125164
|
+
p.log.error(`Conflict resolution failed: ${err49.message}`);
|
|
124752
125165
|
} else {
|
|
124753
|
-
console.error(`Conflict resolution failed: ${
|
|
125166
|
+
console.error(`Conflict resolution failed: ${err49.message}`);
|
|
124754
125167
|
}
|
|
124755
125168
|
}
|
|
124756
125169
|
}
|
|
@@ -124816,8 +125229,8 @@ var initCommand = new Command3("init").description("One-time global setup: confi
|
|
|
124816
125229
|
});
|
|
124817
125230
|
updateLastIndexed(proj.root);
|
|
124818
125231
|
db.close();
|
|
124819
|
-
} catch (
|
|
124820
|
-
steps.push({ target: proj.root, action: "skipped", detail: `Upgrade failed: ${
|
|
125232
|
+
} catch (err49) {
|
|
125233
|
+
steps.push({ target: proj.root, action: "skipped", detail: `Upgrade failed: ${err49.message}` });
|
|
124821
125234
|
}
|
|
124822
125235
|
}
|
|
124823
125236
|
spin?.stop("Upgrade complete");
|
|
@@ -124937,7 +125350,7 @@ async function runIndexingForProject(projectRoot) {
|
|
|
124937
125350
|
}
|
|
124938
125351
|
async function registerAndIndexProject(dir, opts) {
|
|
124939
125352
|
let projectRoot = null;
|
|
124940
|
-
const resolvedDir =
|
|
125353
|
+
const resolvedDir = path125.resolve(dir);
|
|
124941
125354
|
if (hasRootMarkers(resolvedDir)) {
|
|
124942
125355
|
projectRoot = resolvedDir;
|
|
124943
125356
|
} else {
|
|
@@ -124978,7 +125391,7 @@ async function registerMultiRootProject(parentDir, childRoots, opts) {
|
|
|
124978
125391
|
return {
|
|
124979
125392
|
target: parentDir,
|
|
124980
125393
|
action: "skipped",
|
|
124981
|
-
detail: `Would register multi-root with ${childRoots.length} children: ${childRoots.map((r) =>
|
|
125394
|
+
detail: `Would register multi-root with ${childRoots.length} children: ${childRoots.map((r) => path125.basename(r)).join(", ")}`
|
|
124982
125395
|
};
|
|
124983
125396
|
}
|
|
124984
125397
|
const existing = getProject(parentDir);
|
|
@@ -124988,14 +125401,14 @@ async function registerMultiRootProject(parentDir, childRoots, opts) {
|
|
|
124988
125401
|
const allInclude = [];
|
|
124989
125402
|
const allExclude = [];
|
|
124990
125403
|
for (const childRoot of childRoots) {
|
|
124991
|
-
const relPath =
|
|
125404
|
+
const relPath = path125.relative(parentDir, childRoot).replace(/\\/g, "/");
|
|
124992
125405
|
const detection = detectProject(childRoot);
|
|
124993
125406
|
const config = generateConfig(detection);
|
|
124994
125407
|
for (const pattern of config.include) allInclude.push(`${relPath}/${pattern}`);
|
|
124995
125408
|
for (const pattern of config.exclude) allExclude.push(`${relPath}/${pattern}`);
|
|
124996
125409
|
}
|
|
124997
125410
|
const allProjects = listProjects();
|
|
124998
|
-
const parentPrefix = parentDir +
|
|
125411
|
+
const parentPrefix = parentDir + path125.sep;
|
|
124999
125412
|
for (const proj of allProjects) {
|
|
125000
125413
|
if (proj.root !== parentDir && proj.root.startsWith(parentPrefix)) {
|
|
125001
125414
|
if (fs112.existsSync(proj.dbPath)) fs112.unlinkSync(proj.dbPath);
|
|
@@ -125014,7 +125427,7 @@ async function registerMultiRootProject(parentDir, childRoots, opts) {
|
|
|
125014
125427
|
db.close();
|
|
125015
125428
|
const entry = registerProject(parentDir, { type: "multi-root", children: childRoots });
|
|
125016
125429
|
const indexResult = await runIndexingForProject(parentDir);
|
|
125017
|
-
const detail = indexResult ? `Registered and indexed multi-root: ${entry.name} (${childRoots.length} children) \u2014 ${indexResult.indexed} files in ${formatDuration(indexResult.durationMs)}` : `Registered multi-root (${childRoots.length} children): ${childRoots.map((r) =>
|
|
125430
|
+
const detail = indexResult ? `Registered and indexed multi-root: ${entry.name} (${childRoots.length} children) \u2014 ${indexResult.indexed} files in ${formatDuration(indexResult.durationMs)}` : `Registered multi-root (${childRoots.length} children): ${childRoots.map((r) => path125.basename(r)).join(", ")} (indexing failed)`;
|
|
125018
125431
|
return {
|
|
125019
125432
|
target: parentDir,
|
|
125020
125433
|
action: existing ? "updated" : "created",
|
|
@@ -125059,11 +125472,11 @@ init_registry2();
|
|
|
125059
125472
|
init_global();
|
|
125060
125473
|
import { Command as Command4 } from "commander";
|
|
125061
125474
|
import fs113 from "fs";
|
|
125062
|
-
import
|
|
125475
|
+
import path126 from "path";
|
|
125063
125476
|
var upgradeCommand = new Command4("upgrade").description("Upgrade trace-mcp: run DB migrations, reindex with latest plugins, update hooks and CLAUDE.md").argument("[dir]", "Project directory (omit to upgrade all registered projects)").option("--skip-hooks", "Do not update guard hooks").option("--skip-reindex", "Do not trigger reindex").option("--skip-claude-md", "Do not update CLAUDE.md block").option("--dry-run", "Show what would be done without writing files").option("--json", "Output results as JSON").action(async (dir, opts) => {
|
|
125064
125477
|
const projectRoots = [];
|
|
125065
125478
|
if (dir) {
|
|
125066
|
-
projectRoots.push(
|
|
125479
|
+
projectRoots.push(path126.resolve(dir));
|
|
125067
125480
|
} else {
|
|
125068
125481
|
const projects = listProjects();
|
|
125069
125482
|
if (projects.length === 0) {
|
|
@@ -125116,9 +125529,9 @@ var upgradeCommand = new Command4("upgrade").description("Upgrade trace-mcp: run
|
|
|
125116
125529
|
updateLastIndexed(projectRoot);
|
|
125117
125530
|
}
|
|
125118
125531
|
db.close();
|
|
125119
|
-
} catch (
|
|
125120
|
-
logger.error({ error:
|
|
125121
|
-
steps.push({ target: projectRoot, action: "skipped", detail: `Upgrade failed: ${
|
|
125532
|
+
} catch (err49) {
|
|
125533
|
+
logger.error({ error: err49.message, project: projectRoot }, "Upgrade failed");
|
|
125534
|
+
steps.push({ target: projectRoot, action: "skipped", detail: `Upgrade failed: ${err49.message}` });
|
|
125122
125535
|
}
|
|
125123
125536
|
} else {
|
|
125124
125537
|
steps.push({ target: dbPath, action: "skipped", detail: "Would run migrations" });
|
|
@@ -125154,7 +125567,7 @@ var upgradeCommand = new Command4("upgrade").description("Upgrade trace-mcp: run
|
|
|
125154
125567
|
console.log(header);
|
|
125155
125568
|
for (const { projectRoot, steps } of allSteps) {
|
|
125156
125569
|
console.log(`
|
|
125157
|
-
Project: ${
|
|
125570
|
+
Project: ${path126.basename(projectRoot)} (${projectRoot})`);
|
|
125158
125571
|
for (const step of steps) {
|
|
125159
125572
|
console.log(` ${step.action}: ${step.detail ?? step.target}`);
|
|
125160
125573
|
}
|
|
@@ -125166,7 +125579,7 @@ var upgradeCommand = new Command4("upgrade").description("Upgrade trace-mcp: run
|
|
|
125166
125579
|
// src/cli/add.ts
|
|
125167
125580
|
import { Command as Command5 } from "commander";
|
|
125168
125581
|
import fs114 from "fs";
|
|
125169
|
-
import
|
|
125582
|
+
import path127 from "path";
|
|
125170
125583
|
import * as p2 from "@clack/prompts";
|
|
125171
125584
|
init_detector();
|
|
125172
125585
|
init_global();
|
|
@@ -125197,9 +125610,9 @@ async function runIndexing(projectRoot, opts) {
|
|
|
125197
125610
|
const result = await pipeline2.indexAll(true);
|
|
125198
125611
|
updateLastIndexed(projectRoot);
|
|
125199
125612
|
return { indexed: result.indexed, skipped: result.skipped, errors: result.errors, durationMs: result.durationMs };
|
|
125200
|
-
} catch (
|
|
125613
|
+
} catch (err49) {
|
|
125201
125614
|
if (!opts.json) {
|
|
125202
|
-
p2.log.warn(`Indexing failed: ${
|
|
125615
|
+
p2.log.warn(`Indexing failed: ${err49.message}`);
|
|
125203
125616
|
}
|
|
125204
125617
|
return null;
|
|
125205
125618
|
} finally {
|
|
@@ -125211,7 +125624,7 @@ function formatDuration2(ms2) {
|
|
|
125211
125624
|
return `${(ms2 / 1e3).toFixed(1)}s`;
|
|
125212
125625
|
}
|
|
125213
125626
|
var addCommand = new Command5("add").description("Register a project for indexing: detect root, create DB, add to registry").argument("[dir]", "Project directory (default: current directory)", ".").option("--force", "Re-register even if already registered").option("--no-index", "Skip indexing after registration").option("--json", "Output results as JSON").action(async (dir, opts) => {
|
|
125214
|
-
const resolvedDir =
|
|
125627
|
+
const resolvedDir = path127.resolve(dir);
|
|
125215
125628
|
if (!fs114.existsSync(resolvedDir)) {
|
|
125216
125629
|
console.error(`Directory does not exist: ${resolvedDir}`);
|
|
125217
125630
|
process.exit(1);
|
|
@@ -125344,7 +125757,7 @@ async function handleMultiRoot(parentDir, childRoots, opts) {
|
|
|
125344
125757
|
p2.note(
|
|
125345
125758
|
`No project root markers in ${parentDir}
|
|
125346
125759
|
Discovered ${childRoots.length} child project(s):
|
|
125347
|
-
` + childRoots.map((r) => ` ${
|
|
125760
|
+
` + childRoots.map((r) => ` ${path127.basename(r)}`).join("\n"),
|
|
125348
125761
|
"Multi-root"
|
|
125349
125762
|
);
|
|
125350
125763
|
}
|
|
@@ -125363,7 +125776,7 @@ Discovered ${childRoots.length} child project(s):
|
|
|
125363
125776
|
const allLanguages = /* @__PURE__ */ new Set();
|
|
125364
125777
|
const allFrameworks = /* @__PURE__ */ new Set();
|
|
125365
125778
|
for (const childRoot of childRoots) {
|
|
125366
|
-
const relPath =
|
|
125779
|
+
const relPath = path127.relative(parentDir, childRoot).replace(/\\/g, "/");
|
|
125367
125780
|
const detection = detectProject(childRoot);
|
|
125368
125781
|
const config = generateConfig(detection);
|
|
125369
125782
|
for (const pattern of config.include) {
|
|
@@ -125390,13 +125803,13 @@ Discovered ${childRoots.length} child project(s):
|
|
|
125390
125803
|
const allProjects = listProjects();
|
|
125391
125804
|
const cleaned = [];
|
|
125392
125805
|
for (const proj of allProjects) {
|
|
125393
|
-
if (proj.root.startsWith(parentDir +
|
|
125806
|
+
if (proj.root.startsWith(parentDir + path127.sep) || proj.root.startsWith(parentDir + "/")) {
|
|
125394
125807
|
if (fs114.existsSync(proj.dbPath)) {
|
|
125395
125808
|
fs114.unlinkSync(proj.dbPath);
|
|
125396
125809
|
}
|
|
125397
125810
|
unregisterProject(proj.root);
|
|
125398
125811
|
removeProjectConfigJsonc(proj.root);
|
|
125399
|
-
cleaned.push(
|
|
125812
|
+
cleaned.push(path127.basename(proj.root));
|
|
125400
125813
|
}
|
|
125401
125814
|
}
|
|
125402
125815
|
if (isInteractive && cleaned.length > 0) {
|
|
@@ -125437,7 +125850,7 @@ Discovered ${childRoots.length} child project(s):
|
|
|
125437
125850
|
status: existing ? "re-registered" : "registered",
|
|
125438
125851
|
type: "multi-root",
|
|
125439
125852
|
project: entry,
|
|
125440
|
-
children: childRoots.map((r) =>
|
|
125853
|
+
children: childRoots.map((r) => path127.basename(r)),
|
|
125441
125854
|
cleaned,
|
|
125442
125855
|
indexing: indexResult ?? void 0
|
|
125443
125856
|
}, null, 2));
|
|
@@ -125446,7 +125859,7 @@ Discovered ${childRoots.length} child project(s):
|
|
|
125446
125859
|
lines.push(`Project: ${entry.name} (multi-root)`);
|
|
125447
125860
|
lines.push(`Root: ${parentDir}`);
|
|
125448
125861
|
lines.push(`DB: ${shortPath4(dbPath)}`);
|
|
125449
|
-
lines.push(`Children: ${childRoots.map((r) =>
|
|
125862
|
+
lines.push(`Children: ${childRoots.map((r) => path127.basename(r)).join(", ")}`);
|
|
125450
125863
|
if (indexResult) {
|
|
125451
125864
|
lines.push(`Indexed: ${indexResult.indexed} files (${indexResult.skipped} skipped, ${indexResult.errors} errors)`);
|
|
125452
125865
|
lines.push(`Duration: ${formatDuration2(indexResult.durationMs)}`);
|
|
@@ -125614,7 +126027,7 @@ init_global();
|
|
|
125614
126027
|
init_registry2();
|
|
125615
126028
|
import { Command as Command7 } from "commander";
|
|
125616
126029
|
import { execFileSync as execFileSync7 } from "child_process";
|
|
125617
|
-
import
|
|
126030
|
+
import path128 from "path";
|
|
125618
126031
|
import fs115 from "fs";
|
|
125619
126032
|
|
|
125620
126033
|
// src/ci/report-generator.ts
|
|
@@ -126372,8 +126785,8 @@ function writeOutput(outputPath, content) {
|
|
|
126372
126785
|
if (outputPath === "-" || !outputPath) {
|
|
126373
126786
|
process.stdout.write(content + "\n");
|
|
126374
126787
|
} else {
|
|
126375
|
-
const resolved =
|
|
126376
|
-
fs115.mkdirSync(
|
|
126788
|
+
const resolved = path128.resolve(outputPath);
|
|
126789
|
+
fs115.mkdirSync(path128.dirname(resolved), { recursive: true });
|
|
126377
126790
|
fs115.writeFileSync(resolved, content, "utf-8");
|
|
126378
126791
|
logger.info({ path: resolved }, "CI report written");
|
|
126379
126792
|
}
|
|
@@ -126686,7 +127099,7 @@ subprojectCommand.command("impact").description("Cross-repo impact analysis: who
|
|
|
126686
127099
|
|
|
126687
127100
|
// src/cli/memory.ts
|
|
126688
127101
|
import { Command as Command11 } from "commander";
|
|
126689
|
-
import * as
|
|
127102
|
+
import * as path129 from "path";
|
|
126690
127103
|
init_global();
|
|
126691
127104
|
function openStore() {
|
|
126692
127105
|
ensureGlobalDirs();
|
|
@@ -126696,7 +127109,7 @@ var memoryCommand = new Command11("memory").description("Decision memory \u2014
|
|
|
126696
127109
|
memoryCommand.command("mine").description("Mine Claude Code / Claw Code session logs for decisions").option("--project <path>", "Project root to mine (default: current directory)", process.cwd()).option("--force", "Re-mine already processed sessions").option("--min-confidence <n>", "Minimum confidence threshold (default: 0.6)", "0.6").action((opts) => {
|
|
126697
127110
|
const store = openStore();
|
|
126698
127111
|
try {
|
|
126699
|
-
const projectRoot =
|
|
127112
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126700
127113
|
console.log(`Mining sessions for: ${projectRoot}`);
|
|
126701
127114
|
const result = mineSessions(store, {
|
|
126702
127115
|
projectRoot,
|
|
@@ -126718,7 +127131,7 @@ memoryCommand.command("mine").description("Mine Claude Code / Claw Code session
|
|
|
126718
127131
|
memoryCommand.command("index").description("Index session content for cross-session search").option("--project <path>", "Project root to index (default: current directory)", process.cwd()).option("--force", "Re-index already processed sessions").action((opts) => {
|
|
126719
127132
|
const store = openStore();
|
|
126720
127133
|
try {
|
|
126721
|
-
const projectRoot =
|
|
127134
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126722
127135
|
console.log(`Indexing session content for: ${projectRoot}`);
|
|
126723
127136
|
const result = indexSessions(store, {
|
|
126724
127137
|
projectRoot,
|
|
@@ -126739,7 +127152,7 @@ memoryCommand.command("index").description("Index session content for cross-sess
|
|
|
126739
127152
|
memoryCommand.command("search <query>").description("Search across past session conversations").option("--project <path>", "Filter to project (default: current directory)", process.cwd()).option("--limit <n>", "Max results (default: 20)", "20").action((query, opts) => {
|
|
126740
127153
|
const store = openStore();
|
|
126741
127154
|
try {
|
|
126742
|
-
const projectRoot =
|
|
127155
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126743
127156
|
const results = store.searchSessions(query, {
|
|
126744
127157
|
project_root: projectRoot,
|
|
126745
127158
|
limit: parseInt(opts.limit, 10)
|
|
@@ -126771,7 +127184,7 @@ memoryCommand.command("search <query>").description("Search across past session
|
|
|
126771
127184
|
memoryCommand.command("decisions").description("List decisions in the knowledge graph").option("--project <path>", "Filter to project (default: current directory)", process.cwd()).option("--type <type>", "Filter by type (architecture_decision, tech_choice, bug_root_cause, preference, tradeoff, discovery, convention)").option("--search <query>", "Full-text search query").option("--limit <n>", "Max results (default: 20)", "20").option("--json", "Output as JSON").action((opts) => {
|
|
126772
127185
|
const store = openStore();
|
|
126773
127186
|
try {
|
|
126774
|
-
const projectRoot =
|
|
127187
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126775
127188
|
const decisions = store.queryDecisions({
|
|
126776
127189
|
project_root: projectRoot,
|
|
126777
127190
|
type: opts.type,
|
|
@@ -126803,7 +127216,7 @@ memoryCommand.command("decisions").description("List decisions in the knowledge
|
|
|
126803
127216
|
memoryCommand.command("stats").description("Show decision memory statistics").option("--project <path>", "Filter to project (default: current directory)", process.cwd()).option("--json", "Output as JSON").action((opts) => {
|
|
126804
127217
|
const store = openStore();
|
|
126805
127218
|
try {
|
|
126806
|
-
const projectRoot =
|
|
127219
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126807
127220
|
const stats = store.getStats(projectRoot);
|
|
126808
127221
|
const minedCount = store.getMinedSessionCount();
|
|
126809
127222
|
const chunkCount = store.getSessionChunkCount(projectRoot);
|
|
@@ -126846,7 +127259,7 @@ memoryCommand.command("stats").description("Show decision memory statistics").op
|
|
|
126846
127259
|
memoryCommand.command("timeline").description("Show chronological timeline of decisions").option("--project <path>", "Filter to project (default: current directory)", process.cwd()).option("--file <path>", "Filter to decisions about this file").option("--symbol <id>", "Filter to decisions about this symbol").option("--limit <n>", "Max entries (default: 50)", "50").action((opts) => {
|
|
126847
127260
|
const store = openStore();
|
|
126848
127261
|
try {
|
|
126849
|
-
const projectRoot =
|
|
127262
|
+
const projectRoot = path129.resolve(opts.project);
|
|
126850
127263
|
const timeline = store.getTimeline({
|
|
126851
127264
|
project_root: projectRoot,
|
|
126852
127265
|
file_path: opts.file,
|
|
@@ -127231,13 +127644,13 @@ analyticsCommand.command("trends").description("Show daily usage trends: tokens,
|
|
|
127231
127644
|
// src/cli/remove.ts
|
|
127232
127645
|
import { Command as Command13 } from "commander";
|
|
127233
127646
|
import fs117 from "fs";
|
|
127234
|
-
import
|
|
127647
|
+
import path130 from "path";
|
|
127235
127648
|
import * as p4 from "@clack/prompts";
|
|
127236
127649
|
init_registry2();
|
|
127237
127650
|
init_config();
|
|
127238
127651
|
init_global();
|
|
127239
127652
|
var removeCommand = new Command13("remove").description("Unregister a project and delete its index").argument("[dir]", "Project directory (default: current directory)", ".").option("--force", "Remove without confirmation").option("--keep-db", "Keep the database file (only unregister)").option("--json", "Output results as JSON").action(async (dir, opts) => {
|
|
127240
|
-
const resolvedDir =
|
|
127653
|
+
const resolvedDir = path130.resolve(dir);
|
|
127241
127654
|
const isInteractive = !opts.json;
|
|
127242
127655
|
let projectRoot;
|
|
127243
127656
|
try {
|
|
@@ -127268,7 +127681,7 @@ var removeCommand = new Command13("remove").description("Unregister a project an
|
|
|
127268
127681
|
lines.push(`Root: ${entry.root}`);
|
|
127269
127682
|
lines.push(`DB: ${shortPath6(entry.dbPath)}`);
|
|
127270
127683
|
if (entry.type === "multi-root" && entry.children) {
|
|
127271
|
-
lines.push(`Children: ${entry.children.map((c) =>
|
|
127684
|
+
lines.push(`Children: ${entry.children.map((c) => path130.basename(c)).join(", ")}`);
|
|
127272
127685
|
}
|
|
127273
127686
|
p4.note(lines.join("\n"), "Project to remove");
|
|
127274
127687
|
}
|
|
@@ -127321,13 +127734,13 @@ async function handleRemoveFromMultiRoot(childRoot, parent, opts) {
|
|
|
127321
127734
|
p4.note(
|
|
127322
127735
|
`This project is part of multi-root index: ${parent.name}
|
|
127323
127736
|
Parent root: ${parent.root}
|
|
127324
|
-
Child to exclude: ${
|
|
127737
|
+
Child to exclude: ${path130.basename(childRoot)}`,
|
|
127325
127738
|
"Multi-root"
|
|
127326
127739
|
);
|
|
127327
127740
|
}
|
|
127328
127741
|
if (!opts.force && isInteractive) {
|
|
127329
127742
|
const confirm4 = await p4.confirm({
|
|
127330
|
-
message: `Exclude "${
|
|
127743
|
+
message: `Exclude "${path130.basename(childRoot)}" from multi-root "${parent.name}"? (The parent index will be re-registered without this child.)`,
|
|
127331
127744
|
initialValue: false
|
|
127332
127745
|
});
|
|
127333
127746
|
if (p4.isCancel(confirm4) || !confirm4) {
|
|
@@ -127336,7 +127749,7 @@ Child to exclude: ${path129.basename(childRoot)}`,
|
|
|
127336
127749
|
}
|
|
127337
127750
|
}
|
|
127338
127751
|
const currentChildren = parent.children ?? [];
|
|
127339
|
-
const newChildren = currentChildren.filter((c) =>
|
|
127752
|
+
const newChildren = currentChildren.filter((c) => path130.resolve(c) !== path130.resolve(childRoot));
|
|
127340
127753
|
if (newChildren.length === 0) {
|
|
127341
127754
|
if (!opts.keepDb && fs117.existsSync(parent.dbPath)) {
|
|
127342
127755
|
fs117.unlinkSync(parent.dbPath);
|
|
@@ -127367,14 +127780,14 @@ Child to exclude: ${path129.basename(childRoot)}`,
|
|
|
127367
127780
|
if (opts.json) {
|
|
127368
127781
|
console.log(JSON.stringify({
|
|
127369
127782
|
status: "excluded_from_multi_root",
|
|
127370
|
-
excluded:
|
|
127371
|
-
remaining:
|
|
127783
|
+
excluded: path130.basename(childRoot),
|
|
127784
|
+
remaining: path130.basename(remainingChild),
|
|
127372
127785
|
hint: `Run \`trace-mcp add ${remainingChild}\` to re-register the remaining project individually.`
|
|
127373
127786
|
}, null, 2));
|
|
127374
127787
|
} else {
|
|
127375
127788
|
p4.note(
|
|
127376
|
-
`Excluded: ${
|
|
127377
|
-
Only one child remaining: ${
|
|
127789
|
+
`Excluded: ${path130.basename(childRoot)}
|
|
127790
|
+
Only one child remaining: ${path130.basename(remainingChild)}
|
|
127378
127791
|
Multi-root removed. Run \`trace-mcp add ${remainingChild}\` to re-register individually.`,
|
|
127379
127792
|
"Converted"
|
|
127380
127793
|
);
|
|
@@ -127391,14 +127804,14 @@ Multi-root removed. Run \`trace-mcp add ${remainingChild}\` to re-register indiv
|
|
|
127391
127804
|
if (opts.json) {
|
|
127392
127805
|
console.log(JSON.stringify({
|
|
127393
127806
|
status: "excluded_from_multi_root",
|
|
127394
|
-
excluded:
|
|
127395
|
-
remaining: newChildren.map((c) =>
|
|
127807
|
+
excluded: path130.basename(childRoot),
|
|
127808
|
+
remaining: newChildren.map((c) => path130.basename(c)),
|
|
127396
127809
|
hint: `Run \`trace-mcp add ${parent.root}\` to re-register with ${newChildren.length} children.`
|
|
127397
127810
|
}, null, 2));
|
|
127398
127811
|
} else {
|
|
127399
127812
|
p4.note(
|
|
127400
|
-
`Excluded: ${
|
|
127401
|
-
Remaining children: ${newChildren.map((c) =>
|
|
127813
|
+
`Excluded: ${path130.basename(childRoot)}
|
|
127814
|
+
Remaining children: ${newChildren.map((c) => path130.basename(c)).join(", ")}
|
|
127402
127815
|
Run \`trace-mcp add ${parent.root}\` to re-register the multi-root.`,
|
|
127403
127816
|
"Excluded"
|
|
127404
127817
|
);
|
|
@@ -127522,7 +127935,7 @@ init_global();
|
|
|
127522
127935
|
import { execSync as execSync8 } from "child_process";
|
|
127523
127936
|
import fs119 from "fs";
|
|
127524
127937
|
import os14 from "os";
|
|
127525
|
-
import
|
|
127938
|
+
import path131 from "path";
|
|
127526
127939
|
import { Command as Command15 } from "commander";
|
|
127527
127940
|
init_registry2();
|
|
127528
127941
|
function openInBrowser(filePath) {
|
|
@@ -127554,7 +127967,7 @@ var visualizeCommand = new Command15("visualize").alias("viz").description("Gene
|
|
|
127554
127967
|
Modes:
|
|
127555
127968
|
file each node = one file (default)
|
|
127556
127969
|
symbol each node = function/class/method`, "file").option("-k, --symbol-kinds <kinds>", "comma-separated symbol kinds when granularity=symbol (e.g. function,class,method)").option("--hide-isolated", "hide nodes with no edges").option("--max-files <n>", "max seed files for file-level graph (default: 10000)").option("--max-nodes <n>", "max viz nodes for symbol-level graph (default: 100000)").option("-o, --output <path>", "write HTML to this path instead of a temp file").option("--no-open", "write HTML but do not open the browser").option("--dir <dir>", "project directory (default: cwd)").action((scope8, opts) => {
|
|
127557
|
-
const projectDir = opts.dir ?
|
|
127970
|
+
const projectDir = opts.dir ? path131.resolve(opts.dir) : process.cwd();
|
|
127558
127971
|
let projectRoot;
|
|
127559
127972
|
try {
|
|
127560
127973
|
projectRoot = findProjectRoot(projectDir);
|
|
@@ -127570,7 +127983,7 @@ var visualizeCommand = new Command15("visualize").alias("viz").description("Gene
|
|
|
127570
127983
|
console.error(`Run: trace-mcp index ${projectRoot}`);
|
|
127571
127984
|
process.exit(1);
|
|
127572
127985
|
}
|
|
127573
|
-
const outputPath = opts.output ??
|
|
127986
|
+
const outputPath = opts.output ?? path131.join(os14.tmpdir(), "trace-mcp-graph.html");
|
|
127574
127987
|
const db = initializeDatabase(dbPath);
|
|
127575
127988
|
const store = new Store(db);
|
|
127576
127989
|
let topoStore;
|
|
@@ -127606,7 +128019,7 @@ var visualizeCommand = new Command15("visualize").alias("viz").description("Gene
|
|
|
127606
128019
|
});
|
|
127607
128020
|
visualizeCommand.command("subproject").alias("sub").description("Open subproject topology graph in the browser").option("-l, --layout <type>", "graph layout: force | hierarchical | radial", "force").option("-o, --output <path>", "output HTML file path").option("--no-open", "write HTML but do not open the browser").action((opts) => {
|
|
127608
128021
|
ensureGlobalDirs();
|
|
127609
|
-
const outputPath = opts.output ??
|
|
128022
|
+
const outputPath = opts.output ?? path131.join(os14.tmpdir(), "trace-mcp-subproject-topology.html");
|
|
127610
128023
|
const topoStore = new TopologyStore(TOPOLOGY_DB_PATH);
|
|
127611
128024
|
const result = visualizeSubprojectTopology(topoStore, {
|
|
127612
128025
|
layout: opts.layout,
|
|
@@ -127812,6 +128225,85 @@ var askCommand = new Command16("ask").description("Ask questions about your code
|
|
|
127812
128225
|
}
|
|
127813
128226
|
});
|
|
127814
128227
|
|
|
128228
|
+
// src/cli/export-security-context.ts
|
|
128229
|
+
init_schema();
|
|
128230
|
+
init_store();
|
|
128231
|
+
init_config();
|
|
128232
|
+
init_global();
|
|
128233
|
+
init_registry2();
|
|
128234
|
+
import { Command as Command17 } from "commander";
|
|
128235
|
+
import path132 from "path";
|
|
128236
|
+
import fs121 from "fs";
|
|
128237
|
+
init_logger();
|
|
128238
|
+
init_registry();
|
|
128239
|
+
init_all();
|
|
128240
|
+
init_all2();
|
|
128241
|
+
init_pipeline();
|
|
128242
|
+
function resolveDbPath6(projectRoot) {
|
|
128243
|
+
const entry = getProject(projectRoot);
|
|
128244
|
+
if (entry) return entry.dbPath;
|
|
128245
|
+
return getDbPath(projectRoot);
|
|
128246
|
+
}
|
|
128247
|
+
var exportSecurityContextCommand = new Command17("export-security-context").description("Export security context for MCP server analysis (enrichment JSON for skill-scan)").option("-o, --output <path>", "Output file path (default: stdout)", "-").option("--scope <path>", "Limit analysis to directory (relative to project root)").option("--depth <n>", "Call graph traversal depth (default: 3, max: 5)", "3").option("--index", "Re-index project before export", false).action(async (opts) => {
|
|
128248
|
+
let projectRoot;
|
|
128249
|
+
try {
|
|
128250
|
+
projectRoot = findProjectRoot(process.cwd());
|
|
128251
|
+
} catch {
|
|
128252
|
+
projectRoot = process.cwd();
|
|
128253
|
+
}
|
|
128254
|
+
const dbPath = resolveDbPath6(projectRoot);
|
|
128255
|
+
ensureGlobalDirs();
|
|
128256
|
+
const db = initializeDatabase(dbPath);
|
|
128257
|
+
const store = new Store(db);
|
|
128258
|
+
try {
|
|
128259
|
+
if (opts.index) {
|
|
128260
|
+
const configResult = await loadConfig(projectRoot);
|
|
128261
|
+
const config = configResult.isOk() ? configResult.value : {
|
|
128262
|
+
root: projectRoot,
|
|
128263
|
+
include: ["**/*"],
|
|
128264
|
+
exclude: ["vendor/**", "node_modules/**", ".git/**"],
|
|
128265
|
+
db: { path: "" },
|
|
128266
|
+
plugins: [],
|
|
128267
|
+
ignore: { directories: [], patterns: [] },
|
|
128268
|
+
watch: { enabled: false, debounceMs: 2e3 }
|
|
128269
|
+
};
|
|
128270
|
+
const registry = new PluginRegistry();
|
|
128271
|
+
for (const p5 of createAllLanguagePlugins()) registry.registerLanguagePlugin(p5);
|
|
128272
|
+
for (const p5 of createAllIntegrationPlugins()) registry.registerFrameworkPlugin(p5);
|
|
128273
|
+
logger.info("Indexing project...");
|
|
128274
|
+
const pipeline2 = new IndexingPipeline(store, registry, config, projectRoot);
|
|
128275
|
+
await pipeline2.indexAll(false);
|
|
128276
|
+
logger.info("Indexing complete");
|
|
128277
|
+
}
|
|
128278
|
+
const stats = store.getStats();
|
|
128279
|
+
if (stats.totalFiles === 0) {
|
|
128280
|
+
console.error("Error: No files indexed. Run `trace-mcp reindex` first or use --index flag.");
|
|
128281
|
+
process.exit(2);
|
|
128282
|
+
}
|
|
128283
|
+
const depth = Math.min(Math.max(parseInt(opts.depth, 10) || 3, 1), 5);
|
|
128284
|
+
const result = exportSecurityContext(store, projectRoot, {
|
|
128285
|
+
scope: opts.scope,
|
|
128286
|
+
depth
|
|
128287
|
+
});
|
|
128288
|
+
if (result.isErr()) {
|
|
128289
|
+
console.error(`Error: ${result.error.message}`);
|
|
128290
|
+
process.exit(1);
|
|
128291
|
+
}
|
|
128292
|
+
const json = JSON.stringify(result.value, null, 2);
|
|
128293
|
+
if (opts.output === "-") {
|
|
128294
|
+
process.stdout.write(json + "\n");
|
|
128295
|
+
} else {
|
|
128296
|
+
const outputPath = path132.resolve(opts.output);
|
|
128297
|
+
fs121.mkdirSync(path132.dirname(outputPath), { recursive: true });
|
|
128298
|
+
fs121.writeFileSync(outputPath, json, "utf-8");
|
|
128299
|
+
logger.info({ path: outputPath }, "Security context exported");
|
|
128300
|
+
console.error(`Exported to ${outputPath} (${result.value.tool_registrations.length} tool registrations)`);
|
|
128301
|
+
}
|
|
128302
|
+
} finally {
|
|
128303
|
+
db.close();
|
|
128304
|
+
}
|
|
128305
|
+
});
|
|
128306
|
+
|
|
127815
128307
|
// src/cli.ts
|
|
127816
128308
|
init_hooks();
|
|
127817
128309
|
init_global();
|
|
@@ -127845,8 +128337,8 @@ function runSubprojectAutoSync(projectRoot, config) {
|
|
|
127845
128337
|
sm.syncContracts();
|
|
127846
128338
|
sm.syncClientCalls();
|
|
127847
128339
|
db.close();
|
|
127848
|
-
} catch (
|
|
127849
|
-
logger.warn({ error:
|
|
128340
|
+
} catch (err49) {
|
|
128341
|
+
logger.warn({ error: err49, projectRoot }, "Subproject auto-sync failed (non-fatal)");
|
|
127850
128342
|
}
|
|
127851
128343
|
}
|
|
127852
128344
|
var ProjectManager = class {
|
|
@@ -127895,14 +128387,14 @@ var ProjectManager = class {
|
|
|
127895
128387
|
) : null;
|
|
127896
128388
|
const runEmbeddings = () => {
|
|
127897
128389
|
if (!embeddingPipeline) return;
|
|
127898
|
-
embeddingPipeline.indexUnembedded().catch((
|
|
127899
|
-
logger.error({ error:
|
|
128390
|
+
embeddingPipeline.indexUnembedded().catch((err49) => {
|
|
128391
|
+
logger.error({ error: err49, projectRoot }, "Embedding indexing failed");
|
|
127900
128392
|
});
|
|
127901
128393
|
};
|
|
127902
128394
|
const runSummarization = () => {
|
|
127903
128395
|
if (!summarizationPipeline) return;
|
|
127904
|
-
summarizationPipeline.summarizeUnsummarized().catch((
|
|
127905
|
-
logger.error({ error:
|
|
128396
|
+
summarizationPipeline.summarizeUnsummarized().catch((err49) => {
|
|
128397
|
+
logger.error({ error: err49, projectRoot }, "Summarization failed");
|
|
127906
128398
|
});
|
|
127907
128399
|
};
|
|
127908
128400
|
const server = createServer2(store, registry, config, projectRoot, progress);
|
|
@@ -127926,10 +128418,10 @@ var ProjectManager = class {
|
|
|
127926
128418
|
runEmbeddings();
|
|
127927
128419
|
runSubprojectAutoSync(projectRoot, config);
|
|
127928
128420
|
logger.info({ projectRoot }, "Project indexing complete");
|
|
127929
|
-
}).catch((
|
|
128421
|
+
}).catch((err49) => {
|
|
127930
128422
|
managed.status = "error";
|
|
127931
|
-
managed.error = String(
|
|
127932
|
-
logger.error({ error:
|
|
128423
|
+
managed.error = String(err49);
|
|
128424
|
+
logger.error({ error: err49, projectRoot }, "Initial indexing failed");
|
|
127933
128425
|
});
|
|
127934
128426
|
await watcher.start(projectRoot, config, async (paths) => {
|
|
127935
128427
|
await pipeline2.indexFiles(paths);
|
|
@@ -127986,12 +128478,12 @@ var ProjectManager = class {
|
|
|
127986
128478
|
|
|
127987
128479
|
// src/cli.ts
|
|
127988
128480
|
init_global();
|
|
127989
|
-
var
|
|
128481
|
+
var PKG_VERSION3 = true ? "1.22.0" : "0.0.0-dev";
|
|
127990
128482
|
function registerDefaultPlugins2(registry) {
|
|
127991
128483
|
for (const p5 of createAllLanguagePlugins()) registry.registerLanguagePlugin(p5);
|
|
127992
128484
|
for (const p5 of createAllIntegrationPlugins()) registry.registerFrameworkPlugin(p5);
|
|
127993
128485
|
}
|
|
127994
|
-
function
|
|
128486
|
+
function resolveDbPath7(projectRoot) {
|
|
127995
128487
|
const entry = getProject(projectRoot);
|
|
127996
128488
|
if (entry) return entry.dbPath;
|
|
127997
128489
|
return getDbPath(projectRoot);
|
|
@@ -128027,8 +128519,8 @@ function runSubprojectAutoSync2(projectRoot, config) {
|
|
|
128027
128519
|
logger.warn({ error: e }, "Subproject auto-sync failed (non-fatal)");
|
|
128028
128520
|
}
|
|
128029
128521
|
}
|
|
128030
|
-
var program = new
|
|
128031
|
-
program.name("trace-mcp").description("Framework-Aware Code Intelligence for Laravel/Vue/Inertia/Nuxt").version(
|
|
128522
|
+
var program = new Command18();
|
|
128523
|
+
program.name("trace-mcp").description("Framework-Aware Code Intelligence for Laravel/Vue/Inertia/Nuxt").version(PKG_VERSION3, "-v, --version");
|
|
128032
128524
|
program.command("serve", { isDefault: true }).description("Start MCP server (stdio transport)").action(async () => {
|
|
128033
128525
|
const projectRoot = process.cwd();
|
|
128034
128526
|
const globalRaw = loadGlobalConfigRaw();
|
|
@@ -128049,7 +128541,7 @@ program.command("serve", { isDefault: true }).description("Start MCP server (std
|
|
|
128049
128541
|
if (!existing) {
|
|
128050
128542
|
try {
|
|
128051
128543
|
const root = findProjectRoot(indexRoot);
|
|
128052
|
-
if (root ===
|
|
128544
|
+
if (root === path133.resolve(indexRoot)) {
|
|
128053
128545
|
setupProject(root);
|
|
128054
128546
|
logger.info({ root }, "Auto-registered project");
|
|
128055
128547
|
} else {
|
|
@@ -128067,101 +128559,145 @@ program.command("serve", { isDefault: true }).description("Start MCP server (std
|
|
|
128067
128559
|
if (config.logging) {
|
|
128068
128560
|
attachFileLogging(config.logging);
|
|
128069
128561
|
}
|
|
128070
|
-
|
|
128071
|
-
|
|
128072
|
-
|
|
128073
|
-
|
|
128074
|
-
|
|
128075
|
-
|
|
128076
|
-
|
|
128077
|
-
|
|
128562
|
+
let shuttingDown = false;
|
|
128563
|
+
const makeShutdown = (cleanup) => {
|
|
128564
|
+
return async (reason) => {
|
|
128565
|
+
if (shuttingDown) return;
|
|
128566
|
+
shuttingDown = true;
|
|
128567
|
+
logger.info({ reason: reason ?? "unknown" }, "Shutting down trace-mcp server");
|
|
128568
|
+
if (cleanup) await cleanup().catch(() => {
|
|
128569
|
+
});
|
|
128570
|
+
process.exit(0);
|
|
128571
|
+
};
|
|
128572
|
+
};
|
|
128573
|
+
const attachLifecycleHandlers = (shutdown2, idleTimeoutMs) => {
|
|
128574
|
+
process.on("SIGINT", () => shutdown2("SIGINT"));
|
|
128575
|
+
process.on("SIGTERM", () => shutdown2("SIGTERM"));
|
|
128576
|
+
process.stdin.on("end", () => shutdown2("stdin-end"));
|
|
128577
|
+
process.stdin.on("close", () => shutdown2("stdin-close"));
|
|
128578
|
+
let idleTimer = null;
|
|
128579
|
+
const resetIdleTimer = () => {
|
|
128580
|
+
if (idleTimer) clearTimeout(idleTimer);
|
|
128581
|
+
idleTimer = setTimeout(() => shutdown2("idle-timeout"), idleTimeoutMs);
|
|
128582
|
+
idleTimer.unref();
|
|
128583
|
+
};
|
|
128584
|
+
resetIdleTimer();
|
|
128585
|
+
process.stdin.on("data", resetIdleTimer);
|
|
128586
|
+
};
|
|
128587
|
+
const IDLE_TIMEOUT_MS = config.idle_timeout_minutes ? config.idle_timeout_minutes * 6e4 : 5 * 6e4;
|
|
128078
128588
|
const daemonActive = await isDaemonRunning(DEFAULT_DAEMON_PORT);
|
|
128079
|
-
let watcher = null;
|
|
128080
|
-
let daemonClientId = null;
|
|
128081
128589
|
if (daemonActive) {
|
|
128082
|
-
|
|
128083
|
-
|
|
128084
|
-
|
|
128590
|
+
const daemonUrl = `http://127.0.0.1:${DEFAULT_DAEMON_PORT}`;
|
|
128591
|
+
const mcpUrl = `${daemonUrl}/mcp?project=${encodeURIComponent(projectRoot)}`;
|
|
128592
|
+
const clientId = randomUUID();
|
|
128593
|
+
await fetch(`${daemonUrl}/api/projects`, {
|
|
128085
128594
|
method: "POST",
|
|
128086
128595
|
headers: { "Content-Type": "application/json" },
|
|
128087
|
-
body: JSON.stringify({
|
|
128596
|
+
body: JSON.stringify({ root: projectRoot })
|
|
128088
128597
|
}).catch(() => {
|
|
128089
128598
|
});
|
|
128090
|
-
|
|
128091
|
-
|
|
128092
|
-
|
|
128093
|
-
|
|
128094
|
-
|
|
128095
|
-
|
|
128096
|
-
const
|
|
128097
|
-
const
|
|
128098
|
-
|
|
128099
|
-
|
|
128100
|
-
|
|
128101
|
-
|
|
128102
|
-
projectRoot,
|
|
128103
|
-
{
|
|
128104
|
-
batchSize: config.ai.summarize_batch_size ?? 20,
|
|
128105
|
-
kinds: config.ai.summarize_kinds ?? ["class", "function", "method", "interface", "trait", "enum", "type"],
|
|
128106
|
-
concurrency: config.ai.concurrency ?? 1
|
|
128107
|
-
},
|
|
128108
|
-
progress
|
|
128109
|
-
) : null;
|
|
128110
|
-
const runEmbeddings = () => {
|
|
128111
|
-
if (!embeddingPipeline) return;
|
|
128112
|
-
embeddingPipeline.indexUnembedded().catch((err48) => {
|
|
128113
|
-
logger.error({ error: err48 }, "Embedding indexing failed");
|
|
128599
|
+
fetch(`${daemonUrl}/api/clients`, {
|
|
128600
|
+
method: "POST",
|
|
128601
|
+
headers: { "Content-Type": "application/json" },
|
|
128602
|
+
body: JSON.stringify({ id: clientId, project: projectRoot, transport: "stdio-proxy" })
|
|
128603
|
+
}).catch(() => {
|
|
128604
|
+
});
|
|
128605
|
+
const { StreamableHTTPClientTransport } = await import("@modelcontextprotocol/sdk/client/streamableHttp.js");
|
|
128606
|
+
const httpTransport = new StreamableHTTPClientTransport(new URL(mcpUrl));
|
|
128607
|
+
const stdioTransport = new StdioServerTransport();
|
|
128608
|
+
stdioTransport.onmessage = (msg) => {
|
|
128609
|
+
httpTransport.send(msg).catch((err49) => {
|
|
128610
|
+
logger.error({ error: err49 }, "Proxy: failed to forward message to daemon");
|
|
128114
128611
|
});
|
|
128115
128612
|
};
|
|
128116
|
-
|
|
128117
|
-
|
|
128118
|
-
|
|
128119
|
-
logger.error({ error: err48 }, "Summarization failed");
|
|
128613
|
+
httpTransport.onmessage = (msg) => {
|
|
128614
|
+
stdioTransport.send(msg).catch((err49) => {
|
|
128615
|
+
logger.error({ error: err49 }, "Proxy: failed to forward message to client");
|
|
128120
128616
|
});
|
|
128121
128617
|
};
|
|
128122
|
-
|
|
128123
|
-
|
|
128124
|
-
|
|
128125
|
-
|
|
128126
|
-
|
|
128127
|
-
|
|
128128
|
-
|
|
128129
|
-
|
|
128130
|
-
await
|
|
128131
|
-
|
|
128132
|
-
runEmbeddings();
|
|
128133
|
-
}, void 0, async (deleted) => {
|
|
128134
|
-
pipeline2.deleteFiles(deleted);
|
|
128618
|
+
httpTransport.onerror = (err49) => {
|
|
128619
|
+
logger.error({ error: err49 }, "Proxy: HTTP transport error");
|
|
128620
|
+
};
|
|
128621
|
+
const shutdown2 = makeShutdown(async () => {
|
|
128622
|
+
await fetch(`${daemonUrl}/api/clients?id=${clientId}`, { method: "DELETE" }).catch(() => {
|
|
128623
|
+
});
|
|
128624
|
+
await httpTransport.close().catch(() => {
|
|
128625
|
+
});
|
|
128626
|
+
await stdioTransport.close().catch(() => {
|
|
128627
|
+
});
|
|
128135
128628
|
});
|
|
128629
|
+
attachLifecycleHandlers(shutdown2, IDLE_TIMEOUT_MS);
|
|
128630
|
+
await httpTransport.start();
|
|
128631
|
+
await stdioTransport.start();
|
|
128632
|
+
logger.info({ projectRoot, mode: "proxy", daemonUrl, idleTimeoutMs: IDLE_TIMEOUT_MS }, "trace-mcp proxy started (forwarding to daemon)");
|
|
128633
|
+
return;
|
|
128136
128634
|
}
|
|
128137
|
-
const
|
|
128635
|
+
const dbPath = resolveDbPath7(indexRoot);
|
|
128636
|
+
ensureGlobalDirs();
|
|
128637
|
+
const db = initializeDatabase(dbPath);
|
|
128638
|
+
writeServerPid(db);
|
|
128639
|
+
const store = new Store(db);
|
|
128640
|
+
const registry = new PluginRegistry();
|
|
128641
|
+
registerDefaultPlugins2(registry);
|
|
128642
|
+
const progress = new ProgressState(db);
|
|
128643
|
+
let watcher = null;
|
|
128644
|
+
const pipeline2 = new IndexingPipeline(store, registry, config, projectRoot, progress);
|
|
128645
|
+
watcher = new FileWatcher();
|
|
128646
|
+
const aiProvider = createAIProvider(config);
|
|
128647
|
+
const vectorStore = config.ai?.enabled ? new BlobVectorStore(store.db) : null;
|
|
128648
|
+
const embeddingService = config.ai?.enabled ? aiProvider.embedding() : null;
|
|
128649
|
+
const embeddingPipeline = vectorStore && embeddingService ? new EmbeddingPipeline(store, embeddingService, vectorStore, progress) : null;
|
|
128650
|
+
const inferenceCache = config.ai?.enabled ? new InferenceCache(store.db) : null;
|
|
128651
|
+
inferenceCache?.evictExpired();
|
|
128652
|
+
const summarizationPipeline = config.ai?.enabled && config.ai.summarize_on_index !== false ? new SummarizationPipeline2(
|
|
128653
|
+
store,
|
|
128654
|
+
new CachedInferenceService(aiProvider.fastInference(), inferenceCache, config.ai.fast_model ?? "fast"),
|
|
128655
|
+
projectRoot,
|
|
128656
|
+
{
|
|
128657
|
+
batchSize: config.ai.summarize_batch_size ?? 20,
|
|
128658
|
+
kinds: config.ai.summarize_kinds ?? ["class", "function", "method", "interface", "trait", "enum", "type"],
|
|
128659
|
+
concurrency: config.ai.concurrency ?? 1
|
|
128660
|
+
},
|
|
128661
|
+
progress
|
|
128662
|
+
) : null;
|
|
128663
|
+
const runEmbeddings = () => {
|
|
128664
|
+
if (!embeddingPipeline) return;
|
|
128665
|
+
embeddingPipeline.indexUnembedded().catch((err49) => {
|
|
128666
|
+
logger.error({ error: err49 }, "Embedding indexing failed");
|
|
128667
|
+
});
|
|
128668
|
+
};
|
|
128669
|
+
const runSummarization = () => {
|
|
128670
|
+
if (!summarizationPipeline) return;
|
|
128671
|
+
summarizationPipeline.summarizeUnsummarized().catch((err49) => {
|
|
128672
|
+
logger.error({ error: err49 }, "Summarization failed");
|
|
128673
|
+
});
|
|
128674
|
+
};
|
|
128675
|
+
pipeline2.indexAll().then(() => {
|
|
128676
|
+
runSummarization();
|
|
128677
|
+
runEmbeddings();
|
|
128678
|
+
runSubprojectAutoSync2(projectRoot, config);
|
|
128679
|
+
}).catch((err49) => {
|
|
128680
|
+
logger.error({ error: err49 }, "Initial indexing failed");
|
|
128681
|
+
});
|
|
128682
|
+
await watcher.start(projectRoot, config, async (paths) => {
|
|
128683
|
+
await pipeline2.indexFiles(paths);
|
|
128684
|
+
runSummarization();
|
|
128685
|
+
runEmbeddings();
|
|
128686
|
+
}, void 0, async (deleted) => {
|
|
128687
|
+
pipeline2.deleteFiles(deleted);
|
|
128688
|
+
});
|
|
128689
|
+
const shutdown = makeShutdown(async () => {
|
|
128138
128690
|
clearServerPid(db);
|
|
128139
128691
|
if (watcher) await watcher.stop();
|
|
128140
|
-
|
|
128141
|
-
|
|
128142
|
-
|
|
128692
|
+
try {
|
|
128693
|
+
db.close();
|
|
128694
|
+
} catch {
|
|
128143
128695
|
}
|
|
128144
|
-
|
|
128145
|
-
|
|
128146
|
-
process.on("SIGINT", shutdown);
|
|
128147
|
-
process.on("SIGTERM", shutdown);
|
|
128696
|
+
});
|
|
128697
|
+
attachLifecycleHandlers(shutdown, IDLE_TIMEOUT_MS);
|
|
128148
128698
|
const server = createServer2(store, registry, config, projectRoot, progress);
|
|
128149
128699
|
const transport = new StdioServerTransport();
|
|
128150
|
-
|
|
128151
|
-
const cid = daemonClientId;
|
|
128152
|
-
server.server.oninitialized = () => {
|
|
128153
|
-
const clientVersion = server.server.getClientVersion();
|
|
128154
|
-
if (clientVersion?.name) {
|
|
128155
|
-
fetch(`http://127.0.0.1:${DEFAULT_DAEMON_PORT}/api/clients`, {
|
|
128156
|
-
method: "PATCH",
|
|
128157
|
-
headers: { "Content-Type": "application/json" },
|
|
128158
|
-
body: JSON.stringify({ id: cid, name: clientVersion.name })
|
|
128159
|
-
}).catch(() => {
|
|
128160
|
-
});
|
|
128161
|
-
}
|
|
128162
|
-
};
|
|
128163
|
-
}
|
|
128164
|
-
logger.info({ projectRoot, indexRoot, dbPath, daemonActive }, "Starting trace-mcp MCP server...");
|
|
128700
|
+
logger.info({ projectRoot, indexRoot, dbPath, mode: "full", idleTimeoutMs: IDLE_TIMEOUT_MS }, "Starting trace-mcp MCP server...");
|
|
128165
128701
|
await server.connect(transport);
|
|
128166
128702
|
});
|
|
128167
128703
|
program.command("serve-http").description("Start MCP server (HTTP/SSE transport) \u2014 daemon mode, indexes all registered projects").option("-p, --port <port>", "Port to listen on", "3741").option("--host <host>", "Host to bind to", "127.0.0.1").action(async (opts) => {
|
|
@@ -128497,7 +129033,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128497
129033
|
const maxNodes = url2.searchParams.has("maxNodes") ? parseInt(url2.searchParams.get("maxNodes"), 10) : void 0;
|
|
128498
129034
|
let topoStore;
|
|
128499
129035
|
try {
|
|
128500
|
-
if (
|
|
129036
|
+
if (fs122.existsSync(TOPOLOGY_DB_PATH)) topoStore = new TopologyStore(TOPOLOGY_DB_PATH);
|
|
128501
129037
|
} catch {
|
|
128502
129038
|
}
|
|
128503
129039
|
try {
|
|
@@ -128549,7 +129085,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128549
129085
|
const highlightDepth = url2.searchParams.has("highlightDepth") ? parseInt(url2.searchParams.get("highlightDepth"), 10) : void 0;
|
|
128550
129086
|
let topoStore;
|
|
128551
129087
|
try {
|
|
128552
|
-
if (
|
|
129088
|
+
if (fs122.existsSync(TOPOLOGY_DB_PATH)) topoStore = new TopologyStore(TOPOLOGY_DB_PATH);
|
|
128553
129089
|
} catch {
|
|
128554
129090
|
}
|
|
128555
129091
|
try {
|
|
@@ -128802,7 +129338,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128802
129338
|
try {
|
|
128803
129339
|
let topoStore;
|
|
128804
129340
|
try {
|
|
128805
|
-
if (
|
|
129341
|
+
if (fs122.existsSync(TOPOLOGY_DB_PATH)) topoStore = new TopologyStore(TOPOLOGY_DB_PATH);
|
|
128806
129342
|
} catch {
|
|
128807
129343
|
}
|
|
128808
129344
|
if (!topoStore) {
|
|
@@ -128875,7 +129411,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128875
129411
|
try {
|
|
128876
129412
|
let topoStore;
|
|
128877
129413
|
try {
|
|
128878
|
-
if (
|
|
129414
|
+
if (fs122.existsSync(TOPOLOGY_DB_PATH)) topoStore = new TopologyStore(TOPOLOGY_DB_PATH);
|
|
128879
129415
|
} catch {
|
|
128880
129416
|
}
|
|
128881
129417
|
if (!topoStore) {
|
|
@@ -128925,8 +129461,8 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128925
129461
|
res.end(JSON.stringify({ error: `Project not found: ${projectRoot}` }));
|
|
128926
129462
|
return;
|
|
128927
129463
|
}
|
|
128928
|
-
managed.pipeline.indexAll(true).catch((
|
|
128929
|
-
logger.error({ error:
|
|
129464
|
+
managed.pipeline.indexAll(true).catch((err49) => {
|
|
129465
|
+
logger.error({ error: err49, projectRoot }, "Reindex failed");
|
|
128930
129466
|
});
|
|
128931
129467
|
res.writeHead(202, { "Content-Type": "application/json" });
|
|
128932
129468
|
res.end(JSON.stringify({ status: "reindex_started", project: projectRoot }));
|
|
@@ -128936,7 +129472,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
128936
129472
|
try {
|
|
128937
129473
|
const body = await collectBody(req);
|
|
128938
129474
|
const { root } = JSON.parse(body.toString());
|
|
128939
|
-
if (!root || !
|
|
129475
|
+
if (!root || !fs122.existsSync(root)) {
|
|
128940
129476
|
res.writeHead(400, { "Content-Type": "application/json" });
|
|
128941
129477
|
res.end(JSON.stringify({ error: "Invalid or missing root path" }));
|
|
128942
129478
|
return;
|
|
@@ -129052,7 +129588,7 @@ program.command("serve-http").description("Start MCP server (HTTP/SSE transport)
|
|
|
129052
129588
|
if (value !== void 0) merged[key] = value;
|
|
129053
129589
|
}
|
|
129054
129590
|
ensureGlobalDirs();
|
|
129055
|
-
|
|
129591
|
+
fs122.writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(merged, null, 2));
|
|
129056
129592
|
res.writeHead(200, { "Content-Type": "application/json" });
|
|
129057
129593
|
res.end(JSON.stringify({ status: "updated", settings: merged }));
|
|
129058
129594
|
} catch (e) {
|
|
@@ -129235,8 +129771,8 @@ ${lastUserMsg.content}`
|
|
|
129235
129771
|
});
|
|
129236
129772
|
});
|
|
129237
129773
|
program.command("index").description("Index a project directory").argument("<dir>", "Directory to index").option("-f, --force", "Force reindex all files").action(async (dir, opts) => {
|
|
129238
|
-
const resolvedDir =
|
|
129239
|
-
if (!
|
|
129774
|
+
const resolvedDir = path133.resolve(dir);
|
|
129775
|
+
if (!fs122.existsSync(resolvedDir)) {
|
|
129240
129776
|
logger.error({ dir: resolvedDir }, "Directory does not exist");
|
|
129241
129777
|
process.exit(1);
|
|
129242
129778
|
}
|
|
@@ -129246,7 +129782,7 @@ program.command("index").description("Index a project directory").argument("<dir
|
|
|
129246
129782
|
process.exit(1);
|
|
129247
129783
|
}
|
|
129248
129784
|
const config = configResult.value;
|
|
129249
|
-
const dbPath =
|
|
129785
|
+
const dbPath = resolveDbPath7(resolvedDir);
|
|
129250
129786
|
ensureGlobalDirs();
|
|
129251
129787
|
const db = initializeDatabase(dbPath);
|
|
129252
129788
|
const store = new Store(db);
|
|
@@ -129260,20 +129796,20 @@ program.command("index").description("Index a project directory").argument("<dir
|
|
|
129260
129796
|
db.close();
|
|
129261
129797
|
});
|
|
129262
129798
|
program.command("index-file").description("Incrementally reindex a single file (called by the PostToolUse auto-reindex hook)").argument("<file>", "Absolute or relative path to the file to reindex").action(async (file) => {
|
|
129263
|
-
const resolvedFile =
|
|
129264
|
-
if (!
|
|
129799
|
+
const resolvedFile = path133.resolve(file);
|
|
129800
|
+
if (!fs122.existsSync(resolvedFile)) {
|
|
129265
129801
|
process.exit(0);
|
|
129266
129802
|
}
|
|
129267
129803
|
let projectRoot;
|
|
129268
129804
|
try {
|
|
129269
|
-
projectRoot = findProjectRoot(
|
|
129805
|
+
projectRoot = findProjectRoot(path133.dirname(resolvedFile));
|
|
129270
129806
|
} catch {
|
|
129271
129807
|
process.exit(0);
|
|
129272
129808
|
}
|
|
129273
129809
|
const configResult = await loadConfig(projectRoot);
|
|
129274
129810
|
if (configResult.isErr()) process.exit(0);
|
|
129275
129811
|
const config = configResult.value;
|
|
129276
|
-
const dbPath =
|
|
129812
|
+
const dbPath = resolveDbPath7(projectRoot);
|
|
129277
129813
|
ensureGlobalDirs();
|
|
129278
129814
|
const db = initializeDatabase(dbPath);
|
|
129279
129815
|
const store = new Store(db);
|
|
@@ -129303,7 +129839,7 @@ program.command("list").description("List all registered projects").option("--js
|
|
|
129303
129839
|
console.log("Registered projects:\n");
|
|
129304
129840
|
for (const p5 of projects) {
|
|
129305
129841
|
const lastIdx = p5.lastIndexed ? new Date(p5.lastIndexed).toLocaleString() : "never";
|
|
129306
|
-
const dbExists =
|
|
129842
|
+
const dbExists = fs122.existsSync(p5.dbPath) ? "ok" : "missing";
|
|
129307
129843
|
console.log(` ${p5.name}`);
|
|
129308
129844
|
console.log(` Root: ${p5.root}`);
|
|
129309
129845
|
console.log(` DB: ${dbExists}`);
|
|
@@ -129328,6 +129864,7 @@ program.addCommand(visualizeCommand);
|
|
|
129328
129864
|
program.addCommand(daemonCommand);
|
|
129329
129865
|
program.addCommand(installAppCommand);
|
|
129330
129866
|
program.addCommand(askCommand);
|
|
129867
|
+
program.addCommand(exportSecurityContextCommand);
|
|
129331
129868
|
program.parse();
|
|
129332
129869
|
/*! Bundled license information:
|
|
129333
129870
|
|