memtrace 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/memtrace.js +25 -0
- package/install.js +64 -0
- package/package.json +41 -0
- package/skills/commands/memtrace-api-topology.md +62 -0
- package/skills/commands/memtrace-evolution.md +119 -0
- package/skills/commands/memtrace-graph.md +67 -0
- package/skills/commands/memtrace-impact.md +61 -0
- package/skills/commands/memtrace-index.md +63 -0
- package/skills/commands/memtrace-quality.md +66 -0
- package/skills/commands/memtrace-relationships.md +70 -0
- package/skills/commands/memtrace-search.md +62 -0
- package/skills/workflows/memtrace-change-impact-analysis.md +85 -0
- package/skills/workflows/memtrace-codebase-exploration.md +108 -0
- package/skills/workflows/memtrace-incident-investigation.md +104 -0
- package/skills/workflows/memtrace-refactoring-guide.md +116 -0
package/bin/memtrace.js
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";

// CLI launcher: resolves the platform-specific memtrace binary and
// re-executes it with the user's arguments, forwarding stdio.

const { spawnSync } = require("child_process");
const { getBinaryPath } = require("../install.js");

let binaryPath;
try {
  binaryPath = getBinaryPath();
} catch (e) {
  console.error(`Error: ${e.message}`);
  process.exit(1);
}

const result = spawnSync(binaryPath, process.argv.slice(2), {
  stdio: "inherit",
  env: process.env,
});

if (result.error) {
  // spawnSync itself failed (e.g. binary missing or not executable).
  console.error(`Failed to run memtrace: ${result.error.message}`);
  process.exit(1);
}

// BUGFIX: if the child was terminated by a signal, `result.status` is
// null and `status ?? 0` would report success. Re-raise the signal on
// ourselves so callers (shells, CI) observe the same termination.
if (result.signal) {
  process.kill(process.pid, result.signal);
}

process.exit(result.status ?? 0);
|
package/install.js
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";

const os = require("os");
const path = require("path"); // NOTE(review): appears unused in this file — confirm before removing
const fs = require("fs");

// Maps `${os.platform()}-${os.arch()}` keys to the optional-dependency
// package that ships the prebuilt memtrace binary for that target.
// Keep in sync with "optionalDependencies" in package.json.
const PLATFORM_MAP = {
  "darwin-arm64": "@memtrace/darwin-arm64",
  "darwin-x64": "@memtrace/darwin-x64",
  "linux-x64": "@memtrace/linux-x64",
  "linux-arm64": "@memtrace/linux-arm64",
  "win32-x64": "@memtrace/win32-x64",
};
|
|
15
|
+
|
|
16
|
+
// Build the lookup key for PLATFORM_MAP, e.g. "darwin-arm64" or "win32-x64".
function getPlatformKey() {
  return `${os.platform()}-${os.arch()}`;
}
|
|
21
|
+
|
|
22
|
+
/**
 * Resolve the absolute path to the platform-specific memtrace binary.
 *
 * @returns {string} Path to the binary inside the matching
 *   `@memtrace/*` optional-dependency package.
 * @throws {Error} If the current platform/arch is unsupported, or the
 *   optional dependency providing the binary cannot be resolved.
 */
function getBinaryPath() {
  const key = getPlatformKey();
  const pkg = PLATFORM_MAP[key];

  if (!pkg) {
    throw new Error(
      `Memtrace does not support platform: ${key}\n` +
        `Supported: ${Object.keys(PLATFORM_MAP).join(", ")}`
    );
  }

  // Windows binaries carry an .exe suffix; Unix binaries have none.
  const ext = os.platform() === "win32" ? ".exe" : "";
  const binaryName = `memtrace${ext}`;

  try {
    // Resolve through Node's module resolution so hoisted/nested
    // node_modules layouts both work.
    return require.resolve(`${pkg}/bin/${binaryName}`);
  } catch (e) {
    // BUGFIX: preserve the underlying resolution error as `cause`
    // instead of discarding it — keeps the real diagnostic available.
    throw new Error(
      `Could not find memtrace binary for ${key}.\n` +
        `Try reinstalling: npm install -g memtrace`,
      { cause: e }
    );
  }
}
|
|
45
|
+
|
|
46
|
+
// Postinstall sanity check: confirm the resolved binary exists on disk
// and carries the executable bit on Unix platforms.
function verifyInstall() {
  const bin = getBinaryPath();
  if (!fs.existsSync(bin)) {
    throw new Error(`Binary not found at: ${bin}`);
  }
  if (os.platform() !== "win32") {
    // npm does not always preserve file modes; set the exec bit explicitly.
    fs.chmodSync(bin, 0o755);
  }
  console.log(`memtrace: installed binary at ${bin}`);
}

// Run only when invoked directly (the package.json "postinstall" hook);
// when required as a module, just expose the resolver.
if (require.main === module) {
  try {
    verifyInstall();
  } catch (e) {
    // Non-fatal — optional deps may not be installed in all environments
    console.warn(`memtrace: ${e.message}`);
  }
}

module.exports = { getBinaryPath };
|
package/package.json
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "memtrace",
|
|
3
|
+
"version": "0.1.3",
|
|
4
|
+
"description": "Code intelligence graph — MCP server + AI agent skills + visualization UI",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"mcp",
|
|
7
|
+
"code-intelligence",
|
|
8
|
+
"ai",
|
|
9
|
+
"memgraph",
|
|
10
|
+
"graph",
|
|
11
|
+
"skills",
|
|
12
|
+
"claude-code"
|
|
13
|
+
],
|
|
14
|
+
"homepage": "https://memtrace.dev",
|
|
15
|
+
"repository": {
|
|
16
|
+
"type": "git",
|
|
17
|
+
"url": "git+https://github.com/syncable-dev/memtrace.git"
|
|
18
|
+
},
|
|
19
|
+
"license": "FSL-1.1-MIT",
|
|
20
|
+
"bin": {
|
|
21
|
+
"memtrace": "bin/memtrace.js"
|
|
22
|
+
},
|
|
23
|
+
"files": [
|
|
24
|
+
"bin/",
|
|
25
|
+
"skills/",
|
|
26
|
+
"install.js"
|
|
27
|
+
],
|
|
28
|
+
"scripts": {
|
|
29
|
+
"postinstall": "node install.js"
|
|
30
|
+
},
|
|
31
|
+
"optionalDependencies": {
|
|
32
|
+
"@memtrace/darwin-arm64": "0.1.0",
|
|
33
|
+
"@memtrace/darwin-x64": "0.1.0",
|
|
34
|
+
"@memtrace/linux-x64": "0.1.0",
|
|
35
|
+
"@memtrace/linux-arm64": "0.1.0",
|
|
36
|
+
"@memtrace/win32-x64": "0.1.0"
|
|
37
|
+
},
|
|
38
|
+
"engines": {
|
|
39
|
+
"node": ">=18"
|
|
40
|
+
}
|
|
41
|
+
}
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-api-topology
|
|
3
|
+
description: "Use when the user asks about API endpoints, HTTP routes, service-to-service calls, microservice dependencies, API topology, which services call which, cross-repo dependencies, or wants to understand the API surface of a codebase"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__get_api_topology
|
|
6
|
+
- mcp__memtrace__find_api_endpoints
|
|
7
|
+
- mcp__memtrace__find_api_calls
|
|
8
|
+
- mcp__memtrace__get_symbol_context
|
|
9
|
+
- mcp__memtrace__link_repositories
|
|
10
|
+
user-invocable: true
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
## Overview
|
|
14
|
+
|
|
15
|
+
Map the HTTP API surface of a codebase — exposed endpoints, outbound HTTP calls, and cross-repo service-to-service dependency graphs. Supports auto-detection for Express, Encore, NestJS, Axum, FastAPI, Flask, Gin, Spring Boot, and more.
|
|
16
|
+
|
|
17
|
+
## Quick Reference
|
|
18
|
+
|
|
19
|
+
| Tool | Purpose |
|
|
20
|
+
|------|---------|
|
|
21
|
+
| `find_api_endpoints` | All exposed HTTP endpoints (GET /users, POST /orders, etc.) |
|
|
22
|
+
| `find_api_calls` | All outbound HTTP calls (fetch, axios, reqwest, etc.) |
|
|
23
|
+
| `get_api_topology` | Cross-repo call graph: which service calls which endpoint |
|
|
24
|
+
| `link_repositories` | Manually link repos for cross-repo edge detection |
|
|
25
|
+
|
|
26
|
+
## Steps
|
|
27
|
+
|
|
28
|
+
### 1. Discover endpoints
|
|
29
|
+
|
|
30
|
+
Use `find_api_endpoints`:
|
|
31
|
+
- `repo_id` — required
|
|
32
|
+
- Returns: method, path, handler function, framework detected
|
|
33
|
+
|
|
34
|
+
### 2. Discover outbound calls
|
|
35
|
+
|
|
36
|
+
Use `find_api_calls`:
|
|
37
|
+
- `repo_id` — required
|
|
38
|
+
- Returns: target URL/path, HTTP method, calling function, library used (fetch, axios, reqwest, etc.)
|
|
39
|
+
|
|
40
|
+
### 3. Map service topology
|
|
41
|
+
|
|
42
|
+
Use `get_api_topology` to see the cross-repo HTTP call graph:
|
|
43
|
+
- Which services call which endpoints
|
|
44
|
+
- Confidence scores for each detected link
|
|
45
|
+
- Service-to-service dependency direction
|
|
46
|
+
|
|
47
|
+
**Prerequisite:** Multiple repos must be indexed. If cross-repo links aren't appearing, use `link_repositories` to explicitly connect them.
|
|
48
|
+
|
|
49
|
+
### 4. Deep-dive into an endpoint
|
|
50
|
+
|
|
51
|
+
For any specific endpoint, use `get_symbol_context` with the endpoint's symbol ID to see:
|
|
52
|
+
- Which internal functions handle the request
|
|
53
|
+
- Which processes (execution flows) include this endpoint
|
|
54
|
+
- Which external services call this endpoint
|
|
55
|
+
|
|
56
|
+
## Common Mistakes
|
|
57
|
+
|
|
58
|
+
| Mistake | Reality |
|
|
59
|
+
|---------|---------|
|
|
60
|
+
| Expecting cross-repo links with only one repo indexed | Index ALL related services first; cross-repo HTTP edges are linked automatically after indexing |
|
|
61
|
+
| Missing endpoints from custom frameworks | Memtrace auto-detects major frameworks; for custom routers, the endpoints may appear as regular functions |
|
|
62
|
+
| Not using `link_repositories` | If auto-linking missed a connection, use this to manually establish cross-repo edges |
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-evolution
|
|
3
|
+
description: "Use when the user asks what changed in the codebase, how code evolved over time, what was recently modified, what's the diff between versions, what changed since a date, incident investigation timeline, unexpected changes, change history, or temporal analysis of any kind"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__get_evolution
|
|
6
|
+
- mcp__memtrace__get_timeline
|
|
7
|
+
- mcp__memtrace__detect_changes
|
|
8
|
+
- mcp__memtrace__list_indexed_repositories
|
|
9
|
+
user-invocable: true
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Overview
|
|
13
|
+
|
|
14
|
+
Multi-mode temporal analysis engine that answers "what changed and why should I care?" across arbitrary time windows. Uses Structural Significance Budgeting (SSB) to surface the most important changes without overwhelming you with noise.
|
|
15
|
+
|
|
16
|
+
This is memtrace's most powerful analytical tool. It implements six distinct scoring algorithms — choose the right one based on what the user needs.
|
|
17
|
+
|
|
18
|
+
## Query Modes — Choose the Right Algorithm
|
|
19
|
+
|
|
20
|
+
| Mode | Algorithm | Best For |
|
|
21
|
+
|------|-----------|----------|
|
|
22
|
+
| `compound` | Rank-fusion: 0.50×impact + 0.35×novel + 0.15×recent | **Default.** General-purpose "what changed?" — use when unsure |
|
|
23
|
+
| `impact` | Structural Significance: `sig(n) = in_degree^0.7 × (1 + out_degree)^0.3` | "What broke?" — finds changes with the largest blast radius |
|
|
24
|
+
| `novel` | Change Surprise Index: `surprise(n) = (1 + in_degree) / (1 + change_freq_90d)` | "What's unexpected?" — anomaly detection for rarely-changing code |
|
|
25
|
+
| `recent` | Temporal Proximity: `impact × exp(−0.5 × Δhours)` | "What changed near the incident?" — time-weighted for root cause |
|
|
26
|
+
| `directional` | Asymmetric scoring (added→out_degree, removed→in_degree, modified→impact) | "What was added vs removed?" — structural change direction |
|
|
27
|
+
| `overview` | Fast module-level rollup only | Quick summary — no per-symbol scoring, just module counts |
|
|
28
|
+
|
|
29
|
+
## Steps
|
|
30
|
+
|
|
31
|
+
### 1. Determine the time window
|
|
32
|
+
|
|
33
|
+
Ask the user or infer:
|
|
34
|
+
- `from` — ISO-8601 start timestamp (required)
|
|
35
|
+
- `to` — ISO-8601 end timestamp (defaults to now)
|
|
36
|
+
- `repo_id` — scope to a repo (call `list_indexed_repositories` if unknown)
|
|
37
|
+
|
|
38
|
+
### 2. Choose the mode
|
|
39
|
+
|
|
40
|
+
**Decision tree:**
|
|
41
|
+
|
|
42
|
+
```
|
|
43
|
+
User wants to know...
|
|
44
|
+
├── "what changed?" → compound (default)
|
|
45
|
+
├── "what could have broken?" → impact
|
|
46
|
+
├── "anything unexpected?" → novel
|
|
47
|
+
├── "what changed near X?" → recent (set `to` to the incident time)
|
|
48
|
+
├── "what was added/removed?" → directional
|
|
49
|
+
└── "quick summary?" → overview
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### 3. Execute the query
|
|
53
|
+
|
|
54
|
+
Use the `get_evolution` MCP tool with:
|
|
55
|
+
- `repo_id` — required
|
|
56
|
+
- `from` / `to` — the time window
|
|
57
|
+
- `mode` — one of: compound, impact, novel, recent, directional, overview
|
|
58
|
+
|
|
59
|
+
### 4. Interpret results
|
|
60
|
+
|
|
61
|
+
The response contains:
|
|
62
|
+
|
|
63
|
+
- **`added[]`** — new symbols that appeared in the time window
|
|
64
|
+
- **`removed[]`** — symbols that were deleted
|
|
65
|
+
- **`modified[]`** — symbols that changed
|
|
66
|
+
- **`by_module[]`** — module-level rollup (NEVER truncated — always shows all modules)
|
|
67
|
+
- **`significance_coverage`** — fraction of total significance captured (target: ≥0.80)
|
|
68
|
+
- **`budget_exhausted`** — if true, there were more significant changes than the budget allowed
|
|
69
|
+
|
|
70
|
+
Each symbol includes: `name`, `kind`, `file_path`, `scope_path`, `in_degree`, `out_degree`, and all four scores (`impact`, `novel`, `recent`, `compound`).
|
|
71
|
+
|
|
72
|
+
### 5. Drill deeper
|
|
73
|
+
|
|
74
|
+
- **For a single symbol's full history:** Use `get_timeline` with the symbol name
|
|
75
|
+
- **For diff-based change scope:** Use `detect_changes` when you have a specific diff/patch
|
|
76
|
+
- **For blast radius of a specific change:** Use `get_impact` on high-scoring symbols
|
|
77
|
+
|
|
78
|
+
## Scoring Algorithms — Detailed Reference
|
|
79
|
+
|
|
80
|
+
### Impact Score (Structural Significance Budgeting)
|
|
81
|
+
```
|
|
82
|
+
sig(n) = in_degree^0.7 × (1 + out_degree)^0.3
|
|
83
|
+
```
|
|
84
|
+
- Heavily weights callers (in_degree) — symbols called by many others have high blast radius
|
|
85
|
+
- Mild boost for outbound complexity (out_degree) — complex functions that changed are notable
|
|
86
|
+
- SSB selects the minimum set covering ≥80% of total significance mass
|
|
87
|
+
|
|
88
|
+
### Novelty Score (Change Surprise Index)
|
|
89
|
+
```
|
|
90
|
+
surprise(n) = (1 + in_degree) / (1 + change_freq_90d)
|
|
91
|
+
```
|
|
92
|
+
- High in_degree + low change frequency = **maximum surprise**
|
|
93
|
+
- A core utility that hasn't changed in 90 days suddenly changing → likely worth investigating
|
|
94
|
+
- Low in_degree + high frequency = routine churn, deprioritized
|
|
95
|
+
|
|
96
|
+
### Recent Score (Temporal Proximity Weighting)
|
|
97
|
+
```
|
|
98
|
+
recent(n) = impact(n) × exp(−0.5 × |Δhours to reference|)
|
|
99
|
+
```
|
|
100
|
+
- Exponential decay from the reference timestamp (the `to` parameter)
|
|
101
|
+
- Changes close to an incident get amplified; older changes fade
|
|
102
|
+
- Best for incident timelines: set `to` to the incident timestamp
|
|
103
|
+
|
|
104
|
+
### Compound Score (Rank Fusion)
|
|
105
|
+
```
|
|
106
|
+
compound = 0.50×rank(impact) + 0.35×rank(novel) + 0.15×rank(recent)
|
|
107
|
+
```
|
|
108
|
+
- Rank-based fusion avoids scale sensitivity between different score types
|
|
109
|
+
- Impact-dominant but boosted by novelty and recency
|
|
110
|
+
- Best default when you don't have a specific hypothesis
|
|
111
|
+
|
|
112
|
+
## Common Mistakes
|
|
113
|
+
|
|
114
|
+
| Mistake | Reality |
|
|
115
|
+
|---------|---------|
|
|
116
|
+
| Using `overview` when user needs details | Overview only gives module-level counts — use `compound` for symbol-level |
|
|
117
|
+
| Ignoring `budget_exhausted` flag | If true, there are more significant changes beyond what was returned — narrow the time window or use module rollup |
|
|
118
|
+
| Not checking `by_module` first | Module rollup is never truncated — scan it to identify which areas changed before diving into symbol-level |
|
|
119
|
+
| Using `recent` without setting `to` | The `to` timestamp is the reference point for proximity weighting — set it to the incident/event time |
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-graph
|
|
3
|
+
description: "Use when the user asks about architectural bottlenecks, important symbols, PageRank, centrality, bridge functions, code communities, logical modules, service boundaries, chokepoints, or wants to understand the high-level architecture of a codebase"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__find_bridge_symbols
|
|
6
|
+
- mcp__memtrace__find_central_symbols
|
|
7
|
+
- mcp__memtrace__list_communities
|
|
8
|
+
- mcp__memtrace__list_processes
|
|
9
|
+
- mcp__memtrace__get_process_flow
|
|
10
|
+
- mcp__memtrace__execute_cypher
|
|
11
|
+
user-invocable: true
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
## Overview
|
|
15
|
+
|
|
16
|
+
Graph algorithms that reveal the structural architecture of a codebase — community detection (Louvain), centrality ranking (PageRank/degree), bridge symbol identification (betweenness), and execution flow tracing.
|
|
17
|
+
|
|
18
|
+
## Quick Reference
|
|
19
|
+
|
|
20
|
+
| Tool | Purpose |
|
|
21
|
+
|------|---------|
|
|
22
|
+
| `find_bridge_symbols` | Architectural chokepoints — symbols that connect otherwise-separate modules |
|
|
23
|
+
| `find_central_symbols` | Most important symbols by PageRank or degree centrality |
|
|
24
|
+
| `list_communities` | Louvain-detected logical modules/services |
|
|
25
|
+
| `list_processes` | Execution flows: HTTP handlers, background jobs, CLI commands, event handlers |
|
|
26
|
+
| `get_process_flow` | Trace a single process step-by-step |
|
|
27
|
+
| `execute_cypher` | Direct read-only Cypher queries for custom analysis |
|
|
28
|
+
|
|
29
|
+
## Steps
|
|
30
|
+
|
|
31
|
+
### 1. Understand the architecture
|
|
32
|
+
|
|
33
|
+
Start with `list_communities` to see how the codebase is naturally partitioned into logical modules. Each community has a name, member count, and representative symbols.
|
|
34
|
+
|
|
35
|
+
### 2. Find critical infrastructure
|
|
36
|
+
|
|
37
|
+
Use `find_central_symbols` to identify the most important symbols:
|
|
38
|
+
- `method: "pagerank"` — importance by link structure (like Google's PageRank)
|
|
39
|
+
- `method: "degree"` — importance by direct connection count
|
|
40
|
+
- `limit` — how many to return
|
|
41
|
+
|
|
42
|
+
### 3. Find architectural chokepoints
|
|
43
|
+
|
|
44
|
+
Use `find_bridge_symbols` to find symbols that, if removed, would disconnect parts of the graph. These are:
|
|
45
|
+
- **Single points of failure** — if they break, cascading failures occur
|
|
46
|
+
- **Integration points** — good places for interfaces/contracts
|
|
47
|
+
- **Refactoring targets** — often too much responsibility concentrated in one place
|
|
48
|
+
|
|
49
|
+
### 4. Trace execution flows
|
|
50
|
+
|
|
51
|
+
Use `list_processes` to see all entry points (HTTP handlers, background jobs, CLI commands, event handlers).
|
|
52
|
+
|
|
53
|
+
Use `get_process_flow` with a process ID to trace a specific flow step-by-step — shows the full call chain from entry point through business logic to data access.
|
|
54
|
+
|
|
55
|
+
### 5. Custom queries
|
|
56
|
+
|
|
57
|
+
Use `execute_cypher` for advanced graph queries not covered by built-in tools. This is read-only and runs directly against the knowledge graph.
|
|
58
|
+
|
|
59
|
+
## Decision Points
|
|
60
|
+
|
|
61
|
+
| Question | Tool |
|
|
62
|
+
|----------|------|
|
|
63
|
+
| "What are the main modules?" | `list_communities` |
|
|
64
|
+
| "What are the most important functions?" | `find_central_symbols` with method=pagerank |
|
|
65
|
+
| "Where are the bottlenecks?" | `find_bridge_symbols` |
|
|
66
|
+
| "How does a request flow through the system?" | `list_processes` → `get_process_flow` |
|
|
67
|
+
| "What's the entry point for feature X?" | `list_processes`, then filter by name |
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-impact
|
|
3
|
+
description: "Use when the user asks about blast radius, what will break if I change this, risk of modifying a symbol, upstream or downstream dependencies, impact analysis before refactoring, or wants to understand the consequences of a code change"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__get_impact
|
|
6
|
+
- mcp__memtrace__detect_changes
|
|
7
|
+
- mcp__memtrace__find_symbol
|
|
8
|
+
- mcp__memtrace__find_code
|
|
9
|
+
user-invocable: true
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Overview
|
|
13
|
+
|
|
14
|
+
Compute the blast radius of changing a specific symbol. Traces upstream (what depends on this) and downstream (what this depends on) through the knowledge graph to quantify risk before making modifications.
|
|
15
|
+
|
|
16
|
+
## Quick Reference
|
|
17
|
+
|
|
18
|
+
| Tool | Purpose |
|
|
19
|
+
|------|---------|
|
|
20
|
+
| `get_impact` | Blast radius from a specific symbol (by ID) |
|
|
21
|
+
| `detect_changes` | Scope symbols affected by a diff/patch |
|
|
22
|
+
|
|
23
|
+
## Steps
|
|
24
|
+
|
|
25
|
+
### 1. Identify the symbol
|
|
26
|
+
|
|
27
|
+
If you have a symbol name but not its ID:
|
|
28
|
+
- Use `find_symbol` for exact names
|
|
29
|
+
- Use `find_code` for natural-language queries
|
|
30
|
+
|
|
31
|
+
### 2. Run impact analysis
|
|
32
|
+
|
|
33
|
+
Use the `get_impact` MCP tool:
|
|
34
|
+
- `symbol_id` — the symbol you plan to change (required)
|
|
35
|
+
- `direction` — `upstream` (what depends on me), `downstream` (what I depend on), or `both` (default)
|
|
36
|
+
- `depth` — traversal hops (default 3)
|
|
37
|
+
|
|
38
|
+
### 3. Interpret the risk rating
|
|
39
|
+
|
|
40
|
+
| Risk | Meaning | Action |
|
|
41
|
+
|------|---------|--------|
|
|
42
|
+
| **Low** | Few dependents, leaf node | Safe to modify; minimal testing needed |
|
|
43
|
+
| **Medium** | Moderate dependents | Test direct callers; review interface contracts |
|
|
44
|
+
| **High** | Many dependents across modules | Coordinate changes; comprehensive test coverage |
|
|
45
|
+
| **Critical** | Core infrastructure, many transitive dependents | Plan migration strategy; consider backward-compatible changes |
|
|
46
|
+
|
|
47
|
+
### 4. For diff-based analysis
|
|
48
|
+
|
|
49
|
+
When you have an actual code diff (not just a symbol), use `detect_changes`:
|
|
50
|
+
- Scopes all symbols affected by the diff
|
|
51
|
+
- Returns blast radius AND affected processes (execution flows)
|
|
52
|
+
- Useful for PR reviews or pre-commit checks
|
|
53
|
+
|
|
54
|
+
## Decision Points
|
|
55
|
+
|
|
56
|
+
| Situation | Action |
|
|
57
|
+
|-----------|--------|
|
|
58
|
+
| Changing a single function | `get_impact` with `direction: both` |
|
|
59
|
+
| Reviewing a PR or diff | `detect_changes` with the diff content |
|
|
60
|
+
| Renaming/removing a public API | `get_impact` with `direction: upstream`, high depth |
|
|
61
|
+
| Refactoring internals | `get_impact` with `direction: downstream` to check what you depend on |
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-index
|
|
3
|
+
description: "Use when the user asks to index a project, set up code intelligence, parse a codebase, build a knowledge graph, prepare a repo for analysis, or as the very first step before any code exploration, search, or relationship analysis"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__index_directory
|
|
6
|
+
- mcp__memtrace__check_job_status
|
|
7
|
+
- mcp__memtrace__list_jobs
|
|
8
|
+
- mcp__memtrace__list_indexed_repositories
|
|
9
|
+
user-invocable: true
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Overview
|
|
13
|
+
|
|
14
|
+
Index a local codebase into the persistent code knowledge graph. This is always the first step — it parses every source file, resolves cross-file relationships, detects API endpoints/calls, runs community detection and process tracing, and embeds all symbols for semantic search.
|
|
15
|
+
|
|
16
|
+
## Quick Reference
|
|
17
|
+
|
|
18
|
+
| Parameter | Purpose |
|
|
19
|
+
|-----------|---------|
|
|
20
|
+
| `path` | Absolute path to the directory to index |
|
|
21
|
+
| `incremental` | Only re-parse changed files (use for subsequent runs) |
|
|
22
|
+
| `clear_existing` | Wipe and rebuild from scratch |
|
|
23
|
+
|
|
24
|
+
## Steps
|
|
25
|
+
|
|
26
|
+
### 1. Check if already indexed
|
|
27
|
+
|
|
28
|
+
Use the `list_indexed_repositories` MCP tool first. If the repo is already indexed and recent, skip to step 4.
|
|
29
|
+
|
|
30
|
+
**Success criteria:** You have a list of repo_ids and their last-indexed timestamps.
|
|
31
|
+
|
|
32
|
+
### 2. Index the directory
|
|
33
|
+
|
|
34
|
+
Use the `index_directory` MCP tool:
|
|
35
|
+
|
|
36
|
+
- Set `path` to the project root (absolute path)
|
|
37
|
+
- Set `incremental: true` if re-indexing after changes
|
|
38
|
+
- Set `clear_existing: true` only if a full rebuild is needed
|
|
39
|
+
|
|
40
|
+
**Success criteria:** You receive a `job_id` immediately.
|
|
41
|
+
|
|
42
|
+
### 3. Poll for completion
|
|
43
|
+
|
|
44
|
+
Use `check_job_status` with the `job_id` every 2–3 seconds.
|
|
45
|
+
|
|
46
|
+
Pipeline stages in order: **scan → parse → resolve → communities → processes → persist → embeddings → api_detect → done**
|
|
47
|
+
|
|
48
|
+
Wait until `status = "completed"`. If `status = "failed"`, report the error message to the user.
|
|
49
|
+
|
|
50
|
+
### 4. Report to user
|
|
51
|
+
|
|
52
|
+
After indexing completes, call `list_indexed_repositories` to confirm the repo appears with correct node/edge counts. Report: repo_id, languages detected, total symbols, total relationships.
|
|
53
|
+
|
|
54
|
+
**Save the `repo_id`** — most other memtrace tools require it.
|
|
55
|
+
|
|
56
|
+
## Error Handling
|
|
57
|
+
|
|
58
|
+
| Error | Action |
|
|
59
|
+
|-------|--------|
|
|
60
|
+
| Path does not exist | Ask user to verify the absolute path |
|
|
61
|
+
| Job status "failed" | Report the error message; suggest `clear_existing: true` for a fresh rebuild |
|
|
62
|
+
| Timeout (job running > 5 min) | Large repos are normal; keep polling. For monorepos, index subdirectories separately |
|
|
63
|
+
| Already indexed | Use `incremental: true` to update, or skip indexing entirely |
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-quality
|
|
3
|
+
description: "Use when the user asks about dead code, unused functions, code complexity, cyclomatic complexity, refactoring candidates, code smells, code quality metrics, functions that are too complex, or wants to find code that should be cleaned up"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__find_dead_code
|
|
6
|
+
- mcp__memtrace__calculate_cyclomatic_complexity
|
|
7
|
+
- mcp__memtrace__find_most_complex_functions
|
|
8
|
+
- mcp__memtrace__get_repository_stats
|
|
9
|
+
user-invocable: true
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Overview
|
|
13
|
+
|
|
14
|
+
Identify code quality issues using structural graph analysis — dead code (zero callers), complexity hotspots (high out-degree), and repository-wide statistics.
|
|
15
|
+
|
|
16
|
+
## Quick Reference
|
|
17
|
+
|
|
18
|
+
| Tool | Purpose |
|
|
19
|
+
|------|---------|
|
|
20
|
+
| `find_dead_code` | Symbols with zero callers (potentially unused) |
|
|
21
|
+
| `calculate_cyclomatic_complexity` | Complexity score for a specific symbol |
|
|
22
|
+
| `find_most_complex_functions` | Top-N functions by complexity across the repo |
|
|
23
|
+
| `get_repository_stats` | Repo-wide counts: nodes, edges, communities, processes |
|
|
24
|
+
|
|
25
|
+
## Steps
|
|
26
|
+
|
|
27
|
+
### 1. Get repository overview
|
|
28
|
+
|
|
29
|
+
Use `get_repository_stats` to understand the codebase scale:
|
|
30
|
+
- Node counts by kind (functions, classes, methods, interfaces)
|
|
31
|
+
- Edge counts (calls, imports, extends, type references)
|
|
32
|
+
- Community and process counts
|
|
33
|
+
|
|
34
|
+
### 2. Find dead code
|
|
35
|
+
|
|
36
|
+
Use `find_dead_code`:
|
|
37
|
+
- `repo_id` — required
|
|
38
|
+
- `include_tests` — set true to also flag unused test helpers (default false)
|
|
39
|
+
|
|
40
|
+
**Note:** Exported symbols and entry points are excluded by default — the tool won't flag public APIs as "dead" just because they're called externally.
|
|
41
|
+
|
|
42
|
+
### 3. Find complexity hotspots
|
|
43
|
+
|
|
44
|
+
Use `find_most_complex_functions`:
|
|
45
|
+
- `repo_id` — required
|
|
46
|
+
- `limit` — how many to return (default 10)
|
|
47
|
+
|
|
48
|
+
Complexity scoring (based on out-degree — number of callees):
|
|
49
|
+
| Score | Rating | Action |
|
|
50
|
+
|-------|--------|--------|
|
|
51
|
+
| < 5 | Low | No action needed |
|
|
52
|
+
| 5–10 | Medium | Monitor; consider splitting if growing |
|
|
53
|
+
| 10–20 | High | Refactoring candidate; extract helper functions |
|
|
54
|
+
| > 20 | Critical | Immediate attention; this function does too much |
|
|
55
|
+
|
|
56
|
+
### 4. Drill into specific functions
|
|
57
|
+
|
|
58
|
+
Use `calculate_cyclomatic_complexity` on specific symbols flagged by the user or found in step 3.
|
|
59
|
+
|
|
60
|
+
## Common Mistakes
|
|
61
|
+
|
|
62
|
+
| Mistake | Reality |
|
|
63
|
+
|---------|---------|
|
|
64
|
+
| Treating all dead code as deletable | Some "dead" code is called via reflection, dynamic dispatch, or external consumers |
|
|
65
|
+
| Ignoring exported symbols in dead code results | Exported symbols and entry points are already excluded by default — no extra filtering needed |
|
|
66
|
+
| Only looking at the highest complexity | Medium-complexity functions that are growing (check `get_evolution`) are often more urgent |
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-relationships
|
|
3
|
+
description: "Use when the user asks who calls a function, what a function calls, class hierarchy, inheritance, imports, exports, type usages, dependencies between symbols, or wants to understand how code connects before making changes"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__analyze_relationships
|
|
6
|
+
- mcp__memtrace__get_symbol_context
|
|
7
|
+
- mcp__memtrace__find_symbol
|
|
8
|
+
- mcp__memtrace__find_code
|
|
9
|
+
user-invocable: true
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Overview
|
|
13
|
+
|
|
14
|
+
Traverse the code knowledge graph to map relationships between symbols — callers, callees, class hierarchies, imports, exports, and type usages. Essential for understanding a symbol's neighbourhood before modifying it.
|
|
15
|
+
|
|
16
|
+
## Quick Reference
|
|
17
|
+
|
|
18
|
+
| query_type | What it finds |
|
|
19
|
+
|------------|---------------|
|
|
20
|
+
| `find_callers` | What calls this function/method? |
|
|
21
|
+
| `find_callees` | What does this function call? |
|
|
22
|
+
| `class_hierarchy` | Parent classes, interfaces, mixins |
|
|
23
|
+
| `overrides` | Which child classes override this method? |
|
|
24
|
+
| `imports` | What modules does this file import? |
|
|
25
|
+
| `exporters` | Which files import this module? |
|
|
26
|
+
| `type_usages` | Where is this type/interface referenced? |
|
|
27
|
+
|
|
28
|
+
## Steps
|
|
29
|
+
|
|
30
|
+
### 1. Get the symbol ID
|
|
31
|
+
|
|
32
|
+
If you don't have a symbol `id`, find it first:
|
|
33
|
+
- Use `find_symbol` for exact names
|
|
34
|
+
- Use `find_code` for natural-language queries
|
|
35
|
+
|
|
36
|
+
### 2. Choose your approach
|
|
37
|
+
|
|
38
|
+
**Quick 360° view** → Use `get_symbol_context`
|
|
39
|
+
Returns in one call: direct callers, callees, type references, community membership, process membership, and cross-repo API callers.
|
|
40
|
+
|
|
41
|
+
**ALWAYS prefer `get_symbol_context` first** — it answers "what does this touch and what touches it?" faster than multiple `analyze_relationships` calls.
|
|
42
|
+
|
|
43
|
+
**Targeted traversal** → Use `analyze_relationships`
|
|
44
|
+
When you need a specific relationship type at a specific depth:
|
|
45
|
+
- `symbol_id` — the symbol to start from (required)
|
|
46
|
+
- `query_type` — one of the types above (required)
|
|
47
|
+
- `depth` — traversal hops, default 2 (higher = slower but reveals indirect deps)
|
|
48
|
+
|
|
49
|
+
### 3. Interpret results
|
|
50
|
+
|
|
51
|
+
- **High in_degree** (many callers) → widely-used symbol; changes have large blast radius
|
|
52
|
+
- **High out_degree** (many callees) → complex function; candidate for refactoring
|
|
53
|
+
- **Deep class hierarchy** → check for Liskov violations or fragile base class issues
|
|
54
|
+
- **Cross-repo API callers** → changes require coordination with other teams/services
|
|
55
|
+
|
|
56
|
+
### 4. Follow up
|
|
57
|
+
|
|
58
|
+
After understanding relationships, consider:
|
|
59
|
+
- `get_impact` to quantify the blast radius of a change
|
|
60
|
+
- `get_evolution` to see how this symbol has changed over time
|
|
61
|
+
- `find_dead_code` if you found unreferenced symbols
|
|
62
|
+
|
|
63
|
+
## Decision Points
|
|
64
|
+
|
|
65
|
+
| Situation | Action |
|
|
66
|
+
|-----------|--------|
|
|
67
|
+
| Need broad context fast | Use `get_symbol_context` (one call, full picture) |
|
|
68
|
+
| Need specific relationship at depth >2 | Use `analyze_relationships` with custom depth |
|
|
69
|
+
| Symbol has many callers | Follow up with `get_impact` before modifying |
|
|
70
|
+
| Found cross-repo API callers | This is a service boundary — coordinate changes |
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-search
|
|
3
|
+
description: "Use when the user asks to find code, search for a function, locate a symbol, look up where something is defined, search across repos, find implementations, or needs to discover where a piece of logic lives before making changes"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__find_code
|
|
6
|
+
- mcp__memtrace__find_symbol
|
|
7
|
+
- mcp__memtrace__list_indexed_repositories
|
|
8
|
+
user-invocable: true
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Overview
|
|
12
|
+
|
|
13
|
+
Find code using hybrid BM25 full-text + semantic vector search with Reciprocal Rank Fusion. Works for both natural-language queries and exact symbol names. This is the primary discovery tool — use it before calling relationship or impact analysis tools.
|
|
14
|
+
|
|
15
|
+
## Quick Reference
|
|
16
|
+
|
|
17
|
+
| Tool | Best For |
|
|
18
|
+
|------|----------|
|
|
19
|
+
| `find_code` | Natural-language queries ("authentication middleware", "retry logic"), broad searches |
|
|
20
|
+
| `find_symbol` | Exact identifier names ("getUserById", "PaymentService"), when you know the name |
|
|
21
|
+
|
|
22
|
+
## Steps
|
|
23
|
+
|
|
24
|
+
### 1. Choose the right search tool
|
|
25
|
+
|
|
26
|
+
- **Know the exact name?** → Use `find_symbol` with `fuzzy: true` for typo tolerance
|
|
27
|
+
- **Describing behaviour?** → Use `find_code` with a natural-language query
|
|
28
|
+
- **Searching all repos?** → Omit `repo_id` from either tool
|
|
29
|
+
|
|
30
|
+
### 2. Execute the search
|
|
31
|
+
|
|
32
|
+
**find_code parameters:**
|
|
33
|
+
- `query` — natural-language or exact text (required)
|
|
34
|
+
- `repo_id` — scope to a single repo (optional; omit to search all)
|
|
35
|
+
- `kind` — filter by symbol type: Function, Class, Method, Interface, APIEndpoint, APICall
|
|
36
|
+
- `limit` — max results (default 10)
|
|
37
|
+
- `as_of` — ISO-8601 timestamp for time-travel search
|
|
38
|
+
|
|
39
|
+
**find_symbol parameters:**
|
|
40
|
+
- `name` — exact or partial symbol name (required)
|
|
41
|
+
- `fuzzy` — enable Levenshtein correction (default false)
|
|
42
|
+
- `repo_id` — scope to a single repo (optional)
|
|
43
|
+
- `kind` — filter by symbol type
|
|
44
|
+
- `file_path` — filter by file path substring
|
|
45
|
+
|
|
46
|
+
**Success criteria:** Results include `file_path`, `start_line`, `kind`, and relevance `score`.
|
|
47
|
+
|
|
48
|
+
### 3. Use results for next steps
|
|
49
|
+
|
|
50
|
+
Save the symbol `id` from results — pass it to:
|
|
51
|
+
- `analyze_relationships` to map callers/callees
|
|
52
|
+
- `get_symbol_context` for a 360-degree view
|
|
53
|
+
- `get_impact` to assess blast radius before changes
|
|
54
|
+
|
|
55
|
+
## Common Mistakes
|
|
56
|
+
|
|
57
|
+
| Mistake | Reality |
|
|
58
|
+
|---------|---------|
|
|
59
|
+
| Searching without indexing first | Call `list_indexed_repositories` to verify the repo is indexed |
|
|
60
|
+
| Using find_symbol for vague queries | Use `find_code` for natural-language; `find_symbol` is for exact names |
|
|
61
|
+
| Ignoring the `kind` filter | Narrow results with kind=Function, kind=Class etc. to reduce noise |
|
|
62
|
+
| Re-searching to get more context | Use the symbol `id` with `get_symbol_context` instead of re-searching |
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-change-impact-analysis
|
|
3
|
+
description: "Use when the user is about to modify code, planning a refactoring, wants to know what will break, needs a pre-change risk assessment, is reviewing a PR, or wants to understand the full consequences of a code change before making it"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__find_symbol
|
|
6
|
+
- mcp__memtrace__find_code
|
|
7
|
+
- mcp__memtrace__get_symbol_context
|
|
8
|
+
- mcp__memtrace__get_impact
|
|
9
|
+
- mcp__memtrace__detect_changes
|
|
10
|
+
- mcp__memtrace__get_evolution
|
|
11
|
+
- mcp__memtrace__analyze_relationships
|
|
12
|
+
- mcp__memtrace__list_indexed_repositories
|
|
13
|
+
user-invocable: true
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## Overview
|
|
17
|
+
|
|
18
|
+
Pre-change risk assessment workflow. Before modifying code, this workflow maps the full blast radius, identifies affected processes, checks recent change history for instability signals, and produces a risk-rated change plan.
|
|
19
|
+
|
|
20
|
+
## Steps
|
|
21
|
+
|
|
22
|
+
### 1. Identify what's being changed
|
|
23
|
+
|
|
24
|
+
Find the target symbol(s):
|
|
25
|
+
- Use `find_symbol` if the user named specific functions/classes
|
|
26
|
+
- Use `find_code` if the user described behaviour ("the authentication middleware")
|
|
27
|
+
|
|
28
|
+
Collect symbol IDs for all targets.
|
|
29
|
+
|
|
30
|
+
### 2. Get 360° context for each target
|
|
31
|
+
|
|
32
|
+
For each symbol, call `get_symbol_context`:
|
|
33
|
+
- Direct callers and callees
|
|
34
|
+
- Community membership (which module is this in?)
|
|
35
|
+
- Process membership (which execution flows does this participate in?)
|
|
36
|
+
- Cross-repo API callers (is this an endpoint called by other services?)
|
|
37
|
+
|
|
38
|
+
**Decision:** If cross-repo API callers exist, this change requires coordination with other teams. Flag this immediately.
|
|
39
|
+
|
|
40
|
+
### 3. Compute blast radius
|
|
41
|
+
|
|
42
|
+
For each target, call `get_impact` with `direction: both`:
|
|
43
|
+
- Upstream impact: what depends on this symbol
|
|
44
|
+
- Downstream impact: what this symbol depends on
|
|
45
|
+
- Risk rating: Low / Medium / High / Critical
|
|
46
|
+
|
|
47
|
+
**Decision:**
|
|
48
|
+
| Risk | Action |
|
|
49
|
+
|------|--------|
|
|
50
|
+
| Low | Proceed with standard testing |
|
|
51
|
+
| Medium | Review all direct callers; test affected processes |
|
|
52
|
+
| High | Plan incremental migration; consider feature flags |
|
|
53
|
+
| Critical | Full migration strategy; backward-compatible changes required |
|
|
54
|
+
|
|
55
|
+
### 4. Check temporal stability
|
|
56
|
+
|
|
57
|
+
Call `get_evolution` with mode `novel` for a 30-day window on the repo:
|
|
58
|
+
- Are any of the target symbols flagged as "rarely changing"? If so, this change is structurally surprising and deserves extra scrutiny.
|
|
59
|
+
- Have the target symbols been changing frequently? High churn + high impact = volatile hotspot.
|
|
60
|
+
|
|
61
|
+
### 5. Map affected execution flows
|
|
62
|
+
|
|
63
|
+
From step 2, you already know which processes are affected. For critical changes, use `analyze_relationships` with `query_type: find_callers` at `depth: 3` to trace the full transitive caller chain.
|
|
64
|
+
|
|
65
|
+
### 6. Produce the risk assessment
|
|
66
|
+
|
|
67
|
+
Synthesize into a change plan:
|
|
68
|
+
|
|
69
|
+
1. **Target(s)** — what's being changed and where
|
|
70
|
+
2. **Blast Radius** — number of direct/transitive dependents, risk rating
|
|
71
|
+
3. **Affected Processes** — which execution flows will be impacted
|
|
72
|
+
4. **Cross-Service Impact** — any external callers or consumers
|
|
73
|
+
5. **Stability Signal** — is this code stable (novel) or volatile (frequent changes)?
|
|
74
|
+
6. **Recommended Approach** — based on risk: direct change, incremental migration, or backward-compatible evolution
|
|
75
|
+
7. **Test Coverage** — which callers/processes to verify after the change
|
|
76
|
+
|
|
77
|
+
## Decision Points
|
|
78
|
+
|
|
79
|
+
| Condition | Action |
|
|
80
|
+
|-----------|--------|
|
|
81
|
+
| Risk = Critical | Recommend backward-compatible change + deprecation path |
|
|
82
|
+
| Cross-repo callers exist | Flag as requiring multi-service coordination |
|
|
83
|
+
| Symbol has high novelty score | Extra review — this rarely changes; make sure the change is intentional |
|
|
84
|
+
| Multiple processes affected | List each affected flow; recommend testing each one |
|
|
85
|
+
| Symbol is a bridge point | Change may disconnect parts of the architecture — verify alternative paths exist |
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-codebase-exploration
|
|
3
|
+
description: "Use when the user asks to explore a codebase, understand a project, onboard to a new repo, get an overview of how code is structured, map the architecture, or wants a comprehensive understanding of a codebase they're new to"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__index_directory
|
|
6
|
+
- mcp__memtrace__check_job_status
|
|
7
|
+
- mcp__memtrace__list_indexed_repositories
|
|
8
|
+
- mcp__memtrace__get_repository_stats
|
|
9
|
+
- mcp__memtrace__list_communities
|
|
10
|
+
- mcp__memtrace__list_processes
|
|
11
|
+
- mcp__memtrace__find_central_symbols
|
|
12
|
+
- mcp__memtrace__find_bridge_symbols
|
|
13
|
+
- mcp__memtrace__find_api_endpoints
|
|
14
|
+
- mcp__memtrace__get_api_topology
|
|
15
|
+
- mcp__memtrace__get_evolution
|
|
16
|
+
- mcp__memtrace__find_most_complex_functions
|
|
17
|
+
user-invocable: true
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## Overview
|
|
21
|
+
|
|
22
|
+
Full codebase exploration workflow — from indexing through architectural understanding. Chains indexing, graph algorithms, community detection, and temporal analysis into a structured onboarding experience. Use this when someone is new to a codebase and needs to build a mental model.
|
|
23
|
+
|
|
24
|
+
## Steps
|
|
25
|
+
|
|
26
|
+
### 1. Index the codebase
|
|
27
|
+
|
|
28
|
+
Call `list_indexed_repositories` first. If the repo is already indexed, skip to step 2.
|
|
29
|
+
|
|
30
|
+
Otherwise, call `index_directory` with the project path, then poll `check_job_status` until completion.
|
|
31
|
+
|
|
32
|
+
**Success criteria:** Repo appears in `list_indexed_repositories` with non-zero node/edge counts.
|
|
33
|
+
|
|
34
|
+
### 2. Get the lay of the land
|
|
35
|
+
|
|
36
|
+
Call `get_repository_stats` to understand scale:
|
|
37
|
+
- How many functions, classes, methods, interfaces?
|
|
38
|
+
- How many relationships (calls, imports, extends)?
|
|
39
|
+
- How many communities and processes were detected?
|
|
40
|
+
|
|
41
|
+
Report these numbers to the user — they set expectations for the codebase's size and complexity.
|
|
42
|
+
|
|
43
|
+
### 3. Map the architecture (communities)
|
|
44
|
+
|
|
45
|
+
Call `list_communities` to see how the codebase naturally clusters into logical modules.
|
|
46
|
+
|
|
47
|
+
**Decision:** If >10 communities, summarize the top 5–7 by size and let the user ask about specific ones.
|
|
48
|
+
|
|
49
|
+
Each community represents a cohesive module — these are the "areas" of the codebase.
|
|
50
|
+
|
|
51
|
+
### 4. Find the most important symbols
|
|
52
|
+
|
|
53
|
+
Call `find_central_symbols` with `method: "pagerank"` and `limit: 15`.
|
|
54
|
+
|
|
55
|
+
These are the symbols that the rest of the codebase depends on most heavily. They form the "skeleton" of the architecture.
|
|
56
|
+
|
|
57
|
+
### 5. Find architectural bottlenecks
|
|
58
|
+
|
|
59
|
+
Call `find_bridge_symbols` to identify chokepoints — symbols that connect otherwise-separate parts of the codebase.
|
|
60
|
+
|
|
61
|
+
**Decision:** If bridge symbols overlap heavily with central symbols, flag them as critical infrastructure — high importance AND single point of failure.
|
|
62
|
+
|
|
63
|
+
### 6. Map execution flows
|
|
64
|
+
|
|
65
|
+
Call `list_processes` to discover entry points:
|
|
66
|
+
- HTTP handlers (API endpoints)
|
|
67
|
+
- Background jobs
|
|
68
|
+
- CLI commands
|
|
69
|
+
- Event handlers
|
|
70
|
+
|
|
71
|
+
This shows HOW the code is actually used at runtime, not just how it's structured.
|
|
72
|
+
|
|
73
|
+
### 7. Map the API surface (if applicable)
|
|
74
|
+
|
|
75
|
+
Call `find_api_endpoints` to list all HTTP routes.
|
|
76
|
+
|
|
77
|
+
**Decision:** If multiple repos are indexed, also call `get_api_topology` to map service-to-service dependencies.
|
|
78
|
+
|
|
79
|
+
### 8. Recent activity
|
|
80
|
+
|
|
81
|
+
Call `get_evolution` with mode `overview` and a 30-day window to see which modules have been most active recently.
|
|
82
|
+
|
|
83
|
+
**Decision:** If the user asks about specific recent changes, switch to mode `compound` for symbol-level detail.
|
|
84
|
+
|
|
85
|
+
### 9. Complexity hotspots
|
|
86
|
+
|
|
87
|
+
Call `find_most_complex_functions` with `limit: 10` to identify potential technical debt.
|
|
88
|
+
|
|
89
|
+
## Report Synthesis
|
|
90
|
+
|
|
91
|
+
Synthesize findings into a structured overview:
|
|
92
|
+
|
|
93
|
+
1. **Scale** — languages, total symbols, total relationships
|
|
94
|
+
2. **Architecture** — main communities/modules and what they do
|
|
95
|
+
3. **Critical Infrastructure** — central symbols and bridge points
|
|
96
|
+
4. **Execution Flows** — how the code is entered and used
|
|
97
|
+
5. **API Surface** — endpoints and service dependencies
|
|
98
|
+
6. **Recent Activity** — what's been changing in the last 30 days
|
|
99
|
+
7. **Technical Debt** — complexity hotspots from `find_most_complex_functions` (dead-code detection requires a separate `find_dead_code` pass, not included in this workflow)
|
|
100
|
+
|
|
101
|
+
## Common Mistakes
|
|
102
|
+
|
|
103
|
+
| Mistake | Reality |
|
|
104
|
+
|---------|---------|
|
|
105
|
+
| Skipping indexing and using file-based grep | The knowledge graph provides structural understanding that grep cannot — callers, callees, communities, processes |
|
|
106
|
+
| Reporting raw numbers without interpretation | "450 functions across 12 communities" means nothing; describe what each community does |
|
|
107
|
+
| Only looking at code structure | Execution flows (processes) show how the code is actually used — always include them |
|
|
108
|
+
| Ignoring temporal context | Recent evolution shows where active development is happening — this is where the user will likely need to work |
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-incident-investigation
|
|
3
|
+
description: "Use when the user is investigating a bug, incident, production issue, regression, something that broke, root cause analysis, debugging a failure, or trying to figure out what went wrong and when"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__get_evolution
|
|
6
|
+
- mcp__memtrace__get_timeline
|
|
7
|
+
- mcp__memtrace__detect_changes
|
|
8
|
+
- mcp__memtrace__get_impact
|
|
9
|
+
- mcp__memtrace__get_symbol_context
|
|
10
|
+
- mcp__memtrace__find_code
|
|
11
|
+
- mcp__memtrace__find_symbol
|
|
12
|
+
- mcp__memtrace__analyze_relationships
|
|
13
|
+
- mcp__memtrace__list_indexed_repositories
|
|
14
|
+
user-invocable: true
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
## Overview
|
|
18
|
+
|
|
19
|
+
Root cause investigation workflow for incidents, regressions, and production issues. Uses temporal analysis with the `recent` scoring mode to surface changes closest to the incident time, then traces blast radius and execution flows to identify the likely cause.
|
|
20
|
+
|
|
21
|
+
## Steps
|
|
22
|
+
|
|
23
|
+
### 1. Establish the timeline
|
|
24
|
+
|
|
25
|
+
Determine:
|
|
26
|
+
- **Incident time** — when did the problem start? (This becomes the `to` parameter)
|
|
27
|
+
- **Lookback window** — how far back to search? Start with 24 hours, expand if needed.
|
|
28
|
+
- **Repo(s)** — which services are affected? Call `list_indexed_repositories` to get repo_ids.
|
|
29
|
+
|
|
30
|
+
### 2. Surface recent changes near the incident
|
|
31
|
+
|
|
32
|
+
Call `get_evolution` with:
|
|
33
|
+
- `mode: "recent"` — temporal proximity weighting: `impact × exp(−0.5 × Δhours)`
|
|
34
|
+
- `from` — lookback start (e.g., 24 hours before incident)
|
|
35
|
+
- `to` — the incident timestamp
|
|
36
|
+
- `repo_id` — the affected repo
|
|
37
|
+
|
|
38
|
+
**Why `recent` mode?** It exponentially amplifies changes close to the incident time while still weighting by structural impact. A high-impact change made 1 hour before the incident scores much higher than the same change made 20 hours before.
|
|
39
|
+
|
|
40
|
+
**Success criteria:** A ranked list of changes, with the most likely culprits at the top.
|
|
41
|
+
|
|
42
|
+
### 3. Check for unexpected changes
|
|
43
|
+
|
|
44
|
+
Call `get_evolution` again with `mode: "novel"` on the same time window:
|
|
45
|
+
- Flags changes to rarely-modified code (high in_degree, low change frequency)
|
|
46
|
+
- A core utility that hasn't changed in 90 days suddenly changing near an incident is a strong signal
|
|
47
|
+
|
|
48
|
+
**Decision:** If `novel` mode surfaces different symbols than `recent` mode, investigate both — the root cause may be an unexpected change to stable infrastructure.
|
|
49
|
+
|
|
50
|
+
### 4. Trace the blast radius of top suspects
|
|
51
|
+
|
|
52
|
+
For the top 3–5 symbols from steps 2–3, call `get_impact` with `direction: upstream`:
|
|
53
|
+
- How many dependents (consumers) were affected?
|
|
54
|
+
- What execution flows pass through this symbol?
|
|
55
|
+
|
|
56
|
+
**Decision:** Prioritize symbols where the blast radius overlaps with the reported failure area.
|
|
57
|
+
|
|
58
|
+
### 5. Trace execution flows
|
|
59
|
+
|
|
60
|
+
Use `get_symbol_context` on the top suspects to see which processes (HTTP handlers, background jobs, etc.) they participate in.
|
|
61
|
+
|
|
62
|
+
**Decision:** If the incident is in a specific endpoint/flow, focus on suspects that are members of that process.
|
|
63
|
+
|
|
64
|
+
### 6. Build the full timeline for the suspect
|
|
65
|
+
|
|
66
|
+
Once you have a primary suspect, call `get_timeline` with the symbol name to see its full version history:
|
|
67
|
+
- What changed in each commit?
|
|
68
|
+
- When was the last "stable" version?
|
|
69
|
+
- Was the change a modification, or was it newly added?
|
|
70
|
+
|
|
71
|
+
### 7. Correlate with surrounding changes
|
|
72
|
+
|
|
73
|
+
Call `get_evolution` with mode `directional` to separate:
|
|
74
|
+
- **Added symbols** — new code introduced (potential new bugs)
|
|
75
|
+
- **Removed symbols** — deleted code (potential missing functionality)
|
|
76
|
+
- **Modified symbols** — changed behaviour (potential regressions)
|
|
77
|
+
|
|
78
|
+
## Report: Root Cause Analysis
|
|
79
|
+
|
|
80
|
+
1. **Incident Timeline** — when it started, what was observed
|
|
81
|
+
2. **Most Likely Cause** — the top-ranked change(s) by `recent` mode with blast radius confirmation
|
|
82
|
+
3. **Supporting Evidence** — novelty signal (was this an unexpected change?), blast radius overlap with failure area, process membership overlap
|
|
83
|
+
4. **Change History** — full timeline of the suspect symbol
|
|
84
|
+
5. **Affected Scope** — all processes and dependent consumers impacted
|
|
85
|
+
6. **Remediation** — revert the change, fix forward, or mitigate
|
|
86
|
+
|
|
87
|
+
## Algorithm Selection Guide for Incidents
|
|
88
|
+
|
|
89
|
+
| Phase | Mode | Why |
|
|
90
|
+
|-------|------|-----|
|
|
91
|
+
| Initial triage | `recent` | Time-weighted ranking surfaces changes near the incident |
|
|
92
|
+
| Anomaly detection | `novel` | Catches unexpected changes to stable code |
|
|
93
|
+
| Scope assessment | `impact` | Ranks by structural significance (blast radius) |
|
|
94
|
+
| Direction analysis | `directional` | Separates added/removed/modified |
|
|
95
|
+
| Quick summary | `overview` | Fast module-level scan before deep-diving |
|
|
96
|
+
|
|
97
|
+
## Common Mistakes
|
|
98
|
+
|
|
99
|
+
| Mistake | Reality |
|
|
100
|
+
|---------|---------|
|
|
101
|
+
| Starting with `impact` mode | Use `recent` first — time proximity is the strongest signal for incidents |
|
|
102
|
+
| Only looking at the most recent commit | The root cause may be from an earlier change whose effects were delayed |
|
|
103
|
+
| Ignoring `novel` mode | Unexpected changes to stable code are often the root cause |
|
|
104
|
+
| Not checking blast radius overlap | A change is only a suspect if its blast radius reaches the failure area |
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: memtrace-refactoring-guide
|
|
3
|
+
description: "Use when the user wants to refactor code, reduce complexity, clean up technical debt, split a large function, extract a module, reorganize code, identify refactoring priorities, or improve code structure"
|
|
4
|
+
allowed-tools:
|
|
5
|
+
- mcp__memtrace__find_most_complex_functions
|
|
6
|
+
- mcp__memtrace__find_dead_code
|
|
7
|
+
- mcp__memtrace__find_bridge_symbols
|
|
8
|
+
- mcp__memtrace__find_central_symbols
|
|
9
|
+
- mcp__memtrace__get_symbol_context
|
|
10
|
+
- mcp__memtrace__get_impact
|
|
11
|
+
- mcp__memtrace__get_evolution
|
|
12
|
+
- mcp__memtrace__analyze_relationships
|
|
13
|
+
- mcp__memtrace__list_communities
|
|
14
|
+
- mcp__memtrace__calculate_cyclomatic_complexity
|
|
15
|
+
- mcp__memtrace__list_indexed_repositories
|
|
16
|
+
user-invocable: true
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Overview
|
|
20
|
+
|
|
21
|
+
Guided refactoring workflow — identifies refactoring candidates using structural analysis, scores them by risk and priority, and produces a phased refactoring plan. Combines complexity metrics, dead code detection, bridge analysis, and temporal evolution to prioritize what to refactor first and how to do it safely.
|
|
22
|
+
|
|
23
|
+
## Steps
|
|
24
|
+
|
|
25
|
+
### 1. Identify refactoring candidates
|
|
26
|
+
|
|
27
|
+
Run these three tools in parallel to build a candidate list:
|
|
28
|
+
|
|
29
|
+
**a) Complexity hotspots:**
|
|
30
|
+
Call `find_most_complex_functions` with `limit: 20`
|
|
31
|
+
|
|
32
|
+
**b) Dead code:**
|
|
33
|
+
Call `find_dead_code` to find unused symbols
|
|
34
|
+
|
|
35
|
+
**c) Architectural bottlenecks:**
|
|
36
|
+
Call `find_bridge_symbols` to find chokepoints with too much responsibility
|
|
37
|
+
|
|
38
|
+
### 2. Score candidates by volatility
|
|
39
|
+
|
|
40
|
+
Call `get_evolution` with mode `compound` over a 90-day window:
|
|
41
|
+
- Symbols that are BOTH complex AND frequently changing are the highest priority
|
|
42
|
+
- Complex but stable code can wait — it's not causing active pain
|
|
43
|
+
- Volatile but simple code may be fine — frequent changes to simple code is normal
|
|
44
|
+
|
|
45
|
+
**Priority matrix:**
|
|
46
|
+
|
|
47
|
+
| | Low Complexity | High Complexity |
|
|
48
|
+
|---|---|---|
|
|
49
|
+
| **Stable (low change freq)** | Leave alone | Monitor; refactor if touched |
|
|
50
|
+
| **Volatile (high change freq)** | Normal; leave alone | **TOP PRIORITY** — refactor first |
|
|
51
|
+
|
|
52
|
+
### 3. Assess risk for top candidates
|
|
53
|
+
|
|
54
|
+
For each top-priority candidate, call `get_impact` with `direction: both`:
|
|
55
|
+
- **Low risk** → refactor directly
|
|
56
|
+
- **Medium risk** → refactor with comprehensive tests
|
|
57
|
+
- **High/Critical risk** → plan incremental migration with backward compatibility
|
|
58
|
+
|
|
59
|
+
Also call `get_symbol_context` to check:
|
|
60
|
+
- How many processes does this symbol participate in? (More = more testing needed)
|
|
61
|
+
- Is it part of a cross-repo API? (If yes, coordinate with consumers)
|
|
62
|
+
|
|
63
|
+
### 4. Understand the neighbourhood
|
|
64
|
+
|
|
65
|
+
For each refactoring target, call `analyze_relationships`:
|
|
66
|
+
- `find_callees` — what does it depend on? (these become candidates for extraction)
|
|
67
|
+
- `find_callers` — what depends on it? (these need updating after refactoring)
|
|
68
|
+
- `class_hierarchy` — is it part of an inheritance chain? (Liskov concerns)
|
|
69
|
+
|
|
70
|
+
### 5. Check community boundaries
|
|
71
|
+
|
|
72
|
+
Call `list_communities` and check: does the refactoring target sit at a community boundary?
|
|
73
|
+
- If yes, the refactoring may involve splitting responsibilities across modules
|
|
74
|
+
- If it belongs clearly to one community, the refactoring is more contained
|
|
75
|
+
|
|
76
|
+
### 6. Produce the refactoring plan
|
|
77
|
+
|
|
78
|
+
Synthesize into a phased plan:
|
|
79
|
+
|
|
80
|
+
**Phase 1 — Quick Wins:**
|
|
81
|
+
- Dead code removal (zero-risk deletions)
|
|
82
|
+
- Simple functions with high churn (reduce volatility)
|
|
83
|
+
|
|
84
|
+
**Phase 2 — High-Impact Refactors:**
|
|
85
|
+
- Complex + volatile functions (highest priority by the matrix)
|
|
86
|
+
- Bridge symbols with too many responsibilities (extract interfaces)
|
|
87
|
+
|
|
88
|
+
**Phase 3 — Structural Improvements:**
|
|
89
|
+
- Splitting oversized communities into smaller, focused modules
|
|
90
|
+
- Extracting shared logic from bridge symbols into dedicated services
|
|
91
|
+
|
|
92
|
+
For each item, include:
|
|
93
|
+
1. **Target** — function/class name, file, current complexity score
|
|
94
|
+
2. **Why** — complexity + volatility + blast radius rationale
|
|
95
|
+
3. **How** — specific refactoring approach (extract method, split class, introduce interface)
|
|
96
|
+
4. **Risk** — impact analysis rating + affected processes
|
|
97
|
+
5. **Test Plan** — which callers/processes to verify
|
|
98
|
+
|
|
99
|
+
## Decision Points
|
|
100
|
+
|
|
101
|
+
| Condition | Action |
|
|
102
|
+
|-----------|--------|
|
|
103
|
+
| Complex + volatile + high blast radius | Highest priority — but plan carefully; incremental approach |
|
|
104
|
+
| Complex + stable + low blast radius | Can wait; refactor when you're already touching nearby code |
|
|
105
|
+
| Dead code with zero callers | Safe to delete — quick win |
|
|
106
|
+
| Bridge symbol with many dependents | Extract interface first, then refactor implementation behind it |
|
|
107
|
+
| Symbol in cross-repo API | Coordinate with consumers; backward-compatible changes only |
|
|
108
|
+
|
|
109
|
+
## Common Mistakes
|
|
110
|
+
|
|
111
|
+
| Mistake | Reality |
|
|
112
|
+
|---------|---------|
|
|
113
|
+
| Refactoring the most complex function first | Complexity alone isn't enough — prioritize by complexity × volatility |
|
|
114
|
+
| Deleting all dead code at once | Some "dead" code is called dynamically; verify before batch deletion |
|
|
115
|
+
| Refactoring without checking blast radius | A "simple" refactor on a bridge symbol can cascade across the codebase |
|
|
116
|
+
| Not checking temporal evolution | A complex function that hasn't changed in a year is lower priority than a simpler one that changes weekly |
|