nervx 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nervx-0.1.0/LICENSE +21 -0
- nervx-0.1.0/PKG-INFO +118 -0
- nervx-0.1.0/README.md +81 -0
- nervx-0.1.0/nervx/__init__.py +3 -0
- nervx-0.1.0/nervx/attention/__init__.py +1 -0
- nervx-0.1.0/nervx/attention/briefing.py +394 -0
- nervx-0.1.0/nervx/attention/concepts.py +204 -0
- nervx-0.1.0/nervx/attention/query.py +563 -0
- nervx-0.1.0/nervx/build.py +382 -0
- nervx-0.1.0/nervx/cli/__init__.py +1 -0
- nervx-0.1.0/nervx/cli/main.py +387 -0
- nervx-0.1.0/nervx/cli/watch.py +139 -0
- nervx-0.1.0/nervx/instinct/__init__.py +1 -0
- nervx-0.1.0/nervx/instinct/patterns.py +203 -0
- nervx-0.1.0/nervx/memory/__init__.py +1 -0
- nervx-0.1.0/nervx/memory/schema.py +114 -0
- nervx-0.1.0/nervx/memory/store.py +563 -0
- nervx-0.1.0/nervx/perception/__init__.py +1 -0
- nervx-0.1.0/nervx/perception/git_miner.py +225 -0
- nervx-0.1.0/nervx/perception/lang_c.py +1245 -0
- nervx-0.1.0/nervx/perception/lang_csharp.py +1183 -0
- nervx-0.1.0/nervx/perception/lang_go.py +761 -0
- nervx-0.1.0/nervx/perception/lang_java.py +720 -0
- nervx-0.1.0/nervx/perception/lang_javascript.py +1457 -0
- nervx-0.1.0/nervx/perception/lang_ruby.py +963 -0
- nervx-0.1.0/nervx/perception/lang_rust.py +1137 -0
- nervx-0.1.0/nervx/perception/linker.py +259 -0
- nervx-0.1.0/nervx/perception/parser.py +970 -0
- nervx-0.1.0/nervx/reflexes/__init__.py +1 -0
- nervx-0.1.0/nervx/reflexes/warnings.py +217 -0
- nervx-0.1.0/nervx/viz/__init__.py +0 -0
- nervx-0.1.0/nervx/viz/export.py +460 -0
- nervx-0.1.0/nervx/viz/server.py +42 -0
- nervx-0.1.0/nervx/viz/template.html +1176 -0
- nervx-0.1.0/nervx.egg-info/PKG-INFO +118 -0
- nervx-0.1.0/nervx.egg-info/SOURCES.txt +51 -0
- nervx-0.1.0/nervx.egg-info/dependency_links.txt +1 -0
- nervx-0.1.0/nervx.egg-info/entry_points.txt +2 -0
- nervx-0.1.0/nervx.egg-info/requires.txt +14 -0
- nervx-0.1.0/nervx.egg-info/top_level.txt +1 -0
- nervx-0.1.0/pyproject.toml +57 -0
- nervx-0.1.0/setup.cfg +4 -0
- nervx-0.1.0/tests/test_briefing.py +114 -0
- nervx-0.1.0/tests/test_build.py +142 -0
- nervx-0.1.0/tests/test_export.py +221 -0
- nervx-0.1.0/tests/test_git_miner.py +130 -0
- nervx-0.1.0/tests/test_linker.py +213 -0
- nervx-0.1.0/tests/test_parser.py +394 -0
- nervx-0.1.0/tests/test_patterns.py +81 -0
- nervx-0.1.0/tests/test_query.py +221 -0
- nervx-0.1.0/tests/test_store.py +196 -0
- nervx-0.1.0/tests/test_warnings.py +103 -0
- nervx-0.1.0/tests/test_watch.py +78 -0
nervx-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Aditya Kamat
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
nervx-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: nervx
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A codebase brain for AI coding assistants — pre-indexed navigation, blast radius, dead code detection
|
|
5
|
+
Author-email: Aditya Kamat <adityakamat24@gmail.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/adityakamat24/nervx
|
|
8
|
+
Project-URL: Repository, https://github.com/adityakamat24/nervx
|
|
9
|
+
Project-URL: Issues, https://github.com/adityakamat24/nervx/issues
|
|
10
|
+
Keywords: cli,code-intelligence,static-analysis,developer-tools,claude
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Environment :: Console
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Software Development :: Quality Assurance
|
|
20
|
+
Requires-Python: >=3.10
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
Requires-Dist: tree-sitter>=0.21.0
|
|
24
|
+
Requires-Dist: tree-sitter-python>=0.21.0
|
|
25
|
+
Requires-Dist: tree-sitter-javascript>=0.21.0
|
|
26
|
+
Requires-Dist: tree-sitter-typescript>=0.21.0
|
|
27
|
+
Requires-Dist: tree-sitter-java>=0.21.0
|
|
28
|
+
Requires-Dist: tree-sitter-go>=0.21.0
|
|
29
|
+
Requires-Dist: tree-sitter-rust>=0.21.0
|
|
30
|
+
Requires-Dist: tree-sitter-c>=0.21.0
|
|
31
|
+
Requires-Dist: tree-sitter-cpp>=0.21.0
|
|
32
|
+
Requires-Dist: tree-sitter-c-sharp>=0.21.0
|
|
33
|
+
Requires-Dist: tree-sitter-ruby>=0.21.0
|
|
34
|
+
Provides-Extra: watch
|
|
35
|
+
Requires-Dist: watchdog>=3.0.0; extra == "watch"
|
|
36
|
+
Dynamic: license-file
|
|
37
|
+
|
|
38
|
+
# nervx
|
|
39
|
+
|
|
40
|
+
A codebase brain for AI coding assistants. Pre-indexed navigation, blast radius analysis, dead code detection, and architectural pattern recognition — all from a single `pip install`.
|
|
41
|
+
|
|
42
|
+
## Install
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
pip install nervx
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Quick Start
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
# Build the brain for your project
|
|
52
|
+
nervx build .
|
|
53
|
+
|
|
54
|
+
# Ask questions in natural language
|
|
55
|
+
nervx nav "how does authentication work"
|
|
56
|
+
|
|
57
|
+
# Check blast radius before refactoring
|
|
58
|
+
nervx blast-radius "src/auth.py::validate_token"
|
|
59
|
+
|
|
60
|
+
# Find dead code
|
|
61
|
+
nervx find --dead
|
|
62
|
+
|
|
63
|
+
# Find untested critical code
|
|
64
|
+
nervx find --no-tests --importance-gt 20
|
|
65
|
+
|
|
66
|
+
# Open interactive visualization
|
|
67
|
+
nervx viz .
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## What It Does
|
|
71
|
+
|
|
72
|
+
nervx parses your codebase with tree-sitter, builds a graph of every function, class, and method, then pre-computes:
|
|
73
|
+
|
|
74
|
+
- **Edges**: who calls what, who imports what, who inherits from what
|
|
75
|
+
- **Importance scores**: based on caller count, cross-module usage, and connectivity
|
|
76
|
+
- **Architectural patterns**: factories, singletons, event buses, strategy patterns, repositories
|
|
77
|
+
- **Concept paths**: end-to-end call chains and domain clusters
|
|
78
|
+
- **Git intelligence**: hotspots, temporal coupling, churn analysis
|
|
79
|
+
- **Contract analysis**: callers that disagree on error handling
|
|
80
|
+
- **Dead code**: unreferenced functions and classes
|
|
81
|
+
|
|
82
|
+
All stored in a single SQLite database (`.nervx/brain.db`), queryable in milliseconds.
|
|
83
|
+
|
|
84
|
+
## Commands
|
|
85
|
+
|
|
86
|
+
| Command | What it does |
|
|
87
|
+
|---------|-------------|
|
|
88
|
+
| `nervx build <path>` | Full build of the brain |
|
|
89
|
+
| `nervx update <path>` | Incremental update (only changed files) |
|
|
90
|
+
| `nervx nav "<question>"` | Natural language navigation with execution flows |
|
|
91
|
+
| `nervx blast-radius "<symbol>"` | Impact analysis for refactoring |
|
|
92
|
+
| `nervx find --dead` | Find unreferenced symbols |
|
|
93
|
+
| `nervx find --no-tests` | Find untested code |
|
|
94
|
+
| `nervx flows [keyword]` | Show execution paths |
|
|
95
|
+
| `nervx diff --days 7` | Recent structural changes |
|
|
96
|
+
| `nervx viz .` | Interactive D3 visualization |
|
|
97
|
+
| `nervx stats` | Graph statistics |
|
|
98
|
+
|
|
99
|
+
## Claude Code Integration
|
|
100
|
+
|
|
101
|
+
When you run `nervx build`, it automatically adds instructions to your project's `CLAUDE.md` that teach Claude Code to use nervx commands. Claude will use `nervx nav` before exploring code, check blast radius before refactoring, and find dead code before cleanup — saving tokens and tool calls.
|
|
102
|
+
|
|
103
|
+
## Supported Languages
|
|
104
|
+
|
|
105
|
+
Python, JavaScript/TypeScript, Java, Go, Rust, C/C++, C#, Ruby
|
|
106
|
+
|
|
107
|
+
## Watch Mode (Optional)
|
|
108
|
+
|
|
109
|
+
```bash
|
|
110
|
+
pip install nervx[watch]
|
|
111
|
+
nervx watch .
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
Auto-updates the brain when files change.
|
|
115
|
+
|
|
116
|
+
## License
|
|
117
|
+
|
|
118
|
+
MIT
|
nervx-0.1.0/README.md
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# nervx
|
|
2
|
+
|
|
3
|
+
A codebase brain for AI coding assistants. Pre-indexed navigation, blast radius analysis, dead code detection, and architectural pattern recognition — all from a single `pip install`.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install nervx
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
# Build the brain for your project
|
|
15
|
+
nervx build .
|
|
16
|
+
|
|
17
|
+
# Ask questions in natural language
|
|
18
|
+
nervx nav "how does authentication work"
|
|
19
|
+
|
|
20
|
+
# Check blast radius before refactoring
|
|
21
|
+
nervx blast-radius "src/auth.py::validate_token"
|
|
22
|
+
|
|
23
|
+
# Find dead code
|
|
24
|
+
nervx find --dead
|
|
25
|
+
|
|
26
|
+
# Find untested critical code
|
|
27
|
+
nervx find --no-tests --importance-gt 20
|
|
28
|
+
|
|
29
|
+
# Open interactive visualization
|
|
30
|
+
nervx viz .
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## What It Does
|
|
34
|
+
|
|
35
|
+
nervx parses your codebase with tree-sitter, builds a graph of every function, class, and method, then pre-computes:
|
|
36
|
+
|
|
37
|
+
- **Edges**: who calls what, who imports what, who inherits from what
|
|
38
|
+
- **Importance scores**: based on caller count, cross-module usage, and connectivity
|
|
39
|
+
- **Architectural patterns**: factories, singletons, event buses, strategy patterns, repositories
|
|
40
|
+
- **Concept paths**: end-to-end call chains and domain clusters
|
|
41
|
+
- **Git intelligence**: hotspots, temporal coupling, churn analysis
|
|
42
|
+
- **Contract analysis**: callers that disagree on error handling
|
|
43
|
+
- **Dead code**: unreferenced functions and classes
|
|
44
|
+
|
|
45
|
+
All stored in a single SQLite database (`.nervx/brain.db`), queryable in milliseconds.
|
|
46
|
+
|
|
47
|
+
## Commands
|
|
48
|
+
|
|
49
|
+
| Command | What it does |
|
|
50
|
+
|---------|-------------|
|
|
51
|
+
| `nervx build <path>` | Full build of the brain |
|
|
52
|
+
| `nervx update <path>` | Incremental update (only changed files) |
|
|
53
|
+
| `nervx nav "<question>"` | Natural language navigation with execution flows |
|
|
54
|
+
| `nervx blast-radius "<symbol>"` | Impact analysis for refactoring |
|
|
55
|
+
| `nervx find --dead` | Find unreferenced symbols |
|
|
56
|
+
| `nervx find --no-tests` | Find untested code |
|
|
57
|
+
| `nervx flows [keyword]` | Show execution paths |
|
|
58
|
+
| `nervx diff --days 7` | Recent structural changes |
|
|
59
|
+
| `nervx viz .` | Interactive D3 visualization |
|
|
60
|
+
| `nervx stats` | Graph statistics |
|
|
61
|
+
|
|
62
|
+
## Claude Code Integration
|
|
63
|
+
|
|
64
|
+
When you run `nervx build`, it automatically adds instructions to your project's `CLAUDE.md` that teach Claude Code to use nervx commands. Claude will use `nervx nav` before exploring code, check blast radius before refactoring, and find dead code before cleanup — saving tokens and tool calls.
|
|
65
|
+
|
|
66
|
+
## Supported Languages
|
|
67
|
+
|
|
68
|
+
Python, JavaScript/TypeScript, Java, Go, Rust, C/C++, C#, Ruby
|
|
69
|
+
|
|
70
|
+
## Watch Mode (Optional)
|
|
71
|
+
|
|
72
|
+
```bash
|
|
73
|
+
pip install nervx[watch]
|
|
74
|
+
nervx watch .
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Auto-updates the brain when files change.
|
|
78
|
+
|
|
79
|
+
## License
|
|
80
|
+
|
|
81
|
+
MIT
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Attention layer: query engine, briefing, concept paths."""
|
|
@@ -0,0 +1,394 @@
|
|
|
1
|
+
"""NERVX.md briefing generator."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
from collections import Counter, defaultdict
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from nervx.memory.store import GraphStore
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Known framework indicators
|
|
14
|
+
# Known framework indicators.
# Maps a lowercase keyword (searched via the store's keyword index in
# _detect_tech_stack) to the display label used in the NERVX.md tech-stack
# line.  Add entries here to teach the briefing about new frameworks.
FRAMEWORK_INDICATORS = {
    "fastapi": "FastAPI",
    "flask": "Flask",
    "django": "Django",
    "asyncio": "asyncio",
    "websockets": "WebSockets",
    "react": "React",
    "express": "Express",
    "sqlalchemy": "SQLAlchemy",
    "pydantic": "Pydantic",
    "pytest": "pytest",
    "torch": "PyTorch",
    "tensorflow": "TensorFlow",
}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def generate_briefing(store: GraphStore, repo_root: str) -> str:
    """Generate NERVX.md content from graph data.

    Builds a compact, capped markdown briefing: header with tech stack and
    graph size, then one section per signal (module map, entry points, key
    flows, detected patterns, git hotspots, fragile zones, temporal
    couplings).  Sections with no data are omitted entirely.

    Args:
        store: Open graph store backing the brain database.
        repo_root: Filesystem path of the repository; only its basename is
            used, as the briefing title.

    Returns:
        The full NERVX.md text (no trailing newline).
    """
    repo_name = Path(repo_root).name
    tech_stack = _detect_tech_stack(store)
    # Counts are stored as meta strings; fall back to "0" when a build has
    # never populated them.
    node_count = store.get_meta("node_count") or "0"
    edge_count = store.get_meta("edge_count") or "0"

    lines = [
        f"# {repo_name}",
        tech_stack,
        f"_Graph: {node_count} symbols, {edge_count} edges_",
        "",
    ]

    # Module Map (cap at 15, collapse small modules)
    module_map = _build_module_map(store)
    if module_map:
        lines.append("## Module Map")
        shown = module_map[:15]
        for d, desc in shown:
            lines.append(f" {d}/ {desc}")
        if len(module_map) > 15:
            lines.append(f" ... and {len(module_map) - 15} more modules")
        lines.append("")

    # Entry Points (cap at 10, sorted by importance)
    entry_points = _find_entry_points(store)
    if entry_points:
        # Highest importance first; treat missing importance as 0.
        entry_points.sort(key=lambda n: -(n["importance"] or 0))
        lines.append("## Entry Points")
        for ep in entry_points[:10]:
            lines.append(f" {ep['file_path']}::{ep['signature'] or ep['name']}")
        if len(entry_points) > 10:
            lines.append(f" ... and {len(entry_points) - 10} more entry points")
        lines.append("")

    # Key Flows (cap at 5)
    paths = store.get_concept_paths()
    if paths:
        lines.append("## Key Flows")
        for p in paths[:5]:
            # node_ids may be a JSON-encoded string or an already-decoded list.
            node_ids = json.loads(p["node_ids"]) if isinstance(p["node_ids"], str) else p["node_ids"]
            # Show at most the first 6 hops of the chain, then elide.
            chain = " → ".join(
                _short_name(nid) for nid in node_ids[:6]
            )
            if len(node_ids) > 6:
                chain += " → ..."
            lines.append(f" {p['name']}: {chain}")
        lines.append("")

    # Detected Patterns (cap at 10, grouped by type for conciseness)
    patterns = store.get_all_patterns()
    if patterns:
        lines.append("## Detected Patterns")
        # Group by pattern type, show count + top examples
        by_type: dict[str, list] = defaultdict(list)
        for p in patterns:
            by_type[p["pattern"]].append(p)
        for ptype in sorted(by_type.keys()):
            items = by_type[ptype]
            if len(items) <= 3:
                # Few instances: list each one with its implication.
                for p in items:
                    node = store.get_node(p["node_id"])
                    loc = node["name"] if node else p["node_id"]
                    lines.append(f" {ptype.upper()}: {loc} → {p['implication']}")
            else:
                # Show count + top 2 examples
                lines.append(f" {ptype.upper()} ({len(items)} instances):")
                for p in items[:2]:
                    node = store.get_node(p["node_id"])
                    loc = node["name"] if node else p["node_id"]
                    lines.append(f" {loc} → {p['implication']}")
                lines.append(f" ... and {len(items) - 2} more")
        lines.append("")

    # Hotspots (cap at 5)
    hotspots = _find_hotspots(store)
    if hotspots:
        lines.append("## Hotspots (last 30 days)")
        for h in hotspots[:5]:
            lines.append(
                f" {h['file_path']} {h['commits_30d']} commits "
                f"(by {h['primary_author']}, {h['author_count']} authors)"
            )
        if len(hotspots) > 5:
            lines.append(f" ... and {len(hotspots) - 5} more active files")
        lines.append("")

    # Fragile Zones (cap at 5)
    fragile = _find_fragile_zones(store)
    if fragile:
        lines.append("## Fragile Zones")
        for f_node, reason in fragile[:5]:
            lines.append(
                f" {f_node['file_path']}::{f_node['name']} "
                f"importance={f_node['importance']:.1f}, {reason}"
            )
        lines.append("")

    # Temporal Couplings (cap at 5)
    couplings = _find_temporal_couplings(store)
    if couplings:
        lines.append("## Temporal Couplings")
        for c in couplings[:5]:
            lines.append(
                f" {c['file_a']} <-> {c['file_b']} "
                f"({int(c['coupling_score'] * 100)}%, {c['co_commit_count']} co-commits)"
            )
        lines.append("")

    return "\n".join(lines)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
# ── CLAUDE.md integration ─────────────────────────────────────────
|
|
144
|
+
|
|
145
|
+
# Delimiters wrapping the nervx-managed section of CLAUDE.md.  They let
# inject_claude_md() find and replace the section on rebuilds without
# disturbing any user-authored content around it.
_CLAUDE_MD_START = "<!-- nervx:start -->"
_CLAUDE_MD_END = "<!-- nervx:end -->"
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def generate_claude_instructions() -> str:
    """Generate the nervx section for CLAUDE.md.

    Returns the markdown block wrapped in the _CLAUDE_MD_START/_CLAUDE_MD_END
    delimiters so inject_claude_md() can later locate and replace it in place.
    """
    # NOTE: the only f-string substitutions below are the two delimiter
    # constants; everything else is literal markdown emitted verbatim.
    return f"""{_CLAUDE_MD_START}
## nervx — Codebase Intelligence

This project has a nervx brain (`.nervx/brain.db`). **Use nervx commands before falling back to Grep/Glob/Read** — they return pre-indexed results in ~400 tokens instead of multi-step exploration costing thousands.

### Commands (run via Bash tool)

| Command | When to use | Example |
|---------|-------------|---------|
| `nervx nav "<question>"` | Before exploring code for any task | `nervx nav "how does auth work"` |
| `nervx blast-radius "<symbol_id>"` | Before refactoring a function/class | `nervx blast-radius "src/api.py::handle_request"` |
| `nervx find --dead` | Finding unreferenced dead code | `nervx find --dead --kind function` |
| `nervx find --no-tests --importance-gt 20` | Finding untested critical code | `nervx find --kind function --no-tests` |
| `nervx flows <keyword>` | Tracing execution paths | `nervx flows auth` |
| `nervx diff --days 7` | Seeing recent structural changes | `nervx diff --days 30` |

### What navigate returns

`nervx nav` returns ranked symbols, **execution flows** (call chains traced from matches), connected symbols, suggested read order, and warnings — all in one query.

### Workflow

1. **Start of session**: Read `NERVX.md` for project overview (module map, entry points, patterns, fragile zones)
2. **Before any code exploration**: Run `nervx nav "<your question>"` first — it returns the right files, line ranges, execution flows, read order, and warnings
3. **Before refactoring**: Run `nervx blast-radius "<symbol>"` to see all downstream callers (saves multiple rounds of grep)
4. **Before cleanup**: Run `nervx find --dead` to find unreferenced symbols that may be safe to remove
5. **Only then** fall back to Grep/Read for details nervx didn't cover

### Symbol ID format
Symbol IDs use the format `file_path::ClassName.method_name` or `file_path::function_name`. Example: `server/main.py::handle_request`
{_CLAUDE_MD_END}"""
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def inject_claude_md(repo_root: str) -> bool:
    """Add or update the nervx section in the project's CLAUDE.md.

    Three cases, handled as guard clauses below:
      1. No CLAUDE.md — create one containing just the nervx section.
      2. CLAUDE.md exists without a nervx section — append one.
      3. A delimited nervx section already exists — regex-replace it in place.

    Returns True if CLAUDE.md was modified.
    """
    claude_md_path = os.path.join(repo_root, "CLAUDE.md")
    new_section = generate_claude_instructions()

    if not os.path.exists(claude_md_path):
        # Fresh file: the nervx section is its entire content.
        with open(claude_md_path, "w", encoding="utf-8") as f:
            f.write(new_section + "\n")
        return True

    with open(claude_md_path, "r", encoding="utf-8") as f:
        content = f.read()

    if _CLAUDE_MD_START not in content:
        # Existing file, no nervx section yet: append at the end.
        with open(claude_md_path, "a", encoding="utf-8") as f:
            f.write("\n\n" + new_section + "\n")
        return True

    # Replace the existing delimited section, DOTALL so it may span lines.
    import re
    pattern = re.escape(_CLAUDE_MD_START) + r".*?" + re.escape(_CLAUDE_MD_END)
    updated = re.sub(pattern, new_section, content, flags=re.DOTALL)
    if updated == content:
        # Section already up to date — leave the file untouched.
        return False
    with open(claude_md_path, "w", encoding="utf-8") as f:
        f.write(updated)
    return True
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _detect_tech_stack(store: GraphStore) -> str:
    """Detect tech stack from imports and file patterns.

    Probes the store's keyword index for each entry in FRAMEWORK_INDICATORS
    and collects the matching display labels.

    Fixes over the previous version: the loop that collected every node name
    and signature into ``all_names`` was dead code (built but never read,
    costing a full node scan per call), and the trailing ``return "Python"``
    fallback was unreachable because "Python" is always in the detected set.
    Both have been removed; the observable result is unchanged.

    Args:
        store: Open graph store with a ``search_keywords`` index.

    Returns:
        Comma-separated, alphabetically sorted technology labels.
        "Python" is always included as a baseline.
    """
    # Baseline: the brain itself is built from (at least) Python sources.
    detected = {"Python"}

    # A non-empty keyword hit for an indicator means the framework is used.
    for framework, label in FRAMEWORK_INDICATORS.items():
        if store.search_keywords([framework]):
            detected.add(label)

    return ", ".join(sorted(detected))
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def _build_module_map(store: GraphStore) -> list[tuple[str, str]]:
    """Build module map with auto-descriptions.

    Groups every node by the first path component of its file_path and
    attaches a short description per directory.  Nodes whose file_path has
    no "/" (top-level files) are skipped — they belong to no module.

    Returns:
        (directory, description) pairs sorted by directory name.
    """
    by_dir: dict[str, list[dict]] = defaultdict(list)
    for node in store.get_all_nodes():
        path = node["file_path"]
        if "/" not in path:
            continue
        by_dir[path.split("/")[0]].append(node)

    return [(name, _describe_module(by_dir[name])) for name in sorted(by_dir)]
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
def _describe_module(nodes: list[dict]) -> str:
    """Auto-describe a module based on dominant tags and kinds.

    A module is labelled after its dominant tag (checked in priority order
    with per-tag share thresholds); when no tag dominates, it falls back to
    a "N classes, M functions" summary, or "module" when empty.
    """
    kind_counts: Counter = Counter(n["kind"] for n in nodes)
    tag_counts: Counter = Counter()
    for n in nodes:
        raw = n["tags"]
        tags = json.loads(raw) if isinstance(raw, str) else raw
        # "extends:" tags record inheritance targets, not module character.
        tag_counts.update(t for t in tags if not t.startswith("extends:"))

    # Dominant-tag labels, checked in priority order.  The share is the
    # fraction of nodes that must carry the tag (strictly greater-than).
    total = len(nodes)
    for tag, share, label in (
        ("test", 0.3, "tests"),
        ("route_handler", 0.3, "API routes/handlers"),
        ("data_model", 0.3, "data models"),
        ("callback", 0.3, "event handlers"),
        ("factory", 0.2, "factories"),
    ):
        if tag_counts.get(tag, 0) > total * share:
            return label

    # No dominant tag: summarize by symbol kinds instead.
    pieces = []
    n_classes = kind_counts.get("class", 0)
    if n_classes:
        pieces.append(f"{n_classes} classes")
    n_funcs = kind_counts.get("function", 0) + kind_counts.get("method", 0)
    if n_funcs:
        pieces.append(f"{n_funcs} functions")
    return ", ".join(pieces) if pieces else "module"
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
def _find_entry_points(store: GraphStore) -> list[dict]:
    """Return every node tagged as an entry point.

    Route handlers count as entry points too: both are places where
    execution flows begin.  Tags may be stored JSON-encoded or as a list.
    """
    result: list[dict] = []
    for node in store.get_all_nodes():
        tags = node["tags"]
        if isinstance(tags, str):
            tags = json.loads(tags)
        if "entrypoint" in tags or "route_handler" in tags:
            result.append(node)
    return result
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def _find_hotspots(store: GraphStore) -> list[dict]:
    """Return files with commit activity in the last 30 days, most active first.

    Files with zero recent commits are dropped; ties keep the store's
    original order (sorted() is stable even with reverse=True).
    """
    active = (s for s in store.get_all_file_stats() if s["commits_30d"] > 0)
    return sorted(active, key=lambda s: s["commits_30d"], reverse=True)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def _find_fragile_zones(store: GraphStore) -> list[tuple[dict, str]]:
    """Find important nodes with hazards.

    Thresholds:
    - importance >= 25 with any warning qualifies
    - importance >= 15 needs a serious warning (many callers, contract conflict)
      or multiple warning types

    Returns:
        (node, reason-string) pairs sorted by descending importance, where
        the reason string is a comma-joined list of hazards.
    """
    conflict_ids = set(store.get_contract_conflicts())
    result = []
    for node in store.get_all_nodes():
        importance = node["importance"] or 0.0
        # Below 15 a node can never qualify regardless of hazards.
        if importance < 15:
            continue
        # File-level nodes aggregate their children; skip to avoid doubles.
        if node["kind"] == "file":
            continue

        reasons = []
        has_serious = False

        # No test coverage: a node is "tested" when any neighbor (either
        # edge direction) carries the "test" tag.  Test nodes themselves
        # are exempt from this check.
        tags = json.loads(node["tags"]) if isinstance(node["tags"], str) else node["tags"]
        if "test" not in tags:
            has_test = False
            edges = store.get_edges_from(node["id"]) + store.get_edges_to(node["id"])
            for e in edges:
                other_id = e["target_id"] if e["source_id"] == node["id"] else e["source_id"]
                other = store.get_node(other_id)
                if other:
                    otags = json.loads(other["tags"]) if isinstance(other["tags"], str) else other["tags"]
                    if "test" in otags:
                        has_test = True
                        break
            if not has_test:
                reasons.append("no tests")

        # Many callers
        # NOTE(review): callers are read as outgoing edges of type
        # "called_by" — assumes the store materializes reverse call edges;
        # confirm against the edge schema in memory/schema.py.
        called_by = [e for e in store.get_edges_from(node["id"])
                     if e["edge_type"] == "called_by"]
        if len(called_by) >= 8:
            reasons.append(f"{len(called_by)} callers")
            has_serious = True

        # Contract conflicts
        if node["id"] in conflict_ids:
            reasons.append("contract conflict")
            has_serious = True

        if not reasons:
            continue

        # Gate: importance >= 25 always qualifies; 15-25 needs serious or multiple
        if importance < 25 and not has_serious and len(reasons) < 2:
            continue

        result.append((node, ", ".join(reasons)))

    result.sort(key=lambda x: -x[0]["importance"])
    return result
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
def _find_temporal_couplings(store: GraphStore) -> list[dict]:
    """Return file pairs with coupling score >= 0.4, strongest first.

    Reads the cochanges table directly off the store's connection and
    converts each row to a plain dict for the caller.
    """
    query = "SELECT * FROM cochanges WHERE coupling_score >= 0.4 ORDER BY coupling_score DESC"
    return [dict(row) for row in store.conn.execute(query)]
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
def _short_name(node_id: str) -> str:
    """Return the trailing symbol or file name of a node ID.

    IDs look like ``path::Class.method`` (symbols) or ``path/to/file``
    (files); everything up to the last ``::`` — or, failing that, the
    last ``/`` — is dropped.  Plain names pass through unchanged.
    """
    if "::" in node_id:
        return node_id.rsplit("::", 1)[-1]
    return node_id.rsplit("/", 1)[-1]
|