mcp-vector-search 0.12.6__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- mcp_vector_search/__init__.py +2 -2
- mcp_vector_search/analysis/__init__.py +64 -0
- mcp_vector_search/analysis/collectors/__init__.py +39 -0
- mcp_vector_search/analysis/collectors/base.py +164 -0
- mcp_vector_search/analysis/collectors/complexity.py +743 -0
- mcp_vector_search/analysis/metrics.py +341 -0
- mcp_vector_search/analysis/reporters/__init__.py +5 -0
- mcp_vector_search/analysis/reporters/console.py +222 -0
- mcp_vector_search/cli/commands/analyze.py +408 -0
- mcp_vector_search/cli/commands/chat.py +1262 -0
- mcp_vector_search/cli/commands/index.py +21 -3
- mcp_vector_search/cli/commands/init.py +13 -0
- mcp_vector_search/cli/commands/install.py +597 -335
- mcp_vector_search/cli/commands/install_old.py +8 -4
- mcp_vector_search/cli/commands/mcp.py +78 -6
- mcp_vector_search/cli/commands/reset.py +68 -26
- mcp_vector_search/cli/commands/search.py +30 -7
- mcp_vector_search/cli/commands/setup.py +1133 -0
- mcp_vector_search/cli/commands/status.py +37 -2
- mcp_vector_search/cli/commands/uninstall.py +276 -357
- mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
- mcp_vector_search/cli/commands/visualize/cli.py +276 -0
- mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
- mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +29 -0
- mcp_vector_search/cli/commands/visualize/graph_builder.py +714 -0
- mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
- mcp_vector_search/cli/commands/visualize/server.py +311 -0
- mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
- mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
- mcp_vector_search/cli/commands/visualize/templates/base.py +180 -0
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2507 -0
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1313 -0
- mcp_vector_search/cli/commands/visualize.py.original +2536 -0
- mcp_vector_search/cli/didyoumean.py +22 -2
- mcp_vector_search/cli/main.py +115 -159
- mcp_vector_search/cli/output.py +24 -8
- mcp_vector_search/config/__init__.py +4 -0
- mcp_vector_search/config/default_thresholds.yaml +52 -0
- mcp_vector_search/config/settings.py +12 -0
- mcp_vector_search/config/thresholds.py +185 -0
- mcp_vector_search/core/auto_indexer.py +3 -3
- mcp_vector_search/core/boilerplate.py +186 -0
- mcp_vector_search/core/config_utils.py +394 -0
- mcp_vector_search/core/database.py +369 -94
- mcp_vector_search/core/exceptions.py +11 -0
- mcp_vector_search/core/git_hooks.py +4 -4
- mcp_vector_search/core/indexer.py +221 -4
- mcp_vector_search/core/llm_client.py +751 -0
- mcp_vector_search/core/models.py +3 -0
- mcp_vector_search/core/project.py +17 -0
- mcp_vector_search/core/scheduler.py +11 -11
- mcp_vector_search/core/search.py +179 -29
- mcp_vector_search/mcp/server.py +24 -5
- mcp_vector_search/utils/__init__.py +2 -0
- mcp_vector_search/utils/gitignore_updater.py +212 -0
- mcp_vector_search/utils/monorepo.py +66 -4
- mcp_vector_search/utils/timing.py +10 -6
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/METADATA +182 -52
- mcp_vector_search-1.0.3.dist-info/RECORD +97 -0
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/WHEEL +1 -1
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/entry_points.txt +1 -0
- mcp_vector_search/cli/commands/visualize.py +0 -1467
- mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/licenses/LICENSE +0 -0
`mcp_vector_search/analysis/metrics.py` (new file, `@@ -0,0 +1,341 @@`):

```python
"""Metric dataclasses for structural code analysis."""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from ..config.thresholds import ThresholdConfig


@dataclass
class ChunkMetrics:
    """Metrics for a single code chunk (function/class/method).

    Tracks complexity metrics, code smells, and computes quality grades
    for individual code chunks.

    Attributes:
        cognitive_complexity: Cognitive complexity score (how hard to understand)
        cyclomatic_complexity: Cyclomatic complexity (number of decision paths)
        max_nesting_depth: Maximum nesting level (if/for/while/try depth)
        parameter_count: Number of function parameters
        lines_of_code: Total lines in the chunk
        smells: List of detected code smells (e.g., "too_many_parameters")
        complexity_grade: Computed A-F grade based on cognitive complexity
    """

    cognitive_complexity: int = 0
    cyclomatic_complexity: int = 0
    max_nesting_depth: int = 0
    parameter_count: int = 0
    lines_of_code: int = 0

    # Code smells detected
    smells: list[str] = field(default_factory=list)

    # Computed grades (A-F scale)
    complexity_grade: str = field(init=False, default="A")

    def __post_init__(self) -> None:
        """Initialize computed fields after dataclass initialization."""
        self.complexity_grade = self._compute_grade()

    def _compute_grade(self, thresholds: ThresholdConfig | None = None) -> str:
        """Compute A-F grade based on cognitive complexity.

        Args:
            thresholds: Optional custom threshold configuration.
                If None, uses default thresholds.

        Grade thresholds (defaults):
        - A: 0-5 (excellent)
        - B: 6-10 (good)
        - C: 11-20 (acceptable)
        - D: 21-30 (needs improvement)
        - F: 31+ (refactor recommended)

        Returns:
            Letter grade from A to F
        """
        if thresholds is None:
            # Use default thresholds
            if self.cognitive_complexity <= 5:
                return "A"
            elif self.cognitive_complexity <= 10:
                return "B"
            elif self.cognitive_complexity <= 20:
                return "C"
            elif self.cognitive_complexity <= 30:
                return "D"
            else:
                return "F"
        else:
            # Use custom thresholds
            return thresholds.get_grade(self.cognitive_complexity)

    def to_metadata(self) -> dict[str, Any]:
        """Flatten metrics for ChromaDB metadata storage.

        ChromaDB supports: str, int, float, bool.
        Lists are converted to JSON strings for compatibility.

        Returns:
            Dictionary of flattened metrics compatible with ChromaDB
        """
        import json

        return {
            "cognitive_complexity": self.cognitive_complexity,
            "cyclomatic_complexity": self.cyclomatic_complexity,
            "max_nesting_depth": self.max_nesting_depth,
            "parameter_count": self.parameter_count,
            "lines_of_code": self.lines_of_code,
            "complexity_grade": self.complexity_grade,
            "code_smells": json.dumps(self.smells),  # Convert list to JSON string
            "smell_count": len(self.smells),
        }
```
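Two details worth noting: `__post_init__` computes the grade once, at construction time, with the default thresholds (a custom `ThresholdConfig` only takes effect if `_compute_grade(thresholds)` is called explicitly), and `to_metadata()` JSON-encodes the smell list because ChromaDB metadata values must be scalars. A minimal round-trip sketch, with metric values invented for illustration:

```python
import json

# Hypothetical numbers for a moderately complex function.
chunk = ChunkMetrics(
    cognitive_complexity=12,
    cyclomatic_complexity=7,
    parameter_count=4,
    lines_of_code=48,
    smells=["too_many_parameters"],
)
assert chunk.complexity_grade == "C"  # 11-20 maps to C by default

# Flattened for ChromaDB: scalars pass through; the smell list becomes
# a JSON string that consumers must json.loads() on the way back out.
meta = chunk.to_metadata()
assert meta["smell_count"] == 1
assert json.loads(meta["code_smells"]) == ["too_many_parameters"]
```

The module continues with file-level aggregation: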
```python
@dataclass
class FileMetrics:
    """Aggregated metrics for an entire file.

    Tracks file-level statistics and aggregates chunk metrics for all
    functions/classes within the file.

    Attributes:
        file_path: Relative or absolute path to the file
        total_lines: Total lines in file (including blank/comments)
        code_lines: Lines containing code
        comment_lines: Lines containing comments
        blank_lines: Blank lines
        function_count: Number of top-level functions
        class_count: Number of classes
        method_count: Number of methods (functions inside classes)
        total_complexity: Sum of cognitive complexity across all chunks
        avg_complexity: Average cognitive complexity per chunk
        max_complexity: Maximum cognitive complexity in any chunk
        chunks: List of chunk metrics for each function/class
    """

    file_path: str
    total_lines: int = 0
    code_lines: int = 0
    comment_lines: int = 0
    blank_lines: int = 0

    function_count: int = 0
    class_count: int = 0
    method_count: int = 0

    # Aggregated complexity
    total_complexity: int = 0
    avg_complexity: float = 0.0
    max_complexity: int = 0

    # Chunk metrics for each function/class
    chunks: list[ChunkMetrics] = field(default_factory=list)

    def compute_aggregates(self) -> None:
        """Compute aggregate metrics from chunk metrics.

        Calculates total_complexity, avg_complexity, and max_complexity
        by aggregating values from all chunks.
        """
        if not self.chunks:
            self.total_complexity = 0
            self.avg_complexity = 0.0
            self.max_complexity = 0
            return

        # Compute complexity aggregates
        complexities = [chunk.cognitive_complexity for chunk in self.chunks]
        self.total_complexity = sum(complexities)
        self.avg_complexity = self.total_complexity / len(self.chunks)
        self.max_complexity = max(complexities)

    @property
    def health_score(self) -> float:
        """Calculate 0.0-1.0 health score based on metrics.

        Health score considers:
        - Average complexity (lower is better)
        - Code smells count (fewer is better)
        - Comment ratio (balanced is better)

        Returns:
            Health score from 0.0 (poor) to 1.0 (excellent)
        """
        score = 1.0

        # Penalty for high average complexity (A=0%, B=-10%, C=-20%, D=-30%, F=-50%)
        if self.avg_complexity > 30:
            score -= 0.5
        elif self.avg_complexity > 20:
            score -= 0.3
        elif self.avg_complexity > 10:
            score -= 0.2
        elif self.avg_complexity > 5:
            score -= 0.1

        # Penalty for code smells (up to -30%)
        total_smells = sum(len(chunk.smells) for chunk in self.chunks)
        smell_penalty = min(0.3, total_smells * 0.05)  # 5% per smell, max 30%
        score -= smell_penalty

        # Penalty for poor comment ratio (ideal: 10-30%)
        if self.total_lines > 0:
            comment_ratio = self.comment_lines / self.total_lines
            if comment_ratio < 0.1:  # Too few comments
                score -= 0.1
            elif comment_ratio > 0.5:  # Too many comments (suspicious)
                score -= 0.1

        return max(0.0, score)  # Clamp to 0.0 minimum
```
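The health score is a simple additive-penalty model: start at 1.0, subtract for complexity, smells, and a skewed comment ratio, then clamp at 0.0. A worked example under those rules, with numbers invented for illustration:

```python
# One file, two chunks: cognitive complexities 12 and 18, one smell,
# 100 total lines of which 5 are comments.
fm = FileMetrics(
    file_path="example.py",
    total_lines=100,
    comment_lines=5,
    chunks=[
        ChunkMetrics(cognitive_complexity=12),
        ChunkMetrics(cognitive_complexity=18, smells=["too_many_parameters"]),
    ],
)
fm.compute_aggregates()
assert fm.avg_complexity == 15.0  # (12 + 18) / 2

# Penalties: -0.2 (avg > 10), -0.05 (one smell), -0.1 (comment ratio 0.05 < 0.1)
assert round(fm.health_score, 2) == 0.65
```

The module closes with project-wide aggregation: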
```python
@dataclass
class ProjectMetrics:
    """Project-wide metric aggregates.

    Tracks project-level statistics and identifies complexity hotspots
    across the entire codebase.

    Attributes:
        project_root: Root directory of the project
        analyzed_at: Timestamp when analysis was performed
        total_files: Total number of analyzed files
        total_lines: Total lines across all files
        total_functions: Total number of functions
        total_classes: Total number of classes
        files: Dictionary mapping file paths to FileMetrics
        avg_file_complexity: Average complexity across all files
        hotspots: List of file paths with highest complexity (top 10)
    """

    project_root: str
    analyzed_at: datetime = field(default_factory=datetime.now)

    total_files: int = 0
    total_lines: int = 0
    total_functions: int = 0
    total_classes: int = 0

    # File metrics indexed by path
    files: dict[str, FileMetrics] = field(default_factory=dict)

    # Project-wide aggregates
    avg_file_complexity: float = 0.0
    hotspots: list[str] = field(default_factory=list)  # Top 10 complex files

    def compute_aggregates(self) -> None:
        """Compute project-wide aggregates from file metrics.

        Calculates:
        - Total files, lines, functions, classes
        - Average file complexity
        - Identifies complexity hotspots
        """
        if not self.files:
            self.total_files = 0
            self.total_lines = 0
            self.total_functions = 0
            self.total_classes = 0
            self.avg_file_complexity = 0.0
            self.hotspots = []
            return

        # Compute totals
        self.total_files = len(self.files)
        self.total_lines = sum(f.total_lines for f in self.files.values())
        self.total_functions = sum(f.function_count for f in self.files.values())
        self.total_classes = sum(f.class_count for f in self.files.values())

        # Compute average file complexity
        file_complexities = [f.avg_complexity for f in self.files.values() if f.chunks]
        if file_complexities:
            self.avg_file_complexity = sum(file_complexities) / len(file_complexities)
        else:
            self.avg_file_complexity = 0.0

        # Identify hotspots (top 10 most complex files)
        hotspot_files = self.get_hotspots(limit=10)
        self.hotspots = [f.file_path for f in hotspot_files]

    def get_hotspots(self, limit: int = 10) -> list[FileMetrics]:
        """Return top N most complex files.

        Complexity is determined by average cognitive complexity per chunk.
        Files with no chunks are excluded.

        Args:
            limit: Maximum number of hotspots to return

        Returns:
            List of FileMetrics sorted by complexity (highest first)
        """
        # Filter files with chunks and sort by avg complexity
        files_with_complexity = [f for f in self.files.values() if f.chunks]
        sorted_files = sorted(
            files_with_complexity, key=lambda f: f.avg_complexity, reverse=True
        )
        return sorted_files[:limit]

    def to_summary(self) -> dict[str, Any]:
        """Generate summary dict for reporting.

        Returns:
            Dictionary containing project summary with key metrics
        """
        return {
            "project_root": self.project_root,
            "analyzed_at": self.analyzed_at.isoformat(),
            "total_files": self.total_files,
            "total_lines": self.total_lines,
            "total_functions": self.total_functions,
            "total_classes": self.total_classes,
            "avg_file_complexity": round(self.avg_file_complexity, 2),
            "hotspots": self.hotspots,
            "complexity_distribution": self._compute_grade_distribution(),
            "health_metrics": {
                "avg_health_score": self._compute_avg_health_score(),
                "files_needing_attention": self._count_files_needing_attention(),
            },
        }

    def _compute_grade_distribution(self) -> dict[str, int]:
        """Compute distribution of complexity grades across all chunks.

        Returns:
            Dictionary mapping grade (A-F) to count of chunks
        """
        distribution: dict[str, int] = {"A": 0, "B": 0, "C": 0, "D": 0, "F": 0}

        for file_metrics in self.files.values():
            for chunk in file_metrics.chunks:
                distribution[chunk.complexity_grade] += 1

        return distribution

    def _compute_avg_health_score(self) -> float:
        """Compute average health score across all files.

        Returns:
            Average health score from 0.0 to 1.0
        """
        if not self.files:
            return 1.0

        health_scores = [f.health_score for f in self.files.values()]
        return sum(health_scores) / len(health_scores)

    def _count_files_needing_attention(self) -> int:
        """Count files with health score below 0.7.

        Returns:
            Number of files that need attention
        """
        return sum(1 for f in self.files.values() if f.health_score < 0.7)
```
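Continuing the sketch above, the three levels compose like this (note that `to_summary()` relies on the underscore-prefixed helpers, which the console reporter below also calls directly):

```python
pm = ProjectMetrics(project_root="/path/to/project")
pm.files["example.py"] = fm  # the FileMetrics from the previous sketch
pm.compute_aggregates()
assert pm.total_files == 1
assert pm.hotspots == ["example.py"]  # the only file with chunks

summary = pm.to_summary()
assert summary["avg_file_complexity"] == 15.0
assert summary["complexity_distribution"] == {"A": 0, "B": 0, "C": 2, "D": 0, "F": 0}
```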
`mcp_vector_search/analysis/reporters/console.py` (new file, `@@ -0,0 +1,222 @@`):

```python
"""Console reporter for code analysis results."""

from __future__ import annotations

from typing import TYPE_CHECKING

from rich.console import Console
from rich.table import Table

if TYPE_CHECKING:
    from ..metrics import ProjectMetrics

console = Console()


class ConsoleReporter:
    """Console reporter for displaying analysis results in terminal."""

    def print_summary(self, metrics: ProjectMetrics) -> None:
        """Print high-level project summary.

        Args:
            metrics: Project metrics to display
        """
        console.print("\n[bold blue]📈 Code Complexity Analysis[/bold blue]")
        console.print("━" * 60)
        console.print()

        console.print("[bold]Project Summary[/bold]")
        console.print(f"  Files Analyzed: {metrics.total_files}")
        console.print(f"  Total Lines: {metrics.total_lines:,}")
        console.print(f"  Functions: {metrics.total_functions}")
        console.print(f"  Classes: {metrics.total_classes}")
        console.print(f"  Avg File Complexity: {metrics.avg_file_complexity:.1f}")
        console.print()

    def print_distribution(self, metrics: ProjectMetrics) -> None:
        """Print complexity grade distribution.

        Args:
            metrics: Project metrics with grade distribution
        """
        console.print("[bold]Complexity Distribution[/bold]")

        # Get grade distribution
        distribution = metrics._compute_grade_distribution()
        total_chunks = sum(distribution.values())

        if total_chunks == 0:
            console.print("  No functions/methods analyzed")
            console.print()
            return

        # Define grade colors and descriptions
        grade_info = {
            "A": ("green", "Excellent (0-5)"),
            "B": ("blue", "Good (6-10)"),
            "C": ("yellow", "Acceptable (11-20)"),
            "D": ("orange1", "Needs Improvement (21-30)"),
            "F": ("red", "Refactor Required (31+)"),
        }

        # Print distribution table
        table = Table(show_header=True, header_style="bold cyan", box=None)
        table.add_column("Grade", style="bold", width=8)
        table.add_column("Description", width=25)
        table.add_column("Count", justify="right", width=8)
        table.add_column("Percentage", justify="right", width=10)
        table.add_column("Bar", width=20)

        for grade in ["A", "B", "C", "D", "F"]:
            count = distribution.get(grade, 0)
            percentage = (count / total_chunks * 100) if total_chunks > 0 else 0
            color, description = grade_info[grade]

            # Create visual bar
            bar_length = int(percentage / 5)  # Scale: 5% = 1 char
            bar = "█" * bar_length

            table.add_row(
                f"[{color}]{grade}[/{color}]",
                description,
                f"{count}",
                f"{percentage:.1f}%",
                f"[{color}]{bar}[/{color}]",
            )

        console.print(table)
        console.print()

    def print_hotspots(self, metrics: ProjectMetrics, top: int = 10) -> None:
        """Print complexity hotspots.

        Args:
            metrics: Project metrics
            top: Number of top hotspots to display
        """
        hotspot_files = metrics.get_hotspots(limit=top)

        if not hotspot_files:
            console.print("[bold]🔥 Complexity Hotspots[/bold]")
            console.print("  No hotspots found")
            console.print()
            return

        console.print(
            f"[bold]🔥 Top {min(top, len(hotspot_files))} Complexity Hotspots[/bold]"
        )

        table = Table(show_header=True, header_style="bold cyan", box=None)
        table.add_column("Rank", justify="right", width=6)
        table.add_column("File", style="cyan", width=50)
        table.add_column("Avg Complexity", justify="right", width=16)
        table.add_column("Grade", justify="center", width=8)
        table.add_column("Functions", justify="right", width=10)

        for rank, file_metrics in enumerate(hotspot_files, 1):
            # Compute average grade
            if file_metrics.chunks:
                grades = [chunk.complexity_grade for chunk in file_metrics.chunks]
                avg_grade = max(set(grades), key=grades.count)  # Most common grade
            else:
                avg_grade = "N/A"

            # Color code grade
            grade_colors = {
                "A": "green",
                "B": "blue",
                "C": "yellow",
                "D": "orange1",
                "F": "red",
            }
            grade_color = grade_colors.get(avg_grade, "white")

            # Truncate file path if too long
            file_path = file_metrics.file_path
            if len(file_path) > 48:
                file_path = "..." + file_path[-45:]

            table.add_row(
                f"{rank}",
                file_path,
                f"{file_metrics.avg_complexity:.1f}",
                f"[{grade_color}]{avg_grade}[/{grade_color}]",
                f"{len(file_metrics.chunks)}",
            )

        console.print(table)
        console.print()

    def print_recommendations(self, metrics: ProjectMetrics) -> None:
        """Print actionable recommendations.

        Args:
            metrics: Project metrics
        """
        console.print("[bold]💡 Recommendations[/bold]")

        recommendations: list[str] = []

        # Check for files needing attention
        files_needing_attention = metrics._count_files_needing_attention()
        if files_needing_attention > 0:
            recommendations.append(
                f"[yellow]•[/yellow] {files_needing_attention} files have health score below 0.7 - consider refactoring"
            )

        # Check for high complexity files
        hotspots = metrics.get_hotspots(limit=5)
        high_complexity_files = [f for f in hotspots if f.avg_complexity > 20]
        if high_complexity_files:
            recommendations.append(
                f"[yellow]•[/yellow] {len(high_complexity_files)} files have average complexity > 20 - prioritize these for refactoring"
            )

        # Check grade distribution
        distribution = metrics._compute_grade_distribution()
        total_chunks = sum(distribution.values())
        if total_chunks > 0:
            d_f_percentage = (
                (distribution.get("D", 0) + distribution.get("F", 0))
                / total_chunks
                * 100
            )
            if d_f_percentage > 20:
                recommendations.append(
                    f"[yellow]•[/yellow] {d_f_percentage:.1f}% of functions have D/F grades - aim to reduce this below 10%"
                )

        # Check overall health
        avg_health = metrics._compute_avg_health_score()
        if avg_health < 0.7:
            recommendations.append(
                f"[yellow]•[/yellow] Average health score is {avg_health:.2f} - target 0.8+ through refactoring"
            )
        elif avg_health >= 0.9:
            recommendations.append(
                "[green]✓[/green] Excellent code health! Keep up the good work."
            )

        if not recommendations:
            recommendations.append(
                "[green]✓[/green] Code quality looks good! No critical issues found."
            )

        for rec in recommendations:
            console.print(f"  {rec}")

        console.print()

        # Print tips
        console.print("[dim]💡 Tips:[/dim]")
        console.print(
            "[dim]  • Use [cyan]--top N[/cyan] to see more/fewer hotspots[/dim]"
        )
        console.print(
            "[dim]  • Use [cyan]--json[/cyan] to export results for further analysis[/dim]"
        )
        console.print(
            "[dim]  • Focus refactoring efforts on Grade D and F functions first[/dim]"
        )
        console.print()
```