mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +111 -0
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +74 -0
- mcp_vector_search/analysis/collectors/base.py +164 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/complexity.py +743 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +414 -0
- mcp_vector_search/analysis/reporters/__init__.py +7 -0
- mcp_vector_search/analysis/reporters/console.py +646 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +1062 -0
- mcp_vector_search/cli/commands/chat.py +1455 -0
- mcp_vector_search/cli/commands/index.py +621 -5
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/init.py +13 -0
- mcp_vector_search/cli/commands/install.py +597 -335
- mcp_vector_search/cli/commands/install_old.py +8 -4
- mcp_vector_search/cli/commands/mcp.py +78 -6
- mcp_vector_search/cli/commands/reset.py +68 -26
- mcp_vector_search/cli/commands/search.py +224 -8
- mcp_vector_search/cli/commands/setup.py +1184 -0
- mcp_vector_search/cli/commands/status.py +339 -5
- mcp_vector_search/cli/commands/uninstall.py +276 -357
- mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
- mcp_vector_search/cli/commands/visualize/cli.py +292 -0
- mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
- mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
- mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
- mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
- mcp_vector_search/cli/commands/visualize/server.py +600 -0
- mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
- mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
- mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
- mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
- mcp_vector_search/cli/didyoumean.py +27 -2
- mcp_vector_search/cli/main.py +127 -160
- mcp_vector_search/cli/output.py +158 -13
- mcp_vector_search/config/__init__.py +4 -0
- mcp_vector_search/config/default_thresholds.yaml +52 -0
- mcp_vector_search/config/settings.py +12 -0
- mcp_vector_search/config/thresholds.py +273 -0
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/auto_indexer.py +3 -3
- mcp_vector_search/core/boilerplate.py +186 -0
- mcp_vector_search/core/config_utils.py +394 -0
- mcp_vector_search/core/database.py +406 -94
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/exceptions.py +11 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/git_hooks.py +4 -4
- mcp_vector_search/core/indexer.py +632 -54
- mcp_vector_search/core/llm_client.py +756 -0
- mcp_vector_search/core/models.py +91 -1
- mcp_vector_search/core/project.py +17 -0
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/scheduler.py +11 -11
- mcp_vector_search/core/search.py +179 -29
- mcp_vector_search/mcp/server.py +819 -9
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/__init__.py +2 -0
- mcp_vector_search/utils/gitignore.py +0 -3
- mcp_vector_search/utils/gitignore_updater.py +212 -0
- mcp_vector_search/utils/monorepo.py +66 -4
- mcp_vector_search/utils/timing.py +10 -6
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
- mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
- mcp_vector_search/cli/commands/visualize.py +0 -1467
- mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1062 @@
|
|
|
1
|
+
"""Analyze command for MCP Vector Search CLI."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import typer
|
|
7
|
+
from loguru import logger
|
|
8
|
+
|
|
9
|
+
from ...analysis import (
|
|
10
|
+
CognitiveComplexityCollector,
|
|
11
|
+
CyclomaticComplexityCollector,
|
|
12
|
+
FileMetrics,
|
|
13
|
+
ProjectMetrics,
|
|
14
|
+
)
|
|
15
|
+
from ...analysis.baseline import (
|
|
16
|
+
BaselineComparator,
|
|
17
|
+
BaselineExistsError,
|
|
18
|
+
BaselineManager,
|
|
19
|
+
BaselineNotFoundError,
|
|
20
|
+
)
|
|
21
|
+
from ...analysis.storage.metrics_store import MetricsStore, MetricsStoreError
|
|
22
|
+
from ...analysis.storage.trend_tracker import TrendData, TrendDirection, TrendTracker
|
|
23
|
+
from ...core.exceptions import ProjectNotFoundError
|
|
24
|
+
from ...core.git import GitError, GitManager, GitNotAvailableError, GitNotRepoError
|
|
25
|
+
from ...core.project import ProjectManager
|
|
26
|
+
from ...parsers.registry import ParserRegistry
|
|
27
|
+
from ..output import console, print_error, print_info, print_json
|
|
28
|
+
|
|
29
|
+
# Create analyze subcommand app
|
|
30
|
+
analyze_app = typer.Typer(help="📈 Analyze code complexity and quality")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@analyze_app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    project_root: Path | None = typer.Option(
        None,
        "--project-root",
        "-p",
        help="Project root directory (auto-detected if not specified)",
        exists=True,
        file_okay=False,
        dir_okay=True,
        readable=True,
        rich_help_panel="🔧 Global Options",
    ),
    quick: bool = typer.Option(
        False,
        "--quick",
        help="Quick mode (cognitive + cyclomatic complexity only)",
        rich_help_panel="⚡ Performance Options",
    ),
    show_smells: bool = typer.Option(
        True,
        "--smells/--no-smells",
        help="Show detected code smells in output",
        rich_help_panel="📊 Display Options",
    ),
    language: str | None = typer.Option(
        None,
        "--language",
        help="Filter by programming language (python, javascript, typescript)",
        rich_help_panel="🔍 Filters",
    ),
    path: Path | None = typer.Option(
        None,
        "--path",
        help="Analyze specific file or directory",
        rich_help_panel="🔍 Filters",
    ),
    top: int = typer.Option(
        10,
        "--top",
        help="Number of top complexity hotspots to show",
        min=1,
        max=100,
        rich_help_panel="📊 Display Options",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        help="Output results in JSON format",
        rich_help_panel="📊 Display Options",
    ),
    include_context: bool = typer.Option(
        False,
        "--include-context",
        help="Include LLM-consumable context in JSON output (enhanced interpretation)",
        rich_help_panel="📊 Display Options",
    ),
    format: str = typer.Option(
        "console",
        "--format",
        "-f",
        help="Output format: console, json, sarif, markdown",
        rich_help_panel="📊 Display Options",
    ),
    output: Path | None = typer.Option(
        None,
        "--output",
        "-o",
        help="Output file path (required for sarif format)",
        rich_help_panel="📊 Display Options",
    ),
    fail_on_smell: bool = typer.Option(
        False,
        "--fail-on-smell",
        help="Exit with code 1 if code smells are detected",
        rich_help_panel="🚦 Quality Gates",
    ),
    severity_threshold: str = typer.Option(
        "error",
        "--severity-threshold",
        help="Minimum severity to trigger failure: info, warning, error, none",
        rich_help_panel="🚦 Quality Gates",
    ),
    changed_only: bool = typer.Option(
        False,
        "--changed-only/--no-changed-only",
        help="Analyze only uncommitted changes (staged + unstaged + untracked)",
        rich_help_panel="🔍 Filters",
    ),
    baseline: str | None = typer.Option(
        None,
        "--baseline",
        help="Compare against baseline branch (e.g., main, master, develop)",
        rich_help_panel="🔍 Filters",
    ),
    save_baseline: str | None = typer.Option(
        None,
        "--save-baseline",
        help="Save current analysis as named baseline",
        rich_help_panel="📊 Baseline Management",
    ),
    compare_baseline: str | None = typer.Option(
        None,
        "--compare-baseline",
        help="Compare current analysis against named baseline",
        rich_help_panel="📊 Baseline Management",
    ),
    list_baselines: bool = typer.Option(
        False,
        "--list-baselines",
        help="List all available baselines (standalone action)",
        rich_help_panel="📊 Baseline Management",
    ),
    delete_baseline: str | None = typer.Option(
        None,
        "--delete-baseline",
        help="Delete a named baseline",
        rich_help_panel="📊 Baseline Management",
    ),
    force_baseline: bool = typer.Option(
        False,
        "--force",
        help="Force overwrite when saving baseline that already exists",
        rich_help_panel="📊 Baseline Management",
    ),
) -> None:
    """📈 Analyze code complexity and quality.

    Performs structural code analysis to identify complexity hotspots,
    code smells, and quality metrics across your codebase.

    [bold cyan]Basic Examples:[/bold cyan]

        [green]Quick analysis (cognitive + cyclomatic complexity):[/green]
        $ mcp-vector-search analyze --quick

        [green]Full analysis (all collectors):[/green]
        $ mcp-vector-search analyze

        [green]Filter by language:[/green]
        $ mcp-vector-search analyze --language python

        [green]Analyze specific directory:[/green]
        $ mcp-vector-search analyze --path src/core

        [green]Analyze only uncommitted changes:[/green]
        $ mcp-vector-search analyze --changed-only

        [green]Compare against baseline branch:[/green]
        $ mcp-vector-search analyze --baseline main

    [bold cyan]Output Options:[/bold cyan]

        [green]Show top 5 hotspots:[/green]
        $ mcp-vector-search analyze --top 5

        [green]Export to JSON:[/green]
        $ mcp-vector-search analyze --json > analysis.json

        [green]Export to SARIF format:[/green]
        $ mcp-vector-search analyze --format sarif --output report.sarif

        [green]Export to Markdown format:[/green]
        $ mcp-vector-search analyze --format markdown --output .

    [bold cyan]CI/CD Quality Gates:[/bold cyan]

        [green]Fail on ERROR-level smells (default):[/green]
        $ mcp-vector-search analyze --fail-on-smell

        [green]Fail on WARNING or ERROR smells:[/green]
        $ mcp-vector-search analyze --fail-on-smell --severity-threshold warning

        [green]CI/CD workflow with SARIF:[/green]
        $ mcp-vector-search analyze --fail-on-smell --format sarif --output report.sarif

    [dim]💡 Tip: Use --quick for faster analysis on large projects.[/dim]
    """
    if ctx.invoked_subcommand is not None:
        # A subcommand was invoked - let it handle the request
        return

    # Handle standalone baseline operations first.
    # NOTE(review): BaselineManager() is constructed without a project path here —
    # presumably it uses a global/user-level storage dir; confirm against its ctor.
    baseline_manager = BaselineManager()

    # --list-baselines is a standalone action: print and exit without analyzing.
    if list_baselines:
        baselines = baseline_manager.list_baselines()
        if not baselines:
            console.print("[yellow]No baselines found[/yellow]")
            console.print(
                f"\nBaselines are stored in: {baseline_manager.storage_dir}\n"
            )
            console.print(
                "Create a baseline with: [cyan]mcp-vector-search analyze --save-baseline <name>[/cyan]"
            )
        else:
            console.print(f"\n[bold]Available Baselines[/bold] ({len(baselines)})")
            console.print("━" * 80)
            for baseline in baselines:
                console.print(f"\n[cyan]• {baseline.baseline_name}[/cyan]")
                console.print(f"  Created: {baseline.created_at}")
                console.print(f"  Project: {baseline.project_path}")
                console.print(
                    f"  Files: {baseline.file_count} | Functions: {baseline.function_count}"
                )
                console.print(f"  Tool Version: {baseline.tool_version}")
                if baseline.git_info.commit:
                    console.print(
                        f"  Git: {baseline.git_info.branch or 'detached'} @ {baseline.git_info.commit[:8]}"
                    )
            console.print()
        raise typer.Exit(0)

    # --delete-baseline is also standalone: exit 0 on success, 1 if not found.
    if delete_baseline:
        try:
            baseline_manager.delete_baseline(delete_baseline)
            console.print(
                f"[green]✓[/green] Deleted baseline: [cyan]{delete_baseline}[/cyan]"
            )
            # typer.Exit is not a BaselineNotFoundError, so it propagates
            # straight through the except clause below.
            raise typer.Exit(0)
        except BaselineNotFoundError as e:
            print_error(str(e))
            console.print("\nAvailable baselines:")
            baselines = baseline_manager.list_baselines()
            # Show at most 5 candidates as a hint for the correct name.
            for baseline in baselines[:5]:
                console.print(f"  • {baseline.baseline_name}")
            raise typer.Exit(1)

    try:
        # Validate format and output options before doing any work.
        valid_formats = ["console", "json", "sarif", "markdown"]
        format_lower = format.lower()

        if format_lower not in valid_formats:
            print_error(
                f"Invalid format: {format}. Must be one of: {', '.join(valid_formats)}"
            )
            raise typer.Exit(1)

        # SARIF output has no sensible default location, so --output is mandatory.
        if format_lower == "sarif" and output is None:
            print_error("--output is required when using --format sarif")
            raise typer.Exit(1)

        # Legacy --json flag overrides --format for backward compatibility.
        if json_output:
            format_lower = "json"

        # Use provided project_root or fall back to the current working directory.
        if project_root is None:
            project_root = Path.cwd()

        # run_analysis is async; bridge from the sync typer callback.
        asyncio.run(
            run_analysis(
                project_root=project_root,
                quick_mode=quick,
                language_filter=language,
                path_filter=path,
                top_n=top,
                json_output=(format_lower == "json"),
                show_smells=show_smells,
                output_format=format_lower,
                output_file=output,
                fail_on_smell=fail_on_smell,
                severity_threshold=severity_threshold,
                changed_only=changed_only,
                baseline=baseline,
                save_baseline=save_baseline,
                compare_baseline=compare_baseline,
                force_baseline=force_baseline,
                baseline_manager=baseline_manager,
                include_context=include_context,
            )
        )

    except typer.Exit:
        # Re-raise typer.Exit to preserve exit codes from run_analysis
        # (e.g. quality-gate failures) instead of remapping them to 2 below.
        raise
    except Exception as e:
        logger.error(f"Analysis failed: {e}")
        print_error(f"Analysis failed: {e}")
        raise typer.Exit(2)  # Exit code 2 for analysis errors
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
def filter_smells_by_severity(smells: list, severity_threshold: str) -> list:
    """Return only the smells at or above the given severity threshold.

    Args:
        smells: List of CodeSmell objects to filter
        severity_threshold: Minimum severity level - "info", "warning", "error", or "none"

    Returns:
        Filtered list of smells matching or exceeding the severity threshold
    """
    from ...analysis.collectors.smells import SmellSeverity

    threshold = severity_threshold.lower()

    # "none" disables the quality gate entirely: nothing passes the filter.
    if threshold == "none":
        return []

    # Each threshold admits its own level plus every stricter level above it.
    cumulative_levels = {
        "info": [SmellSeverity.INFO, SmellSeverity.WARNING, SmellSeverity.ERROR],
        "warning": [SmellSeverity.WARNING, SmellSeverity.ERROR],
        "error": [SmellSeverity.ERROR],
    }

    # Unrecognized thresholds fall back to the strictest gate (errors only).
    accepted = cumulative_levels.get(threshold, [SmellSeverity.ERROR])

    matching = []
    for smell in smells:
        if smell.severity in accepted:
            matching.append(smell)
    return matching
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
async def run_analysis(
|
|
346
|
+
project_root: Path,
|
|
347
|
+
quick_mode: bool = False,
|
|
348
|
+
language_filter: str | None = None,
|
|
349
|
+
path_filter: Path | None = None,
|
|
350
|
+
top_n: int = 10,
|
|
351
|
+
json_output: bool = False,
|
|
352
|
+
show_smells: bool = True,
|
|
353
|
+
output_format: str = "console",
|
|
354
|
+
output_file: Path | None = None,
|
|
355
|
+
fail_on_smell: bool = False,
|
|
356
|
+
severity_threshold: str = "error",
|
|
357
|
+
changed_only: bool = False,
|
|
358
|
+
baseline: str | None = None,
|
|
359
|
+
save_baseline: str | None = None,
|
|
360
|
+
compare_baseline: str | None = None,
|
|
361
|
+
force_baseline: bool = False,
|
|
362
|
+
baseline_manager: BaselineManager | None = None,
|
|
363
|
+
include_context: bool = False,
|
|
364
|
+
) -> None:
|
|
365
|
+
"""Run code complexity analysis.
|
|
366
|
+
|
|
367
|
+
Args:
|
|
368
|
+
project_root: Root directory of the project
|
|
369
|
+
quick_mode: Use only cognitive + cyclomatic complexity (faster)
|
|
370
|
+
language_filter: Filter files by language
|
|
371
|
+
path_filter: Analyze specific file or directory
|
|
372
|
+
top_n: Number of top hotspots to show
|
|
373
|
+
json_output: Output results as JSON (deprecated, use output_format)
|
|
374
|
+
show_smells: Show detected code smells in output
|
|
375
|
+
output_format: Output format (console, json, sarif)
|
|
376
|
+
output_file: Output file path (for sarif format)
|
|
377
|
+
fail_on_smell: Exit with code 1 if smells are detected
|
|
378
|
+
severity_threshold: Minimum severity to trigger failure
|
|
379
|
+
changed_only: Analyze only uncommitted changes
|
|
380
|
+
baseline: Compare against baseline branch
|
|
381
|
+
save_baseline: Save analysis as named baseline
|
|
382
|
+
compare_baseline: Compare against named baseline
|
|
383
|
+
force_baseline: Force overwrite existing baseline
|
|
384
|
+
baseline_manager: BaselineManager instance
|
|
385
|
+
"""
|
|
386
|
+
try:
|
|
387
|
+
# Check if project is initialized (optional - we can analyze any directory)
|
|
388
|
+
project_manager = ProjectManager(project_root)
|
|
389
|
+
initialized = project_manager.is_initialized()
|
|
390
|
+
|
|
391
|
+
if not initialized and not json_output:
|
|
392
|
+
print_info(
|
|
393
|
+
f"Analyzing directory: {project_root} (not initialized as MCP project)"
|
|
394
|
+
)
|
|
395
|
+
|
|
396
|
+
# Initialize parser registry
|
|
397
|
+
parser_registry = ParserRegistry()
|
|
398
|
+
|
|
399
|
+
# Determine which collectors to use
|
|
400
|
+
if quick_mode:
|
|
401
|
+
collectors = [
|
|
402
|
+
CognitiveComplexityCollector(),
|
|
403
|
+
CyclomaticComplexityCollector(),
|
|
404
|
+
]
|
|
405
|
+
mode_label = "Quick Mode (2 collectors)"
|
|
406
|
+
else:
|
|
407
|
+
# Import all collectors for full mode
|
|
408
|
+
from ...analysis import (
|
|
409
|
+
MethodCountCollector,
|
|
410
|
+
NestingDepthCollector,
|
|
411
|
+
ParameterCountCollector,
|
|
412
|
+
)
|
|
413
|
+
|
|
414
|
+
collectors = [
|
|
415
|
+
CognitiveComplexityCollector(),
|
|
416
|
+
CyclomaticComplexityCollector(),
|
|
417
|
+
NestingDepthCollector(),
|
|
418
|
+
ParameterCountCollector(),
|
|
419
|
+
MethodCountCollector(),
|
|
420
|
+
]
|
|
421
|
+
mode_label = "Full Mode (5 collectors)"
|
|
422
|
+
|
|
423
|
+
# Initialize git manager if needed for changed/baseline filtering
|
|
424
|
+
git_manager = None
|
|
425
|
+
git_changed_files = None
|
|
426
|
+
|
|
427
|
+
if changed_only or baseline:
|
|
428
|
+
try:
|
|
429
|
+
git_manager = GitManager(project_root)
|
|
430
|
+
|
|
431
|
+
# Get changed files based on mode
|
|
432
|
+
if changed_only:
|
|
433
|
+
git_changed_files = git_manager.get_changed_files(
|
|
434
|
+
include_untracked=True
|
|
435
|
+
)
|
|
436
|
+
if not git_changed_files:
|
|
437
|
+
if json_output:
|
|
438
|
+
print_json(
|
|
439
|
+
{"error": "No changed files found. Nothing to analyze."}
|
|
440
|
+
)
|
|
441
|
+
else:
|
|
442
|
+
print_info("No changed files found. Nothing to analyze.")
|
|
443
|
+
return
|
|
444
|
+
elif baseline:
|
|
445
|
+
git_changed_files = git_manager.get_diff_files(baseline)
|
|
446
|
+
if not git_changed_files:
|
|
447
|
+
if json_output:
|
|
448
|
+
print_json(
|
|
449
|
+
{"error": f"No files changed vs baseline '{baseline}'."}
|
|
450
|
+
)
|
|
451
|
+
else:
|
|
452
|
+
print_info(f"No files changed vs baseline '{baseline}'.")
|
|
453
|
+
return
|
|
454
|
+
|
|
455
|
+
except GitNotAvailableError as e:
|
|
456
|
+
if json_output:
|
|
457
|
+
print_json({"warning": str(e), "fallback": "full analysis"})
|
|
458
|
+
else:
|
|
459
|
+
console.print(f"[yellow]⚠️ {e}[/yellow]")
|
|
460
|
+
print_info("Proceeding with full codebase analysis...")
|
|
461
|
+
git_manager = None
|
|
462
|
+
git_changed_files = None
|
|
463
|
+
|
|
464
|
+
except GitNotRepoError as e:
|
|
465
|
+
if json_output:
|
|
466
|
+
print_json({"warning": str(e), "fallback": "full analysis"})
|
|
467
|
+
else:
|
|
468
|
+
console.print(f"[yellow]⚠️ {e}[/yellow]")
|
|
469
|
+
print_info("Proceeding with full codebase analysis...")
|
|
470
|
+
git_manager = None
|
|
471
|
+
git_changed_files = None
|
|
472
|
+
|
|
473
|
+
except GitError as e:
|
|
474
|
+
if json_output:
|
|
475
|
+
print_json(
|
|
476
|
+
{"warning": f"Git error: {e}", "fallback": "full analysis"}
|
|
477
|
+
)
|
|
478
|
+
else:
|
|
479
|
+
console.print(f"[yellow]⚠️ Git error: {e}[/yellow]")
|
|
480
|
+
print_info("Proceeding with full codebase analysis...")
|
|
481
|
+
git_manager = None
|
|
482
|
+
git_changed_files = None
|
|
483
|
+
|
|
484
|
+
# Find files to analyze
|
|
485
|
+
files_to_analyze = _find_analyzable_files(
|
|
486
|
+
project_root,
|
|
487
|
+
language_filter,
|
|
488
|
+
path_filter,
|
|
489
|
+
parser_registry,
|
|
490
|
+
git_changed_files,
|
|
491
|
+
)
|
|
492
|
+
|
|
493
|
+
if not files_to_analyze:
|
|
494
|
+
if json_output:
|
|
495
|
+
print_json({"error": "No files found to analyze"})
|
|
496
|
+
else:
|
|
497
|
+
print_error("No files found to analyze")
|
|
498
|
+
return
|
|
499
|
+
|
|
500
|
+
# Display analysis info
|
|
501
|
+
if not json_output:
|
|
502
|
+
console.print(
|
|
503
|
+
f"\n[bold blue]Starting Code Analysis[/bold blue] - {mode_label}"
|
|
504
|
+
)
|
|
505
|
+
|
|
506
|
+
# Show file count information with git filtering context
|
|
507
|
+
if git_changed_files is not None:
|
|
508
|
+
# Get total files for context
|
|
509
|
+
total_files = len(
|
|
510
|
+
_find_analyzable_files(
|
|
511
|
+
project_root,
|
|
512
|
+
language_filter,
|
|
513
|
+
path_filter,
|
|
514
|
+
parser_registry,
|
|
515
|
+
None,
|
|
516
|
+
)
|
|
517
|
+
)
|
|
518
|
+
filter_type = "changed" if changed_only else f"vs {baseline}"
|
|
519
|
+
console.print(
|
|
520
|
+
f"Analyzing {len(files_to_analyze)} {filter_type} files "
|
|
521
|
+
f"({total_files} total in project)\n"
|
|
522
|
+
)
|
|
523
|
+
else:
|
|
524
|
+
console.print(f"Files to analyze: {len(files_to_analyze)}\n")
|
|
525
|
+
|
|
526
|
+
# Analyze files
|
|
527
|
+
project_metrics = ProjectMetrics(project_root=str(project_root))
|
|
528
|
+
|
|
529
|
+
for file_path in files_to_analyze:
|
|
530
|
+
try:
|
|
531
|
+
file_metrics = await _analyze_file(
|
|
532
|
+
file_path, parser_registry, collectors
|
|
533
|
+
)
|
|
534
|
+
if file_metrics and file_metrics.chunks:
|
|
535
|
+
project_metrics.files[str(file_path)] = file_metrics
|
|
536
|
+
except Exception as e:
|
|
537
|
+
logger.debug(f"Failed to analyze {file_path}: {e}")
|
|
538
|
+
continue
|
|
539
|
+
|
|
540
|
+
# Compute aggregates
|
|
541
|
+
project_metrics.compute_aggregates()
|
|
542
|
+
|
|
543
|
+
# Save snapshot to metrics store for historical tracking
|
|
544
|
+
trend_data: TrendData | None = None
|
|
545
|
+
try:
|
|
546
|
+
metrics_db_path = project_root / ".mcp-vector-search" / "metrics.db"
|
|
547
|
+
metrics_store = MetricsStore(metrics_db_path)
|
|
548
|
+
snapshot_id = metrics_store.save_project_snapshot(project_metrics)
|
|
549
|
+
logger.debug(f"Saved metrics snapshot {snapshot_id}")
|
|
550
|
+
|
|
551
|
+
# Check for historical data and compute trends if available
|
|
552
|
+
trend_tracker = TrendTracker(metrics_store)
|
|
553
|
+
trend_data = trend_tracker.get_trends(project_root, days=30)
|
|
554
|
+
|
|
555
|
+
# Only show trends if we have at least 2 snapshots
|
|
556
|
+
if len(trend_data.snapshots) >= 2 and not json_output:
|
|
557
|
+
_print_trends(trend_data)
|
|
558
|
+
|
|
559
|
+
except MetricsStoreError as e:
|
|
560
|
+
logger.debug(f"Could not save metrics snapshot: {e}")
|
|
561
|
+
except Exception as e:
|
|
562
|
+
logger.debug(f"Trend tracking unavailable: {e}")
|
|
563
|
+
|
|
564
|
+
# Detect code smells if requested
|
|
565
|
+
all_smells = []
|
|
566
|
+
if show_smells:
|
|
567
|
+
from ...analysis.collectors.smells import SmellDetector
|
|
568
|
+
from ...config.thresholds import ThresholdConfig
|
|
569
|
+
|
|
570
|
+
# Load threshold config (optional - defaults will be used)
|
|
571
|
+
threshold_config = ThresholdConfig()
|
|
572
|
+
smell_detector = SmellDetector(thresholds=threshold_config)
|
|
573
|
+
|
|
574
|
+
# Detect smells across all analyzed files
|
|
575
|
+
for file_path, file_metrics in project_metrics.files.items():
|
|
576
|
+
file_smells = smell_detector.detect_all(file_metrics, file_path)
|
|
577
|
+
all_smells.extend(file_smells)
|
|
578
|
+
|
|
579
|
+
# Output results based on format
|
|
580
|
+
if output_format == "markdown":
|
|
581
|
+
# Markdown format - write two files
|
|
582
|
+
from ...analysis.reporters.markdown import MarkdownReporter
|
|
583
|
+
|
|
584
|
+
reporter = MarkdownReporter()
|
|
585
|
+
|
|
586
|
+
# Generate full analysis report
|
|
587
|
+
analysis_file = reporter.generate_analysis_report(
|
|
588
|
+
project_metrics, all_smells, output_file
|
|
589
|
+
)
|
|
590
|
+
console.print(
|
|
591
|
+
f"[green]✓[/green] Analysis report written to: {analysis_file}"
|
|
592
|
+
)
|
|
593
|
+
|
|
594
|
+
# Generate fixes report if smells were detected
|
|
595
|
+
if all_smells:
|
|
596
|
+
fixes_file = reporter.generate_fixes_report(
|
|
597
|
+
project_metrics, all_smells, output_file
|
|
598
|
+
)
|
|
599
|
+
console.print(f"[green]✓[/green] Fixes report written to: {fixes_file}")
|
|
600
|
+
|
|
601
|
+
elif output_format == "sarif":
|
|
602
|
+
# SARIF format - write to file
|
|
603
|
+
from ...analysis.reporters.sarif import SARIFReporter
|
|
604
|
+
|
|
605
|
+
if not all_smells:
|
|
606
|
+
print_error(
|
|
607
|
+
"No code smells detected - SARIF report requires smells to report"
|
|
608
|
+
)
|
|
609
|
+
return
|
|
610
|
+
|
|
611
|
+
reporter = SARIFReporter()
|
|
612
|
+
reporter.write_sarif(all_smells, output_file, base_path=project_root)
|
|
613
|
+
console.print(f"[green]✓[/green] SARIF report written to: {output_file}")
|
|
614
|
+
|
|
615
|
+
elif json_output or output_format == "json":
|
|
616
|
+
# JSON format - with optional LLM context
|
|
617
|
+
if include_context:
|
|
618
|
+
# Enhanced JSON export with LLM-consumable context
|
|
619
|
+
from ...analysis.interpretation import EnhancedJSONExporter
|
|
620
|
+
from ...config.thresholds import ThresholdConfig
|
|
621
|
+
|
|
622
|
+
threshold_config = ThresholdConfig()
|
|
623
|
+
exporter = EnhancedJSONExporter(
|
|
624
|
+
project_root=project_root, threshold_config=threshold_config
|
|
625
|
+
)
|
|
626
|
+
enhanced_export = exporter.export_with_context(
|
|
627
|
+
project_metrics, include_smells=show_smells
|
|
628
|
+
)
|
|
629
|
+
# Output as JSON
|
|
630
|
+
import json
|
|
631
|
+
|
|
632
|
+
print_json(json.loads(enhanced_export.model_dump_json()))
|
|
633
|
+
else:
|
|
634
|
+
# Standard JSON format
|
|
635
|
+
output = project_metrics.to_summary()
|
|
636
|
+
# Add smell data to JSON output if available
|
|
637
|
+
if show_smells and all_smells:
|
|
638
|
+
from ...analysis.collectors.smells import SmellDetector
|
|
639
|
+
|
|
640
|
+
detector = SmellDetector()
|
|
641
|
+
smell_summary = detector.get_smell_summary(all_smells)
|
|
642
|
+
output["smells"] = {
|
|
643
|
+
"summary": smell_summary,
|
|
644
|
+
"details": [
|
|
645
|
+
{
|
|
646
|
+
"name": smell.name,
|
|
647
|
+
"severity": smell.severity.value,
|
|
648
|
+
"location": smell.location,
|
|
649
|
+
"description": smell.description,
|
|
650
|
+
"metric_value": smell.metric_value,
|
|
651
|
+
"threshold": smell.threshold,
|
|
652
|
+
"suggestion": smell.suggestion,
|
|
653
|
+
}
|
|
654
|
+
for smell in all_smells
|
|
655
|
+
],
|
|
656
|
+
}
|
|
657
|
+
print_json(output)
|
|
658
|
+
else:
|
|
659
|
+
# Console format (default)
|
|
660
|
+
# Import console reporter
|
|
661
|
+
from ...analysis.reporters.console import ConsoleReporter
|
|
662
|
+
|
|
663
|
+
reporter = ConsoleReporter()
|
|
664
|
+
reporter.print_summary(project_metrics)
|
|
665
|
+
reporter.print_distribution(project_metrics)
|
|
666
|
+
reporter.print_hotspots(project_metrics, top=top_n)
|
|
667
|
+
|
|
668
|
+
# Print code smells if requested
|
|
669
|
+
if show_smells and all_smells:
|
|
670
|
+
reporter.print_smells(all_smells, top=top_n)
|
|
671
|
+
|
|
672
|
+
reporter.print_recommendations(project_metrics)
|
|
673
|
+
|
|
674
|
+
# Handle baseline operations after analysis
|
|
675
|
+
if baseline_manager:
|
|
676
|
+
# Save baseline if requested
|
|
677
|
+
if save_baseline:
|
|
678
|
+
try:
|
|
679
|
+
baseline_path = baseline_manager.save_baseline(
|
|
680
|
+
baseline_name=save_baseline,
|
|
681
|
+
metrics=project_metrics,
|
|
682
|
+
overwrite=force_baseline,
|
|
683
|
+
)
|
|
684
|
+
if not json_output:
|
|
685
|
+
console.print(
|
|
686
|
+
f"\n[green]✓[/green] Saved baseline: [cyan]{save_baseline}[/cyan]"
|
|
687
|
+
)
|
|
688
|
+
console.print(f" Location: {baseline_path}")
|
|
689
|
+
except BaselineExistsError as e:
|
|
690
|
+
if json_output:
|
|
691
|
+
print_json({"error": str(e)})
|
|
692
|
+
else:
|
|
693
|
+
print_error(str(e))
|
|
694
|
+
console.print(
|
|
695
|
+
"\nUse [cyan]--force[/cyan] to overwrite the existing baseline"
|
|
696
|
+
)
|
|
697
|
+
raise typer.Exit(1)
|
|
698
|
+
|
|
699
|
+
# Compare against baseline if requested
|
|
700
|
+
if compare_baseline:
|
|
701
|
+
try:
|
|
702
|
+
baseline_metrics = baseline_manager.load_baseline(compare_baseline)
|
|
703
|
+
comparator = BaselineComparator()
|
|
704
|
+
comparison_result = comparator.compare(
|
|
705
|
+
current=project_metrics,
|
|
706
|
+
baseline=baseline_metrics,
|
|
707
|
+
baseline_name=compare_baseline,
|
|
708
|
+
)
|
|
709
|
+
|
|
710
|
+
# Print comparison results (console only)
|
|
711
|
+
if not json_output and output_format == "console":
|
|
712
|
+
from ...analysis.reporters.console import ConsoleReporter
|
|
713
|
+
|
|
714
|
+
reporter = ConsoleReporter()
|
|
715
|
+
reporter.print_baseline_comparison(comparison_result)
|
|
716
|
+
|
|
717
|
+
except BaselineNotFoundError as e:
|
|
718
|
+
if json_output:
|
|
719
|
+
print_json({"error": str(e)})
|
|
720
|
+
else:
|
|
721
|
+
print_error(str(e))
|
|
722
|
+
console.print("\nAvailable baselines:")
|
|
723
|
+
baselines = baseline_manager.list_baselines()
|
|
724
|
+
for baseline_meta in baselines[:5]:
|
|
725
|
+
console.print(f" • {baseline_meta.baseline_name}")
|
|
726
|
+
raise typer.Exit(1)
|
|
727
|
+
|
|
728
|
+
# Quality gate: check if we should fail on smells
|
|
729
|
+
if fail_on_smell and all_smells:
|
|
730
|
+
failing_smells = filter_smells_by_severity(all_smells, severity_threshold)
|
|
731
|
+
if failing_smells:
|
|
732
|
+
console.print(
|
|
733
|
+
f"\n[red]❌ Quality gate failed: {len(failing_smells)} "
|
|
734
|
+
f"{severity_threshold}+ severity smell(s) detected[/red]"
|
|
735
|
+
)
|
|
736
|
+
raise typer.Exit(1)
|
|
737
|
+
|
|
738
|
+
except ProjectNotFoundError as e:
|
|
739
|
+
if json_output:
|
|
740
|
+
print_json({"error": str(e)})
|
|
741
|
+
else:
|
|
742
|
+
print_error(str(e))
|
|
743
|
+
raise typer.Exit(1)
|
|
744
|
+
except typer.Exit:
|
|
745
|
+
# Let typer.Exit propagate for quality gate failures
|
|
746
|
+
raise
|
|
747
|
+
except Exception as e:
|
|
748
|
+
logger.error(f"Analysis failed: {e}", exc_info=True)
|
|
749
|
+
if json_output:
|
|
750
|
+
print_json({"error": str(e)})
|
|
751
|
+
else:
|
|
752
|
+
print_error(f"Analysis failed: {e}")
|
|
753
|
+
raise typer.Exit(2) # Exit code 2 for analysis errors
|
|
754
|
+
|
|
755
|
+
|
|
756
|
+
def _find_analyzable_files(
|
|
757
|
+
project_root: Path,
|
|
758
|
+
language_filter: str | None,
|
|
759
|
+
path_filter: Path | None,
|
|
760
|
+
parser_registry: ParserRegistry,
|
|
761
|
+
git_changed_files: list[Path] | None = None,
|
|
762
|
+
) -> list[Path]:
|
|
763
|
+
"""Find files that can be analyzed.
|
|
764
|
+
|
|
765
|
+
Args:
|
|
766
|
+
project_root: Root directory
|
|
767
|
+
language_filter: Optional language filter
|
|
768
|
+
path_filter: Optional path filter
|
|
769
|
+
parser_registry: Parser registry for checking supported files
|
|
770
|
+
git_changed_files: Optional list of git changed files to filter by
|
|
771
|
+
|
|
772
|
+
Returns:
|
|
773
|
+
List of file paths to analyze
|
|
774
|
+
"""
|
|
775
|
+
import fnmatch
|
|
776
|
+
|
|
777
|
+
# If git_changed_files is provided, use it as the primary filter
|
|
778
|
+
if git_changed_files is not None:
|
|
779
|
+
# Filter based on supported extensions and language
|
|
780
|
+
files: list[Path] = []
|
|
781
|
+
supported_extensions = parser_registry.get_supported_extensions()
|
|
782
|
+
|
|
783
|
+
for file_path in git_changed_files:
|
|
784
|
+
# Check if file extension is supported
|
|
785
|
+
if file_path.suffix.lower() not in supported_extensions:
|
|
786
|
+
logger.debug(f"Skipping unsupported file type: {file_path}")
|
|
787
|
+
continue
|
|
788
|
+
|
|
789
|
+
# Apply language filter
|
|
790
|
+
if language_filter:
|
|
791
|
+
try:
|
|
792
|
+
parser = parser_registry.get_parser_for_file(file_path)
|
|
793
|
+
if parser.language.lower() != language_filter.lower():
|
|
794
|
+
logger.debug(
|
|
795
|
+
f"Skipping file (language mismatch): {file_path} "
|
|
796
|
+
f"({parser.language} != {language_filter})"
|
|
797
|
+
)
|
|
798
|
+
continue
|
|
799
|
+
except Exception as e:
|
|
800
|
+
logger.debug(f"Skipping file (parser error): {file_path}: {e}")
|
|
801
|
+
continue
|
|
802
|
+
|
|
803
|
+
# Apply path filter if specified
|
|
804
|
+
if path_filter:
|
|
805
|
+
path_filter_resolved = path_filter.resolve()
|
|
806
|
+
file_path_resolved = file_path.resolve()
|
|
807
|
+
|
|
808
|
+
# Check if file is within path_filter scope
|
|
809
|
+
try:
|
|
810
|
+
# If path_filter is a file, only include that specific file
|
|
811
|
+
if path_filter_resolved.is_file():
|
|
812
|
+
if file_path_resolved != path_filter_resolved:
|
|
813
|
+
continue
|
|
814
|
+
# If path_filter is a directory, check if file is within it
|
|
815
|
+
elif path_filter_resolved.is_dir():
|
|
816
|
+
file_path_resolved.relative_to(path_filter_resolved)
|
|
817
|
+
except ValueError:
|
|
818
|
+
# File is not within path_filter scope
|
|
819
|
+
logger.debug(f"Skipping file (outside path filter): {file_path}")
|
|
820
|
+
continue
|
|
821
|
+
|
|
822
|
+
files.append(file_path)
|
|
823
|
+
|
|
824
|
+
return sorted(files)
|
|
825
|
+
|
|
826
|
+
# No git filtering - fall back to standard directory traversal
|
|
827
|
+
# Determine base path to search
|
|
828
|
+
base_path = path_filter if path_filter and path_filter.exists() else project_root
|
|
829
|
+
|
|
830
|
+
# If path_filter is a file, return just that file
|
|
831
|
+
if base_path.is_file():
|
|
832
|
+
# Check if file extension is supported
|
|
833
|
+
if base_path.suffix.lower() in parser_registry.get_supported_extensions():
|
|
834
|
+
return [base_path]
|
|
835
|
+
return []
|
|
836
|
+
|
|
837
|
+
# Find all supported files
|
|
838
|
+
files = []
|
|
839
|
+
supported_extensions = parser_registry.get_supported_extensions()
|
|
840
|
+
|
|
841
|
+
# Common ignore patterns
|
|
842
|
+
ignore_patterns = {
|
|
843
|
+
".git",
|
|
844
|
+
".venv",
|
|
845
|
+
"venv",
|
|
846
|
+
"node_modules",
|
|
847
|
+
"__pycache__",
|
|
848
|
+
".pytest_cache",
|
|
849
|
+
"dist",
|
|
850
|
+
"build",
|
|
851
|
+
".tox",
|
|
852
|
+
".eggs",
|
|
853
|
+
}
|
|
854
|
+
|
|
855
|
+
for file_path in base_path.rglob("*"):
|
|
856
|
+
# Skip directories
|
|
857
|
+
if file_path.is_dir():
|
|
858
|
+
continue
|
|
859
|
+
|
|
860
|
+
# Skip ignored directories
|
|
861
|
+
if any(
|
|
862
|
+
ignored in file_path.parts or fnmatch.fnmatch(file_path.name, f"{ignored}*")
|
|
863
|
+
for ignored in ignore_patterns
|
|
864
|
+
):
|
|
865
|
+
continue
|
|
866
|
+
|
|
867
|
+
# Check if file extension is supported
|
|
868
|
+
if file_path.suffix.lower() not in supported_extensions:
|
|
869
|
+
continue
|
|
870
|
+
|
|
871
|
+
# Apply language filter
|
|
872
|
+
if language_filter:
|
|
873
|
+
parser = parser_registry.get_parser_for_file(file_path)
|
|
874
|
+
if parser.language.lower() != language_filter.lower():
|
|
875
|
+
continue
|
|
876
|
+
|
|
877
|
+
files.append(file_path)
|
|
878
|
+
|
|
879
|
+
return sorted(files)
|
|
880
|
+
|
|
881
|
+
|
|
882
|
+
async def _analyze_file(
    file_path: Path, parser_registry: ParserRegistry, collectors: list
) -> FileMetrics | None:
    """Analyze a single file and return metrics.

    Parses the file into chunks, derives best-effort line counts and
    per-chunk complexity estimates, then aggregates them.

    Args:
        file_path: Path to file
        parser_registry: Parser registry
        collectors: List of metric collectors

    Returns:
        FileMetrics or None if analysis failed
    """
    try:
        # Resolve a parser and split the file into chunks.
        parser = parser_registry.get_parser_for_file(file_path)
        chunks = await parser.parse_file(file_path)
        if not chunks:
            return None

        metrics = FileMetrics(file_path=str(file_path))

        # Best-effort line accounting; unreadable files keep zero counts.
        try:
            with open(file_path, encoding="utf-8") as handle:
                raw_lines = handle.readlines()
            stripped = [ln.strip() for ln in raw_lines]
            metrics.total_lines = len(raw_lines)
            # NOTE: '#'-prefix comment detection is Python-centric; other
            # languages will be under-counted here.
            metrics.code_lines = sum(
                1 for s in stripped if s and not s.startswith("#")
            )
            metrics.comment_lines = sum(1 for s in stripped if s.startswith("#"))
            metrics.blank_lines = sum(1 for s in stripped if not s)
        except Exception:
            pass

        from ...analysis.metrics import ChunkMetrics

        # Single pass over chunks: tally kinds and build per-chunk metrics.
        for chunk in chunks:
            kind = chunk.chunk_type
            if kind == "function":
                metrics.function_count += 1
            elif kind == "class":
                metrics.class_count += 1
            elif kind == "method":
                metrics.method_count += 1

            # Cyclomatic complexity from the parser, floored at 1.
            # For quick mode this is sufficient.
            cyclomatic = (
                int(chunk.complexity_score) if chunk.complexity_score > 0 else 1
            )
            # Parameter count when the parser exposes parameters.
            params = len(chunk.parameters) if chunk.parameters else 0

            metrics.chunks.append(
                ChunkMetrics(
                    # Cognitive complexity approximated as 1.3x cyclomatic
                    # (typical ratio for complex code).
                    cognitive_complexity=int(cyclomatic * 1.3),
                    cyclomatic_complexity=cyclomatic,
                    max_nesting_depth=0,  # Not available without collectors
                    parameter_count=params,
                    lines_of_code=chunk.end_line - chunk.start_line + 1,
                )
            )

        # Roll chunk-level numbers up to file-level aggregates.
        metrics.compute_aggregates()
        return metrics

    except Exception as e:
        logger.debug(f"Failed to analyze file {file_path}: {e}")
        return None
|
|
968
|
+
|
|
969
|
+
|
|
970
|
+
def _print_trends(trend_data: TrendData) -> None:
    """Print trend analysis to console.

    Renders a compact panel with complexity / smell / health-score rows,
    then lists up to three regressions and three improvements.

    Args:
        trend_data: TrendData from TrendTracker
    """
    from rich.panel import Panel
    from rich.table import Table

    def direction_label(direction: TrendDirection) -> str:
        """Map a trend direction to a colored arrow label."""
        labels = {
            TrendDirection.IMPROVING: "[green]↓ improving[/green]",
            TrendDirection.WORSENING: "[red]↑ worsening[/red]",
        }
        return labels.get(direction, "[dim]→ stable[/dim]")

    def change_text(change: float, invert: bool = False) -> str:
        """Format a percentage change, green when moving the 'good' way."""
        if abs(change) < 0.1:
            return "[dim]—[/dim]"
        # For complexity/smells, negative is good; for health, positive is good
        good = (change > 0) if invert else (change < 0)
        tint = "green" if good else "red"
        prefix = "+" if change > 0 else ""
        return f"[{tint}]{prefix}{change:.1f}%[/{tint}]"

    # Three-column trend grid (no header, no box).
    grid = Table(show_header=False, box=None, padding=(0, 2))
    grid.add_column("Metric", style="bold")
    grid.add_column("Direction")
    grid.add_column("Change")

    grid.add_row(
        "Complexity",
        direction_label(trend_data.complexity_direction),
        change_text(trend_data.avg_complexity_change),
    )
    grid.add_row(
        "Code Smells",
        direction_label(trend_data.smell_direction),
        change_text(trend_data.smell_count_change),
    )

    # Health delta: newest minus oldest snapshot value (0 if fewer than 2).
    health_points = trend_data.health_trend
    health_delta = (
        health_points[-1][1] - health_points[0][1] if len(health_points) >= 2 else 0
    )
    grid.add_row(
        "Health Score",
        direction_label(trend_data.health_direction),
        change_text(health_delta, invert=True),
    )

    snapshot_count = len(trend_data.snapshots)
    console.print(
        Panel(
            grid,
            title=f"[bold cyan]Trends[/bold cyan] (last 30 days, {snapshot_count} snapshots)",
            border_style="cyan",
            padding=(0, 1),
        )
    )

    # Show critical regressions if any
    if trend_data.critical_regressions:
        console.print("\n[bold red]⚠ Regressions Detected:[/bold red]")
        for regression in trend_data.critical_regressions[:3]:
            console.print(
                f" • [red]{regression.file_path}[/red]: "
                f"complexity {regression.change_percentage:+.1f}%"
            )

    # Show significant improvements if any
    if trend_data.significant_improvements:
        console.print("\n[bold green]✓ Improvements:[/bold green]")
        for improvement in trend_data.significant_improvements[:3]:
            console.print(
                f" • [green]{improvement.file_path}[/green]: "
                f"complexity {improvement.change_percentage:+.1f}%"
            )
|
|
1059
|
+
|
|
1060
|
+
|
|
1061
|
+
# Allow running this command module directly as a script.
if __name__ == "__main__":
    analyze_app()
|