mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +48 -1
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +35 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +74 -1
- mcp_vector_search/analysis/reporters/__init__.py +3 -1
- mcp_vector_search/analysis/reporters/console.py +424 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +665 -11
- mcp_vector_search/cli/commands/chat.py +193 -0
- mcp_vector_search/cli/commands/index.py +600 -2
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/search.py +194 -1
- mcp_vector_search/cli/commands/setup.py +64 -13
- mcp_vector_search/cli/commands/status.py +302 -3
- mcp_vector_search/cli/commands/visualize/cli.py +26 -10
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
- mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
- mcp_vector_search/cli/commands/visualize/server.py +304 -15
- mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
- mcp_vector_search/cli/didyoumean.py +5 -0
- mcp_vector_search/cli/main.py +16 -5
- mcp_vector_search/cli/output.py +134 -5
- mcp_vector_search/config/thresholds.py +89 -1
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/database.py +39 -2
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/indexer.py +445 -84
- mcp_vector_search/core/llm_client.py +9 -4
- mcp_vector_search/core/models.py +88 -1
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/search.py +1 -1
- mcp_vector_search/mcp/server.py +795 -4
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/gitignore.py +0 -3
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
- mcp_vector_search/cli/commands/visualize.py.original +0 -2536
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
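
Most of the release is the expanded `analyze` command: code-smell detection, CI quality gates, SARIF/Markdown export, git-aware filtering (`--changed-only`, `--baseline`), and named baselines, all visible in the diff of `mcp_vector_search/cli/commands/analyze.py` below. As a quick orientation, here is a minimal sketch of driving the new quality gate from a CI step; the flag names and exit-code convention (1 for a failed gate or invalid usage, 2 for analysis errors) are read from that diff, while the script itself is illustrative and not part of the package.

```python
import subprocess
import sys

# Illustrative CI gate: flags and exit codes are taken from the analyze.py diff below
# (exit 1 = quality gate failed or bad usage, exit 2 = analysis error).
cmd = [
    "mcp-vector-search", "analyze",
    "--fail-on-smell",
    "--severity-threshold", "warning",
    "--format", "sarif",
    "--output", "report.sarif",
]

result = subprocess.run(cmd)

if result.returncode == 0:
    print("Quality gate passed")
elif result.returncode == 1:
    print("Quality gate failed: warning-or-higher smells were detected")
    sys.exit(1)
else:
    print(f"Analysis error (exit code {result.returncode}); see tool output")
    sys.exit(result.returncode)
```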

mcp_vector_search/cli/commands/analyze.py (+665 -11)

```diff
@@ -12,7 +12,16 @@ from ...analysis import (
     FileMetrics,
     ProjectMetrics,
 )
+from ...analysis.baseline import (
+    BaselineComparator,
+    BaselineExistsError,
+    BaselineManager,
+    BaselineNotFoundError,
+)
+from ...analysis.storage.metrics_store import MetricsStore, MetricsStoreError
+from ...analysis.storage.trend_tracker import TrendData, TrendDirection, TrendTracker
 from ...core.exceptions import ProjectNotFoundError
+from ...core.git import GitError, GitManager, GitNotAvailableError, GitNotRepoError
 from ...core.project import ProjectManager
 from ...parsers.registry import ParserRegistry
 from ..output import console, print_error, print_info, print_json
@@ -41,6 +50,12 @@ def main(
         help="Quick mode (cognitive + cyclomatic complexity only)",
         rich_help_panel="⚡ Performance Options",
     ),
+    show_smells: bool = typer.Option(
+        True,
+        "--smells/--no-smells",
+        help="Show detected code smells in output",
+        rich_help_panel="📊 Display Options",
+    ),
     language: str | None = typer.Option(
         None,
         "--language",
@@ -67,6 +82,80 @@ def main(
         help="Output results in JSON format",
         rich_help_panel="📊 Display Options",
     ),
+    include_context: bool = typer.Option(
+        False,
+        "--include-context",
+        help="Include LLM-consumable context in JSON output (enhanced interpretation)",
+        rich_help_panel="📊 Display Options",
+    ),
+    format: str = typer.Option(
+        "console",
+        "--format",
+        "-f",
+        help="Output format: console, json, sarif, markdown",
+        rich_help_panel="📊 Display Options",
+    ),
+    output: Path | None = typer.Option(
+        None,
+        "--output",
+        "-o",
+        help="Output file path (required for sarif format)",
+        rich_help_panel="📊 Display Options",
+    ),
+    fail_on_smell: bool = typer.Option(
+        False,
+        "--fail-on-smell",
+        help="Exit with code 1 if code smells are detected",
+        rich_help_panel="🚦 Quality Gates",
+    ),
+    severity_threshold: str = typer.Option(
+        "error",
+        "--severity-threshold",
+        help="Minimum severity to trigger failure: info, warning, error, none",
+        rich_help_panel="🚦 Quality Gates",
+    ),
+    changed_only: bool = typer.Option(
+        False,
+        "--changed-only/--no-changed-only",
+        help="Analyze only uncommitted changes (staged + unstaged + untracked)",
+        rich_help_panel="🔍 Filters",
+    ),
+    baseline: str | None = typer.Option(
+        None,
+        "--baseline",
+        help="Compare against baseline branch (e.g., main, master, develop)",
+        rich_help_panel="🔍 Filters",
+    ),
+    save_baseline: str | None = typer.Option(
+        None,
+        "--save-baseline",
+        help="Save current analysis as named baseline",
+        rich_help_panel="📊 Baseline Management",
+    ),
+    compare_baseline: str | None = typer.Option(
+        None,
+        "--compare-baseline",
+        help="Compare current analysis against named baseline",
+        rich_help_panel="📊 Baseline Management",
+    ),
+    list_baselines: bool = typer.Option(
+        False,
+        "--list-baselines",
+        help="List all available baselines (standalone action)",
+        rich_help_panel="📊 Baseline Management",
+    ),
+    delete_baseline: str | None = typer.Option(
+        None,
+        "--delete-baseline",
+        help="Delete a named baseline",
+        rich_help_panel="📊 Baseline Management",
+    ),
+    force_baseline: bool = typer.Option(
+        False,
+        "--force",
+        help="Force overwrite when saving baseline that already exists",
+        rich_help_panel="📊 Baseline Management",
+    ),
 ) -> None:
     """📈 Analyze code complexity and quality.
 
@@ -87,6 +176,12 @@ def main(
     [green]Analyze specific directory:[/green]
     $ mcp-vector-search analyze --path src/core
 
+    [green]Analyze only uncommitted changes:[/green]
+    $ mcp-vector-search analyze --changed-only
+
+    [green]Compare against baseline branch:[/green]
+    $ mcp-vector-search analyze --baseline main
+
     [bold cyan]Output Options:[/bold cyan]
 
     [green]Show top 5 hotspots:[/green]
@@ -95,13 +190,97 @@ def main(
     [green]Export to JSON:[/green]
     $ mcp-vector-search analyze --json > analysis.json
 
+    [green]Export to SARIF format:[/green]
+    $ mcp-vector-search analyze --format sarif --output report.sarif
+
+    [green]Export to Markdown format:[/green]
+    $ mcp-vector-search analyze --format markdown --output .
+
+    [bold cyan]CI/CD Quality Gates:[/bold cyan]
+
+    [green]Fail on ERROR-level smells (default):[/green]
+    $ mcp-vector-search analyze --fail-on-smell
+
+    [green]Fail on WARNING or ERROR smells:[/green]
+    $ mcp-vector-search analyze --fail-on-smell --severity-threshold warning
+
+    [green]CI/CD workflow with SARIF:[/green]
+    $ mcp-vector-search analyze --fail-on-smell --format sarif --output report.sarif
+
     [dim]💡 Tip: Use --quick for faster analysis on large projects.[/dim]
     """
     if ctx.invoked_subcommand is not None:
         # A subcommand was invoked - let it handle the request
         return
 
+    # Handle standalone baseline operations first
+    baseline_manager = BaselineManager()
+
+    # List baselines (standalone action)
+    if list_baselines:
+        baselines = baseline_manager.list_baselines()
+        if not baselines:
+            console.print("[yellow]No baselines found[/yellow]")
+            console.print(
+                f"\nBaselines are stored in: {baseline_manager.storage_dir}\n"
+            )
+            console.print(
+                "Create a baseline with: [cyan]mcp-vector-search analyze --save-baseline <name>[/cyan]"
+            )
+        else:
+            console.print(f"\n[bold]Available Baselines[/bold] ({len(baselines)})")
+            console.print("━" * 80)
+            for baseline in baselines:
+                console.print(f"\n[cyan]• {baseline.baseline_name}[/cyan]")
+                console.print(f" Created: {baseline.created_at}")
+                console.print(f" Project: {baseline.project_path}")
+                console.print(
+                    f" Files: {baseline.file_count} | Functions: {baseline.function_count}"
+                )
+                console.print(f" Tool Version: {baseline.tool_version}")
+                if baseline.git_info.commit:
+                    console.print(
+                        f" Git: {baseline.git_info.branch or 'detached'} @ {baseline.git_info.commit[:8]}"
+                    )
+            console.print()
+        raise typer.Exit(0)
+
+    # Delete baseline (standalone action)
+    if delete_baseline:
+        try:
+            baseline_manager.delete_baseline(delete_baseline)
+            console.print(
+                f"[green]✓[/green] Deleted baseline: [cyan]{delete_baseline}[/cyan]"
+            )
+            raise typer.Exit(0)
+        except BaselineNotFoundError as e:
+            print_error(str(e))
+            console.print("\nAvailable baselines:")
+            baselines = baseline_manager.list_baselines()
+            for baseline in baselines[:5]:
+                console.print(f" • {baseline.baseline_name}")
+            raise typer.Exit(1)
+
     try:
+        # Validate format and output options
+        valid_formats = ["console", "json", "sarif", "markdown"]
+        format_lower = format.lower()
+
+        if format_lower not in valid_formats:
+            print_error(
+                f"Invalid format: {format}. Must be one of: {', '.join(valid_formats)}"
+            )
+            raise typer.Exit(1)
+
+        # SARIF and markdown formats should have output path (defaults to current dir)
+        if format_lower == "sarif" and output is None:
+            print_error("--output is required when using --format sarif")
+            raise typer.Exit(1)
+
+        # JSON flag overrides format for backward compatibility
+        if json_output:
+            format_lower = "json"
+
         # Use provided project_root or current working directory
         if project_root is None:
             project_root = Path.cwd()
@@ -113,14 +292,54 @@ def main(
                 language_filter=language,
                 path_filter=path,
                 top_n=top,
-                json_output=
+                json_output=(format_lower == "json"),
+                show_smells=show_smells,
+                output_format=format_lower,
+                output_file=output,
+                fail_on_smell=fail_on_smell,
+                severity_threshold=severity_threshold,
+                changed_only=changed_only,
+                baseline=baseline,
+                save_baseline=save_baseline,
+                compare_baseline=compare_baseline,
+                force_baseline=force_baseline,
+                baseline_manager=baseline_manager,
+                include_context=include_context,
             )
         )
 
+    except typer.Exit:
+        # Re-raise typer.Exit to preserve exit codes from run_analysis
+        raise
     except Exception as e:
         logger.error(f"Analysis failed: {e}")
         print_error(f"Analysis failed: {e}")
-        raise typer.Exit(
+        raise typer.Exit(2)  # Exit code 2 for analysis errors
+
+
+def filter_smells_by_severity(smells: list, severity_threshold: str) -> list:
+    """Filter smells by minimum severity threshold.
+
+    Args:
+        smells: List of CodeSmell objects to filter
+        severity_threshold: Minimum severity level - "info", "warning", "error", or "none"
+
+    Returns:
+        Filtered list of smells matching or exceeding the severity threshold
+    """
+    from ...analysis.collectors.smells import SmellSeverity
+
+    if severity_threshold.lower() == "none":
+        return []
+
+    severity_levels = {
+        "info": [SmellSeverity.INFO, SmellSeverity.WARNING, SmellSeverity.ERROR],
+        "warning": [SmellSeverity.WARNING, SmellSeverity.ERROR],
+        "error": [SmellSeverity.ERROR],
+    }
+
+    allowed = severity_levels.get(severity_threshold.lower(), [SmellSeverity.ERROR])
+    return [s for s in smells if s.severity in allowed]
 
 
 async def run_analysis(
@@ -130,6 +349,18 @@ async def run_analysis(
     path_filter: Path | None = None,
     top_n: int = 10,
     json_output: bool = False,
+    show_smells: bool = True,
+    output_format: str = "console",
+    output_file: Path | None = None,
+    fail_on_smell: bool = False,
+    severity_threshold: str = "error",
+    changed_only: bool = False,
+    baseline: str | None = None,
+    save_baseline: str | None = None,
+    compare_baseline: str | None = None,
+    force_baseline: bool = False,
+    baseline_manager: BaselineManager | None = None,
+    include_context: bool = False,
 ) -> None:
     """Run code complexity analysis.
 
@@ -139,7 +370,18 @@ async def run_analysis(
         language_filter: Filter files by language
         path_filter: Analyze specific file or directory
         top_n: Number of top hotspots to show
-        json_output: Output results as JSON
+        json_output: Output results as JSON (deprecated, use output_format)
+        show_smells: Show detected code smells in output
+        output_format: Output format (console, json, sarif)
+        output_file: Output file path (for sarif format)
+        fail_on_smell: Exit with code 1 if smells are detected
+        severity_threshold: Minimum severity to trigger failure
+        changed_only: Analyze only uncommitted changes
+        baseline: Compare against baseline branch
+        save_baseline: Save analysis as named baseline
+        compare_baseline: Compare against named baseline
+        force_baseline: Force overwrite existing baseline
+        baseline_manager: BaselineManager instance
     """
     try:
        # Check if project is initialized (optional - we can analyze any directory)
@@ -178,9 +420,74 @@ async def run_analysis(
         ]
         mode_label = "Full Mode (5 collectors)"
 
+        # Initialize git manager if needed for changed/baseline filtering
+        git_manager = None
+        git_changed_files = None
+
+        if changed_only or baseline:
+            try:
+                git_manager = GitManager(project_root)
+
+                # Get changed files based on mode
+                if changed_only:
+                    git_changed_files = git_manager.get_changed_files(
+                        include_untracked=True
+                    )
+                    if not git_changed_files:
+                        if json_output:
+                            print_json(
+                                {"error": "No changed files found. Nothing to analyze."}
+                            )
+                        else:
+                            print_info("No changed files found. Nothing to analyze.")
+                        return
+                elif baseline:
+                    git_changed_files = git_manager.get_diff_files(baseline)
+                    if not git_changed_files:
+                        if json_output:
+                            print_json(
+                                {"error": f"No files changed vs baseline '{baseline}'."}
+                            )
+                        else:
+                            print_info(f"No files changed vs baseline '{baseline}'.")
+                        return
+
+            except GitNotAvailableError as e:
+                if json_output:
+                    print_json({"warning": str(e), "fallback": "full analysis"})
+                else:
+                    console.print(f"[yellow]⚠️ {e}[/yellow]")
+                    print_info("Proceeding with full codebase analysis...")
+                git_manager = None
+                git_changed_files = None
+
+            except GitNotRepoError as e:
+                if json_output:
+                    print_json({"warning": str(e), "fallback": "full analysis"})
+                else:
+                    console.print(f"[yellow]⚠️ {e}[/yellow]")
+                    print_info("Proceeding with full codebase analysis...")
+                git_manager = None
+                git_changed_files = None
+
+            except GitError as e:
+                if json_output:
+                    print_json(
+                        {"warning": f"Git error: {e}", "fallback": "full analysis"}
+                    )
+                else:
+                    console.print(f"[yellow]⚠️ Git error: {e}[/yellow]")
+                    print_info("Proceeding with full codebase analysis...")
+                git_manager = None
+                git_changed_files = None
+
         # Find files to analyze
         files_to_analyze = _find_analyzable_files(
-            project_root,
+            project_root,
+            language_filter,
+            path_filter,
+            parser_registry,
+            git_changed_files,
         )
 
         if not files_to_analyze:
@@ -190,11 +497,31 @@ async def run_analysis(
                 print_error("No files found to analyze")
             return
 
+        # Display analysis info
         if not json_output:
             console.print(
                 f"\n[bold blue]Starting Code Analysis[/bold blue] - {mode_label}"
             )
-
+
+            # Show file count information with git filtering context
+            if git_changed_files is not None:
+                # Get total files for context
+                total_files = len(
+                    _find_analyzable_files(
+                        project_root,
+                        language_filter,
+                        path_filter,
+                        parser_registry,
+                        None,
+                    )
+                )
+                filter_type = "changed" if changed_only else f"vs {baseline}"
+                console.print(
+                    f"Analyzing {len(files_to_analyze)} {filter_type} files "
+                    f"({total_files} total in project)\n"
+                )
+            else:
+                console.print(f"Files to analyze: {len(files_to_analyze)}\n")
 
         # Analyze files
         project_metrics = ProjectMetrics(project_root=str(project_root))
@@ -213,11 +540,123 @@ async def run_analysis(
         # Compute aggregates
         project_metrics.compute_aggregates()
 
-        #
-
-
-
+        # Save snapshot to metrics store for historical tracking
+        trend_data: TrendData | None = None
+        try:
+            metrics_db_path = project_root / ".mcp-vector-search" / "metrics.db"
+            metrics_store = MetricsStore(metrics_db_path)
+            snapshot_id = metrics_store.save_project_snapshot(project_metrics)
+            logger.debug(f"Saved metrics snapshot {snapshot_id}")
+
+            # Check for historical data and compute trends if available
+            trend_tracker = TrendTracker(metrics_store)
+            trend_data = trend_tracker.get_trends(project_root, days=30)
+
+            # Only show trends if we have at least 2 snapshots
+            if len(trend_data.snapshots) >= 2 and not json_output:
+                _print_trends(trend_data)
+
+        except MetricsStoreError as e:
+            logger.debug(f"Could not save metrics snapshot: {e}")
+        except Exception as e:
+            logger.debug(f"Trend tracking unavailable: {e}")
+
+        # Detect code smells if requested
+        all_smells = []
+        if show_smells:
+            from ...analysis.collectors.smells import SmellDetector
+            from ...config.thresholds import ThresholdConfig
+
+            # Load threshold config (optional - defaults will be used)
+            threshold_config = ThresholdConfig()
+            smell_detector = SmellDetector(thresholds=threshold_config)
+
+            # Detect smells across all analyzed files
+            for file_path, file_metrics in project_metrics.files.items():
+                file_smells = smell_detector.detect_all(file_metrics, file_path)
+                all_smells.extend(file_smells)
+
+        # Output results based on format
+        if output_format == "markdown":
+            # Markdown format - write two files
+            from ...analysis.reporters.markdown import MarkdownReporter
+
+            reporter = MarkdownReporter()
+
+            # Generate full analysis report
+            analysis_file = reporter.generate_analysis_report(
+                project_metrics, all_smells, output_file
+            )
+            console.print(
+                f"[green]✓[/green] Analysis report written to: {analysis_file}"
+            )
+
+            # Generate fixes report if smells were detected
+            if all_smells:
+                fixes_file = reporter.generate_fixes_report(
+                    project_metrics, all_smells, output_file
+                )
+                console.print(f"[green]✓[/green] Fixes report written to: {fixes_file}")
+
+        elif output_format == "sarif":
+            # SARIF format - write to file
+            from ...analysis.reporters.sarif import SARIFReporter
+
+            if not all_smells:
+                print_error(
+                    "No code smells detected - SARIF report requires smells to report"
+                )
+                return
+
+            reporter = SARIFReporter()
+            reporter.write_sarif(all_smells, output_file, base_path=project_root)
+            console.print(f"[green]✓[/green] SARIF report written to: {output_file}")
+
+        elif json_output or output_format == "json":
+            # JSON format - with optional LLM context
+            if include_context:
+                # Enhanced JSON export with LLM-consumable context
+                from ...analysis.interpretation import EnhancedJSONExporter
+                from ...config.thresholds import ThresholdConfig
+
+                threshold_config = ThresholdConfig()
+                exporter = EnhancedJSONExporter(
+                    project_root=project_root, threshold_config=threshold_config
+                )
+                enhanced_export = exporter.export_with_context(
+                    project_metrics, include_smells=show_smells
+                )
+                # Output as JSON
+                import json
+
+                print_json(json.loads(enhanced_export.model_dump_json()))
+            else:
+                # Standard JSON format
+                output = project_metrics.to_summary()
+                # Add smell data to JSON output if available
+                if show_smells and all_smells:
+                    from ...analysis.collectors.smells import SmellDetector
+
+                    detector = SmellDetector()
+                    smell_summary = detector.get_smell_summary(all_smells)
+                    output["smells"] = {
+                        "summary": smell_summary,
+                        "details": [
+                            {
+                                "name": smell.name,
+                                "severity": smell.severity.value,
+                                "location": smell.location,
+                                "description": smell.description,
+                                "metric_value": smell.metric_value,
+                                "threshold": smell.threshold,
+                                "suggestion": smell.suggestion,
+                            }
+                            for smell in all_smells
+                        ],
+                    }
+                print_json(output)
         else:
+            # Console format (default)
             # Import console reporter
             from ...analysis.reporters.console import ConsoleReporter
 
@@ -225,21 +664,93 @@ async def run_analysis(
             reporter.print_summary(project_metrics)
             reporter.print_distribution(project_metrics)
             reporter.print_hotspots(project_metrics, top=top_n)
+
+            # Print code smells if requested
+            if show_smells and all_smells:
+                reporter.print_smells(all_smells, top=top_n)
+
             reporter.print_recommendations(project_metrics)
 
+        # Handle baseline operations after analysis
+        if baseline_manager:
+            # Save baseline if requested
+            if save_baseline:
+                try:
+                    baseline_path = baseline_manager.save_baseline(
+                        baseline_name=save_baseline,
+                        metrics=project_metrics,
+                        overwrite=force_baseline,
+                    )
+                    if not json_output:
+                        console.print(
+                            f"\n[green]✓[/green] Saved baseline: [cyan]{save_baseline}[/cyan]"
+                        )
+                        console.print(f" Location: {baseline_path}")
+                except BaselineExistsError as e:
+                    if json_output:
+                        print_json({"error": str(e)})
+                    else:
+                        print_error(str(e))
+                        console.print(
+                            "\nUse [cyan]--force[/cyan] to overwrite the existing baseline"
+                        )
+                    raise typer.Exit(1)
+
+            # Compare against baseline if requested
+            if compare_baseline:
+                try:
+                    baseline_metrics = baseline_manager.load_baseline(compare_baseline)
+                    comparator = BaselineComparator()
+                    comparison_result = comparator.compare(
+                        current=project_metrics,
+                        baseline=baseline_metrics,
+                        baseline_name=compare_baseline,
+                    )
+
+                    # Print comparison results (console only)
+                    if not json_output and output_format == "console":
+                        from ...analysis.reporters.console import ConsoleReporter
+
+                        reporter = ConsoleReporter()
+                        reporter.print_baseline_comparison(comparison_result)
+
+                except BaselineNotFoundError as e:
+                    if json_output:
+                        print_json({"error": str(e)})
+                    else:
+                        print_error(str(e))
+                        console.print("\nAvailable baselines:")
+                        baselines = baseline_manager.list_baselines()
+                        for baseline_meta in baselines[:5]:
+                            console.print(f" • {baseline_meta.baseline_name}")
+                    raise typer.Exit(1)
+
+        # Quality gate: check if we should fail on smells
+        if fail_on_smell and all_smells:
+            failing_smells = filter_smells_by_severity(all_smells, severity_threshold)
+            if failing_smells:
+                console.print(
+                    f"\n[red]❌ Quality gate failed: {len(failing_smells)} "
+                    f"{severity_threshold}+ severity smell(s) detected[/red]"
+                )
+                raise typer.Exit(1)
+
     except ProjectNotFoundError as e:
         if json_output:
             print_json({"error": str(e)})
         else:
             print_error(str(e))
         raise typer.Exit(1)
+    except typer.Exit:
+        # Let typer.Exit propagate for quality gate failures
+        raise
     except Exception as e:
         logger.error(f"Analysis failed: {e}", exc_info=True)
         if json_output:
             print_json({"error": str(e)})
         else:
             print_error(f"Analysis failed: {e}")
-        raise
+        raise typer.Exit(2)  # Exit code 2 for analysis errors
 
 
 def _find_analyzable_files(
@@ -247,6 +758,7 @@ def _find_analyzable_files(
     language_filter: str | None,
     path_filter: Path | None,
     parser_registry: ParserRegistry,
+    git_changed_files: list[Path] | None = None,
 ) -> list[Path]:
     """Find files that can be analyzed.
 
@@ -255,12 +767,63 @@ def _find_analyzable_files(
         language_filter: Optional language filter
         path_filter: Optional path filter
         parser_registry: Parser registry for checking supported files
+        git_changed_files: Optional list of git changed files to filter by
 
     Returns:
         List of file paths to analyze
     """
     import fnmatch
 
+    # If git_changed_files is provided, use it as the primary filter
+    if git_changed_files is not None:
+        # Filter based on supported extensions and language
+        files: list[Path] = []
+        supported_extensions = parser_registry.get_supported_extensions()
+
+        for file_path in git_changed_files:
+            # Check if file extension is supported
+            if file_path.suffix.lower() not in supported_extensions:
+                logger.debug(f"Skipping unsupported file type: {file_path}")
+                continue
+
+            # Apply language filter
+            if language_filter:
+                try:
+                    parser = parser_registry.get_parser_for_file(file_path)
+                    if parser.language.lower() != language_filter.lower():
+                        logger.debug(
+                            f"Skipping file (language mismatch): {file_path} "
+                            f"({parser.language} != {language_filter})"
+                        )
+                        continue
+                except Exception as e:
+                    logger.debug(f"Skipping file (parser error): {file_path}: {e}")
+                    continue
+
+            # Apply path filter if specified
+            if path_filter:
+                path_filter_resolved = path_filter.resolve()
+                file_path_resolved = file_path.resolve()
+
+                # Check if file is within path_filter scope
+                try:
+                    # If path_filter is a file, only include that specific file
+                    if path_filter_resolved.is_file():
+                        if file_path_resolved != path_filter_resolved:
+                            continue
+                    # If path_filter is a directory, check if file is within it
+                    elif path_filter_resolved.is_dir():
+                        file_path_resolved.relative_to(path_filter_resolved)
+                except ValueError:
+                    # File is not within path_filter scope
+                    logger.debug(f"Skipping file (outside path filter): {file_path}")
+                    continue
+
+            files.append(file_path)
+
+        return sorted(files)
+
+    # No git filtering - fall back to standard directory traversal
     # Determine base path to search
     base_path = path_filter if path_filter and path_filter.exists() else project_root
 
@@ -272,7 +835,7 @@ def _find_analyzable_files(
         return []
 
     # Find all supported files
-    files
+    files = []
     supported_extensions = parser_registry.get_supported_extensions()
 
     # Common ignore patterns
@@ -404,5 +967,96 @@ async def _analyze_file(
         return None
 
 
+def _print_trends(trend_data: TrendData) -> None:
+    """Print trend analysis to console.
+
+    Args:
+        trend_data: TrendData from TrendTracker
+    """
+    from rich.panel import Panel
+    from rich.table import Table
+
+    # Build trend display
+    table = Table(show_header=False, box=None, padding=(0, 2))
+    table.add_column("Metric", style="bold")
+    table.add_column("Direction")
+    table.add_column("Change")
+
+    def trend_icon(direction: TrendDirection) -> str:
+        """Get icon for trend direction."""
+        if direction == TrendDirection.IMPROVING:
+            return "[green]↓ improving[/green]"
+        elif direction == TrendDirection.WORSENING:
+            return "[red]↑ worsening[/red]"
+        else:
+            return "[dim]→ stable[/dim]"
+
+    def format_change(change: float, invert: bool = False) -> str:
+        """Format percentage change with color."""
+        if abs(change) < 0.1:
+            return "[dim]—[/dim]"
+        # For complexity/smells, negative is good; for health, positive is good
+        is_good = (change < 0) if not invert else (change > 0)
+        color = "green" if is_good else "red"
+        sign = "+" if change > 0 else ""
+        return f"[{color}]{sign}{change:.1f}%[/{color}]"
+
+    # Complexity trend
+    table.add_row(
+        "Complexity",
+        trend_icon(trend_data.complexity_direction),
+        format_change(trend_data.avg_complexity_change),
+    )
+
+    # Smell trend
+    table.add_row(
+        "Code Smells",
+        trend_icon(trend_data.smell_direction),
+        format_change(trend_data.smell_count_change),
+    )
+
+    # Health trend
+    table.add_row(
+        "Health Score",
+        trend_icon(trend_data.health_direction),
+        format_change(
+            (
+                trend_data.health_trend[-1][1] - trend_data.health_trend[0][1]
+                if len(trend_data.health_trend) >= 2
+                else 0
+            ),
+            invert=True,
+        ),
+    )
+
+    # Show panel with snapshot count
+    snapshot_count = len(trend_data.snapshots)
+    panel = Panel(
+        table,
+        title=f"[bold cyan]Trends[/bold cyan] (last 30 days, {snapshot_count} snapshots)",
+        border_style="cyan",
+        padding=(0, 1),
+    )
+    console.print(panel)
+
+    # Show critical regressions if any
+    if trend_data.critical_regressions:
+        console.print("\n[bold red]⚠ Regressions Detected:[/bold red]")
+        for regression in trend_data.critical_regressions[:3]:
+            console.print(
+                f" • [red]{regression.file_path}[/red]: "
+                f"complexity {regression.change_percentage:+.1f}%"
+            )
+
+    # Show significant improvements if any
+    if trend_data.significant_improvements:
+        console.print("\n[bold green]✓ Improvements:[/bold green]")
+        for improvement in trend_data.significant_improvements[:3]:
+            console.print(
+                f" • [green]{improvement.file_path}[/green]: "
+                f"complexity {improvement.change_percentage:+.1f}%"
+            )
+
+
 if __name__ == "__main__":
     analyze_app()
```