mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (92)
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +111 -0
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +74 -0
  7. mcp_vector_search/analysis/collectors/base.py +164 -0
  8. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  9. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  10. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  11. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  12. mcp_vector_search/analysis/collectors/smells.py +325 -0
  13. mcp_vector_search/analysis/debt.py +516 -0
  14. mcp_vector_search/analysis/interpretation.py +685 -0
  15. mcp_vector_search/analysis/metrics.py +414 -0
  16. mcp_vector_search/analysis/reporters/__init__.py +7 -0
  17. mcp_vector_search/analysis/reporters/console.py +646 -0
  18. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  19. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  20. mcp_vector_search/analysis/storage/__init__.py +93 -0
  21. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  22. mcp_vector_search/analysis/storage/schema.py +245 -0
  23. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  24. mcp_vector_search/analysis/trends.py +308 -0
  25. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  26. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  27. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  28. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  29. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  30. mcp_vector_search/cli/commands/analyze.py +1062 -0
  31. mcp_vector_search/cli/commands/chat.py +1455 -0
  32. mcp_vector_search/cli/commands/index.py +621 -5
  33. mcp_vector_search/cli/commands/index_background.py +467 -0
  34. mcp_vector_search/cli/commands/init.py +13 -0
  35. mcp_vector_search/cli/commands/install.py +597 -335
  36. mcp_vector_search/cli/commands/install_old.py +8 -4
  37. mcp_vector_search/cli/commands/mcp.py +78 -6
  38. mcp_vector_search/cli/commands/reset.py +68 -26
  39. mcp_vector_search/cli/commands/search.py +224 -8
  40. mcp_vector_search/cli/commands/setup.py +1184 -0
  41. mcp_vector_search/cli/commands/status.py +339 -5
  42. mcp_vector_search/cli/commands/uninstall.py +276 -357
  43. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  44. mcp_vector_search/cli/commands/visualize/cli.py +292 -0
  45. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  46. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  47. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
  48. mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
  49. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  50. mcp_vector_search/cli/commands/visualize/server.py +600 -0
  51. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  52. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  53. mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
  54. mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
  55. mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
  56. mcp_vector_search/cli/didyoumean.py +27 -2
  57. mcp_vector_search/cli/main.py +127 -160
  58. mcp_vector_search/cli/output.py +158 -13
  59. mcp_vector_search/config/__init__.py +4 -0
  60. mcp_vector_search/config/default_thresholds.yaml +52 -0
  61. mcp_vector_search/config/settings.py +12 -0
  62. mcp_vector_search/config/thresholds.py +273 -0
  63. mcp_vector_search/core/__init__.py +16 -0
  64. mcp_vector_search/core/auto_indexer.py +3 -3
  65. mcp_vector_search/core/boilerplate.py +186 -0
  66. mcp_vector_search/core/config_utils.py +394 -0
  67. mcp_vector_search/core/database.py +406 -94
  68. mcp_vector_search/core/embeddings.py +24 -0
  69. mcp_vector_search/core/exceptions.py +11 -0
  70. mcp_vector_search/core/git.py +380 -0
  71. mcp_vector_search/core/git_hooks.py +4 -4
  72. mcp_vector_search/core/indexer.py +632 -54
  73. mcp_vector_search/core/llm_client.py +756 -0
  74. mcp_vector_search/core/models.py +91 -1
  75. mcp_vector_search/core/project.py +17 -0
  76. mcp_vector_search/core/relationships.py +473 -0
  77. mcp_vector_search/core/scheduler.py +11 -11
  78. mcp_vector_search/core/search.py +179 -29
  79. mcp_vector_search/mcp/server.py +819 -9
  80. mcp_vector_search/parsers/python.py +285 -5
  81. mcp_vector_search/utils/__init__.py +2 -0
  82. mcp_vector_search/utils/gitignore.py +0 -3
  83. mcp_vector_search/utils/gitignore_updater.py +212 -0
  84. mcp_vector_search/utils/monorepo.py +66 -4
  85. mcp_vector_search/utils/timing.py +10 -6
  86. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
  87. mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
  88. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
  89. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
  90. mcp_vector_search/cli/commands/visualize.py +0 -1467
  91. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  92. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,646 @@
+"""Console reporter for code analysis results."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from rich.console import Console
+from rich.table import Table
+
+if TYPE_CHECKING:
+    from ..metrics import ProjectMetrics
+
+console = Console()
+
+
+class ConsoleReporter:
+    """Console reporter for displaying analysis results in terminal."""
+
+    def print_summary(self, metrics: ProjectMetrics) -> None:
+        """Print high-level project summary.
+
+        Args:
+            metrics: Project metrics to display
+        """
+        console.print("\n[bold blue]📈 Code Complexity Analysis[/bold blue]")
+        console.print("━" * 60)
+        console.print()
+
+        console.print("[bold]Project Summary[/bold]")
+        console.print(f" Files Analyzed: {metrics.total_files}")
+        console.print(f" Total Lines: {metrics.total_lines:,}")
+        console.print(f" Functions: {metrics.total_functions}")
+        console.print(f" Classes: {metrics.total_classes}")
+        console.print(f" Avg File Complexity: {metrics.avg_file_complexity:.1f}")
+        console.print()
+
+    def print_distribution(self, metrics: ProjectMetrics) -> None:
+        """Print complexity grade distribution.
+
+        Args:
+            metrics: Project metrics with grade distribution
+        """
+        console.print("[bold]Complexity Distribution[/bold]")
+
+        # Get grade distribution
+        distribution = metrics._compute_grade_distribution()
+        total_chunks = sum(distribution.values())
+
+        if total_chunks == 0:
+            console.print(" No functions/methods analyzed")
+            console.print()
+            return
+
+        # Define grade colors and descriptions
+        grade_info = {
+            "A": ("green", "Excellent (0-5)"),
+            "B": ("blue", "Good (6-10)"),
+            "C": ("yellow", "Acceptable (11-20)"),
+            "D": ("orange1", "Needs Improvement (21-30)"),
+            "F": ("red", "Refactor Required (31+)"),
+        }
+
+        # Print distribution table
+        table = Table(show_header=True, header_style="bold cyan", box=None)
+        table.add_column("Grade", style="bold", width=8)
+        table.add_column("Description", width=25)
+        table.add_column("Count", justify="right", width=8)
+        table.add_column("Percentage", justify="right", width=10)
+        table.add_column("Bar", width=20)
+
+        for grade in ["A", "B", "C", "D", "F"]:
+            count = distribution.get(grade, 0)
+            percentage = (count / total_chunks * 100) if total_chunks > 0 else 0
+            color, description = grade_info[grade]
+
+            # Create visual bar
+            bar_length = int(percentage / 5)  # Scale: 5% = 1 char
+            bar = "█" * bar_length
+
+            table.add_row(
+                f"[{color}]{grade}[/{color}]",
+                description,
+                f"{count}",
+                f"{percentage:.1f}%",
+                f"[{color}]{bar}[/{color}]",
+            )
+
+        console.print(table)
+        console.print()
+
+    def print_hotspots(self, metrics: ProjectMetrics, top: int = 10) -> None:
+        """Print complexity hotspots.
+
+        Args:
+            metrics: Project metrics
+            top: Number of top hotspots to display
+        """
+        hotspot_files = metrics.get_hotspots(limit=top)
+
+        if not hotspot_files:
+            console.print("[bold]🔥 Complexity Hotspots[/bold]")
+            console.print(" No hotspots found")
+            console.print()
+            return
+
+        console.print(
+            f"[bold]🔥 Top {min(top, len(hotspot_files))} Complexity Hotspots[/bold]"
+        )
+
+        table = Table(show_header=True, header_style="bold cyan", box=None)
+        table.add_column("Rank", justify="right", width=6)
+        table.add_column("File", style="cyan", width=50)
+        table.add_column("Avg Complexity", justify="right", width=16)
+        table.add_column("Grade", justify="center", width=8)
+        table.add_column("Functions", justify="right", width=10)
+
+        for rank, file_metrics in enumerate(hotspot_files, 1):
+            # Compute average grade
+            if file_metrics.chunks:
+                grades = [chunk.complexity_grade for chunk in file_metrics.chunks]
+                avg_grade = max(set(grades), key=grades.count)  # Most common grade
+            else:
+                avg_grade = "N/A"
+
+            # Color code grade
+            grade_colors = {
+                "A": "green",
+                "B": "blue",
+                "C": "yellow",
+                "D": "orange1",
+                "F": "red",
+            }
+            grade_color = grade_colors.get(avg_grade, "white")
+
+            # Truncate file path if too long
+            file_path = file_metrics.file_path
+            if len(file_path) > 48:
+                file_path = "..." + file_path[-45:]
+
+            table.add_row(
+                f"{rank}",
+                file_path,
+                f"{file_metrics.avg_complexity:.1f}",
+                f"[{grade_color}]{avg_grade}[/{grade_color}]",
+                f"{len(file_metrics.chunks)}",
+            )
+
+        console.print(table)
+        console.print()
+
+    def print_smells(self, smells: list, top: int = 10) -> None:
+        """Print detected code smells.
+
+        Args:
+            smells: List of CodeSmell objects
+            top: Maximum number of smells to display
+        """
+        from ..collectors.smells import SmellSeverity
+
+        if not smells:
+            console.print("[bold]🔍 Code Smells[/bold]")
+            console.print(" No code smells detected!")
+            console.print()
+            return
+
+        console.print(
+            f"[bold]🔍 Code Smells Detected[/bold] - Found {len(smells)} issues"
+        )
+
+        # Group smells by severity
+        error_smells = [s for s in smells if s.severity == SmellSeverity.ERROR]
+        warning_smells = [s for s in smells if s.severity == SmellSeverity.WARNING]
+        info_smells = [s for s in smells if s.severity == SmellSeverity.INFO]
+
+        # Summary
+        console.print(
+            f" [red]Errors: {len(error_smells)}[/red] "
+            f"[yellow]Warnings: {len(warning_smells)}[/yellow] "
+            f"[blue]Info: {len(info_smells)}[/blue]"
+        )
+        console.print()
+
+        # Show top smells (prioritize errors first)
+        smells_to_display = error_smells + warning_smells + info_smells
+        smells_to_display = smells_to_display[:top]
+
+        # Create table
+        table = Table(show_header=True, header_style="bold cyan", box=None)
+        table.add_column("Severity", width=10)
+        table.add_column("Smell Type", width=20)
+        table.add_column("Location", width=40)
+        table.add_column("Details", width=30)
+
+        for smell in smells_to_display:
+            # Color code by severity
+            if smell.severity == SmellSeverity.ERROR:
+                severity_str = "[red]ERROR[/red]"
+            elif smell.severity == SmellSeverity.WARNING:
+                severity_str = "[yellow]WARNING[/yellow]"
+            else:
+                severity_str = "[blue]INFO[/blue]"
+
+            # Truncate location if too long
+            location = smell.location
+            if len(location) > 38:
+                location = "..." + location[-35:]
+
+            # Format details (metric value vs threshold)
+            details = f"{smell.metric_value} > {smell.threshold}"
+
+            table.add_row(severity_str, smell.name, location, details)
+
+        console.print(table)
+        console.print()
+
+        # Show suggestions for top smells
+        if smells_to_display:
+            console.print("[bold]💡 Top Suggestions[/bold]")
+            shown_suggestions = set()
+            suggestion_count = 0
+
+            for smell in smells_to_display:
+                if smell.suggestion and smell.suggestion not in shown_suggestions:
+                    console.print(f" • [dim]{smell.suggestion}[/dim]")
+                    shown_suggestions.add(smell.suggestion)
+                    suggestion_count += 1
+
+                # Limit to 5 unique suggestions
+                if suggestion_count >= 5:
+                    break
+
+            console.print()
+
+    def print_instability(self, metrics: ProjectMetrics, top: int = 10) -> None:
+        """Print instability metrics.
+
+        Args:
+            metrics: Project metrics
+            top: Number of top files to display
+        """
+        console.print("[bold]🔗 Instability Metrics[/bold]")
+
+        # Collect instability data from files with coupling metrics
+        files_with_coupling = [
+            (f.file_path, f.coupling)
+            for f in metrics.files.values()
+            if f.coupling.efferent_coupling + f.coupling.afferent_coupling > 0
+        ]
+
+        if not files_with_coupling:
+            console.print(" No coupling data available")
+            console.print()
+            return
+
+        # Compute instability distribution
+        stable_count = sum(1 for _, c in files_with_coupling if c.instability <= 0.3)
+        balanced_count = sum(
+            1 for _, c in files_with_coupling if 0.3 < c.instability <= 0.7
+        )
+        unstable_count = sum(1 for _, c in files_with_coupling if c.instability > 0.7)
+        total_files = len(files_with_coupling)
+
+        console.print(f" Total Files: {total_files}")
+        console.print(
+            f" [green]Stable (I ≤ 0.3):[/green] {stable_count} "
+            f"({stable_count / total_files * 100:.1f}%)"
+        )
+        console.print(
+            f" [yellow]Balanced (0.3 < I ≤ 0.7):[/yellow] {balanced_count} "
+            f"({balanced_count / total_files * 100:.1f}%)"
+        )
+        console.print(
+            f" [red]Unstable (I > 0.7):[/red] {unstable_count} "
+            f"({unstable_count / total_files * 100:.1f}%)"
+        )
+        console.print()
+
+        # Show most stable files
+        most_stable = sorted(files_with_coupling, key=lambda x: x[1].instability)[:top]
+        if most_stable:
+            console.print(
+                f"[bold green]✓ Most Stable Files (Top {len(most_stable)})[/bold green]"
+            )
+
+            table = Table(show_header=True, header_style="bold cyan", box=None)
+            table.add_column("Rank", justify="right", width=6)
+            table.add_column("File", style="cyan", width=45)
+            table.add_column("Instability", justify="right", width=12)
+            table.add_column("Category", justify="center", width=12)
+            table.add_column("Ce/Ca", justify="right", width=10)
+
+            for rank, (file_path, coupling) in enumerate(most_stable, 1):
+                # Truncate file path if too long
+                display_path = file_path
+                if len(display_path) > 43:
+                    display_path = "..." + display_path[-40:]
+
+                # Determine category color
+                if coupling.instability <= 0.3:
+                    category = "[green]Stable[/green]"
+                elif coupling.instability <= 0.7:
+                    category = "[yellow]Balanced[/yellow]"
+                else:
+                    category = "[red]Unstable[/red]"
+
+                table.add_row(
+                    f"{rank}",
+                    display_path,
+                    f"{coupling.instability:.3f}",
+                    category,
+                    f"{coupling.efferent_coupling}/{coupling.afferent_coupling}",
+                )
+
+            console.print(table)
+            console.print()
+
+        # Show most unstable files
+        most_unstable = sorted(
+            files_with_coupling, key=lambda x: x[1].instability, reverse=True
+        )[:top]
+        if most_unstable:
+            console.print(
+                f"[bold red]⚠️ Most Unstable Files (Top {len(most_unstable)})[/bold red]"
+            )
+
+            table = Table(show_header=True, header_style="bold cyan", box=None)
+            table.add_column("Rank", justify="right", width=6)
+            table.add_column("File", style="cyan", width=45)
+            table.add_column("Instability", justify="right", width=12)
+            table.add_column("Category", justify="center", width=12)
+            table.add_column("Ce/Ca", justify="right", width=10)
+
+            for rank, (file_path, coupling) in enumerate(most_unstable, 1):
+                # Truncate file path if too long
+                display_path = file_path
+                if len(display_path) > 43:
+                    display_path = "..." + display_path[-40:]
+
+                # Determine category color
+                if coupling.instability <= 0.3:
+                    category = "[green]Stable[/green]"
+                elif coupling.instability <= 0.7:
+                    category = "[yellow]Balanced[/yellow]"
+                else:
+                    category = "[red]Unstable[/red]"
+
+                table.add_row(
+                    f"{rank}",
+                    display_path,
+                    f"{coupling.instability:.3f}",
+                    category,
+                    f"{coupling.efferent_coupling}/{coupling.afferent_coupling}",
+                )
+
+            console.print(table)
+            console.print()
+
+    def print_recommendations(self, metrics: ProjectMetrics) -> None:
+        """Print actionable recommendations.
+
+        Args:
+            metrics: Project metrics
+        """
+        console.print("[bold]💡 Recommendations[/bold]")
+
+        recommendations: list[str] = []
+
+        # Check for files needing attention
+        files_needing_attention = metrics._count_files_needing_attention()
+        if files_needing_attention > 0:
+            recommendations.append(
+                f"[yellow]•[/yellow] {files_needing_attention} files have health score below 0.7 - consider refactoring"
+            )
+
+        # Check for high complexity files
+        hotspots = metrics.get_hotspots(limit=5)
+        high_complexity_files = [f for f in hotspots if f.avg_complexity > 20]
+        if high_complexity_files:
+            recommendations.append(
+                f"[yellow]•[/yellow] {len(high_complexity_files)} files have average complexity > 20 - prioritize these for refactoring"
+            )

+        # Check grade distribution
+        distribution = metrics._compute_grade_distribution()
+        total_chunks = sum(distribution.values())
+        if total_chunks > 0:
+            d_f_percentage = (
+                (distribution.get("D", 0) + distribution.get("F", 0))
+                / total_chunks
+                * 100
+            )
+            if d_f_percentage > 20:
+                recommendations.append(
+                    f"[yellow]•[/yellow] {d_f_percentage:.1f}% of functions have D/F grades - aim to reduce this below 10%"
+                )
+
+        # Check for highly unstable files (instability > 0.8)
+        highly_unstable = [
+            f
+            for f in metrics.files.values()
+            if f.coupling.instability > 0.8
+            and (f.coupling.efferent_coupling + f.coupling.afferent_coupling) > 0
+        ]
+        if highly_unstable:
+            recommendations.append(
+                f"[yellow]•[/yellow] {len(highly_unstable)} files have instability > 0.8 - consider reducing dependencies"
+            )
+
+        # Check overall health
+        avg_health = metrics._compute_avg_health_score()
+        if avg_health < 0.7:
+            recommendations.append(
+                f"[yellow]•[/yellow] Average health score is {avg_health:.2f} - target 0.8+ through refactoring"
+            )
+        elif avg_health >= 0.9:
+            recommendations.append(
+                "[green]✓[/green] Excellent code health! Keep up the good work."
+            )
+
+        if not recommendations:
+            recommendations.append(
+                "[green]✓[/green] Code quality looks good! No critical issues found."
+            )
+
+        for rec in recommendations:
+            console.print(f" {rec}")
+
+        console.print()
+
+        # Print tips
+        console.print("[dim]💡 Tips:[/dim]")
+        console.print(
+            "[dim] • Use [cyan]--top N[/cyan] to see more/fewer hotspots[/dim]"
+        )
+        console.print(
+            "[dim] • Use [cyan]--json[/cyan] to export results for further analysis[/dim]"
+        )
+        console.print(
+            "[dim] • Focus refactoring efforts on Grade D and F functions first[/dim]"
+        )
+        console.print(
+            "[dim] • Stable files (I ≤ 0.3) should contain abstractions and core logic[/dim]"
+        )
+        console.print(
+            "[dim] • Unstable files (I > 0.7) should contain concrete implementations[/dim]"
+        )
+        console.print()
+
+    def print_baseline_comparison(self, comparison_result) -> None:
+        """Print baseline comparison results.
+
+        Args:
+            comparison_result: ComparisonResult from BaselineComparator
+        """
+        console.print(
+            f"\n[bold blue]📊 Baseline Comparison[/bold blue] - vs {comparison_result.baseline_name}"
+        )
+        console.print("━" * 80)
+        console.print()
+
+        # Summary statistics
+        console.print("[bold]Summary[/bold]")
+        summary = comparison_result.summary
+        console.print(
+            f" Total Files Compared: {comparison_result.total_files_compared}"
+        )
+        console.print(
+            f" Files - Current: {summary.get('total_files_current', 0)} | "
+            f"Baseline: {summary.get('total_files_baseline', 0)}"
+        )
+        console.print(
+            f" Functions - Current: {summary.get('total_functions_current', 0)} | "
+            f"Baseline: {summary.get('total_functions_baseline', 0)}"
+        )
+        console.print()
+
+        # Change summary
+        console.print("[bold]Changes[/bold]")
+        console.print(
+            f" [red]Regressions:[/red] {len(comparison_result.regressions)} files"
+        )
+        console.print(
+            f" [green]Improvements:[/green] {len(comparison_result.improvements)} files"
+        )
+        console.print(
+            f" [dim]Unchanged:[/dim] {len(comparison_result.unchanged)} files"
+        )
+        console.print(
+            f" [blue]New Files:[/blue] {len(comparison_result.new_files)} files"
+        )
+        console.print(
+            f" [yellow]Deleted Files:[/yellow] {len(comparison_result.deleted_files)} files"
+        )
+        console.print()
+
+        # Complexity metrics comparison
+        avg_cc_current = summary.get("avg_complexity_current", 0.0)
+        avg_cc_baseline = summary.get("avg_complexity_baseline", 0.0)
+        avg_cc_delta = avg_cc_current - avg_cc_baseline
+        avg_cc_pct = (
+            (avg_cc_delta / avg_cc_baseline * 100) if avg_cc_baseline > 0 else 0.0
+        )
+
+        max_cc_current = summary.get("max_complexity_current", 0)
+        max_cc_baseline = summary.get("max_complexity_baseline", 0)
+        max_cc_delta = max_cc_current - max_cc_baseline
+
+        console.print("[bold]Complexity Metrics[/bold]")
+
+        # Average complexity with color coding
+        delta_color = (
+            "red" if avg_cc_delta > 0 else "green" if avg_cc_delta < 0 else "dim"
+        )
+        delta_sign = "+" if avg_cc_delta > 0 else ""
+        console.print(
+            f" Avg Complexity: {avg_cc_current:.2f} "
+            f"(baseline: {avg_cc_baseline:.2f}, "
+            f"[{delta_color}]{delta_sign}{avg_cc_delta:.2f} / {delta_sign}{avg_cc_pct:.1f}%[/{delta_color}])"
+        )
+
+        # Max complexity with color coding
+        max_delta_color = (
+            "red" if max_cc_delta > 0 else "green" if max_cc_delta < 0 else "dim"
+        )
+        max_delta_sign = "+" if max_cc_delta > 0 else ""
+        console.print(
+            f" Max Complexity: {max_cc_current} "
+            f"(baseline: {max_cc_baseline}, "
+            f"[{max_delta_color}]{max_delta_sign}{max_cc_delta}[/{max_delta_color}])"
+        )
+        console.print()
+
+        # Show regressions
+        if comparison_result.regressions:
+            console.print(
+                f"[bold red]⚠️ Regressions ({len(comparison_result.regressions)} files)[/bold red]"
+            )
+
+            # Show top 10 regressions
+            top_regressions = comparison_result.regressions[:10]
+
+            table = Table(show_header=True, header_style="bold cyan", box=None)
+            table.add_column("File", style="cyan", width=45)
+            table.add_column("Metric", width=20)
+            table.add_column("Change", justify="right", width=15)
+
+            for file_comp in top_regressions:
+                # Truncate file path
+                file_path = file_comp.file_path
+                if len(file_path) > 43:
+                    file_path = "..." + file_path[-40:]
+
+                # Show worst regression metric for this file
+                regression_changes = [
+                    c for c in file_comp.metric_changes if c.is_regression
+                ]
+                if regression_changes:
+                    worst_change = max(
+                        regression_changes, key=lambda c: abs(c.percentage_delta)
+                    )
+                    table.add_row(
+                        file_path,
+                        worst_change.metric_name.replace("_", " ").title(),
+                        f"+{worst_change.percentage_delta:.1f}%",
+                    )
+
+            console.print(table)
+
+            if len(comparison_result.regressions) > 10:
+                console.print(
+                    f" [dim]... and {len(comparison_result.regressions) - 10} more[/dim]"
+                )
+            console.print()
+
+        # Show improvements
+        if comparison_result.improvements:
+            console.print(
+                f"[bold green]✓ Improvements ({len(comparison_result.improvements)} files)[/bold green]"
+            )
+
+            # Show top 10 improvements
+            top_improvements = comparison_result.improvements[:10]
+
+            table = Table(show_header=True, header_style="bold cyan", box=None)
+            table.add_column("File", style="cyan", width=45)
+            table.add_column("Metric", width=20)
+            table.add_column("Change", justify="right", width=15)
+
+            for file_comp in top_improvements:
+                # Truncate file path
+                file_path = file_comp.file_path
+                if len(file_path) > 43:
+                    file_path = "..." + file_path[-40:]
+
+                # Show best improvement metric for this file
+                improvement_changes = [
+                    c for c in file_comp.metric_changes if c.is_improvement
+                ]
+                if improvement_changes:
+                    best_change = max(
+                        improvement_changes, key=lambda c: abs(c.percentage_delta)
+                    )
+                    table.add_row(
+                        file_path,
+                        best_change.metric_name.replace("_", " ").title(),
+                        f"{best_change.percentage_delta:.1f}%",
+                    )
+
+            console.print(table)
+
+            if len(comparison_result.improvements) > 10:
+                console.print(
+                    f" [dim]... and {len(comparison_result.improvements) - 10} more[/dim]"
+                )
+            console.print()
+
+        # Show new/deleted files summary
+        if comparison_result.new_files:
+            console.print(
+                f"[bold blue]📄 New Files ({len(comparison_result.new_files)})[/bold blue]"
+            )
+            for file_comp in comparison_result.new_files[:5]:
+                file_path = file_comp.file_path
+                if len(file_path) > 70:
+                    file_path = "..." + file_path[-67:]
+                console.print(f" • {file_path}")
+            if len(comparison_result.new_files) > 5:
+                console.print(
+                    f" [dim]... and {len(comparison_result.new_files) - 5} more[/dim]"
+                )
+            console.print()
+
+        if comparison_result.deleted_files:
+            console.print(
+                f"[bold yellow]🗑 Deleted Files ({len(comparison_result.deleted_files)})[/bold yellow]"
+            )
+            for file_comp in comparison_result.deleted_files[:5]:
+                file_path = file_comp.file_path
+                if len(file_path) > 70:
+                    file_path = "..." + file_path[-67:]
+                console.print(f" • {file_path}")
+            if len(comparison_result.deleted_files) > 5:
+                console.print(
+                    f" [dim]... and {len(comparison_result.deleted_files) - 5} more[/dim]"
+                )
+            console.print()
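
For orientation, a minimal usage sketch of the reporter added above, assuming a ProjectMetrics instance and a list of CodeSmell objects have already been produced by the analysis pipeline introduced in this release (how they are built is not shown in this diff); the helper name report_metrics is hypothetical:

# Hypothetical usage sketch -- not part of the package diff above.
from mcp_vector_search.analysis.reporters.console import ConsoleReporter

def report_metrics(metrics, smells):
    """Render a full console report from pre-computed analysis results."""
    reporter = ConsoleReporter()
    reporter.print_summary(metrics)            # project totals
    reporter.print_distribution(metrics)       # A-F grade table
    reporter.print_hotspots(metrics, top=10)   # most complex files
    reporter.print_smells(smells, top=10)      # detected code smells
    reporter.print_instability(metrics, top=10)
    reporter.print_recommendations(metrics)    # actionable suggestions and tips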