mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +48 -1
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +35 -0
  7. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  8. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  9. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  10. mcp_vector_search/analysis/collectors/smells.py +325 -0
  11. mcp_vector_search/analysis/debt.py +516 -0
  12. mcp_vector_search/analysis/interpretation.py +685 -0
  13. mcp_vector_search/analysis/metrics.py +74 -1
  14. mcp_vector_search/analysis/reporters/__init__.py +3 -1
  15. mcp_vector_search/analysis/reporters/console.py +424 -0
  16. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  17. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  18. mcp_vector_search/analysis/storage/__init__.py +93 -0
  19. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  20. mcp_vector_search/analysis/storage/schema.py +245 -0
  21. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  22. mcp_vector_search/analysis/trends.py +308 -0
  23. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  24. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  25. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  26. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  27. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  28. mcp_vector_search/cli/commands/analyze.py +665 -11
  29. mcp_vector_search/cli/commands/chat.py +193 -0
  30. mcp_vector_search/cli/commands/index.py +600 -2
  31. mcp_vector_search/cli/commands/index_background.py +467 -0
  32. mcp_vector_search/cli/commands/search.py +194 -1
  33. mcp_vector_search/cli/commands/setup.py +64 -13
  34. mcp_vector_search/cli/commands/status.py +302 -3
  35. mcp_vector_search/cli/commands/visualize/cli.py +26 -10
  36. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
  37. mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
  38. mcp_vector_search/cli/commands/visualize/server.py +304 -15
  39. mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
  40. mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
  41. mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
  42. mcp_vector_search/cli/didyoumean.py +5 -0
  43. mcp_vector_search/cli/main.py +16 -5
  44. mcp_vector_search/cli/output.py +134 -5
  45. mcp_vector_search/config/thresholds.py +89 -1
  46. mcp_vector_search/core/__init__.py +16 -0
  47. mcp_vector_search/core/database.py +39 -2
  48. mcp_vector_search/core/embeddings.py +24 -0
  49. mcp_vector_search/core/git.py +380 -0
  50. mcp_vector_search/core/indexer.py +445 -84
  51. mcp_vector_search/core/llm_client.py +9 -4
  52. mcp_vector_search/core/models.py +88 -1
  53. mcp_vector_search/core/relationships.py +473 -0
  54. mcp_vector_search/core/search.py +1 -1
  55. mcp_vector_search/mcp/server.py +795 -4
  56. mcp_vector_search/parsers/python.py +285 -5
  57. mcp_vector_search/utils/gitignore.py +0 -3
  58. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
  59. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
  60. mcp_vector_search/cli/commands/visualize.py.original +0 -2536
  61. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
  62. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
  63. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -33,6 +33,12 @@ class ChunkMetrics:
33
33
  parameter_count: int = 0
34
34
  lines_of_code: int = 0
35
35
 
36
+ # Halstead metrics (Phase 4)
37
+ halstead_volume: float | None = None
38
+ halstead_difficulty: float | None = None
39
+ halstead_effort: float | None = None
40
+ halstead_bugs: float | None = None
41
+
36
42
  # Code smells detected
37
43
  smells: list[str] = field(default_factory=list)
38
44
 
@@ -87,7 +93,7 @@ class ChunkMetrics:
87
93
  """
88
94
  import json
89
95
 
90
- return {
96
+ metadata = {
91
97
  "cognitive_complexity": self.cognitive_complexity,
92
98
  "cyclomatic_complexity": self.cyclomatic_complexity,
93
99
  "max_nesting_depth": self.max_nesting_depth,
@@ -98,6 +104,61 @@ class ChunkMetrics:
98
104
  "smell_count": len(self.smells),
99
105
  }
100
106
 
107
+ # Add Halstead metrics if available
108
+ if self.halstead_volume is not None:
109
+ metadata["halstead_volume"] = self.halstead_volume
110
+ if self.halstead_difficulty is not None:
111
+ metadata["halstead_difficulty"] = self.halstead_difficulty
112
+ if self.halstead_effort is not None:
113
+ metadata["halstead_effort"] = self.halstead_effort
114
+ if self.halstead_bugs is not None:
115
+ metadata["halstead_bugs"] = self.halstead_bugs
116
+
117
+ return metadata
118
+
119
+
120
+ @dataclass
121
+ class CouplingMetrics:
122
+ """Coupling metrics for a file.
123
+
124
+ Tracks dependencies between files to measure coupling.
125
+
126
+ Attributes:
127
+ efferent_coupling: Number of files this file depends on (outgoing dependencies)
128
+ afferent_coupling: Number of files that depend on this file (incoming dependencies)
129
+ imports: List of all imported modules
130
+ internal_imports: Imports from same project
131
+ external_imports: Third-party and standard library imports
132
+ dependents: List of files that import this file
133
+ instability: Ratio Ce / (Ce + Ca), measures resistance to change (0-1)
134
+ """
135
+
136
+ efferent_coupling: int = 0 # Ce - outgoing dependencies
137
+ afferent_coupling: int = 0 # Ca - incoming dependencies
138
+ imports: list[str] = field(default_factory=list)
139
+ internal_imports: list[str] = field(default_factory=list)
140
+ external_imports: list[str] = field(default_factory=list)
141
+ dependents: list[str] = field(default_factory=list)
142
+
143
+ @property
144
+ def instability(self) -> float:
145
+ """Calculate instability metric (0-1).
146
+
147
+ Instability = Ce / (Ce + Ca)
148
+
149
+ Interpretation:
150
+ - 0.0: Maximally stable (many incoming, few outgoing)
151
+ - 0.5: Balanced (equal incoming and outgoing)
152
+ - 1.0: Maximally unstable (many outgoing, few incoming)
153
+
154
+ Returns:
155
+ Instability ratio from 0.0 to 1.0
156
+ """
157
+ total = self.efferent_coupling + self.afferent_coupling
158
+ if total == 0:
159
+ return 0.0
160
+ return self.efferent_coupling / total
161
+
101
162
 
102
163
  @dataclass
103
164
  class FileMetrics:
@@ -119,6 +180,7 @@ class FileMetrics:
119
180
  avg_complexity: Average cognitive complexity per chunk
120
181
  max_complexity: Maximum cognitive complexity in any chunk
121
182
  chunks: List of chunk metrics for each function/class
183
+ coupling: Coupling metrics for this file
122
184
  """
123
185
 
124
186
  file_path: str
@@ -136,9 +198,20 @@ class FileMetrics:
136
198
  avg_complexity: float = 0.0
137
199
  max_complexity: int = 0
138
200
 
201
+ # Coupling metrics (Phase 3)
202
+ efferent_coupling: int = 0 # Outgoing dependencies
203
+ imports: list[str] = field(default_factory=list) # All imported modules
204
+ internal_imports: list[str] = field(default_factory=list) # Same-project imports
205
+ external_imports: list[str] = field(
206
+ default_factory=list
207
+ ) # Third-party/stdlib imports
208
+
139
209
  # Chunk metrics for each function/class
140
210
  chunks: list[ChunkMetrics] = field(default_factory=list)
141
211
 
212
+ # Coupling metrics
213
+ coupling: CouplingMetrics = field(default_factory=CouplingMetrics)
214
+
142
215
  def compute_aggregates(self) -> None:
143
216
  """Compute aggregate metrics from chunk metrics.
144
217
 
@@ -1,5 +1,7 @@
1
1
  """Analysis reporters for outputting metrics in various formats."""
2
2
 
3
3
  from .console import ConsoleReporter
4
+ from .markdown import MarkdownReporter
5
+ from .sarif import SARIFReporter
4
6
 
5
- __all__ = ["ConsoleReporter"]
7
+ __all__ = ["ConsoleReporter", "MarkdownReporter", "SARIFReporter"]
@@ -148,6 +148,213 @@ class ConsoleReporter:
148
148
  console.print(table)
149
149
  console.print()
150
150
 
151
+ def print_smells(self, smells: list, top: int = 10) -> None:
152
+ """Print detected code smells.
153
+
154
+ Args:
155
+ smells: List of CodeSmell objects
156
+ top: Maximum number of smells to display
157
+ """
158
+ from ..collectors.smells import SmellSeverity
159
+
160
+ if not smells:
161
+ console.print("[bold]🔍 Code Smells[/bold]")
162
+ console.print(" No code smells detected!")
163
+ console.print()
164
+ return
165
+
166
+ console.print(
167
+ f"[bold]🔍 Code Smells Detected[/bold] - Found {len(smells)} issues"
168
+ )
169
+
170
+ # Group smells by severity
171
+ error_smells = [s for s in smells if s.severity == SmellSeverity.ERROR]
172
+ warning_smells = [s for s in smells if s.severity == SmellSeverity.WARNING]
173
+ info_smells = [s for s in smells if s.severity == SmellSeverity.INFO]
174
+
175
+ # Summary
176
+ console.print(
177
+ f" [red]Errors: {len(error_smells)}[/red] "
178
+ f"[yellow]Warnings: {len(warning_smells)}[/yellow] "
179
+ f"[blue]Info: {len(info_smells)}[/blue]"
180
+ )
181
+ console.print()
182
+
183
+ # Show top smells (prioritize errors first)
184
+ smells_to_display = error_smells + warning_smells + info_smells
185
+ smells_to_display = smells_to_display[:top]
186
+
187
+ # Create table
188
+ table = Table(show_header=True, header_style="bold cyan", box=None)
189
+ table.add_column("Severity", width=10)
190
+ table.add_column("Smell Type", width=20)
191
+ table.add_column("Location", width=40)
192
+ table.add_column("Details", width=30)
193
+
194
+ for smell in smells_to_display:
195
+ # Color code by severity
196
+ if smell.severity == SmellSeverity.ERROR:
197
+ severity_str = "[red]ERROR[/red]"
198
+ elif smell.severity == SmellSeverity.WARNING:
199
+ severity_str = "[yellow]WARNING[/yellow]"
200
+ else:
201
+ severity_str = "[blue]INFO[/blue]"
202
+
203
+ # Truncate location if too long
204
+ location = smell.location
205
+ if len(location) > 38:
206
+ location = "..." + location[-35:]
207
+
208
+ # Format details (metric value vs threshold)
209
+ details = f"{smell.metric_value} > {smell.threshold}"
210
+
211
+ table.add_row(severity_str, smell.name, location, details)
212
+
213
+ console.print(table)
214
+ console.print()
215
+
216
+ # Show suggestions for top smells
217
+ if smells_to_display:
218
+ console.print("[bold]💡 Top Suggestions[/bold]")
219
+ shown_suggestions = set()
220
+ suggestion_count = 0
221
+
222
+ for smell in smells_to_display:
223
+ if smell.suggestion and smell.suggestion not in shown_suggestions:
224
+ console.print(f" • [dim]{smell.suggestion}[/dim]")
225
+ shown_suggestions.add(smell.suggestion)
226
+ suggestion_count += 1
227
+
228
+ # Limit to 5 unique suggestions
229
+ if suggestion_count >= 5:
230
+ break
231
+
232
+ console.print()
233
+
234
+ def print_instability(self, metrics: ProjectMetrics, top: int = 10) -> None:
235
+ """Print instability metrics.
236
+
237
+ Args:
238
+ metrics: Project metrics
239
+ top: Number of top files to display
240
+ """
241
+ console.print("[bold]🔗 Instability Metrics[/bold]")
242
+
243
+ # Collect instability data from files with coupling metrics
244
+ files_with_coupling = [
245
+ (f.file_path, f.coupling)
246
+ for f in metrics.files.values()
247
+ if f.coupling.efferent_coupling + f.coupling.afferent_coupling > 0
248
+ ]
249
+
250
+ if not files_with_coupling:
251
+ console.print(" No coupling data available")
252
+ console.print()
253
+ return
254
+
255
+ # Compute instability distribution
256
+ stable_count = sum(1 for _, c in files_with_coupling if c.instability <= 0.3)
257
+ balanced_count = sum(
258
+ 1 for _, c in files_with_coupling if 0.3 < c.instability <= 0.7
259
+ )
260
+ unstable_count = sum(1 for _, c in files_with_coupling if c.instability > 0.7)
261
+ total_files = len(files_with_coupling)
262
+
263
+ console.print(f" Total Files: {total_files}")
264
+ console.print(
265
+ f" [green]Stable (I ≤ 0.3):[/green] {stable_count} "
266
+ f"({stable_count / total_files * 100:.1f}%)"
267
+ )
268
+ console.print(
269
+ f" [yellow]Balanced (0.3 < I ≤ 0.7):[/yellow] {balanced_count} "
270
+ f"({balanced_count / total_files * 100:.1f}%)"
271
+ )
272
+ console.print(
273
+ f" [red]Unstable (I > 0.7):[/red] {unstable_count} "
274
+ f"({unstable_count / total_files * 100:.1f}%)"
275
+ )
276
+ console.print()
277
+
278
+ # Show most stable files
279
+ most_stable = sorted(files_with_coupling, key=lambda x: x[1].instability)[:top]
280
+ if most_stable:
281
+ console.print(
282
+ f"[bold green]✓ Most Stable Files (Top {len(most_stable)})[/bold green]"
283
+ )
284
+
285
+ table = Table(show_header=True, header_style="bold cyan", box=None)
286
+ table.add_column("Rank", justify="right", width=6)
287
+ table.add_column("File", style="cyan", width=45)
288
+ table.add_column("Instability", justify="right", width=12)
289
+ table.add_column("Category", justify="center", width=12)
290
+ table.add_column("Ce/Ca", justify="right", width=10)
291
+
292
+ for rank, (file_path, coupling) in enumerate(most_stable, 1):
293
+ # Truncate file path if too long
294
+ display_path = file_path
295
+ if len(display_path) > 43:
296
+ display_path = "..." + display_path[-40:]
297
+
298
+ # Determine category color
299
+ if coupling.instability <= 0.3:
300
+ category = "[green]Stable[/green]"
301
+ elif coupling.instability <= 0.7:
302
+ category = "[yellow]Balanced[/yellow]"
303
+ else:
304
+ category = "[red]Unstable[/red]"
305
+
306
+ table.add_row(
307
+ f"{rank}",
308
+ display_path,
309
+ f"{coupling.instability:.3f}",
310
+ category,
311
+ f"{coupling.efferent_coupling}/{coupling.afferent_coupling}",
312
+ )
313
+
314
+ console.print(table)
315
+ console.print()
316
+
317
+ # Show most unstable files
318
+ most_unstable = sorted(
319
+ files_with_coupling, key=lambda x: x[1].instability, reverse=True
320
+ )[:top]
321
+ if most_unstable:
322
+ console.print(
323
+ f"[bold red]⚠️ Most Unstable Files (Top {len(most_unstable)})[/bold red]"
324
+ )
325
+
326
+ table = Table(show_header=True, header_style="bold cyan", box=None)
327
+ table.add_column("Rank", justify="right", width=6)
328
+ table.add_column("File", style="cyan", width=45)
329
+ table.add_column("Instability", justify="right", width=12)
330
+ table.add_column("Category", justify="center", width=12)
331
+ table.add_column("Ce/Ca", justify="right", width=10)
332
+
333
+ for rank, (file_path, coupling) in enumerate(most_unstable, 1):
334
+ # Truncate file path if too long
335
+ display_path = file_path
336
+ if len(display_path) > 43:
337
+ display_path = "..." + display_path[-40:]
338
+
339
+ # Determine category color
340
+ if coupling.instability <= 0.3:
341
+ category = "[green]Stable[/green]"
342
+ elif coupling.instability <= 0.7:
343
+ category = "[yellow]Balanced[/yellow]"
344
+ else:
345
+ category = "[red]Unstable[/red]"
346
+
347
+ table.add_row(
348
+ f"{rank}",
349
+ display_path,
350
+ f"{coupling.instability:.3f}",
351
+ category,
352
+ f"{coupling.efferent_coupling}/{coupling.afferent_coupling}",
353
+ )
354
+
355
+ console.print(table)
356
+ console.print()
357
+
151
358
  def print_recommendations(self, metrics: ProjectMetrics) -> None:
152
359
  """Print actionable recommendations.
153
360
 
@@ -187,6 +394,18 @@ class ConsoleReporter:
187
394
  f"[yellow]•[/yellow] {d_f_percentage:.1f}% of functions have D/F grades - aim to reduce this below 10%"
188
395
  )
189
396
 
397
+ # Check for highly unstable files (instability > 0.8)
398
+ highly_unstable = [
399
+ f
400
+ for f in metrics.files.values()
401
+ if f.coupling.instability > 0.8
402
+ and (f.coupling.efferent_coupling + f.coupling.afferent_coupling) > 0
403
+ ]
404
+ if highly_unstable:
405
+ recommendations.append(
406
+ f"[yellow]•[/yellow] {len(highly_unstable)} files have instability > 0.8 - consider reducing dependencies"
407
+ )
408
+
190
409
  # Check overall health
191
410
  avg_health = metrics._compute_avg_health_score()
192
411
  if avg_health < 0.7:
@@ -219,4 +438,209 @@ class ConsoleReporter:
219
438
  console.print(
220
439
  "[dim] • Focus refactoring efforts on Grade D and F functions first[/dim]"
221
440
  )
441
+ console.print(
442
+ "[dim] • Stable files (I ≤ 0.3) should contain abstractions and core logic[/dim]"
443
+ )
444
+ console.print(
445
+ "[dim] • Unstable files (I > 0.7) should contain concrete implementations[/dim]"
446
+ )
447
+ console.print()
448
+
449
+ def print_baseline_comparison(self, comparison_result) -> None:
450
+ """Print baseline comparison results.
451
+
452
+ Args:
453
+ comparison_result: ComparisonResult from BaselineComparator
454
+ """
455
+ console.print(
456
+ f"\n[bold blue]📊 Baseline Comparison[/bold blue] - vs {comparison_result.baseline_name}"
457
+ )
458
+ console.print("━" * 80)
459
+ console.print()
460
+
461
+ # Summary statistics
462
+ console.print("[bold]Summary[/bold]")
463
+ summary = comparison_result.summary
464
+ console.print(
465
+ f" Total Files Compared: {comparison_result.total_files_compared}"
466
+ )
467
+ console.print(
468
+ f" Files - Current: {summary.get('total_files_current', 0)} | "
469
+ f"Baseline: {summary.get('total_files_baseline', 0)}"
470
+ )
471
+ console.print(
472
+ f" Functions - Current: {summary.get('total_functions_current', 0)} | "
473
+ f"Baseline: {summary.get('total_functions_baseline', 0)}"
474
+ )
475
+ console.print()
476
+
477
+ # Change summary
478
+ console.print("[bold]Changes[/bold]")
479
+ console.print(
480
+ f" [red]Regressions:[/red] {len(comparison_result.regressions)} files"
481
+ )
482
+ console.print(
483
+ f" [green]Improvements:[/green] {len(comparison_result.improvements)} files"
484
+ )
485
+ console.print(
486
+ f" [dim]Unchanged:[/dim] {len(comparison_result.unchanged)} files"
487
+ )
488
+ console.print(
489
+ f" [blue]New Files:[/blue] {len(comparison_result.new_files)} files"
490
+ )
491
+ console.print(
492
+ f" [yellow]Deleted Files:[/yellow] {len(comparison_result.deleted_files)} files"
493
+ )
222
494
  console.print()
495
+
496
+ # Complexity metrics comparison
497
+ avg_cc_current = summary.get("avg_complexity_current", 0.0)
498
+ avg_cc_baseline = summary.get("avg_complexity_baseline", 0.0)
499
+ avg_cc_delta = avg_cc_current - avg_cc_baseline
500
+ avg_cc_pct = (
501
+ (avg_cc_delta / avg_cc_baseline * 100) if avg_cc_baseline > 0 else 0.0
502
+ )
503
+
504
+ max_cc_current = summary.get("max_complexity_current", 0)
505
+ max_cc_baseline = summary.get("max_complexity_baseline", 0)
506
+ max_cc_delta = max_cc_current - max_cc_baseline
507
+
508
+ console.print("[bold]Complexity Metrics[/bold]")
509
+
510
+ # Average complexity with color coding
511
+ delta_color = (
512
+ "red" if avg_cc_delta > 0 else "green" if avg_cc_delta < 0 else "dim"
513
+ )
514
+ delta_sign = "+" if avg_cc_delta > 0 else ""
515
+ console.print(
516
+ f" Avg Complexity: {avg_cc_current:.2f} "
517
+ f"(baseline: {avg_cc_baseline:.2f}, "
518
+ f"[{delta_color}]{delta_sign}{avg_cc_delta:.2f} / {delta_sign}{avg_cc_pct:.1f}%[/{delta_color}])"
519
+ )
520
+
521
+ # Max complexity with color coding
522
+ max_delta_color = (
523
+ "red" if max_cc_delta > 0 else "green" if max_cc_delta < 0 else "dim"
524
+ )
525
+ max_delta_sign = "+" if max_cc_delta > 0 else ""
526
+ console.print(
527
+ f" Max Complexity: {max_cc_current} "
528
+ f"(baseline: {max_cc_baseline}, "
529
+ f"[{max_delta_color}]{max_delta_sign}{max_cc_delta}[/{max_delta_color}])"
530
+ )
531
+ console.print()
532
+
533
+ # Show regressions
534
+ if comparison_result.regressions:
535
+ console.print(
536
+ f"[bold red]⚠️ Regressions ({len(comparison_result.regressions)} files)[/bold red]"
537
+ )
538
+
539
+ # Show top 10 regressions
540
+ top_regressions = comparison_result.regressions[:10]
541
+
542
+ table = Table(show_header=True, header_style="bold cyan", box=None)
543
+ table.add_column("File", style="cyan", width=45)
544
+ table.add_column("Metric", width=20)
545
+ table.add_column("Change", justify="right", width=15)
546
+
547
+ for file_comp in top_regressions:
548
+ # Truncate file path
549
+ file_path = file_comp.file_path
550
+ if len(file_path) > 43:
551
+ file_path = "..." + file_path[-40:]
552
+
553
+ # Show worst regression metric for this file
554
+ regression_changes = [
555
+ c for c in file_comp.metric_changes if c.is_regression
556
+ ]
557
+ if regression_changes:
558
+ worst_change = max(
559
+ regression_changes, key=lambda c: abs(c.percentage_delta)
560
+ )
561
+ table.add_row(
562
+ file_path,
563
+ worst_change.metric_name.replace("_", " ").title(),
564
+ f"+{worst_change.percentage_delta:.1f}%",
565
+ )
566
+
567
+ console.print(table)
568
+
569
+ if len(comparison_result.regressions) > 10:
570
+ console.print(
571
+ f" [dim]... and {len(comparison_result.regressions) - 10} more[/dim]"
572
+ )
573
+ console.print()
574
+
575
+ # Show improvements
576
+ if comparison_result.improvements:
577
+ console.print(
578
+ f"[bold green]✓ Improvements ({len(comparison_result.improvements)} files)[/bold green]"
579
+ )
580
+
581
+ # Show top 10 improvements
582
+ top_improvements = comparison_result.improvements[:10]
583
+
584
+ table = Table(show_header=True, header_style="bold cyan", box=None)
585
+ table.add_column("File", style="cyan", width=45)
586
+ table.add_column("Metric", width=20)
587
+ table.add_column("Change", justify="right", width=15)
588
+
589
+ for file_comp in top_improvements:
590
+ # Truncate file path
591
+ file_path = file_comp.file_path
592
+ if len(file_path) > 43:
593
+ file_path = "..." + file_path[-40:]
594
+
595
+ # Show best improvement metric for this file
596
+ improvement_changes = [
597
+ c for c in file_comp.metric_changes if c.is_improvement
598
+ ]
599
+ if improvement_changes:
600
+ best_change = max(
601
+ improvement_changes, key=lambda c: abs(c.percentage_delta)
602
+ )
603
+ table.add_row(
604
+ file_path,
605
+ best_change.metric_name.replace("_", " ").title(),
606
+ f"{best_change.percentage_delta:.1f}%",
607
+ )
608
+
609
+ console.print(table)
610
+
611
+ if len(comparison_result.improvements) > 10:
612
+ console.print(
613
+ f" [dim]... and {len(comparison_result.improvements) - 10} more[/dim]"
614
+ )
615
+ console.print()
616
+
617
+ # Show new/deleted files summary
618
+ if comparison_result.new_files:
619
+ console.print(
620
+ f"[bold blue]📄 New Files ({len(comparison_result.new_files)})[/bold blue]"
621
+ )
622
+ for file_comp in comparison_result.new_files[:5]:
623
+ file_path = file_comp.file_path
624
+ if len(file_path) > 70:
625
+ file_path = "..." + file_path[-67:]
626
+ console.print(f" • {file_path}")
627
+ if len(comparison_result.new_files) > 5:
628
+ console.print(
629
+ f" [dim]... and {len(comparison_result.new_files) - 5} more[/dim]"
630
+ )
631
+ console.print()
632
+
633
+ if comparison_result.deleted_files:
634
+ console.print(
635
+ f"[bold yellow]🗑 Deleted Files ({len(comparison_result.deleted_files)})[/bold yellow]"
636
+ )
637
+ for file_comp in comparison_result.deleted_files[:5]:
638
+ file_path = file_comp.file_path
639
+ if len(file_path) > 70:
640
+ file_path = "..." + file_path[-67:]
641
+ console.print(f" • {file_path}")
642
+ if len(comparison_result.deleted_files) > 5:
643
+ console.print(
644
+ f" [dim]... and {len(comparison_result.deleted_files) - 5} more[/dim]"
645
+ )
646
+ console.print()