mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63):
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +48 -1
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +35 -0
  7. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  8. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  9. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  10. mcp_vector_search/analysis/collectors/smells.py +325 -0
  11. mcp_vector_search/analysis/debt.py +516 -0
  12. mcp_vector_search/analysis/interpretation.py +685 -0
  13. mcp_vector_search/analysis/metrics.py +74 -1
  14. mcp_vector_search/analysis/reporters/__init__.py +3 -1
  15. mcp_vector_search/analysis/reporters/console.py +424 -0
  16. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  17. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  18. mcp_vector_search/analysis/storage/__init__.py +93 -0
  19. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  20. mcp_vector_search/analysis/storage/schema.py +245 -0
  21. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  22. mcp_vector_search/analysis/trends.py +308 -0
  23. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  24. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  25. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  26. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  27. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  28. mcp_vector_search/cli/commands/analyze.py +665 -11
  29. mcp_vector_search/cli/commands/chat.py +193 -0
  30. mcp_vector_search/cli/commands/index.py +600 -2
  31. mcp_vector_search/cli/commands/index_background.py +467 -0
  32. mcp_vector_search/cli/commands/search.py +194 -1
  33. mcp_vector_search/cli/commands/setup.py +64 -13
  34. mcp_vector_search/cli/commands/status.py +302 -3
  35. mcp_vector_search/cli/commands/visualize/cli.py +26 -10
  36. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
  37. mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
  38. mcp_vector_search/cli/commands/visualize/server.py +304 -15
  39. mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
  40. mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
  41. mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
  42. mcp_vector_search/cli/didyoumean.py +5 -0
  43. mcp_vector_search/cli/main.py +16 -5
  44. mcp_vector_search/cli/output.py +134 -5
  45. mcp_vector_search/config/thresholds.py +89 -1
  46. mcp_vector_search/core/__init__.py +16 -0
  47. mcp_vector_search/core/database.py +39 -2
  48. mcp_vector_search/core/embeddings.py +24 -0
  49. mcp_vector_search/core/git.py +380 -0
  50. mcp_vector_search/core/indexer.py +445 -84
  51. mcp_vector_search/core/llm_client.py +9 -4
  52. mcp_vector_search/core/models.py +88 -1
  53. mcp_vector_search/core/relationships.py +473 -0
  54. mcp_vector_search/core/search.py +1 -1
  55. mcp_vector_search/mcp/server.py +795 -4
  56. mcp_vector_search/parsers/python.py +285 -5
  57. mcp_vector_search/utils/gitignore.py +0 -3
  58. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
  59. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
  60. mcp_vector_search/cli/commands/visualize.py.original +0 -2536
  61. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
  62. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
  63. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,560 @@
1
+ """Trend tracking and regression detection for metrics over time.
2
+
3
+ This module provides the TrendTracker class for analyzing metric trends,
4
+ identifying regressions and improvements, and generating alerts.
5
+
6
+ Design Decisions:
7
+ Threshold Strategy: Configurable percentage-based thresholds
8
+ - Default 5% change considered "significant"
9
+ - Users can customize via constructor parameter
10
+ - Percentage-based to normalize across different metric scales
11
+
12
+ Trend Direction: Three states (Improving/Worsening/Stable)
13
+ - Based on statistical significance of change
14
+ - Uses first/last snapshot comparison
15
+ - Requires minimum 2 snapshots for trend analysis
16
+
17
+ Alert Generation: Proactive identification of issues
18
+ - Regression alerts: Metrics that worsened significantly
19
+ - Improvement alerts: Metrics that improved significantly
20
+ - Per-file and project-level analysis
21
+
22
+ Performance:
23
+ - get_trends: O(n) where n=snapshots, typically <100ms for 30-day period
24
+ - get_regression_alerts: O(m) where m=files in snapshots, <50ms
25
+ - calculate_trend_direction: O(1), instant
26
+
27
+ Error Handling:
28
+ - ValueError: Invalid threshold or days parameter
29
+ - Logs warnings when insufficient data for analysis
30
+ - Returns empty results rather than raising exceptions
31
+
32
+ Example:
33
+ >>> tracker = TrendTracker(metrics_store, threshold_percentage=5.0)
34
+ >>> trends = tracker.get_trends("/path/to/project", days=30)
35
+ >>> if trends.complexity_trend == TrendDirection.WORSENING:
36
+ ... print("Complexity is trending up!")
37
+ >>> alerts = tracker.get_regression_alerts("/path/to/project")
38
+ """
39
+
40
+ from __future__ import annotations
41
+
42
+ from dataclasses import dataclass, field
43
+ from datetime import datetime
44
+ from enum import Enum
45
+ from pathlib import Path
46
+ from typing import TYPE_CHECKING
47
+
48
+ from loguru import logger
49
+
50
+ if TYPE_CHECKING:
51
+ from .metrics_store import MetricsStore, ProjectSnapshot
52
+
53
+
54
class TrendDirection(Enum):
    """Possible directions of a metric's trend across snapshots."""

    # Metric moved the "good" way (e.g. complexity or smell count dropped).
    IMPROVING = "improving"
    # Metric moved the "bad" way (e.g. complexity or smell count grew).
    WORSENING = "worsening"
    # Change between endpoints did not exceed the significance threshold.
    STABLE = "stable"
60
+
61
+
62
@dataclass
class FileRegression:
    """Record of a single metric that regressed (or improved) for one file.

    Also reused for improvements: a negative ``change_percentage`` on a
    lower-is-better metric means the file got better.
    """

    # Path of the affected file (or a project-level sentinel value).
    file_path: str
    # Which metric changed, e.g. "avg_complexity".
    metric_name: str
    # Value at the start of the comparison window.
    old_value: float
    # Value at the end of the comparison window.
    new_value: float
    # Signed percentage change; positive means the value increased.
    change_percentage: float
    # When the change was detected.
    timestamp: datetime
81
+
82
+
83
@dataclass
class TrendData:
    """Trend analysis results for a project over a time window.

    Extends the store-level trend payload with derived analysis: per-metric
    trend directions, detected regressions/improvements, and overall
    percentage changes between the first and last snapshot.
    """

    # Project being analyzed.
    project_path: str
    # Length of the analysis window in days.
    period_days: int
    # Snapshots in chronological order.
    snapshots: list[ProjectSnapshot]
    # (timestamp, avg_complexity) points.
    complexity_trend: list[tuple[datetime, float]]
    # (timestamp, total_smells) points.
    smell_trend: list[tuple[datetime, int]]
    # (timestamp, avg_health_score) points.
    health_trend: list[tuple[datetime, float]]
    # Average daily change in complexity.
    change_rate: float

    # Derived analysis (populated only when >= 2 snapshots are available).
    complexity_direction: TrendDirection = TrendDirection.STABLE
    smell_direction: TrendDirection = TrendDirection.STABLE
    health_direction: TrendDirection = TrendDirection.STABLE
    critical_regressions: list[FileRegression] = field(default_factory=list)
    significant_improvements: list[FileRegression] = field(default_factory=list)
    avg_complexity_change: float = 0.0
    smell_count_change: float = 0.0

    @property
    def improving(self) -> bool:
        """True when both complexity and health score are trending better."""
        target = TrendDirection.IMPROVING
        return self.complexity_direction is target and self.health_direction is target

    @property
    def has_regressions(self) -> bool:
        """True when at least one critical regression was detected."""
        return bool(self.critical_regressions)
146
+
147
+
148
class TrendTracker:
    """Track metric trends and detect regressions/improvements over time.

    Capabilities:
    - Analyze metric trends over configurable time periods
    - Identify significant regressions (metrics worsening)
    - Identify significant improvements (metrics improving)
    - Calculate trend directions (improving/worsening/stable)

    Thread Safety:
        - Safe for single-threaded CLI usage
        - Reads from MetricsStore (which handles its own locking)

    Example:
        >>> store = MetricsStore()
        >>> tracker = TrendTracker(store, threshold_percentage=5.0)
        >>> trends = tracker.get_trends("/path/to/project", days=30)
        >>> if trends.has_regressions:
        ...     print(f"Found {len(trends.critical_regressions)} regressions!")
        >>> alerts = tracker.get_regression_alerts("/path/to/project")
    """

    def __init__(
        self, metrics_store: MetricsStore, threshold_percentage: float = 5.0
    ) -> None:
        """Initialize trend tracker.

        Args:
            metrics_store: MetricsStore instance for accessing historical data
            threshold_percentage: Percentage change to consider "significant"
                (default: 5%)

        Raises:
            ValueError: If threshold_percentage is negative or > 100
        """
        if threshold_percentage < 0 or threshold_percentage > 100:
            raise ValueError(
                f"threshold_percentage must be between 0 and 100, got {threshold_percentage}"
            )

        self.store = metrics_store
        # Stored as a decimal fraction (e.g. 5% -> 0.05).
        self.threshold = threshold_percentage / 100.0

        logger.debug(f"Initialized TrendTracker with threshold {threshold_percentage}%")

    def get_trends(self, project_path: str | Path, days: int = 30) -> TrendData:
        """Get comprehensive trend analysis over a time period.

        Fetches the raw trend series from the metrics store, then derives
        trend directions, overall percentage changes, and lists of
        significant regressions/improvements.

        Args:
            project_path: Path to project root
            days: Number of days to analyze (from now backwards)

        Returns:
            TrendData with comprehensive trend analysis. When fewer than 2
            snapshots exist, the derived fields keep their defaults.

        Raises:
            ValueError: If days <= 0
        """
        if days <= 0:
            raise ValueError(f"days must be positive, got {days}")

        project_path_str = str(Path(project_path).resolve())

        # Base series (snapshots + per-metric point lists) from the store.
        base_trends = self.store.get_trends(project_path_str, days=days)

        # A trend needs at least two points; return the raw data with
        # default (stable/empty) derived fields rather than raising.
        if len(base_trends.snapshots) < 2:
            logger.warning(
                f"Insufficient snapshots ({len(base_trends.snapshots)}) for trend analysis"
            )
            return TrendData(
                project_path=base_trends.project_path,
                period_days=base_trends.period_days,
                snapshots=base_trends.snapshots,
                complexity_trend=base_trends.complexity_trend,
                smell_trend=base_trends.smell_trend,
                health_trend=base_trends.health_trend,
                change_rate=base_trends.change_rate,
            )

        # Direction per metric; health score is the only higher-is-better one.
        complexity_direction = self.calculate_trend_direction(
            base_trends.complexity_trend
        )
        smell_direction = self.calculate_trend_direction(
            base_trends.smell_trend, lower_is_better=True
        )
        health_direction = self.calculate_trend_direction(
            base_trends.health_trend, lower_is_better=False
        )

        # Endpoint-to-endpoint percentage changes.
        first_snapshot = base_trends.snapshots[0]
        last_snapshot = base_trends.snapshots[-1]

        avg_complexity_change = self._calculate_percentage_change(
            first_snapshot.avg_complexity, last_snapshot.avg_complexity
        )
        smell_count_change = self._calculate_percentage_change(
            first_snapshot.total_smells, last_snapshot.total_smells
        )

        regressions = self._find_regressions(base_trends.snapshots)
        improvements = self._find_improvements(base_trends.snapshots)

        logger.info(
            f"Analyzed trends for {project_path_str}: "
            f"{len(base_trends.snapshots)} snapshots, "
            f"complexity {complexity_direction.value}, "
            f"{len(regressions)} regressions, "
            f"{len(improvements)} improvements"
        )

        return TrendData(
            project_path=base_trends.project_path,
            period_days=base_trends.period_days,
            snapshots=base_trends.snapshots,
            complexity_trend=base_trends.complexity_trend,
            smell_trend=base_trends.smell_trend,
            health_trend=base_trends.health_trend,
            change_rate=base_trends.change_rate,
            complexity_direction=complexity_direction,
            smell_direction=smell_direction,
            health_direction=health_direction,
            critical_regressions=regressions,
            significant_improvements=improvements,
            avg_complexity_change=avg_complexity_change,
            smell_count_change=smell_count_change,
        )

    def get_regression_alerts(
        self, project_path: str | Path, days: int = 30
    ) -> list[FileRegression]:
        """Identify metrics that worsened significantly.

        Args:
            project_path: Path to project root
            days: Number of days to analyze (default: 30)

        Returns:
            List of FileRegression objects (empty if no regressions)
        """
        trends = self.get_trends(project_path, days=days)
        return trends.critical_regressions

    def get_improvement_alerts(
        self, project_path: str | Path, days: int = 30
    ) -> list[FileRegression]:
        """Identify metrics that improved significantly.

        Returns FileRegression objects (the type is reused for improvements;
        a negative change on a lower-is-better metric means improvement).

        Args:
            project_path: Path to project root
            days: Number of days to analyze (default: 30)

        Returns:
            List of FileRegression objects (empty if no improvements)
        """
        trends = self.get_trends(project_path, days=days)
        return trends.significant_improvements

    def calculate_trend_direction(
        self,
        trend_data: list[tuple[datetime, float | int]],
        lower_is_better: bool = True,
    ) -> TrendDirection:
        """Determine if a metric is improving, worsening, or stable.

        Compares the first and last values in the series; a change smaller
        than the configured threshold (as a fraction of the first value)
        is reported as STABLE.

        Args:
            trend_data: List of (timestamp, value) tuples
            lower_is_better: If True, decreasing values = improving (default)
                If False, increasing values = improving

        Returns:
            TrendDirection enum value

        Example:
            >>> trend = [(t1, 10.0), (t2, 15.0), (t3, 20.0)]
            >>> direction = tracker.calculate_trend_direction(trend)
            >>> # Returns WORSENING (increasing complexity is bad)
        """
        if len(trend_data) < 2:
            logger.debug("Insufficient data for trend direction calculation")
            return TrendDirection.STABLE

        first_value = float(trend_data[0][1])
        last_value = float(trend_data[-1][1])

        # No movement is always stable. Without this guard, a zero threshold
        # would mis-report unchanged values as WORSENING (lower_is_better)
        # because equality falls through the strict "< threshold" check.
        if last_value == first_value:
            return TrendDirection.STABLE

        if first_value == 0:
            # Any growth from a zero baseline counts as a 100% increase.
            percentage_change = 1.0 if last_value > 0 else 0.0
        else:
            percentage_change = abs(last_value - first_value) / first_value

        if percentage_change < self.threshold:
            return TrendDirection.STABLE

        value_dropped = last_value < first_value
        if lower_is_better:
            # Complexity/smells: a drop is good.
            return (
                TrendDirection.IMPROVING if value_dropped else TrendDirection.WORSENING
            )
        # Health score: a drop is bad.
        return TrendDirection.WORSENING if value_dropped else TrendDirection.IMPROVING

    def _calculate_percentage_change(self, old_value: float, new_value: float) -> float:
        """Calculate percentage change between two values.

        Args:
            old_value: Previous value
            new_value: Current value

        Returns:
            Percentage change (positive = increase, negative = decrease).
            A change from zero to a positive value is reported as 100.0.
        """
        if old_value == 0:
            return 100.0 if new_value > 0 else 0.0
        return ((new_value - old_value) / old_value) * 100.0

    def _scan_project_deltas(
        self, snapshots: list[ProjectSnapshot], *, find_improvements: bool
    ) -> list[FileRegression]:
        """Compare first vs last snapshot and collect significant changes.

        Shared implementation for _find_regressions/_find_improvements
        (previously duplicated). Checks avg_complexity and total_smells
        (lower is better) and avg_health_score (higher is better) against
        the configured threshold.

        Note: true per-file tracking is not available here, so results use
        the "PROJECT_OVERALL" sentinel as the file path.

        Args:
            snapshots: Project snapshots in chronological order
            find_improvements: When True collect improvements, otherwise
                regressions

        Returns:
            List of FileRegression records for metrics that changed
            significantly in the requested direction
        """
        if len(snapshots) < 2:
            return []

        first_snapshot = snapshots[0]
        last_snapshot = snapshots[-1]
        limit = self.threshold * 100  # Threshold expressed in percent.
        findings: list[FileRegression] = []

        # Lower-is-better metrics: an increase is a regression.
        for metric_name, old, new in (
            (
                "avg_complexity",
                float(first_snapshot.avg_complexity),
                float(last_snapshot.avg_complexity),
            ),
            (
                "total_smells",
                float(first_snapshot.total_smells),
                float(last_snapshot.total_smells),
            ),
        ):
            change = self._calculate_percentage_change(old, new)
            significant = change < -limit if find_improvements else change > limit
            if significant:
                findings.append(
                    FileRegression(
                        file_path="PROJECT_OVERALL",
                        metric_name=metric_name,
                        old_value=old,
                        new_value=new,
                        change_percentage=change,
                        timestamp=last_snapshot.timestamp,
                    )
                )

        # Health score is higher-is-better; skip when the baseline is zero
        # to avoid a meaningless percentage.
        if first_snapshot.avg_health_score > 0:
            health_change = self._calculate_percentage_change(
                first_snapshot.avg_health_score, last_snapshot.avg_health_score
            )
            if find_improvements:
                moved = (
                    last_snapshot.avg_health_score > first_snapshot.avg_health_score
                )
            else:
                moved = (
                    last_snapshot.avg_health_score < first_snapshot.avg_health_score
                )
            if moved and abs(health_change) > limit:
                findings.append(
                    FileRegression(
                        file_path="PROJECT_OVERALL",
                        metric_name="avg_health_score",
                        old_value=first_snapshot.avg_health_score,
                        new_value=last_snapshot.avg_health_score,
                        change_percentage=health_change,
                        timestamp=last_snapshot.timestamp,
                    )
                )

        return findings

    def _find_regressions(
        self, snapshots: list[ProjectSnapshot]
    ) -> list[FileRegression]:
        """Find project-level regressions between first and last snapshot.

        Args:
            snapshots: List of project snapshots (chronologically ordered)

        Returns:
            List of FileRegression objects for metrics that worsened
        """
        return self._scan_project_deltas(snapshots, find_improvements=False)

    def _find_improvements(
        self, snapshots: list[ProjectSnapshot]
    ) -> list[FileRegression]:
        """Find project-level improvements between first and last snapshot.

        Args:
            snapshots: List of project snapshots (chronologically ordered)

        Returns:
            List of FileRegression objects for metrics that improved
        """
        return self._scan_project_deltas(snapshots, find_improvements=True)