mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +111 -0
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +74 -0
- mcp_vector_search/analysis/collectors/base.py +164 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/complexity.py +743 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +414 -0
- mcp_vector_search/analysis/reporters/__init__.py +7 -0
- mcp_vector_search/analysis/reporters/console.py +646 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +1062 -0
- mcp_vector_search/cli/commands/chat.py +1455 -0
- mcp_vector_search/cli/commands/index.py +621 -5
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/init.py +13 -0
- mcp_vector_search/cli/commands/install.py +597 -335
- mcp_vector_search/cli/commands/install_old.py +8 -4
- mcp_vector_search/cli/commands/mcp.py +78 -6
- mcp_vector_search/cli/commands/reset.py +68 -26
- mcp_vector_search/cli/commands/search.py +224 -8
- mcp_vector_search/cli/commands/setup.py +1184 -0
- mcp_vector_search/cli/commands/status.py +339 -5
- mcp_vector_search/cli/commands/uninstall.py +276 -357
- mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
- mcp_vector_search/cli/commands/visualize/cli.py +292 -0
- mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
- mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
- mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
- mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
- mcp_vector_search/cli/commands/visualize/server.py +600 -0
- mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
- mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
- mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
- mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
- mcp_vector_search/cli/didyoumean.py +27 -2
- mcp_vector_search/cli/main.py +127 -160
- mcp_vector_search/cli/output.py +158 -13
- mcp_vector_search/config/__init__.py +4 -0
- mcp_vector_search/config/default_thresholds.yaml +52 -0
- mcp_vector_search/config/settings.py +12 -0
- mcp_vector_search/config/thresholds.py +273 -0
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/auto_indexer.py +3 -3
- mcp_vector_search/core/boilerplate.py +186 -0
- mcp_vector_search/core/config_utils.py +394 -0
- mcp_vector_search/core/database.py +406 -94
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/exceptions.py +11 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/git_hooks.py +4 -4
- mcp_vector_search/core/indexer.py +632 -54
- mcp_vector_search/core/llm_client.py +756 -0
- mcp_vector_search/core/models.py +91 -1
- mcp_vector_search/core/project.py +17 -0
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/scheduler.py +11 -11
- mcp_vector_search/core/search.py +179 -29
- mcp_vector_search/mcp/server.py +819 -9
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/__init__.py +2 -0
- mcp_vector_search/utils/gitignore.py +0 -3
- mcp_vector_search/utils/gitignore_updater.py +212 -0
- mcp_vector_search/utils/monorepo.py +66 -4
- mcp_vector_search/utils/timing.py +10 -6
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
- mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
- mcp_vector_search/cli/commands/visualize.py +0 -1467
- mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
- {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
mcp_vector_search/__init__.py
CHANGED
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
"""MCP Vector Search - CLI-first semantic code search with MCP integration."""
|
|
2
2
|
|
|
3
|
-
__version__ = "
|
|
4
|
-
__build__ = "
|
|
3
|
+
__version__ = "1.1.22"
|
|
4
|
+
__build__ = "122"
|
|
5
5
|
__author__ = "Robert Matsuoka"
|
|
6
|
-
__email__ = "
|
|
6
|
+
__email__ = "bob@matsuoka.com"
|
|
7
7
|
|
|
8
8
|
from .core.exceptions import MCPVectorSearchError
|
|
9
9
|
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
"""Structural code analysis module.
|
|
2
|
+
|
|
3
|
+
This module provides dataclasses and interfaces for collecting and storing
|
|
4
|
+
code quality metrics during semantic code analysis.
|
|
5
|
+
|
|
6
|
+
Key Components:
|
|
7
|
+
- ChunkMetrics: Metrics for individual functions/methods/classes
|
|
8
|
+
- FileMetrics: Aggregated metrics for entire files
|
|
9
|
+
- ProjectMetrics: Project-wide metric aggregates
|
|
10
|
+
- MetricCollector: Abstract base class for metric collection
|
|
11
|
+
- CollectorContext: Shared context during AST traversal
|
|
12
|
+
- visualizer: JSON export schemas for analysis results (Phase 4)
|
|
13
|
+
|
|
14
|
+
Example:
|
|
15
|
+
# Create chunk metrics
|
|
16
|
+
chunk = ChunkMetrics(
|
|
17
|
+
cognitive_complexity=8,
|
|
18
|
+
cyclomatic_complexity=5,
|
|
19
|
+
max_nesting_depth=3,
|
|
20
|
+
parameter_count=2,
|
|
21
|
+
lines_of_code=25
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
# Chunk automatically computes grade
|
|
25
|
+
assert chunk.complexity_grade == "B" # 6-10 range
|
|
26
|
+
|
|
27
|
+
# Store in ChromaDB-compatible format
|
|
28
|
+
metadata = chunk.to_metadata()
|
|
29
|
+
|
|
30
|
+
# Aggregate file metrics
|
|
31
|
+
file_metrics = FileMetrics(
|
|
32
|
+
file_path="src/module.py",
|
|
33
|
+
chunks=[chunk]
|
|
34
|
+
)
|
|
35
|
+
file_metrics.compute_aggregates()
|
|
36
|
+
|
|
37
|
+
# Project-wide analysis
|
|
38
|
+
project = ProjectMetrics(project_root="/path/to/project")
|
|
39
|
+
project.files["src/module.py"] = file_metrics
|
|
40
|
+
project.compute_aggregates()
|
|
41
|
+
hotspots = project.get_hotspots(limit=5)
|
|
42
|
+
|
|
43
|
+
# Export to JSON (Phase 4)
|
|
44
|
+
from .visualizer import AnalysisExport, ExportMetadata, MetricsSummary
|
|
45
|
+
export = AnalysisExport(...)
|
|
46
|
+
json_output = export.model_dump_json(indent=2)
|
|
47
|
+
"""
|
|
48
|
+
|
|
49
|
+
# Phase 4: JSON export schemas (available but not in __all__ to avoid namespace pollution)
|
|
50
|
+
from . import visualizer
|
|
51
|
+
from .collectors.base import CollectorContext, MetricCollector
|
|
52
|
+
from .collectors.cohesion import (
|
|
53
|
+
ClassCohesion,
|
|
54
|
+
FileCohesion,
|
|
55
|
+
LCOM4Calculator,
|
|
56
|
+
MethodAttributeAccess,
|
|
57
|
+
UnionFind,
|
|
58
|
+
)
|
|
59
|
+
from .collectors.complexity import (
|
|
60
|
+
CognitiveComplexityCollector,
|
|
61
|
+
CyclomaticComplexityCollector,
|
|
62
|
+
MethodCountCollector,
|
|
63
|
+
NestingDepthCollector,
|
|
64
|
+
ParameterCountCollector,
|
|
65
|
+
)
|
|
66
|
+
from .collectors.coupling import (
|
|
67
|
+
AfferentCouplingCollector,
|
|
68
|
+
EfferentCouplingCollector,
|
|
69
|
+
InstabilityCalculator,
|
|
70
|
+
build_import_graph,
|
|
71
|
+
)
|
|
72
|
+
from .collectors.smells import CodeSmell, SmellDetector, SmellSeverity
|
|
73
|
+
from .debt import (
|
|
74
|
+
DebtCategory,
|
|
75
|
+
DebtItem,
|
|
76
|
+
DebtSummary,
|
|
77
|
+
RemediationTime,
|
|
78
|
+
TechnicalDebtEstimator,
|
|
79
|
+
)
|
|
80
|
+
from .metrics import ChunkMetrics, CouplingMetrics, FileMetrics, ProjectMetrics
|
|
81
|
+
|
|
82
|
+
__all__ = [
|
|
83
|
+
"ChunkMetrics",
|
|
84
|
+
"CouplingMetrics",
|
|
85
|
+
"FileMetrics",
|
|
86
|
+
"ProjectMetrics",
|
|
87
|
+
"CollectorContext",
|
|
88
|
+
"MetricCollector",
|
|
89
|
+
"CognitiveComplexityCollector",
|
|
90
|
+
"CyclomaticComplexityCollector",
|
|
91
|
+
"NestingDepthCollector",
|
|
92
|
+
"ParameterCountCollector",
|
|
93
|
+
"MethodCountCollector",
|
|
94
|
+
"EfferentCouplingCollector",
|
|
95
|
+
"AfferentCouplingCollector",
|
|
96
|
+
"InstabilityCalculator",
|
|
97
|
+
"build_import_graph",
|
|
98
|
+
"SmellDetector",
|
|
99
|
+
"CodeSmell",
|
|
100
|
+
"SmellSeverity",
|
|
101
|
+
"ClassCohesion",
|
|
102
|
+
"FileCohesion",
|
|
103
|
+
"LCOM4Calculator",
|
|
104
|
+
"MethodAttributeAccess",
|
|
105
|
+
"UnionFind",
|
|
106
|
+
"TechnicalDebtEstimator",
|
|
107
|
+
"DebtCategory",
|
|
108
|
+
"DebtItem",
|
|
109
|
+
"DebtSummary",
|
|
110
|
+
"RemediationTime",
|
|
111
|
+
]
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"""Baseline comparison for tracking metric changes over time.
|
|
2
|
+
|
|
3
|
+
This module implements baseline snapshot storage and comparison capabilities,
|
|
4
|
+
enabling developers and CI/CD pipelines to track code quality metrics against
|
|
5
|
+
a known-good state.
|
|
6
|
+
|
|
7
|
+
Key Components:
|
|
8
|
+
- BaselineManager: Store/load/manage baseline snapshots
|
|
9
|
+
- BaselineComparator: Compare current metrics against baseline
|
|
10
|
+
- ComparisonResult: Structured comparison output with classifications
|
|
11
|
+
|
|
12
|
+
Usage Example:
|
|
13
|
+
>>> from pathlib import Path
|
|
14
|
+
>>> from mcp_vector_search.analysis.baseline import BaselineManager, BaselineComparator
|
|
15
|
+
>>> from mcp_vector_search.analysis.metrics import ProjectMetrics
|
|
16
|
+
>>>
|
|
17
|
+
>>> # Save baseline
|
|
18
|
+
>>> manager = BaselineManager()
|
|
19
|
+
>>> metrics = analyze_project(Path.cwd()) # Your analysis function
|
|
20
|
+
>>> manager.save_baseline("main-branch", metrics)
|
|
21
|
+
>>>
|
|
22
|
+
>>> # Compare against baseline
|
|
23
|
+
>>> current_metrics = analyze_project(Path.cwd())
|
|
24
|
+
>>> comparator = BaselineComparator()
|
|
25
|
+
>>> baseline = manager.load_baseline("main-branch")
|
|
26
|
+
>>> result = comparator.compare(current_metrics, baseline)
|
|
27
|
+
>>> print(f"Regressions: {len(result.regressions)}")
|
|
28
|
+
>>> print(f"Improvements: {len(result.improvements)}")
|
|
29
|
+
|
|
30
|
+
Design Decisions:
|
|
31
|
+
- JSON storage for Phase 2 (human-readable, simple, no dependencies)
|
|
32
|
+
- Storage location: ~/.mcp-vector-search/baselines/
|
|
33
|
+
- Includes git metadata (commit, branch) for traceability
|
|
34
|
+
- Includes tool version for compatibility checking
|
|
35
|
+
- Graceful handling of incompatible baselines
|
|
36
|
+
|
|
37
|
+
Performance:
|
|
38
|
+
- Save baseline: O(n) where n is number of files, ~50-100ms typical
|
|
39
|
+
- Load baseline: O(n), ~20-50ms typical
|
|
40
|
+
- Compare metrics: O(n + m) where n=files, m=functions, ~10-20ms typical
|
|
41
|
+
|
|
42
|
+
Future Enhancements (Phase 3):
|
|
43
|
+
- Migrate to SQLite for better queryability (Issue #24)
|
|
44
|
+
- Trend analysis across multiple baselines
|
|
45
|
+
- Automated baseline creation on CI success
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
from .comparator import BaselineComparator, ComparisonResult, MetricChange
|
|
49
|
+
from .manager import (
|
|
50
|
+
BaselineCorruptedError,
|
|
51
|
+
BaselineExistsError,
|
|
52
|
+
BaselineManager,
|
|
53
|
+
BaselineMetadata,
|
|
54
|
+
BaselineNotFoundError,
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
__all__ = [
|
|
58
|
+
# Manager
|
|
59
|
+
"BaselineManager",
|
|
60
|
+
"BaselineMetadata",
|
|
61
|
+
"BaselineNotFoundError",
|
|
62
|
+
"BaselineExistsError",
|
|
63
|
+
"BaselineCorruptedError",
|
|
64
|
+
# Comparator
|
|
65
|
+
"BaselineComparator",
|
|
66
|
+
"ComparisonResult",
|
|
67
|
+
"MetricChange",
|
|
68
|
+
]
|
|
@@ -0,0 +1,462 @@
|
|
|
1
|
+
"""Baseline comparison for detecting metric changes.
|
|
2
|
+
|
|
3
|
+
This module provides the BaselineComparator class for comparing current
|
|
4
|
+
metrics against a stored baseline, identifying regressions, improvements,
|
|
5
|
+
and neutral changes.
|
|
6
|
+
|
|
7
|
+
Design Decisions:
|
|
8
|
+
- Classification logic based on cognitive complexity thresholds
|
|
9
|
+
- Percentage change calculation for relative comparison
|
|
10
|
+
- Grade transitions tracked (A→B is regression, C→B is improvement)
|
|
11
|
+
- Both absolute and percentage deltas reported
|
|
12
|
+
- Neutral changes include unchanged and minor variations (<5% change)
|
|
13
|
+
|
|
14
|
+
Classification Logic:
|
|
15
|
+
- Regression: Metric increased (complexity worse)
|
|
16
|
+
- Cognitive complexity increased
|
|
17
|
+
- Grade decreased (A→B, B→C, etc.)
|
|
18
|
+
- Max nesting depth increased
|
|
19
|
+
- More code smells detected
|
|
20
|
+
- Improvement: Metric decreased (complexity better)
|
|
21
|
+
- Cognitive complexity decreased
|
|
22
|
+
- Grade improved (C→B, B→A, etc.)
|
|
23
|
+
- Max nesting depth decreased
|
|
24
|
+
- Fewer code smells
|
|
25
|
+
- Neutral: No significant change (<5% for numeric metrics)
|
|
26
|
+
|
|
27
|
+
Performance:
|
|
28
|
+
- Compare: O(n + m) where n=files, m=functions
|
|
29
|
+
- Typical: 10-20ms for 100 files with 500 functions
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
from __future__ import annotations
|
|
33
|
+
|
|
34
|
+
from dataclasses import dataclass, field
|
|
35
|
+
from typing import Literal
|
|
36
|
+
|
|
37
|
+
from ..metrics import FileMetrics, ProjectMetrics
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class MetricChange:
    """Delta for one metric between a baseline snapshot and the current run.

    Attributes:
        metric_name: Metric identifier (e.g., "cognitive_complexity").
        baseline_value: Value recorded in the baseline.
        current_value: Value from the current analysis.
        absolute_delta: current_value - baseline_value.
        percentage_delta: Relative change, as a percentage of the baseline.
        classification: One of "regression", "improvement", or "neutral".
    """

    metric_name: str
    baseline_value: float | int
    current_value: float | int
    absolute_delta: float | int
    percentage_delta: float
    classification: Literal["regression", "improvement", "neutral"]

    @property
    def is_neutral(self) -> bool:
        """True when the change is not significant in either direction."""
        return self.classification == "neutral"

    @property
    def is_regression(self) -> bool:
        """True when the metric got worse relative to the baseline."""
        return self.classification == "regression"

    @property
    def is_improvement(self) -> bool:
        """True when the metric got better relative to the baseline."""
        return self.classification == "improvement"
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class FileComparison:
    """Per-file comparison outcome between baseline and current analysis.

    Attributes:
        file_path: Path of the compared file.
        in_baseline: True if the file was present in the baseline snapshot.
        in_current: True if the file is present in the current analysis.
        metric_changes: Individual metric deltas computed for this file.
        grade_change: (baseline_grade, current_grade) when the grade moved,
            otherwise None.
    """

    file_path: str
    in_baseline: bool
    in_current: bool
    metric_changes: list[MetricChange] = field(default_factory=list)
    grade_change: tuple[str, str] | None = None

    @property
    def has_regressions(self) -> bool:
        """True if at least one metric change is a regression."""
        return any(mc.is_regression for mc in self.metric_changes)

    @property
    def has_improvements(self) -> bool:
        """True if at least one metric change is an improvement."""
        return any(mc.is_improvement for mc in self.metric_changes)

    @property
    def is_new_file(self) -> bool:
        """True for a file present now but absent from the baseline."""
        return self.in_current and not self.in_baseline

    @property
    def is_deleted_file(self) -> bool:
        """True for a file present in the baseline but gone now."""
        return self.in_baseline and not self.in_current
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
@dataclass
class ComparisonResult:
    """Aggregated outcome of a full baseline comparison.

    Attributes:
        baseline_name: Name of the baseline that was compared against.
        regressions: Files whose metrics got worse.
        improvements: Files whose metrics got better.
        unchanged: Files with no significant metric movement.
        new_files: Files present now but not in the baseline.
        deleted_files: Files present in the baseline but not now.
        summary: Aggregate statistics for the whole comparison.
    """

    baseline_name: str
    regressions: list[FileComparison] = field(default_factory=list)
    improvements: list[FileComparison] = field(default_factory=list)
    unchanged: list[FileComparison] = field(default_factory=list)
    new_files: list[FileComparison] = field(default_factory=list)
    deleted_files: list[FileComparison] = field(default_factory=list)
    summary: dict[str, int | float] = field(default_factory=dict)

    @property
    def has_regressions(self) -> bool:
        """True when at least one file regressed."""
        return bool(self.regressions)

    @property
    def has_improvements(self) -> bool:
        """True when at least one file improved."""
        return bool(self.improvements)

    @property
    def total_files_compared(self) -> int:
        """Count of every file touched by the comparison, in any bucket."""
        buckets = (
            self.regressions,
            self.improvements,
            self.unchanged,
            self.new_files,
            self.deleted_files,
        )
        return sum(len(bucket) for bucket in buckets)
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
class BaselineComparator:
    """Compare current metrics against a baseline snapshot.

    Analyzes differences between current metrics and a baseline, classifying
    each file's changes as regressions, improvements, or neutral.

    Comparison Strategy:
        1. Compare files present in both baseline and current
        2. Identify new files (in current, not in baseline)
        3. Identify deleted files (in baseline, not in current)
        4. For each common file, compare metrics and classify changes
        5. Aggregate everything into a ComparisonResult

    Example:
        >>> comparator = BaselineComparator()
        >>> baseline = manager.load_baseline("main-branch")
        >>> current = analyze_project(Path.cwd())
        >>> result = comparator.compare(current, baseline, threshold_percent=5.0)
        >>> print(f"Regressions: {len(result.regressions)}")
        >>> print(f"Improvements: {len(result.improvements)}")
    """

    # Metrics where an increase means the code got worse. Hoisted to a
    # class-level frozenset so membership tests are O(1) and the collection
    # is built once instead of on every _classify_change call.
    _COMPLEXITY_METRICS: frozenset[str] = frozenset(
        {
            "total_complexity",
            "avg_complexity",
            "max_complexity",
            "cognitive_complexity",
            "cyclomatic_complexity",
            "max_nesting_depth",
            "parameter_count",
        }
    )

    def compare(
        self,
        current: ProjectMetrics,
        baseline: ProjectMetrics,
        baseline_name: str = "baseline",
        threshold_percent: float = 5.0,
    ) -> ComparisonResult:
        """Compare current metrics against baseline.

        Args:
            current: Current ProjectMetrics
            baseline: Baseline ProjectMetrics to compare against
            baseline_name: Name of baseline (for result metadata)
            threshold_percent: Percentage threshold below which a change is
                classified as neutral (default: 5.0%)

        Returns:
            ComparisonResult with classified changes

        Performance: O(n + m) where n=files, m=functions

        Example:
            >>> comparator = BaselineComparator()
            >>> result = comparator.compare(current, baseline)
            >>> if result.has_regressions:
            ...     print(f"Found {len(result.regressions)} regressions")
        """
        result = ComparisonResult(baseline_name=baseline_name)

        # Partition file paths into common / new / deleted via set algebra.
        baseline_files = set(baseline.files)
        current_files = set(current.files)
        common_files = baseline_files & current_files
        new_files = current_files - baseline_files
        deleted_files = baseline_files - current_files

        # Compare files present on both sides and bucket by outcome.
        # Regressions take precedence over improvements when a file has both.
        for file_path in common_files:
            comparison = self._compare_file(
                file_path=file_path,
                baseline_file=baseline.files[file_path],
                current_file=current.files[file_path],
                threshold_percent=threshold_percent,
            )
            if comparison.has_regressions:
                result.regressions.append(comparison)
            elif comparison.has_improvements:
                result.improvements.append(comparison)
            else:
                result.unchanged.append(comparison)

        # New files carry no metric changes; they are tracked for visibility.
        for file_path in new_files:
            result.new_files.append(
                FileComparison(file_path=file_path, in_baseline=False, in_current=True)
            )

        # Deleted files likewise carry no metric changes.
        for file_path in deleted_files:
            result.deleted_files.append(
                FileComparison(file_path=file_path, in_baseline=True, in_current=False)
            )

        result.summary = self._compute_summary(current, baseline)
        return result

    def _compare_file(
        self,
        file_path: str,
        baseline_file: FileMetrics,
        current_file: FileMetrics,
        threshold_percent: float,
    ) -> FileComparison:
        """Compare metrics for a single file present in both snapshots.

        Args:
            file_path: Path to file
            baseline_file: Baseline FileMetrics
            current_file: Current FileMetrics
            threshold_percent: Threshold for neutral classification

        Returns:
            FileComparison with one MetricChange per compared metric
        """
        comparison = FileComparison(
            file_path=file_path, in_baseline=True, in_current=True
        )

        # File-level metrics compared pairwise: (name, baseline, current).
        metrics_to_compare = [
            (
                "total_complexity",
                baseline_file.total_complexity,
                current_file.total_complexity,
            ),
            (
                "avg_complexity",
                baseline_file.avg_complexity,
                current_file.avg_complexity,
            ),
            (
                "max_complexity",
                baseline_file.max_complexity,
                current_file.max_complexity,
            ),
            (
                "function_count",
                baseline_file.function_count,
                current_file.function_count,
            ),
            ("class_count", baseline_file.class_count, current_file.class_count),
        ]

        for metric_name, baseline_value, current_value in metrics_to_compare:
            change = self._calculate_metric_change(
                metric_name=metric_name,
                baseline_value=baseline_value,
                current_value=current_value,
                threshold_percent=threshold_percent,
            )
            comparison.metric_changes.append(change)

        return comparison

    def _calculate_metric_change(
        self,
        metric_name: str,
        baseline_value: float | int,
        current_value: float | int,
        threshold_percent: float,
    ) -> MetricChange:
        """Calculate the delta and classification for a single metric.

        Args:
            metric_name: Name of metric
            baseline_value: Baseline value
            current_value: Current value
            threshold_percent: Threshold for neutral classification

        Returns:
            MetricChange with classification
        """
        absolute_delta = current_value - baseline_value

        if baseline_value == 0:
            # Avoid division by zero: a move away from zero is reported as
            # a full 100% change in the corresponding direction.
            if current_value == 0:
                percentage_delta = 0.0
            else:
                percentage_delta = 100.0 if current_value > 0 else -100.0
        else:
            percentage_delta = (absolute_delta / baseline_value) * 100

        classification = self._classify_change(
            metric_name=metric_name,
            absolute_delta=absolute_delta,
            percentage_delta=percentage_delta,
            threshold_percent=threshold_percent,
        )

        return MetricChange(
            metric_name=metric_name,
            baseline_value=baseline_value,
            current_value=current_value,
            absolute_delta=absolute_delta,
            percentage_delta=percentage_delta,
            classification=classification,
        )

    def _classify_change(
        self,
        metric_name: str,
        absolute_delta: float | int,
        percentage_delta: float,
        threshold_percent: float,
    ) -> Literal["regression", "improvement", "neutral"]:
        """Classify a metric change as regression, improvement, or neutral.

        Args:
            metric_name: Name of metric
            absolute_delta: Absolute change
            percentage_delta: Percentage change
            threshold_percent: Threshold for neutral classification

        Returns:
            Classification string

        Classification Rules:
            - Complexity metrics (higher is worse):
                - Increase > threshold → regression
                - Decrease > threshold → improvement
                - Otherwise → neutral
            - Count metrics (function_count, class_count) → always neutral,
              since more functions/classes can be refactoring (good) or
              bloat (bad).
        """
        # Small relative moves are noise regardless of direction.
        if abs(percentage_delta) < threshold_percent:
            return "neutral"

        # For complexity metrics, the sign of the delta decides the outcome.
        if metric_name in self._COMPLEXITY_METRICS:
            if absolute_delta > 0:
                return "regression"
            elif absolute_delta < 0:
                return "improvement"
            else:
                return "neutral"

        # Count metrics are not inherently good or bad.
        return "neutral"

    def _compute_summary(
        self, current: ProjectMetrics, baseline: ProjectMetrics
    ) -> dict[str, int | float]:
        """Compute aggregate summary statistics for both snapshots.

        Args:
            current: Current ProjectMetrics
            baseline: Baseline ProjectMetrics

        Returns:
            Dictionary of summary statistics
        """
        # Aggregate cognitive complexity across all files.
        current_total_cc = sum(f.total_complexity for f in current.files.values())
        baseline_total_cc = sum(f.total_complexity for f in baseline.files.values())

        current_avg_cc = (
            current_total_cc / current.total_files if current.total_files > 0 else 0.0
        )
        baseline_avg_cc = (
            baseline_total_cc / baseline.total_files
            if baseline.total_files > 0
            else 0.0
        )

        # Worst single-file complexity on each side (0 when no files).
        current_max_cc = max(
            (f.max_complexity for f in current.files.values()), default=0
        )
        baseline_max_cc = max(
            (f.max_complexity for f in baseline.files.values()), default=0
        )

        return {
            "total_files_current": current.total_files,
            "total_files_baseline": baseline.total_files,
            "total_functions_current": current.total_functions,
            "total_functions_baseline": baseline.total_functions,
            "total_complexity_current": current_total_cc,
            "total_complexity_baseline": baseline_total_cc,
            "avg_complexity_current": round(current_avg_cc, 2),
            "avg_complexity_baseline": round(baseline_avg_cc, 2),
            "max_complexity_current": current_max_cc,
            "max_complexity_baseline": baseline_max_cc,
        }
|