mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +48 -1
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +35 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +74 -1
- mcp_vector_search/analysis/reporters/__init__.py +3 -1
- mcp_vector_search/analysis/reporters/console.py +424 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +665 -11
- mcp_vector_search/cli/commands/chat.py +193 -0
- mcp_vector_search/cli/commands/index.py +600 -2
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/search.py +194 -1
- mcp_vector_search/cli/commands/setup.py +64 -13
- mcp_vector_search/cli/commands/status.py +302 -3
- mcp_vector_search/cli/commands/visualize/cli.py +26 -10
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
- mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
- mcp_vector_search/cli/commands/visualize/server.py +304 -15
- mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
- mcp_vector_search/cli/didyoumean.py +5 -0
- mcp_vector_search/cli/main.py +16 -5
- mcp_vector_search/cli/output.py +134 -5
- mcp_vector_search/config/thresholds.py +89 -1
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/database.py +39 -2
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/indexer.py +445 -84
- mcp_vector_search/core/llm_client.py +9 -4
- mcp_vector_search/core/models.py +88 -1
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/search.py +1 -1
- mcp_vector_search/mcp/server.py +795 -4
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/gitignore.py +0 -3
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
- mcp_vector_search/cli/commands/visualize.py.original +0 -2536
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,516 @@
"""Technical debt estimation based on code smells and metrics.

This module provides technical debt estimation functionality using the SQALE
methodology (similar to SonarQube). Technical debt is estimated as the time
required to fix all detected code smells and quality issues.

The estimation formula:
    Total Debt = Σ (smell_count × base_remediation_time × severity_multiplier)

Remediation times are based on industry research and conservative estimates
for how long it takes to properly fix each type of code smell.

Example:
    from mcp_vector_search.analysis.debt import TechnicalDebtEstimator
    from mcp_vector_search.analysis.collectors.smells import SmellDetector

    # Detect smells
    detector = SmellDetector()
    smells = detector.detect_all(file_metrics, file_path)

    # Estimate debt
    estimator = TechnicalDebtEstimator()
    summary = estimator.estimate_from_smells(smells)

    print(f"Total debt: {summary.total_hours:.1f} hours")
    print(f"By category: {summary.minutes_by_category}")
"""

from __future__ import annotations

from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from .collectors.smells import CodeSmell
    from .metrics import ProjectMetrics


class DebtCategory(str, Enum):
    """Categories of technical debt.

    These categories align with standard software quality models and help
    organize debt by the type of impact it has on the codebase.

    Attributes:
        COMPLEXITY: Issues related to code complexity and understandability
        MAINTAINABILITY: Issues affecting ease of modification and extension
        RELIABILITY: Issues that could lead to bugs or failures
        SECURITY: Security-related issues (placeholder for future)
        DOCUMENTATION: Missing or inadequate documentation
    """

    COMPLEXITY = "complexity"
    MAINTAINABILITY = "maintainability"
    RELIABILITY = "reliability"
    SECURITY = "security"
    DOCUMENTATION = "documentation"


@dataclass
class RemediationTime:
    """Time estimate for fixing a specific smell type.

    Attributes:
        smell_type: Type of code smell (e.g., "long_method")
        base_minutes: Base time in minutes to fix this smell
        category: Debt category this smell belongs to
        description: Human-readable description of the remediation work
    """

    smell_type: str
    base_minutes: int
    category: DebtCategory
    description: str


@dataclass
class DebtItem:
    """A single technical debt item representing one code smell instance.

    Attributes:
        smell_type: Type of smell (normalized to snake_case)
        file_path: File where the smell was detected
        line: Line number where the smell occurs
        severity: Severity level ("error", "warning", "info")
        base_minutes: Base remediation time before severity adjustment
        adjusted_minutes: Final remediation time after severity multiplier
        category: Debt category
        message: Detailed message about the smell
    """

    smell_type: str
    file_path: str
    line: int
    severity: str
    base_minutes: int
    adjusted_minutes: float
    category: DebtCategory
    message: str


@dataclass
class DebtSummary:
    """Summary of technical debt for a project.

    Provides comprehensive debt metrics with multiple breakdowns and
    aggregations to help prioritize remediation efforts.

    Attributes:
        total_minutes: Total debt in minutes
        total_hours: Total debt in hours
        total_days: Total debt in work days (8-hour days)
        items_by_category: Debt items grouped by category
        minutes_by_category: Total minutes of debt per category
        items_by_severity: Debt items grouped by severity
        minutes_by_severity: Total minutes of debt per severity
        top_files: Files with most debt (file_path, minutes)
        top_smell_types: Most common smell types (smell_type, minutes)
        item_count: Total number of debt items
    """

    total_minutes: float
    total_hours: float
    total_days: float

    items_by_category: dict[DebtCategory, list[DebtItem]]
    minutes_by_category: dict[DebtCategory, float]

    items_by_severity: dict[str, list[DebtItem]]
    minutes_by_severity: dict[str, float]

    top_files: list[tuple[str, float]]  # (file_path, minutes)
    top_smell_types: list[tuple[str, float]]  # (smell_type, minutes)

    item_count: int

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON export.

        Returns:
            Dictionary representation suitable for JSON serialization
        """
        return {
            "total_minutes": self.total_minutes,
            "total_hours": self.total_hours,
            "total_days": self.total_days,
            "item_count": self.item_count,
            "by_category": {
                category.value: {
                    "minutes": minutes,
                    "hours": round(minutes / 60, 2),
                    "item_count": len(self.items_by_category.get(category, [])),
                }
                for category, minutes in self.minutes_by_category.items()
            },
            "by_severity": {
                severity: {
                    "minutes": minutes,
                    "hours": round(minutes / 60, 2),
                    "item_count": len(self.items_by_severity.get(severity, [])),
                }
                for severity, minutes in self.minutes_by_severity.items()
            },
            "top_files": [
                {"file_path": path, "minutes": mins, "hours": round(mins / 60, 2)}
                for path, mins in self.top_files
            ],
            "top_smell_types": [
                {
                    "smell_type": smell,
                    "minutes": mins,
                    "hours": round(mins / 60, 2),
                }
                for smell, mins in self.top_smell_types
            ],
        }


class TechnicalDebtEstimator:
    """Estimates technical debt based on code smells.

    Uses the SQALE methodology to estimate the time required to fix all
    detected code smells. Remediation times are configurable and based on
    industry research and conservative estimates.

    Severity multipliers adjust base times:
        - error (critical): 1.5× (urgent, blocks development)
        - warning (major): 1.0× (standard remediation time)
        - info (minor): 0.5× (nice to have, low priority)

    Example:
        estimator = TechnicalDebtEstimator()
        summary = estimator.estimate_from_smells(smells)
        print(f"Total debt: {summary.total_hours:.1f} hours")
    """

    # Default remediation times based on SonarQube research
    # Times are conservative estimates including testing and review
    DEFAULT_REMEDIATION_TIMES: dict[str, RemediationTime] = {
        "long_method": RemediationTime(
            smell_type="long_method",
            base_minutes=20,
            category=DebtCategory.MAINTAINABILITY,
            description="Extract method refactoring, update tests",
        ),
        "deep_nesting": RemediationTime(
            smell_type="deep_nesting",
            base_minutes=15,
            category=DebtCategory.COMPLEXITY,
            description="Flatten control flow, extract guard clauses",
        ),
        "god_class": RemediationTime(
            smell_type="god_class",
            base_minutes=120,
            category=DebtCategory.MAINTAINABILITY,
            description="Split into smaller classes, refactor dependencies",
        ),
        "high_cognitive_complexity": RemediationTime(
            smell_type="high_cognitive_complexity",
            base_minutes=30,
            category=DebtCategory.COMPLEXITY,
            description="Simplify logic flow, extract helper functions",
        ),
        "high_cyclomatic_complexity": RemediationTime(
            smell_type="high_cyclomatic_complexity",
            base_minutes=25,
            category=DebtCategory.COMPLEXITY,
            description="Break into smaller functions, reduce branches",
        ),
        "complex_method": RemediationTime(
            smell_type="complex_method",
            base_minutes=25,
            category=DebtCategory.COMPLEXITY,
            description="Simplify control flow, reduce cyclomatic complexity",
        ),
        "circular_dependency": RemediationTime(
            smell_type="circular_dependency",
            base_minutes=60,
            category=DebtCategory.MAINTAINABILITY,
            description="Restructure module dependencies, introduce abstractions",
        ),
        "empty_catch": RemediationTime(
            smell_type="empty_catch",
            base_minutes=5,
            category=DebtCategory.RELIABILITY,
            description="Add proper error handling and logging",
        ),
        "magic_number": RemediationTime(
            smell_type="magic_number",
            base_minutes=5,
            category=DebtCategory.MAINTAINABILITY,
            description="Extract named constant with documentation",
        ),
        "long_parameter_list": RemediationTime(
            smell_type="long_parameter_list",
            base_minutes=15,
            category=DebtCategory.MAINTAINABILITY,
            description="Introduce parameter object or builder pattern",
        ),
        "duplicate_code": RemediationTime(
            smell_type="duplicate_code",
            base_minutes=30,
            category=DebtCategory.MAINTAINABILITY,
            description="Extract shared function, update call sites",
        ),
        "dead_code": RemediationTime(
            smell_type="dead_code",
            base_minutes=10,
            category=DebtCategory.MAINTAINABILITY,
            description="Remove dead code, verify no side effects",
        ),
        "missing_docstring": RemediationTime(
            smell_type="missing_docstring",
            base_minutes=10,
            category=DebtCategory.DOCUMENTATION,
            description="Write comprehensive documentation",
        ),
    }

    # Severity multipliers based on urgency and impact
    SEVERITY_MULTIPLIERS: dict[str, float] = {
        "error": 1.5,  # Critical: urgent fix required
        "warning": 1.0,  # Major: standard remediation time
        "info": 0.5,  # Minor: nice to have, lower priority
    }

    def __init__(
        self,
        remediation_times: dict[str, RemediationTime] | None = None,
        severity_multipliers: dict[str, float] | None = None,
    ) -> None:
        """Initialize estimator with optional custom remediation times.

        Args:
            remediation_times: Optional custom remediation time mapping.
                If None, uses DEFAULT_REMEDIATION_TIMES.
            severity_multipliers: Optional custom severity multipliers.
                If None, uses SEVERITY_MULTIPLIERS.
        """
        self.remediation_times = (
            remediation_times or self.DEFAULT_REMEDIATION_TIMES.copy()
        )
        self.severity_multipliers = (
            severity_multipliers or self.SEVERITY_MULTIPLIERS.copy()
        )

    def estimate_from_smells(self, smells: list[CodeSmell]) -> DebtSummary:
        """Calculate technical debt from a list of code smells.

        Analyzes each smell, applies remediation times and severity multipliers,
        and generates a comprehensive debt summary with multiple breakdowns.

        Args:
            smells: List of detected code smells

        Returns:
            DebtSummary with total debt and detailed breakdowns
        """
        debt_items: list[DebtItem] = []

        # Convert each smell to a debt item
        for smell in smells:
            # Normalize smell name to snake_case for lookup
            smell_type = self._normalize_smell_name(smell.name)

            # Get base remediation time
            base_minutes = self.get_remediation_time(smell_type)

            # Apply severity multiplier
            severity = smell.severity.value  # Convert enum to string
            adjusted_minutes = self.apply_severity_multiplier(base_minutes, severity)

            # Get category
            category = self._get_category(smell_type)

            # Extract file path and line from location
            file_path, line = self._parse_location(smell.location)

            debt_item = DebtItem(
                smell_type=smell_type,
                file_path=file_path,
                line=line,
                severity=severity,
                base_minutes=base_minutes,
                adjusted_minutes=adjusted_minutes,
                category=category,
                message=smell.description,
            )
            debt_items.append(debt_item)

        # Generate summary
        return self._create_summary(debt_items)

    def estimate_from_project_metrics(
        self, project_metrics: ProjectMetrics
    ) -> DebtSummary:
        """Calculate debt from full project metrics.

        Collects all smells from all files in the project and estimates
        total technical debt.

        Args:
            project_metrics: Project-wide metrics containing all file metrics

        Returns:
            DebtSummary with total debt and detailed breakdowns
        """
        from .collectors.smells import SmellDetector

        detector = SmellDetector()
        all_smells: list[CodeSmell] = []

        # Collect smells from all files
        for file_path, file_metrics in project_metrics.files.items():
            file_smells = detector.detect_all(file_metrics, file_path)
            all_smells.extend(file_smells)

        return self.estimate_from_smells(all_smells)

    def get_remediation_time(self, smell_type: str) -> int:
        """Get base remediation time for a smell type.

        Args:
            smell_type: Type of smell (normalized to snake_case)

        Returns:
            Base remediation time in minutes (default: 15 if unknown)
        """
        if smell_type in self.remediation_times:
            return self.remediation_times[smell_type].base_minutes
        return 15  # Default fallback for unknown smell types

    def apply_severity_multiplier(self, base_minutes: int, severity: str) -> float:
        """Apply severity multiplier to base time.

        Args:
            base_minutes: Base remediation time in minutes
            severity: Severity level ("error", "warning", "info")

        Returns:
            Adjusted time after applying severity multiplier
        """
        multiplier = self.severity_multipliers.get(severity, 1.0)
        return base_minutes * multiplier

    def _normalize_smell_name(self, smell_name: str) -> str:
        """Normalize smell name to snake_case for consistent lookup.

        Converts human-readable names like "Long Method" to "long_method".

        Args:
            smell_name: Human-readable smell name

        Returns:
            Normalized snake_case smell type
        """
        return smell_name.lower().replace(" ", "_")

    def _get_category(self, smell_type: str) -> DebtCategory:
        """Get debt category for a smell type.

        Args:
            smell_type: Type of smell (normalized)

        Returns:
            Appropriate debt category
        """
        if smell_type in self.remediation_times:
            return self.remediation_times[smell_type].category
        return DebtCategory.MAINTAINABILITY  # Default category

    def _parse_location(self, location: str) -> tuple[str, int]:
        """Parse location string to extract file path and line number.

        Location format: "file_path:line" or "file_path:line-range"

        Args:
            location: Location string from code smell

        Returns:
            Tuple of (file_path, line_number)
        """
        if ":" in location:
            parts = location.split(":", 1)
            file_path = parts[0]
            # Extract line number (handle ranges like "10-20")
            line_str = parts[1].split("-")[0] if "-" in parts[1] else parts[1]
            try:
                line = int(line_str)
            except ValueError:
                line = 0
            return file_path, line
        return location, 0

    def _create_summary(self, debt_items: list[DebtItem]) -> DebtSummary:
        """Create debt summary from debt items.

        Aggregates debt items by category, severity, file, and smell type.

        Args:
            debt_items: List of debt items to summarize

        Returns:
            Comprehensive debt summary
        """
        # Calculate totals
        total_minutes = sum(item.adjusted_minutes for item in debt_items)
        total_hours = total_minutes / 60
        total_days = total_hours / 8  # 8-hour work days

        # Group by category
        items_by_category: dict[DebtCategory, list[DebtItem]] = defaultdict(list)
        minutes_by_category: dict[DebtCategory, float] = defaultdict(float)

        for item in debt_items:
            items_by_category[item.category].append(item)
            minutes_by_category[item.category] += item.adjusted_minutes

        # Group by severity
        items_by_severity: dict[str, list[DebtItem]] = defaultdict(list)
        minutes_by_severity: dict[str, float] = defaultdict(float)

        for item in debt_items:
            items_by_severity[item.severity].append(item)
            minutes_by_severity[item.severity] += item.adjusted_minutes

        # Calculate top files
        file_minutes: dict[str, float] = defaultdict(float)
        for item in debt_items:
            file_minutes[item.file_path] += item.adjusted_minutes

        top_files = sorted(file_minutes.items(), key=lambda x: x[1], reverse=True)[:10]

        # Calculate top smell types
        smell_minutes: dict[str, float] = defaultdict(float)
        for item in debt_items:
            smell_minutes[item.smell_type] += item.adjusted_minutes

        top_smell_types = sorted(
            smell_minutes.items(), key=lambda x: x[1], reverse=True
        )[:10]

        return DebtSummary(
            total_minutes=total_minutes,
            total_hours=total_hours,
            total_days=total_days,
            items_by_category=dict(items_by_category),
            minutes_by_category=dict(minutes_by_category),
            items_by_severity=dict(items_by_severity),
            minutes_by_severity=dict(minutes_by_severity),
            top_files=top_files,
            top_smell_types=top_smell_types,
            item_count=len(debt_items),
        )
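As a rough illustration of the remediation arithmetic this new module defines — Total Debt = Σ (smell_count × base_remediation_time × severity_multiplier) — the minimal sketch below exercises the estimator's public helpers with made-up smell counts. It assumes the default remediation tables and severity multipliers shown above and an installed 1.1.x wheel; it is not part of the package itself.

    # Minimal sketch: reproduces the SQALE-style arithmetic from debt.py above.
    # The smell tallies are hypothetical and exist only to illustrate the formula.
    from mcp_vector_search.analysis.debt import TechnicalDebtEstimator

    estimator = TechnicalDebtEstimator()

    # (severity, count) per smell type -- illustrative numbers, not real output.
    observed = {
        "long_method": ("warning", 4),
        "god_class": ("error", 1),
        "magic_number": ("info", 12),
    }

    total_minutes = 0.0
    for smell_type, (severity, count) in observed.items():
        base = estimator.get_remediation_time(smell_type)               # e.g. 20 for long_method
        adjusted = estimator.apply_severity_multiplier(base, severity)  # 1.5x / 1.0x / 0.5x
        total_minutes += adjusted * count

    print(f"Estimated debt: {total_minutes / 60:.1f} hours")
    # With the defaults above: 4*20*1.0 + 1*120*1.5 + 12*5*0.5 = 290 minutes, about 4.8 hours.

In the package itself, estimate_from_smells and estimate_from_project_metrics perform this same loop over real CodeSmell objects produced by the new collectors, then aggregate the results into a DebtSummary.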