codeshift-0.5.0-py3-none-any.whl → codeshift-0.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codeshift/__init__.py +1 -1
- codeshift/cli/commands/health.py +244 -0
- codeshift/cli/main.py +2 -0
- codeshift/health/__init__.py +50 -0
- codeshift/health/calculator.py +217 -0
- codeshift/health/metrics/__init__.py +63 -0
- codeshift/health/metrics/documentation.py +209 -0
- codeshift/health/metrics/freshness.py +180 -0
- codeshift/health/metrics/migration_readiness.py +142 -0
- codeshift/health/metrics/security.py +225 -0
- codeshift/health/metrics/test_coverage.py +191 -0
- codeshift/health/models.py +284 -0
- codeshift/health/report.py +310 -0
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/METADATA +1 -1
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/RECORD +19 -8
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/WHEEL +0 -0
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/entry_points.txt +0 -0
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/licenses/LICENSE +0 -0
- {codeshift-0.5.0.dist-info → codeshift-0.7.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
"""Documentation quality metric calculator."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import libcst as cst
|
|
8
|
+
|
|
9
|
+
from codeshift.health.metrics import BaseMetricCalculator
|
|
10
|
+
from codeshift.health.models import MetricCategory, MetricResult
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class DocumentationCalculator(BaseMetricCalculator):
    """Calculates documentation score (10% weight).

    Score based on:
    - Type hints coverage: 70% of score
    - Docstring coverage: 30% of score
    """

    # Directories holding vendored, generated, or tooling files; functions in
    # these must not influence the project's own documentation score.
    _EXCLUDED_PATTERNS = (
        ".venv",
        "venv",
        ".git",
        "__pycache__",
        ".tox",
        ".eggs",
        "build",
        "dist",
        ".mypy_cache",
        ".pytest_cache",
    )

    @property
    def category(self) -> MetricCategory:
        return MetricCategory.DOCUMENTATION

    @property
    def weight(self) -> float:
        return 0.10

    def calculate(self, project_path: Path, **kwargs: Any) -> MetricResult:
        """Calculate the documentation score.

        Args:
            project_path: Path to the project

        Returns:
            MetricResult with documentation score
        """
        # Find all Python files, excluding common non-source directories.
        python_files = [
            f
            for f in project_path.rglob("*.py")
            if not any(pattern in str(f) for pattern in self._EXCLUDED_PATTERNS)
        ]

        if not python_files:
            return self._create_result(
                score=100,
                description="No Python files to analyze",
                details={"file_count": 0},
                recommendations=[],
            )

        # Aggregate per-file counts across the whole project.
        total_functions = 0
        typed_functions = 0
        documented_functions = 0

        for file_path in python_files:
            try:
                # Read as UTF-8 explicitly: Python source defaults to UTF-8
                # (PEP 3120), while read_text() without an encoding uses the
                # locale encoding and can raise or mis-decode on platforms
                # where that is not UTF-8, silently dropping the file from
                # the statistics via the except below.
                source = file_path.read_text(encoding="utf-8")
                tree = cst.parse_module(source)
                stats = self._analyze_file(tree)

                total_functions += stats["total"]
                typed_functions += stats["typed"]
                documented_functions += stats["documented"]
            except Exception as e:
                # Best-effort: unreadable or unparseable files are skipped.
                logger.debug(f"Failed to analyze {file_path}: {e}")

        if total_functions == 0:
            return self._create_result(
                score=100,
                description="No functions found to analyze",
                details={"file_count": len(python_files), "function_count": 0},
                recommendations=[],
            )

        typed_ratio = typed_functions / total_functions
        documented_ratio = documented_functions / total_functions

        # Weighted blend: type hints dominate (70%), docstrings fill in (30%).
        score = (typed_ratio * 70) + (documented_ratio * 30)

        recommendations: list[str] = []
        if typed_ratio < 0.5:
            recommendations.append(
                f"Add type hints to functions ({typed_functions}/{total_functions} typed)"
            )
        if documented_ratio < 0.3:
            recommendations.append(
                f"Add docstrings to functions ({documented_functions}/{total_functions} documented)"
            )

        return self._create_result(
            score=score,
            description=f"{typed_ratio:.0%} typed, {documented_ratio:.0%} documented",
            details={
                "file_count": len(python_files),
                "function_count": total_functions,
                "typed_count": typed_functions,
                "documented_count": documented_functions,
                "typed_ratio": typed_ratio,
                "documented_ratio": documented_ratio,
            },
            recommendations=recommendations,
        )

    def _analyze_file(self, tree: cst.Module) -> dict[str, int]:
        """Analyze a file for type hints and docstrings.

        Args:
            tree: Parsed CST module

        Returns:
            Dict with total, typed, and documented counts
        """
        visitor = FunctionAnalyzer()
        # Use MetadataWrapper to walk the tree with the visitor
        wrapper = cst.MetadataWrapper(tree)
        wrapper.visit(visitor)

        return {
            "total": visitor.total_functions,
            "typed": visitor.typed_functions,
            "documented": visitor.documented_functions,
        }
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class FunctionAnalyzer(cst.CSTVisitor):
    """CST visitor that tallies functions with type hints and docstrings."""

    def __init__(self) -> None:
        # Running totals, updated as each FunctionDef node is visited.
        self.total_functions = 0
        self.typed_functions = 0
        self.documented_functions = 0

    def visit_FunctionDef(self, node: cst.FunctionDef) -> bool:
        """Count the function and record whether it is typed and documented."""
        self.total_functions += 1
        self.typed_functions += 1 if self._has_type_hints(node) else 0
        self.documented_functions += 1 if self._has_docstring(node) else 0
        # Returning True descends into the body, so nested defs are counted too.
        return True

    def _has_type_hints(self, node: cst.FunctionDef) -> bool:
        """Check if a function has type hints.

        Args:
            node: Function definition node

        Returns:
            True if function has return type or any parameter types
        """
        if node.returns is not None:
            return True
        return any(param.annotation is not None for param in node.params.params)

    def _has_docstring(self, node: cst.FunctionDef) -> bool:
        """Check if a function has a docstring.

        Args:
            node: Function definition node

        Returns:
            True if function has a docstring
        """
        statements = node.body.body
        if not statements:
            return False

        first = statements[0]
        # A docstring is a leading expression statement whose value is a
        # string literal. f-strings are accepted here too, even though
        # Python itself would not expose them via __doc__.
        if not isinstance(first, cst.SimpleStatementLine):
            return False
        if not first.body or not isinstance(first.body[0], cst.Expr):
            return False
        value = first.body[0].value
        return isinstance(
            value,
            (cst.SimpleString, cst.ConcatenatedString, cst.FormattedString),
        )
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
"""Dependency freshness metric calculator."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
from packaging.version import Version
|
|
9
|
+
|
|
10
|
+
from codeshift.health.metrics import BaseMetricCalculator
|
|
11
|
+
from codeshift.health.models import DependencyHealth, MetricCategory, MetricResult
|
|
12
|
+
from codeshift.scanner.dependency_parser import DependencyParser
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
# PyPI API timeout
|
|
17
|
+
PYPI_TIMEOUT = 5.0
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class FreshnessCalculator(BaseMetricCalculator):
    """Calculates dependency freshness score (30% weight).

    Score is based on how up-to-date dependencies are:
    - Major version behind: -15 points per dependency
    - Minor version behind: -5 points each (up to 3 per dependency)
    """

    @property
    def category(self) -> MetricCategory:
        return MetricCategory.FRESHNESS

    @property
    def weight(self) -> float:
        return 0.30

    def calculate(
        self,
        project_path: Path,
        dependencies: list[DependencyHealth] | None = None,
        **kwargs: Any,
    ) -> MetricResult:
        """Calculate the freshness score.

        Args:
            project_path: Path to the project
            dependencies: Pre-populated dependency health list (optional)

        Returns:
            MetricResult with freshness score
        """
        if dependencies is None:
            dependencies = self._analyze_dependencies(project_path)

        if not dependencies:
            return self._create_result(
                score=100,
                description="No dependencies to analyze",
                details={"dependency_count": 0},
                recommendations=[],
            )

        # Sum penalties and bucket stale dependencies by severity.
        total_penalty = 0
        outdated_deps: list[str] = []
        major_outdated: list[str] = []

        for dep in (d for d in dependencies if d.is_outdated):
            outdated_deps.append(dep.name)
            total_penalty += dep.version_lag_penalty
            if dep.major_versions_behind > 0:
                major_outdated.append(
                    f"{dep.name} ({dep.current_version} -> {dep.latest_version})"
                )

        # Start from a perfect score and subtract penalties, flooring at zero.
        score = max(0, 100 - total_penalty)

        recommendations: list[str] = []
        if major_outdated:
            overflow = len(major_outdated) - 3
            suffix = f" (+{overflow} more)" if overflow > 0 else ""
            recommendations.append(
                f"Update major versions: {', '.join(major_outdated[:3])}" + suffix
            )
        minor_count = len(outdated_deps) - len(major_outdated)
        if minor_count > 0:
            recommendations.append(
                f"Update {minor_count} dependencies with minor version updates"
            )

        return self._create_result(
            score=score,
            description=f"{len(outdated_deps)}/{len(dependencies)} dependencies outdated",
            details={
                "total_dependencies": len(dependencies),
                "outdated_count": len(outdated_deps),
                "major_outdated_count": len(major_outdated),
                "total_penalty": total_penalty,
            },
            recommendations=recommendations,
        )

    def _analyze_dependencies(self, project_path: Path) -> list[DependencyHealth]:
        """Analyze project dependencies for freshness.

        Args:
            project_path: Path to the project

        Returns:
            List of DependencyHealth objects
        """
        parser = DependencyParser(project_path)

        results: list[DependencyHealth] = []
        for dep in parser.parse_all():
            try:
                results.append(self._build_health(dep))
            except Exception as e:
                logger.debug(f"Error analyzing {dep.name}: {e}")
                # Fall back to an "unknown freshness" record.
                results.append(
                    DependencyHealth(
                        name=dep.name,
                        current_version=str(dep.min_version) if dep.min_version else None,
                        latest_version=None,
                        is_outdated=False,
                    )
                )

        return results

    def _build_health(self, dep: Any) -> DependencyHealth:
        """Build a DependencyHealth record for one parsed dependency.

        Compares the declared minimum version against the latest PyPI
        release; version lag is only computed when both are known.
        """
        latest = self._get_latest_version(dep.name)
        current = dep.min_version

        is_outdated = False
        major_behind = 0
        minor_behind = 0
        if current and latest:
            is_outdated = current < latest
            major_behind = max(0, latest.major - current.major)
            # Minor lag is only meaningful within the same major version.
            if major_behind == 0:
                minor_behind = max(0, latest.minor - current.minor)

        return DependencyHealth(
            name=dep.name,
            current_version=str(current) if current else None,
            latest_version=str(latest) if latest else None,
            is_outdated=is_outdated,
            major_versions_behind=major_behind,
            minor_versions_behind=minor_behind,
        )

    def _get_latest_version(self, package_name: str) -> Version | None:
        """Get the latest version of a package from PyPI.

        Args:
            package_name: Name of the package

        Returns:
            Latest Version or None if not found
        """
        url = f"https://pypi.org/pypi/{package_name}/json"
        try:
            response = httpx.get(url, timeout=PYPI_TIMEOUT)
            if response.status_code == 200:
                version_str = response.json().get("info", {}).get("version")
                if version_str:
                    return Version(version_str)
        except Exception as e:
            logger.debug(f"Failed to get latest version for {package_name}: {e}")

        return None
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""Migration readiness metric calculator."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from codeshift.health.metrics import BaseMetricCalculator
|
|
8
|
+
from codeshift.health.models import DependencyHealth, MetricCategory, MetricResult
|
|
9
|
+
from codeshift.knowledge_base import KnowledgeBaseLoader
|
|
10
|
+
from codeshift.scanner.dependency_parser import DependencyParser
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class MigrationReadinessCalculator(BaseMetricCalculator):
    """Calculates migration readiness score (20% weight).

    Score based on Tier 1/2 support coverage:
    - Tier 1 (deterministic AST): 100% score contribution
    - Tier 2 (knowledge base + LLM): 50% score contribution
    - No support: 0% score contribution
    """

    # Tier 1 supported libraries (have deterministic AST transformers).
    _TIER1_LIBRARIES = frozenset(
        {"pydantic", "fastapi", "sqlalchemy", "pandas", "requests"}
    )

    @property
    def category(self) -> MetricCategory:
        return MetricCategory.MIGRATION_READINESS

    @property
    def weight(self) -> float:
        return 0.20

    def calculate(
        self,
        project_path: Path,
        dependencies: list[DependencyHealth] | None = None,
        **kwargs: Any,
    ) -> MetricResult:
        """Calculate the migration readiness score.

        Args:
            project_path: Path to the project
            dependencies: Pre-populated dependency health list (optional)

        Returns:
            MetricResult with migration readiness score
        """
        if dependencies is None:
            dependencies = self._analyze_dependencies(project_path)

        if not dependencies:
            return self._create_result(
                score=100,
                description="No dependencies to analyze",
                details={"dependency_count": 0},
                recommendations=[],
            )

        # Tier 1 takes precedence: a dependency with both kinds of support is
        # counted once, in the Tier 1 bucket.
        tier1_count = sum(1 for d in dependencies if d.has_tier1_support)
        tier2_count = sum(
            1 for d in dependencies if d.has_tier2_support and not d.has_tier1_support
        )
        no_support_count = len(dependencies) - tier1_count - tier2_count

        total = len(dependencies)
        # Score: Tier 1 gets full points, Tier 2 gets half points
        score = ((tier1_count * 100) + (tier2_count * 50)) / total if total > 0 else 100

        # Build recommendations
        recommendations: list[str] = []

        if no_support_count > 0:
            unsupported = [
                d.name for d in dependencies if not d.has_tier1_support and not d.has_tier2_support
            ]
            recommendations.append(
                f"Consider requesting Tier 1 support for: {', '.join(unsupported[:3])}"
                + (f" (+{len(unsupported) - 3} more)" if len(unsupported) > 3 else "")
            )

        if tier2_count > 0:
            tier2_deps = [
                d.name for d in dependencies if d.has_tier2_support and not d.has_tier1_support
            ]
            recommendations.append(
                f"Libraries with Tier 2 (LLM) support: {', '.join(tier2_deps[:3])}"
            )

        return self._create_result(
            score=score,
            description=f"{tier1_count} Tier 1, {tier2_count} Tier 2, {no_support_count} unsupported",
            details={
                "total_dependencies": total,
                "tier1_count": tier1_count,
                "tier2_count": tier2_count,
                "unsupported_count": no_support_count,
                "tier1_ratio": tier1_count / total if total > 0 else 0,
                "tier2_ratio": tier2_count / total if total > 0 else 0,
            },
            recommendations=recommendations,
        )

    def _analyze_dependencies(self, project_path: Path) -> list[DependencyHealth]:
        """Analyze project dependencies for migration support.

        Args:
            project_path: Path to the project

        Returns:
            List of DependencyHealth objects with tier support info
        """
        parser = DependencyParser(project_path)
        dependencies = parser.parse_all()

        loader = KnowledgeBaseLoader()
        # Hoist the case-normalized lookup out of the loop: the previous code
        # rebuilt a lowercased list for every dependency (O(n*m) membership
        # tests); a set built once gives O(1) lookups.
        tier2_libraries = {lib.lower() for lib in loader.get_supported_libraries()}

        results: list[DependencyHealth] = []

        for dep in dependencies:
            dep_name_lower = dep.name.lower()

            results.append(
                DependencyHealth(
                    name=dep.name,
                    current_version=str(dep.min_version) if dep.min_version else None,
                    latest_version=None,
                    is_outdated=False,
                    # Tier 1: deterministic AST transforms exist for the library.
                    has_tier1_support=dep_name_lower in self._TIER1_LIBRARIES,
                    # Tier 2: a knowledge base entry exists (LLM-assisted).
                    has_tier2_support=dep_name_lower in tier2_libraries,
                )
            )

        return results
|