codeshift-0.5.0-py3-none-any.whl → codeshift-0.7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
codeshift/__init__.py CHANGED
@@ -4,5 +4,5 @@ Codeshift - AI-powered CLI tool for migrating Python code to handle breaking dep
  Don't just flag the update. Fix the break.
  """

- __version__ = "0.2.0"
+ __version__ = "0.7.3"
  __author__ = "Codeshift Team"
codeshift/cli/commands/health.py ADDED
@@ -0,0 +1,244 @@
+ """CLI command for codebase health scoring."""
+
+ import sys
+ from pathlib import Path
+
+ import click
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.table import Table
+
+ from codeshift.health.calculator import HealthCalculator
+ from codeshift.health.models import HealthGrade, HealthScore, MetricCategory
+ from codeshift.health.report import generate_json_report, save_html_report, save_json_report
+
+ console = Console()
+
+
+ @click.command()
+ @click.option(
+     "--path",
+     "-p",
+     type=click.Path(exists=True, file_okay=False, dir_okay=True),
+     default=".",
+     help="Path to the project (default: current directory)",
+ )
+ @click.option(
+     "--report",
+     "-r",
+     type=click.Choice(["json", "html"]),
+     help="Generate a detailed report in the specified format",
+ )
+ @click.option(
+     "--output",
+     "-o",
+     type=click.Path(),
+     help="Output file path for the report (default: health_report.<format>)",
+ )
+ @click.option(
+     "--ci",
+     is_flag=True,
+     help="CI mode: exit with non-zero status if score is below threshold",
+ )
+ @click.option(
+     "--threshold",
+     type=int,
+     default=70,
+     help="Minimum score for CI mode (default: 70)",
+ )
+ @click.option(
+     "--verbose",
+     "-v",
+     is_flag=True,
+     help="Show detailed output including all dependencies",
+ )
+ def health(
+     path: str,
+     report: str | None,
+     output: str | None,
+     ci: bool,
+     threshold: int,
+     verbose: bool,
+ ) -> None:
+     """Analyze codebase health and generate a score.
+
+     Evaluates your project across five dimensions:
+     - Dependency Freshness (30%): How up-to-date are your dependencies?
+     - Security (25%): Known vulnerabilities in dependencies
+     - Migration Readiness (20%): Tier 1/2 support coverage
+     - Test Coverage (15%): Percentage of code covered by tests
+     - Documentation (10%): Type hints and docstrings
+
+     \b
+     Examples:
+         codeshift health # Show health summary
+         codeshift health --report html # Generate HTML report
+         codeshift health --report json -o report.json
+         codeshift health --ci --threshold 70 # CI mode
+
+     """
+     project_path = Path(path).resolve()
+
+     with console.status("[bold blue]Analyzing codebase health..."):
+         calculator = HealthCalculator()
+         score = calculator.calculate(project_path)
+
+     # Handle report generation
+     if report:
+         output_path = Path(output) if output else Path(f"health_report.{report}")
+
+         if report == "json":
+             save_json_report(score, output_path)
+             console.print(f"[green]JSON report saved to:[/] {output_path}")
+         elif report == "html":
+             save_html_report(score, output_path)
+             console.print(f"[green]HTML report saved to:[/] {output_path}")
+
+         # In CI mode with report, also output JSON to stdout
+         if ci:
+             console.print(generate_json_report(score))
+     else:
+         # Display rich table output
+         _display_health_summary(score, verbose)
+
+     # CI mode exit code handling
+     if ci:
+         if score.overall_score < threshold:
+             console.print(
+                 f"\n[red]CI Check Failed:[/] Score {score.overall_score:.1f} is below threshold {threshold}"
+             )
+             sys.exit(1)
+         else:
+             console.print(
+                 f"\n[green]CI Check Passed:[/] Score {score.overall_score:.1f} meets threshold {threshold}"
+             )
+             sys.exit(0)
+
+
+ def _display_health_summary(score: HealthScore, verbose: bool) -> None:
+     """Display the health score summary in the terminal.
+
+     Args:
+         score: HealthScore object
+         verbose: Whether to show detailed output
+     """
+     # Grade panel
+     grade_style = _get_grade_style(score.grade)
+     console.print(
+         Panel(
+             f"[{grade_style}]Grade {score.grade.value}[/] - {score.overall_score:.1f}/100",
+             title="[bold]Codebase Health Score[/]",
+             subtitle=str(score.project_path),
+         )
+     )
+
+     # Metrics table
+     table = Table(title="Metrics Breakdown", show_header=True)
+     table.add_column("Category", style="cyan")
+     table.add_column("Score", justify="right")
+     table.add_column("Weight", justify="right", style="dim")
+     table.add_column("Details")
+
+     # Sort by score (lowest first) to highlight problem areas
+     sorted_metrics = sorted(score.metrics, key=lambda m: m.score)
+
+     for metric in sorted_metrics:
+         score_style = _get_score_style(metric.score)
+         weight_pct = f"{metric.weight * 100:.0f}%"
+         table.add_row(
+             _format_category(metric.category),
+             f"[{score_style}]{metric.score:.1f}[/]",
+             weight_pct,
+             metric.description,
+         )
+
+     console.print(table)
+
+     # Recommendations
+     if score.top_recommendations:
+         console.print("\n[bold]Top Recommendations:[/]")
+         for i, rec in enumerate(score.top_recommendations, 1):
+             console.print(f" {i}. {rec}")
+
+     # Verbose: show dependencies
+     if verbose and score.dependencies:
+         console.print()
+         deps_table = Table(title="Dependencies", show_header=True)
+         deps_table.add_column("Package", style="cyan")
+         deps_table.add_column("Current")
+         deps_table.add_column("Latest")
+         deps_table.add_column("Status")
+         deps_table.add_column("Migration")
+         deps_table.add_column("Vulns", justify="right")
+
+         for dep in score.dependencies:
+             status = "[green]✓[/]" if not dep.is_outdated else "[yellow]↑[/]"
+             tier = (
+                 "[green]Tier 1[/]"
+                 if dep.has_tier1_support
+                 else ("[cyan]Tier 2[/]" if dep.has_tier2_support else "[dim]-[/]")
+             )
+             vuln_count = len(dep.vulnerabilities)
+             vuln_style = "green" if vuln_count == 0 else "red"
+
+             deps_table.add_row(
+                 dep.name,
+                 dep.current_version or "?",
+                 dep.latest_version or "?",
+                 status,
+                 tier,
+                 f"[{vuln_style}]{vuln_count}[/]",
+             )
+
+         console.print(deps_table)
+
+     # Show vulnerabilities summary if any
+     if score.vulnerabilities:
+         console.print()
+         console.print(
+             f"[bold red]Security Alert:[/] {len(score.vulnerabilities)} vulnerabilities found"
+         )
+         for vuln in score.vulnerabilities[:3]:
+             console.print(
+                 f" - [{vuln.severity.value.upper()}] {vuln.package}: {vuln.vulnerability_id}"
+             )
+         if len(score.vulnerabilities) > 3:
+             console.print(f" ... and {len(score.vulnerabilities) - 3} more")
+
+
+ def _get_grade_style(grade: HealthGrade) -> str:
+     """Get Rich style for a grade."""
+     styles = {
+         HealthGrade.A: "bold green",
+         HealthGrade.B: "bold cyan",
+         HealthGrade.C: "bold yellow",
+         HealthGrade.D: "bold orange1",
+         HealthGrade.F: "bold red",
+     }
+     return styles.get(grade, "white")
+
+
+ def _get_score_style(score: float) -> str:
+     """Get Rich style for a numeric score."""
+     if score >= 90:
+         return "green"
+     elif score >= 80:
+         return "cyan"
+     elif score >= 70:
+         return "yellow"
+     elif score >= 60:
+         return "orange1"
+     else:
+         return "red"
+
+
+ def _format_category(category: MetricCategory) -> str:
+     """Format category for display."""
+     names = {
+         MetricCategory.FRESHNESS: "Freshness",
+         MetricCategory.SECURITY: "Security",
+         MetricCategory.MIGRATION_READINESS: "Migration Ready",
+         MetricCategory.TEST_COVERAGE: "Test Coverage",
+         MetricCategory.DOCUMENTATION: "Documentation",
+     }
+     return names.get(category, category.value)
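
Note: as a quick illustration of the new command's CI behavior, the sketch below drives it through click's CliRunner test utility. It assumes codeshift 0.7.3 is installed and is run against a project directory it can parse; the flag values are examples only.

# Illustrative only: invoke the new `health` command the way a CI job might.
from click.testing import CliRunner

from codeshift.cli.commands.health import health

runner = CliRunner()
result = runner.invoke(health, ["--path", ".", "--ci", "--threshold", "70"])

# Per the command above, exit code 0 means the score met the threshold; 1 means it fell below.
print(result.exit_code)
print(result.output)
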
codeshift/cli/main.py CHANGED
@@ -15,6 +15,7 @@ from codeshift.cli.commands.auth import (
      whoami,
  )
  from codeshift.cli.commands.diff import diff
+ from codeshift.cli.commands.health import health
  from codeshift.cli.commands.scan import scan
  from codeshift.cli.commands.upgrade import upgrade
  from codeshift.cli.commands.upgrade_all import upgrade_all
@@ -46,6 +47,7 @@ cli.add_command(upgrade)
  cli.add_command(upgrade_all)
  cli.add_command(diff)
  cli.add_command(apply)
+ cli.add_command(health)

  # Auth commands
  cli.add_command(register)
codeshift/health/__init__.py ADDED
@@ -0,0 +1,50 @@
+ """Codebase health scoring module.
+
+ This module provides health scoring capabilities for Python projects,
+ analyzing dependency freshness, security, migration readiness, test
+ coverage, and documentation quality.
+
+ Example:
+     >>> from codeshift.health import HealthCalculator
+     >>> calculator = HealthCalculator()
+     >>> score = calculator.calculate(Path("."))
+     >>> print(score.summary)
+     🟢 Grade A (92.5/100)
+ """
+
+ from codeshift.health.calculator import HealthCalculator
+ from codeshift.health.models import (
+     DependencyHealth,
+     HealthGrade,
+     HealthReport,
+     HealthScore,
+     MetricCategory,
+     MetricResult,
+     SecurityVulnerability,
+     VulnerabilitySeverity,
+ )
+ from codeshift.health.report import (
+     generate_html_report,
+     generate_json_report,
+     save_html_report,
+     save_json_report,
+ )
+
+ __all__ = [
+     # Main calculator
+     "HealthCalculator",
+     # Models
+     "DependencyHealth",
+     "HealthGrade",
+     "HealthReport",
+     "HealthScore",
+     "MetricCategory",
+     "MetricResult",
+     "SecurityVulnerability",
+     "VulnerabilitySeverity",
+     # Report functions
+     "generate_html_report",
+     "generate_json_report",
+     "save_html_report",
+     "save_json_report",
+ ]
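
Note: a minimal sketch of using the public API re-exported by this package, assuming codeshift 0.7.3 is installed and the current directory holds a parseable Python project; every name used below appears in this __init__ module.

from pathlib import Path

from codeshift.health import HealthCalculator, save_json_report

calculator = HealthCalculator()
score = calculator.calculate(Path("."))              # HealthScore with metrics and grade
save_json_report(score, Path("health_report.json"))  # same helper the CLI command uses
print(score.overall_score, score.grade)
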
codeshift/health/calculator.py ADDED
@@ -0,0 +1,217 @@
+ """Main health score calculator orchestrator."""
+
+ import logging
+ from pathlib import Path
+
+ from codeshift.health.metrics import BaseMetricCalculator
+ from codeshift.health.metrics.documentation import DocumentationCalculator
+ from codeshift.health.metrics.freshness import FreshnessCalculator
+ from codeshift.health.metrics.migration_readiness import MigrationReadinessCalculator
+ from codeshift.health.metrics.security import SecurityCalculator
+ from codeshift.health.metrics.test_coverage import TestCoverageCalculator
+ from codeshift.health.models import (
+     DependencyHealth,
+     HealthGrade,
+     HealthReport,
+     HealthScore,
+     MetricResult,
+     SecurityVulnerability,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class HealthCalculator:
+     """Orchestrates health score calculation across all metrics."""
+
+     def __init__(self) -> None:
+         """Initialize the calculator with all metric calculators."""
+         self.calculators: list[BaseMetricCalculator] = [
+             FreshnessCalculator(),
+             SecurityCalculator(),
+             MigrationReadinessCalculator(),
+             TestCoverageCalculator(),
+             DocumentationCalculator(),
+         ]
+
+     def calculate(self, project_path: Path) -> HealthScore:
+         """Calculate the complete health score for a project.
+
+         Args:
+             project_path: Path to the project root
+
+         Returns:
+             HealthScore with all metrics and overall score
+         """
+         project_path = project_path.resolve()
+
+         # First, analyze dependencies once to share across calculators
+         dependencies = self._analyze_dependencies(project_path)
+
+         # Calculate each metric
+         metrics: list[MetricResult] = []
+         for calculator in self.calculators:
+             try:
+                 result = calculator.calculate(
+                     project_path,
+                     dependencies=dependencies,
+                 )
+                 metrics.append(result)
+             except Exception as e:
+                 logger.warning(f"Failed to calculate {calculator.category.value}: {e}")
+                 # Add a neutral result on failure
+                 metrics.append(
+                     MetricResult(
+                         category=calculator.category,
+                         score=50,
+                         weight=calculator.weight,
+                         description=f"Error: {str(e)[:50]}",
+                         details={"error": str(e)},
+                         recommendations=["Fix metric calculation error"],
+                     )
+                 )
+
+         # Calculate overall weighted score
+         total_weight = sum(m.weight for m in metrics)
+         if total_weight > 0:
+             overall_score = sum(m.weighted_score for m in metrics) / total_weight
+         else:
+             overall_score = 0
+
+         # Collect all vulnerabilities
+         all_vulns: list[SecurityVulnerability] = []
+         for dep in dependencies:
+             all_vulns.extend(dep.vulnerabilities)
+
+         return HealthScore(
+             overall_score=overall_score,
+             grade=HealthGrade.from_score(overall_score),
+             metrics=metrics,
+             dependencies=dependencies,
+             vulnerabilities=all_vulns,
+             project_path=project_path,
+         )
+
+     def calculate_report(
+         self,
+         project_path: Path,
+         previous: HealthScore | None = None,
+     ) -> HealthReport:
+         """Calculate a health report with trend information.
+
+         Args:
+             project_path: Path to the project root
+             previous: Optional previous health score for comparison
+
+         Returns:
+             HealthReport with current score and trend
+         """
+         current = self.calculate(project_path)
+         return HealthReport(current=current, previous=previous)
+
+     def _analyze_dependencies(self, project_path: Path) -> list[DependencyHealth]:
+         """Analyze all dependencies for shared data.
+
+         This method runs once and provides data for multiple calculators
+         to avoid redundant API calls.
+
+         Args:
+             project_path: Path to the project
+
+         Returns:
+             List of DependencyHealth with all analyzable data
+         """
+         from codeshift.scanner.dependency_parser import DependencyParser
+
+         parser = DependencyParser(project_path)
+         raw_deps = parser.parse_all()
+
+         # Get knowledge base info for tier support
+         from codeshift.knowledge_base import KnowledgeBaseLoader
+
+         loader = KnowledgeBaseLoader()
+         supported_libraries = loader.get_supported_libraries()
+         tier1_libraries = {"pydantic", "fastapi", "sqlalchemy", "pandas", "requests"}
+
+         dependencies: list[DependencyHealth] = []
+
+         for dep in raw_deps:
+             dep_name_lower = dep.name.lower()
+
+             # Get latest version and vulnerabilities from PyPI
+             latest_version = None
+             vulnerabilities: list[SecurityVulnerability] = []
+
+             try:
+                 import httpx
+                 from packaging.version import Version
+
+                 response = httpx.get(
+                     f"https://pypi.org/pypi/{dep.name}/json",
+                     timeout=5.0,
+                 )
+                 if response.status_code == 200:
+                     data = response.json()
+
+                     # Get latest version
+                     version_str = data.get("info", {}).get("version")
+                     if version_str:
+                         latest_version = Version(version_str)
+
+                     # Get vulnerabilities
+                     from codeshift.health.models import VulnerabilitySeverity
+
+                     for vuln_data in data.get("vulnerabilities", []):
+                         try:
+                             severity = VulnerabilitySeverity.MEDIUM
+                             vulnerabilities.append(
+                                 SecurityVulnerability(
+                                     package=dep.name,
+                                     vulnerability_id=vuln_data.get("id", "unknown"),
+                                     severity=severity,
+                                     description=vuln_data.get("summary", "")[:200],
+                                     fixed_in=(
+                                         vuln_data.get("fixed_in", [None])[0]
+                                         if vuln_data.get("fixed_in")
+                                         else None
+                                     ),
+                                     url=vuln_data.get("link"),
+                                 )
+                             )
+                         except Exception:
+                             pass
+
+             except Exception as e:
+                 logger.debug(f"Failed to fetch PyPI data for {dep.name}: {e}")
+
+             # Calculate version lag
+             current = dep.min_version
+             is_outdated = False
+             major_behind = 0
+             minor_behind = 0
+
+             if current and latest_version:
+                 is_outdated = current < latest_version
+                 major_behind = max(0, latest_version.major - current.major)
+                 if major_behind == 0:
+                     minor_behind = max(0, latest_version.minor - current.minor)
+
+             # Check tier support
+             has_tier1 = dep_name_lower in tier1_libraries
+             has_tier2 = dep_name_lower in [lib.lower() for lib in supported_libraries]
+
+             dependencies.append(
+                 DependencyHealth(
+                     name=dep.name,
+                     current_version=str(current) if current else None,
+                     latest_version=str(latest_version) if latest_version else None,
+                     is_outdated=is_outdated,
+                     major_versions_behind=major_behind,
+                     minor_versions_behind=minor_behind,
+                     has_tier1_support=has_tier1,
+                     has_tier2_support=has_tier2,
+                     vulnerabilities=vulnerabilities,
+                 )
+             )
+
+         return dependencies
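
Note: to make the weighting in calculate() concrete, here is a standalone sketch of the same arithmetic with made-up per-metric scores. It assumes MetricResult.weighted_score is simply score * weight, which lives in codeshift.health.models and is not shown in this diff.

# Hypothetical scores paired with the documented weights
# (freshness 30%, security 25%, migration readiness 20%, test coverage 15%, docs 10%).
metrics = [(85.0, 0.30), (100.0, 0.25), (60.0, 0.20), (40.0, 0.15), (70.0, 0.10)]

total_weight = sum(weight for _, weight in metrics)  # 1.0
overall = sum(score * weight for score, weight in metrics) / total_weight
print(round(overall, 1))  # 75.5
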
codeshift/health/metrics/__init__.py ADDED
@@ -0,0 +1,63 @@
+ """Base class and utilities for health metric calculators."""
+
+ from abc import ABC, abstractmethod
+ from pathlib import Path
+ from typing import Any
+
+ from codeshift.health.models import MetricCategory, MetricResult
+
+
+ class BaseMetricCalculator(ABC):
+     """Abstract base class for health metric calculators."""
+
+     @property
+     @abstractmethod
+     def category(self) -> MetricCategory:
+         """Return the metric category."""
+         ...
+
+     @property
+     @abstractmethod
+     def weight(self) -> float:
+         """Return the weight for this metric (0.0 to 1.0)."""
+         ...
+
+     @abstractmethod
+     def calculate(self, project_path: Path, **kwargs: Any) -> MetricResult:
+         """Calculate the metric score.
+
+         Args:
+             project_path: Path to the project root
+             **kwargs: Additional arguments specific to the metric
+
+         Returns:
+             MetricResult with score and details
+         """
+         ...
+
+     def _create_result(
+         self,
+         score: float,
+         description: str,
+         details: dict | None = None,
+         recommendations: list[str] | None = None,
+     ) -> MetricResult:
+         """Helper to create a MetricResult.
+
+         Args:
+             score: Score from 0-100
+             description: Human-readable description
+             details: Optional details dictionary
+             recommendations: Optional list of recommendations
+
+         Returns:
+             MetricResult instance
+         """
+         return MetricResult(
+             category=self.category,
+             score=max(0, min(100, score)),  # Clamp to 0-100
+             weight=self.weight,
+             description=description,
+             details=details or {},
+             recommendations=recommendations or [],
+         )
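
Note: to show how a concrete metric would plug into this base class, here is a hypothetical subclass that is not shipped in the package; its category, weight, and scoring rule are illustrative only.

from pathlib import Path
from typing import Any

from codeshift.health.metrics import BaseMetricCalculator
from codeshift.health.models import MetricCategory, MetricResult


class ReadmeCalculator(BaseMetricCalculator):
    """Illustrative metric: does the project ship a README?"""

    @property
    def category(self) -> MetricCategory:
        return MetricCategory.DOCUMENTATION  # reuses an existing category for the example

    @property
    def weight(self) -> float:
        return 0.10

    def calculate(self, project_path: Path, **kwargs: Any) -> MetricResult:
        has_readme = any((project_path / name).exists() for name in ("README.md", "README.rst"))
        return self._create_result(
            score=100 if has_readme else 0,
            description="README present" if has_readme else "No README found",
            recommendations=[] if has_readme else ["Add a README to the project root"],
        )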