codeclone-cli 1.0.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codeclone_cli-1.0.0/.gitignore +57 -0
- codeclone_cli-1.0.0/PKG-INFO +75 -0
- codeclone_cli-1.0.0/README.md +45 -0
- codeclone_cli-1.0.0/codeclone_cli.py +877 -0
- codeclone_cli-1.0.0/pyproject.toml +55 -0
--- /dev/null
+++ codeclone_cli-1.0.0/.gitignore
@@ -0,0 +1,57 @@
+# Node
+node_modules/
+out/
+*.vsix
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+.venv/
+venv/
+ENV/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+*~
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+.tox/
+.nox/
+
+# Build artifacts
+*.log
+*.tmp
+
+# OS
+.DS_Store
+Thumbs.db
+site/.astro/
+site/dist/
+site/node_modules/
+.polyglot-cache.json
--- /dev/null
+++ codeclone_cli-1.0.0/PKG-INFO
@@ -0,0 +1,75 @@
+Metadata-Version: 2.4
+Name: codeclone-cli
+Version: 1.0.0
+Summary: CodeClone CLI - Deterministic code coverage analysis
+Project-URL: Homepage, https://github.com/mcp-tool-shop-org/codeclone-suite
+Project-URL: Repository, https://github.com/mcp-tool-shop-org/codeclone-suite
+Project-URL: Documentation, https://github.com/mcp-tool-shop-org/codeclone-suite/blob/main/docs/EDITOR_INTEGRATION.md
+Author-email: mcp-tool-shop <64996768+mcp-tool-shop@users.noreply.github.com>
+License-Expression: MIT
+Keywords: analysis,cli,coverage,testing
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Requires-Python: >=3.10
+Provides-Extra: dev
+Requires-Dist: jsonschema>=4.0; extra == 'dev'
+Requires-Dist: mypy>=1.0; extra == 'dev'
+Requires-Dist: pytest-cov>=4.0; extra == 'dev'
+Requires-Dist: pytest>=7.0; extra == 'dev'
+Requires-Dist: ruff>=0.4; extra == 'dev'
+Description-Content-Type: text/markdown
+
+# CodeClone CLI
+
+The analysis engine for CodeClone Suite. This CLI provides the protocol boundary for editor extensions.
+
+## Installation
+
+```bash
+pip install codeclone-cli
+```
+
+Or install from source:
+
+```bash
+cd engines/codeclone-cli
+pip install -e .
+```
+
+## Usage
+
+```bash
+# Show version
+codeclone version --json
+
+# Analyze a repository
+codeclone analyze /path/to/repo --json
+```
+
+## Output Contract
+
+This CLI follows the [Editor Integration Specification](../../docs/EDITOR_INTEGRATION.md).
+
+- stdout is JSON only in `--json` mode
+- stderr is for human diagnostics
+- Exit codes: 0 (OK), 1 (PARTIAL), 2 (FAIL)
+
+## Development
+
+```bash
+pip install -e ".[dev]"
+pytest
+```
+
+## License
+
+MIT
--- /dev/null
+++ codeclone_cli-1.0.0/README.md
@@ -0,0 +1,45 @@
+# CodeClone CLI
+
+The analysis engine for CodeClone Suite. This CLI provides the protocol boundary for editor extensions.
+
+## Installation
+
+```bash
+pip install codeclone-cli
+```
+
+Or install from source:
+
+```bash
+cd engines/codeclone-cli
+pip install -e .
+```
+
+## Usage
+
+```bash
+# Show version
+codeclone version --json
+
+# Analyze a repository
+codeclone analyze /path/to/repo --json
+```
+
+## Output Contract
+
+This CLI follows the [Editor Integration Specification](../../docs/EDITOR_INTEGRATION.md).
+
+- stdout is JSON only in `--json` mode
+- stderr is for human diagnostics
+- Exit codes: 0 (OK), 1 (PARTIAL), 2 (FAIL)
+
+## Development
+
+```bash
+pip install -e ".[dev]"
+pytest
+```
+
+## License
+
+MIT
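The output contract above is what editor extensions program against. Below is a minimal consumer sketch in Python, not the suite's official client; `/path/to/repo` is a placeholder and `codeclone-cli` is assumed to be installed:

```python
import json
import subprocess

# /path/to/repo is a placeholder; any analyzed checkout works.
proc = subprocess.run(
    ["codeclone", "analyze", "/path/to/repo", "--json"],
    capture_output=True,
    text=True,
)

# Contract: stdout is a single JSON document in --json mode;
# stderr (if any) is human diagnostics and can be logged as-is.
result = json.loads(proc.stdout)

# Contract: the exit code mirrors the "status" field (0=OK, 1=PARTIAL, 2=FAIL).
status_by_code = {0: "OK", 1: "PARTIAL", 2: "FAIL"}
assert result["status"] == status_by_code[proc.returncode]

for diag in result["diagnostics"]:
    print(f'[{diag["severity"]}] {diag["code"]}: {diag["message"]}')
```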
--- /dev/null
+++ codeclone_cli-1.0.0/codeclone_cli.py
@@ -0,0 +1,877 @@
+#!/usr/bin/env python3
+"""
+CodeClone CLI - Deterministic code coverage analysis.
+
+This CLI provides the protocol boundary for editor extensions.
+stdout is JSON only in --json mode. stderr is for human diagnostics.
+"""
+
+import argparse
+import json
+import platform
+import sys
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+__version__ = "0.2.0"
+SCHEMA_VERSION = "0.1"
+
+
+# =============================================================================
+# Diagnostic Codes (frozen for v0.1)
+# =============================================================================
+
+
+class DiagnosticCode:
+    """Canonical diagnostic codes per EDITOR_INTEGRATION.md"""
+
+    # Evidence-only codes (Increment A)
+    UNTESTED_MODULE = "UNTESTED_MODULE"
+    LINE_UNCOVERED = "LINE_UNCOVERED"
+    COVERAGE_DATA_MISSING = "COVERAGE_DATA_MISSING"
+    COVERAGE_FILE_STALE = "COVERAGE_FILE_STALE"
+    COVERAGE_REPORT_PARSE_ERROR = "COVERAGE_REPORT_PARSE_ERROR"
+    DIAGNOSTICS_TRUNCATED = "DIAGNOSTICS_TRUNCATED"
+
+    # Reason codes (Increment B - future)
+    GUARD_BLOCKS_EXECUTION = "GUARD_BLOCKS_EXECUTION"
+    EXCEPTION_NEVER_TRIGGERED = "EXCEPTION_NEVER_TRIGGERED"
+    EARLY_RETURN_BLOCKS_CODE = "EARLY_RETURN_BLOCKS_CODE"
+    BRANCH_NEVER_TAKEN = "BRANCH_NEVER_TAKEN"
+
+    # Toolchain codes
+    TOOLCHAIN_ERROR = "TOOLCHAIN_ERROR"
+    REPO_ROOT_NOT_FOUND = "REPO_ROOT_NOT_FOUND"
+    CONFIG_INVALID = "CONFIG_INVALID"
+
+
+# Maximum diagnostics per file and total
+MAX_DIAGNOSTICS_PER_FILE = 10
+MAX_TOTAL_DIAGNOSTICS = 100
+STALENESS_THRESHOLD_SECONDS = 3600
+
+# Codes promoted to error under --strict-evidence
+STRICT_EVIDENCE_CODES = frozenset(
+    {
+        DiagnosticCode.COVERAGE_FILE_STALE,
+    }
+)
+
+
+class Severity:
+    """Diagnostic severity levels"""
+
+    ERROR = "error"
+    WARNING = "warning"
+    INFO = "info"
+
+
+# =============================================================================
+# Coverage Data Structures
+# =============================================================================
+
+
+@dataclass
+class FileCoverage:
+    """Coverage data for a single file."""
+
+    path: str
+    executed_lines: set[int] = field(default_factory=set)
+    missing_lines: set[int] = field(default_factory=set)
+    excluded_lines: set[int] = field(default_factory=set)
+    total_statements: int = 0
+    covered_statements: int = 0
+
+    @property
+    def coverage_percent(self) -> float:
+        if self.total_statements == 0:
+            return 0.0
+        return (self.covered_statements / self.total_statements) * 100
+
+
+@dataclass
+class CoverageReport:
+    """Parsed coverage report."""
+
+    files: dict[str, FileCoverage] = field(default_factory=dict)
+    total_statements: int = 0
+    covered_statements: int = 0
+    source_path: str | None = None
+    coverage_mtime: float | None = None  # mtime of coverage file for staleness check
+
+    @property
+    def coverage_percent(self) -> float:
+        if self.total_statements == 0:
+            return 0.0
+        return (self.covered_statements / self.total_statements) * 100
+
+
+@dataclass
+class AnalysisContext:
+    """Context accumulated during analysis for building summary."""
+
+    files_analyzed: int = 0
+    coverage_percent: float | None = None
+    dropped_count: int = 0
+    stale_file_count: int = 0
+    coverage_json_path: str | None = None
+    coverage_json_mtime: float | None = None
+
+
+# =============================================================================
+# Coverage Parsing
+# =============================================================================
+
+
+def find_coverage_file(repo_root: Path) -> Path | None:
+    """Find coverage.json in the repository.
+
+    Looks for:
+    1. coverage.json (coverage.py JSON output)
+    2. .coverage.json (alternate location)
+    3. htmlcov/status.json (coverage.py HTML report)
+    """
+    candidates = [
+        repo_root / "coverage.json",
+        repo_root / ".coverage.json",
+        repo_root / "htmlcov" / "status.json",
+    ]
+
+    for path in candidates:
+        if path.exists() and path.is_file():
+            return path
+
+    return None
+
+
+def parse_coverage_json(
+    coverage_path: Path, repo_root: Path
+) -> tuple[CoverageReport | None, str | None]:
+    """Parse coverage.py JSON format.
+
+    Handles the standard coverage.py JSON output format:
+    {
+        "meta": {...},
+        "files": {
+            "path/to/file.py": {
+                "executed_lines": [1, 2, 3],
+                "missing_lines": [4, 5],
+                "excluded_lines": [],
+                "summary": {
+                    "num_statements": 10,
+                    "covered_lines": 5,
+                    ...
+                }
+            }
+        },
+        "totals": {...}
+    }
+
+    Returns:
+        (report, error_message) - report is None if parsing failed
+    """
+    try:
+        with open(coverage_path, "r", encoding="utf-8") as f:
+            data = json.load(f)
+    except json.JSONDecodeError as e:
+        return None, f"Invalid JSON at line {e.lineno}: {e.msg}"
+    except OSError as e:
+        return None, f"Cannot read file: {e}"
+
+    # Validate expected structure
+    if not isinstance(data, dict):
+        return None, "Coverage file must be a JSON object"
+
+    if "files" not in data:
+        return None, "Coverage file missing 'files' field (expected coverage.py format)"
+
+    report = CoverageReport(source_path=str(coverage_path))
+
+    # Store coverage file mtime for staleness check
+    try:
+        report.coverage_mtime = coverage_path.stat().st_mtime
+    except OSError:
+        report.coverage_mtime = None
+
+    # Handle coverage.py format
+    files_data = data.get("files", {})
+
+    for file_path, file_data in files_data.items():
+        # Normalize path relative to repo root
+        try:
+            abs_path = Path(file_path)
+            if not abs_path.is_absolute():
+                abs_path = repo_root / file_path
+            rel_path = abs_path.relative_to(repo_root)
+        except ValueError:
+            rel_path = Path(file_path)
+
+        fc = FileCoverage(path=str(rel_path))
+
+        # Parse line data
+        fc.executed_lines = set(file_data.get("executed_lines", []))
+        fc.missing_lines = set(file_data.get("missing_lines", []))
+        fc.excluded_lines = set(file_data.get("excluded_lines", []))
+
+        # Parse summary
+        summary = file_data.get("summary", {})
+        fc.total_statements = summary.get("num_statements", 0)
+        fc.covered_statements = summary.get("covered_lines", len(fc.executed_lines))
+
+        report.files[str(rel_path)] = fc
+        report.total_statements += fc.total_statements
+        report.covered_statements += fc.covered_statements
+
+    return report, None
+
+
+def check_coverage_staleness(
+    report: CoverageReport,
+    repo_root: Path,
+    threshold_seconds: float = STALENESS_THRESHOLD_SECONDS,
+) -> tuple[dict[str, Any] | None, int]:
+    """Check if coverage data is stale (older than source files).
+
+    Returns:
+        (diagnostic_or_none, stale_file_count)
+    """
+    if report.coverage_mtime is None:
+        return None, 0
+
+    # Count files newer than coverage
+    stale_count = 0
+    newest_source_mtime: float | None = None
+    newest_source_path: str | None = None
+
+    for file_path in report.files.keys():
+        full_path = repo_root / file_path
+        if full_path.exists():
+            try:
+                mtime = full_path.stat().st_mtime
+                if mtime > report.coverage_mtime + threshold_seconds:
+                    stale_count += 1
+                if newest_source_mtime is None or mtime > newest_source_mtime:
+                    newest_source_mtime = mtime
+                    newest_source_path = file_path
+            except OSError:
+                continue
+
+    if newest_source_mtime is None:
+        return None, 0
+
+    # Check if source is newer than coverage by threshold
+    age_diff = newest_source_mtime - report.coverage_mtime
+    if age_diff > threshold_seconds:
+        hours_stale = int(age_diff / 3600)
+        diag = build_diagnostic(
+            code=DiagnosticCode.COVERAGE_FILE_STALE,
+            severity=Severity.WARNING,
+            message=f"Coverage data may be stale ({hours_stale}h behind source changes)",
+            evidence=[
+                f"Coverage file: {report.source_path}",
+                f"Newest source: {newest_source_path}",
+                f"Source is {hours_stale} hours newer than coverage",
+            ],
+            suggestions=[
+                "Run tests with coverage again: pytest --cov=. --cov-report=json",
+            ],
+        )
+        return diag, stale_count
+
+    return None, 0
+
+
+# =============================================================================
+# Analysis Engine
+# =============================================================================
+
+
+def analyze_coverage(
+    repo_root: Path, files_analyzed: int
+) -> tuple[CoverageReport | None, list[dict[str, Any]], AnalysisContext]:
+    """Analyze coverage data and generate diagnostics.
+
+    Returns:
+        (coverage_report, diagnostics, context)
+    """
+    diagnostics: list[dict[str, Any]] = []
+    truncated_count = 0
+    ctx = AnalysisContext(files_analyzed=files_analyzed)
+
+    # Find coverage file
+    coverage_path = find_coverage_file(repo_root)
+
+    if coverage_path is None:
+        diagnostics.append(
+            build_diagnostic(
+                code=DiagnosticCode.COVERAGE_DATA_MISSING,
+                severity=Severity.WARNING,
+                message="No coverage data found. Run tests with coverage first.",
+                evidence=[
+                    "Looked for: coverage.json, .coverage.json, htmlcov/status.json"
+                ],
+                suggestions=[
+                    "Run: pytest --cov=. --cov-report=json",
+                    "Or: coverage run -m pytest && coverage json",
+                ],
+            )
+        )
+        return None, diagnostics, ctx
+
+    # Parse coverage data
+    report, parse_error = parse_coverage_json(coverage_path, repo_root)
+
+    if report is None:
+        diagnostics.append(
+            build_diagnostic(
+                code=DiagnosticCode.COVERAGE_REPORT_PARSE_ERROR,
+                severity=Severity.ERROR,
+                message=f"Failed to parse coverage file: {parse_error}",
+                file=str(coverage_path.relative_to(repo_root))
+                if coverage_path.is_relative_to(repo_root)
+                else str(coverage_path),
+                suggestions=[
+                    "Check coverage file format is valid JSON",
+                    "Regenerate with: coverage json",
+                ],
+            )
+        )
+        return None, diagnostics, ctx
+
+    # Store evidence metadata in context
+    ctx.coverage_json_path = str(coverage_path)
+    ctx.coverage_json_mtime = report.coverage_mtime
+    ctx.coverage_percent = report.coverage_percent
+
+    # Check for stale coverage data
+    staleness_diag, stale_count = check_coverage_staleness(report, repo_root)
+    ctx.stale_file_count = stale_count
+    if staleness_diag:
+        diagnostics.append(staleness_diag)
+
+    # Generate diagnostics for each file
+    file_diag_counts: dict[str, int] = {}
+
+    for file_path, file_cov in report.files.items():
+        # Check total diagnostic limit
+        if len(diagnostics) >= MAX_TOTAL_DIAGNOSTICS:
+            truncated_count += 1
+            break
+
+        # Skip test files
+        if is_test_file(file_path):
+            continue
+
+        file_diag_counts[file_path] = 0
+
+        # Check for completely untested modules
+        if file_cov.coverage_percent == 0 and file_cov.total_statements > 0:
+            diagnostics.append(
+                build_diagnostic(
+                    code=DiagnosticCode.UNTESTED_MODULE,
+                    severity=Severity.WARNING,
+                    message=f"Module has no test coverage: {file_path}",
+                    file=file_path,
+                    line=1,
+                    evidence=[
+                        f"0% line coverage ({file_cov.total_statements} statements)",
+                        "No executed lines found",
+                    ],
+                    suggestions=[
+                        f"Add tests for {Path(file_path).stem}",
+                    ],
+                )
+            )
+            file_diag_counts[file_path] += 1
+            continue
+
+        # Report uncovered line ranges (group consecutive lines)
+        if file_cov.missing_lines:
+            ranges = group_line_ranges(sorted(file_cov.missing_lines))
+
+            # Limit per file
+            max_for_file = min(
+                MAX_DIAGNOSTICS_PER_FILE - file_diag_counts[file_path], 5
+            )
+            for start, end in ranges[:max_for_file]:
+                if len(diagnostics) >= MAX_TOTAL_DIAGNOSTICS:
+                    truncated_count += 1
+                    break
+
+                if start == end:
+                    msg = f"Line {start} not covered"
+                    line_desc = f"Line {start}"
+                else:
+                    msg = f"Lines {start}-{end} not covered"
+                    line_desc = f"Lines {start}-{end}"
+
+                diagnostics.append(
+                    build_diagnostic(
+                        code=DiagnosticCode.LINE_UNCOVERED,
+                        severity=Severity.INFO,
+                        message=msg,
+                        file=file_path,
+                        line=start,
+                        end_line=end if end != start else None,
+                        evidence=[
+                            f"{line_desc} never executed during tests",
+                            f"File coverage: {file_cov.coverage_percent:.1f}%",
+                        ],
+                    )
+                )
+                file_diag_counts[file_path] += 1
+
+            # If more ranges were truncated, note it
+            if len(ranges) > max_for_file:
+                remaining = len(ranges) - max_for_file
+                truncated_count += remaining
+
+    # Store dropped count in context before adding truncation diagnostic
+    ctx.dropped_count = truncated_count
+
+    # Add truncation diagnostic if we hit limits
+    if truncated_count > 0:
+        diagnostics.append(
+            build_diagnostic(
+                code=DiagnosticCode.DIAGNOSTICS_TRUNCATED,
+                severity=Severity.INFO,
+                message=f"{truncated_count} additional findings not shown",
+                evidence=[
+                    f"Limit: {MAX_TOTAL_DIAGNOSTICS} diagnostics, {MAX_DIAGNOSTICS_PER_FILE} per file",
+                    "Run with higher limits or fix existing issues first",
+                ],
+            )
+        )
+
+    return report, diagnostics, ctx
+
+
+def group_line_ranges(lines: list[int]) -> list[tuple[int, int]]:
+    """Group consecutive line numbers into ranges.
+
+    [1, 2, 3, 5, 7, 8, 9] -> [(1, 3), (5, 5), (7, 9)]
+    """
+    if not lines:
+        return []
+
+    ranges: list[tuple[int, int]] = []
+    start = lines[0]
+    end = lines[0]
+
+    for line in lines[1:]:
+        if line == end + 1:
+            end = line
+        else:
+            ranges.append((start, end))
+            start = line
+            end = line
+
+    ranges.append((start, end))
+    return ranges
+
+
+def is_test_file(path: str) -> bool:
+    """Check if a file is a test file."""
+    p = Path(path)
+    name = p.name.lower()
+
+    # Common test file patterns
+    if name.startswith("test_") or name.endswith("_test.py"):
+        return True
+    if name == "conftest.py":
+        return True
+
+    # Test directories
+    parts = p.parts
+    if "tests" in parts or "test" in parts:
+        return True
+
+    return False
+
+
+def find_python_files(repo_root: Path) -> list[Path]:
+    """Find all Python files in the repository."""
+    files = []
+
+    for path in repo_root.rglob("*.py"):
+        # Skip hidden directories and common excludes
+        parts = path.relative_to(repo_root).parts
+        if any(p.startswith(".") for p in parts):
+            continue
+        if any(
+            p in ("venv", ".venv", "node_modules", "__pycache__", "build", "dist")
+            for p in parts
+        ):
+            continue
+
+        files.append(path)
+
+    return files
+
+
+# =============================================================================
+# Output Builders
+# =============================================================================
+
+
+def build_version_response() -> dict[str, Any]:
+    """Build version --json response."""
+    return {
+        "cli_version": __version__,
+        "schema_version": SCHEMA_VERSION,
+        "python_version": platform.python_version(),
+        "platform": platform.system(),
+    }
+
+
+def build_summary(
+    diagnostics: list[dict[str, Any]],
+    ctx: AnalysisContext,
+) -> dict[str, Any]:
+    """Build the structured summary object."""
+    # Count by severity
+    errors = sum(1 for d in diagnostics if d["severity"] == Severity.ERROR)
+    warnings = sum(1 for d in diagnostics if d["severity"] == Severity.WARNING)
+    info = sum(1 for d in diagnostics if d["severity"] == Severity.INFO)
+
+    # Build by_code breakdown
+    by_code: dict[str, dict[str, Any]] = {}
+    for d in diagnostics:
+        code = d["code"]
+        if code not in by_code:
+            by_code[code] = {"count": 0, "severity": d["severity"]}
+        by_code[code]["count"] += 1
+
+    # Build evidence section (only if we have coverage data)
+    evidence: dict[str, Any] = {}
+    if ctx.coverage_json_path:
+        evidence["coverage_json_path"] = ctx.coverage_json_path
+        if ctx.coverage_json_mtime is not None:
+            evidence["coverage_json_mtime"] = datetime.fromtimestamp(
+                ctx.coverage_json_mtime, tz=timezone.utc
+            ).isoformat()
+        evidence["staleness_threshold_seconds"] = STALENESS_THRESHOLD_SECONDS
+        evidence["stale_file_count"] = ctx.stale_file_count
+
+    summary: dict[str, Any] = {
+        "schema_version": SCHEMA_VERSION,
+        "totals": {
+            "files_analyzed": ctx.files_analyzed,
+            "diagnostics_emitted": len(diagnostics),
+            "coverage_percent": ctx.coverage_percent,
+            "errors": errors,
+            "warnings": warnings,
+            "info": info,
+        },
+        "by_code": by_code,
+        "truncation": {
+            "is_truncated": ctx.dropped_count > 0,
+            "max_total": MAX_TOTAL_DIAGNOSTICS,
+            "max_per_file": MAX_DIAGNOSTICS_PER_FILE,
+            "emitted": len(diagnostics),
+            "dropped": ctx.dropped_count,
+        },
+    }
+
+    # Only include evidence if we have any data
+    if evidence:
+        summary["evidence"] = evidence
+
+    return summary
+
+
+def apply_strict_evidence(
+    diagnostics: list[dict[str, Any]],
+    summary: dict[str, Any],
+) -> None:
+    """Apply --strict-evidence transforms in place.
+
+    Promotes STRICT_EVIDENCE_CODES from warning to error.
+    Updates both diagnostics list and summary to stay consistent.
+    """
+    promoted_count = 0
+
+    # Transform diagnostics
+    for diag in diagnostics:
+        if (
+            diag["code"] in STRICT_EVIDENCE_CODES
+            and diag["severity"] == Severity.WARNING
+        ):
+            diag["severity"] = Severity.ERROR
+            promoted_count += 1
+
+    # Always mark the policy in summary for debuggability
+    summary["policy"] = {"strict_evidence": True}
+
+    if promoted_count == 0:
+        return
+
+    # Update summary totals
+    summary["totals"]["errors"] += promoted_count
+    summary["totals"]["warnings"] -= promoted_count
+
+    # Update by_code severities
+    for code in STRICT_EVIDENCE_CODES:
+        if code in summary["by_code"]:
+            summary["by_code"][code]["severity"] = Severity.ERROR
+
+
+def build_analyze_response(
+    status: str,
+    repo_root: str,
+    diagnostics: list[dict[str, Any]],
+    ctx: AnalysisContext,
+) -> dict[str, Any]:
+    """Build analyze --json response."""
+    return {
+        "schema_version": SCHEMA_VERSION,
+        "status": status,
+        "repo_root": repo_root,
+        "summary": build_summary(diagnostics, ctx),
+        "diagnostics": diagnostics,
+        "timestamp": datetime.now(timezone.utc).isoformat(),
+    }
+
+
+def build_diagnostic(
+    code: str,
+    severity: str,
+    message: str,
+    file: str | None = None,
+    line: int | None = None,
+    column: int | None = None,
+    end_line: int | None = None,
+    end_column: int | None = None,
+    evidence: list[str] | None = None,
+    suggestions: list[str] | None = None,
+) -> dict[str, Any]:
+    """Build a single diagnostic entry."""
+    diag: dict[str, Any] = {
+        "code": code,
+        "severity": severity,
+        "message": message,
+    }
+    if file is not None:
+        diag["file"] = file
+    if line is not None:
+        diag["line"] = line
+    if column is not None:
+        diag["column"] = column
+    if end_line is not None:
+        diag["end_line"] = end_line
+    if end_column is not None:
+        diag["end_column"] = end_column
+    if evidence:
+        diag["evidence"] = evidence
+    if suggestions:
+        diag["suggestions"] = suggestions
+    return diag
+
+
+# =============================================================================
+# Commands
+# =============================================================================
+
+
+def cmd_version(args: argparse.Namespace) -> int:
+    """Handle version command."""
+    if args.json:
+        print(json.dumps(build_version_response(), indent=2))
+    else:
+        print(f"codeclone {__version__}")
+        print(f"Schema version: {SCHEMA_VERSION}")
+        print(f"Python: {platform.python_version()}")
+        print(f"Platform: {platform.system()}")
+    return 0
+
+
+def cmd_analyze(args: argparse.Namespace) -> int:
+    """Handle analyze command."""
+    repo_root = Path(args.repo_root).resolve()
+    repo_root_str = str(repo_root)
+
+    # Check if repo exists
+    if not repo_root.exists():
+        ctx = AnalysisContext()
+        diags = [
+            build_diagnostic(
+                code=DiagnosticCode.REPO_ROOT_NOT_FOUND,
+                severity=Severity.ERROR,
+                message=f"Repository root does not exist: {repo_root_str}",
+                suggestions=["Check the path and try again"],
+            )
+        ]
+        if args.json:
+            response = build_analyze_response(
+                status="FAIL",
+                repo_root=repo_root_str,
+                diagnostics=diags,
+                ctx=ctx,
+            )
+            print(json.dumps(response, indent=2))
+        else:
+            print(
+                f"Error: Repository root does not exist: {repo_root_str}",
+                file=sys.stderr,
+            )
+        return 2
+
+    if not repo_root.is_dir():
+        ctx = AnalysisContext()
+        diags = [
+            build_diagnostic(
+                code=DiagnosticCode.REPO_ROOT_NOT_FOUND,
+                severity=Severity.ERROR,
+                message=f"Repository root is not a directory: {repo_root_str}",
+                suggestions=["Provide a directory path, not a file"],
+            )
+        ]
+        if args.json:
+            response = build_analyze_response(
+                status="FAIL",
+                repo_root=repo_root_str,
+                diagnostics=diags,
+                ctx=ctx,
+            )
+            print(json.dumps(response, indent=2))
+        else:
+            print(
+                f"Error: Repository root is not a directory: {repo_root_str}",
+                file=sys.stderr,
+            )
+        return 2
+
+    # Find Python files
+    python_files = find_python_files(repo_root)
+    files_analyzed = len(python_files)
+
+    # Run analysis
+    report, diagnostics, ctx = analyze_coverage(repo_root, files_analyzed)
+
+    # Build summary (natural severities first)
+    summary = build_summary(diagnostics, ctx)
+
+    # Apply strict-evidence transform if requested
+    if args.strict_evidence:
+        apply_strict_evidence(diagnostics, summary)
+
+    # Determine status from (potentially transformed) diagnostics
+    has_errors = any(d["severity"] == Severity.ERROR for d in diagnostics)
+    has_warnings = any(d["severity"] == Severity.WARNING for d in diagnostics)
+
+    if has_errors:
+        status = "FAIL"
+        exit_code = 2
+    elif has_warnings:
+        status = "PARTIAL"
+        exit_code = 1
+    else:
+        status = "OK"
+        exit_code = 0
+
+    if args.json:
+        response = {
+            "schema_version": SCHEMA_VERSION,
+            "status": status,
+            "repo_root": repo_root_str,
+            "summary": summary,
+            "diagnostics": diagnostics,
+            "timestamp": datetime.now(timezone.utc).isoformat(),
+        }
+        print(json.dumps(response, indent=2))
+    else:
+        print(f"Analyzing: {repo_root_str}")
+        print(f"Files analyzed: {ctx.files_analyzed}")
+        if ctx.coverage_percent is not None:
+            print(f"Coverage: {ctx.coverage_percent:.1f}%")
+        print(f"Status: {status}")
+        print(f"Diagnostics: {len(diagnostics)}")
+
+        if diagnostics:
+            print("\nFindings:")
+            for d in diagnostics[:10]:  # Limit human output
+                sev = d["severity"].upper()
+                code = d["code"]
+                msg = d["message"]
+                print(f"  [{sev}] {code}: {msg}")
+            if len(diagnostics) > 10:
+                print(f"  ... and {len(diagnostics) - 10} more")
+
+        # Human-friendly summary footer (uses pre-built summary)
+        totals = summary["totals"]
+        trunc = summary["truncation"]
+        print(
+            f"\nErrors: {totals['errors']}, Warnings: {totals['warnings']}, Info: {totals['info']}",
+            end="",
+        )
+        if trunc["is_truncated"]:
+            print(f", Dropped: {trunc['dropped']}")
+        else:
+            print()
+
+    return exit_code
+
+
+# =============================================================================
+# CLI Entry Point
+# =============================================================================
+
+
+def create_parser() -> argparse.ArgumentParser:
+    """Create argument parser."""
+    parser = argparse.ArgumentParser(
+        prog="codeclone",
+        description="CodeClone - Deterministic code coverage analysis",
+    )
+    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+    # version command
+    version_parser = subparsers.add_parser("version", help="Show version information")
+    version_parser.add_argument(
+        "--json",
+        action="store_true",
+        help="Output as JSON",
+    )
+
+    # analyze command
+    analyze_parser = subparsers.add_parser("analyze", help="Analyze a repository")
+    analyze_parser.add_argument(
+        "repo_root",
+        help="Path to repository root",
+    )
+    analyze_parser.add_argument(
+        "--json",
+        action="store_true",
+        help="Output as JSON (required for editor integration)",
+    )
+    analyze_parser.add_argument(
+        "--strict-evidence",
+        action="store_true",
+        help="Promote evidence freshness warnings (COVERAGE_FILE_STALE) to errors; exit 2 when coverage is stale",
+    )
+
+    return parser
+
+
+def main() -> int:
+    """Main entry point."""
+    parser = create_parser()
+    args = parser.parse_args()
+
+    if args.command is None:
+        parser.print_help()
+        return 1
+
+    if args.command == "version":
+        return cmd_version(args)
+    elif args.command == "analyze":
+        return cmd_analyze(args)
+    else:
+        parser.print_help()
+        return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
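The analysis above is driven entirely by the coverage.py JSON format documented in `parse_coverage_json`. A self-contained sketch exercising the parser and the range grouping; `app.py` and the line numbers are invented for illustration, and `codeclone_cli.py` is assumed to be importable:

```python
import json
import tempfile
from pathlib import Path

from codeclone_cli import group_line_ranges, parse_coverage_json

# Synthetic repo with a coverage.json in the documented coverage.py shape.
repo = Path(tempfile.mkdtemp())
(repo / "coverage.json").write_text(json.dumps({
    "files": {
        "app.py": {
            "executed_lines": [1, 2, 3],
            "missing_lines": [4, 5, 9],
            "excluded_lines": [],
            "summary": {"num_statements": 6, "covered_lines": 3},
        }
    }
}), encoding="utf-8")

report, error = parse_coverage_json(repo / "coverage.json", repo)
assert error is None
print(f"{report.coverage_percent:.1f}%")   # 50.0%

# Consecutive missing lines collapse into the ranges used by
# LINE_UNCOVERED diagnostics.
print(group_line_ranges([4, 5, 9]))        # [(4, 5), (9, 9)]
```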
--- /dev/null
+++ codeclone_cli-1.0.0/pyproject.toml
@@ -0,0 +1,55 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "codeclone-cli"
+version = "1.0.0"
+description = "CodeClone CLI - Deterministic code coverage analysis"
+readme = "README.md"
+license = "MIT"
+requires-python = ">=3.10"
+authors = [
+    { name = "mcp-tool-shop", email = "64996768+mcp-tool-shop@users.noreply.github.com" }
+]
+keywords = ["coverage", "testing", "analysis", "cli"]
+classifiers = [
+    "Development Status :: 5 - Production/Stable",
+    "Environment :: Console",
+    "Intended Audience :: Developers",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Software Development :: Testing",
+    "Topic :: Software Development :: Quality Assurance",
+]
+
+dependencies = []
+
+[project.optional-dependencies]
+dev = [
+    "pytest>=7.0",
+    "pytest-cov>=4.0",
+    "jsonschema>=4.0",
+    "mypy>=1.0",
+    "ruff>=0.4",
+]
+
+[project.scripts]
+codeclone = "codeclone_cli:main"
+
+[project.urls]
+Homepage = "https://github.com/mcp-tool-shop-org/codeclone-suite"
+Repository = "https://github.com/mcp-tool-shop-org/codeclone-suite"
+Documentation = "https://github.com/mcp-tool-shop-org/codeclone-suite/blob/main/docs/EDITOR_INTEGRATION.md"
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+python_files = ["test_*.py"]
+python_functions = ["test_*"]
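One detail worth calling out from the source: `apply_strict_evidence` promotes `COVERAGE_FILE_STALE` warnings to errors and keeps the summary's totals, `by_code` entries, and `policy` marker consistent, which is what makes `analyze --strict-evidence` exit 2 on stale coverage. A hedged sketch of that behavior, again assuming `codeclone_cli.py` is importable as a top-level module (the wheel layout above packages it that way):

```python
from codeclone_cli import (
    AnalysisContext,
    DiagnosticCode,
    Severity,
    apply_strict_evidence,
    build_diagnostic,
    build_summary,
)

# One stale-coverage warning, shaped like check_coverage_staleness emits it.
diags = [
    build_diagnostic(
        code=DiagnosticCode.COVERAGE_FILE_STALE,
        severity=Severity.WARNING,
        message="Coverage data may be stale (2h behind source changes)",
    )
]
summary = build_summary(diags, AnalysisContext(files_analyzed=1))

apply_strict_evidence(diags, summary)

assert diags[0]["severity"] == Severity.ERROR            # warning promoted
assert summary["policy"] == {"strict_evidence": True}    # policy recorded
assert summary["totals"]["errors"] == 1                  # totals kept in sync
assert summary["by_code"][DiagnosticCode.COVERAGE_FILE_STALE]["severity"] == Severity.ERROR
```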