python-doctor 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- python_doctor/__init__.py +3 -0
- python_doctor/analyzers/__init__.py +1 -0
- python_doctor/analyzers/bandit_analyzer.py +62 -0
- python_doctor/analyzers/complexity.py +49 -0
- python_doctor/analyzers/dependency_analyzer.py +87 -0
- python_doctor/analyzers/docstring_analyzer.py +91 -0
- python_doctor/analyzers/exceptions_analyzer.py +72 -0
- python_doctor/analyzers/imports_analyzer.py +104 -0
- python_doctor/analyzers/ruff_analyzer.py +53 -0
- python_doctor/analyzers/structure.py +269 -0
- python_doctor/analyzers/vulture_analyzer.py +41 -0
- python_doctor/cli.py +149 -0
- python_doctor/py.typed +0 -0
- python_doctor/rules.py +76 -0
- python_doctor/scorer.py +27 -0
- python_doctor-0.1.0.dist-info/METADATA +191 -0
- python_doctor-0.1.0.dist-info/RECORD +20 -0
- python_doctor-0.1.0.dist-info/WHEEL +4 -0
- python_doctor-0.1.0.dist-info/entry_points.txt +2 -0
- python_doctor-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Analyzers for Python Doctor."""
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""Bandit security analyzer."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import subprocess # nosec B404 — required for running CLI tools
|
|
6
|
+
|
|
7
|
+
from ..rules import BANDIT_SEVERITY_COST, CATEGORIES, AnalyzerResult, Finding
|
|
8
|
+
|
|
9
|
+
_EXCLUDE_DIRS = [".venv", "venv", "node_modules", "__pycache__", ".git", ".tox", ".mypy_cache", ".ruff_cache"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _is_test_file(filepath: str) -> bool:
    """Return True when *filepath* lives in a test directory or follows test naming."""
    components = os.path.normpath(filepath).split(os.sep)
    in_test_dir = any(component in {"tests", "test"} for component in components)
    leaf = os.path.basename(filepath)
    return in_test_dir or leaf.startswith("test_") or leaf.endswith("_test.py")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Run bandit over the project tree and convert its JSON report into findings.

    Returns an AnalyzerResult with `error` set (and no findings) when bandit
    is missing or fails; otherwise one Finding per reported issue, capped at
    the security category's max deduction.
    """
    result = AnalyzerResult(category="security")
    cap = CATEGORIES["security"]["max_deduction"]
    root = os.path.abspath(path)
    excluded = ",".join(os.path.join(root, d) for d in _EXCLUDE_DIRS)

    try:
        proc = subprocess.run(  # nosec B603 B607 — intentional subprocess call to bandit CLI tool
            ["bandit", "-r", "-f", "json", "-q", "--exclude", excluded, root],
            capture_output=True, text=True, timeout=120,
        )
        raw = proc.stdout.strip()
        data = json.loads(raw) if raw else {}
        issues = data.get("results", [])
    except FileNotFoundError:
        result.error = "bandit not found (skipped)"
        return result
    except Exception as exc:  # any other tool failure degrades gracefully
        result.error = str(exc)
        return result

    for issue in issues:
        issue_id = issue.get("test_id", "?")
        filename = issue.get("filename", "")
        # asserts are expected inside tests — don't penalize B101 there
        if issue_id == "B101" and _is_test_file(filename):
            continue
        severity = issue.get("issue_severity", "LOW").upper()
        result.findings.append(Finding(
            category="security",
            rule=f"bandit/{issue_id}",
            message=issue.get("issue_text", ""),
            file=filename,
            line=issue.get("line_number", 0),
            severity=severity.lower(),
            cost=BANDIT_SEVERITY_COST.get(severity, 1),
        ))

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
"""Radon cyclomatic complexity analyzer."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess # nosec B404 — required for running CLI tools
|
|
5
|
+
|
|
6
|
+
from ..rules import CATEGORIES, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Score cyclomatic complexity via the radon CLI (JSON output).

    Functions with CC > 20 cost 5 points, CC > 10 cost 2 points; anything
    simpler is ignored. The total is capped at the category maximum.
    """
    result = AnalyzerResult(category="complexity")
    cap = CATEGORIES["complexity"]["max_deduction"]

    cmd = [
        "radon", "cc", "-j", "-n", "C",
        "-e", ".venv/*,node_modules/*,__pycache__/*,.git/*,.tox/*",
        path,
    ]
    try:
        proc = subprocess.run(  # nosec B603 B607 — intentional subprocess call to radon CLI tool
            cmd, capture_output=True, text=True, timeout=120,
        )
        data = json.loads(proc.stdout) if proc.stdout.strip() else {}
    except FileNotFoundError:
        result.error = "radon not found (skipped)"
        return result
    except Exception as exc:
        result.error = str(exc)
        return result

    for filename, blocks in data.items():
        for entry in blocks:
            cc = entry.get("complexity", 0)
            # Only genuinely complex functions are penalized.
            if cc <= 10:
                continue
            result.findings.append(Finding(
                category="complexity",
                rule=f"radon/CC{cc}",
                message=f"Function '{entry.get('name', '?')}' has complexity {cc}",
                file=filename,
                line=entry.get("lineno", 0),
                cost=5 if cc > 20 else 2,
            ))

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
"""Dependency hygiene analyzer."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import subprocess # nosec B404 — required for running CLI tools
|
|
6
|
+
|
|
7
|
+
from ..rules import (
|
|
8
|
+
CATEGORIES,
|
|
9
|
+
MIXED_BUILD_SYSTEM_COST,
|
|
10
|
+
NO_BUILD_FILE_COST,
|
|
11
|
+
VULNERABLE_DEP_COST,
|
|
12
|
+
AnalyzerResult,
|
|
13
|
+
Finding,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _check_build_files(path: str) -> tuple[bool, bool, bool, bool]:
    """Report presence of (pyproject.toml, setup.py, setup.cfg, requirements.txt)."""
    def present(name: str) -> bool:
        return os.path.isfile(os.path.join(path, name))

    return (
        present("pyproject.toml"),
        present("setup.py"),
        present("setup.cfg"),
        present("requirements.txt"),
    )
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _check_build_system(path: str, result: AnalyzerResult) -> None:
    """Flag a missing build definition and pyproject/requirements duplication."""
    pyproject, setup_py, setup_cfg, requirements = _check_build_files(path)

    if not (pyproject or setup_py or setup_cfg):
        result.findings.append(Finding(
            category="dependencies", rule="deps/no-build-file",
            message="No pyproject.toml, setup.py, or setup.cfg found",
            cost=NO_BUILD_FILE_COST,
        ))

    # Having both usually means dependency metadata is duplicated.
    if pyproject and requirements:
        result.findings.append(Finding(
            category="dependencies", rule="deps/mixed-build",
            message="Both pyproject.toml and requirements.txt found (consider consolidating)",
            cost=MIXED_BUILD_SYSTEM_COST,
        ))
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _run_pip_audit(path: str) -> list:
    """Invoke pip-audit with JSON output and return its dependency entries.

    Raises FileNotFoundError / TimeoutExpired / JSONDecodeError to the caller,
    which treats them all as "tool unavailable".
    """
    proc = subprocess.run(  # nosec B603 B607 — intentional subprocess call to pip-audit CLI tool
        ["pip-audit", "--format", "json", "--path", path],
        capture_output=True, text=True, timeout=120,
    )
    output = proc.stdout.strip()
    parsed = json.loads(output) if output else []
    # Newer pip-audit versions wrap results in {"dependencies": [...]}.
    if isinstance(parsed, dict):
        return parsed.get("dependencies", [])
    return parsed
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _check_vulnerabilities(path: str, result: AnalyzerResult) -> None:
    """Record known-vulnerable dependencies reported by pip-audit."""
    try:
        dependencies = _run_pip_audit(path)
    except (FileNotFoundError, subprocess.TimeoutExpired, json.JSONDecodeError, OSError):
        # pip-audit missing or broken is not a project defect — skip silently.
        return

    for dep in dependencies:
        if not isinstance(dep, dict):
            continue
        for advisory in dep.get("vulns") or []:
            advisory_id = advisory.get("id", "?") if isinstance(advisory, dict) else str(advisory)
            result.findings.append(Finding(
                category="dependencies", rule=f"deps/vuln/{advisory_id}",
                message=f"Vulnerable: {dep.get('name', '?')} {dep.get('version', '')}",
                cost=VULNERABLE_DEP_COST,
            ))
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Score dependency hygiene: build metadata presence plus known CVEs."""
    result = AnalyzerResult(category="dependencies")
    cap = CATEGORIES["dependencies"]["max_deduction"]

    for check in (_check_build_system, _check_vulnerabilities):
        check(path, result)

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""Docstring coverage analyzer."""
|
|
2
|
+
|
|
3
|
+
import ast
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
from ..rules import CATEGORIES, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
_SKIP_DIRS = {"__pycache__", ".git", "node_modules", ".venv", "venv", ".tox", ".mypy_cache", ".ruff_cache"}
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _is_test_file(filepath: str) -> bool:
    """Return True for files under a tests/ directory or named like tests."""
    pieces = os.path.normpath(filepath).split(os.sep)
    name = os.path.basename(filepath)
    return (
        any(piece in {"tests", "test"} for piece in pieces)
        or name.startswith("test_")
        or name.endswith("_test.py")
    )
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _check_file(filepath: str) -> tuple[int, int]:
    """Return (total_public, documented) counts for top-level public defs.

    Counts module-level functions, async functions, and classes whose names
    do not start with an underscore. "Documented" means a real docstring as
    determined by ast.get_docstring — this replaces the previous check using
    the deprecated (now removed) ast.Str alias, which also wrongly counted
    any leading constant (e.g. a bare number) as documentation.
    """
    try:
        with open(filepath, "r", errors="ignore") as f:
            tree = ast.parse(f.read(), filename=filepath)
    except (SyntaxError, OSError):
        # Unreadable or unparseable files contribute nothing to coverage.
        return 0, 0

    total = 0
    documented = 0
    for node in ast.iter_child_nodes(tree):
        if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            continue
        if node.name.startswith("_"):
            continue  # private API is exempt from the docstring requirement
        total += 1
        if ast.get_docstring(node) is not None:
            documented += 1
    return total, documented
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _collect_coverage(path: str) -> tuple[int, int]:
    """Aggregate (public, documented) docstring counts over the whole project."""
    public = 0
    documented = 0
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
        for name in files:
            if not name.endswith(".py"):
                continue
            filepath = os.path.join(root, name)
            if _is_test_file(filepath):
                continue  # tests are exempt from docstring coverage
            # Tiny __init__.py files are just package markers — skip them.
            if name == "__init__.py" and os.path.getsize(filepath) < 10:
                continue
            file_public, file_documented = _check_file(filepath)
            public += file_public
            documented += file_documented
    return public, documented
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _compute_deduction(ratio: float) -> tuple[int, int]:
    """Map a docstring-coverage ratio in [0, 1] to (cost, percentage)."""
    pct = int(ratio * 100)
    if ratio >= 0.50:
        return 0, pct
    # Below half coverage: moderate penalty; below a quarter: heavy penalty.
    return (5, pct) if ratio >= 0.25 else (10, pct)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Score docstring coverage of public functions and classes."""
    result = AnalyzerResult(category="docs")
    cap = CATEGORIES["docs"]["max_deduction"]

    public, documented = _collect_coverage(path)
    if public == 0:
        return result  # nothing public to document

    cost, pct = _compute_deduction(documented / public)
    if cost:
        result.findings.append(Finding(
            category="docs", rule="docs/low-coverage",
            message=f"Only {pct}% of public functions/classes have docstrings",
            cost=cost,
        ))

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Exception handling analyzer."""
|
|
2
|
+
|
|
3
|
+
import ast
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
from ..rules import BARE_EXCEPT_COST, CATEGORIES, SILENT_EXCEPTION_COST, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
_SKIP_DIRS = {"__pycache__", ".git", "node_modules", ".venv", "venv", ".tox", ".mypy_cache", ".ruff_cache"}
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _is_test_file(filepath: str) -> bool:
    """Return True when the path refers to test code."""
    segments = os.path.normpath(filepath).split(os.sep)
    if "tests" in segments or "test" in segments:
        return True
    leaf = os.path.basename(filepath)
    return leaf.startswith("test_") or leaf.endswith("_test.py")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _check_bare_except(node: ast.ExceptHandler, fp: str, result: AnalyzerResult) -> None:
    """Record a finding for an untyped `except:` handler."""
    if node.type is not None:
        return
    result.findings.append(Finding(
        category="exceptions", rule="exceptions/bare",
        message="Bare except: without exception type",
        file=fp, line=node.lineno, cost=BARE_EXCEPT_COST,
    ))
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _check_silent_swallow(node: ast.ExceptHandler, fp: str, result: AnalyzerResult) -> None:
    """Flag handlers that swallow broad exceptions without doing anything.

    Matches `except Exception:` or `except BaseException:` whose body is a
    lone `pass` or a bare `...` expression — all equally silent. (Previously
    only the literal `except Exception: pass` form was detected, so the
    `...`-body and BaseException variants slipped through.)
    """
    if not (isinstance(node.type, ast.Name) and node.type.id in ("Exception", "BaseException")):
        return
    if len(node.body) != 1:
        return
    stmt = node.body[0]
    # `pass` and a bare `...` expression both discard the error.
    is_noop = isinstance(stmt, ast.Pass) or (
        isinstance(stmt, ast.Expr)
        and isinstance(stmt.value, ast.Constant)
        and stmt.value.value is ...
    )
    if is_noop:
        result.findings.append(Finding(
            category="exceptions", rule="exceptions/silent",
            message="except Exception: pass (silently swallowed)",
            file=fp, line=node.lineno, cost=SILENT_EXCEPTION_COST,
        ))
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _check_file(fp: str, result: AnalyzerResult) -> None:
    """Run both exception checks over every handler in one file."""
    try:
        with open(fp, "r", errors="ignore") as fh:
            tree = ast.parse(fh.read(), filename=fp)
    except (SyntaxError, OSError):
        return  # unreadable or unparseable files are skipped

    handlers = (n for n in ast.walk(tree) if isinstance(n, ast.ExceptHandler))
    for handler in handlers:
        _check_bare_except(handler, fp, result)
        _check_silent_swallow(handler, fp, result)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Score exception-handling hygiene across all non-test Python files."""
    result = AnalyzerResult(category="exceptions")
    cap = CATEGORIES["exceptions"]["max_deduction"]

    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
        for name in files:
            if not name.endswith(".py"):
                continue
            fp = os.path.join(root, name)
            if not _is_test_file(fp):
                _check_file(fp, result)

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Import hygiene analyzer."""
|
|
2
|
+
|
|
3
|
+
import ast
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
from ..rules import CATEGORIES, CIRCULAR_IMPORT_COST, STAR_IMPORT_COST, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
_SKIP_DIRS = {"__pycache__", ".git", "node_modules", ".venv", "venv", ".tox", ".mypy_cache", ".ruff_cache"}
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _get_module_name(filepath: str, base_path: str) -> str | None:
|
|
12
|
+
"""Convert filepath to dotted module name relative to base."""
|
|
13
|
+
rel = os.path.relpath(filepath, base_path)
|
|
14
|
+
if rel.endswith(".py"):
|
|
15
|
+
rel = rel[:-3]
|
|
16
|
+
parts = rel.replace(os.sep, ".").split(".")
|
|
17
|
+
if parts[-1] == "__init__":
|
|
18
|
+
parts = parts[:-1]
|
|
19
|
+
return ".".join(parts) if parts else None
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _collect_py_files(path: str) -> list[str]:
    """Gather every .py file under *path*, pruning vendored/cache directories."""
    found: list[str] = []
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
        found.extend(os.path.join(root, name) for name in files if name.endswith(".py"))
    return found
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _check_star_imports(node: ast.ImportFrom, fp: str, result: AnalyzerResult) -> None:
    """Record a finding for each `from X import *` in this statement."""
    for alias in node.names or []:
        if alias.name != "*":
            continue
        result.findings.append(Finding(
            category="imports", rule="imports/star",
            message=f"from {node.module or '?'} import *",
            file=fp, line=node.lineno, cost=STAR_IMPORT_COST,
        ))
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _process_file_imports(
    fp: str, path: str, imports_graph: dict[str, set[str]], result: AnalyzerResult
) -> None:
    """Parse one file, flag star imports, and record its edges in the graph.

    Relative imports (`from . import x`, `from ..pkg import y`) are resolved
    against the file's own dotted module name so that intra-package cycles
    are visible to the circular-import check. Previously they were dropped
    (node.module is None for `from . import x`) or recorded unresolved, so
    package-local circular imports went undetected.
    """
    try:
        with open(fp, "r", errors="ignore") as f:
            tree = ast.parse(f.read(), filename=fp)
    except (SyntaxError, OSError):
        return

    mod_name = _get_module_name(fp, path)
    if mod_name:
        imports_graph.setdefault(mod_name, set())
    is_package = os.path.basename(fp) == "__init__.py"

    def _resolve(node: ast.ImportFrom) -> str | None:
        """Return the absolute dotted target of an ImportFrom, or None."""
        if node.level == 0:
            return node.module
        if not mod_name:
            return None
        parts = mod_name.split(".")
        # For a plain module, level 1 is its parent package; for a package's
        # __init__.py, level 1 is the package itself.
        drop = node.level - 1 if is_package else node.level
        if drop > len(parts):
            return None  # relative import escapes the project root
        base = parts[: len(parts) - drop]
        if node.module:
            base = base + node.module.split(".")
        return ".".join(base) or None

    for node in ast.walk(tree):
        if isinstance(node, ast.ImportFrom):
            _check_star_imports(node, fp, result)
            target = _resolve(node)
            if mod_name and target:
                imports_graph.setdefault(mod_name, set()).add(target)
        elif isinstance(node, ast.Import):
            for alias in node.names:
                if mod_name:
                    imports_graph.setdefault(mod_name, set()).add(alias.name)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def _build_import_graph(py_files: list[str], path: str, result: AnalyzerResult) -> dict[str, set[str]]:
    """Build the module -> imported-modules adjacency map for the project."""
    graph: dict[str, set[str]] = {}
    for filepath in py_files:
        _process_file_imports(filepath, path, graph, result)
    return graph
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _detect_circular_imports(imports_graph: dict[str, set[str]], result: AnalyzerResult) -> None:
    """Report each mutual (A->B and B->A) import pair exactly once."""
    reported: set[tuple[str, str]] = set()
    for module, deps in imports_graph.items():
        for dep in deps:
            back_edges = imports_graph.get(dep)
            if not back_edges or module not in back_edges:
                continue
            pair = tuple(sorted((module, dep)))
            if pair in reported:
                continue
            reported.add(pair)
            result.findings.append(Finding(
                category="imports", rule="imports/circular",
                message=f"Circular import: {pair[0]} <-> {pair[1]}",
                cost=CIRCULAR_IMPORT_COST,
            ))
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Score import hygiene: wildcard imports and two-module import cycles."""
    result = AnalyzerResult(category="imports")
    cap = CATEGORIES["imports"]["max_deduction"]

    graph = _build_import_graph(_collect_py_files(path), path, result)
    _detect_circular_imports(graph, result)

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""Ruff linter analyzer."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess # nosec B404 — required for running CLI tools
|
|
5
|
+
|
|
6
|
+
from ..rules import CATEGORIES, RUFF_ERROR_COST, RUFF_WARNING_COST, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def analyze(path: str, fix: bool = False) -> AnalyzerResult:
    """Run ruff (optionally with --fix first) and convert its JSON to findings.

    Args:
        path: Project directory to lint.
        fix: When True, run `ruff check --fix` before collecting diagnostics.

    Sets `result.error` (with no findings) when ruff is missing or fails.
    """
    result = AnalyzerResult(category="lint")
    max_ded = CATEGORIES["lint"]["max_deduction"]
    exclude = ".venv,node_modules,__pycache__,.git,.tox"

    try:
        if fix:
            subprocess.run(  # nosec B603 B607 — intentional subprocess call to ruff CLI tool
                ["ruff", "check", "--fix", "--exclude", exclude, path],
                capture_output=True, text=True, timeout=120,
            )
        proc = subprocess.run(  # nosec B603 B607 — intentional subprocess call to ruff CLI tool
            ["ruff", "check", "--output-format", "json", "--exclude", exclude, path],
            capture_output=True, text=True, timeout=120,
        )
        items = json.loads(proc.stdout) if proc.stdout.strip() else []
    except FileNotFoundError:
        result.error = "ruff not found (skipped)"
        return result
    except Exception as e:
        result.error = str(e)
        return result

    for item in items:
        # Ruff emits "code": null for syntax errors — guard before startswith,
        # which previously raised AttributeError on None.
        code = item.get("code") or "?"
        msg = item.get("message", "")
        filename = item.get("filename", "")
        line = item.get("location", {}).get("row", 0)
        # W* (warning) and D* (docstring) rules are soft; everything else is an error.
        is_warning = code.startswith(("W", "D"))
        cost = RUFF_WARNING_COST if is_warning else RUFF_ERROR_COST

        result.findings.append(Finding(
            category="lint", rule=f"ruff/{code}", message=msg,
            file=filename, line=line,
            severity="warning" if is_warning else "error", cost=cost,
        ))

    result.deduction = min(sum(f.cost for f in result.findings), max_ded)
    return result
|
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
"""Structure analyzer: file sizes, tests, type hints, project health."""
|
|
2
|
+
|
|
3
|
+
import ast
|
|
4
|
+
import os
|
|
5
|
+
|
|
6
|
+
from ..rules import (
|
|
7
|
+
CATEGORIES,
|
|
8
|
+
LARGE_FILE_COST,
|
|
9
|
+
LARGE_FILE_THRESHOLD,
|
|
10
|
+
LOW_TEST_RATIO_COST,
|
|
11
|
+
LOW_TYPE_HINTS_COST,
|
|
12
|
+
NO_GITIGNORE_COST,
|
|
13
|
+
NO_LICENSE_COST,
|
|
14
|
+
NO_LINTER_CONFIG_COST,
|
|
15
|
+
NO_PY_TYPED_COST,
|
|
16
|
+
NO_README_COST,
|
|
17
|
+
NO_TESTS_COST,
|
|
18
|
+
NO_TYPE_CHECKER_COST,
|
|
19
|
+
TYPE_HINT_THRESHOLD,
|
|
20
|
+
VERY_LOW_TEST_RATIO_COST,
|
|
21
|
+
AnalyzerResult,
|
|
22
|
+
Finding,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
_SKIP_DIRS = {"__pycache__", ".git", "node_modules", ".venv", "venv", ".tox", ".mypy_cache", ".ruff_cache"}
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _count_lines(filepath: str) -> int:
    """Return the number of physical lines in *filepath* (0 on I/O failure)."""
    try:
        with open(filepath, "r", errors="ignore") as fh:
            total = 0
            for _ in fh:
                total += 1
            return total
    except OSError:
        return 0
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _count_code_lines(filepath: str) -> int:
    """Count lines that are neither blank nor pure `#` comments (0 on I/O failure)."""
    try:
        with open(filepath, "r", errors="ignore") as fh:
            return sum(
                1
                for raw in fh
                if (stripped := raw.strip()) and not stripped.startswith("#")
            )
    except OSError:
        return 0
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _has_type_hints(filepath: str) -> bool:
    """Return True if the file contains at least one type annotation.

    Looks at function return annotations, annotations on every parameter kind
    (positional-only, positional-or-keyword, keyword-only, *args, **kwargs),
    and annotated assignments. Positional-only and vararg/kwarg annotations
    were previously ignored, under-counting annotated files. Unreadable or
    unparseable files count as unannotated.
    """
    try:
        with open(filepath, "r", errors="ignore") as f:
            tree = ast.parse(f.read(), filename=filepath)
    except (SyntaxError, OSError):
        return False

    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            if node.returns is not None:
                return True
            args = node.args
            params = args.posonlyargs + args.args + args.kwonlyargs
            for extra in (args.vararg, args.kwarg):
                if extra is not None:
                    params.append(extra)
            if any(p.annotation is not None for p in params):
                return True
        elif isinstance(node, ast.AnnAssign):
            return True
    return False
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _is_test_file(filepath: str) -> bool:
    """Heuristic: is this path a test module?"""
    if {"tests", "test"} & set(os.path.normpath(filepath).split(os.sep)):
        return True
    filename = os.path.basename(filepath)
    return filename.startswith("test_") or filename.endswith("_test.py")
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _collect_py_files(path: str) -> tuple[list[str], list[str], list[str], bool]:
    """Split the project's .py files into (all, tests, sources) plus a tests flag."""
    all_files: list[str] = []
    tests: list[str] = []
    sources: list[str] = []
    found_tests = False
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
        # A tests/ directory counts even before we descend into it.
        if "tests" in dirs or "test" in dirs:
            found_tests = True
        for name in files:
            if not name.endswith(".py"):
                continue
            fp = os.path.join(root, name)
            all_files.append(fp)
            if _is_test_file(fp):
                found_tests = True
                tests.append(fp)
            else:
                sources.append(fp)
    return all_files, tests, sources, found_tests
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _check_large_files(py_files: list[str], result: AnalyzerResult) -> None:
    """Flag every file whose line count exceeds LARGE_FILE_THRESHOLD."""
    for fp in py_files:
        line_count = _count_lines(fp)
        if line_count <= LARGE_FILE_THRESHOLD:
            continue
        result.findings.append(Finding(
            category="structure", rule="structure/large-file",
            message=f"{line_count} lines (consider splitting)",
            file=fp, cost=LARGE_FILE_COST,
        ))
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _check_tests(has_tests: bool, test_files: list[str], source_files: list[str], result: AnalyzerResult) -> None:
    """Penalize a missing test suite or a thin test-to-source ratio."""
    if not has_tests:
        result.findings.append(Finding(
            category="structure", rule="structure/no-tests",
            message="No tests directory or test files found",
            cost=NO_TESTS_COST,
        ))
        return

    source_lines = sum(_count_code_lines(f) for f in source_files)
    if source_lines <= 0:
        return  # nothing to compare against
    test_lines = sum(_count_code_lines(f) for f in test_files)

    ratio = test_lines / source_lines
    if ratio >= 0.3:
        return
    severe = ratio < 0.1
    result.findings.append(Finding(
        category="structure", rule="structure/low-test-ratio",
        message=(
            f"Test-to-source ratio is {ratio:.2f} (very low, <0.1)"
            if severe
            else f"Test-to-source ratio is {ratio:.2f} (low, <0.3)"
        ),
        cost=VERY_LOW_TEST_RATIO_COST if severe else LOW_TEST_RATIO_COST,
    ))
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _check_type_hints(py_files: list[str], result: AnalyzerResult) -> bool:
    """Flag sparse type-hint coverage; return whether any file is hinted."""
    hinted_count = sum(1 for fp in py_files if _has_type_hints(fp))
    coverage = hinted_count / len(py_files) if py_files else 1
    if coverage < TYPE_HINT_THRESHOLD:
        missing_pct = int((1 - coverage) * 100)
        result.findings.append(Finding(
            category="structure", rule="structure/type-hints",
            message=f"No type hints found in {missing_pct}% of files",
            cost=LOW_TYPE_HINTS_COST,
        ))
    return hinted_count > 0
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def _check_readme(path: str, result: AnalyzerResult) -> None:
    """Flag a project without any README variant at its root."""
    candidates = ("README.md", "README.rst", "README")
    if any(os.path.isfile(os.path.join(path, name)) for name in candidates):
        return
    result.findings.append(Finding(
        category="structure", rule="structure/no-readme",
        message="No README found", cost=NO_README_COST,
    ))
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _check_license(path: str, result: AnalyzerResult) -> None:
    """Flag a project without any LICENSE/LICENCE variant at its root."""
    candidates = ("LICENSE", "LICENSE.md", "LICENSE.txt", "LICENCE")
    if any(os.path.isfile(os.path.join(path, name)) for name in candidates):
        return
    result.findings.append(Finding(
        category="structure", rule="structure/no-license",
        message="No LICENSE file found", cost=NO_LICENSE_COST,
    ))
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def _check_gitignore(path: str, result: AnalyzerResult) -> None:
    """Flag a project that has no .gitignore at its root."""
    if os.path.isfile(os.path.join(path, ".gitignore")):
        return
    result.findings.append(Finding(
        category="structure", rule="structure/no-gitignore",
        message="No .gitignore found", cost=NO_GITIGNORE_COST,
    ))
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _check_linter_config(path: str, result: AnalyzerResult) -> None:
    """Flag projects with no discoverable linter configuration.

    Recognizes ruff.toml / .ruff.toml / .flake8 marker files, a [tool.ruff]
    table (including [tool.ruff.*] subtables) in pyproject.toml, and a
    [flake8] section in setup.cfg. The .ruff.toml and .flake8 files, and
    projects configuring only a [tool.ruff.lint] subtable, were previously
    missed and caused false positives.
    """
    for marker in ("ruff.toml", ".ruff.toml", ".flake8"):
        if os.path.isfile(os.path.join(path, marker)):
            return
    # "[tool.ruff" (no closing bracket) also matches subtables like
    # [tool.ruff.lint], which are valid ruff configuration on their own.
    if _config_contains(os.path.join(path, "pyproject.toml"), "[tool.ruff"):
        return
    if _config_contains(os.path.join(path, "setup.cfg"), "[flake8]"):
        return
    result.findings.append(Finding(
        category="structure", rule="structure/no-linter-config",
        message="No linter configuration found", cost=NO_LINTER_CONFIG_COST,
    ))


def _config_contains(filepath: str, needle: str) -> bool:
    """Best-effort check that *filepath* exists and contains *needle*."""
    if not os.path.isfile(filepath):
        return False
    try:
        with open(filepath) as f:
            return needle in f.read()
    except OSError:
        return False
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _check_type_checker_config(path: str, result: AnalyzerResult) -> None:
    """Flag projects with no discoverable type checker configuration.

    Recognizes mypy.ini / pyrightconfig.json / .mypy.ini marker files and a
    [tool.mypy] or [tool.pyright] table in pyproject.toml. ([tool.pyright]
    was previously not checked, so pyright-configured projects were wrongly
    flagged.)
    """
    markers = ("mypy.ini", "pyrightconfig.json", ".mypy.ini")
    if any(os.path.isfile(os.path.join(path, m)) for m in markers):
        return
    pyproject = os.path.join(path, "pyproject.toml")
    if os.path.isfile(pyproject):
        try:
            with open(pyproject) as f:
                content = f.read()
            if "[tool.mypy]" in content or "[tool.pyright]" in content:
                return
        except OSError:
            pass  # unreadable pyproject — fall through to the finding
    result.findings.append(Finding(
        category="structure", rule="structure/no-type-checker",
        message="No type checker configuration found", cost=NO_TYPE_CHECKER_COST,
    ))
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def _check_py_typed(path: str, uses_type_hints: bool, result: AnalyzerResult) -> None:
    """Flag projects that use type hints but ship no py.typed marker."""
    if not uses_type_hints:
        return
    marker_found = False
    for _root, dirs, files in os.walk(path):
        # Prune vendored/cache directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
        if "py.typed" in files:
            marker_found = True
            break
    if not marker_found:
        result.findings.append(Finding(
            category="structure", rule="structure/no-py-typed",
            message="Type hints used but no py.typed marker found", cost=NO_PY_TYPED_COST,
        ))
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def _check_project_health(path: str, result: AnalyzerResult, uses_type_hints: bool) -> None:
    """Run every project-hygiene check (README, LICENSE, .gitignore, configs)."""
    # All path-only checks share the same (path, result) signature.
    path_checks = (
        _check_readme,
        _check_license,
        _check_gitignore,
        _check_linter_config,
        _check_type_checker_config,
    )
    for check in path_checks:
        check(path, result)
    # py.typed check additionally needs the type-hint usage flag.
    _check_py_typed(path, uses_type_hints, result)
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Analyze project structure: file sizes, tests, type hints, and project health."""
    result = AnalyzerResult(category="structure")
    cap = CATEGORIES["structure"]["max_deduction"]

    py_files, test_files, source_files, has_tests = _collect_py_files(path)
    if not py_files:
        # No Python files at all — nothing to score.
        return result

    _check_large_files(py_files, result)
    _check_tests(has_tests, test_files, source_files, result)
    hinted = _check_type_hints(py_files, result)
    _check_project_health(path, result, hinted)

    # Total cost of all findings, capped at the category maximum.
    total_cost = sum(finding.cost for finding in result.findings)
    result.deduction = min(total_cost, cap)
    return result
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""Vulture dead code analyzer."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import subprocess # nosec B404 — required for running CLI tools
|
|
5
|
+
|
|
6
|
+
from ..rules import CATEGORIES, VULTURE_COST, AnalyzerResult, Finding
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def analyze(path: str, **_kw) -> AnalyzerResult:
    """Analyze dead code using vulture."""
    result = AnalyzerResult(category="dead_code")
    cap = CATEGORIES["dead_code"]["max_deduction"]

    command = [
        "vulture", "--min-confidence", "80",
        "--exclude", ".venv,node_modules,__pycache__,.git,.tox",
        path,
    ]
    try:
        proc = subprocess.run(  # nosec B603 B607 — intentional subprocess call to vulture CLI tool
            command, capture_output=True, text=True, timeout=120
        )
    except FileNotFoundError:
        # Tool not installed — report as skipped rather than failing the scan.
        result.error = "vulture not found (skipped)"
        return result
    except Exception as exc:
        result.error = str(exc)
        return result

    output_lines = proc.stdout.strip().splitlines() if proc.stdout else []

    # vulture output: filename:line: unused X 'name' (NN% confidence)
    line_pattern = re.compile(r"^(.+?):(\d+): (.+)$")
    for raw in output_lines:
        match = line_pattern.match(raw)
        if match is None:
            continue
        result.findings.append(Finding(
            category="dead_code", rule="vulture", message=match.group(3),
            file=match.group(1), line=int(match.group(2)), cost=VULTURE_COST,
        ))

    result.deduction = min(sum(f.cost for f in result.findings), cap)
    return result
|
python_doctor/cli.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
"""Main CLI entry point for Python Doctor."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import os
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
from . import __version__
|
|
9
|
+
from .analyzers import (
|
|
10
|
+
bandit_analyzer,
|
|
11
|
+
complexity,
|
|
12
|
+
dependency_analyzer,
|
|
13
|
+
docstring_analyzer,
|
|
14
|
+
exceptions_analyzer,
|
|
15
|
+
imports_analyzer,
|
|
16
|
+
ruff_analyzer,
|
|
17
|
+
structure,
|
|
18
|
+
vulture_analyzer,
|
|
19
|
+
)
|
|
20
|
+
from .rules import CATEGORIES
|
|
21
|
+
from .scorer import category_score, compute_score, score_label
|
|
22
|
+
|
|
23
|
+
# Ordered (category key, analyzer module) pairs; each module exposes
# analyze(path, **kw) returning an AnalyzerResult for its category.
# Category keys must match entries in rules.CATEGORIES.
ANALYZERS = [
    ("security", bandit_analyzer),
    ("lint", ruff_analyzer),
    ("dead_code", vulture_analyzer),
    ("complexity", complexity),
    ("structure", structure),
    ("dependencies", dependency_analyzer),
    ("docs", docstring_analyzer),
    ("imports", imports_analyzer),
    ("exceptions", exceptions_analyzer),
]

# How many findings to print per category unless --verbose is given.
MAX_FINDINGS_DISPLAY = 5
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def run_analyzers(path: str, fix: bool = False):
    """Run all analyzers on the given path and return results."""
    collected = []
    for category, module in ANALYZERS:
        if category == "lint":
            # Only the ruff analyzer accepts the auto-fix flag.
            outcome = module.analyze(path=path, fix=fix)
        else:
            outcome = module.analyze(path=path)
        collected.append(outcome)
    return collected
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def format_finding(f, path: str) -> str:
    """Format a single finding for display."""
    rel_path = os.path.relpath(f.file, path) if f.file else ""
    # Location keeps the ":line" suffix whenever a line number is known.
    location = f"{rel_path}:{f.line}" if f.line else rel_path
    icon = "⚠" if f.severity in ("warning", "low", "medium") else "✗"
    text = f"  {icon} {f.rule}: {f.message}"
    if location:
        text += f" ({location})"
    return text
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def print_report(results, path: str, verbose: bool = False):
    """Print the full health report to stdout."""
    print(f"\n🐍 Python Doctor v{__version__}")
    print(f"Scanning: {path}\n")

    overall = compute_score(results)
    print(f"📊 Score: {overall}/100 ({score_label(overall)})\n")

    for result in results:
        meta = CATEGORIES[result.category]
        header = f"{meta['emoji']} {meta['label']}"

        if result.error:
            # Analyzer could not run (e.g. tool missing) — show why and move on.
            print(f"{header} — ⚠ {result.error}")
            continue

        suffix = "" if result.findings else " ✓"
        print(f"{header} ({category_score(result)}/{meta['max_deduction']}){suffix}")

        if not result.findings:
            print("  ✓ All clear.")
        else:
            # Truncate unless --verbose was requested.
            cutoff = None if verbose else MAX_FINDINGS_DISPLAY
            visible = result.findings[:cutoff]
            for finding in visible:
                print(format_finding(finding, path))
            hidden = len(result.findings) - len(visible)
            if hidden > 0:
                print(f"  ... and {hidden} more")
        print()
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _json_payload(results, path: str, score: int) -> dict:
    """Build the machine-readable report emitted by --json."""
    payload = {
        "version": __version__,
        "path": path,
        "score": score,
        "label": score_label(score),
        "categories": {},
    }
    for r in results:
        cat = CATEGORIES[r.category]
        payload["categories"][r.category] = {
            "score": category_score(r),
            "max": cat["max_deduction"],
            "deduction": r.deduction,
            "error": r.error,
            "findings": [
                {"rule": f.rule, "message": f.message, "file": f.file, "line": f.line, "severity": f.severity}
                for f in r.findings
            ],
        }
    return payload


def main():
    """CLI entry point for python-doctor.

    Parses arguments, runs every analyzer on the target directory, and
    emits the report in the requested format (human-readable, --json,
    or bare --score). Exits 1 when the path is not a directory or, in
    report mode, when the score falls below 50.
    """
    parser = argparse.ArgumentParser(
        prog="python-doctor",
        description="Scan Python codebases and get a 0-100 health score.",
    )
    parser.add_argument("path", nargs="?", default=".", help="Directory to scan")
    parser.add_argument("--verbose", "-v", action="store_true", help="Show all findings")
    parser.add_argument("--score", action="store_true", help="Output only the score number")
    parser.add_argument("--json", dest="json_out", action="store_true", help="JSON output")
    parser.add_argument("--fix", action="store_true", help="Auto-fix what's possible (ruff --fix)")
    parser.add_argument("--version", action="version", version=f"%(prog)s {__version__}")

    args = parser.parse_args()
    path = os.path.abspath(args.path)

    if not os.path.isdir(path):
        print(f"Error: '{path}' is not a directory.", file=sys.stderr)
        sys.exit(1)

    results = run_analyzers(path, fix=args.fix)
    score = compute_score(results)

    if args.score:
        # Score-only mode: print the bare number and exit 0.
        print(score)
        return

    if args.json_out:
        # JSON mode: structured output for agents/CI; exit 0 regardless of score.
        print(json.dumps(_json_payload(results, path, score), indent=2))
        return

    print_report(results, path, verbose=args.verbose)
    # Report mode signals failure (exit 1) when the score drops below 50.
    sys.exit(0 if score >= 50 else 1)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
if __name__ == "__main__":
|
|
149
|
+
main()
|
python_doctor/py.typed
ADDED
|
File without changes
|
python_doctor/rules.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"""Rule definitions and scoring categories."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
|
|
5
|
+
# Per-category display metadata and scoring caps. "max_deduction" is the
# most points a single category can remove from the 100-point base score.
CATEGORIES = {
    "security": {"emoji": "🔒", "label": "Security", "max_deduction": 30},
    "lint": {"emoji": "🧹", "label": "Lint", "max_deduction": 25},
    "dead_code": {"emoji": "💀", "label": "Dead Code", "max_deduction": 15},
    "complexity": {"emoji": "🔄", "label": "Complexity", "max_deduction": 15},
    "structure": {"emoji": "🏗", "label": "Structure", "max_deduction": 15},
    "dependencies": {"emoji": "📦", "label": "Dependencies", "max_deduction": 15},
    "docs": {"emoji": "📝", "label": "Docstrings", "max_deduction": 10},
    "imports": {"emoji": "🔗", "label": "Imports", "max_deduction": 10},
    "exceptions": {"emoji": "⚡", "label": "Exceptions", "max_deduction": 10},
}

# Per-finding point costs, keyed by bandit's severity labels.
BANDIT_SEVERITY_COST = {"HIGH": 3, "MEDIUM": 2, "LOW": 1}
RUFF_ERROR_COST = 1.0
RUFF_WARNING_COST = 0.5
VULTURE_COST = 0.5
COMPLEXITY_COST = {10: 2, 20: 5}  # CC>10: -2, CC>20: -5
# Files longer than this many lines are flagged as "large".
LARGE_FILE_THRESHOLD = 500
LARGE_FILE_COST = 2
NO_TESTS_COST = 5
LOW_TYPE_HINTS_COST = 5
# Minimum fraction of type-hint coverage before LOW_TYPE_HINTS_COST applies
# (presumably a ratio in [0, 1] — see the structure analyzer for exact use).
TYPE_HINT_THRESHOLD = 0.5

# Dependency analyzer costs
NO_BUILD_FILE_COST = 3
VULNERABLE_DEP_COST = 2
MIXED_BUILD_SYSTEM_COST = 1

# Docstring analyzer
DOCSTRING_LOW_COVERAGE_COST = 5
DOCSTRING_NO_COVERAGE_COST = 10

# Import analyzer
STAR_IMPORT_COST = 1
CIRCULAR_IMPORT_COST = 3

# Exception analyzer
BARE_EXCEPT_COST = 2
SILENT_EXCEPTION_COST = 1

# Structure: project health
NO_README_COST = 2
NO_LICENSE_COST = 1
NO_GITIGNORE_COST = 1
NO_LINTER_CONFIG_COST = 1
NO_TYPE_CHECKER_COST = 1
NO_PY_TYPED_COST = 1

# Structure: test quality
LOW_TEST_RATIO_COST = 2
VERY_LOW_TEST_RATIO_COST = 4
|
|
57
|
+
|
|
58
|
+
@dataclass
class Finding:
    """A single diagnostic finding from an analyzer."""
    # Scoring category key; expected to be a key of CATEGORIES.
    category: str
    # Machine-readable rule identifier (e.g. "structure/no-readme", "vulture").
    rule: str
    # Human-readable description of the issue.
    message: str
    # Source file the finding refers to; "" when not file-specific.
    file: str = ""
    # Line number within `file`; 0 when not line-specific.
    line: int = 0
    # Severity label; the CLI renders "warning"/"low"/"medium" with a ⚠ icon.
    severity: str = "medium"
    # Points this finding deducts from its category's score.
    cost: float = 0.0
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
@dataclass
class AnalyzerResult:
    """Result from running an analyzer, containing findings and deduction."""
    # Scoring category key; expected to be a key of CATEGORIES.
    category: str
    # All findings produced by the analyzer run.
    findings: list[Finding] = field(default_factory=list)
    # Total points deducted for this category (capped by the analyzer).
    deduction: float = 0.0
    # Error message when the analyzer could not run (e.g. tool missing); None on success.
    error: str | None = None
|
python_doctor/scorer.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Score aggregation logic."""
|
|
2
|
+
|
|
3
|
+
from .rules import CATEGORIES, AnalyzerResult
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def compute_score(results: list[AnalyzerResult]) -> int:
    """Compute the overall health score (0-100) from analyzer results."""
    deductions = [r.deduction for r in results]
    raw = 100 - sum(deductions)
    # Truncate to int and clamp at zero so the score never goes negative.
    return int(raw) if raw > 0 else 0
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def score_label(score: int) -> str:
    """Return a human-readable label for the given score."""
    # Thresholds checked from highest to lowest; first match wins.
    bands = (
        (90, "Excellent"),
        (75, "Good"),
        (50, "Needs Work"),
    )
    for floor, label in bands:
        if score >= floor:
            return label
    return "Critical"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def category_score(result: AnalyzerResult) -> int:
    """Compute the score for a single category."""
    meta = CATEGORIES[result.category]
    # Remaining points after the analyzer's (already-capped) deduction.
    remaining = meta["max_deduction"] - result.deduction
    return int(remaining)
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: python-doctor
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: One command. One score. Built for AI agents. Scans Python codebases and returns a 0-100 health score.
|
|
5
|
+
Project-URL: Homepage, https://github.com/saikatkumardey/python-doctor
|
|
6
|
+
Project-URL: Repository, https://github.com/saikatkumardey/python-doctor
|
|
7
|
+
Project-URL: Issues, https://github.com/saikatkumardey/python-doctor/issues
|
|
8
|
+
Project-URL: Changelog, https://github.com/saikatkumardey/python-doctor/blob/main/CHANGELOG.md
|
|
9
|
+
Author-email: Saikat Kumar Dey <deysaikatkumar@gmail.com>
|
|
10
|
+
License: MIT
|
|
11
|
+
License-File: LICENSE
|
|
12
|
+
Keywords: ai-agents,code-quality,developer-tools,linting,python,static-analysis
|
|
13
|
+
Classifier: Development Status :: 4 - Beta
|
|
14
|
+
Classifier: Environment :: Console
|
|
15
|
+
Classifier: Intended Audience :: Developers
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
22
|
+
Classifier: Topic :: Software Development :: Quality Assurance
|
|
23
|
+
Classifier: Topic :: Software Development :: Testing
|
|
24
|
+
Requires-Python: >=3.10
|
|
25
|
+
Requires-Dist: bandit>=1.7.0
|
|
26
|
+
Requires-Dist: radon>=6.0.0
|
|
27
|
+
Requires-Dist: ruff>=0.4.0
|
|
28
|
+
Requires-Dist: vulture>=2.11
|
|
29
|
+
Provides-Extra: dev
|
|
30
|
+
Requires-Dist: pytest>=7.0; extra == 'dev'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
# Python Doctor 🐍
|
|
34
|
+
|
|
35
|
+
[](https://github.com/saikatkumardey/python-doctor/actions/workflows/test.yml)
|
|
36
|
+
[](https://pypi.org/project/python-doctor/)
|
|
37
|
+
[](https://pypi.org/project/python-doctor/)
|
|
38
|
+
[](LICENSE)
|
|
39
|
+
|
|
40
|
+
**One command. One score. Built for AI agents.**
|
|
41
|
+
|
|
42
|
+
Python Doctor scans a Python codebase and returns a 0-100 health score with structured, actionable output. It's designed so an AI agent can run it, read the results, fix the issues, and verify the fix — in a loop, without human intervention.
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
python-doctor .
|
|
46
|
+
# 📊 Score: 98/100 (Excellent)
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## Why?
|
|
50
|
+
|
|
51
|
+
Setting up linting, security scanning, dead code detection, and complexity analysis means configuring 5+ tools, reading 5 different output formats, and deciding what matters. Python Doctor wraps them all into a single command with a single score.
|
|
52
|
+
|
|
53
|
+
An agent doesn't need to know what Bandit is. It just needs to know the score dropped and which lines to fix.
|
|
54
|
+
|
|
55
|
+
## Install the CLI
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
# Using uv (recommended)
|
|
59
|
+
uv tool install git+https://github.com/saikatkumardey/python-doctor
|
|
60
|
+
|
|
61
|
+
# Using pip
|
|
62
|
+
pip install git+https://github.com/saikatkumardey/python-doctor
|
|
63
|
+
|
|
64
|
+
# Or clone and run directly
|
|
65
|
+
git clone https://github.com/saikatkumardey/python-doctor.git
|
|
66
|
+
cd python-doctor
|
|
67
|
+
uv run python-doctor /path/to/project
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## Add to Your Coding Agent
|
|
71
|
+
|
|
72
|
+
Python Doctor works with any agent that can run shell commands. Install the CLI (above), then add the rule to your agent:
|
|
73
|
+
|
|
74
|
+
### Claude Code
|
|
75
|
+
|
|
76
|
+
Add to your `CLAUDE.md`:
|
|
77
|
+
|
|
78
|
+
```markdown
|
|
79
|
+
## Python Health Check
|
|
80
|
+
|
|
81
|
+
Before finishing work on Python files, run:
|
|
82
|
+
python-doctor . --json
|
|
83
|
+
|
|
84
|
+
Fix any findings with severity "error". Target score: 80+.
|
|
85
|
+
If score drops below 50, do not commit — fix the issues first.
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
### Cursor
|
|
89
|
+
|
|
90
|
+
Add to `.cursor/rules/python-doctor.mdc`:
|
|
91
|
+
|
|
92
|
+
```markdown
|
|
93
|
+
---
|
|
94
|
+
description: Python codebase health check
|
|
95
|
+
globs: "**/*.py"
|
|
96
|
+
alwaysApply: false
|
|
97
|
+
---
|
|
98
|
+
|
|
99
|
+
Run `python-doctor . --json` after modifying Python files.
|
|
100
|
+
Fix findings. Target score: 80+. Do not commit below 50.
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
### OpenAI Codex
|
|
104
|
+
|
|
105
|
+
Add to `AGENTS.md`:
|
|
106
|
+
|
|
107
|
+
```markdown
|
|
108
|
+
## Python Health Check
|
|
109
|
+
|
|
110
|
+
After modifying Python files, run `python-doctor . --json` to check codebase health.
|
|
111
|
+
Fix any findings. Target score: 80+. Exit code 1 means score < 50 — fix before committing.
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
### Windsurf / Cline / Aider
|
|
115
|
+
|
|
116
|
+
Add to your project rules or system prompt:
|
|
117
|
+
|
|
118
|
+
```
|
|
119
|
+
After modifying Python files, run: python-doctor . --json
|
|
120
|
+
Read the output. Fix findings with severity "error" first, then warnings.
|
|
121
|
+
Re-run to verify the score improved. Target: 80+.
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
### OpenClaw
|
|
125
|
+
|
|
126
|
+
```bash
|
|
127
|
+
clawhub install python-doctor
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
### GitHub Actions (CI)
|
|
131
|
+
|
|
132
|
+
```yaml
|
|
133
|
+
- name: Health Check
|
|
134
|
+
run: |
|
|
135
|
+
uv tool install git+https://github.com/saikatkumardey/python-doctor
|
|
136
|
+
python-doctor . --verbose
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
Exits with code 1 if score < 50.
|
|
140
|
+
|
|
141
|
+
## Usage
|
|
142
|
+
|
|
143
|
+
```bash
|
|
144
|
+
# Scan current directory
|
|
145
|
+
python-doctor .
|
|
146
|
+
|
|
147
|
+
# Verbose — show all findings with line numbers
|
|
148
|
+
python-doctor . --verbose
|
|
149
|
+
|
|
150
|
+
# Just the score (for CI or quick checks)
|
|
151
|
+
python-doctor . --score
|
|
152
|
+
|
|
153
|
+
# Structured JSON for agents
|
|
154
|
+
python-doctor . --json
|
|
155
|
+
|
|
156
|
+
# Auto-fix what Ruff can handle, then report the rest
|
|
157
|
+
python-doctor . --fix
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
## What It Checks
|
|
161
|
+
|
|
162
|
+
9 categories, 5 external tools + 4 custom AST analyzers:
|
|
163
|
+
|
|
164
|
+
| Category | Max | What |
|
|
165
|
+
|----------|-----|------|
|
|
166
|
+
| 🔒 Security | -30 | Bandit (SQLi, hardcoded secrets, unsafe calls). Auto-skips `assert` in test files. |
|
|
167
|
+
| 🧹 Lint | -25 | Ruff (unused imports, undefined names, style) |
|
|
168
|
+
| 💀 Dead Code | -15 | Vulture (unused functions, variables, imports) |
|
|
169
|
+
| 🔄 Complexity | -15 | Radon (cyclomatic complexity > 10) |
|
|
170
|
+
| 🏗 Structure | -15 | File sizes, test ratio, type hints, README, LICENSE, linter/type-checker config |
|
|
171
|
+
| 📦 Dependencies | -15 | Build file exists, no mixed systems, pip-audit vulnerabilities |
|
|
172
|
+
| 📝 Docstrings | -10 | Public function/class docstring coverage |
|
|
173
|
+
| 🔗 Imports | -10 | Star imports, circular import detection |
|
|
174
|
+
| ⚡ Exceptions | -10 | Bare `except:`, silently swallowed exceptions |
|
|
175
|
+
|
|
176
|
+
Score = `max(0, 100 - total_deductions)`. Each category is capped at its max.
|
|
177
|
+
|
|
178
|
+
## The Loop
|
|
179
|
+
|
|
180
|
+
This is how an agent uses it:
|
|
181
|
+
|
|
182
|
+
1. `python-doctor . --json` → read the report
|
|
183
|
+
2. Fix the findings (auto-fix with `--fix`, manual fixes for the rest)
|
|
184
|
+
3. `python-doctor . --score` → verify improvement
|
|
185
|
+
4. Repeat until score target met
|
|
186
|
+
|
|
187
|
+
We built Python Doctor, then ran it on itself. Score: 47. Fixed everything it flagged. Score: 98. The tool eats its own dogfood.
|
|
188
|
+
|
|
189
|
+
## License
|
|
190
|
+
|
|
191
|
+
MIT — Saikat Kumar Dey, 2026
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
python_doctor/__init__.py,sha256=N0uv_1jmUicRwPRW5sPW8lAVxfeFtVZiCpOTWQDNsug,84
|
|
2
|
+
python_doctor/cli.py,sha256=Di74xyaGLk0iv-x7P0DGIk0eHiJTQoQ3uvjvt4IAjM0,4644
|
|
3
|
+
python_doctor/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
4
|
+
python_doctor/rules.py,sha256=H-0ET7-A9JsVGlxN14e3rjLiXWE7XFgvbYV_yyv4bZA,2135
|
|
5
|
+
python_doctor/scorer.py,sha256=qEqRw436GyCmDjqw-IwDd175UxU2xMK5KGV-Bzmf-M8,787
|
|
6
|
+
python_doctor/analyzers/__init__.py,sha256=y9QKh8aD_8jKviICtx3-QmxZrkJ5AXW0gjid73tlPlw,35
|
|
7
|
+
python_doctor/analyzers/bandit_analyzer.py,sha256=Ku9Q1bo3XHesB4ZsyyYoY46IuDRr971lJqBJAWjtCZo,2274
|
|
8
|
+
python_doctor/analyzers/complexity.py,sha256=3LP15RvHfin-OXJcn7wXKuAWW0tyzW56PAZau-3R4h0,1644
|
|
9
|
+
python_doctor/analyzers/dependency_analyzer.py,sha256=pgLo3fN2ubvaBv7tBV8M-Igt98KOSNT7Gu33aZTisw0,3283
|
|
10
|
+
python_doctor/analyzers/docstring_analyzer.py,sha256=Rf9bERdW7JQorJSd4vjR4FovS9Sk7ZKfB5z-D9d4rXE,3030
|
|
11
|
+
python_doctor/analyzers/exceptions_analyzer.py,sha256=D9Exl4W9ASkO_kT42e324YIerXj9n6tyM11Oh_A24E4,2658
|
|
12
|
+
python_doctor/analyzers/imports_analyzer.py,sha256=ld7DDjZtM_4xIiVIb8R4q8HFpcADe0HAclYZ41G5lWo,4034
|
|
13
|
+
python_doctor/analyzers/ruff_analyzer.py,sha256=D70NygBGQCXOgX7uDLHXF2SVD-4exKOGU8zGqAN71-4,2014
|
|
14
|
+
python_doctor/analyzers/structure.py,sha256=jCrvL6P9j5_U7OKcyjD2pXdqKlP_YQUX9leVwM2k1OU,9543
|
|
15
|
+
python_doctor/analyzers/vulture_analyzer.py,sha256=PE_UZB2zYgKd3QOABwK3nvQYILJkxEohOjEjKniUrOE,1495
|
|
16
|
+
python_doctor-0.1.0.dist-info/METADATA,sha256=H878TW_cIFqsuIt86faKBdAfUXo0KLy-35-xACKREoo,6169
|
|
17
|
+
python_doctor-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
18
|
+
python_doctor-0.1.0.dist-info/entry_points.txt,sha256=J40LC_mJMQt2ai5r_9PVhgnjR_ykSkoczMspOn2h-xM,57
|
|
19
|
+
python_doctor-0.1.0.dist-info/licenses/LICENSE,sha256=rlgCkmloUxFvtgQeP6gf9B7pTVjkZyutQCKf56KQIbA,1073
|
|
20
|
+
python_doctor-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Saikat Kumar Dey
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|