gwc-pybundle 1.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gwc-pybundle has been flagged as potentially problematic; see the package registry's advisory page for further details.
- gwc_pybundle-1.4.5.dist-info/METADATA +876 -0
- gwc_pybundle-1.4.5.dist-info/RECORD +55 -0
- gwc_pybundle-1.4.5.dist-info/WHEEL +5 -0
- gwc_pybundle-1.4.5.dist-info/entry_points.txt +2 -0
- gwc_pybundle-1.4.5.dist-info/licenses/LICENSE.md +25 -0
- gwc_pybundle-1.4.5.dist-info/top_level.txt +1 -0
- pybundle/__init__.py +0 -0
- pybundle/__main__.py +4 -0
- pybundle/cli.py +365 -0
- pybundle/context.py +362 -0
- pybundle/doctor.py +148 -0
- pybundle/filters.py +178 -0
- pybundle/manifest.py +77 -0
- pybundle/packaging.py +45 -0
- pybundle/policy.py +132 -0
- pybundle/profiles.py +340 -0
- pybundle/roadmap_model.py +42 -0
- pybundle/roadmap_scan.py +295 -0
- pybundle/root_detect.py +14 -0
- pybundle/runner.py +163 -0
- pybundle/steps/__init__.py +26 -0
- pybundle/steps/bandit.py +72 -0
- pybundle/steps/base.py +20 -0
- pybundle/steps/compileall.py +76 -0
- pybundle/steps/context_expand.py +272 -0
- pybundle/steps/copy_pack.py +293 -0
- pybundle/steps/coverage.py +101 -0
- pybundle/steps/cprofile_step.py +155 -0
- pybundle/steps/dependency_sizes.py +120 -0
- pybundle/steps/duplication.py +94 -0
- pybundle/steps/error_refs.py +204 -0
- pybundle/steps/handoff_md.py +167 -0
- pybundle/steps/import_time.py +165 -0
- pybundle/steps/interrogate.py +84 -0
- pybundle/steps/license_scan.py +96 -0
- pybundle/steps/line_profiler.py +108 -0
- pybundle/steps/memory_profile.py +173 -0
- pybundle/steps/mutation_testing.py +136 -0
- pybundle/steps/mypy.py +60 -0
- pybundle/steps/pip_audit.py +45 -0
- pybundle/steps/pipdeptree.py +61 -0
- pybundle/steps/pylance.py +562 -0
- pybundle/steps/pytest.py +66 -0
- pybundle/steps/radon.py +121 -0
- pybundle/steps/repro_md.py +161 -0
- pybundle/steps/rg_scans.py +78 -0
- pybundle/steps/roadmap.py +153 -0
- pybundle/steps/ruff.py +111 -0
- pybundle/steps/shell.py +74 -0
- pybundle/steps/slow_tests.py +170 -0
- pybundle/steps/test_flakiness.py +172 -0
- pybundle/steps/tree.py +116 -0
- pybundle/steps/unused_deps.py +112 -0
- pybundle/steps/vulture.py +83 -0
- pybundle/tools.py +63 -0
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from dataclasses import asdict
|
|
5
|
+
from datetime import datetime, timezone
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from .base import Step, StepResult
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _utc_now() -> str:
|
|
13
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _safe_read(path: Path) -> str:
|
|
17
|
+
if not path.exists():
|
|
18
|
+
return f"(missing: {path.as_posix()})"
|
|
19
|
+
return path.read_text(encoding="utf-8", errors="replace").strip()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _tool_table(tools_obj: Any) -> list[str]:
|
|
23
|
+
d = (
|
|
24
|
+
asdict(tools_obj)
|
|
25
|
+
if hasattr(tools_obj, "__dataclass_fields__")
|
|
26
|
+
else dict(tools_obj)
|
|
27
|
+
)
|
|
28
|
+
lines = ["| Tool | Status |", "|------|--------|"]
|
|
29
|
+
for k in sorted(d.keys()):
|
|
30
|
+
v = d[k]
|
|
31
|
+
if v:
|
|
32
|
+
lines.append(f"| `{k}` | ✅ `{v}` |")
|
|
33
|
+
else:
|
|
34
|
+
lines.append(f"| `{k}` | ❌ `<missing>` |")
|
|
35
|
+
return lines
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class HandoffMarkdownStep(Step):
    """Write ``HANDOFF.md`` into the bundle workdir.

    The document summarizes the bundle run (profile, system info, step
    outcomes, detected tools, command line) so a reviewer or AI assistant
    can orient themselves without opening individual logs.
    """

    name = "generate HANDOFF.md"

    def run(self, ctx: Any) -> StepResult:
        """Assemble and write HANDOFF.md; always returns a PASS result.

        All context attributes are read defensively via ``getattr`` with
        fallbacks, so a partially-populated context still yields a report.
        """
        start = time.time()

        created_utc = getattr(ctx, "created_utc", None) or _utc_now()
        profile = getattr(ctx, "profile_name", "<unknown>")
        root_path = Path(getattr(ctx, "root"))
        project = root_path.name
        root = str(root_path)
        workdir_path = Path(getattr(ctx, "workdir"))
        workdir = str(workdir_path)

        # filenames fixed to match your repo
        uname = _safe_read(workdir_path / "meta" / "21_uname.txt")
        pyver = _safe_read(workdir_path / "meta" / "20_python_version.txt")

        redact = bool(getattr(ctx, "redact", True))
        redact_status = "enabled" if redact else "disabled"

        # Tally step outcomes; any FAIL dominates, any SKIP degrades.
        results: list[Any] = list(getattr(ctx, "results", []))
        pass_n = sum(1 for r in results if getattr(r, "status", "") == "PASS")
        fail_n = sum(1 for r in results if getattr(r, "status", "") == "FAIL")
        skip_n = sum(1 for r in results if getattr(r, "status", "") == "SKIP")
        total_n = len(results)

        overall = "FAIL" if fail_n else ("DEGRADED" if skip_n else "PASS")

        # tool table — contexts have used both attribute names historically
        tools_obj = getattr(ctx, "tools", None) or getattr(ctx, "tooling", None)
        tools_table = (
            _tool_table(tools_obj) if tools_obj is not None else ["(no tools detected)"]
        )

        command_used = getattr(ctx, "command_used", "") or "(not captured)"

        lines: list[str] = []
        lines.append("# Bundle Handoff")
        lines.append("")
        lines.append("## Overview")
        lines.append(
            f"- **Bundle tool:** pybundle {getattr(ctx, 'version', '<unknown>')}"
        )
        lines.append(f"- **Profile:** {profile}")
        lines.append(f"- **Created (UTC):** {created_utc}")
        lines.append(f"- **Project:** {project}")
        lines.append(f"- **Root:** {root}")
        lines.append(f"- **Workdir:** {workdir}")
        lines.append("")
        lines.append("## System")
        lines.append(f"- **OS:** {uname}")
        lines.append(f"- **Python:** {pyver}")
        lines.append(f"- **Redaction:** {redact_status}")
        lines.append("")
        # NOTE(review): the "At a glance" body (overall status / step counts)
        # is appended after the AI context section below — confirm the
        # ordering is intentional.
        lines.append("## At a glance")

        lines.append("## AI context summary")

        # BUGFIX: _safe_read() returns a truthy "(missing: ...)" placeholder
        # for absent files, so a plain truthiness check on its result never
        # took the "not found" branches (and a missing roadmap fell into the
        # "could not be parsed" branch). Check file existence explicitly.
        copy_manifest_path = workdir_path / "meta" / "50_copy_manifest.txt"
        copy_manifest = (
            _safe_read(copy_manifest_path).strip() if copy_manifest_path.exists() else ""
        )
        if copy_manifest:
            lines.append("### Curated copy")
            lines.append("```")
            lines.append(copy_manifest)
            lines.append("```")
        else:
            lines.append("- Curated copy manifest not found.")

        roadmap_path = workdir_path / "meta" / "70_roadmap.json"
        roadmap_json = _safe_read(roadmap_path).strip() if roadmap_path.exists() else ""
        if roadmap_json:
            try:
                import json

                rj = json.loads(roadmap_json)
                # Collect the set of languages seen across roadmap nodes.
                langs = set()
                for n in rj.get("nodes", []):
                    if isinstance(n, dict):
                        lang = n.get("lang")
                        if lang:
                            langs.add(lang)
                eps = rj.get("entrypoints", []) or []
                lines.append(
                    f"- **Languages detected:** {', '.join(sorted(langs)) if langs else '(none)'}"
                )
                if eps:
                    lines.append("- **Entrypoints:**")
                    # Cap at 10 entrypoints to keep the handoff skimmable.
                    for ep in eps[:10]:
                        node = ep.get("node") if isinstance(ep, dict) else None
                        reason = ep.get("reason") if isinstance(ep, dict) else None
                        conf = ep.get("confidence") if isinstance(ep, dict) else None
                        if node:
                            extra = ""
                            if reason is not None and conf is not None:
                                extra = f" — {reason} ({conf}/3)"
                            lines.append(f" - `{node}`{extra}")
                else:
                    lines.append("- **Entrypoints:** (none detected)")
            except Exception:
                lines.append("- Roadmap JSON present but could not be parsed.")
        else:
            lines.append("- Roadmap not found.")

        lines.append("")

        lines.append(f"- **Overall status:** {overall}")
        lines.append(
            f"- **Steps:** {total_n} total — {pass_n} PASS, {fail_n} FAIL, {skip_n} SKIP"
        )
        lines.append("")
        lines.append("## Tools")
        lines.extend(tools_table)
        lines.append("")
        lines.append("## Command used")
        lines.append("```bash")
        lines.append(command_used)
        lines.append("```")
        lines.append("")
        lines.append("## Reproduction")
        lines.append("See **REPRO.md** for step-by-step reproduction instructions.")
        lines.append("")

        out_path = workdir_path / "HANDOFF.md"
        out_path.write_text("\n".join(lines), encoding="utf-8")

        secs = int(time.time() - start)
        return StepResult(
            name=self.name, status="PASS", seconds=secs, note="wrote HANDOFF.md"
        )
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Import time analysis - Milestone 3 (v1.4.0)
|
|
3
|
+
"""
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import subprocess
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from .base import StepResult
|
|
12
|
+
from ..context import BundleContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class ImportTimeStep:
    """
    Analyze Python import times using -X importtime to identify slow imports.

    Outputs:
    - logs/61_import_time.txt: Ranked list of slowest imports
    """

    name: str = "import_time"

    def run(self, ctx: BundleContext) -> StepResult:
        """Run the entry point under ``-X importtime`` and write a ranked report.

        Returns SKIP when profiling is disabled or no entry point is found,
        FAIL on timeout or unexpected error, OK otherwise.
        """
        start = time.time()

        if ctx.options.no_profile:
            return StepResult(self.name, "SKIP", 0, "profiling disabled")

        # Find entry point
        entry_point = self._find_entry_point(ctx)

        if not entry_point:
            return StepResult(self.name, "SKIP", 0, "no suitable entry point found")

        ctx.emit(f" Analyzing import time for {entry_point.name}")

        try:
            # BUGFIX: previously the path was string-interpolated into
            # -c "import runpy; runpy.run_path('{entry_point}')", which broke
            # on paths containing quotes or Windows backslashes. Passing the
            # script directly to the interpreter needs no quoting at all.
            result = subprocess.run(
                [
                    str(ctx.tools.python),
                    "-X", "importtime",
                    str(entry_point),
                ],
                cwd=ctx.root,
                capture_output=True,
                text=True,
                timeout=60
            )

            # Parse and rank import times (-X importtime writes to stderr).
            self._generate_report(result.stderr, ctx.workdir)

            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "OK", elapsed)

        except subprocess.TimeoutExpired:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, "timeout")
        except Exception as e:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, str(e))

    def _find_entry_point(self, ctx: BundleContext) -> Path | None:
        """Find the best entry point to analyze.

        Precedence: explicit --profile-entry-point option, then
        ``<package>/__main__.py`` derived from pyproject.toml, then common
        top-level script names. Returns None when nothing suitable exists.
        """
        if ctx.options.profile_entry_point:
            ep = Path(ctx.options.profile_entry_point)
            if not ep.is_absolute():
                ep = ctx.root / ctx.options.profile_entry_point
            if ep.exists() and ep.is_file():
                return ep

        # Try package/__main__.py named after the project in pyproject.toml.
        pyproject = ctx.root / "pyproject.toml"
        if pyproject.exists():
            try:
                # tomllib is stdlib on Python 3.11+; ImportError on older
                # versions is swallowed by the except below.
                import tomllib
                with pyproject.open("rb") as f:
                    data = tomllib.load(f)
                pkg_name = data.get("project", {}).get("name", "").replace("-", "_")
                if pkg_name:
                    pkg_main = ctx.root / pkg_name / "__main__.py"
                    if pkg_main.exists():
                        return pkg_main
            except Exception:
                pass

        # Try common entry points
        for entry in ["__main__.py", "main.py", "app.py", "cli.py"]:
            path = ctx.root / entry
            if path.exists():
                return path

        return None

    def _generate_report(self, importtime_output: str, workdir: Path) -> None:
        """Parse -X importtime output and generate a ranked report.

        Writes logs/61_import_time.txt under *workdir*; when no trace lines
        are found, the raw output is dumped instead.
        """
        output_file = workdir / "logs" / "61_import_time.txt"
        output_file.parent.mkdir(parents=True, exist_ok=True)

        # Parse import times.
        # Format: "import time: self [us] | cumulative | imported package"
        imports = []
        for line in importtime_output.splitlines():
            if "import time:" in line:
                parts = line.split("|")
                if len(parts) >= 3:
                    try:
                        # Extract times; the header row fails int() and is
                        # skipped by the except below.
                        time_part = parts[0].split(":")[-1].strip()
                        self_time = int(time_part.split()[0])
                        cumulative = int(parts[1].strip())
                        module = parts[2].strip()
                        imports.append((cumulative, self_time, module))
                    except (ValueError, IndexError):
                        continue

        # Sort by cumulative time (descending)
        imports.sort(reverse=True)

        # Explicit encoding so the report is stable across platforms.
        with output_file.open("w", encoding="utf-8") as f:
            f.write("=" * 70 + "\n")
            f.write("IMPORT TIME ANALYSIS\n")
            f.write("=" * 70 + "\n\n")

            if not imports:
                f.write("No import time data collected.\n")
                f.write("\nRaw output:\n")
                f.write(importtime_output)
                return

            # The largest cumulative entry approximates total import time
            # (the top-level module's cumulative covers its children).
            total_time = imports[0][0]

            f.write(f"Total import time: {total_time / 1000:.1f} ms\n")
            f.write(f"Number of imports analyzed: {len(imports)}\n\n")

            f.write("TOP 30 SLOWEST IMPORTS (by cumulative time):\n")
            f.write("-" * 70 + "\n")
            f.write(f"{'Cumulative (ms)':<18} {'Self (ms)':<15} {'Module'}\n")
            f.write("-" * 70 + "\n")

            for cumulative, self_time, module in imports[:30]:
                f.write(f"{cumulative / 1000:>15.1f} {self_time / 1000:>12.1f} {module}\n")

            # Also show slowest by self time
            imports_by_self = sorted(imports, key=lambda x: x[1], reverse=True)

            f.write("\n" + "=" * 70 + "\n")
            f.write("TOP 20 SLOWEST IMPORTS (by self time, excluding children):\n")
            f.write("-" * 70 + "\n")
            f.write(f"{'Self (ms)':<15} {'Cumulative (ms)':<18} {'Module'}\n")
            f.write("-" * 70 + "\n")

            for cumulative, self_time, module in imports_by_self[:20]:
                f.write(f"{self_time / 1000:>12.1f} {cumulative / 1000:>15.1f} {module}\n")

            f.write("\n" + "=" * 70 + "\n")
            f.write("Recommendations:\n")
            f.write("- Consider lazy imports for modules with high cumulative times\n")
            f.write("- Review modules with high self times for optimization\n")
            f.write("- Use conditional imports to defer loading when possible\n")
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import subprocess # nosec B404 - Required for tool execution, paths validated
|
|
4
|
+
import time
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
from .base import StepResult
|
|
9
|
+
from ..context import BundleContext
|
|
10
|
+
from ..tools import which
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _repo_has_py_files(root: Path) -> bool:
|
|
14
|
+
"""Fast check if there are Python files to scan."""
|
|
15
|
+
for p in root.rglob("*.py"):
|
|
16
|
+
parts = set(p.parts)
|
|
17
|
+
if (
|
|
18
|
+
".venv" not in parts
|
|
19
|
+
and "__pycache__" not in parts
|
|
20
|
+
and "node_modules" not in parts
|
|
21
|
+
and "dist" not in parts
|
|
22
|
+
and "build" not in parts
|
|
23
|
+
and "artifacts" not in parts
|
|
24
|
+
):
|
|
25
|
+
return True
|
|
26
|
+
return False
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class InterrogateStep:
    """Measure docstring coverage with ``interrogate`` and save the report."""

    name: str = "interrogate"
    target: str = "."
    outfile: str = "logs/52_docstring_coverage.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        """Run interrogate over the target; SKIP when the tool or .py files are absent."""
        started = time.time()
        report = ctx.workdir / self.outfile
        report.parent.mkdir(parents=True, exist_ok=True)

        exe = which("interrogate")
        if not exe:
            report.write_text(
                "interrogate not found; skipping (pip install interrogate)\n",
                encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing interrogate")

        if not _repo_has_py_files(ctx.root):
            report.write_text(
                "no .py files detected; skipping interrogate\n",
                encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no python files")

        # -v gives per-file detail; --fail-under 0 keeps coverage percentage
        # from failing this step.
        command = [
            exe,
            str(ctx.root / self.target),
            "-v",
            "--fail-under", "0",
            "--color",
        ]

        try:
            proc = subprocess.run(  # nosec B603 - Using full path from which()
                command,
                cwd=ctx.root,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                timeout=120,
            )
            report.write_text(proc.stdout, encoding="utf-8")
        except subprocess.TimeoutExpired:
            report.write_text("interrogate timed out after 120s\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 120000, "timeout")
        except Exception as exc:
            report.write_text(f"interrogate error: {exc}\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 0, str(exc))

        # With --fail-under=0 interrogate exits 0 regardless of coverage,
        # so any completed run counts as success.
        return StepResult(self.name, "OK", int((time.time() - started) * 1000), None)
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import subprocess # nosec B404 - Required for tool execution, paths validated
|
|
4
|
+
import time
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
|
|
7
|
+
from .base import StepResult
|
|
8
|
+
from ..context import BundleContext
|
|
9
|
+
from ..tools import which
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class LicenseScanStep:
    """Inventory dependency licenses via ``pip-licenses`` and flag concerns."""

    name: str = "license scan"
    outfile: str = "meta/32_licenses.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        """Run pip-licenses and write the annotated report; SKIP if the tool is missing."""
        started = time.time()
        report = ctx.workdir / self.outfile
        report.parent.mkdir(parents=True, exist_ok=True)

        exe = which("pip-licenses")
        if not exe:
            report.write_text(
                "pip-licenses not found; skipping (pip install pip-licenses)\n",
                encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing pip-licenses")

        # Markdown table with URLs and descriptions for easy review.
        command = [
            exe,
            "--format=markdown",
            "--with-urls",
            "--with-description",
        ]

        try:
            proc = subprocess.run(  # nosec B603 - Using full path from which()
                command,
                cwd=ctx.root,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                timeout=60,
            )

            report_text = proc.stdout

            # Append heuristic compatibility warnings, if any were found.
            concerns = self._check_license_compatibility(report_text)
            if concerns:
                report_text += "\n\n" + "=" * 70 + "\n"
                report_text += "LICENSE COMPATIBILITY WARNINGS\n"
                report_text += "=" * 70 + "\n\n"
                report_text += "\n".join(concerns)

            report.write_text(report_text, encoding="utf-8")
        except subprocess.TimeoutExpired:
            report.write_text("pip-licenses timed out after 60s\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 60000, "timeout")
        except Exception as exc:
            report.write_text(f"pip-licenses error: {exc}\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 0, str(exc))

        return StepResult(self.name, "OK", int((time.time() - started) * 1000), None)

    def _check_license_compatibility(self, output: str) -> list[str]:
        """Check for common license compatibility issues."""
        findings: list[str] = []

        # Simple heuristic: copyleft and permissive licenses mixed together.
        copyleft = any(tag in output for tag in ("GPL", "AGPL", "LGPL"))
        permissive = any(tag in output for tag in ("MIT", "Apache", "BSD"))

        if copyleft and permissive:
            findings.append(
                "⚠️ Potential GPL compatibility issue detected:\n"
                " - GPL/LGPL/AGPL licenses found alongside permissive licenses (MIT/Apache/BSD)\n"
                " - Review GPL obligations if redistributing\n"
                " - LGPL is generally compatible with permissive licenses\n"
                " - Consult legal counsel for production use"
            )

        # Packages whose license metadata could not be determined.
        if "UNKNOWN" in output:
            findings.append(
                "⚠️ Packages with UNKNOWN licenses detected:\n"
                " - Review manually before distribution\n"
                " - May indicate missing license metadata"
            )

        return findings
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Line-by-line profiling with line_profiler - Milestone 3 (v1.4.0)
|
|
3
|
+
"""
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import subprocess
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from .base import StepResult
|
|
12
|
+
from ..context import BundleContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class LineProfilerStep:
    """
    Line-by-line profiling using line_profiler (optional, requires manual annotation).

    This step is disabled by default and requires:
    1. line_profiler installed
    2. Functions decorated with @profile or listed in config

    Outputs:
    - logs/63_line_profile.txt: Line-by-line execution times
    """

    # Step identifier reported in StepResult entries.
    name: str = "line_profiler"

    def run(self, ctx: BundleContext) -> StepResult:
        """Run kernprof over the configured entry point and save the report.

        Returns SKIP when profiling is disabled, the tool is missing, or no
        usable entry point is configured; FAIL on timeout, a nonzero kernprof
        exit, or any unexpected error; OK otherwise.
        """
        start = time.time()

        # Only run if explicitly enabled
        if ctx.options.no_profile or not ctx.options.enable_line_profiler:
            return StepResult(self.name, "SKIP", 0, "line profiler not enabled")

        # Check if line_profiler is installed
        if not ctx.tools.line_profiler:
            return StepResult(self.name, "SKIP", 0, "line_profiler not installed")

        # Check if entry point exists
        entry_point = ctx.options.profile_entry_point
        if not entry_point:
            return StepResult(self.name, "SKIP", 0, "no entry point specified")

        # Relative entry points are resolved against the project root.
        target_path = Path(entry_point)
        if not target_path.is_absolute():
            target_path = ctx.root / entry_point

        if not target_path.exists():
            return StepResult(self.name, "SKIP", 0, f"entry point not found: {entry_point}")

        ctx.emit(f" Running line profiler on {target_path.name}")
        ctx.emit(" Note: Functions must be decorated with @profile")

        try:
            # Run line_profiler via kernprof
            result = subprocess.run(
                [
                    str(ctx.tools.line_profiler),
                    "-l",  # Line-by-line
                    "-v",  # Verbose output
                    str(target_path)
                ],
                cwd=ctx.root,
                capture_output=True,
                text=True,
                timeout=300  # 5 minute timeout
            )

            # Write output
            output_file = ctx.workdir / "logs" / "63_line_profile.txt"
            output_file.parent.mkdir(parents=True, exist_ok=True)

            with output_file.open("w") as f:
                f.write("=" * 70 + "\n")
                f.write("LINE-BY-LINE PROFILING (line_profiler)\n")
                f.write("=" * 70 + "\n\n")

                if result.returncode == 0:
                    f.write(result.stdout)
                    # kernprof may emit warnings on stderr even on success;
                    # keep them in the report for context.
                    if result.stderr:
                        f.write("\n\nWarnings/Errors:\n")
                        f.write(result.stderr)
                else:
                    # A nonzero exit commonly means no @profile decorations;
                    # include usage instructions plus the raw output.
                    f.write("Line profiling failed or no functions decorated with @profile\n\n")
                    f.write("To use line_profiler:\n")
                    f.write("1. Install: pip install line_profiler\n")
                    f.write("2. Decorate functions with @profile\n")
                    f.write("3. Specify entry point: --profile-entry-point path/to/script.py\n")
                    f.write("4. Enable: --enable-line-profiler\n\n")
                    f.write("STDOUT:\n")
                    f.write(result.stdout)
                    f.write("\n\nSTDERR:\n")
                    f.write(result.stderr)

            elapsed = int((time.time() - start) * 1000)
            if result.returncode == 0:
                return StepResult(self.name, "OK", elapsed)
            else:
                return StepResult(self.name, "FAIL", elapsed, f"exit {result.returncode}")

        except subprocess.TimeoutExpired:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, "timeout")
        except Exception as e:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, str(e))
|