gwc-pybundle 1.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gwc-pybundle might be problematic. Click here for more details.

Files changed (55) hide show
  1. gwc_pybundle-1.4.5.dist-info/METADATA +876 -0
  2. gwc_pybundle-1.4.5.dist-info/RECORD +55 -0
  3. gwc_pybundle-1.4.5.dist-info/WHEEL +5 -0
  4. gwc_pybundle-1.4.5.dist-info/entry_points.txt +2 -0
  5. gwc_pybundle-1.4.5.dist-info/licenses/LICENSE.md +25 -0
  6. gwc_pybundle-1.4.5.dist-info/top_level.txt +1 -0
  7. pybundle/__init__.py +0 -0
  8. pybundle/__main__.py +4 -0
  9. pybundle/cli.py +365 -0
  10. pybundle/context.py +362 -0
  11. pybundle/doctor.py +148 -0
  12. pybundle/filters.py +178 -0
  13. pybundle/manifest.py +77 -0
  14. pybundle/packaging.py +45 -0
  15. pybundle/policy.py +132 -0
  16. pybundle/profiles.py +340 -0
  17. pybundle/roadmap_model.py +42 -0
  18. pybundle/roadmap_scan.py +295 -0
  19. pybundle/root_detect.py +14 -0
  20. pybundle/runner.py +163 -0
  21. pybundle/steps/__init__.py +26 -0
  22. pybundle/steps/bandit.py +72 -0
  23. pybundle/steps/base.py +20 -0
  24. pybundle/steps/compileall.py +76 -0
  25. pybundle/steps/context_expand.py +272 -0
  26. pybundle/steps/copy_pack.py +293 -0
  27. pybundle/steps/coverage.py +101 -0
  28. pybundle/steps/cprofile_step.py +155 -0
  29. pybundle/steps/dependency_sizes.py +120 -0
  30. pybundle/steps/duplication.py +94 -0
  31. pybundle/steps/error_refs.py +204 -0
  32. pybundle/steps/handoff_md.py +167 -0
  33. pybundle/steps/import_time.py +165 -0
  34. pybundle/steps/interrogate.py +84 -0
  35. pybundle/steps/license_scan.py +96 -0
  36. pybundle/steps/line_profiler.py +108 -0
  37. pybundle/steps/memory_profile.py +173 -0
  38. pybundle/steps/mutation_testing.py +136 -0
  39. pybundle/steps/mypy.py +60 -0
  40. pybundle/steps/pip_audit.py +45 -0
  41. pybundle/steps/pipdeptree.py +61 -0
  42. pybundle/steps/pylance.py +562 -0
  43. pybundle/steps/pytest.py +66 -0
  44. pybundle/steps/radon.py +121 -0
  45. pybundle/steps/repro_md.py +161 -0
  46. pybundle/steps/rg_scans.py +78 -0
  47. pybundle/steps/roadmap.py +153 -0
  48. pybundle/steps/ruff.py +111 -0
  49. pybundle/steps/shell.py +74 -0
  50. pybundle/steps/slow_tests.py +170 -0
  51. pybundle/steps/test_flakiness.py +172 -0
  52. pybundle/steps/tree.py +116 -0
  53. pybundle/steps/unused_deps.py +112 -0
  54. pybundle/steps/vulture.py +83 -0
  55. pybundle/tools.py +63 -0
@@ -0,0 +1,121 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess # nosec B404 - Required for tool execution, paths validated
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..tools import which
11
+
12
+
13
+ def _repo_has_py_files(root: Path) -> bool:
14
+ """Fast check if there are Python files to scan."""
15
+ for p in root.rglob("*.py"):
16
+ parts = set(p.parts)
17
+ if (
18
+ ".venv" not in parts
19
+ and "__pycache__" not in parts
20
+ and "node_modules" not in parts
21
+ and "dist" not in parts
22
+ and "build" not in parts
23
+ and "artifacts" not in parts
24
+ ):
25
+ return True
26
+ return False
27
+
28
+
29
@dataclass
class RadonStep:
    """Run radon cyclomatic-complexity and maintainability-index scans.

    Both reports are written into one log file under the bundle workdir.
    Missing tool or a repo with no Python files yields SKIP; radon errors
    or a timeout yield FAIL.
    """

    name: str = "radon"
    target: str = "."  # scanned path, relative to ctx.root
    outfile: str = "logs/51_radon_complexity.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        start = time.time()
        out = ctx.workdir / self.outfile
        out.parent.mkdir(parents=True, exist_ok=True)

        radon = which("radon")
        if not radon:
            out.write_text(
                "radon not found; skipping (pip install radon)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing radon")

        if not _repo_has_py_files(ctx.root):
            out.write_text("no .py files detected; skipping radon\n", encoding="utf-8")
            return StepResult(self.name, "SKIP", 0, "no python files")

        target_path = ctx.root / self.target

        # Cyclomatic complexity command.
        # BUG FIX: the previous "-nc" flag did NOT mean "no color" — radon's
        # argparse reads it as "-n c", i.e. "only show blocks ranked C or
        # worse", which silently hid all A/B-rated results. Radon does not
        # colorize piped output, so no color flag is needed at all.
        cmd_cc = [
            radon,
            "cc",
            str(target_path),
            "-s",  # show complexity score
            "-a",  # show average complexity
        ]

        # Maintainability index command ("-nc" dropped for the same reason).
        cmd_mi = [
            radon,
            "mi",
            str(target_path),
            "-s",  # show maintainability index value
        ]

        try:
            # Collect both metrics in one output file.
            with out.open("w", encoding="utf-8") as f:
                f.write("=" * 70 + "\n")
                f.write("CYCLOMATIC COMPLEXITY\n")
                f.write("=" * 70 + "\n\n")

                result_cc = subprocess.run(  # nosec B603 - Using full path from which()
                    cmd_cc,
                    cwd=ctx.root,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    timeout=120,
                )
                f.write(result_cc.stdout)

                f.write("\n\n")
                f.write("=" * 70 + "\n")
                f.write("MAINTAINABILITY INDEX\n")
                f.write("=" * 70 + "\n\n")

                result_mi = subprocess.run(  # nosec B603 - Using full path from which()
                    cmd_mi,
                    cwd=ctx.root,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    timeout=120,
                )
                f.write(result_mi.stdout)

            # NOTE(review): duration is milliseconds here, while sibling steps
            # record whole seconds, and status is "OK" vs their "PASS" —
            # confirm the runner treats these uniformly before normalizing.
            elapsed = int((time.time() - start) * 1000)

            # Radon returns 0 on success for both subcommands.
            if result_cc.returncode == 0 and result_mi.returncode == 0:
                return StepResult(self.name, "OK", elapsed, None)
            return StepResult(
                self.name,
                "FAIL",
                elapsed,
                f"exit cc:{result_cc.returncode} mi:{result_mi.returncode}",
            )
        except subprocess.TimeoutExpired:
            # The "with" block already closed the file; overwrite with a marker.
            out.write_text("radon timed out after 120s\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 120000, "timeout")
        except Exception as e:
            out.write_text(f"radon error: {e}\n", encoding="utf-8")
            return StepResult(self.name, "FAIL", 0, str(e))
@@ -0,0 +1,161 @@
1
+ from __future__ import annotations
2
+
3
+ import platform
4
+ import sys
5
+ import time
6
+ from dataclasses import dataclass
7
+ from pathlib import Path
8
+
9
+ from .base import StepResult
10
+ from ..context import BundleContext
11
+ from ..tools import which
12
+
13
+
14
@dataclass
class ReproMarkdownStep:
    """Generate a top-level REPRO.md describing the bundle.

    The document records the environment snapshot, detected tooling, the
    steps executed so far (best-effort), and an inventory of produced
    artifacts, so the bundle can be understood and reproduced elsewhere.
    """

    name: str = "generate REPRO.md"
    outfile: str = "REPRO.md"  # written relative to ctx.workdir

    def run(self, ctx: BundleContext) -> StepResult:
        """Build and write the markdown guide; always returns PASS."""
        start = time.time()
        repro = ctx.workdir / self.outfile

        # ---- tool detection ----
        tool_names = [
            "python",
            "pip",
            "git",
            "ruff",
            "mypy",
            "pytest",
            "rg",
            "zip",
            "tar",
        ]
        detected = {t: which(t) for t in tool_names}

        # Prefer ctx.tools.python if you have it
        if getattr(ctx, "tools", None) and getattr(ctx.tools, "python", None):
            detected["python"] = ctx.tools.python

        # ---- file inventory (what actually exists) ----
        def list_txt(dirpath: Path) -> list[str]:
            # Only *.txt artifacts are listed; paths are workdir-relative.
            if not dirpath.is_dir():
                return []
            return sorted(
                str(p.relative_to(ctx.workdir)) for p in dirpath.rglob("*.txt")
            )

        logs_list = list_txt(ctx.logdir)
        meta_list = list_txt(ctx.metadir)

        # Also include key top-level files if present
        top_files = []
        for name in [
            "RUN_LOG.txt",
            "SUMMARY.json",
            "error_files_from_logs.txt",
            "error_refs_count.txt",
        ]:
            p = ctx.workdir / name
            if p.exists():
                top_files.append(name)

        # ---- step summary (best-effort, never crash) ----
        results = getattr(ctx, "results", [])
        ctx.results = results  # ensure it's set for future steps

        summary_lines = []
        for r in results:
            note = f" ({r.note})" if getattr(r, "note", "") else ""
            summary_lines.append(f"- **{r.name}**: {r.status}{note}")

        # ---- environment ----
        pyver = sys.version.split()[0]
        plat = platform.platform()
        profile = ctx.profile_name
        utc_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

        # ---- build markdown ----
        def fmt_tool(t: str) -> str:
            # One bullet per tool: resolved path when found, cross otherwise.
            path = detected.get(t)
            return f"- `{t}`: ✅ `{path}`" if path else f"- `{t}`: ❌ not found"

        md = []
        md += ["# Reproduction Guide", ""]
        md += [
            "This bundle captures diagnostic outputs and the minimum relevant project context",
            "to reproduce issues reliably on another system.",
            "",
            "## Overview",
            f"- Profile: `{profile}`",
            f"- Generated (UTC): `{utc_now}`",
            f"- Project root: `{ctx.root}`",
            "",
            "## Environment Snapshot",
            f"- OS: `{plat}`",
            f"- Python: `{pyver}`",
            "",
            "## Tools Detected",
            *[fmt_tool(t) for t in tool_names],
            "",
        ]

        if summary_lines:
            md += ["## Steps Executed", *summary_lines, ""]

        md += [
            "## How to Reproduce",
            "",
            "From the project root:",
            "",
            "```bash",
            f"python -m pybundle run {profile}",
            "```",
            "",
            "Re-run individual tools (if installed):",
            "",
            "```bash",
            "python -m compileall .",
            "ruff check .",
            "ruff format --check .",
            "mypy .",
            "pytest -q",
            "```",
            "",
            "## Produced Artifacts",
            "",
        ]

        if top_files:
            md += ["### Top-level", *[f"- `{p}`" for p in top_files], ""]

        # NOTE(review): the non-empty branches omit the trailing "" that the
        # empty branches include, so blank-line spacing differs slightly
        # between the two cases — confirm this is intended.
        md += (
            ["### logs/", *(f"- `{p}`" for p in logs_list)]
            if logs_list
            else ["### logs/", "- (none)", ""]
        )
        md += (
            ["", "### meta/", *(f"- `{p}`" for p in meta_list)]
            if meta_list
            else ["", "### meta/", "- (none)"]
        )

        md += [
            "",
            "## Context Packs",
            "",
            "- `src/_error_refs/` – files directly referenced by tool output",
            "- `src/_error_context/` – related imports + pytest glue (conftest/__init__) + configs",
            "",
            "## Notes",
            "",
            "- Non-zero exits from linters/tests are recorded for diagnosis; bundle creation continues.",
            "- Missing tools typically produce SKIP logs rather than failing the bundle.",
            "",
        ]

        repro.write_text("\n".join(md) + "\n", encoding="utf-8")

        # Duration in whole seconds, matching the other steps in this package.
        dur = int(time.time() - start)
        return StepResult(self.name, "PASS", dur, "")
@@ -0,0 +1,78 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess # nosec B404 - Required for tool execution, paths validated
4
+ import time
5
+ from dataclasses import dataclass
6
+
7
+ from .base import StepResult
8
+ from ..context import BundleContext
9
+ from ..tools import which
10
+
11
+
12
@dataclass
class RipgrepScanStep:
    """Run one ripgrep pattern scan and record the (redacted) output.

    The step always reports PASS: scans gather information, they do not
    enforce policy. Missing ripgrep yields SKIP.
    """

    name: str
    pattern: str
    outfile: str
    target: str = "."  # directory or file
    extra_args: list[str] | None = None

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        log_path = ctx.workdir / self.outfile
        log_path.parent.mkdir(parents=True, exist_ok=True)

        rg = which("rg")
        if not rg:
            log_path.write_text(
                "rg (ripgrep) not found; skipping (install ripgrep)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing rg")

        # -n line numbers, --no-heading keeps it grep-like, -S smart case can be handy
        cmd = [rg, "-n", "--no-heading", "-S"]
        cmd.extend(self.extra_args or [])
        cmd.extend([self.pattern, self.target])
        header = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"

        proc = subprocess.run(  # nosec B603
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )
        # rg exit codes: 0 = matches found, 1 = no matches (not an error!),
        # 2 = actual error. All three are recorded, none fail the bundle.
        body = header + (proc.stdout or "")
        if proc.stderr:
            body += "\n" + proc.stderr
        log_path.write_text(ctx.redact_text(body), encoding="utf-8")

        elapsed = int(time.time() - started)
        notes = {2: "rg error (exit=2) recorded", 1: "no matches"}
        # Always PASS; we’re collecting info, not enforcing policy (yet).
        return StepResult(self.name, "PASS", elapsed, notes.get(proc.returncode, ""))
56
+
57
+
58
def default_rg_steps(target: str = ".") -> list[RipgrepScanStep]:
    """Return the standard trio of ripgrep hygiene scans for *target*.

    Covers TODO/FIXME/HACK markers, stray print() calls, and broad/bare
    except clauses.
    """
    specs = [
        ("rg TODO/FIXME/HACK", r"TODO|FIXME|HACK", "logs/40_rg_todos.txt"),
        ("rg print(", r"^\s*print\(", "logs/41_rg_prints.txt"),
        (
            "rg except patterns",
            r"except\s+Exception|except\s*:",
            "logs/42_rg_bare_excepts.txt",
        ),
    ]
    return [
        RipgrepScanStep(name=step_name, pattern=pattern, outfile=outfile, target=target)
        for step_name, pattern, outfile in specs
    ]
@@ -0,0 +1,153 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import time
5
+ from dataclasses import dataclass
6
+ from typing import Any, Protocol
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..policy import AIContextPolicy
11
+ from ..roadmap_scan import build_roadmap # keep only if you actually use it
12
+
13
+
14
class RoadmapGraph(Protocol):
    """Structural type for the graph object returned by ``build_roadmap``.

    Declares only the members this module actually reads. Element types are
    opaque here: nodes expose at least ``.lang``; edges ``.src``/``.dst``;
    entrypoints ``.node``/``.reason``/``.confidence``.
    """

    entrypoints: list[Any]  # detected entry nodes, rendered capped at 50
    nodes: list[Any]  # graph nodes; .lang is aggregated into the summary
    edges: list[Any]  # directed edges used to build the Mermaid adjacency
    stats: dict[str, Any]  # counters rendered verbatim into the report

    # Previously missing: RoadmapStep.run() serializes the graph via
    # to_dict(), so conforming implementations must provide it.
    def to_dict(self) -> dict[str, Any]:
        """Return the graph as JSON-serializable plain data."""
        ...
19
+
20
+
21
@dataclass
class RoadmapStep:
    """Build a project roadmap (dependency graph) and render it.

    Writes three artifacts under the bundle workdir:
      - ``out_json``: the full graph as JSON (``graph.to_dict()``)
      - ``out_md``: human-readable markdown with a Mermaid diagram
      - ``meta/71_roadmap_summary.json``: compact summary (languages,
        capped entrypoint list, stats)
    """

    name: str = "roadmap (project map)"
    out_md: str = "meta/70_roadmap.md"
    out_json: str = "meta/70_roadmap.json"
    # Optional explicit include dirs (relative to ctx.root); overrides policy.
    include: list[str] | None = None
    # Scan policy; a default AIContextPolicy is constructed when None.
    policy: AIContextPolicy | None = None

    def run(self, ctx: BundleContext) -> StepResult:
        """Scan the tree, write the JSON/markdown artifacts, report PASS."""
        start = time.time()

        policy = self.policy or AIContextPolicy()

        # Include dirs: explicit override wins; otherwise policy candidates (with fallback)
        if self.include:
            include_dirs = [
                ctx.root / p for p in self.include if (ctx.root / p).exists()
            ]
            if not include_dirs:
                include_dirs = [ctx.root]
        else:
            include_dirs = policy.include_dir_candidates(
                ctx.root
            )  # includes fallback to [root]

        exclude_dirs = set(policy.exclude_dirs)

        graph = build_roadmap(
            root=ctx.root,
            include_dirs=include_dirs,
            exclude_dirs=exclude_dirs,
            max_files=policy.roadmap_max_files,
        )

        # JSON
        out_json_path = ctx.workdir / self.out_json
        out_json_path.parent.mkdir(parents=True, exist_ok=True)
        out_json_path.write_text(
            json.dumps(graph.to_dict(), indent=2), encoding="utf-8"
        )

        # Markdown (policy-driven Mermaid knobs)
        out_md_path = ctx.workdir / self.out_md
        out_md_path.parent.mkdir(parents=True, exist_ok=True)
        out_md_path.write_text(self._render_md(graph, policy), encoding="utf-8")

        # Compact machine-readable summary; entrypoints capped at 50.
        langs = sorted({n.lang for n in graph.nodes if getattr(n, "lang", None)})
        summary = {
            "languages": langs,
            "entrypoints": [ep.node for ep in graph.entrypoints[:50]],
            "stats": graph.stats,
        }
        (ctx.workdir / "meta" / "71_roadmap_summary.json").write_text(
            json.dumps(summary, indent=2), encoding="utf-8"
        )

        dur = int(time.time() - start)
        note = f"nodes={len(graph.nodes)} edges={len(graph.edges)} entrypoints={len(graph.entrypoints)}"
        return StepResult(self.name, "PASS", dur, note)

    def _render_md(self, graph: RoadmapGraph, policy: AIContextPolicy) -> str:
        """Render the markdown report: entrypoints, Mermaid map, stats, notes."""
        depth = policy.roadmap_mermaid_depth
        max_edges = policy.roadmap_mermaid_max_edges

        lines: list[str] = []
        lines.append("# Project Roadmap")
        lines.append("")
        lines.append("## Entrypoints")
        if not graph.entrypoints:
            lines.append("- (none detected)")
        else:
            # Cap matches the summary JSON's 50-entry cap.
            for ep in graph.entrypoints[:50]:
                lines.append(
                    f"- `{ep.node}` — {ep.reason} (confidence {ep.confidence}/3)"
                )
        lines.append("")
        lines.append("## High-level map")
        lines.append("```mermaid")
        lines.append("flowchart LR")
        lines.extend(
            self._render_mermaid_bfs(graph, max_depth=depth, max_edges=max_edges)
        )
        lines.append("```")
        lines.append("")
        lines.append("## Stats")
        # Sorted keys keep the report deterministic across runs.
        for k in sorted(graph.stats.keys()):
            lines.append(f"- **{k}**: {graph.stats[k]}")
        lines.append("")
        lines.append("## Notes")
        lines.append(
            "- Destinations like `py:...`, `js:...`, `rs:...` are dependency specs (not resolved to paths yet)."
        )
        lines.append(
            "- This is designed to be deterministic and readable, not a perfect compiler-grade call graph."
        )
        lines.append("")
        return "\n".join(lines)

    def _render_mermaid_bfs(
        self, graph: RoadmapGraph, max_depth: int = 2, max_edges: int = 180
    ) -> list[str]:
        """Emit Mermaid edge lines by breadth-first walk from the entrypoints.

        Output is capped at *max_edges* edges; nodes at depth >= *max_depth*
        are not expanded; duplicate edges are emitted only once.
        """
        from collections import deque

        # Adjacency list keyed by edge source.
        adj: dict[str, list[str]] = {}
        for e in graph.edges:
            adj.setdefault(e.src, []).append(e.dst)

        entry = [ep.node for ep in graph.entrypoints]
        if not entry:
            return [' A["(no entrypoints)"]']

        q = deque([(n, 0) for n in entry])
        seen_edges: set[tuple[str, str]] = set()
        shown: list[str] = []
        seen_nodes: set[str] = set(entry)

        while q and len(shown) < max_edges:
            node, depth = q.popleft()
            if depth >= max_depth:
                continue  # do not expand beyond the configured depth
            for dst in adj.get(node, []):
                key = (node, dst)
                if key in seen_edges:
                    continue
                seen_edges.add(key)
                shown.append(f' "{node}" --> "{dst}"')
                if dst not in seen_nodes:
                    seen_nodes.add(dst)
                    q.append((dst, depth + 1))
                if len(shown) >= max_edges:
                    break

        return shown or [' A["(no edges rendered)"]']
pybundle/steps/ruff.py ADDED
@@ -0,0 +1,111 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess # nosec B404 - Required for tool execution, paths validated
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+ from ..tools import which
11
+
12
+
13
+ def _repo_has_py_files(root: Path) -> bool:
14
+ # Fast-ish heuristic: look for any .py file in top couple levels
15
+ # (Avoid walking deep trees; ruff itself can handle it.)
16
+ for p in root.rglob("*.py"):
17
+ # ignore common junk dirs
18
+ parts = set(p.parts)
19
+ if (
20
+ ".venv" in parts
21
+ or "__pycache__" in parts
22
+ or ".mypy_cache" in parts
23
+ or ".ruff_cache" in parts
24
+ ):
25
+ continue
26
+ if (
27
+ "node_modules" in parts
28
+ or "dist" in parts
29
+ or "build" in parts
30
+ or "artifacts" in parts
31
+ ):
32
+ continue
33
+ return True
34
+ return False
35
+
36
+
37
@dataclass
class RuffCheckStep:
    """Run ``ruff check`` over the target and capture the (redacted) output.

    Lint findings never fail the bundle: a nonzero ruff exit is recorded in
    the note while the step itself reports PASS. Missing ruff or a repo
    without Python files yields SKIP.
    """

    name: str = "ruff check"
    target: str = "."
    outfile: str = "logs/31_ruff_check.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        log_path = ctx.workdir / self.outfile
        log_path.parent.mkdir(parents=True, exist_ok=True)

        ruff = which("ruff")
        if not ruff:
            log_path.write_text(
                "ruff not found; skipping (pip install ruff)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing ruff")

        if not _repo_has_py_files(ctx.root):
            log_path.write_text(
                "no .py files detected; skipping ruff check\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no python files")

        cmd = [ruff, "check", self.target]
        header = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"

        proc = subprocess.run(  # nosec B603
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )
        report = header + (proc.stdout or "")
        if proc.stderr:
            report += "\n" + proc.stderr
        log_path.write_text(ctx.redact_text(report), encoding="utf-8")

        elapsed = int(time.time() - started)
        # ruff nonzero = lint failures; that’s *valuable*, but for bundling we record it.
        if proc.returncode == 0:
            note = ""
        else:
            note = f"exit={proc.returncode} (lint findings)"
        return StepResult(self.name, "PASS", elapsed, note)
74
+
75
+
76
@dataclass
class RuffFormatCheckStep:
    """Run ``ruff format --check`` and capture the (redacted) output.

    Formatting drift never fails the bundle: a nonzero exit is recorded in
    the note while the step reports PASS. Missing ruff or a repo without
    Python files yields SKIP.
    """

    name: str = "ruff format --check"
    target: str = "."
    outfile: str = "logs/32_ruff_format_check.txt"

    def run(self, ctx: BundleContext) -> StepResult:
        started = time.time()
        log_path = ctx.workdir / self.outfile
        log_path.parent.mkdir(parents=True, exist_ok=True)

        ruff = which("ruff")
        if not ruff:
            log_path.write_text(
                "ruff not found; skipping (pip install ruff)\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "missing ruff")

        if not _repo_has_py_files(ctx.root):
            log_path.write_text(
                "no .py files detected; skipping ruff format check\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, "no python files")

        cmd = [ruff, "format", "--check", self.target]
        header = f"## PWD: {ctx.root}\n## CMD: {' '.join(cmd)}\n\n"

        proc = subprocess.run(  # nosec B603
            cmd, cwd=str(ctx.root), text=True, capture_output=True, check=False
        )
        report = header + (proc.stdout or "")
        if proc.stderr:
            report += "\n" + proc.stderr
        log_path.write_text(ctx.redact_text(report), encoding="utf-8")

        elapsed = int(time.time() - started)
        if proc.returncode == 0:
            note = ""
        else:
            note = f"exit={proc.returncode} (format drift)"
        return StepResult(self.name, "PASS", elapsed, note)
@@ -0,0 +1,74 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess # nosec B404 - Required for tool execution, paths validated
4
+ import time
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from .base import StepResult
9
+ from ..context import BundleContext
10
+
11
+
12
@dataclass
class ShellStep:
    """Run an arbitrary command and capture its (redacted) output to a file.

    With ``allow_fail=True`` (the default) a nonzero exit or an exception is
    recorded in the note but the step still reports PASS; with False those
    become FAIL. When ``require_cmd`` names a tool missing from ctx.tools,
    the step is SKIPped.
    """

    name: str
    outfile_rel: str
    cmd: list[str]
    cwd_is_root: bool = True  # run inside ctx.root instead of the current cwd
    allow_fail: bool = True  # when False, nonzero exit / exception => FAIL
    require_cmd: str | None = None  # skip unless this tool exists on ctx.tools

    @property
    def out_rel(self) -> str:
        """Alias for ``outfile_rel`` (interface parity with other steps)."""
        return self.outfile_rel

    def run(self, ctx: BundleContext) -> StepResult:
        log_path = ctx.workdir / self.outfile_rel

        if self.require_cmd and not getattr(ctx.tools, self.require_cmd, None):
            log_path.parent.mkdir(parents=True, exist_ok=True)
            log_path.write_text(
                f"{self.require_cmd} not found; skipping\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, f"missing {self.require_cmd}")

        # Resolve the first element to the detected tool path when it matches.
        # NOTE(review): "python"/"python3" are also replaced by the required
        # tool's path — presumably ctx.tools stores the venv interpreter when
        # require_cmd is "python"; confirm no caller pairs require_cmd="X"
        # with a cmd that starts with "python".
        argv = list(self.cmd)
        if argv and self.require_cmd:
            resolved = getattr(ctx.tools, self.require_cmd, None)
            if resolved and argv[0] in [self.require_cmd, "python", "python3"]:
                argv[0] = resolved

        log_path.parent.mkdir(parents=True, exist_ok=True)

        begin = time.time()
        header = (
            f"## PWD: {ctx.root if self.cwd_is_root else Path.cwd()}\n"
            f"## CMD: {' '.join(argv)}\n\n"
        )

        try:
            proc = subprocess.run(  # nosec B603
                argv,
                cwd=str(ctx.root) if self.cwd_is_root else None,
                text=True,
                capture_output=True,
                check=False,
            )
            body = header + (proc.stdout or "")
            if proc.stderr:
                body += "\n" + proc.stderr
            log_path.write_text(ctx.redact_text(body), encoding="utf-8")
            failed = proc.returncode != 0
            status = "FAIL" if failed and not self.allow_fail else "PASS"
            note = f"exit={proc.returncode}" if failed else ""
        except Exception as e:
            log_path.write_text(
                ctx.redact_text(header + f"\nEXCEPTION: {e}\n"), encoding="utf-8"
            )
            status = "FAIL" if not self.allow_fail else "PASS"
            note = str(e)

        return StepResult(self.name, status, int(time.time() - begin), note)