gwc-pybundle 2.1.2 (gwc_pybundle-2.1.2-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of gwc-pybundle has been flagged as a potentially problematic release.

Files changed (82)
  1. gwc_pybundle-2.1.2.dist-info/METADATA +903 -0
  2. gwc_pybundle-2.1.2.dist-info/RECORD +82 -0
  3. gwc_pybundle-2.1.2.dist-info/WHEEL +5 -0
  4. gwc_pybundle-2.1.2.dist-info/entry_points.txt +2 -0
  5. gwc_pybundle-2.1.2.dist-info/licenses/LICENSE.md +25 -0
  6. gwc_pybundle-2.1.2.dist-info/top_level.txt +1 -0
  7. pybundle/__init__.py +0 -0
  8. pybundle/__main__.py +4 -0
  9. pybundle/cli.py +546 -0
  10. pybundle/context.py +404 -0
  11. pybundle/doctor.py +148 -0
  12. pybundle/filters.py +228 -0
  13. pybundle/manifest.py +77 -0
  14. pybundle/packaging.py +45 -0
  15. pybundle/policy.py +132 -0
  16. pybundle/profiles.py +454 -0
  17. pybundle/roadmap_model.py +42 -0
  18. pybundle/roadmap_scan.py +328 -0
  19. pybundle/root_detect.py +14 -0
  20. pybundle/runner.py +180 -0
  21. pybundle/steps/__init__.py +26 -0
  22. pybundle/steps/ai_context.py +791 -0
  23. pybundle/steps/api_docs.py +219 -0
  24. pybundle/steps/asyncio_analysis.py +358 -0
  25. pybundle/steps/bandit.py +72 -0
  26. pybundle/steps/base.py +20 -0
  27. pybundle/steps/blocking_call_detection.py +291 -0
  28. pybundle/steps/call_graph.py +219 -0
  29. pybundle/steps/compileall.py +76 -0
  30. pybundle/steps/config_docs.py +319 -0
  31. pybundle/steps/config_validation.py +302 -0
  32. pybundle/steps/container_image.py +294 -0
  33. pybundle/steps/context_expand.py +272 -0
  34. pybundle/steps/copy_pack.py +293 -0
  35. pybundle/steps/coverage.py +101 -0
  36. pybundle/steps/cprofile_step.py +166 -0
  37. pybundle/steps/dependency_sizes.py +136 -0
  38. pybundle/steps/django_checks.py +214 -0
  39. pybundle/steps/dockerfile_lint.py +282 -0
  40. pybundle/steps/dockerignore.py +311 -0
  41. pybundle/steps/duplication.py +103 -0
  42. pybundle/steps/env_completeness.py +269 -0
  43. pybundle/steps/env_var_usage.py +253 -0
  44. pybundle/steps/error_refs.py +204 -0
  45. pybundle/steps/event_loop_patterns.py +280 -0
  46. pybundle/steps/exception_patterns.py +190 -0
  47. pybundle/steps/fastapi_integration.py +250 -0
  48. pybundle/steps/flask_debugging.py +312 -0
  49. pybundle/steps/git_analytics.py +315 -0
  50. pybundle/steps/handoff_md.py +176 -0
  51. pybundle/steps/import_time.py +175 -0
  52. pybundle/steps/interrogate.py +106 -0
  53. pybundle/steps/license_scan.py +96 -0
  54. pybundle/steps/line_profiler.py +117 -0
  55. pybundle/steps/link_validation.py +287 -0
  56. pybundle/steps/logging_analysis.py +233 -0
  57. pybundle/steps/memory_profile.py +176 -0
  58. pybundle/steps/migration_history.py +336 -0
  59. pybundle/steps/mutation_testing.py +141 -0
  60. pybundle/steps/mypy.py +103 -0
  61. pybundle/steps/orm_optimization.py +316 -0
  62. pybundle/steps/pip_audit.py +45 -0
  63. pybundle/steps/pipdeptree.py +62 -0
  64. pybundle/steps/pylance.py +562 -0
  65. pybundle/steps/pytest.py +66 -0
  66. pybundle/steps/query_pattern_analysis.py +334 -0
  67. pybundle/steps/radon.py +161 -0
  68. pybundle/steps/repro_md.py +161 -0
  69. pybundle/steps/rg_scans.py +78 -0
  70. pybundle/steps/roadmap.py +153 -0
  71. pybundle/steps/ruff.py +117 -0
  72. pybundle/steps/secrets_detection.py +235 -0
  73. pybundle/steps/security_headers.py +309 -0
  74. pybundle/steps/shell.py +74 -0
  75. pybundle/steps/slow_tests.py +178 -0
  76. pybundle/steps/sqlalchemy_validation.py +269 -0
  77. pybundle/steps/test_flakiness.py +184 -0
  78. pybundle/steps/tree.py +116 -0
  79. pybundle/steps/type_coverage.py +277 -0
  80. pybundle/steps/unused_deps.py +211 -0
  81. pybundle/steps/vulture.py +167 -0
  82. pybundle/tools.py +63 -0
pybundle/steps/query_pattern_analysis.py
@@ -0,0 +1,334 @@
+"""
+Step: Query Pattern Analysis
+Analyze database query patterns and detect performance issues.
+"""
+
+import re
+import ast
+from pathlib import Path
+from typing import Dict, List, Set, Tuple, Optional
+
+from .base import Step, StepResult
+
+
+class QueryPatternAnalysisStep(Step):
+    """Analyze database query patterns for performance issues."""
+
+    name = "query pattern analysis"
+
+    def run(self, ctx: "BundleContext") -> StepResult:  # type: ignore[name-defined]
+        """Analyze query patterns in codebase."""
+        import time
+
+        start = time.time()
+
+        root = ctx.root
+
+        # Analyze query patterns
+        patterns = self._analyze_query_patterns(root)
+
+        # Generate report
+        lines = [
+            "=" * 80,
+            "QUERY PATTERN ANALYSIS REPORT",
+            "=" * 80,
+            "",
+        ]
+
+        # Summary
+        lines.extend(
+            [
+                "SUMMARY",
+                "=" * 80,
+                "",
+                f"ORM framework detected: {patterns['orm_type']}",
+                f"Model definitions found: {patterns['model_count']}",
+                f"Query patterns analyzed: {patterns['query_count']}",
+                "",
+            ]
+        )
+
+        if not patterns["orm_type"]:
+            lines.extend(
+                [
+                    "⊘ No ORM detected",
+                    "",
+                    "This project does not appear to use an ORM.",
+                    "If this is incorrect, ensure ORM imports are in analyzed files.",
+                    "",
+                ]
+            )
+        else:
+            # ORM details
+            lines.extend(
+                [
+                    f"{patterns['orm_type'].upper()} ANALYSIS",
+                    "=" * 80,
+                    "",
+                ]
+            )
+
+            lines.append(f"Models/Entities found: {patterns['model_count']}")
+            if patterns["models"]:
+                for model in sorted(patterns["models"])[:15]:
+                    lines.append(f" - {model}")
+                if len(patterns["models"]) > 15:
+                    lines.append(f" ... and {len(patterns['models']) - 15} more")
+
+            lines.append("")
+
+            # N+1 query patterns
+            if patterns["suspected_n_plus_1"]:
+                lines.extend(
+                    [
+                        "SUSPECTED N+1 QUERY PATTERNS",
+                        "=" * 80,
+                        "",
+                    ]
+                )
+
+                for issue in patterns["suspected_n_plus_1"][:15]:
+                    lines.append(f"File: {issue['file']}")
+                    lines.append(f"Line: {issue['line']}")
+                    lines.append(f"Pattern: {issue['pattern']}")
+                    if issue.get("context"):
+                        context = issue["context"].strip()
+                        if len(context) > 70:
+                            context = context[:67] + "..."
+                        lines.append(f"Context: {context}")
+                    lines.append("")
+
+                if len(patterns["suspected_n_plus_1"]) > 15:
+                    lines.append(
+                        f"... and {len(patterns['suspected_n_plus_1']) - 15} more suspected N+1 patterns"
+                    )
+                    lines.append("")
+            else:
+                lines.append("✓ No obvious N+1 query patterns detected")
+                lines.append("")
+
+            # Lazy loading patterns
+            if patterns["lazy_loading"]:
+                lines.extend(
+                    [
+                        "LAZY LOADING PATTERNS (Potential Performance Issues)",
+                        "=" * 80,
+                        "",
+                    ]
+                )
+
+                for issue in patterns["lazy_loading"][:10]:
+                    lines.append(f"File: {issue['file']}")
+                    lines.append(f"Line: {issue['line']}")
+                    lines.append(f"Type: {issue['type']}")
+                    lines.append("")
+
+                if len(patterns["lazy_loading"]) > 10:
+                    lines.append(
+                        f"... and {len(patterns['lazy_loading']) - 10} more lazy loading patterns"
+                    )
+                    lines.append("")
+
+            # Relationship access
+            if patterns["relationship_access"]:
+                lines.extend(
+                    [
+                        "RELATIONSHIP ACCESS PATTERNS",
+                        "=" * 80,
+                        "",
+                    ]
+                )
+
+                lines.append(
+                    f"Foreign key accesses: {patterns['relationship_access'].get('foreign_keys', 0)}"
+                )
+                lines.append(
+                    f"Many-to-many accesses: {patterns['relationship_access'].get('many_to_many', 0)}"
+                )
+                lines.append(
+                    f"Reverse relationship accesses: {patterns['relationship_access'].get('reverse', 0)}"
+                )
+
+                lines.append("")
+
+        # Recommendations
+        lines.extend(
+            [
+                "=" * 80,
+                "RECOMMENDATIONS",
+                "=" * 80,
+                "",
+            ]
+        )
+
+        if patterns["orm_type"]:
+            if patterns["suspected_n_plus_1"]:
+                if patterns["orm_type"].lower() == "django":
+                    lines.append(" N+1 Query Fixes (Django):")
+                    lines.append(" - Use select_related() for ForeignKey relationships")
+                    lines.append(" - Use prefetch_related() for ManyToMany and reverse ForeignKey")
+                    lines.append("")
+                    lines.append(" Example:")
+                    lines.append(
+                        " users = User.objects.select_related('profile').prefetch_related('posts')"
+                    )
+                elif patterns["orm_type"].lower() == "sqlalchemy":
+                    lines.append(" N+1 Query Fixes (SQLAlchemy):")
+                    lines.append(" - Use joinedload() for eager loading")
+                    lines.append(" - Use contains_eager() with joins")
+                    lines.append(" - Use selectinload() for relationships")
+                    lines.append("")
+                    lines.append(" Example:")
+                    lines.append(
+                        " query.options(joinedload(User.posts)).all()"
+                    )
+
+                lines.append("")
+
+            if patterns["lazy_loading"]:
+                lines.append(" Lazy Loading Best Practices:")
+                lines.append(" - Load related objects within query, not after retrieval")
+                lines.append(" - Use batch loading for sets of objects")
+                lines.append(" - Consider caching for frequently accessed relationships")
+                lines.append("")
+
+            lines.append(" General Recommendations:")
+            lines.append(" - Use database query profiling (Django Debug Toolbar, etc.)")
+            lines.append(" - Review query execution plans (EXPLAIN)")
+            lines.append(" - Add indexes to frequently filtered columns")
+            lines.append(" - Monitor query count and execution time")
+        else:
+            lines.append(" - No ORM detected; static query analysis not applicable")
+            lines.append(" - If using raw SQL, consider adopting an ORM for consistency")
+
+        lines.append("")
+
+        # Write report
+        output = "\n".join(lines)
+        dest = ctx.workdir / "logs" / "140_query_patterns.txt"
+        dest.parent.mkdir(parents=True, exist_ok=True)
+        dest.write_text(output, encoding="utf-8")
+
+        elapsed = int(time.time() - start)
+        return StepResult(self.name, "OK", elapsed, "")
+
+    def _analyze_query_patterns(self, root: Path) -> Dict:
+        """Analyze query patterns in codebase."""
+        orm_type = None
+        models = set()
+        model_count = 0
+        query_count = 0
+        suspected_n_plus_1 = []
+        lazy_loading = []
+        relationship_access = {"foreign_keys": 0, "many_to_many": 0, "reverse": 0}
+
+        python_files = list(root.rglob("*.py"))
+
+        for py_file in python_files:
+            if any(
+                part in py_file.parts
+                for part in ["venv", ".venv", "env", "__pycache__", "site-packages"]
+            ):
+                continue
+
+            try:
+                source = py_file.read_text(encoding="utf-8", errors="ignore")
+                rel_path = str(py_file.relative_to(root))
+
+                # Detect ORM
+                if "from django.db import models" in source or "from django.db.models" in source:
+                    if not orm_type:
+                        orm_type = "Django"
+                elif (
+                    "from sqlalchemy" in source
+                    and "declarative_base" in source
+                ):
+                    if not orm_type:
+                        orm_type = "SQLAlchemy"
+                elif "from tortoise import fields" in source:
+                    if not orm_type:
+                        orm_type = "Tortoise ORM"
+
+                # Count models
+                for line in source.split("\n"):
+                    if re.search(r"class\s+(\w+)\s*\(.*Model.*\):", line):
+                        match = re.search(r"class\s+(\w+)\s*\(", line)
+                        if match:
+                            model_name = match.group(1)
+                            models.add(model_name)
+                            model_count += 1
+
+                # Detect query patterns
+                for line_num, line in enumerate(source.split("\n"), 1):
+                    # N+1 patterns
+                    if re.search(
+                        r"for\s+\w+\s+in\s+.*\.\s*(all|filter|get)\(\)",
+                        line,
+                    ):
+                        query_count += 1
+                        suspected_n_plus_1.append(
+                            {
+                                "file": rel_path,
+                                "line": line_num,
+                                "pattern": "Loop with query",
+                                "context": line,
+                            }
+                        )
+
+                    # Django patterns
+                    if "select_related" not in source and "for obj in" in line:
+                        if ".objects.all()" in source or ".objects.filter" in source:
+                            query_count += 1
+
+                    # Lazy loading patterns (accessing attributes after query)
+                    if (
+                        re.search(r"\.[\w_]+\s*(?:$|#)", line)
+                        and "select_related" not in line
+                        and "prefetch" not in line
+                    ):
+                        # Potential lazy loading
+                        if any(
+                            kw in line
+                            for kw in ["for ", ".all(", ".filter("]
+                        ):
+                            pass
+                        else:
+                            lazy_loading.append(
+                                {
+                                    "file": rel_path,
+                                    "line": line_num,
+                                    "type": "Potential lazy loading",
+                                }
+                            )
+
+                    # Relationship access patterns
+                    if re.search(r"\.\w+_set\.", line):  # Reverse FK (Django)
+                        relationship_access["reverse"] += 1
+                    elif re.search(r"\.objects\.through", line):  # Many-to-many
+                        relationship_access["many_to_many"] += 1
+                    elif re.search(r"\..*_id\b", line):  # FK access
+                        relationship_access["foreign_keys"] += 1
+
+            except (OSError, UnicodeDecodeError, SyntaxError):
+                continue
+
+        # Deduplicate and limit
+        suspected_n_plus_1 = list(
+            {(item["file"], item["line"]): item for item in suspected_n_plus_1}.values()
+        )
+        lazy_loading = list(
+            {(item["file"], item["line"]): item for item in lazy_loading}.values()
+        )
+
+        return {
+            "orm_type": orm_type,
+            "models": models,
+            "model_count": model_count,
+            "query_count": query_count,
+            "suspected_n_plus_1": suspected_n_plus_1,
+            "lazy_loading": lazy_loading,
+            "relationship_access": relationship_access,
+        }
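
The "loop with query" check above is a purely line-level heuristic: one regex applied to each source line, with no AST or ORM awareness. A minimal sketch (not part of the package) showing what that regex does and does not flag:

```python
import re

# Regex copied verbatim from _analyze_query_patterns above.
N_PLUS_1 = re.compile(r"for\s+\w+\s+in\s+.*\.\s*(all|filter|get)\(\)")

print(bool(N_PLUS_1.search("for user in User.objects.all():")))              # True: loop over a queryset
print(bool(N_PLUS_1.search("for p in Post.objects.filter(author=a):")))      # False: call arguments defeat the literal "()" in the pattern
print(bool(N_PLUS_1.search("qs = User.objects.select_related('profile')")))  # False: not a for-loop line
```

So calls with arguments, multi-line loops, and queries built outside the loop header are all missed; the report is best read as a list of candidates to inspect, not a complete N+1 audit.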
pybundle/steps/radon.py
@@ -0,0 +1,161 @@
+from __future__ import annotations
+
+import subprocess  # nosec B404 - Required for tool execution, paths validated
+import time
+from dataclasses import dataclass
+from pathlib import Path
+
+from .base import StepResult
+from ..context import BundleContext
+from ..tools import which
+
+
+def _repo_has_py_files(root: Path) -> bool:
+    """Fast check if there are Python files to scan."""
+    for p in root.rglob("*.py"):
+        parts = set(p.parts)
+        if (
+            ".venv" not in parts
+            and "__pycache__" not in parts
+            and "node_modules" not in parts
+            and "dist" not in parts
+            and "build" not in parts
+            and "artifacts" not in parts
+        ):
+            return True
+    return False
+
+
+@dataclass
+class RadonStep:
+    name: str = "radon"
+    target: str = "."
+    outfile: str = "logs/51_radon_complexity.txt"
+
+    def run(self, ctx: BundleContext) -> StepResult:
+        start = time.time()
+        out = ctx.workdir / self.outfile
+        out.parent.mkdir(parents=True, exist_ok=True)
+
+        radon = which("radon")
+        if not radon:
+            out.write_text(
+                "radon not found; skipping (pip install radon)\n", encoding="utf-8"
+            )
+            return StepResult(self.name, "SKIP", 0, "missing radon")
+
+        if not _repo_has_py_files(ctx.root):
+            out.write_text("no .py files detected; skipping radon\n", encoding="utf-8")
+            return StepResult(self.name, "SKIP", 0, "no python files")
+
+        target_path = ctx.root / self.target
+
+        # Build exclude patterns to avoid scanning artifacts, venvs, caches
+        # CRITICAL: Radon scans everything by default, including prior pybundle runs
+        # Note: radon's --exclude matching is finicky; the list is mostly plain
+        # directory names, plus a few glob patterns for custom venv names
+        excludes = [
+            # Artifacts from prior pybundle runs (CRITICAL - prevents duplicate reports)
+            "artifacts",
+            # Virtual environments (all common patterns)
+            ".venv",
+            "venv",
+            "env",
+            ".env",
+            ".freeze-venv",
+            ".pybundle-venv",
+            # Also catch custom venv names with glob patterns
+            "*-venv",
+            "*_venv",
+            ".gaslog-venv",
+            # Caches
+            "__pycache__",
+            ".mypy_cache",
+            ".pytest_cache",
+            ".ruff_cache",
+            ".tox",
+            ".nox",
+            # Build outputs
+            "node_modules",
+            "dist",
+            "build",
+            "target",
+            # Version control
+            ".git",
+        ]
+
+        # Radon --exclude takes comma-separated patterns
+        exclude_arg = ",".join(excludes)
+
+        # Run cyclomatic complexity check
+        cmd_cc = [
+            radon,
+            "cc",
+            str(target_path),
+            "-s",  # Show complexity score
+            "-a",  # Average complexity
+            "-nc",
+            "--exclude",
+            exclude_arg,
+        ]
+
+        # Run maintainability index check
+        cmd_mi = [
+            radon,
+            "mi",
+            str(target_path),
+            "-s",  # Show maintainability index
+            "-nc",
+            "--exclude",
+            exclude_arg,
+        ]
+
+        try:
+            # Collect both metrics in one output file
+            with out.open("w", encoding="utf-8") as f:
+                f.write("=" * 70 + "\n")
+                f.write("CYCLOMATIC COMPLEXITY\n")
+                f.write("=" * 70 + "\n\n")
+
+                result_cc = subprocess.run(  # nosec B603 - Using full path from which()
+                    cmd_cc,
+                    cwd=ctx.root,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    text=True,
+                    timeout=120,
+                )
+                f.write(result_cc.stdout)
+
+                f.write("\n\n")
+                f.write("=" * 70 + "\n")
+                f.write("MAINTAINABILITY INDEX\n")
+                f.write("=" * 70 + "\n\n")
+
+                result_mi = subprocess.run(  # nosec B603 - Using full path from which()
+                    cmd_mi,
+                    cwd=ctx.root,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    text=True,
+                    timeout=120,
+                )
+                f.write(result_mi.stdout)
+
+            elapsed = int((time.time() - start) * 1000)
+
+            # Radon returns 0 on success
+            if result_cc.returncode == 0 and result_mi.returncode == 0:
+                return StepResult(self.name, "OK", elapsed, "")
+            else:
+                return StepResult(
+                    self.name,
+                    "FAIL",
+                    elapsed,
+                    f"exit cc:{result_cc.returncode} mi:{result_mi.returncode}",
+                )
+        except subprocess.TimeoutExpired:
+            out.write_text("radon timed out after 120s\n", encoding="utf-8")
+            return StepResult(self.name, "FAIL", 120000, "timeout")
+        except Exception as e:
+            out.write_text(f"radon error: {e}\n", encoding="utf-8")
+            return StepResult(self.name, "FAIL", 0, str(e))
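
Like the other modules under pybundle/steps/, RadonStep only needs a context exposing root and workdir, so it can be exercised outside the normal runner. A rough sketch, assuming the wheel is installed; the SimpleNamespace stand-in is hypothetical, since BundleContext's constructor lives in pybundle/context.py and is not part of this diff:

```python
from pathlib import Path
from types import SimpleNamespace

from pybundle.steps.radon import RadonStep

# Hypothetical stand-in for BundleContext with only the attributes run() reads.
ctx = SimpleNamespace(root=Path("."), workdir=Path("artifacts/bundle"))

result = RadonStep().run(ctx)
print(result.name, result.status)  # e.g. "radon SKIP" when radon is not installed
```

Whatever the outcome, run() writes its report (or a skip/error note) to workdir/logs/51_radon_complexity.txt, with the cyclomatic-complexity and maintainability-index sections separated by the banner lines it writes itself.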
pybundle/steps/repro_md.py
@@ -0,0 +1,161 @@
+from __future__ import annotations
+
+import platform
+import sys
+import time
+from dataclasses import dataclass
+from pathlib import Path
+
+from .base import StepResult
+from ..context import BundleContext
+from ..tools import which
+
+
+@dataclass
+class ReproMarkdownStep:
+    name: str = "generate REPRO.md"
+    outfile: str = "REPRO.md"
+
+    def run(self, ctx: BundleContext) -> StepResult:
+        start = time.time()
+        repro = ctx.workdir / self.outfile
+
+        # ---- tool detection ----
+        tool_names = [
+            "python",
+            "pip",
+            "git",
+            "ruff",
+            "mypy",
+            "pytest",
+            "rg",
+            "zip",
+            "tar",
+        ]
+        detected = {t: which(t) for t in tool_names}
+
+        # Prefer ctx.tools.python if available
+        if getattr(ctx, "tools", None) and getattr(ctx.tools, "python", None):
+            detected["python"] = ctx.tools.python
+
+        # ---- file inventory (what actually exists) ----
+        def list_txt(dirpath: Path) -> list[str]:
+            if not dirpath.is_dir():
+                return []
+            return sorted(
+                str(p.relative_to(ctx.workdir)) for p in dirpath.rglob("*.txt")
+            )
+
+        logs_list = list_txt(ctx.logdir)
+        meta_list = list_txt(ctx.metadir)
+
+        # Also include key top-level files if present
+        top_files = []
+        for name in [
+            "RUN_LOG.txt",
+            "SUMMARY.json",
+            "error_files_from_logs.txt",
+            "error_refs_count.txt",
+        ]:
+            p = ctx.workdir / name
+            if p.exists():
+                top_files.append(name)
+
+        # ---- step summary (best-effort, never crash) ----
+        results = getattr(ctx, "results", [])
+        ctx.results = results  # ensure it's set for future steps
+
+        summary_lines = []
+        for r in results:
+            note = f" ({r.note})" if getattr(r, "note", "") else ""
+            summary_lines.append(f"- **{r.name}**: {r.status}{note}")
+
+        # ---- environment ----
+        pyver = sys.version.split()[0]
+        plat = platform.platform()
+        profile = ctx.profile_name
+        utc_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+        # ---- build markdown ----
+        def fmt_tool(t: str) -> str:
+            path = detected.get(t)
+            return f"- `{t}`: ✅ `{path}`" if path else f"- `{t}`: ❌ not found"
+
+        md = []
+        md += ["# Reproduction Guide", ""]
+        md += [
+            "This bundle captures diagnostic outputs and the minimum relevant project context",
+            "to reproduce issues reliably on another system.",
+            "",
+            "## Overview",
+            f"- Profile: `{profile}`",
+            f"- Generated (UTC): `{utc_now}`",
+            f"- Project root: `{ctx.root}`",
+            "",
+            "## Environment Snapshot",
+            f"- OS: `{plat}`",
+            f"- Python: `{pyver}`",
+            "",
+            "## Tools Detected",
+            *[fmt_tool(t) for t in tool_names],
+            "",
+        ]
+
+        if summary_lines:
+            md += ["## Steps Executed", *summary_lines, ""]
+
+        md += [
+            "## How to Reproduce",
+            "",
+            "From the project root:",
+            "",
+            "```bash",
+            f"python -m pybundle run {profile}",
+            "```",
+            "",
+            "Re-run individual tools (if installed):",
+            "",
+            "```bash",
+            "python -m compileall .",
+            "ruff check .",
+            "ruff format --check .",
+            "mypy .",
+            "pytest -q",
+            "```",
+            "",
+            "## Produced Artifacts",
+            "",
+        ]
+
+        if top_files:
+            md += ["### Top-level", *[f"- `{p}`" for p in top_files], ""]
+
+        md += (
+            ["### logs/", *(f"- `{p}`" for p in logs_list)]
+            if logs_list
+            else ["### logs/", "- (none)", ""]
+        )
+        md += (
+            ["", "### meta/", *(f"- `{p}`" for p in meta_list)]
+            if meta_list
+            else ["", "### meta/", "- (none)"]
+        )
+
+        md += [
+            "",
+            "## Context Packs",
+            "",
+            "- `src/_error_refs/` – files directly referenced by tool output",
+            "- `src/_error_context/` – related imports + pytest glue (conftest/__init__) + configs",
+            "",
+            "## Notes",
+            "",
+            "- Non-zero exits from linters/tests are recorded for diagnosis; bundle creation continues.",
+            "- Missing tools typically produce SKIP logs rather than failing the bundle.",
+            "",
+        ]
+
+        repro.write_text("\n".join(md) + "\n", encoding="utf-8")
+
+        dur = int(time.time() - start)
+        return StepResult(self.name, "PASS", dur, "")
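
ReproMarkdownStep summarizes whatever earlier steps have appended to ctx.results, so it is clearly meant to run after them. A minimal sketch of that ordering, again with a hypothetical SimpleNamespace stand-in for the real BundleContext (whose constructor and profile names are defined elsewhere in the package):

```python
from pathlib import Path
from types import SimpleNamespace

from pybundle.steps.base import StepResult
from pybundle.steps.repro_md import ReproMarkdownStep

work = Path("artifacts/bundle")  # hypothetical output directory
work.mkdir(parents=True, exist_ok=True)

ctx = SimpleNamespace(
    root=Path("."),
    workdir=work,
    logdir=work / "logs",
    metadir=work / "meta",
    profile_name="default",  # hypothetical profile name
    tools=None,
    results=[StepResult("radon", "OK", 0, "")],  # pretend an earlier step already ran
)

ReproMarkdownStep().run(ctx)  # writes artifacts/bundle/REPRO.md with a "Steps Executed" section
```

In the published wheel, building the real context and sequencing the steps is presumably handled by pybundle/runner.py and pybundle/profiles.py; the stand-in above only mirrors the attributes this particular step reads.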