gwc-pybundle 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of gwc-pybundle might be problematic. Click here for more details.
- gwc_pybundle-2.1.2.dist-info/METADATA +903 -0
- gwc_pybundle-2.1.2.dist-info/RECORD +82 -0
- gwc_pybundle-2.1.2.dist-info/WHEEL +5 -0
- gwc_pybundle-2.1.2.dist-info/entry_points.txt +2 -0
- gwc_pybundle-2.1.2.dist-info/licenses/LICENSE.md +25 -0
- gwc_pybundle-2.1.2.dist-info/top_level.txt +1 -0
- pybundle/__init__.py +0 -0
- pybundle/__main__.py +4 -0
- pybundle/cli.py +546 -0
- pybundle/context.py +404 -0
- pybundle/doctor.py +148 -0
- pybundle/filters.py +228 -0
- pybundle/manifest.py +77 -0
- pybundle/packaging.py +45 -0
- pybundle/policy.py +132 -0
- pybundle/profiles.py +454 -0
- pybundle/roadmap_model.py +42 -0
- pybundle/roadmap_scan.py +328 -0
- pybundle/root_detect.py +14 -0
- pybundle/runner.py +180 -0
- pybundle/steps/__init__.py +26 -0
- pybundle/steps/ai_context.py +791 -0
- pybundle/steps/api_docs.py +219 -0
- pybundle/steps/asyncio_analysis.py +358 -0
- pybundle/steps/bandit.py +72 -0
- pybundle/steps/base.py +20 -0
- pybundle/steps/blocking_call_detection.py +291 -0
- pybundle/steps/call_graph.py +219 -0
- pybundle/steps/compileall.py +76 -0
- pybundle/steps/config_docs.py +319 -0
- pybundle/steps/config_validation.py +302 -0
- pybundle/steps/container_image.py +294 -0
- pybundle/steps/context_expand.py +272 -0
- pybundle/steps/copy_pack.py +293 -0
- pybundle/steps/coverage.py +101 -0
- pybundle/steps/cprofile_step.py +166 -0
- pybundle/steps/dependency_sizes.py +136 -0
- pybundle/steps/django_checks.py +214 -0
- pybundle/steps/dockerfile_lint.py +282 -0
- pybundle/steps/dockerignore.py +311 -0
- pybundle/steps/duplication.py +103 -0
- pybundle/steps/env_completeness.py +269 -0
- pybundle/steps/env_var_usage.py +253 -0
- pybundle/steps/error_refs.py +204 -0
- pybundle/steps/event_loop_patterns.py +280 -0
- pybundle/steps/exception_patterns.py +190 -0
- pybundle/steps/fastapi_integration.py +250 -0
- pybundle/steps/flask_debugging.py +312 -0
- pybundle/steps/git_analytics.py +315 -0
- pybundle/steps/handoff_md.py +176 -0
- pybundle/steps/import_time.py +175 -0
- pybundle/steps/interrogate.py +106 -0
- pybundle/steps/license_scan.py +96 -0
- pybundle/steps/line_profiler.py +117 -0
- pybundle/steps/link_validation.py +287 -0
- pybundle/steps/logging_analysis.py +233 -0
- pybundle/steps/memory_profile.py +176 -0
- pybundle/steps/migration_history.py +336 -0
- pybundle/steps/mutation_testing.py +141 -0
- pybundle/steps/mypy.py +103 -0
- pybundle/steps/orm_optimization.py +316 -0
- pybundle/steps/pip_audit.py +45 -0
- pybundle/steps/pipdeptree.py +62 -0
- pybundle/steps/pylance.py +562 -0
- pybundle/steps/pytest.py +66 -0
- pybundle/steps/query_pattern_analysis.py +334 -0
- pybundle/steps/radon.py +161 -0
- pybundle/steps/repro_md.py +161 -0
- pybundle/steps/rg_scans.py +78 -0
- pybundle/steps/roadmap.py +153 -0
- pybundle/steps/ruff.py +117 -0
- pybundle/steps/secrets_detection.py +235 -0
- pybundle/steps/security_headers.py +309 -0
- pybundle/steps/shell.py +74 -0
- pybundle/steps/slow_tests.py +178 -0
- pybundle/steps/sqlalchemy_validation.py +269 -0
- pybundle/steps/test_flakiness.py +184 -0
- pybundle/steps/tree.py +116 -0
- pybundle/steps/type_coverage.py +277 -0
- pybundle/steps/unused_deps.py +211 -0
- pybundle/steps/vulture.py +167 -0
- pybundle/tools.py +63 -0
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Step: Security Headers Analysis
|
|
3
|
+
Detect security headers in Flask, FastAPI, and Django applications.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import re
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Dict, List, Set, Tuple, Optional
|
|
9
|
+
|
|
10
|
+
from .base import Step, StepResult
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SecurityHeadersStep(Step):
    """Analyze security headers in web applications.

    Detects which web frameworks the project uses (Flask, FastAPI, Django),
    scans the project's Python sources for security-header implementations,
    and writes a human-readable report to ``logs/122_security_headers.txt``
    inside the bundle workdir.
    """

    name = "security headers"

    # Security headers mapped to the concern they address (shown in the report).
    SECURITY_HEADERS = {
        "Content-Security-Policy": "CSP",
        "X-Content-Type-Options": "MIME Type",
        "X-Frame-Options": "Clickjacking",
        "Strict-Transport-Security": "HSTS",
        "X-XSS-Protection": "XSS",
        "Referrer-Policy": "Referrer",
        "Permissions-Policy": "Permissions",
        "Access-Control-Allow-Origin": "CORS",
    }

    # Directory components that must never be scanned (virtualenvs, caches).
    _SKIP_PARTS = ("venv", ".venv", "env", "__pycache__", "site-packages")

    def _iter_source_files(self, root: Path):
        """Yield ``(path, content)`` for every readable project ``.py`` file.

        Shared by framework detection and header scanning so the skip-dir and
        read logic lives in one place instead of being duplicated. Files that
        cannot be read are skipped silently (this is a best-effort analysis).
        """
        for py_file in root.rglob("*.py"):
            if any(part in py_file.parts for part in self._SKIP_PARTS):
                continue
            try:
                yield py_file, py_file.read_text(encoding="utf-8", errors="ignore")
            except (OSError, UnicodeDecodeError):
                continue

    def run(self, ctx: "BundleContext") -> StepResult:  # type: ignore[name-defined]
        """Analyze security headers in the codebase and write the report.

        Returns a StepResult with status "OK"; the step never fails the
        bundle run, it only reports findings.
        """
        import time

        start = time.time()
        root = ctx.root

        frameworks = self._detect_frameworks(root)
        headers_found = self._find_security_headers(root, frameworks)

        # --- Report header + framework detection -------------------------
        lines = [
            "=" * 80,
            "SECURITY HEADERS ANALYSIS REPORT",
            "=" * 80,
            "",
            "FRAMEWORK DETECTION",
            "=" * 80,
            "",
        ]

        if frameworks:
            for framework, details in frameworks.items():
                lines.append(f"✓ {framework}")
                if details.get("files"):
                    lines.append(f"  Found in: {', '.join(details['files'][:3])}")
                    if len(details["files"]) > 3:
                        lines.append(f"  ... and {len(details['files']) - 3} more")
                lines.append("")
        else:
            lines.append("⊘ No web frameworks detected")
            lines.append("")

        # --- Security headers summary ------------------------------------
        # NOTE: _find_security_headers always returns a dict, so the report
        # always includes the implemented/missing counts (an earlier
        # "no headers found" branch was unreachable and has been removed).
        lines.extend(
            [
                "SECURITY HEADERS IMPLEMENTATION",
                "=" * 80,
                "",
            ]
        )

        implemented = headers_found["implemented"]
        missing = headers_found["missing"]

        lines.append(
            f"Headers implemented: {len(implemented)}/{len(self.SECURITY_HEADERS)}"
        )
        lines.append("")

        if implemented:
            lines.append("✓ IMPLEMENTED HEADERS:")
            for header, details in implemented.items():
                lines.append(f"  - {header}")
                files = details.get("files", [])
                for file_path in files[:2]:
                    lines.append(f"    Found in: {file_path}")
                if len(files) > 2:
                    lines.append(f"    ... and {len(files) - 2} more files")
            lines.append("")

        if missing:
            lines.append(f"⚠ MISSING HEADERS ({len(missing)}):")
            for header in sorted(missing):
                purpose = self.SECURITY_HEADERS.get(header, "")
                lines.append(f"  - {header} ({purpose})")
            lines.append("")

        # --- Implementation patterns --------------------------------------
        if headers_found.get("patterns"):
            lines.extend(
                [
                    "IMPLEMENTATION PATTERNS",
                    "=" * 80,
                    "",
                ]
            )
            for pattern_type, locations in headers_found["patterns"].items():
                lines.append(f"{pattern_type}:")
                for location in locations[:5]:
                    lines.append(f"  - {location}")
                if len(locations) > 5:
                    lines.append(f"  ... and {len(locations) - 5} more")
                lines.append("")

        # --- Recommendations ----------------------------------------------
        lines.extend(
            [
                "=" * 80,
                "RECOMMENDATIONS",
                "=" * 80,
                "",
            ]
        )

        if frameworks:
            # Show framework-specific advice for the first detected framework.
            framework_name = list(frameworks.keys())[0]
            lines.append(f"For {framework_name}:")
            lines.append("")

            if framework_name == "Flask":
                lines.append("  @app.after_request")
                lines.append("  def set_security_headers(response):")
                lines.append(
                    "      response.headers['X-Content-Type-Options'] = 'nosniff'"
                )
                lines.append(
                    "      response.headers['X-Frame-Options'] = 'SAMEORIGIN'"
                )
                lines.append(
                    "      response.headers['Strict-Transport-Security'] = 'max-age=31536000; includeSubDomains'"
                )
                lines.append("      return response")

            elif framework_name == "FastAPI":
                lines.append("  from fastapi.middleware.cors import CORSMiddleware")
                lines.append("  from fastapi.middleware.trustedhost import TrustedHostMiddleware")
                lines.append("")
                lines.append("  app.add_middleware(TrustedHostMiddleware, ...)")
                lines.append("  app.add_middleware(CORSMiddleware, ...)")

            elif framework_name == "Django":
                lines.append("  # In settings.py:")
                lines.append("  SECURE_BROWSER_XSS_FILTER = True")
                lines.append("  SECURE_CONTENT_SECURITY_POLICY = {...}")
                lines.append("  SESSION_COOKIE_SECURE = True")
                lines.append("  CSRF_COOKIE_SECURE = True")

            lines.append("")

        lines.append("  1. Implement missing security headers")
        lines.append("  2. Set appropriate CSP directives for your application")
        lines.append("  3. Enable HSTS in production")
        lines.append("  4. Configure CORS for necessary origins only")
        lines.append("  5. Use security header testing tools (securityheaders.com)")
        lines.append("")

        # --- Write report --------------------------------------------------
        output = "\n".join(lines)
        dest = ctx.workdir / "logs" / "122_security_headers.txt"
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_text(output, encoding="utf-8")

        elapsed = int(time.time() - start)
        return StepResult(self.name, "OK", elapsed, "")

    def _detect_frameworks(self, root: Path) -> Dict[str, dict]:
        """Detect web frameworks used by the project.

        Returns a mapping of framework name -> ``{"files": [relative paths]}``.
        A single file may count toward several frameworks.
        """
        frameworks: Dict[str, dict] = {}

        # (framework name, import substrings that indicate its use)
        markers = (
            ("Flask", ("from flask import", "import flask")),
            ("FastAPI", ("from fastapi import", "import fastapi")),
            ("Django", ("from django", "import django")),
        )

        # Single pass over the tree; _iter_source_files handles skips/reads.
        for py_file, content in self._iter_source_files(root):
            rel = str(py_file.relative_to(root))
            for name, needles in markers:
                if any(needle in content for needle in needles):
                    frameworks.setdefault(name, {"files": []})["files"].append(rel)

        return frameworks

    def _find_security_headers(self, root: Path, frameworks: Dict) -> Dict:
        """Find security-header implementations across the project.

        ``frameworks`` is accepted for interface compatibility but is not
        currently consulted; the scan is purely textual.

        Returns ``{"implemented": {header: {"files": [...]}},
                   "missing": [sorted header names],
                   "patterns": {pattern name: [files]}}``.
        """
        implemented: Dict[str, dict] = {}
        patterns: Dict[str, list] = {
            "Flask after_request": [],
            "FastAPI Middleware": [],
            "Django middleware": [],
            "Direct response.headers": [],
            "Custom header functions": [],
        }

        # Compile once instead of re-searching the pattern for every file.
        custom_fn_re = re.compile(
            r"def\s+\w*header\w*|def\s+\w*security\w*", re.IGNORECASE
        )

        for py_file, content in self._iter_source_files(root):
            rel_path = str(py_file.relative_to(root))

            # Header names may appear quoted (literal header set) or as the
            # UPPER_SNAKE form used by settings-style constants.
            for header in self.SECURITY_HEADERS:
                variants = (
                    f"'{header}'",
                    f'"{header}"',
                    header.replace("-", "_").upper(),
                )
                if any(variant in content for variant in variants):
                    files = implemented.setdefault(header, {"files": []})["files"]
                    if rel_path not in files:
                        files.append(rel_path)

            # Detect how headers are being set.
            if "@app.after_request" in content:
                patterns["Flask after_request"].append(rel_path)
            if "CORSMiddleware" in content or "TrustedHostMiddleware" in content:
                patterns["FastAPI Middleware"].append(rel_path)
            if "MIDDLEWARE" in content and "django" in content:
                patterns["Django middleware"].append(rel_path)
            if "response.headers" in content or "set_header" in content:
                patterns["Direct response.headers"].append(rel_path)
            if custom_fn_re.search(content):
                patterns["Custom header functions"].append(rel_path)

        missing = set(self.SECURITY_HEADERS) - set(implemented)

        return {
            "implemented": implemented,
            "missing": sorted(missing),
            # Dedupe pattern hit lists and drop empty pattern buckets.
            "patterns": {k: list(set(v)) for k, v in patterns.items() if v},
        }
|
pybundle/steps/shell.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import subprocess # nosec B404 - Required for tool execution, paths validated
|
|
4
|
+
import time
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
from .base import StepResult
|
|
9
|
+
from ..context import BundleContext
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class ShellStep:
    """Generic step that runs one external command and captures its output.

    The command's stdout/stderr are redacted via the bundle context and
    written to ``outfile_rel`` under the workdir. When ``require_cmd`` is
    set and the tool is not available, the step records a SKIP instead of
    running. With ``allow_fail`` (the default), a nonzero exit code still
    reports PASS but keeps the exit code in the result note.
    """

    name: str
    outfile_rel: str
    cmd: list[str]
    cwd_is_root: bool = True
    allow_fail: bool = True
    require_cmd: str | None = None

    @property
    def out_rel(self) -> str:
        """Relative path of the output file inside the workdir."""
        return self.outfile_rel

    def run(self, ctx: BundleContext) -> StepResult:
        out_path = ctx.workdir / self.outfile_rel

        # Missing required tool: leave a note file and skip.
        if self.require_cmd and not getattr(ctx.tools, self.require_cmd, None):
            out_path.parent.mkdir(parents=True, exist_ok=True)
            out_path.write_text(
                f"{self.require_cmd} not found; skipping\n", encoding="utf-8"
            )
            return StepResult(self.name, "SKIP", 0, f"missing {self.require_cmd}")

        # Swap the command's first token for the resolved tool path when it
        # names the required tool or a bare python interpreter.
        command = list(self.cmd)
        if command and self.require_cmd:
            resolved = getattr(ctx.tools, self.require_cmd, None)
            if resolved and command[0] in [self.require_cmd, "python", "python3"]:
                command[0] = resolved

        out_path.parent.mkdir(parents=True, exist_ok=True)

        started = time.time()
        banner = (
            f"## PWD: {ctx.root if self.cwd_is_root else Path.cwd()}\n"
            f"## CMD: {' '.join(command)}\n\n"
        )

        try:
            proc = subprocess.run(  # nosec B603
                command,
                cwd=str(ctx.root) if self.cwd_is_root else None,
                text=True,
                capture_output=True,
                check=False,
            )
            pieces = [banner, proc.stdout or ""]
            if proc.stderr:
                pieces.append("\n" + proc.stderr)
            out_path.write_text(ctx.redact_text("".join(pieces)), encoding="utf-8")

            if proc.returncode == 0:
                status, note = "PASS", ""
            else:
                # Nonzero exit: only a failure when failures are not tolerated.
                status = "PASS" if self.allow_fail else "FAIL"
                note = f"exit={proc.returncode}"
        except Exception as exc:
            out_path.write_text(
                ctx.redact_text(banner + f"\nEXCEPTION: {exc}\n"), encoding="utf-8"
            )
            status = "PASS" if self.allow_fail else "FAIL"
            note = str(exc)

        elapsed = int(time.time() - started)
        return StepResult(self.name, status, elapsed, note)
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Slow test identification - Milestone 4 (v1.4.1)
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import subprocess
|
|
8
|
+
import time
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
|
|
11
|
+
from .base import StepResult
|
|
12
|
+
from ..context import BundleContext
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class SlowTestsStep:
    """
    Identify slow tests by parsing pytest duration output.

    Outputs:
    - logs/71_slow_tests.txt: Ranked list of slowest tests
    """

    name: str = "slow_tests"

    # Duration phases pytest reports; all three count toward "slow".
    _PHASES = ("s call", "s setup", "s teardown")

    def run(self, ctx: BundleContext) -> StepResult:
        """Run pytest with --durations=0 and report tests over the threshold.

        SKIPs when pytest or a tests/ directory is missing; FAILs only on a
        timeout or unexpected exception.
        """
        start = time.time()

        if not ctx.tools.pytest:
            return StepResult(self.name, "SKIP", 0, "pytest not found")

        tests_dir = ctx.root / "tests"
        if not tests_dir.is_dir():
            return StepResult(self.name, "SKIP", 0, "no tests/ directory")

        threshold = ctx.options.slow_test_threshold
        ctx.emit(f" Identifying tests slower than {threshold}s...")

        output_file = ctx.workdir / "logs" / "71_slow_tests.txt"
        output_file.parent.mkdir(parents=True, exist_ok=True)

        try:
            # Run pytest with duration reporting for every test phase.
            result = subprocess.run(
                [
                    str(ctx.tools.pytest),
                    "-v",
                    "--durations=0",  # Show all durations
                    "--tb=no",  # No traceback to keep output clean
                ],
                cwd=ctx.root,
                capture_output=True,
                text=True,
                timeout=180,  # 3 minute timeout
            )

            slow_tests = self._parse_durations(result.stdout, threshold)

            # Explicit encoding: the report contains non-ASCII characters
            # ("✅"), which would crash on Windows default codepages.
            with output_file.open("w", encoding="utf-8") as f:
                f.write("=" * 70 + "\n")
                f.write(f"SLOW TEST IDENTIFICATION (threshold: {threshold}s)\n")
                f.write("=" * 70 + "\n\n")

                if slow_tests:
                    f.write(
                        f"Found {len(slow_tests)} test(s) exceeding {threshold}s:\n\n"
                    )

                    # Sort by duration (descending)
                    slow_tests.sort(key=lambda x: x[1], reverse=True)

                    f.write(f"{'Duration (s)':<15} {'Test'}\n")
                    f.write("-" * 70 + "\n")
                    for test_name, duration in slow_tests:
                        f.write(f"{duration:>13.2f}  {test_name}\n")

                    f.write("\n" + "=" * 70 + "\n")
                    f.write("STATISTICS:\n")
                    f.write("-" * 70 + "\n")
                    total_time = sum(d for _, d in slow_tests)
                    avg_time = total_time / len(slow_tests)
                    f.write(f"Total slow test time: {total_time:.2f}s\n")
                    f.write(f"Average slow test time: {avg_time:.2f}s\n")
                    f.write(
                        f"Slowest test: {slow_tests[0][1]:.2f}s ({slow_tests[0][0]})\n"
                    )

                    f.write("\n" + "=" * 70 + "\n")
                    f.write("RECOMMENDATIONS:\n")
                    f.write("- Profile slow tests to identify bottlenecks\n")
                    f.write("- Consider using pytest fixtures to reduce setup time\n")
                    f.write("- Mock external dependencies (DB, API calls, file I/O)\n")
                    f.write("- Use pytest-xdist for parallel test execution\n")
                else:
                    f.write(f"✅ No tests exceed {threshold}s threshold!\n\n")

                    # Still show the longest tests for context.
                    all_tests = self._parse_all_durations(result.stdout)
                    if all_tests:
                        all_tests.sort(key=lambda x: x[1], reverse=True)
                        f.write("Top 10 longest tests (all under threshold):\n\n")
                        f.write(f"{'Duration (s)':<15} {'Test'}\n")
                        f.write("-" * 70 + "\n")
                        for test_name, duration in all_tests[:10]:
                            f.write(f"{duration:>13.2f}  {test_name}\n")

                # Append the raw pytest durations section for reference.
                f.write("\n" + "=" * 70 + "\n")
                f.write("RAW PYTEST DURATION OUTPUT:\n")
                f.write("-" * 70 + "\n")
                # Only start copying at the actual durations header. The old
                # check also matched any "=== " banner (e.g. the session
                # start line), which dumped the entire pytest log here.
                in_duration_section = False
                for line in result.stdout.splitlines():
                    if "slowest durations" in line.lower():
                        in_duration_section = True
                    if in_duration_section:
                        f.write(line + "\n")

            elapsed = int((time.time() - start) * 1000)

            if slow_tests:
                return StepResult(
                    self.name, "OK", elapsed, f"{len(slow_tests)} slow tests"
                )
            return StepResult(self.name, "OK", elapsed)

        except subprocess.TimeoutExpired:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, "timeout")
        except Exception as e:
            elapsed = int((time.time() - start) * 1000)
            return StepResult(self.name, "FAIL", elapsed, str(e))

    @staticmethod
    def _parse_line(line: str) -> "tuple[str, float] | None":
        """Parse one '0.52s call path::test' line into (test, seconds).

        Returns None when the line does not look like a duration entry.
        """
        parts = line.split()
        if len(parts) >= 3:
            try:
                return parts[-1], float(parts[0].rstrip("s"))
            except ValueError:
                pass
        return None

    def _parse_durations(
        self, output: str, threshold: float
    ) -> list[tuple[str, float]]:
        """Parse pytest --durations output for tests exceeding threshold.

        Considers call, setup, and teardown phases.
        """
        slow_tests = []
        for line in output.splitlines():
            if any(phase in line for phase in self._PHASES):
                parsed = self._parse_line(line)
                if parsed is not None and parsed[1] >= threshold:
                    slow_tests.append(parsed)
        return slow_tests

    def _parse_all_durations(self, output: str) -> list[tuple[str, float]]:
        """Parse all test call durations (setup/teardown excluded)."""
        all_tests = []
        for line in output.splitlines():
            if "s call" in line:
                parsed = self._parse_line(line)
                if parsed is not None:
                    all_tests.append(parsed)
        return all_tests
|