lucidscan-0.5.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidscan/__init__.py +12 -0
- lucidscan/bootstrap/__init__.py +26 -0
- lucidscan/bootstrap/paths.py +160 -0
- lucidscan/bootstrap/platform.py +111 -0
- lucidscan/bootstrap/validation.py +76 -0
- lucidscan/bootstrap/versions.py +119 -0
- lucidscan/cli/__init__.py +50 -0
- lucidscan/cli/__main__.py +8 -0
- lucidscan/cli/arguments.py +405 -0
- lucidscan/cli/commands/__init__.py +64 -0
- lucidscan/cli/commands/autoconfigure.py +294 -0
- lucidscan/cli/commands/help.py +69 -0
- lucidscan/cli/commands/init.py +656 -0
- lucidscan/cli/commands/list_scanners.py +59 -0
- lucidscan/cli/commands/scan.py +307 -0
- lucidscan/cli/commands/serve.py +142 -0
- lucidscan/cli/commands/status.py +84 -0
- lucidscan/cli/commands/validate.py +105 -0
- lucidscan/cli/config_bridge.py +152 -0
- lucidscan/cli/exit_codes.py +17 -0
- lucidscan/cli/runner.py +284 -0
- lucidscan/config/__init__.py +29 -0
- lucidscan/config/ignore.py +178 -0
- lucidscan/config/loader.py +431 -0
- lucidscan/config/models.py +316 -0
- lucidscan/config/validation.py +645 -0
- lucidscan/core/__init__.py +3 -0
- lucidscan/core/domain_runner.py +463 -0
- lucidscan/core/git.py +174 -0
- lucidscan/core/logging.py +34 -0
- lucidscan/core/models.py +207 -0
- lucidscan/core/streaming.py +340 -0
- lucidscan/core/subprocess_runner.py +164 -0
- lucidscan/detection/__init__.py +21 -0
- lucidscan/detection/detector.py +154 -0
- lucidscan/detection/frameworks.py +270 -0
- lucidscan/detection/languages.py +328 -0
- lucidscan/detection/tools.py +229 -0
- lucidscan/generation/__init__.py +15 -0
- lucidscan/generation/config_generator.py +275 -0
- lucidscan/generation/package_installer.py +330 -0
- lucidscan/mcp/__init__.py +20 -0
- lucidscan/mcp/formatter.py +510 -0
- lucidscan/mcp/server.py +297 -0
- lucidscan/mcp/tools.py +1049 -0
- lucidscan/mcp/watcher.py +237 -0
- lucidscan/pipeline/__init__.py +17 -0
- lucidscan/pipeline/executor.py +187 -0
- lucidscan/pipeline/parallel.py +181 -0
- lucidscan/plugins/__init__.py +40 -0
- lucidscan/plugins/coverage/__init__.py +28 -0
- lucidscan/plugins/coverage/base.py +160 -0
- lucidscan/plugins/coverage/coverage_py.py +454 -0
- lucidscan/plugins/coverage/istanbul.py +411 -0
- lucidscan/plugins/discovery.py +107 -0
- lucidscan/plugins/enrichers/__init__.py +61 -0
- lucidscan/plugins/enrichers/base.py +63 -0
- lucidscan/plugins/linters/__init__.py +26 -0
- lucidscan/plugins/linters/base.py +125 -0
- lucidscan/plugins/linters/biome.py +448 -0
- lucidscan/plugins/linters/checkstyle.py +393 -0
- lucidscan/plugins/linters/eslint.py +368 -0
- lucidscan/plugins/linters/ruff.py +498 -0
- lucidscan/plugins/reporters/__init__.py +45 -0
- lucidscan/plugins/reporters/base.py +30 -0
- lucidscan/plugins/reporters/json_reporter.py +79 -0
- lucidscan/plugins/reporters/sarif_reporter.py +303 -0
- lucidscan/plugins/reporters/summary_reporter.py +61 -0
- lucidscan/plugins/reporters/table_reporter.py +81 -0
- lucidscan/plugins/scanners/__init__.py +57 -0
- lucidscan/plugins/scanners/base.py +60 -0
- lucidscan/plugins/scanners/checkov.py +484 -0
- lucidscan/plugins/scanners/opengrep.py +464 -0
- lucidscan/plugins/scanners/trivy.py +492 -0
- lucidscan/plugins/test_runners/__init__.py +27 -0
- lucidscan/plugins/test_runners/base.py +111 -0
- lucidscan/plugins/test_runners/jest.py +381 -0
- lucidscan/plugins/test_runners/karma.py +481 -0
- lucidscan/plugins/test_runners/playwright.py +434 -0
- lucidscan/plugins/test_runners/pytest.py +598 -0
- lucidscan/plugins/type_checkers/__init__.py +27 -0
- lucidscan/plugins/type_checkers/base.py +106 -0
- lucidscan/plugins/type_checkers/mypy.py +355 -0
- lucidscan/plugins/type_checkers/pyright.py +313 -0
- lucidscan/plugins/type_checkers/typescript.py +280 -0
- lucidscan-0.5.12.dist-info/METADATA +242 -0
- lucidscan-0.5.12.dist-info/RECORD +91 -0
- lucidscan-0.5.12.dist-info/WHEEL +5 -0
- lucidscan-0.5.12.dist-info/entry_points.txt +34 -0
- lucidscan-0.5.12.dist-info/licenses/LICENSE +201 -0
- lucidscan-0.5.12.dist-info/top_level.txt +1 -0
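
A listing like the one above can be reproduced locally from the downloaded artifact with nothing but the Python standard library. This is a generic sketch, not part of lucidscan itself; the wheel filename below is assumed from the version shown.

```python
import zipfile

# Assumed local filename for the downloaded wheel.
WHEEL = "lucidscan-0.5.12-py3-none-any.whl"

# Enumerate every file shipped in the wheel with its uncompressed size.
with zipfile.ZipFile(WHEEL) as zf:
    for info in zf.infolist():
        print(f"{info.filename}  ({info.file_size} bytes)")
```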
lucidscan/plugins/test_runners/pytest.py
@@ -0,0 +1,598 @@
"""pytest test runner plugin.

pytest is a full-featured testing framework for Python.
https://docs.pytest.org/
"""

from __future__ import annotations

import hashlib
import json
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional

import defusedxml.ElementTree as ElementTree  # type: ignore[import-untyped]

from lucidscan.core.logging import get_logger
from lucidscan.core.models import (
    ScanContext,
    Severity,
    ToolDomain,
    UnifiedIssue,
)
from lucidscan.core.subprocess_runner import run_with_streaming
from lucidscan.plugins.test_runners.base import TestRunnerPlugin, TestResult

LOGGER = get_logger(__name__)


class PytestRunner(TestRunnerPlugin):
    """pytest test runner plugin for Python test execution."""

    def __init__(self, project_root: Optional[Path] = None):
        """Initialize PytestRunner.

        Args:
            project_root: Optional project root for finding pytest installation.
        """
        self._project_root = project_root

    @property
    def name(self) -> str:
        """Plugin identifier."""
        return "pytest"

    @property
    def languages(self) -> List[str]:
        """Supported languages."""
        return ["python"]

    def get_version(self) -> str:
        """Get pytest version.

        Returns:
            Version string or 'unknown' if unable to determine.
        """
        try:
            binary = self.ensure_binary()
            result = subprocess.run(
                [str(binary), "--version"],
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="replace",
                timeout=30,
            )
            # Output is like "pytest 8.0.0"
            if result.returncode == 0:
                parts = result.stdout.strip().split()
                if len(parts) >= 2:
                    return parts[1]
        except Exception:
            pass
        return "unknown"

    def ensure_binary(self) -> Path:
        """Ensure pytest is available.

        Checks for pytest in:
        1. Project's .venv/bin/pytest
        2. System PATH

        Returns:
            Path to pytest binary.

        Raises:
            FileNotFoundError: If pytest is not installed.
        """
        # Check project venv first
        if self._project_root:
            venv_pytest = self._project_root / ".venv" / "bin" / "pytest"
            if venv_pytest.exists():
                return venv_pytest

        # Check system PATH
        pytest_path = shutil.which("pytest")
        if pytest_path:
            return Path(pytest_path)

        raise FileNotFoundError(
            "pytest is not installed. Install it with: pip install pytest"
        )

    def run_tests(self, context: ScanContext) -> TestResult:
        """Run pytest on the specified paths.

        Attempts to use pytest-json-report for JSON output.
        Falls back to JUnit XML if JSON plugin not available.

        Args:
            context: Scan context with paths and configuration.

        Returns:
            TestResult with test statistics and issues for failures.
        """
        try:
            binary = self.ensure_binary()
        except FileNotFoundError as e:
            LOGGER.warning(str(e))
            return TestResult()

        # Check if pytest-json-report is available
        if self._has_json_report_plugin(binary, context.project_root):
            return self._run_with_json_report(binary, context)
        else:
            return self._run_with_junit_xml(binary, context)

    def _has_json_report_plugin(self, binary: Path, project_root: Path) -> bool:
        """Check if pytest-json-report plugin is available.

        Args:
            binary: Path to pytest binary.
            project_root: Project root directory.

        Returns:
            True if pytest-json-report is installed.
        """
        try:
            subprocess.run(
                [str(binary), "--co", "-q"],
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="replace",
                cwd=str(project_root),
                timeout=60,
            )
            # Check if json-report option is available
            help_result = subprocess.run(
                [str(binary), "--help"],
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="replace",
                cwd=str(project_root),
                timeout=30,
            )
            return "--json-report" in help_result.stdout
        except Exception:
            return False

    def _run_with_json_report(
        self,
        binary: Path,
        context: ScanContext,
    ) -> TestResult:
        """Run pytest with JSON report output.

        Args:
            binary: Path to pytest binary.
            context: Scan context with paths and configuration.

        Returns:
            TestResult with test statistics and issues.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            report_file = Path(tmpdir) / "report.json"

            cmd = [
                str(binary),
                "--tb=short",
                "-v",
                "--json-report",
                f"--json-report-file={report_file}",
            ]

            # Add paths to test
            paths = [str(p) for p in context.paths] if context.paths else ["."]
            cmd.extend(paths)

            LOGGER.debug(f"Running: {' '.join(cmd)}")

            try:
                run_with_streaming(
                    cmd=cmd,
                    cwd=context.project_root,
                    tool_name="pytest",
                    stream_handler=context.stream_handler,
                    timeout=600,
                )
            except subprocess.TimeoutExpired:
                LOGGER.warning("pytest timed out after 600 seconds")
                return TestResult()
            except Exception as e:
                LOGGER.error(f"Failed to run pytest: {e}")
                return TestResult()

            # Parse JSON report
            if report_file.exists():
                return self._parse_json_report(report_file, context.project_root)
            else:
                LOGGER.warning("JSON report file not generated")
                return TestResult()

    def _run_with_junit_xml(
        self,
        binary: Path,
        context: ScanContext,
    ) -> TestResult:
        """Run pytest with JUnit XML output (fallback).

        Args:
            binary: Path to pytest binary.
            context: Scan context with paths and configuration.

        Returns:
            TestResult with test statistics and issues.
        """
        with tempfile.TemporaryDirectory() as tmpdir:
            report_file = Path(tmpdir) / "junit.xml"

            cmd = [
                str(binary),
                "--tb=short",
                "-v",
                f"--junit-xml={report_file}",
            ]

            # Add paths to test
            paths = [str(p) for p in context.paths] if context.paths else ["."]
            cmd.extend(paths)

            LOGGER.debug(f"Running: {' '.join(cmd)}")

            try:
                run_with_streaming(
                    cmd=cmd,
                    cwd=context.project_root,
                    tool_name="pytest",
                    stream_handler=context.stream_handler,
                    timeout=600,
                )
            except subprocess.TimeoutExpired:
                LOGGER.warning("pytest timed out after 600 seconds")
                return TestResult()
            except Exception as e:
                LOGGER.error(f"Failed to run pytest: {e}")
                return TestResult()

            # Parse JUnit XML report
            if report_file.exists():
                return self._parse_junit_xml(report_file, context.project_root)
            else:
                LOGGER.warning("JUnit XML report file not generated")
                return TestResult()

    def _parse_json_report(
        self,
        report_file: Path,
        project_root: Path,
    ) -> TestResult:
        """Parse pytest JSON report.

        Args:
            report_file: Path to JSON report file.
            project_root: Project root directory.

        Returns:
            TestResult with parsed data.
        """
        try:
            with open(report_file) as f:
                report = json.load(f)
        except Exception as e:
            LOGGER.error(f"Failed to parse JSON report: {e}")
            return TestResult()

        summary = report.get("summary", {})
        tests = report.get("tests", [])
        duration = report.get("duration", 0)

        result = TestResult(
            passed=summary.get("passed", 0),
            failed=summary.get("failed", 0),
            skipped=summary.get("skipped", 0) + summary.get("xfailed", 0),
            errors=summary.get("error", 0),
            duration_ms=int(duration * 1000),
        )

        # Convert failures to issues
        for test in tests:
            outcome = test.get("outcome", "")
            if outcome in ("failed", "error"):
                issue = self._test_to_issue(test, project_root, outcome)
                if issue:
                    result.issues.append(issue)

        LOGGER.info(
            f"pytest: {result.passed} passed, {result.failed} failed, "
            f"{result.skipped} skipped, {result.errors} errors"
        )
        return result

    def _test_to_issue(
        self,
        test: Dict[str, Any],
        project_root: Path,
        outcome: str,
    ) -> Optional[UnifiedIssue]:
        """Convert pytest test failure to UnifiedIssue.

        Args:
            test: Test dict from JSON report.
            project_root: Project root directory.
            outcome: Test outcome (failed or error).

        Returns:
            UnifiedIssue or None.
        """
        try:
            nodeid = test.get("nodeid", "")
            call = test.get("call", {})
            longrepr = call.get("longrepr", "")
            duration = call.get("duration", 0)

            # Parse nodeid for file and test name
            # Format: path/to/test_file.py::TestClass::test_method
            # or: path/to/test_file.py::test_function
            file_path = None
            test_name = nodeid
            line_number = None

            if "::" in nodeid:
                parts = nodeid.split("::")
                file_part = parts[0]
                test_name = "::".join(parts[1:])
                file_path = project_root / file_part

            # Extract line number from lineno if available
            lineno = test.get("lineno")
            if lineno:
                line_number = lineno

            # Get crash info for more details
            crash = call.get("crash", {})
            if not line_number and crash:
                line_number = crash.get("lineno")

            # Build message from longrepr
            message = self._extract_assertion_message(longrepr)

            # Determine severity
            severity = Severity.HIGH if outcome == "failed" else Severity.MEDIUM

            # Generate deterministic ID
            issue_id = self._generate_issue_id(nodeid, message)

            # Build title
            title = f"{test_name} {outcome}: {message}" if message else f"{test_name} {outcome}"

            return UnifiedIssue(
                id=issue_id,
                domain=ToolDomain.TESTING,
                source_tool="pytest",
                severity=severity,
                rule_id=outcome,
                title=title,
                description=longrepr or f"Test {outcome}",
                file_path=file_path,
                line_start=line_number,
                line_end=line_number,
                fixable=False,
                metadata={
                    "nodeid": nodeid,
                    "test_name": test_name,
                    "outcome": outcome,
                    "duration_ms": int(duration * 1000),
                    "assertion": message,
                    "traceback": longrepr,
                },
            )
        except Exception as e:
            LOGGER.warning(f"Failed to parse pytest test failure: {e}")
            return None

    def _parse_junit_xml(
        self,
        report_file: Path,
        project_root: Path,
    ) -> TestResult:
        """Parse pytest JUnit XML report.

        Args:
            report_file: Path to JUnit XML file.
            project_root: Project root directory.

        Returns:
            TestResult with parsed data.
        """
        try:
            tree = ElementTree.parse(report_file)
            root = tree.getroot()
        except Exception as e:
            LOGGER.error(f"Failed to parse JUnit XML report: {e}")
            return TestResult()

        # Get testsuite element (may be root or child)
        testsuite = root if root.tag == "testsuite" else root.find("testsuite")
        if testsuite is None:
            testsuite = root

        # Parse summary from attributes
        tests_total = int(testsuite.get("tests", 0))
        failures = int(testsuite.get("failures", 0))
        errors = int(testsuite.get("errors", 0))
        skipped = int(testsuite.get("skipped", 0))
        time_str = testsuite.get("time", "0")
        duration_ms = int(float(time_str) * 1000)

        result = TestResult(
            passed=tests_total - failures - errors - skipped,
            failed=failures,
            skipped=skipped,
            errors=errors,
            duration_ms=duration_ms,
        )

        # Parse individual test cases for failures
        for testcase in testsuite.iter("testcase"):
            failure = testcase.find("failure")
            error = testcase.find("error")

            if failure is not None:
                issue = self._xml_testcase_to_issue(
                    testcase, failure, project_root, "failed"
                )
                if issue:
                    result.issues.append(issue)
            elif error is not None:
                issue = self._xml_testcase_to_issue(
                    testcase, error, project_root, "error"
                )
                if issue:
                    result.issues.append(issue)

        LOGGER.info(
            f"pytest: {result.passed} passed, {result.failed} failed, "
            f"{result.skipped} skipped, {result.errors} errors"
        )
        return result

    def _xml_testcase_to_issue(
        self,
        testcase: ElementTree.Element,
        failure_elem: ElementTree.Element,
        project_root: Path,
        outcome: str,
    ) -> Optional[UnifiedIssue]:
        """Convert JUnit XML testcase failure to UnifiedIssue.

        Args:
            testcase: testcase XML element.
            failure_elem: failure or error XML element.
            project_root: Project root directory.
            outcome: Test outcome (failed or error).

        Returns:
            UnifiedIssue or None.
        """
        try:
            classname = testcase.get("classname", "")
            name = testcase.get("name", "")
            file_attr = testcase.get("file", "")
            line_attr = testcase.get("line")

            # Build file path
            file_path = None
            if file_attr:
                file_path = project_root / file_attr
            elif classname:
                # Try to convert classname to file path
                # e.g., tests.test_example -> tests/test_example.py
                file_guess = classname.replace(".", "/") + ".py"
                file_path = project_root / file_guess
                if not file_path.exists():
                    file_path = None

            # Get line number
            line_number = int(line_attr) if line_attr else None

            # Get failure message and content
            message = failure_elem.get("message", "")
            content = failure_elem.text or ""

            # Extract assertion from content
            assertion = self._extract_assertion_message(content) or message

            # Build nodeid for consistency
            nodeid = f"{classname}::{name}" if classname else name

            # Determine severity
            severity = Severity.HIGH if outcome == "failed" else Severity.MEDIUM

            # Generate deterministic ID
            issue_id = self._generate_issue_id(nodeid, assertion)

            # Build title
            title = f"{name} {outcome}: {assertion}" if assertion else f"{name} {outcome}"

            return UnifiedIssue(
                id=issue_id,
                domain=ToolDomain.TESTING,
                source_tool="pytest",
                severity=severity,
                rule_id=outcome,
                title=title,
                description=content or message or f"Test {outcome}",
                file_path=file_path,
                line_start=line_number,
                line_end=line_number,
                fixable=False,
                metadata={
                    "nodeid": nodeid,
                    "test_name": name,
                    "classname": classname,
                    "outcome": outcome,
                    "assertion": assertion,
                    "traceback": content,
                },
            )
        except Exception as e:
            LOGGER.warning(f"Failed to parse JUnit XML testcase: {e}")
            return None

    def _extract_assertion_message(self, longrepr: str) -> str:
        """Extract assertion message from pytest output.

        Args:
            longrepr: Long representation of the test failure.

        Returns:
            Extracted assertion message or empty string.
        """
        if not longrepr:
            return ""

        # Look for common assertion patterns
        lines = longrepr.strip().split("\n")

        # Try to find AssertionError or assert line
        for line in reversed(lines):
            stripped = line.strip()
            # Handle E prefix from pytest output (e.g., "E assert 1 == 2")
            if stripped.startswith("E "):
                content = stripped[2:].strip()
                if content.startswith("assert "):
                    return content
                if "AssertionError" in content:
                    return content.replace("AssertionError:", "").strip()
            if stripped.startswith("AssertionError:"):
                return stripped.replace("AssertionError:", "").strip()
            if stripped.startswith("assert "):
                return stripped
            if "AssertionError" in stripped:
                return stripped

        # Return first non-empty line as fallback
        for line in lines:
            stripped = line.strip()
            if stripped and not stripped.startswith(">") and not stripped.startswith("E "):
                return stripped[:100]  # Truncate long messages

        return ""

    def _generate_issue_id(self, nodeid: str, message: str) -> str:
        """Generate deterministic issue ID.

        Args:
            nodeid: Test node ID (path::classname::testname).
            message: Error message.

        Returns:
            Unique issue ID.
        """
        content = f"{nodeid}:{message}"
        hash_val = hashlib.sha256(content.encode()).hexdigest()[:12]
        return f"pytest-{hash_val}"
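
For orientation, the sketch below shows how this runner could be exercised on its own. It is a minimal, hypothetical driver: it assumes ScanContext can be constructed with the project_root, paths, and stream_handler fields that run_tests reads above; the real constructor lives in lucidscan/core/models.py and is not shown in this diff.

```python
from pathlib import Path

from lucidscan.core.models import ScanContext
from lucidscan.plugins.test_runners.pytest import PytestRunner

project = Path("/path/to/project")  # hypothetical project checkout

# Only the attributes read by run_tests are supplied here; the actual
# ScanContext may require additional fields.
context = ScanContext(
    project_root=project,
    paths=[project / "tests"],
    stream_handler=None,  # no live output streaming
)

runner = PytestRunner(project_root=project)
result = runner.run_tests(context)

print(f"{result.passed} passed, {result.failed} failed, {result.skipped} skipped")
for issue in result.issues:
    print(issue.severity, issue.title)
```

If pytest-json-report is available in the project environment, failures are read from the JSON report; otherwise the runner falls back to the JUnit XML path shown above.
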
lucidscan/plugins/type_checkers/__init__.py
@@ -0,0 +1,27 @@
"""Type checker plugins for lucidscan.

This module provides type checker integrations for the quality pipeline.
Type checkers are discovered via the lucidscan.type_checkers entry point group.
"""

from lucidscan.plugins.type_checkers.base import TypeCheckerPlugin, TypeCheckResult
from lucidscan.plugins.discovery import (
    discover_plugins,
    TYPE_CHECKER_ENTRY_POINT_GROUP,
)


def discover_type_checker_plugins():
    """Discover all installed type checker plugins.

    Returns:
        Dictionary mapping plugin names to plugin classes.
    """
    return discover_plugins(TYPE_CHECKER_ENTRY_POINT_GROUP, TypeCheckerPlugin)


__all__ = [
    "TypeCheckerPlugin",
    "TypeCheckResult",
    "discover_type_checker_plugins",
]
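
As a usage note, the discovery helper above returns a plain dictionary, so enumerating the type checkers installed in an environment is short. A minimal sketch, assuming only what the docstring states (names map to plugin classes registered under the lucidscan.type_checkers entry point group):

```python
from lucidscan.plugins.type_checkers import discover_type_checker_plugins

# Dictionary mapping plugin names to TypeCheckerPlugin subclasses.
plugins = discover_type_checker_plugins()

for name, plugin_cls in sorted(plugins.items()):
    print(f"{name}: {plugin_cls.__module__}.{plugin_cls.__name__}")
```

The concrete implementations shipped in this wheel (mypy.py, pyright.py, typescript.py) would presumably be registered through the entry_points.txt in the dist-info, though that mapping is not part of this hunk.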