elspais 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,163 @@
+ """
+ elspais.testing.mapper - Test-to-requirement mapping orchestration.
+
+ Coordinates test file scanning and result parsing to produce
+ per-requirement test coverage data.
+ """
+
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Set
+
+ from elspais.testing.config import TestingConfig
+ from elspais.testing.result_parser import ResultParser, TestResult, TestStatus
+ from elspais.testing.scanner import TestScanner
+
+
+ @dataclass
+ class RequirementTestData:
+     """
+     Test coverage data for a single requirement.
+
+     Attributes:
+         test_count: Total number of tests referencing this requirement
+         test_passed: Number of passed tests
+         test_failed: Number of failed tests
+         test_skipped: Number of skipped tests
+         test_result_files: Unique result file paths for this requirement
+     """
+
+     test_count: int = 0
+     test_passed: int = 0
+     test_failed: int = 0
+     test_skipped: int = 0
+     test_result_files: List[str] = field(default_factory=list)
+
+
+ @dataclass
+ class TestMappingResult:
+     """
+     Complete test mapping for all requirements.
+
+     Attributes:
+         requirement_data: Mapping of requirement IDs to test data
+         scan_summary: Summary of scanning operation
+         errors: List of errors encountered
+     """
+
+     requirement_data: Dict[str, RequirementTestData] = field(default_factory=dict)
+     scan_summary: Dict[str, Any] = field(default_factory=dict)
+     errors: List[str] = field(default_factory=list)
+
+
+ class TestMapper:
+     """
+     Orchestrates test scanning and result mapping.
+
+     Coordinates the TestScanner and ResultParser to produce
+     per-requirement test coverage data for JSON output.
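+
+     Example:
+         A minimal sketch (illustrative; assumes a TestingConfig whose
+         test_dirs, patterns, and result_files describe the project):
+
+             mapper = TestMapper(config)
+             mapping = mapper.map_tests({"REQ-p00001"}, Path("."))
+             print(mapping.requirement_data["REQ-p00001"].test_passed)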
+ """
60
+
61
+ def __init__(self, config: TestingConfig) -> None:
62
+ """
63
+ Initialize the mapper.
64
+
65
+ Args:
66
+ config: Testing configuration
67
+ """
68
+ self.config = config
69
+ self._scanner = TestScanner(config.reference_patterns or None)
70
+ self._parser = ResultParser(config.reference_patterns or None)
71
+
72
+ def map_tests(
73
+ self,
74
+ requirement_ids: Set[str],
75
+ base_path: Path,
76
+ ignore: Optional[List[str]] = None,
77
+ ) -> TestMappingResult:
78
+ """
79
+ Map tests to requirements and gather coverage data.
80
+
81
+ Args:
82
+ requirement_ids: Set of known requirement IDs
83
+ base_path: Project root path
84
+ ignore: Directory names to ignore
85
+
86
+ Returns:
87
+ TestMappingResult with per-requirement test data
88
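+
+         Example:
+             Illustrative shape of the summary (values are made up; the
+             keys are the ones populated below):
+
+                 result.scan_summary == {"files_scanned": 12,
+                                         "test_dirs": ["tests"],
+                                         "patterns": ["test_*.py"],
+                                         "result_files_parsed": 1}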
+ """
89
+ result = TestMappingResult()
90
+ ignore_dirs = ignore or []
91
+
92
+ # Step 1: Scan test files for requirement references
93
+ scan_result = self._scanner.scan_directories(
94
+ base_path=base_path,
95
+ test_dirs=self.config.test_dirs,
96
+ file_patterns=self.config.patterns,
97
+ ignore=ignore_dirs,
98
+ )
99
+
100
+ result.scan_summary["files_scanned"] = scan_result.files_scanned
101
+ result.scan_summary["test_dirs"] = self.config.test_dirs
102
+ result.scan_summary["patterns"] = self.config.patterns
103
+ result.errors.extend(scan_result.errors)
104
+
105
+ # Step 2: Parse test result files
106
+ parse_result = self._parser.parse_result_files(
107
+ base_path=base_path,
108
+ result_file_patterns=self.config.result_files,
109
+ )
110
+
111
+ result.scan_summary["result_files_parsed"] = len(parse_result.files_parsed)
112
+ result.errors.extend(parse_result.errors)
113
+
114
+ # Step 3: Build requirement ID to test results mapping
115
+ req_to_results: Dict[str, List[TestResult]] = {}
116
+ for test_result in parse_result.results:
117
+ for req_id in test_result.requirement_ids:
118
+ if req_id not in req_to_results:
119
+ req_to_results[req_id] = []
120
+ req_to_results[req_id].append(test_result)
121
+
122
+ # Step 4: Calculate per-requirement test data
123
+ # Combine scan references with result data
124
+ all_req_ids = set(scan_result.references.keys()) | set(req_to_results.keys())
125
+
126
+ for req_id in all_req_ids:
127
+ test_data = RequirementTestData()
128
+
129
+ # Count from scan references (test files found)
130
+ if req_id in scan_result.references:
131
+ test_data.test_count = len(scan_result.references[req_id])
132
+
133
+ # Count from results (if available)
134
+ if req_id in req_to_results:
135
+ results = req_to_results[req_id]
136
+ # If we have results, use result count (more accurate)
137
+ if not scan_result.references.get(req_id):
138
+ test_data.test_count = len(results)
139
+
140
+ result_files: Set[str] = set()
141
+ for tr in results:
142
+ if tr.status == TestStatus.PASSED:
143
+ test_data.test_passed += 1
144
+ elif tr.status == TestStatus.FAILED:
145
+ test_data.test_failed += 1
146
+ elif tr.status == TestStatus.ERROR:
147
+ test_data.test_failed += 1
148
+ elif tr.status == TestStatus.SKIPPED:
149
+ test_data.test_skipped += 1
150
+
151
+ if tr.result_file:
152
+ result_files.add(str(tr.result_file))
153
+
154
+ test_data.test_result_files = sorted(result_files)
155
+
156
+ result.requirement_data[req_id] = test_data
157
+
158
+ # Ensure all known requirements have entries (even if zero)
159
+ for req_id in requirement_ids:
160
+ if req_id not in result.requirement_data:
161
+ result.requirement_data[req_id] = RequirementTestData()
162
+
163
+ return result
@@ -0,0 +1,289 @@
+ """
+ elspais.testing.result_parser - Test result file parser.
+
+ Parses JUnit XML and pytest JSON result files to extract test outcomes.
+ """
+
+ import json
+ import re
+ import xml.etree.ElementTree as ET
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from pathlib import Path
+ from typing import List, Optional, Set
+
+
+ class TestStatus(Enum):
+     """Test execution status."""
+
+     PASSED = "passed"
+     FAILED = "failed"
+     ERROR = "error"
+     SKIPPED = "skipped"
+     UNKNOWN = "unknown"
+
+
+ @dataclass
+ class TestResult:
+     """
+     Result of a single test case.
+
+     Attributes:
+         test_name: Name of the test function/method
+         classname: Test class or module name
+         status: Test execution status
+         requirement_ids: Requirement IDs this test covers
+         result_file: Path to the result file
+         duration: Test duration in seconds (if available)
+         message: Failure/error message (if applicable)
+     """
+
+     test_name: str
+     classname: str
+     status: TestStatus
+     requirement_ids: List[str] = field(default_factory=list)
+     result_file: Optional[Path] = None
+     duration: Optional[float] = None
+     message: Optional[str] = None
+
+
+ @dataclass
+ class ResultParseResult:
+     """
+     Result of parsing test result files.
+
+     Attributes:
+         results: List of all parsed test results
+         files_parsed: List of successfully parsed files
+         errors: List of parse errors
+     """
+
+     results: List[TestResult] = field(default_factory=list)
+     files_parsed: List[Path] = field(default_factory=list)
+     errors: List[str] = field(default_factory=list)
+
+
+ class ResultParser:
+     """
+     Parses test result files (JUnit XML, pytest JSON).
+
+     Extracts test names, outcomes, and requirement references from
+     test result files to correlate with test file scanning.
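+
+     Example:
+         Illustrative usage (the glob pattern here is an assumption):
+
+             parser = ResultParser()
+             parsed = parser.parse_result_files(Path("."), ["reports/*.xml"])
+             for tr in parsed.results:
+                 print(tr.test_name, tr.status.value, tr.requirement_ids)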
+ """
73
+
74
+ # Patterns for extracting requirement IDs from test names
75
+ DEFAULT_REF_PATTERNS = [
76
+ r"(?:REQ[-_])?([pod]\d{5})(?:[-_]([A-Z]))?",
77
+ ]
78
+
79
+ def __init__(self, reference_patterns: Optional[List[str]] = None) -> None:
80
+ """
81
+ Initialize the parser.
82
+
83
+ Args:
84
+ reference_patterns: Regex patterns to extract requirement IDs from test names
85
+ """
86
+ patterns = reference_patterns or self.DEFAULT_REF_PATTERNS
87
+ self._patterns = [re.compile(p) for p in patterns]
88
+
89
+ def parse_result_files(
90
+ self,
91
+ base_path: Path,
92
+ result_file_patterns: List[str],
93
+ ) -> ResultParseResult:
94
+ """
95
+ Parse all matching result files.
96
+
97
+ Args:
98
+ base_path: Project root path
99
+ result_file_patterns: Glob patterns for result files
100
+
101
+ Returns:
102
+ ResultParseResult with all parsed test results
103
+ """
104
+ result = ResultParseResult()
105
+ seen_files: Set[Path] = set()
106
+
107
+ for pattern in result_file_patterns:
108
+ for result_file in base_path.glob(pattern):
109
+ if result_file in seen_files:
110
+ continue
111
+ if not result_file.is_file():
112
+ continue
113
+ seen_files.add(result_file)
114
+
115
+ try:
116
+ if result_file.suffix == ".xml":
117
+ file_results = self._parse_junit_xml(result_file)
118
+ elif result_file.suffix == ".json":
119
+ file_results = self._parse_pytest_json(result_file)
120
+ else:
121
+ continue
122
+
123
+ result.results.extend(file_results)
124
+ result.files_parsed.append(result_file)
125
+ except Exception as e:
126
+ result.errors.append(f"{result_file}: {e}")
127
+
128
+ return result
129
+
130
+ def _parse_junit_xml(self, file_path: Path) -> List[TestResult]:
131
+ """
132
+ Parse a JUnit XML result file.
133
+
134
+ Args:
135
+ file_path: Path to JUnit XML file
136
+
137
+ Returns:
138
+ List of TestResult objects
139
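+
+         Example:
+             An illustrative minimal input this parser accepts:
+
+                 <testsuite>
+                   <testcase classname="tests.test_auth"
+                             name="test_p00001_login" time="0.01"/>
+                 </testsuite>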
+ """
140
+ results: List[TestResult] = []
141
+
142
+ try:
143
+ tree = ET.parse(file_path)
144
+ root = tree.getroot()
145
+ except ET.ParseError as e:
146
+ raise ValueError(f"Invalid XML: {e}")
147
+
148
+ # Handle both <testsuites> and <testsuite> as root
149
+ if root.tag == "testsuites":
150
+ testsuites = root.findall("testsuite")
151
+ elif root.tag == "testsuite":
152
+ testsuites = [root]
153
+ else:
154
+ return results
155
+
156
+ for testsuite in testsuites:
157
+ for testcase in testsuite.findall("testcase"):
158
+ test_name = testcase.get("name", "")
159
+ classname = testcase.get("classname", "")
160
+ time_str = testcase.get("time")
161
+ duration = float(time_str) if time_str else None
162
+
163
+ # Determine status
164
+ failure = testcase.find("failure")
165
+ error = testcase.find("error")
166
+ skipped = testcase.find("skipped")
167
+
168
+ if failure is not None:
169
+ status = TestStatus.FAILED
170
+ message = failure.get("message") or failure.text
171
+ elif error is not None:
172
+ status = TestStatus.ERROR
173
+ message = error.get("message") or error.text
174
+ elif skipped is not None:
175
+ status = TestStatus.SKIPPED
176
+ message = skipped.get("message") or skipped.text
177
+ else:
178
+ status = TestStatus.PASSED
179
+ message = None
180
+
181
+ # Extract requirement IDs from test name
182
+ req_ids = self._extract_requirement_ids(test_name, classname)
183
+
184
+ results.append(TestResult(
185
+ test_name=test_name,
186
+ classname=classname,
187
+ status=status,
188
+ requirement_ids=req_ids,
189
+ result_file=file_path,
190
+ duration=duration,
191
+ message=message,
192
+ ))
193
+
194
+ return results
195
+
196
+ def _parse_pytest_json(self, file_path: Path) -> List[TestResult]:
197
+ """
198
+ Parse a pytest JSON result file.
199
+
200
+ Args:
201
+ file_path: Path to pytest JSON file
202
+
203
+ Returns:
204
+ List of TestResult objects
205
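+
+         Example:
+             Illustrative input, assuming the pytest-json-report layout
+             (only the fields read below are shown):
+
+                 {"tests": [{"nodeid": "tests/test_auth.py::test_p00001_login",
+                             "outcome": "passed", "duration": 0.01}]}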
+ """
206
+ results: List[TestResult] = []
207
+
208
+ try:
209
+ with open(file_path, encoding="utf-8") as f:
210
+ data = json.load(f)
211
+ except json.JSONDecodeError as e:
212
+ raise ValueError(f"Invalid JSON: {e}")
213
+
214
+ # Handle pytest-json-report format
215
+ tests = data.get("tests", [])
216
+ for test in tests:
217
+ nodeid = test.get("nodeid", "")
218
+ outcome = test.get("outcome", "unknown")
219
+
220
+ # Parse nodeid: "tests/test_foo.py::TestClass::test_method"
221
+ parts = nodeid.split("::")
222
+ test_name = parts[-1] if parts else nodeid
223
+ classname = parts[-2] if len(parts) > 1 else ""
224
+
225
+ # Map outcome to status
226
+ status_map = {
227
+ "passed": TestStatus.PASSED,
228
+ "failed": TestStatus.FAILED,
229
+ "error": TestStatus.ERROR,
230
+ "skipped": TestStatus.SKIPPED,
231
+ }
232
+ status = status_map.get(outcome, TestStatus.UNKNOWN)
233
+
234
+ # Get duration and message
235
+ duration = test.get("duration")
236
+ message = None
237
+ call_data = test.get("call", {})
238
+ if call_data and status in (TestStatus.FAILED, TestStatus.ERROR):
239
+ message = call_data.get("longrepr")
240
+
241
+ # Extract requirement IDs
242
+ req_ids = self._extract_requirement_ids(test_name, classname)
243
+
244
+ results.append(TestResult(
245
+ test_name=test_name,
246
+ classname=classname,
247
+ status=status,
248
+ requirement_ids=req_ids,
249
+ result_file=file_path,
250
+ duration=duration,
251
+ message=message,
252
+ ))
253
+
254
+ return results
255
+
256
+ def _extract_requirement_ids(self, test_name: str, classname: str) -> List[str]:
257
+ """
258
+ Extract requirement IDs from test name and classname.
259
+
260
+ Args:
261
+ test_name: Test function/method name
262
+ classname: Test class or module name
263
+
264
+ Returns:
265
+ List of normalized requirement IDs (e.g., ["REQ-p00001"])
266
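+
+         Example:
+             With the default pattern (illustrative):
+
+                 _extract_requirement_ids("test_p00001_login", "TestAuth")
+                 # -> ["REQ-p00001"]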
+ """
267
+ req_ids: List[str] = []
268
+ search_text = f"{classname}::{test_name}"
269
+
270
+ for pattern in self._patterns:
271
+ for match in pattern.finditer(search_text):
272
+ groups = match.groups()
273
+ if not groups or not groups[0]:
274
+ continue
275
+ type_id = groups[0]
276
+ # Normalize to full requirement ID (ignore assertion label for ID)
277
+ req_id = f"REQ-{type_id}"
278
+ if req_id not in req_ids:
279
+ req_ids.append(req_id)
280
+
281
+ return req_ids
282
+
283
+ def parse_junit_xml(self, file_path: Path) -> List[TestResult]:
284
+ """Public method to parse a JUnit XML file."""
285
+ return self._parse_junit_xml(file_path)
286
+
287
+ def parse_pytest_json(self, file_path: Path) -> List[TestResult]:
288
+ """Public method to parse a pytest JSON file."""
289
+ return self._parse_pytest_json(file_path)
@@ -0,0 +1,206 @@
+ """
+ elspais.testing.scanner - Test file scanner for requirement references.
+
+ Scans test files for requirement ID references (e.g., REQ-d00001, REQ-p00001-A)
+ in function names, docstrings, and comments.
+ """
+
+ import re
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Dict, List, Optional, Set
+
+
+ @dataclass
+ class TestReference:
+     """
+     A reference from a test to a requirement.
+
+     Attributes:
+         requirement_id: Normalized requirement ID (e.g., "REQ-p00001")
+         assertion_label: Assertion label if present (e.g., "A" for REQ-p00001-A)
+         test_file: Path to the test file
+         test_name: Name of the test function/method if extractable
+         line_number: Line number where reference was found
+     """
+
+     requirement_id: str
+     assertion_label: Optional[str]
+     test_file: Path
+     test_name: Optional[str] = None
+     line_number: int = 0
+
+
+ @dataclass
+ class TestScanResult:
+     """
+     Result of scanning test files for requirement references.
+
+     Attributes:
+         references: Mapping of requirement IDs to their test references
+         files_scanned: Number of test files scanned
+         errors: List of errors encountered during scanning
+     """
+
+     references: Dict[str, List[TestReference]] = field(default_factory=dict)
+     files_scanned: int = 0
+     errors: List[str] = field(default_factory=list)
+
+     def add_reference(self, ref: TestReference) -> None:
+         """Add a test reference, rolling up assertion-level refs to parent."""
+         # Roll up assertion-level references to the parent requirement
+         req_id = ref.requirement_id
+         if req_id not in self.references:
+             self.references[req_id] = []
+         self.references[req_id].append(ref)
+
+
+ class TestScanner:
+     """
+     Scans test files for requirement ID references.
+
+     Uses configurable patterns to find requirement references in:
+     - Test function/method names
+     - Docstrings
+     - Comments (IMPLEMENTS: patterns)
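+
+     Example:
+         Illustrative usage (the directory layout is an assumption):
+
+             scanner = TestScanner()
+             scan = scanner.scan_directories(
+                 base_path=Path("."),
+                 test_dirs=["tests"],
+                 file_patterns=["test_*.py"],
+             )
+             print(scan.files_scanned, sorted(scan.references))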
+ """
67
+
68
+ # Default patterns if none configured
69
+ DEFAULT_PATTERNS = [
70
+ # Test function names: test_REQ_p00001_something or test_p00001_something
71
+ r"test_.*(?:REQ[-_])?([pod]\d{5})(?:[-_]([A-Z]))?",
72
+ # IMPLEMENTS comments: IMPLEMENTS: REQ-p00001 or IMPLEMENTS: REQ-p00001-A
73
+ r"(?:IMPLEMENTS|Implements|implements)[:\s]+(?:REQ[-_])?([pod]\d{5})(?:-([A-Z]))?",
74
+ # Direct references: REQ-p00001 or REQ-p00001-A
75
+ r"\bREQ[-_]([pod]\d{5})(?:-([A-Z]))?\b",
76
+ ]
77
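+
+     # Illustrative lines the defaults are meant to catch (assumed naming
+     # conventions, not an exhaustive list):
+     #   def test_p00001_login():       -> REQ-p00001
+     #   # IMPLEMENTS: REQ-p00002-A     -> REQ-p00002 with assertion label "A"
+     #   "Covers REQ-d00003."           -> REQ-d00003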
+
+     def __init__(self, reference_patterns: Optional[List[str]] = None) -> None:
+         """
+         Initialize the scanner with reference patterns.
+
+         Args:
+             reference_patterns: Regex patterns for extracting requirement IDs.
+                 Each pattern should have groups for (type+id) and
+                 optionally (assertion_label).
+         """
+         patterns = reference_patterns or self.DEFAULT_PATTERNS
+         self._patterns = [re.compile(p) for p in patterns]
+
+     def scan_directories(
+         self,
+         base_path: Path,
+         test_dirs: List[str],
+         file_patterns: List[str],
+         ignore: Optional[List[str]] = None,
+     ) -> TestScanResult:
+         """
+         Scan test directories for requirement references.
+
+         Args:
+             base_path: Project root path
+             test_dirs: Glob patterns for test directories (e.g., ["apps/**/test"])
+             file_patterns: File patterns to match (e.g., ["*_test.py"])
+             ignore: Directory names to ignore (e.g., ["node_modules"])
+
+         Returns:
+             TestScanResult with all found references
+         """
+         result = TestScanResult()
+         ignore_set = set(ignore or [])
+         seen_files: Set[Path] = set()
+
+         for dir_pattern in test_dirs:
+             # Handle special cases for directory patterns
+             if dir_pattern in (".", ""):
+                 # Current directory
+                 dirs_to_scan = [base_path]
+             else:
+                 # Resolve the directory pattern
+                 dirs_to_scan = list(base_path.glob(dir_pattern))
+
+             for test_dir in dirs_to_scan:
+                 if not test_dir.is_dir():
+                     continue
+                 if any(ig in test_dir.parts for ig in ignore_set):
+                     continue
+
+                 # Find test files in this directory
+                 for file_pattern in file_patterns:
+                     for test_file in test_dir.glob(file_pattern):
+                         if test_file in seen_files:
+                             continue
+                         if not test_file.is_file():
+                             continue
+                         seen_files.add(test_file)
+
+                         # Scan the file
+                         file_refs = self._scan_file(test_file)
+                         for ref in file_refs:
+                             result.add_reference(ref)
+                         result.files_scanned += 1
+
+         return result
+
+     def _scan_file(self, file_path: Path) -> List[TestReference]:
+         """
+         Scan a single test file for requirement references.
+
+         Args:
+             file_path: Path to the test file
+
+         Returns:
+             List of TestReference objects found in the file
+         """
+         references: List[TestReference] = []
+
+         try:
+             content = file_path.read_text(encoding="utf-8", errors="replace")
+         except Exception:
+             return references
+
+         lines = content.split("\n")
+         current_test_name: Optional[str] = None
+
+         for line_num, line in enumerate(lines, start=1):
+             # Track current test function name
+             test_match = re.match(r"\s*def\s+(test_\w+)", line)
+             if test_match:
+                 current_test_name = test_match.group(1)
+
+             # Look for requirement references
+             for pattern in self._patterns:
+                 for match in pattern.finditer(line):
+                     groups = match.groups()
+                     if not groups or not groups[0]:
+                         continue
+
+                     # Extract requirement ID parts
+                     type_id = groups[0]  # e.g., "p00001"
+                     assertion_label = groups[1] if len(groups) > 1 else None
+
+                     # Normalize to full requirement ID
+                     req_id = f"REQ-{type_id}"
+
+                     ref = TestReference(
+                         requirement_id=req_id,
+                         assertion_label=assertion_label,
+                         test_file=file_path,
+                         test_name=current_test_name,
+                         line_number=line_num,
+                     )
+                     references.append(ref)
+
+         return references
+
+     def scan_file(self, file_path: Path) -> List[TestReference]:
+         """
+         Public method to scan a single file.
+
+         Args:
+             file_path: Path to the test file
+
+         Returns:
+             List of TestReference objects
+         """
+         return self._scan_file(file_path)