elspais 0.11.1__py3-none-any.whl → 0.43.5__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (148)
  1. elspais/__init__.py +2 -11
  2. elspais/{sponsors/__init__.py → associates.py} +102 -58
  3. elspais/cli.py +395 -79
  4. elspais/commands/__init__.py +9 -3
  5. elspais/commands/analyze.py +121 -173
  6. elspais/commands/changed.py +15 -30
  7. elspais/commands/config_cmd.py +13 -16
  8. elspais/commands/edit.py +60 -44
  9. elspais/commands/example_cmd.py +319 -0
  10. elspais/commands/hash_cmd.py +167 -183
  11. elspais/commands/health.py +1177 -0
  12. elspais/commands/index.py +98 -114
  13. elspais/commands/init.py +103 -26
  14. elspais/commands/reformat_cmd.py +41 -444
  15. elspais/commands/rules_cmd.py +7 -3
  16. elspais/commands/trace.py +444 -321
  17. elspais/commands/validate.py +195 -415
  18. elspais/config/__init__.py +799 -5
  19. elspais/{core/content_rules.py → content_rules.py} +20 -3
  20. elspais/docs/cli/assertions.md +67 -0
  21. elspais/docs/cli/commands.md +304 -0
  22. elspais/docs/cli/config.md +262 -0
  23. elspais/docs/cli/format.md +66 -0
  24. elspais/docs/cli/git.md +45 -0
  25. elspais/docs/cli/health.md +190 -0
  26. elspais/docs/cli/hierarchy.md +60 -0
  27. elspais/docs/cli/ignore.md +72 -0
  28. elspais/docs/cli/mcp.md +245 -0
  29. elspais/docs/cli/quickstart.md +58 -0
  30. elspais/docs/cli/traceability.md +89 -0
  31. elspais/docs/cli/validation.md +96 -0
  32. elspais/graph/GraphNode.py +383 -0
  33. elspais/graph/__init__.py +40 -0
  34. elspais/graph/annotators.py +927 -0
  35. elspais/graph/builder.py +1886 -0
  36. elspais/graph/deserializer.py +248 -0
  37. elspais/graph/factory.py +284 -0
  38. elspais/graph/metrics.py +127 -0
  39. elspais/graph/mutations.py +161 -0
  40. elspais/graph/parsers/__init__.py +156 -0
  41. elspais/graph/parsers/code.py +213 -0
  42. elspais/graph/parsers/comments.py +112 -0
  43. elspais/graph/parsers/config_helpers.py +29 -0
  44. elspais/graph/parsers/heredocs.py +225 -0
  45. elspais/graph/parsers/journey.py +131 -0
  46. elspais/graph/parsers/remainder.py +79 -0
  47. elspais/graph/parsers/requirement.py +347 -0
  48. elspais/graph/parsers/results/__init__.py +6 -0
  49. elspais/graph/parsers/results/junit_xml.py +229 -0
  50. elspais/graph/parsers/results/pytest_json.py +313 -0
  51. elspais/graph/parsers/test.py +305 -0
  52. elspais/graph/relations.py +78 -0
  53. elspais/graph/serialize.py +216 -0
  54. elspais/html/__init__.py +8 -0
  55. elspais/html/generator.py +731 -0
  56. elspais/html/templates/trace_view.html.j2 +2151 -0
  57. elspais/mcp/__init__.py +47 -29
  58. elspais/mcp/__main__.py +5 -1
  59. elspais/mcp/file_mutations.py +138 -0
  60. elspais/mcp/server.py +2016 -247
  61. elspais/testing/__init__.py +4 -4
  62. elspais/testing/config.py +3 -0
  63. elspais/testing/mapper.py +1 -1
  64. elspais/testing/result_parser.py +25 -21
  65. elspais/testing/scanner.py +301 -12
  66. elspais/utilities/__init__.py +1 -0
  67. elspais/utilities/docs_loader.py +115 -0
  68. elspais/utilities/git.py +607 -0
  69. elspais/{core → utilities}/hasher.py +8 -22
  70. elspais/utilities/md_renderer.py +189 -0
  71. elspais/{core → utilities}/patterns.py +58 -57
  72. elspais/utilities/reference_config.py +626 -0
  73. elspais/validation/__init__.py +19 -0
  74. elspais/validation/format.py +264 -0
  75. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
  76. elspais-0.43.5.dist-info/RECORD +80 -0
  77. elspais/config/defaults.py +0 -173
  78. elspais/config/loader.py +0 -494
  79. elspais/core/__init__.py +0 -21
  80. elspais/core/git.py +0 -352
  81. elspais/core/models.py +0 -320
  82. elspais/core/parser.py +0 -640
  83. elspais/core/rules.py +0 -514
  84. elspais/mcp/context.py +0 -171
  85. elspais/mcp/serializers.py +0 -112
  86. elspais/reformat/__init__.py +0 -50
  87. elspais/reformat/detector.py +0 -119
  88. elspais/reformat/hierarchy.py +0 -246
  89. elspais/reformat/line_breaks.py +0 -220
  90. elspais/reformat/prompts.py +0 -123
  91. elspais/reformat/transformer.py +0 -264
  92. elspais/trace_view/__init__.py +0 -54
  93. elspais/trace_view/coverage.py +0 -183
  94. elspais/trace_view/generators/__init__.py +0 -12
  95. elspais/trace_view/generators/base.py +0 -329
  96. elspais/trace_view/generators/csv.py +0 -122
  97. elspais/trace_view/generators/markdown.py +0 -175
  98. elspais/trace_view/html/__init__.py +0 -31
  99. elspais/trace_view/html/generator.py +0 -1006
  100. elspais/trace_view/html/templates/base.html +0 -283
  101. elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
  102. elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
  103. elspais/trace_view/html/templates/components/legend_modal.html +0 -69
  104. elspais/trace_view/html/templates/components/review_panel.html +0 -118
  105. elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
  106. elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
  107. elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
  108. elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
  109. elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
  110. elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
  111. elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
  112. elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
  113. elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
  114. elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
  115. elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
  116. elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
  117. elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
  118. elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
  119. elspais/trace_view/html/templates/partials/scripts.js +0 -1741
  120. elspais/trace_view/html/templates/partials/styles.css +0 -1756
  121. elspais/trace_view/models.py +0 -353
  122. elspais/trace_view/review/__init__.py +0 -60
  123. elspais/trace_view/review/branches.py +0 -1149
  124. elspais/trace_view/review/models.py +0 -1205
  125. elspais/trace_view/review/position.py +0 -609
  126. elspais/trace_view/review/server.py +0 -1056
  127. elspais/trace_view/review/status.py +0 -470
  128. elspais/trace_view/review/storage.py +0 -1367
  129. elspais/trace_view/scanning.py +0 -213
  130. elspais/trace_view/specs/README.md +0 -84
  131. elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
  132. elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
  133. elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
  134. elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
  135. elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
  136. elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
  137. elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
  138. elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
  139. elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
  140. elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
  141. elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
  142. elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
  143. elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
  144. elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
  145. elspais-0.11.1.dist-info/RECORD +0 -101
  146. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
  147. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
  148. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,347 @@
+ """RequirementParser - Priority 50 parser for requirement blocks.
+
+ Parses requirement specifications from markdown, claiming lines from
+ header through end marker.
+ """
+
+ from __future__ import annotations
+
+ import re
+ from typing import Any, Iterator
+
+ from elspais.graph.parsers import ParseContext, ParsedContent
+ from elspais.utilities.patterns import PatternConfig, PatternValidator
+
+
+ class RequirementParser:
+     """Parser for requirement blocks.
+
+     Priority: 50 (after comments, before remainder)
+
+     Parses requirement blocks in the standard format:
+     - Header: ## REQ-xxx: Title
+     - Metadata: **Level**: ... | **Status**: ...
+     - Body text
+     - Optional assertions section
+     - End marker: *End* *REQ-xxx*
+     """
+
+     priority = 50
+
+     # Regex patterns
+     HEADER_PATTERN = re.compile(r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$")
+     LEVEL_STATUS_PATTERN = re.compile(
+         r"\*\*Level\*\*:\s*(?P<level>\w+)"
+         r"(?:\s*\|\s*\*\*Implements\*\*:\s*(?P<implements>[^|\n]+))?"
+         r"(?:\s*\|\s*\*\*Status\*\*:\s*(?P<status>\w+))?"
+     )
+     ALT_STATUS_PATTERN = re.compile(r"\*\*Status\*\*:\s*(?P<status>\w+)")
+     IMPLEMENTS_PATTERN = re.compile(r"\*\*Implements\*\*:\s*(?P<implements>[^|\n]+)")
+     REFINES_PATTERN = re.compile(r"\*\*Refines\*\*:\s*(?P<refines>[^|\n]+)")
+     END_MARKER_PATTERN = re.compile(
+         r"^\*End\*\s+\*[^*]+\*\s*(?:\|\s*\*\*Hash\*\*:\s*(?P<hash>[a-zA-Z0-9]+))?",
+         re.MULTILINE,
+     )
+     ASSERTIONS_HEADER_PATTERN = re.compile(r"^##\s+Assertions\s*$", re.MULTILINE)
+     ASSERTION_LINE_PATTERN = re.compile(r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE)
+
+     # Values that mean "no references"
+     NO_REFERENCE_VALUES = ["-", "null", "none", "x", "X", "N/A", "n/a"]
+
+     def __init__(self, pattern_config: PatternConfig) -> None:
+         """Initialize parser with pattern configuration.
+
+         Args:
+             pattern_config: Configuration for ID patterns.
+         """
+         self.pattern_config = pattern_config
+         self.validator = PatternValidator(pattern_config)
+
+     def claim_and_parse(
+         self,
+         lines: list[tuple[int, str]],
+         context: ParseContext,
+     ) -> Iterator[ParsedContent]:
+         """Claim and parse requirement blocks.
+
+         Args:
+             lines: List of (line_number, content) tuples.
+             context: Parsing context.
+
+         Yields:
+             ParsedContent for each requirement block.
+         """
+         # Build line map for quick access
+         line_map = dict(lines)
+         line_numbers = sorted(line_map.keys())
+
+         if not line_numbers:
+             return
+
+         claimed: set[int] = set()
+         i = 0
+
+         while i < len(line_numbers):
+             ln = line_numbers[i]
+             if ln in claimed:
+                 i += 1
+                 continue
+
+             text = line_map[ln]
+
+             # Check for requirement header
+             header_match = self.HEADER_PATTERN.match(text)
+             if header_match:
+                 req_id = header_match.group("id")
+
+                 # Validate ID against configured pattern
+                 if not self.validator.is_valid(req_id):
+                     i += 1
+                     continue
+
+                 title = header_match.group("title").strip()
+                 start_line = ln
+
+                 # Find the end of this requirement
+                 req_lines = [(ln, text)]
+                 end_line = ln
+                 j = i + 1
+
+                 while j < len(line_numbers):
+                     next_ln = line_numbers[j]
+                     next_text = line_map[next_ln]
+                     req_lines.append((next_ln, next_text))
+                     end_line = next_ln
+
+                     # Check for end marker
+                     if self.END_MARKER_PATTERN.match(next_text):
+                         j += 1
+                         # Include separator if present
+                         if j < len(line_numbers):
+                             sep_ln = line_numbers[j]
+                             if line_map[sep_ln].strip() == "---":
+                                 req_lines.append((sep_ln, line_map[sep_ln]))
+                                 end_line = sep_ln
+                                 j += 1
+                         break
+
+                     # Check for next requirement header
+                     next_match = self.HEADER_PATTERN.match(next_text)
+                     if next_match and self.validator.is_valid(next_match.group("id")):
+                         # Hit next requirement - don't include this line
+                         req_lines.pop()
+                         end_line = line_numbers[j - 1] if j > i + 1 else ln
+                         break
+
+                     j += 1
+
+                 # Claim all lines in this requirement
+                 for claim_ln, _ in req_lines:
+                     claimed.add(claim_ln)
+
+                 # Parse the requirement data
+                 raw_text = "\n".join(t for _, t in req_lines)
+                 parsed_data = self._parse_requirement(req_id, title, raw_text)
+
+                 yield ParsedContent(
+                     content_type="requirement",
+                     start_line=start_line,
+                     end_line=end_line,
+                     raw_text=raw_text,
+                     parsed_data=parsed_data,
+                 )
+
+                 # Move index past claimed lines
+                 while i < len(line_numbers) and line_numbers[i] in claimed:
+                     i += 1
+             else:
+                 i += 1
+
+     def _parse_requirement(self, req_id: str, title: str, text: str) -> dict[str, Any]:
+         """Parse requirement fields from text block.
+
+         Args:
+             req_id: Requirement ID.
+             title: Requirement title.
+             text: Full requirement text.
+
+         Returns:
+             Dictionary of parsed requirement data.
+         """
+         data: dict[str, Any] = {
+             "id": req_id,
+             "title": title,
+             "level": "Unknown",
+             "status": "Unknown",
+             "implements": [],
+             "refines": [],
+             "assertions": [],
+             "hash": None,
+             "body_text": "",  # Raw text between header and footer for hash computation
+         }
+
+         # Extract body_text: everything AFTER header line and BEFORE footer line
+         # Per spec: "hash SHALL be calculated from every line AFTER Header, BEFORE Footer"
+         data["body_text"] = self._extract_body_text(text)
+
+         # Extract level and status
+         level_match = self.LEVEL_STATUS_PATTERN.search(text)
+         if level_match:
+             data["level"] = level_match.group("level") or "Unknown"
+             data["status"] = level_match.group("status") or "Unknown"
+             if level_match.group("implements"):
+                 data["implements"] = self._parse_refs(level_match.group("implements"))
+
+         # Try alternative status pattern
+         if data["status"] == "Unknown":
+             alt_match = self.ALT_STATUS_PATTERN.search(text)
+             if alt_match:
+                 data["status"] = alt_match.group("status")
+
+         # Try alternative implements pattern
+         if not data["implements"]:
+             impl_match = self.IMPLEMENTS_PATTERN.search(text)
+             if impl_match:
+                 data["implements"] = self._parse_refs(impl_match.group("implements"))
+
+         # Parse refines
+         refines_match = self.REFINES_PATTERN.search(text)
+         if refines_match:
+             data["refines"] = self._parse_refs(refines_match.group("refines"))
+
+         # Expand multi-assertion references
+         data["implements"] = self._expand_multi_assertion(data["implements"])
+         data["refines"] = self._expand_multi_assertion(data["refines"])
+
+         # Extract assertions
+         data["assertions"] = self._extract_assertions(text)
+
+         # Extract hash
+         end_match = self.END_MARKER_PATTERN.search(text)
+         if end_match and end_match.group("hash"):
+             data["hash"] = end_match.group("hash")
+
+         return data
+
+     def _parse_refs(self, refs_str: str) -> list[str]:
+         """Parse comma-separated reference list.
+
+         Handles both full IDs (REQ-p00001) and shorthand (p00001).
+         Shorthand references are normalized to full IDs using the configured prefix.
+         """
+         if not refs_str:
+             return []
+
+         stripped = refs_str.strip()
+         if stripped in self.NO_REFERENCE_VALUES:
+             return []
+
+         prefix = self.pattern_config.prefix
+         parts = [p.strip() for p in refs_str.split(",")]
+         result = []
+
+         for p in parts:
+             if not p or p in self.NO_REFERENCE_VALUES:
+                 continue
+             # Normalize shorthand to full ID (e.g., "o00001" -> "REQ-o00001")
+             if not p.startswith(f"{prefix}-"):
+                 p = f"{prefix}-{p}"
+             result.append(p)
+
+         return result
+
+     def _expand_multi_assertion(self, refs: list[str]) -> list[str]:
+         """Expand multi-assertion syntax.
+
+         REQ-p00001-A-B-C -> [REQ-p00001-A, REQ-p00001-B, REQ-p00001-C]
+         """
+         result = []
+         multi_pattern = re.compile(r"^([A-Z]+-[A-Za-z0-9-]+?)(-[A-Z](?:-[A-Z])+|-\d+(?:-\d+)+)$")
+
+         for ref in refs:
+             match = multi_pattern.match(ref)
+             if match:
+                 base_id = match.group(1)
+                 labels_str = match.group(2)
+                 labels = [lbl for lbl in labels_str.split("-") if lbl]
+                 for label in labels:
+                     result.append(f"{base_id}-{label}")
+             else:
+                 result.append(ref)
+
+         return result
+
+     def _extract_assertions(self, text: str) -> list[dict[str, Any]]:
+         """Extract assertions from text."""
+         assertions = []
+
+         header_match = self.ASSERTIONS_HEADER_PATTERN.search(text)
+         if not header_match:
+             return assertions
+
+         # Get text after header
+         start_pos = header_match.end()
+         section_text = text[start_pos:]
+
+         # Find end of assertions section
+         end_patterns = [r"^##\s+", r"^\*End\*", r"^---\s*$"]
+         end_pos = len(section_text)
+         for pattern in end_patterns:
+             match = re.search(pattern, section_text, re.MULTILINE)
+             if match and match.start() < end_pos:
+                 end_pos = match.start()
+
+         assertions_text = section_text[:end_pos]
+
+         # Parse assertion lines
+         for match in self.ASSERTION_LINE_PATTERN.finditer(assertions_text):
+             label = match.group(1)
+             assertion_text = match.group(2).strip()
+             assertions.append(
+                 {
+                     "label": label,
+                     "text": assertion_text,
+                 }
+             )
+
+         return assertions
+
+     def _extract_body_text(self, text: str) -> str:
+         """Extract body text for hash computation.
+
+         Per spec/requirements-spec.md:
+         > The hash SHALL be calculated from:
+         > - every line AFTER the Header line
+         > - every line BEFORE the Footer line
+
+         Args:
+             text: Full requirement text including header and footer.
+
+         Returns:
+             Body text (between header and footer) for hash computation.
+         """
+         lines = text.split("\n")
+         if not lines:
+             return ""
+
+         # Header is the first line (## REQ-xxx: Title)
+         # Body starts from line 1 (after header)
+         body_start = 1
+
+         # Find footer line (*End* *Title* | **Hash**: xxx)
+         body_end = len(lines)
+         for i, line in enumerate(lines):
+             if self.END_MARKER_PATTERN.match(line):
+                 body_end = i
+                 break
+
+         # Extract body lines and join
+         body_lines = lines[body_start:body_end]
+
+         # Strip leading/trailing empty lines but preserve internal structure
+         while body_lines and not body_lines[0].strip():
+             body_lines.pop(0)
+         while body_lines and not body_lines[-1].strip():
+             body_lines.pop()
+
+         return "\n".join(body_lines)
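
For orientation, the block format this parser targets can be checked directly against the class-level regexes in the hunk above. A minimal sketch, not part of the diff, assuming the import path matches the new file elspais/graph/parsers/requirement.py; the requirement ID, title, and hash below are invented for illustration:

    # Illustrative only: exercises the class-level regexes shown above.
    from elspais.graph.parsers.requirement import RequirementParser

    header = "## REQ-p00001: Example requirement title"          # hypothetical header
    footer = "*End* *REQ-p00001* | **Hash**: a1b2c3d4"            # hypothetical footer

    h = RequirementParser.HEADER_PATTERN.match(header)
    print(h.group("id"), "|", h.group("title"))   # REQ-p00001 | Example requirement title

    f = RequirementParser.END_MARKER_PATTERN.match(footer)
    print(f.group("hash"))                        # a1b2c3d4

Whether a matched ID is actually accepted still depends on the PatternValidator built from the configured PatternConfig, as claim_and_parse shows.
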
@@ -0,0 +1,6 @@
+ """Test result parsers for JUnit XML and Pytest JSON formats."""
+
+ from elspais.graph.parsers.results.junit_xml import JUnitXMLParser
+ from elspais.graph.parsers.results.pytest_json import PytestJSONParser
+
+ __all__ = ["JUnitXMLParser", "PytestJSONParser"]
@@ -0,0 +1,229 @@
+ """JUnit XML parser for test results.
+
+ This parser extracts test results from JUnit XML format files.
+ Uses the shared reference_config infrastructure for configurable patterns.
+ """
+
+ from __future__ import annotations
+
+ import xml.etree.ElementTree as ET
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+ from elspais.utilities.reference_config import (
+     ReferenceConfig,
+     ReferenceResolver,
+     extract_ids_from_text,
+ )
+
+ if TYPE_CHECKING:
+     from elspais.utilities.patterns import PatternConfig
+
+
+ class JUnitXMLParser:
+     """Parser for JUnit XML test result files.
+
+     Parses standard JUnit XML format used by pytest, JUnit, and other
+     test frameworks.
+
+     Uses configurable patterns from ReferenceConfig for:
+     - Separator characters (- _ etc.)
+     - Case sensitivity
+     - Prefix requirements
+     """
+
+     def __init__(
+         self,
+         pattern_config: PatternConfig | None = None,
+         reference_resolver: ReferenceResolver | None = None,
+         base_path: Path | None = None,
+     ) -> None:
+         """Initialize JUnitXMLParser with optional configuration.
+
+         Args:
+             pattern_config: Configuration for ID structure. If None, uses defaults.
+             reference_resolver: Resolver for file-specific reference config. If None,
+                 uses default ReferenceConfig.
+             base_path: Base path for resolving file-specific configs.
+         """
+         self._pattern_config = pattern_config
+         self._reference_resolver = reference_resolver
+         self._base_path = base_path or Path(".")
+
+     def _get_pattern_config(self) -> PatternConfig:
+         """Get pattern config from instance or create default.
+
+         Returns:
+             PatternConfig to use for parsing.
+         """
+         if self._pattern_config is not None:
+             return self._pattern_config
+
+         from elspais.utilities.patterns import PatternConfig
+
+         return PatternConfig.from_dict(
+             {
+                 "prefix": "REQ",
+                 "types": {
+                     "prd": {"id": "p", "name": "PRD"},
+                     "ops": {"id": "o", "name": "OPS"},
+                     "dev": {"id": "d", "name": "DEV"},
+                 },
+                 "id_format": {"style": "numeric", "digits": 5},
+             }
+         )
+
+     def _get_reference_config(self, source_file: str | None = None) -> ReferenceConfig:
+         """Get reference config for the current file.
+
+         Args:
+             source_file: Optional source file path for file-specific config.
+
+         Returns:
+             ReferenceConfig for parsing.
+         """
+         if self._reference_resolver is not None and source_file:
+             return self._reference_resolver.resolve(Path(source_file), self._base_path)
+
+         if self._reference_resolver is not None:
+             return self._reference_resolver.defaults
+
+         return ReferenceConfig()
+
+     def parse(self, content: str, source_path: str) -> list[dict[str, Any]]:
+         """Parse JUnit XML content and return test result dicts.
+
+         Args:
+             content: XML file content.
+             source_path: Path to the source file.
+
+         Returns:
+             List of test result dictionaries with keys:
+             - id: Unique test ID
+             - name: Test name
+             - classname: Test class name
+             - status: passed, failed, skipped, or error
+             - duration: Test duration in seconds
+             - message: Error/failure message (if any)
+             - validates: List of requirement IDs this test validates
+         """
+         results: list[dict[str, Any]] = []
+
+         try:
+             root = ET.fromstring(content)
+         except ET.ParseError:
+             return results
+
+         # Handle both <testsuites> and <testsuite> as root
+         testsuites = root.findall(".//testsuite")
+         if not testsuites and root.tag == "testsuite":
+             testsuites = [root]
+
+         for testsuite in testsuites:
+             for testcase in testsuite.findall("testcase"):
+                 name = testcase.get("name", "")
+                 classname = testcase.get("classname", "")
+                 time_str = testcase.get("time", "0")
+
+                 try:
+                     duration = float(time_str)
+                 except ValueError:
+                     duration = 0.0
+
+                 # Determine status
+                 status = "passed"
+                 message = None
+
+                 failure = testcase.find("failure")
+                 error = testcase.find("error")
+                 skipped = testcase.find("skipped")
+
+                 if failure is not None:
+                     status = "failed"
+                     message = failure.get("message") or failure.text
+                 elif error is not None:
+                     status = "error"
+                     message = error.get("message") or error.text
+                 elif skipped is not None:
+                     status = "skipped"
+                     message = skipped.get("message") or skipped.text
+
+                 # Extract requirement references from test name or classname
+                 validates = self._extract_req_ids(f"{classname} {name}", source_path)
+
+                 # Generate stable TEST node ID from classname and name
+                 # This allows multiple results to link to the same logical test
+                 test_id = f"test:{classname}::{name}" if classname else f"test::{name}"
+
+                 result = {
+                     "id": f"{source_path}:{classname}::{name}",
+                     "name": name,
+                     "classname": classname,
+                     "status": status,
+                     "duration": duration,
+                     "message": message[:200] if message else None,
+                     "validates": validates,
+                     "source_path": source_path,
+                     "test_id": test_id,
+                 }
+
+                 results.append(result)
+
+         return results
+
+     def _extract_req_ids(self, text: str, source_file: str | None = None) -> list[str]:
+         """Extract requirement IDs from text.
+
+         Args:
+             text: Text to search for requirement IDs.
+             source_file: Optional source file for file-specific config.
+
+         Returns:
+             List of normalized requirement IDs (using hyphens).
+         """
+         pattern_config = self._get_pattern_config()
+         ref_config = self._get_reference_config(source_file)
+
+         # Use shared extraction function
+         ids = extract_ids_from_text(text, pattern_config, ref_config)
+
+         # Normalize: replace underscores with hyphens
+         normalized = []
+         for req_id in ids:
+             normalized_id = req_id.replace("_", "-")
+             if normalized_id not in normalized:
+                 normalized.append(normalized_id)
+
+         return normalized
+
+     def can_parse(self, file_path: Path) -> bool:
+         """Check if this parser can handle the given file.
+
+         Args:
+             file_path: Path to the file.
+
+         Returns:
+             True for XML files that look like JUnit results.
+         """
+         name = file_path.name.lower()
+         return file_path.suffix.lower() == ".xml" and (
+             "junit" in name or "test" in name or "result" in name
+         )
+
+
+ def create_parser(
+     pattern_config: PatternConfig | None = None,
+     reference_resolver: ReferenceResolver | None = None,
+     base_path: Path | None = None,
+ ) -> JUnitXMLParser:
+     """Factory function to create a JUnitXMLParser.
+
+     Args:
+         pattern_config: Optional configuration for ID structure.
+         reference_resolver: Optional resolver for file-specific configs.
+         base_path: Optional base path for resolving file paths.
+
+     Returns:
+         New JUnitXMLParser instance.
+     """
+     return JUnitXMLParser(pattern_config, reference_resolver, base_path)
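
As a rough usage sketch (not part of the diff, and assuming the 0.43.5 wheel is installed): feeding a small JUnit document through parse() yields one dict per testcase. The XML content, test names, and file paths below are invented; the contents of the validates field depend on the default ReferenceConfig and configured ID patterns, so only name, status, and duration are shown.

    # Illustrative only: JUnitXMLParser is re-exported by elspais.graph.parsers.results.
    from elspais.graph.parsers.results import JUnitXMLParser

    xml = """<testsuite name="unit">
      <testcase classname="tests.test_parser" name="test_req_d00042_header" time="0.01"/>
      <testcase classname="tests.test_parser" name="test_footer_hash" time="0.20">
        <failure message="assertion failed"/>
      </testcase>
    </testsuite>"""

    results = JUnitXMLParser().parse(xml, "reports/junit.xml")
    print([(r["name"], r["status"], r["duration"]) for r in results])
    # [('test_req_d00042_header', 'passed', 0.01), ('test_footer_hash', 'failed', 0.2)]

The first test name embeds a requirement reference in the style the parser scans for; whether it lands in validates is decided by extract_ids_from_text and the active pattern configuration.
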