elspais 0.11.2__py3-none-any.whl → 0.43.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. elspais/__init__.py +1 -10
  2. elspais/{sponsors/__init__.py → associates.py} +102 -56
  3. elspais/cli.py +366 -69
  4. elspais/commands/__init__.py +9 -3
  5. elspais/commands/analyze.py +118 -169
  6. elspais/commands/changed.py +12 -23
  7. elspais/commands/config_cmd.py +10 -13
  8. elspais/commands/edit.py +33 -13
  9. elspais/commands/example_cmd.py +319 -0
  10. elspais/commands/hash_cmd.py +161 -183
  11. elspais/commands/health.py +1177 -0
  12. elspais/commands/index.py +98 -115
  13. elspais/commands/init.py +99 -22
  14. elspais/commands/reformat_cmd.py +41 -433
  15. elspais/commands/rules_cmd.py +2 -2
  16. elspais/commands/trace.py +443 -324
  17. elspais/commands/validate.py +193 -411
  18. elspais/config/__init__.py +799 -5
  19. elspais/{core/content_rules.py → content_rules.py} +20 -2
  20. elspais/docs/cli/assertions.md +67 -0
  21. elspais/docs/cli/commands.md +304 -0
  22. elspais/docs/cli/config.md +262 -0
  23. elspais/docs/cli/format.md +66 -0
  24. elspais/docs/cli/git.md +45 -0
  25. elspais/docs/cli/health.md +190 -0
  26. elspais/docs/cli/hierarchy.md +60 -0
  27. elspais/docs/cli/ignore.md +72 -0
  28. elspais/docs/cli/mcp.md +245 -0
  29. elspais/docs/cli/quickstart.md +58 -0
  30. elspais/docs/cli/traceability.md +89 -0
  31. elspais/docs/cli/validation.md +96 -0
  32. elspais/graph/GraphNode.py +383 -0
  33. elspais/graph/__init__.py +40 -0
  34. elspais/graph/annotators.py +927 -0
  35. elspais/graph/builder.py +1886 -0
  36. elspais/graph/deserializer.py +248 -0
  37. elspais/graph/factory.py +284 -0
  38. elspais/graph/metrics.py +127 -0
  39. elspais/graph/mutations.py +161 -0
  40. elspais/graph/parsers/__init__.py +156 -0
  41. elspais/graph/parsers/code.py +213 -0
  42. elspais/graph/parsers/comments.py +112 -0
  43. elspais/graph/parsers/config_helpers.py +29 -0
  44. elspais/graph/parsers/heredocs.py +225 -0
  45. elspais/graph/parsers/journey.py +131 -0
  46. elspais/graph/parsers/remainder.py +79 -0
  47. elspais/graph/parsers/requirement.py +347 -0
  48. elspais/graph/parsers/results/__init__.py +6 -0
  49. elspais/graph/parsers/results/junit_xml.py +229 -0
  50. elspais/graph/parsers/results/pytest_json.py +313 -0
  51. elspais/graph/parsers/test.py +305 -0
  52. elspais/graph/relations.py +78 -0
  53. elspais/graph/serialize.py +216 -0
  54. elspais/html/__init__.py +8 -0
  55. elspais/html/generator.py +731 -0
  56. elspais/html/templates/trace_view.html.j2 +2151 -0
  57. elspais/mcp/__init__.py +45 -29
  58. elspais/mcp/__main__.py +5 -1
  59. elspais/mcp/file_mutations.py +138 -0
  60. elspais/mcp/server.py +1998 -244
  61. elspais/testing/__init__.py +3 -3
  62. elspais/testing/config.py +3 -0
  63. elspais/testing/mapper.py +1 -1
  64. elspais/testing/scanner.py +301 -12
  65. elspais/utilities/__init__.py +1 -0
  66. elspais/utilities/docs_loader.py +115 -0
  67. elspais/utilities/git.py +607 -0
  68. elspais/{core → utilities}/hasher.py +8 -22
  69. elspais/utilities/md_renderer.py +189 -0
  70. elspais/{core → utilities}/patterns.py +56 -51
  71. elspais/utilities/reference_config.py +626 -0
  72. elspais/validation/__init__.py +19 -0
  73. elspais/validation/format.py +264 -0
  74. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
  75. elspais-0.43.5.dist-info/RECORD +80 -0
  76. elspais/config/defaults.py +0 -179
  77. elspais/config/loader.py +0 -494
  78. elspais/core/__init__.py +0 -21
  79. elspais/core/git.py +0 -346
  80. elspais/core/models.py +0 -320
  81. elspais/core/parser.py +0 -639
  82. elspais/core/rules.py +0 -509
  83. elspais/mcp/context.py +0 -172
  84. elspais/mcp/serializers.py +0 -112
  85. elspais/reformat/__init__.py +0 -50
  86. elspais/reformat/detector.py +0 -112
  87. elspais/reformat/hierarchy.py +0 -247
  88. elspais/reformat/line_breaks.py +0 -218
  89. elspais/reformat/prompts.py +0 -133
  90. elspais/reformat/transformer.py +0 -266
  91. elspais/trace_view/__init__.py +0 -55
  92. elspais/trace_view/coverage.py +0 -183
  93. elspais/trace_view/generators/__init__.py +0 -12
  94. elspais/trace_view/generators/base.py +0 -334
  95. elspais/trace_view/generators/csv.py +0 -118
  96. elspais/trace_view/generators/markdown.py +0 -170
  97. elspais/trace_view/html/__init__.py +0 -33
  98. elspais/trace_view/html/generator.py +0 -1140
  99. elspais/trace_view/html/templates/base.html +0 -283
  100. elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
  101. elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
  102. elspais/trace_view/html/templates/components/legend_modal.html +0 -69
  103. elspais/trace_view/html/templates/components/review_panel.html +0 -118
  104. elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
  105. elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
  106. elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
  107. elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
  108. elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
  109. elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
  110. elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
  111. elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
  112. elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
  113. elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
  114. elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
  115. elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
  116. elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
  117. elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
  118. elspais/trace_view/html/templates/partials/scripts.js +0 -1741
  119. elspais/trace_view/html/templates/partials/styles.css +0 -1756
  120. elspais/trace_view/models.py +0 -378
  121. elspais/trace_view/review/__init__.py +0 -63
  122. elspais/trace_view/review/branches.py +0 -1142
  123. elspais/trace_view/review/models.py +0 -1200
  124. elspais/trace_view/review/position.py +0 -591
  125. elspais/trace_view/review/server.py +0 -1032
  126. elspais/trace_view/review/status.py +0 -455
  127. elspais/trace_view/review/storage.py +0 -1343
  128. elspais/trace_view/scanning.py +0 -213
  129. elspais/trace_view/specs/README.md +0 -84
  130. elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
  131. elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
  132. elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
  133. elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
  134. elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
  135. elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
  136. elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
  137. elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
  138. elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
  139. elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
  140. elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
  141. elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
  142. elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
  143. elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
  144. elspais-0.11.2.dist-info/RECORD +0 -101
  145. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
  146. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
  147. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
elspais/core/parser.py DELETED
@@ -1,639 +0,0 @@
1
- """
2
- elspais.core.parser - Requirement file parsing.
3
-
4
- Parses Markdown files containing requirements in the standard format.
5
- """
6
-
7
- import re
8
- from pathlib import Path
9
- from typing import Dict, List, Optional, Sequence, Union
10
-
11
- from elspais.core.models import Assertion, ParseResult, ParseWarning, Requirement
12
- from elspais.core.patterns import PatternConfig, PatternValidator
13
-
14
-
15
class RequirementParser:
    """
    Parses requirement specifications from Markdown files.
    """

    # Regex patterns for parsing
    # Generic pattern to find potential requirement headers
    # Actual ID validation is done by PatternValidator
    HEADER_PATTERN = re.compile(r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$")
    # Metadata line: "**Level**: ... | **Implements**: ... | **Status**: ..."
    # (Implements and Status segments are optional)
    LEVEL_STATUS_PATTERN = re.compile(
        r"\*\*Level\*\*:\s*(?P<level>\w+)"
        r"(?:\s*\|\s*\*\*Implements\*\*:\s*(?P<implements>[^|\n]+))?"
        r"(?:\s*\|\s*\*\*Status\*\*:\s*(?P<status>\w+))?"
    )
    # Fallbacks used when the fields appear outside the combined metadata line
    ALT_STATUS_PATTERN = re.compile(r"\*\*Status\*\*:\s*(?P<status>\w+)")
    IMPLEMENTS_PATTERN = re.compile(r"\*\*Implements\*\*:\s*(?P<implements>[^|\n]+)")
    # Requirement terminator: "*End* *<id>*" optionally followed by "| **Hash**: <hex>"
    END_MARKER_PATTERN = re.compile(
        r"^\*End\*\s+\*[^*]+\*\s*(?:\|\s*\*\*Hash\*\*:\s*(?P<hash>[a-zA-Z0-9]+))?", re.MULTILINE
    )
    RATIONALE_PATTERN = re.compile(r"\*\*Rationale\*\*:\s*(.+?)(?=\n\n|\n\*\*|\Z)", re.DOTALL)
    ACCEPTANCE_PATTERN = re.compile(
        r"\*\*Acceptance Criteria\*\*:\s*\n((?:\s*-\s*.+\n?)+)", re.MULTILINE
    )
    # Assertions section header (## Assertions or **Assertions**)
    ASSERTIONS_HEADER_PATTERN = re.compile(r"^##\s+Assertions\s*$", re.MULTILINE)
    # Individual assertion line: "A. The system SHALL..." or "01. ..." etc.
    # Captures: label (any alphanumeric), text (rest of line, may continue)
    ASSERTION_LINE_PATTERN = re.compile(r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE)

    # Default values that mean "no references" in Implements field
    DEFAULT_NO_REFERENCE_VALUES = ["-", "null", "none", "x", "X", "N/A", "n/a"]

    # Default placeholder values that indicate a removed/deprecated assertion
    DEFAULT_PLACEHOLDER_VALUES = [
        "obsolete",
        "removed",
        "deprecated",
        "N/A",
        "n/a",
        "-",
        "reserved",
    ]

    def __init__(
        self,
        pattern_config: PatternConfig,
        no_reference_values: Optional[List[str]] = None,
        placeholder_values: Optional[List[str]] = None,
    ):
        """
        Initialize parser with pattern configuration.

        Args:
            pattern_config: Configuration for ID patterns
            no_reference_values: Values in Implements field that mean "no references"
            placeholder_values: Values that indicate removed/deprecated assertions
        """
        self.pattern_config = pattern_config
        self.validator = PatternValidator(pattern_config)
        # `is not None` (rather than truthiness) so callers may pass [] to
        # disable the defaults entirely.
        self.no_reference_values = (
            no_reference_values
            if no_reference_values is not None
            else self.DEFAULT_NO_REFERENCE_VALUES
        )
        self.placeholder_values = (
            placeholder_values
            if placeholder_values is not None
            else self.DEFAULT_PLACEHOLDER_VALUES
        )

    def parse_text(
        self,
        text: str,
        file_path: Optional[Path] = None,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse requirements from text.

        Args:
            text: Markdown text containing requirements
            file_path: Optional source file path for location tracking
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []
        lines = text.split("\n")

        i = 0
        while i < len(lines):
            line = lines[i]

            # Look for requirement header
            header_match = self.HEADER_PATTERN.match(line)
            if header_match:
                req_id = header_match.group("id")

                # Validate ID against configured pattern
                if not self.validator.is_valid(req_id):
                    i += 1
                    continue

                title = header_match.group("title").strip()
                start_line = i + 1  # 1-indexed

                # Find the end of this requirement
                req_lines = [line]
                i += 1
                while i < len(lines):
                    req_lines.append(lines[i])
                    # Check for end marker or next requirement
                    if self.END_MARKER_PATTERN.match(lines[i]):
                        i += 1
                        # Skip separator line if present
                        if i < len(lines) and lines[i].strip() == "---":
                            i += 1
                        break
                    # Check for next valid requirement header
                    next_match = self.HEADER_PATTERN.match(lines[i])
                    if next_match and self.validator.is_valid(next_match.group("id")):
                        # Hit next requirement without end marker; leave i on
                        # the new header so the outer loop re-processes it.
                        break
                    i += 1

                # Parse the requirement block
                req_text = "\n".join(req_lines)
                req, block_warnings = self._parse_requirement_block(
                    req_id, title, req_text, file_path, start_line, subdir
                )
                warnings.extend(block_warnings)
                if req:
                    # Check for duplicate ID
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], file_path, start_line
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
            else:
                i += 1

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_file(
        self,
        file_path: Path,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse requirements from a file.

        Args:
            file_path: Path to the Markdown file
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        text = file_path.read_text(encoding="utf-8")
        return self.parse_text(text, file_path, subdir)

    def parse_directory(
        self,
        directory: Path,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse all requirements from a directory.

        Args:
            directory: Path to the spec directory
            patterns: Optional glob patterns to match files
            skip_files: Optional list of filenames to skip
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        if patterns is None:
            patterns = ["*.md"]

        if skip_files is None:
            skip_files = []

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        for pattern in patterns:
            for file_path in directory.glob(pattern):
                if file_path.is_file() and file_path.name not in skip_files:
                    result = self.parse_file(file_path, subdir)
                    # Merge requirements, checking for cross-file duplicates
                    for req_id, req in result.requirements.items():
                        if req_id in requirements:
                            # Keep both: original stays, duplicate gets __conflict suffix
                            conflict_key, conflict_req, warning = self._make_conflict_entry(
                                req, req_id, requirements[req_id], file_path, req.line_number
                            )
                            requirements[conflict_key] = conflict_req
                            warnings.append(warning)
                        else:
                            requirements[req_id] = req
                    warnings.extend(result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_directories(
        self,
        directories: Union[str, Path, Sequence[Union[str, Path]]],
        base_path: Optional[Path] = None,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
    ) -> ParseResult:
        """
        Parse all requirements from one or more directories.

        Does NOT recursively search subdirectories - only the specified directories.

        Args:
            directories: Single directory path (str/Path) or list of directory paths
            base_path: Base path to resolve relative directories against
            patterns: Optional glob patterns to match files (default: ["*.md"])
            skip_files: Optional list of filenames to skip

        Returns:
            ParseResult with requirements dict and warnings list
        """
        # Normalize to list
        if isinstance(directories, (str, Path)):
            dir_list = [directories]
        else:
            dir_list = list(directories)

        if base_path is None:
            base_path = Path.cwd()

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        for dir_entry in dir_list:
            if Path(dir_entry).is_absolute():
                dir_path = Path(dir_entry)
            else:
                dir_path = base_path / dir_entry
            # Nonexistent directories are silently skipped (best-effort scan)
            if dir_path.exists() and dir_path.is_dir():
                result = self.parse_directory(dir_path, patterns=patterns, skip_files=skip_files)
                # Merge requirements, checking for cross-directory duplicates
                for req_id, req in result.requirements.items():
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], req.file_path, req.line_number
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
                warnings.extend(result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_directory_with_subdirs(
        self,
        directory: Path,
        subdirs: Optional[List[str]] = None,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
    ) -> ParseResult:
        """
        Parse requirements from a directory and its subdirectories.

        Unlike parse_directory, this method:
        - Parses the root directory (with subdir="")
        - Parses each specified subdirectory (with subdir set to the subdir name)

        Args:
            directory: Path to the spec directory
            subdirs: List of subdirectory names to include (e.g., ["roadmap", "archive"])
            patterns: Optional glob patterns to match files
            skip_files: Optional list of filenames to skip

        Returns:
            ParseResult with requirements dict and warnings list
        """
        if subdirs is None:
            subdirs = []

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        # Parse root directory
        root_result = self.parse_directory(
            directory, patterns=patterns, skip_files=skip_files, subdir=""
        )
        requirements.update(root_result.requirements)
        warnings.extend(root_result.warnings)

        # Parse each subdirectory
        for subdir_name in subdirs:
            subdir_path = directory / subdir_name
            if subdir_path.exists() and subdir_path.is_dir():
                subdir_result = self.parse_directory(
                    subdir_path, patterns=patterns, skip_files=skip_files, subdir=subdir_name
                )
                # Merge requirements, checking for cross-subdir duplicates
                for req_id, req in subdir_result.requirements.items():
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], req.file_path, req.line_number
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
                warnings.extend(subdir_result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def _make_conflict_entry(
        self,
        duplicate_req: Requirement,
        original_id: str,
        original_req: Requirement,
        file_path: Optional[Path],
        line_number: Optional[int],
    ) -> tuple:
        """
        Create a conflict entry for a duplicate requirement.

        When a requirement ID already exists, this creates a modified version
        of the duplicate with:
        - Key suffix `__conflict` for storage
        - `is_conflict=True` flag
        - `conflict_with` set to the original ID
        - `implements=[]` (treated as orphaned)

        Args:
            duplicate_req: The duplicate requirement that was found
            original_id: The ID that is duplicated
            original_req: The original requirement that was first
            file_path: File path for the warning
            line_number: Line number for the warning

        Returns:
            Tuple of (conflict_key, modified_requirement, ParseWarning)
        """
        conflict_key = f"{original_id}__conflict"

        # Modify the duplicate requirement (mutated in place, not copied)
        duplicate_req.is_conflict = True
        duplicate_req.conflict_with = original_id
        duplicate_req.implements = []  # Treat as orphaned

        warning = ParseWarning(
            requirement_id=original_id,
            message=(
                f"Duplicate ID found "
                f"(first occurrence in {original_req.file_path}:{original_req.line_number})"
            ),
            file_path=file_path,
            line_number=line_number,
        )

        return conflict_key, duplicate_req, warning

    def _parse_requirement_block(
        self,
        req_id: str,
        title: str,
        text: str,
        file_path: Optional[Path],
        line_number: int,
        subdir: str = "",
    ) -> tuple:
        """
        Parse a single requirement block.

        Args:
            req_id: The requirement ID
            title: The requirement title
            text: The full requirement text block
            file_path: Source file path
            line_number: Starting line number
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            Tuple of (Requirement or None, List[ParseWarning])
        """
        block_warnings: List[ParseWarning] = []

        # Extract level, status, and implements from header line
        level = "Unknown"
        status = "Unknown"
        implements_str = ""

        level_match = self.LEVEL_STATUS_PATTERN.search(text)
        if level_match:
            level = level_match.group("level") or "Unknown"
            implements_str = level_match.group("implements") or ""
            status = level_match.group("status") or "Unknown"

        # Try alternative status pattern
        if status == "Unknown":
            alt_status_match = self.ALT_STATUS_PATTERN.search(text)
            if alt_status_match:
                status = alt_status_match.group("status")

        # Try alternative implements pattern
        if not implements_str:
            impl_match = self.IMPLEMENTS_PATTERN.search(text)
            if impl_match:
                implements_str = impl_match.group("implements")

        # Parse implements list and validate references
        implements = self._parse_implements(implements_str)
        for ref in implements:
            if not self.validator.is_valid(ref):
                block_warnings.append(
                    ParseWarning(
                        requirement_id=req_id,
                        message=f"Invalid implements reference: {ref}",
                        file_path=file_path,
                        line_number=line_number,
                    )
                )

        # Extract body (text between header and acceptance/end)
        body = self._extract_body(text)

        # Extract rationale
        rationale = None
        rationale_match = self.RATIONALE_PATTERN.search(text)
        if rationale_match:
            rationale = rationale_match.group(1).strip()

        # Extract acceptance criteria (legacy format)
        acceptance_criteria = []
        acceptance_match = self.ACCEPTANCE_PATTERN.search(text)
        if acceptance_match:
            criteria_text = acceptance_match.group(1)
            acceptance_criteria = [
                line.strip().lstrip("- ").strip()
                for line in criteria_text.split("\n")
                if line.strip().startswith("-")
            ]

        # Extract assertions (new format) and validate labels
        assertions = self._extract_assertions(text)
        for assertion in assertions:
            if not self._is_valid_assertion_label(assertion.label):
                block_warnings.append(
                    ParseWarning(
                        requirement_id=req_id,
                        message=f"Invalid assertion label format: {assertion.label}",
                        file_path=file_path,
                        line_number=line_number,
                    )
                )

        # Extract hash from end marker
        hash_value = None
        end_match = self.END_MARKER_PATTERN.search(text)
        if end_match:
            hash_value = end_match.group("hash")

        req = Requirement(
            id=req_id,
            title=title,
            level=level,
            status=status,
            body=body,
            implements=implements,
            acceptance_criteria=acceptance_criteria,
            assertions=assertions,
            rationale=rationale,
            hash=hash_value,
            file_path=file_path,
            line_number=line_number,
            subdir=subdir,
        )
        return req, block_warnings

    def _is_valid_assertion_label(self, label: str) -> bool:
        """Check if an assertion label matches expected format.

        Default expectation is uppercase letters A-Z.
        """
        # Check against configured assertion label pattern if available
        assertion_config = getattr(self.pattern_config, "assertions", None)
        if assertion_config:
            label_style = assertion_config.get("label_style", "uppercase")
            if label_style == "uppercase":
                return bool(re.match(r"^[A-Z]$", label))
            elif label_style == "numeric":
                return bool(re.match(r"^\d+$", label))
            elif label_style == "alphanumeric":
                return bool(re.match(r"^[A-Z0-9]+$", label))
        # Default: uppercase single letter
        return bool(re.match(r"^[A-Z]$", label))

    def _parse_implements(self, implements_str: str) -> List[str]:
        """Parse comma-separated implements list.

        Returns empty list if the value is a "no reference" indicator.
        """
        if not implements_str:
            return []

        # Check if it's a "no reference" value
        stripped = implements_str.strip()
        if stripped in self.no_reference_values:
            return []

        parts = [p.strip() for p in implements_str.split(",")]
        # Filter out empty parts and no-reference values
        return [p for p in parts if p and p not in self.no_reference_values]

    def _extract_body(self, text: str) -> str:
        """Extract the main body text from requirement block.

        Body is everything between the header (and optional metadata line)
        and the end marker, including Rationale and Acceptance Criteria sections.
        Trailing blank lines are removed for consistent hashing.
        """
        lines = text.split("\n")
        body_lines = []
        found_header = False
        in_body = False

        for line in lines:
            # Skip header line
            if self.HEADER_PATTERN.match(line):
                found_header = True
                continue

            if found_header and not in_body:
                # Metadata line - skip it but mark body start
                if "**Level**" in line or "**Status**" in line:
                    in_body = True
                    continue
                # First non-blank content line starts body (when no metadata)
                elif line.strip():
                    in_body = True
                    # Don't continue - include this line in body

            # Stop at end marker
            if line.strip().startswith("*End*"):
                break

            if in_body:
                body_lines.append(line)

        # Remove trailing blank lines (matches hht-diary clean_requirement_body)
        while body_lines and not body_lines[-1].strip():
            body_lines.pop()

        # Strip trailing whitespace from result
        return "\n".join(body_lines).rstrip()

    def _extract_assertions(self, text: str) -> List[Assertion]:
        """Extract assertions from requirement text.

        Looks for `## Assertions` section and parses lines like:
            A. The system SHALL...
            B. The system SHALL NOT...

        Args:
            text: The requirement text block

        Returns:
            List of Assertion objects
        """
        assertions: List[Assertion] = []

        # Find the assertions section
        header_match = self.ASSERTIONS_HEADER_PATTERN.search(text)
        if not header_match:
            return assertions

        # Get text after the header until the next section or end marker
        start_pos = header_match.end()
        section_text = text[start_pos:]

        # Find the end of the assertions section (next ## header, Rationale, or End marker)
        end_patterns = [
            r"^##\s+",  # Next section header
            r"^\*End\*",  # End marker
            r"^---\s*$",  # Separator line
        ]
        end_pos = len(section_text)
        for pattern in end_patterns:
            match = re.search(pattern, section_text, re.MULTILINE)
            if match and match.start() < end_pos:
                end_pos = match.start()

        assertions_text = section_text[:end_pos]

        # Parse individual assertion lines
        for match in self.ASSERTION_LINE_PATTERN.finditer(assertions_text):
            label = match.group(1)
            assertion_text = match.group(2).strip()

            # Check if this is a placeholder
            is_placeholder = any(
                assertion_text.lower().startswith(pv.lower()) for pv in self.placeholder_values
            )

            assertions.append(
                Assertion(
                    label=label,
                    text=assertion_text,
                    is_placeholder=is_placeholder,
                )
            )

        return assertions