elspais 0.11.1__py3-none-any.whl → 0.43.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. elspais/__init__.py +2 -11
  2. elspais/{sponsors/__init__.py → associates.py} +102 -58
  3. elspais/cli.py +395 -79
  4. elspais/commands/__init__.py +9 -3
  5. elspais/commands/analyze.py +121 -173
  6. elspais/commands/changed.py +15 -30
  7. elspais/commands/config_cmd.py +13 -16
  8. elspais/commands/edit.py +60 -44
  9. elspais/commands/example_cmd.py +319 -0
  10. elspais/commands/hash_cmd.py +167 -183
  11. elspais/commands/health.py +1177 -0
  12. elspais/commands/index.py +98 -114
  13. elspais/commands/init.py +103 -26
  14. elspais/commands/reformat_cmd.py +41 -444
  15. elspais/commands/rules_cmd.py +7 -3
  16. elspais/commands/trace.py +444 -321
  17. elspais/commands/validate.py +195 -415
  18. elspais/config/__init__.py +799 -5
  19. elspais/{core/content_rules.py → content_rules.py} +20 -3
  20. elspais/docs/cli/assertions.md +67 -0
  21. elspais/docs/cli/commands.md +304 -0
  22. elspais/docs/cli/config.md +262 -0
  23. elspais/docs/cli/format.md +66 -0
  24. elspais/docs/cli/git.md +45 -0
  25. elspais/docs/cli/health.md +190 -0
  26. elspais/docs/cli/hierarchy.md +60 -0
  27. elspais/docs/cli/ignore.md +72 -0
  28. elspais/docs/cli/mcp.md +245 -0
  29. elspais/docs/cli/quickstart.md +58 -0
  30. elspais/docs/cli/traceability.md +89 -0
  31. elspais/docs/cli/validation.md +96 -0
  32. elspais/graph/GraphNode.py +383 -0
  33. elspais/graph/__init__.py +40 -0
  34. elspais/graph/annotators.py +927 -0
  35. elspais/graph/builder.py +1886 -0
  36. elspais/graph/deserializer.py +248 -0
  37. elspais/graph/factory.py +284 -0
  38. elspais/graph/metrics.py +127 -0
  39. elspais/graph/mutations.py +161 -0
  40. elspais/graph/parsers/__init__.py +156 -0
  41. elspais/graph/parsers/code.py +213 -0
  42. elspais/graph/parsers/comments.py +112 -0
  43. elspais/graph/parsers/config_helpers.py +29 -0
  44. elspais/graph/parsers/heredocs.py +225 -0
  45. elspais/graph/parsers/journey.py +131 -0
  46. elspais/graph/parsers/remainder.py +79 -0
  47. elspais/graph/parsers/requirement.py +347 -0
  48. elspais/graph/parsers/results/__init__.py +6 -0
  49. elspais/graph/parsers/results/junit_xml.py +229 -0
  50. elspais/graph/parsers/results/pytest_json.py +313 -0
  51. elspais/graph/parsers/test.py +305 -0
  52. elspais/graph/relations.py +78 -0
  53. elspais/graph/serialize.py +216 -0
  54. elspais/html/__init__.py +8 -0
  55. elspais/html/generator.py +731 -0
  56. elspais/html/templates/trace_view.html.j2 +2151 -0
  57. elspais/mcp/__init__.py +47 -29
  58. elspais/mcp/__main__.py +5 -1
  59. elspais/mcp/file_mutations.py +138 -0
  60. elspais/mcp/server.py +2016 -247
  61. elspais/testing/__init__.py +4 -4
  62. elspais/testing/config.py +3 -0
  63. elspais/testing/mapper.py +1 -1
  64. elspais/testing/result_parser.py +25 -21
  65. elspais/testing/scanner.py +301 -12
  66. elspais/utilities/__init__.py +1 -0
  67. elspais/utilities/docs_loader.py +115 -0
  68. elspais/utilities/git.py +607 -0
  69. elspais/{core → utilities}/hasher.py +8 -22
  70. elspais/utilities/md_renderer.py +189 -0
  71. elspais/{core → utilities}/patterns.py +58 -57
  72. elspais/utilities/reference_config.py +626 -0
  73. elspais/validation/__init__.py +19 -0
  74. elspais/validation/format.py +264 -0
  75. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
  76. elspais-0.43.5.dist-info/RECORD +80 -0
  77. elspais/config/defaults.py +0 -173
  78. elspais/config/loader.py +0 -494
  79. elspais/core/__init__.py +0 -21
  80. elspais/core/git.py +0 -352
  81. elspais/core/models.py +0 -320
  82. elspais/core/parser.py +0 -640
  83. elspais/core/rules.py +0 -514
  84. elspais/mcp/context.py +0 -171
  85. elspais/mcp/serializers.py +0 -112
  86. elspais/reformat/__init__.py +0 -50
  87. elspais/reformat/detector.py +0 -119
  88. elspais/reformat/hierarchy.py +0 -246
  89. elspais/reformat/line_breaks.py +0 -220
  90. elspais/reformat/prompts.py +0 -123
  91. elspais/reformat/transformer.py +0 -264
  92. elspais/trace_view/__init__.py +0 -54
  93. elspais/trace_view/coverage.py +0 -183
  94. elspais/trace_view/generators/__init__.py +0 -12
  95. elspais/trace_view/generators/base.py +0 -329
  96. elspais/trace_view/generators/csv.py +0 -122
  97. elspais/trace_view/generators/markdown.py +0 -175
  98. elspais/trace_view/html/__init__.py +0 -31
  99. elspais/trace_view/html/generator.py +0 -1006
  100. elspais/trace_view/html/templates/base.html +0 -283
  101. elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
  102. elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
  103. elspais/trace_view/html/templates/components/legend_modal.html +0 -69
  104. elspais/trace_view/html/templates/components/review_panel.html +0 -118
  105. elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
  106. elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
  107. elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
  108. elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
  109. elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
  110. elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
  111. elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
  112. elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
  113. elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
  114. elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
  115. elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
  116. elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
  117. elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
  118. elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
  119. elspais/trace_view/html/templates/partials/scripts.js +0 -1741
  120. elspais/trace_view/html/templates/partials/styles.css +0 -1756
  121. elspais/trace_view/models.py +0 -353
  122. elspais/trace_view/review/__init__.py +0 -60
  123. elspais/trace_view/review/branches.py +0 -1149
  124. elspais/trace_view/review/models.py +0 -1205
  125. elspais/trace_view/review/position.py +0 -609
  126. elspais/trace_view/review/server.py +0 -1056
  127. elspais/trace_view/review/status.py +0 -470
  128. elspais/trace_view/review/storage.py +0 -1367
  129. elspais/trace_view/scanning.py +0 -213
  130. elspais/trace_view/specs/README.md +0 -84
  131. elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
  132. elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
  133. elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
  134. elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
  135. elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
  136. elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
  137. elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
  138. elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
  139. elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
  140. elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
  141. elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
  142. elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
  143. elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
  144. elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
  145. elspais-0.11.1.dist-info/RECORD +0 -101
  146. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
  147. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
  148. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
elspais/core/parser.py DELETED
@@ -1,640 +0,0 @@
1
- """
2
- elspais.core.parser - Requirement file parsing.
3
-
4
- Parses Markdown files containing requirements in the standard format.
5
- """
6
-
7
- import re
8
- from pathlib import Path
9
- from typing import Dict, List, Optional, Sequence, Union
10
-
11
- from elspais.core.models import Assertion, ParseResult, ParseWarning, Requirement
12
- from elspais.core.patterns import PatternConfig, PatternValidator
13
-
14
-
15
class RequirementParser:
    """
    Parses requirement specifications from Markdown files.

    Entry points are parse_text / parse_file / parse_directory /
    parse_directories / parse_directory_with_subdirs; each returns a
    ParseResult holding a dict of Requirement objects keyed by ID plus a
    list of ParseWarning objects.
    """

    # Regex patterns for parsing
    # Generic pattern to find potential requirement headers
    # Actual ID validation is done by PatternValidator
    HEADER_PATTERN = re.compile(
        r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$"
    )
    # Metadata line: "**Level**: X | **Implements**: Y | **Status**: Z"
    # (Implements and Status segments are optional).
    LEVEL_STATUS_PATTERN = re.compile(
        r"\*\*Level\*\*:\s*(?P<level>\w+)"
        r"(?:\s*\|\s*\*\*Implements\*\*:\s*(?P<implements>[^|\n]+))?"
        r"(?:\s*\|\s*\*\*Status\*\*:\s*(?P<status>\w+))?"
    )
    # Fallbacks used when Status/Implements appear outside the Level line.
    ALT_STATUS_PATTERN = re.compile(
        r"\*\*Status\*\*:\s*(?P<status>\w+)"
    )
    IMPLEMENTS_PATTERN = re.compile(
        r"\*\*Implements\*\*:\s*(?P<implements>[^|\n]+)"
    )
    # "*End* *...*" terminator line with an optional "| **Hash**: abc123" suffix.
    END_MARKER_PATTERN = re.compile(
        r"^\*End\*\s+\*[^*]+\*\s*(?:\|\s*\*\*Hash\*\*:\s*(?P<hash>[a-zA-Z0-9]+))?",
        re.MULTILINE
    )
    RATIONALE_PATTERN = re.compile(
        r"\*\*Rationale\*\*:\s*(.+?)(?=\n\n|\n\*\*|\Z)", re.DOTALL
    )
    ACCEPTANCE_PATTERN = re.compile(
        r"\*\*Acceptance Criteria\*\*:\s*\n((?:\s*-\s*.+\n?)+)", re.MULTILINE
    )
    # Assertions section header (## Assertions or **Assertions**)
    ASSERTIONS_HEADER_PATTERN = re.compile(
        r"^##\s+Assertions\s*$", re.MULTILINE
    )
    # Individual assertion line: "A. The system SHALL..." or "01. ..." etc.
    # Captures: label (any alphanumeric), text (rest of line, may continue)
    ASSERTION_LINE_PATTERN = re.compile(
        r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE
    )

    # Default values that mean "no references" in Implements field
    DEFAULT_NO_REFERENCE_VALUES = ["-", "null", "none", "x", "X", "N/A", "n/a"]

    # Default placeholder values that indicate a removed/deprecated assertion
    DEFAULT_PLACEHOLDER_VALUES = [
        "obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"
    ]

    def __init__(
        self,
        pattern_config: PatternConfig,
        no_reference_values: Optional[List[str]] = None,
        placeholder_values: Optional[List[str]] = None,
    ):
        """
        Initialize parser with pattern configuration.

        Args:
            pattern_config: Configuration for ID patterns
            no_reference_values: Values in Implements field that mean "no references"
            placeholder_values: Values that indicate removed/deprecated assertions
        """
        self.pattern_config = pattern_config
        self.validator = PatternValidator(pattern_config)
        # Fall back to the class-level defaults only on None, so callers may
        # pass an explicit empty list to disable the defaults.
        self.no_reference_values = (
            no_reference_values
            if no_reference_values is not None
            else self.DEFAULT_NO_REFERENCE_VALUES
        )
        self.placeholder_values = (
            placeholder_values
            if placeholder_values is not None
            else self.DEFAULT_PLACEHOLDER_VALUES
        )

    def parse_text(
        self,
        text: str,
        file_path: Optional[Path] = None,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse requirements from text.

        Args:
            text: Markdown text containing requirements
            file_path: Optional source file path for location tracking
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []
        lines = text.split("\n")

        i = 0
        while i < len(lines):
            line = lines[i]

            # Look for requirement header
            header_match = self.HEADER_PATTERN.match(line)
            if header_match:
                req_id = header_match.group("id")

                # Validate ID against configured pattern
                if not self.validator.is_valid(req_id):
                    i += 1
                    continue

                title = header_match.group("title").strip()
                start_line = i + 1  # 1-indexed

                # Find the end of this requirement
                req_lines = [line]
                i += 1
                while i < len(lines):
                    req_lines.append(lines[i])
                    # Check for end marker or next requirement
                    if self.END_MARKER_PATTERN.match(lines[i]):
                        i += 1
                        # Skip separator line if present
                        if i < len(lines) and lines[i].strip() == "---":
                            i += 1
                        break
                    # Check for next valid requirement header
                    next_match = self.HEADER_PATTERN.match(lines[i])
                    if next_match and self.validator.is_valid(next_match.group("id")):
                        # Hit next requirement without end marker
                        # NOTE: the next header line has already been appended to
                        # req_lines above; _extract_body skips header lines, so it
                        # does not leak into the stored body. i is not advanced, so
                        # the header is re-scanned as the next requirement.
                        break
                    i += 1

                # Parse the requirement block
                req_text = "\n".join(req_lines)
                req, block_warnings = self._parse_requirement_block(
                    req_id, title, req_text, file_path, start_line, subdir
                )
                warnings.extend(block_warnings)
                if req:
                    # Check for duplicate ID
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], file_path, start_line
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
            else:
                i += 1

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_file(
        self,
        file_path: Path,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse requirements from a file.

        Args:
            file_path: Path to the Markdown file
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        text = file_path.read_text(encoding="utf-8")
        return self.parse_text(text, file_path, subdir)

    def parse_directory(
        self,
        directory: Path,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
        subdir: str = "",
    ) -> ParseResult:
        """
        Parse all requirements from a directory.

        Args:
            directory: Path to the spec directory
            patterns: Optional glob patterns to match files
            skip_files: Optional list of filenames to skip
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            ParseResult with requirements dict and warnings list
        """
        if patterns is None:
            patterns = ["*.md"]

        if skip_files is None:
            skip_files = []

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        for pattern in patterns:
            for file_path in directory.glob(pattern):
                if file_path.is_file() and file_path.name not in skip_files:
                    result = self.parse_file(file_path, subdir)
                    # Merge requirements, checking for cross-file duplicates
                    for req_id, req in result.requirements.items():
                        if req_id in requirements:
                            # Keep both: original stays, duplicate gets __conflict suffix
                            conflict_key, conflict_req, warning = self._make_conflict_entry(
                                req, req_id, requirements[req_id], file_path, req.line_number
                            )
                            requirements[conflict_key] = conflict_req
                            warnings.append(warning)
                        else:
                            requirements[req_id] = req
                    warnings.extend(result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_directories(
        self,
        directories: Union[str, Path, Sequence[Union[str, Path]]],
        base_path: Optional[Path] = None,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
    ) -> ParseResult:
        """
        Parse all requirements from one or more directories.

        Does NOT recursively search subdirectories - only the specified directories.

        Args:
            directories: Single directory path (str/Path) or list of directory paths
            base_path: Base path to resolve relative directories against
            patterns: Optional glob patterns to match files (default: ["*.md"])
            skip_files: Optional list of filenames to skip

        Returns:
            ParseResult with requirements dict and warnings list
        """
        # Normalize to list
        if isinstance(directories, (str, Path)):
            dir_list = [directories]
        else:
            dir_list = list(directories)

        if base_path is None:
            base_path = Path.cwd()

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        for dir_entry in dir_list:
            if Path(dir_entry).is_absolute():
                dir_path = Path(dir_entry)
            else:
                dir_path = base_path / dir_entry
            # Nonexistent directories are silently skipped.
            if dir_path.exists() and dir_path.is_dir():
                result = self.parse_directory(
                    dir_path, patterns=patterns, skip_files=skip_files
                )
                # Merge requirements, checking for cross-directory duplicates
                for req_id, req in result.requirements.items():
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], req.file_path, req.line_number
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
                warnings.extend(result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def parse_directory_with_subdirs(
        self,
        directory: Path,
        subdirs: Optional[List[str]] = None,
        patterns: Optional[List[str]] = None,
        skip_files: Optional[List[str]] = None,
    ) -> ParseResult:
        """
        Parse requirements from a directory and its subdirectories.

        Unlike parse_directory, this method:
        - Parses the root directory (with subdir="")
        - Parses each specified subdirectory (with subdir set to the subdir name)

        Args:
            directory: Path to the spec directory
            subdirs: List of subdirectory names to include (e.g., ["roadmap", "archive"])
            patterns: Optional glob patterns to match files
            skip_files: Optional list of filenames to skip

        Returns:
            ParseResult with requirements dict and warnings list
        """
        if subdirs is None:
            subdirs = []

        requirements: Dict[str, Requirement] = {}
        warnings: List[ParseWarning] = []

        # Parse root directory
        root_result = self.parse_directory(
            directory, patterns=patterns, skip_files=skip_files, subdir=""
        )
        requirements.update(root_result.requirements)
        warnings.extend(root_result.warnings)

        # Parse each subdirectory
        for subdir_name in subdirs:
            subdir_path = directory / subdir_name
            if subdir_path.exists() and subdir_path.is_dir():
                subdir_result = self.parse_directory(
                    subdir_path, patterns=patterns, skip_files=skip_files, subdir=subdir_name
                )
                # Merge requirements, checking for cross-subdir duplicates
                for req_id, req in subdir_result.requirements.items():
                    if req_id in requirements:
                        # Keep both: original stays, duplicate gets __conflict suffix
                        conflict_key, conflict_req, warning = self._make_conflict_entry(
                            req, req_id, requirements[req_id], req.file_path, req.line_number
                        )
                        requirements[conflict_key] = conflict_req
                        warnings.append(warning)
                    else:
                        requirements[req_id] = req
                warnings.extend(subdir_result.warnings)

        return ParseResult(requirements=requirements, warnings=warnings)

    def _make_conflict_entry(
        self,
        duplicate_req: Requirement,
        original_id: str,
        original_req: Requirement,
        file_path: Optional[Path],
        line_number: Optional[int],
    ) -> tuple:
        """
        Create a conflict entry for a duplicate requirement.

        When a requirement ID already exists, this creates a modified version
        of the duplicate with:
        - Key suffix `__conflict` for storage
        - `is_conflict=True` flag
        - `conflict_with` set to the original ID
        - `implements=[]` (treated as orphaned)

        Note: duplicate_req is mutated in place (is_conflict, conflict_with,
        implements) and the same object is returned as the conflict entry.

        Args:
            duplicate_req: The duplicate requirement that was found
            original_id: The ID that is duplicated
            original_req: The original requirement that was first
            file_path: File path for the warning
            line_number: Line number for the warning

        Returns:
            Tuple of (conflict_key, modified_requirement, ParseWarning)
        """
        conflict_key = f"{original_id}__conflict"

        # Modify the duplicate requirement
        duplicate_req.is_conflict = True
        duplicate_req.conflict_with = original_id
        duplicate_req.implements = []  # Treat as orphaned

        warning = ParseWarning(
            requirement_id=original_id,
            message=f"Duplicate ID found (first occurrence in {original_req.file_path}:{original_req.line_number})",
            file_path=file_path,
            line_number=line_number,
        )

        return conflict_key, duplicate_req, warning

    def _parse_requirement_block(
        self,
        req_id: str,
        title: str,
        text: str,
        file_path: Optional[Path],
        line_number: int,
        subdir: str = "",
    ) -> tuple:
        """
        Parse a single requirement block.

        Args:
            req_id: The requirement ID
            title: The requirement title
            text: The full requirement text block
            file_path: Source file path
            line_number: Starting line number
            subdir: Subdirectory within spec/ (e.g., "roadmap", "archive", "")

        Returns:
            Tuple of (Requirement or None, List[ParseWarning])
        """
        block_warnings: List[ParseWarning] = []

        # Extract level, status, and implements from header line
        level = "Unknown"
        status = "Unknown"
        implements_str = ""

        level_match = self.LEVEL_STATUS_PATTERN.search(text)
        if level_match:
            level = level_match.group("level") or "Unknown"
            implements_str = level_match.group("implements") or ""
            status = level_match.group("status") or "Unknown"

        # Try alternative status pattern
        if status == "Unknown":
            alt_status_match = self.ALT_STATUS_PATTERN.search(text)
            if alt_status_match:
                status = alt_status_match.group("status")

        # Try alternative implements pattern
        if not implements_str:
            impl_match = self.IMPLEMENTS_PATTERN.search(text)
            if impl_match:
                implements_str = impl_match.group("implements")

        # Parse implements list and validate references
        implements = self._parse_implements(implements_str)
        for ref in implements:
            if not self.validator.is_valid(ref):
                block_warnings.append(ParseWarning(
                    requirement_id=req_id,
                    message=f"Invalid implements reference: {ref}",
                    file_path=file_path,
                    line_number=line_number,
                ))

        # Extract body (text between header and acceptance/end)
        body = self._extract_body(text)

        # Extract rationale
        rationale = None
        rationale_match = self.RATIONALE_PATTERN.search(text)
        if rationale_match:
            rationale = rationale_match.group(1).strip()

        # Extract acceptance criteria (legacy format)
        acceptance_criteria = []
        acceptance_match = self.ACCEPTANCE_PATTERN.search(text)
        if acceptance_match:
            criteria_text = acceptance_match.group(1)
            acceptance_criteria = [
                line.strip().lstrip("- ").strip()
                for line in criteria_text.split("\n")
                if line.strip().startswith("-")
            ]

        # Extract assertions (new format) and validate labels
        assertions = self._extract_assertions(text)
        for assertion in assertions:
            if not self._is_valid_assertion_label(assertion.label):
                block_warnings.append(ParseWarning(
                    requirement_id=req_id,
                    message=f"Invalid assertion label format: {assertion.label}",
                    file_path=file_path,
                    line_number=line_number,
                ))

        # Extract hash from end marker
        hash_value = None
        end_match = self.END_MARKER_PATTERN.search(text)
        if end_match:
            hash_value = end_match.group("hash")

        req = Requirement(
            id=req_id,
            title=title,
            level=level,
            status=status,
            body=body,
            implements=implements,
            acceptance_criteria=acceptance_criteria,
            assertions=assertions,
            rationale=rationale,
            hash=hash_value,
            file_path=file_path,
            line_number=line_number,
            subdir=subdir,
        )
        return req, block_warnings

    def _is_valid_assertion_label(self, label: str) -> bool:
        """Check if an assertion label matches expected format.

        Default expectation is uppercase letters A-Z.
        """
        # Check against configured assertion label pattern if available
        assertion_config = getattr(self.pattern_config, 'assertions', None)
        if assertion_config:
            label_style = assertion_config.get('label_style', 'uppercase')
            if label_style == 'uppercase':
                return bool(re.match(r'^[A-Z]$', label))
            elif label_style == 'numeric':
                return bool(re.match(r'^\d+$', label))
            elif label_style == 'alphanumeric':
                return bool(re.match(r'^[A-Z0-9]+$', label))
        # Default: uppercase single letter
        return bool(re.match(r'^[A-Z]$', label))

    def _parse_implements(self, implements_str: str) -> List[str]:
        """Parse comma-separated implements list.

        Returns empty list if the value is a "no reference" indicator.
        """
        if not implements_str:
            return []

        # Check if it's a "no reference" value
        stripped = implements_str.strip()
        if stripped in self.no_reference_values:
            return []

        parts = [p.strip() for p in implements_str.split(",")]
        # Filter out empty parts and no-reference values
        return [p for p in parts if p and p not in self.no_reference_values]

    def _extract_body(self, text: str) -> str:
        """Extract the main body text from requirement block.

        Body is everything between the header (and optional metadata line)
        and the end marker, including Rationale and Acceptance Criteria sections.
        Trailing blank lines are removed for consistent hashing.
        """
        lines = text.split("\n")
        body_lines = []
        found_header = False
        in_body = False

        for line in lines:
            # Skip header line
            if self.HEADER_PATTERN.match(line):
                found_header = True
                continue

            if found_header and not in_body:
                # Metadata line - skip it but mark body start
                if "**Level**" in line or "**Status**" in line:
                    in_body = True
                    continue
                # First non-blank content line starts body (when no metadata)
                elif line.strip():
                    in_body = True
                    # Don't continue - include this line in body

            # Stop at end marker
            if line.strip().startswith("*End*"):
                break

            if in_body:
                body_lines.append(line)

        # Remove trailing blank lines (matches hht-diary clean_requirement_body)
        while body_lines and not body_lines[-1].strip():
            body_lines.pop()

        # Strip trailing whitespace from result
        return "\n".join(body_lines).rstrip()

    def _extract_assertions(self, text: str) -> List[Assertion]:
        """Extract assertions from requirement text.

        Looks for `## Assertions` section and parses lines like:
            A. The system SHALL...
            B. The system SHALL NOT...

        Args:
            text: The requirement text block

        Returns:
            List of Assertion objects
        """
        assertions: List[Assertion] = []

        # Find the assertions section
        header_match = self.ASSERTIONS_HEADER_PATTERN.search(text)
        if not header_match:
            return assertions

        # Get text after the header until the next section or end marker
        start_pos = header_match.end()
        section_text = text[start_pos:]

        # Find the end of the assertions section (next ## header, Rationale, or End marker)
        end_patterns = [
            r"^##\s+",  # Next section header
            r"^\*End\*",  # End marker
            r"^---\s*$",  # Separator line
        ]
        # Take the earliest of any terminator matches as the section end.
        end_pos = len(section_text)
        for pattern in end_patterns:
            match = re.search(pattern, section_text, re.MULTILINE)
            if match and match.start() < end_pos:
                end_pos = match.start()

        assertions_text = section_text[:end_pos]

        # Parse individual assertion lines
        for match in self.ASSERTION_LINE_PATTERN.finditer(assertions_text):
            label = match.group(1)
            assertion_text = match.group(2).strip()

            # Check if this is a placeholder
            is_placeholder = any(
                assertion_text.lower().startswith(pv.lower())
                for pv in self.placeholder_values
            )

            assertions.append(Assertion(
                label=label,
                text=assertion_text,
                is_placeholder=is_placeholder,
            ))

        return assertions