elspais 0.9.1__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- elspais/cli.py +123 -1
- elspais/commands/changed.py +160 -0
- elspais/commands/hash_cmd.py +72 -26
- elspais/commands/reformat_cmd.py +458 -0
- elspais/commands/trace.py +157 -3
- elspais/commands/validate.py +81 -18
- elspais/core/git.py +352 -0
- elspais/core/models.py +2 -0
- elspais/core/parser.py +68 -24
- elspais/reformat/__init__.py +50 -0
- elspais/reformat/detector.py +119 -0
- elspais/reformat/hierarchy.py +246 -0
- elspais/reformat/line_breaks.py +220 -0
- elspais/reformat/prompts.py +123 -0
- elspais/reformat/transformer.py +264 -0
- elspais/sponsors/__init__.py +432 -0
- elspais/trace_view/__init__.py +54 -0
- elspais/trace_view/coverage.py +183 -0
- elspais/trace_view/generators/__init__.py +12 -0
- elspais/trace_view/generators/base.py +329 -0
- elspais/trace_view/generators/csv.py +122 -0
- elspais/trace_view/generators/markdown.py +175 -0
- elspais/trace_view/html/__init__.py +31 -0
- elspais/trace_view/html/generator.py +1006 -0
- elspais/trace_view/html/templates/base.html +283 -0
- elspais/trace_view/html/templates/components/code_viewer_modal.html +14 -0
- elspais/trace_view/html/templates/components/file_picker_modal.html +20 -0
- elspais/trace_view/html/templates/components/legend_modal.html +69 -0
- elspais/trace_view/html/templates/components/review_panel.html +118 -0
- elspais/trace_view/html/templates/partials/review/help/help-panel.json +244 -0
- elspais/trace_view/html/templates/partials/review/help/onboarding.json +77 -0
- elspais/trace_view/html/templates/partials/review/help/tooltips.json +237 -0
- elspais/trace_view/html/templates/partials/review/review-comments.js +928 -0
- elspais/trace_view/html/templates/partials/review/review-data.js +961 -0
- elspais/trace_view/html/templates/partials/review/review-help.js +679 -0
- elspais/trace_view/html/templates/partials/review/review-init.js +177 -0
- elspais/trace_view/html/templates/partials/review/review-line-numbers.js +429 -0
- elspais/trace_view/html/templates/partials/review/review-packages.js +1029 -0
- elspais/trace_view/html/templates/partials/review/review-position.js +540 -0
- elspais/trace_view/html/templates/partials/review/review-resize.js +115 -0
- elspais/trace_view/html/templates/partials/review/review-status.js +659 -0
- elspais/trace_view/html/templates/partials/review/review-sync.js +992 -0
- elspais/trace_view/html/templates/partials/review-styles.css +2238 -0
- elspais/trace_view/html/templates/partials/scripts.js +1741 -0
- elspais/trace_view/html/templates/partials/styles.css +1756 -0
- elspais/trace_view/models.py +353 -0
- elspais/trace_view/review/__init__.py +60 -0
- elspais/trace_view/review/branches.py +1149 -0
- elspais/trace_view/review/models.py +1205 -0
- elspais/trace_view/review/position.py +609 -0
- elspais/trace_view/review/server.py +1056 -0
- elspais/trace_view/review/status.py +470 -0
- elspais/trace_view/review/storage.py +1367 -0
- elspais/trace_view/scanning.py +213 -0
- elspais/trace_view/specs/README.md +84 -0
- elspais/trace_view/specs/tv-d00001-template-architecture.md +36 -0
- elspais/trace_view/specs/tv-d00002-css-extraction.md +37 -0
- elspais/trace_view/specs/tv-d00003-js-extraction.md +43 -0
- elspais/trace_view/specs/tv-d00004-build-embedding.md +40 -0
- elspais/trace_view/specs/tv-d00005-test-format.md +78 -0
- elspais/trace_view/specs/tv-d00010-review-data-models.md +33 -0
- elspais/trace_view/specs/tv-d00011-review-storage.md +33 -0
- elspais/trace_view/specs/tv-d00012-position-resolution.md +33 -0
- elspais/trace_view/specs/tv-d00013-git-branches.md +31 -0
- elspais/trace_view/specs/tv-d00014-review-api-server.md +31 -0
- elspais/trace_view/specs/tv-d00015-status-modifier.md +27 -0
- elspais/trace_view/specs/tv-d00016-js-integration.md +33 -0
- elspais/trace_view/specs/tv-p00001-html-generator.md +33 -0
- elspais/trace_view/specs/tv-p00002-review-system.md +29 -0
- {elspais-0.9.1.dist-info → elspais-0.11.0.dist-info}/METADATA +78 -26
- elspais-0.11.0.dist-info/RECORD +101 -0
- elspais-0.9.1.dist-info/RECORD +0 -38
- {elspais-0.9.1.dist-info → elspais-0.11.0.dist-info}/WHEEL +0 -0
- {elspais-0.9.1.dist-info → elspais-0.11.0.dist-info}/entry_points.txt +0 -0
- {elspais-0.9.1.dist-info → elspais-0.11.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
# Implements: REQ-int-d00008 (Reformat Command)
|
|
2
|
+
"""
|
|
3
|
+
elspais.reformat - Requirement format transformation.
|
|
4
|
+
|
|
5
|
+
Transforms legacy Acceptance Criteria format to Assertions format.
|
|
6
|
+
Also provides line break normalization.
|
|
7
|
+
|
|
8
|
+
IMPLEMENTS REQUIREMENTS:
|
|
9
|
+
REQ-int-d00008: Reformat Command
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from elspais.reformat.detector import detect_format, needs_reformatting, FormatAnalysis
|
|
13
|
+
from elspais.reformat.transformer import (
|
|
14
|
+
reformat_requirement,
|
|
15
|
+
assemble_new_format,
|
|
16
|
+
validate_reformatted_content,
|
|
17
|
+
)
|
|
18
|
+
from elspais.reformat.line_breaks import (
|
|
19
|
+
normalize_line_breaks,
|
|
20
|
+
fix_requirement_line_breaks,
|
|
21
|
+
detect_line_break_issues,
|
|
22
|
+
)
|
|
23
|
+
from elspais.reformat.hierarchy import (
|
|
24
|
+
RequirementNode,
|
|
25
|
+
get_all_requirements,
|
|
26
|
+
build_hierarchy,
|
|
27
|
+
traverse_top_down,
|
|
28
|
+
normalize_req_id,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
# Public star-import surface of elspais.reformat; names are re-exported
# from the detector, transformer, line_breaks, and hierarchy submodules.
__all__ = [
    # Detection
    "detect_format",
    "needs_reformatting",
    "FormatAnalysis",
    # Transformation
    "reformat_requirement",
    "assemble_new_format",
    "validate_reformatted_content",
    # Line breaks
    "normalize_line_breaks",
    "fix_requirement_line_breaks",
    "detect_line_break_issues",
    # Hierarchy
    "RequirementNode",
    "get_all_requirements",
    "build_hierarchy",
    "traverse_top_down",
    "normalize_req_id",
]
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
# Implements: REQ-int-d00008 (Reformat Command)
|
|
2
|
+
"""
|
|
3
|
+
Format detection for requirements.
|
|
4
|
+
|
|
5
|
+
Detects whether a requirement is in old format (needs reformatting)
|
|
6
|
+
or new format (already reformatted).
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import re
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class FormatAnalysis:
    """Result of format detection analysis (produced by detect_format)."""
    # True when the text has an '## Assertions' section with labeled
    # assertions and no 'Acceptance Criteria' section.
    is_new_format: bool
    has_assertions_section: bool   # '## Assertions' header present
    has_labeled_assertions: bool   # at least one 'A. ... SHALL ...' line
    has_acceptance_criteria: bool  # 'Acceptance Criteria:' section present
    uses_shall_language: bool      # the word SHALL appears anywhere
    assertion_count: int           # number of labeled assertion lines found
    confidence: float  # 0.0 to 1.0


def detect_format(body: str, rationale: str = "") -> FormatAnalysis:
    """
    Detect whether a requirement is in old or new format.

    New format indicators:
    - Has '## Assertions' section with labeled assertions (A., B., C.)
    - Does NOT have '**Acceptance Criteria**:' section
    - Uses prescriptive SHALL language in assertions

    Old format indicators:
    - Has '**Acceptance Criteria**:' or 'Acceptance Criteria:' section
    - Uses descriptive language (does, has, provides) without labeled assertions
    - May have bullet points without letter labels

    Args:
        body: The requirement body text
        rationale: Optional rationale text

    Returns:
        FormatAnalysis with detection results
    """
    full_text = f"{body}\n{rationale}".strip()

    # Check for ## Assertions section
    has_assertions_section = bool(
        re.search(r'^##\s+Assertions\s*$', full_text, re.MULTILINE)
    )

    # Labeled assertions: an uppercase letter label (A., B., ...) with SHALL
    # somewhere in the line. Case-insensitivity is scoped to the SHALL keyword
    # only — a pattern-wide re.IGNORECASE would also make [A-Z] match
    # lowercase letters, wrongly counting 'a. ...' lines as labeled assertions.
    labeled_assertions = re.findall(
        r'^[A-Z]\.\s+.*\b(?i:SHALL)\b',
        full_text,
        re.MULTILINE
    )
    has_labeled_assertions = len(labeled_assertions) >= 1
    assertion_count = len(labeled_assertions)

    # Check for Acceptance Criteria section (bold ** markers optional)
    has_acceptance_criteria = bool(re.search(
        r'\*?\*?Acceptance\s+Criteria\*?\*?\s*:',
        full_text,
        re.IGNORECASE
    ))

    # Check for SHALL language usage anywhere (any case)
    shall_count = len(re.findall(r'\bSHALL\b', full_text, re.IGNORECASE))
    uses_shall_language = shall_count >= 1

    # New format: has Assertions section with labeled assertions, no Acceptance Criteria
    is_new_format = (
        has_assertions_section and
        has_labeled_assertions and
        not has_acceptance_criteria
    )

    # Confidence score: each new-format signal adds weight (sums to 1.0).
    confidence = 0.0
    if has_assertions_section:
        confidence += 0.35
    if has_labeled_assertions:
        confidence += 0.35
    if not has_acceptance_criteria:
        confidence += 0.20
    if uses_shall_language:
        confidence += 0.10

    # Invert when classified as old format: a high new-format score means
    # low confidence in the old-format classification, and vice versa.
    if not is_new_format:
        confidence = 1.0 - confidence

    return FormatAnalysis(
        is_new_format=is_new_format,
        has_assertions_section=has_assertions_section,
        has_labeled_assertions=has_labeled_assertions,
        has_acceptance_criteria=has_acceptance_criteria,
        uses_shall_language=uses_shall_language,
        assertion_count=assertion_count,
        confidence=confidence
    )
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def needs_reformatting(body: str, rationale: str = "") -> bool:
    """
    Simple check if a requirement needs reformatting.

    Args:
        body: The requirement body text
        rationale: Optional rationale text

    Returns:
        True if the requirement needs reformatting (is in old format)
    """
    # "Needs reformatting" is simply the negation of detect_format's
    # new-format classification.
    return not detect_format(body, rationale).is_new_format
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
# Implements: REQ-int-d00008 (Reformat Command)
|
|
2
|
+
"""
|
|
3
|
+
Hierarchy traversal logic for requirements.
|
|
4
|
+
|
|
5
|
+
Uses elspais core modules directly to parse requirements and build
|
|
6
|
+
a traversable hierarchy based on implements relationships.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import sys
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Callable, Dict, List, Optional, TYPE_CHECKING
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from elspais.core.models import Requirement
|
|
16
|
+
from elspais.core.patterns import PatternValidator
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class RequirementNode:
    """Represents a requirement with its metadata and hierarchy info."""
    req_id: str            # e.g. 'REQ-d00027'
    title: str
    body: str
    rationale: str
    file_path: str         # source file as string ('' when unknown)
    line: int              # line number in source file (0 when unknown)
    implements: List[str]  # Parent REQ IDs
    hash: str
    status: str
    level: str
    children: List[str] = field(default_factory=list)  # Child REQ IDs

    @classmethod
    def from_core(cls, req: "Requirement") -> "RequirementNode":
        """
        Create a RequirementNode from a core Requirement object.

        Optional core fields (rationale, file_path, line_number, hash)
        are coerced to '' / 0 so downstream code never sees None.

        Args:
            req: Core Requirement object from elspais.core.models

        Returns:
            RequirementNode with mapped fields
        """
        # Positional arguments mirror the field declaration order above.
        return cls(
            req.id,
            req.title,
            req.body,
            req.rationale or "",
            str(req.file_path) if req.file_path else "",
            req.line_number or 0,
            list(req.implements),
            req.hash or "",
            req.status,
            req.level,
            [],
        )
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def get_all_requirements(
    config_path: Optional[Path] = None,
    base_path: Optional[Path] = None,
    mode: str = "combined",
) -> Dict[str, RequirementNode]:
    """
    Get all requirements using core parser directly.

    Args:
        config_path: Optional path to .elspais.toml config file
        base_path: Base path for resolving relative directories
        mode: Which repos to include:
            - "combined" (default): Load local + core/associated repo requirements
            - "core-only": Load only core/associated repo requirements
            - "local-only": Load only local requirements

    Returns:
        Dict mapping requirement ID (e.g., 'REQ-d00027') to RequirementNode
    """
    from elspais.config.loader import load_config, find_config_file, get_spec_directories
    from elspais.core.parser import RequirementParser
    from elspais.core.patterns import PatternConfig
    from elspais.commands.validate import load_requirements_from_repo

    # Resolve the config file; without one we cannot locate any specs.
    if config_path is None:
        config_path = find_config_file(base_path or Path.cwd())
    if config_path is None:
        print("Warning: No .elspais.toml found", file=sys.stderr)
        return {}

    try:
        config = load_config(config_path)
    except Exception as exc:
        print(f"Warning: Failed to load config: {exc}", file=sys.stderr)
        return {}

    nodes: Dict[str, RequirementNode] = {}
    include_local = mode in ("combined", "local-only")
    include_core = mode in ("combined", "core-only")

    if include_local:
        # Parse the local spec directories with the configured ID patterns.
        parser = RequirementParser(PatternConfig.from_dict(config.get("patterns", {})))
        spec_dirs = get_spec_directories(None, config, base_path or config_path.parent)
        if spec_dirs:
            try:
                parsed = parser.parse_directories(spec_dirs)
                for rid, req in parsed.requirements.items():
                    nodes[rid] = RequirementNode.from_core(req)
            except Exception as exc:
                print(f"Warning: Failed to parse local requirements: {exc}", file=sys.stderr)

    if include_core:
        # Merge in core/associated repo requirements; local entries win on
        # ID collisions because they are loaded first.
        core_path = config.get("core", {}).get("path")
        if core_path:
            core_reqs = load_requirements_from_repo(Path(core_path), config)
            for rid, req in core_reqs.items():
                if rid not in nodes:
                    nodes[rid] = RequirementNode.from_core(req)

    if not nodes:
        print("Warning: No requirements found", file=sys.stderr)

    return nodes
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def build_hierarchy(requirements: Dict[str, RequirementNode]) -> Dict[str, RequirementNode]:
    """
    Compute children for each requirement by inverting implements relationships.

    Mutates the given dict in place (populating each node's children list)
    and returns it for convenience.
    """
    for child_id, child in requirements.items():
        for raw_parent in child.implements:
            # Implements entries may omit the 'REQ-' prefix; normalize first.
            parent_id = raw_parent if raw_parent.startswith('REQ-') else f"REQ-{raw_parent}"
            parent = requirements.get(parent_id)
            if parent is not None:
                parent.children.append(child_id)

    # Sort children for deterministic traversal order.
    for entry in requirements.values():
        entry.children.sort()

    return requirements
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def traverse_top_down(
    requirements: Dict[str, RequirementNode],
    start_req: str,
    max_depth: Optional[int] = None,
    callback: Optional[Callable[[RequirementNode, int], None]] = None
) -> List[str]:
    """
    Traverse hierarchy from start_req downward using BFS.

    Args:
        requirements: All requirements with children computed
        start_req: Starting REQ ID (e.g., 'REQ-p00044')
        max_depth: Maximum depth to traverse (None = unlimited)
        callback: Function to call for each REQ visited (node, depth)

    Returns:
        List of REQ IDs in traversal order
    """
    # deque gives O(1) popleft; list.pop(0) shifts the whole queue (O(n)).
    from collections import deque

    visited: List[str] = []
    queue = deque([(start_req, 0)])  # (req_id, depth)
    seen = set()

    while queue:
        req_id, depth = queue.popleft()

        if req_id in seen:
            continue

        # Depth limit check (depth 0 is the start node). BFS guarantees the
        # first visit of a node is at its shallowest depth, so skipping here
        # never hides a node that would qualify via another path.
        if max_depth is not None and depth > max_depth:
            continue

        seen.add(req_id)

        if req_id not in requirements:
            print(f"Warning: {req_id} not found in requirements", file=sys.stderr)
            continue

        visited.append(req_id)
        node = requirements[req_id]

        if callback:
            callback(node, depth)

        # Enqueue children for the next level.
        for child_id in node.children:
            if child_id not in seen:
                queue.append((child_id, depth + 1))

    return visited
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def normalize_req_id(req_id: str, validator: Optional["PatternValidator"] = None) -> str:
    """
    Normalize requirement ID to canonical format using PatternValidator.

    Args:
        req_id: Requirement ID (e.g., "d00027", "REQ-d00027", "REQ-CAL-p00001")
        validator: PatternValidator instance (created from config if not provided)

    Returns:
        Normalized ID in canonical format from config
    """
    from elspais.config.loader import load_config, find_config_file
    from elspais.core.patterns import PatternValidator, PatternConfig

    if validator is None:
        # Build a validator from the nearest config; fall back to an empty
        # config (pattern defaults) on any failure.
        try:
            located = find_config_file(Path.cwd())
            config = load_config(located) if located else {}
        except Exception:
            config = {}
        validator = PatternValidator(PatternConfig.from_dict(config.get("patterns", {})))

    # First attempt: parse the ID exactly as given.
    parsed = validator.parse(req_id)

    # Second attempt: prepend the configured prefix when it is missing.
    if parsed is None and not req_id.upper().startswith(validator.config.prefix):
        parsed = validator.parse(f"{validator.config.prefix}-{req_id}")

    if not parsed:
        # Unparseable: hand the input back unchanged.
        return req_id

    # Rebuild the canonical ID from its parsed components.
    segments = [parsed.prefix]
    if parsed.associated:
        segments.append(parsed.associated)
    segments.append(f"{parsed.type_code}{parsed.number}")
    return "-".join(segments)
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
# Implements: REQ-int-d00008-C (Line break normalization)
|
|
2
|
+
"""
|
|
3
|
+
Line break normalization for requirement content.
|
|
4
|
+
|
|
5
|
+
Provides functions to:
|
|
6
|
+
- Remove unnecessary blank lines after section headers
|
|
7
|
+
- Reflow paragraphs (join lines broken mid-sentence)
|
|
8
|
+
- Preserve intentional structure (list items, code blocks)
|
|
9
|
+
|
|
10
|
+
IMPLEMENTS REQUIREMENTS:
|
|
11
|
+
REQ-int-d00008-C: Line break normalization SHALL be included.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import re
|
|
15
|
+
from typing import List, Tuple
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def normalize_line_breaks(content: str, reflow: bool = True) -> str:
    """
    Normalize line breaks in requirement content.

    Section headers ('## X') keep exactly one blank line after them. When
    *reflow* is True, runs of non-structural lines are joined into a single
    paragraph line; structural lines (lists, headers, metadata, code fences)
    pass through untouched apart from trailing-whitespace stripping.

    Args:
        content: Raw requirement markdown content
        reflow: If True, also reflow paragraphs (join broken lines)

    Returns:
        Content with normalized line breaks
    """
    raw = content.split('\n')
    total = len(raw)
    out: List[str] = []
    idx = 0

    while idx < total:
        current = raw[idx]

        # Section header: emit it, swallow any blank lines that follow,
        # then insert exactly one blank line for readability.
        if re.match(r'^##\s+\w', current):
            out.append(current)
            idx += 1
            while idx < total and not raw[idx].strip():
                idx += 1
            out.append('')
            continue

        # Paragraph start: gather lines until a blank, structural line, or
        # the next section, then join them into one line.
        if reflow and current.strip() and not _is_structural_line(current):
            paragraph = [current.rstrip()]
            idx += 1
            while idx < total:
                candidate = raw[idx]
                if (not candidate.strip()
                        or _is_structural_line(candidate)
                        or re.match(r'^##\s+', candidate)):
                    break
                paragraph.append(candidate.rstrip())
                idx += 1
            out.append(_reflow_paragraph(paragraph))
            continue

        # Structural and blank lines pass through (trailing space stripped).
        out.append(current.rstrip())
        idx += 1

    # Finally squeeze any runs of blank lines down to a single one.
    return _collapse_blank_lines('\n'.join(out))
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _is_structural_line(line: str) -> bool:
|
|
76
|
+
"""
|
|
77
|
+
Check if a line is structural (should not be reflowed).
|
|
78
|
+
|
|
79
|
+
Structural lines include:
|
|
80
|
+
- List items (A., B., 1., -, *)
|
|
81
|
+
- Headers (# or ##)
|
|
82
|
+
- Metadata lines (**Level**: etc)
|
|
83
|
+
- End markers (*End* ...)
|
|
84
|
+
- Code fence markers (```)
|
|
85
|
+
"""
|
|
86
|
+
stripped = line.strip()
|
|
87
|
+
|
|
88
|
+
if not stripped:
|
|
89
|
+
return False
|
|
90
|
+
|
|
91
|
+
# Headers
|
|
92
|
+
if stripped.startswith('#'):
|
|
93
|
+
return True
|
|
94
|
+
|
|
95
|
+
# Lettered assertions (A. B. C. etc)
|
|
96
|
+
if re.match(r'^[A-Z]\.\s', stripped):
|
|
97
|
+
return True
|
|
98
|
+
|
|
99
|
+
# Numbered lists (1. 2. 3. etc)
|
|
100
|
+
if re.match(r'^\d+\.\s', stripped):
|
|
101
|
+
return True
|
|
102
|
+
|
|
103
|
+
# Bullet points
|
|
104
|
+
if stripped.startswith(('- ', '* ', '+ ')):
|
|
105
|
+
return True
|
|
106
|
+
|
|
107
|
+
# Metadata line
|
|
108
|
+
if stripped.startswith('**Level**:') or stripped.startswith('**Status**:'):
|
|
109
|
+
return True
|
|
110
|
+
|
|
111
|
+
# Combined metadata line
|
|
112
|
+
if re.match(r'\*\*Level\*\*:', stripped):
|
|
113
|
+
return True
|
|
114
|
+
|
|
115
|
+
# End marker
|
|
116
|
+
if stripped.startswith('*End*'):
|
|
117
|
+
return True
|
|
118
|
+
|
|
119
|
+
# Code fence
|
|
120
|
+
if stripped.startswith('```'):
|
|
121
|
+
return True
|
|
122
|
+
|
|
123
|
+
return False
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def _reflow_paragraph(lines: List[str]) -> str:
|
|
127
|
+
"""
|
|
128
|
+
Reflow a list of paragraph lines into a single line.
|
|
129
|
+
|
|
130
|
+
Args:
|
|
131
|
+
lines: Lines that form a paragraph
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
Single reflowed line
|
|
135
|
+
"""
|
|
136
|
+
if not lines:
|
|
137
|
+
return ''
|
|
138
|
+
|
|
139
|
+
if len(lines) == 1:
|
|
140
|
+
return lines[0]
|
|
141
|
+
|
|
142
|
+
# Join lines with space, collapsing multiple spaces
|
|
143
|
+
joined = ' '.join(line.strip() for line in lines if line.strip())
|
|
144
|
+
# Collapse multiple spaces
|
|
145
|
+
return re.sub(r'\s+', ' ', joined)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def _collapse_blank_lines(content: str) -> str:
|
|
149
|
+
"""
|
|
150
|
+
Collapse multiple consecutive blank lines into single blank lines.
|
|
151
|
+
|
|
152
|
+
Args:
|
|
153
|
+
content: Content that may have multiple blank lines
|
|
154
|
+
|
|
155
|
+
Returns:
|
|
156
|
+
Content with at most one blank line between paragraphs
|
|
157
|
+
"""
|
|
158
|
+
# Replace 3+ newlines with 2 newlines (one blank line)
|
|
159
|
+
return re.sub(r'\n{3,}', '\n\n', content)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def fix_requirement_line_breaks(
    body: str,
    rationale: str,
    reflow: bool = True
) -> Tuple[str, str]:
    """
    Fix line breaks in requirement body and rationale.

    Args:
        body: Requirement body text
        rationale: Requirement rationale text
        reflow: Whether to reflow paragraphs

    Returns:
        Tuple of (fixed_body, fixed_rationale)
    """
    # Empty/None inputs pass through as '' without invoking the normalizer.
    def _fix(text: str) -> str:
        return normalize_line_breaks(text, reflow=reflow) if text else ''

    return _fix(body), _fix(rationale)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def detect_line_break_issues(content: str) -> List[str]:
    """
    Detect potential line break issues in content.

    Flags multiple blank lines after a section header and prose lines that
    end without punctuation before another prose line (likely accidental
    hard wraps). Returns a list of human-readable issue strings.
    """
    issues: List[str] = []
    all_lines = content.split('\n')
    count = len(all_lines)

    for idx, text in enumerate(all_lines):
        # Multiple blank lines directly after a section header.
        if re.match(r'^##\s+\w', text):
            scan = idx + 1
            while scan < count and not all_lines[scan].strip():
                scan += 1
            blank_count = scan - (idx + 1)
            if blank_count > 1:
                issues.append(
                    f"Line {idx+1}: Multiple blank lines ({blank_count}) after section header"
                )

        # Prose line ending in a word character followed by another prose
        # line: probably a mid-sentence break.
        trimmed = text.rstrip()
        followed_by_prose = (
            idx + 1 < count
            and all_lines[idx + 1].strip()
            and not _is_structural_line(all_lines[idx + 1])
        )
        if trimmed and not _is_structural_line(text) and followed_by_prose:
            if trimmed[-1].isalnum():
                issues.append(
                    f"Line {idx+1}: Possible mid-sentence line break"
                )

    return issues
|