elspais 0.11.0-py3-none-any.whl → 0.11.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. elspais/__init__.py +1 -1
  2. elspais/cli.py +75 -23
  3. elspais/commands/analyze.py +5 -6
  4. elspais/commands/changed.py +2 -6
  5. elspais/commands/config_cmd.py +4 -4
  6. elspais/commands/edit.py +32 -36
  7. elspais/commands/hash_cmd.py +24 -18
  8. elspais/commands/index.py +8 -7
  9. elspais/commands/init.py +4 -4
  10. elspais/commands/reformat_cmd.py +32 -43
  11. elspais/commands/rules_cmd.py +6 -2
  12. elspais/commands/trace.py +23 -19
  13. elspais/commands/validate.py +8 -10
  14. elspais/config/defaults.py +7 -1
  15. elspais/core/content_rules.py +0 -1
  16. elspais/core/git.py +4 -10
  17. elspais/core/parser.py +55 -56
  18. elspais/core/patterns.py +2 -6
  19. elspais/core/rules.py +10 -15
  20. elspais/mcp/__init__.py +2 -0
  21. elspais/mcp/context.py +1 -0
  22. elspais/mcp/serializers.py +1 -1
  23. elspais/mcp/server.py +54 -39
  24. elspais/reformat/__init__.py +13 -13
  25. elspais/reformat/detector.py +9 -16
  26. elspais/reformat/hierarchy.py +8 -7
  27. elspais/reformat/line_breaks.py +36 -38
  28. elspais/reformat/prompts.py +22 -12
  29. elspais/reformat/transformer.py +43 -41
  30. elspais/sponsors/__init__.py +0 -2
  31. elspais/testing/__init__.py +1 -1
  32. elspais/testing/result_parser.py +25 -21
  33. elspais/trace_view/__init__.py +4 -3
  34. elspais/trace_view/coverage.py +5 -5
  35. elspais/trace_view/generators/__init__.py +1 -1
  36. elspais/trace_view/generators/base.py +17 -12
  37. elspais/trace_view/generators/csv.py +2 -6
  38. elspais/trace_view/generators/markdown.py +3 -8
  39. elspais/trace_view/html/__init__.py +4 -2
  40. elspais/trace_view/html/generator.py +423 -289
  41. elspais/trace_view/models.py +25 -0
  42. elspais/trace_view/review/__init__.py +21 -18
  43. elspais/trace_view/review/branches.py +114 -121
  44. elspais/trace_view/review/models.py +232 -237
  45. elspais/trace_view/review/position.py +53 -71
  46. elspais/trace_view/review/server.py +264 -288
  47. elspais/trace_view/review/status.py +43 -58
  48. elspais/trace_view/review/storage.py +48 -72
  49. {elspais-0.11.0.dist-info → elspais-0.11.2.dist-info}/METADATA +12 -9
  50. {elspais-0.11.0.dist-info → elspais-0.11.2.dist-info}/RECORD +53 -53
  51. {elspais-0.11.0.dist-info → elspais-0.11.2.dist-info}/WHEEL +0 -0
  52. {elspais-0.11.0.dist-info → elspais-0.11.2.dist-info}/entry_points.txt +0 -0
  53. {elspais-0.11.0.dist-info → elspais-0.11.2.dist-info}/licenses/LICENSE +0 -0
elspais/core/parser.py CHANGED
@@ -20,46 +20,39 @@ class RequirementParser:
  # Regex patterns for parsing
  # Generic pattern to find potential requirement headers
  # Actual ID validation is done by PatternValidator
- HEADER_PATTERN = re.compile(
- r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$"
- )
+ HEADER_PATTERN = re.compile(r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$")
  LEVEL_STATUS_PATTERN = re.compile(
  r"\*\*Level\*\*:\s*(?P<level>\w+)"
  r"(?:\s*\|\s*\*\*Implements\*\*:\s*(?P<implements>[^|\n]+))?"
  r"(?:\s*\|\s*\*\*Status\*\*:\s*(?P<status>\w+))?"
  )
- ALT_STATUS_PATTERN = re.compile(
- r"\*\*Status\*\*:\s*(?P<status>\w+)"
- )
- IMPLEMENTS_PATTERN = re.compile(
- r"\*\*Implements\*\*:\s*(?P<implements>[^|\n]+)"
- )
+ ALT_STATUS_PATTERN = re.compile(r"\*\*Status\*\*:\s*(?P<status>\w+)")
+ IMPLEMENTS_PATTERN = re.compile(r"\*\*Implements\*\*:\s*(?P<implements>[^|\n]+)")
  END_MARKER_PATTERN = re.compile(
- r"^\*End\*\s+\*[^*]+\*\s*(?:\|\s*\*\*Hash\*\*:\s*(?P<hash>[a-zA-Z0-9]+))?",
- re.MULTILINE
- )
- RATIONALE_PATTERN = re.compile(
- r"\*\*Rationale\*\*:\s*(.+?)(?=\n\n|\n\*\*|\Z)", re.DOTALL
+ r"^\*End\*\s+\*[^*]+\*\s*(?:\|\s*\*\*Hash\*\*:\s*(?P<hash>[a-zA-Z0-9]+))?", re.MULTILINE
  )
+ RATIONALE_PATTERN = re.compile(r"\*\*Rationale\*\*:\s*(.+?)(?=\n\n|\n\*\*|\Z)", re.DOTALL)
  ACCEPTANCE_PATTERN = re.compile(
  r"\*\*Acceptance Criteria\*\*:\s*\n((?:\s*-\s*.+\n?)+)", re.MULTILINE
  )
  # Assertions section header (## Assertions or **Assertions**)
- ASSERTIONS_HEADER_PATTERN = re.compile(
- r"^##\s+Assertions\s*$", re.MULTILINE
- )
+ ASSERTIONS_HEADER_PATTERN = re.compile(r"^##\s+Assertions\s*$", re.MULTILINE)
  # Individual assertion line: "A. The system SHALL..." or "01. ..." etc.
  # Captures: label (any alphanumeric), text (rest of line, may continue)
- ASSERTION_LINE_PATTERN = re.compile(
- r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE
- )
+ ASSERTION_LINE_PATTERN = re.compile(r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE)

  # Default values that mean "no references" in Implements field
  DEFAULT_NO_REFERENCE_VALUES = ["-", "null", "none", "x", "X", "N/A", "n/a"]

  # Default placeholder values that indicate a removed/deprecated assertion
  DEFAULT_PLACEHOLDER_VALUES = [
- "obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"
+ "obsolete",
+ "removed",
+ "deprecated",
+ "N/A",
+ "n/a",
+ "-",
+ "reserved",
  ]

  def __init__(
@@ -272,9 +265,7 @@ class RequirementParser:
  else:
  dir_path = base_path / dir_entry
  if dir_path.exists() and dir_path.is_dir():
- result = self.parse_directory(
- dir_path, patterns=patterns, skip_files=skip_files
- )
+ result = self.parse_directory(dir_path, patterns=patterns, skip_files=skip_files)
  # Merge requirements, checking for cross-directory duplicates
  for req_id, req in result.requirements.items():
  if req_id in requirements:
@@ -385,7 +376,10 @@ class RequirementParser:

  warning = ParseWarning(
  requirement_id=original_id,
- message=f"Duplicate ID found (first occurrence in {original_req.file_path}:{original_req.line_number})",
+ message=(
+ f"Duplicate ID found "
+ f"(first occurrence in {original_req.file_path}:{original_req.line_number})"
+ ),
  file_path=file_path,
  line_number=line_number,
  )
@@ -444,12 +438,14 @@ class RequirementParser:
  implements = self._parse_implements(implements_str)
  for ref in implements:
  if not self.validator.is_valid(ref):
- block_warnings.append(ParseWarning(
- requirement_id=req_id,
- message=f"Invalid implements reference: {ref}",
- file_path=file_path,
- line_number=line_number,
- ))
+ block_warnings.append(
+ ParseWarning(
+ requirement_id=req_id,
+ message=f"Invalid implements reference: {ref}",
+ file_path=file_path,
+ line_number=line_number,
+ )
+ )

  # Extract body (text between header and acceptance/end)
  body = self._extract_body(text)
@@ -475,12 +471,14 @@ class RequirementParser:
  assertions = self._extract_assertions(text)
  for assertion in assertions:
  if not self._is_valid_assertion_label(assertion.label):
- block_warnings.append(ParseWarning(
- requirement_id=req_id,
- message=f"Invalid assertion label format: {assertion.label}",
- file_path=file_path,
- line_number=line_number,
- ))
+ block_warnings.append(
+ ParseWarning(
+ requirement_id=req_id,
+ message=f"Invalid assertion label format: {assertion.label}",
+ file_path=file_path,
+ line_number=line_number,
+ )
+ )

  # Extract hash from end marker
  hash_value = None
@@ -511,17 +509,17 @@ class RequirementParser:
  Default expectation is uppercase letters A-Z.
  """
  # Check against configured assertion label pattern if available
- assertion_config = getattr(self.pattern_config, 'assertions', None)
+ assertion_config = getattr(self.pattern_config, "assertions", None)
  if assertion_config:
- label_style = assertion_config.get('label_style', 'uppercase')
- if label_style == 'uppercase':
- return bool(re.match(r'^[A-Z]$', label))
- elif label_style == 'numeric':
- return bool(re.match(r'^\d+$', label))
- elif label_style == 'alphanumeric':
- return bool(re.match(r'^[A-Z0-9]+$', label))
+ label_style = assertion_config.get("label_style", "uppercase")
+ if label_style == "uppercase":
+ return bool(re.match(r"^[A-Z]$", label))
+ elif label_style == "numeric":
+ return bool(re.match(r"^\d+$", label))
+ elif label_style == "alphanumeric":
+ return bool(re.match(r"^[A-Z0-9]+$", label))
  # Default: uppercase single letter
- return bool(re.match(r'^[A-Z]$', label))
+ return bool(re.match(r"^[A-Z]$", label))

  def _parse_implements(self, implements_str: str) -> List[str]:
  """Parse comma-separated implements list.
@@ -608,9 +606,9 @@ class RequirementParser:

  # Find the end of the assertions section (next ## header, Rationale, or End marker)
  end_patterns = [
- r"^##\s+", # Next section header
- r"^\*End\*", # End marker
- r"^---\s*$", # Separator line
+ r"^##\s+",  # Next section header
+ r"^\*End\*",  # End marker
+ r"^---\s*$",  # Separator line
  ]
  end_pos = len(section_text)
  for pattern in end_patterns:
@@ -627,14 +625,15 @@ class RequirementParser:

  # Check if this is a placeholder
  is_placeholder = any(
- assertion_text.lower().startswith(pv.lower())
- for pv in self.placeholder_values
+ assertion_text.lower().startswith(pv.lower()) for pv in self.placeholder_values
  )

- assertions.append(Assertion(
- label=label,
- text=assertion_text,
- is_placeholder=is_placeholder,
- ))
+ assertions.append(
+ Assertion(
+ label=label,
+ text=assertion_text,
+ is_placeholder=is_placeholder,
+ )
+ )

  return assertions
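The parser regexes above only change shape in 0.11.2 (they are collapsed onto single lines); their behavior is unchanged. A minimal standalone sketch using two of the patterns copied verbatim from the new version; the sample requirement text is invented for illustration:

    import re

    # Patterns copied verbatim from the 0.11.2 side of the diff above.
    HEADER_PATTERN = re.compile(r"^#*\s*(?P<id>[A-Z]+-[A-Za-z0-9-]+):\s*(?P<title>.+)$")
    ASSERTION_LINE_PATTERN = re.compile(r"^\s*([A-Z0-9]+)\.\s+(.+)$", re.MULTILINE)

    # Hypothetical requirement block, made up only to exercise the patterns.
    sample = """# REQ-int-d00008: Reformat Command

    ## Assertions
    A. The system SHALL reformat legacy requirement blocks.
    B. The system SHALL preserve assertion labels.
    """

    header = HEADER_PATTERN.match(sample.splitlines()[0])
    print(header.group("id"), "-", header.group("title"))  # REQ-int-d00008 - Reformat Command
    print(ASSERTION_LINE_PATTERN.findall(sample))          # [('A', '...'), ('B', '...')]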
elspais/core/patterns.py CHANGED
@@ -116,9 +116,7 @@ class PatternValidator:
  self.config = config
  self._regex = self._build_regex()
  self._regex_with_assertion = self._build_regex(include_assertion=True)
- self._assertion_label_regex = re.compile(
- f"^{self.config.get_assertion_label_pattern()}$"
- )
+ self._assertion_label_regex = re.compile(f"^{self.config.get_assertion_label_pattern()}$")

  def _build_regex(self, include_assertion: bool = False) -> re.Pattern:
  """Build regex pattern from configuration.
@@ -306,9 +304,7 @@ class PatternValidator:

  raise ValueError(f"Cannot parse assertion label: {label}")

- def format(
- self, type_code: str, number: int, associated: Optional[str] = None
- ) -> str:
+ def format(self, type_code: str, number: int, associated: Optional[str] = None) -> str:
  """
  Format a requirement ID from components.

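The assertion-label regex above wraps the configured pattern in ^...$ so a label must match in full. A small sketch of that check, with "[A-Z]" as an assumed stand-in for whatever config.get_assertion_label_pattern() returns:

    import re

    # "[A-Z]" is an assumption standing in for the configured label pattern.
    label_pattern = "[A-Z]"
    assertion_label_regex = re.compile(f"^{label_pattern}$")

    print(bool(assertion_label_regex.match("A")))   # True
    print(bool(assertion_label_regex.match("AB")))  # False: the trailing $ forces a full match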
elspais/core/rules.py CHANGED
@@ -101,9 +101,9 @@ class FormatConfig:
  require_shall: bool = True
  labels_sequential: bool = True
  labels_unique: bool = True
- placeholder_values: List[str] = field(default_factory=lambda: [
- "obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"
- ])
+ placeholder_values: List[str] = field(
+ default_factory=lambda: ["obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"]
+ )


  @dataclass
@@ -142,9 +142,10 @@ class RulesConfig:
  require_shall=format_data.get("require_shall", True),
  labels_sequential=format_data.get("labels_sequential", True),
  labels_unique=format_data.get("labels_unique", True),
- placeholder_values=format_data.get("placeholder_values", [
- "obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"
- ]),
+ placeholder_values=format_data.get(
+ "placeholder_values",
+ ["obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"],
+ ),
  )

  return cls(hierarchy=hierarchy, format=format_config)
@@ -169,9 +170,7 @@ class RuleEngine:
  """
  self.config = config
  self.pattern_config = pattern_config
- self.pattern_validator = (
- PatternValidator(pattern_config) if pattern_config else None
- )
+ self.pattern_validator = PatternValidator(pattern_config) if pattern_config else None

  def validate(self, requirements: Dict[str, Requirement]) -> List[RuleViolation]:
  """
@@ -381,9 +380,7 @@ class RuleEngine:

  return violations

- def _check_assertions(
- self, req_id: str, req: Requirement
- ) -> List[RuleViolation]:
+ def _check_assertions(self, req_id: str, req: Requirement) -> List[RuleViolation]:
  """Check assertion-specific validation rules."""
  violations = []

@@ -426,9 +423,7 @@ class RuleEngine:
  if self.config.format.labels_sequential and self.pattern_validator:
  expected_labels = []
  for i in range(len(labels)):
- expected_labels.append(
- self.pattern_validator.format_assertion_label(i)
- )
+ expected_labels.append(self.pattern_validator.format_assertion_label(i))
  if labels != expected_labels:
  msg = f"Labels not sequential: {labels} (expected {expected_labels})"
  violations.append(
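placeholder_values keeps its default_factory in both versions because a mutable list cannot be a plain dataclass default. A minimal sketch of that pattern; FormatConfigSketch is a hypothetical, trimmed-down stand-in for FormatConfig:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class FormatConfigSketch:
        require_shall: bool = True
        # default_factory builds a fresh list per instance instead of sharing one.
        placeholder_values: List[str] = field(
            default_factory=lambda: ["obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"]
        )

    a = FormatConfigSketch()
    b = FormatConfigSketch()
    a.placeholder_values.append("retired")
    print(b.placeholder_values)  # unaffected: each instance gets its own list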
elspais/mcp/__init__.py CHANGED
@@ -33,10 +33,12 @@ __all__ = [
  def create_server(working_dir=None):
  """Create MCP server instance."""
  from elspais.mcp.server import create_server as _create
+
  return _create(working_dir)


  def run_server(working_dir=None, transport="stdio"):
  """Run MCP server."""
  from elspais.mcp.server import run_server as _run
+
  return _run(working_dir, transport)
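Both wrappers above defer the real import to elspais.mcp.server, so importing elspais.mcp never requires the optional mcp package; the guard in server.py further down then raises a clear error when the extra is missing. A small sketch of that pattern, assuming only that the mcp extra may or may not be installed (note that the adjacent string literals join into a single message):

    # Optional-dependency guard, mirroring the try/except in elspais/mcp/server.py below.
    try:
        from mcp.server.fastmcp import FastMCP  # provided by the optional "mcp" extra

        MCP_AVAILABLE = True
    except ImportError:
        MCP_AVAILABLE = False

    # Adjacent string literals, as left on one line in server.py, concatenate at compile time.
    msg = "MCP dependencies not installed. " "Install with: pip install elspais[mcp]"
    assert msg == "MCP dependencies not installed. Install with: pip install elspais[mcp]"

    print("FastMCP available:", MCP_AVAILABLE)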
elspais/mcp/context.py CHANGED
@@ -52,6 +52,7 @@ class WorkspaceContext:
  else:
  # Use defaults
  from elspais.config.defaults import DEFAULT_CONFIG
+
  config = DEFAULT_CONFIG.copy()

  return cls(working_dir=directory, config=config)
elspais/mcp/serializers.py CHANGED
@@ -4,7 +4,7 @@ elspais.mcp.serializers - JSON serialization for MCP responses.
  Provides functions to serialize elspais data models to JSON-compatible dicts.
  """

- from typing import Any, Dict, List
+ from typing import Any, Dict

  from elspais.core.models import Assertion, ContentRule, Requirement
  from elspais.core.rules import RuleViolation
elspais/mcp/server.py CHANGED
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Optional

  try:
  from mcp.server.fastmcp import FastMCP
+
  MCP_AVAILABLE = True
  except ImportError:
  MCP_AVAILABLE = False
@@ -39,8 +40,7 @@ def create_server(working_dir: Optional[Path] = None) -> "FastMCP":
  """
  if not MCP_AVAILABLE:
  raise ImportError(
- "MCP dependencies not installed. "
- "Install with: pip install elspais[mcp]"
+ "MCP dependencies not installed. " "Install with: pip install elspais[mcp]"
  )

  if working_dir is None:
@@ -75,14 +75,17 @@ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  ID, title, level, status, and assertion count.
  """
  import json
+
  requirements = ctx.get_requirements()
- return json.dumps({
- "count": len(requirements),
- "requirements": [
- serialize_requirement_summary(req)
- for req in requirements.values()
- ]
- }, indent=2)
+ return json.dumps(
+ {
+ "count": len(requirements),
+ "requirements": [
+ serialize_requirement_summary(req) for req in requirements.values()
+ ],
+ },
+ indent=2,
+ )

  @mcp.resource("requirements://{req_id}")
  def get_requirement_resource(req_id: str) -> str:
@@ -93,6 +96,7 @@ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  implements references, and location.
  """
  import json
+
  req = ctx.get_requirement(req_id)
  if req is None:
  return json.dumps({"error": f"Requirement {req_id} not found"})
@@ -102,34 +106,39 @@ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  def get_requirements_by_level(level: str) -> str:
  """Get all requirements of a specific level (PRD, OPS, DEV)."""
  import json
+
  requirements = ctx.get_requirements()
- filtered = [
- r for r in requirements.values()
- if r.level.upper() == level.upper()
- ]
- return json.dumps({
- "level": level,
- "count": len(filtered),
- "requirements": [serialize_requirement_summary(r) for r in filtered]
- }, indent=2)
+ filtered = [r for r in requirements.values() if r.level.upper() == level.upper()]
+ return json.dumps(
+ {
+ "level": level,
+ "count": len(filtered),
+ "requirements": [serialize_requirement_summary(r) for r in filtered],
+ },
+ indent=2,
+ )

  @mcp.resource("content-rules://list")
  def list_content_rules() -> str:
  """List all configured content rule files."""
  import json
+
  rules = ctx.get_content_rules()
- return json.dumps({
- "count": len(rules),
- "rules": [
- {
- "file": str(r.file_path),
- "title": r.title,
- "type": r.type,
- "applies_to": r.applies_to,
- }
- for r in rules
- ]
- }, indent=2)
+ return json.dumps(
+ {
+ "count": len(rules),
+ "rules": [
+ {
+ "file": str(r.file_path),
+ "title": r.title,
+ "type": r.type,
+ "applies_to": r.applies_to,
+ }
+ for r in rules
+ ],
+ },
+ indent=2,
+ )

  @mcp.resource("content-rules://{filename}")
  def get_content_rule(filename: str) -> str:
@@ -140,6 +149,7 @@ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  requirement formats and authoring guidelines.
  """
  import json
+
  rules = ctx.get_content_rules()
  for rule in rules:
  if rule.file_path.name == filename or str(rule.file_path).endswith(filename):
@@ -150,6 +160,7 @@ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  def get_current_config() -> str:
  """Get the current elspais configuration."""
  import json
+
  return json.dumps(ctx.config, indent=2, default=str)


@@ -186,7 +197,10 @@ def _register_tools(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  "valid": len(errors) == 0,
  "errors": [serialize_violation(v) for v in errors],
  "warnings": [serialize_violation(v) for v in warnings],
- "summary": f"{len(errors)} errors, {len(warnings)} warnings in {len(requirements)} requirements"
+ "summary": (
+ f"{len(errors)} errors, {len(warnings)} warnings "
+ f"in {len(requirements)} requirements"
+ ),
  }

  @mcp.tool()
@@ -209,9 +223,8 @@ def _register_tools(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  return {
  "count": len(requirements),
  "requirements": {
- req_id: serialize_requirement(req)
- for req_id, req in requirements.items()
- }
+ req_id: serialize_requirement(req) for req_id, req in requirements.items()
+ },
  }

  @mcp.tool()
@@ -233,7 +246,7 @@ def _register_tools(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
  "count": len(results),
  "query": query,
  "field": field,
- "requirements": [serialize_requirement_summary(r) for r in results]
+ "requirements": [serialize_requirement_summary(r) for r in results],
  }

  @mcp.tool()
@@ -299,10 +312,12 @@ def _analyze_orphans(requirements: Dict[str, Any]) -> Dict[str, Any]:
  for req in requirements.values():
  for parent_id in req.implements:
  if parent_id not in all_ids:
- orphans.append({
- "id": req.id,
- "missing_parent": parent_id,
- })
+ orphans.append(
+ {
+ "id": req.id,
+ "missing_parent": parent_id,
+ }
+ )

  return {
  "count": len(orphans),
elspais/reformat/__init__.py CHANGED
@@ -9,23 +9,23 @@ IMPLEMENTS REQUIREMENTS:
  REQ-int-d00008: Reformat Command
  """

- from elspais.reformat.detector import detect_format, needs_reformatting, FormatAnalysis
- from elspais.reformat.transformer import (
- reformat_requirement,
- assemble_new_format,
- validate_reformatted_content,
- )
- from elspais.reformat.line_breaks import (
- normalize_line_breaks,
- fix_requirement_line_breaks,
- detect_line_break_issues,
- )
+ from elspais.reformat.detector import FormatAnalysis, detect_format, needs_reformatting
  from elspais.reformat.hierarchy import (
  RequirementNode,
- get_all_requirements,
  build_hierarchy,
- traverse_top_down,
+ get_all_requirements,
  normalize_req_id,
+ traverse_top_down,
+ )
+ from elspais.reformat.line_breaks import (
+ detect_line_break_issues,
+ fix_requirement_line_breaks,
+ normalize_line_breaks,
+ )
+ from elspais.reformat.transformer import (
+ assemble_new_format,
+ reformat_requirement,
+ validate_reformatted_content,
  )

  __all__ = [
elspais/reformat/detector.py CHANGED
@@ -13,6 +13,7 @@ from dataclasses import dataclass
  @dataclass
  class FormatAnalysis:
  """Result of format detection analysis."""
+
  is_new_format: bool
  has_assertions_section: bool
  has_labeled_assertions: bool
@@ -46,36 +47,28 @@ def detect_format(body: str, rationale: str = "") -> FormatAnalysis:
  full_text = f"{body}\n{rationale}".strip()

  # Check for ## Assertions section
- has_assertions_section = bool(
- re.search(r'^##\s+Assertions\s*$', full_text, re.MULTILINE)
- )
+ has_assertions_section = bool(re.search(r"^##\s+Assertions\s*$", full_text, re.MULTILINE))

  # Check for labeled assertions (A., B., C., etc. followed by SHALL somewhere in the line)
  labeled_assertions = re.findall(
- r'^[A-Z]\.\s+.*\bSHALL\b',
- full_text,
- re.MULTILINE | re.IGNORECASE
+ r"^[A-Z]\.\s+.*\bSHALL\b", full_text, re.MULTILINE | re.IGNORECASE
  )
  has_labeled_assertions = len(labeled_assertions) >= 1
  assertion_count = len(labeled_assertions)

  # Check for Acceptance Criteria section
- has_acceptance_criteria = bool(re.search(
- r'\*?\*?Acceptance\s+Criteria\*?\*?\s*:',
- full_text,
- re.IGNORECASE
- ))
+ has_acceptance_criteria = bool(
+ re.search(r"\*?\*?Acceptance\s+Criteria\*?\*?\s*:", full_text, re.IGNORECASE)
+ )

  # Check for SHALL language usage anywhere
- shall_count = len(re.findall(r'\bSHALL\b', full_text, re.IGNORECASE))
+ shall_count = len(re.findall(r"\bSHALL\b", full_text, re.IGNORECASE))
  uses_shall_language = shall_count >= 1

  # Determine if new format
  # New format: has Assertions section with labeled assertions, no Acceptance Criteria
  is_new_format = (
- has_assertions_section and
- has_labeled_assertions and
- not has_acceptance_criteria
+ has_assertions_section and has_labeled_assertions and not has_acceptance_criteria
  )

  # Calculate confidence score
@@ -100,7 +93,7 @@ def detect_format(body: str, rationale: str = "") -> FormatAnalysis:
  has_acceptance_criteria=has_acceptance_criteria,
  uses_shall_language=uses_shall_language,
  assertion_count=assertion_count,
- confidence=confidence
+ confidence=confidence,
  )

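A hedged usage sketch for detect_format as it appears above, assuming elspais 0.11.2 is installed; the sample body text is invented for illustration:

    from elspais.reformat.detector import detect_format

    body = """## Assertions
    A. The system SHALL validate requirement IDs.
    B. The system SHALL report duplicate IDs.
    """

    analysis = detect_format(body)
    print(analysis.is_new_format)    # True: Assertions section, labeled SHALL lines,
    print(analysis.assertion_count)  # 2     and no Acceptance Criteria block
    print(analysis.confidence)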
elspais/reformat/hierarchy.py CHANGED
@@ -9,7 +9,7 @@ a traversable hierarchy based on implements relationships.
  import sys
  from dataclasses import dataclass, field
  from pathlib import Path
- from typing import Callable, Dict, List, Optional, TYPE_CHECKING
+ from typing import TYPE_CHECKING, Callable, Dict, List, Optional

  if TYPE_CHECKING:
  from elspais.core.models import Requirement
@@ -19,6 +19,7 @@ if TYPE_CHECKING:
  @dataclass
  class RequirementNode:
  """Represents a requirement with its metadata and hierarchy info."""
+
  req_id: str
  title: str
  body: str
@@ -76,10 +77,10 @@ def get_all_requirements(
  Returns:
  Dict mapping requirement ID (e.g., 'REQ-d00027') to RequirementNode
  """
- from elspais.config.loader import load_config, find_config_file, get_spec_directories
+ from elspais.commands.validate import load_requirements_from_repo
+ from elspais.config.loader import find_config_file, get_spec_directories, load_config
  from elspais.core.parser import RequirementParser
  from elspais.core.patterns import PatternConfig
- from elspais.commands.validate import load_requirements_from_repo

  # Find and load config
  if config_path is None:
@@ -140,7 +141,7 @@ def build_hierarchy(requirements: Dict[str, RequirementNode]) -> Dict[str, Requi
  for req_id, node in requirements.items():
  for parent_id in node.implements:
  # Normalize parent ID format
- parent_key = parent_id if parent_id.startswith('REQ-') else f"REQ-{parent_id}"
+ parent_key = parent_id if parent_id.startswith("REQ-") else f"REQ-{parent_id}"
  if parent_key in requirements:
  requirements[parent_key].children.append(req_id)

@@ -155,7 +156,7 @@ def traverse_top_down(
  requirements: Dict[str, RequirementNode],
  start_req: str,
  max_depth: Optional[int] = None,
- callback: Optional[Callable[[RequirementNode, int], None]] = None
+ callback: Optional[Callable[[RequirementNode, int], None]] = None,
  ) -> List[str]:
  """
  Traverse hierarchy from start_req downward using BFS.
@@ -214,8 +215,8 @@ def normalize_req_id(req_id: str, validator: Optional["PatternValidator"] = None
  Returns:
  Normalized ID in canonical format from config
  """
- from elspais.config.loader import load_config, find_config_file
- from elspais.core.patterns import PatternValidator, PatternConfig
+ from elspais.config.loader import find_config_file, load_config
+ from elspais.core.patterns import PatternConfig, PatternValidator

  # Create validator if not provided
  if validator is None:
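A hedged traversal sketch built from the signatures visible above; it assumes get_all_requirements() can locate a config with its defaults and that the docstring's example ID REQ-d00027 actually exists in the repository:

    from elspais.reformat.hierarchy import build_hierarchy, get_all_requirements, traverse_top_down

    # Load every requirement, then link parents to children via implements references.
    requirements = get_all_requirements()          # assumption: defaults find the config
    requirements = build_hierarchy(requirements)

    def show(node, depth):
        # Matches the Callable[[RequirementNode, int], None] callback shape above.
        print("  " * depth + f"{node.req_id}: {node.title}")

    visited = traverse_top_down(requirements, "REQ-d00027", callback=show)
    print(len(visited), "requirements visited")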