elspais 0.11.2__py3-none-any.whl → 0.43.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. elspais/__init__.py +1 -10
  2. elspais/{sponsors/__init__.py → associates.py} +102 -56
  3. elspais/cli.py +366 -69
  4. elspais/commands/__init__.py +9 -3
  5. elspais/commands/analyze.py +118 -169
  6. elspais/commands/changed.py +12 -23
  7. elspais/commands/config_cmd.py +10 -13
  8. elspais/commands/edit.py +33 -13
  9. elspais/commands/example_cmd.py +319 -0
  10. elspais/commands/hash_cmd.py +161 -183
  11. elspais/commands/health.py +1177 -0
  12. elspais/commands/index.py +98 -115
  13. elspais/commands/init.py +99 -22
  14. elspais/commands/reformat_cmd.py +41 -433
  15. elspais/commands/rules_cmd.py +2 -2
  16. elspais/commands/trace.py +443 -324
  17. elspais/commands/validate.py +193 -411
  18. elspais/config/__init__.py +799 -5
  19. elspais/{core/content_rules.py → content_rules.py} +20 -2
  20. elspais/docs/cli/assertions.md +67 -0
  21. elspais/docs/cli/commands.md +304 -0
  22. elspais/docs/cli/config.md +262 -0
  23. elspais/docs/cli/format.md +66 -0
  24. elspais/docs/cli/git.md +45 -0
  25. elspais/docs/cli/health.md +190 -0
  26. elspais/docs/cli/hierarchy.md +60 -0
  27. elspais/docs/cli/ignore.md +72 -0
  28. elspais/docs/cli/mcp.md +245 -0
  29. elspais/docs/cli/quickstart.md +58 -0
  30. elspais/docs/cli/traceability.md +89 -0
  31. elspais/docs/cli/validation.md +96 -0
  32. elspais/graph/GraphNode.py +383 -0
  33. elspais/graph/__init__.py +40 -0
  34. elspais/graph/annotators.py +927 -0
  35. elspais/graph/builder.py +1886 -0
  36. elspais/graph/deserializer.py +248 -0
  37. elspais/graph/factory.py +284 -0
  38. elspais/graph/metrics.py +127 -0
  39. elspais/graph/mutations.py +161 -0
  40. elspais/graph/parsers/__init__.py +156 -0
  41. elspais/graph/parsers/code.py +213 -0
  42. elspais/graph/parsers/comments.py +112 -0
  43. elspais/graph/parsers/config_helpers.py +29 -0
  44. elspais/graph/parsers/heredocs.py +225 -0
  45. elspais/graph/parsers/journey.py +131 -0
  46. elspais/graph/parsers/remainder.py +79 -0
  47. elspais/graph/parsers/requirement.py +347 -0
  48. elspais/graph/parsers/results/__init__.py +6 -0
  49. elspais/graph/parsers/results/junit_xml.py +229 -0
  50. elspais/graph/parsers/results/pytest_json.py +313 -0
  51. elspais/graph/parsers/test.py +305 -0
  52. elspais/graph/relations.py +78 -0
  53. elspais/graph/serialize.py +216 -0
  54. elspais/html/__init__.py +8 -0
  55. elspais/html/generator.py +731 -0
  56. elspais/html/templates/trace_view.html.j2 +2151 -0
  57. elspais/mcp/__init__.py +45 -29
  58. elspais/mcp/__main__.py +5 -1
  59. elspais/mcp/file_mutations.py +138 -0
  60. elspais/mcp/server.py +1998 -244
  61. elspais/testing/__init__.py +3 -3
  62. elspais/testing/config.py +3 -0
  63. elspais/testing/mapper.py +1 -1
  64. elspais/testing/scanner.py +301 -12
  65. elspais/utilities/__init__.py +1 -0
  66. elspais/utilities/docs_loader.py +115 -0
  67. elspais/utilities/git.py +607 -0
  68. elspais/{core → utilities}/hasher.py +8 -22
  69. elspais/utilities/md_renderer.py +189 -0
  70. elspais/{core → utilities}/patterns.py +56 -51
  71. elspais/utilities/reference_config.py +626 -0
  72. elspais/validation/__init__.py +19 -0
  73. elspais/validation/format.py +264 -0
  74. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
  75. elspais-0.43.5.dist-info/RECORD +80 -0
  76. elspais/config/defaults.py +0 -179
  77. elspais/config/loader.py +0 -494
  78. elspais/core/__init__.py +0 -21
  79. elspais/core/git.py +0 -346
  80. elspais/core/models.py +0 -320
  81. elspais/core/parser.py +0 -639
  82. elspais/core/rules.py +0 -509
  83. elspais/mcp/context.py +0 -172
  84. elspais/mcp/serializers.py +0 -112
  85. elspais/reformat/__init__.py +0 -50
  86. elspais/reformat/detector.py +0 -112
  87. elspais/reformat/hierarchy.py +0 -247
  88. elspais/reformat/line_breaks.py +0 -218
  89. elspais/reformat/prompts.py +0 -133
  90. elspais/reformat/transformer.py +0 -266
  91. elspais/trace_view/__init__.py +0 -55
  92. elspais/trace_view/coverage.py +0 -183
  93. elspais/trace_view/generators/__init__.py +0 -12
  94. elspais/trace_view/generators/base.py +0 -334
  95. elspais/trace_view/generators/csv.py +0 -118
  96. elspais/trace_view/generators/markdown.py +0 -170
  97. elspais/trace_view/html/__init__.py +0 -33
  98. elspais/trace_view/html/generator.py +0 -1140
  99. elspais/trace_view/html/templates/base.html +0 -283
  100. elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
  101. elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
  102. elspais/trace_view/html/templates/components/legend_modal.html +0 -69
  103. elspais/trace_view/html/templates/components/review_panel.html +0 -118
  104. elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
  105. elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
  106. elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
  107. elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
  108. elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
  109. elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
  110. elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
  111. elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
  112. elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
  113. elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
  114. elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
  115. elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
  116. elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
  117. elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
  118. elspais/trace_view/html/templates/partials/scripts.js +0 -1741
  119. elspais/trace_view/html/templates/partials/styles.css +0 -1756
  120. elspais/trace_view/models.py +0 -378
  121. elspais/trace_view/review/__init__.py +0 -63
  122. elspais/trace_view/review/branches.py +0 -1142
  123. elspais/trace_view/review/models.py +0 -1200
  124. elspais/trace_view/review/position.py +0 -591
  125. elspais/trace_view/review/server.py +0 -1032
  126. elspais/trace_view/review/status.py +0 -455
  127. elspais/trace_view/review/storage.py +0 -1343
  128. elspais/trace_view/scanning.py +0 -213
  129. elspais/trace_view/specs/README.md +0 -84
  130. elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
  131. elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
  132. elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
  133. elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
  134. elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
  135. elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
  136. elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
  137. elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
  138. elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
  139. elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
  140. elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
  141. elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
  142. elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
  143. elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
  144. elspais-0.11.2.dist-info/RECORD +0 -101
  145. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
  146. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
  147. {elspais-0.11.2.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
elspais/commands/validate.py
@@ -1,449 +1,231 @@
+ # Implements: REQ-int-d00003 (CLI Extension)
  """
- elspais.commands.validate - Validate requirements command.
+ elspais.commands.validate - Validate requirements format and relationships.

- Validates requirements format, links, and hashes.
+ Uses the graph-based system for validation. Commands only work with graph data.
+ Supports --fix to auto-fix certain issues (hashes, status).
  """

+ from __future__ import annotations
+
  import argparse
  import json
  import sys
  from pathlib import Path
- from typing import Any, Dict, List, Optional
-
- from elspais.config.defaults import DEFAULT_CONFIG
- from elspais.config.loader import find_config_file, get_spec_directories, load_config
- from elspais.core.hasher import calculate_hash, verify_hash
- from elspais.core.models import ParseWarning, Requirement
- from elspais.core.parser import RequirementParser
- from elspais.core.patterns import PatternConfig
- from elspais.core.rules import RuleEngine, RulesConfig, RuleViolation, Severity
- from elspais.sponsors import get_sponsor_spec_directories
- from elspais.testing.config import TestingConfig
-
-
- def run(args: argparse.Namespace) -> int:
-     """
-     Run the validate command.
-
-     Args:
-         args: Parsed command line arguments
-
-     Returns:
-         Exit code (0 for success, 1 for validation errors)
-     """
-     # Find and load configuration
-     config = load_configuration(args)
-     if config is None:
-         return 1
-
-     # Determine spec directories (can be string or list)
-     spec_dirs = get_spec_directories(args.spec_dir, config)
-     if not spec_dirs:
-         print("Error: No spec directories found", file=sys.stderr)
-         return 1
-
-     # Add sponsor spec directories if mode is "combined" and include_associated is enabled
-     mode = getattr(args, "mode", "combined")
-     include_associated = config.get("traceability", {}).get("include_associated", True)
-
-     if mode == "combined" and include_associated:
-         base_path = find_project_root(spec_dirs)
-         sponsor_dirs = get_sponsor_spec_directories(config, base_path)
-         if sponsor_dirs:
-             spec_dirs = list(spec_dirs) + sponsor_dirs
-             if not args.quiet:
-                 for sponsor_dir in sponsor_dirs:
-                     print(f"Including sponsor specs: {sponsor_dir}")
-
-     if not args.quiet:
-         if len(spec_dirs) == 1:
-             print(f"Validating requirements in: {spec_dirs[0]}")
-         else:
-             print(f"Validating requirements in: {', '.join(str(d) for d in spec_dirs)}")
-
-     # Parse requirements
-     pattern_config = PatternConfig.from_dict(config.get("patterns", {}))
-     spec_config = config.get("spec", {})
-     no_reference_values = spec_config.get("no_reference_values")
-     parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
-     skip_files = spec_config.get("skip_files", [])
-
-     try:
-         parse_result = parser.parse_directories(spec_dirs, skip_files=skip_files)
-         requirements = dict(parse_result)  # ParseResult supports dict-like access
-     except Exception as e:
-         print(f"Error parsing requirements: {e}", file=sys.stderr)
-         return 1
-
-     if not requirements:
-         print("No requirements found.", file=sys.stderr)
-         return 1
-
-     if not args.quiet:
-         print(f"Found {len(requirements)} requirements")
-
-     # Run validation
-     rules_config = RulesConfig.from_dict(config.get("rules", {}))
-     engine = RuleEngine(rules_config)
-
-     violations = engine.validate(requirements)
-
-     # Add hash validation
-     hash_violations = validate_hashes(requirements, config)
-     violations.extend(hash_violations)
-
-     # Add broken link validation
-     link_violations = validate_links(requirements, args, config)
-     violations.extend(link_violations)
-
-     # Add parser warnings (duplicates, etc.) as violations
-     parse_violations = convert_parse_warnings_to_violations(parse_result.warnings)
-     violations.extend(parse_violations)
-
-     # Filter skipped rules
-     if args.skip_rule:
-         violations = [
-             v for v in violations if not any(skip in v.rule_name for skip in args.skip_rule)
-         ]
-
-     # JSON output mode - output and exit
-     if getattr(args, "json", False):
-         # Test mapping (if enabled)
-         test_data = None
-         testing_config = TestingConfig.from_dict(config.get("testing", {}))
-         if should_scan_tests(args, testing_config):
-             from elspais.testing.mapper import TestMapper
-
-             base_path = find_project_root(spec_dirs)
-             ignore_dirs = config.get("directories", {}).get("ignore", [])
-             mapper = TestMapper(testing_config)
-             test_data = mapper.map_tests(
-                 requirement_ids=set(requirements.keys()),
-                 base_path=base_path,
-                 ignore=ignore_dirs,
-             )
-
-         print(format_requirements_json(requirements, violations, test_data))
-         errors = [v for v in violations if v.severity == Severity.ERROR]
-         return 1 if errors else 0
-
-     # Report results
-     errors = [v for v in violations if v.severity == Severity.ERROR]
-     warnings = [v for v in violations if v.severity == Severity.WARNING]
-     infos = [v for v in violations if v.severity == Severity.INFO]
-
-     if violations and not args.quiet:
-         print()
-         for violation in sorted(violations, key=lambda v: (v.severity.value, v.requirement_id)):
-             print(violation)
-         print()
-
-     # Summary
-     if not args.quiet:
-         print("─" * 60)
-         valid_count = len(requirements) - len({v.requirement_id for v in errors})
-         print(f"✓ {valid_count}/{len(requirements)} requirements valid")
-
-         if errors:
-             print(f"❌ {len(errors)} errors")
-         if warnings:
-             print(f"⚠️ {len(warnings)} warnings")
-         if infos and getattr(args, "verbose", False):
-             print(f"ℹ️ {len(infos)} info")
+ from typing import TYPE_CHECKING

-     # Return error if there are errors
-     if errors:
-         return 1
+ if TYPE_CHECKING:
+     pass

-     if not args.quiet and not violations:
-         print("✓ All requirements valid")
+ from elspais.graph import NodeKind

-     return 0

+ def _get_requirement_body(node) -> str:
+     """Extract hashable body content from a requirement node.

- def load_configuration(args: argparse.Namespace) -> Optional[Dict]:
-     """Load configuration from file or use defaults."""
-     if args.config:
-         config_path = args.config
-     else:
-         config_path = find_config_file(Path.cwd())
-
-     if config_path and config_path.exists():
-         try:
-             return load_config(config_path)
-         except Exception as e:
-             print(f"Error loading config: {e}", file=sys.stderr)
-             return None
-     else:
-         # Use defaults
-         return DEFAULT_CONFIG
+     Per spec/requirements-spec.md:
+     > The hash SHALL be calculated from:
+     > - every line AFTER the Header line
+     > - every line BEFORE the Footer line

-
- def should_scan_tests(args: argparse.Namespace, config: TestingConfig) -> bool:
+     The body_text is extracted during parsing and stored in the node.
      """
-     Determine if test scanning should run based on args and config.
+     return node.get_field("body_text", "")

-     Args:
-         args: Command line arguments
-         config: Testing configuration

-     Returns:
-         True if test scanning should run
+ def run(args: argparse.Namespace) -> int:
+     """Run the validate command.
+
+     Uses graph factory to build TraceGraph, then validates requirements.
+     Supports --fix to auto-fix certain issues.
      """
-     if getattr(args, "no_tests", False):
-         return False
-     if getattr(args, "tests", False):
-         return True
-     return config.enabled
+     from elspais.graph.factory import build_graph
+     from elspais.utilities.hasher import calculate_hash
+
+     spec_dir = getattr(args, "spec_dir", None)
+     config_path = getattr(args, "config", None)
+     fix_mode = getattr(args, "fix", False)
+     dry_run = getattr(args, "dry_run", False)
+
+     # Get repo root from spec_dir or cwd
+     repo_root = Path(spec_dir).parent if spec_dir else Path.cwd()
+
+     graph = build_graph(
+         spec_dirs=[spec_dir] if spec_dir else None,
+         config_path=config_path,
+         repo_root=repo_root,
+     )
+
+     # Collect validation issues
+     errors = []
+     warnings = []
+     fixable = []  # Issues that can be auto-fixed
+
+     for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+         # Check for orphan requirements (no parents except roots)
+         if node.parent_count() == 0 and node.level not in ("PRD", "prd"):
+             warnings.append(
+                 {
+                     "rule": "hierarchy.orphan",
+                     "id": node.id,
+                     "message": f"Requirement {node.id} has no parent (orphan)",
+                 }
+             )

+         # Check for hash presence and correctness
+         body = _get_requirement_body(node)
+         if body:
+             computed_hash = calculate_hash(body)
+             stored_hash = node.hash
+
+             if not stored_hash:
+                 # Missing hash - fixable
+                 issue = {
+                     "rule": "hash.missing",
+                     "id": node.id,
+                     "message": f"Requirement {node.id} is missing a hash",
+                     "fixable": True,
+                     "fix_type": "hash",
+                     "computed_hash": computed_hash,
+                     "file": str(repo_root / node.source.path) if node.source else None,
+                 }
+                 warnings.append(issue)
+                 if issue["file"]:
+                     fixable.append(issue)
+             elif stored_hash != computed_hash:
+                 # Hash mismatch - fixable
+                 issue = {
+                     "rule": "hash.mismatch",
+                     "id": node.id,
+                     "message": f"Requirement {node.id} hash mismatch: "
+                     f"stored={stored_hash} computed={computed_hash}",
+                     "fixable": True,
+                     "fix_type": "hash",
+                     "computed_hash": computed_hash,
+                     "file": str(repo_root / node.source.path) if node.source else None,
+                 }
+                 warnings.append(issue)
+                 if issue["file"]:
+                     fixable.append(issue)
+         elif not node.hash:
+             # No body and no hash
+             warnings.append(
+                 {
+                     "rule": "hash.missing",
+                     "id": node.id,
+                     "message": f"Requirement {node.id} is missing a hash",
+                 }
+             )

- def find_project_root(spec_dirs: List[Path]) -> Path:
-     """
-     Find the project root from spec directories.
+     # Filter by skip rules
+     skip_rules = getattr(args, "skip_rule", None) or []
+     if skip_rules:
+         import fnmatch

-     Looks for .elspais.toml or .git directory above spec dirs.
+         errors = [e for e in errors if not any(fnmatch.fnmatch(e["rule"], p) for p in skip_rules)]
+         warnings = [
+             w for w in warnings if not any(fnmatch.fnmatch(w["rule"], p) for p in skip_rules)
+         ]
+         fixable = [f for f in fixable if not any(fnmatch.fnmatch(f["rule"], p) for p in skip_rules)]

-     Args:
-         spec_dirs: List of spec directories
+     # Handle --fix mode
+     fixed_count = 0
+     if fix_mode and fixable:
+         fixed_count = _apply_fixes(fixable, dry_run)

-     Returns:
-         Project root path
-     """
-     if not spec_dirs:
-         return Path.cwd()
-
-     # Start from first spec dir and look upward
-     current = spec_dirs[0].resolve()
-     while current != current.parent:
-         if (current / ".elspais.toml").exists():
-             return current
-         if (current / ".git").exists():
-             return current
-         current = current.parent
-
-     return Path.cwd()
-
-
- def validate_hashes(requirements: Dict[str, Requirement], config: Dict) -> List[RuleViolation]:
-     """Validate requirement hashes."""
-     violations = []
-     hash_length = config.get("validation", {}).get("hash_length", 8)
-     algorithm = config.get("validation", {}).get("hash_algorithm", "sha256")
-
-     for req_id, req in requirements.items():
-         if req.hash:
-             # Verify hash matches content
-             expected_hash = calculate_hash(req.body, length=hash_length, algorithm=algorithm)
-             if not verify_hash(req.body, req.hash, length=hash_length, algorithm=algorithm):
-                 violations.append(
-                     RuleViolation(
-                         rule_name="hash.mismatch",
-                         requirement_id=req_id,
-                         message=f"Hash mismatch: expected {expected_hash}, found {req.hash}",
-                         severity=Severity.WARNING,
-                         location=req.location(),
-                     )
-                 )
-
-     return violations
-
-
- def validate_links(
-     requirements: Dict[str, Requirement],
-     args: argparse.Namespace,
-     config: Dict,
- ) -> List[RuleViolation]:
-     """Validate requirement links (implements references)."""
-     violations = []
-
-     # Load core requirements if this is an associated repo
-     core_requirements = {}
-     core_path = args.core_repo or config.get("core", {}).get("path")
-     if core_path:
-         core_requirements = load_requirements_from_repo(Path(core_path), config)
-
-     all_requirements = {**core_requirements, **requirements}
-     all_ids = set(all_requirements.keys())
-
-     # Build set of all valid short IDs too
-     short_ids = set()
-     for req_id in all_ids:
-         # Add various shortened forms
-         parts = req_id.split("-")
-         if len(parts) >= 2:
-             # REQ-p00001 -> p00001
-             short_ids.add("-".join(parts[1:]))
-             # REQ-CAL-p00001 -> CAL-p00001
-             if len(parts) >= 3:
-                 short_ids.add("-".join(parts[2:]))
-                 short_ids.add("-".join(parts[1:]))
-
-     for req_id, req in requirements.items():
-         for impl_id in req.implements:
-             # Check if reference is valid
-             if impl_id not in all_ids and impl_id not in short_ids:
-                 violations.append(
-                     RuleViolation(
-                         rule_name="link.broken",
-                         requirement_id=req_id,
-                         message=f"Implements reference not found: {impl_id}",
-                         severity=Severity.ERROR,
-                         location=req.location(),
-                     )
-                 )
-
-     return violations
-
-
- def convert_parse_warnings_to_violations(
-     warnings: List[ParseWarning],
- ) -> List[RuleViolation]:
-     """Convert parser warnings (like duplicates) to rule violations.
-
-     The parser detects duplicate REQ IDs and generates ParseWarning objects.
-     This function converts them to RuleViolation objects so they appear in
-     validation output.
+     # Count requirements
+     req_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.REQUIREMENT))

-     Args:
-         warnings: List of ParseWarning objects from parser
+     # Output results
+     if getattr(args, "json", False):
+         result = {
+             "valid": len(errors) == 0,
+             "errors": errors,
+             "warnings": warnings,
+             "requirements_count": req_count,
+             "fixed_count": fixed_count if fix_mode else 0,
+         }
+         print(json.dumps(result, indent=2))
+     else:
+         if not getattr(args, "quiet", False):
+             print(f"Validated {req_count} requirements")
+
+         # Show fix results
+         if fix_mode:
+             if dry_run:
+                 if fixable:
+                     print(f"Would fix {len(fixable)} issue(s):")
+                     for f in fixable:
+                         print(f"  {f['id']}: {f['rule']}")
+                 else:
+                     print("No fixable issues found.")
+             else:
+                 if fixed_count > 0:
+                     print(f"Fixed {fixed_count} issue(s)")
+
+         for err in errors:
+             print(f"ERROR [{err['rule']}] {err['id']}: {err['message']}", file=sys.stderr)
+
+         # Only show unfixed warnings
+         unfixed_warnings = [w for w in warnings if not w.get("fixable") or not fix_mode]
+         for warn in unfixed_warnings:
+             print(
+                 f"WARNING [{warn['rule']}] {warn['id']}: {warn['message']}",
+                 file=sys.stderr,
+             )

-     Returns:
-         List of RuleViolation objects for duplicate IDs
-     """
-     violations = []
-     for warning in warnings:
-         if "duplicate" in warning.message.lower():
-             violations.append(
-                 RuleViolation(
-                     rule_name="id.duplicate",
-                     requirement_id=warning.requirement_id,
-                     message=warning.message,
-                     severity=Severity.ERROR,
-                     location=f"{warning.file_path}:{warning.line_number}",
-                 )
+         if errors:
+             print(
+                 f"\n{len(errors)} errors, {len(unfixed_warnings)} warnings",
+                 file=sys.stderr,
              )
-     return violations
+         elif unfixed_warnings:
+             print(f"\n{len(unfixed_warnings)} warnings", file=sys.stderr)
+
+     return 1 if errors else 0


- def load_requirements_from_repo(repo_path: Path, config: Dict) -> Dict[str, Requirement]:
-     """Load requirements from any repository path.
+ def _apply_fixes(fixable: list[dict], dry_run: bool) -> int:
+     """Apply fixes to spec files.

      Args:
-         repo_path: Path to the repository root
-         config: Configuration dict (used as fallback if repo has no config)
+         fixable: List of fixable issues with fix metadata.
+         dry_run: If True, don't actually modify files.

      Returns:
-         Dict mapping requirement ID to Requirement object
+         Number of issues fixed.
      """
-     if not repo_path.exists():
-         return {}
-
-     # Find repo config
-     repo_config_path = repo_path / ".elspais.toml"
-     if repo_config_path.exists():
-         repo_config = load_config(repo_config_path)
-     else:
-         repo_config = config  # Use same config
-
-     spec_dir = repo_path / repo_config.get("directories", {}).get("spec", "spec")
-     if not spec_dir.exists():
-         return {}
-
-     pattern_config = PatternConfig.from_dict(repo_config.get("patterns", {}))
-     spec_config = repo_config.get("spec", {})
-     no_reference_values = spec_config.get("no_reference_values")
-     parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
-     skip_files = spec_config.get("skip_files", [])
-
-     try:
-         return parser.parse_directory(spec_dir, skip_files=skip_files)
-     except Exception:
-         return {}
+     if dry_run:
+         return 0

+     from elspais.mcp.file_mutations import add_status_to_file, update_hash_in_file

- def format_requirements_json(
-     requirements: Dict[str, Requirement],
-     violations: List[RuleViolation],
-     test_data: Optional[Any] = None,
- ) -> str:
-     """
-     Format requirements as JSON in hht_diary compatible format.
+     fixed = 0
+     for issue in fixable:
+         fix_type = issue.get("fix_type")
+         file_path = issue.get("file")

-     Args:
-         requirements: Dictionary of requirement ID to Requirement
-         violations: List of rule violations for error metadata
-         test_data: Optional TestMappingResult with test coverage data
+         if not file_path:
+             continue

-     Returns:
-         JSON string with requirement data
-     """
-     # Build violation lookup for cycle/conflict detection
-     violation_by_req: Dict[str, List[RuleViolation]] = {}
-     for v in violations:
-         if v.requirement_id not in violation_by_req:
-             violation_by_req[v.requirement_id] = []
-         violation_by_req[v.requirement_id].append(v)
-
-     output = {}
-     for req_id, req in requirements.items():
-         req_violations = violation_by_req.get(req_id, [])
-
-         # Check for specific violation types
-         is_cycle = any("cycle" in v.rule_name.lower() for v in req_violations)
-
-         # Use the model's is_conflict flag directly, or check violations for older behavior
-         is_conflict = req.is_conflict or any(
-             "conflict" in v.rule_name.lower() or "duplicate" in v.rule_name.lower()
-             for v in req_violations
-         )
-         conflict_with = req.conflict_with if req.conflict_with else None
-         cycle_path = None
-
-         # Also check violations for additional context
-         for v in req_violations:
-             if "duplicate" in v.rule_name.lower() and not conflict_with:
-                 # Try to extract conflicting ID from message
-                 conflict_with = v.message
-             if "cycle" in v.rule_name.lower():
-                 cycle_path = v.message
-
-         # Build requirement data matching hht_diary format
-         # Note: req_id includes __conflict suffix for conflicts to avoid key collision
-         output[req_id] = {
-             "title": req.title,
-             "status": req.status,
-             "level": req.level,
-             "body": req.body.strip(),
-             "rationale": (req.rationale or "").strip(),
-             "file": req.file_path.name if req.file_path else "",
-             "filePath": str(req.file_path) if req.file_path else "",
-             "line": req.line_number or 0,
-             "implements": req.implements,
-             "hash": req.hash or "",
-             "subdir": req.subdir,
-             "isConflict": is_conflict,
-             "conflictWith": conflict_with,
-             "isCycle": is_cycle,
-             "cyclePath": cycle_path,
-         }
+         if fix_type == "hash":
+             # Fix hash (missing or mismatch)
+             success = update_hash_in_file(
+                 file_path=Path(file_path),
+                 req_id=issue["id"],
+                 new_hash=issue["computed_hash"],
+             )
+             if success:
+                 fixed += 1
+
+         elif fix_type == "status":
+             # Add missing status
+             success = add_status_to_file(
+                 file_path=Path(file_path),
+                 req_id=issue["id"],
+                 status=issue.get("status", "Active"),
+             )
+             if success:
+                 fixed += 1

-         # Include assertions if present
-         if req.assertions:
-             output[req_id]["assertions"] = [
-                 {"label": a.label, "text": a.text, "isPlaceholder": a.is_placeholder}
-                 for a in req.assertions
-             ]
-
-         # Include test data if available
-         if test_data and req_id in test_data.requirement_data:
-             td = test_data.requirement_data[req_id]
-             output[req_id]["test_count"] = td.test_count
-             output[req_id]["test_passed"] = td.test_passed
-             output[req_id]["test_result_files"] = td.test_result_files
-         else:
-             # Default values when no test data
-             output[req_id]["test_count"] = 0
-             output[req_id]["test_passed"] = 0
-             output[req_id]["test_result_files"] = []
-
-     return json.dumps(output, indent=2)
+     return fixed
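
A note on the hash rule enforced above: the spec excerpt quoted in _get_requirement_body defines the hashable body as every line between a requirement's Header and Footer lines. Below is a minimal sketch of that check, assuming the 0.11.2 defaults visible in the removed validate_hashes (validation.hash_length = 8, validation.hash_algorithm = "sha256"); calculate_hash_sketch is a hypothetical stand-in for elspais.utilities.hasher.calculate_hash, whose exact normalization is not shown in this diff.

import hashlib

def calculate_hash_sketch(body: str, length: int = 8, algorithm: str = "sha256") -> str:
    # Hash the requirement body (every line after the Header line and
    # before the Footer line), then truncate to the configured length.
    return hashlib.new(algorithm, body.encode("utf-8")).hexdigest()[:length]

stored = "1a2b3c4d"  # hash recorded in the spec file (made-up value)
computed = calculate_hash_sketch("The system SHALL log every request.\n")
if stored != computed:
    print(f"hash.mismatch: stored={stored} computed={computed}")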
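Likewise, a minimal sketch of exercising the new entry point directly. The attribute names mirror the getattr calls in run; the values are invented examples, and the real CLI flag wiring in elspais/cli.py is not shown in this hunk.

import argparse

from elspais.commands import validate

args = argparse.Namespace(
    spec_dir="spec",            # where requirement specs live (example value)
    config=None,                # fall back to discovered configuration
    fix=True,                   # auto-fix fixable issues (hashes, status)
    dry_run=True,               # report what would change, touch nothing
    skip_rule=["hierarchy.*"],  # fnmatch patterns matched against rule names
    json=False,
    quiet=False,
)
exit_code = validate.run(args)  # 0 when no errors remain, 1 otherwise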