elspais 0.11.1__py3-none-any.whl → 0.43.5__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in their respective public registries.
- elspais/__init__.py +2 -11
- elspais/{sponsors/__init__.py → associates.py} +102 -58
- elspais/cli.py +395 -79
- elspais/commands/__init__.py +9 -3
- elspais/commands/analyze.py +121 -173
- elspais/commands/changed.py +15 -30
- elspais/commands/config_cmd.py +13 -16
- elspais/commands/edit.py +60 -44
- elspais/commands/example_cmd.py +319 -0
- elspais/commands/hash_cmd.py +167 -183
- elspais/commands/health.py +1177 -0
- elspais/commands/index.py +98 -114
- elspais/commands/init.py +103 -26
- elspais/commands/reformat_cmd.py +41 -444
- elspais/commands/rules_cmd.py +7 -3
- elspais/commands/trace.py +444 -321
- elspais/commands/validate.py +195 -415
- elspais/config/__init__.py +799 -5
- elspais/{core/content_rules.py → content_rules.py} +20 -3
- elspais/docs/cli/assertions.md +67 -0
- elspais/docs/cli/commands.md +304 -0
- elspais/docs/cli/config.md +262 -0
- elspais/docs/cli/format.md +66 -0
- elspais/docs/cli/git.md +45 -0
- elspais/docs/cli/health.md +190 -0
- elspais/docs/cli/hierarchy.md +60 -0
- elspais/docs/cli/ignore.md +72 -0
- elspais/docs/cli/mcp.md +245 -0
- elspais/docs/cli/quickstart.md +58 -0
- elspais/docs/cli/traceability.md +89 -0
- elspais/docs/cli/validation.md +96 -0
- elspais/graph/GraphNode.py +383 -0
- elspais/graph/__init__.py +40 -0
- elspais/graph/annotators.py +927 -0
- elspais/graph/builder.py +1886 -0
- elspais/graph/deserializer.py +248 -0
- elspais/graph/factory.py +284 -0
- elspais/graph/metrics.py +127 -0
- elspais/graph/mutations.py +161 -0
- elspais/graph/parsers/__init__.py +156 -0
- elspais/graph/parsers/code.py +213 -0
- elspais/graph/parsers/comments.py +112 -0
- elspais/graph/parsers/config_helpers.py +29 -0
- elspais/graph/parsers/heredocs.py +225 -0
- elspais/graph/parsers/journey.py +131 -0
- elspais/graph/parsers/remainder.py +79 -0
- elspais/graph/parsers/requirement.py +347 -0
- elspais/graph/parsers/results/__init__.py +6 -0
- elspais/graph/parsers/results/junit_xml.py +229 -0
- elspais/graph/parsers/results/pytest_json.py +313 -0
- elspais/graph/parsers/test.py +305 -0
- elspais/graph/relations.py +78 -0
- elspais/graph/serialize.py +216 -0
- elspais/html/__init__.py +8 -0
- elspais/html/generator.py +731 -0
- elspais/html/templates/trace_view.html.j2 +2151 -0
- elspais/mcp/__init__.py +47 -29
- elspais/mcp/__main__.py +5 -1
- elspais/mcp/file_mutations.py +138 -0
- elspais/mcp/server.py +2016 -247
- elspais/testing/__init__.py +4 -4
- elspais/testing/config.py +3 -0
- elspais/testing/mapper.py +1 -1
- elspais/testing/result_parser.py +25 -21
- elspais/testing/scanner.py +301 -12
- elspais/utilities/__init__.py +1 -0
- elspais/utilities/docs_loader.py +115 -0
- elspais/utilities/git.py +607 -0
- elspais/{core → utilities}/hasher.py +8 -22
- elspais/utilities/md_renderer.py +189 -0
- elspais/{core → utilities}/patterns.py +58 -57
- elspais/utilities/reference_config.py +626 -0
- elspais/validation/__init__.py +19 -0
- elspais/validation/format.py +264 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
- elspais-0.43.5.dist-info/RECORD +80 -0
- elspais/config/defaults.py +0 -173
- elspais/config/loader.py +0 -494
- elspais/core/__init__.py +0 -21
- elspais/core/git.py +0 -352
- elspais/core/models.py +0 -320
- elspais/core/parser.py +0 -640
- elspais/core/rules.py +0 -514
- elspais/mcp/context.py +0 -171
- elspais/mcp/serializers.py +0 -112
- elspais/reformat/__init__.py +0 -50
- elspais/reformat/detector.py +0 -119
- elspais/reformat/hierarchy.py +0 -246
- elspais/reformat/line_breaks.py +0 -220
- elspais/reformat/prompts.py +0 -123
- elspais/reformat/transformer.py +0 -264
- elspais/trace_view/__init__.py +0 -54
- elspais/trace_view/coverage.py +0 -183
- elspais/trace_view/generators/__init__.py +0 -12
- elspais/trace_view/generators/base.py +0 -329
- elspais/trace_view/generators/csv.py +0 -122
- elspais/trace_view/generators/markdown.py +0 -175
- elspais/trace_view/html/__init__.py +0 -31
- elspais/trace_view/html/generator.py +0 -1006
- elspais/trace_view/html/templates/base.html +0 -283
- elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
- elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
- elspais/trace_view/html/templates/components/legend_modal.html +0 -69
- elspais/trace_view/html/templates/components/review_panel.html +0 -118
- elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
- elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
- elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
- elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
- elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
- elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
- elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
- elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
- elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
- elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
- elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
- elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
- elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
- elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
- elspais/trace_view/html/templates/partials/scripts.js +0 -1741
- elspais/trace_view/html/templates/partials/styles.css +0 -1756
- elspais/trace_view/models.py +0 -353
- elspais/trace_view/review/__init__.py +0 -60
- elspais/trace_view/review/branches.py +0 -1149
- elspais/trace_view/review/models.py +0 -1205
- elspais/trace_view/review/position.py +0 -609
- elspais/trace_view/review/server.py +0 -1056
- elspais/trace_view/review/status.py +0 -470
- elspais/trace_view/review/storage.py +0 -1367
- elspais/trace_view/scanning.py +0 -213
- elspais/trace_view/specs/README.md +0 -84
- elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
- elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
- elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
- elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
- elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
- elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
- elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
- elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
- elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
- elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
- elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
- elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
- elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
- elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
- elspais-0.11.1.dist-info/RECORD +0 -101
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
elspais/commands/validate.py
CHANGED
```diff
@@ -1,451 +1,231 @@
+# Implements: REQ-int-d00003 (CLI Extension)
 """
-elspais.commands.validate - Validate requirements
+elspais.commands.validate - Validate requirements format and relationships.

-
+Uses the graph-based system for validation. Commands only work with graph data.
+Supports --fix to auto-fix certain issues (hashes, status).
 """

+from __future__ import annotations
+
 import argparse
 import json
 import sys
 from pathlib import Path
-from typing import
-
-from elspais.config.defaults import DEFAULT_CONFIG
-from elspais.config.loader import find_config_file, get_spec_directories, load_config
-from elspais.core.hasher import calculate_hash, verify_hash
-from elspais.core.models import ParseWarning, Requirement
-from elspais.core.parser import RequirementParser
-from elspais.core.patterns import PatternConfig
-from elspais.core.rules import RuleEngine, RulesConfig, RuleViolation, Severity
-from elspais.sponsors import get_sponsor_spec_directories
-from elspais.testing.config import TestingConfig
-
-
-def run(args: argparse.Namespace) -> int:
-    """
-    Run the validate command.
-
-    Args:
-        args: Parsed command line arguments
-
-    Returns:
-        Exit code (0 for success, 1 for validation errors)
-    """
-    # Find and load configuration
-    config = load_configuration(args)
-    if config is None:
-        return 1
-
-    # Determine spec directories (can be string or list)
-    spec_dirs = get_spec_directories(args.spec_dir, config)
-    if not spec_dirs:
-        print("Error: No spec directories found", file=sys.stderr)
-        return 1
-
-    # Add sponsor spec directories if mode is "combined" and include_associated is enabled
-    mode = getattr(args, 'mode', 'combined')
-    include_associated = config.get('traceability', {}).get('include_associated', True)
-
-    if mode == 'combined' and include_associated:
-        base_path = find_project_root(spec_dirs)
-        sponsor_dirs = get_sponsor_spec_directories(config, base_path)
-        if sponsor_dirs:
-            spec_dirs = list(spec_dirs) + sponsor_dirs
-            if not args.quiet:
-                for sponsor_dir in sponsor_dirs:
-                    print(f"Including sponsor specs: {sponsor_dir}")
-
-    if not args.quiet:
-        if len(spec_dirs) == 1:
-            print(f"Validating requirements in: {spec_dirs[0]}")
-        else:
-            print(f"Validating requirements in: {', '.join(str(d) for d in spec_dirs)}")
-
-    # Parse requirements
-    pattern_config = PatternConfig.from_dict(config.get("patterns", {}))
-    spec_config = config.get("spec", {})
-    no_reference_values = spec_config.get("no_reference_values")
-    parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
-    skip_files = spec_config.get("skip_files", [])
-
-    try:
-        parse_result = parser.parse_directories(spec_dirs, skip_files=skip_files)
-        requirements = dict(parse_result)  # ParseResult supports dict-like access
-    except Exception as e:
-        print(f"Error parsing requirements: {e}", file=sys.stderr)
-        return 1
-
-    if not requirements:
-        print("No requirements found.", file=sys.stderr)
-        return 1
-
-    if not args.quiet:
-        print(f"Found {len(requirements)} requirements")
-
-    # Run validation
-    rules_config = RulesConfig.from_dict(config.get("rules", {}))
-    engine = RuleEngine(rules_config)
-
-    violations = engine.validate(requirements)
-
-    # Add hash validation
-    hash_violations = validate_hashes(requirements, config)
-    violations.extend(hash_violations)
-
-    # Add broken link validation
-    link_violations = validate_links(requirements, args, config)
-    violations.extend(link_violations)
-
-    # Add parser warnings (duplicates, etc.) as violations
-    parse_violations = convert_parse_warnings_to_violations(parse_result.warnings)
-    violations.extend(parse_violations)
-
-    # Filter skipped rules
-    if args.skip_rule:
-        violations = [
-            v for v in violations
-            if not any(skip in v.rule_name for skip in args.skip_rule)
-        ]
-
-    # JSON output mode - output and exit
-    if getattr(args, 'json', False):
-        # Test mapping (if enabled)
-        test_data = None
-        testing_config = TestingConfig.from_dict(config.get("testing", {}))
-        if should_scan_tests(args, testing_config):
-            from elspais.testing.mapper import TestMapper
-
-            base_path = find_project_root(spec_dirs)
-            ignore_dirs = config.get("directories", {}).get("ignore", [])
-            mapper = TestMapper(testing_config)
-            test_data = mapper.map_tests(
-                requirement_ids=set(requirements.keys()),
-                base_path=base_path,
-                ignore=ignore_dirs,
-            )
+from typing import TYPE_CHECKING

-
-
-    return 1 if errors else 0
+if TYPE_CHECKING:
+    pass

-
-    errors = [v for v in violations if v.severity == Severity.ERROR]
-    warnings = [v for v in violations if v.severity == Severity.WARNING]
-    infos = [v for v in violations if v.severity == Severity.INFO]
+from elspais.graph import NodeKind

-    if violations and not args.quiet:
-        print()
-        for violation in sorted(violations, key=lambda v: (v.severity.value, v.requirement_id)):
-            print(violation)
-        print()

-
-
-    print("─" * 60)
-    valid_count = len(requirements) - len({v.requirement_id for v in errors})
-    print(f"✓ {valid_count}/{len(requirements)} requirements valid")
-
-    if errors:
-        print(f"❌ {len(errors)} errors")
-    if warnings:
-        print(f"⚠️ {len(warnings)} warnings")
-    if infos and getattr(args, "verbose", False):
-        print(f"ℹ️ {len(infos)} info")
-
-    # Return error if there are errors
-    if errors:
-        return 1
-
-    if not args.quiet and not violations:
-        print("✓ All requirements valid")
-
-    return 0
-
-
-def load_configuration(args: argparse.Namespace) -> Optional[Dict]:
-    """Load configuration from file or use defaults."""
-    if args.config:
-        config_path = args.config
-    else:
-        config_path = find_config_file(Path.cwd())
-
-    if config_path and config_path.exists():
-        try:
-            return load_config(config_path)
-        except Exception as e:
-            print(f"Error loading config: {e}", file=sys.stderr)
-            return None
-    else:
-        # Use defaults
-        return DEFAULT_CONFIG
+def _get_requirement_body(node) -> str:
+    """Extract hashable body content from a requirement node.

+    Per spec/requirements-spec.md:
+    > The hash SHALL be calculated from:
+    > - every line AFTER the Header line
+    > - every line BEFORE the Footer line

-
+    The body_text is extracted during parsing and stored in the node.
     """
-
+    return node.get_field("body_text", "")

-    Args:
-        args: Command line arguments
-        config: Testing configuration
-
-    Returns:
-        True if test scanning should run
-    """
-    if getattr(args, 'no_tests', False):
-        return False
-    if getattr(args, 'tests', False):
-        return True
-    return config.enabled

+def run(args: argparse.Namespace) -> int:
+    """Run the validate command.

-
+    Uses graph factory to build TraceGraph, then validates requirements.
+    Supports --fix to auto-fix certain issues.
     """
-
+    from elspais.graph.factory import build_graph
+    from elspais.utilities.hasher import calculate_hash
+
+    spec_dir = getattr(args, "spec_dir", None)
+    config_path = getattr(args, "config", None)
+    fix_mode = getattr(args, "fix", False)
+    dry_run = getattr(args, "dry_run", False)
+
+    # Get repo root from spec_dir or cwd
+    repo_root = Path(spec_dir).parent if spec_dir else Path.cwd()
+
+    graph = build_graph(
+        spec_dirs=[spec_dir] if spec_dir else None,
+        config_path=config_path,
+        repo_root=repo_root,
+    )
+
+    # Collect validation issues
+    errors = []
+    warnings = []
+    fixable = []  # Issues that can be auto-fixed
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        # Check for orphan requirements (no parents except roots)
+        if node.parent_count() == 0 and node.level not in ("PRD", "prd"):
+            warnings.append(
+                {
+                    "rule": "hierarchy.orphan",
+                    "id": node.id,
+                    "message": f"Requirement {node.id} has no parent (orphan)",
+                }
+            )

-
+        # Check for hash presence and correctness
+        body = _get_requirement_body(node)
+        if body:
+            computed_hash = calculate_hash(body)
+            stored_hash = node.hash
+
+            if not stored_hash:
+                # Missing hash - fixable
+                issue = {
+                    "rule": "hash.missing",
+                    "id": node.id,
+                    "message": f"Requirement {node.id} is missing a hash",
+                    "fixable": True,
+                    "fix_type": "hash",
+                    "computed_hash": computed_hash,
+                    "file": str(repo_root / node.source.path) if node.source else None,
+                }
+                warnings.append(issue)
+                if issue["file"]:
+                    fixable.append(issue)
+            elif stored_hash != computed_hash:
+                # Hash mismatch - fixable
+                issue = {
+                    "rule": "hash.mismatch",
+                    "id": node.id,
+                    "message": f"Requirement {node.id} hash mismatch: "
+                    f"stored={stored_hash} computed={computed_hash}",
+                    "fixable": True,
+                    "fix_type": "hash",
+                    "computed_hash": computed_hash,
+                    "file": str(repo_root / node.source.path) if node.source else None,
+                }
+                warnings.append(issue)
+                if issue["file"]:
+                    fixable.append(issue)
+        elif not node.hash:
+            # No body and no hash
+            warnings.append(
+                {
+                    "rule": "hash.missing",
+                    "id": node.id,
+                    "message": f"Requirement {node.id} is missing a hash",
+                }
+            )

-
-
+    # Filter by skip rules
+    skip_rules = getattr(args, "skip_rule", None) or []
+    if skip_rules:
+        import fnmatch

-
-
-
-
-
-
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if
-
-
-
-
-
-
-
-
-
-)
-
-
-
-
-
-
-
-
-
-
-
-
-
-    core_requirements = {}
-    core_path = args.core_repo or config.get("core", {}).get("path")
-    if core_path:
-        core_requirements = load_requirements_from_repo(Path(core_path), config)
-
-    all_requirements = {**core_requirements, **requirements}
-    all_ids = set(all_requirements.keys())
-
-    # Build set of all valid short IDs too
-    short_ids = set()
-    for req_id in all_ids:
-        # Add various shortened forms
-        parts = req_id.split("-")
-        if len(parts) >= 2:
-            # REQ-p00001 -> p00001
-            short_ids.add("-".join(parts[1:]))
-        # REQ-CAL-p00001 -> CAL-p00001
-        if len(parts) >= 3:
-            short_ids.add("-".join(parts[2:]))
-            short_ids.add("-".join(parts[1:]))
-
-    for req_id, req in requirements.items():
-        for impl_id in req.implements:
-            # Check if reference is valid
-            if impl_id not in all_ids and impl_id not in short_ids:
-                violations.append(
-                    RuleViolation(
-                        rule_name="link.broken",
-                        requirement_id=req_id,
-                        message=f"Implements reference not found: {impl_id}",
-                        severity=Severity.ERROR,
-                        location=req.location(),
-                    )
-                )
-
-    return violations
-
-
-def convert_parse_warnings_to_violations(
-    warnings: List[ParseWarning],
-) -> List[RuleViolation]:
-    """Convert parser warnings (like duplicates) to rule violations.
-
-    The parser detects duplicate REQ IDs and generates ParseWarning objects.
-    This function converts them to RuleViolation objects so they appear in
-    validation output.
-
-    Args:
-        warnings: List of ParseWarning objects from parser
+        errors = [e for e in errors if not any(fnmatch.fnmatch(e["rule"], p) for p in skip_rules)]
+        warnings = [
+            w for w in warnings if not any(fnmatch.fnmatch(w["rule"], p) for p in skip_rules)
+        ]
+        fixable = [f for f in fixable if not any(fnmatch.fnmatch(f["rule"], p) for p in skip_rules)]
+
+    # Handle --fix mode
+    fixed_count = 0
+    if fix_mode and fixable:
+        fixed_count = _apply_fixes(fixable, dry_run)
+
+    # Count requirements
+    req_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.REQUIREMENT))
+
+    # Output results
+    if getattr(args, "json", False):
+        result = {
+            "valid": len(errors) == 0,
+            "errors": errors,
+            "warnings": warnings,
+            "requirements_count": req_count,
+            "fixed_count": fixed_count if fix_mode else 0,
+        }
+        print(json.dumps(result, indent=2))
+    else:
+        if not getattr(args, "quiet", False):
+            print(f"Validated {req_count} requirements")
+
+        # Show fix results
+        if fix_mode:
+            if dry_run:
+                if fixable:
+                    print(f"Would fix {len(fixable)} issue(s):")
+                    for f in fixable:
+                        print(f"  {f['id']}: {f['rule']}")
+                else:
+                    print("No fixable issues found.")
+            else:
+                if fixed_count > 0:
+                    print(f"Fixed {fixed_count} issue(s)")
+
+        for err in errors:
+            print(f"ERROR [{err['rule']}] {err['id']}: {err['message']}", file=sys.stderr)
+
+        # Only show unfixed warnings
+        unfixed_warnings = [w for w in warnings if not w.get("fixable") or not fix_mode]
+        for warn in unfixed_warnings:
+            print(
+                f"WARNING [{warn['rule']}] {warn['id']}: {warn['message']}",
+                file=sys.stderr,
+            )

-
-
-
-
-    for warning in warnings:
-        if "duplicate" in warning.message.lower():
-            violations.append(
-                RuleViolation(
-                    rule_name="id.duplicate",
-                    requirement_id=warning.requirement_id,
-                    message=warning.message,
-                    severity=Severity.ERROR,
-                    location=f"{warning.file_path}:{warning.line_number}",
-                )
+        if errors:
+            print(
+                f"\n{len(errors)} errors, {len(unfixed_warnings)} warnings",
+                file=sys.stderr,
             )
-
+        elif unfixed_warnings:
+            print(f"\n{len(unfixed_warnings)} warnings", file=sys.stderr)

+    return 1 if errors else 0

-
-
+
+def _apply_fixes(fixable: list[dict], dry_run: bool) -> int:
+    """Apply fixes to spec files.

     Args:
-
-
+        fixable: List of fixable issues with fix metadata.
+        dry_run: If True, don't actually modify files.

     Returns:
-
+        Number of issues fixed.
     """
-if
-return
-
-    # Find repo config
-    repo_config_path = repo_path / ".elspais.toml"
-    if repo_config_path.exists():
-        repo_config = load_config(repo_config_path)
-    else:
-        repo_config = config  # Use same config
-
-    spec_dir = repo_path / repo_config.get("directories", {}).get("spec", "spec")
-    if not spec_dir.exists():
-        return {}
+    if dry_run:
+        return 0

-
-    spec_config = repo_config.get("spec", {})
-    no_reference_values = spec_config.get("no_reference_values")
-    parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
-    skip_files = spec_config.get("skip_files", [])
+    from elspais.mcp.file_mutations import add_status_to_file, update_hash_in_file

-
-
-
-
+    fixed = 0
+    for issue in fixable:
+        fix_type = issue.get("fix_type")
+        file_path = issue.get("file")

+        if not file_path:
+            continue

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        if v.requirement_id not in violation_by_req:
-            violation_by_req[v.requirement_id] = []
-        violation_by_req[v.requirement_id].append(v)
-
-    output = {}
-    for req_id, req in requirements.items():
-        req_violations = violation_by_req.get(req_id, [])
-
-        # Check for specific violation types
-        is_cycle = any("cycle" in v.rule_name.lower() for v in req_violations)
-
-        # Use the model's is_conflict flag directly, or check violations for older behavior
-        is_conflict = req.is_conflict or any(
-            "conflict" in v.rule_name.lower() or "duplicate" in v.rule_name.lower()
-            for v in req_violations
-        )
-        conflict_with = req.conflict_with if req.conflict_with else None
-        cycle_path = None
-
-        # Also check violations for additional context
-        for v in req_violations:
-            if "duplicate" in v.rule_name.lower() and not conflict_with:
-                # Try to extract conflicting ID from message
-                conflict_with = v.message
-            if "cycle" in v.rule_name.lower():
-                cycle_path = v.message
-
-        # Build requirement data matching hht_diary format
-        # Use the original ID (strip __conflict suffix) for output key
-        output_key = req_id.replace("__conflict", "") if req.is_conflict else req_id
-        output[req_id] = {
-            "title": req.title,
-            "status": req.status,
-            "level": req.level,
-            "body": req.body.strip(),
-            "rationale": (req.rationale or "").strip(),
-            "file": req.file_path.name if req.file_path else "",
-            "filePath": str(req.file_path) if req.file_path else "",
-            "line": req.line_number or 0,
-            "implements": req.implements,
-            "hash": req.hash or "",
-            "subdir": req.subdir,
-            "isConflict": is_conflict,
-            "conflictWith": conflict_with,
-            "isCycle": is_cycle,
-            "cyclePath": cycle_path,
-        }
+        if fix_type == "hash":
+            # Fix hash (missing or mismatch)
+            success = update_hash_in_file(
+                file_path=Path(file_path),
+                req_id=issue["id"],
+                new_hash=issue["computed_hash"],
+            )
+            if success:
+                fixed += 1
+
+        elif fix_type == "status":
+            # Add missing status
+            success = add_status_to_file(
+                file_path=Path(file_path),
+                req_id=issue["id"],
+                status=issue.get("status", "Active"),
+            )
+            if success:
+                fixed += 1

-
-        if req.assertions:
-            output[req_id]["assertions"] = [
-                {"label": a.label, "text": a.text, "isPlaceholder": a.is_placeholder}
-                for a in req.assertions
-            ]
-
-        # Include test data if available
-        if test_data and req_id in test_data.requirement_data:
-            td = test_data.requirement_data[req_id]
-            output[req_id]["test_count"] = td.test_count
-            output[req_id]["test_passed"] = td.test_passed
-            output[req_id]["test_result_files"] = td.test_result_files
-        else:
-            # Default values when no test data
-            output[req_id]["test_count"] = 0
-            output[req_id]["test_passed"] = 0
-            output[req_id]["test_result_files"] = []
-
-    return json.dumps(output, indent=2)
+    return fixed
```