elspais-0.9.1-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -0,0 +1,388 @@
+ """
+ elspais.commands.validate - Validate requirements command.
+
+ Validates requirements format, links, and hashes.
+ """
+
+ import argparse
+ import json
+ import sys
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ from elspais.config.defaults import DEFAULT_CONFIG
+ from elspais.config.loader import find_config_file, get_spec_directories, load_config
+ from elspais.core.hasher import calculate_hash, verify_hash
+ from elspais.core.models import Requirement
+ from elspais.core.parser import RequirementParser
+ from elspais.core.patterns import PatternConfig
+ from elspais.core.rules import RuleEngine, RulesConfig, RuleViolation, Severity
+ from elspais.testing.config import TestingConfig
+
+
+ def run(args: argparse.Namespace) -> int:
+     """
+     Run the validate command.
+
+     Args:
+         args: Parsed command line arguments
+
+     Returns:
+         Exit code (0 for success, 1 for validation errors)
+     """
+     # Find and load configuration
+     config = load_configuration(args)
+     if config is None:
+         return 1
+
+     # Determine spec directories (can be a string or a list)
+     spec_dirs = get_spec_directories(args.spec_dir, config)
+     if not spec_dirs:
+         print("Error: No spec directories found", file=sys.stderr)
+         return 1
+
+     if not args.quiet:
+         if len(spec_dirs) == 1:
+             print(f"Validating requirements in: {spec_dirs[0]}")
+         else:
+             print(f"Validating requirements in: {', '.join(str(d) for d in spec_dirs)}")
+
+     # Parse requirements
+     pattern_config = PatternConfig.from_dict(config.get("patterns", {}))
+     spec_config = config.get("spec", {})
+     no_reference_values = spec_config.get("no_reference_values")
+     parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
+     skip_files = spec_config.get("skip_files", [])
+
+     try:
+         requirements = parser.parse_directories(spec_dirs, skip_files=skip_files)
+     except Exception as e:
+         print(f"Error parsing requirements: {e}", file=sys.stderr)
+         return 1
+
+     if not requirements:
+         print("No requirements found.", file=sys.stderr)
+         return 1
+
+     if not args.quiet:
+         print(f"Found {len(requirements)} requirements")
+
+     # Run rule-based validation
+     rules_config = RulesConfig.from_dict(config.get("rules", {}))
+     engine = RuleEngine(rules_config)
+
+     violations = engine.validate(requirements)
+
+     # Add hash validation
+     hash_violations = validate_hashes(requirements, config)
+     violations.extend(hash_violations)
+
+     # Add broken-link validation
+     link_violations = validate_links(requirements, args, config)
+     violations.extend(link_violations)
+
+     # Filter skipped rules
+     if args.skip_rule:
+         violations = [
+             v for v in violations
+             if not any(skip in v.rule_name for skip in args.skip_rule)
+         ]
+
+     # JSON output mode - print and exit
+     if getattr(args, "json", False):
+         # Test mapping (if enabled)
+         test_data = None
+         testing_config = TestingConfig.from_dict(config.get("testing", {}))
+         if should_scan_tests(args, testing_config):
+             from elspais.testing.mapper import TestMapper
+
+             base_path = find_project_root(spec_dirs)
+             ignore_dirs = config.get("directories", {}).get("ignore", [])
+             mapper = TestMapper(testing_config)
+             test_data = mapper.map_tests(
+                 requirement_ids=set(requirements.keys()),
+                 base_path=base_path,
+                 ignore=ignore_dirs,
+             )
+
+         print(format_requirements_json(requirements, violations, test_data))
+         errors = [v for v in violations if v.severity == Severity.ERROR]
+         return 1 if errors else 0
+
+     # Report results
+     errors = [v for v in violations if v.severity == Severity.ERROR]
+     warnings = [v for v in violations if v.severity == Severity.WARNING]
+     infos = [v for v in violations if v.severity == Severity.INFO]
+
+     if violations and not args.quiet:
+         print()
+         for violation in sorted(violations, key=lambda v: (v.severity.value, v.requirement_id)):
+             print(violation)
+         print()
+
+     # Summary
+     if not args.quiet:
+         print("─" * 60)
+         valid_count = len(requirements) - len({v.requirement_id for v in errors})
+         print(f"✓ {valid_count}/{len(requirements)} requirements valid")
+
+         if errors:
+             print(f"❌ {len(errors)} errors")
+         if warnings:
+             print(f"⚠️ {len(warnings)} warnings")
+         if infos and getattr(args, "verbose", False):
+             print(f"ℹ️ {len(infos)} info")
+
+     # Exit non-zero if any errors were found
+     if errors:
+         return 1
+
+     if not args.quiet and not violations:
+         print("✓ All requirements valid")
+
+     return 0
+
+
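For orientation, here is a minimal sketch of driving `run()` directly. The real CLI presumably builds this namespace via `argparse`; the attribute names below are just the ones `run()` and its helpers read (`config`, `spec_dir`, `quiet`, `skip_rule`, `json`, `core_repo`, `tests`, `no_tests`), and the values are illustrative.

```python
# Illustrative only: a hand-built namespace standing in for the CLI's
# argparse output. Attribute names mirror what run() and its helpers read.
import argparse
from pathlib import Path

from elspais.commands import validate

args = argparse.Namespace(
    config=None,            # let load_configuration() discover a config file
    spec_dir=Path("spec"),  # hypothetical spec directory
    quiet=False,
    skip_rule=[],
    json=False,
    core_repo=None,
    tests=False,
    no_tests=False,
)
raise SystemExit(validate.run(args))
```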
+ def load_configuration(args: argparse.Namespace) -> Optional[Dict]:
+     """Load configuration from file or use defaults."""
+     if args.config:
+         config_path = args.config
+     else:
+         config_path = find_config_file(Path.cwd())
+
+     if config_path and config_path.exists():
+         try:
+             return load_config(config_path)
+         except Exception as e:
+             print(f"Error loading config: {e}", file=sys.stderr)
+             return None
+     else:
+         # Use defaults
+         return DEFAULT_CONFIG
+
+
+ def should_scan_tests(args: argparse.Namespace, config: TestingConfig) -> bool:
+     """
+     Determine if test scanning should run based on args and config.
+
+     Args:
+         args: Command line arguments
+         config: Testing configuration
+
+     Returns:
+         True if test scanning should run
+     """
+     if getattr(args, "no_tests", False):
+         return False
+     if getattr(args, "tests", False):
+         return True
+     return config.enabled
+
+
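A quick illustration of the precedence this encodes: an explicit `no_tests` wins over `tests`, and both override the config default. (The attributes correspond to what would presumably be `--tests`/`--no-tests` flags.)

```python
ns = argparse.Namespace(tests=True, no_tests=True)
should_scan_tests(ns, TestingConfig.from_dict({"enabled": True}))   # False

ns = argparse.Namespace(tests=True, no_tests=False)
should_scan_tests(ns, TestingConfig.from_dict({"enabled": False}))  # True

ns = argparse.Namespace(tests=False, no_tests=False)
should_scan_tests(ns, TestingConfig.from_dict({"enabled": False}))  # False
```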
+ def find_project_root(spec_dirs: List[Path]) -> Path:
+     """
+     Find the project root from spec directories.
+
+     Looks for .elspais.toml or .git directory above spec dirs.
+
+     Args:
+         spec_dirs: List of spec directories
+
+     Returns:
+         Project root path
+     """
+     if not spec_dirs:
+         return Path.cwd()
+
+     # Start from first spec dir and look upward
+     current = spec_dirs[0].resolve()
+     while current != current.parent:
+         if (current / ".elspais.toml").exists():
+             return current
+         if (current / ".git").exists():
+             return current
+         current = current.parent
+
+     return Path.cwd()
+
+
+ def validate_hashes(requirements: Dict[str, Requirement], config: Dict) -> List[RuleViolation]:
+     """Validate requirement hashes."""
+     violations = []
+     hash_length = config.get("validation", {}).get("hash_length", 8)
+     algorithm = config.get("validation", {}).get("hash_algorithm", "sha256")
+
+     for req_id, req in requirements.items():
+         if req.hash:
+             # Verify hash matches content
+             expected_hash = calculate_hash(req.body, length=hash_length, algorithm=algorithm)
+             if not verify_hash(req.body, req.hash, length=hash_length, algorithm=algorithm):
+                 violations.append(
+                     RuleViolation(
+                         rule_name="hash.mismatch",
+                         requirement_id=req_id,
+                         message=f"Hash mismatch: expected {expected_hash}, found {req.hash}",
+                         severity=Severity.WARNING,
+                         location=req.location(),
+                     )
+                 )
+
+     return violations
+
+
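`calculate_hash()` and `verify_hash()` are not part of this diff, so their exact behavior is an assumption. Given the defaults later in this diff (`sha256`, `hash_length` 8), a standalone equivalent could look like this truncated-hex-digest sketch:

```python
import hashlib

def content_hash(body: str, length: int = 8, algorithm: str = "sha256") -> str:
    # Assumed behavior: hash the UTF-8 body and keep the first `length` hex
    # characters. The real hasher may normalize whitespace first (see the
    # normalize_whitespace option in the defaults).
    return hashlib.new(algorithm, body.encode("utf-8")).hexdigest()[:length]

def hash_matches(body: str, stored: str, length: int = 8, algorithm: str = "sha256") -> bool:
    # Assumed verify: recompute and compare, as validate_hashes() implies.
    return content_hash(body, length, algorithm) == stored
```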
+ def validate_links(
+     requirements: Dict[str, Requirement],
+     args: argparse.Namespace,
+     config: Dict,
+ ) -> List[RuleViolation]:
+     """Validate requirement links (implements references)."""
+     violations = []
+
+     # Load core requirements if this is an associated repo
+     core_requirements = {}
+     if args.core_repo:
+         core_requirements = load_core_requirements(args.core_repo, config)
+
+     all_requirements = {**core_requirements, **requirements}
+     all_ids = set(all_requirements.keys())
+
+     # Build the set of all valid short IDs too
+     short_ids = set()
+     for req_id in all_ids:
+         # Add shortened forms: REQ-p00001 -> p00001;
+         # REQ-CAL-p00001 -> CAL-p00001 and p00001
+         parts = req_id.split("-")
+         if len(parts) >= 2:
+             short_ids.add("-".join(parts[1:]))
+         if len(parts) >= 3:
+             short_ids.add("-".join(parts[2:]))
+
+     for req_id, req in requirements.items():
+         for impl_id in req.implements:
+             # Check if the reference resolves to a known full or short ID
+             if impl_id not in all_ids and impl_id not in short_ids:
+                 violations.append(
+                     RuleViolation(
+                         rule_name="link.broken",
+                         requirement_id=req_id,
+                         message=f"Implements reference not found: {impl_id}",
+                         severity=Severity.ERROR,
+                         location=req.location(),
+                     )
+                 )
+
+     return violations
+
+
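A worked example of the shortened forms this accepts, using IDs in the default `{prefix}-{associated}{type}{id}` shape (e.g. `REQ-CAL-p00001`: prefix `REQ`, associated-repo code `CAL`, type `p` for prd, five-digit ID):

```python
for full_id in ("REQ-p00001", "REQ-CAL-p00001"):
    parts = full_id.split("-")
    forms = {full_id}
    if len(parts) >= 2:
        forms.add("-".join(parts[1:]))  # drop the REQ prefix
    if len(parts) >= 3:
        forms.add("-".join(parts[2:]))  # drop prefix and associated code
    print(full_id, "->", sorted(forms))

# REQ-p00001 -> ['REQ-p00001', 'p00001']
# REQ-CAL-p00001 -> ['CAL-p00001', 'REQ-CAL-p00001', 'p00001']
```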
+ def load_core_requirements(core_path: Path, config: Dict) -> Dict[str, Requirement]:
+     """Load requirements from core repository."""
+     if not core_path.exists():
+         return {}
+
+     # Find core config
+     core_config_path = core_path / ".elspais.toml"
+     if core_config_path.exists():
+         core_config = load_config(core_config_path)
+     else:
+         core_config = config  # Use same config
+
+     spec_dir = core_path / core_config.get("directories", {}).get("spec", "spec")
+     if not spec_dir.exists():
+         return {}
+
+     pattern_config = PatternConfig.from_dict(core_config.get("patterns", {}))
+     spec_config = core_config.get("spec", {})
+     no_reference_values = spec_config.get("no_reference_values")
+     parser = RequirementParser(pattern_config, no_reference_values=no_reference_values)
+     skip_files = spec_config.get("skip_files", [])
+
+     try:
+         return parser.parse_directory(spec_dir, skip_files=skip_files)
+     except Exception:
+         return {}
+
+
+ def format_requirements_json(
+     requirements: Dict[str, Requirement],
+     violations: List[RuleViolation],
+     test_data: Optional[Any] = None,
+ ) -> str:
+     """
+     Format requirements as JSON in hht_diary compatible format.
+
+     Args:
+         requirements: Dictionary of requirement ID to Requirement
+         violations: List of rule violations for error metadata
+         test_data: Optional TestMappingResult with test coverage data
+
+     Returns:
+         JSON string with requirement data
+     """
+     # Build violation lookup for cycle/conflict detection
+     violation_by_req: Dict[str, List[RuleViolation]] = {}
+     for v in violations:
+         if v.requirement_id not in violation_by_req:
+             violation_by_req[v.requirement_id] = []
+         violation_by_req[v.requirement_id].append(v)
+
+     output = {}
+     for req_id, req in requirements.items():
+         req_violations = violation_by_req.get(req_id, [])
+
+         # Check for specific violation types
+         is_cycle = any("cycle" in v.rule_name.lower() for v in req_violations)
+         is_conflict = any(
+             "conflict" in v.rule_name.lower() or "duplicate" in v.rule_name.lower()
+             for v in req_violations
+         )
+         conflict_with = None
+         cycle_path = None
+
+         for v in req_violations:
+             if "duplicate" in v.rule_name.lower():
+                 # Try to extract the conflicting ID from the message
+                 conflict_with = v.message
+             if "cycle" in v.rule_name.lower():
+                 cycle_path = v.message
+
+         # Build requirement data matching the hht_diary format
+         output[req_id] = {
+             "title": req.title,
+             "status": req.status,
+             "level": req.level,
+             "body": req.body.strip(),
+             "rationale": (req.rationale or "").strip(),
+             "file": req.file_path.name if req.file_path else "",
+             "filePath": str(req.file_path) if req.file_path else "",
+             "line": req.line_number or 0,
+             "implements": req.implements,
+             "hash": req.hash or "",
+             "subdir": req.subdir,
+             "isConflict": is_conflict,
+             "conflictWith": conflict_with,
+             "isCycle": is_cycle,
+             "cyclePath": cycle_path,
+         }
+
+         # Include assertions if present
+         if req.assertions:
+             output[req_id]["assertions"] = [
+                 {"label": a.label, "text": a.text, "isPlaceholder": a.is_placeholder}
+                 for a in req.assertions
+             ]
+
+         # Include test data if available
+         if test_data and req_id in test_data.requirement_data:
+             td = test_data.requirement_data[req_id]
+             output[req_id]["test_count"] = td.test_count
+             output[req_id]["test_passed"] = td.test_passed
+             output[req_id]["test_result_files"] = td.test_result_files
+         else:
+             # Default values when no test data
+             output[req_id]["test_count"] = 0
+             output[req_id]["test_passed"] = 0
+             output[req_id]["test_result_files"] = []
+
+     return json.dumps(output, indent=2)
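To make the output shape concrete, here is what a single entry of the dict handed to `json.dumps` could look like. The keys are exactly those built above; every value is invented for illustration.

```python
# Hypothetical record - all values are made up; only the keys are real.
{
    "REQ-CAL-d00042": {
        "title": "Sync failures are retried",
        "status": "Active",
        "level": 3,
        "body": "The app shall retry failed diary uploads.",
        "rationale": "",
        "file": "dev-sync.md",
        "filePath": "spec/dev-sync.md",
        "line": 12,
        "implements": ["REQ-CAL-o00007"],
        "hash": "1a2b3c4d",
        "subdir": "",
        "isConflict": False,
        "conflictWith": None,
        "isCycle": False,
        "cyclePath": None,
        "assertions": [
            {"label": "A", "text": "Uploads retry at most three times.", "isPlaceholder": False},
        ],
        "test_count": 2,
        "test_passed": 2,
        "test_result_files": ["build-reports/pytest-results.json"],
    },
}
```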
@@ -0,0 +1,13 @@
+ """
+ elspais.config - Configuration loading and defaults
+ """
+
+ from elspais.config.defaults import DEFAULT_CONFIG
+ from elspais.config.loader import find_config_file, load_config, merge_configs
+
+ __all__ = [
+     "load_config",
+     "find_config_file",
+     "merge_configs",
+     "DEFAULT_CONFIG",
+ ]
@@ -0,0 +1,173 @@
+ """
+ elspais.config.defaults - Default configuration values.
+
+ Provides built-in defaults matching the HHT-diary repository structure.
+ """
+
+ DEFAULT_CONFIG = {
+     "project": {
+         "name": "",
+         "type": "core",
+     },
+     "directories": {
+         "spec": "spec",
+         "docs": "docs",
+         "database": "database",
+         "code": ["apps", "packages", "server", "tools"],
+         "ignore": [
+             "node_modules",
+             ".git",
+             "build",
+             "dist",
+             ".dart_tool",
+             "__pycache__",
+             ".venv",
+             "venv",
+         ],
+     },
+     "patterns": {
+         "id_template": "{prefix}-{associated}{type}{id}",
+         "prefix": "REQ",
+         "types": {
+             "prd": {"id": "p", "name": "Product Requirement", "level": 1},
+             "ops": {"id": "o", "name": "Operations Requirement", "level": 2},
+             "dev": {"id": "d", "name": "Development Requirement", "level": 3},
+         },
+         "id_format": {
+             "style": "numeric",
+             "digits": 5,
+             "leading_zeros": True,
+         },
+         "associated": {
+             "enabled": True,
+             "position": "after_prefix",
+             "format": "uppercase",
+             "length": 3,
+             "separator": "-",
+         },
+         "assertions": {
+             "label_style": "uppercase",  # uppercase | numeric | alphanumeric | numeric_1based
+             "max_count": 26,
+             "zero_pad": False,
+         },
+     },
+     "spec": {
+         "index_file": "INDEX.md",
+         "readme_file": "README.md",
+         "format_guide": "requirements-format.md",
+         "skip_files": ["README.md", "requirements-format.md", "INDEX.md"],
+         "file_patterns": {
+             "prd-*.md": "prd",
+             "ops-*.md": "ops",
+             "dev-*.md": "dev",
+         },
+         # Values in the Implements field that mean "no references"
+         "no_reference_values": ["-", "null", "none", "x", "X", "N/A", "n/a"],
+     },
+     "core": {
+         "path": None,
+         "remote": None,
+     },
+     "associated": {
+         "prefix": None,
+         "id_range": [1, 99999],
+     },
+     "rules": {
+         "hierarchy": {
+             "allowed_implements": [
+                 "dev -> ops, prd",
+                 "ops -> prd",
+                 "prd -> prd",
+             ],
+             "allow_circular": False,
+             "allow_orphans": False,
+             "max_depth": 5,
+             "cross_repo_implements": True,
+         },
+         "format": {
+             "require_hash": True,
+             "require_rationale": False,
+             "require_status": True,
+             "allowed_statuses": ["Active", "Draft", "Deprecated", "Superseded"],
+             # Assertion format rules
+             "require_assertions": True,
+             "acceptance_criteria": "warn",  # allow | warn | error
+             "require_shall": True,
+             "labels_sequential": True,
+             "labels_unique": True,
+             "placeholder_values": [
+                 "obsolete", "removed", "deprecated", "N/A", "n/a", "-", "reserved"
+             ],
+         },
+         "traceability": {
+             "require_code_link": False,
+             "scan_for_orphans": True,
+         },
+         "naming": {
+             "title_min_length": 10,
+             "title_max_length": 100,
+             "title_pattern": "^[A-Z].*",
+         },
+         "content_rules": [],  # List of content rule markdown file paths
+     },
+     "validation": {
+         "strict_hierarchy": True,
+         "hash_algorithm": "sha256",
+         "hash_length": 8,
+         "normalize_whitespace": False,  # If True, normalize whitespace before hashing
+     },
+     "traceability": {
+         "output_formats": ["markdown", "html"],
+         "output_dir": ".",
+         "scan_patterns": [
+             "database/**/*.sql",
+             "apps/**/*.dart",
+             "packages/**/*.dart",
+             "server/**/*.dart",
+             "tools/**/*.py",
+             ".github/workflows/**/*.yml",
+         ],
+         "impl_patterns": [
+             r"IMPLEMENTS.*REQ-",
+             r"Implements:\s*REQ-",
+             r"Fixes:\s*REQ-",
+         ],
+     },
+     "index": {
+         "auto_regenerate": False,
+     },
+     "testing": {
+         "enabled": False,
+         "test_dirs": [
+             "apps/**/test",
+             "apps/**/tests",
+             "packages/**/test",
+             "packages/**/tests",
+             "tools/**/tests",
+             "tests",
+         ],
+         "patterns": [
+             "*_test.dart",
+             "test_*.dart",
+             "test_*.py",
+             "*_test.py",
+             "*_test.sql",
+         ],
+         "result_files": [
+             "build-reports/**/TEST-*.xml",
+             "build-reports/pytest-results.json",
+         ],
+         "reference_patterns": [
+             # Test function names containing requirement IDs
+             r"test_.*(?:REQ[-_])?([pod]\d{5})(?:_[A-Z])?",
+             # Comment/docstring patterns
+             r"(?:IMPLEMENTS|Implements|implements)[:\s]+(?:REQ[-_])?([pod]\d{5})(?:-[A-Z])?",
+             # Direct requirement ID mentions
+             r"\bREQ[-_]([pod]\d{5})(?:-[A-Z])?\b",
+         ],
+     },
+     "hooks": {
+         "pre_commit": True,
+         "commit_msg": True,
+     },
+ }
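A sketch of how a project `.elspais.toml` could override these defaults. `merge_configs()` is exported by `elspais.config`, but its semantics are not shown in this diff, so a plain recursive dict merge stands in for it here; the override keys are taken from the defaults above, and the values are illustrative.

```python
import tomllib  # Python 3.11+

from elspais.config.defaults import DEFAULT_CONFIG

overrides = tomllib.loads("""
[project]
name = "hht-diary"

[rules.format]
require_rationale = true

[testing]
enabled = true
""")

def deep_merge(base: dict, extra: dict) -> dict:
    # Stand-in for merge_configs(): override leaves, recurse into tables.
    merged = dict(base)
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

config = deep_merge(DEFAULT_CONFIG, overrides)
assert config["rules"]["format"]["require_rationale"] is True
```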