elspais 0.11.1__py3-none-any.whl → 0.43.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- elspais/__init__.py +2 -11
- elspais/{sponsors/__init__.py → associates.py} +102 -58
- elspais/cli.py +395 -79
- elspais/commands/__init__.py +9 -3
- elspais/commands/analyze.py +121 -173
- elspais/commands/changed.py +15 -30
- elspais/commands/config_cmd.py +13 -16
- elspais/commands/edit.py +60 -44
- elspais/commands/example_cmd.py +319 -0
- elspais/commands/hash_cmd.py +167 -183
- elspais/commands/health.py +1177 -0
- elspais/commands/index.py +98 -114
- elspais/commands/init.py +103 -26
- elspais/commands/reformat_cmd.py +41 -444
- elspais/commands/rules_cmd.py +7 -3
- elspais/commands/trace.py +444 -321
- elspais/commands/validate.py +195 -415
- elspais/config/__init__.py +799 -5
- elspais/{core/content_rules.py → content_rules.py} +20 -3
- elspais/docs/cli/assertions.md +67 -0
- elspais/docs/cli/commands.md +304 -0
- elspais/docs/cli/config.md +262 -0
- elspais/docs/cli/format.md +66 -0
- elspais/docs/cli/git.md +45 -0
- elspais/docs/cli/health.md +190 -0
- elspais/docs/cli/hierarchy.md +60 -0
- elspais/docs/cli/ignore.md +72 -0
- elspais/docs/cli/mcp.md +245 -0
- elspais/docs/cli/quickstart.md +58 -0
- elspais/docs/cli/traceability.md +89 -0
- elspais/docs/cli/validation.md +96 -0
- elspais/graph/GraphNode.py +383 -0
- elspais/graph/__init__.py +40 -0
- elspais/graph/annotators.py +927 -0
- elspais/graph/builder.py +1886 -0
- elspais/graph/deserializer.py +248 -0
- elspais/graph/factory.py +284 -0
- elspais/graph/metrics.py +127 -0
- elspais/graph/mutations.py +161 -0
- elspais/graph/parsers/__init__.py +156 -0
- elspais/graph/parsers/code.py +213 -0
- elspais/graph/parsers/comments.py +112 -0
- elspais/graph/parsers/config_helpers.py +29 -0
- elspais/graph/parsers/heredocs.py +225 -0
- elspais/graph/parsers/journey.py +131 -0
- elspais/graph/parsers/remainder.py +79 -0
- elspais/graph/parsers/requirement.py +347 -0
- elspais/graph/parsers/results/__init__.py +6 -0
- elspais/graph/parsers/results/junit_xml.py +229 -0
- elspais/graph/parsers/results/pytest_json.py +313 -0
- elspais/graph/parsers/test.py +305 -0
- elspais/graph/relations.py +78 -0
- elspais/graph/serialize.py +216 -0
- elspais/html/__init__.py +8 -0
- elspais/html/generator.py +731 -0
- elspais/html/templates/trace_view.html.j2 +2151 -0
- elspais/mcp/__init__.py +47 -29
- elspais/mcp/__main__.py +5 -1
- elspais/mcp/file_mutations.py +138 -0
- elspais/mcp/server.py +2016 -247
- elspais/testing/__init__.py +4 -4
- elspais/testing/config.py +3 -0
- elspais/testing/mapper.py +1 -1
- elspais/testing/result_parser.py +25 -21
- elspais/testing/scanner.py +301 -12
- elspais/utilities/__init__.py +1 -0
- elspais/utilities/docs_loader.py +115 -0
- elspais/utilities/git.py +607 -0
- elspais/{core → utilities}/hasher.py +8 -22
- elspais/utilities/md_renderer.py +189 -0
- elspais/{core → utilities}/patterns.py +58 -57
- elspais/utilities/reference_config.py +626 -0
- elspais/validation/__init__.py +19 -0
- elspais/validation/format.py +264 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
- elspais-0.43.5.dist-info/RECORD +80 -0
- elspais/config/defaults.py +0 -173
- elspais/config/loader.py +0 -494
- elspais/core/__init__.py +0 -21
- elspais/core/git.py +0 -352
- elspais/core/models.py +0 -320
- elspais/core/parser.py +0 -640
- elspais/core/rules.py +0 -514
- elspais/mcp/context.py +0 -171
- elspais/mcp/serializers.py +0 -112
- elspais/reformat/__init__.py +0 -50
- elspais/reformat/detector.py +0 -119
- elspais/reformat/hierarchy.py +0 -246
- elspais/reformat/line_breaks.py +0 -220
- elspais/reformat/prompts.py +0 -123
- elspais/reformat/transformer.py +0 -264
- elspais/trace_view/__init__.py +0 -54
- elspais/trace_view/coverage.py +0 -183
- elspais/trace_view/generators/__init__.py +0 -12
- elspais/trace_view/generators/base.py +0 -329
- elspais/trace_view/generators/csv.py +0 -122
- elspais/trace_view/generators/markdown.py +0 -175
- elspais/trace_view/html/__init__.py +0 -31
- elspais/trace_view/html/generator.py +0 -1006
- elspais/trace_view/html/templates/base.html +0 -283
- elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
- elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
- elspais/trace_view/html/templates/components/legend_modal.html +0 -69
- elspais/trace_view/html/templates/components/review_panel.html +0 -118
- elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
- elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
- elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
- elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
- elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
- elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
- elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
- elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
- elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
- elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
- elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
- elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
- elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
- elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
- elspais/trace_view/html/templates/partials/scripts.js +0 -1741
- elspais/trace_view/html/templates/partials/styles.css +0 -1756
- elspais/trace_view/models.py +0 -353
- elspais/trace_view/review/__init__.py +0 -60
- elspais/trace_view/review/branches.py +0 -1149
- elspais/trace_view/review/models.py +0 -1205
- elspais/trace_view/review/position.py +0 -609
- elspais/trace_view/review/server.py +0 -1056
- elspais/trace_view/review/status.py +0 -470
- elspais/trace_view/review/storage.py +0 -1367
- elspais/trace_view/scanning.py +0 -213
- elspais/trace_view/specs/README.md +0 -84
- elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
- elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
- elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
- elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
- elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
- elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
- elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
- elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
- elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
- elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
- elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
- elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
- elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
- elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
- elspais-0.11.1.dist-info/RECORD +0 -101
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
- {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
elspais/commands/health.py (new file, +1177 lines)

```diff
@@ -0,0 +1,1177 @@
+# Implements: REQ-int-d00003 (CLI Extension)
+"""
+elspais.commands.health - Diagnose configuration and repository health.
+
+Provides comprehensive health checks for:
+- Config: TOML syntax, required fields, valid paths
+- Spec: File parsing, duplicate IDs, reference resolution
+- Code: Code→REQ reference validation
+- Tests: Test→REQ mapping validation
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Iterator
+
+if TYPE_CHECKING:
+    from elspais.config import ConfigLoader
+    from elspais.graph.builder import TraceGraph
+
+
+@dataclass
+class HealthCheck:
+    """Result of a single health check."""
+
+    name: str
+    passed: bool
+    message: str
+    category: str  # config, spec, code, tests
+    severity: str = "error"  # error, warning, info
+    details: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class HealthReport:
+    """Aggregated health check results."""
+
+    checks: list[HealthCheck] = field(default_factory=list)
+
+    @property
+    def passed(self) -> int:
+        return sum(1 for c in self.checks if c.passed)
+
+    @property
+    def failed(self) -> int:
+        return sum(1 for c in self.checks if not c.passed and c.severity == "error")
+
+    @property
+    def warnings(self) -> int:
+        return sum(1 for c in self.checks if not c.passed and c.severity == "warning")
+
+    @property
+    def is_healthy(self) -> bool:
+        return self.failed == 0
+
+    def add(self, check: HealthCheck) -> None:
+        self.checks.append(check)
+
+    def iter_by_category(self, category: str) -> Iterator[HealthCheck]:
+        for check in self.checks:
+            if check.category == category:
+                yield check
+
+    def to_dict(self) -> dict[str, Any]:
+        return {
+            "healthy": self.is_healthy,
+            "summary": {
+                "passed": self.passed,
+                "failed": self.failed,
+                "warnings": self.warnings,
+            },
+            "checks": [
+                {
+                    "name": c.name,
+                    "passed": c.passed,
+                    "message": c.message,
+                    "category": c.category,
+                    "severity": c.severity,
+                    "details": c.details,
+                }
+                for c in self.checks
+            ],
+        }
+
+
+# =============================================================================
+# Config Checks
+# =============================================================================
+
+
+def check_config_exists(config_path: Path | None, start_path: Path) -> HealthCheck:
+    """Check if config file exists and is accessible."""
+    from elspais.config import find_config_file
+
+    if config_path and config_path.exists():
+        return HealthCheck(
+            name="config.exists",
+            passed=True,
+            message=f"Config file found: {config_path}",
+            category="config",
+            details={"path": str(config_path)},
+        )
+
+    # Try auto-discovery
+    found = find_config_file(start_path)
+    if found:
+        return HealthCheck(
+            name="config.exists",
+            passed=True,
+            message=f"Config file found: {found}",
+            category="config",
+            details={"path": str(found)},
+        )
+
+    return HealthCheck(
+        name="config.exists",
+        passed=True,  # Using defaults is valid
+        message="No config file found, using defaults",
+        category="config",
+        severity="info",
+    )
+
+
+def check_config_syntax(config_path: Path | None, start_path: Path) -> HealthCheck:
+    """Check if config file has valid TOML syntax."""
+    from elspais.config import find_config_file
+
+    actual_path = (
+        config_path if config_path and config_path.exists() else find_config_file(start_path)
+    )
+
+    if not actual_path:
+        return HealthCheck(
+            name="config.syntax",
+            passed=True,
+            message="No config file to validate (using defaults)",
+            category="config",
+            severity="info",
+        )
+
+    try:
+        content = actual_path.read_text(encoding="utf-8")
+        # Validate TOML syntax using the public parser
+        from elspais.config import parse_toml
+
+        parse_toml(content)
+        return HealthCheck(
+            name="config.syntax",
+            passed=True,
+            message="TOML syntax is valid",
+            category="config",
+        )
+    except Exception as e:
+        return HealthCheck(
+            name="config.syntax",
+            passed=False,
+            message=f"TOML syntax error: {e}",
+            category="config",
+            details={"error": str(e), "path": str(actual_path)},
+        )
+
+
+def check_config_required_fields(config: ConfigLoader) -> HealthCheck:
+    """Check that required configuration sections exist."""
+    raw = config.get_raw()
+    missing = []
+
+    # Check for patterns section with types
+    patterns = raw.get("patterns", {})
+    if not patterns.get("types"):
+        missing.append("patterns.types")
+
+    # Check for spec directories
+    spec = raw.get("spec", {})
+    if not spec.get("directories"):
+        missing.append("spec.directories")
+
+    # Check for hierarchy rules
+    rules = raw.get("rules", {})
+    if not rules.get("hierarchy"):
+        missing.append("rules.hierarchy")
+
+    if missing:
+        return HealthCheck(
+            name="config.required_fields",
+            passed=False,
+            message=f"Missing required fields: {', '.join(missing)}",
+            category="config",
+            severity="warning",
+            details={"missing": missing},
+        )
+
+    return HealthCheck(
+        name="config.required_fields",
+        passed=True,
+        message="All required configuration fields present",
+        category="config",
+    )
+
+
+def check_config_pattern_tokens(config: ConfigLoader) -> HealthCheck:
+    """Validate that pattern template uses valid tokens."""
+    template = config.get("patterns.id_template", "")
+    valid_tokens = {"{prefix}", "{type}", "{id}", "{associated}"}
+
+    # Find all tokens in template
+    import re
+
+    found_tokens = set(re.findall(r"\{[^}]+\}", template))
+
+    invalid = found_tokens - valid_tokens
+    if invalid:
+        return HealthCheck(
+            name="config.pattern_tokens",
+            passed=False,
+            message=f"Invalid pattern tokens: {', '.join(invalid)}",
+            category="config",
+            details={"invalid_tokens": list(invalid), "valid_tokens": list(valid_tokens)},
+        )
+
+    # Check that essential tokens are present
+    required = {"{prefix}", "{id}"}
+    missing = required - found_tokens
+    if missing:
+        return HealthCheck(
+            name="config.pattern_tokens",
+            passed=False,
+            message=f"Missing required tokens: {', '.join(missing)}",
+            category="config",
+            severity="warning",
+            details={"missing": list(missing)},
+        )
+
+    return HealthCheck(
+        name="config.pattern_tokens",
+        passed=True,
+        message=f"Pattern template valid: {template}",
+        category="config",
+    )
+
+
+def check_config_hierarchy_rules(config: ConfigLoader) -> HealthCheck:
+    """Validate hierarchy rules are consistent."""
+    hierarchy = config.get("rules.hierarchy", {})
+    types = config.get("patterns.types", {})
+
+    # Handle non-dict hierarchy (e.g., hierarchy = false)
+    if not isinstance(hierarchy, dict):
+        return HealthCheck(
+            name="config.hierarchy_rules",
+            passed=False,
+            message=f"rules.hierarchy must be a dict, got {type(hierarchy).__name__}",
+            category="config",
+            severity="warning",
+        )
+
+    # Handle non-dict types
+    if not isinstance(types, dict):
+        return HealthCheck(
+            name="config.hierarchy_rules",
+            passed=False,
+            message=f"patterns.types must be a dict, got {type(types).__name__}",
+            category="config",
+            severity="warning",
+        )
+
+    issues = []
+
+    # Known non-level keys in rules.hierarchy (config options, not level definitions)
+    non_level_keys = {"allowed_implements", "allow_circular", "allow_orphans", "allowed"}
+
+    for level, allowed_parents in hierarchy.items():
+        # Skip known config options that aren't level definitions
+        if level in non_level_keys:
+            continue
+
+        # Check level exists in types
+        if level not in types:
+            issues.append(f"Rule for '{level}' but type not defined")
+            continue
+
+        # Handle non-list allowed_parents
+        if not isinstance(allowed_parents, list):
+            issues.append(
+                f"Hierarchy rule for '{level}' must be a list, got {type(allowed_parents).__name__}"
+            )
+            continue
+
+        # Check allowed parents exist
+        for parent in allowed_parents:
+            if parent not in types:
+                issues.append(f"'{level}' can implement '{parent}' but '{parent}' type not defined")
+
+    if issues:
+        return HealthCheck(
+            name="config.hierarchy_rules",
+            passed=False,
+            message=f"Hierarchy issues: {'; '.join(issues)}",
+            category="config",
+            severity="warning",
+            details={"issues": issues},
+        )
+
+    return HealthCheck(
+        name="config.hierarchy_rules",
+        passed=True,
+        message=f"Hierarchy rules valid ({len(hierarchy)} levels configured)",
+        category="config",
+    )
+
+
+def check_config_paths_exist(config: ConfigLoader, start_path: Path) -> HealthCheck:
+    """Check that configured directories exist."""
+    spec_dirs = config.get("spec.directories", ["spec"])
+
+    # Handle non-list spec_dirs
+    if not isinstance(spec_dirs, list):
+        return HealthCheck(
+            name="config.paths_exist",
+            passed=False,
+            message=f"spec.directories must be a list, got {type(spec_dirs).__name__}",
+            category="config",
+            severity="warning",
+        )
+
+    missing = []
+    found = []
+
+    for spec_dir in spec_dirs:
+        full_path = start_path / spec_dir
+        if full_path.exists():
+            found.append(str(spec_dir))
+        else:
+            missing.append(str(spec_dir))
+
+    if missing:
+        return HealthCheck(
+            name="config.paths_exist",
+            passed=False,
+            message=f"Spec directories not found: {', '.join(missing)}",
+            category="config",
+            details={"missing": missing, "found": found},
+        )
+
+    return HealthCheck(
+        name="config.paths_exist",
+        passed=True,
+        message=f"All spec directories exist ({len(found)} found)",
+        category="config",
+        details={"directories": found},
+    )
+
+
+def check_config_project_type(config: ConfigLoader) -> HealthCheck:
+    """Validate project type configuration consistency.
+
+    Checks that project.type matches the presence of [core] and [associated] sections.
+    """
+    from elspais.config import validate_project_config
+
+    raw = config.get_raw()
+    errors = validate_project_config(raw)
+
+    if errors:
+        return HealthCheck(
+            name="config.project_type",
+            passed=False,
+            message=errors[0],  # First error as main message
+            category="config",
+            severity="warning",
+            details={"errors": errors},
+        )
+
+    project_type = raw.get("project", {}).get("type")
+    if project_type:
+        return HealthCheck(
+            name="config.project_type",
+            passed=True,
+            message=f"Project type '{project_type}' configuration is valid",
+            category="config",
+            details={"type": project_type},
+        )
+
+    return HealthCheck(
+        name="config.project_type",
+        passed=True,
+        message="Project type not set (using defaults)",
+        category="config",
+        severity="info",
+    )
+
+
+def run_config_checks(
+    config_path: Path | None, config: ConfigLoader, start_path: Path
+) -> list[HealthCheck]:
+    """Run all configuration health checks."""
+    return [
+        check_config_exists(config_path, start_path),
+        check_config_syntax(config_path, start_path),
+        check_config_required_fields(config),
+        check_config_project_type(config),
+        check_config_pattern_tokens(config),
+        check_config_hierarchy_rules(config),
+        check_config_paths_exist(config, start_path),
+    ]
+
+
+# =============================================================================
+# Spec Checks
+# =============================================================================
+
+
+def check_spec_files_parseable(graph: TraceGraph) -> HealthCheck:
+    """Check that all spec files were parsed without errors."""
+    from elspais.graph import NodeKind
+
+    # Count requirements found
+    req_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.REQUIREMENT))
+    assertion_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.ASSERTION))
+
+    if req_count == 0:
+        return HealthCheck(
+            name="spec.parseable",
+            passed=False,
+            message="No requirements found in spec files",
+            category="spec",
+            severity="warning",
+        )
+
+    return HealthCheck(
+        name="spec.parseable",
+        passed=True,
+        message=f"Parsed {req_count} requirements with {assertion_count} assertions",
+        category="spec",
+        details={"requirements": req_count, "assertions": assertion_count},
+    )
+
+
+def check_spec_no_duplicates(graph: TraceGraph) -> HealthCheck:
+    """Check for duplicate requirement IDs."""
+    from elspais.graph import NodeKind
+
+    seen_ids: dict[str, list[str]] = {}
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        node_id = node.id
+        source = node.get_field("source_file", "unknown")
+
+        if node_id in seen_ids:
+            seen_ids[node_id].append(source)
+        else:
+            seen_ids[node_id] = [source]
+
+    duplicates = {k: v for k, v in seen_ids.items() if len(v) > 1}
+
+    if duplicates:
+        return HealthCheck(
+            name="spec.no_duplicates",
+            passed=False,
+            message=f"Found {len(duplicates)} duplicate requirement IDs",
+            category="spec",
+            details={"duplicates": duplicates},
+        )
+
+    return HealthCheck(
+        name="spec.no_duplicates",
+        passed=True,
+        message="No duplicate requirement IDs",
+        category="spec",
+    )
+
+
+def check_spec_implements_resolve(graph: TraceGraph) -> HealthCheck:
+    """Check that all Implements references resolve to valid requirements."""
+    from elspais.graph import NodeKind
+
+    unresolved = []
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        # Get implements field
+        implements = node.get_field("implements", [])
+        for ref in implements:
+            # Try to find the referenced requirement
+            target = graph.find_by_id(ref)
+            if target is None:
+                # Check if it's an assertion reference (e.g., REQ-xxx-A)
+                if "-" in ref:
+                    parts = ref.rsplit("-", 1)
+                    if len(parts) == 2:
+                        parent_id, assertion_label = parts
+                        parent = graph.find_by_id(parent_id)
+                        if parent is not None:
+                            continue  # Assertion reference is valid
+                unresolved.append({"from": node.id, "to": ref})
+
+    if unresolved:
+        return HealthCheck(
+            name="spec.implements_resolve",
+            passed=False,
+            message=f"{len(unresolved)} unresolved Implements references",
+            category="spec",
+            severity="warning",
+            details={"unresolved": unresolved[:10]},  # Limit to first 10
+        )
+
+    return HealthCheck(
+        name="spec.implements_resolve",
+        passed=True,
+        message="All Implements references resolve",
+        category="spec",
+    )
+
+
+def check_spec_refines_resolve(graph: TraceGraph) -> HealthCheck:
+    """Check that all Refines references resolve to valid requirements."""
+    from elspais.graph import NodeKind
+
+    unresolved = []
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        refines = node.get_field("refines", [])
+        for ref in refines:
+            target = graph.find_by_id(ref)
+            if target is None:
+                # Check assertion reference
+                if "-" in ref:
+                    parts = ref.rsplit("-", 1)
+                    if len(parts) == 2:
+                        parent_id, _ = parts
+                        parent = graph.find_by_id(parent_id)
+                        if parent is not None:
+                            continue
+                unresolved.append({"from": node.id, "to": ref})
+
+    if unresolved:
+        return HealthCheck(
+            name="spec.refines_resolve",
+            passed=False,
+            message=f"{len(unresolved)} unresolved Refines references",
+            category="spec",
+            severity="warning",
+            details={"unresolved": unresolved[:10]},
+        )
+
+    return HealthCheck(
+        name="spec.refines_resolve",
+        passed=True,
+        message="All Refines references resolve",
+        category="spec",
+    )
+
+
+def _parse_hierarchy_rules(hierarchy: dict[str, Any]) -> dict[str, list[str]]:
+    """Parse hierarchy rules from config.
+
+    Expected format: { "dev": ["ops", "prd"], "prd": ["prd"] }
+
+    Returns:
+        Dict mapping child level -> list of allowed parent levels (lowercase)
+    """
+    result: dict[str, list[str]] = {}
+
+    # Filter out non-level keys
+    non_level_keys = {"allow_circular", "allow_orphans", "cross_repo_implements"}
+    for key, value in hierarchy.items():
+        if key in non_level_keys:
+            continue
+        if isinstance(value, list):
+            result[key.lower()] = [v.lower() for v in value]
+
+    return result
+
+
+def check_spec_hierarchy_levels(graph: TraceGraph, config: ConfigLoader) -> HealthCheck:
+    """Check that hierarchy levels follow configured rules."""
+    from elspais.graph import NodeKind
+
+    hierarchy = config.get("rules.hierarchy", {})
+    types = config.get("patterns.types", {})
+    strict_hierarchy = config.get("validation.strict_hierarchy", False)
+
+    # Parse hierarchy rules
+    allowed_parents_map = _parse_hierarchy_rules(hierarchy)
+
+    # Build level lookup: type_id -> level_name (lowercase)
+    # Note: level_lookup reserved for future strict hierarchy validation
+    _ = {v["id"]: k for k, v in types.items()}
+
+    violations = []
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        node_level = node.level.lower() if node.level else None
+        if not node_level:
+            continue
+
+        allowed_parents = allowed_parents_map.get(node_level, [])
+
+        for parent in node.iter_parents():
+            if parent.kind != NodeKind.REQUIREMENT:
+                continue
+            parent_level = parent.level.lower() if parent.level else None
+            if parent_level and parent_level not in allowed_parents:
+                violations.append(
+                    {
+                        "child": node.id,
+                        "child_level": node_level.upper(),
+                        "parent": parent.id,
+                        "parent_level": parent_level.upper(),
+                    }
+                )
+
+    if violations:
+        # Severity controlled by validation.strict_hierarchy config
+        if strict_hierarchy:
+            return HealthCheck(
+                name="spec.hierarchy_levels",
+                passed=False,
+                message=f"{len(violations)} hierarchy level violations",
+                category="spec",
+                severity="warning",
+                details={"violations": violations[:10]},
+            )
+        else:
+            return HealthCheck(
+                name="spec.hierarchy_levels",
+                passed=True,  # Informational when not strict
+                message=f"{len(violations)} hierarchy level deviations (strict_hierarchy=false)",
+                category="spec",
+                severity="info",
+                details={
+                    "violations": violations[:10],
+                    "hint": "Set validation.strict_hierarchy=true to enforce",
+                },
+            )
+
+    return HealthCheck(
+        name="spec.hierarchy_levels",
+        passed=True,
+        message="All requirements follow hierarchy rules",
+        category="spec",
+    )
+
+
+def check_spec_orphans(graph: TraceGraph) -> HealthCheck:
+    """Check for orphan requirements (non-PRD with no parents)."""
+    from elspais.graph import NodeKind
+
+    orphans = []
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        if node.parent_count() == 0 and node.level and node.level.upper() not in ("PRD", "P"):
+            orphans.append({"id": node.id, "level": node.level})
+
+    if orphans:
+        return HealthCheck(
+            name="spec.orphans",
+            passed=False,
+            message=f"{len(orphans)} orphan requirements (non-PRD without parents)",
+            category="spec",
+            severity="warning",
+            details={"orphans": orphans[:10]},
+        )
+
+    return HealthCheck(
+        name="spec.orphans",
+        passed=True,
+        message="No orphan requirements",
+        category="spec",
+    )
+
+
+def check_spec_format_rules(graph: TraceGraph, config: ConfigLoader) -> HealthCheck:
+    """Check that requirements comply with configured format rules."""
+    from elspais.graph import NodeKind
+    from elspais.validation.format import get_format_rules_config, validate_requirement_format
+
+    rules = get_format_rules_config(config.get_raw())
+
+    # Check if any rules are enabled
+    rules_enabled = any(
+        [
+            rules.require_hash,
+            rules.require_assertions,
+            rules.require_rationale,
+            rules.require_shall,
+            rules.require_status,
+            bool(rules.allowed_statuses),
+            rules.labels_sequential,
+            rules.labels_unique,
+        ]
+    )
+
+    if not rules_enabled:
+        return HealthCheck(
+            name="spec.format_rules",
+            passed=True,
+            message="No format rules enabled (configure in [rules.format])",
+            category="spec",
+            severity="info",
+        )
+
+    all_violations = []
+    req_count = 0
+
+    for node in graph.nodes_by_kind(NodeKind.REQUIREMENT):
+        req_count += 1
+        violations = validate_requirement_format(node, rules)
+        all_violations.extend(violations)
+
+    errors = [v for v in all_violations if v.severity == "error"]
+    warnings = [v for v in all_violations if v.severity == "warning"]
+
+    if errors:
+        return HealthCheck(
+            name="spec.format_rules",
+            passed=False,
+            message=f"{len(errors)} format error(s) in {req_count} requirements",
+            category="spec",
+            details={
+                "errors": [
+                    {"rule": v.rule, "message": v.message, "node": v.node_id} for v in errors
+                ],
+                "warnings": [
+                    {"rule": v.rule, "message": v.message, "node": v.node_id} for v in warnings
+                ],
+            },
+        )
+
+    if warnings:
+        return HealthCheck(
+            name="spec.format_rules",
+            passed=True,
+            message=f"{req_count} requirements pass format rules ({len(warnings)} warning(s))",
+            category="spec",
+            severity="warning",
+            details={
+                "warnings": [
+                    {"rule": v.rule, "message": v.message, "node": v.node_id} for v in warnings
+                ],
+            },
+        )
+
+    return HealthCheck(
+        name="spec.format_rules",
+        passed=True,
+        message=f"{req_count} requirements pass all format rules",
+        category="spec",
+    )
+
+
+def run_spec_checks(graph: TraceGraph, config: ConfigLoader) -> list[HealthCheck]:
+    """Run all spec file health checks."""
+    return [
+        check_spec_files_parseable(graph),
+        check_spec_no_duplicates(graph),
+        check_spec_implements_resolve(graph),
+        check_spec_refines_resolve(graph),
+        check_spec_hierarchy_levels(graph, config),
+        check_spec_orphans(graph),
+        check_spec_format_rules(graph, config),
+    ]
+
+
+# =============================================================================
+# Code Checks
+# =============================================================================
+
+
+def check_code_references_resolve(graph: TraceGraph) -> HealthCheck:
+    """Check that code # Implements: references resolve to valid requirements."""
+    from elspais.graph import NodeKind
+
+    code_nodes = list(graph.nodes_by_kind(NodeKind.CODE))
+
+    if not code_nodes:
+        return HealthCheck(
+            name="code.references_resolve",
+            passed=True,
+            message="No code references found (code scanning may be disabled)",
+            category="code",
+            severity="info",
+        )
+
+    unresolved = []
+    resolved_count = 0
+
+    for node in code_nodes:
+        # CODE nodes reference requirements via parents
+        has_valid_parent = False
+        for parent in node.iter_parents():
+            if parent.kind in (NodeKind.REQUIREMENT, NodeKind.ASSERTION):
+                has_valid_parent = True
+                resolved_count += 1
+                break
+
+        if not has_valid_parent:
+            implements = node.get_field("implements", [])
+            unresolved.append(
+                {
+                    "source": node.get_field("source_file", "unknown"),
+                    "line": node.get_field("line", 0),
+                    "references": implements,
+                }
+            )
+
+    if unresolved:
+        return HealthCheck(
+            name="code.references_resolve",
+            passed=False,
+            message=f"{len(unresolved)} code references don't resolve to requirements",
+            category="code",
+            severity="warning",
+            details={"unresolved": unresolved[:10], "resolved_count": resolved_count},
+        )
+
+    return HealthCheck(
+        name="code.references_resolve",
+        passed=True,
+        message=f"All {resolved_count} code references resolve to requirements",
+        category="code",
+        details={"resolved_count": resolved_count},
+    )
+
+
+def check_code_coverage(graph: TraceGraph) -> HealthCheck:
+    """Check code coverage statistics."""
+    from elspais.graph import NodeKind
+    from elspais.graph.annotators import count_with_code_refs
+
+    code_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.CODE))
+    coverage = count_with_code_refs(graph)
+
+    return HealthCheck(
+        name="code.coverage",
+        passed=True,  # Informational only
+        message=(
+            f"{coverage['with_code_refs']}/{coverage['total_requirements']} requirements "
+            f"have code references ({coverage['coverage_percent']}%)"
+        ),
+        category="code",
+        severity="info",
+        details={
+            "code_nodes": code_count,
+            "requirements_with_code": coverage["with_code_refs"],
+            "total_requirements": coverage["total_requirements"],
+            "coverage_percent": coverage["coverage_percent"],
+        },
+    )
+
+
+def run_code_checks(graph: TraceGraph) -> list[HealthCheck]:
+    """Run all code reference health checks."""
+    return [
+        check_code_references_resolve(graph),
+        check_code_coverage(graph),
+    ]
+
+
+# =============================================================================
+# Test Checks
+# =============================================================================
+
+
+def check_test_references_resolve(graph: TraceGraph) -> HealthCheck:
+    """Check that test file REQ references resolve to valid requirements."""
+    from elspais.graph import NodeKind
+
+    test_nodes = list(graph.nodes_by_kind(NodeKind.TEST))
+
+    if not test_nodes:
+        return HealthCheck(
+            name="tests.references_resolve",
+            passed=True,
+            message="No test references found (test scanning may be disabled)",
+            category="tests",
+            severity="info",
+        )
+
+    unresolved = []
+    resolved_count = 0
+
+    for node in test_nodes:
+        has_valid_parent = False
+        for parent in node.iter_parents():
+            if parent.kind in (NodeKind.REQUIREMENT, NodeKind.ASSERTION):
+                has_valid_parent = True
+                resolved_count += 1
+                break
+
+        if not has_valid_parent:
+            unresolved.append(
+                {
+                    "source": node.get_field("source_file", "unknown"),
+                    "test_name": node.get_label() or node.id,
+                }
+            )
+
+    if unresolved:
+        return HealthCheck(
+            name="tests.references_resolve",
+            passed=False,
+            message=f"{len(unresolved)} test references don't resolve to requirements",
+            category="tests",
+            severity="warning",
+            details={"unresolved": unresolved[:10], "resolved_count": resolved_count},
+        )
+
+    return HealthCheck(
+        name="tests.references_resolve",
+        passed=True,
+        message=f"All {resolved_count} test references resolve to requirements",
+        category="tests",
+        details={"resolved_count": resolved_count},
+    )
+
+
+def check_test_results(graph: TraceGraph) -> HealthCheck:
+    """Check test result status from JUnit/pytest output."""
+    from elspais.graph import NodeKind
+
+    result_nodes = list(graph.nodes_by_kind(NodeKind.TEST_RESULT))
+
+    if not result_nodes:
+        return HealthCheck(
+            name="tests.results",
+            passed=True,
+            message="No test results found (result scanning may be disabled)",
+            category="tests",
+            severity="info",
+        )
+
+    passed = 0
+    failed = 0
+    skipped = 0
+
+    for node in result_nodes:
+        status = node.get_field("status", "unknown")
+        if status == "passed":
+            passed += 1
+        elif status == "failed":
+            failed += 1
+        elif status == "skipped":
+            skipped += 1
+
+    total = passed + failed + skipped
+    pass_rate = (passed / total * 100) if total > 0 else 0
+
+    if failed > 0:
+        return HealthCheck(
+            name="tests.results",
+            passed=False,
+            message=(
+                f"Test failures: {passed} passed, {failed} failed, "
+                f"{skipped} skipped ({pass_rate:.1f}% pass rate)"
+            ),
+            category="tests",
+            severity="warning",
+            details={
+                "passed": passed,
+                "failed": failed,
+                "skipped": skipped,
+                "pass_rate": round(pass_rate, 1),
+            },
+        )
+
+    return HealthCheck(
+        name="tests.results",
+        passed=True,
+        message=f"All tests passing: {passed} passed, {skipped} skipped",
+        category="tests",
+        details={
+            "passed": passed,
+            "failed": failed,
+            "skipped": skipped,
+            "pass_rate": round(pass_rate, 1),
+        },
+    )
+
+
+def check_test_coverage(graph: TraceGraph) -> HealthCheck:
+    """Check test coverage statistics."""
+    from elspais.graph import NodeKind
+
+    test_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.TEST))
+    req_count = sum(1 for _ in graph.nodes_by_kind(NodeKind.REQUIREMENT))
+
+    # Count requirements with at least one TEST child
+    covered_reqs = set()
+    for node in graph.nodes_by_kind(NodeKind.TEST):
+        for parent in node.iter_parents():
+            if parent.kind == NodeKind.REQUIREMENT:
+                covered_reqs.add(parent.id)
+            elif parent.kind == NodeKind.ASSERTION:
+                for grandparent in parent.iter_parents():
+                    if grandparent.kind == NodeKind.REQUIREMENT:
+                        covered_reqs.add(grandparent.id)
+
+    coverage_pct = (len(covered_reqs) / req_count * 100) if req_count > 0 else 0
+
+    return HealthCheck(
+        name="tests.coverage",
+        passed=True,  # Informational only
+        message=(
+            f"{len(covered_reqs)}/{req_count} requirements "
+            f"have test references ({coverage_pct:.1f}%)"
+        ),
+        category="tests",
+        severity="info",
+        details={
+            "test_nodes": test_count,
+            "requirements_with_tests": len(covered_reqs),
+            "total_requirements": req_count,
+            "coverage_percent": round(coverage_pct, 1),
+        },
+    )
+
+
+def run_test_checks(graph: TraceGraph) -> list[HealthCheck]:
+    """Run all test file health checks."""
+    return [
+        check_test_references_resolve(graph),
+        check_test_results(graph),
+        check_test_coverage(graph),
+    ]
+
+
+# =============================================================================
+# Main Command
+# =============================================================================
+
+
+def run(args: argparse.Namespace) -> int:
+    """Run the health command.
+
+    Performs comprehensive health checks on the elspais configuration
+    and repository structure.
+    """
+    from elspais.config import ConfigLoader, get_config
+    from elspais.graph.factory import build_graph
+
+    spec_dir = getattr(args, "spec_dir", None)
+    config_path = getattr(args, "config", None)
+    start_path = Path.cwd()
+
+    report = HealthReport()
+
+    # Determine which checks to run
+    run_all = not any(
+        [
+            getattr(args, "config_only", False),
+            getattr(args, "spec_only", False),
+            getattr(args, "code_only", False),
+            getattr(args, "tests_only", False),
+        ]
+    )
+
+    run_config = run_all or getattr(args, "config_only", False)
+    run_spec = run_all or getattr(args, "spec_only", False)
+    run_code = run_all or getattr(args, "code_only", False)
+    run_tests = run_all or getattr(args, "tests_only", False)
+
+    # Config checks can run without building the graph
+    config = None
+    if run_config:
+        try:
+            config_dict = get_config(config_path, start_path=start_path)
+            config = ConfigLoader.from_dict(config_dict)
+            for check in run_config_checks(config_path, config, start_path):
+                report.add(check)
+        except Exception as e:
+            report.add(
+                HealthCheck(
+                    name="config.load",
+                    passed=False,
+                    message=f"Failed to load config: {e}",
+                    category="config",
+                )
+            )
+            # Can't continue without config
+            if not run_all:
+                return _output_report(report, args)
+
+    # Build graph for other checks
+    graph = None
+    if run_spec or run_code or run_tests:
+        try:
+            graph = build_graph(
+                spec_dirs=[spec_dir] if spec_dir else None,
+                config_path=config_path,
+            )
+            if config is None:
+                config_dict = get_config(config_path, start_path=start_path)
+                config = ConfigLoader.from_dict(config_dict)
+        except Exception as e:
+            report.add(
+                HealthCheck(
+                    name="graph.build",
+                    passed=False,
+                    message=f"Failed to build graph: {e}",
+                    category="spec",
+                )
+            )
+            return _output_report(report, args)
+
+    # Spec checks
+    if run_spec and graph and config:
+        for check in run_spec_checks(graph, config):
+            report.add(check)
+
+    # Code checks
+    if run_code and graph:
+        for check in run_code_checks(graph):
+            report.add(check)
+
+    # Test checks
+    if run_tests and graph:
+        for check in run_test_checks(graph):
+            report.add(check)
+
+    return _output_report(report, args)
+
+
+def _output_report(report: HealthReport, args: argparse.Namespace) -> int:
+    """Output the health report in the requested format."""
+    if getattr(args, "json", False):
+        print(json.dumps(report.to_dict(), indent=2))
+    else:
+        _print_text_report(report, verbose=getattr(args, "verbose", False))
+
+    return 0 if report.is_healthy else 1
+
+
+def _print_text_report(report: HealthReport, verbose: bool = False) -> None:
+    """Print human-readable health report."""
+    categories = ["config", "spec", "code", "tests"]
+
+    for category in categories:
+        checks = list(report.iter_by_category(category))
+        if not checks:
+            continue
+
+        # Category header
+        passed = sum(1 for c in checks if c.passed)
+        total = len(checks)
+        status = "✓" if passed == total else "✗"
+        print(f"\n{status} {category.upper()} ({passed}/{total} checks passed)")
+        print("-" * 40)
+
+        for check in checks:
+            if check.passed:
+                icon = "✓"
+            elif check.severity == "warning":
+                icon = "⚠"
+            else:
+                icon = "✗"
+
+            print(f"  {icon} {check.name}: {check.message}")
+
+            # Show details in verbose mode
+            if verbose and check.details:
+                for key, value in check.details.items():
+                    if isinstance(value, list) and len(value) > 3:
+                        print(f"    {key}: {value[:3]} ... ({len(value)} total)")
+                    else:
+                        print(f"    {key}: {value}")
+
+    # Summary
+    print()
+    print("=" * 40)
+    if report.is_healthy:
+        print(f"✓ HEALTHY: {report.passed} checks passed")
+    else:
+        print(f"✗ UNHEALTHY: {report.failed} errors, {report.warnings} warnings")
+    print("=" * 40)
```
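For orientation, the sketch below shows how the report types added above compose. It is not taken from the package's own tests: the import path follows the module in the diff (`elspais.commands.health`), while the check messages, file path, and requirement ID are made-up illustrative values.

```python
import json

# Assumes elspais >= 0.43.5; HealthCheck and HealthReport are the dataclasses
# defined at the top of the diff above.
from elspais.commands.health import HealthCheck, HealthReport

report = HealthReport()
report.add(
    HealthCheck(
        name="config.exists",
        passed=True,
        message="Config file found: ./elspais.toml",  # illustrative path
        category="config",
    )
)
report.add(
    HealthCheck(
        name="spec.orphans",
        passed=False,
        message="2 orphan requirements (non-PRD without parents)",
        category="spec",
        severity="warning",  # warnings are reported but do not count as failures
        details={"orphans": [{"id": "REQ-dev-d00042", "level": "DEV"}]},  # made-up ID
    )
)

# Only unpassed checks with severity == "error" count toward `failed`,
# so a warning-only report is still considered healthy.
assert report.is_healthy
print(json.dumps(report.to_dict(), indent=2))
```

This is the same dict the command serializes when JSON output is requested, and `_output_report` derives its 0/1 exit status from `is_healthy` in exactly this way.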