elspais 0.11.1__py3-none-any.whl → 0.43.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148) hide show
  1. elspais/__init__.py +2 -11
  2. elspais/{sponsors/__init__.py → associates.py} +102 -58
  3. elspais/cli.py +395 -79
  4. elspais/commands/__init__.py +9 -3
  5. elspais/commands/analyze.py +121 -173
  6. elspais/commands/changed.py +15 -30
  7. elspais/commands/config_cmd.py +13 -16
  8. elspais/commands/edit.py +60 -44
  9. elspais/commands/example_cmd.py +319 -0
  10. elspais/commands/hash_cmd.py +167 -183
  11. elspais/commands/health.py +1177 -0
  12. elspais/commands/index.py +98 -114
  13. elspais/commands/init.py +103 -26
  14. elspais/commands/reformat_cmd.py +41 -444
  15. elspais/commands/rules_cmd.py +7 -3
  16. elspais/commands/trace.py +444 -321
  17. elspais/commands/validate.py +195 -415
  18. elspais/config/__init__.py +799 -5
  19. elspais/{core/content_rules.py → content_rules.py} +20 -3
  20. elspais/docs/cli/assertions.md +67 -0
  21. elspais/docs/cli/commands.md +304 -0
  22. elspais/docs/cli/config.md +262 -0
  23. elspais/docs/cli/format.md +66 -0
  24. elspais/docs/cli/git.md +45 -0
  25. elspais/docs/cli/health.md +190 -0
  26. elspais/docs/cli/hierarchy.md +60 -0
  27. elspais/docs/cli/ignore.md +72 -0
  28. elspais/docs/cli/mcp.md +245 -0
  29. elspais/docs/cli/quickstart.md +58 -0
  30. elspais/docs/cli/traceability.md +89 -0
  31. elspais/docs/cli/validation.md +96 -0
  32. elspais/graph/GraphNode.py +383 -0
  33. elspais/graph/__init__.py +40 -0
  34. elspais/graph/annotators.py +927 -0
  35. elspais/graph/builder.py +1886 -0
  36. elspais/graph/deserializer.py +248 -0
  37. elspais/graph/factory.py +284 -0
  38. elspais/graph/metrics.py +127 -0
  39. elspais/graph/mutations.py +161 -0
  40. elspais/graph/parsers/__init__.py +156 -0
  41. elspais/graph/parsers/code.py +213 -0
  42. elspais/graph/parsers/comments.py +112 -0
  43. elspais/graph/parsers/config_helpers.py +29 -0
  44. elspais/graph/parsers/heredocs.py +225 -0
  45. elspais/graph/parsers/journey.py +131 -0
  46. elspais/graph/parsers/remainder.py +79 -0
  47. elspais/graph/parsers/requirement.py +347 -0
  48. elspais/graph/parsers/results/__init__.py +6 -0
  49. elspais/graph/parsers/results/junit_xml.py +229 -0
  50. elspais/graph/parsers/results/pytest_json.py +313 -0
  51. elspais/graph/parsers/test.py +305 -0
  52. elspais/graph/relations.py +78 -0
  53. elspais/graph/serialize.py +216 -0
  54. elspais/html/__init__.py +8 -0
  55. elspais/html/generator.py +731 -0
  56. elspais/html/templates/trace_view.html.j2 +2151 -0
  57. elspais/mcp/__init__.py +47 -29
  58. elspais/mcp/__main__.py +5 -1
  59. elspais/mcp/file_mutations.py +138 -0
  60. elspais/mcp/server.py +2016 -247
  61. elspais/testing/__init__.py +4 -4
  62. elspais/testing/config.py +3 -0
  63. elspais/testing/mapper.py +1 -1
  64. elspais/testing/result_parser.py +25 -21
  65. elspais/testing/scanner.py +301 -12
  66. elspais/utilities/__init__.py +1 -0
  67. elspais/utilities/docs_loader.py +115 -0
  68. elspais/utilities/git.py +607 -0
  69. elspais/{core → utilities}/hasher.py +8 -22
  70. elspais/utilities/md_renderer.py +189 -0
  71. elspais/{core → utilities}/patterns.py +58 -57
  72. elspais/utilities/reference_config.py +626 -0
  73. elspais/validation/__init__.py +19 -0
  74. elspais/validation/format.py +264 -0
  75. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/METADATA +7 -4
  76. elspais-0.43.5.dist-info/RECORD +80 -0
  77. elspais/config/defaults.py +0 -173
  78. elspais/config/loader.py +0 -494
  79. elspais/core/__init__.py +0 -21
  80. elspais/core/git.py +0 -352
  81. elspais/core/models.py +0 -320
  82. elspais/core/parser.py +0 -640
  83. elspais/core/rules.py +0 -514
  84. elspais/mcp/context.py +0 -171
  85. elspais/mcp/serializers.py +0 -112
  86. elspais/reformat/__init__.py +0 -50
  87. elspais/reformat/detector.py +0 -119
  88. elspais/reformat/hierarchy.py +0 -246
  89. elspais/reformat/line_breaks.py +0 -220
  90. elspais/reformat/prompts.py +0 -123
  91. elspais/reformat/transformer.py +0 -264
  92. elspais/trace_view/__init__.py +0 -54
  93. elspais/trace_view/coverage.py +0 -183
  94. elspais/trace_view/generators/__init__.py +0 -12
  95. elspais/trace_view/generators/base.py +0 -329
  96. elspais/trace_view/generators/csv.py +0 -122
  97. elspais/trace_view/generators/markdown.py +0 -175
  98. elspais/trace_view/html/__init__.py +0 -31
  99. elspais/trace_view/html/generator.py +0 -1006
  100. elspais/trace_view/html/templates/base.html +0 -283
  101. elspais/trace_view/html/templates/components/code_viewer_modal.html +0 -14
  102. elspais/trace_view/html/templates/components/file_picker_modal.html +0 -20
  103. elspais/trace_view/html/templates/components/legend_modal.html +0 -69
  104. elspais/trace_view/html/templates/components/review_panel.html +0 -118
  105. elspais/trace_view/html/templates/partials/review/help/help-panel.json +0 -244
  106. elspais/trace_view/html/templates/partials/review/help/onboarding.json +0 -77
  107. elspais/trace_view/html/templates/partials/review/help/tooltips.json +0 -237
  108. elspais/trace_view/html/templates/partials/review/review-comments.js +0 -928
  109. elspais/trace_view/html/templates/partials/review/review-data.js +0 -961
  110. elspais/trace_view/html/templates/partials/review/review-help.js +0 -679
  111. elspais/trace_view/html/templates/partials/review/review-init.js +0 -177
  112. elspais/trace_view/html/templates/partials/review/review-line-numbers.js +0 -429
  113. elspais/trace_view/html/templates/partials/review/review-packages.js +0 -1029
  114. elspais/trace_view/html/templates/partials/review/review-position.js +0 -540
  115. elspais/trace_view/html/templates/partials/review/review-resize.js +0 -115
  116. elspais/trace_view/html/templates/partials/review/review-status.js +0 -659
  117. elspais/trace_view/html/templates/partials/review/review-sync.js +0 -992
  118. elspais/trace_view/html/templates/partials/review-styles.css +0 -2238
  119. elspais/trace_view/html/templates/partials/scripts.js +0 -1741
  120. elspais/trace_view/html/templates/partials/styles.css +0 -1756
  121. elspais/trace_view/models.py +0 -353
  122. elspais/trace_view/review/__init__.py +0 -60
  123. elspais/trace_view/review/branches.py +0 -1149
  124. elspais/trace_view/review/models.py +0 -1205
  125. elspais/trace_view/review/position.py +0 -609
  126. elspais/trace_view/review/server.py +0 -1056
  127. elspais/trace_view/review/status.py +0 -470
  128. elspais/trace_view/review/storage.py +0 -1367
  129. elspais/trace_view/scanning.py +0 -213
  130. elspais/trace_view/specs/README.md +0 -84
  131. elspais/trace_view/specs/tv-d00001-template-architecture.md +0 -36
  132. elspais/trace_view/specs/tv-d00002-css-extraction.md +0 -37
  133. elspais/trace_view/specs/tv-d00003-js-extraction.md +0 -43
  134. elspais/trace_view/specs/tv-d00004-build-embedding.md +0 -40
  135. elspais/trace_view/specs/tv-d00005-test-format.md +0 -78
  136. elspais/trace_view/specs/tv-d00010-review-data-models.md +0 -33
  137. elspais/trace_view/specs/tv-d00011-review-storage.md +0 -33
  138. elspais/trace_view/specs/tv-d00012-position-resolution.md +0 -33
  139. elspais/trace_view/specs/tv-d00013-git-branches.md +0 -31
  140. elspais/trace_view/specs/tv-d00014-review-api-server.md +0 -31
  141. elspais/trace_view/specs/tv-d00015-status-modifier.md +0 -27
  142. elspais/trace_view/specs/tv-d00016-js-integration.md +0 -33
  143. elspais/trace_view/specs/tv-p00001-html-generator.md +0 -33
  144. elspais/trace_view/specs/tv-p00002-review-system.md +0 -29
  145. elspais-0.11.1.dist-info/RECORD +0 -101
  146. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/WHEEL +0 -0
  147. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/entry_points.txt +0 -0
  148. {elspais-0.11.1.dist-info → elspais-0.43.5.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,248 @@
1
+ """DomainDeserializer - Abstract controller for text domain deserialization.
2
+
3
+ This module provides the infrastructure for deserializing text from
4
+ various sources (files, stdin, CLI args) into parsed content.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from dataclasses import dataclass, field
10
+ from pathlib import Path
11
+ from typing import Any, Iterator, Protocol, runtime_checkable
12
+
13
+ from elspais.graph.parsers import ParseContext, ParsedContent, ParserRegistry
14
+
15
+
16
@dataclass
class DomainContext:
    """Describes where a piece of deserialized text came from.

    Attributes:
        source_type: Kind of origin ("file", "stdin", "cli").
        source_id: Identifier for the origin (e.g. a file path).
        metadata: Extra key/value details about the origin.
    """

    source_type: str
    source_id: str
    metadata: dict[str, Any] = field(default_factory=dict)
29
+
30
+
31
@dataclass
class ParsedContentWithContext(ParsedContent):
    """ParsedContent enriched with the DomainContext it originated from.

    Identical to ParsedContent apart from the extra ``source_context``
    attribute, which records the source the region was read from.
    """

    # None when the content was produced without a tracked source.
    source_context: DomainContext | None = None
40
+
41
+
42
@runtime_checkable
class DomainDeserializer(Protocol):
    """Structural interface implemented by all domain deserializers.

    A deserializer walks one or more text sources and runs registered
    parsers over them to produce structured content.
    """

    def iterate_sources(self) -> Iterator[tuple[DomainContext, str]]:
        """Yield one (DomainContext, raw_content) pair per source."""
        ...

    def deserialize(self, registry: ParserRegistry) -> Iterator[ParsedContentWithContext]:
        """Parse every source with *registry*.

        Args:
            registry: ParserRegistry holding the parsers to apply.

        Yields:
            ParsedContentWithContext for each region a parser recognized.
        """
        ...
68
+
69
+
70
class DomainFile:
    """Deserializer that reads from a single file or a directory tree.

    Accepts either:
    - a path to one file, or
    - a directory, in which case every file matching ``patterns`` is read.
    """

    def __init__(
        self,
        path: Path | str,
        patterns: list[str] | None = None,
        recursive: bool = False,
        skip_dirs: list[str] | None = None,
        skip_files: list[str] | None = None,
    ) -> None:
        """Set up the file deserializer.

        Args:
            path: File or directory to read.
            patterns: Glob patterns used for directories (defaults to ["*.md"]).
            recursive: Search subdirectories when True.
            skip_dirs: Directory names excluded from the walk.
            skip_files: File names excluded from the walk.
        """
        self.path = Path(path)
        self.patterns = patterns or ["*.md"]
        self.recursive = recursive
        self.skip_dirs = skip_dirs or []
        self.skip_files = skip_files or []

    def _should_skip(self, file_path: Path) -> bool:
        """Return True when *file_path* is excluded by skip_files or skip_dirs."""
        if file_path.name in self.skip_files:
            return True
        try:
            # Inspect only the directory components below the base path.
            components = file_path.relative_to(self.path).parts[:-1]
        except ValueError:
            # Not under the base path: fall back to the full path components.
            components = file_path.parts
        return any(part in self.skip_dirs for part in components)

    def iterate_sources(self) -> Iterator[tuple[DomainContext, str]]:
        """Yield (DomainContext, content) for every file this deserializer covers."""
        if self.path.is_file():
            if not self._should_skip(self.path):
                yield self._read_file(self.path)
            return
        if not self.path.is_dir():
            return
        globber = self.path.rglob if self.recursive else self.path.glob
        for pattern in self.patterns:
            for candidate in sorted(globber(pattern)):
                if candidate.is_file() and not self._should_skip(candidate):
                    yield self._read_file(candidate)

    def _read_file(self, file_path: Path) -> tuple[DomainContext, str]:
        """Read *file_path* as UTF-8 and pair it with a file-typed DomainContext."""
        text = file_path.read_text(encoding="utf-8")
        context = DomainContext(
            source_type="file",
            source_id=str(file_path),
            metadata={"path": file_path},
        )
        return context, text

    def deserialize(self, registry: ParserRegistry) -> Iterator[ParsedContentWithContext]:
        """Run *registry* over every file, yielding parsed regions with context.

        Args:
            registry: ParserRegistry holding the parsers to apply.

        Yields:
            ParsedContentWithContext for each region a parser recognized.
        """
        for context, text in self.iterate_sources():
            # 1-based (line_number, line) pairs, as the parsers expect.
            numbered = list(enumerate(text.split("\n"), start=1))

            parse_ctx = ParseContext(
                file_path=context.source_id,
                # The source metadata is passed through as the parser config.
                config=context.metadata,
            )

            for region in registry.parse_all(numbered, parse_ctx):
                yield ParsedContentWithContext(
                    content_type=region.content_type,
                    start_line=region.start_line,
                    end_line=region.end_line,
                    raw_text=region.raw_text,
                    parsed_data=region.parsed_data,
                    source_context=context,
                )
196
+
197
+
198
class DomainStdio:
    """Deserializer that wraps content already read from stdin."""

    def __init__(self, content: str, source_id: str = "<stdin>") -> None:
        """Store stdin *content* under the given *source_id*.

        Args:
            content: Text that was read from stdin.
            source_id: Label used to identify this source.
        """
        self.content = content
        self.source_id = source_id

    def iterate_sources(self) -> Iterator[tuple[DomainContext, str]]:
        """Yield exactly one (DomainContext, content) pair for the stdin text."""
        context = DomainContext(
            source_type="stdin",
            source_id=self.source_id,
        )
        yield context, self.content

    def deserialize(self, registry: ParserRegistry) -> Iterator[ParsedContentWithContext]:
        """Run *registry* over the stdin content.

        Args:
            registry: ParserRegistry holding the parsers to apply.

        Yields:
            ParsedContentWithContext for each region a parser recognized.
        """
        for context, text in self.iterate_sources():
            # 1-based (line_number, line) pairs, as the parsers expect.
            numbered = list(enumerate(text.split("\n"), start=1))

            parse_ctx = ParseContext(file_path=context.source_id, config={})

            for region in registry.parse_all(numbered, parse_ctx):
                yield ParsedContentWithContext(
                    content_type=region.content_type,
                    start_line=region.start_line,
                    end_line=region.end_line,
                    raw_text=region.raw_text,
                    parsed_data=region.parsed_data,
                    source_context=context,
                )
@@ -0,0 +1,284 @@
1
+ """Graph Factory - Shared utility for building TraceGraph from spec files.
2
+
3
+ This module provides a single entry point for all commands to build a TraceGraph
4
+ from configuration and spec directories. Commands should use this instead of
5
+ implementing their own file reading logic.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from glob import glob
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+ from elspais.associates import get_associate_spec_directories
15
+ from elspais.config import get_config, get_ignore_config, get_spec_directories
16
+ from elspais.graph.builder import GraphBuilder, TraceGraph
17
+ from elspais.graph.deserializer import DomainFile
18
+ from elspais.graph.parsers import ParserRegistry
19
+ from elspais.graph.parsers.code import CodeParser
20
+ from elspais.graph.parsers.journey import JourneyParser
21
+ from elspais.graph.parsers.requirement import RequirementParser
22
+ from elspais.graph.parsers.results import JUnitXMLParser, PytestJSONParser
23
+ from elspais.graph.parsers.test import TestParser
24
+ from elspais.utilities.patterns import PatternConfig
25
+ from elspais.utilities.reference_config import ReferenceResolver
26
+
27
+
28
+ def _find_repo_root(spec_dir: Path) -> Path | None:
29
+ """Find the repository root containing .elspais.toml for a spec directory.
30
+
31
+ Walks up the directory tree looking for .elspais.toml.
32
+
33
+ Args:
34
+ spec_dir: The spec directory path
35
+
36
+ Returns:
37
+ Path to repo root, or None if not found
38
+ """
39
+ current = spec_dir.resolve()
40
+ while current != current.parent:
41
+ if (current / ".elspais.toml").exists():
42
+ return current
43
+ current = current.parent
44
+ return None
45
+
46
+
47
def _create_registry_for_spec_dir(
    spec_dir: Path,
    default_pattern_config: PatternConfig,
    default_reference_resolver: ReferenceResolver,
) -> ParserRegistry:
    """Create a parser registry for a spec directory.

    If the spec directory belongs to a different repo (one with its own
    .elspais.toml), that repo's config supplies the pattern config and
    reference resolver; otherwise the provided defaults are used.

    Args:
        spec_dir: The spec directory path.
        default_pattern_config: Pattern config from the main repo.
        default_reference_resolver: Reference resolver from the main repo.

    Returns:
        ParserRegistry configured for this spec directory.
    """
    # Start from the defaults; override only when the spec dir's own repo
    # config is found. This removes the duplicated registration block that
    # previously existed for the two branches.
    pattern_config = default_pattern_config
    reference_resolver = default_reference_resolver

    repo_root = _find_repo_root(spec_dir)
    if repo_root is not None:
        config_path = repo_root / ".elspais.toml"
        # _find_repo_root already saw the file; re-check guards against races.
        if config_path.exists():
            repo_config = get_config(config_path, repo_root)
            pattern_config = PatternConfig.from_dict(repo_config.get("patterns", {}))
            reference_resolver = ReferenceResolver.from_config(
                repo_config.get("references", {})
            )

    registry = ParserRegistry()
    registry.register(RequirementParser(pattern_config))
    registry.register(JourneyParser())
    registry.register(CodeParser(pattern_config, reference_resolver))
    registry.register(TestParser(pattern_config, reference_resolver))
    return registry
90
+
91
+
92
def build_graph(
    config: dict[str, Any] | None = None,
    spec_dirs: list[Path] | None = None,
    config_path: Path | None = None,
    repo_root: Path | None = None,
    scan_code: bool = True,
    scan_tests: bool = True,
    scan_sponsors: bool = True,
) -> TraceGraph:
    """Build a TraceGraph from spec directories.

    This is the standard way for commands to obtain a TraceGraph.
    It handles:
    - Configuration loading (auto-discovery or explicit)
    - Spec directory resolution
    - Sponsor/associate spec directory resolution
    - Parser registration
    - Graph construction
    - Code and test directory scanning (configurable)

    Args:
        config: Pre-loaded config dict (optional).
        spec_dirs: Explicit spec directories (optional).
        config_path: Path to config file (optional).
        repo_root: Repository root for relative paths (defaults to cwd).
        scan_code: Whether to scan code directories from traceability.scan_patterns.
        scan_tests: Whether to scan test directories from testing.test_dirs.
        scan_sponsors: Whether to scan sponsor/associate spec directories.

    Returns:
        Complete TraceGraph with all requirements linked.

    Priority:
        spec_dirs > config > config_path > defaults
    """
    # Hoisted: this import previously ran inside a doubly nested per-result
    # loop in the result-file section below.
    from elspais.graph.deserializer import DomainContext, ParsedContentWithContext

    if repo_root is None:
        repo_root = Path.cwd()

    # 1. Resolve configuration
    if config is None:
        config = get_config(config_path, repo_root)

    # 2. Resolve spec directories
    if spec_dirs is None:
        spec_dirs = get_spec_directories(None, config, repo_root)

    # 2b. Add sponsor/associate spec directories if enabled
    if scan_sponsors:
        sponsor_dirs = get_associate_spec_directories(config, repo_root)
        spec_dirs = list(spec_dirs) + sponsor_dirs

    # 3. Default pattern config and reference resolver from the main repo
    default_pattern_config = PatternConfig.from_dict(config.get("patterns", {}))
    default_reference_resolver = ReferenceResolver.from_config(config.get("references", {}))

    # Dedicated registries: code files only need the code parser, test
    # files only need the test parser.
    code_registry = ParserRegistry()
    code_registry.register(CodeParser(default_pattern_config, default_reference_resolver))

    test_registry = ParserRegistry()
    test_registry.register(TestParser(default_pattern_config, default_reference_resolver))

    # 4. Build graph from all spec directories
    builder = GraphBuilder(repo_root=repo_root)

    # Ignore configuration for filtering spec files
    ignore_config = get_ignore_config(config)

    # Legacy skip configuration, kept for backward compatibility
    spec_config = config.get("spec", {})
    skip_dirs = spec_config.get("skip_dirs", [])
    skip_files = spec_config.get("skip_files", [])

    for spec_dir in spec_dirs:
        # Sponsor repos may carry their own .elspais.toml; the registry is
        # built with that repo's patterns when applicable.
        spec_registry = _create_registry_for_spec_dir(
            spec_dir, default_pattern_config, default_reference_resolver
        )

        file_patterns = spec_config.get("patterns", ["*.md"])
        domain_file = DomainFile(
            spec_dir,
            patterns=file_patterns,
            recursive=True,
            skip_dirs=skip_dirs,
            skip_files=skip_files,
        )

        for parsed_content in domain_file.deserialize(spec_registry):
            # Honor [ignore].spec patterns before adding to the graph.
            source_path = parsed_content.source_context.metadata.get("path")
            if source_path and ignore_config.should_ignore(source_path, scope="spec"):
                continue
            builder.add_parsed_content(parsed_content)

    # 5. Scan code directories from traceability.scan_patterns
    if scan_code:
        traceability_config = config.get("traceability", {})
        for pattern in traceability_config.get("scan_patterns", []):
            # Resolve each glob pattern relative to repo_root.
            for file_path in glob(str(repo_root / pattern), recursive=True):
                path = Path(file_path)
                if path.is_file():
                    for parsed_content in DomainFile(path).deserialize(code_registry):
                        builder.add_parsed_content(parsed_content)

    # 6. Scan test directories and result files from testing config
    if scan_tests:
        testing_config = config.get("testing", {})
        if testing_config.get("enabled", False):
            test_patterns = testing_config.get("patterns", ["*_test.*", "test_*.*"])
            for dir_pattern in testing_config.get("test_dirs", []):
                for dir_path in glob(str(repo_root / dir_pattern), recursive=True):
                    path = Path(dir_path)
                    if path.is_dir():
                        domain_file = DomainFile(
                            path,
                            patterns=test_patterns,
                            recursive=True,
                        )
                        for parsed_content in domain_file.deserialize(test_registry):
                            builder.add_parsed_content(parsed_content)

            # 6b. Scan test result files (JUnit XML, pytest JSON)
            # NOTE(review): result-file parsing is gated on testing.enabled
            # here — confirm this matches the intended config contract.
            result_files = testing_config.get("result_files", [])
            if result_files:
                # Result parsers have a different interface from the
                # line-oriented registry parsers.
                junit_parser = JUnitXMLParser(
                    pattern_config=default_pattern_config,
                    reference_resolver=default_reference_resolver,
                    base_path=repo_root,
                )
                pytest_parser = PytestJSONParser(
                    pattern_config=default_pattern_config,
                    reference_resolver=default_reference_resolver,
                    base_path=repo_root,
                )

                for file_pattern in result_files:
                    for file_path in glob(str(repo_root / file_pattern), recursive=True):
                        path = Path(file_path)
                        if not path.is_file():
                            continue
                        content = path.read_text(encoding="utf-8")
                        source_path = str(path)

                        # Choose the parser by file extension; other
                        # extensions produce no results.
                        suffix = path.suffix.lower()
                        if suffix == ".xml":
                            results = junit_parser.parse(content, source_path)
                        elif suffix == ".json":
                            results = pytest_parser.parse(content, source_path)
                        else:
                            results = []

                        # One shared context per file (previously one was
                        # allocated per result).
                        ctx = DomainContext(
                            source_type="file",
                            source_id=source_path,
                            metadata={"path": path},
                        )
                        for result in results:
                            builder.add_parsed_content(
                                ParsedContentWithContext(
                                    content_type="test_result",
                                    start_line=1,
                                    end_line=1,
                                    raw_text="",
                                    parsed_data=result,
                                    source_context=ctx,
                                )
                            )

    return builder.build()
282
+
283
+
284
# Public API: commands should import build_graph rather than the helpers above.
__all__ = ["build_graph"]
@@ -0,0 +1,127 @@
1
+ """Coverage metrics data structures.
2
+
3
+ This module defines the data structures for centralized coverage tracking:
4
+ - CoverageSource: Enum indicating where coverage originated
5
+ - CoverageContribution: A single coverage claim on an assertion
6
+ - RollupMetrics: Aggregated metrics for a requirement node
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from dataclasses import dataclass, field
12
+ from enum import Enum
13
+
14
+
15
class CoverageSource(Enum):
    """How a coverage contribution was established.

    The source implies a confidence level:
    - DIRECT: high confidence — a TEST validates or CODE implements the assertion
    - EXPLICIT: high confidence — a REQ names specific assertion(s) via syntax
    - INFERRED: review recommended — a REQ implements the parent REQ,
      implicitly claiming every assertion
    """

    DIRECT = "direct"
    EXPLICIT = "explicit"
    INFERRED = "inferred"
27
+
28
+
29
@dataclass
class CoverageContribution:
    """One claim that a node covers a particular assertion.

    Attributes:
        source_id: ID of the contributing node (TEST, CODE, or REQ).
        source_type: Mechanism by which the coverage was established.
        assertion_label: Label of the covered assertion (e.g. "A", "B").
    """

    source_id: str
    source_type: CoverageSource
    assertion_label: str
44
+
45
+
46
@dataclass
class RollupMetrics:
    """Aggregated coverage metrics for a requirement.

    Populated once during graph annotation and stored in node._metrics.
    Holds both aggregate counters and the per-assertion contributor map.

    Attributes:
        total_assertions: Number of assertions on the requirement.
        covered_assertions: Assertions with at least one contributor.
        direct_covered: Assertions covered by TEST or CODE nodes.
        explicit_covered: Assertions covered by REQ assertion syntax.
        inferred_covered: Assertions covered by REQ without assertion syntax.
        coverage_pct: Covered percentage in the range 0-100.
        assertion_coverage: Assertion label -> list of contributors.
        direct_tested: Assertions covered specifically by TEST nodes.
        validated: Assertions backed by passing TEST_RESULTs.
        has_failures: True when any TEST_RESULT failed or errored.
    """

    total_assertions: int = 0
    covered_assertions: int = 0
    direct_covered: int = 0
    explicit_covered: int = 0
    inferred_covered: int = 0
    coverage_pct: float = 0.0
    assertion_coverage: dict[str, list[CoverageContribution]] = field(default_factory=dict)
    # Test-specific metrics — filled in by annotators, not by finalize().
    direct_tested: int = 0
    validated: int = 0
    has_failures: bool = False

    def add_contribution(self, contribution: CoverageContribution) -> None:
        """Record *contribution* under its assertion label.

        Args:
            contribution: The coverage contribution to add.
        """
        bucket = self.assertion_coverage.setdefault(contribution.assertion_label, [])
        bucket.append(contribution)

    def finalize(self) -> None:
        """Recompute aggregate counters from the recorded contributions.

        Call after the last add_contribution(); updates covered_assertions,
        the per-source counts, and coverage_pct. A requirement with zero
        assertions is left untouched.
        """
        if not self.total_assertions:
            return

        # Unique assertion labels seen per coverage source.
        labels_by_source = {
            CoverageSource.DIRECT: set(),
            CoverageSource.EXPLICIT: set(),
            CoverageSource.INFERRED: set(),
        }
        for label, contributions in self.assertion_coverage.items():
            for contribution in contributions:
                bucket = labels_by_source.get(contribution.source_type)
                if bucket is not None:  # unknown sources are ignored, as before
                    bucket.add(label)

        direct = labels_by_source[CoverageSource.DIRECT]
        explicit = labels_by_source[CoverageSource.EXPLICIT]
        inferred = labels_by_source[CoverageSource.INFERRED]

        self.covered_assertions = len(direct | explicit | inferred)
        self.direct_covered = len(direct)
        self.explicit_covered = len(explicit)
        self.inferred_covered = len(inferred)

        # Keep the original expression order for bit-identical float results.
        self.coverage_pct = (self.covered_assertions / self.total_assertions) * 100
121
+
122
+
123
# Public API of this module.
__all__ = [
    "CoverageSource",
    "CoverageContribution",
    "RollupMetrics",
]