elspais-0.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
elspais/mcp/context.py ADDED
@@ -0,0 +1,171 @@
+ """
+ elspais.mcp.context - Workspace context for MCP server.
+
+ Manages workspace state including configuration, requirements cache,
+ and content rules.
+ """
+
+ import re
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ from elspais.config.loader import find_config_file, get_spec_directories, load_config
+ from elspais.core.content_rules import load_content_rules
+ from elspais.core.models import ContentRule, Requirement
+ from elspais.core.parser import RequirementParser
+ from elspais.core.patterns import PatternConfig
+
+
+ @dataclass
+ class WorkspaceContext:
+     """
+     Manages workspace state for MCP server operations.
+
+     Provides caching of parsed requirements and access to configuration,
+     content rules, and other workspace resources.
+     """
+
+     working_dir: Path
+     config: Dict[str, Any] = field(default_factory=dict)
+     _requirements_cache: Optional[Dict[str, Requirement]] = field(default=None, repr=False)
+     _parser: Optional[RequirementParser] = field(default=None, repr=False)
+
+     @classmethod
+     def from_directory(cls, directory: Path) -> "WorkspaceContext":
+         """
+         Initialize context from a working directory.
+
+         Loads configuration from .elspais.toml if found.
+
+         Args:
+             directory: Working directory path
+
+         Returns:
+             Initialized WorkspaceContext
+         """
+         directory = directory.resolve()
+         config_path = find_config_file(directory)
+
+         if config_path:
+             config = load_config(config_path)
+         else:
+             # Use defaults
+             from elspais.config.defaults import DEFAULT_CONFIG
+             config = DEFAULT_CONFIG.copy()
+
+         return cls(working_dir=directory, config=config)
+
+     def get_requirements(self, force_refresh: bool = False) -> Dict[str, Requirement]:
+         """
+         Get all parsed requirements, with caching.
+
+         Args:
+             force_refresh: If True, ignore cache and re-parse
+
+         Returns:
+             Dict mapping requirement IDs to Requirement objects
+         """
+         if self._requirements_cache is None or force_refresh:
+             self._requirements_cache = self._parse_requirements()
+         return self._requirements_cache
+
+     def get_requirement(self, req_id: str) -> Optional[Requirement]:
+         """
+         Get a single requirement by ID.
+
+         Args:
+             req_id: Requirement ID (e.g., "REQ-p00001")
+
+         Returns:
+             Requirement if found, None otherwise
+         """
+         requirements = self.get_requirements()
+         return requirements.get(req_id)
+
+     def get_content_rules(self) -> List[ContentRule]:
+         """
+         Get all configured content rules.
+
+         Returns:
+             List of ContentRule objects
+         """
+         return load_content_rules(self.config, self.working_dir)
+
+     def search_requirements(
+         self,
+         query: str,
+         field: str = "all",
+         regex: bool = False,
+     ) -> List[Requirement]:
+         """
+         Search requirements by pattern.
+
+         Args:
+             query: Search query string
+             field: Field to search - "all", "id", "title", "body", "assertions"
+             regex: If True, treat query as regex pattern
+
+         Returns:
+             List of matching requirements
+         """
+         requirements = self.get_requirements()
+         results = []
+
+         if regex:
+             pattern = re.compile(query, re.IGNORECASE)
+         else:
+             pattern = re.compile(re.escape(query), re.IGNORECASE)
+
+         for req in requirements.values():
+             if self._matches(req, pattern, field):
+                 results.append(req)
+
+         return results
+
+     def invalidate_cache(self) -> None:
+         """Clear cached requirements (call after edits)."""
+         self._requirements_cache = None
+
+     def _parse_requirements(self) -> Dict[str, Requirement]:
+         """Parse requirements from spec directories."""
+         if self._parser is None:
+             pattern_config = PatternConfig.from_dict(self.config.get("patterns", {}))
+             self._parser = RequirementParser(pattern_config)
+
+         spec_dirs = get_spec_directories(None, self.config, self.working_dir)
+         skip_files = self.config.get("spec", {}).get("skip_files", [])
+
+         all_requirements: Dict[str, Requirement] = {}
+
+         for spec_dir in spec_dirs:
+             if spec_dir.exists():
+                 requirements = self._parser.parse_directory(spec_dir, skip_files=skip_files)
+                 all_requirements.update(requirements)
+
+         return all_requirements
+
+     def _matches(self, req: Requirement, pattern: re.Pattern, field: str) -> bool:
+         """Check if requirement matches search pattern."""
+         if field == "id":
+             return bool(pattern.search(req.id))
+         elif field == "title":
+             return bool(pattern.search(req.title))
+         elif field == "body":
+             return bool(pattern.search(req.body))
+         elif field == "assertions":
+             for assertion in req.assertions:
+                 if pattern.search(assertion.text):
+                     return True
+             return False
+         else:  # "all"
+             if pattern.search(req.id):
+                 return True
+             if pattern.search(req.title):
+                 return True
+             if pattern.search(req.body):
+                 return True
+             for assertion in req.assertions:
+                 if pattern.search(assertion.text):
+                     return True
+             return False
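
For orientation, a minimal sketch of how the WorkspaceContext above might be driven directly; the workspace path and the search query are placeholders:

    from pathlib import Path
    from elspais.mcp.context import WorkspaceContext

    # Loads .elspais.toml if present, otherwise falls back to defaults.
    ctx = WorkspaceContext.from_directory(Path("/path/to/workspace"))

    # Case-insensitive substring search; results come from the parsed cache.
    for req in ctx.search_requirements("timeout", field="title"):
        print(req.id, req.title)

    # After editing spec files, clear the cache so the next call re-parses.
    ctx.invalidate_cache()
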
elspais/mcp/serializers.py ADDED
@@ -0,0 +1,112 @@
+ """
+ elspais.mcp.serializers - JSON serialization for MCP responses.
+
+ Provides functions to serialize elspais data models to JSON-compatible dicts.
+ """
+
+ from typing import Any, Dict, List
+
+ from elspais.core.models import Assertion, ContentRule, Requirement
+ from elspais.core.rules import RuleViolation
+
+
+ def serialize_requirement(req: Requirement) -> Dict[str, Any]:
+     """
+     Serialize a Requirement to a JSON-compatible dict.
+
+     Args:
+         req: Requirement to serialize
+
+     Returns:
+         Dict suitable for JSON serialization
+     """
+     return {
+         "id": req.id,
+         "title": req.title,
+         "level": req.level,
+         "status": req.status,
+         "body": req.body,
+         "implements": req.implements,
+         "assertions": [serialize_assertion(a) for a in req.assertions],
+         "rationale": req.rationale,
+         "hash": req.hash,
+         "file_path": str(req.file_path) if req.file_path else None,
+         "line_number": req.line_number,
+         "subdir": req.subdir,
+         "type_code": req.type_code,
+     }
+
+
+ def serialize_requirement_summary(req: Requirement) -> Dict[str, Any]:
+     """
+     Serialize a requirement summary (lighter weight, for listings).
+
+     Args:
+         req: Requirement to serialize
+
+     Returns:
+         Dict with summary fields only
+     """
+     return {
+         "id": req.id,
+         "title": req.title,
+         "level": req.level,
+         "status": req.status,
+         "implements": req.implements,
+         "assertion_count": len(req.assertions),
+     }
+
+
+ def serialize_assertion(assertion: Assertion) -> Dict[str, Any]:
+     """
+     Serialize an Assertion to a JSON-compatible dict.
+
+     Args:
+         assertion: Assertion to serialize
+
+     Returns:
+         Dict suitable for JSON serialization
+     """
+     return {
+         "label": assertion.label,
+         "text": assertion.text,
+         "is_placeholder": assertion.is_placeholder,
+     }
+
+
+ def serialize_violation(violation: RuleViolation) -> Dict[str, Any]:
+     """
+     Serialize a RuleViolation to a JSON-compatible dict.
+
+     Args:
+         violation: RuleViolation to serialize
+
+     Returns:
+         Dict suitable for JSON serialization
+     """
+     return {
+         "rule_name": violation.rule_name,
+         "requirement_id": violation.requirement_id,
+         "message": violation.message,
+         "severity": violation.severity.value,
+         "location": violation.location,
+     }
+
+
+ def serialize_content_rule(rule: ContentRule) -> Dict[str, Any]:
+     """
+     Serialize a ContentRule to a JSON-compatible dict.
+
+     Args:
+         rule: ContentRule to serialize
+
+     Returns:
+         Dict suitable for JSON serialization
+     """
+     return {
+         "file_path": str(rule.file_path),
+         "title": rule.title,
+         "content": rule.content,
+         "type": rule.type,
+         "applies_to": rule.applies_to,
+     }
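
A short sketch of how these serializers compose with the standard json module; serialize_requirement comes from the diff above, and ctx is the WorkspaceContext from the earlier sketch:

    import json
    from elspais.mcp.serializers import serialize_requirement

    req = ctx.get_requirement("REQ-p00001")
    if req is not None:
        print(json.dumps(serialize_requirement(req), indent=2))
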
elspais/mcp/server.py ADDED
@@ -0,0 +1,339 @@
+ """
+ elspais.mcp.server - MCP server implementation.
+
+ Creates and runs the MCP server exposing elspais functionality.
+ """
+
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ try:
+     from mcp.server.fastmcp import FastMCP
+     MCP_AVAILABLE = True
+ except ImportError:
+     MCP_AVAILABLE = False
+     FastMCP = None
+
+ from elspais.mcp.context import WorkspaceContext
+ from elspais.mcp.serializers import (
+     serialize_content_rule,
+     serialize_requirement,
+     serialize_requirement_summary,
+     serialize_violation,
+ )
+
+
+ def create_server(working_dir: Optional[Path] = None) -> "FastMCP":
+     """
+     Create and configure the MCP server.
+
+     Args:
+         working_dir: Working directory for finding .elspais.toml.
+             Defaults to the current working directory.
+
+     Returns:
+         Configured FastMCP server instance
+
+     Raises:
+         ImportError: If MCP dependencies are not installed
+     """
+     if not MCP_AVAILABLE:
+         raise ImportError(
+             "MCP dependencies not installed. "
+             "Install with: pip install elspais[mcp]"
+         )
+
+     if working_dir is None:
+         working_dir = Path.cwd()
+
+     # Initialize workspace context
+     ctx = WorkspaceContext.from_directory(working_dir)
+
+     # Create FastMCP server
+     mcp = FastMCP(
+         name="elspais",
+     )
+
+     # Register resources
+     _register_resources(mcp, ctx)
+
+     # Register tools
+     _register_tools(mcp, ctx)
+
+     return mcp
+
+
+ def _register_resources(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
+     """Register MCP resources."""
+
+     @mcp.resource("requirements://all")
+     def list_all_requirements() -> str:
+         """
+         Get a list of all requirements in the workspace.
+
+         Returns summary information for each requirement including
+         ID, title, level, status, and assertion count.
+         """
+         import json
+         requirements = ctx.get_requirements()
+         return json.dumps({
+             "count": len(requirements),
+             "requirements": [
+                 serialize_requirement_summary(req)
+                 for req in requirements.values()
+             ]
+         }, indent=2)
+
+     @mcp.resource("requirements://{req_id}")
+     def get_requirement_resource(req_id: str) -> str:
+         """
+         Get detailed information about a specific requirement.
+
+         Returns full requirement data including body, assertions,
+         implements references, and location.
+         """
+         import json
+         req = ctx.get_requirement(req_id)
+         if req is None:
+             return json.dumps({"error": f"Requirement {req_id} not found"})
+         return json.dumps(serialize_requirement(req), indent=2)
+
+     @mcp.resource("requirements://level/{level}")
+     def get_requirements_by_level(level: str) -> str:
+         """Get all requirements of a specific level (PRD, OPS, DEV)."""
+         import json
+         requirements = ctx.get_requirements()
+         filtered = [
+             r for r in requirements.values()
+             if r.level.upper() == level.upper()
+         ]
+         return json.dumps({
+             "level": level,
+             "count": len(filtered),
+             "requirements": [serialize_requirement_summary(r) for r in filtered]
+         }, indent=2)
+
+     @mcp.resource("content-rules://list")
+     def list_content_rules() -> str:
+         """List all configured content rule files."""
+         import json
+         rules = ctx.get_content_rules()
+         return json.dumps({
+             "count": len(rules),
+             "rules": [
+                 {
+                     "file": str(r.file_path),
+                     "title": r.title,
+                     "type": r.type,
+                     "applies_to": r.applies_to,
+                 }
+                 for r in rules
+             ]
+         }, indent=2)
+
+     @mcp.resource("content-rules://{filename}")
+     def get_content_rule(filename: str) -> str:
+         """
+         Get the content of a content rule markdown file.
+
+         Content rules are documentation files that describe
+         requirement formats and authoring guidelines.
+         """
+         import json
+         rules = ctx.get_content_rules()
+         for rule in rules:
+             if rule.file_path.name == filename or str(rule.file_path).endswith(filename):
+                 return json.dumps(serialize_content_rule(rule), indent=2)
+         return json.dumps({"error": f"Content rule not found: {filename}"})
+
+     @mcp.resource("config://current")
+     def get_current_config() -> str:
+         """Get the current elspais configuration."""
+         import json
+         return json.dumps(ctx.config, indent=2, default=str)
+
+
+ def _register_tools(mcp: "FastMCP", ctx: WorkspaceContext) -> None:
+     """Register MCP tools."""
+
+     @mcp.tool()
+     def validate(skip_rules: Optional[List[str]] = None) -> Dict[str, Any]:
+         """
+         Validate all requirements in the workspace.
+
+         Checks format, hierarchy relationships, hashes, and links.
+         Returns violations grouped by severity.
+
+         Args:
+             skip_rules: Optional list of rule names to skip
+         """
+         from elspais.core.rules import RuleEngine, RulesConfig, Severity
+
+         requirements = ctx.get_requirements(force_refresh=True)
+         rules_config = RulesConfig.from_dict(ctx.config.get("rules", {}))
+         engine = RuleEngine(rules_config)
+
+         violations = engine.validate(requirements)
+
+         # Filter out skipped rules
+         if skip_rules:
+             violations = [v for v in violations if v.rule_name not in skip_rules]
+
+         errors = [v for v in violations if v.severity == Severity.ERROR]
+         warnings = [v for v in violations if v.severity == Severity.WARNING]
+
+         return {
+             "valid": len(errors) == 0,
+             "errors": [serialize_violation(v) for v in errors],
+             "warnings": [serialize_violation(v) for v in warnings],
+             "summary": f"{len(errors)} errors, {len(warnings)} warnings in {len(requirements)} requirements",
+         }
+
+     @mcp.tool()
+     def parse_requirement(text: str, file_path: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Parse requirement text and extract structured data.
+
+         Args:
+             text: Markdown text containing one or more requirements
+             file_path: Optional source file path for location info
+         """
+         from elspais.core.parser import RequirementParser
+         from elspais.core.patterns import PatternConfig
+
+         pattern_config = PatternConfig.from_dict(ctx.config.get("patterns", {}))
+         parser = RequirementParser(pattern_config)
+         path = Path(file_path) if file_path else None
+         requirements = parser.parse_text(text, file_path=path)
+
+         return {
+             "count": len(requirements),
+             "requirements": {
+                 req_id: serialize_requirement(req)
+                 for req_id, req in requirements.items()
+             }
+         }
+
+     @mcp.tool()
+     def search(
+         query: str,
+         field: str = "all",
+         regex: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Search requirements by pattern.
+
+         Args:
+             query: Search query string
+             field: Field to search - "all", "id", "title", "body", "assertions"
+             regex: If True, treat query as a regex pattern
+         """
+         results = ctx.search_requirements(query, field, regex)
+         return {
+             "count": len(results),
+             "query": query,
+             "field": field,
+             "requirements": [serialize_requirement_summary(r) for r in results]
+         }
+
+     @mcp.tool()
+     def get_requirement(req_id: str) -> Dict[str, Any]:
+         """
+         Get complete details for a single requirement.
+
+         Args:
+             req_id: The requirement ID (e.g., "REQ-p00001")
+         """
+         req = ctx.get_requirement(req_id)
+         if req is None:
+             return {"error": f"Requirement {req_id} not found"}
+         return serialize_requirement(req)
+
+     @mcp.tool()
+     def analyze(analysis_type: str = "hierarchy") -> Dict[str, Any]:
+         """
+         Analyze requirement structure.
+
+         Args:
+             analysis_type: One of "hierarchy", "orphans", "coverage"
+         """
+         requirements = ctx.get_requirements()
+
+         if analysis_type == "hierarchy":
+             return _analyze_hierarchy(requirements)
+         elif analysis_type == "orphans":
+             return _analyze_orphans(requirements)
+         elif analysis_type == "coverage":
+             return _analyze_coverage(requirements)
+         else:
+             return {"error": f"Unknown analysis type: {analysis_type}"}
+
+
+ def _analyze_hierarchy(requirements: Dict[str, Any]) -> Dict[str, Any]:
+     """Analyze the requirement hierarchy."""
+     # Build parent -> children mapping
+     children_map: Dict[str, List[str]] = {}
+     roots = []
+
+     for req in requirements.values():
+         if not req.implements:
+             roots.append(req.id)
+         else:
+             for parent_id in req.implements:
+                 if parent_id not in children_map:
+                     children_map[parent_id] = []
+                 children_map[parent_id].append(req.id)
+
+     return {
+         "total": len(requirements),
+         "roots": roots,
+         "children_map": children_map,
+     }
+
+
+ def _analyze_orphans(requirements: Dict[str, Any]) -> Dict[str, Any]:
+     """Find orphaned requirements (references to missing parents)."""
+     all_ids = set(requirements.keys())
+     orphans = []
+
+     for req in requirements.values():
+         for parent_id in req.implements:
+             if parent_id not in all_ids:
+                 orphans.append({
+                     "id": req.id,
+                     "missing_parent": parent_id,
+                 })
+
+     return {
+         "count": len(orphans),
+         "orphans": orphans,
+     }
+
+
+ def _analyze_coverage(requirements: Dict[str, Any]) -> Dict[str, Any]:
+     """Analyze requirement coverage by level."""
+     levels: Dict[str, int] = {}
+
+     for req in requirements.values():
+         level = req.level.upper()
+         levels[level] = levels.get(level, 0) + 1
+
+     return {
+         "total": len(requirements),
+         "by_level": levels,
+     }
+
+
+ def run_server(
+     working_dir: Optional[Path] = None,
+     transport: str = "stdio",
+ ) -> None:
+     """
+     Run the MCP server.
+
+     Args:
+         working_dir: Working directory
+         transport: Transport type - "stdio", "sse", or "streamable-http"
+     """
+     mcp = create_server(working_dir)
+     mcp.run(transport=transport)
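
A minimal entry point, assuming the optional MCP extra is installed (pip install elspais[mcp]); run_server and its transport values come from the diff above:

    from pathlib import Path
    from elspais.mcp.server import run_server

    # Serves over stdio by default; "sse" and "streamable-http" are also accepted.
    run_server(working_dir=Path.cwd(), transport="stdio")
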
elspais/testing/__init__.py ADDED
@@ -0,0 +1,27 @@
+ """
+ elspais.testing - Test mapping and coverage functionality.
+
+ This package provides test-to-requirement mapping and coverage analysis:
+ - TestingConfig: Configuration for test scanning
+ - TestScanner: Scans test files for requirement references
+ - ResultParser: Parses JUnit XML and pytest JSON results
+ - TestMapper: Orchestrates scanning and result mapping
+ """
+
+ from elspais.testing.config import TestingConfig
+ from elspais.testing.mapper import RequirementTestData, TestMapper, TestMappingResult
+ from elspais.testing.result_parser import ResultParser, TestResult, TestStatus
+ from elspais.testing.scanner import TestReference, TestScanResult, TestScanner
+
+ __all__ = [
+     "TestingConfig",
+     "TestScanner",
+     "TestScanResult",
+     "TestReference",
+     "ResultParser",
+     "TestResult",
+     "TestStatus",
+     "TestMapper",
+     "TestMappingResult",
+     "RequirementTestData",
+ ]
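
Because the package re-exports its public names in __all__, consumers can import from elspais.testing directly rather than from the submodules:

    from elspais.testing import TestingConfig, TestMapper, TestScanner
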
elspais/testing/config.py ADDED
@@ -0,0 +1,48 @@
+ """
+ elspais.testing.config - Configuration for test mapping and coverage.
+
+ Provides the TestingConfig dataclass for configuring test file scanning
+ and result file parsing.
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, List
+
+
+ @dataclass
+ class TestingConfig:
+     """
+     Configuration for test mapping and coverage features.
+
+     Attributes:
+         enabled: Whether test mapping is enabled (default: False)
+         test_dirs: Glob patterns for test directories to scan
+         patterns: File patterns to match test files (e.g., "*_test.py")
+         result_files: Glob patterns for test result files (JUnit XML, pytest JSON)
+         reference_patterns: Regex patterns to extract requirement IDs from tests
+     """
+
+     enabled: bool = False
+     test_dirs: List[str] = field(default_factory=list)
+     patterns: List[str] = field(default_factory=list)
+     result_files: List[str] = field(default_factory=list)
+     reference_patterns: List[str] = field(default_factory=list)
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "TestingConfig":
+         """
+         Create a TestingConfig from a configuration dictionary.
+
+         Args:
+             data: Dictionary from the [testing] config section
+
+         Returns:
+             TestingConfig instance with values from data or defaults
+         """
+         return cls(
+             enabled=data.get("enabled", False),
+             test_dirs=data.get("test_dirs", []),
+             patterns=data.get("patterns", []),
+             result_files=data.get("result_files", []),
+             reference_patterns=data.get("reference_patterns", []),
+         )
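
A sketch of how from_dict consumes a decoded [testing] table; the keys mirror the attributes documented above, and the glob and regex values are illustrative only:

    config = TestingConfig.from_dict({
        "enabled": True,
        "test_dirs": ["tests/"],
        "patterns": ["test_*.py"],
        "result_files": ["reports/junit*.xml"],
        "reference_patterns": [r"REQ-[a-z]\d{5}"],
    })
    assert config.enabled
    assert config.patterns == ["test_*.py"]
    # Keys that are absent fall back to the dataclass defaults.
    assert TestingConfig.from_dict({}).enabled is False
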