elspais 0.9.3__py3-none-any.whl → 0.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- elspais/cli.py +141 -10
- elspais/commands/hash_cmd.py +72 -26
- elspais/commands/reformat_cmd.py +458 -0
- elspais/commands/trace.py +157 -3
- elspais/commands/validate.py +44 -16
- elspais/core/models.py +2 -0
- elspais/core/parser.py +68 -24
- elspais/reformat/__init__.py +50 -0
- elspais/reformat/detector.py +119 -0
- elspais/reformat/hierarchy.py +246 -0
- elspais/reformat/line_breaks.py +220 -0
- elspais/reformat/prompts.py +123 -0
- elspais/reformat/transformer.py +264 -0
- elspais/sponsors/__init__.py +432 -0
- elspais/trace_view/__init__.py +54 -0
- elspais/trace_view/coverage.py +183 -0
- elspais/trace_view/generators/__init__.py +12 -0
- elspais/trace_view/generators/base.py +329 -0
- elspais/trace_view/generators/csv.py +122 -0
- elspais/trace_view/generators/markdown.py +175 -0
- elspais/trace_view/html/__init__.py +31 -0
- elspais/trace_view/html/generator.py +1006 -0
- elspais/trace_view/html/templates/base.html +283 -0
- elspais/trace_view/html/templates/components/code_viewer_modal.html +14 -0
- elspais/trace_view/html/templates/components/file_picker_modal.html +20 -0
- elspais/trace_view/html/templates/components/legend_modal.html +69 -0
- elspais/trace_view/html/templates/components/review_panel.html +118 -0
- elspais/trace_view/html/templates/partials/review/help/help-panel.json +244 -0
- elspais/trace_view/html/templates/partials/review/help/onboarding.json +77 -0
- elspais/trace_view/html/templates/partials/review/help/tooltips.json +237 -0
- elspais/trace_view/html/templates/partials/review/review-comments.js +928 -0
- elspais/trace_view/html/templates/partials/review/review-data.js +961 -0
- elspais/trace_view/html/templates/partials/review/review-help.js +679 -0
- elspais/trace_view/html/templates/partials/review/review-init.js +177 -0
- elspais/trace_view/html/templates/partials/review/review-line-numbers.js +429 -0
- elspais/trace_view/html/templates/partials/review/review-packages.js +1029 -0
- elspais/trace_view/html/templates/partials/review/review-position.js +540 -0
- elspais/trace_view/html/templates/partials/review/review-resize.js +115 -0
- elspais/trace_view/html/templates/partials/review/review-status.js +659 -0
- elspais/trace_view/html/templates/partials/review/review-sync.js +992 -0
- elspais/trace_view/html/templates/partials/review-styles.css +2238 -0
- elspais/trace_view/html/templates/partials/scripts.js +1741 -0
- elspais/trace_view/html/templates/partials/styles.css +1756 -0
- elspais/trace_view/models.py +353 -0
- elspais/trace_view/review/__init__.py +60 -0
- elspais/trace_view/review/branches.py +1149 -0
- elspais/trace_view/review/models.py +1205 -0
- elspais/trace_view/review/position.py +609 -0
- elspais/trace_view/review/server.py +1056 -0
- elspais/trace_view/review/status.py +470 -0
- elspais/trace_view/review/storage.py +1367 -0
- elspais/trace_view/scanning.py +213 -0
- elspais/trace_view/specs/README.md +84 -0
- elspais/trace_view/specs/tv-d00001-template-architecture.md +36 -0
- elspais/trace_view/specs/tv-d00002-css-extraction.md +37 -0
- elspais/trace_view/specs/tv-d00003-js-extraction.md +43 -0
- elspais/trace_view/specs/tv-d00004-build-embedding.md +40 -0
- elspais/trace_view/specs/tv-d00005-test-format.md +78 -0
- elspais/trace_view/specs/tv-d00010-review-data-models.md +33 -0
- elspais/trace_view/specs/tv-d00011-review-storage.md +33 -0
- elspais/trace_view/specs/tv-d00012-position-resolution.md +33 -0
- elspais/trace_view/specs/tv-d00013-git-branches.md +31 -0
- elspais/trace_view/specs/tv-d00014-review-api-server.md +31 -0
- elspais/trace_view/specs/tv-d00015-status-modifier.md +27 -0
- elspais/trace_view/specs/tv-d00016-js-integration.md +33 -0
- elspais/trace_view/specs/tv-p00001-html-generator.md +33 -0
- elspais/trace_view/specs/tv-p00002-review-system.md +29 -0
- {elspais-0.9.3.dist-info → elspais-0.11.1.dist-info}/METADATA +36 -18
- elspais-0.11.1.dist-info/RECORD +101 -0
- elspais-0.9.3.dist-info/RECORD +0 -40
- {elspais-0.9.3.dist-info → elspais-0.11.1.dist-info}/WHEEL +0 -0
- {elspais-0.9.3.dist-info → elspais-0.11.1.dist-info}/entry_points.txt +0 -0
- {elspais-0.9.3.dist-info → elspais-0.11.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,458 @@
|
|
|
1
|
+
# Implements: REQ-int-d00008 (Reformat Command)
|
|
2
|
+
"""
|
|
3
|
+
elspais.commands.reformat_cmd - Reformat requirements using AI.
|
|
4
|
+
|
|
5
|
+
Transforms requirements from old format (Acceptance Criteria) to new format
|
|
6
|
+
(labeled Assertions). Also provides line break normalization.
|
|
7
|
+
|
|
8
|
+
REQ-int-d00008-A: Format transformation SHALL be available via
|
|
9
|
+
`elspais reformat-with-claude`.
|
|
10
|
+
REQ-int-d00008-B: The command SHALL support --dry-run, --backup, --start-req flags.
|
|
11
|
+
REQ-int-d00008-C: Line break normalization SHALL be included.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import argparse
|
|
15
|
+
import shutil
|
|
16
|
+
import sys
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import List, Optional
|
|
19
|
+
|
|
20
|
+
from elspais.config.loader import load_config, find_config_file, get_spec_directories
|
|
21
|
+
from elspais.core.parser import RequirementParser
|
|
22
|
+
from elspais.core.patterns import PatternValidator, PatternConfig
|
|
23
|
+
from elspais.core.rules import RuleEngine, RulesConfig
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def run(args: argparse.Namespace) -> int:
    """Run the reformat-with-claude command.

    This command reformats requirements from the old Acceptance Criteria format
    to the new Assertions format using Claude AI.

    Args:
        args: Parsed CLI namespace. Reads line_breaks_only, start_req, depth,
            dry_run, backup, force, fix_line_breaks; optionally verbose and
            mode (defaulting to False / 'combined').

    Returns:
        0 if every processed requirement succeeded, 1 on any error.
    """
    # Deferred import: keeps CLI startup cheap when this command is not used.
    # NOTE(review): fix_requirement_line_breaks is imported but never called
    # in this function — confirm whether it is still needed.
    from elspais.reformat import (
        get_all_requirements,
        build_hierarchy,
        traverse_top_down,
        normalize_req_id,
        reformat_requirement,
        assemble_new_format,
        validate_reformatted_content,
        normalize_line_breaks,
        fix_requirement_line_breaks,
    )

    print("elspais reformat-with-claude")
    print()

    # Handle line-breaks-only mode: skip the AI pipeline entirely.
    if args.line_breaks_only:
        return run_line_breaks_only(args)

    # Configuration pulled from CLI flags.
    start_req = args.start_req
    max_depth = args.depth
    dry_run = args.dry_run
    backup = args.backup
    force = args.force
    fix_line_breaks = args.fix_line_breaks
    verbose = getattr(args, 'verbose', False)
    mode = getattr(args, 'mode', 'combined')

    print(f"Options:")
    print(f" Start REQ: {start_req or 'All PRD requirements'}")
    print(f" Max depth: {max_depth or 'Unlimited'}")
    print(f" Mode: {mode}")
    print(f" Dry run: {dry_run}")
    print(f" Backup: {backup}")
    print(f" Force reformat: {force}")
    print(f" Fix line breaks: {fix_line_breaks}")
    print()

    if dry_run:
        print("DRY RUN MODE - no changes will be made")
        print()

    # Create cached validator for ID normalization.
    config_path = find_config_file(Path.cwd())
    config = load_config(config_path) if config_path else {}
    pattern_config = PatternConfig.from_dict(config.get("patterns", {}))
    validator = PatternValidator(pattern_config)

    # Determine local base path for filtering (only modify local files);
    # requirements from core/associated repos are read-only here.
    local_base_path = config_path.parent if config_path else Path.cwd()

    # Get all requirements (including cross-repo if mode allows).
    print("Loading requirements...", end=" ", flush=True)
    requirements = get_all_requirements(mode=mode)
    if not requirements:
        print("FAILED")
        print("Error: Could not load requirements. Run 'elspais validate' first.",
              file=sys.stderr)
        return 1
    print(f"found {len(requirements)} requirements")

    # Build parent/child hierarchy in place.
    print("Building hierarchy...", end=" ", flush=True)
    build_hierarchy(requirements)
    print("done", flush=True)

    # Determine which requirements to process.
    if start_req:
        # Normalize and validate the user-supplied start requirement ID.
        print(f"Normalizing {start_req}...", end=" ", flush=True)
        start_req = normalize_req_id(start_req, validator)
        print(f"-> {start_req}", flush=True)
        if start_req not in requirements:
            print(f"Error: Requirement {start_req} not found", file=sys.stderr)
            return 1

        print(f"Traversing from {start_req}...", flush=True)
        req_ids = traverse_top_down(requirements, start_req, max_depth)
        print(f"Traversal complete", flush=True)
    else:
        # No start point: process all PRD requirements first, then their
        # descendants, deduplicating across overlapping subtrees.
        prd_reqs = [
            req_id for req_id, node in requirements.items()
            if node.level.upper() == 'PRD'
        ]
        prd_reqs.sort()

        print(f"Processing {len(prd_reqs)} PRD requirements and their descendants...")
        req_ids = []
        seen = set()
        for prd_id in prd_reqs:
            for req_id in traverse_top_down(requirements, prd_id, max_depth):
                if req_id not in seen:
                    req_ids.append(req_id)
                    seen.add(req_id)

        print(f"Found {len(req_ids)} requirements to process", flush=True)

    # Run validation to identify requirements with acceptance_criteria issues
    # (i.e. the old format that needs conversion).
    print("Running validation to identify old format...", end=" ", flush=True)
    needs_reformat_ids = get_requirements_needing_reformat(config, local_base_path)
    print(f"found {len(needs_reformat_ids)} with old format", flush=True)
    print(flush=True)

    # Filter to only requirements that need reformatting (unless --force).
    if not force:
        req_ids = [r for r in req_ids if r in needs_reformat_ids]
        print(f"Filtered to {len(req_ids)} requirements needing reformat")
        print(flush=True)

    # Process each requirement, tracking outcomes for the summary.
    reformatted = 0
    skipped = 0
    errors = 0
    line_break_fixes = 0

    for i, req_id in enumerate(req_ids):
        # Progress heartbeat every 10 requirements (not at index 0).
        if i % 10 == 0 and i > 0:
            print(f"Processing {i}/{len(req_ids)}...", flush=True)
        node = requirements[req_id]

        # Skip non-local files (from core/associated repos).
        if not is_local_file(node.file_path, local_base_path):
            skipped += 1
            continue

        print(f"[PROC] {req_id}: {node.title[:50]}...")

        # Call Claude to reformat this single requirement.
        result, success, error_msg = reformat_requirement(node, verbose=verbose)

        if not success:
            print(f" ERROR: {error_msg}")
            errors += 1
            continue

        # Validate the result returned by the model.
        rationale = result.get('rationale', '')
        assertions = result.get('assertions', [])

        is_valid, warnings = validate_reformatted_content(node, rationale, assertions)

        if warnings:
            for warning in warnings:
                print(f" WARNING: {warning}")

        if not is_valid:
            print(f" INVALID: Skipping due to validation errors")
            errors += 1
            continue

        # Assemble the new requirement block from validated pieces.
        new_content = assemble_new_format(
            req_id=node.req_id,
            title=node.title,
            level=node.level,
            status=node.status,
            implements=node.implements,
            rationale=rationale,
            assertions=assertions
        )

        # Optionally normalize line breaks in the assembled content.
        if fix_line_breaks:
            new_content = normalize_line_breaks(new_content)
            line_break_fixes += 1

        if dry_run:
            print(f" Would write to: {node.file_path}")
            print(f" Assertions: {len(assertions)}")
            reformatted += 1
        else:
            # Write the reformatted content back into the source file.
            try:
                file_path = Path(node.file_path)

                if backup:
                    # Keep a .bak copy next to the original before rewriting.
                    backup_path = file_path.with_suffix(file_path.suffix + '.bak')
                    shutil.copy2(file_path, backup_path)
                    print(f" Backup: {backup_path}")

                # Read the entire file.
                content = file_path.read_text()

                # Find and replace this requirement's content.
                # The requirement starts with its header and ends before the next
                # requirement or end of file.
                updated_content = replace_requirement_content(
                    content, node.req_id, node.title, new_content
                )

                if updated_content:
                    file_path.write_text(updated_content)
                    print(f" Written: {file_path}")
                    reformatted += 1
                else:
                    print(f" ERROR: Could not locate requirement in file")
                    errors += 1

            except Exception as e:
                # Best-effort per-requirement: record the error and continue.
                print(f" ERROR: {e}")
                errors += 1

    # Summary
    print()
    print("=" * 60)
    print(f"Summary:")
    print(f" Reformatted: {reformatted}")
    print(f" Skipped: {skipped}")
    print(f" Errors: {errors}")
    if fix_line_breaks:
        print(f" Line breaks: {line_break_fixes} files normalized")

    return 0 if errors == 0 else 1
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def replace_requirement_content(
    file_content: str,
    req_id: str,
    title: str,
    new_content: str
) -> Optional[str]:
    """
    Replace a requirement's content in a file.

    Finds the requirement by its header pattern (``# REQ-xxx: Title``) and
    replaces everything up to and including the footer line
    (``*End* *Title* | **Hash**: xxxxxxxx``), plus any blank lines that
    directly follow the footer. *new_content* is assumed to carry its own
    footer.

    Args:
        file_content: Full file content
        req_id: Requirement ID (e.g., 'REQ-d00027')
        title: Requirement title
        new_content: New requirement content

    Returns:
        Updated file content, or None if the requirement header was not
        found — or if the footer was never found after the header (in which
        case replacing would silently drop the rest of the file).
    """
    import re

    # Pattern to match the requirement header, e.g. "# REQ-d00027: Title"
    header_pattern = rf'^# {re.escape(req_id)}:\s*'

    # Pattern to match the footer, e.g. "*End* *Title* | **Hash**: xxxxxxxx"
    footer_pattern = rf'^\*End\*\s+\*{re.escape(title)}\*\s+\|\s+\*\*Hash\*\*:\s*[a-fA-F0-9]+'

    lines = file_content.split('\n')
    result_lines = []
    in_requirement = False
    found = False

    i = 0
    while i < len(lines):
        line = lines[i]

        if not in_requirement:
            # Check if this line starts the requirement.
            if re.match(header_pattern, line, re.IGNORECASE):
                in_requirement = True
                found = True
                # Insert new content (without trailing newline; join adds them).
                new_lines = new_content.rstrip('\n').split('\n')
                result_lines.extend(new_lines)
                i += 1
                continue
            else:
                result_lines.append(line)
                i += 1
        else:
            # Inside the old requirement: drop lines until the footer.
            if re.match(footer_pattern, line, re.IGNORECASE):
                # The new content carries its own footer, so skip the old one.
                in_requirement = False
                i += 1
                # Skip any trailing blank lines after the footer.
                while i < len(lines) and lines[i].strip() == '':
                    i += 1
            else:
                # Skip this line (part of old requirement).
                i += 1

    if not found:
        return None

    if in_requirement:
        # BUG FIX: the header matched but the footer never did, so the loop
        # consumed everything after the header. Returning the result would
        # silently truncate the file — fail instead so the caller reports
        # "could not locate requirement" and leaves the file untouched.
        return None

    return '\n'.join(result_lines)
|
|
320
|
+
|
|
321
|
+
|
|
322
|
+
def run_line_breaks_only(args: argparse.Namespace) -> int:
    """Run line break normalization only (no AI reformatting).

    Returns 0 when every file was processed cleanly, 1 if any file
    raised an error.
    """
    # Deferred import: only needed for this sub-mode.
    from elspais.reformat import (
        get_all_requirements,
        normalize_line_breaks,
        detect_line_break_issues,
    )

    dry_run = args.dry_run
    backup = args.backup

    print("Line break normalization mode")
    print(f" Dry run: {dry_run}")
    print(f" Backup: {backup}")
    print()

    # Load every known requirement so we can discover the files they live in.
    print("Loading requirements...", end=" ", flush=True)
    requirements = get_all_requirements()
    if not requirements:
        print("FAILED")
        print("Error: Could not load requirements.", file=sys.stderr)
        return 1
    print(f"found {len(requirements)} requirements")

    # Group requirement IDs by source file so each file is touched once.
    files_to_process = {}
    for req_id, node in requirements.items():
        files_to_process.setdefault(node.file_path, []).append(req_id)

    print(f"Processing {len(files_to_process)} files...")
    print()

    fixed = 0
    unchanged = 0
    errors = 0

    for path_str, req_ids in sorted(files_to_process.items()):
        target = Path(path_str)

        try:
            original = target.read_text()
            problems = detect_line_break_issues(original)

            if not problems:
                unchanged += 1
                continue

            print(f"[FIX] {target}")
            for problem in problems:
                print(f" - {problem}")

            if dry_run:
                # Count the file as fixable but do not touch it.
                fixed += 1
                continue

            # Apply the fixes, optionally keeping a .bak copy first.
            repaired = normalize_line_breaks(original)

            if backup:
                shutil.copy2(target, target.with_suffix(target.suffix + '.bak'))

            target.write_text(repaired)
            fixed += 1

        except Exception as exc:
            print(f"[ERR] {target}: {exc}")
            errors += 1

    print()
    print("=" * 60)
    print(f"Summary:")
    print(f" Fixed: {fixed}")
    print(f" Unchanged: {unchanged}")
    print(f" Errors: {errors}")

    return 0 if errors == 0 else 1
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
def get_requirements_needing_reformat(config: dict, base_path: Path) -> set:
    """Run validation to identify requirements still in the old format.

    Parses the local spec directories and collects the IDs of requirements
    flagged by the ``format.acceptance_criteria`` rule.

    Args:
        config: Configuration dictionary
        base_path: Base path of the local repository

    Returns:
        Set of requirement IDs that have format.acceptance_criteria
        violations (empty on any parse failure — best effort).
    """
    # Only local spec directories are considered; remote repos are read-only.
    spec_dirs = get_spec_directories(None, config, base_path)
    if not spec_dirs:
        return set()

    # Build the parser from the configured ID patterns and spec options.
    spec_config = config.get("spec", {})
    parser = RequirementParser(
        PatternConfig.from_dict(config.get("patterns", {})),
        no_reference_values=spec_config.get("no_reference_values"),
    )

    try:
        parse_result = parser.parse_directories(
            spec_dirs, skip_files=spec_config.get("skip_files", [])
        )
        requirements = dict(parse_result)
    except Exception:
        # Best effort: if parsing fails, nothing can be reformatted safely.
        return set()

    # Validate and keep only acceptance_criteria violations.
    engine = RuleEngine(RulesConfig.from_dict(config.get("rules", {})))
    return {
        violation.requirement_id
        for violation in engine.validate(requirements)
        if violation.rule_name == "format.acceptance_criteria"
    }
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
def is_local_file(file_path: str, base_path: Path) -> bool:
    """Check if file is in the local repo (not core/associated).

    Args:
        file_path: Path to the file (string)
        base_path: Base path of the local repository

    Returns:
        True if file is within the local repo, False otherwise
    """
    resolved = Path(file_path).resolve()
    repo_root = base_path.resolve()
    try:
        # relative_to raises ValueError when resolved is outside repo_root.
        resolved.relative_to(repo_root)
    except ValueError:
        return False
    return True
|
elspais/commands/trace.py
CHANGED
|
@@ -1,5 +1,8 @@
|
|
|
1
|
+
# Implements: REQ-int-d00003 (CLI Extension)
|
|
1
2
|
"""
|
|
2
3
|
elspais.commands.trace - Generate traceability matrix command.
|
|
4
|
+
|
|
5
|
+
Supports both basic matrix generation and enhanced trace-view features.
|
|
3
6
|
"""
|
|
4
7
|
|
|
5
8
|
import argparse
|
|
@@ -15,7 +18,28 @@ from elspais.core.patterns import PatternConfig
|
|
|
15
18
|
|
|
16
19
|
|
|
17
20
|
def run(args: argparse.Namespace) -> int:
    """Run the trace command.

    REQ-int-d00003-C: Existing elspais trace --format html behavior SHALL be preserved.
    """
    # Any trace-view flag routes to the enhanced generator; absence of all
    # of them falls through to the original matrix generation.
    trace_view_flags = ('view', 'embed_content', 'edit_mode', 'review_mode', 'server')
    if any(getattr(args, flag, False) for flag in trace_view_flags):
        return run_trace_view(args)

    # Original basic trace functionality.
    return run_basic_trace(args)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def run_basic_trace(args: argparse.Namespace) -> int:
|
|
42
|
+
"""Run basic trace matrix generation (original behavior)."""
|
|
19
43
|
# Load configuration
|
|
20
44
|
config_path = args.config or find_config_file(Path.cwd())
|
|
21
45
|
if config_path and config_path.exists():
|
|
@@ -78,6 +102,138 @@ def run(args: argparse.Namespace) -> int:
|
|
|
78
102
|
return 0
|
|
79
103
|
|
|
80
104
|
|
|
105
|
+
def run_trace_view(args: argparse.Namespace) -> int:
    """Run enhanced trace-view features.

    REQ-int-d00003-A: Trace-view features SHALL be accessible via elspais trace command.
    REQ-int-d00003-B: New flags SHALL include: --view, --embed-content, --edit-mode,
    --review-mode, --server.

    Returns:
        0 on success, 1 if the optional trace-view dependencies are missing.
    """
    # Check if starting review server
    if args.server:
        return run_review_server(args)

    # Import trace_view (requires jinja2)
    try:
        from elspais.trace_view import TraceViewGenerator
    except ImportError as e:
        print("Error: trace-view features require additional dependencies.", file=sys.stderr)
        print("Install with: pip install elspais[trace-view]", file=sys.stderr)
        # Fixed: was `if args.verbose if hasattr(args, 'verbose') else False:` —
        # a convoluted conditional expression; getattr with a default is the
        # idiom already used throughout this function.
        if getattr(args, 'verbose', False):
            print(f"Import error: {e}", file=sys.stderr)
        return 1

    # Load configuration
    config_path = args.config or find_config_file(Path.cwd())
    if config_path and config_path.exists():
        config = load_config(config_path)
    else:
        config = DEFAULT_CONFIG

    # Determine spec directory (explicit flag wins; else config; else ./spec)
    spec_dir = args.spec_dir
    if not spec_dir:
        spec_dirs = get_spec_directories(None, config)
        spec_dir = spec_dirs[0] if spec_dirs else Path.cwd() / "spec"

    repo_root = spec_dir.parent if spec_dir.name == "spec" else spec_dir.parent.parent

    # Get implementation directories from config (only those that exist)
    impl_dirs = []
    dirs_config = config.get("directories", {})
    code_dirs = dirs_config.get("code", [])
    for code_dir in code_dirs:
        impl_path = repo_root / code_dir
        if impl_path.exists():
            impl_dirs.append(impl_path)

    # Create generator
    generator = TraceViewGenerator(
        spec_dir=spec_dir,
        impl_dirs=impl_dirs,
        sponsor=getattr(args, 'sponsor', None),
        mode=getattr(args, 'mode', 'core'),
        repo_root=repo_root,
        config=config,
    )

    # Determine output format: --view implies HTML; "both" collapses to HTML
    output_format = "html" if args.view else args.format
    if output_format == "both":
        output_format = "html"

    # Determine output file (default name follows the chosen format)
    output_file = args.output
    if output_file is None:
        if output_format == "html":
            output_file = Path("traceability_matrix.html")
        elif output_format == "csv":
            output_file = Path("traceability_matrix.csv")
        else:
            output_file = Path("traceability_matrix.md")

    # Generate
    quiet = getattr(args, 'quiet', False)
    generator.generate(
        format=output_format,
        output_file=output_file,
        embed_content=getattr(args, 'embed_content', False),
        edit_mode=getattr(args, 'edit_mode', False),
        review_mode=getattr(args, 'review_mode', False),
        quiet=quiet,
    )

    return 0
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def run_review_server(args: argparse.Namespace) -> int:
    """Start the review server.

    REQ-int-d00002-C: Review server SHALL require flask, flask-cors via
    elspais[trace-review] extra.

    Args:
        args: Parsed CLI namespace; reads spec_dir and optionally port
            (default 8080).

    Returns:
        0 on clean shutdown, 1 if the optional server dependencies are missing.
    """
    # The review package itself is optional; import failure means the
    # [trace-review] extra is not installed.
    try:
        from elspais.trace_view.review import create_app, FLASK_AVAILABLE
    except ImportError:
        print("Error: Review server requires additional dependencies.", file=sys.stderr)
        print("Install with: pip install elspais[trace-review]", file=sys.stderr)
        return 1

    # The package may import fine while Flask itself is absent, hence the
    # separate capability flag check.
    if not FLASK_AVAILABLE:
        print("Error: Review server requires Flask.", file=sys.stderr)
        print("Install with: pip install elspais[trace-review]", file=sys.stderr)
        return 1

    # Determine repo root: derive from --spec-dir when given, else cwd.
    spec_dir = args.spec_dir
    if spec_dir:
        repo_root = spec_dir.parent if spec_dir.name == "spec" else spec_dir.parent.parent
    else:
        repo_root = Path.cwd()

    port = getattr(args, 'port', 8080)

    print(f"""
======================================
elspais Review Server
======================================

Repository: {repo_root}
Server: http://localhost:{port}

Press Ctrl+C to stop
""")

    # Blocks until interrupted; Ctrl+C is treated as a normal shutdown.
    app = create_app(repo_root, auto_sync=True)
    try:
        app.run(host='0.0.0.0', port=port, debug=False)
    except KeyboardInterrupt:
        print("\nServer stopped.")

    return 0
|
|
235
|
+
|
|
236
|
+
|
|
81
237
|
def generate_markdown_matrix(requirements: Dict[str, Requirement]) -> str:
|
|
82
238
|
"""Generate Markdown traceability matrix."""
|
|
83
239
|
lines = ["# Traceability Matrix", "", "## Requirements Hierarchy", ""]
|
|
@@ -204,5 +360,3 @@ def find_implementers(req_id: str, requirements: Dict[str, Requirement]) -> List
|
|
|
204
360
|
break
|
|
205
361
|
|
|
206
362
|
return implementers
|
|
207
|
-
|
|
208
|
-
|