loki-mode 6.9.0 → 6.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,827 @@
1
+ #!/usr/bin/env python3
2
+ """OpenSpec Change Adapter for Loki Mode
3
+
4
+ Parses OpenSpec change directories (proposal.md, specs/, design.md, tasks.md)
5
+ and normalizes them into Loki Mode's internal format. Bridges OpenSpec's
6
+ delta-based specification workflow into the .loki/ pipeline.
7
+
8
+ Stdlib only - no pip dependencies required. Python 3.9+.
9
+
10
+ Usage:
11
+ python3 openspec-adapter.py <change-dir-path> [options]
12
+ --output-dir DIR Where to write output files (default: .loki/)
13
+ --json Output metadata as JSON to stdout
14
+ --validate Run artifact validation only
15
+ """
16
+
17
+ import argparse
18
+ import json
19
+ import os
20
+ import re
21
+ import sys
22
+ import tempfile
23
+ from pathlib import Path
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
# Maximum artifact file size (10 MB) — _safe_read rejects anything larger
# to keep a malformed/huge change directory from exhausting memory.
MAX_ARTIFACT_SIZE = 10 * 1024 * 1024
28
+
29
+
30
def _safe_read(path: Path) -> str:
    """Return the UTF-8 text of *path*, enforcing the artifact size cap.

    Raises:
        ValueError: when the file exceeds MAX_ARTIFACT_SIZE.

    Undecodable bytes are replaced rather than raising, so arbitrary
    authored markdown never aborts parsing.
    """
    size = path.stat().st_size
    if size <= MAX_ARTIFACT_SIZE:
        return path.read_text(encoding="utf-8", errors="replace")
    raise ValueError(
        f"Artifact too large ({size} bytes, max {MAX_ARTIFACT_SIZE}): {path.name}"
    )
36
+
37
+
38
+ def _write_atomic(path: Path, content: str) -> None:
39
+ """Write content to file atomically using temp file + rename."""
40
+ path.parent.mkdir(parents=True, exist_ok=True)
41
+ fd, tmp_path = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
42
+ try:
43
+ with os.fdopen(fd, "w", encoding="utf-8") as f:
44
+ f.write(content)
45
+ os.replace(tmp_path, str(path))
46
+ except Exception:
47
+ try:
48
+ os.unlink(tmp_path)
49
+ except OSError:
50
+ pass
51
+ raise
52
+
53
+
54
+ # -- Simple YAML Parsing (regex-based, no PyYAML) ----------------------------
55
+
56
def _parse_simple_yaml(text: str) -> Dict[str, Any]:
    """Parse flat YAML key/value pairs without PyYAML.

    Supports plain scalars, single/double-quoted strings, and flow-style
    lists ([a, b]).  Nested mappings and block-style lists are out of
    scope by design.
    """
    parsed: Dict[str, Any] = {}
    for raw in text.split("\n"):
        entry = raw.strip()
        if not entry or entry.startswith("#"):
            continue  # skip blanks and comments
        m = re.match(r"^(\w[\w-]*):\s*(.*)", entry)
        if m is None:
            continue  # not a key: value line
        key, value = m.group(1), m.group(2).strip()
        if value.startswith("[") and value.endswith("]"):
            # Flow-style list: split on commas, unquote each element.
            parsed[key] = [
                _unquote(part.strip())
                for part in value[1:-1].split(",")
                if part.strip()
            ]
        elif (value.startswith("'") and value.endswith("'")) or (
            value.startswith('"') and value.endswith('"')
        ):
            parsed[key] = value[1:-1]  # strip surrounding quotes
        else:
            parsed[key] = value  # plain scalar, kept as string
    return parsed
84
+
85
+
86
+ def _unquote(s: str) -> str:
87
+ """Remove surrounding quotes from a string."""
88
+ if len(s) >= 2:
89
+ if (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
90
+ return s[1:-1]
91
+ return s
92
+
93
+
94
+ # -- Proposal Parsing --------------------------------------------------------
95
+
96
def parse_proposal(proposal_path: Path) -> Dict[str, Any]:
    """Parse proposal.md into structured data.

    Extracts the H1 title plus the Why, What Changes, Capabilities
    (New/Modified sub-sections), and Impact sections.
    """
    text = _safe_read(proposal_path)
    parsed: Dict[str, Any] = {
        "title": "",
        "why": "",
        "what_changes": "",
        "new_capabilities": [],
        "modified_capabilities": [],
        "impact": "",
    }

    # An H1 heading at the top of the file supplies the title.
    h1 = re.match(r"^#\s+(.+)", text.strip())
    if h1:
        parsed["title"] = h1.group(1).strip()

    for heading, body in _split_sections(text, level=2).items():
        key = heading.lower().strip()
        if key == "why":
            parsed["why"] = body.strip()
        elif key == "what changes":
            parsed["what_changes"] = body.strip()
        elif key == "impact":
            parsed["impact"] = body.strip()
        elif key == "capabilities":
            # New/Modified capability lists live in ### sub-sections.
            for sub_heading, sub_body in _split_sections(body, level=3).items():
                caps = _extract_capabilities(sub_body)
                sub_key = sub_heading.lower().strip()
                if "new" in sub_key:
                    parsed["new_capabilities"] = caps
                elif "modified" in sub_key:
                    parsed["modified_capabilities"] = caps

    return parsed
139
+
140
+
141
+ def _split_sections(text: str, level: int = 2) -> Dict[str, str]:
142
+ """Split markdown text into sections by heading level.
143
+
144
+ Returns {heading_text: body_text} preserving order.
145
+ """
146
+ prefix = "#" * level
147
+ pattern = re.compile(rf"^{prefix}\s+(.+)$", re.MULTILINE)
148
+ matches = list(pattern.finditer(text))
149
+ sections: Dict[str, str] = {}
150
+ for i, m in enumerate(matches):
151
+ heading = m.group(1).strip()
152
+ start = m.end()
153
+ end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
154
+ sections[heading] = text[start:end].strip()
155
+ return sections
156
+
157
+
158
+ def _extract_capabilities(text: str) -> List[Dict[str, str]]:
159
+ """Extract capability names and descriptions from bullet items.
160
+
161
+ Matches patterns like:
162
+ - `name`: description
163
+ - **name:** description
164
+ """
165
+ capabilities: List[Dict[str, str]] = []
166
+ # Pattern: - `name`: description
167
+ for m in re.finditer(r"^-\s+`([^`]+)`:\s*(.+)", text, re.MULTILINE):
168
+ capabilities.append({"name": m.group(1).strip(), "description": m.group(2).strip()})
169
+ if capabilities:
170
+ return capabilities
171
+ # Fallback: - **name:** description
172
+ for m in re.finditer(r"^-\s+\*\*([^*]+?):\*\*\s*(.+)", text, re.MULTILINE):
173
+ capabilities.append({"name": m.group(1).strip(), "description": m.group(2).strip()})
174
+ return capabilities
175
+
176
+
177
+ # -- Delta Spec Parsing -------------------------------------------------------
178
+
179
def parse_delta_spec(spec_path: Path) -> Dict[str, Any]:
    """Parse a delta spec.md file.

    Returns {"added": [...], "modified": [...], "removed": [...]} where
    each list holds requirement dicts (with scenarios) for the matching
    ## section of the spec.
    """
    text = _safe_read(spec_path)
    deltas: Dict[str, List[Dict[str, Any]]] = {
        "added": [],
        "modified": [],
        "removed": [],
    }
    for heading, body in _split_sections(text, level=2).items():
        lowered = heading.lower().strip()
        # First category whose keyword appears in the heading wins,
        # checked in added → modified → removed priority order.
        for category in ("added", "modified", "removed"):
            if category in lowered:
                deltas[category] = _parse_requirements(body, category=category)
                break
    return deltas
203
+
204
+
205
def _parse_requirements(text: str, category: str = "added") -> List[Dict[str, Any]]:
    """Parse requirements from a delta section.

    Each requirement: ### Requirement: <name>
    With optional scenarios: #### Scenario: <name>

    Args:
        text: Markdown body of one ADDED/MODIFIED/REMOVED section.
        category: Which delta section the body came from; drives the
            extra "previously" (modified) / "reason" (removed) extraction.

    Returns:
        List of requirement dicts with keys "name", "text", "scenarios",
        plus "previously" / "reason" when the relevant annotation is found.
    """
    requirements: List[Dict[str, Any]] = []
    # Split by ### Requirement: headings
    req_pattern = re.compile(r"^###\s+Requirement:\s*(.+)$", re.MULTILINE)
    req_matches = list(req_pattern.finditer(text))

    for i, m in enumerate(req_matches):
        name = m.group(1).strip()
        start = m.end()
        # Body runs until the next requirement heading (or end of section).
        end = req_matches[i + 1].start() if i + 1 < len(req_matches) else len(text)
        req_body = text[start:end].strip()

        req: Dict[str, Any] = {
            "name": name,
            "text": "",
            "scenarios": [],
        }

        # Extract previously annotation for modified requirements
        if category == "modified":
            # Try parenthesized format first: (Previously: ...)
            prev_match = re.search(r"\(Previously:\s*(.+?)\)", req_body)
            if not prev_match:
                # Try inline format: Previously ... (sentence boundary)
                prev_match = re.search(r"Previously\s+(.+?)(?:\.\s|\.$|\n|$)", req_body)
            if prev_match:
                # Trailing period stripped so the PRD renders "(Previously: ...)".
                req["previously"] = prev_match.group(1).strip().rstrip(".")

        # Extract deprecated/removed reason annotation
        if category == "removed":
            # Try parenthesized format first: (Deprecated: ...)
            dep_match = re.search(r"\(Deprecated(?::\s*(.+?))?\)", req_body)
            if dep_match:
                reason = dep_match.group(1)
                # Group 1 is None for a bare "(Deprecated)" marker.
                req["reason"] = reason.strip() if reason else ""
            else:
                # Try inline narrative: extract first sentence as reason
                # Look for patterns like "is removed", "was deprecated", etc.
                narrative = re.search(
                    r"(?:removed|deprecated|no longer|eliminated)[.\s]+(.+?)(?:\.\s|\.$|\n\n|$)",
                    req_body, re.IGNORECASE
                )
                if narrative:
                    req["reason"] = narrative.group(1).strip().rstrip(".")
                elif req_body.strip():
                    # Use first sentence of body as reason
                    first_sentence = req_body.strip().split(".")[0]
                    req["reason"] = first_sentence.strip()

        # Split into pre-scenario text and scenarios
        scenario_pattern = re.compile(r"^####\s+Scenario:\s*(.+)$", re.MULTILINE)
        scenario_matches = list(scenario_pattern.finditer(req_body))

        if scenario_matches:
            # Text before first scenario
            req["text"] = req_body[:scenario_matches[0].start()].strip()
            # Parse each scenario
            for j, sm in enumerate(scenario_matches):
                sc_name = sm.group(1).strip()
                sc_start = sm.end()
                sc_end = scenario_matches[j + 1].start() if j + 1 < len(scenario_matches) else len(req_body)
                sc_body = req_body[sc_start:sc_end].strip()
                scenario = _parse_scenario(sc_name, sc_body)
                req["scenarios"].append(scenario)
        else:
            # No scenarios -- entire body is the requirement text
            req["text"] = req_body

        requirements.append(req)

    return requirements
281
+
282
+
283
+ def _parse_scenario(name: str, body: str) -> Dict[str, Any]:
284
+ """Parse a scenario body for GIVEN/WHEN/THEN lines.
285
+
286
+ Handles two formats:
287
+ - GIVEN ..., - WHEN ..., - THEN ... (list items)
288
+ - **GIVEN** ..., **WHEN** ..., **THEN** ... (bold keywords)
289
+ Also handles AND lines appended to the previous step.
290
+ """
291
+ scenario: Dict[str, Any] = {
292
+ "name": name,
293
+ "given": [],
294
+ "when": [],
295
+ "then": [],
296
+ }
297
+
298
+ for line in body.split("\n"):
299
+ stripped = line.strip()
300
+ if not stripped:
301
+ continue
302
+
303
+ # Format 1: - **KEYWORD** text or - KEYWORD text
304
+ m = re.match(
305
+ r"^-\s+(?:\*\*)?(?:GIVEN|Given)(?:\*\*)?\s+(.+)",
306
+ stripped,
307
+ )
308
+ if m:
309
+ scenario["given"].append(m.group(1).strip())
310
+ continue
311
+
312
+ m = re.match(
313
+ r"^-\s+(?:\*\*)?(?:WHEN|When)(?:\*\*)?\s+(.+)",
314
+ stripped,
315
+ )
316
+ if m:
317
+ scenario["when"].append(m.group(1).strip())
318
+ continue
319
+
320
+ m = re.match(
321
+ r"^-\s+(?:\*\*)?(?:THEN|Then)(?:\*\*)?\s+(.+)",
322
+ stripped,
323
+ )
324
+ if m:
325
+ scenario["then"].append(m.group(1).strip())
326
+ continue
327
+
328
+ m = re.match(
329
+ r"^-\s+(?:\*\*)?(?:AND|And)(?:\*\*)?\s+(.+)",
330
+ stripped,
331
+ )
332
+ if m:
333
+ # Append AND to the last non-empty list (then > when > given)
334
+ and_text = m.group(1).strip()
335
+ if scenario["then"]:
336
+ scenario["then"].append(and_text)
337
+ elif scenario["when"]:
338
+ scenario["when"].append(and_text)
339
+ elif scenario["given"]:
340
+ scenario["given"].append(and_text)
341
+ continue
342
+
343
+ return scenario
344
+
345
+
346
+ # -- Tasks Parsing ------------------------------------------------------------
347
+
348
def parse_tasks(tasks_path: Path) -> Tuple[List[Dict[str, Any]], Dict[str, Dict[str, Any]]]:
    """Parse tasks.md into a structured task list and a source map.

    Group headings look like "## N. Group Name"; tasks look like
    "- [ ] N.M description" ("x"/"X" marks completion).

    Returns:
        (tasks_list, source_map) where tasks_list holds normalized task
        dicts and source_map maps task_id -> {"file", "line", "group"}.
    """
    text = _safe_read(tasks_path)
    tasks: List[Dict[str, Any]] = []
    source_map: Dict[str, Dict[str, Any]] = {}
    group = ""  # most recently seen "## N. Name" heading

    for lineno, raw in enumerate(text.split("\n"), start=1):
        line = raw.strip()

        heading = re.match(r"^##\s+(\d+)\.\s+(.+)", line)
        if heading:
            group = heading.group(2).strip()
            continue

        item = re.match(r"^-\s+\[([ xX])\]\s+(\d+\.\d+)\s+(.*)", line)
        if not item:
            continue  # not a numbered checklist item

        done = item.group(1).lower() == "x"
        tid = f"openspec-{item.group(2)}"
        tasks.append({
            "id": tid,
            "title": item.group(3).strip(),
            "group": group,
            "status": "completed" if done else "pending",
            "source": "tasks.md",
            "priority": "medium",
        })
        source_map[tid] = {
            "file": "tasks.md",
            "line": lineno,
            "group": group,
        }

    return tasks, source_map
394
+
395
+
396
+ # -- Design Parsing -----------------------------------------------------------
397
+
398
def parse_design(design_path: Path) -> Dict[str, str]:
    """Parse design.md sections into a flat dict.

    Known headings map to fixed keys (context/goals/decisions/risks);
    any other section is preserved under a snake_cased heading key.
    """
    text = _safe_read(design_path)
    parsed: Dict[str, str] = {}
    for heading, body in _split_sections(text, level=2).items():
        lowered = heading.lower().strip()
        stripped_body = body.strip()
        if "context" in lowered:
            parsed["context"] = stripped_body
        elif "goal" in lowered:
            parsed["goals"] = stripped_body
        elif "decision" in lowered:
            parsed["decisions"] = stripped_body
        elif "risk" in lowered or "trade" in lowered:
            parsed["risks"] = stripped_body
        else:
            # Keep unrecognized sections under a normalized key.
            parsed[lowered.replace(" ", "_").replace("/", "_")] = stripped_body
    return parsed
422
+
423
+
424
+ # -- Metadata Parsing ---------------------------------------------------------
425
+
426
def parse_metadata(yaml_path: Path) -> Dict[str, Any]:
    """Load change metadata from .openspec.yaml (flat key/value YAML only)."""
    return _parse_simple_yaml(_safe_read(yaml_path))
430
+
431
+
432
+ # -- Complexity Classification ------------------------------------------------
433
+
434
def classify_complexity(
    num_tasks: int,
    num_spec_files: int,
    has_design: bool,
) -> str:
    """Map OpenSpec signals onto a Loki complexity tier.

    Tiers, checked from largest down:
      enterprise: >20 tasks or >10 spec files
      complex:    >10 tasks or >5 spec files
      standard:   >3 tasks, >1 spec file, or a design.md present
      simple:     everything else
    """
    tiers = (
        ("enterprise", num_tasks > 20 or num_spec_files > 10),
        ("complex", num_tasks > 10 or num_spec_files > 5),
        ("standard", num_tasks > 3 or num_spec_files > 1 or has_design),
    )
    for tier, matched in tiers:
        if matched:
            return tier
    return "simple"
454
+
455
+
456
+ # -- Validation ---------------------------------------------------------------
457
+
458
def validate_change(change_dir: Path) -> Tuple[List[str], List[str]]:
    """Validate an OpenSpec change directory.

    Checks:
      - proposal.md exists and has content
      - specs/ contains at least one spec.md with a delta section
      - tasks.md exists (warning only)

    A validator should report problems, not crash on them, so oversized
    artifacts (ValueError from _safe_read) are converted into errors.

    Returns:
        (errors, warnings) lists of human-readable messages.
    """
    errors: List[str] = []
    warnings: List[str] = []

    # proposal.md must exist and have content
    proposal_path = change_dir / "proposal.md"
    if not proposal_path.exists():
        errors.append("proposal.md not found")
    elif proposal_path.stat().st_size == 0:
        errors.append("proposal.md is empty")
    else:
        try:
            text = _safe_read(proposal_path)
        except ValueError as exc:
            # Oversized artifact: report instead of crashing the validator.
            errors.append(str(exc))
        else:
            # Check it has at least one non-comment, non-empty line
            content_lines = [
                l for l in text.split("\n")
                if l.strip() and not l.strip().startswith("<!--")
            ]
            if len(content_lines) < 2:
                warnings.append("proposal.md has very little content")

    # specs/ directory must exist with at least one spec.md
    specs_dir = change_dir / "specs"
    if not specs_dir.is_dir():
        errors.append("specs/ directory not found")
    else:
        spec_files = list(specs_dir.rglob("spec.md"))
        if not spec_files:
            errors.append("No spec.md files found under specs/")
        else:
            # Each spec.md should have at least one delta section
            for sf in spec_files:
                try:
                    text = _safe_read(sf)
                except ValueError as exc:
                    # Report the oversized spec and keep checking the rest.
                    errors.append(str(exc))
                    continue
                has_delta = any(
                    re.search(rf"##\s+{keyword}\s+Requirements", text, re.IGNORECASE)
                    for keyword in ("ADDED", "MODIFIED", "REMOVED")
                )
                if not has_delta:
                    domain = sf.parent.name
                    warnings.append(
                        f"specs/{domain}/spec.md has no ADDED/MODIFIED/REMOVED sections"
                    )

    # tasks.md should exist (warn if missing)
    tasks_path = change_dir / "tasks.md"
    if not tasks_path.exists():
        warnings.append("tasks.md not found (no implementation checklist)")

    return errors, warnings
510
+
511
+
512
+ # -- Output Generation --------------------------------------------------------
513
+
514
def build_normalized_prd(
    change_name: str,
    proposal: Dict[str, Any],
    all_deltas: Dict[str, Dict[str, Any]],
    design: Optional[Dict[str, str]],
) -> str:
    """Synthesize the normalized PRD markdown from proposal + specs + design.

    Sections: title, Motivation, Scope, Requirements (every delta across
    all domains, sorted by domain), and Technical Design when present.
    """
    out: List[str] = [f"# OpenSpec Change: {change_name}", ""]

    def emit(*chunks: str) -> None:
        # Small helper: append one line per argument.
        out.extend(chunks)

    emit("## Motivation", "")
    emit(proposal.get("why") or "(No motivation provided)", "")

    emit("## Scope", "")
    emit(proposal.get("what_changes") or "(No scope provided)", "")

    emit("## Requirements", "")
    for domain, deltas in sorted(all_deltas.items()):
        for category in ("added", "modified", "removed"):
            for req in deltas.get(category, []):
                emit(f"### {domain}: {req['name']} [{category.upper()}]", "")
                if req.get("text"):
                    emit(req["text"], "")
                if category == "modified" and req.get("previously"):
                    emit(f"(Previously: {req['previously']})", "")
                if category == "removed" and req.get("reason"):
                    emit(f"(Deprecated: {req['reason']})", "")
                for sc in req.get("scenarios", []):
                    emit(f"- Scenario: {sc['name']}")
                    for step in sc.get("given", []):
                        emit(f"  - GIVEN {step}")
                    for step in sc.get("when", []):
                        emit(f"  - WHEN {step}")
                    for step in sc.get("then", []):
                        emit(f"  - THEN {step}")
                emit("")

    if design:
        emit("## Technical Design", "")
        for section_name, section_body in design.items():
            emit(f"### {section_name.replace('_', ' ').title()}", "")
            emit(section_body, "")

    return "\n".join(out)
582
+
583
+
584
def build_delta_context(
    change_name: str,
    all_deltas: Dict[str, Dict[str, Any]],
    complexity: str,
) -> Dict[str, Any]:
    """Assemble the delta-context.json payload with per-category stats."""
    counts = {"added": 0, "modified": 0, "removed": 0}
    for deltas in all_deltas.values():
        for category in counts:
            counts[category] += len(deltas.get(category, []))

    return {
        "change_name": change_name,
        "deltas": all_deltas,
        "complexity": complexity,
        "stats": {
            "total_requirements": sum(counts.values()),
            "added": counts["added"],
            "modified": counts["modified"],
            "removed": counts["removed"],
        },
    }
615
+
616
+
617
def build_verification_map(all_deltas: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    """Flatten added/modified scenarios into verification-map.json entries.

    Removed requirements are skipped: there is nothing left to verify.
    Each entry starts with "verified": False for the pipeline to flip.
    """
    entries: List[Dict[str, Any]] = [
        {
            "domain": domain,
            "requirement": req["name"],
            "scenario": sc["name"],
            "given": " ".join(sc.get("given", [])),
            "when": " ".join(sc.get("when", [])),
            "then": " ".join(sc.get("then", [])),
            "verified": False,
        }
        for domain, deltas in sorted(all_deltas.items())
        for category in ("added", "modified")
        for req in deltas.get(category, [])
        for sc in req.get("scenarios", [])
    ]
    return {"scenarios": entries}
636
+
637
+
638
+ # -- Main Orchestration -------------------------------------------------------
639
+
640
def run(
    change_dir_path: str,
    output_dir: str = ".loki",
    as_json: bool = False,
    validate_only: bool = False,
) -> int:
    """Main entry point. Returns exit code (0 = success, 1 = errors).

    Pipeline: optional validate-only short-circuit; parse proposal.md
    (required) and specs/ deltas (required); parse tasks.md, design.md,
    and .openspec.yaml (all optional); classify complexity; then either
    dump metadata JSON to stdout (as_json) or write the normalized
    artifacts under *output_dir*.

    Args:
        change_dir_path: Path to the OpenSpec change directory.
        output_dir: Destination for generated files (default ".loki").
        as_json: When True, print JSON to stdout and write no files.
        validate_only: When True, only report validation results.
    """

    change_dir = Path(change_dir_path).resolve()
    if not change_dir.is_dir():
        print(f"ERROR: Not a directory: {change_dir}", file=sys.stderr)
        return 1

    # The change takes its name from its directory.
    change_name = change_dir.name

    # -- Validation mode --
    if validate_only:
        errors, warnings = validate_change(change_dir)
        for err in errors:
            print(f"ERROR: {err}", file=sys.stderr)
        for warn in warnings:
            print(f"WARNING: {warn}", file=sys.stderr)
        if not errors and not warnings:
            print(f"OpenSpec validation: {change_name} -- OK")
        elif not errors:
            print(f"OpenSpec validation: {change_name} -- OK with {len(warnings)} warning(s)")
        else:
            print(f"OpenSpec validation: {change_name} -- FAILED ({len(errors)} error(s), {len(warnings)} warning(s))")
        return 1 if errors else 0

    # -- Parse proposal.md (required) --
    proposal_path = change_dir / "proposal.md"
    if not proposal_path.exists():
        print("ERROR: proposal.md not found", file=sys.stderr)
        return 1

    proposal = parse_proposal(proposal_path)

    # -- Parse delta specs --
    specs_dir = change_dir / "specs"
    all_deltas: Dict[str, Dict[str, Any]] = {}
    num_spec_files = 0

    if specs_dir.is_dir():
        for spec_file in sorted(specs_dir.rglob("spec.md")):
            # Domain name is the directory containing the spec.md file.
            domain = spec_file.parent.name
            deltas = parse_delta_spec(spec_file)
            all_deltas[domain] = deltas
            num_spec_files += 1

    if not all_deltas:
        print("ERROR: No spec files found under specs/", file=sys.stderr)
        return 1

    # -- Parse tasks.md (optional) --
    tasks_list: List[Dict[str, Any]] = []
    source_map: Dict[str, Dict[str, Any]] = {}
    tasks_path = change_dir / "tasks.md"
    if tasks_path.exists():
        tasks_list, source_map = parse_tasks(tasks_path)

    # -- Parse design.md (optional) --
    design_data: Optional[Dict[str, str]] = None
    design_path = change_dir / "design.md"
    has_design = design_path.exists()
    if has_design:
        design_data = parse_design(design_path)

    # -- Parse .openspec.yaml (optional) --
    yaml_metadata: Dict[str, Any] = {}
    yaml_path = change_dir / ".openspec.yaml"
    if yaml_path.exists():
        yaml_metadata = parse_metadata(yaml_path)

    # -- Classify complexity --
    complexity = classify_complexity(
        num_tasks=len(tasks_list),
        num_spec_files=num_spec_files,
        has_design=has_design,
    )

    # -- Build outputs --
    normalized_prd = build_normalized_prd(change_name, proposal, all_deltas, design_data)
    delta_context = build_delta_context(change_name, all_deltas, complexity)
    verification_map = build_verification_map(all_deltas)

    # -- JSON mode: output to stdout --
    if as_json:
        output = {
            "change_name": change_name,
            "complexity": complexity,
            "proposal": proposal,
            "deltas": all_deltas,
            "tasks": tasks_list,
            "metadata": yaml_metadata,
            "stats": delta_context["stats"],
        }
        print(json.dumps(output, indent=2))
        return 0

    # -- Write output files --
    # Relative output dirs are resolved against the current working
    # directory, not the change directory.
    if Path(output_dir).is_absolute():
        abs_output_dir = Path(output_dir)
    else:
        abs_output_dir = (Path.cwd() / output_dir).resolve()

    written: List[str] = []

    # .loki/openspec-prd-normalized.md
    prd_out = abs_output_dir / "openspec-prd-normalized.md"
    _write_atomic(prd_out, normalized_prd)
    written.append(str(prd_out))

    # .loki/openspec-tasks.json
    tasks_out = abs_output_dir / "openspec-tasks.json"
    _write_atomic(tasks_out, json.dumps(tasks_list, indent=2))
    written.append(str(tasks_out))

    # .loki/openspec/delta-context.json
    delta_out = abs_output_dir / "openspec" / "delta-context.json"
    _write_atomic(delta_out, json.dumps(delta_context, indent=2))
    written.append(str(delta_out))

    # .loki/openspec/source-map.json
    srcmap_out = abs_output_dir / "openspec" / "source-map.json"
    _write_atomic(srcmap_out, json.dumps(source_map, indent=2))
    written.append(str(srcmap_out))

    # .loki/openspec/verification-map.json
    verif_out = abs_output_dir / "openspec" / "verification-map.json"
    _write_atomic(verif_out, json.dumps(verification_map, indent=2))
    written.append(str(verif_out))

    # -- CLI summary --
    print(f"OpenSpec adapter: change={change_name} tasks={len(tasks_list)} specs={num_spec_files} complexity={complexity}")
    print(f"  Output files written to {abs_output_dir}/:")
    for path in written:
        print(f"    - {Path(path).name}")

    return 0
780
+
781
+
782
def main() -> None:
    """Parse command-line arguments and exit with run()'s status code."""
    parser = argparse.ArgumentParser(
        description="OpenSpec Change Adapter for Loki Mode",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            "Examples:\n"
            "  python3 openspec-adapter.py ./openspec/changes/add-dark-mode\n"
            "  python3 openspec-adapter.py ./openspec/changes/add-dark-mode --json\n"
            "  python3 openspec-adapter.py ./openspec/changes/add-dark-mode --validate\n"
            "  python3 openspec-adapter.py ./openspec/changes/add-dark-mode --output-dir .loki/\n"
        ),
    )
    parser.add_argument(
        "change_dir_path",
        help="Path to the OpenSpec change directory",
    )
    parser.add_argument(
        "--output-dir",
        default=".loki",
        help="Where to write output files (default: .loki/)",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="as_json",
        help="Output metadata as JSON to stdout (no files written)",
    )
    parser.add_argument(
        "--validate",
        action="store_true",
        dest="validate_only",
        help="Run artifact validation only",
    )

    args = parser.parse_args()
    # run() returns a process exit code; propagate it via sys.exit.
    exit_code = run(
        change_dir_path=args.change_dir_path,
        output_dir=args.output_dir,
        as_json=args.as_json,
        validate_only=args.validate_only,
    )
    sys.exit(exit_code)
824
+
825
+
826
# Script entry point: delegate to main(), which exits via sys.exit().
if __name__ == "__main__":
    main()