loki-mode 6.10.0 → 6.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/SKILL.md +2 -2
- package/VERSION +1 -1
- package/autonomy/loki +82 -0
- package/autonomy/openspec-adapter.py +827 -0
- package/autonomy/run.sh +116 -6
- package/dashboard/__init__.py +1 -1
- package/docs/INSTALLATION.md +1 -1
- package/mcp/__init__.py +1 -1
- package/package.json +1 -1
- package/skills/00-index.md +9 -0
- package/skills/openspec-integration.md +147 -0
package/SKILL.md
CHANGED
|
@@ -3,7 +3,7 @@ name: loki-mode
|
|
|
3
3
|
description: Multi-agent autonomous startup system. Triggers on "Loki Mode". Takes PRD to deployed product with minimal human intervention. Requires --dangerously-skip-permissions flag.
|
|
4
4
|
---
|
|
5
5
|
|
|
6
|
-
# Loki Mode v6.
|
|
6
|
+
# Loki Mode v6.11.0
|
|
7
7
|
|
|
8
8
|
**You are an autonomous agent. You make decisions. You do not ask questions. You do not stop.**
|
|
9
9
|
|
|
@@ -267,4 +267,4 @@ The following features are documented in skill modules but not yet fully automat
|
|
|
267
267
|
| Quality gates 3-reviewer system | Implemented (v5.35.0) | 5 specialist reviewers in `skills/quality-gates.md`; execution in run.sh |
|
|
268
268
|
| Benchmarks (HumanEval, SWE-bench) | Infrastructure only | Runner scripts and datasets exist in `benchmarks/`; no published results |
|
|
269
269
|
|
|
270
|
-
**v6.
|
|
270
|
+
**v6.11.0 | [Autonomi](https://www.autonomi.dev/) flagship product | ~260 lines core**
|
package/VERSION
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
6.
|
|
1
|
+
6.11.0
|
package/autonomy/loki
CHANGED
|
@@ -448,6 +448,7 @@ show_help() {
|
|
|
448
448
|
echo " --compliance PRESET Enable compliance mode (default|healthcare|fintech|government)"
|
|
449
449
|
echo " --budget USD Set cost budget limit (display in dashboard/status)"
|
|
450
450
|
echo " --bmad-project PATH Use BMAD Method project artifacts as input"
|
|
451
|
+
echo " --openspec PATH Use OpenSpec change directory as input"
|
|
451
452
|
echo ""
|
|
452
453
|
echo "Options for 'run' (v6.0.0):"
|
|
453
454
|
echo " --dry-run Preview generated PRD without starting"
|
|
@@ -490,6 +491,7 @@ cmd_start() {
|
|
|
490
491
|
local prd_file=""
|
|
491
492
|
local provider=""
|
|
492
493
|
local bmad_project_path=""
|
|
494
|
+
local openspec_change_path=""
|
|
493
495
|
|
|
494
496
|
while [[ $# -gt 0 ]]; do
|
|
495
497
|
case "$1" in
|
|
@@ -514,6 +516,7 @@ cmd_start() {
|
|
|
514
516
|
echo " --compliance PRESET Enable compliance mode (default|healthcare|fintech|government)"
|
|
515
517
|
echo " --budget USD Cost budget limit (auto-pause when exceeded)"
|
|
516
518
|
echo " --bmad-project PATH Use BMAD Method project artifacts as input"
|
|
519
|
+
echo " --openspec PATH Use OpenSpec change directory as input"
|
|
517
520
|
echo " --yes, -y Skip confirmation prompts (auto-confirm)"
|
|
518
521
|
echo ""
|
|
519
522
|
echo "Environment Variables:"
|
|
@@ -529,6 +532,7 @@ cmd_start() {
|
|
|
529
532
|
echo " loki start ./prd.md --parallel # Parallel mode with worktrees"
|
|
530
533
|
echo " loki start --provider codex # Use OpenAI Codex CLI"
|
|
531
534
|
echo " loki start --bmad-project ./my-project # Start from BMAD artifacts"
|
|
535
|
+
echo " loki start --openspec ./openspec/changes/my-feature # Start from OpenSpec change"
|
|
532
536
|
echo " loki start --yes # Skip confirmation prompt"
|
|
533
537
|
echo " LOKI_PRD_FILE=./prd.md loki start # PRD via env var"
|
|
534
538
|
exit 0
|
|
@@ -652,6 +656,19 @@ cmd_start() {
|
|
|
652
656
|
bmad_project_path="${1#*=}"
|
|
653
657
|
shift
|
|
654
658
|
;;
|
|
659
|
+
--openspec)
|
|
660
|
+
if [[ -n "${2:-}" ]]; then
|
|
661
|
+
openspec_change_path="$2"
|
|
662
|
+
shift 2
|
|
663
|
+
else
|
|
664
|
+
echo -e "${RED}--openspec requires a path to an OpenSpec change directory${NC}"
|
|
665
|
+
exit 1
|
|
666
|
+
fi
|
|
667
|
+
;;
|
|
668
|
+
--openspec=*)
|
|
669
|
+
openspec_change_path="${1#*=}"
|
|
670
|
+
shift
|
|
671
|
+
;;
|
|
655
672
|
--budget)
|
|
656
673
|
if [[ -n "${2:-}" ]]; then
|
|
657
674
|
if ! echo "$2" | grep -qE '^[0-9]+(\.[0-9]+)?$'; then
|
|
@@ -690,6 +707,12 @@ cmd_start() {
|
|
|
690
707
|
prd_file="$LOKI_PRD_FILE"
|
|
691
708
|
fi
|
|
692
709
|
|
|
710
|
+
# Mutual exclusivity: --openspec and --bmad-project cannot be used together
|
|
711
|
+
if [[ -n "${openspec_change_path:-}" ]] && [[ -n "${bmad_project_path:-}" ]]; then
|
|
712
|
+
echo -e "${RED}Error: --openspec and --bmad-project are mutually exclusive. Use one or the other.${NC}"
|
|
713
|
+
exit 1
|
|
714
|
+
fi
|
|
715
|
+
|
|
693
716
|
# BMAD project validation and adapter execution
|
|
694
717
|
if [[ -n "$bmad_project_path" ]]; then
|
|
695
718
|
# Resolve to absolute path
|
|
@@ -752,6 +775,65 @@ cmd_start() {
|
|
|
752
775
|
fi
|
|
753
776
|
fi
|
|
754
777
|
|
|
778
|
+
# OpenSpec change directory validation and adapter execution
|
|
779
|
+
if [[ -n "$openspec_change_path" ]]; then
|
|
780
|
+
# Resolve to absolute path
|
|
781
|
+
if [[ ! "$openspec_change_path" = /* ]]; then
|
|
782
|
+
local original_openspec_path="$openspec_change_path"
|
|
783
|
+
openspec_change_path="$(cd "$openspec_change_path" 2>/dev/null && pwd)" || {
|
|
784
|
+
echo -e "${RED}Error: OpenSpec change path does not exist: $original_openspec_path${NC}"
|
|
785
|
+
exit 1
|
|
786
|
+
}
|
|
787
|
+
fi
|
|
788
|
+
|
|
789
|
+
# Validate path is a directory
|
|
790
|
+
if [[ ! -d "$openspec_change_path" ]]; then
|
|
791
|
+
echo -e "${RED}Error: OpenSpec change path is not a directory: $openspec_change_path${NC}"
|
|
792
|
+
exit 1
|
|
793
|
+
fi
|
|
794
|
+
|
|
795
|
+
# Check for required OpenSpec artifacts
|
|
796
|
+
if [[ ! -f "$openspec_change_path/proposal.md" ]]; then
|
|
797
|
+
echo -e "${RED}Error: No proposal.md found in $openspec_change_path${NC}"
|
|
798
|
+
echo "Expected an OpenSpec change directory with proposal.md and specs/."
|
|
799
|
+
exit 1
|
|
800
|
+
fi
|
|
801
|
+
|
|
802
|
+
# Export for run.sh to access
|
|
803
|
+
export OPENSPEC_CHANGE_PATH="$openspec_change_path"
|
|
804
|
+
|
|
805
|
+
# Ensure .loki directory exists for adapter output
|
|
806
|
+
mkdir -p "$LOKI_DIR"
|
|
807
|
+
|
|
808
|
+
# Run the OpenSpec adapter to normalize artifacts
|
|
809
|
+
echo -e "${CYAN}Running OpenSpec adapter...${NC}"
|
|
810
|
+
local adapter_script="${SCRIPT_DIR:-$(dirname "$0")}/openspec-adapter.py"
|
|
811
|
+
if [[ ! -f "$adapter_script" ]]; then
|
|
812
|
+
echo -e "${RED}Error: OpenSpec adapter not found at $adapter_script${NC}"
|
|
813
|
+
echo "Please ensure autonomy/openspec-adapter.py exists."
|
|
814
|
+
exit 1
|
|
815
|
+
fi
|
|
816
|
+
|
|
817
|
+
# Validate first
|
|
818
|
+
if ! python3 "$adapter_script" "$openspec_change_path" --validate; then
|
|
819
|
+
echo -e "${RED}Error: OpenSpec adapter validation failed. Check the change artifacts.${NC}"
|
|
820
|
+
exit 1
|
|
821
|
+
fi
|
|
822
|
+
|
|
823
|
+
# Generate output files
|
|
824
|
+
if ! python3 "$adapter_script" "$openspec_change_path" --output-dir "$LOKI_DIR"; then
|
|
825
|
+
echo -e "${RED}Error: OpenSpec adapter failed to generate output files.${NC}"
|
|
826
|
+
exit 1
|
|
827
|
+
fi
|
|
828
|
+
echo -e "${GREEN}OpenSpec artifacts normalized successfully.${NC}"
|
|
829
|
+
|
|
830
|
+
# If no explicit PRD was provided, use the normalized OpenSpec PRD
|
|
831
|
+
if [[ -z "$prd_file" ]] && [[ -f "$LOKI_DIR/openspec-prd-normalized.md" ]]; then
|
|
832
|
+
prd_file="$LOKI_DIR/openspec-prd-normalized.md"
|
|
833
|
+
echo -e "${CYAN}Using normalized OpenSpec PRD: $prd_file${NC}"
|
|
834
|
+
fi
|
|
835
|
+
fi
|
|
836
|
+
|
|
755
837
|
if [ -n "$prd_file" ]; then
|
|
756
838
|
args+=("$prd_file")
|
|
757
839
|
else
|
|
@@ -0,0 +1,827 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""OpenSpec Change Adapter for Loki Mode
|
|
3
|
+
|
|
4
|
+
Parses OpenSpec change directories (proposal.md, specs/, design.md, tasks.md)
|
|
5
|
+
and normalizes them into Loki Mode's internal format. Bridges OpenSpec's
|
|
6
|
+
delta-based specification workflow into the .loki/ pipeline.
|
|
7
|
+
|
|
8
|
+
Stdlib only - no pip dependencies required. Python 3.9+.
|
|
9
|
+
|
|
10
|
+
Usage:
|
|
11
|
+
python3 openspec-adapter.py <change-dir-path> [options]
|
|
12
|
+
--output-dir DIR Where to write output files (default: .loki/)
|
|
13
|
+
--json Output metadata as JSON to stdout
|
|
14
|
+
--validate Run artifact validation only
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import argparse
|
|
18
|
+
import json
|
|
19
|
+
import os
|
|
20
|
+
import re
|
|
21
|
+
import sys
|
|
22
|
+
import tempfile
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
25
|
+
|
|
26
|
+
# Maximum artifact file size (10 MB)
|
|
27
|
+
MAX_ARTIFACT_SIZE = 10 * 1024 * 1024
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _safe_read(path: Path) -> str:
|
|
31
|
+
"""Read a file with size limit and encoding safety."""
|
|
32
|
+
size = path.stat().st_size
|
|
33
|
+
if size > MAX_ARTIFACT_SIZE:
|
|
34
|
+
raise ValueError(f"Artifact too large ({size} bytes, max {MAX_ARTIFACT_SIZE}): {path.name}")
|
|
35
|
+
return path.read_text(encoding="utf-8", errors="replace")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _write_atomic(path: Path, content: str) -> None:
|
|
39
|
+
"""Write content to file atomically using temp file + rename."""
|
|
40
|
+
path.parent.mkdir(parents=True, exist_ok=True)
|
|
41
|
+
fd, tmp_path = tempfile.mkstemp(dir=path.parent, suffix=".tmp")
|
|
42
|
+
try:
|
|
43
|
+
with os.fdopen(fd, "w", encoding="utf-8") as f:
|
|
44
|
+
f.write(content)
|
|
45
|
+
os.replace(tmp_path, str(path))
|
|
46
|
+
except Exception:
|
|
47
|
+
try:
|
|
48
|
+
os.unlink(tmp_path)
|
|
49
|
+
except OSError:
|
|
50
|
+
pass
|
|
51
|
+
raise
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
# -- Simple YAML Parsing (regex-based, no PyYAML) ----------------------------
|
|
55
|
+
|
|
56
|
+
def _parse_simple_yaml(text: str) -> Dict[str, Any]:
|
|
57
|
+
"""Parse simple YAML key-value pairs via regex.
|
|
58
|
+
|
|
59
|
+
Handles scalars, quoted strings, and flow-style lists.
|
|
60
|
+
Does NOT handle nested mappings or block-style lists.
|
|
61
|
+
"""
|
|
62
|
+
metadata: Dict[str, Any] = {}
|
|
63
|
+
for line in text.split("\n"):
|
|
64
|
+
line = line.strip()
|
|
65
|
+
if not line or line.startswith("#"):
|
|
66
|
+
continue
|
|
67
|
+
match = re.match(r"^(\w[\w-]*):\s*(.*)", line)
|
|
68
|
+
if not match:
|
|
69
|
+
continue
|
|
70
|
+
key = match.group(1)
|
|
71
|
+
value = match.group(2).strip()
|
|
72
|
+
# Flow-style list: [item1, item2]
|
|
73
|
+
if value.startswith("[") and value.endswith("]"):
|
|
74
|
+
items = value[1:-1].split(",")
|
|
75
|
+
metadata[key] = [_unquote(item.strip()) for item in items if item.strip()]
|
|
76
|
+
# Quoted string
|
|
77
|
+
elif (value.startswith("'") and value.endswith("'")) or \
|
|
78
|
+
(value.startswith('"') and value.endswith('"')):
|
|
79
|
+
metadata[key] = value[1:-1]
|
|
80
|
+
# Plain scalar
|
|
81
|
+
else:
|
|
82
|
+
metadata[key] = value
|
|
83
|
+
return metadata
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _unquote(s: str) -> str:
|
|
87
|
+
"""Remove surrounding quotes from a string."""
|
|
88
|
+
if len(s) >= 2:
|
|
89
|
+
if (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
|
|
90
|
+
return s[1:-1]
|
|
91
|
+
return s
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# -- Proposal Parsing --------------------------------------------------------
|
|
95
|
+
|
|
96
|
+
def parse_proposal(proposal_path: Path) -> Dict[str, Any]:
|
|
97
|
+
"""Parse proposal.md into structured data.
|
|
98
|
+
|
|
99
|
+
Extracts sections: Why, What Changes, Capabilities (New/Modified), Impact.
|
|
100
|
+
"""
|
|
101
|
+
text = _safe_read(proposal_path)
|
|
102
|
+
result: Dict[str, Any] = {
|
|
103
|
+
"title": "",
|
|
104
|
+
"why": "",
|
|
105
|
+
"what_changes": "",
|
|
106
|
+
"new_capabilities": [],
|
|
107
|
+
"modified_capabilities": [],
|
|
108
|
+
"impact": "",
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
# Extract title from H1 heading if present
|
|
112
|
+
title_match = re.match(r"^#\s+(.+)", text.strip())
|
|
113
|
+
if title_match:
|
|
114
|
+
result["title"] = title_match.group(1).strip()
|
|
115
|
+
|
|
116
|
+
# Extract sections by ## headings
|
|
117
|
+
sections = _split_sections(text, level=2)
|
|
118
|
+
|
|
119
|
+
for heading, body in sections.items():
|
|
120
|
+
heading_lower = heading.lower().strip()
|
|
121
|
+
if heading_lower == "why":
|
|
122
|
+
result["why"] = body.strip()
|
|
123
|
+
elif heading_lower == "what changes":
|
|
124
|
+
result["what_changes"] = body.strip()
|
|
125
|
+
elif heading_lower == "impact":
|
|
126
|
+
result["impact"] = body.strip()
|
|
127
|
+
elif heading_lower == "capabilities":
|
|
128
|
+
# Parse sub-sections for New/Modified
|
|
129
|
+
sub_sections = _split_sections(body, level=3)
|
|
130
|
+
for sub_heading, sub_body in sub_sections.items():
|
|
131
|
+
sub_lower = sub_heading.lower().strip()
|
|
132
|
+
caps = _extract_capabilities(sub_body)
|
|
133
|
+
if "new" in sub_lower:
|
|
134
|
+
result["new_capabilities"] = caps
|
|
135
|
+
elif "modified" in sub_lower:
|
|
136
|
+
result["modified_capabilities"] = caps
|
|
137
|
+
|
|
138
|
+
return result
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _split_sections(text: str, level: int = 2) -> Dict[str, str]:
|
|
142
|
+
"""Split markdown text into sections by heading level.
|
|
143
|
+
|
|
144
|
+
Returns {heading_text: body_text} preserving order.
|
|
145
|
+
"""
|
|
146
|
+
prefix = "#" * level
|
|
147
|
+
pattern = re.compile(rf"^{prefix}\s+(.+)$", re.MULTILINE)
|
|
148
|
+
matches = list(pattern.finditer(text))
|
|
149
|
+
sections: Dict[str, str] = {}
|
|
150
|
+
for i, m in enumerate(matches):
|
|
151
|
+
heading = m.group(1).strip()
|
|
152
|
+
start = m.end()
|
|
153
|
+
end = matches[i + 1].start() if i + 1 < len(matches) else len(text)
|
|
154
|
+
sections[heading] = text[start:end].strip()
|
|
155
|
+
return sections
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def _extract_capabilities(text: str) -> List[Dict[str, str]]:
|
|
159
|
+
"""Extract capability names and descriptions from bullet items.
|
|
160
|
+
|
|
161
|
+
Matches patterns like:
|
|
162
|
+
- `name`: description
|
|
163
|
+
- **name:** description
|
|
164
|
+
"""
|
|
165
|
+
capabilities: List[Dict[str, str]] = []
|
|
166
|
+
# Pattern: - `name`: description
|
|
167
|
+
for m in re.finditer(r"^-\s+`([^`]+)`:\s*(.+)", text, re.MULTILINE):
|
|
168
|
+
capabilities.append({"name": m.group(1).strip(), "description": m.group(2).strip()})
|
|
169
|
+
if capabilities:
|
|
170
|
+
return capabilities
|
|
171
|
+
# Fallback: - **name:** description
|
|
172
|
+
for m in re.finditer(r"^-\s+\*\*([^*]+?):\*\*\s*(.+)", text, re.MULTILINE):
|
|
173
|
+
capabilities.append({"name": m.group(1).strip(), "description": m.group(2).strip()})
|
|
174
|
+
return capabilities
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
# -- Delta Spec Parsing -------------------------------------------------------
|
|
178
|
+
|
|
179
|
+
def parse_delta_spec(spec_path: Path) -> Dict[str, Any]:
|
|
180
|
+
"""Parse a delta spec.md file.
|
|
181
|
+
|
|
182
|
+
Extracts ADDED, MODIFIED, and REMOVED requirements with their scenarios.
|
|
183
|
+
"""
|
|
184
|
+
text = _safe_read(spec_path)
|
|
185
|
+
result: Dict[str, List[Dict[str, Any]]] = {
|
|
186
|
+
"added": [],
|
|
187
|
+
"modified": [],
|
|
188
|
+
"removed": [],
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
sections = _split_sections(text, level=2)
|
|
192
|
+
|
|
193
|
+
for heading, body in sections.items():
|
|
194
|
+
heading_lower = heading.lower().strip()
|
|
195
|
+
if "added" in heading_lower:
|
|
196
|
+
result["added"] = _parse_requirements(body, category="added")
|
|
197
|
+
elif "modified" in heading_lower:
|
|
198
|
+
result["modified"] = _parse_requirements(body, category="modified")
|
|
199
|
+
elif "removed" in heading_lower:
|
|
200
|
+
result["removed"] = _parse_requirements(body, category="removed")
|
|
201
|
+
|
|
202
|
+
return result
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def _parse_requirements(text: str, category: str = "added") -> List[Dict[str, Any]]:
|
|
206
|
+
"""Parse requirements from a delta section.
|
|
207
|
+
|
|
208
|
+
Each requirement: ### Requirement: <name>
|
|
209
|
+
With optional scenarios: #### Scenario: <name>
|
|
210
|
+
"""
|
|
211
|
+
requirements: List[Dict[str, Any]] = []
|
|
212
|
+
# Split by ### Requirement: headings
|
|
213
|
+
req_pattern = re.compile(r"^###\s+Requirement:\s*(.+)$", re.MULTILINE)
|
|
214
|
+
req_matches = list(req_pattern.finditer(text))
|
|
215
|
+
|
|
216
|
+
for i, m in enumerate(req_matches):
|
|
217
|
+
name = m.group(1).strip()
|
|
218
|
+
start = m.end()
|
|
219
|
+
end = req_matches[i + 1].start() if i + 1 < len(req_matches) else len(text)
|
|
220
|
+
req_body = text[start:end].strip()
|
|
221
|
+
|
|
222
|
+
req: Dict[str, Any] = {
|
|
223
|
+
"name": name,
|
|
224
|
+
"text": "",
|
|
225
|
+
"scenarios": [],
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
# Extract previously annotation for modified requirements
|
|
229
|
+
if category == "modified":
|
|
230
|
+
# Try parenthesized format first: (Previously: ...)
|
|
231
|
+
prev_match = re.search(r"\(Previously:\s*(.+?)\)", req_body)
|
|
232
|
+
if not prev_match:
|
|
233
|
+
# Try inline format: Previously ... (sentence boundary)
|
|
234
|
+
prev_match = re.search(r"Previously\s+(.+?)(?:\.\s|\.$|\n|$)", req_body)
|
|
235
|
+
if prev_match:
|
|
236
|
+
req["previously"] = prev_match.group(1).strip().rstrip(".")
|
|
237
|
+
|
|
238
|
+
# Extract deprecated/removed reason annotation
|
|
239
|
+
if category == "removed":
|
|
240
|
+
# Try parenthesized format first: (Deprecated: ...)
|
|
241
|
+
dep_match = re.search(r"\(Deprecated(?::\s*(.+?))?\)", req_body)
|
|
242
|
+
if dep_match:
|
|
243
|
+
reason = dep_match.group(1)
|
|
244
|
+
req["reason"] = reason.strip() if reason else ""
|
|
245
|
+
else:
|
|
246
|
+
# Try inline narrative: extract first sentence as reason
|
|
247
|
+
# Look for patterns like "is removed", "was deprecated", etc.
|
|
248
|
+
narrative = re.search(
|
|
249
|
+
r"(?:removed|deprecated|no longer|eliminated)[.\s]+(.+?)(?:\.\s|\.$|\n\n|$)",
|
|
250
|
+
req_body, re.IGNORECASE
|
|
251
|
+
)
|
|
252
|
+
if narrative:
|
|
253
|
+
req["reason"] = narrative.group(1).strip().rstrip(".")
|
|
254
|
+
elif req_body.strip():
|
|
255
|
+
# Use first sentence of body as reason
|
|
256
|
+
first_sentence = req_body.strip().split(".")[0]
|
|
257
|
+
req["reason"] = first_sentence.strip()
|
|
258
|
+
|
|
259
|
+
# Split into pre-scenario text and scenarios
|
|
260
|
+
scenario_pattern = re.compile(r"^####\s+Scenario:\s*(.+)$", re.MULTILINE)
|
|
261
|
+
scenario_matches = list(scenario_pattern.finditer(req_body))
|
|
262
|
+
|
|
263
|
+
if scenario_matches:
|
|
264
|
+
# Text before first scenario
|
|
265
|
+
req["text"] = req_body[:scenario_matches[0].start()].strip()
|
|
266
|
+
# Parse each scenario
|
|
267
|
+
for j, sm in enumerate(scenario_matches):
|
|
268
|
+
sc_name = sm.group(1).strip()
|
|
269
|
+
sc_start = sm.end()
|
|
270
|
+
sc_end = scenario_matches[j + 1].start() if j + 1 < len(scenario_matches) else len(req_body)
|
|
271
|
+
sc_body = req_body[sc_start:sc_end].strip()
|
|
272
|
+
scenario = _parse_scenario(sc_name, sc_body)
|
|
273
|
+
req["scenarios"].append(scenario)
|
|
274
|
+
else:
|
|
275
|
+
# No scenarios -- entire body is the requirement text
|
|
276
|
+
req["text"] = req_body
|
|
277
|
+
|
|
278
|
+
requirements.append(req)
|
|
279
|
+
|
|
280
|
+
return requirements
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def _parse_scenario(name: str, body: str) -> Dict[str, Any]:
|
|
284
|
+
"""Parse a scenario body for GIVEN/WHEN/THEN lines.
|
|
285
|
+
|
|
286
|
+
Handles two formats:
|
|
287
|
+
- GIVEN ..., - WHEN ..., - THEN ... (list items)
|
|
288
|
+
- **GIVEN** ..., **WHEN** ..., **THEN** ... (bold keywords)
|
|
289
|
+
Also handles AND lines appended to the previous step.
|
|
290
|
+
"""
|
|
291
|
+
scenario: Dict[str, Any] = {
|
|
292
|
+
"name": name,
|
|
293
|
+
"given": [],
|
|
294
|
+
"when": [],
|
|
295
|
+
"then": [],
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
for line in body.split("\n"):
|
|
299
|
+
stripped = line.strip()
|
|
300
|
+
if not stripped:
|
|
301
|
+
continue
|
|
302
|
+
|
|
303
|
+
# Format 1: - **KEYWORD** text or - KEYWORD text
|
|
304
|
+
m = re.match(
|
|
305
|
+
r"^-\s+(?:\*\*)?(?:GIVEN|Given)(?:\*\*)?\s+(.+)",
|
|
306
|
+
stripped,
|
|
307
|
+
)
|
|
308
|
+
if m:
|
|
309
|
+
scenario["given"].append(m.group(1).strip())
|
|
310
|
+
continue
|
|
311
|
+
|
|
312
|
+
m = re.match(
|
|
313
|
+
r"^-\s+(?:\*\*)?(?:WHEN|When)(?:\*\*)?\s+(.+)",
|
|
314
|
+
stripped,
|
|
315
|
+
)
|
|
316
|
+
if m:
|
|
317
|
+
scenario["when"].append(m.group(1).strip())
|
|
318
|
+
continue
|
|
319
|
+
|
|
320
|
+
m = re.match(
|
|
321
|
+
r"^-\s+(?:\*\*)?(?:THEN|Then)(?:\*\*)?\s+(.+)",
|
|
322
|
+
stripped,
|
|
323
|
+
)
|
|
324
|
+
if m:
|
|
325
|
+
scenario["then"].append(m.group(1).strip())
|
|
326
|
+
continue
|
|
327
|
+
|
|
328
|
+
m = re.match(
|
|
329
|
+
r"^-\s+(?:\*\*)?(?:AND|And)(?:\*\*)?\s+(.+)",
|
|
330
|
+
stripped,
|
|
331
|
+
)
|
|
332
|
+
if m:
|
|
333
|
+
# Append AND to the last non-empty list (then > when > given)
|
|
334
|
+
and_text = m.group(1).strip()
|
|
335
|
+
if scenario["then"]:
|
|
336
|
+
scenario["then"].append(and_text)
|
|
337
|
+
elif scenario["when"]:
|
|
338
|
+
scenario["when"].append(and_text)
|
|
339
|
+
elif scenario["given"]:
|
|
340
|
+
scenario["given"].append(and_text)
|
|
341
|
+
continue
|
|
342
|
+
|
|
343
|
+
return scenario
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
# -- Tasks Parsing ------------------------------------------------------------
|
|
347
|
+
|
|
348
|
+
def parse_tasks(tasks_path: Path) -> Tuple[List[Dict[str, Any]], Dict[str, Dict[str, Any]]]:
|
|
349
|
+
"""Parse tasks.md into structured task list and source map.
|
|
350
|
+
|
|
351
|
+
Returns:
|
|
352
|
+
(tasks_list, source_map)
|
|
353
|
+
tasks_list: list of task objects
|
|
354
|
+
source_map: {task_id: {file, line, group}}
|
|
355
|
+
"""
|
|
356
|
+
text = _safe_read(tasks_path)
|
|
357
|
+
tasks: List[Dict[str, Any]] = []
|
|
358
|
+
source_map: Dict[str, Dict[str, Any]] = {}
|
|
359
|
+
current_group = ""
|
|
360
|
+
|
|
361
|
+
for line_num, line in enumerate(text.split("\n"), start=1):
|
|
362
|
+
stripped = line.strip()
|
|
363
|
+
|
|
364
|
+
# Group heading: ## N. Group Name
|
|
365
|
+
group_match = re.match(r"^##\s+(\d+)\.\s+(.+)", stripped)
|
|
366
|
+
if group_match:
|
|
367
|
+
current_group = group_match.group(2).strip()
|
|
368
|
+
continue
|
|
369
|
+
|
|
370
|
+
# Task item: - [ ] N.M description or - [x] N.M description
|
|
371
|
+
task_match = re.match(r"^-\s+\[([ xX])\]\s+(\d+\.\d+)\s+(.*)", stripped)
|
|
372
|
+
if task_match:
|
|
373
|
+
checked = task_match.group(1).lower() == "x"
|
|
374
|
+
task_id_num = task_match.group(2)
|
|
375
|
+
description = task_match.group(3).strip()
|
|
376
|
+
task_id = f"openspec-{task_id_num}"
|
|
377
|
+
|
|
378
|
+
task = {
|
|
379
|
+
"id": task_id,
|
|
380
|
+
"title": description,
|
|
381
|
+
"group": current_group,
|
|
382
|
+
"status": "completed" if checked else "pending",
|
|
383
|
+
"source": "tasks.md",
|
|
384
|
+
"priority": "medium",
|
|
385
|
+
}
|
|
386
|
+
tasks.append(task)
|
|
387
|
+
source_map[task_id] = {
|
|
388
|
+
"file": "tasks.md",
|
|
389
|
+
"line": line_num,
|
|
390
|
+
"group": current_group,
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
return tasks, source_map
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
# -- Design Parsing -----------------------------------------------------------
|
|
397
|
+
|
|
398
|
+
def parse_design(design_path: Path) -> Dict[str, str]:
|
|
399
|
+
"""Parse design.md into structured sections.
|
|
400
|
+
|
|
401
|
+
Extracts: Context, Goals/Non-Goals, Decisions, Risks/Trade-offs.
|
|
402
|
+
"""
|
|
403
|
+
text = _safe_read(design_path)
|
|
404
|
+
result: Dict[str, str] = {}
|
|
405
|
+
|
|
406
|
+
sections = _split_sections(text, level=2)
|
|
407
|
+
for heading, body in sections.items():
|
|
408
|
+
heading_lower = heading.lower().strip()
|
|
409
|
+
if "context" in heading_lower:
|
|
410
|
+
result["context"] = body.strip()
|
|
411
|
+
elif "goal" in heading_lower:
|
|
412
|
+
result["goals"] = body.strip()
|
|
413
|
+
elif "decision" in heading_lower:
|
|
414
|
+
result["decisions"] = body.strip()
|
|
415
|
+
elif "risk" in heading_lower or "trade" in heading_lower:
|
|
416
|
+
result["risks"] = body.strip()
|
|
417
|
+
else:
|
|
418
|
+
# Preserve any other sections
|
|
419
|
+
result[heading_lower.replace(" ", "_").replace("/", "_")] = body.strip()
|
|
420
|
+
|
|
421
|
+
return result
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
# -- Metadata Parsing ---------------------------------------------------------
|
|
425
|
+
|
|
426
|
+
def parse_metadata(yaml_path: Path) -> Dict[str, Any]:
|
|
427
|
+
"""Parse .openspec.yaml for change metadata."""
|
|
428
|
+
text = _safe_read(yaml_path)
|
|
429
|
+
return _parse_simple_yaml(text)
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
# -- Complexity Classification ------------------------------------------------
|
|
433
|
+
|
|
434
|
+
def classify_complexity(
|
|
435
|
+
num_tasks: int,
|
|
436
|
+
num_spec_files: int,
|
|
437
|
+
has_design: bool,
|
|
438
|
+
) -> str:
|
|
439
|
+
"""Classify change complexity from OpenSpec signals.
|
|
440
|
+
|
|
441
|
+
Rules:
|
|
442
|
+
- 1-3 tasks, 1 spec file, no design.md -> simple
|
|
443
|
+
- 4-10 tasks, 2-5 spec files, design.md present -> standard
|
|
444
|
+
- 11-20 tasks, 5-10 spec files -> complex
|
|
445
|
+
- 20+ tasks or 10+ spec files -> enterprise
|
|
446
|
+
"""
|
|
447
|
+
if num_tasks > 20 or num_spec_files > 10:
|
|
448
|
+
return "enterprise"
|
|
449
|
+
if num_tasks > 10 or num_spec_files > 5:
|
|
450
|
+
return "complex"
|
|
451
|
+
if num_tasks > 3 or num_spec_files > 1 or has_design:
|
|
452
|
+
return "standard"
|
|
453
|
+
return "simple"
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
# -- Validation ---------------------------------------------------------------
|
|
457
|
+
|
|
458
|
+
def validate_change(change_dir: Path) -> Tuple[List[str], List[str]]:
|
|
459
|
+
"""Validate an OpenSpec change directory.
|
|
460
|
+
|
|
461
|
+
Returns (errors, warnings).
|
|
462
|
+
"""
|
|
463
|
+
errors: List[str] = []
|
|
464
|
+
warnings: List[str] = []
|
|
465
|
+
|
|
466
|
+
# proposal.md must exist and have content
|
|
467
|
+
proposal_path = change_dir / "proposal.md"
|
|
468
|
+
if not proposal_path.exists():
|
|
469
|
+
errors.append("proposal.md not found")
|
|
470
|
+
elif proposal_path.stat().st_size == 0:
|
|
471
|
+
errors.append("proposal.md is empty")
|
|
472
|
+
else:
|
|
473
|
+
text = _safe_read(proposal_path)
|
|
474
|
+
# Check it has at least one non-comment, non-empty line
|
|
475
|
+
content_lines = [
|
|
476
|
+
l for l in text.split("\n")
|
|
477
|
+
if l.strip() and not l.strip().startswith("<!--")
|
|
478
|
+
]
|
|
479
|
+
if len(content_lines) < 2:
|
|
480
|
+
warnings.append("proposal.md has very little content")
|
|
481
|
+
|
|
482
|
+
# specs/ directory must exist with at least one spec.md
|
|
483
|
+
specs_dir = change_dir / "specs"
|
|
484
|
+
if not specs_dir.is_dir():
|
|
485
|
+
errors.append("specs/ directory not found")
|
|
486
|
+
else:
|
|
487
|
+
spec_files = list(specs_dir.rglob("spec.md"))
|
|
488
|
+
if not spec_files:
|
|
489
|
+
errors.append("No spec.md files found under specs/")
|
|
490
|
+
else:
|
|
491
|
+
# Each spec.md should have at least one delta section
|
|
492
|
+
for sf in spec_files:
|
|
493
|
+
text = _safe_read(sf)
|
|
494
|
+
has_delta = any(
|
|
495
|
+
re.search(rf"##\s+{keyword}\s+Requirements", text, re.IGNORECASE)
|
|
496
|
+
for keyword in ("ADDED", "MODIFIED", "REMOVED")
|
|
497
|
+
)
|
|
498
|
+
if not has_delta:
|
|
499
|
+
domain = sf.parent.name
|
|
500
|
+
warnings.append(
|
|
501
|
+
f"specs/{domain}/spec.md has no ADDED/MODIFIED/REMOVED sections"
|
|
502
|
+
)
|
|
503
|
+
|
|
504
|
+
# tasks.md should exist (warn if missing)
|
|
505
|
+
tasks_path = change_dir / "tasks.md"
|
|
506
|
+
if not tasks_path.exists():
|
|
507
|
+
warnings.append("tasks.md not found (no implementation checklist)")
|
|
508
|
+
|
|
509
|
+
return errors, warnings
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
# -- Output Generation --------------------------------------------------------
|
|
513
|
+
|
|
514
|
+
def build_normalized_prd(
|
|
515
|
+
change_name: str,
|
|
516
|
+
proposal: Dict[str, Any],
|
|
517
|
+
all_deltas: Dict[str, Dict[str, Any]],
|
|
518
|
+
design: Optional[Dict[str, str]],
|
|
519
|
+
) -> str:
|
|
520
|
+
"""Build the synthesized PRD markdown from proposal + specs + design."""
|
|
521
|
+
lines: List[str] = []
|
|
522
|
+
lines.append(f"# OpenSpec Change: {change_name}")
|
|
523
|
+
lines.append("")
|
|
524
|
+
|
|
525
|
+
# Motivation
|
|
526
|
+
lines.append("## Motivation")
|
|
527
|
+
lines.append("")
|
|
528
|
+
if proposal.get("why"):
|
|
529
|
+
lines.append(proposal["why"])
|
|
530
|
+
else:
|
|
531
|
+
lines.append("(No motivation provided)")
|
|
532
|
+
lines.append("")
|
|
533
|
+
|
|
534
|
+
# Scope
|
|
535
|
+
lines.append("## Scope")
|
|
536
|
+
lines.append("")
|
|
537
|
+
if proposal.get("what_changes"):
|
|
538
|
+
lines.append(proposal["what_changes"])
|
|
539
|
+
else:
|
|
540
|
+
lines.append("(No scope provided)")
|
|
541
|
+
lines.append("")
|
|
542
|
+
|
|
543
|
+
# Requirements from all delta specs
|
|
544
|
+
lines.append("## Requirements")
|
|
545
|
+
lines.append("")
|
|
546
|
+
for domain, deltas in sorted(all_deltas.items()):
|
|
547
|
+
for category in ("added", "modified", "removed"):
|
|
548
|
+
for req in deltas.get(category, []):
|
|
549
|
+
tag = category.upper()
|
|
550
|
+
lines.append(f"### {domain}: {req['name']} [{tag}]")
|
|
551
|
+
lines.append("")
|
|
552
|
+
if req.get("text"):
|
|
553
|
+
lines.append(req["text"])
|
|
554
|
+
lines.append("")
|
|
555
|
+
if category == "modified" and req.get("previously"):
|
|
556
|
+
lines.append(f"(Previously: {req['previously']})")
|
|
557
|
+
lines.append("")
|
|
558
|
+
if category == "removed" and req.get("reason"):
|
|
559
|
+
lines.append(f"(Deprecated: {req['reason']})")
|
|
560
|
+
lines.append("")
|
|
561
|
+
for sc in req.get("scenarios", []):
|
|
562
|
+
lines.append(f"- Scenario: {sc['name']}")
|
|
563
|
+
for g in sc.get("given", []):
|
|
564
|
+
lines.append(f" - GIVEN {g}")
|
|
565
|
+
for w in sc.get("when", []):
|
|
566
|
+
lines.append(f" - WHEN {w}")
|
|
567
|
+
for t in sc.get("then", []):
|
|
568
|
+
lines.append(f" - THEN {t}")
|
|
569
|
+
lines.append("")
|
|
570
|
+
|
|
571
|
+
# Technical Design
|
|
572
|
+
if design:
|
|
573
|
+
lines.append("## Technical Design")
|
|
574
|
+
lines.append("")
|
|
575
|
+
for section_name, section_body in design.items():
|
|
576
|
+
lines.append(f"### {section_name.replace('_', ' ').title()}")
|
|
577
|
+
lines.append("")
|
|
578
|
+
lines.append(section_body)
|
|
579
|
+
lines.append("")
|
|
580
|
+
|
|
581
|
+
return "\n".join(lines)
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
def build_delta_context(
|
|
585
|
+
change_name: str,
|
|
586
|
+
all_deltas: Dict[str, Dict[str, Any]],
|
|
587
|
+
complexity: str,
|
|
588
|
+
) -> Dict[str, Any]:
|
|
589
|
+
"""Build the delta-context.json structure."""
|
|
590
|
+
total = 0
|
|
591
|
+
added = 0
|
|
592
|
+
modified = 0
|
|
593
|
+
removed = 0
|
|
594
|
+
|
|
595
|
+
for deltas in all_deltas.values():
|
|
596
|
+
a = len(deltas.get("added", []))
|
|
597
|
+
m = len(deltas.get("modified", []))
|
|
598
|
+
r = len(deltas.get("removed", []))
|
|
599
|
+
added += a
|
|
600
|
+
modified += m
|
|
601
|
+
removed += r
|
|
602
|
+
total += a + m + r
|
|
603
|
+
|
|
604
|
+
return {
|
|
605
|
+
"change_name": change_name,
|
|
606
|
+
"deltas": all_deltas,
|
|
607
|
+
"complexity": complexity,
|
|
608
|
+
"stats": {
|
|
609
|
+
"total_requirements": total,
|
|
610
|
+
"added": added,
|
|
611
|
+
"modified": modified,
|
|
612
|
+
"removed": removed,
|
|
613
|
+
},
|
|
614
|
+
}
|
|
615
|
+
|
|
616
|
+
|
|
617
|
+
def build_verification_map(all_deltas: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
|
|
618
|
+
"""Build verification-map.json from scenarios across all deltas."""
|
|
619
|
+
scenarios: List[Dict[str, Any]] = []
|
|
620
|
+
|
|
621
|
+
for domain, deltas in sorted(all_deltas.items()):
|
|
622
|
+
for category in ("added", "modified"):
|
|
623
|
+
for req in deltas.get(category, []):
|
|
624
|
+
for sc in req.get("scenarios", []):
|
|
625
|
+
scenarios.append({
|
|
626
|
+
"domain": domain,
|
|
627
|
+
"requirement": req["name"],
|
|
628
|
+
"scenario": sc["name"],
|
|
629
|
+
"given": " ".join(sc.get("given", [])),
|
|
630
|
+
"when": " ".join(sc.get("when", [])),
|
|
631
|
+
"then": " ".join(sc.get("then", [])),
|
|
632
|
+
"verified": False,
|
|
633
|
+
})
|
|
634
|
+
|
|
635
|
+
return {"scenarios": scenarios}
|
|
636
|
+
|
|
637
|
+
|
|
638
|
+
# -- Main Orchestration -------------------------------------------------------
|
|
639
|
+
|
|
640
|
+
def run(
|
|
641
|
+
change_dir_path: str,
|
|
642
|
+
output_dir: str = ".loki",
|
|
643
|
+
as_json: bool = False,
|
|
644
|
+
validate_only: bool = False,
|
|
645
|
+
) -> int:
|
|
646
|
+
"""Main entry point. Returns exit code (0 = success, 1 = errors)."""
|
|
647
|
+
|
|
648
|
+
change_dir = Path(change_dir_path).resolve()
|
|
649
|
+
if not change_dir.is_dir():
|
|
650
|
+
print(f"ERROR: Not a directory: {change_dir}", file=sys.stderr)
|
|
651
|
+
return 1
|
|
652
|
+
|
|
653
|
+
change_name = change_dir.name
|
|
654
|
+
|
|
655
|
+
# -- Validation mode --
|
|
656
|
+
if validate_only:
|
|
657
|
+
errors, warnings = validate_change(change_dir)
|
|
658
|
+
for err in errors:
|
|
659
|
+
print(f"ERROR: {err}", file=sys.stderr)
|
|
660
|
+
for warn in warnings:
|
|
661
|
+
print(f"WARNING: {warn}", file=sys.stderr)
|
|
662
|
+
if not errors and not warnings:
|
|
663
|
+
print(f"OpenSpec validation: {change_name} -- OK")
|
|
664
|
+
elif not errors:
|
|
665
|
+
print(f"OpenSpec validation: {change_name} -- OK with {len(warnings)} warning(s)")
|
|
666
|
+
else:
|
|
667
|
+
print(f"OpenSpec validation: {change_name} -- FAILED ({len(errors)} error(s), {len(warnings)} warning(s))")
|
|
668
|
+
return 1 if errors else 0
|
|
669
|
+
|
|
670
|
+
# -- Parse proposal.md (required) --
|
|
671
|
+
proposal_path = change_dir / "proposal.md"
|
|
672
|
+
if not proposal_path.exists():
|
|
673
|
+
print("ERROR: proposal.md not found", file=sys.stderr)
|
|
674
|
+
return 1
|
|
675
|
+
|
|
676
|
+
proposal = parse_proposal(proposal_path)
|
|
677
|
+
|
|
678
|
+
# -- Parse delta specs --
|
|
679
|
+
specs_dir = change_dir / "specs"
|
|
680
|
+
all_deltas: Dict[str, Dict[str, Any]] = {}
|
|
681
|
+
num_spec_files = 0
|
|
682
|
+
|
|
683
|
+
if specs_dir.is_dir():
|
|
684
|
+
for spec_file in sorted(specs_dir.rglob("spec.md")):
|
|
685
|
+
domain = spec_file.parent.name
|
|
686
|
+
deltas = parse_delta_spec(spec_file)
|
|
687
|
+
all_deltas[domain] = deltas
|
|
688
|
+
num_spec_files += 1
|
|
689
|
+
|
|
690
|
+
if not all_deltas:
|
|
691
|
+
print("ERROR: No spec files found under specs/", file=sys.stderr)
|
|
692
|
+
return 1
|
|
693
|
+
|
|
694
|
+
# -- Parse tasks.md (optional) --
|
|
695
|
+
tasks_list: List[Dict[str, Any]] = []
|
|
696
|
+
source_map: Dict[str, Dict[str, Any]] = {}
|
|
697
|
+
tasks_path = change_dir / "tasks.md"
|
|
698
|
+
if tasks_path.exists():
|
|
699
|
+
tasks_list, source_map = parse_tasks(tasks_path)
|
|
700
|
+
|
|
701
|
+
# -- Parse design.md (optional) --
|
|
702
|
+
design_data: Optional[Dict[str, str]] = None
|
|
703
|
+
design_path = change_dir / "design.md"
|
|
704
|
+
has_design = design_path.exists()
|
|
705
|
+
if has_design:
|
|
706
|
+
design_data = parse_design(design_path)
|
|
707
|
+
|
|
708
|
+
# -- Parse .openspec.yaml (optional) --
|
|
709
|
+
yaml_metadata: Dict[str, Any] = {}
|
|
710
|
+
yaml_path = change_dir / ".openspec.yaml"
|
|
711
|
+
if yaml_path.exists():
|
|
712
|
+
yaml_metadata = parse_metadata(yaml_path)
|
|
713
|
+
|
|
714
|
+
# -- Classify complexity --
|
|
715
|
+
complexity = classify_complexity(
|
|
716
|
+
num_tasks=len(tasks_list),
|
|
717
|
+
num_spec_files=num_spec_files,
|
|
718
|
+
has_design=has_design,
|
|
719
|
+
)
|
|
720
|
+
|
|
721
|
+
# -- Build outputs --
|
|
722
|
+
normalized_prd = build_normalized_prd(change_name, proposal, all_deltas, design_data)
|
|
723
|
+
delta_context = build_delta_context(change_name, all_deltas, complexity)
|
|
724
|
+
verification_map = build_verification_map(all_deltas)
|
|
725
|
+
|
|
726
|
+
# -- JSON mode: output to stdout --
|
|
727
|
+
if as_json:
|
|
728
|
+
output = {
|
|
729
|
+
"change_name": change_name,
|
|
730
|
+
"complexity": complexity,
|
|
731
|
+
"proposal": proposal,
|
|
732
|
+
"deltas": all_deltas,
|
|
733
|
+
"tasks": tasks_list,
|
|
734
|
+
"metadata": yaml_metadata,
|
|
735
|
+
"stats": delta_context["stats"],
|
|
736
|
+
}
|
|
737
|
+
print(json.dumps(output, indent=2))
|
|
738
|
+
return 0
|
|
739
|
+
|
|
740
|
+
# -- Write output files --
|
|
741
|
+
if Path(output_dir).is_absolute():
|
|
742
|
+
abs_output_dir = Path(output_dir)
|
|
743
|
+
else:
|
|
744
|
+
abs_output_dir = (Path.cwd() / output_dir).resolve()
|
|
745
|
+
|
|
746
|
+
written: List[str] = []
|
|
747
|
+
|
|
748
|
+
# .loki/openspec-prd-normalized.md
|
|
749
|
+
prd_out = abs_output_dir / "openspec-prd-normalized.md"
|
|
750
|
+
_write_atomic(prd_out, normalized_prd)
|
|
751
|
+
written.append(str(prd_out))
|
|
752
|
+
|
|
753
|
+
# .loki/openspec-tasks.json
|
|
754
|
+
tasks_out = abs_output_dir / "openspec-tasks.json"
|
|
755
|
+
_write_atomic(tasks_out, json.dumps(tasks_list, indent=2))
|
|
756
|
+
written.append(str(tasks_out))
|
|
757
|
+
|
|
758
|
+
# .loki/openspec/delta-context.json
|
|
759
|
+
delta_out = abs_output_dir / "openspec" / "delta-context.json"
|
|
760
|
+
_write_atomic(delta_out, json.dumps(delta_context, indent=2))
|
|
761
|
+
written.append(str(delta_out))
|
|
762
|
+
|
|
763
|
+
# .loki/openspec/source-map.json
|
|
764
|
+
srcmap_out = abs_output_dir / "openspec" / "source-map.json"
|
|
765
|
+
_write_atomic(srcmap_out, json.dumps(source_map, indent=2))
|
|
766
|
+
written.append(str(srcmap_out))
|
|
767
|
+
|
|
768
|
+
# .loki/openspec/verification-map.json
|
|
769
|
+
verif_out = abs_output_dir / "openspec" / "verification-map.json"
|
|
770
|
+
_write_atomic(verif_out, json.dumps(verification_map, indent=2))
|
|
771
|
+
written.append(str(verif_out))
|
|
772
|
+
|
|
773
|
+
# -- CLI summary --
|
|
774
|
+
print(f"OpenSpec adapter: change={change_name} tasks={len(tasks_list)} specs={num_spec_files} complexity={complexity}")
|
|
775
|
+
print(f" Output files written to {abs_output_dir}/:")
|
|
776
|
+
for path in written:
|
|
777
|
+
print(f" - {Path(path).name}")
|
|
778
|
+
|
|
779
|
+
return 0
|
|
780
|
+
|
|
781
|
+
|
|
782
|
+
def main() -> None:
|
|
783
|
+
parser = argparse.ArgumentParser(
|
|
784
|
+
description="OpenSpec Change Adapter for Loki Mode",
|
|
785
|
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
786
|
+
epilog=(
|
|
787
|
+
"Examples:\n"
|
|
788
|
+
" python3 openspec-adapter.py ./openspec/changes/add-dark-mode\n"
|
|
789
|
+
" python3 openspec-adapter.py ./openspec/changes/add-dark-mode --json\n"
|
|
790
|
+
" python3 openspec-adapter.py ./openspec/changes/add-dark-mode --validate\n"
|
|
791
|
+
" python3 openspec-adapter.py ./openspec/changes/add-dark-mode --output-dir .loki/\n"
|
|
792
|
+
),
|
|
793
|
+
)
|
|
794
|
+
parser.add_argument(
|
|
795
|
+
"change_dir_path",
|
|
796
|
+
help="Path to the OpenSpec change directory",
|
|
797
|
+
)
|
|
798
|
+
parser.add_argument(
|
|
799
|
+
"--output-dir",
|
|
800
|
+
default=".loki",
|
|
801
|
+
help="Where to write output files (default: .loki/)",
|
|
802
|
+
)
|
|
803
|
+
parser.add_argument(
|
|
804
|
+
"--json",
|
|
805
|
+
action="store_true",
|
|
806
|
+
dest="as_json",
|
|
807
|
+
help="Output metadata as JSON to stdout (no files written)",
|
|
808
|
+
)
|
|
809
|
+
parser.add_argument(
|
|
810
|
+
"--validate",
|
|
811
|
+
action="store_true",
|
|
812
|
+
dest="validate_only",
|
|
813
|
+
help="Run artifact validation only",
|
|
814
|
+
)
|
|
815
|
+
|
|
816
|
+
args = parser.parse_args()
|
|
817
|
+
exit_code = run(
|
|
818
|
+
change_dir_path=args.change_dir_path,
|
|
819
|
+
output_dir=args.output_dir,
|
|
820
|
+
as_json=args.as_json,
|
|
821
|
+
validate_only=args.validate_only,
|
|
822
|
+
)
|
|
823
|
+
sys.exit(exit_code)
|
|
824
|
+
|
|
825
|
+
|
|
826
|
+
if __name__ == "__main__":
|
|
827
|
+
main()
|
package/autonomy/run.sh
CHANGED
|
@@ -7710,6 +7710,29 @@ except: pass
|
|
|
7710
7710
|
fi
|
|
7711
7711
|
fi
|
|
7712
7712
|
|
|
7713
|
+
# OpenSpec delta context injection (if available)
|
|
7714
|
+
local openspec_context=""
|
|
7715
|
+
if [[ -f ".loki/openspec/delta-context.json" ]]; then
|
|
7716
|
+
openspec_context=$(_DELTA_FILE=".loki/openspec/delta-context.json" python3 -c "
|
|
7717
|
+
import json, os
|
|
7718
|
+
try:
|
|
7719
|
+
with open(os.environ['_DELTA_FILE']) as f:
|
|
7720
|
+
data = json.load(f)
|
|
7721
|
+
parts = ['OPENSPEC DELTA CONTEXT:']
|
|
7722
|
+
for domain, deltas in data.get('deltas', {}).items():
|
|
7723
|
+
for req in deltas.get('added', []):
|
|
7724
|
+
parts.append(f' ADDED [{domain}]: {req[\"name\"]} - Create new code following existing patterns')
|
|
7725
|
+
for req in deltas.get('modified', []):
|
|
7726
|
+
parts.append(f' MODIFIED [{domain}]: {req[\"name\"]} - Find and update existing code, do NOT create new files. Previously: {req.get(\"previously\", \"N/A\")}')
|
|
7727
|
+
for req in deltas.get('removed', []):
|
|
7728
|
+
parts.append(f' REMOVED [{domain}]: {req[\"name\"]} - Deprecate or remove. Reason: {req.get(\"reason\", \"N/A\")}')
|
|
7729
|
+
parts.append(f'Complexity: {data.get(\"complexity\", \"unknown\")}')
|
|
7730
|
+
print(' '.join(parts))
|
|
7731
|
+
except Exception:
|
|
7732
|
+
pass
|
|
7733
|
+
" 2>/dev/null || true)
|
|
7734
|
+
fi
|
|
7735
|
+
|
|
7713
7736
|
# Degraded providers with small models need simplified prompts
|
|
7714
7737
|
# Full RARV/SDLC instructions overwhelm models < 30B parameters
|
|
7715
7738
|
if [ "${PROVIDER_DEGRADED:-false}" = "true" ]; then
|
|
@@ -7734,15 +7757,15 @@ except: pass
|
|
|
7734
7757
|
else
|
|
7735
7758
|
if [ $retry -eq 0 ]; then
|
|
7736
7759
|
if [ -n "$prd" ]; then
|
|
7737
|
-
echo "Loki Mode with PRD at $prd. $human_directive $gate_failure_context $queue_tasks $bmad_context $checklist_status $app_runner_info $playwright_info $memory_context_section $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7760
|
+
echo "Loki Mode with PRD at $prd. $human_directive $gate_failure_context $queue_tasks $bmad_context $openspec_context $checklist_status $app_runner_info $playwright_info $memory_context_section $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7738
7761
|
else
|
|
7739
|
-
echo "Loki Mode. $human_directive $gate_failure_context $queue_tasks $bmad_context $checklist_status $app_runner_info $playwright_info $memory_context_section $analysis_instruction $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7762
|
+
echo "Loki Mode. $human_directive $gate_failure_context $queue_tasks $bmad_context $openspec_context $checklist_status $app_runner_info $playwright_info $memory_context_section $analysis_instruction $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7740
7763
|
fi
|
|
7741
7764
|
else
|
|
7742
7765
|
if [ -n "$prd" ]; then
|
|
7743
|
-
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). PRD: $prd. $human_directive $gate_failure_context $queue_tasks $bmad_context $checklist_status $app_runner_info $playwright_info $memory_context_section $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7766
|
+
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). PRD: $prd. $human_directive $gate_failure_context $queue_tasks $bmad_context $openspec_context $checklist_status $app_runner_info $playwright_info $memory_context_section $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7744
7767
|
else
|
|
7745
|
-
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). $human_directive $gate_failure_context $queue_tasks $bmad_context $checklist_status $app_runner_info $playwright_info $memory_context_section Use .loki/generated-prd.md if exists. $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7768
|
+
echo "Loki Mode - Resume iteration #$iteration (retry #$retry). $human_directive $gate_failure_context $queue_tasks $bmad_context $openspec_context $checklist_status $app_runner_info $playwright_info $memory_context_section Use .loki/generated-prd.md if exists. $rarv_instruction $memory_instruction $compaction_reminder $completion_instruction $sdlc_instruction $autonomous_suffix"
|
|
7746
7769
|
fi
|
|
7747
7770
|
fi
|
|
7748
7771
|
fi
|
|
@@ -7867,6 +7890,91 @@ BMAD_QUEUE_EOF
|
|
|
7867
7890
|
log_info "BMAD queue population complete"
|
|
7868
7891
|
}
|
|
7869
7892
|
|
|
7893
|
+
#===============================================================================
# OpenSpec Task Queue Population
#===============================================================================

#######################################
# Populate the task queue from OpenSpec task artifacts.
# Runs once: a marker file (.loki/queue/.openspec-populated) prevents
# re-adding the same tasks after a restart.
# Outputs:  log messages; appends entries to .loki/queue/pending.json
# Returns:  always 0 (queue population is best-effort)
#######################################
populate_openspec_queue() {
  # Nothing to do without the adapter's task artifact
  if [[ ! -f ".loki/openspec-tasks.json" ]]; then
    return 0
  fi

  # Skip if already populated (marker file)
  if [[ -f ".loki/queue/.openspec-populated" ]]; then
    log_info "OpenSpec queue already populated, skipping"
    return 0
  fi

  log_step "Populating task queue from OpenSpec tasks..."

  # Ensure queue directory exists
  mkdir -p ".loki/queue"

  # Read OpenSpec tasks and create queue entries. Check python3's status
  # directly with `if !` rather than via $? afterwards -- the $? form is
  # fragile if any command is ever inserted between the heredoc and the check.
  if ! python3 << 'OPENSPEC_QUEUE_EOF'
import json
import sys

openspec_tasks_path = ".loki/openspec-tasks.json"
pending_path = ".loki/queue/pending.json"

try:
    with open(openspec_tasks_path, "r") as f:
        openspec_tasks = json.load(f)
except (json.JSONDecodeError, FileNotFoundError) as e:
    print(f"Warning: Could not read OpenSpec tasks: {e}", file=sys.stderr)
    sys.exit(0)

# Load existing queue; a missing or corrupt file is treated as empty
existing = []
try:
    with open(pending_path, "r") as f:
        existing = json.load(f)
except (json.JSONDecodeError, FileNotFoundError):
    pass

# Convert OpenSpec tasks to queue format (skip completed tasks)
for task in openspec_tasks:
    if task.get("status") == "completed":
        continue
    queue_entry = {
        "id": task.get("id", "openspec-unknown"),
        "title": task.get("title", "Untitled"),
        "description": f"[OpenSpec] {task.get('group', 'General')}: {task.get('title', '')}",
        "priority": task.get("priority", "medium"),
        "status": "pending",
        "source": "openspec",
        "metadata": {
            "openspec_source": task.get("source", "tasks.md"),
            "openspec_group": task.get("group", ""),
        }
    }
    existing.append(queue_entry)

with open(pending_path, "w") as f:
    json.dump(existing, f, indent=2)

pending_count = sum(1 for t in openspec_tasks if t.get('status') != 'completed')
if pending_count == 0:
    print("WARNING: All OpenSpec tasks are already marked as completed. No tasks added to queue.", file=sys.stderr)
    print("Check your tasks.md file -- all checkboxes are checked.", file=sys.stderr)
else:
    print(f"Added {pending_count} OpenSpec tasks to queue")
OPENSPEC_QUEUE_EOF
  then
    log_warn "Failed to populate OpenSpec queue (python3 error)"
    return 0
  fi

  # Mark as populated so we don't re-add on restart
  touch ".loki/queue/.openspec-populated"
  log_info "OpenSpec queue population complete"
}
|
|
7977
|
+
|
|
7870
7978
|
#===============================================================================
|
|
7871
7979
|
# Main Autonomous Loop
|
|
7872
7980
|
#===============================================================================
|
|
@@ -7966,6 +8074,9 @@ run_autonomous() {
|
|
|
7966
8074
|
# Populate task queue from BMAD artifacts (if present, runs once)
|
|
7967
8075
|
populate_bmad_queue
|
|
7968
8076
|
|
|
8077
|
+
# Populate task queue from OpenSpec artifacts (if present, runs once)
|
|
8078
|
+
populate_openspec_queue
|
|
8079
|
+
|
|
7969
8080
|
# Check max iterations before starting
|
|
7970
8081
|
if check_max_iterations; then
|
|
7971
8082
|
log_error "Max iterations already reached. Reset with: rm .loki/autonomy-state.json"
|
|
@@ -8484,8 +8595,7 @@ if __name__ == "__main__":
|
|
|
8484
8595
|
echo "ESCALATE" > "${TARGET_DIR:-.}/.loki/signals/GATE_ESCALATION"
|
|
8485
8596
|
gate_failures="${gate_failures}code_review_ESCALATED,"
|
|
8486
8597
|
elif [ "$cr_count" -ge "$GATE_CLEAR_LIMIT" ]; then
|
|
8487
|
-
log_warn "Gate cleared: code_review failed $cr_count times (>= $GATE_CLEAR_LIMIT) -
|
|
8488
|
-
clear_gate_failure "code_review"
|
|
8598
|
+
log_warn "Gate cleared: code_review failed $cr_count times (>= $GATE_CLEAR_LIMIT) - passing gate this iteration, counter continues"
|
|
8489
8599
|
else
|
|
8490
8600
|
gate_failures="${gate_failures}code_review,"
|
|
8491
8601
|
log_warn "Code review BLOCKED ($cr_count consecutive) - Critical/High findings"
|
package/dashboard/__init__.py
CHANGED
package/docs/INSTALLATION.md
CHANGED
package/mcp/__init__.py
CHANGED
package/package.json
CHANGED
package/skills/00-index.md
CHANGED
|
@@ -27,6 +27,7 @@
|
|
|
27
27
|
| Scale patterns (50+ agents) | `parallel-workflows.md` + `references/cursor-learnings.md` |
|
|
28
28
|
| GitHub issues, PRs, syncing | `github-integration.md` |
|
|
29
29
|
| Multi-provider (Codex, Gemini) | `providers.md` |
|
|
30
|
+
| OpenSpec delta context, brownfield modifications | `openspec-integration.md` |
|
|
30
31
|
| Plan deepening, knowledge extraction | `compound-learning.md` |
|
|
31
32
|
|
|
32
33
|
## Module Descriptions
|
|
@@ -109,6 +110,14 @@
|
|
|
109
110
|
- Filter by labels, milestone, assignee
|
|
110
111
|
- Requires `gh` CLI authenticated
|
|
111
112
|
|
|
113
|
+
### openspec-integration.md
|
|
114
|
+
**When:** Working with OpenSpec delta context, `--openspec` flag, brownfield modifications
|
|
115
|
+
- Delta-aware development rules (ADDED/MODIFIED/REMOVED)
|
|
116
|
+
- Task execution by group order
|
|
117
|
+
- Scenario-to-test mapping (GIVEN/WHEN/THEN)
|
|
118
|
+
- Source mapping and verification tracking
|
|
119
|
+
- Complexity-based agent strategy
|
|
120
|
+
|
|
112
121
|
### compound-learning.md (v5.30.0)
|
|
113
122
|
**When:** After architecture phase (deepen plan), after verification (extract learnings)
|
|
114
123
|
- Deepen-plan: 4 parallel research agents enhance plans before implementation
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
# OpenSpec Integration
|
|
2
|
+
|
|
3
|
+
> **Reference:** OpenSpec delta specs use ADDED/MODIFIED/REMOVED sections to describe changes to existing system behavior. See `.loki/openspec/delta-context.json` for the parsed delta context injected into your prompt.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## When This Module Applies
|
|
8
|
+
|
|
9
|
+
- Your prompt contains an `OPENSPEC DELTA CONTEXT` section
|
|
10
|
+
- The project has `.loki/openspec/delta-context.json`
|
|
11
|
+
- The session was started with `--openspec` flag
|
|
12
|
+
- Tasks in `.loki/queue/pending.json` have `openspec_group` metadata
|
|
13
|
+
|
|
14
|
+
**If none of the above are true, do not load this module.**
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## Delta-Aware Development Rules
|
|
19
|
+
|
|
20
|
+
### ADDED Requirements
|
|
21
|
+
|
|
22
|
+
New behavior that does not exist in the codebase yet.
|
|
23
|
+
|
|
24
|
+
1. Create NEW files and functions following existing codebase patterns
|
|
25
|
+
2. Do NOT modify existing code unless the new feature integrates with it
|
|
26
|
+
3. Write tests for every scenario (GIVEN/WHEN/THEN from the delta spec)
|
|
27
|
+
4. Reference: `delta-context.json` entries under the `"added"` key
|
|
28
|
+
|
|
29
|
+
```
|
|
30
|
+
# Mental model for ADDED
|
|
31
|
+
Read scenario -> Write test -> Implement -> Verify test passes
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
### MODIFIED Requirements
|
|
35
|
+
|
|
36
|
+
Existing behavior that is changing. This is the most common delta type in brownfield work.
|
|
37
|
+
|
|
38
|
+
1. Find the EXISTING code that implements this requirement
|
|
39
|
+
2. Modify IN PLACE -- do NOT create new files for modified behavior
|
|
40
|
+
3. Check the `(Previously: ...)` annotation to understand what changed
|
|
41
|
+
4. Update existing tests to match the new behavior
|
|
42
|
+
5. Reference: `delta-context.json` entries under the `"modified"` key
|
|
43
|
+
|
|
44
|
+
```
|
|
45
|
+
# Mental model for MODIFIED
|
|
46
|
+
Read "(Previously: ...)" -> Find existing code -> Update code -> Update tests -> Verify
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
**Common mistake:** Treating MODIFIED as ADDED and creating new files. Always search the codebase first for the existing implementation.
|
|
50
|
+
|
|
51
|
+
### REMOVED Requirements
|
|
52
|
+
|
|
53
|
+
Behavior that is being deprecated or deleted.
|
|
54
|
+
|
|
55
|
+
1. Find and remove or deprecate the code implementing this requirement
|
|
56
|
+
2. Check the `(Deprecated: ...)` annotation for the reason
|
|
57
|
+
3. Remove associated tests
|
|
58
|
+
4. Ensure no orphaned imports or dead code remains
|
|
59
|
+
5. Reference: `delta-context.json` entries under the `"removed"` key
|
|
60
|
+
|
|
61
|
+
```
|
|
62
|
+
# Mental model for REMOVED
|
|
63
|
+
Read "(Deprecated: ...)" -> Find existing code -> Remove code -> Remove tests -> Verify no dead refs
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
---
|
|
67
|
+
|
|
68
|
+
## Task Execution
|
|
69
|
+
|
|
70
|
+
Tasks are generated from OpenSpec `tasks.md` and loaded into `.loki/queue/pending.json`.
|
|
71
|
+
|
|
72
|
+
- Each task has `openspec_group` metadata indicating its task group number
|
|
73
|
+
- Execute tasks in group order (group 1 before group 2, etc.)
|
|
74
|
+
- Within a group, tasks can run in parallel if they touch different files
|
|
75
|
+
- Mark tasks complete in the queue when done
|
|
76
|
+
|
|
77
|
+
```json
|
|
78
|
+
{
|
|
79
|
+
"id": "task-3",
|
|
80
|
+
"title": "Implement session timeout change",
|
|
81
|
+
"openspec_group": 1,
|
|
82
|
+
"delta_type": "modified",
|
|
83
|
+
"spec_ref": "auth/spec.md#session-expiration"
|
|
84
|
+
}
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Scenario Verification
|
|
90
|
+
|
|
91
|
+
After implementing a requirement, verify its scenarios.
|
|
92
|
+
|
|
93
|
+
1. Each scenario has GIVEN (precondition), WHEN (action), THEN (expected outcome)
|
|
94
|
+
2. Write test cases that map 1:1 to scenarios
|
|
95
|
+
3. Use the scenario name as the test name for traceability
|
|
96
|
+
4. Verification results are tracked in `.loki/openspec/verification-map.json`
|
|
97
|
+
|
|
98
|
+
```python
|
|
99
|
+
# Scenario: "Idle timeout" -> test name matches scenario
|
|
100
|
+
def test_idle_timeout():
|
|
101
|
+
# GIVEN an authenticated session
|
|
102
|
+
session = create_authenticated_session()
|
|
103
|
+
# WHEN 15 minutes pass without activity
|
|
104
|
+
advance_time(minutes=15)
|
|
105
|
+
# THEN the session is invalidated
|
|
106
|
+
assert session.is_expired()
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
---
|
|
110
|
+
|
|
111
|
+
## Source Mapping
|
|
112
|
+
|
|
113
|
+
`.loki/openspec/source-map.json` maps each task ID to its origin in the spec files.
|
|
114
|
+
|
|
115
|
+
| Field | Purpose |
|
|
116
|
+
|-------|---------|
|
|
117
|
+
| `task_id` | Queue task identifier |
|
|
118
|
+
| `spec_file` | Source spec file path |
|
|
119
|
+
| `requirement` | Requirement name |
|
|
120
|
+
| `scenario` | Scenario name (if applicable) |
|
|
121
|
+
| `line` | Line number in spec file |
|
|
122
|
+
|
|
123
|
+
Use this to trace implementation decisions back to the specification.
|
|
124
|
+
|
|
125
|
+
---
|
|
126
|
+
|
|
127
|
+
## Complexity Levels
|
|
128
|
+
|
|
129
|
+
| Level | Tasks | Spec Files | Design | Agent Strategy |
|
|
130
|
+
|-------|-------|------------|--------|----------------|
|
|
131
|
+
| simple | 1-3 | 1 | none | Single agent, sequential |
|
|
132
|
+
| standard | 4-10 | 2-5 | present | Parallel where possible |
|
|
133
|
+
| complex | 11-20 | 5-10 | present | Task tool parallelization |
|
|
134
|
+
| enterprise | 20+ | 10+ | present | Full agent team |
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## Common Mistakes
|
|
139
|
+
|
|
140
|
+
| Mistake | Correction |
|
|
141
|
+
|---------|------------|
|
|
142
|
+
| Creating new files for MODIFIED requirements | Search codebase first, update existing code in place |
|
|
143
|
+
| Ignoring `(Previously: ...)` annotations | These tell you exactly what changed -- read them |
|
|
144
|
+
| Not writing tests for GIVEN/WHEN/THEN scenarios | Every scenario must have a corresponding test |
|
|
145
|
+
| Treating all deltas as ADDED | Most brownfield work is MODIFIED -- check the delta type |
|
|
146
|
+
| Skipping REMOVED cleanup | Dead code and orphaned imports cause maintenance burden |
|
|
147
|
+
| Implementing groups out of order | Group 1 must complete before group 2 starts |
|