loki-mode 5.42.2 → 5.46.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -3
- package/SKILL.md +2 -2
- package/VERSION +1 -1
- package/autonomy/app-runner.sh +684 -0
- package/autonomy/checklist-verify.py +368 -0
- package/autonomy/completion-council.sh +49 -0
- package/autonomy/loki +83 -0
- package/autonomy/playwright-verify.sh +350 -0
- package/autonomy/prd-analyzer.py +457 -0
- package/autonomy/prd-checklist.sh +223 -0
- package/autonomy/run.sh +164 -4
- package/completions/loki.bash +6 -1
- package/dashboard/__init__.py +1 -1
- package/dashboard/server.py +134 -1
- package/dashboard/static/index.html +804 -265
- package/docs/INSTALLATION.md +1 -1
- package/docs/audit-logging.md +600 -0
- package/docs/authentication.md +374 -0
- package/docs/authorization.md +455 -0
- package/docs/git-workflow.md +446 -0
- package/docs/metrics.md +527 -0
- package/docs/network-security.md +275 -0
- package/docs/openclaw-integration.md +572 -0
- package/docs/siem-integration.md +579 -0
- package/learning/__init__.py +1 -1
- package/mcp/__init__.py +1 -1
- package/memory/__init__.py +2 -0
- package/package.json +2 -1
|
@@ -0,0 +1,457 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PRD Quality Analyzer for Loki Mode v5.44.0
|
|
3
|
+
|
|
4
|
+
Analyzes PRD structure and completeness using regex-based heuristics.
|
|
5
|
+
Writes observations to .loki/prd-observations.md with quality score,
|
|
6
|
+
strengths, gaps, assumptions, and recommendations.
|
|
7
|
+
|
|
8
|
+
Stdlib only - no pip dependencies required.
|
|
9
|
+
|
|
10
|
+
Usage:
|
|
11
|
+
python3 prd-analyzer.py path/to/prd.md --output .loki/prd-observations.md
|
|
12
|
+
python3 prd-analyzer.py path/to/prd.md --output .loki/prd-observations.md --interactive
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import argparse
|
|
16
|
+
import os
|
|
17
|
+
import re
|
|
18
|
+
import sys
|
|
19
|
+
import tempfile
|
|
20
|
+
from datetime import datetime, timezone
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Analysis dimensions with their detection patterns and weights
# Each dimension is detected two ways: "heading_patterns" look for a dedicated
# markdown section, while "content_patterns" look for tell-tale body text.
# Both found => full weight; only one found => half weight (see analyze()).
DIMENSIONS = {
    "feature_list": {
        "label": "Feature List",
        "weight": 1.5,  # highest weight of all dimensions
        "heading_patterns": [
            r"(?i)#+\s.*(?:feature|requirement|scope|functional|capability)",
        ],
        "content_patterns": [
            # Bulleted ("- x" / "* x") or numbered ("1. x") list items.
            r"^\s*[-*]\s+\S",
            r"^\s*\d+\.\s+\S",
        ],
        "description": "Numbered or bulleted list of features/requirements",
    },
    "tech_stack": {
        "label": "Tech Stack",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:tech|stack|technology|architecture|infrastructure)",
        ],
        "content_patterns": [
            # Common framework, language, database, and platform names.
            r"(?i)\b(?:react|vue|angular|svelte|next\.?js|nuxt)\b",
            r"(?i)\b(?:node\.?js|express|fastapi|django|flask|rails|spring)\b",
            r"(?i)\b(?:python|typescript|javascript|rust|go|java|ruby)\b",
            r"(?i)\b(?:postgres|mysql|sqlite|mongodb|redis|dynamodb)\b",
            r"(?i)\b(?:docker|kubernetes|k8s|aws|gcp|azure|vercel|netlify)\b",
        ],
        "description": "Technology choices and architecture",
    },
    "user_stories": {
        "label": "User Stories / Flows",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:user\s+(?:stor|flow|journey)|persona|use\s+case)",
        ],
        "content_patterns": [
            # Classic "As a <role> ... so that <benefit>" story phrasing.
            r"(?i)\bas\s+a\s+\w+",
            r"(?i)\buser\s+(?:can|should|will|must|wants?\s+to)\b",
            r"(?i)\bso\s+that\s+\w+",
        ],
        "description": "User stories, personas, or user flow descriptions",
    },
    "acceptance_criteria": {
        "label": "Acceptance Criteria",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:acceptance|criteria|definition\s+of\s+done|done\s+when)",
        ],
        "content_patterns": [
            # Markdown task-list checkboxes, Gherkin phrasing, or "must" statements.
            r"^\s*-\s*\[\s*[xX ]?\s*\]",
            r"(?i)\bgiven\b.*\bwhen\b.*\bthen\b",
            r"(?i)\bmust\s+(?:be|have|support|handle)\b",
        ],
        "description": "Measurable completion criteria or checklists",
    },
    "data_model": {
        "label": "Data Model / Schema",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:data\s*(?:base|model)|schema|entit|table|erd)",
        ],
        "content_patterns": [
            r"(?i)\b(?:database|schema|model|table|entity|collection)\b",
            r"(?i)\b(?:primary\s+key|foreign\s+key|index|relation)\b",
            r"(?i)\b(?:one-to-many|many-to-many|has_many|belongs_to)\b",
        ],
        "description": "Database schema, data models, or entity definitions",
    },
    "api_spec": {
        "label": "API Specifications",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:api|endpoint|route|rest|graphql)",
        ],
        "content_patterns": [
            # HTTP verbs followed by a path, e.g. "GET /users".
            r"(?i)\b(?:GET|POST|PUT|PATCH|DELETE)\s+/",
            r"(?i)\b(?:endpoint|route|api)\b.*\b(?:GET|POST|PUT|DELETE)\b",
            r"(?i)\b(?:request|response)\s+(?:body|payload|schema)\b",
        ],
        "description": "API endpoints, request/response formats",
    },
    "deployment": {
        "label": "Deployment Requirements",
        "weight": 0.75,  # nice-to-have dimension, weighted below the core ones
        "heading_patterns": [
            r"(?i)#+\s.*(?:deploy|hosting|infra|ci.?cd|environment)",
        ],
        "content_patterns": [
            r"(?i)\b(?:deploy|hosting|ci.?cd|pipeline|staging|production)\b",
            r"(?i)\b(?:docker|container|serverless|lambda|cloud\s*run)\b",
        ],
        "description": "Deployment targets, CI/CD, and infrastructure",
    },
    "error_handling": {
        "label": "Error Handling",
        "weight": 0.75,  # nice-to-have dimension, weighted below the core ones
        "heading_patterns": [
            r"(?i)#+\s.*(?:error|exception|failure|fallback|edge\s+case)",
        ],
        "content_patterns": [
            r"(?i)\b(?:error\s+handl|exception|fallback|retry|timeout)\b",
            r"(?i)\b(?:edge\s+case|failure\s+mode|graceful|degraded)\b",
        ],
        "description": "Error handling, edge cases, and failure modes",
    },
    "security": {
        "label": "Security Requirements",
        "weight": 1.0,
        "heading_patterns": [
            r"(?i)#+\s.*(?:security|auth|permission|access\s+control)",
        ],
        "content_patterns": [
            r"(?i)\b(?:auth(?:entication|orization)?|oauth|jwt|token)\b",
            r"(?i)\b(?:security|permission|role|rbac|encrypt|ssl|tls|https)\b",
            r"(?i)\b(?:password|credential|secret|api\s*key)\b",
        ],
        "description": "Authentication, authorization, and security measures",
    },
}

# Scope estimation based on feature count
# (upper_bound, label) pairs checked in order; the first bound that the
# feature count does not exceed wins (see PrdAnalyzer._estimate_scope).
SCOPE_THRESHOLDS = [
    (5, "small"),
    (15, "medium"),
    (30, "large"),
    (999999, "enterprise"),  # effectively unbounded catch-all
]
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
class PrdAnalyzer:
    """Analyzes PRD quality and completeness.

    Runs the regex heuristics declared in DIMENSIONS over a markdown PRD,
    computes a weighted 0-10 coverage score, estimates project scope from
    the feature count, and renders an observations report (optionally
    augmented with interactive clarifications).
    """

    def __init__(self, prd_path):
        self.prd_path = Path(prd_path)
        self.content = ""         # raw PRD text (populated by load())
        self.lines = []           # PRD split into lines
        self.results = {}         # per-dimension detection results
        self.feature_count = 0    # list items counted for scope estimation
        self.scope = "unknown"    # small / medium / large / enterprise
        self.score = 0.0          # weighted coverage score on a 0-10 scale

    def load(self):
        """Load and validate the PRD file.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the file is empty or whitespace-only.
        """
        if not self.prd_path.exists():
            raise FileNotFoundError(f"PRD file not found: {self.prd_path}")
        self.content = self.prd_path.read_text(encoding="utf-8", errors="replace")
        if not self.content.strip():
            raise ValueError(f"PRD file is empty: {self.prd_path}")
        self.lines = self.content.splitlines()

    def analyze(self):
        """Run all analysis dimensions and compute the quality score.

        Confidence per dimension is "high" when both a matching heading and
        matching content are found, "partial" when only one is, "none"
        otherwise. High earns full weight, partial earns half.

        Returns:
            dict: per-dimension results (also stored on self.results).
        """
        self.load()
        total_weight = 0.0
        earned_weight = 0.0

        for key, dim in DIMENSIONS.items():
            found_heading = self._any_line_matches(dim["heading_patterns"])
            found_content, matches = self._collect_content_matches(dim["content_patterns"])

            detected = found_heading or found_content
            confidence = "high" if (found_heading and found_content) else "partial" if detected else "none"

            self.results[key] = {
                "label": dim["label"],
                "detected": detected,
                "confidence": confidence,
                "has_heading": found_heading,
                "has_content": found_content,
                "sample_matches": matches[:3],
                "weight": dim["weight"],
                "description": dim["description"],
            }

            total_weight += dim["weight"]
            if confidence == "high":
                earned_weight += dim["weight"]
            elif confidence == "partial":
                earned_weight += dim["weight"] * 0.5

        # Normalize earned weight onto a 0-10 scale.
        self.score = round((earned_weight / total_weight) * 10, 1) if total_weight > 0 else 0.0

        # Count features for scope estimation
        self._estimate_scope()

        return self.results

    def _any_line_matches(self, patterns):
        """Return True if any PRD line matches any of the given regexes."""
        return any(re.search(p, line) for p in patterns for line in self.lines)

    def _collect_content_matches(self, patterns):
        """Scan lines for content patterns; return (found, up to 5 samples)."""
        matches = []
        for pattern in patterns:
            for line in self.lines:
                if re.search(pattern, line):
                    # Keep a trimmed sample for the results payload.
                    matches.append(line.strip()[:120])
                    if len(matches) >= 5:
                        return True, matches
        return bool(matches), matches

    def _estimate_scope(self):
        """Estimate project scope from the number of feature list items.

        Counts bulleted/numbered items that fall inside a feature-like
        section (a heading mentioning feature/requirement/scope/functional,
        terminated by the next heading). When the PRD has no such heading,
        falls back to counting every list item so a scope is still produced.

        Bug fix: the previous implementation tracked in_feature_section but
        never consulted it when counting, so every list item anywhere in the
        document inflated the feature count and the scope estimate.
        """
        section_items = 0
        total_items = 0
        in_feature_section = False
        saw_feature_heading = False

        for line in self.lines:
            if re.search(r"(?i)#+\s.*(?:feature|requirement|scope|functional)", line):
                in_feature_section = True
                saw_feature_heading = True
                continue
            if in_feature_section and re.match(r"^\s*#+\s", line):
                # Any other heading closes the feature section.
                in_feature_section = False
                continue
            if re.match(r"^\s*[-*]\s+\S", line) or re.match(r"^\s*\d+\.\s+\S", line):
                total_items += 1
                if in_feature_section:
                    section_items += 1

        self.feature_count = section_items if saw_feature_heading else total_items
        for threshold, label in SCOPE_THRESHOLDS:
            if self.feature_count <= threshold:
                self.scope = label
                break

    def generate_observations(self):
        """Generate the observations markdown report.

        Returns:
            str: markdown containing score, scope, strengths, gaps,
            assumptions, and recommended additions.
        """
        now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        strengths = []
        missing = []
        assumptions = []
        recommendations = []

        for key, result in self.results.items():
            label = result["label"]
            if result["confidence"] == "high":
                strengths.append(f"- **{label}**: Well-defined with dedicated section and content")
            elif result["confidence"] == "partial":
                strengths.append(f"- **{label}**: Partially covered (mentioned but lacks dedicated section)")
                recommendations.append(f"- Add a dedicated **{label}** section with more detail")
            else:
                missing.append(f"- **{label}**: {result['description']}")
                assumptions.append(self._make_assumption(key))
                recommendations.append(f"- Add **{label}** section: {result['description']}")

        lines = [
            "# PRD Analysis Observations",
            "",
            f"**Source:** `{self.prd_path}`",
            f"**Analyzed:** {now}",
            f"**Quality Score:** {self.score}/10",
            f"**Estimated Scope:** {self.scope} (~{self.feature_count} items detected)",
            "",
            "## Strengths",
            "",
        ]
        if strengths:
            lines.extend(strengths)
        else:
            lines.append("- No well-defined sections detected")
        lines.append("")

        lines.append("## Missing or Unclear")
        lines.append("")
        if missing:
            lines.extend(missing)
        else:
            lines.append("- All key dimensions are covered")
        lines.append("")

        lines.append("## Assumptions Made")
        lines.append("")
        if assumptions:
            # _make_assumption can return "" for unknown keys; drop those.
            lines.extend([a for a in assumptions if a])
        else:
            lines.append("- No assumptions needed; PRD is comprehensive")
        lines.append("")

        lines.append("## Recommended Additions")
        lines.append("")
        if recommendations:
            lines.extend(recommendations)
        else:
            lines.append("- PRD is well-structured; no major additions needed")
        lines.append("")

        return "\n".join(lines)

    def _make_assumption(self, key):
        """Generate a reasonable assumption for a missing dimension.

        Returns "" for unknown dimension keys.
        """
        assumptions_map = {
            "feature_list": "- Will extract features from prose descriptions",
            "tech_stack": "- Will infer tech stack from context or use common defaults",
            "user_stories": "- Will derive user flows from feature descriptions",
            "acceptance_criteria": "- Will generate acceptance criteria from requirements",
            "data_model": "- Will design data model based on feature requirements",
            "api_spec": "- Will define API endpoints based on feature set",
            "deployment": "- Will use standard containerized deployment",
            "error_handling": "- Will implement standard error handling patterns",
            "security": "- Will apply baseline security (input validation, auth if applicable)",
        }
        return assumptions_map.get(key, "")

    def get_interactive_questions(self):
        """Generate (key, question) pairs for undetected dimensions."""
        questions = []
        for key, result in self.results.items():
            if result["confidence"] == "none":
                q = self._make_question(key)
                if q:
                    questions.append((key, q))
        return questions

    def _make_question(self, key):
        """Generate an interactive prompt for a missing dimension, or None."""
        questions_map = {
            "feature_list": "No clear feature list found. Can you list the key features? (comma-separated): ",
            "tech_stack": "No tech stack specified. What technologies should be used? [e.g., React+Node, Python+FastAPI]: ",
            "user_stories": "No user stories found. Who are the primary users? (comma-separated roles): ",
            "acceptance_criteria": "No acceptance criteria found. What defines 'done' for this project?: ",
            "data_model": "No data model specified. Which database should be used? [postgres/mysql/sqlite/mongodb/none]: ",
            "api_spec": "No API specs found. Will this project have a REST API? [yes/no]: ",
            "deployment": "No deployment requirements. Where will this be deployed? [docker/vercel/aws/local]: ",
            "error_handling": "No error handling requirements. Any specific reliability needs? [skip to use defaults]: ",
            "security": "No security requirements. Does this need authentication? [yes/no/skip]: ",
        }
        return questions_map.get(key)

    def run_interactive(self):
        """Run interactive Q&A for missing dimensions.

        Returns:
            dict: dimension key -> user answer. Empty when stdin is not a
            TTY, when nothing is missing, or on Ctrl-C/EOF.
        """
        if not sys.stdin.isatty():
            # Non-interactive context (pipe/CI): never block on input().
            return {}

        questions = self.get_interactive_questions()
        if not questions:
            print("PRD covers all key dimensions. No questions needed.")
            return {}

        print(f"\nPRD Quality Score: {self.score}/10")
        print(f"Found {len(questions)} gap(s). Please provide clarifications:\n")

        clarifications = {}
        for key, question in questions:
            try:
                answer = input(question).strip()
                if answer and answer.lower() not in ("skip", ""):
                    clarifications[key] = answer
            except (EOFError, KeyboardInterrupt):
                print("\nInteractive mode cancelled.")
                break

        return clarifications

    def append_clarifications(self, observations_text, clarifications):
        """Append user clarifications to the observations markdown.

        Returns the text unchanged when clarifications is empty.
        """
        if not clarifications:
            return observations_text

        lines = [observations_text.rstrip(), "", "## User Clarifications", ""]
        for key, answer in clarifications.items():
            label = DIMENSIONS[key]["label"] if key in DIMENSIONS else key
            lines.append(f"- **{label}**: {answer}")
        lines.append("")

        return "\n".join(lines)
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
def write_atomic(path, content):
    """Atomically write *content* to *path* via a temp file and rename.

    The parent directory is created if missing. On any failure the staging
    file is removed (best effort) and the original exception propagates.
    """
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    handle, staging = tempfile.mkstemp(dir=target.parent, suffix=".tmp")
    try:
        with os.fdopen(handle, "w", encoding="utf-8") as stream:
            stream.write(content)
        # os.replace is atomic on POSIX when source and target share a
        # filesystem, which mkstemp(dir=...) guarantees here.
        os.replace(staging, str(target))
    except Exception:
        try:
            os.unlink(staging)
        except OSError:
            pass
        raise
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
def main():
    """CLI entry point: analyze a PRD and write the observations report.

    Exits with status 1 on a missing/empty PRD or any unexpected error.
    """
    parser = argparse.ArgumentParser(
        description="Analyze PRD quality and completeness for Loki Mode"
    )
    parser.add_argument("prd_path", help="Path to the PRD markdown file")
    parser.add_argument(
        "--output",
        default=".loki/prd-observations.md",
        help="Output path for observations (default: .loki/prd-observations.md)",
    )
    parser.add_argument(
        "--interactive",
        action="store_true",
        help="Ask clarifying questions for missing dimensions",
    )
    opts = parser.parse_args()

    try:
        analyzer = PrdAnalyzer(opts.prd_path)
        analyzer.analyze()
        report = analyzer.generate_observations()

        if opts.interactive:
            answers = analyzer.run_interactive()
            if answers:
                report = analyzer.append_clarifications(report, answers)

        write_atomic(opts.output, report)
        print(f"PRD analysis complete: score={analyzer.score}/10 scope={analyzer.scope}")
        print(f"Observations written to: {opts.output}")

    except (FileNotFoundError, ValueError) as e:
        # Expected failures: missing or empty PRD file.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
#===============================================================================
|
|
3
|
+
# PRD Checklist Module (v5.44.0)
|
|
4
|
+
#
|
|
5
|
+
# Manages PRD requirement tracking and automated verification. Creates a
|
|
6
|
+
# structured checklist from PRD analysis, verifies items on a configurable
|
|
7
|
+
# interval, and provides status summaries for prompt injection and council.
|
|
8
|
+
#
|
|
9
|
+
# Functions:
|
|
10
|
+
# checklist_init(prd_path) - Initialize checklist during DISCOVERY phase
|
|
11
|
+
# checklist_should_verify() - Check if verification should run this iteration
|
|
12
|
+
# checklist_verify() - Run verification checks via checklist-verify.py
|
|
13
|
+
# checklist_summary() - One-line summary for prompt injection
|
|
14
|
+
# checklist_as_evidence() - Formatted output for council evidence file
|
|
15
|
+
#
|
|
16
|
+
# Environment Variables:
|
|
17
|
+
# LOKI_CHECKLIST_INTERVAL - Verify every N iterations (default: 5)
|
|
18
|
+
# LOKI_CHECKLIST_TIMEOUT - Timeout per check in seconds (default: 30)
|
|
19
|
+
# LOKI_CHECKLIST_ENABLED - Enable/disable checklist (default: true)
|
|
20
|
+
#
|
|
21
|
+
# Data:
|
|
22
|
+
# .loki/checklist/checklist.json - Full checklist with verification
|
|
23
|
+
# .loki/checklist/verification-results.json - Summary of last verification
|
|
24
|
+
#
|
|
25
|
+
# Usage:
|
|
26
|
+
# source autonomy/prd-checklist.sh
|
|
27
|
+
# checklist_init "$prd_path"
|
|
28
|
+
# if checklist_should_verify; then checklist_verify; fi
|
|
29
|
+
# checklist_summary
|
|
30
|
+
#
|
|
31
|
+
#===============================================================================
|
|
32
|
+
|
|
33
|
+
# Configuration
CHECKLIST_ENABLED=${LOKI_CHECKLIST_ENABLED:-true}
CHECKLIST_INTERVAL=${LOKI_CHECKLIST_INTERVAL:-5}
# Validate the interval: the previous "[ ... -le 0 ] 2>/dev/null" guard
# silently failed for non-numeric values, leaving garbage that later breaks
# the $((iteration % CHECKLIST_INTERVAL)) arithmetic. Reset anything that is
# not a positive integer to the default.
case "$CHECKLIST_INTERVAL" in
    ''|*[!0-9]*) CHECKLIST_INTERVAL=5 ;;
esac
if [ "$CHECKLIST_INTERVAL" -le 0 ]; then
    CHECKLIST_INTERVAL=5
fi
CHECKLIST_TIMEOUT=${LOKI_CHECKLIST_TIMEOUT:-30}
# Same positive-integer validation for the per-check timeout.
case "$CHECKLIST_TIMEOUT" in
    ''|*[!0-9]*) CHECKLIST_TIMEOUT=30 ;;
esac
if [ "$CHECKLIST_TIMEOUT" -le 0 ]; then
    CHECKLIST_TIMEOUT=30
fi

# Internal state
CHECKLIST_DIR=""
CHECKLIST_FILE=""
CHECKLIST_RESULTS_FILE=""
CHECKLIST_LAST_VERIFY_ITERATION=0
|
|
51
|
+
|
|
52
|
+
#===============================================================================
|
|
53
|
+
# Initialization
|
|
54
|
+
#===============================================================================
|
|
55
|
+
|
|
56
|
+
checklist_init() {
    # Initialize checklist state and directory; no-op when disabled.
    local prd_path="${1:-}"

    [ "$CHECKLIST_ENABLED" = "true" ] || return 0

    CHECKLIST_DIR=".loki/checklist"
    CHECKLIST_FILE="${CHECKLIST_DIR}/checklist.json"
    CHECKLIST_RESULTS_FILE="${CHECKLIST_DIR}/verification-results.json"

    mkdir -p "$CHECKLIST_DIR"

    # Only announce initialization when a real PRD file was handed in.
    if [ -n "$prd_path" ] && [ -f "$prd_path" ]; then
        log_info "PRD checklist initialized for: $prd_path"
    fi

    return 0
}
|
|
75
|
+
|
|
76
|
+
#===============================================================================
|
|
77
|
+
# Interval Control
|
|
78
|
+
#===============================================================================
|
|
79
|
+
|
|
80
|
+
checklist_should_verify() {
    # Exit status 0 (true) when verification is due this iteration.
    [ "$CHECKLIST_ENABLED" = "true" ] || return 1
    [ -f "$CHECKLIST_FILE" ] || return 1

    local iter="${ITERATION_COUNT:-0}"
    # Never verify before the first iteration.
    [ "$iter" -ne 0 ] || return 1
    # Only fire on interval boundaries.
    [ $((iter % CHECKLIST_INTERVAL)) -eq 0 ] || return 1
    # Don't verify the same iteration twice.
    [ "$iter" -ne "$CHECKLIST_LAST_VERIFY_ITERATION" ] || return 1

    return 0
}
|
|
107
|
+
|
|
108
|
+
#===============================================================================
|
|
109
|
+
# Verification
|
|
110
|
+
#===============================================================================
|
|
111
|
+
|
|
112
|
+
checklist_verify() {
    # Run checklist-verify.py against the current checklist; best-effort,
    # always returns 0 so a verification hiccup never aborts the main loop.
    [ "$CHECKLIST_ENABLED" = "true" ] || return 0
    [ -f "$CHECKLIST_FILE" ] || return 0

    local here verifier
    here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    verifier="${here}/checklist-verify.py"

    if [ ! -f "$verifier" ]; then
        log_warn "checklist-verify.py not found at $verifier"
        return 0
    fi

    log_step "Running PRD checklist verification..."

    # Failures from the verifier are swallowed on purpose (|| true).
    python3 "$verifier" \
        --checklist "$CHECKLIST_FILE" \
        --timeout "$CHECKLIST_TIMEOUT" 2>/dev/null || true

    CHECKLIST_LAST_VERIFY_ITERATION="${ITERATION_COUNT:-0}"

    # Surface the one-line summary in the log when results exist.
    if [ -f "$CHECKLIST_RESULTS_FILE" ]; then
        local summary
        summary=$(checklist_summary 2>/dev/null || true)
        if [ -n "$summary" ]; then
            log_info "Checklist: $summary"
        fi
    fi

    return 0
}
|
|
149
|
+
|
|
150
|
+
#===============================================================================
|
|
151
|
+
# Summary (for prompt injection)
|
|
152
|
+
#===============================================================================
|
|
153
|
+
|
|
154
|
+
checklist_summary() {
    # Returns one-line summary string
    # Emits "V/T verified, F failing, P pending" plus up to 5 critical/major
    # failing titles; emits an empty string when no results file exists or
    # the file is unreadable. The results path is passed to the inline
    # Python via an env var so shell quoting of the path is never an issue.
    if [ ! -f "$CHECKLIST_RESULTS_FILE" ]; then
        echo ""
        return 0
    fi

    _CHECKLIST_RESULTS="$CHECKLIST_RESULTS_FILE" python3 -c "
import json, sys, os
try:
    fpath = os.environ.get('_CHECKLIST_RESULTS', '')
    data = json.load(open(fpath))
    s = data.get('summary', {})
    total = s.get('total', 0)
    verified = s.get('verified', 0)
    failing = s.get('failing', 0)
    pending = s.get('pending', 0)
    if total == 0:
        print('')
    else:
        failing_items = []
        for cat in data.get('categories', []):
            for item in cat.get('items', []):
                if item.get('status') == 'failing' and item.get('priority') in ('critical', 'major'):
                    failing_items.append(item.get('title', item.get('id', '?')))
        detail = ''
        if failing_items:
            detail = ' FAILING: ' + ', '.join(failing_items[:5])
        print(f'{verified}/{total} verified, {failing} failing, {pending} pending.{detail}')
except Exception:
    print('', file=sys.stderr)
" 2>/dev/null || echo ""
}
|
|
187
|
+
|
|
188
|
+
#===============================================================================
|
|
189
|
+
# Council Evidence (for completion-council.sh)
|
|
190
|
+
#===============================================================================
|
|
191
|
+
|
|
192
|
+
checklist_as_evidence() {
    # Writes formatted checklist evidence to stdout for council consumption
    # Arg 1 (optional): evidence file to append to; defaults to /dev/stdout.
    # Output is a "## PRD Checklist Verification" section with a summary line
    # and per-category [PASS]/[FAIL]/[----] item statuses. Silently returns
    # when no verification results exist yet.
    local evidence_file="${1:-}"

    if [ ! -f "$CHECKLIST_RESULTS_FILE" ]; then
        return 0
    fi

    # The whole group is appended in one redirection so the section stays
    # contiguous in the evidence file.
    {
        echo ""
        echo "## PRD Checklist Verification"
        echo ""

        _CHECKLIST_RESULTS="$CHECKLIST_RESULTS_FILE" python3 -c "
import json, os
try:
    data = json.load(open(os.environ['_CHECKLIST_RESULTS']))
    s = data.get('summary', {})
    print(f\"Summary: {s.get('verified',0)}/{s.get('total',0)} verified, {s.get('failing',0)} failing\")
    print()
    for cat in data.get('categories', []):
        print(f\"### {cat.get('name', 'Unknown')}\")
        for item in cat.get('items', []):
            status_icon = {'verified': '[PASS]', 'failing': '[FAIL]', 'pending': '[----]'}.get(item.get('status','pending'), '[----]')
            priority = item.get('priority', 'minor').upper()
            print(f\" {status_icon} [{priority}] {item.get('title', item.get('id', '?'))}\")
        print()
except Exception:
    print('Checklist data unavailable')
" 2>/dev/null || echo "Checklist data unavailable"
    } >> "${evidence_file:-/dev/stdout}"
}
|