ouro-loop 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
framework.py ADDED
@@ -0,0 +1,1336 @@
1
+ """
2
+ Ouro Loop — Lightweight runtime for development methodology.
3
+ State management, stage transitions, verification, and logging.
4
+
5
+ Usage:
6
+ python framework.py status [path] # Show current Ouro state
7
+ python framework.py verify [path] # Run verification checks
8
+ python framework.py log <verdict> [path] # Log phase result
9
+ python framework.py advance [path] # Advance to next phase
10
+ python framework.py bound-check [path] # Check BOUND compliance
11
+
12
+ This file can be extended by AI agents with project-specific logic.
13
+ It corresponds to autoresearch's train.py — the file the agent iterates on.
14
+ """
15
+
16
+ import os
17
+ import re
18
+ import sys
19
+ import json
20
+ import shutil
21
+ import argparse
22
+ import subprocess
23
+ from collections import Counter
24
+ from datetime import datetime, timezone
25
+ from typing import Optional
26
+
27
+ # ---------------------------------------------------------------------------
28
+ # Constants
29
+ # ---------------------------------------------------------------------------
30
+
31
# On-disk layout of per-project Ouro state.
OURO_DIR = ".ouro"  # state directory created inside the project root
STATE_FILE = "state.json"  # JSON state file inside OURO_DIR
RESULTS_FILE = "ouro-results.tsv"  # NOTE(review): not referenced in this file's visible code — confirm external use
REFLECTIVE_LOG = "reflective-log.jsonl"  # append-only JSONL reflective log (see write_reflective_log)
CLAUDE_MD_FILENAME = "CLAUDE.md"  # constraint document parsed for BOUND data

# Ordered methodology stages of the Ouro loop.
STAGES = ["BOUND", "MAP", "PLAN", "BUILD", "VERIFY", "LOOP"]

# Lower rank = more severe; usable as a sort key.
# NOTE(review): not referenced in this file's visible code — confirm external use.
SEVERITY_ORDER = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}

# Routing table keyed by complexity level (see detect_complexity).
# None means "no limit" — architectural work is unbounded.
COMPLEXITY_ROUTES = {
    "trivial": {"max_lines": 20, "max_files": 1, "phases": 0},
    "simple": {"max_lines": 100, "max_files": 3, "phases": 2},
    "complex": {"max_lines": 500, "max_files": 10, "phases": 5},
    "architectural": {"max_lines": None, "max_files": None, "phases": None},
}

# Shared BOUND markers — used by both framework.py and prepare.py (DRY)
BOUND_SECTION_MARKERS = ["## BOUND", "# BOUND"]
BOUND_CONTENT_MARKERS = [
    "DANGER ZONE",
    "DANGER_ZONE",
    "NEVER DO",
    "NEVER_DO",
    "IRON LAW",
    "IRON_LAW",
]
# Presence of any of these in CLAUDE.md makes parse_claude_md set has_bound=True.
BOUND_ALL_MARKERS = BOUND_SECTION_MARKERS + BOUND_CONTENT_MARKERS

# Template placeholders indicating unfilled CLAUDE.md
TEMPLATE_PLACEHOLDERS = [
    "[PROJECT_NAME]",
    "[why it's dangerous]",
    "[action]",
    "[Invariant 1",
]

# Magic values extracted as named constants
GIT_TIMEOUT_SECONDS = 10  # timeout applied to every subprocess git invocation
HOT_FILE_EDIT_THRESHOLD = 3  # edits within the last 10 commits that flag a "hot" file
HISTORY_LIMIT = 50  # NOTE(review): not referenced in this file's visible code — confirm external use
MAX_RETRY_BEFORE_ESCALATE = 3  # consecutive RETRY verdicts that force human review
73
+
74
+ # ---------------------------------------------------------------------------
75
+ # State management
76
+ # ---------------------------------------------------------------------------
77
+
78
+
79
def load_state(project_path: str, required: bool = True) -> Optional[dict]:
    """Load ouro state from .ouro/state.json.

    Args:
        project_path: Project root containing the .ouro directory.
        required: When True, a missing or corrupted state file prints
            guidance and exits the process. When False, those cases
            return None instead (used by verify, which must not abort).

    Returns:
        The parsed state dict, or None when required=False and the state
        file is missing or corrupted. (Previous annotation claimed a bare
        ``dict``, which was wrong for the required=False path.)
    """
    state_path = os.path.join(project_path, OURO_DIR, STATE_FILE)
    if not os.path.exists(state_path):
        if not required:
            return None
        print(f"Ouro not initialized. Run: python prepare.py init {project_path}")
        sys.exit(1)
    try:
        with open(state_path, "r") as f:
            return json.load(f)
    except (json.JSONDecodeError, ValueError) as e:
        # Corrupted JSON: degrade to None for optional callers, otherwise
        # tell the user how to reset and abort.
        if not required:
            return None
        print(f"Corrupted state file: {state_path}")
        print(f"Error: {e}")
        print(
            f"Run: python prepare.py init {project_path} (or delete .ouro/ to reset)"
        )
        sys.exit(1)
102
+
103
+
104
def save_state(project_path: str, state: dict):
    """Persist ouro state to .ouro/state.json, stamping updated_at.

    The state is written to a temporary sibling file first and then swapped
    into place, so a crash mid-write never leaves a truncated state.json.
    """
    target = os.path.join(project_path, OURO_DIR, STATE_FILE)
    state["updated_at"] = datetime.now(timezone.utc).isoformat()

    scratch = target + ".tmp"
    with open(scratch, "w") as handle:
        json.dump(state, handle, indent=2)

    try:
        os.replace(scratch, target)
    except OSError:
        # os.replace can fail (e.g. across filesystems); shutil.move copes.
        shutil.move(scratch, target)
115
+
116
+
117
+ # ---------------------------------------------------------------------------
118
+ # CLAUDE.md parsing
119
+ # ---------------------------------------------------------------------------
120
+
121
+
122
def _get_claude_md_path(project_path: str) -> str:
    """Return the path to CLAUDE.md within the project.

    Single point of truth for the location so parsers and gates agree.
    """
    return os.path.join(project_path, CLAUDE_MD_FILENAME)
125
+
126
+
127
def parse_claude_md(project_path: str) -> dict:
    """Parse CLAUDE.md into structured BOUND data.

    Extraction runs in two passes: a primary pass over standard markdown
    section headers (DANGER ZONES / NEVER DO / IRON LAWS), and — only when
    the primary pass finds nothing but BOUND markers are present — a
    heuristic fallback pass over prose-style files.

    Returns a dict with:
        danger_zones: list[str] — paths/patterns from DANGER ZONES section
        never_do: list[str] — prohibitions from NEVER DO section
        iron_laws: list[str] — invariants from IRON LAWS section
        has_bound: bool — whether any BOUND markers were found
        raw_content: str — full file content (empty string if file missing)
        parse_source: str — "structured", "fallback", or "none"
    """
    result = {
        "danger_zones": [],
        "never_do": [],
        "iron_laws": [],
        "has_bound": False,
        "raw_content": "",
        "parse_source": "none",  # "structured", "fallback", or "none"
    }

    claude_md = _get_claude_md_path(project_path)
    if not os.path.exists(claude_md):
        return result

    try:
        with open(claude_md, "r", encoding="utf-8") as f:
            content = f.read()
    except OSError:
        # Unreadable file is treated the same as a missing one.
        return result

    result["raw_content"] = content
    result["has_bound"] = any(m in content for m in BOUND_ALL_MARKERS)

    # --- Primary extraction: standard section headers ---
    # Each regex captures everything from the section header up to the next
    # "##"/"###" header (or end of file).

    # Extract DANGER ZONES — lines with backtick-wrapped paths
    dz_match = re.search(
        r"(?:###?\s*DANGER\s*ZONES?)(.*?)(?=\n###?\s|\Z)",
        content,
        re.DOTALL | re.IGNORECASE,
    )
    if dz_match:
        zone_text = dz_match.group(1)
        # Every backtick-wrapped token in the section is taken as a zone.
        result["danger_zones"] = re.findall(r"`([^`]+)`", zone_text)

    # Extract NEVER DO — list items (lines starting with "-" or "*")
    nd_match = re.search(
        r"(?:###?\s*NEVER\s*DO)(.*?)(?=\n###?\s|\Z)",
        content,
        re.DOTALL | re.IGNORECASE,
    )
    if nd_match:
        nd_text = nd_match.group(1)
        result["never_do"] = [
            line.strip().lstrip("-*").strip()
            for line in nd_text.strip().split("\n")
            if line.strip() and line.strip().startswith(("-", "*"))
        ]

    # Extract IRON LAWS — list items
    il_match = re.search(
        r"(?:###?\s*IRON\s*LAWS?)(.*?)(?=\n###?\s|\Z)",
        content,
        re.DOTALL | re.IGNORECASE,
    )
    if il_match:
        il_text = il_match.group(1)
        result["iron_laws"] = [
            line.strip().lstrip("-*").strip()
            for line in il_text.strip().split("\n")
            if line.strip() and line.strip().startswith(("-", "*"))
        ]

    # Mark source if primary extraction succeeded
    if any([result["danger_zones"], result["never_do"], result["iron_laws"]]):
        result["parse_source"] = "structured"
        return result

    # --- Fallback extraction: prose-style CLAUDE.md without standard headers ---
    # Only runs if primary extraction found nothing but has_bound is True
    # (keywords exist but not in structured sections)

    if result["has_bound"] and not any(
        [result["danger_zones"], result["never_do"], result["iron_laws"]]
    ):
        # Fallback DANGER ZONES: backtick-wrapped paths on lines near
        # "DANGER" keyword (within 3 lines)
        lines = content.split("\n")
        for i, line in enumerate(lines):
            if "DANGER" in line.upper():
                # Scan this line and nearby lines for backtick paths
                window = lines[max(0, i - 1) : i + 4]
                for wline in window:
                    for path in re.findall(r"`([^`]+)`", wline):
                        # Only include path-like strings (contain / or .)
                        if "/" in path or path.endswith(
                            (".py", ".sh", ".js", ".ts", ".rs", ".go")
                        ):
                            # De-duplicate while preserving order.
                            if path not in result["danger_zones"]:
                                result["danger_zones"].append(path)

        # Fallback NEVER DO: lines starting with "Never" or "Do not" or
        # "- Never" anywhere in the file
        for line in lines:
            stripped = line.strip().lstrip("-*").strip()
            if re.match(r"^(Never|Do not|NEVER)\b", stripped):
                if stripped not in result["never_do"]:
                    result["never_do"].append(stripped)

        # Fallback IRON LAWS: lines containing "must" or "always" near
        # backtick-wrapped code/paths (heuristic for invariants)
        for line in lines:
            stripped = line.strip().lstrip("-*").strip()
            if re.search(r"\b(must|always|required)\b", stripped, re.IGNORECASE):
                if "`" in line and stripped not in result["iron_laws"]:
                    result["iron_laws"].append(stripped)

        if any([result["danger_zones"], result["never_do"], result["iron_laws"]]):
            result["parse_source"] = "fallback"

    return result
247
+
248
+
249
+ def _file_in_danger_zone(file_path: str, danger_zones: list) -> Optional[str]:
250
+ """Check if a file path matches any DANGER ZONE pattern.
251
+
252
+ Uses path-segment-aware matching to avoid false positives:
253
+ - Zone "auth/" matches "auth/login.py" but NOT "unauthorized.py"
254
+ - Zone "auth/core.py" matches exactly that file
255
+ - Zone ending with "/" is treated as a directory prefix
256
+
257
+ Returns the matched zone pattern, or None if no match.
258
+ """
259
+ if not file_path:
260
+ return None
261
+
262
+ # Normalize separators
263
+ norm_file = file_path.replace("\\", "/")
264
+ file_segments = norm_file.split("/")
265
+
266
+ for zone in danger_zones:
267
+ if not zone:
268
+ continue
269
+
270
+ norm_zone = zone.replace("\\", "/")
271
+
272
+ # Exact match
273
+ if norm_file == norm_zone:
274
+ return zone
275
+
276
+ # Directory prefix: zone "src/payments/" → file must start with that path
277
+ if norm_zone.endswith("/"):
278
+ if norm_file.startswith(norm_zone):
279
+ return zone
280
+ continue
281
+
282
+ # File match: zone "auth/core.py" → exact path segment match
283
+ zone_segments = norm_zone.split("/")
284
+
285
+ # Check if zone segments appear as contiguous subsequence in file path
286
+ zone_len = len(zone_segments)
287
+ for i in range(len(file_segments) - zone_len + 1):
288
+ if file_segments[i : i + zone_len] == zone_segments:
289
+ return zone
290
+
291
+ return None
292
+
293
+
294
+ # ---------------------------------------------------------------------------
295
+ # Complexity detection
296
+ # ---------------------------------------------------------------------------
297
+
298
+
299
def detect_complexity(
    project_path: str,
    changed_files: Optional[list] = None,
    danger_zones: Optional[list] = None,
) -> dict:
    """Detect task complexity based on file count and DANGER ZONE proximity.

    Args:
        project_path: Project root (not used by the current heuristic;
            kept for interface stability).
        changed_files: Paths changed in the working tree (defaults to none).
        danger_zones: DANGER ZONE patterns from CLAUDE.md (defaults to none).

    Returns:
        level: str — trivial/simple/complex/architectural
        reason: str — why this level was chosen
        route: dict — the matching COMPLEXITY_ROUTES entry

    Note: the previous annotations declared ``list = None``; they are now
    ``Optional[list]`` to match the accepted values. Behavior is unchanged.
    """
    if changed_files is None:
        changed_files = []
    if danger_zones is None:
        danger_zones = []

    num_files = len(changed_files)
    # Files whose path matches any DANGER ZONE pattern.
    dz_touched = [f for f in changed_files if _file_in_danger_zone(f, danger_zones)]

    # Determine level — DANGER ZONE contact dominates raw file count.
    if dz_touched:
        # NOTE(review): this scans the matched *file paths* for "IRON",
        # not the IRON LAW entries themselves — confirm intended trigger.
        if any("IRON" in str(dz).upper() for dz in dz_touched):
            level = "architectural"
            reason = f"Modifies IRON LAW area: {', '.join(dz_touched[:3])}"
        else:
            level = "complex"
            reason = f"Touches DANGER ZONE: {', '.join(dz_touched[:3])}"
    elif num_files <= 1:
        level = "trivial"
        reason = f"{num_files} file(s), no DANGER ZONE contact"
    elif num_files <= 3:
        level = "simple"
        reason = f"{num_files} files, no DANGER ZONE contact"
    else:
        level = "complex"
        reason = f"{num_files} files across multiple areas"

    return {
        "level": level,
        "reason": reason,
        "route": COMPLEXITY_ROUTES[level],
    }
340
+
341
+
342
+ # ---------------------------------------------------------------------------
343
+ # Status
344
+ # ---------------------------------------------------------------------------
345
+
346
+
347
def show_status(project_path: str):
    """Print a human-readable summary of the current Ouro state."""
    state = load_state(project_path)
    rule = "=" * 50

    print(f"{rule}")
    print(" Ouro Loop — Status")
    print(f"{rule}")
    print(f" Project: {state.get('project_name', 'Unknown')}")
    print(f" Stage: {state.get('current_stage', 'UNKNOWN')}")

    phase = state.get("current_phase")
    total = state.get("total_phases", 0)
    # A phase counter only makes sense once total_phases is known.
    if total > 0 and phase is not None:
        print(f" Phase: {phase}/{total}")
    else:
        print(" Phase: N/A")

    print(f" BOUND: {'Defined' if state.get('bound_defined') else 'Not defined'}")

    history = state.get("history", [])
    if history:
        latest = history[-1]
        print(f" Last: {latest.get('stage', '?')} — {latest.get('verdict', '?')}")
        print(f" at {latest.get('timestamp', '?')}")

        verdicts = [h.get("verdict") for h in history]
        passed = verdicts.count("PASS")
        failed = sum(1 for v in verdicts if v in ("FAIL", "RETRY"))
        print(f" History: {passed} passed, {failed} failed, {len(history)} total")
    print(f"{rule}")
376
+
377
+
378
+ # ---------------------------------------------------------------------------
379
+ # Verification
380
+ # ---------------------------------------------------------------------------
381
+
382
+
383
def run_verification(project_path: str) -> dict:
    """Run multi-layer verification checks (Layer 1 + 2 + 3).

    Returns a results dict with per-layer detail and an "overall" verdict:
    FAIL (any gate/self-check failed), REVIEW (human review required),
    WARN (nothing passed — project likely not set up), else PASS.
    """
    results = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "layer1_gates": {},
        "layer2_self": {},
        "layer3_review": {},
        "overall": "PASS",
    }

    # Refresh bound_defined in state — init snapshot may be stale
    # (user might have added BOUND to CLAUDE.md after init)
    bound_data = parse_claude_md(project_path)
    state = load_state(project_path, required=False)
    if state and state.get("bound_defined") != bound_data["has_bound"]:
        state["bound_defined"] = bound_data["has_bound"]
        save_state(project_path, state)

    # Layer 1: Gates (pass cached bound_data to avoid re-parsing)
    results["layer1_gates"] = run_gates(project_path, _bound_data=bound_data)

    # Layer 2: Self-assessment
    results["layer2_self"] = run_self_assessment(project_path, _bound_data=bound_data)

    # Layer 3: External review triggers — needs the Layer 1 results, so it
    # runs last and receives the partially-built results dict.
    results["layer3_review"] = _check_layer3_triggers(
        project_path, results, _bound_data=bound_data
    )

    # Determine overall verdict — FAIL outranks REVIEW outranks WARN.
    gate_failures = [
        g for g, v in results["layer1_gates"].items() if v["status"] == "FAIL"
    ]
    self_failures = [
        s for s, v in results["layer2_self"].items() if v["status"] == "FAIL"
    ]
    review_required = results["layer3_review"].get("required", False)

    if gate_failures or self_failures:
        results["overall"] = "FAIL"
        results["failures"] = gate_failures + self_failures
    elif review_required:
        results["overall"] = "REVIEW"
        results["review_reasons"] = results["layer3_review"].get("reasons", [])
    else:
        # Check if everything is WARN/SKIP with no PASS — project likely not set up
        all_statuses = [v["status"] for v in results["layer1_gates"].values()] + [
            v["status"] for v in results["layer2_self"].values()
        ]
        if all_statuses and "PASS" not in all_statuses:
            results["overall"] = "WARN"

    return results
436
+
437
+
438
def _check_layer3_triggers(
    project_path: str, current_results: dict, _bound_data: Optional[dict] = None
) -> dict:
    """Layer 3: Check if external (human) review is required.

    Triggers:
        - Changes touch a DANGER ZONE
        - IRON LAW needs modification
        - 3+ consecutive RETRY verdicts
        - Failed Layer 1 gate

    Args:
        project_path: Project root.
        current_results: Partially-built results dict from run_verification;
            its "layer1_gates" entry must already be populated.
        _bound_data: Cached parse_claude_md() result; re-parsed when None.

    Returns:
        {"required": bool, "reasons": list[str]}
    """
    review = {"required": False, "reasons": []}

    # Check DANGER ZONE contact via RELEVANCE gate
    relevance = current_results.get("layer1_gates", {}).get("RELEVANCE", {})
    dz_files = relevance.get("danger_zone_files", [])
    if dz_files:
        review["required"] = True
        review["reasons"].append(f"DANGER ZONE touched: {', '.join(dz_files[:3])}")

    # Check for Layer 1 gate failures
    gate_failures = [
        g
        for g, v in current_results.get("layer1_gates", {}).items()
        if v["status"] == "FAIL"
    ]
    if gate_failures:
        review["required"] = True
        review["reasons"].append(f"Layer 1 gate failed: {', '.join(gate_failures)}")

    # Check consecutive RETRY count from state history
    state = load_state(project_path, required=False)
    if state:
        history = state.get("history", [])
        consecutive_retries = 0
        # Only the trailing run of RETRY verdicts counts.
        for entry in reversed(history):
            if entry.get("verdict") == "RETRY":
                consecutive_retries += 1
            else:
                break
        if consecutive_retries >= MAX_RETRY_BEFORE_ESCALATE:
            review["required"] = True
            review["reasons"].append(
                f"{consecutive_retries} consecutive RETRY verdicts — "
                f"mandatory user review"
            )

    # Check complexity level (architectural = always review)
    if _bound_data is None:
        _bound_data = parse_claude_md(project_path)
    changed_files = relevance.get("files", [])
    if changed_files:
        complexity = detect_complexity(
            project_path, changed_files, _bound_data["danger_zones"]
        )
        if complexity["level"] == "architectural":
            review["required"] = True
            review["reasons"].append(
                f"Architectural complexity: {complexity['reason']}"
            )

    return review
500
+
501
+
502
def _git_lines(project_path: str, args: list) -> Optional[list]:
    """Run a git subcommand in *project_path* and return its stripped,
    non-empty stdout lines.

    Returns None when git is unusable for this gate: the binary is missing,
    the call times out, or git exits non-zero (e.g. not a git repository).
    """
    try:
        result = subprocess.run(
            ["git"] + args,
            capture_output=True,
            text=True,
            cwd=project_path,
            timeout=GIT_TIMEOUT_SECONDS,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return None
    if result.returncode != 0:
        # Previously ignored: a failed git call (non-repo directory) produced
        # empty stdout and a misleading PASS "0 files changed".
        return None
    return [line.strip() for line in result.stdout.strip().split("\n") if line.strip()]


def run_gates(project_path: str, _bound_data: dict = None) -> dict:
    """Layer 1: Automated gates (EXIST, RELEVANCE, ROOT_CAUSE, RECALL, MOMENTUM).

    Args:
        project_path: Project root to inspect.
        _bound_data: Cached parse_claude_md() result; re-parsed when None.

    Returns:
        Mapping of gate name → {"status": PASS/FAIL/WARN/SKIP, "detail": str, ...}.
        Git-backed gates report SKIP when git is unavailable or the git
        command fails (fix: exit status is now checked, so a non-repo
        directory no longer reports PASS with "0 files changed").
    """
    gates = {}
    if _bound_data is None:
        _bound_data = parse_claude_md(project_path)
    bound_data = _bound_data
    danger_zones = bound_data["danger_zones"]

    # EXIST gate: check that key files exist + DANGER ZONE awareness
    claude_md = _get_claude_md_path(project_path)
    claude_exists = os.path.exists(claude_md)
    if claude_exists:
        gates["EXIST"] = {"status": "PASS", "detail": "CLAUDE.md exists"}
    else:
        state = load_state(project_path, required=False)
        bound_expected = state.get("bound_defined", False) if state else False
        if bound_expected:
            gates["EXIST"] = {
                "status": "FAIL",
                "detail": "CLAUDE.md missing but BOUND was expected",
            }
        else:
            gates["EXIST"] = {
                "status": "WARN",
                "detail": "No CLAUDE.md — define BOUND before BUILD",
            }

    # RELEVANCE gate: check git status for scope + DANGER ZONE overlap
    status_lines = _git_lines(project_path, ["status", "--short"])
    if status_lines is None:
        gates["RELEVANCE"] = {"status": "SKIP", "detail": "git not available"}
    else:
        # Last whitespace-separated token is the path (handles "R old -> new").
        changed_files = [line.split()[-1] for line in status_lines]

        # Check if any changed files are in DANGER ZONES
        dz_hits = []
        for f in changed_files:
            zone = _file_in_danger_zone(f, danger_zones)
            if zone:
                dz_hits.append(f"{f} (zone: {zone})")

        if dz_hits:
            gates["RELEVANCE"] = {
                "status": "WARN",
                "detail": f"{len(changed_files)} files changed, "
                f"{len(dz_hits)} in DANGER ZONE: {', '.join(dz_hits[:5])}",
                "files": changed_files[:20],
                "danger_zone_files": dz_hits,
            }
        else:
            gates["RELEVANCE"] = {
                "status": "PASS",
                "detail": f"{len(changed_files)} files changed",
                "files": changed_files[:20],
            }

    # ROOT_CAUSE gate: check for repeated edits to same file
    log_files = _git_lines(
        project_path, ["log", "--name-only", "--pretty=format:", "-10"]
    )
    if log_files is None:
        gates["ROOT_CAUSE"] = {"status": "SKIP", "detail": "git not available"}
    else:
        freq = Counter(log_files)
        hot_files = {f: c for f, c in freq.items() if c >= HOT_FILE_EDIT_THRESHOLD}
        gates["ROOT_CAUSE"] = {
            "status": "WARN" if hot_files else "PASS",
            "detail": (
                f"Hot files: {', '.join(hot_files.keys())}"
                if hot_files
                else "No repeated edits detected"
            ),
        }

    # RECALL gate: verify BOUND constraints are accessible and recently read
    if bound_data["has_bound"]:
        recall_issues = []
        if not bound_data["danger_zones"]:
            recall_issues.append("no DANGER ZONES parsed")
        if not bound_data["iron_laws"]:
            recall_issues.append("no IRON LAWS parsed")
        if recall_issues:
            gates["RECALL"] = {
                "status": "WARN",
                "detail": f"BOUND exists but incomplete: {', '.join(recall_issues)}",
            }
        else:
            gates["RECALL"] = {
                "status": "PASS",
                "detail": (
                    f"BOUND loaded: {len(bound_data['danger_zones'])} zones, "
                    f"{len(bound_data['never_do'])} prohibitions, "
                    f"{len(bound_data['iron_laws'])} laws"
                ),
            }
    else:
        gates["RECALL"] = {
            "status": "WARN",
            "detail": "No BOUND defined — constraints may be forgotten",
        }

    # MOMENTUM gate: check recent commit frequency
    commits = _git_lines(project_path, ["log", "--oneline", "-5"])
    if commits is None:
        gates["MOMENTUM"] = {"status": "SKIP", "detail": "git not available"}
    else:
        gates["MOMENTUM"] = {
            "status": "PASS" if len(commits) >= 2 else "WARN",
            "detail": f"{len(commits)} recent commits",
        }

    return gates
637
+
638
+
639
def run_self_assessment(project_path: str, _bound_data: dict = None) -> dict:
    """Layer 2: Self-assessment checks (BOUND compliance + test presence).

    Args:
        project_path: Project root to inspect.
        _bound_data: Cached parse_claude_md() result; re-parsed when None.

    Returns:
        Mapping of check name → {"status": ..., "detail": ...}.
    """
    checks = {}

    # BOUND compliance: use parse_claude_md() for structured check
    bound_data = (
        _bound_data if _bound_data is not None else parse_claude_md(project_path)
    )
    claude_md = _get_claude_md_path(project_path)
    if not os.path.exists(claude_md):
        checks["bound_compliance"] = {"status": "SKIP", "detail": "No CLAUDE.md"}
    elif not bound_data["raw_content"] and os.path.getsize(claude_md) > 0:
        # Non-empty file but nothing parsed → the read itself failed.
        checks["bound_compliance"] = {
            "status": "SKIP",
            "detail": "Cannot read CLAUDE.md",
        }
    elif bound_data["has_bound"]:
        checks["bound_compliance"] = {
            "status": "PASS",
            "detail": "BOUND section found",
        }
    else:
        checks["bound_compliance"] = {
            "status": "WARN",
            "detail": "No BOUND section in CLAUDE.md",
        }

    # Test detection: walk the tree (pruning vendor/VCS dirs) looking for
    # any filename containing "test" or "spec".
    excluded = {".git", "node_modules", "__pycache__", ".venv", ".ouro"}
    test_found = False
    for root, dirs, files in os.walk(project_path):
        dirs[:] = [d for d in dirs if d not in excluded]
        if any("test" in name.lower() or "spec" in name.lower() for name in files):
            test_found = True
            break

    checks["tests_exist"] = {
        "status": "PASS" if test_found else "WARN",
        "detail": "Test files found" if test_found else "No test files detected",
    }

    return checks
689
+
690
+
691
def print_verification(results: dict):
    """Pretty-print the three verification layers and the overall verdict."""
    icons = {"PASS": "+", "FAIL": "X", "WARN": "!", "SKIP": "-"}
    rule = "=" * 50

    def _section(title, entries):
        # One "[icon] name detail" row per gate/check.
        print(title)
        for name, info in entries.items():
            mark = icons.get(info["status"], "?")
            print(f" [{mark}] {name:15s} {info['detail']}")

    print(f"{rule}")
    print(" Ouro Loop — Verification")
    print(f"{rule}")

    _section(" Layer 1 — Gates:", results.get("layer1_gates", {}))
    print()
    _section(" Layer 2 — Self-Assessment:", results.get("layer2_self", {}))

    # Layer 3
    layer3 = results.get("layer3_review", {})
    print()
    if layer3.get("required"):
        print(" Layer 3 — External Review: REQUIRED")
        for reason in layer3.get("reasons", []):
            print(f" [!] {reason}")
    else:
        print(" Layer 3 — External Review: Not required")

    print()
    overall = results.get("overall", "UNKNOWN")
    print(f" Overall: {overall}")

    if overall == "FAIL":
        print(f" Failures: {', '.join(results.get('failures', []))}")
    elif overall == "REVIEW":
        print(" Action: Human review required before continuing")

    print(f"{rule}")
733
+
734
+
735
+ # ---------------------------------------------------------------------------
736
+ # Pattern detection
737
+ # ---------------------------------------------------------------------------
738
+
739
+ # Pattern detection thresholds
740
+ CONSECUTIVE_FAIL_THRESHOLD = 2
741
+ VELOCITY_WINDOW = 5
742
+ DRIFT_DIRECTORY_THRESHOLD = 5
743
+
744
+
745
def detect_patterns(history: list, current_gates: dict = None) -> dict:
    """Analyze history to detect behavioral patterns.

    This is the "Pattern" layer of the reflective log — it identifies
    recurring behaviors that an LLM should be aware of when starting
    a new iteration.

    Returns:
        consecutive_failures: int — trailing run of FAIL/RETRY verdicts
        stuck_loop: bool — same stage failing repeatedly
        velocity_trend: str — ACCELERATING / STABLE / DECELERATING / STALLED
        hot_files: list — files appearing in ROOT_CAUSE warnings
        drift_signal: bool — RELEVANCE gate has been warning
        retry_rate: float — share of RETRY verdicts in recent history
    """
    patterns = {
        "consecutive_failures": 0,
        "stuck_loop": False,
        "velocity_trend": "UNKNOWN",
        "hot_files": [],
        "drift_signal": False,
        "retry_rate": 0.0,
    }

    # Gate-derived signals are available even with an empty history.
    if current_gates:
        detail = current_gates.get("ROOT_CAUSE", {}).get("detail", "")
        if "Hot files:" in detail:
            listed = detail.replace("Hot files: ", "")
            patterns["hot_files"] = [name.strip() for name in listed.split(",")]

        if current_gates.get("RELEVANCE", {}).get("danger_zone_files"):
            patterns["drift_signal"] = True

    if not history:
        return patterns

    # Trailing run of failing verdicts.
    for item in reversed(history):
        if item.get("verdict") not in ("FAIL", "RETRY"):
            break
        patterns["consecutive_failures"] += 1

    # RETRY share of the most recent window.
    window = history[-VELOCITY_WINDOW:]
    if window:
        retries = sum(1 for item in window if item.get("verdict") == "RETRY")
        patterns["retry_rate"] = retries / len(window)

    # Velocity trend: compare pass rates in two halves of recent history.
    # Require >= 6 entries for meaningful trend detection — with 4-5 entries
    # a single RETRY creates misleading DECELERATING signal
    recent = (
        history[-VELOCITY_WINDOW * 2 :]
        if len(history) >= VELOCITY_WINDOW * 2
        else history
    )
    if len(recent) >= 6:
        mid = len(recent) // 2
        older, newer = recent[:mid], recent[mid:]

        def _pass_rate(entries):
            return sum(1 for e in entries if e.get("verdict") == "PASS") / len(entries)

        older_rate = _pass_rate(older)
        newer_rate = _pass_rate(newer)
        delta = newer_rate - older_rate
        # Require > 0.3 swing (not 0.2) to reduce false positives
        if delta > 0.3:
            patterns["velocity_trend"] = "ACCELERATING"
        elif delta < -0.3:
            patterns["velocity_trend"] = "DECELERATING"
        elif newer_rate == 0:
            patterns["velocity_trend"] = "STALLED"
        else:
            patterns["velocity_trend"] = "STABLE"

    # Stuck loop: one stage, only FAIL/RETRY, across the last 3 entries.
    if len(history) >= 3:
        tail = history[-3:]
        stages = {e.get("stage") for e in tail}
        if len(stages) == 1 and all(
            e.get("verdict") in ("FAIL", "RETRY") for e in tail
        ):
            patterns["stuck_loop"] = True

    return patterns
835
+
836
+
837
+ # ---------------------------------------------------------------------------
838
+ # Reflective logging (three-layer structured log)
839
+ # ---------------------------------------------------------------------------
840
+
841
REFLECTIVE_LOG_LIMIT = 30  # keep last N entries of reflective-log.jsonl (trimmed on write)
842
+
843
+
844
def build_reflective_entry(
    project_path: str, verdict: str, verification: dict, notes: str = ""
) -> dict:
    """Build a three-layer reflective log entry.

    Layer 1 — WHAT: what happened this iteration (facts, signals)
    Layer 2 — WHY: why decisions were made (causal chain)
    Layer 3 — PATTERN: behavioral patterns detected (self-awareness)

    This structured entry is designed to be quickly parseable by an LLM
    at the start of the next iteration, providing ambient self-awareness
    without requiring raw session replay.

    Args:
        project_path: Project root.
        verdict: The verdict being recorded for this iteration.
        verification: A results dict as produced by run_verification().
        notes: Free-form rationale stored under why.notes.

    Returns:
        The entry dict, including an "alerts" list of actionable warnings.
    """
    # State may be absent (verify without init) — degrade to an empty dict.
    state = load_state(project_path, required=False) or {}
    bound_data = parse_claude_md(project_path)
    gates = verification.get("layer1_gates", {})
    layer3 = verification.get("layer3_review", {})

    # Collect changed files from RELEVANCE gate
    changed_files = gates.get("RELEVANCE", {}).get("files", [])

    # Detect complexity
    complexity = detect_complexity(
        project_path, changed_files, bound_data["danger_zones"]
    )

    # Detect patterns from history
    history = state.get("history", [])
    patterns = detect_patterns(history, gates)

    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "iteration": len(history) + 1,
        # Layer 1 — WHAT (facts)
        "what": {
            "stage": state.get("current_stage", "UNKNOWN"),
            "phase": f"{state.get('current_phase', '?')}/{state.get('total_phases', '?')}",
            "verdict": verdict,
            "overall": verification.get("overall", "UNKNOWN"),
            "gates": {
                gate: {
                    "status": info.get("status", "?"),
                    "detail": info.get("detail", ""),
                }
                for gate, info in gates.items()
            },
            "changed_files": changed_files[:10],
            "danger_zone_contact": gates.get("RELEVANCE", {}).get(
                "danger_zone_files", []
            ),
            # Counts FAILed Layer 1 gates, not individual BOUND rule hits.
            "bound_violations": sum(
                1 for v in gates.values() if v.get("status") == "FAIL"
            ),
            "review_required": layer3.get("required", False),
        },
        # Layer 2 — WHY (decisions and causal chain)
        "why": {
            "complexity": complexity["level"],
            "complexity_reason": complexity["reason"],
            "review_reasons": layer3.get("reasons", []),
            "bound_state": {
                "danger_zones": len(bound_data["danger_zones"]),
                "never_do": len(bound_data["never_do"]),
                "iron_laws": len(bound_data["iron_laws"]),
            },
            "notes": notes,
        },
        # Layer 3 — PATTERN (self-awareness)
        "pattern": {
            "consecutive_failures": patterns["consecutive_failures"],
            "stuck_loop": patterns["stuck_loop"],
            "velocity_trend": patterns["velocity_trend"],
            "retry_rate": round(patterns["retry_rate"], 2),
            "hot_files": patterns["hot_files"],
            "drift_signal": patterns["drift_signal"],
        },
    }

    # Add actionable summary for quick LLM consumption
    alerts = []
    if patterns["stuck_loop"]:
        alerts.append(
            "STUCK: same stage failing 3+ times — try fundamentally different approach"
        )
    if patterns["consecutive_failures"] >= MAX_RETRY_BEFORE_ESCALATE:
        alerts.append(
            f"ESCALATE: {patterns['consecutive_failures']} consecutive failures — consider user review"
        )
    if patterns["velocity_trend"] == "DECELERATING":
        alerts.append("SLOWING: pass rate declining — reassess approach")
    if patterns["velocity_trend"] == "STALLED":
        alerts.append("STALLED: no passes in recent window — step back and remap")
    if patterns["drift_signal"]:
        alerts.append("DRIFT: working in DANGER ZONE — extra caution required")
    if patterns["hot_files"]:
        alerts.append(
            f"HOT FILES: {', '.join(patterns['hot_files'][:3])} — possible symptom-chasing"
        )

    entry["alerts"] = alerts

    return entry
946
+
947
+
948
def write_reflective_log(project_path: str, entry: dict):
    """Append a reflective log entry to .ouro/reflective-log.jsonl.

    Each line is a self-contained JSON object. On every write the log is
    re-read, the new entry appended, and the result trimmed to the newest
    REFLECTIVE_LOG_LIMIT entries before being written back atomically.

    Args:
        project_path: Root directory of the project (contains .ouro/).
        entry: Reflective entry dict (as built by build_reflective_entry).
    """
    log_path = os.path.join(project_path, OURO_DIR, REFLECTIVE_LOG)
    os.makedirs(os.path.dirname(log_path), exist_ok=True)

    # Reuse the shared JSONL reader instead of duplicating its parse loop.
    # Reading only the newest REFLECTIVE_LOG_LIMIT entries is sufficient:
    # after appending one more and trimming below, the resulting file is
    # identical to reading everything first.
    entries = read_reflective_log(project_path, last_n=REFLECTIVE_LOG_LIMIT)
    entries.append(entry)

    # Trim to limit
    entries = entries[-REFLECTIVE_LOG_LIMIT:]

    # Write back (atomic-ish: write to tmp then rename)
    tmp_path = log_path + ".tmp"
    try:
        with open(tmp_path, "w", encoding="utf-8") as f:
            for e in entries:
                f.write(json.dumps(e, ensure_ascii=False) + "\n")
        try:
            os.replace(tmp_path, log_path)
        except OSError:
            # Fallback for cross-device moves (Docker volumes, NFS, etc.)
            shutil.move(tmp_path, log_path)
    except OSError as e:
        print(f"Warning: Could not write reflective log: {e}")
        # Clean up temp file if write or move failed
        if os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except OSError:
                pass
996
+
997
+
998
def read_reflective_log(project_path: str, last_n: int = 5) -> list:
    """Return up to the last *last_n* reflective log entries, oldest first.

    Malformed JSON lines are skipped silently; a missing or unreadable
    log file yields an empty list.
    """
    log_path = os.path.join(project_path, OURO_DIR, REFLECTIVE_LOG)
    if not os.path.exists(log_path):
        return []

    parsed = []
    try:
        with open(log_path, "r", encoding="utf-8") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    parsed.append(json.loads(raw))
                except json.JSONDecodeError:
                    # Skip corrupt lines rather than failing the whole read.
                    pass
    except OSError:
        return []

    return parsed[-last_n:]
1021
+
1022
+
1023
def print_reflective_summary(project_path: str, last_n: int = 5):
    """Print a human-readable summary of recent reflective log entries.

    Renders each entry's WHAT/WHY/PATTERN layers plus any alerts, and —
    when at least 3 entries are shown — an overall pass/fail trend line.

    Args:
        project_path: Root directory of the project (contains .ouro/).
        last_n: Maximum number of recent entries to display.
    """
    entries = read_reflective_log(project_path, last_n)
    if not entries:
        print("No reflective log entries found.")
        return

    print(f"{'=' * 60}")
    print(f" Ouro Loop — Reflective Log (last {len(entries)} entries)")
    print(f"{'=' * 60}")

    # Fix: the original bound an unused enumerate index; iterate directly.
    for entry in entries:
        what = entry.get("what", {})
        why = entry.get("why", {})
        pattern = entry.get("pattern", {})
        alerts = entry.get("alerts", [])

        # Trim ISO timestamp to "YYYY-MM-DDTHH:MM:SS" (drop fraction/offset).
        ts = entry.get("timestamp", "?")[:19]
        iteration = entry.get("iteration", "?")

        print(f"\n #{iteration} [{ts}]")
        print(
            f" WHAT: {what.get('stage', '?')} {what.get('phase', '?')} "
            f"→ {what.get('verdict', '?')} "
            f"(overall: {what.get('overall', '?')})"
        )

        # Gate summary (compact)
        gate_summary = []
        for gate, info in what.get("gates", {}).items():
            status = info.get("status", "?")
            icon = {"PASS": "+", "FAIL": "X", "WARN": "!", "SKIP": "-"}.get(status, "?")
            gate_summary.append(f"{gate}[{icon}]")
        if gate_summary:
            print(f" Gates: {' '.join(gate_summary)}")

        if what.get("danger_zone_contact"):
            print(f" DZ contact: {', '.join(what['danger_zone_contact'][:3])}")

        print(
            f" WHY: complexity={why.get('complexity', '?')} "
            f"| {why.get('complexity_reason', '')}"
        )
        if why.get("notes"):
            print(f" notes: {why['notes']}")

        print(
            f" PATTERN: velocity={pattern.get('velocity_trend', '?')} "
            f"| failures={pattern.get('consecutive_failures', 0)} "
            f"| retry_rate={pattern.get('retry_rate', 0):.0%}"
        )
        if pattern.get("stuck_loop"):
            print(" STUCK LOOP DETECTED")
        if pattern.get("hot_files"):
            print(f" hot: {', '.join(pattern['hot_files'][:3])}")

        if alerts:
            for alert in alerts:
                print(f" >> {alert}")

    # Overall trend
    if len(entries) >= 3:
        verdicts = [e.get("what", {}).get("verdict") for e in entries]
        pass_count = sum(1 for v in verdicts if v == "PASS")
        fail_count = sum(1 for v in verdicts if v in ("FAIL", "RETRY"))
        print(
            f"\n Trend: {pass_count} PASS / {fail_count} FAIL in last {len(entries)}"
        )

        last_pattern = entries[-1].get("pattern", {})
        velocity = last_pattern.get("velocity_trend", "UNKNOWN")
        print(f" Velocity: {velocity}")

    print(f"\n{'=' * 60}")
1097
+
1098
+
1099
+ # ---------------------------------------------------------------------------
1100
+ # Logging
1101
+ # ---------------------------------------------------------------------------
1102
+
1103
+
1104
def log_phase_result(project_path: str, verdict: str, notes: str = ""):
    """Log a phase result to ouro-results.tsv, state history, and reflective log.

    Args:
        project_path: Root directory of the project.
        verdict: Phase outcome (e.g. PASS/FAIL/RETRY/SKIP).
        notes: Optional free-text note. Tabs/newlines are replaced with
            spaces for the TSV row only, so a note cannot corrupt the
            row structure; the raw note is kept in state and the
            reflective log.
    """
    state = load_state(project_path)
    phase = state.get("current_phase")
    total = state.get("total_phases", 0)

    # Handle missing phase plan gracefully
    if phase is None:
        phase_str = state.get("current_stage", "N/A")
    else:
        phase_str = f"{phase}/{total}"

    # Run quick verification for the log
    results = run_verification(project_path)

    # Count bound violations
    gate_results = results.get("layer1_gates", {})
    bound_violations = sum(
        1 for v in gate_results.values() if v.get("status") == "FAIL"
    )

    # Fix: sanitize the note for TSV — an embedded tab or newline would
    # otherwise split the row into bogus columns/records.
    safe_notes = re.sub(r"[\t\r\n]+", " ", notes).strip()

    # Log to TSV (with error handling for filesystem issues).
    # Fix: write with explicit UTF-8 encoding, consistent with every other
    # file write in this module.
    results_path = os.path.join(project_path, RESULTS_FILE)
    try:
        with open(results_path, "a", encoding="utf-8") as f:
            f.write(
                f"{phase_str}\t{verdict}\t{bound_violations}\tN/A\tnone\t{safe_notes}\n"
            )
    except OSError as e:
        print(f"Warning: Could not write to {results_path}: {e}")

    # Build reflective entry BEFORE updating state (so iteration count is correct)
    reflective_entry = build_reflective_entry(project_path, verdict, results, notes)

    # Update state history
    state.setdefault("history", []).append(
        {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "stage": state.get("current_stage", "UNKNOWN"),
            "phase": phase_str,
            "verdict": verdict,
            "bound_violations": bound_violations,
            "notes": notes,
        }
    )

    # Keep last N history entries
    state["history"] = state["history"][-HISTORY_LIMIT:]
    save_state(project_path, state)

    # Write reflective log after state is saved
    write_reflective_log(project_path, reflective_entry)

    print(f"Logged: {phase_str} — {verdict}")

    # Print alerts if any
    if reflective_entry.get("alerts"):
        for alert in reflective_entry["alerts"]:
            print(f" >> {alert}")
1161
+
1162
+
1163
+ # ---------------------------------------------------------------------------
1164
+ # Phase advancement
1165
+ # ---------------------------------------------------------------------------
1166
+
1167
+
1168
def advance_phase(project_path: str):
    """Advance to the next phase."""
    state = load_state(project_path)
    current = state.get("current_phase")
    total = state.get("total_phases", 0)

    # No plan loaded yet — nothing to advance.
    if current is None:
        print("No phase plan active. Use PLAN stage to define phases first.")
        return

    # Plan exhausted: enter the LOOP stage and clear the phase pointer.
    if current >= total:
        print(f"All {total} phases complete.")
        state.update(current_stage="LOOP", current_phase=None)
        save_state(project_path, state)
        return

    # Otherwise move to the next phase and drop back into BUILD.
    state.update(current_phase=current + 1, current_stage="BUILD")
    save_state(project_path, state)
    print(f"Advanced to phase {current + 1}/{total}")
1189
+
1190
+
1191
+ # ---------------------------------------------------------------------------
1192
+ # BOUND check
1193
+ # ---------------------------------------------------------------------------
1194
+
1195
+
1196
def check_bound(project_path: str):
    """Check BOUND compliance in CLAUDE.md.

    Prints a report covering: whether CLAUDE.md exists, whether it is
    still an unfilled template, which BOUND sections (DANGER ZONES /
    NEVER DO / IRON LAWS) are present, parsed details, and the parse
    source quality. Purely informational — returns nothing.
    """
    claude_md = _get_claude_md_path(project_path)
    if not os.path.exists(claude_md):
        # No BOUND file at all — point the user at the template generator.
        print("No CLAUDE.md found. BOUND not defined.")
        print("Run: python prepare.py template claude")
        return

    bound_data = parse_claude_md(project_path)
    content = bound_data["raw_content"]

    # Detect template placeholders — template has keywords but no real content
    is_template = any(marker in content for marker in TEMPLATE_PLACEHOLDERS)
    if is_template:
        print(f"{'=' * 50}")
        print(" Ouro Loop — BOUND Check")
        print(f"{'=' * 50}")
        print(" [!] CLAUDE.md is still a template — fill in real BOUND values")
        print(" Edit CLAUDE.md to replace [placeholders] with actual boundaries")
        print(f"{'=' * 50}")
        return

    print(f"{'=' * 50}")
    print(" Ouro Loop — BOUND Check")
    print(f"{'=' * 50}")

    # Section presence is keyword-based (both spaced and underscored forms).
    sections = {
        "DANGER ZONES": "DANGER ZONE" in content or "DANGER_ZONE" in content,
        "NEVER DO": "NEVER DO" in content or "NEVER_DO" in content,
        "IRON LAWS": "IRON LAW" in content or "IRON_LAW" in content,
    }

    all_defined = True
    for section, found in sections.items():
        icon = "+" if found else "X"
        print(f" [{icon}] {section}")
        if not found:
            all_defined = False

    # Show parsed BOUND details (capped at 5 items each to stay compact)
    if bound_data["danger_zones"]:
        print(f"\n Parsed DANGER ZONES: {len(bound_data['danger_zones'])}")
        for dz in bound_data["danger_zones"][:5]:
            print(f" - {dz}")
    if bound_data["iron_laws"]:
        print(f" Parsed IRON LAWS: {len(bound_data['iron_laws'])}")
        for il in bound_data["iron_laws"][:5]:
            print(f" - {il}")

    # Report how the BOUND data was extracted — a fallback (prose) parse
    # is less trustworthy than a structured one.
    source = bound_data.get("parse_source", "none")
    if source == "fallback":
        print(
            "\n [!] Parse source: fallback (prose-style CLAUDE.md, results may be noisy)"
        )
    elif source == "structured":
        print("\n Parse source: structured")

    print()
    if all_defined:
        print(" BOUND fully defined. Ready for BUILD.")
    else:
        missing = [s for s, f in sections.items() if not f]
        print(f" Missing: {', '.join(missing)}")
        print(" Define these before starting BUILD stage.")

    print(f"{'=' * 50}")
1262
+
1263
+
1264
+ # ---------------------------------------------------------------------------
1265
+ # Main
1266
+ # ---------------------------------------------------------------------------
1267
+
1268
def main():
    """CLI entry point for Ouro Loop framework."""
    parser = argparse.ArgumentParser(
        description="Ouro Loop — Development methodology runtime"
    )
    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    def _with_path(name, help_text):
        # Register a subcommand that takes one optional positional path.
        sub = subparsers.add_parser(name, help=help_text)
        sub.add_argument("path", nargs="?", default=".", help="Project directory")
        return sub

    # Registration order matters for --help output; keep it stable.
    _with_path("status", "Show Ouro state")
    _with_path("verify", "Run verification checks")

    # log: required verdict, optional notes, path as an option flag.
    log_parser = subparsers.add_parser("log", help="Log phase result")
    log_parser.add_argument(
        "verdict", choices=["PASS", "FAIL", "RETRY", "SKIP"], help="Phase verdict"
    )
    log_parser.add_argument("--notes", default="", help="Notes for this phase")
    log_parser.add_argument("--path", default=".", help="Project directory")

    _with_path("advance", "Advance to next phase")
    _with_path("bound-check", "Check BOUND compliance")

    # reflect: optional path plus an entry-count flag.
    reflect_parser = _with_path("reflect", "Show reflective log")
    reflect_parser.add_argument(
        "-n",
        "--last",
        type=int,
        default=5,
        help="Number of entries to show (default: 5)",
    )

    args = parser.parse_args()

    if args.command is None:
        parser.print_help()
        sys.exit(0)

    # Dispatch table: one handler per subcommand.
    handlers = {
        "status": lambda: show_status(args.path),
        "verify": lambda: print_verification(run_verification(args.path)),
        "log": lambda: log_phase_result(
            getattr(args, "path", "."), args.verdict, args.notes
        ),
        "advance": lambda: advance_phase(args.path),
        "bound-check": lambda: check_bound(args.path),
        "reflect": lambda: print_reflective_summary(args.path, args.last),
    }
    handlers[args.command]()
1333
+
1334
+
1335
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()