delimit-cli 4.1.44 → 4.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -24,12 +24,134 @@ logger = logging.getLogger("delimit.ai.loop_engine")
24
24
# ── Configuration ────────────────────────────────────────────────────

# Root ledger shared by the build / social / deploy loops.
ROOT_LEDGER_PATH = Path("/root/.delimit")
# Ledger item types the build loop may act on autonomously.
BUILD_SAFE_TYPES = ["feat", "fix", "task"]
# Ledger item types the social/think loop may act on autonomously.
SOCIAL_SAFE_TYPES = ["social", "outreach", "content", "sensor", "strategy"]
SIGNAL_TYPES = ["strategy"]  # Web scanner signals eligible for triage

# Per-session safeguards: iteration cap, spend cap (USD), and error budget.
MAX_ITERATIONS_DEFAULT = 10
MAX_COST_DEFAULT = 2.0
MAX_ERRORS_DEFAULT = 2

# Platforms and ventures swept by each social-loop scan iteration.
SOCIAL_SCAN_PLATFORMS = ["reddit", "x", "hn", "devto", "github", "web"]
SOCIAL_SCAN_VENTURES = ["delimit"]

# Web scanner signal prefixes (from social_target._scan_web)
WEB_SIGNAL_PREFIXES = {
    "competitor": "[COMPETITOR RELEASE]",
    "ecosystem": "[ECOSYSTEM]",
    "npm": "[NPM]",
    "venture": "[VENTURE SIGNAL]",
}

# LED-788: timeouts + observability for the social loop.
# All three are overridable via environment variables (values in seconds).
SOCIAL_ITERATION_TIMEOUT = int(os.environ.get("DELIMIT_SOCIAL_ITERATION_TIMEOUT", "300"))  # 5 min
SOCIAL_STRATEGY_TIMEOUT = int(os.environ.get("DELIMIT_SOCIAL_STRATEGY_TIMEOUT", "120"))  # 2 min
SOCIAL_SCAN_TIMEOUT = int(os.environ.get("DELIMIT_SOCIAL_SCAN_TIMEOUT", "180"))  # 3 min total for all platform scans


# ── Session State ────────────────────────────────────────────────────

# Per-session JSON state files, and LED-788 heartbeat files recording the
# stage an in-flight iteration is currently in.
SESSION_DIR = Path.home() / ".delimit" / "loop" / "sessions"
HEARTBEAT_DIR = Path.home() / ".delimit" / "loop" / "heartbeat"
52
+
53
def _ensure_heartbeat_dir():
    # Create the heartbeat directory (and any missing parents) idempotently.
    HEARTBEAT_DIR.mkdir(parents=True, exist_ok=True)
55
+
56
+
57
def _write_heartbeat(session_id: str, stage: str, extra: Optional[Dict[str, Any]] = None) -> None:
    """LED-788: persist the loop's current stage and its start time.

    delimit_loop_status reads this file so callers can see which stage an
    in-flight iteration is actually spending time in, instead of only the
    stale snapshot left by the last completed iteration.
    """
    record: Dict[str, Any] = {
        "session_id": session_id,
        "stage": stage,
        "started_at": datetime.now(timezone.utc).isoformat(),
        "ts": time.time(),
    }
    if extra:
        record.update(extra)
    try:
        _ensure_heartbeat_dir()
        target = HEARTBEAT_DIR / f"{session_id}.json"
        target.write_text(json.dumps(record, indent=2))
    except OSError as e:
        # Heartbeats are best-effort observability; never fail the loop.
        logger.debug("heartbeat write failed: %s", e)
78
+
79
+
80
def _read_heartbeat(session_id: str) -> Optional[Dict[str, Any]]:
    """Load the heartbeat record for *session_id*, or None if absent/corrupt."""
    heartbeat_file = HEARTBEAT_DIR / f"{session_id}.json"
    try:
        if not heartbeat_file.exists():
            return None
        record = json.loads(heartbeat_file.read_text())
    except (OSError, json.JSONDecodeError):
        return None
    # Derive how long the currently-running stage has been going.
    if "ts" in record:
        record["elapsed_seconds"] = round(time.time() - record["ts"], 1)
    return record
92
+
93
+
94
def _run_stage_with_timeout(
    stage: str,
    fn,
    timeout_s: int,
    session_id: str = "",
) -> Dict[str, Any]:
    """Invoke *fn* under a wall-clock deadline with timing instrumentation.

    The callable runs on a daemon thread so a wedged HTTP client can be
    abandoned without killing the whole loop process. The returned dict
    always carries:
      - ok: bool
      - value: fn's return value on success
      - error: error string on failure
      - elapsed_seconds: wall-clock duration
      - timed_out: True when the deadline was hit
    """
    import threading

    began = time.time()
    if session_id:
        _write_heartbeat(session_id, stage)
    logger.info("[loop] stage=%s start timeout=%ss", stage, timeout_s)

    outcome: Dict[str, Any] = {"value": None, "error": None}

    def _invoke():
        try:
            outcome["value"] = fn()
        except Exception as exc:  # noqa: BLE001 — intentional broad catch
            outcome["error"] = exc

    # Daemon thread so a hung worker cannot block interpreter shutdown.
    runner = threading.Thread(target=_invoke, name=f"loop-stage-{stage}", daemon=True)
    runner.start()
    runner.join(timeout=timeout_s)
    elapsed = time.time() - began
    timing = round(elapsed, 1)

    if runner.is_alive():
        # Worker never finished; the thread is abandoned (daemon=True).
        logger.error("[loop] stage=%s TIMEOUT after %.1fs (limit=%ss)", stage, elapsed, timeout_s)
        return {
            "ok": False,
            "error": f"{stage} exceeded {timeout_s}s timeout",
            "elapsed_seconds": timing,
            "timed_out": True,
        }

    failure = outcome["error"]
    if failure is not None:
        logger.error("[loop] stage=%s failed after %.1fs: %s", stage, elapsed, failure)
        return {
            "ok": False,
            "error": str(failure),
            "elapsed_seconds": timing,
            "timed_out": False,
        }

    logger.info("[loop] stage=%s done elapsed=%.1fs", stage, elapsed)
    return {
        "ok": True,
        "value": outcome["value"],
        "elapsed_seconds": timing,
        "timed_out": False,
    }
154
+
33
155
 
34
156
  def _ensure_session_dir():
35
157
  SESSION_DIR.mkdir(parents=True, exist_ok=True)
@@ -39,11 +161,13 @@ def _save_session(session: Dict[str, Any]):
39
161
  path = SESSION_DIR / f"{session['session_id']}.json"
40
162
  path.write_text(json.dumps(session, indent=2))
41
163
 
42
- def create_governed_session() -> Dict[str, Any]:
43
- session_id = f"build-{uuid.uuid4().hex[:8]}"
164
+ def create_governed_session(loop_type: str = "build") -> Dict[str, Any]:
165
+ prefix = loop_type if loop_type in ("build", "social", "deploy") else "build"
166
+ session_id = f"{prefix}-{uuid.uuid4().hex[:8]}"
44
167
  session = {
45
168
  "session_id": session_id,
46
- "type": "governed_build",
169
+ "type": f"governed_{prefix}",
170
+ "loop_type": prefix,
47
171
  "started_at": datetime.now(timezone.utc).isoformat(),
48
172
  "iterations": 0,
49
173
  "max_iterations": MAX_ITERATIONS_DEFAULT,
@@ -86,6 +210,164 @@ def resolve_venture_context(venture_name: str) -> Dict[str, str]:
86
210
 
87
211
  return context
88
212
 
213
# ── Web Signal Triage (think→build pipeline) ────────────────────────

def _classify_web_signal(item: Dict[str, Any]) -> Optional[Dict[str, str]]:
    """Map a web scanner strategy item onto a triage action.

    Returns a dict with keys action, build_type, priority, title,
    description (plus venture/source_signal), or None when the signal
    should be skipped.
    """
    raw_title = item.get("title", "")
    raw_desc = item.get("description", "")
    haystack = f"{raw_title} {raw_desc}".lower()
    signal_id = item.get("id", "")

    def _marker(kind: str) -> str:
        # Lowercased scanner prefix, matched case-insensitively below.
        return WEB_SIGNAL_PREFIXES[kind].lower()

    # Competitor releases → assess feature parity need
    if _marker("competitor") in haystack or "competitor release" in haystack:
        return {
            "action": "build",
            "build_type": "task",
            "priority": "P1",
            "title": f"Assess: {raw_title}",
            "description": (
                f"Web scanner detected competitor activity. Assess whether Delimit "
                f"needs a matching feature or response.\n\nOriginal signal: {raw_desc[:500]}"
            ),
            "venture": item.get("venture", "delimit"),
            "source_signal": signal_id,
        }

    # Ecosystem build signals → assess threat or opportunity
    if _marker("ecosystem") in haystack:
        return {
            "action": "build",
            "build_type": "task",
            "priority": "P2",
            "title": f"Evaluate: {raw_title}",
            "description": (
                f"Ecosystem signal detected. Assess if this is a threat, opportunity, "
                f"or integration target for Delimit.\n\nOriginal signal: {raw_desc[:500]}"
            ),
            "venture": item.get("venture", "delimit"),
            "source_signal": signal_id,
        }

    # npm packages → check compete or complement
    if _marker("npm") in haystack:
        return {
            "action": "build",
            "build_type": "task",
            "priority": "P2",
            "title": f"npm scout: {raw_title}",
            "description": (
                f"New npm package detected in Delimit's space. Determine if it "
                f"competes with or complements Delimit.\n\nOriginal signal: {raw_desc[:500]}"
            ),
            "venture": "delimit",
            "source_signal": signal_id,
        }

    # Venture discovery → flag for founder review (never auto-build)
    if _marker("venture") in haystack:
        return {
            "action": "notify",
            "venture": item.get("venture", "jamsons"),
            "source_signal": signal_id,
        }

    return None
279
+
280
+
281
def triage_web_signals(session: Dict[str, Any], max_signals: int = 5) -> List[Dict[str, Any]]:
    """Consume strategy items created by the web scanner and convert to build tasks.

    This is the think→build pipeline:
      1. Find open strategy items with web scanner fingerprints
      2. Classify each signal (competitor, ecosystem, npm, venture)
      3. For build signals: create a feat/task item in the ledger
      4. For venture signals: queue a founder notification action
      5. Mark the original strategy item as triaged (status → done)

    Args:
        session: Active loop session (not read here; kept for interface parity).
        max_signals: Upper bound on signals processed in one call.

    Returns:
        List of action records describing what was done.
    """
    from ai.ledger_manager import list_items, add_item, update_item

    result = list_items(status="open", project_path=str(ROOT_LEDGER_PATH))
    items: List[Dict[str, Any]] = []
    for ledger_items in result.get("items", {}).values():
        items.extend(ledger_items)

    # Find untriaged web scanner signals
    web_signals = []
    for item in items:
        if item.get("type") not in SIGNAL_TYPES:
            continue
        # NOTE(review): nothing in this module ever applies the "web-triaged"
        # tag — triage closes signals via status="done" below. The guard is
        # kept as a safety net for externally-tagged items; confirm intent.
        if "web-triaged" in item.get("tags", []):
            continue
        title = item.get("title", "")
        desc = item.get("description", "")
        snippet = f"{title} {desc}".lower()
        # Match web scanner output patterns
        if any(prefix.lower() in snippet for prefix in WEB_SIGNAL_PREFIXES.values()):
            web_signals.append(item)

    if not web_signals:
        return []

    actions = []
    for signal in web_signals[:max_signals]:
        classification = _classify_web_signal(signal)
        if not classification:
            continue

        if classification["action"] == "build":
            # Create a build-safe ledger item from the signal
            try:
                new_item = add_item(
                    title=classification["title"],
                    item_type=classification["build_type"],
                    priority=classification["priority"],
                    description=classification["description"],
                    venture=classification.get("venture", "delimit"),
                    project_path=str(ROOT_LEDGER_PATH),
                    tags=["web-signal", f"from:{classification.get('source_signal', '')}"],
                )
                actions.append({
                    "action": "created_build_task",
                    "source": signal.get("id"),
                    "new_item": new_item.get("id", "unknown"),
                    "type": classification["build_type"],
                    "priority": classification["priority"],
                })
            except Exception as e:
                # Skip marking as triaged so the signal is retried next pass.
                logger.warning("Failed to create build item from signal %s: %s", signal.get("id"), e)
                continue

        elif classification["action"] == "notify":
            # Venture signals → founder review
            actions.append({
                "action": "notify_founder",
                "source": signal.get("id"),
                "venture": classification.get("venture", "jamsons"),
                "title": signal.get("title", ""),
            })

        # Mark signal as triaged so we don't process it again
        # (the unused `existing_tags` local from the original was removed)
        try:
            update_item(
                item_id=signal["id"],
                status="done",
                note=f"Triaged by build loop → {classification['action']}",
                project_path=str(ROOT_LEDGER_PATH),
            )
        except Exception as e:
            logger.warning("Failed to mark signal %s as triaged: %s", signal.get("id"), e)

    return actions
369
+
370
+
89
371
  # ── Governed Selection ───────────────────────────────────────────────
90
372
 
91
373
  def next_task(venture: str = "", max_risk: str = "", session_id: str = "") -> Dict[str, Any]:
@@ -130,6 +412,368 @@ def get_next_build_task(session: Dict[str, Any]) -> Optional[Dict[str, Any]]:
130
412
 
131
413
  return actionable[0]
132
414
 
415
# ── Social Loop Task Selection ────────────────────────────────────────

def get_next_social_task(session: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Select the next social/outreach item from the root ledger."""
    from ai.ledger_manager import list_items

    listing = list_items(status="open", project_path=str(ROOT_LEDGER_PATH))

    # Flatten every ledger bucket, keeping only social-safe, non-manual items.
    candidates = [
        entry
        for bucket in listing.get("items", {}).values()
        for entry in bucket
        if entry.get("type") in SOCIAL_SAFE_TYPES
        and "manual" not in entry.get("tags", [])
    ]
    if not candidates:
        return None

    # Lowest rank wins; unknown priority strings sort last.
    rank = {"P0": 0, "P1": 1, "P2": 2, "P3": 3}
    return min(candidates, key=lambda entry: rank.get(entry.get("priority", "P2"), 9))
441
+
442
+
443
def run_social_iteration(session_id: str) -> Dict[str, Any]:
    """Execute one governed social/think loop iteration.

    Cycle: scan platforms → draft replies → notify founder → handle social ledger items.

    Safeguards checked up-front: session must be "running", under its
    iteration cap, and under its cost cap. Returns a status dict; on a
    normal pass the status is "continued" with per-stage results.
    """
    # Load session state from disk (sessions are plain JSON files).
    path = SESSION_DIR / f"{session_id}.json"
    if not path.exists():
        return {"error": f"Session {session_id} not found"}
    session = json.loads(path.read_text())

    if session["status"] != "running":
        return {"status": "stopped", "reason": f"Session status is {session['status']}"}
    if session["iterations"] >= session["max_iterations"]:
        session["status"] = "finished"
        _save_session(session)
        return {"status": "finished", "reason": "Max iterations reached"}
    if session["cost_incurred"] >= session["cost_cap"]:
        session["status"] = "stopped"
        _save_session(session)
        return {"status": "stopped", "reason": "Cost cap reached"}

    results = {"scans": [], "drafts_sent": 0, "ledger_task": None, "triage": [], "stage_timings": {}}
    iteration_start = time.time()
    _write_heartbeat(session_id, "iteration_start", {"iteration": session["iterations"] + 1})

    # 1. Scan all platforms via social_target pipeline (scan + draft + ledger)
    # LED-788: wall-clock timeout prevents a hung platform from eating the session
    def _do_scan_and_process():
        from ai.social_target import scan_targets, process_targets
        _targets = scan_targets(
            platforms=SOCIAL_SCAN_PLATFORMS,
            ventures=SOCIAL_SCAN_VENTURES,
            limit=10,
        )
        _processed = None
        if _targets:
            _processed = process_targets(_targets, draft_replies=True, create_ledger=True)
        return _targets, _processed

    scan_result = _run_stage_with_timeout(
        "social_scan_and_process",
        _do_scan_and_process,
        SOCIAL_SCAN_TIMEOUT,
        session_id=session_id,
    )
    results["stage_timings"]["scan_and_process"] = scan_result["elapsed_seconds"]
    if scan_result["ok"]:
        targets, processed = scan_result["value"]
        # Keep only a short preview of the first few targets for the report.
        results["scans"] = [
            {"platform": t.get("platform"), "title": t.get("title", "")[:80]}
            for t in targets[:5]
        ]
        results["targets_found"] = len(targets)
        if processed:
            drafted_list = processed.get("drafted", []) or []
            ledger_list = processed.get("ledger_items", []) or []
            # "Sent" counts drafts whose notification actually went out
            # (founder notification appears to happen inside process_targets —
            # there is no separate step 2 here; confirm against social_target).
            notifs_sent = sum(1 for d in drafted_list if d.get("notification_sent"))
            results["processed"] = {
                "drafts": len(drafted_list),
                "drafts_ready": notifs_sent,
                "drafts_suppressed": sum(1 for d in drafted_list if d.get("suppressed_reason")),
                "ledger_items": len(ledger_list),
                "notifications": notifs_sent,
            }
            results["drafts_sent"] = notifs_sent
    else:
        # NOTE(review): errors are counted here but this function never
        # compares session["errors"] to session["error_threshold"] — verify
        # whether the threshold is enforced elsewhere.
        logger.error("Social scan failed: %s", scan_result.get("error"))
        session["errors"] += 1
        results["scan_error"] = scan_result.get("error")
        results["scan_timed_out"] = scan_result.get("timed_out", False)

    # 3. Triage web signals (think→build pipeline)
    _write_heartbeat(session_id, "triage_web_signals")
    triage_actions = triage_web_signals(session)
    if triage_actions:
        results["triage"] = [
            {"action": a.get("action"), "title": a.get("title", "")[:60]}
            for a in triage_actions
        ]

    # 4. Pick up social-typed ledger items
    social_task = get_next_social_task(session)
    if social_task:
        results["ledger_task"] = {"id": social_task["id"], "title": social_task.get("title", "")}
        try:
            from ai.ledger_manager import update_item
            update_item(
                item_id=social_task["id"],
                status="in_progress",
                note="Picked up by think loop",
                project_path=str(ROOT_LEDGER_PATH),
            )
        except Exception:
            # Best-effort claim; the task stays reported even if the ledger
            # update fails.
            pass

    # 5. Strategy deliberation (think): every 4th iteration to avoid rate limits
    # LED-788: strategy cycle wraps delimit_deliberate which easily hangs on
    # a single slow model — wall-clock cap so it can't eat the whole iteration.
    results["strategy"] = None
    if session["iterations"] % 4 == 0:
        strat_result = _run_stage_with_timeout(
            "strategy_cycle",
            lambda: _run_strategy_cycle(session),
            SOCIAL_STRATEGY_TIMEOUT,
            session_id=session_id,
        )
        results["stage_timings"]["strategy_cycle"] = strat_result["elapsed_seconds"]
        if strat_result["ok"]:
            results["strategy"] = strat_result["value"]
        else:
            logger.error("Strategy cycle failed: %s", strat_result.get("error"))
            results["strategy"] = {
                "error": strat_result.get("error"),
                "timed_out": strat_result.get("timed_out", False),
            }

    # LED-788: total iteration time — if we've overrun, mark the session so
    # the next iteration runs lighter (strategy cycle will still be rate-gated
    # by the %4 check, but the warning surfaces to operators).
    total_elapsed = round(time.time() - iteration_start, 1)
    results["stage_timings"]["total"] = total_elapsed
    if total_elapsed > SOCIAL_ITERATION_TIMEOUT:
        logger.error(
            "[loop] iteration %d took %.1fs, exceeding soft cap of %ss",
            session["iterations"] + 1, total_elapsed, SOCIAL_ITERATION_TIMEOUT,
        )
        results["iteration_overrun"] = True

    # 6. Update session
    _write_heartbeat(session_id, "iteration_complete", {"elapsed_seconds": total_elapsed})
    session["iterations"] += 1
    # Flat cost model: a non-strategy iteration is cheap; any iteration that
    # produced a strategy result (including an error dict) bills higher.
    cost = 0.01 if not results.get("strategy") else 0.15  # deliberations cost more
    session["cost_incurred"] += cost
    session["tasks_completed"].append({
        "iteration": session["iterations"],
        "drafts_sent": results["drafts_sent"],
        "targets_scanned": len(results["scans"]),
        "ledger_task": results.get("ledger_task"),
        "strategy": results.get("strategy"),
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })
    _save_session(session)

    return {"status": "continued", "session_id": session_id, "results": results}
587
+
588
+
589
# ── Strategy Deliberation (think cycle) ───────────────────────────────

# JSONL ledger holding strategy items, and the directory where
# deliberation transcripts are saved as markdown.
STRATEGY_LEDGER = Path("/root/.delimit/ledger/strategy.jsonl")
DELIBERATION_DIR = Path("/home/delimit/delimit-private/decisions")
593
+
594
def _get_open_strategy_items(limit: int = 6) -> List[Dict[str, Any]]:
    """Return up to *limit* open strategy items, highest priority first."""
    if not STRATEGY_LEDGER.exists():
        return []

    open_items: List[Dict[str, Any]] = []
    for raw in STRATEGY_LEDGER.read_text().splitlines():
        raw = raw.strip()
        if not raw:
            continue
        try:
            record = json.loads(raw)
        except json.JSONDecodeError:
            # Tolerate malformed JSONL rows instead of failing the cycle.
            continue
        if record.get("status", "open") == "open":
            open_items.append(record)

    # Stable sort: P0 first, unknown priority strings last.
    rank = {"P0": 0, "P1": 1, "P2": 2, "P3": 3}
    open_items.sort(key=lambda rec: rank.get(rec.get("priority", "P2"), 9))
    return open_items[:limit]
612
+
613
+
614
+ def _group_strategy_items(items: List[Dict[str, Any]]) -> List[List[Dict[str, Any]]]:
615
+ """Group related strategy items by venture/topic for batch deliberation."""
616
+ groups: Dict[str, List[Dict[str, Any]]] = {}
617
+ for item in items:
618
+ key = item.get("venture", item.get("tags", ["general"])[0] if item.get("tags") else "general")
619
+ groups.setdefault(key, []).append(item)
620
+ # Cap each group at 4 items
621
+ return [g[:4] for g in groups.values()]
622
+
623
+
624
def _run_strategy_cycle(session: Dict[str, Any]) -> Dict[str, Any]:
    """Run one strategy deliberation cycle: pull items → group → deliberate.

    Picks the highest-priority group of open strategy items, runs a debate
    deliberation over it (transcript saved to DELIBERATION_DIR), then closes
    the deliberated items in the ledger.

    NOTE(review): result["build_tasks_created"] is reported but never
    incremented anywhere in this function — the build tasks are presumably
    created by a downstream consumer of the transcript; confirm before
    relying on that counter.
    """
    items = _get_open_strategy_items(limit=6)
    if not items:
        return {"status": "idle", "reason": "No open strategy items"}

    groups = _group_strategy_items(items)
    result = {"deliberations": 0, "build_tasks_created": 0, "items_closed": 0}

    # Process at most 1 group per cycle to stay within rate limits
    group = groups[0]
    item_refs = ", ".join(f"{i.get('id', '?')}: {i.get('title', '')[:40]}" for i in group)
    titles = " + ".join(i.get("id", "?") for i in group)

    question = (
        f"{titles}: {' | '.join(i.get('title', '') for i in group)}. "
        "What are the specific next steps to move these forward? "
        "Output as 3-5 specific operational tasks with titles and descriptions."
    )

    context = (
        f"Items: {item_refs}\n"
        f"Venture: {group[0].get('venture', 'delimit')}\n"
        f"Session: think loop iteration {session['iterations']}\n"
        f"Constraint: solo founder, all ventures parallel, ledger-based dev"
    )

    try:
        from ai.deliberation import deliberate as run_deliberation
        date_str = datetime.now(timezone.utc).strftime("%Y_%m_%d")
        topic = group[0].get("venture", "strategy").upper()
        save_path = str(DELIBERATION_DIR / f"DELIBERATION_{topic}_{date_str}.md")

        # The transcript is persisted via save_path; the return value is not
        # needed here (fix: no longer bound to an unused local).
        run_deliberation(
            question=question,
            context=context,
            mode="debate",
            save_path=save_path,
        )
        result["deliberations"] = 1
        result["save_path"] = save_path

        # Close the strategy items
        from ai.ledger_manager import update_item
        for item in group:
            try:
                update_item(
                    item_id=item["id"],
                    status="done",
                    note=f"Deliberated in think loop. Transcript: {save_path}",
                    project_path=str(ROOT_LEDGER_PATH),
                )
                result["items_closed"] += 1
            except Exception:
                # Best-effort close; a failed ledger update is not fatal.
                pass

    except Exception as e:
        logger.error("Deliberation failed for %s: %s", titles, e)
        result["error"] = str(e)

    return result
685
+
686
+
687
# ── Deploy Handoff (build→deploy pipeline) ──────────────────────────

# JSONL queue directory linking the build loop to the deploy loop.
DEPLOY_QUEUE_DIR = Path.home() / ".delimit" / "loop" / "deploy-queue"

def _ensure_deploy_queue():
    # Create the deploy-queue directory (and any missing parents) idempotently.
    DEPLOY_QUEUE_DIR.mkdir(parents=True, exist_ok=True)
693
+
694
+
695
def _notify_deploy_loop(task: Dict[str, Any], venture: str, project_path: str,
                        session_id: str = "") -> Dict[str, Any]:
    """Signal the deploy loop that a build task completed and code is ready.

    Appends a deploy-ready record to the pending queue file; the deploy loop
    consumes these and runs commit → push → deploy gates → deploy per venture.
    """
    _ensure_deploy_queue()

    entry = {
        "task_id": task.get("id", "unknown"),
        "title": task.get("title", ""),
        "venture": venture,
        "project_path": project_path,
        "status": "pending",
        "created_at": datetime.now(timezone.utc).isoformat(),
        "session_id": session_id,
    }

    # Append-only JSONL: one record per completed build task.
    with open(DEPLOY_QUEUE_DIR / "pending.jsonl", "a") as queue:
        queue.write(json.dumps(entry) + "\n")

    logger.info("Deploy queue: added %s (%s) for %s", task.get("id"), venture, project_path)
    return entry
720
+
721
+
722
def get_deploy_ready(venture: str = "") -> List[Dict[str, Any]]:
    """Get pending deploy-ready items, optionally filtered by venture.

    Called by the deploy loop to discover what the build loop produced.
    """
    _ensure_deploy_queue()
    queue_file = DEPLOY_QUEUE_DIR / "pending.jsonl"
    if not queue_file.exists():
        return []

    pending: List[Dict[str, Any]] = []
    for raw in queue_file.read_text().strip().split("\n"):
        raw = raw.strip()
        if not raw:
            continue
        try:
            entry = json.loads(raw)
        except json.JSONDecodeError:
            # Skip corrupt rows; the queue is append-only best-effort JSONL.
            continue
        if entry.get("status") != "pending":
            continue
        if venture and entry.get("venture", "") != venture:
            continue
        pending.append(entry)

    return pending
747
+
748
+
749
def mark_deployed(task_id: str) -> bool:
    """Mark a deploy-queue item as deployed. Called by deploy loop after successful deploy."""
    _ensure_deploy_queue()
    queue_file = DEPLOY_QUEUE_DIR / "pending.jsonl"
    if not queue_file.exists():
        return False

    rewritten: List[str] = []
    found = False
    for raw in queue_file.read_text().strip().split("\n"):
        if not raw.strip():
            continue
        try:
            entry = json.loads(raw)
        except json.JSONDecodeError:
            # Preserve unparseable rows verbatim so no data is lost.
            rewritten.append(raw)
            continue
        if entry.get("task_id") == task_id and entry.get("status") == "pending":
            entry["status"] = "deployed"
            entry["deployed_at"] = datetime.now(timezone.utc).isoformat()
            found = True
        rewritten.append(json.dumps(entry))

    # Only rewrite the queue file when something actually changed.
    if found:
        queue_file.write_text("\n".join(rewritten) + "\n")
    return found
776
+
133
777
  # ── Swarm Dispatch & Execution ───────────────────────────────────────
134
778
 
135
779
  def loop_config(session_id: str = "", max_iterations: int = 0,
@@ -188,60 +832,114 @@ def loop_config(session_id: str = "", max_iterations: int = 0,
188
832
  }
189
833
 
190
834
 
191
- def run_governed_iteration(session_id: str) -> Dict[str, Any]:
192
- """Execute one governed build iteration."""
835
+ def run_governed_iteration(session_id: str, hardening: Optional[Any] = None) -> Dict[str, Any]:
836
+ """Execute one governed build iteration.
837
+
838
+ Args:
839
+ session_id: The session to advance.
840
+ hardening: Optional GovernanceHardeningConfig from ai.governance_hardening.
841
+ When provided, dispatch calls are wrapped with retry, debounce,
842
+ and circuit-breaker protection. When None (default), behavior
843
+ is unchanged from the original implementation.
844
+ """
193
845
  from datetime import datetime, timezone
846
+ import importlib
847
+ import ai.swarm as _swarm_mod
848
+ importlib.reload(_swarm_mod)
194
849
  from ai.swarm import dispatch_task
195
-
850
+
196
851
  # 1. Load Session & Check Safeguards
197
852
  path = SESSION_DIR / f"{session_id}.json"
198
853
  if not path.exists():
199
854
  return {"error": f"Session {session_id} not found"}
200
855
  session = json.loads(path.read_text())
201
-
856
+
202
857
  if session["status"] != "running":
203
858
  return {"status": "stopped", "reason": f"Session status is {session['status']}"}
204
-
859
+
205
860
  if session["iterations"] >= session["max_iterations"]:
206
861
  session["status"] = "finished"
207
862
  _save_session(session)
208
863
  return {"status": "finished", "reason": "Max iterations reached"}
209
-
864
+
210
865
  if session["cost_incurred"] >= session["cost_cap"]:
211
866
  session["status"] = "stopped"
212
867
  _save_session(session)
213
868
  return {"status": "stopped", "reason": "Cost cap reached"}
214
869
 
870
+ # 1b. Triage web scanner signals (think→build pipeline)
871
+ triage_actions = triage_web_signals(session)
872
+ if triage_actions:
873
+ logger.info("Web signal triage: %d actions taken", len(triage_actions))
874
+ # If we created new build tasks, they'll be picked up in task selection below
875
+ # If we need to notify founder for venture signals, do it now
876
+ for action in triage_actions:
877
+ if action.get("action") == "notify_founder":
878
+ try:
879
+ from ai.notify import send_notification
880
+ send_notification(
881
+ message=(
882
+ f"[VENTURE SIGNAL] {action.get('title', 'New venture opportunity')}\n"
883
+ f"Source: {action.get('source', 'web scanner')}\n"
884
+ f"Venture: {action.get('venture', 'jamsons')}\n"
885
+ f"Action: Founder review needed before acting"
886
+ ),
887
+ channel="email",
888
+ priority="P1",
889
+ )
890
+ except Exception as e:
891
+ logger.warning("Failed to notify founder for venture signal: %s", e)
892
+
215
893
  # 2. Select Task
216
894
  task = get_next_build_task(session)
217
895
  if not task:
218
- return {"status": "idle", "reason": "No build-safe items in ledger"}
219
-
896
+ return {"status": "idle", "reason": "No build-safe items in ledger", "triage_actions": triage_actions}
897
+
220
898
  # 3. Resolve Context
221
899
  v_name = task.get("venture", "root")
222
900
  ctx = resolve_venture_context(v_name)
223
-
901
+
224
902
  # 4. Dispatch through Swarm (Control Plane)
225
903
  logger.info(f"Dispatching build task {task['id']} for venture {v_name}")
226
-
904
+
227
905
  start_time = time.time()
228
906
  try:
229
- # Note: Swarm dispatch is the central point of governance
230
- dispatch_result = dispatch_task(
907
+ # LED-661: Route through governance hardening stack when configured
908
+ dispatch_kwargs = dict(
231
909
  title=task["title"],
232
910
  description=task["description"],
233
911
  context=f"Executing governed build loop for {v_name}. Ledger ID: {task['id']}",
234
912
  project_path=ctx["path"],
235
- priority=task["priority"]
913
+ priority=task["priority"],
236
914
  )
237
-
915
+
916
+ if hardening is not None and hardening.is_active():
917
+ from ai.governance_hardening import hardened_dispatch
918
+ dispatch_result = hardened_dispatch(
919
+ hardening, dispatch_task,
920
+ tool_name="dispatch_task",
921
+ **dispatch_kwargs,
922
+ )
923
+ # hardened_dispatch may return a control dict (debounced/circuit_open)
924
+ if isinstance(dispatch_result, dict) and dispatch_result.get("status") in ("debounced", "circuit_open"):
925
+ session["tasks_completed"].append({
926
+ "id": task["id"],
927
+ "status": dispatch_result["status"],
928
+ "timestamp": datetime.now(timezone.utc).isoformat(),
929
+ })
930
+ _save_session(session)
931
+ return {"status": dispatch_result["status"], "task_id": task["id"], "detail": dispatch_result}
932
+ else:
933
+ # Original path: direct dispatch, no hardening
934
+ dispatch_result = dispatch_task(**dispatch_kwargs)
935
+
238
936
  # 5. Update State & Ledger
239
937
  duration = time.time() - start_time
240
938
  cost = dispatch_result.get("estimated_cost", 0.05) # Default placeholder if missing
241
-
939
+
242
940
  session["iterations"] += 1
243
941
  session["cost_incurred"] += cost
244
-
942
+
245
943
  from ai.ledger_manager import update_item
246
944
  if dispatch_result.get("status") == "completed":
247
945
  update_item(
@@ -256,6 +954,16 @@ def run_governed_iteration(session_id: str) -> Dict[str, Any]:
256
954
  "duration": duration,
257
955
  "cost": cost
258
956
  })
957
+ # 5b. Signal deploy loop that code is ready
958
+ try:
959
+ _notify_deploy_loop(
960
+ task=task,
961
+ venture=v_name,
962
+ project_path=ctx["path"],
963
+ session_id=session_id,
964
+ )
965
+ except Exception as e:
966
+ logger.warning("Failed to notify deploy loop for %s: %s", task.get("id"), e)
259
967
  else:
260
968
  session["errors"] += 1
261
969
  if session["errors"] >= session["error_threshold"]:
@@ -265,15 +973,71 @@ def run_governed_iteration(session_id: str) -> Dict[str, Any]:
265
973
  "status": "failed",
266
974
  "error": dispatch_result.get("error", "Dispatch failed")
267
975
  })
268
-
976
+
269
977
  _save_session(session)
270
978
  return {"status": "continued", "task_id": task["id"], "result": dispatch_result}
271
-
979
+
272
980
  except Exception as e:
273
981
  session["errors"] += 1
274
982
  _save_session(session)
275
983
  return {"error": str(e)}
276
984
 
985
def loop_status(session_id: str = "") -> Dict[str, Any]:
    """Check autonomous loop metrics for a session."""
    _ensure_session_dir()
    if session_id:
        session_file = SESSION_DIR / f"{session_id}.json"
        if not session_file.exists():
            return {"error": f"Session {session_id} not found"}
    else:
        # No id given: fall back to the most recently modified session file.
        candidates = sorted(
            SESSION_DIR.glob("*.json"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )
        if not candidates:
            return {"error": "No loop sessions found"}
        session_file = candidates[0]
    session = json.loads(session_file.read_text())

    # LED-788: live stage + elapsed time from the heartbeat file, if any.
    heartbeat = _read_heartbeat(session["session_id"])
    return {
        "session_id": session["session_id"],
        "status": session.get("status", "unknown"),
        "iterations": session.get("iterations", 0),
        "max_iterations": session.get("max_iterations", MAX_ITERATIONS_DEFAULT),
        "cost_incurred": session.get("cost_incurred", 0.0),
        "cost_cap": session.get("cost_cap", MAX_COST_DEFAULT),
        "errors": session.get("errors", 0),
        "error_threshold": session.get("error_threshold", MAX_ERRORS_DEFAULT),
        "tasks_completed": session.get("tasks_completed", []),
        "started_at": session.get("started_at", ""),
        "heartbeat": heartbeat,
    }
1014
+
1015
+
1016
def task_complete(task_id: str, status: str = "done", note: str = "", session_id: str = "") -> Dict[str, Any]:
    """Mark a task as complete within a loop session.

    Args:
        task_id: Ledger item id to update.
        status: New ledger status (default "done").
        note: Optional completion note; a default message is used when empty.
        session_id: When given, the completion is also appended to that
            session's tasks_completed history.

    Returns:
        Dict with the task id, the status applied, and the ledger update result.
    """
    from ai.ledger_manager import update_item

    result = update_item(
        item_id=task_id,
        status=status,
        # Fix: plain string — the original used an f-string with no placeholders.
        note=note or "Completed via governed build loop",
        project_path=str(ROOT_LEDGER_PATH),
    )

    # Update session if provided
    if session_id:
        path = SESSION_DIR / f"{session_id}.json"
        if path.exists():
            session = json.loads(path.read_text())
            session["tasks_completed"].append({
                "id": task_id,
                "status": status,
                "note": note,
            })
            _save_session(session)

    return {"task_id": task_id, "status": status, "ledger_update": result}
1040
+
1041
+
277
1042
if __name__ == "__main__":
    # Library module — no standalone behavior when executed directly.
    pass