delimit-cli 4.1.44 → 4.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -596,11 +596,156 @@ def fetch_thread(thread_id: str, *, proxy_url: str = PROXY_URL) -> Optional[Dict
596
596
  return None
597
597
 
598
598
 
599
def monitor_user_engagement(username: str = "delimitdev", proxy_url: str = PROXY_URL) -> list:
    """Monitor engagement on posts by a Reddit user (LED-300).

    Fetches the user's recent comments via the residential proxy and checks
    for new replies. Returns a list of alert dicts for actionable engagement.

    Args:
        username: Reddit username to monitor (no "u/" prefix).
        proxy_url: Proxy endpoint used to reach reddit.com.

    Returns:
        List of alert dicts. "reply" alerts are actionable; "score_change"
        alerts are informational. Returns [] when the listing fetch fails.
    """
    # quote() is only re-exported by urllib.request as an implementation
    # detail; import it from its documented home, urllib.parse.
    from urllib.parse import quote

    alerts: list = []
    seen_file = Path.home() / ".delimit" / "reddit_engagement_seen.json"
    seen: dict = {}
    if seen_file.exists():
        try:
            seen = json.loads(seen_file.read_text())
        except Exception:
            seen = {}  # corrupt/unreadable state file: start fresh

    # Fetch the user's 25 most recent comments through the proxy.
    reddit_url = f"https://www.reddit.com/user/{username}/comments.json?limit=25&raw_json=1"
    fetch_url = f"{proxy_url}?url={quote(reddit_url, safe='')}"
    req = urllib.request.Request(
        fetch_url,
        headers={"User-Agent": "delimit-scanner/1.0", "Accept": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            body = json.loads(resp.read().decode())
    except Exception as exc:
        logger.warning("Failed to fetch u/%s comments: %s", username, exc)
        return []

    children = []
    if isinstance(body, dict):
        children = body.get("data", {}).get("children", [])

    for child in children:
        d = child.get("data", {})
        if not d:
            continue
        comment_id = d.get("id", "")
        # Comment listings do not reliably carry num_replies; absence means 0.
        num_replies = d.get("num_replies", 0)
        score = d.get("score", 1)
        permalink = d.get("permalink", "")
        link_title = d.get("link_title", "")
        subreddit = d.get("subreddit", "")

        prev = seen.get(comment_id, {})
        prev_score = prev.get("score", 1)

        # Score change is the cheap engagement signal; reply detection below
        # requires a per-thread fetch and is limited to the newest comments.
        score_delta = score - prev_score
        is_fresh = comment_id not in seen

        if score_delta >= 5 and not is_fresh:
            alerts.append({
                "type": "score_change",
                "comment_id": comment_id,
                "subreddit": subreddit,
                "title": link_title,
                "message": f"Your comment in r/{subreddit} gained +{score_delta} upvotes (now {score})",
                "url": f"https://reddit.com{permalink}" if permalink else "",
                "score": score,
                "delta": score_delta,
                "actionable": False,
                "fresh": False,
            })

        # Update seen state so the next run computes deltas against this one.
        seen[comment_id] = {
            "score": score,
            "replies": num_replies,
            "subreddit": subreddit,
            "checked_at": datetime.now(timezone.utc).isoformat(),
        }

    # Now check for replies to our comments by fetching each thread
    # (expensive — only do top 5 most recent)
    for child in children[:5]:
        d = child.get("data", {})
        comment_id = d.get("id", "")
        link_id = d.get("link_id", "").replace("t3_", "")
        permalink = d.get("permalink", "")
        if not link_id or not permalink:
            continue

        # Fetch the comment context to see replies
        context_url = f"https://www.reddit.com{permalink}.json?raw_json=1"
        ctx_fetch = f"{proxy_url}?url={quote(context_url, safe='')}"
        ctx_req = urllib.request.Request(
            ctx_fetch,
            headers={"User-Agent": "delimit-scanner/1.0", "Accept": "application/json"},
        )
        try:
            with urllib.request.urlopen(ctx_req, timeout=15) as resp:
                thread_data = json.loads(resp.read().decode())
        except Exception:
            continue  # best-effort: a single thread failure should not abort the scan

        # thread_data is [post_listing, comment_listing]
        if not isinstance(thread_data, list) or len(thread_data) < 2:
            continue

        # Find replies to our comment
        comment_children = thread_data[1].get("data", {}).get("children", [])
        for cc in comment_children:
            cd = cc.get("data", {})
            if cd.get("id") != comment_id:
                continue
            replies_listing = cd.get("replies")
            # Reddit serializes "no replies" as an empty string, not a dict.
            if not isinstance(replies_listing, dict):
                continue
            reply_children = replies_listing.get("data", {}).get("children", [])
            for rc in reply_children:
                rd = rc.get("data", {})
                reply_id = rd.get("id", "")
                reply_author = rd.get("author", "")
                reply_body = (rd.get("body") or "")[:200]
                if not reply_id or reply_author == username:
                    continue
                seen_key = f"reply_{reply_id}"
                if seen_key in seen:
                    continue
                alerts.append({
                    "type": "reply",
                    "comment_id": comment_id,
                    "reply_id": reply_id,
                    "reply_author": reply_author,
                    "reply_body": reply_body,
                    "subreddit": d.get("subreddit", ""),
                    "title": d.get("link_title", ""),
                    "message": f"u/{reply_author} replied to your comment in r/{d.get('subreddit','')}: {reply_body[:100]}",
                    "url": f"https://reddit.com{permalink}" if permalink else "",
                    "actionable": True,
                    "fresh": True,
                })
                seen[seen_key] = {
                    "author": reply_author,
                    "checked_at": datetime.now(timezone.utc).isoformat(),
                }

        # Rate limit between thread fetches
        time.sleep(2)

    # Persist seen state
    try:
        seen_file.parent.mkdir(parents=True, exist_ok=True)
        seen_file.write_text(json.dumps(seen, indent=2))
    except Exception as exc:
        logger.warning("Failed to save engagement seen state: %s", exc)

    return alerts
@@ -1677,12 +1677,19 @@ def delimit_zero_spec(
1677
1677
  def delimit_init(
1678
1678
  project_path: str = ".",
1679
1679
  preset: str = "default",
1680
+ no_permissions: bool = False,
1680
1681
  ) -> Dict[str, Any]:
1681
1682
  """Initialize Delimit governance for a project. Creates .delimit/policies.yml and ledger directory.
1682
1683
 
1684
+ Also auto-configures filesystem permissions: chmod 755 on .delimit/,
1685
+ chmod 600 on any .delimit/secrets/* files, and creates a project-local
1686
+ .claude/settings.json with a reasonable Edit/Write/Bash allowlist if one
1687
+ does not already exist. Pass no_permissions=True to skip permission setup.
1688
+
1683
1689
  Args:
1684
1690
  project_path: Project root directory.
1685
1691
  preset: Policy preset - strict, default, or relaxed.
1692
+ no_permissions: Skip the filesystem permission auto-config (LED-269).
1686
1693
  """
1687
1694
  VALID_PRESETS = ("strict", "default", "relaxed")
1688
1695
  if preset not in VALID_PRESETS:
@@ -1708,12 +1715,18 @@ def delimit_init(
1708
1715
  and strategy_file.exists()
1709
1716
  ):
1710
1717
  environment = _detect_environment()
1718
+ # LED-269: Re-run permission setup on idempotent re-init so existing
1719
+ # installs (created before LED-269) can pick up correct perms by
1720
+ # simply re-running delimit_init.
1721
+ from ai.activate_helpers import setup_init_permissions
1722
+ permissions = setup_init_permissions(root, no_permissions=no_permissions)
1711
1723
  return _with_next_steps("init", {
1712
1724
  "tool": "init",
1713
1725
  "status": "already_initialized",
1714
1726
  "project_path": str(root),
1715
1727
  "preset": preset,
1716
1728
  "environment": environment,
1729
+ "permissions": permissions,
1717
1730
  "message": f"Project already initialized at {delimit_dir}. No files overwritten.",
1718
1731
  })
1719
1732
 
@@ -1761,6 +1774,11 @@ def delimit_init(
1761
1774
  # Auto-detect available API keys and CLIs
1762
1775
  environment = _detect_environment()
1763
1776
 
1777
+ # LED-269: Filesystem permission auto-config (chmod, .claude/settings.json,
1778
+ # ownership). Pass no_permissions=True to skip.
1779
+ from ai.activate_helpers import setup_init_permissions
1780
+ permissions = setup_init_permissions(root, no_permissions=no_permissions)
1781
+
1764
1782
  return _with_next_steps("init", {
1765
1783
  "tool": "init",
1766
1784
  "status": "initialized",
@@ -1768,6 +1786,7 @@ def delimit_init(
1768
1786
  "preset": preset,
1769
1787
  "created": created,
1770
1788
  "environment": environment,
1789
+ "permissions": permissions,
1771
1790
  "message": f"Governance initialized with '{preset}' preset. {len(created)} items created.",
1772
1791
  })
1773
1792
 
@@ -2983,18 +3002,22 @@ def delimit_security_audit(target: str = ".") -> Dict[str, Any]:
2983
3002
  # ─── Evidence ───────────────────────────────────────────────────────────
2984
3003
 
2985
3004
@mcp.tool()
def delimit_evidence_collect(target: str = ".", evidence_type: str = "") -> Dict[str, Any]:
    """Collect evidence artifacts for governance (Pro).

    Args:
        target: Repository or task path.
        evidence_type: Type of evidence (e.g. "deploy", "security", "test", "audit"). Stored in bundle metadata.
    """
    from ai.license import require_premium
    gate = require_premium("evidence_collect")
    if gate:
        return gate
    from backends.repo_bridge import evidence_collect
    # An empty evidence_type means "no options" — pass None, not an empty dict.
    opts = {"evidence_type": evidence_type} if evidence_type else None
    result = _safe_call(evidence_collect, target=target, options=opts)
    return _with_next_steps("evidence_collect", result)
2998
3021
 
2999
3022
 
3000
3023
  @mcp.tool()
@@ -4332,7 +4355,7 @@ def delimit_help(tool_name: str = "") -> Dict[str, Any]:
4332
4355
 
4333
4356
 
4334
4357
  @mcp.tool()
4335
- def delimit_diagnose(project_path: str = ".") -> Dict[str, Any]:
4358
+ def delimit_diagnose(project_path: str = ".", dry_run: bool = False, undo: bool = False) -> Dict[str, Any]:
4336
4359
  """Comprehensive health check of your Delimit installation (delimit doctor).
4337
4360
 
4338
4361
  Universal debugging tool. Runs 10 checks covering MCP connectivity,
@@ -4342,11 +4365,85 @@ def delimit_diagnose(project_path: str = ".") -> Dict[str, Any]:
4342
4365
 
4343
4366
  Args:
4344
4367
  project_path: Project to diagnose.
4368
+ dry_run: If True, return a preview of what doctor would create/modify without executing changes.
4369
+ undo: If True, revert changes from the last doctor --fix run using the saved manifest.
4345
4370
  """
4346
4371
  import sys
4372
+ import hashlib
4347
4373
  import urllib.request
4348
4374
  import urllib.error
4349
4375
 
4376
+ p_resolve = Path(project_path).resolve()
4377
+ manifest_path = p_resolve / ".delimit" / "doctor-manifest.json"
4378
+
4379
+ # ── Undo mode: revert changes from last doctor run ──────────────────
4380
+ if undo:
4381
+ if not manifest_path.is_file():
4382
+ return {"status": "no_manifest", "message": "No doctor-manifest.json found. Nothing to undo."}
4383
+ try:
4384
+ manifest = json.loads(manifest_path.read_text())
4385
+ except Exception as exc:
4386
+ return {"status": "error", "message": f"Failed to read manifest: {exc}"}
4387
+ reverted = []
4388
+ skipped = []
4389
+ for entry in manifest.get("actions", []):
4390
+ target = Path(entry["path"])
4391
+ action = entry["action"]
4392
+ if action == "created":
4393
+ if target.is_file():
4394
+ target.unlink()
4395
+ reverted.append({"path": str(target), "action": "deleted"})
4396
+ elif target.is_dir():
4397
+ import shutil
4398
+ shutil.rmtree(target, ignore_errors=True)
4399
+ reverted.append({"path": str(target), "action": "deleted_dir"})
4400
+ else:
4401
+ skipped.append({"path": str(target), "reason": "already_gone"})
4402
+ elif action == "modified":
4403
+ # We stored original_hash but not original content — cannot restore
4404
+ skipped.append({"path": str(target), "reason": "modified_files_cannot_be_restored"})
4405
+ else:
4406
+ skipped.append({"path": str(target), "reason": f"unknown_action_{action}"})
4407
+ # Remove the manifest itself
4408
+ manifest_path.unlink(missing_ok=True)
4409
+ return {
4410
+ "status": "undo_complete",
4411
+ "reverted": reverted,
4412
+ "skipped": skipped,
4413
+ "message": f"Reverted {len(reverted)} item(s), skipped {len(skipped)}.",
4414
+ }
4415
+
4416
+ # ── Dry-run mode: preview what doctor would create/modify ───────────
4417
+ if dry_run:
4418
+ planned = []
4419
+ delimit_dir = p_resolve / ".delimit"
4420
+ if not delimit_dir.is_dir():
4421
+ planned.append({"path": str(delimit_dir), "action": "create_dir", "description": ".delimit/ governance directory"})
4422
+ planned.append({"path": str(delimit_dir / "policies.yml"), "action": "create_file", "description": "Governance policy rules"})
4423
+ planned.append({"path": str(delimit_dir / "ledger"), "action": "create_dir", "description": "Operations ledger directory"})
4424
+ planned.append({"path": str(delimit_dir / "ledger" / "operations.jsonl"), "action": "create_file", "description": "Operations ledger"})
4425
+ planned.append({"path": str(delimit_dir / "evidence"), "action": "create_dir", "description": "Audit trail events directory"})
4426
+ else:
4427
+ if not (delimit_dir / "policies.yml").is_file():
4428
+ planned.append({"path": str(delimit_dir / "policies.yml"), "action": "create_file", "description": "Governance policy rules"})
4429
+ if not (delimit_dir / "ledger").is_dir():
4430
+ planned.append({"path": str(delimit_dir / "ledger"), "action": "create_dir", "description": "Operations ledger directory"})
4431
+ if not (delimit_dir / "evidence").is_dir():
4432
+ planned.append({"path": str(delimit_dir / "evidence"), "action": "create_dir", "description": "Audit trail events directory"})
4433
+ # Check for GitHub workflow creation
4434
+ github_dir = p_resolve / ".github" / "workflows"
4435
+ if github_dir.is_dir():
4436
+ wf = github_dir / "api-governance.yml"
4437
+ if not wf.is_file():
4438
+ planned.append({"path": str(wf), "action": "create_file", "description": "API governance GitHub Action workflow"})
4439
+ return {
4440
+ "status": "dry_run",
4441
+ "planned_changes": planned,
4442
+ "change_count": len(planned),
4443
+ "message": f"Doctor would create/modify {len(planned)} item(s). Run without --dry-run to apply."
4444
+ if planned else "No changes needed.",
4445
+ }
4446
+
4350
4447
  issues: List[Dict[str, str]] = []
4351
4448
  checks: Dict[str, Any] = {}
4352
4449
  checklist: List[Dict[str, str]] = []
@@ -4832,10 +4929,12 @@ def delimit_ledger_add(
4832
4929
  priority: str = "P1",
4833
4930
  description: str = "",
4834
4931
  source: str = "session",
4932
+ tags: Optional[Union[str, List[str]]] = None,
4835
4933
  acceptance_criteria: Optional[Union[str, List[str]]] = None,
4836
4934
  context: str = "",
4837
4935
  tools_needed: Optional[Union[str, List[str]]] = None,
4838
4936
  estimated_complexity: str = "",
4937
+ worked_by: str = "",
4839
4938
  ) -> Dict[str, Any]:
4840
4939
  """Add a new item to a project's ledger.
4841
4940
 
@@ -4850,11 +4949,17 @@ def delimit_ledger_add(
4850
4949
  priority: P0 (urgent), P1 (important), P2 (nice to have).
4851
4950
  description: Details.
4852
4951
  source: Where this came from (session, consensus, focus-group, etc).
4952
+ tags: Labels/tags (e.g. ["deploy-ready", "ship"] or "deploy-ready,ship").
4853
4953
  acceptance_criteria: List of testable "done when" conditions (e.g. "tests pass", "coverage > 80%").
4854
4954
  context: Background info an AI agent needs to work on this item.
4855
4955
  tools_needed: Delimit tools needed (e.g. "delimit_lint", "delimit_test_coverage").
4856
4956
  estimated_complexity: small, medium, or large.
4957
+ worked_by: Which AI model is working on this. Auto-detected if empty.
4857
4958
  """
4959
+ try:
4960
+ tags = _coerce_list_arg(tags, "tags")
4961
+ except ValueError:
4962
+ tags = None
4858
4963
  try:
4859
4964
  acceptance_criteria = _coerce_list_arg(acceptance_criteria, "acceptance_criteria")
4860
4965
  except ValueError:
@@ -4867,8 +4972,9 @@ def delimit_ledger_add(
4867
4972
  project = _resolve_venture(venture)
4868
4973
  result = add_item(title=title, ledger=ledger, type=item_type, priority=priority,
4869
4974
  description=description, source=source, project_path=project,
4870
- acceptance_criteria=acceptance_criteria, context=context,
4871
- tools_needed=tools_needed, estimated_complexity=estimated_complexity)
4975
+ tags=tags, acceptance_criteria=acceptance_criteria, context=context,
4976
+ tools_needed=tools_needed, estimated_complexity=estimated_complexity,
4977
+ worked_by=worked_by)
4872
4978
  return _with_next_steps("ledger_add", result)
4873
4979
 
4874
4980
 
@@ -4886,6 +4992,7 @@ def delimit_ledger_update(
4886
4992
  labels: Optional[Union[str, List[str]]] = None,
4887
4993
  blocked_by: str = "",
4888
4994
  blocks: str = "",
4995
+ worked_by: str = "",
4889
4996
  ) -> Dict[str, Any]:
4890
4997
  """Update any field on a ledger item.
4891
4998
 
@@ -4905,6 +5012,7 @@ def delimit_ledger_update(
4905
5012
  labels: Labels/tags (e.g. ["dashboard", "ux"] or "dashboard,ux").
4906
5013
  blocked_by: Item ID that blocks this item (e.g. "LED-025").
4907
5014
  blocks: Item ID that this item blocks (e.g. "STR-005").
5015
+ worked_by: Which AI model is working on this. Auto-detected if empty.
4908
5016
  """
4909
5017
  try:
4910
5018
  labels = _coerce_list_arg(labels, "labels") if labels else None
@@ -4917,7 +5025,7 @@ def delimit_ledger_update(
4917
5025
  title=title or None, description=description or None, note=note or None,
4918
5026
  assignee=assignee or None, due_date=due_date or None, labels=labels,
4919
5027
  blocked_by=blocked_by or None, blocks=blocks or None,
4920
- project_path=project,
5028
+ project_path=project, worked_by=worked_by,
4921
5029
  )
4922
5030
  return _with_next_steps("ledger_update", result)
4923
5031
 
@@ -6943,29 +7051,70 @@ def delimit_daemon_run(iterations: int = 1, dry_run: bool = True) -> Dict[str, A
6943
7051
  ))
6944
7052
 
6945
7053
@mcp.tool()
def delimit_build_loop(action: str = "run", session_id: str = "", loop_type: str = "build") -> Dict[str, Any]:
    """Execute a governed continuous loop (LED-239).

    Supports three loop types matching the OS terminal model:
    - **build**: picks feat/fix/task items from ledger, dispatches via swarm
    - **social** (think): scans Reddit/X/HN, drafts replies, handles social/outreach/content/sensor ledger items
    - **deploy**: runs deploy gates, publishes, verifies

    Args:
        action: 'init' to start a session, 'run' to execute one iteration.
        session_id: Optional session ID to continue.
        loop_type: 'build', 'social', or 'deploy' (default: build).
    """
    from ai.loop_engine import create_governed_session, run_governed_iteration, run_social_iteration

    if action == "init":
        return _with_next_steps("build_loop", create_governed_session(loop_type=loop_type))

    # Any non-'init' action runs one iteration; create a session on demand.
    sid = session_id or create_governed_session(loop_type=loop_type)["session_id"]
    # Social sessions are recognized either by explicit loop_type or by the
    # session-id prefix, so a continued social session keeps its loop kind.
    social = loop_type == "social" or sid.startswith("social-")
    runner = run_social_iteration if social else run_governed_iteration
    return _with_next_steps("build_loop", runner(sid))
7078
+
7079
+
7080
@mcp.tool()
def delimit_build_loop_daemon(
    action: str = "status",
    session_id: str = "",
    interval_seconds: int = 900,
    loop_type: str = "build",
) -> Dict[str, Any]:
    """Background auto-pull daemon for the governed build/social/deploy loops (Pro).

    Spawns a daemon thread that calls run_governed_iteration (or run_social_iteration)
    every interval_seconds. Preserves the pull-based triage pattern — each tick logs
    the returned task_id to ~/.delimit/logs/loop_daemon_{session_id}.jsonl so the
    orchestrating Claude session can tail the log and handle triage.

    Respects existing delimit_loop_config safeguards (cost_cap, error_threshold,
    max_iterations, status=paused/stopped) via loop_status check before each tick.

    Args:
        action: 'start', 'stop', or 'status' (default: status)
        session_id: Session to run (required for all actions)
        interval_seconds: Tick interval in seconds (default 900 = 15 min). Only used on start.
        loop_type: 'build', 'social', or 'deploy' (default: build). Only used on start.
    """
    from ai.license import require_premium
    gate = require_premium("build_loop_daemon")
    if gate:
        return gate
    from ai import loop_daemon
    if not session_id:
        return {"error": "session_id is required"}
    # Dispatch to the daemon module; unknown actions are rejected explicitly.
    if action == "start":
        result = loop_daemon.start(session_id, interval_seconds, loop_type)
    elif action == "stop":
        result = loop_daemon.stop(session_id)
    elif action == "status":
        result = loop_daemon.status(session_id)
    else:
        return {"error": f"unknown action: {action}. Expected start, stop, or status."}
    return _with_next_steps("build_loop_daemon", result)
6969
7118
 
6970
7119
 
6971
7120
  @mcp.tool()
@@ -8073,6 +8222,92 @@ def main():
8073
8222
  import asyncio
8074
8223
  asyncio.run(run_mcp_server(mcp))
8075
8224
 
8225
+ # ═══════════════════════════════════════════════════════════════════════
8226
+ # CONTENT INTELLIGENCE (LED-797) — tweet corpus → long-form content radar
8227
+ # ═══════════════════════════════════════════════════════════════════════
8228
+
8229
+
8230
@mcp.tool()
def delimit_content_intel_daily(
    date: str = "",
    since_hours: int = 72,
    top_n: int = 5,
    email: bool = True,
) -> Dict[str, Any]:
    """Run the daily content intelligence digest (LED-797).

    Clusters the tweet corpus over a trailing window, intersects with
    Delimit's ground truth feature list, and drafts per-channel content
    seeds (Reddit targets, blog topics, Dev.to tutorials, HN submissions).

    Every draft cites at least 3 corpus rows verbatim with engagement counts
    and grounds all product claims in shipped features. NO AUTO-POSTING —
    drafts are written to ~/.delimit/content/ and (if email=True) emailed
    to the founder for manual approval.

    Args:
        date: ISO date string (YYYY-MM-DD). Default = today UTC.
        since_hours: Trailing window for clustering. Default 72h.
        top_n: Max topics to draft per channel. Default 5.
        email: Send the digest email via delimit_notify. Default True.
    """
    from ai.content_intel import ContentIntelligence
    try:
        # An empty date string means "today UTC" downstream, signalled by None.
        engine = ContentIntelligence()
        return engine.generate_daily_digest(
            date=date or None,
            since_hours=since_hours,
            top_n=top_n,
            email=email,
        )
    except Exception as e:
        logger.error("delimit_content_intel_daily failed: %s", e)
        return {"error": str(e)}
8266
+
8267
+
8268
@mcp.tool()
def delimit_content_intel_topic(keyword: str, since_hours: int = 168) -> Dict[str, Any]:
    """On-demand content intelligence probe for a single keyword (LED-797).

    Runs the same cluster → intersect → rank pipeline as the daily digest
    but filtered to one keyword and over a longer 7-day window by default.
    Returns ranked topics with cited sample tweets — does NOT write files
    or send email.

    Args:
        keyword: Topic keyword to probe (e.g. "openapi", "claude code").
        since_hours: Trailing window in hours. Default 168 (7 days).
    """
    from ai.content_intel import ContentIntelligence
    try:
        return ContentIntelligence().topic_probe(keyword=keyword, since_hours=since_hours)
    except Exception as e:
        logger.error("delimit_content_intel_topic failed: %s", e)
        return {"error": str(e)}
8288
+
8289
+
8290
@mcp.tool()
def delimit_content_intel_weekly(date: str = "") -> Dict[str, Any]:
    """Run the weekly content intelligence summary (LED-797).

    7-day rollup: top topics that intersect Delimit ground truth, plus a
    covered/missed split showing which topics already made it into a daily
    digest and which slipped through. Designed to run every Monday 09:00 UTC
    via cron (`delimit_content_intel_weekly`).

    Args:
        date: ISO date (YYYY-MM-DD). Default = today UTC.
    """
    from ai.content_intel import ContentIntelligence
    try:
        # Empty date → None → "today UTC" inside the engine.
        return ContentIntelligence().generate_weekly_summary(date=date or None)
    except Exception as e:
        logger.error("delimit_content_intel_weekly failed: %s", e)
        return {"error": str(e)}
8309
+
8310
+
8076
8311
  @mcp.tool()
8077
8312
  def delimit_reddit_fetch_thread(thread_id: str) -> Dict[str, Any]:
8078
8313
  """Surgically fetch a single Reddit thread by ID (e.g. 'OSKJVH7f35')."""