open-research-protocol 0.4.32 → 0.4.34

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/cli/orp.py CHANGED
@@ -138,6 +138,40 @@ FRONTIER_BANDS = ("exact", "structured", "horizon")
138
138
  FRONTIER_ACTIVE_STATUSES = {"active", "in_progress", "running"}
139
139
  FRONTIER_PENDING_STATUSES = {"", "pending", "planned", "ready"}
140
140
  FRONTIER_TERMINAL_STATUSES = {"complete", "completed", "done", "skipped", "terminal"}
141
+ FRONTIER_MODELED_CHECKOFF_PHASE_ID = "modeled-checkoff-v0"
142
+ FRONTIER_MODELED_CHECKOFF_LIST_ID = "modeled-checkoff"
143
+ FRONTIER_MODELED_CHECKOFF_ITEM_ID = "modeled-professional-lens-taxonomy-v0"
144
+ FRONTIER_MODELED_CHECKOFF_HINTS = (
145
+ "approval",
146
+ "check-off",
147
+ "checkoff",
148
+ "clinical",
149
+ "domain expert",
150
+ "expert",
151
+ "gate",
152
+ "governance",
153
+ "human",
154
+ "legal",
155
+ "packet check",
156
+ "packet-check",
157
+ "privacy",
158
+ "publication",
159
+ "regulatory",
160
+ "review",
161
+ "safety",
162
+ "sign-off",
163
+ "signoff",
164
+ "validator",
165
+ )
166
+ FRONTIER_MODELED_CHECKOFF_ACTIVE_HINTS = (
167
+ "continuous-phase-delegation",
168
+ "frontier-continuation-governor",
169
+ "modeled-checkoff",
170
+ "modeled-expert",
171
+ "modeled-professional-lens",
172
+ "packet-checker",
173
+ "phase-delegation-governor",
174
+ )
141
175
  YOUTUBE_SOURCE_SCHEMA_VERSION = "1.0.0"
142
176
  EXCHANGE_REPORT_SCHEMA_VERSION = "1.0.0"
143
177
  RESEARCH_RUN_SCHEMA_VERSION = "1.0.0"
@@ -170,6 +204,9 @@ AGENDA_DEFAULT_REFRESH_TIMES = {
170
204
  }
171
205
  AGENT_GUIDE_BEGIN = "<!-- ORP:AGENT_GUIDE:BEGIN -->"
172
206
  AGENT_GUIDE_END = "<!-- ORP:AGENT_GUIDE:END -->"
207
+ CODEX_GLOBAL_GUIDE_BEGIN = "<!-- ORP:CODEX_GLOBAL:BEGIN -->"
208
+ CODEX_GLOBAL_GUIDE_END = "<!-- ORP:CODEX_GLOBAL:END -->"
209
+ CODEX_SESSION_START_HOOK = "orp_codex_session_start.py"
173
210
  ORP_SNIPPET_BEGIN = "<!-- ORP:BEGIN -->"
174
211
  ORP_SNIPPET_END = "<!-- ORP:END -->"
175
212
  AGENT_GUIDE_FILENAMES = ("AGENTS.md", "CLAUDE.md")
@@ -4904,9 +4941,31 @@ def _build_remote_feature_body(
4904
4941
  body["superStarred"] = True
4905
4942
  if getattr(args, "visibility", None) is not None:
4906
4943
  body["visibility"] = str(getattr(args, "visibility", "")).strip()
4944
+ completed = bool(getattr(args, "completed", False))
4945
+ incomplete = bool(getattr(args, "incomplete", False))
4946
+ if completed and incomplete:
4947
+ raise RuntimeError("Use only one of --completed or --incomplete.")
4948
+ if completed or incomplete:
4949
+ body["completed"] = completed
4907
4950
  return body
4908
4951
 
4909
4952
 
4953
+ def _feature_body_has_metadata_update(body: dict[str, Any]) -> bool:
4954
+ return any(
4955
+ key in body
4956
+ for key in (
4957
+ "title",
4958
+ "notes",
4959
+ "detail",
4960
+ "detailLabel",
4961
+ "details",
4962
+ "starred",
4963
+ "superStarred",
4964
+ "visibility",
4965
+ )
4966
+ )
4967
+
4968
+
4910
4969
  def _resolve_codex_bin(args: argparse.Namespace) -> str:
4911
4970
  explicit = str(getattr(args, "codex_bin", "")).strip()
4912
4971
  if explicit:
@@ -6269,6 +6328,78 @@ def _frontier_terminal_declared(state: dict[str, Any] | None, stack: dict[str, A
6269
6328
  return completion_status in {"complete", "completed", "done", "terminal"}
6270
6329
 
6271
6330
 
6331
+ def _frontier_normalized_signal_text(values: Sequence[Any]) -> str:
6332
+ text = " ".join(str(value or "") for value in values).lower()
6333
+ return re.sub(r"[^a-z0-9]+", "-", text).strip("-")
6334
+
6335
+
6336
+ def _frontier_modeled_checkoff_needed(blockers: list[str], next_action: str) -> bool:
6337
+ raw_values = [*blockers, next_action]
6338
+ normalized = _frontier_normalized_signal_text(raw_values)
6339
+ tokens = {token for token in normalized.split("-") if token}
6340
+ for hint in FRONTIER_MODELED_CHECKOFF_HINTS:
6341
+ normalized_hint = _frontier_normalized_signal_text([hint])
6342
+ if not normalized_hint:
6343
+ continue
6344
+ if "-" in normalized_hint and normalized_hint in normalized:
6345
+ return True
6346
+ if "-" not in normalized_hint and normalized_hint in tokens:
6347
+ return True
6348
+ return False
6349
+
6350
+
6351
+ def _frontier_modeled_checkoff_active(
6352
+ *,
6353
+ active_primary_id: str,
6354
+ active_list_id: str,
6355
+ active_item_id: str,
6356
+ next_action: str,
6357
+ ) -> bool:
6358
+ normalized = _frontier_normalized_signal_text([active_primary_id, active_list_id, active_item_id, next_action])
6359
+ return any(hint in normalized for hint in FRONTIER_MODELED_CHECKOFF_ACTIVE_HINTS)
6360
+
6361
+
6362
+ def _frontier_modeled_checkoff_payload(
6363
+ *,
6364
+ blockers: list[str],
6365
+ next_action: str,
6366
+ active_primary_kind: str,
6367
+ active_primary_id: str,
6368
+ active_list_id: str,
6369
+ active_item_id: str,
6370
+ ) -> dict[str, Any]:
6371
+ eligible = _frontier_modeled_checkoff_needed(blockers, next_action)
6372
+ active = _frontier_modeled_checkoff_active(
6373
+ active_primary_id=active_primary_id,
6374
+ active_list_id=active_list_id,
6375
+ active_item_id=active_item_id,
6376
+ next_action=next_action,
6377
+ )
6378
+ return {
6379
+ "eligible": eligible,
6380
+ "active": active,
6381
+ "phase_id": FRONTIER_MODELED_CHECKOFF_PHASE_ID,
6382
+ "list_id": FRONTIER_MODELED_CHECKOFF_LIST_ID,
6383
+ "item_id": FRONTIER_MODELED_CHECKOFF_ITEM_ID,
6384
+ "label": "Modeled Checkoff v0",
6385
+ "active_primary_kind": active_primary_kind,
6386
+ "active_primary_id": active_primary_id,
6387
+ "active_list_id": active_list_id,
6388
+ "active_item_id": active_item_id,
6389
+ "detected_blockers": blockers,
6390
+ "boundary": (
6391
+ "Modeled/proxy pre-review only; this does not clear human, clinical, legal, "
6392
+ "regulatory, privacy, publication, or real-world expert gates."
6393
+ ),
6394
+ "professional_lens_requirements": [
6395
+ "Name the modeled professional role and decision language.",
6396
+ "State evidence expectations, uncertainty, stop conditions, and provenance requirements.",
6397
+ "Separate packet-improvement decisions from external gate clearance.",
6398
+ ],
6399
+ "suggested_next_command": "orp frontier modeled-checkoff activate --json" if eligible and not active else "",
6400
+ }
6401
+
6402
+
6272
6403
  def _frontier_build_continuation_payload(
6273
6404
  repo_root: Path,
6274
6405
  stack: dict[str, Any] | None,
@@ -6404,6 +6535,33 @@ def _frontier_build_continuation_payload(
6404
6535
  if not next_action:
6405
6536
  next_action = _frontier_additional_item_summary(active_list, active_item)
6406
6537
 
6538
+ modeled_checkoff = _frontier_modeled_checkoff_payload(
6539
+ blockers=blockers,
6540
+ next_action=next_action,
6541
+ active_primary_kind=active_primary_kind,
6542
+ active_primary_id=active_primary_id,
6543
+ active_list_id=active_list_id,
6544
+ active_item_id=active_item_id,
6545
+ )
6546
+ has_active_additional_work = int(additional_summary["active_items"]) > 0
6547
+ if modeled_checkoff["eligible"] and not modeled_checkoff["active"] and not has_active_additional_work:
6548
+ issues.append(
6549
+ {
6550
+ "severity": "warning",
6551
+ "code": "modeled_checkoff_continuation_available",
6552
+ "message": (
6553
+ "frontier appears to be at a review/checkoff gate; activate a modeled-checkoff "
6554
+ "continuation so delegation can improve the packet without clearing the external gate."
6555
+ ),
6556
+ }
6557
+ )
6558
+ suggested_next_command = modeled_checkoff["suggested_next_command"]
6559
+ if not next_action:
6560
+ next_action = (
6561
+ f"Activate modeled checkoff continuation {modeled_checkoff['list_id']}/"
6562
+ f"{modeled_checkoff['item_id']}."
6563
+ )
6564
+
6407
6565
  if not active_item_id and int(additional_summary["pending_items"]) > 0:
6408
6566
  issues.append(
6409
6567
  {
@@ -6412,7 +6570,8 @@ def _frontier_build_continuation_payload(
6412
6570
  "message": "frontier additional queue has pending work but no active item; run `orp frontier additional activate-next` before delegating queue work.",
6413
6571
  }
6414
6572
  )
6415
- suggested_next_command = "orp frontier additional activate-next --json"
6573
+ if not suggested_next_command:
6574
+ suggested_next_command = "orp frontier additional activate-next --json"
6416
6575
  next_pending = additional_summary.get("next_pending")
6417
6576
  if isinstance(next_pending, dict) and not next_action:
6418
6577
  next_action = (
@@ -6452,6 +6611,7 @@ def _frontier_build_continuation_payload(
6452
6611
  "strict": strict,
6453
6612
  "issues": issues,
6454
6613
  "summary": summary,
6614
+ "modeled_checkoff": modeled_checkoff,
6455
6615
  "next_action": next_action,
6456
6616
  "suggested_next_command": suggested_next_command,
6457
6617
  "paths": {key: _path_for_state(value, repo_root) for key, value in paths.items()},
@@ -6838,6 +6998,7 @@ def _frontier_doctor_payload(repo_root: Path) -> dict[str, Any]:
6838
6998
  "continuation": {
6839
6999
  "ok": continuation["ok"],
6840
7000
  "summary": continuation["summary"],
7001
+ "modeled_checkoff": continuation.get("modeled_checkoff", {}),
6841
7002
  "next_action": continuation["next_action"],
6842
7003
  "suggested_next_command": continuation["suggested_next_command"],
6843
7004
  },
@@ -7614,6 +7775,7 @@ def _normalize_link_project_payload(
7614
7775
  "world_name": ["world_name", "worldName", "name"],
7615
7776
  "github_url": ["github_url", "githubUrl"],
7616
7777
  "linked_email": ["linked_email", "linkedEmail"],
7778
+ "active_feature_id": ["active_feature_id", "activeFeatureId", "linked_feature_id", "linkedFeatureId"],
7617
7779
  "notes": ["notes"],
7618
7780
  }.items():
7619
7781
  value = ""
@@ -7623,6 +7785,15 @@ def _normalize_link_project_payload(
7623
7785
  break
7624
7786
  if value:
7625
7787
  payload[key] = value
7788
+ frontier_feature_ids = raw.get("frontier_feature_ids", raw.get("frontierFeatureIds"))
7789
+ if isinstance(frontier_feature_ids, dict):
7790
+ normalized_feature_ids = {
7791
+ str(key).strip(): str(value).strip()
7792
+ for key, value in frontier_feature_ids.items()
7793
+ if str(key).strip() and str(value).strip()
7794
+ }
7795
+ if normalized_feature_ids:
7796
+ payload["frontier_feature_ids"] = normalized_feature_ids
7626
7797
  payload["source"] = source
7627
7798
  return payload
7628
7799
 
@@ -11267,6 +11438,494 @@ def _audit_agents_root(
11267
11438
  }
11268
11439
 
11269
11440
 
11441
+ def _resolve_codex_home_path(raw: str = "") -> Path:
11442
+ explicit = str(raw or "").strip()
11443
+ if explicit:
11444
+ return Path(explicit).expanduser().resolve()
11445
+ env_home = str(os.environ.get("CODEX_HOME", "")).strip()
11446
+ if env_home:
11447
+ return Path(env_home).expanduser().resolve()
11448
+ return (Path.home() / ".codex").resolve()
11449
+
11450
+
11451
+ def _codex_session_hook_path(codex_home: Path) -> Path:
11452
+ return codex_home / "hooks" / CODEX_SESSION_START_HOOK
11453
+
11454
+
11455
+ def _codex_global_guide_block() -> str:
11456
+ return (
11457
+ f"{CODEX_GLOBAL_GUIDE_BEGIN}\n"
11458
+ "## ORP Codex Global Layer\n\n"
11459
+ "- Scope: This block is managed by ORP for Codex's global `~/.codex/AGENTS.md` layer.\n"
11460
+ "- Instruction hierarchy: Codex global guidance comes first, then repo/project `AGENTS.md` files, then narrower directory guidance.\n"
11461
+ "- Keep universal preferences here; keep repo build/test commands and project-specific facts in the nearest project `AGENTS.md`.\n"
11462
+ "- Treat ORP as process-only governance. Evidence and project truth still live in canonical artifacts such as code, data, logs, papers, proofs, and repo docs.\n"
11463
+ "- Prefer non-mutating startup checks. Ask before creating or rewriting project `AGENTS.md`, ORP files, Clawdad registration, remotes, or service config.\n"
11464
+ "- Use `orp hygiene --json` before long delegation, after material writeback, before remote side effects or unbudgeted paid compute, and when dirty state grows unexpectedly.\n"
11465
+ "- Local stack surfaces to detect and mention when relevant: `orp`, `clawdad`, `dumpy-files`, `cmail`, and repo-local `AGENTS.md` / `PROTOCOL.md` / `.clawdad` files.\n"
11466
+ "- Startup automation belongs in Codex hooks. Keep this file as policy and preference, not a place for live shell checks.\n\n"
11467
+ "### ORP-Owned Startup Rhythm\n\n"
11468
+ "1. Let the Codex `SessionStart` hook inspect the current directory and inject a short status note.\n"
11469
+ "2. Read the nearest project instructions before editing.\n"
11470
+ "3. If ORP or Clawdad are missing from a substantive project, suggest `orp init --projects-root <root> --with-clawdad` or `orp agents sync`; do not run it silently.\n"
11471
+ "4. Keep user-written preferences outside this managed block so ORP can refresh the block without overwriting them.\n"
11472
+ f"{CODEX_GLOBAL_GUIDE_END}\n"
11473
+ )
11474
+
11475
+
11476
+ def _default_codex_global_agents_text() -> str:
11477
+ return (
11478
+ "# Codex Global Instructions\n\n"
11479
+ "This file is loaded by Codex before project-level `AGENTS.md` files. Keep durable user preferences here, and let ORP refresh only the marked block below.\n\n"
11480
+ f"{_codex_global_guide_block()}"
11481
+ )
11482
+
11483
+
11484
+ def _codex_session_start_hook_script() -> str:
11485
+ return r'''#!/usr/bin/env python3
11486
+ """ORP-managed Codex SessionStart hook.
11487
+
11488
+ This hook is intentionally non-mutating. It inspects the current working
11489
+ directory and injects a compact startup note into Codex context.
11490
+ """
11491
+
11492
+ from __future__ import annotations
11493
+
11494
+ import json
11495
+ import os
11496
+ from pathlib import Path
11497
+ import shutil
11498
+ import subprocess
11499
+ import sys
11500
+
11501
+
11502
+ def _read_hook_input() -> dict:
11503
+ try:
11504
+ payload = json.load(sys.stdin)
11505
+ except Exception:
11506
+ return {}
11507
+ return payload if isinstance(payload, dict) else {}
11508
+
11509
+
11510
+ def _run_git_root(cwd: Path) -> str:
11511
+ try:
11512
+ proc = subprocess.run(
11513
+ ["git", "-C", str(cwd), "rev-parse", "--show-toplevel"],
11514
+ capture_output=True,
11515
+ text=True,
11516
+ timeout=2,
11517
+ )
11518
+ except Exception:
11519
+ return ""
11520
+ if proc.returncode != 0:
11521
+ return ""
11522
+ return proc.stdout.strip()
11523
+
11524
+
11525
+ def _is_within(child: Path, parent: Path) -> bool:
11526
+ try:
11527
+ child.resolve().relative_to(parent.resolve())
11528
+ return True
11529
+ except Exception:
11530
+ return False
11531
+
11532
+
11533
+ def _tool_status(name: str) -> str:
11534
+ return shutil.which(name) or "missing"
11535
+
11536
+
11537
+ def _bool_text(value: bool) -> str:
11538
+ return "present" if value else "missing"
11539
+
11540
+
11541
+ def main() -> int:
11542
+ payload = _read_hook_input()
11543
+ cwd = Path(str(payload.get("cwd") or os.getcwd())).expanduser()
11544
+ source = str(payload.get("source") or "").strip() or "startup"
11545
+ git_root_text = _run_git_root(cwd)
11546
+ root = Path(git_root_text).expanduser() if git_root_text else cwd
11547
+ codex_home = Path(os.environ.get("CODEX_HOME") or Path.home() / ".codex").expanduser()
11548
+
11549
+ global_agents = codex_home / "AGENTS.md"
11550
+ project_agents = root / "AGENTS.md"
11551
+ protocol = root / "PROTOCOL.md"
11552
+ orp_config = root / "orp.yml"
11553
+ clawdad_dir = root / ".clawdad"
11554
+ code_root = Path("/Volumes/Code_2TB/code")
11555
+ parent_agents = code_root / "AGENTS.md" if _is_within(root, code_root) and root != code_root else None
11556
+
11557
+ tools = {
11558
+ "orp": _tool_status("orp"),
11559
+ "clawdad": _tool_status("clawdad"),
11560
+ "dumpy-files": _tool_status("dumpy-files"),
11561
+ "cmail": _tool_status("cmail"),
11562
+ }
11563
+ missing_tools = [name for name, value in tools.items() if value == "missing"]
11564
+
11565
+ lines = [
11566
+ "ORP/Codex startup context:",
11567
+ f"- source: {source}",
11568
+ f"- cwd: {cwd}",
11569
+ f"- project root: {root}",
11570
+ f"- global Codex AGENTS.md: {_bool_text(global_agents.exists())}",
11571
+ f"- project AGENTS.md: {_bool_text(project_agents.exists())}",
11572
+ f"- ORP signals: PROTOCOL.md={_bool_text(protocol.exists())}, orp.yml={_bool_text(orp_config.exists())}",
11573
+ f"- Clawdad project state: {_bool_text(clawdad_dir.exists())}",
11574
+ ]
11575
+ if parent_agents is not None:
11576
+ lines.append(f"- Code_2TB umbrella AGENTS.md: {_bool_text(parent_agents.exists())} at {parent_agents}")
11577
+ if missing_tools:
11578
+ lines.append(f"- missing optional stack tools: {', '.join(missing_tools)}")
11579
+ if not project_agents.exists():
11580
+ lines.append("- note: ask before creating project AGENTS.md; suggested commands include `orp agents sync` or `orp init --projects-root <root>`.")
11581
+ if not protocol.exists() and not orp_config.exists():
11582
+ lines.append("- note: no ORP project signals detected; ask before initializing ORP for this directory.")
11583
+
11584
+ print(
11585
+ json.dumps(
11586
+ {
11587
+ "hookSpecificOutput": {
11588
+ "hookEventName": "SessionStart",
11589
+ "additionalContext": "\n".join(lines),
11590
+ }
11591
+ }
11592
+ )
11593
+ )
11594
+ return 0
11595
+
11596
+
11597
+ if __name__ == "__main__":
11598
+ raise SystemExit(main())
11599
+ '''
11600
+
11601
+
11602
+ def _codex_config_hooks_enabled(text: str) -> bool:
11603
+ in_features = False
11604
+ for raw_line in str(text or "").splitlines():
11605
+ stripped = raw_line.strip()
11606
+ section_match = re.match(r"^\[([^\]]+)\]\s*$", stripped)
11607
+ if section_match:
11608
+ in_features = section_match.group(1).strip() == "features"
11609
+ continue
11610
+ if not in_features or stripped.startswith("#"):
11611
+ continue
11612
+ if re.match(r"^codex_hooks\s*=\s*true(?:\s*(?:#.*)?)?$", stripped, flags=re.IGNORECASE):
11613
+ return True
11614
+ return False
11615
+
11616
+
11617
+ def _codex_config_enable_hooks_text(text: str) -> tuple[str, str]:
11618
+ original = str(text or "")
11619
+ if _codex_config_hooks_enabled(original):
11620
+ return (original if original.endswith("\n") or not original else original + "\n"), "kept"
11621
+
11622
+ lines = original.splitlines()
11623
+ feature_start = -1
11624
+ feature_end = len(lines)
11625
+ for index, line in enumerate(lines):
11626
+ section_match = re.match(r"^\s*\[([^\]]+)\]\s*$", line)
11627
+ if not section_match:
11628
+ continue
11629
+ if section_match.group(1).strip() == "features":
11630
+ feature_start = index
11631
+ feature_end = len(lines)
11632
+ for next_index in range(index + 1, len(lines)):
11633
+ if re.match(r"^\s*\[[^\]]+\]\s*$", lines[next_index]):
11634
+ feature_end = next_index
11635
+ break
11636
+ break
11637
+
11638
+ if feature_start >= 0:
11639
+ for index in range(feature_start + 1, feature_end):
11640
+ if re.match(r"^\s*codex_hooks\s*=", lines[index]):
11641
+ lines[index] = "codex_hooks = true"
11642
+ return "\n".join(lines).rstrip() + "\n", "updated"
11643
+ lines.insert(feature_end, "codex_hooks = true")
11644
+ return "\n".join(lines).rstrip() + "\n", "updated"
11645
+
11646
+ if lines and lines[-1].strip():
11647
+ lines.append("")
11648
+ lines.extend(["[features]", "codex_hooks = true"])
11649
+ return "\n".join(lines).rstrip() + "\n", "updated"
11650
+
11651
+
11652
+ def _codex_desired_session_hook(codex_home: Path) -> dict[str, Any]:
11653
+ script = _codex_session_hook_path(codex_home)
11654
+ return {
11655
+ "matcher": "startup|resume|clear",
11656
+ "hooks": [
11657
+ {
11658
+ "type": "command",
11659
+ "command": f"/usr/bin/python3 {shlex.quote(str(script))}",
11660
+ "timeout": 10,
11661
+ "statusMessage": "Checking ORP/Codex startup context",
11662
+ }
11663
+ ],
11664
+ }
11665
+
11666
+
11667
+ def _codex_hooks_payload_has_session_hook(payload: dict[str, Any], codex_home: Path) -> bool:
11668
+ desired_command = str(_codex_desired_session_hook(codex_home)["hooks"][0]["command"])
11669
+ hooks = payload.get("hooks")
11670
+ if not isinstance(hooks, dict):
11671
+ return False
11672
+ groups = hooks.get("SessionStart")
11673
+ if not isinstance(groups, list):
11674
+ return False
11675
+ for group in groups:
11676
+ if not isinstance(group, dict):
11677
+ continue
11678
+ handlers = group.get("hooks")
11679
+ if not isinstance(handlers, list):
11680
+ continue
11681
+ for handler in handlers:
11682
+ if isinstance(handler, dict) and str(handler.get("command", "")).strip() == desired_command:
11683
+ return True
11684
+ return False
11685
+
11686
+
11687
+ def _codex_load_hooks_for_edit(path: Path) -> tuple[dict[str, Any], str]:
11688
+ if not path.exists():
11689
+ return {"hooks": {}}, "missing"
11690
+ try:
11691
+ payload = json.loads(path.read_text(encoding="utf-8"))
11692
+ except Exception:
11693
+ return {}, "invalid_json"
11694
+ if not isinstance(payload, dict):
11695
+ return {}, "invalid_json"
11696
+ return payload, "loaded"
11697
+
11698
+
11699
+ def _codex_merge_hooks_payload(payload: dict[str, Any], codex_home: Path) -> tuple[dict[str, Any], str]:
11700
+ if _codex_hooks_payload_has_session_hook(payload, codex_home):
11701
+ return payload, "kept"
11702
+ updated = copy.deepcopy(payload)
11703
+ hooks = updated.get("hooks")
11704
+ if not isinstance(hooks, dict):
11705
+ hooks = {}
11706
+ updated["hooks"] = hooks
11707
+ groups = hooks.get("SessionStart")
11708
+ if not isinstance(groups, list):
11709
+ groups = []
11710
+ hooks["SessionStart"] = groups
11711
+ groups.append(_codex_desired_session_hook(codex_home))
11712
+ return updated, "updated"
11713
+
11714
+
11715
+ def _codex_audit_payload(codex_home: Path) -> dict[str, Any]:
11716
+ codex_home = codex_home.expanduser().resolve()
11717
+ agents_path = codex_home / "AGENTS.md"
11718
+ config_path = codex_home / "config.toml"
11719
+ hooks_path = codex_home / "hooks.json"
11720
+ hook_script_path = _codex_session_hook_path(codex_home)
11721
+
11722
+ agents_text = agents_path.read_text(encoding="utf-8") if agents_path.exists() else ""
11723
+ guide_block = _extract_marked_block(agents_text, CODEX_GLOBAL_GUIDE_BEGIN, CODEX_GLOBAL_GUIDE_END)
11724
+ expected_guide = _codex_global_guide_block()
11725
+ config_text = config_path.read_text(encoding="utf-8") if config_path.exists() else ""
11726
+ hooks_payload = _read_json_if_exists(hooks_path)
11727
+ script_text = hook_script_path.read_text(encoding="utf-8") if hook_script_path.exists() else ""
11728
+
11729
+ checks = {
11730
+ "codex_home": {
11731
+ "path": str(codex_home),
11732
+ "exists": codex_home.exists(),
11733
+ "status": "ok" if codex_home.exists() else "missing",
11734
+ },
11735
+ "global_agents": {
11736
+ "path": str(agents_path),
11737
+ "exists": agents_path.exists(),
11738
+ "managed_block_present": bool(guide_block),
11739
+ "managed_block_synced": guide_block == expected_guide,
11740
+ "status": "ok" if agents_path.exists() and guide_block == expected_guide else "needs_sync",
11741
+ },
11742
+ "config": {
11743
+ "path": str(config_path),
11744
+ "exists": config_path.exists(),
11745
+ "codex_hooks_enabled": _codex_config_hooks_enabled(config_text),
11746
+ "status": "ok" if config_path.exists() and _codex_config_hooks_enabled(config_text) else "needs_sync",
11747
+ },
11748
+ "hooks_json": {
11749
+ "path": str(hooks_path),
11750
+ "exists": hooks_path.exists(),
11751
+ "session_start_hook_present": _codex_hooks_payload_has_session_hook(hooks_payload, codex_home),
11752
+ "status": "ok" if hooks_path.exists() and _codex_hooks_payload_has_session_hook(hooks_payload, codex_home) else "needs_sync",
11753
+ },
11754
+ "session_start_script": {
11755
+ "path": str(hook_script_path),
11756
+ "exists": hook_script_path.exists(),
11757
+ "synced": script_text == _codex_session_start_hook_script(),
11758
+ "status": "ok" if hook_script_path.exists() and script_text == _codex_session_start_hook_script() else "needs_sync",
11759
+ },
11760
+ }
11761
+ stack_tools = {
11762
+ name: {
11763
+ "path": _tool_path(name),
11764
+ "available": bool(_tool_path(name)),
11765
+ "required": name == "orp",
11766
+ }
11767
+ for name in ("orp", "clawdad", "dumpy-files", "cmail")
11768
+ }
11769
+ ok = all(row.get("status") == "ok" for row in checks.values())
11770
+ next_actions = [] if ok else [f"orp agents codex sync --codex-home {shlex.quote(str(codex_home))} --json"]
11771
+ return {
11772
+ "ok": ok,
11773
+ "schema_version": "1.0.0",
11774
+ "kind": "orp_codex_global_audit",
11775
+ "codex_home": str(codex_home),
11776
+ "checks": checks,
11777
+ "stack_tools": stack_tools,
11778
+ "next_actions": next_actions,
11779
+ }
11780
+
11781
+
11782
+ def _codex_sync_payload(codex_home: Path, *, dry_run: bool = False) -> dict[str, Any]:
11783
+ codex_home = codex_home.expanduser().resolve()
11784
+ agents_path = codex_home / "AGENTS.md"
11785
+ config_path = codex_home / "config.toml"
11786
+ hooks_path = codex_home / "hooks.json"
11787
+ hook_script_path = _codex_session_hook_path(codex_home)
11788
+ actions: list[dict[str, Any]] = []
11789
+
11790
+ if not dry_run:
11791
+ codex_home.mkdir(parents=True, exist_ok=True)
11792
+
11793
+ expected_guide = _codex_global_guide_block()
11794
+ if agents_path.exists():
11795
+ agents_text = agents_path.read_text(encoding="utf-8")
11796
+ updated_agents, guide_action = _upsert_marked_block(
11797
+ agents_text,
11798
+ begin_marker=CODEX_GLOBAL_GUIDE_BEGIN,
11799
+ end_marker=CODEX_GLOBAL_GUIDE_END,
11800
+ block_text=expected_guide,
11801
+ )
11802
+ if updated_agents != agents_text and not dry_run:
11803
+ _write_text(agents_path, updated_agents if updated_agents.endswith("\n") else updated_agents + "\n")
11804
+ action = "kept" if updated_agents == agents_text else ("would_update" if dry_run else "updated")
11805
+ else:
11806
+ updated_agents = _default_codex_global_agents_text()
11807
+ guide_action = "created"
11808
+ if not dry_run:
11809
+ _write_text(agents_path, updated_agents)
11810
+ action = "would_create" if dry_run else "created"
11811
+ actions.append({"path": str(agents_path), "action": action, "guide_action": guide_action})
11812
+
11813
+ config_existed = config_path.exists()
11814
+ config_text = config_path.read_text(encoding="utf-8") if config_existed else ""
11815
+ updated_config, config_action = _codex_config_enable_hooks_text(config_text)
11816
+ if updated_config != config_text and not dry_run:
11817
+ _write_text(config_path, updated_config)
11818
+ actions.append(
11819
+ {
11820
+ "path": str(config_path),
11821
+ "action": (
11822
+ "kept"
11823
+ if updated_config == config_text
11824
+ else ("would_create" if dry_run and not config_existed else "would_update" if dry_run else "created" if not config_existed else "updated")
11825
+ ),
11826
+ "codex_hooks_action": config_action,
11827
+ }
11828
+ )
11829
+
11830
+ hooks_existed = hooks_path.exists()
11831
+ hooks_payload, hooks_load_status = _codex_load_hooks_for_edit(hooks_path)
11832
+ if hooks_load_status == "invalid_json":
11833
+ actions.append({"path": str(hooks_path), "action": "blocked_invalid_json"})
11834
+ audit = _codex_audit_payload(codex_home) if not dry_run else {}
11835
+ return {
11836
+ "ok": False,
11837
+ "schema_version": "1.0.0",
11838
+ "kind": "orp_codex_global_sync",
11839
+ "codex_home": str(codex_home),
11840
+ "dry_run": dry_run,
11841
+ "actions": actions,
11842
+ "audit": audit,
11843
+ "warnings": ["hooks.json exists but is not a JSON object; ORP did not overwrite it."],
11844
+ }
11845
+ updated_hooks, hooks_action = _codex_merge_hooks_payload(hooks_payload, codex_home)
11846
+ if updated_hooks != hooks_payload and not dry_run:
11847
+ _write_json(hooks_path, updated_hooks)
11848
+ actions.append(
11849
+ {
11850
+ "path": str(hooks_path),
11851
+ "action": (
11852
+ "kept"
11853
+ if updated_hooks == hooks_payload
11854
+ else ("would_create" if dry_run and not hooks_existed else "would_update" if dry_run else "created" if not hooks_existed else "updated")
11855
+ ),
11856
+ "session_start_hook_action": hooks_action,
11857
+ }
11858
+ )
11859
+
11860
+ script_text = _codex_session_start_hook_script()
11861
+ script_existed = hook_script_path.exists()
11862
+ existing_script = hook_script_path.read_text(encoding="utf-8") if script_existed else ""
11863
+ if existing_script != script_text and not dry_run:
11864
+ _write_text(hook_script_path, script_text)
11865
+ try:
11866
+ hook_script_path.chmod(0o755)
11867
+ except Exception:
11868
+ pass
11869
+ actions.append(
11870
+ {
11871
+ "path": str(hook_script_path),
11872
+ "action": (
11873
+ "kept"
11874
+ if existing_script == script_text
11875
+ else ("would_create" if dry_run and not script_existed else "would_update" if dry_run else "created" if not script_existed else "updated")
11876
+ ),
11877
+ }
11878
+ )
11879
+
11880
+ audit = _codex_audit_payload(codex_home) if not dry_run else {}
11881
+ return {
11882
+ "ok": True if dry_run else bool(audit.get("ok")),
11883
+ "schema_version": "1.0.0",
11884
+ "kind": "orp_codex_global_sync",
11885
+ "codex_home": str(codex_home),
11886
+ "dry_run": dry_run,
11887
+ "actions": actions,
11888
+ "audit": audit,
11889
+ "warnings": [],
11890
+ }
11891
+
11892
+
11893
+ def _render_codex_audit_report(payload: dict[str, Any]) -> str:
11894
+ lines = [
11895
+ "ORP Codex Global Audit",
11896
+ f"ok={'true' if payload.get('ok') else 'false'}",
11897
+ f"codex_home={payload.get('codex_home', '')}",
11898
+ ]
11899
+ checks = payload.get("checks")
11900
+ if isinstance(checks, dict):
11901
+ for name, row in checks.items():
11902
+ if isinstance(row, dict):
11903
+ lines.append(f"{name}={row.get('status', '')}:{row.get('path', '')}")
11904
+ stack_tools = payload.get("stack_tools")
11905
+ if isinstance(stack_tools, dict):
11906
+ for name, row in stack_tools.items():
11907
+ if isinstance(row, dict):
11908
+ lines.append(f"tool.{name}={row.get('path', '') or 'missing'}")
11909
+ for next_action in payload.get("next_actions", []):
11910
+ lines.append(f"next={next_action}")
11911
+ return "\n".join(lines)
11912
+
11913
+
11914
+ def _render_codex_sync_report(payload: dict[str, Any]) -> str:
11915
+ lines = [
11916
+ "ORP Codex Global Sync",
11917
+ f"ok={'true' if payload.get('ok') else 'false'}",
11918
+ f"codex_home={payload.get('codex_home', '')}",
11919
+ f"dry_run={'true' if payload.get('dry_run') else 'false'}",
11920
+ ]
11921
+ for action in payload.get("actions", []):
11922
+ if isinstance(action, dict):
11923
+ lines.append(f"action={action.get('action', '')}:{action.get('path', '')}")
11924
+ for warning in payload.get("warnings", []):
11925
+ lines.append(f"warning={warning}")
11926
+ return "\n".join(lines)
11927
+
11928
+
11270
11929
  def _resolve_projects_root_path(raw: str) -> Path:
11271
11930
  text = str(raw or "").strip()
11272
11931
  if not text:
@@ -12649,6 +13308,7 @@ def _about_payload() -> dict[str, Any]:
12649
13308
  ["frontier", "add-phase"],
12650
13309
  ["frontier", "set-live"],
12651
13310
  ["frontier", "render"],
13311
+ ["frontier", "sync-idea"],
12652
13312
  ["frontier", "doctor"],
12653
13313
  ],
12654
13314
  },
@@ -12833,6 +13493,7 @@ def _about_payload() -> dict[str, Any]:
12833
13493
  {"name": "frontier_add_phase", "path": ["frontier", "add-phase"], "json_output": True},
12834
13494
  {"name": "frontier_set_live", "path": ["frontier", "set-live"], "json_output": True},
12835
13495
  {"name": "frontier_render", "path": ["frontier", "render"], "json_output": True},
13496
+ {"name": "frontier_sync_idea", "path": ["frontier", "sync-idea"], "json_output": True},
12836
13497
  {"name": "frontier_doctor", "path": ["frontier", "doctor"], "json_output": True},
12837
13498
  {"name": "branch_start", "path": ["branch", "start"], "json_output": True},
12838
13499
  {"name": "checkpoint_create", "path": ["checkpoint", "create"], "json_output": True},
@@ -14469,6 +15130,26 @@ def cmd_agents_audit(args: argparse.Namespace) -> int:
14469
15130
  return 0 if payload.get("ok") else 1
14470
15131
 
14471
15132
 
15133
def cmd_agents_codex_audit(args: argparse.Namespace) -> int:
    """CLI handler: audit the Codex home directory.

    Resolves the Codex home (``--codex-home`` override respected), builds the
    audit payload, and prints it as JSON or as a text report.  Exit code is 0
    when the payload reports ok, 1 otherwise.
    """
    home = _resolve_codex_home_path(str(getattr(args, "codex_home", "") or ""))
    payload = _codex_audit_payload(home)
    exit_code = 0 if payload.get("ok") else 1
    if args.json_output:
        _print_json(payload)
    else:
        print(_render_codex_audit_report(payload))
    return exit_code
15141
+
15142
+
15143
def cmd_agents_codex_sync(args: argparse.Namespace) -> int:
    """CLI handler: sync ORP-managed guidance into the Codex home directory.

    Honours ``--codex-home`` and ``--dry-run`` flags; prints the sync payload
    as JSON or as a text report.  Exit code is 0 when the payload reports ok,
    1 otherwise.
    """
    home = _resolve_codex_home_path(str(getattr(args, "codex_home", "") or ""))
    payload = _codex_sync_payload(home, dry_run=bool(getattr(args, "dry_run", False)))
    exit_code = 0 if payload.get("ok") else 1
    if args.json_output:
        _print_json(payload)
    else:
        print(_render_codex_sync_report(payload))
    return exit_code
15151
+
15152
+
14472
15153
  def _render_governance_status_text(payload: dict[str, Any]) -> str:
14473
15154
  git = payload.get("git", {}) if isinstance(payload.get("git"), dict) else {}
14474
15155
  runtime = payload.get("runtime", {}) if isinstance(payload.get("runtime"), dict) else {}
@@ -15808,6 +16489,225 @@ def cmd_frontier_additional_complete_active(args: argparse.Namespace) -> int:
15808
16489
  return 0
15809
16490
 
15810
16491
 
16492
def cmd_frontier_modeled_checkoff_activate(args: argparse.Namespace) -> int:
    """CLI handler: make the "modeled checkoff" continuation the active frontier.

    Ensures the modeled-checkoff version/milestone/phase exists in the stack
    (creating any missing level with canned labels and criteria), flips that
    path to "active" while demoting other previously-active rows to "planned",
    mirrors a matching "additional" list/item for the packet work, persists the
    materialized views, and emits the result as JSON or key=value lines.
    Returns 0 on success; raises RuntimeError for an invalid --band value.
    """
    repo_root = Path(args.repo_root).resolve()
    stack = _frontier_load_stack(repo_root)
    state = _frontier_load_state(repo_root)
    additional = _frontier_load_additional(repo_root, stack)

    # Identifiers: CLI flag wins, else the module-level modeled-checkoff default.
    phase_id = str(args.phase_id or FRONTIER_MODELED_CHECKOFF_PHASE_ID).strip() or FRONTIER_MODELED_CHECKOFF_PHASE_ID
    list_id = str(args.list_id or FRONTIER_MODELED_CHECKOFF_LIST_ID).strip() or FRONTIER_MODELED_CHECKOFF_LIST_ID
    item_id = str(args.item_id or FRONTIER_MODELED_CHECKOFF_ITEM_ID).strip() or FRONTIER_MODELED_CHECKOFF_ITEM_ID

    # Placement: CLI flag, else the currently active frontier, else defaults.
    version_id = str(args.version or state.get("active_version", "") or "modeled-checkoff").strip()
    milestone_id = str(args.milestone or state.get("active_milestone", "") or phase_id).strip()
    band = str(args.band or state.get("band", "") or "structured").strip()
    if band not in FRONTIER_BANDS:
        raise RuntimeError(f"frontier band must be one of: {', '.join(FRONTIER_BANDS)}")

    # Ensure the target version exists; create a canned one when missing.
    version = _frontier_find_version(stack, version_id)
    if version is None:
        version = {
            "id": version_id,
            "label": "Modeled Checkoff",
            "intent": "Keep review/checkoff gates moving through modeled packet improvement without clearing external gates.",
            "status": "active",
            "milestones": [],
        }
        versions = stack.get("versions")
        if not isinstance(versions, list):
            versions = []
            stack["versions"] = versions
        versions.append(version)

    # Ensure the target milestone exists.  If it already lives under some
    # other version, re-target that parent version instead of the one above.
    parent_version, milestone = _frontier_find_milestone(stack, milestone_id)
    if milestone is None:
        milestone = {
            "id": milestone_id,
            "parent_version": version_id,
            "label": "Modeled Checkoff Continuation",
            "band": band,
            "status": "active",
            "depends_on": [],
            "success_criteria": [
                "delegation has an active modeled-checkoff packet path",
                "external review gates remain explicitly uncleared",
            ],
            "phases": [],
        }
        milestones = version.get("milestones")
        if not isinstance(milestones, list):
            milestones = []
            version["milestones"] = milestones
        milestones.append(milestone)
    elif isinstance(parent_version, dict):
        version_id = str(parent_version.get("id", "")).strip() or version_id
        version = parent_version

    # Ensure the target phase exists; for an existing phase only fill in
    # missing fields (setdefault) so operator edits are preserved.
    phase = _frontier_find_phase(milestone, phase_id)
    phase_requirements = [
        "state that this is modeled/proxy pre-review, not real expert approval",
        "name the professional lens, decision language, evidence expectations, uncertainty, and stop conditions",
        "preserve provenance, scope, and uncertainty on scientific or medical claims",
        "separate packet-improvement decisions from clinical, legal, regulatory, privacy, publication, or human gate clearance",
    ]
    phase_success = [
        "a delegate can continue by writing modeled checkoff packets",
        "the packet records which external gates remain open",
        "the output is safe for public artifacts and does not include patient-specific advice",
    ]
    phase_plans = [
        "classify the blocker or review gate",
        "select the modeled professional lens and decision rubric",
        "run packet checks that improve evidence, scope, uncertainty, and stop-condition language",
    ]
    if phase is None:
        phase = {
            "id": phase_id,
            "label": "Modeled Checkoff v0",
            "status": "active",
            "goal": "Convert review/checkoff blockers into modeled packet-improvement work without clearing the external gate.",
            "depends_on": [],
            "requirements": phase_requirements,
            "success_criteria": phase_success,
            "plans": phase_plans,
            "compute_hooks": [],
        }
        phases = milestone.get("phases")
        if not isinstance(phases, list):
            phases = []
            milestone["phases"] = phases
        phases.append(phase)
    else:
        phase["status"] = "active"
        phase.setdefault("requirements", phase_requirements)
        phase.setdefault("success_criteria", phase_success)
        phase.setdefault("plans", phase_plans)

    # Single-active invariant: mark exactly the chosen version/milestone/phase
    # "active"; any other row that was "active" is demoted to "planned", and
    # empty statuses default to "planned".
    # NOTE(review): milestones and phases are matched by id across *all*
    # versions here — an id collision under another version would also be
    # marked active; confirm that is intended.
    versions = stack.get("versions")
    if isinstance(versions, list):
        for version_row in versions:
            if not isinstance(version_row, dict):
                continue
            version_row["status"] = "active" if str(version_row.get("id", "")).strip() == version_id else (
                "planned" if str(version_row.get("status", "")).strip() == "active" else str(version_row.get("status", "")).strip() or "planned"
            )
            milestones = version_row.get("milestones")
            if not isinstance(milestones, list):
                continue
            for milestone_row in milestones:
                if not isinstance(milestone_row, dict):
                    continue
                milestone_row["status"] = "active" if str(milestone_row.get("id", "")).strip() == milestone_id else (
                    "planned" if str(milestone_row.get("status", "")).strip() == "active" else str(milestone_row.get("status", "")).strip() or "planned"
                )
                phases = milestone_row.get("phases")
                if not isinstance(phases, list):
                    continue
                for phase_row in phases:
                    if not isinstance(phase_row, dict):
                        continue
                    if str(milestone_row.get("id", "")).strip() == milestone_id and str(phase_row.get("id", "")).strip() == phase_id:
                        phase_row["status"] = "active"
                    elif str(phase_row.get("status", "")).strip() == "active":
                        phase_row["status"] = "planned"

    # Mirror an "additional" list/item for the modeled-checkoff packet work,
    # creating either when missing and re-activating when present.
    item_list = _frontier_find_additional_list(additional, list_id)
    if item_list is None:
        item_list = {
            "id": list_id,
            "label": "Modeled checkoff continuations",
            "status": "active",
            "items": [],
        }
        lists = additional.get("lists")
        if not isinstance(lists, list):
            lists = []
            additional["lists"] = lists
        lists.append(item_list)
    item_list["status"] = "active"
    items = item_list.get("items")
    if not isinstance(items, list):
        items = []
        item_list["items"] = items
    item = _frontier_find_additional_item(item_list, item_id)
    item_goal = (
        "Build the modeled professional lens taxonomy and packet-check rubric for the blocked frontier; "
        "do not claim real expert sign-off or clear external gates."
    )
    item_requirements = [
        "declare modeled/proxy status in the packet",
        "identify professional persona, language, decision criteria, evidence standard, uncertainty, and stop conditions",
        "carry provenance and scope for scientific, medical, legal, privacy, or publication-sensitive claims",
    ]
    item_success = [
        "delegation can proceed with explicit modeled checkoff records",
        "remaining human or external gates are named as still open",
    ]
    if item is None:
        item = {
            "id": item_id,
            "label": "Modeled professional lens taxonomy",
            "status": "active",
            "goal": item_goal,
            "depends_on": [],
            "requirements": item_requirements,
            "success_criteria": item_success,
            "plans": phase_plans,
        }
        items.append(item)
    else:
        # Existing item: re-activate, but only backfill missing fields.
        item["status"] = "active"
        item.setdefault("goal", item_goal)
        item.setdefault("requirements", item_requirements)
        item.setdefault("success_criteria", item_success)
        item.setdefault("plans", phase_plans)

    additional["active_list_id"] = list_id
    additional["active_item_id"] = item_id
    if not str(additional.get("program_id", "")).strip():
        additional["program_id"] = str(stack.get("program_id", "")).strip()
    if not str(additional.get("label", "")).strip():
        additional["label"] = str(stack.get("label", "")).strip()

    # Point the live frontier state at the modeled-checkoff path.
    state["active_version"] = version_id
    state["active_milestone"] = milestone_id
    state["active_phase"] = phase_id
    state["band"] = band
    state["blocked_by"] = _coerce_string_list(state.get("blocked_by"))
    state["next_action"] = str(args.next_action or "").strip() or (
        f"Run modeled checkoff packet {list_id}/{item_id}: model the professional lens, "
        "improve the packet, and keep external gates explicitly open."
    )

    # Persist the rewritten stack/state/additional views to disk.
    written = _frontier_write_materialized_views(repo_root, stack, state)
    written.update(_frontier_write_additional_views(repo_root, additional))
    modeled_checkoff = _frontier_modeled_checkoff_payload(
        blockers=_coerce_string_list(state.get("blocked_by")),
        next_action=str(state.get("next_action", "")).strip(),
        active_primary_kind="phase",
        active_primary_id=phase_id,
        active_list_id=list_id,
        active_item_id=item_id,
    )
    result = {
        "ok": True,
        "modeled_checkoff": modeled_checkoff,
        "state": state,
        "phase": phase,
        "list": item_list,
        "item": item,
        "paths": written,
    }
    if args.json_output:
        _print_json(result)
    else:
        print(f"phase_id={phase_id}")
        print(f"active_additional={list_id}/{item_id}")
        print(f"next_action={state['next_action']}")
    return 0
16709
+
16710
+
15811
16711
  def _print_frontier_diagnostic_payload(payload: dict[str, Any]) -> None:
15812
16712
  print(f"ok={'true' if payload.get('ok') else 'false'}")
15813
16713
  print(f"next_action={payload.get('next_action', '') or '(none)'}")
@@ -15821,6 +16721,15 @@ def _print_frontier_diagnostic_payload(payload: dict[str, Any]) -> None:
15821
16721
  if isinstance(additional, dict):
15822
16722
  print(f"pending_additional_items={additional.get('pending_items', 0)}")
15823
16723
  print(f"active_additional={additional.get('active_list_id', '') or '(none)'}/{additional.get('active_item_id', '') or '(none)'}")
16724
+ modeled_checkoff = payload.get("modeled_checkoff")
16725
+ if not isinstance(modeled_checkoff, dict):
16726
+ continuation = payload.get("continuation")
16727
+ modeled_checkoff = continuation.get("modeled_checkoff", {}) if isinstance(continuation, dict) else {}
16728
+ if isinstance(modeled_checkoff, dict) and modeled_checkoff.get("eligible"):
16729
+ print(f"modeled_checkoff_active={'true' if modeled_checkoff.get('active') else 'false'}")
16730
+ command = str(modeled_checkoff.get("suggested_next_command", "")).strip()
16731
+ if command:
16732
+ print(f"modeled_checkoff_suggested_next_command={command}")
15824
16733
  for issue in payload.get("issues", []):
15825
16734
  if isinstance(issue, dict):
15826
16735
  print(f"issue={issue.get('severity','')}:{issue.get('code','')}:{issue.get('message','')}")
@@ -15846,6 +16755,7 @@ def cmd_frontier_preflight_delegate(args: argparse.Namespace) -> int:
15846
16755
  payload["next_action"] = continuation.get("next_action", "")
15847
16756
  payload["suggested_next_command"] = continuation.get("suggested_next_command", "")
15848
16757
  payload["summary"] = continuation.get("summary", {})
16758
+ payload["modeled_checkoff"] = continuation.get("modeled_checkoff", {})
15849
16759
  payload["preflight"] = {
15850
16760
  "ready": bool(payload["ok"]),
15851
16761
  "purpose": "Block delegation when the frontier cannot prove a single safe continuation or terminal state.",
@@ -16061,6 +16971,305 @@ def cmd_frontier_render(args: argparse.Namespace) -> int:
16061
16971
  return 0
16062
16972
 
16063
16973
 
16974
# Matches the hidden HTML marker embedded in synced feature notes
# (e.g. "<!-- ORP:FRONTIER_PHASE:my-phase-id -->"); group(1) is the phase id.
FRONTIER_FEATURE_MARKER_PATTERN = re.compile(r"<!--\s*ORP:FRONTIER_PHASE:([^>\s]+)\s*-->")
16975
+
16976
+
16977
def _frontier_tas_text(repo_root: Path) -> str:
    """Return the stripped contents of the frontier TAS.md, or "" if absent."""
    tas_path = _frontier_paths(repo_root)["root"] / "TAS.md"
    return _read_text(tas_path).strip() if tas_path.exists() else ""
16982
+
16983
+
16984
+ def _frontier_tas_title(text: str) -> str:
16985
+ for line in str(text or "").splitlines():
16986
+ stripped = line.strip()
16987
+ if stripped.startswith("# "):
16988
+ return stripped[2:].strip()
16989
+ return ""
16990
+
16991
+
16992
def _frontier_plan_notes(repo_root: Path, stack: dict[str, Any], state: dict[str, Any]) -> tuple[str, str]:
    """Derive the hosted-idea (title, notes) pair from local frontier state.

    The title falls back through: TAS.md heading -> stack label -> stack
    program id -> repo directory name.  The notes body joins the current
    next action, the active version/milestone/phase triple, and the raw
    TAS text with blank lines, omitting whichever pieces are empty.
    """
    tas_text = _frontier_tas_text(repo_root)
    title_candidates = [
        _frontier_tas_title(tas_text),
        str(stack.get("label", "")).strip(),
        str(stack.get("program_id", "")).strip(),
    ]
    title = next((candidate for candidate in title_candidates if candidate), repo_root.name)

    active_parts = [
        str(state.get(key, "")).strip()
        for key in ("active_version", "active_milestone", "active_phase")
    ]
    next_action = str(state.get("next_action", "")).strip()

    sections: list[str] = []
    if next_action:
        sections.append(f"Current next action: {next_action}")
    if any(active_parts):
        sections.append("Active frontier: " + " / ".join(part for part in active_parts if part))
    if tas_text:
        sections.append(tas_text)
    return title, "\n\n".join(sections).strip()
17011
+
17012
+
17013
def _frontier_phase_completed(status: str) -> bool:
    """True when the normalized phase status denotes a finished phase."""
    normalized = _frontier_status(status, default="planned")
    return normalized in ("complete", "completed", "done", "terminal")
17015
+
17016
+
17017
def _frontier_phase_tasks(stack: dict[str, Any], state: dict[str, Any]) -> list[dict[str, Any]]:
    """Build one task row per phase of the active milestone.

    Each row carries the phase id (synthesized as "frontier-phase-N" when
    missing), a display title, the effective status (forced to "active" for
    the currently active phase), completion/active flags, and the raw phase
    mapping.  Returns [] when no active milestone can be resolved.
    """
    # Robustness fix: "current_frontier" may be present but malformed
    # (None, list, ...).  The previous chained .get() assumed a dict and
    # raised AttributeError; guard with isinstance like the rest of the file.
    current = stack.get("current_frontier")
    if not isinstance(current, dict):
        current = {}
    active_milestone_id = str(state.get("active_milestone", "") or current.get("active_milestone", "")).strip()
    active_phase_id = str(state.get("active_phase", "") or current.get("active_phase", "")).strip()
    _, milestone = _frontier_find_milestone(stack, active_milestone_id) if active_milestone_id else (None, None)
    phases = milestone.get("phases") if isinstance(milestone, dict) else []
    rows: list[dict[str, Any]] = []
    for index, phase in enumerate(phases if isinstance(phases, list) else []):
        if not isinstance(phase, dict):
            continue
        phase_id = str(phase.get("id", "")).strip() or f"frontier-phase-{index + 1}"
        status = _frontier_status(phase.get("status"), default="planned")
        rows.append(
            {
                "phase_id": phase_id,
                "title": str(phase.get("label", "")).strip() or str(phase.get("goal", "")).strip() or phase_id,
                # The active phase is always reported "active", regardless of
                # its stored status; "completed" still reflects the stored one.
                "status": "active" if phase_id == active_phase_id else status,
                "completed": _frontier_phase_completed(status),
                "active": phase_id == active_phase_id,
                "phase": phase,
            }
        )
    return rows
17039
+
17040
+
17041
def _frontier_feature_notes(task: dict[str, Any]) -> str:
    """Render the notes body for a synced hosted feature.

    The first line embeds the ORP:FRONTIER_PHASE HTML marker so later syncs
    can re-match the feature to its phase; optional Goal / Requirements /
    Success criteria / Plan sections follow when the phase provides them.
    """
    raw_phase = task.get("phase")
    phase = raw_phase if isinstance(raw_phase, dict) else {}
    phase_id = str(task.get("phase_id", "")).strip()
    status_text = str(task.get("status", "")).strip() or "planned"
    out = [
        f"<!-- ORP:FRONTIER_PHASE:{phase_id} -->",
        f"ORP frontier phase id: {phase_id}",
        f"Status: {status_text}",
    ]
    goal = str(phase.get("goal", "")).strip()
    if goal:
        out += ["", "Goal:", goal]
    sections = (
        ("Requirements", "requirements"),
        ("Success criteria", "success_criteria"),
        ("Plan", "plans"),
    )
    for heading, key in sections:
        entries = _coerce_string_list(phase.get(key))
        if not entries:
            continue
        out += ["", f"{heading}:"]
        out += [f"- {entry}" for entry in entries]
    return "\n".join(out).strip()
17062
+
17063
+
17064
+ def _feature_text_for_matching(feature: dict[str, Any]) -> str:
17065
+ parts = [
17066
+ str(feature.get("title", "") or ""),
17067
+ str(feature.get("notes", "") or ""),
17068
+ str(feature.get("detail", "") or ""),
17069
+ ]
17070
+ sections = feature.get("detailSections", feature.get("details"))
17071
+ if isinstance(sections, list):
17072
+ for section in sections:
17073
+ if isinstance(section, dict):
17074
+ parts.append(str(section.get("body", "") or ""))
17075
+ parts.append(str(section.get("detail", "") or ""))
17076
+ return "\n".join(parts)
17077
+
17078
+
17079
def _frontier_phase_id_from_feature(feature: dict[str, Any]) -> str:
    """Return the phase id embedded in a feature's marker comment, or ""."""
    hit = FRONTIER_FEATURE_MARKER_PATTERN.search(_feature_text_for_matching(feature))
    if hit is None:
        return ""
    return hit.group(1).strip()
17082
+
17083
+
17084
def _frontier_match_features_by_phase(features: list[dict[str, Any]]) -> dict[str, dict[str, Any]]:
    """Index hosted features by their embedded phase id (first match wins)."""
    by_phase: dict[str, dict[str, Any]] = {}
    for row in features:
        marker = _frontier_phase_id_from_feature(row)
        if marker:
            # setdefault keeps the first feature seen for a given phase id.
            by_phase.setdefault(marker, row)
    return by_phase
17091
+
17092
+
17093
def _frontier_sync_update_idea(args: argparse.Namespace, idea_id: str, title: str, notes: str, *, dry_run: bool) -> dict[str, Any]:
    """PATCH the linked hosted idea with the frontier title/notes.

    In dry-run mode the current remote idea is fetched and returned untouched.
    Otherwise returns the normalized idea from the PATCH response.
    """
    remote = _get_remote_idea(args, idea_id)
    if dry_run:
        return remote
    session = _require_hosted_session(args)
    request_body: dict[str, Any] = {"title": title, "notes": notes}
    stamp = str(remote.get("updatedAt", "")).strip()
    if stamp:
        # Echo the last-seen updatedAt — appears to act as a concurrency
        # token for the hosted API (same convention as feature updates).
        request_body["updatedAt"] = stamp
    response = _request_hosted_json(
        base_url=_resolve_hosted_base_url(args, session),
        path=f"/api/cli/ideas/{urlparse.quote(idea_id)}",
        method="PATCH",
        token=str(session.get("token", "")).strip(),
        body=request_body,
    )
    return _normalize_remote_idea_payload(response)["idea"]
17113
+
17114
+
17115
def _frontier_sync_create_idea(args: argparse.Namespace, title: str, notes: str, *, dry_run: bool) -> dict[str, Any]:
    """POST a new hosted idea; in dry-run mode return an unsaved stub.

    The dry-run stub carries an empty id so callers can tell nothing was
    actually created.
    """
    if dry_run:
        return {"id": "", "title": title, "notes": notes}
    session = _require_hosted_session(args)
    response = _request_hosted_json(
        base_url=_resolve_hosted_base_url(args, session),
        path="/api/cli/ideas",
        method="POST",
        token=str(session.get("token", "")).strip(),
        body={"title": title, "notes": notes},
    )
    return _normalize_remote_idea_payload(response)["idea"]
17127
+
17128
+
17129
def _frontier_sync_feature(
    args: argparse.Namespace,
    *,
    idea_id: str,
    task: dict[str, Any],
    existing: dict[str, Any] | None,
    dry_run: bool,
) -> dict[str, Any]:
    """Create or update the hosted feature mirroring one frontier phase task.

    Returns {"phase_id", "action" ("create"|"update"), "feature"}.  In
    dry-run mode no network call is made: the existing feature (or a blank
    stub with an empty id) is echoed back.
    """
    phase_id = str(task.get("phase_id", "")).strip()
    body = {
        "ideaId": idea_id,
        "title": str(task.get("title", "")).strip(),
        # Notes embed the ORP:FRONTIER_PHASE marker so future syncs can
        # re-match this feature to its phase.
        "notes": _frontier_feature_notes(task),
        "completed": bool(task.get("completed")),
    }
    action = "update" if existing else "create"
    if dry_run:
        return {
            "phase_id": phase_id,
            "action": action,
            "feature": existing or {"id": "", "ideaId": idea_id, "title": body["title"], "completed": body["completed"]},
        }
    session = _require_hosted_session(args)
    if existing:
        # PATCH the matched feature; echo updatedAt, which appears to act as
        # a concurrency token for the hosted API — TODO confirm.
        updated_at = str(existing.get("updatedAt", "")).strip()
        if updated_at:
            body["updatedAt"] = updated_at
        payload = _request_hosted_json(
            base_url=_resolve_hosted_base_url(args, session),
            path=f"/api/cli/features/{urlparse.quote(str(existing.get('id', '')).strip())}",
            method="PATCH",
            token=str(session.get("token", "")).strip(),
            body=body,
        )
    else:
        payload = _request_hosted_json(
            base_url=_resolve_hosted_base_url(args, session),
            path=f"/api/cli/ideas/{urlparse.quote(idea_id)}/features",
            method="POST",
            token=str(session.get("token", "")).strip(),
            body=body,
        )
    created = _normalize_remote_feature_payload(payload)
    if bool(task.get("completed")) and str(created.get("id", "")).strip():
        # Follow-up PATCH that sets only "completed" — presumably because the
        # create/update endpoints can ignore that field (the same pattern is
        # used by cmd_feature_add); TODO confirm against the hosted API.
        created = _patch_remote_feature_completion(
            args,
            feature_id=str(created.get("id", "")).strip(),
            completed=True,
        )
        payload = {"ok": True, "feature": created}
    return {
        "phase_id": phase_id,
        "action": action,
        "feature": _normalize_remote_feature_payload(payload),
    }
17184
+
17185
+
17186
def cmd_frontier_sync_idea(args: argparse.Namespace) -> int:
    """CLI handler: mirror the local frontier plan into a hosted idea.

    Updates (or, with --create-idea, creates) the linked hosted idea's
    title/notes from TAS.md and frontier state, then creates/updates one
    hosted feature per phase of the active milestone, matching existing
    features via the embedded ORP:FRONTIER_PHASE marker.  Unless
    --no-link-project or --dry-run is given, the local project link file is
    rewritten with the idea id and the phase->feature id map.  Returns 0;
    raises RuntimeError when no idea is linked and --create-idea is absent.
    """
    repo_root = Path(args.repo_root).resolve()
    stack = _frontier_load_stack(repo_root)
    state = _frontier_load_state(repo_root)
    project_link = _read_link_project(repo_root)
    requested_idea_id = str(getattr(args, "idea_id", "") or "").strip()
    # Explicit --idea-id wins over the previously linked idea.
    idea_id = requested_idea_id or str(project_link.get("idea_id", "")).strip()
    title, notes = _frontier_plan_notes(repo_root, stack, state)
    dry_run = bool(getattr(args, "dry_run", False))
    create_idea = bool(getattr(args, "create_idea", False))

    if not idea_id and not create_idea:
        raise RuntimeError(
            "No hosted idea is linked. Run `orp frontier sync-idea --create-idea --json`, or pass --idea-id."
        )

    idea_created = False
    if idea_id:
        idea = _frontier_sync_update_idea(args, idea_id, title, notes, dry_run=dry_run)
    else:
        idea = _frontier_sync_create_idea(args, title, notes, dry_run=dry_run)
        # NOTE(review): in dry-run mode the stub idea has an empty id, so the
        # feature listing below is skipped by the `if idea_id` guard.
        idea_id = str(idea.get("id", "")).strip()
        idea_created = True

    tasks = _frontier_phase_tasks(stack, state)
    existing_features = _list_remote_features(args, idea_id) if idea_id else []
    features_by_phase = _frontier_match_features_by_phase(existing_features)
    synced_features: list[dict[str, Any]] = []
    # Start from the previously linked phase->feature id map, keeping only
    # well-formed non-empty entries.
    frontier_feature_ids: dict[str, str] = {
        str(key): str(value)
        for key, value in (project_link.get("frontier_feature_ids") if isinstance(project_link.get("frontier_feature_ids"), dict) else {}).items()
        if str(key).strip() and str(value).strip()
    }
    active_feature_id = ""

    for task in tasks:
        phase_id = str(task.get("phase_id", "")).strip()
        existing = features_by_phase.get(phase_id)
        synced = _frontier_sync_feature(args, idea_id=idea_id, task=task, existing=existing, dry_run=dry_run)
        synced_features.append(synced)
        feature_id = str(synced.get("feature", {}).get("id", "")).strip()
        if feature_id:
            frontier_feature_ids[phase_id] = feature_id
            if bool(task.get("active")):
                active_feature_id = feature_id

    # Persist the link file unless disabled; in dry-run just report the
    # existing link path (when one is already present).
    project_link_path = ""
    should_link_project = not bool(getattr(args, "no_link_project", False))
    if should_link_project and not dry_run and idea_id:
        next_link = {
            **project_link,
            "idea_id": idea_id,
            "idea_title": str(idea.get("title", "")).strip() or title,
            "project_root": str(repo_root),
            "active_feature_id": active_feature_id,
            "frontier_feature_ids": frontier_feature_ids,
            "linked_at_utc": str(project_link.get("linked_at_utc", "")).strip() or _now_utc(),
            "source": str(project_link.get("source", "")).strip() or "cli",
        }
        project_link_path = _path_for_state(_write_link_project(repo_root, next_link), repo_root)
    elif should_link_project and project_link:
        path = _link_project_path(repo_root)
        project_link_path = _path_for_state(path, repo_root) if path is not None else ""

    result = {
        "ok": True,
        "dry_run": dry_run,
        "idea_created": idea_created and not dry_run,
        "idea": idea,
        "tasks": tasks,
        "features": synced_features,
        "active_feature_id": active_feature_id,
        "frontier_feature_ids": frontier_feature_ids,
        "project_link_path": project_link_path,
    }
    if args.json_output:
        _print_json(result)
    else:
        print(f"idea.id={idea_id}")
        print(f"idea.title={str(idea.get('title', '')).strip() or title}")
        print(f"features.synced={len(synced_features)}")
        print(f"active_feature_id={active_feature_id}")
        if project_link_path:
            print(f"project.link_path={project_link_path}")
    return 0
17271
+
17272
+
16064
17273
  def cmd_frontier_doctor(args: argparse.Namespace) -> int:
16065
17274
  repo_root = Path(args.repo_root).resolve()
16066
17275
  payload = _frontier_doctor_payload(repo_root)
@@ -24997,6 +26206,59 @@ def _normalize_remote_idea_payload(payload: dict[str, Any]) -> dict[str, Any]:
24997
26206
  }
24998
26207
 
24999
26208
 
26209
+ def _normalize_remote_feature_payload(payload: dict[str, Any]) -> dict[str, Any]:
26210
+ feature = payload.get("feature") if isinstance(payload.get("feature"), dict) else payload
26211
+ if not isinstance(feature, dict):
26212
+ raise RuntimeError("Hosted ORP returned an invalid feature payload.")
26213
+ return feature
26214
+
26215
+
26216
+ def _normalize_remote_features_payload(payload: dict[str, Any]) -> list[dict[str, Any]]:
26217
+ features = payload.get("features")
26218
+ if not isinstance(features, list):
26219
+ features = payload.get("items")
26220
+ if not isinstance(features, list):
26221
+ idea = payload.get("idea") if isinstance(payload.get("idea"), dict) else {}
26222
+ features = idea.get("features") if isinstance(idea.get("features"), list) else []
26223
+ return [row for row in features if isinstance(row, dict)]
26224
+
26225
+
26226
def _patch_remote_feature_completion(
    args: argparse.Namespace,
    *,
    feature_id: str,
    completed: bool,
) -> dict[str, Any]:
    """PATCH only the completion flag of a hosted feature and return it."""
    session = _require_hosted_session(args)
    token = str(session.get("token", "")).strip()
    response = _request_hosted_json(
        base_url=_resolve_hosted_base_url(args, session),
        path=f"/api/cli/features/{urlparse.quote(feature_id)}",
        method="PATCH",
        token=token,
        body={"completed": completed},
    )
    return _normalize_remote_feature_payload(response)
26241
+
26242
+
26243
def _list_remote_features(args: argparse.Namespace, idea_id: str, *, fallback_to_idea: bool = True) -> list[dict[str, Any]]:
    """List the hosted features attached to an idea.

    Queries the dedicated features endpoint; on HostedApiError, and when
    *fallback_to_idea* is true, falls back to the "features" list embedded
    in the idea payload (non-dict rows dropped).  Raises RuntimeError when
    the endpoint returns a non-mapping payload.
    """
    session = _require_hosted_session(args)
    try:
        response = _request_hosted_json(
            base_url=_resolve_hosted_base_url(args, session),
            path=f"/api/cli/ideas/{urlparse.quote(idea_id)}/features",
            token=str(session.get("token", "")).strip(),
        )
    except HostedApiError:
        if not fallback_to_idea:
            raise
        idea = _get_remote_idea(args, idea_id)
        embedded = idea.get("features")
        rows = embedded if isinstance(embedded, list) else []
        return [row for row in rows if isinstance(row, dict)]
    if not isinstance(response, dict):
        raise RuntimeError("Hosted ORP returned an invalid features payload.")
    return _normalize_remote_features_payload(response)
26260
+
26261
+
25000
26262
  def _get_remote_idea(args: argparse.Namespace, idea_id: str) -> dict[str, Any]:
25001
26263
  session = _require_hosted_session(args)
25002
26264
  payload = _request_hosted_json(
@@ -25201,7 +26463,7 @@ def cmd_idea_restore(args: argparse.Namespace) -> int:
25201
26463
 
25202
26464
  def cmd_feature_list(args: argparse.Namespace) -> int:
25203
26465
  idea = _get_remote_idea(args, args.idea_id)
25204
- features = idea.get("features") if isinstance(idea.get("features"), list) else []
26466
+ features = _list_remote_features(args, args.idea_id)
25205
26467
  result = {
25206
26468
  "idea_id": str(idea.get("id", "")).strip(),
25207
26469
  "idea_title": str(idea.get("title", "")).strip(),
@@ -25243,7 +26505,8 @@ def _find_feature_by_id(idea_payload: dict[str, Any], feature_id: str) -> dict[s
25243
26505
 
25244
26506
  def cmd_feature_show(args: argparse.Namespace) -> int:
25245
26507
  idea = _get_remote_idea(args, args.idea_id)
25246
- feature = _find_feature_by_id(idea, args.feature_id)
26508
+ features = _list_remote_features(args, args.idea_id)
26509
+ feature = _find_feature_by_id({"features": features}, args.feature_id)
25247
26510
  result = {
25248
26511
  "idea_id": str(idea.get("id", "")).strip(),
25249
26512
  "feature": feature,
@@ -25271,14 +26534,22 @@ def cmd_feature_add(args: argparse.Namespace) -> int:
25271
26534
  token=str(session.get("token", "")).strip(),
25272
26535
  body=body,
25273
26536
  )
26537
+ feature = _normalize_remote_feature_payload(payload)
26538
+ if "completed" in body and str(feature.get("id", "")).strip():
26539
+ feature = _patch_remote_feature_completion(
26540
+ args,
26541
+ feature_id=str(feature.get("id", "")).strip(),
26542
+ completed=bool(body["completed"]),
26543
+ )
26544
+ payload = {"ok": True, "feature": feature}
25274
26545
  if args.json_output:
25275
26546
  _print_json(payload)
25276
26547
  else:
25277
26548
  _print_pairs(
25278
26549
  [
25279
- ("feature.id", str(payload.get("id", "")).strip()),
25280
- ("feature.title", str(payload.get("title", "")).strip()),
25281
- ("idea.id", str(payload.get("ideaId", args.idea_id)).strip()),
26550
+ ("feature.id", str(feature.get("id", "")).strip()),
26551
+ ("feature.title", str(feature.get("title", "")).strip()),
26552
+ ("idea.id", str(feature.get("ideaId", args.idea_id)).strip()),
25282
26553
  ]
25283
26554
  )
25284
26555
  return 0
@@ -25286,11 +26557,11 @@ def cmd_feature_add(args: argparse.Namespace) -> int:
25286
26557
 
25287
26558
  def cmd_feature_update(args: argparse.Namespace) -> int:
25288
26559
  session = _require_hosted_session(args)
25289
- idea = _get_remote_idea(args, args.idea_id)
25290
- current = _find_feature_by_id(idea, args.feature_id)
26560
+ features = _list_remote_features(args, args.idea_id)
26561
+ current = _find_feature_by_id({"features": features}, args.feature_id)
25291
26562
  body = _build_remote_feature_body(args, args.idea_id, current)
25292
26563
  updated_at = str(current.get("updatedAt", "")).strip()
25293
- if updated_at:
26564
+ if updated_at and _feature_body_has_metadata_update(body):
25294
26565
  body["updatedAt"] = updated_at
25295
26566
  payload = _request_hosted_json(
25296
26567
  base_url=_resolve_hosted_base_url(args, session),
@@ -25299,14 +26570,15 @@ def cmd_feature_update(args: argparse.Namespace) -> int:
25299
26570
  token=str(session.get("token", "")).strip(),
25300
26571
  body=body,
25301
26572
  )
26573
+ feature = _normalize_remote_feature_payload(payload)
25302
26574
  if args.json_output:
25303
26575
  _print_json(payload)
25304
26576
  else:
25305
26577
  _print_pairs(
25306
26578
  [
25307
- ("feature.id", str(payload.get("id", "")).strip()),
25308
- ("feature.title", str(payload.get("title", "")).strip()),
25309
- ("feature.updated_at", str(payload.get("updatedAt", "")).strip()),
26579
+ ("feature.id", str(feature.get("id", "")).strip()),
26580
+ ("feature.title", str(feature.get("title", "")).strip()),
26581
+ ("feature.updated_at", str(feature.get("updatedAt", "")).strip()),
25310
26582
  ]
25311
26583
  )
25312
26584
  return 0
@@ -27522,6 +28794,9 @@ def build_parser() -> argparse.ArgumentParser:
27522
28794
  default=None,
27523
28795
  help="Feature visibility override when supported by the hosted workspace",
27524
28796
  )
28797
+ completion_group = parser.add_mutually_exclusive_group()
28798
+ completion_group.add_argument("--completed", action="store_true", help="Mark feature as completed")
28799
+ completion_group.add_argument("--incomplete", action="store_true", help="Mark feature as not completed")
27525
28800
 
27526
28801
  s_home = sub.add_parser(
27527
28802
  "home",
@@ -27818,6 +29093,8 @@ def build_parser() -> argparse.ArgumentParser:
27818
29093
  "Examples:\n"
27819
29094
  " orp agents root show\n"
27820
29095
  " orp agents root set /absolute/path/to/projects\n"
29096
+ " orp agents codex audit\n"
29097
+ " orp agents codex sync\n"
27821
29098
  " orp agents sync\n"
27822
29099
  " orp agents sync --role umbrella --root /absolute/path/to/projects\n"
27823
29100
  " orp agents sync --projects-root /absolute/path/to/projects\n"
@@ -27881,6 +29158,41 @@ def build_parser() -> argparse.ArgumentParser:
27881
29158
  add_json_flag(s_agents_audit)
27882
29159
  s_agents_audit.set_defaults(func=cmd_agents_audit, json_output=False)
27883
29160
 
29161
+ s_agents_codex = agents_sub.add_parser(
29162
+ "codex",
29163
+ help="Audit or sync Codex global AGENTS.md and SessionStart hook scaffolding",
29164
+ )
29165
+ agents_codex_sub = s_agents_codex.add_subparsers(dest="agents_codex_cmd", required=True)
29166
+
29167
+ s_agents_codex_audit = agents_codex_sub.add_parser(
29168
+ "audit",
29169
+ help="Audit Codex global AGENTS.md, hooks.json, hook script, and codex_hooks feature flag",
29170
+ )
29171
+ s_agents_codex_audit.add_argument(
29172
+ "--codex-home",
29173
+ default="",
29174
+ help="Codex home directory (default: CODEX_HOME or ~/.codex)",
29175
+ )
29176
+ add_json_flag(s_agents_codex_audit)
29177
+ s_agents_codex_audit.set_defaults(func=cmd_agents_codex_audit, json_output=False)
29178
+
29179
+ s_agents_codex_sync = agents_codex_sub.add_parser(
29180
+ "sync",
29181
+ help="Create or refresh Codex global AGENTS.md and non-mutating SessionStart hook scaffolding",
29182
+ )
29183
+ s_agents_codex_sync.add_argument(
29184
+ "--codex-home",
29185
+ default="",
29186
+ help="Codex home directory (default: CODEX_HOME or ~/.codex)",
29187
+ )
29188
+ s_agents_codex_sync.add_argument(
29189
+ "--dry-run",
29190
+ action="store_true",
29191
+ help="Plan the Codex global sync without writing files",
29192
+ )
29193
+ add_json_flag(s_agents_codex_sync)
29194
+ s_agents_codex_sync.set_defaults(func=cmd_agents_codex_sync, json_output=False)
29195
+
27884
29196
  s_opportunities = sub.add_parser(
27885
29197
  "opportunities",
27886
29198
  help="Local-first opportunity boards for contests, programs, grants, and similar openings",
@@ -30023,6 +31335,43 @@ def build_parser() -> argparse.ArgumentParser:
30023
31335
  add_json_flag(s_frontier_set_live)
30024
31336
  s_frontier_set_live.set_defaults(func=cmd_frontier_set_live, json_output=False)
30025
31337
 
31338
+ s_frontier_modeled_checkoff = frontier_sub.add_parser(
31339
+ "modeled-checkoff",
31340
+ help="Activate a modeled packet-check continuation for review/checkoff gates",
31341
+ )
31342
+ frontier_modeled_checkoff_sub = s_frontier_modeled_checkoff.add_subparsers(dest="frontier_modeled_checkoff_cmd", required=True)
31343
+
31344
+ s_frontier_modeled_checkoff_activate = frontier_modeled_checkoff_sub.add_parser(
31345
+ "activate",
31346
+ help="Create and activate the modeled-checkoff phase and additional item",
31347
+ )
31348
+ s_frontier_modeled_checkoff_activate.add_argument("--version", default="", help="Version id to use (default: current active version)")
31349
+ s_frontier_modeled_checkoff_activate.add_argument("--milestone", default="", help="Milestone id to use (default: current active milestone)")
31350
+ s_frontier_modeled_checkoff_activate.add_argument(
31351
+ "--phase-id",
31352
+ default=FRONTIER_MODELED_CHECKOFF_PHASE_ID,
31353
+ help="Modeled-checkoff phase id",
31354
+ )
31355
+ s_frontier_modeled_checkoff_activate.add_argument(
31356
+ "--list-id",
31357
+ default=FRONTIER_MODELED_CHECKOFF_LIST_ID,
31358
+ help="Additional item list id",
31359
+ )
31360
+ s_frontier_modeled_checkoff_activate.add_argument(
31361
+ "--item-id",
31362
+ default=FRONTIER_MODELED_CHECKOFF_ITEM_ID,
31363
+ help="Additional item id",
31364
+ )
31365
+ s_frontier_modeled_checkoff_activate.add_argument(
31366
+ "--band",
31367
+ default="",
31368
+ choices=["", *FRONTIER_BANDS],
31369
+ help="Optional explicit planning band",
31370
+ )
31371
+ s_frontier_modeled_checkoff_activate.add_argument("--next-action", default="", help="Optional live next action override")
31372
+ add_json_flag(s_frontier_modeled_checkoff_activate)
31373
+ s_frontier_modeled_checkoff_activate.set_defaults(func=cmd_frontier_modeled_checkoff_activate, json_output=False)
31374
+
30026
31375
  s_frontier_render = frontier_sub.add_parser(
30027
31376
  "render",
30028
31377
  help="Refresh the materialized frontier JSON and markdown views",
@@ -30030,6 +31379,26 @@ def build_parser() -> argparse.ArgumentParser:
30030
31379
  add_json_flag(s_frontier_render)
30031
31380
  s_frontier_render.set_defaults(func=cmd_frontier_render, json_output=False)
30032
31381
 
31382
+ s_frontier_sync_idea = frontier_sub.add_parser(
31383
+ "sync-idea",
31384
+ help="Sync local frontier plan and phase tasks to a hosted ORP idea/features",
31385
+ )
31386
+ s_frontier_sync_idea.add_argument("--idea-id", default="", help="Hosted idea id; defaults to the local project link")
31387
+ s_frontier_sync_idea.add_argument(
31388
+ "--create-idea",
31389
+ action="store_true",
31390
+ help="Create a hosted idea when the project is not already linked",
31391
+ )
31392
+ s_frontier_sync_idea.add_argument(
31393
+ "--no-link-project",
31394
+ action="store_true",
31395
+ help="Do not update the local project link with the synced idea/feature ids",
31396
+ )
31397
+ s_frontier_sync_idea.add_argument("--dry-run", action="store_true", help="Preview hosted changes without writing")
31398
+ add_base_url_flag(s_frontier_sync_idea)
31399
+ add_json_flag(s_frontier_sync_idea)
31400
+ s_frontier_sync_idea.set_defaults(func=cmd_frontier_sync_idea, json_output=False)
31401
+
30033
31402
  s_frontier_doctor = frontier_sub.add_parser(
30034
31403
  "doctor",
30035
31404
  help="Validate frontier consistency and optionally re-render views",