open-research-protocol 0.4.32 → 0.4.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/CHANGELOG.md +22 -0
  2. package/cli/orp.py +951 -1
  3. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -6,6 +6,28 @@ There was no prior in-repo changelog file, so the first formal entry starts
6
6
  with the currently shipped `v0.4.4` release and summarizes the full release
7
7
  delta reflected in this repo.
8
8
 
9
+ ## v0.4.33 - 2026-04-30
10
+
11
+ This release teaches ORP to manage Codex's global instruction and startup hook
12
+ layer, and adds a modeled-checkoff frontier continuation for review-gated work.
13
+
14
+ ### Added
15
+
16
+ - Added `orp agents codex audit` and `orp agents codex sync` to audit or
17
+ bootstrap Codex global `AGENTS.md`, `hooks.json`, the non-mutating
18
+ `SessionStart` hook script, and the `codex_hooks` feature flag.
19
+ - Added frontier modeled-checkoff detection and
20
+ `orp frontier modeled-checkoff activate` so review/checkoff blockers can be
21
+ converted into explicitly modeled packet-improvement work without claiming
22
+ real human, clinical, legal, regulatory, privacy, publication, or expert gate
23
+ clearance.
24
+
25
+ ### Changed
26
+
27
+ - Frontier delegation preflight now reports modeled-checkoff eligibility when a
28
+ live frontier appears blocked on review or sign-off, while preserving existing
29
+ active additional work as the safer continuation when it exists.
30
+
9
31
  ## v0.4.32 - 2026-04-25
10
32
 
11
33
  This release clarifies ORP's paid-work boundary so budgeted OpenAI research is
package/cli/orp.py CHANGED
@@ -138,6 +138,40 @@ FRONTIER_BANDS = ("exact", "structured", "horizon")
138
138
  FRONTIER_ACTIVE_STATUSES = {"active", "in_progress", "running"}
139
139
  FRONTIER_PENDING_STATUSES = {"", "pending", "planned", "ready"}
140
140
  FRONTIER_TERMINAL_STATUSES = {"complete", "completed", "done", "skipped", "terminal"}
141
# Canonical identifiers for the synthesized "modeled checkoff" continuation:
# a phase/list/item trio representing modeled (proxy) review work that can be
# activated when a frontier is blocked on a human review/sign-off gate.
FRONTIER_MODELED_CHECKOFF_PHASE_ID = "modeled-checkoff-v0"
FRONTIER_MODELED_CHECKOFF_LIST_ID = "modeled-checkoff"
FRONTIER_MODELED_CHECKOFF_ITEM_ID = "modeled-professional-lens-taxonomy-v0"
# Keywords matched (after normalization) against frontier blockers and the
# next action; a hit marks the frontier as eligible for a modeled-checkoff
# continuation.  Single-word hints are matched as whole tokens; hyphenated
# hints are matched as substrings of the normalized text.
FRONTIER_MODELED_CHECKOFF_HINTS = (
    "approval",
    "check-off",
    "checkoff",
    "clinical",
    "domain expert",
    "expert",
    "gate",
    "governance",
    "human",
    "legal",
    "packet check",
    "packet-check",
    "privacy",
    "publication",
    "regulatory",
    "review",
    "safety",
    "sign-off",
    "signoff",
    "validator",
)
# Substrings that indicate the currently active frontier work item is already
# a modeled-checkoff / delegation-governor continuation, so a new one should
# not be suggested on top of it.
FRONTIER_MODELED_CHECKOFF_ACTIVE_HINTS = (
    "continuous-phase-delegation",
    "frontier-continuation-governor",
    "modeled-checkoff",
    "modeled-expert",
    "modeled-professional-lens",
    "packet-checker",
    "phase-delegation-governor",
)
141
175
  YOUTUBE_SOURCE_SCHEMA_VERSION = "1.0.0"
142
176
  EXCHANGE_REPORT_SCHEMA_VERSION = "1.0.0"
143
177
  RESEARCH_RUN_SCHEMA_VERSION = "1.0.0"
@@ -170,6 +204,9 @@ AGENDA_DEFAULT_REFRESH_TIMES = {
170
204
  }
171
205
  AGENT_GUIDE_BEGIN = "<!-- ORP:AGENT_GUIDE:BEGIN -->"
172
206
  AGENT_GUIDE_END = "<!-- ORP:AGENT_GUIDE:END -->"
207
# Markers delimiting the ORP-managed block inside Codex's global AGENTS.md;
# `orp agents codex sync` refreshes only the text between these markers.
CODEX_GLOBAL_GUIDE_BEGIN = "<!-- ORP:CODEX_GLOBAL:BEGIN -->"
CODEX_GLOBAL_GUIDE_END = "<!-- ORP:CODEX_GLOBAL:END -->"
# Filename of the non-mutating SessionStart hook script ORP installs under
# <codex_home>/hooks/.
CODEX_SESSION_START_HOOK = "orp_codex_session_start.py"
173
210
  ORP_SNIPPET_BEGIN = "<!-- ORP:BEGIN -->"
174
211
  ORP_SNIPPET_END = "<!-- ORP:END -->"
175
212
  AGENT_GUIDE_FILENAMES = ("AGENTS.md", "CLAUDE.md")
@@ -6269,6 +6306,78 @@ def _frontier_terminal_declared(state: dict[str, Any] | None, stack: dict[str, A
6269
6306
  return completion_status in {"complete", "completed", "done", "terminal"}
6270
6307
 
6271
6308
 
6309
+ def _frontier_normalized_signal_text(values: Sequence[Any]) -> str:
6310
+ text = " ".join(str(value or "") for value in values).lower()
6311
+ return re.sub(r"[^a-z0-9]+", "-", text).strip("-")
6312
+
6313
+
6314
def _frontier_modeled_checkoff_needed(blockers: list[str], next_action: str) -> bool:
    """Return True when blocker/next-action text looks like a review gate.

    Hyphenated (multi-word) hints are matched as substrings of the
    normalized text; single-word hints must match a whole token.
    """
    haystack = _frontier_normalized_signal_text([*blockers, next_action])
    word_set = {piece for piece in haystack.split("-") if piece}
    for raw_hint in FRONTIER_MODELED_CHECKOFF_HINTS:
        hint = _frontier_normalized_signal_text([raw_hint])
        if not hint:
            continue
        if "-" in hint:
            if hint in haystack:
                return True
        elif hint in word_set:
            return True
    return False
6327
+
6328
+
6329
def _frontier_modeled_checkoff_active(
    *,
    active_primary_id: str,
    active_list_id: str,
    active_item_id: str,
    next_action: str,
) -> bool:
    """Return True when the active frontier work is already a
    modeled-checkoff (or delegation-governor) style continuation."""
    signal = _frontier_normalized_signal_text(
        [active_primary_id, active_list_id, active_item_id, next_action]
    )
    for marker in FRONTIER_MODELED_CHECKOFF_ACTIVE_HINTS:
        if marker in signal:
            return True
    return False
6338
+
6339
+
6340
def _frontier_modeled_checkoff_payload(
    *,
    blockers: list[str],
    next_action: str,
    active_primary_kind: str,
    active_primary_id: str,
    active_list_id: str,
    active_item_id: str,
) -> dict[str, Any]:
    """Describe modeled-checkoff eligibility for the continuation payload.

    Reports whether the frontier looks review/checkoff-gated (``eligible``),
    whether such a continuation is already running (``active``), and — only
    when eligible but not active — the command to activate one.  The
    ``boundary`` text makes explicit that this never clears real external
    gates.
    """
    is_eligible = _frontier_modeled_checkoff_needed(blockers, next_action)
    is_active = _frontier_modeled_checkoff_active(
        active_primary_id=active_primary_id,
        active_list_id=active_list_id,
        active_item_id=active_item_id,
        next_action=next_action,
    )
    suggestion = ""
    if is_eligible and not is_active:
        suggestion = "orp frontier modeled-checkoff activate --json"
    return {
        "eligible": is_eligible,
        "active": is_active,
        "phase_id": FRONTIER_MODELED_CHECKOFF_PHASE_ID,
        "list_id": FRONTIER_MODELED_CHECKOFF_LIST_ID,
        "item_id": FRONTIER_MODELED_CHECKOFF_ITEM_ID,
        "label": "Modeled Checkoff v0",
        "active_primary_kind": active_primary_kind,
        "active_primary_id": active_primary_id,
        "active_list_id": active_list_id,
        "active_item_id": active_item_id,
        "detected_blockers": blockers,
        "boundary": (
            "Modeled/proxy pre-review only; this does not clear human, clinical, legal, "
            "regulatory, privacy, publication, or real-world expert gates."
        ),
        "professional_lens_requirements": [
            "Name the modeled professional role and decision language.",
            "State evidence expectations, uncertainty, stop conditions, and provenance requirements.",
            "Separate packet-improvement decisions from external gate clearance.",
        ],
        "suggested_next_command": suggestion,
    }
6379
+
6380
+
6272
6381
  def _frontier_build_continuation_payload(
6273
6382
  repo_root: Path,
6274
6383
  stack: dict[str, Any] | None,
@@ -6404,6 +6513,33 @@ def _frontier_build_continuation_payload(
6404
6513
  if not next_action:
6405
6514
  next_action = _frontier_additional_item_summary(active_list, active_item)
6406
6515
 
6516
+ modeled_checkoff = _frontier_modeled_checkoff_payload(
6517
+ blockers=blockers,
6518
+ next_action=next_action,
6519
+ active_primary_kind=active_primary_kind,
6520
+ active_primary_id=active_primary_id,
6521
+ active_list_id=active_list_id,
6522
+ active_item_id=active_item_id,
6523
+ )
6524
+ has_active_additional_work = int(additional_summary["active_items"]) > 0
6525
+ if modeled_checkoff["eligible"] and not modeled_checkoff["active"] and not has_active_additional_work:
6526
+ issues.append(
6527
+ {
6528
+ "severity": "warning",
6529
+ "code": "modeled_checkoff_continuation_available",
6530
+ "message": (
6531
+ "frontier appears to be at a review/checkoff gate; activate a modeled-checkoff "
6532
+ "continuation so delegation can improve the packet without clearing the external gate."
6533
+ ),
6534
+ }
6535
+ )
6536
+ suggested_next_command = modeled_checkoff["suggested_next_command"]
6537
+ if not next_action:
6538
+ next_action = (
6539
+ f"Activate modeled checkoff continuation {modeled_checkoff['list_id']}/"
6540
+ f"{modeled_checkoff['item_id']}."
6541
+ )
6542
+
6407
6543
  if not active_item_id and int(additional_summary["pending_items"]) > 0:
6408
6544
  issues.append(
6409
6545
  {
@@ -6412,7 +6548,8 @@ def _frontier_build_continuation_payload(
6412
6548
  "message": "frontier additional queue has pending work but no active item; run `orp frontier additional activate-next` before delegating queue work.",
6413
6549
  }
6414
6550
  )
6415
- suggested_next_command = "orp frontier additional activate-next --json"
6551
+ if not suggested_next_command:
6552
+ suggested_next_command = "orp frontier additional activate-next --json"
6416
6553
  next_pending = additional_summary.get("next_pending")
6417
6554
  if isinstance(next_pending, dict) and not next_action:
6418
6555
  next_action = (
@@ -6452,6 +6589,7 @@ def _frontier_build_continuation_payload(
6452
6589
  "strict": strict,
6453
6590
  "issues": issues,
6454
6591
  "summary": summary,
6592
+ "modeled_checkoff": modeled_checkoff,
6455
6593
  "next_action": next_action,
6456
6594
  "suggested_next_command": suggested_next_command,
6457
6595
  "paths": {key: _path_for_state(value, repo_root) for key, value in paths.items()},
@@ -6838,6 +6976,7 @@ def _frontier_doctor_payload(repo_root: Path) -> dict[str, Any]:
6838
6976
  "continuation": {
6839
6977
  "ok": continuation["ok"],
6840
6978
  "summary": continuation["summary"],
6979
+ "modeled_checkoff": continuation.get("modeled_checkoff", {}),
6841
6980
  "next_action": continuation["next_action"],
6842
6981
  "suggested_next_command": continuation["suggested_next_command"],
6843
6982
  },
@@ -11267,6 +11406,494 @@ def _audit_agents_root(
11267
11406
  }
11268
11407
 
11269
11408
 
11409
+ def _resolve_codex_home_path(raw: str = "") -> Path:
11410
+ explicit = str(raw or "").strip()
11411
+ if explicit:
11412
+ return Path(explicit).expanduser().resolve()
11413
+ env_home = str(os.environ.get("CODEX_HOME", "")).strip()
11414
+ if env_home:
11415
+ return Path(env_home).expanduser().resolve()
11416
+ return (Path.home() / ".codex").resolve()
11417
+
11418
+
11419
def _codex_session_hook_path(codex_home: Path) -> Path:
    """Location of the ORP-managed SessionStart hook script in *codex_home*."""
    hooks_dir = codex_home / "hooks"
    return hooks_dir / CODEX_SESSION_START_HOOK
11421
+
11422
+
11423
def _codex_global_guide_block() -> str:
    """Return the ORP-managed block for Codex's global AGENTS.md.

    The result is compared verbatim by the audit path, so the rendered
    bytes must stay stable between releases.
    """
    body = (
        "## ORP Codex Global Layer",
        "",
        "- Scope: This block is managed by ORP for Codex's global `~/.codex/AGENTS.md` layer.",
        "- Instruction hierarchy: Codex global guidance comes first, then repo/project `AGENTS.md` files, then narrower directory guidance.",
        "- Keep universal preferences here; keep repo build/test commands and project-specific facts in the nearest project `AGENTS.md`.",
        "- Treat ORP as process-only governance. Evidence and project truth still live in canonical artifacts such as code, data, logs, papers, proofs, and repo docs.",
        "- Prefer non-mutating startup checks. Ask before creating or rewriting project `AGENTS.md`, ORP files, Clawdad registration, remotes, or service config.",
        "- Use `orp hygiene --json` before long delegation, after material writeback, before remote side effects or unbudgeted paid compute, and when dirty state grows unexpectedly.",
        "- Local stack surfaces to detect and mention when relevant: `orp`, `clawdad`, `dumpy-files`, `cmail`, and repo-local `AGENTS.md` / `PROTOCOL.md` / `.clawdad` files.",
        "- Startup automation belongs in Codex hooks. Keep this file as policy and preference, not a place for live shell checks.",
        "",
        "### ORP-Owned Startup Rhythm",
        "",
        "1. Let the Codex `SessionStart` hook inspect the current directory and inject a short status note.",
        "2. Read the nearest project instructions before editing.",
        "3. If ORP or Clawdad are missing from a substantive project, suggest `orp init --projects-root <root> --with-clawdad` or `orp agents sync`; do not run it silently.",
        "4. Keep user-written preferences outside this managed block so ORP can refresh the block without overwriting them.",
    )
    return (
        f"{CODEX_GLOBAL_GUIDE_BEGIN}\n"
        + "\n".join(body)
        + f"\n{CODEX_GLOBAL_GUIDE_END}\n"
    )
11442
+
11443
+
11444
def _default_codex_global_agents_text() -> str:
    """Full global AGENTS.md content used when no file exists yet."""
    preamble = (
        "# Codex Global Instructions\n\n"
        "This file is loaded by Codex before project-level `AGENTS.md` files. "
        "Keep durable user preferences here, and let ORP refresh only the marked block below.\n\n"
    )
    return preamble + _codex_global_guide_block()
11450
+
11451
+
11452
def _codex_session_start_hook_script() -> str:
    """Return the full source of the ORP-managed Codex SessionStart hook.

    The installed script is compared byte-for-byte against this text by
    `_codex_audit_payload`, so any edit here flags existing installs as
    needing sync.
    """
    # NOTE(review): the script hardcodes /Volumes/Code_2TB/code as an umbrella
    # root — machine-specific; confirm this is intended for shipped installs.
    return r'''#!/usr/bin/env python3
"""ORP-managed Codex SessionStart hook.

This hook is intentionally non-mutating. It inspects the current working
directory and injects a compact startup note into Codex context.
"""

from __future__ import annotations

import json
import os
from pathlib import Path
import shutil
import subprocess
import sys


def _read_hook_input() -> dict:
    try:
        payload = json.load(sys.stdin)
    except Exception:
        return {}
    return payload if isinstance(payload, dict) else {}


def _run_git_root(cwd: Path) -> str:
    try:
        proc = subprocess.run(
            ["git", "-C", str(cwd), "rev-parse", "--show-toplevel"],
            capture_output=True,
            text=True,
            timeout=2,
        )
    except Exception:
        return ""
    if proc.returncode != 0:
        return ""
    return proc.stdout.strip()


def _is_within(child: Path, parent: Path) -> bool:
    try:
        child.resolve().relative_to(parent.resolve())
        return True
    except Exception:
        return False


def _tool_status(name: str) -> str:
    return shutil.which(name) or "missing"


def _bool_text(value: bool) -> str:
    return "present" if value else "missing"


def main() -> int:
    payload = _read_hook_input()
    cwd = Path(str(payload.get("cwd") or os.getcwd())).expanduser()
    source = str(payload.get("source") or "").strip() or "startup"
    git_root_text = _run_git_root(cwd)
    root = Path(git_root_text).expanduser() if git_root_text else cwd
    codex_home = Path(os.environ.get("CODEX_HOME") or Path.home() / ".codex").expanduser()

    global_agents = codex_home / "AGENTS.md"
    project_agents = root / "AGENTS.md"
    protocol = root / "PROTOCOL.md"
    orp_config = root / "orp.yml"
    clawdad_dir = root / ".clawdad"
    code_root = Path("/Volumes/Code_2TB/code")
    parent_agents = code_root / "AGENTS.md" if _is_within(root, code_root) and root != code_root else None

    tools = {
        "orp": _tool_status("orp"),
        "clawdad": _tool_status("clawdad"),
        "dumpy-files": _tool_status("dumpy-files"),
        "cmail": _tool_status("cmail"),
    }
    missing_tools = [name for name, value in tools.items() if value == "missing"]

    lines = [
        "ORP/Codex startup context:",
        f"- source: {source}",
        f"- cwd: {cwd}",
        f"- project root: {root}",
        f"- global Codex AGENTS.md: {_bool_text(global_agents.exists())}",
        f"- project AGENTS.md: {_bool_text(project_agents.exists())}",
        f"- ORP signals: PROTOCOL.md={_bool_text(protocol.exists())}, orp.yml={_bool_text(orp_config.exists())}",
        f"- Clawdad project state: {_bool_text(clawdad_dir.exists())}",
    ]
    if parent_agents is not None:
        lines.append(f"- Code_2TB umbrella AGENTS.md: {_bool_text(parent_agents.exists())} at {parent_agents}")
    if missing_tools:
        lines.append(f"- missing optional stack tools: {', '.join(missing_tools)}")
    if not project_agents.exists():
        lines.append("- note: ask before creating project AGENTS.md; suggested commands include `orp agents sync` or `orp init --projects-root <root>`.")
    if not protocol.exists() and not orp_config.exists():
        lines.append("- note: no ORP project signals detected; ask before initializing ORP for this directory.")

    print(
        json.dumps(
            {
                "hookSpecificOutput": {
                    "hookEventName": "SessionStart",
                    "additionalContext": "\n".join(lines),
                }
            }
        )
    )
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
'''
11568
+
11569
+
11570
+ def _codex_config_hooks_enabled(text: str) -> bool:
11571
+ in_features = False
11572
+ for raw_line in str(text or "").splitlines():
11573
+ stripped = raw_line.strip()
11574
+ section_match = re.match(r"^\[([^\]]+)\]\s*$", stripped)
11575
+ if section_match:
11576
+ in_features = section_match.group(1).strip() == "features"
11577
+ continue
11578
+ if not in_features or stripped.startswith("#"):
11579
+ continue
11580
+ if re.match(r"^codex_hooks\s*=\s*true(?:\s*(?:#.*)?)?$", stripped, flags=re.IGNORECASE):
11581
+ return True
11582
+ return False
11583
+
11584
+
11585
def _codex_config_enable_hooks_text(text: str) -> tuple[str, str]:
    """Force the ``codex_hooks`` feature flag on in config.toml text.

    Returns ``(updated_text, action)`` where action is ``"kept"`` when the
    flag was already enabled (text returned with a trailing newline
    ensured) or ``"updated"`` when an existing assignment was rewritten or
    the flag/section was appended.
    """
    source = str(text or "")
    if _codex_config_hooks_enabled(source):
        if source and not source.endswith("\n"):
            return source + "\n", "kept"
        return source, "kept"

    rows = source.splitlines()
    header_re = re.compile(r"^\s*\[([^\]]+)\]\s*$")

    # Locate the first [features] section and where it ends.
    features_at = -1
    features_end = len(rows)
    for position, row in enumerate(rows):
        matched = header_re.match(row)
        if matched is None or matched.group(1).strip() != "features":
            continue
        features_at = position
        features_end = len(rows)
        for later in range(position + 1, len(rows)):
            if header_re.match(rows[later]):
                features_end = later
                break
        break

    if features_at >= 0:
        # Rewrite an existing assignment inside the section if present,
        # otherwise insert the flag just before the section ends.
        for position in range(features_at + 1, features_end):
            if re.match(r"^\s*codex_hooks\s*=", rows[position]):
                rows[position] = "codex_hooks = true"
                return "\n".join(rows).rstrip() + "\n", "updated"
        rows.insert(features_end, "codex_hooks = true")
        return "\n".join(rows).rstrip() + "\n", "updated"

    # No [features] section at all: append one.
    if rows and rows[-1].strip():
        rows.append("")
    rows.extend(["[features]", "codex_hooks = true"])
    return "\n".join(rows).rstrip() + "\n", "updated"
11618
+
11619
+
11620
def _codex_desired_session_hook(codex_home: Path) -> dict[str, Any]:
    """The SessionStart hook group ORP wants present in hooks.json."""
    script_path = _codex_session_hook_path(codex_home)
    command = f"/usr/bin/python3 {shlex.quote(str(script_path))}"
    handler = {
        "type": "command",
        "command": command,
        "timeout": 10,
        "statusMessage": "Checking ORP/Codex startup context",
    }
    return {
        "matcher": "startup|resume|clear",
        "hooks": [handler],
    }
11633
+
11634
+
11635
def _codex_hooks_payload_has_session_hook(payload: dict[str, Any], codex_home: Path) -> bool:
    """Return True when *payload* already carries ORP's SessionStart command.

    Tolerates arbitrarily malformed payload shapes (non-dict groups,
    non-list handler arrays) by simply skipping them.
    """
    wanted = str(_codex_desired_session_hook(codex_home)["hooks"][0]["command"])
    hooks_map = payload.get("hooks")
    if not isinstance(hooks_map, dict):
        return False
    session_groups = hooks_map.get("SessionStart")
    if not isinstance(session_groups, list):
        return False
    for entry in session_groups:
        if not isinstance(entry, dict):
            continue
        handlers = entry.get("hooks")
        if not isinstance(handlers, list):
            continue
        if any(
            isinstance(handler, dict)
            and str(handler.get("command", "")).strip() == wanted
            for handler in handlers
        ):
            return True
    return False
11653
+
11654
+
11655
+ def _codex_load_hooks_for_edit(path: Path) -> tuple[dict[str, Any], str]:
11656
+ if not path.exists():
11657
+ return {"hooks": {}}, "missing"
11658
+ try:
11659
+ payload = json.loads(path.read_text(encoding="utf-8"))
11660
+ except Exception:
11661
+ return {}, "invalid_json"
11662
+ if not isinstance(payload, dict):
11663
+ return {}, "invalid_json"
11664
+ return payload, "loaded"
11665
+
11666
+
11667
def _codex_merge_hooks_payload(payload: dict[str, Any], codex_home: Path) -> tuple[dict[str, Any], str]:
    """Return ``(payload_with_hook, action)`` without mutating the input.

    Action is ``"kept"`` when the desired SessionStart hook is already
    present (original object returned), otherwise ``"updated"`` with a
    deep copy carrying the appended hook group.
    """
    if _codex_hooks_payload_has_session_hook(payload, codex_home):
        return payload, "kept"
    merged = copy.deepcopy(payload)
    hooks_map = merged.get("hooks")
    if not isinstance(hooks_map, dict):
        hooks_map = {}
        merged["hooks"] = hooks_map
    session_groups = hooks_map.get("SessionStart")
    if not isinstance(session_groups, list):
        session_groups = []
        hooks_map["SessionStart"] = session_groups
    session_groups.append(_codex_desired_session_hook(codex_home))
    return merged, "updated"
11681
+
11682
+
11683
def _codex_audit_payload(codex_home: Path) -> dict[str, Any]:
    """Audit Codex's global layer under *codex_home*.

    Checks the managed block in AGENTS.md, the ``codex_hooks`` flag in
    config.toml, the SessionStart entry in hooks.json, and the hook script
    bytes.  Returns a JSON-ready payload; ``ok`` is True only when every
    check is "ok", otherwise ``next_actions`` suggests the sync command.

    Fix: each predicate (tool lookup, hook/flag/guide/script comparisons,
    path existence) was previously computed twice — once for the reported
    field and once for the status — so they are now evaluated once and
    reused.
    """
    codex_home = codex_home.expanduser().resolve()
    agents_path = codex_home / "AGENTS.md"
    config_path = codex_home / "config.toml"
    hooks_path = codex_home / "hooks.json"
    hook_script_path = _codex_session_hook_path(codex_home)

    agents_exists = agents_path.exists()
    config_exists = config_path.exists()
    hooks_exists = hooks_path.exists()
    script_exists = hook_script_path.exists()

    agents_text = agents_path.read_text(encoding="utf-8") if agents_exists else ""
    guide_block = _extract_marked_block(agents_text, CODEX_GLOBAL_GUIDE_BEGIN, CODEX_GLOBAL_GUIDE_END)
    expected_guide = _codex_global_guide_block()
    config_text = config_path.read_text(encoding="utf-8") if config_exists else ""
    hooks_payload = _read_json_if_exists(hooks_path)
    script_text = hook_script_path.read_text(encoding="utf-8") if script_exists else ""

    # Evaluate each predicate once and reuse for both the field and status.
    guide_synced = guide_block == expected_guide
    hooks_enabled = _codex_config_hooks_enabled(config_text)
    session_hook_present = _codex_hooks_payload_has_session_hook(hooks_payload, codex_home)
    script_synced = script_text == _codex_session_start_hook_script()

    checks = {
        "codex_home": {
            "path": str(codex_home),
            "exists": codex_home.exists(),
            "status": "ok" if codex_home.exists() else "missing",
        },
        "global_agents": {
            "path": str(agents_path),
            "exists": agents_exists,
            "managed_block_present": bool(guide_block),
            "managed_block_synced": guide_synced,
            "status": "ok" if agents_exists and guide_synced else "needs_sync",
        },
        "config": {
            "path": str(config_path),
            "exists": config_exists,
            "codex_hooks_enabled": hooks_enabled,
            "status": "ok" if config_exists and hooks_enabled else "needs_sync",
        },
        "hooks_json": {
            "path": str(hooks_path),
            "exists": hooks_exists,
            "session_start_hook_present": session_hook_present,
            "status": "ok" if hooks_exists and session_hook_present else "needs_sync",
        },
        "session_start_script": {
            "path": str(hook_script_path),
            "exists": script_exists,
            "synced": script_synced,
            "status": "ok" if script_exists and script_synced else "needs_sync",
        },
    }
    stack_tools: dict[str, Any] = {}
    for name in ("orp", "clawdad", "dumpy-files", "cmail"):
        tool_path = _tool_path(name)  # one lookup per tool (was two)
        stack_tools[name] = {
            "path": tool_path,
            "available": bool(tool_path),
            "required": name == "orp",
        }
    ok = all(row.get("status") == "ok" for row in checks.values())
    next_actions = [] if ok else [f"orp agents codex sync --codex-home {shlex.quote(str(codex_home))} --json"]
    return {
        "ok": ok,
        "schema_version": "1.0.0",
        "kind": "orp_codex_global_audit",
        "codex_home": str(codex_home),
        "checks": checks,
        "stack_tools": stack_tools,
        "next_actions": next_actions,
    }
11748
+
11749
+
11750
def _codex_sync_payload(codex_home: Path, *, dry_run: bool = False) -> dict[str, Any]:
    """Bootstrap/refresh Codex's global layer and report what changed.

    Upserts the managed block into AGENTS.md (creating the file if
    absent), forces the ``codex_hooks`` feature flag in config.toml,
    merges the SessionStart hook into hooks.json, and installs the hook
    script.  With ``dry_run=True`` nothing is written and actions are
    reported as ``would_create``/``would_update``.  Returns a JSON-ready
    payload whose ``ok`` reflects a post-write audit (or True on dry-run).
    """
    codex_home = codex_home.expanduser().resolve()
    agents_path = codex_home / "AGENTS.md"
    config_path = codex_home / "config.toml"
    hooks_path = codex_home / "hooks.json"
    hook_script_path = _codex_session_hook_path(codex_home)
    actions: list[dict[str, Any]] = []

    if not dry_run:
        codex_home.mkdir(parents=True, exist_ok=True)

    # --- AGENTS.md: refresh only the marked block; create the file whole
    # (with the default preamble) when it does not exist yet.
    expected_guide = _codex_global_guide_block()
    if agents_path.exists():
        agents_text = agents_path.read_text(encoding="utf-8")
        updated_agents, guide_action = _upsert_marked_block(
            agents_text,
            begin_marker=CODEX_GLOBAL_GUIDE_BEGIN,
            end_marker=CODEX_GLOBAL_GUIDE_END,
            block_text=expected_guide,
        )
        if updated_agents != agents_text and not dry_run:
            _write_text(agents_path, updated_agents if updated_agents.endswith("\n") else updated_agents + "\n")
        action = "kept" if updated_agents == agents_text else ("would_update" if dry_run else "updated")
    else:
        updated_agents = _default_codex_global_agents_text()
        guide_action = "created"
        if not dry_run:
            _write_text(agents_path, updated_agents)
        action = "would_create" if dry_run else "created"
    actions.append({"path": str(agents_path), "action": action, "guide_action": guide_action})

    # --- config.toml: ensure the codex_hooks feature flag is enabled.
    config_existed = config_path.exists()
    config_text = config_path.read_text(encoding="utf-8") if config_existed else ""
    updated_config, config_action = _codex_config_enable_hooks_text(config_text)
    if updated_config != config_text and not dry_run:
        _write_text(config_path, updated_config)
    actions.append(
        {
            "path": str(config_path),
            "action": (
                "kept"
                if updated_config == config_text
                else ("would_create" if dry_run and not config_existed else "would_update" if dry_run else "created" if not config_existed else "updated")
            ),
            "codex_hooks_action": config_action,
        }
    )

    # --- hooks.json: merge the SessionStart hook.  A file that exists but
    # is not a JSON object is never overwritten — we bail out with ok=False.
    hooks_existed = hooks_path.exists()
    hooks_payload, hooks_load_status = _codex_load_hooks_for_edit(hooks_path)
    if hooks_load_status == "invalid_json":
        actions.append({"path": str(hooks_path), "action": "blocked_invalid_json"})
        audit = _codex_audit_payload(codex_home) if not dry_run else {}
        return {
            "ok": False,
            "schema_version": "1.0.0",
            "kind": "orp_codex_global_sync",
            "codex_home": str(codex_home),
            "dry_run": dry_run,
            "actions": actions,
            "audit": audit,
            "warnings": ["hooks.json exists but is not a JSON object; ORP did not overwrite it."],
        }
    updated_hooks, hooks_action = _codex_merge_hooks_payload(hooks_payload, codex_home)
    if updated_hooks != hooks_payload and not dry_run:
        _write_json(hooks_path, updated_hooks)
    actions.append(
        {
            "path": str(hooks_path),
            "action": (
                "kept"
                if updated_hooks == hooks_payload
                else ("would_create" if dry_run and not hooks_existed else "would_update" if dry_run else "created" if not hooks_existed else "updated")
            ),
            "session_start_hook_action": hooks_action,
        }
    )

    # --- hook script: write the canonical script bytes and best-effort
    # mark it executable (chmod failure is non-fatal, e.g. odd filesystems).
    script_text = _codex_session_start_hook_script()
    script_existed = hook_script_path.exists()
    existing_script = hook_script_path.read_text(encoding="utf-8") if script_existed else ""
    if existing_script != script_text and not dry_run:
        _write_text(hook_script_path, script_text)
        try:
            hook_script_path.chmod(0o755)
        except Exception:
            pass
    actions.append(
        {
            "path": str(hook_script_path),
            "action": (
                "kept"
                if existing_script == script_text
                else ("would_create" if dry_run and not script_existed else "would_update" if dry_run else "created" if not script_existed else "updated")
            ),
        }
    )

    # Re-audit after writing so "ok" reflects the resulting on-disk state.
    audit = _codex_audit_payload(codex_home) if not dry_run else {}
    return {
        "ok": True if dry_run else bool(audit.get("ok")),
        "schema_version": "1.0.0",
        "kind": "orp_codex_global_sync",
        "codex_home": str(codex_home),
        "dry_run": dry_run,
        "actions": actions,
        "audit": audit,
        "warnings": [],
    }
11859
+
11860
+
11861
+ def _render_codex_audit_report(payload: dict[str, Any]) -> str:
11862
+ lines = [
11863
+ "ORP Codex Global Audit",
11864
+ f"ok={'true' if payload.get('ok') else 'false'}",
11865
+ f"codex_home={payload.get('codex_home', '')}",
11866
+ ]
11867
+ checks = payload.get("checks")
11868
+ if isinstance(checks, dict):
11869
+ for name, row in checks.items():
11870
+ if isinstance(row, dict):
11871
+ lines.append(f"{name}={row.get('status', '')}:{row.get('path', '')}")
11872
+ stack_tools = payload.get("stack_tools")
11873
+ if isinstance(stack_tools, dict):
11874
+ for name, row in stack_tools.items():
11875
+ if isinstance(row, dict):
11876
+ lines.append(f"tool.{name}={row.get('path', '') or 'missing'}")
11877
+ for next_action in payload.get("next_actions", []):
11878
+ lines.append(f"next={next_action}")
11879
+ return "\n".join(lines)
11880
+
11881
+
11882
+ def _render_codex_sync_report(payload: dict[str, Any]) -> str:
11883
+ lines = [
11884
+ "ORP Codex Global Sync",
11885
+ f"ok={'true' if payload.get('ok') else 'false'}",
11886
+ f"codex_home={payload.get('codex_home', '')}",
11887
+ f"dry_run={'true' if payload.get('dry_run') else 'false'}",
11888
+ ]
11889
+ for action in payload.get("actions", []):
11890
+ if isinstance(action, dict):
11891
+ lines.append(f"action={action.get('action', '')}:{action.get('path', '')}")
11892
+ for warning in payload.get("warnings", []):
11893
+ lines.append(f"warning={warning}")
11894
+ return "\n".join(lines)
11895
+
11896
+
11270
11897
  def _resolve_projects_root_path(raw: str) -> Path:
11271
11898
  text = str(raw or "").strip()
11272
11899
  if not text:
@@ -14469,6 +15096,26 @@ def cmd_agents_audit(args: argparse.Namespace) -> int:
14469
15096
  return 0 if payload.get("ok") else 1
14470
15097
 
14471
15098
 
15099
def cmd_agents_codex_audit(args: argparse.Namespace) -> int:
    """CLI: ``orp agents codex audit`` — report Codex global-layer drift.

    Exit code 0 when the audit is clean, 1 otherwise; output is JSON or a
    text report depending on ``--json``.
    """
    codex_home = _resolve_codex_home_path(str(getattr(args, "codex_home", "") or ""))
    payload = _codex_audit_payload(codex_home)
    if args.json_output:
        _print_json(payload)
    else:
        print(_render_codex_audit_report(payload))
    return 0 if payload.get("ok") else 1
15107
+
15108
+
15109
def cmd_agents_codex_sync(args: argparse.Namespace) -> int:
    """CLI: ``orp agents codex sync`` — bootstrap/refresh the Codex global layer.

    Honors ``--dry-run``; exit code 0 when the sync (and post-audit)
    succeeded, 1 otherwise.
    """
    codex_home = _resolve_codex_home_path(str(getattr(args, "codex_home", "") or ""))
    payload = _codex_sync_payload(codex_home, dry_run=bool(getattr(args, "dry_run", False)))
    if args.json_output:
        _print_json(payload)
    else:
        print(_render_codex_sync_report(payload))
    return 0 if payload.get("ok") else 1
15117
+
15118
+
14472
15119
  def _render_governance_status_text(payload: dict[str, Any]) -> str:
14473
15120
  git = payload.get("git", {}) if isinstance(payload.get("git"), dict) else {}
14474
15121
  runtime = payload.get("runtime", {}) if isinstance(payload.get("runtime"), dict) else {}
@@ -15808,6 +16455,225 @@ def cmd_frontier_additional_complete_active(args: argparse.Namespace) -> int:
15808
16455
  return 0
15809
16456
 
15810
16457
 
16458
+ def cmd_frontier_modeled_checkoff_activate(args: argparse.Namespace) -> int:
16459
+ repo_root = Path(args.repo_root).resolve()
16460
+ stack = _frontier_load_stack(repo_root)
16461
+ state = _frontier_load_state(repo_root)
16462
+ additional = _frontier_load_additional(repo_root, stack)
16463
+
16464
+ phase_id = str(args.phase_id or FRONTIER_MODELED_CHECKOFF_PHASE_ID).strip() or FRONTIER_MODELED_CHECKOFF_PHASE_ID
16465
+ list_id = str(args.list_id or FRONTIER_MODELED_CHECKOFF_LIST_ID).strip() or FRONTIER_MODELED_CHECKOFF_LIST_ID
16466
+ item_id = str(args.item_id or FRONTIER_MODELED_CHECKOFF_ITEM_ID).strip() or FRONTIER_MODELED_CHECKOFF_ITEM_ID
16467
+
16468
+ version_id = str(args.version or state.get("active_version", "") or "modeled-checkoff").strip()
16469
+ milestone_id = str(args.milestone or state.get("active_milestone", "") or phase_id).strip()
16470
+ band = str(args.band or state.get("band", "") or "structured").strip()
16471
+ if band not in FRONTIER_BANDS:
16472
+ raise RuntimeError(f"frontier band must be one of: {', '.join(FRONTIER_BANDS)}")
16473
+
16474
+ version = _frontier_find_version(stack, version_id)
16475
+ if version is None:
16476
+ version = {
16477
+ "id": version_id,
16478
+ "label": "Modeled Checkoff",
16479
+ "intent": "Keep review/checkoff gates moving through modeled packet improvement without clearing external gates.",
16480
+ "status": "active",
16481
+ "milestones": [],
16482
+ }
16483
+ versions = stack.get("versions")
16484
+ if not isinstance(versions, list):
16485
+ versions = []
16486
+ stack["versions"] = versions
16487
+ versions.append(version)
16488
+
16489
+ parent_version, milestone = _frontier_find_milestone(stack, milestone_id)
16490
+ if milestone is None:
16491
+ milestone = {
16492
+ "id": milestone_id,
16493
+ "parent_version": version_id,
16494
+ "label": "Modeled Checkoff Continuation",
16495
+ "band": band,
16496
+ "status": "active",
16497
+ "depends_on": [],
16498
+ "success_criteria": [
16499
+ "delegation has an active modeled-checkoff packet path",
16500
+ "external review gates remain explicitly uncleared",
16501
+ ],
16502
+ "phases": [],
16503
+ }
16504
+ milestones = version.get("milestones")
16505
+ if not isinstance(milestones, list):
16506
+ milestones = []
16507
+ version["milestones"] = milestones
16508
+ milestones.append(milestone)
16509
+ elif isinstance(parent_version, dict):
16510
+ version_id = str(parent_version.get("id", "")).strip() or version_id
16511
+ version = parent_version
16512
+
16513
+ phase = _frontier_find_phase(milestone, phase_id)
16514
+ phase_requirements = [
16515
+ "state that this is modeled/proxy pre-review, not real expert approval",
16516
+ "name the professional lens, decision language, evidence expectations, uncertainty, and stop conditions",
16517
+ "preserve provenance, scope, and uncertainty on scientific or medical claims",
16518
+ "separate packet-improvement decisions from clinical, legal, regulatory, privacy, publication, or human gate clearance",
16519
+ ]
16520
+ phase_success = [
16521
+ "a delegate can continue by writing modeled checkoff packets",
16522
+ "the packet records which external gates remain open",
16523
+ "the output is safe for public artifacts and does not include patient-specific advice",
16524
+ ]
16525
+ phase_plans = [
16526
+ "classify the blocker or review gate",
16527
+ "select the modeled professional lens and decision rubric",
16528
+ "run packet checks that improve evidence, scope, uncertainty, and stop-condition language",
16529
+ ]
16530
+ if phase is None:
16531
+ phase = {
16532
+ "id": phase_id,
16533
+ "label": "Modeled Checkoff v0",
16534
+ "status": "active",
16535
+ "goal": "Convert review/checkoff blockers into modeled packet-improvement work without clearing the external gate.",
16536
+ "depends_on": [],
16537
+ "requirements": phase_requirements,
16538
+ "success_criteria": phase_success,
16539
+ "plans": phase_plans,
16540
+ "compute_hooks": [],
16541
+ }
16542
+ phases = milestone.get("phases")
16543
+ if not isinstance(phases, list):
16544
+ phases = []
16545
+ milestone["phases"] = phases
16546
+ phases.append(phase)
16547
+ else:
16548
+ phase["status"] = "active"
16549
+ phase.setdefault("requirements", phase_requirements)
16550
+ phase.setdefault("success_criteria", phase_success)
16551
+ phase.setdefault("plans", phase_plans)
16552
+
16553
+ versions = stack.get("versions")
16554
+ if isinstance(versions, list):
16555
+ for version_row in versions:
16556
+ if not isinstance(version_row, dict):
16557
+ continue
16558
+ version_row["status"] = "active" if str(version_row.get("id", "")).strip() == version_id else (
16559
+ "planned" if str(version_row.get("status", "")).strip() == "active" else str(version_row.get("status", "")).strip() or "planned"
16560
+ )
16561
+ milestones = version_row.get("milestones")
16562
+ if not isinstance(milestones, list):
16563
+ continue
16564
+ for milestone_row in milestones:
16565
+ if not isinstance(milestone_row, dict):
16566
+ continue
16567
+ milestone_row["status"] = "active" if str(milestone_row.get("id", "")).strip() == milestone_id else (
16568
+ "planned" if str(milestone_row.get("status", "")).strip() == "active" else str(milestone_row.get("status", "")).strip() or "planned"
16569
+ )
16570
+ phases = milestone_row.get("phases")
16571
+ if not isinstance(phases, list):
16572
+ continue
16573
+ for phase_row in phases:
16574
+ if not isinstance(phase_row, dict):
16575
+ continue
16576
+ if str(milestone_row.get("id", "")).strip() == milestone_id and str(phase_row.get("id", "")).strip() == phase_id:
16577
+ phase_row["status"] = "active"
16578
+ elif str(phase_row.get("status", "")).strip() == "active":
16579
+ phase_row["status"] = "planned"
16580
+
16581
+ item_list = _frontier_find_additional_list(additional, list_id)
16582
+ if item_list is None:
16583
+ item_list = {
16584
+ "id": list_id,
16585
+ "label": "Modeled checkoff continuations",
16586
+ "status": "active",
16587
+ "items": [],
16588
+ }
16589
+ lists = additional.get("lists")
16590
+ if not isinstance(lists, list):
16591
+ lists = []
16592
+ additional["lists"] = lists
16593
+ lists.append(item_list)
16594
+ item_list["status"] = "active"
16595
+ items = item_list.get("items")
16596
+ if not isinstance(items, list):
16597
+ items = []
16598
+ item_list["items"] = items
16599
+ item = _frontier_find_additional_item(item_list, item_id)
16600
+ item_goal = (
16601
+ "Build the modeled professional lens taxonomy and packet-check rubric for the blocked frontier; "
16602
+ "do not claim real expert sign-off or clear external gates."
16603
+ )
16604
+ item_requirements = [
16605
+ "declare modeled/proxy status in the packet",
16606
+ "identify professional persona, language, decision criteria, evidence standard, uncertainty, and stop conditions",
16607
+ "carry provenance and scope for scientific, medical, legal, privacy, or publication-sensitive claims",
16608
+ ]
16609
+ item_success = [
16610
+ "delegation can proceed with explicit modeled checkoff records",
16611
+ "remaining human or external gates are named as still open",
16612
+ ]
16613
+ if item is None:
16614
+ item = {
16615
+ "id": item_id,
16616
+ "label": "Modeled professional lens taxonomy",
16617
+ "status": "active",
16618
+ "goal": item_goal,
16619
+ "depends_on": [],
16620
+ "requirements": item_requirements,
16621
+ "success_criteria": item_success,
16622
+ "plans": phase_plans,
16623
+ }
16624
+ items.append(item)
16625
+ else:
16626
+ item["status"] = "active"
16627
+ item.setdefault("goal", item_goal)
16628
+ item.setdefault("requirements", item_requirements)
16629
+ item.setdefault("success_criteria", item_success)
16630
+ item.setdefault("plans", phase_plans)
16631
+
16632
+ additional["active_list_id"] = list_id
16633
+ additional["active_item_id"] = item_id
16634
+ if not str(additional.get("program_id", "")).strip():
16635
+ additional["program_id"] = str(stack.get("program_id", "")).strip()
16636
+ if not str(additional.get("label", "")).strip():
16637
+ additional["label"] = str(stack.get("label", "")).strip()
16638
+
16639
+ state["active_version"] = version_id
16640
+ state["active_milestone"] = milestone_id
16641
+ state["active_phase"] = phase_id
16642
+ state["band"] = band
16643
+ state["blocked_by"] = _coerce_string_list(state.get("blocked_by"))
16644
+ state["next_action"] = str(args.next_action or "").strip() or (
16645
+ f"Run modeled checkoff packet {list_id}/{item_id}: model the professional lens, "
16646
+ "improve the packet, and keep external gates explicitly open."
16647
+ )
16648
+
16649
+ written = _frontier_write_materialized_views(repo_root, stack, state)
16650
+ written.update(_frontier_write_additional_views(repo_root, additional))
16651
+ modeled_checkoff = _frontier_modeled_checkoff_payload(
16652
+ blockers=_coerce_string_list(state.get("blocked_by")),
16653
+ next_action=str(state.get("next_action", "")).strip(),
16654
+ active_primary_kind="phase",
16655
+ active_primary_id=phase_id,
16656
+ active_list_id=list_id,
16657
+ active_item_id=item_id,
16658
+ )
16659
+ result = {
16660
+ "ok": True,
16661
+ "modeled_checkoff": modeled_checkoff,
16662
+ "state": state,
16663
+ "phase": phase,
16664
+ "list": item_list,
16665
+ "item": item,
16666
+ "paths": written,
16667
+ }
16668
+ if args.json_output:
16669
+ _print_json(result)
16670
+ else:
16671
+ print(f"phase_id={phase_id}")
16672
+ print(f"active_additional={list_id}/{item_id}")
16673
+ print(f"next_action={state['next_action']}")
16674
+ return 0
16675
+
16676
+
15811
16677
  def _print_frontier_diagnostic_payload(payload: dict[str, Any]) -> None:
15812
16678
  print(f"ok={'true' if payload.get('ok') else 'false'}")
15813
16679
  print(f"next_action={payload.get('next_action', '') or '(none)'}")
@@ -15821,6 +16687,15 @@ def _print_frontier_diagnostic_payload(payload: dict[str, Any]) -> None:
15821
16687
  if isinstance(additional, dict):
15822
16688
  print(f"pending_additional_items={additional.get('pending_items', 0)}")
15823
16689
  print(f"active_additional={additional.get('active_list_id', '') or '(none)'}/{additional.get('active_item_id', '') or '(none)'}")
16690
+ modeled_checkoff = payload.get("modeled_checkoff")
16691
+ if not isinstance(modeled_checkoff, dict):
16692
+ continuation = payload.get("continuation")
16693
+ modeled_checkoff = continuation.get("modeled_checkoff", {}) if isinstance(continuation, dict) else {}
16694
+ if isinstance(modeled_checkoff, dict) and modeled_checkoff.get("eligible"):
16695
+ print(f"modeled_checkoff_active={'true' if modeled_checkoff.get('active') else 'false'}")
16696
+ command = str(modeled_checkoff.get("suggested_next_command", "")).strip()
16697
+ if command:
16698
+ print(f"modeled_checkoff_suggested_next_command={command}")
15824
16699
  for issue in payload.get("issues", []):
15825
16700
  if isinstance(issue, dict):
15826
16701
  print(f"issue={issue.get('severity','')}:{issue.get('code','')}:{issue.get('message','')}")
@@ -15846,6 +16721,7 @@ def cmd_frontier_preflight_delegate(args: argparse.Namespace) -> int:
15846
16721
  payload["next_action"] = continuation.get("next_action", "")
15847
16722
  payload["suggested_next_command"] = continuation.get("suggested_next_command", "")
15848
16723
  payload["summary"] = continuation.get("summary", {})
16724
+ payload["modeled_checkoff"] = continuation.get("modeled_checkoff", {})
15849
16725
  payload["preflight"] = {
15850
16726
  "ready": bool(payload["ok"]),
15851
16727
  "purpose": "Block delegation when the frontier cannot prove a single safe continuation or terminal state.",
@@ -27818,6 +28694,8 @@ def build_parser() -> argparse.ArgumentParser:
27818
28694
  "Examples:\n"
27819
28695
  " orp agents root show\n"
27820
28696
  " orp agents root set /absolute/path/to/projects\n"
28697
+ " orp agents codex audit\n"
28698
+ " orp agents codex sync\n"
27821
28699
  " orp agents sync\n"
27822
28700
  " orp agents sync --role umbrella --root /absolute/path/to/projects\n"
27823
28701
  " orp agents sync --projects-root /absolute/path/to/projects\n"
@@ -27881,6 +28759,41 @@ def build_parser() -> argparse.ArgumentParser:
27881
28759
  add_json_flag(s_agents_audit)
27882
28760
  s_agents_audit.set_defaults(func=cmd_agents_audit, json_output=False)
27883
28761
 
28762
+ s_agents_codex = agents_sub.add_parser(
28763
+ "codex",
28764
+ help="Audit or sync Codex global AGENTS.md and SessionStart hook scaffolding",
28765
+ )
28766
+ agents_codex_sub = s_agents_codex.add_subparsers(dest="agents_codex_cmd", required=True)
28767
+
28768
+ s_agents_codex_audit = agents_codex_sub.add_parser(
28769
+ "audit",
28770
+ help="Audit Codex global AGENTS.md, hooks.json, hook script, and codex_hooks feature flag",
28771
+ )
28772
+ s_agents_codex_audit.add_argument(
28773
+ "--codex-home",
28774
+ default="",
28775
+ help="Codex home directory (default: CODEX_HOME or ~/.codex)",
28776
+ )
28777
+ add_json_flag(s_agents_codex_audit)
28778
+ s_agents_codex_audit.set_defaults(func=cmd_agents_codex_audit, json_output=False)
28779
+
28780
+ s_agents_codex_sync = agents_codex_sub.add_parser(
28781
+ "sync",
28782
+ help="Create or refresh Codex global AGENTS.md and non-mutating SessionStart hook scaffolding",
28783
+ )
28784
+ s_agents_codex_sync.add_argument(
28785
+ "--codex-home",
28786
+ default="",
28787
+ help="Codex home directory (default: CODEX_HOME or ~/.codex)",
28788
+ )
28789
+ s_agents_codex_sync.add_argument(
28790
+ "--dry-run",
28791
+ action="store_true",
28792
+ help="Plan the Codex global sync without writing files",
28793
+ )
28794
+ add_json_flag(s_agents_codex_sync)
28795
+ s_agents_codex_sync.set_defaults(func=cmd_agents_codex_sync, json_output=False)
28796
+
27884
28797
  s_opportunities = sub.add_parser(
27885
28798
  "opportunities",
27886
28799
  help="Local-first opportunity boards for contests, programs, grants, and similar openings",
@@ -30023,6 +30936,43 @@ def build_parser() -> argparse.ArgumentParser:
30023
30936
  add_json_flag(s_frontier_set_live)
30024
30937
  s_frontier_set_live.set_defaults(func=cmd_frontier_set_live, json_output=False)
30025
30938
 
30939
+ s_frontier_modeled_checkoff = frontier_sub.add_parser(
30940
+ "modeled-checkoff",
30941
+ help="Activate a modeled packet-check continuation for review/checkoff gates",
30942
+ )
30943
+ frontier_modeled_checkoff_sub = s_frontier_modeled_checkoff.add_subparsers(dest="frontier_modeled_checkoff_cmd", required=True)
30944
+
30945
+ s_frontier_modeled_checkoff_activate = frontier_modeled_checkoff_sub.add_parser(
30946
+ "activate",
30947
+ help="Create and activate the modeled-checkoff phase and additional item",
30948
+ )
30949
+ s_frontier_modeled_checkoff_activate.add_argument("--version", default="", help="Version id to use (default: current active version)")
30950
+ s_frontier_modeled_checkoff_activate.add_argument("--milestone", default="", help="Milestone id to use (default: current active milestone)")
30951
+ s_frontier_modeled_checkoff_activate.add_argument(
30952
+ "--phase-id",
30953
+ default=FRONTIER_MODELED_CHECKOFF_PHASE_ID,
30954
+ help="Modeled-checkoff phase id",
30955
+ )
30956
+ s_frontier_modeled_checkoff_activate.add_argument(
30957
+ "--list-id",
30958
+ default=FRONTIER_MODELED_CHECKOFF_LIST_ID,
30959
+ help="Additional item list id",
30960
+ )
30961
+ s_frontier_modeled_checkoff_activate.add_argument(
30962
+ "--item-id",
30963
+ default=FRONTIER_MODELED_CHECKOFF_ITEM_ID,
30964
+ help="Additional item id",
30965
+ )
30966
+ s_frontier_modeled_checkoff_activate.add_argument(
30967
+ "--band",
30968
+ default="",
30969
+ choices=["", *FRONTIER_BANDS],
30970
+ help="Optional explicit planning band",
30971
+ )
30972
+ s_frontier_modeled_checkoff_activate.add_argument("--next-action", default="", help="Optional live next action override")
30973
+ add_json_flag(s_frontier_modeled_checkoff_activate)
30974
+ s_frontier_modeled_checkoff_activate.set_defaults(func=cmd_frontier_modeled_checkoff_activate, json_output=False)
30975
+
30026
30976
  s_frontier_render = frontier_sub.add_parser(
30027
30977
  "render",
30028
30978
  help="Refresh the materialized frontier JSON and markdown views",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "open-research-protocol",
3
- "version": "0.4.32",
3
+ "version": "0.4.33",
4
4
  "description": "ORP CLI (Open Research Protocol): workspace ledgers, secrets, scheduling, governed execution, and agent-friendly research workflows.",
5
5
  "license": "MIT",
6
6
  "author": "Fractal Research Group <cody@frg.earth>",