open-research-protocol 0.4.25 → 0.4.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/cli/orp.py CHANGED
@@ -139,6 +139,8 @@ FRONTIER_PENDING_STATUSES = {"", "pending", "planned", "ready"}
  FRONTIER_TERMINAL_STATUSES = {"complete", "completed", "done", "skipped", "terminal"}
  YOUTUBE_SOURCE_SCHEMA_VERSION = "1.0.0"
  EXCHANGE_REPORT_SCHEMA_VERSION = "1.0.0"
+ RESEARCH_RUN_SCHEMA_VERSION = "1.0.0"
+ PROJECT_CONTEXT_SCHEMA_VERSION = "1.0.0"
  MAINTENANCE_STATE_SCHEMA_VERSION = "1.0.0"
  SCHEDULE_REGISTRY_SCHEMA_VERSION = "1.0.0"
  AGENDA_REGISTRY_SCHEMA_VERSION = "1.0.0"
@@ -5845,6 +5847,9 @@ def _default_state_payload() -> dict[str, Any]:
  "last_erdos_sync": {},
  "last_discover_scan_id": "",
  "discovery_scans": {},
+ "last_research_run_id": "",
+ "research_runs": {},
+ "project_context": {},
  "governance": {},
  }
 
@@ -5853,6 +5858,7 @@ def _ensure_dirs(repo_root: Path) -> None:
  (repo_root / "orp" / "packets").mkdir(parents=True, exist_ok=True)
  (repo_root / "orp" / "artifacts").mkdir(parents=True, exist_ok=True)
  (repo_root / "orp" / "discovery" / "github").mkdir(parents=True, exist_ok=True)
+ (repo_root / "orp" / "research").mkdir(parents=True, exist_ok=True)
  (repo_root / "orp" / "checkpoints").mkdir(parents=True, exist_ok=True)
  (repo_root / "orp" / "handoffs").mkdir(parents=True, exist_ok=True)
  state_path = repo_root / "orp" / "state.json"
@@ -9604,6 +9610,238 @@ def _effective_remote_context(
9604
9610
  }
9605
9611
 
9606
9612
 
9613
+ def _project_context_path(repo_root: Path) -> Path:
9614
+ return repo_root / "orp" / "project.json"
9615
+
9616
+
9617
+ def _project_surface(path: Path, repo_root: Path, *, kind: str, role: str) -> dict[str, Any]:
9618
+ rel_path = _path_for_state(path, repo_root)
9619
+ exists = path.exists()
9620
+ size_bytes = path.stat().st_size if exists and path.is_file() else 0
9621
+ return {
9622
+ "path": rel_path,
9623
+ "kind": kind,
9624
+ "role": role,
9625
+ "exists": exists,
9626
+ "size_bytes": int(size_bytes),
9627
+ }
9628
+
9629
+
9630
+ def _project_authority_surfaces(repo_root: Path) -> list[dict[str, Any]]:
9631
+ candidates: list[tuple[str, str, str]] = [
9632
+ ("AGENTS.md", "agent_guidance", "project_agent_rules"),
9633
+ ("CLAUDE.md", "agent_guidance", "project_agent_rules"),
9634
+ ("README.md", "overview", "project_overview"),
9635
+ ("llms.txt", "llm_discovery", "machine_readable_discovery"),
9636
+ ("orp.yml", "orp_config", "runtime_config"),
9637
+ ("analysis/orp.kernel.task.yml", "kernel_artifact", "starter_task_contract"),
9638
+ ("docs/START_HERE.md", "operator_docs", "starter_flow"),
9639
+ ("docs/ROADMAP.md", "roadmap", "planning_authority"),
9640
+ ("ROADMAP.md", "roadmap", "planning_authority"),
9641
+ ("TODO.md", "task_notes", "planning_authority"),
9642
+ ("package.json", "manifest", "javascript_manifest"),
9643
+ ("pyproject.toml", "manifest", "python_manifest"),
9644
+ ("Cargo.toml", "manifest", "rust_manifest"),
9645
+ ("go.mod", "manifest", "go_manifest"),
9646
+ ("Makefile", "command_surface", "build_commands"),
9647
+ ("justfile", "command_surface", "build_commands"),
9648
+ ]
9649
+ rows: list[dict[str, Any]] = []
9650
+ seen: set[str] = set()
9651
+ for rel_path, kind, role in candidates:
9652
+ path = repo_root / rel_path
9653
+ row = _project_surface(path, repo_root, kind=kind, role=role)
9654
+ if row["exists"] or kind in {"agent_guidance", "orp_config", "kernel_artifact"}:
9655
+ rows.append(row)
9656
+ seen.add(str(row["path"]))
9657
+
9658
+ docs_root = repo_root / "docs"
9659
+ if docs_root.exists() and docs_root.is_dir():
9660
+ doc_paths = sorted(
9661
+ path
9662
+ for path in docs_root.glob("*.md")
9663
+ if path.is_file()
9664
+ and _path_for_state(path, repo_root) not in seen
9665
+ and path.name.upper() not in {"README.MD"}
9666
+ )
9667
+ for path in doc_paths[:12]:
9668
+ lower_name = path.name.lower()
9669
+ kind = "project_doc"
9670
+ role = "supporting_context"
9671
+ if "roadmap" in lower_name or "plan" in lower_name:
9672
+ kind = "roadmap"
9673
+ role = "planning_authority"
9674
+ elif "research" in lower_name:
9675
+ kind = "research_doc"
9676
+ role = "research_context"
9677
+ elif "spec" in lower_name or "protocol" in lower_name:
9678
+ kind = "spec"
9679
+ role = "project_authority"
9680
+ rows.append(_project_surface(path, repo_root, kind=kind, role=role))
9681
+
9682
+ return rows
9683
+
9684
+
9685
+ def _project_directory_signals(repo_root: Path, surfaces: list[dict[str, Any]]) -> dict[str, Any]:
9686
+ surface_paths = {str(row.get("path", "")).strip() for row in surfaces if isinstance(row, dict)}
9687
+ source_dirs = [
9688
+ rel
9689
+ for rel in ("src", "lib", "app", "cli", "packages", "research", "analysis", "tests", "test", "docs")
9690
+ if (repo_root / rel).exists()
9691
+ ]
9692
+ languages: list[str] = []
9693
+ if "package.json" in surface_paths:
9694
+ languages.append("javascript")
9695
+ if "pyproject.toml" in surface_paths:
9696
+ languages.append("python")
9697
+ if "Cargo.toml" in surface_paths:
9698
+ languages.append("rust")
9699
+ if "go.mod" in surface_paths:
9700
+ languages.append("go")
9701
+ if any((repo_root / rel).exists() for rel in ("lakefile.lean", "lakefile.toml")):
9702
+ languages.append("lean")
9703
+ return {
9704
+ "source_dirs": source_dirs,
9705
+ "languages_or_stacks": _unique_strings(languages),
9706
+ "has_tests": any((repo_root / rel).exists() for rel in ("tests", "test", "__tests__")),
9707
+ "has_docs": (repo_root / "docs").exists(),
9708
+ "has_orp_config": "orp.yml" in surface_paths,
9709
+ "authority_surface_count": len([row for row in surfaces if row.get("exists")]),
9710
+ }
9711
+
9712
+
9713
+ def _project_research_trigger_policy() -> dict[str, Any]:
9714
+ return {
9715
+ "default_timing": "after_local_decomposition_before_action",
9716
+ "provider_calls_are_explicit": True,
9717
+ "live_calls_require_execute": True,
9718
+ "secret_alias": "openai-primary",
9719
+ "env_var": "OPENAI_API_KEY",
9720
+ "call_moments": [
9721
+ {
9722
+ "moment_id": "plan",
9723
+ "calls_api": False,
9724
+ "when": "Always run first during project refresh or research ask; inspect local authority surfaces and decompose the question.",
9725
+ },
9726
+ {
9727
+ "moment_id": "thinking_reasoning_high",
9728
+ "calls_api": True,
9729
+ "lane": "openai_reasoning_high",
9730
+ "model": "gpt-5.4",
9731
+ "when": "Use when the directory has a decision gate, route choice, proof strategy, architecture tradeoff, or ambiguous next action.",
9732
+ },
9733
+ {
9734
+ "moment_id": "web_synthesis",
9735
+ "calls_api": True,
9736
+ "lane": "openai_web_synthesis",
9737
+ "model": "gpt-5.4",
9738
+ "when": "Use when the answer depends on current public facts, external docs, papers, project status, or citations.",
9739
+ },
9740
+ {
9741
+ "moment_id": "pro_deep_research",
9742
+ "calls_api": True,
9743
+ "lane": "openai_deep_research",
9744
+ "model": "o3-deep-research-2025-06-26",
9745
+ "when": "Use only after reasoning/web lanes expose a research-heavy gap, disagreement, source-quality issue, or literature-scale synthesis need.",
9746
+ "capability_note": "Requires an OpenAI organization verified for Deep Research model access.",
9747
+ },
9748
+ ],
9749
+ "skip_research_when": [
9750
+ "the next action is already executable from local project authority",
9751
+ "the question is only a deterministic local status or file lookup",
9752
+ "the task is implementation-ready and no external/public evidence is needed",
9753
+ ],
9754
+ "escalate_to_deep_research_when": [
9755
+ "web synthesis finds conflicting or weak public sources",
9756
+ "the project must compare multiple papers, standards, providers, or public claims",
9757
+ "the output needs a citation-rich report rather than a short decision memo",
9758
+ ],
9759
+ }
9760
+
9761
+
9762
+ def _project_evolution_policy() -> dict[str, Any]:
9763
+ return {
9764
+ "refresh_surfaces": [
9765
+ "orp init",
9766
+ "orp project refresh",
9767
+ "after adding or changing roadmap/spec/agent-guidance files",
9768
+ "after installing a profile pack or changing command surfaces",
9769
+ "before a research loop whose answer depends on project state",
9770
+ ],
9771
+ "evolution_loop": [
9772
+ "scan authority surfaces",
9773
+ "classify what is local, public, executable, or human-gated",
9774
+ "choose whether reasoning, web synthesis, or deep research is justified",
9775
+ "act only after the decision gate has enough evidence",
9776
+ "checkpoint the resulting project state",
9777
+ ],
9778
+ "boundary": "ORP project context is process-only. It guides routing and research timing but is not evidence.",
9779
+ }
9780
+
9781
+
9782
+ def _project_context_payload(repo_root: Path, *, source: str) -> dict[str, Any]:
9783
+ context_path = _project_context_path(repo_root)
9784
+ existing = _read_json_if_exists(context_path)
9785
+ generated_at = _now_utc()
9786
+ initialized_at = str(existing.get("initialized_at_utc", "")).strip() or generated_at
9787
+ surfaces = _project_authority_surfaces(repo_root)
9788
+ signals = _project_directory_signals(repo_root, surfaces)
9789
+ research_policy = _project_research_trigger_policy()
9790
+ return {
9791
+ "schema_version": PROJECT_CONTEXT_SCHEMA_VERSION,
9792
+ "kind": "orp_project_context",
9793
+ "project": {
9794
+ "name": repo_root.name or "project",
9795
+ "root": str(repo_root),
9796
+ },
9797
+ "initialized_at_utc": initialized_at,
9798
+ "refreshed_at_utc": generated_at,
9799
+ "refresh_source": source,
9800
+ "authority_surfaces": surfaces,
9801
+ "directory_signals": signals,
9802
+ "research_policy": research_policy,
9803
+ "evolution_policy": _project_evolution_policy(),
9804
+ "next_actions": [
9805
+ "orp project refresh --json",
9806
+ "orp agents audit",
9807
+ "orp status --json",
9808
+ 'orp research ask "<decision question>" --json',
9809
+ ],
9810
+ "notes": [
9811
+ "This file is ORP process context for the local directory.",
9812
+ "It is refreshed as the project evolves and should not be cited as proof or canonical evidence.",
9813
+ "Provider research calls remain opt-in through `orp research ask --execute`.",
9814
+ ],
9815
+ }
9816
+
9817
+
9818
+ def _write_project_context(repo_root: Path, *, source: str) -> tuple[dict[str, Any], str]:
9819
+ path = _project_context_path(repo_root)
9820
+ existed = path.exists()
9821
+ payload = _project_context_payload(repo_root, source=source)
9822
+ _write_json(path, payload)
9823
+ state_path = repo_root / "orp" / "state.json"
9824
+ state = {**_default_state_payload(), **_read_json_if_exists(state_path)}
9825
+ state["project_context"] = {
9826
+ "path": _path_for_state(path, repo_root),
9827
+ "schema_version": PROJECT_CONTEXT_SCHEMA_VERSION,
9828
+ "refreshed_at_utc": payload["refreshed_at_utc"],
9829
+ "refresh_source": source,
9830
+ "authority_surface_count": payload["directory_signals"]["authority_surface_count"],
9831
+ "research_default_timing": payload["research_policy"]["default_timing"],
9832
+ }
9833
+ _write_json(state_path, state)
9834
+ return payload, "updated" if existed else "created"
9835
+
9836
+
9837
+ def _load_project_context(repo_root: Path) -> dict[str, Any]:
9838
+ path = _project_context_path(repo_root)
9839
+ payload = _read_json_if_exists(path)
9840
+ if not payload:
9841
+ raise RuntimeError("No ORP project context found. Run `orp init` or `orp project refresh --json` first.")
9842
+ return payload
9843
+
9844
+
9607
9845
  def _init_kernel_task_template(repo_name: str) -> str:
9608
9846
  safe_name = str(repo_name or "").strip() or "my-project"
9609
9847
  return (
@@ -10533,6 +10771,18 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
  str(governance_state.get("checkpoint_log_path", "orp/checkpoints/CHECKPOINT_LOG.md")),
  "orp/checkpoints/CHECKPOINT_LOG.md",
  )
+ project_context_path = _project_context_path(repo_root)
+ project_context = _read_json_if_exists(project_context_path)
+ project_context_signals = (
+ project_context.get("directory_signals")
+ if isinstance(project_context.get("directory_signals"), dict)
+ else {}
+ )
+ project_research_policy = (
+ project_context.get("research_policy")
+ if isinstance(project_context.get("research_policy"), dict)
+ else {}
+ )

  manifest = _read_json_if_exists(manifest_path)
  orp_governed = bool(governance_state.get("orp_governed")) or bool(manifest.get("repo", {}).get("orp_governed"))
@@ -10622,6 +10872,9 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
  warnings.append("checkpoint log is missing from ORP governance runtime.")
  if not agent_policy_path.exists():
  warnings.append("agent policy file is missing from ORP governance runtime.")
+ if not project_context_path.exists():
+ warnings.append("project context lens is missing from ORP governance runtime.")
+ next_actions.append("orp project refresh --json")

  if remote_context["mode"] == "local_only":
  notes.append("local-first mode active; no remote is required.")
@@ -10710,6 +10963,15 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
  "handoff_exists": handoff_path.exists(),
  "checkpoint_log_path": _path_for_state(checkpoint_log_path, repo_root),
  "checkpoint_log_exists": checkpoint_log_path.exists(),
+ "project_context": {
+ "path": _path_for_state(project_context_path, repo_root),
+ "exists": project_context_path.exists(),
+ "schema_version": str(project_context.get("schema_version", "")).strip(),
+ "refreshed_at_utc": str(project_context.get("refreshed_at_utc", "")).strip(),
+ "refresh_source": str(project_context.get("refresh_source", "")).strip(),
+ "authority_surface_count": int(project_context_signals.get("authority_surface_count", 0) or 0),
+ "research_default_timing": str(project_research_policy.get("default_timing", "")).strip(),
+ },
  "git_runtime_path": _path_for_state(_git_runtime_path(repo_root) or Path(".git/orp/runtime.json"), repo_root),
  "git": {
  **git_snapshot,
@@ -11104,10 +11366,13 @@ def _about_payload() -> dict[str, Any]:
  "agent_loop": "docs/AGENT_LOOP.md",
  "discover": "docs/DISCOVER.md",
  "exchange": "docs/EXCHANGE.md",
+ "research_council": "docs/RESEARCH_COUNCIL.md",
  "profile_packs": "docs/PROFILE_PACKS.md",
+ "research_mcp_server": "scripts/orp-mcp",
  },
  "artifacts": {
  "state_json": "orp/state.json",
+ "project_context_json": "orp/project.json",
  "run_json": "orp/artifacts/<run_id>/RUN.json",
  "run_summary_md": "orp/artifacts/<run_id>/RUN_SUMMARY.md",
  "packet_json": "orp/packets/<packet_id>.json",
@@ -11117,6 +11382,9 @@ def _about_payload() -> dict[str, Any]:
  "exchange_json": "orp/exchange/<exchange_id>/EXCHANGE.json",
  "exchange_summary_md": "orp/exchange/<exchange_id>/EXCHANGE_SUMMARY.md",
  "exchange_transfer_map_md": "orp/exchange/<exchange_id>/TRANSFER_MAP.md",
+ "research_answer_json": "orp/research/<run_id>/ANSWER.json",
+ "research_summary_md": "orp/research/<run_id>/RUN_SUMMARY.md",
+ "research_lanes_root": "orp/research/<run_id>/lanes/",
  },
  "schemas": {
  "config": "spec/v1/orp.config.schema.json",
@@ -11126,6 +11394,8 @@ def _about_payload() -> dict[str, Any]:
  "kernel_extension": "spec/v1/kernel-extension.schema.json",
  "youtube_source": "spec/v1/youtube-source.schema.json",
  "exchange_report": "spec/v1/exchange-report.schema.json",
+ "research_run": "spec/v1/research-run.schema.json",
+ "project_context": "spec/v1/project-context.schema.json",
  "profile_pack": "spec/v1/profile-pack.schema.json",
  "link_project": "spec/v1/link-project.schema.json",
  "link_session": "spec/v1/link-session.schema.json",
@@ -11216,6 +11486,14 @@ def _about_payload() -> dict[str, Any]:
  ["agents", "audit"],
  ],
  },
+ {
+ "id": "project",
+ "description": "Local project context lens for authority surfaces, directory signals, research call timing, and explicit evolution as the repo changes.",
+ "entrypoints": [
+ ["project", "refresh"],
+ ["project", "show"],
+ ],
+ },
  {
  "id": "secrets",
  "description": "Hosted secret store for global API key inventory, provider metadata, and project-scoped resolution.",
@@ -11289,6 +11567,15 @@ def _about_payload() -> dict[str, Any]:
  ["exchange", "repo", "synthesize"],
  ],
  },
+ {
+ "id": "research",
+ "description": "Durable OpenAI research-loop runs with decomposition, explicit API call moments, provider lanes, and synthesis artifacts.",
+ "entrypoints": [
+ ["research", "ask"],
+ ["research", "status"],
+ ["research", "show"],
+ ],
+ },
  {
  "id": "collaborate",
  "description": "Built-in repository collaboration setup and workflow execution.",
@@ -11385,6 +11672,8 @@ def _about_payload() -> dict[str, Any]:
  {"name": "agents_root_set", "path": ["agents", "root", "set"], "json_output": True},
  {"name": "agents_sync", "path": ["agents", "sync"], "json_output": True},
  {"name": "agents_audit", "path": ["agents", "audit"], "json_output": True},
+ {"name": "project_refresh", "path": ["project", "refresh"], "json_output": True},
+ {"name": "project_show", "path": ["project", "show"], "json_output": True},
  {"name": "opportunities_list", "path": ["opportunities", "list"], "json_output": True},
  {"name": "opportunities_show", "path": ["opportunities", "show"], "json_output": True},
  {"name": "opportunities_focus", "path": ["opportunities", "focus"], "json_output": True},
@@ -11412,6 +11701,7 @@ def _about_payload() -> dict[str, Any]:
  {"name": "secrets_show", "path": ["secrets", "show"], "json_output": True},
  {"name": "secrets_add", "path": ["secrets", "add"], "json_output": True},
  {"name": "secrets_ensure", "path": ["secrets", "ensure"], "json_output": True},
+ {"name": "secrets_keychain_add", "path": ["secrets", "keychain-add"], "json_output": True},
  {"name": "secrets_keychain_list", "path": ["secrets", "keychain-list"], "json_output": True},
  {"name": "secrets_keychain_show", "path": ["secrets", "keychain-show"], "json_output": True},
  {"name": "secrets_sync_keychain", "path": ["secrets", "sync-keychain"], "json_output": True},
@@ -11468,6 +11758,9 @@ def _about_payload() -> dict[str, Any]:
  {"name": "discover_profile_init", "path": ["discover", "profile", "init"], "json_output": True},
  {"name": "discover_github_scan", "path": ["discover", "github", "scan"], "json_output": True},
  {"name": "exchange_repo_synthesize", "path": ["exchange", "repo", "synthesize"], "json_output": True},
+ {"name": "research_ask", "path": ["research", "ask"], "json_output": True},
+ {"name": "research_status", "path": ["research", "status"], "json_output": True},
+ {"name": "research_show", "path": ["research", "show"], "json_output": True},
  {"name": "collaborate_init", "path": ["collaborate", "init"], "json_output": True},
  {"name": "collaborate_workflows", "path": ["collaborate", "workflows"], "json_output": True},
  {"name": "collaborate_gates", "path": ["collaborate", "gates"], "json_output": True},
@@ -11515,6 +11808,8 @@ def _about_payload() -> dict[str, Any]:
  "YouTube inspection is a built-in ORP ability exposed through `orp youtube inspect`, returning public metadata plus full transcript text and segments whenever public caption tracks are available.",
  "Discovery profiles in ORP are portable search-intent files managed directly by ORP.",
  "Knowledge exchange is a built-in ORP ability exposed through `orp exchange repo synthesize`, producing structured exchange artifacts and transfer maps for local or remote source repositories.",
+ "Research council runs are built into ORP through `orp research ask`, `orp research status`, and `orp research show`, with dry-run decomposition by default and explicit `--execute` for live provider calls.",
+ "Project context is built into ORP through `orp project refresh` and `orp project show`; it records local authority surfaces and research timing policy for the current directory without calling providers.",
  "Collaboration is a built-in ORP ability exposed through `orp collaborate ...`.",
  "Frontier control is a built-in ORP ability exposed through `orp frontier ...`, separating the exact live point, the exact active milestone, the near structured checklist, the additional work queue, and strict continuation preflight before delegation.",
  "Agent modes are lightweight optional overlays for taste, perspective shifts, fresh movement, and intentional comprehension breakdowns; `orp mode breakdown granular-breakdown --json` gives agents a broad-to-atomic ladder for complex work, while `orp mode nudge granular-breakdown --json` gives a short reminder card.",
@@ -11653,6 +11948,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "label": "Audit AGENTS.md and CLAUDE.md so parent/child guidance stays in sync",
  "command": "orp agents audit",
  },
+ {
+ "label": "Refresh the local project context lens before research-heavy work",
+ "command": "orp project refresh --json",
+ },
  {
  "label": "Save a new API key or token interactively when you need one",
  "command": 'orp secrets add --alias <alias> --label "<label>" --provider <provider>',
@@ -11696,6 +11995,14 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "label": "Audit AGENTS.md and CLAUDE.md for the current repo",
  "command": "orp agents audit",
  },
+ {
+ "label": "Inspect the local project context lens",
+ "command": "orp project show --json",
+ },
+ {
+ "label": "Refresh the local project context lens",
+ "command": "orp project refresh --json",
+ },
  {
  "label": "Inspect the saved service and data connections for this user",
  "command": "orp connections list",
@@ -11804,6 +12111,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "label": "List locally cached Keychain-backed secrets on this Mac",
  "command": "orp secrets keychain-list --json",
  },
+ {
+ "label": "Save a key directly into the local ORP macOS Keychain store",
+ "command": "orp secrets keychain-add --alias <alias> --provider <provider> --value-stdin --json",
+ },
  {
  "label": "Sync one hosted secret into the local macOS Keychain",
  "command": "orp secrets sync-keychain <alias-or-id> --json",
@@ -11836,6 +12147,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "label": "Deeply synthesize another repo or local project into exchange artifacts",
  "command": "orp exchange repo synthesize /path/to/source --json",
  },
+ {
+ "label": "Decompose a question into an OpenAI research-loop run",
+ "command": 'orp research ask "What should we investigate?" --json',
+ },
  {
  "label": "Inspect local repo governance status",
  "command": "orp status --json",
@@ -12137,6 +12452,14 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "orp agents audit",
  ],
  },
+ {
+ "id": "project",
+ "description": "Local project context lens for authority surfaces, directory signals, research call timing, and explicit evolution as the repo changes.",
+ "entrypoints": [
+ "orp project refresh --json",
+ "orp project show --json",
+ ],
+ },
  {
  "id": "hosted",
  "description": "Hosted identity, ideas, first-class workspace records, runner lanes, and control-plane status.",
@@ -12169,6 +12492,7 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "orp secrets show <alias-or-id> --json",
  'orp secrets add --alias <alias> --label "<label>" --provider <provider>',
  "orp secrets ensure --alias <alias> --provider <provider> --current-project --json",
+ "orp secrets keychain-add --alias <alias> --provider <provider> --value-stdin --json",
  "orp secrets keychain-list --json",
  "orp secrets keychain-show <alias-or-id> --json",
  "orp secrets sync-keychain <alias-or-id> --json",
@@ -12237,6 +12561,16 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
  "orp exchange repo synthesize /path/to/source --json",
  ],
  },
+ {
+ "id": "research",
+ "description": "Durable OpenAI research-loop question answering that records the decomposition, API call moments, optional live calls, and synthesized answer under orp/research.",
+ "entrypoints": [
+ 'orp research ask "What should we investigate?" --json',
+ 'orp research ask "What should we investigate?" --execute --json',
+ "orp research status latest --json",
+ "orp research show latest --json",
+ ],
+ },
  {
  "id": "collaborate",
  "description": "Built-in repository collaboration setup and workflow execution.",
@@ -12380,6 +12714,7 @@ def _render_home_screen(payload: dict[str, Any]) -> str:
  "opportunities",
  "connections",
  "secrets",
+ "project",
  "governance",
  "frontier",
  "schedule",
@@ -12848,6 +13183,12 @@ def cmd_init(args: argparse.Namespace) -> int:
  "action": str(row.get("action", "")).strip(),
  }

+ project_context, project_context_action = _write_project_context(repo_root, source="init")
+ files["project_context"] = {
+ "path": _path_for_state(_project_context_path(repo_root), repo_root),
+ "action": project_context_action,
+ }
+
  result = {
  "ok": True,
  "config_action": config_action,
@@ -12855,6 +13196,12 @@ def cmd_init(args: argparse.Namespace) -> int:
  "runtime_root": str(repo_root / "orp"),
  "files": files,
  "agents": agents_sync,
+ "project_context": {
+ "path": _path_for_state(_project_context_path(repo_root), repo_root),
+ "action": project_context_action,
+ "authority_surface_count": project_context["directory_signals"]["authority_surface_count"],
+ "research_default_timing": project_context["research_policy"]["default_timing"],
+ },
  "git": {
  **git_snapshot,
  "initialized_by_orp": bool(git_init_result["initialized"]),
@@ -12878,6 +13225,7 @@ def cmd_init(args: argparse.Namespace) -> int:
  if git_init_result["initialized"]:
  print(f"initialized git repository with default branch {default_branch}")
  print("synced AGENTS.md and CLAUDE.md with ORP-managed blocks")
+ print(f"project_context={_path_for_state(_project_context_path(repo_root), repo_root)}")
  print(
  "git_state="
  + ",".join(
@@ -12898,6 +13246,48 @@ def cmd_init(args: argparse.Namespace) -> int:
  return 0


+ def cmd_project_refresh(args: argparse.Namespace) -> int:
+ repo_root = Path(args.repo_root).resolve()
+ _ensure_dirs(repo_root)
+ payload, action = _write_project_context(repo_root, source="project_refresh")
+ result = {
+ "ok": True,
+ "action": action,
+ "project_context_path": _path_for_state(_project_context_path(repo_root), repo_root),
+ "project": payload.get("project", {}),
+ "authority_surface_count": payload.get("directory_signals", {}).get("authority_surface_count", 0),
+ "directory_signals": payload.get("directory_signals", {}),
+ "research_policy": payload.get("research_policy", {}),
+ "next_actions": payload.get("next_actions", []),
+ }
+ if args.json_output:
+ _print_json(result)
+ else:
+ print(f"action={action}")
+ print(f"project_context={result['project_context_path']}")
+ print(f"authority_surface_count={result['authority_surface_count']}")
+ print(f"research_default_timing={payload.get('research_policy', {}).get('default_timing', '')}")
+ for next_action in result["next_actions"]:
+ print(f"next={next_action}")
+ return 0
+
+
+ def cmd_project_show(args: argparse.Namespace) -> int:
+ repo_root = Path(args.repo_root).resolve()
+ payload = _load_project_context(repo_root)
+ if args.json_output:
+ _print_json(payload)
+ else:
+ print(f"project={payload.get('project', {}).get('name', '')}")
+ print(f"root={payload.get('project', {}).get('root', '')}")
+ print(f"refreshed_at_utc={payload.get('refreshed_at_utc', '')}")
+ print(f"research_default_timing={payload.get('research_policy', {}).get('default_timing', '')}")
+ for surface in payload.get("authority_surfaces", []):
+ if isinstance(surface, dict) and surface.get("exists"):
+ print(f"surface={surface.get('path', '')}:{surface.get('kind', '')}:{surface.get('role', '')}")
+ return 0
+
+
  def cmd_agents_root_show(args: argparse.Namespace) -> int:
  payload = _agents_root_show_payload()
  if args.json_output:
@@ -12949,6 +13339,7 @@ def _render_governance_status_text(payload: dict[str, Any]) -> str:
  runtime = payload.get("runtime", {}) if isinstance(payload.get("runtime"), dict) else {}
  validation = payload.get("validation", {}) if isinstance(payload.get("validation"), dict) else {}
  readiness = payload.get("readiness", {}) if isinstance(payload.get("readiness"), dict) else {}
+ project_context = payload.get("project_context", {}) if isinstance(payload.get("project_context"), dict) else {}
  last_branch_action = (
  runtime.get("last_branch_action")
  if isinstance(runtime.get("last_branch_action"), dict)
@@ -12987,6 +13378,10 @@ def _render_governance_status_text(payload: dict[str, Any]) -> str:
  f"paths.config={payload.get('config_path', '')}",
  f"paths.handoff={payload.get('handoff_path', '')}",
  f"paths.checkpoint_log={payload.get('checkpoint_log_path', '')}",
+ f"paths.project_context={project_context.get('path', '')}",
+ f"project_context.exists={'true' if project_context.get('exists') else 'false'}",
+ f"project_context.refreshed_at={project_context.get('refreshed_at_utc', '') or '(never)'}",
+ f"project_context.research_default_timing={project_context.get('research_default_timing', '') or '(unset)'}",
  f"paths.git_runtime={payload.get('git_runtime_path', '')}",
  f"readiness.local_ready={'true' if readiness.get('local_ready') else 'false'}",
  f"readiness.remote_ready={'true' if readiness.get('remote_ready') else 'false'}",
@@ -15842,142 +16237,1554 @@ def cmd_pack_list(args: argparse.Namespace) -> int:
15842
16237
  print(f"pack.name={name}")
15843
16238
  print("---")
15844
16239
 
15845
- print(f"packs_root={packs_root}")
15846
- print(f"packs.count={len(packs)}")
15847
- return 0
16240
+ print(f"packs_root={packs_root}")
16241
+ print(f"packs.count={len(packs)}")
16242
+ return 0
16243
+
16244
+
16245
+ def cmd_discover_profile_init(args: argparse.Namespace) -> int:
16246
+ repo_root = Path(args.repo_root).resolve()
16247
+ out_path = _resolve_cli_path(args.out or DEFAULT_DISCOVER_PROFILE, repo_root)
16248
+ payload = _discover_profile_template(
16249
+ profile_id=args.profile_id,
16250
+ owner=args.owner or "",
16251
+ owner_type=args.owner_type,
16252
+ keywords=_coerce_string_list(args.keyword),
16253
+ topics=_coerce_string_list(args.topic),
16254
+ languages=_coerce_string_list(args.language),
16255
+ areas=_coerce_string_list(args.area),
16256
+ people=_coerce_string_list(args.person),
16257
+ )
16258
+ _write_json(out_path, payload)
16259
+
16260
+ result = {
16261
+ "ok": True,
16262
+ "profile_path": _path_for_state(out_path, repo_root),
16263
+ "profile_id": payload["profile_id"],
16264
+ "owner_login": payload["discover"]["github"]["owner"]["login"],
16265
+ "owner_type": payload["discover"]["github"]["owner"]["type"],
16266
+ "notes": payload["notes"],
16267
+ }
16268
+ if args.json_output:
16269
+ _print_json(result)
16270
+ return 0
16271
+
16272
+ print(f"profile_path={result['profile_path']}")
16273
+ print(f"profile_id={result['profile_id']}")
16274
+ print(f"owner_login={result['owner_login']}")
16275
+ print(f"owner_type={result['owner_type']}")
16276
+ print(f"next=orp discover github scan --profile {result['profile_path']}")
16277
+ return 0
16278
+
16279
+
16280
+ def cmd_discover_github_scan(args: argparse.Namespace) -> int:
16281
+ repo_root = Path(args.repo_root).resolve()
16282
+ profile_path = _resolve_cli_path(args.profile or DEFAULT_DISCOVER_PROFILE, repo_root)
16283
+ if not profile_path.exists():
16284
+ raise RuntimeError(
16285
+ f"missing discovery profile: {_path_for_state(profile_path, repo_root)}. "
16286
+ "Run `orp discover profile init` first."
16287
+ )
16288
+
16289
+ repos_fixture = _resolve_cli_path(args.repos_fixture, repo_root) if args.repos_fixture else None
16290
+ issues_fixture = _resolve_cli_path(args.issues_fixture, repo_root) if args.issues_fixture else None
16291
+ scan_id = args.scan_id or _scan_id()
16292
+ payload = _perform_github_discovery_scan(
16293
+ repo_root=repo_root,
16294
+ profile_path=profile_path,
16295
+ scan_id=scan_id,
16296
+ repos_fixture_path=repos_fixture,
16297
+ issues_fixture_path=issues_fixture,
16298
+ )
16299
+ if args.json_output:
16300
+ _print_json(payload)
16301
+ return 0
16302
+
16303
+ print(f"scan_id={payload['scan_id']}")
16304
+ print(f"profile={payload['profile']['path']}")
16305
+ print(f"owner={payload['owner']['login']}")
16306
+ print(f"owner_type={payload['owner']['type']}")
16307
+ print(f"scan_json={payload['artifacts']['scan_json']}")
16308
+ print(f"summary_md={payload['artifacts']['summary_md']}")
16309
+ if payload["repos"]:
16310
+ top_repo = payload["repos"][0]["full_name"]
16311
+ print(f"top_repo={top_repo}")
16312
+ print(f"next=orp collaborate init --github-repo {top_repo}")
16313
+ if payload["issues"]:
16314
+ top_issue = payload["issues"][0]
16315
+ print(f"top_issue={top_issue['repo']}#{top_issue['number']}")
16316
+ return 0
16317
+
16318
+
16319
+ def cmd_exchange_repo_synthesize(args: argparse.Namespace) -> int:
16320
+ repo_root = Path(args.repo_root).resolve()
16321
+ exchange_id = str(getattr(args, "exchange_id", "") or "").strip() or _exchange_id()
16322
+ source = _exchange_source_payload(repo_root, args)
16323
+ source_root = Path(str(source.get("local_path", "")).strip()).resolve()
16324
+ inventory = _exchange_inventory(source_root)
16325
+ relation = _exchange_relation(repo_root, source_root, inventory)
16326
+ suggested_focus = _exchange_suggested_focus(inventory, relation)
16327
+ paths = _exchange_paths(repo_root, exchange_id)
16328
+
16329
+ payload = {
16330
+ "schema_version": EXCHANGE_REPORT_SCHEMA_VERSION,
16331
+ "kind": "exchange_report",
16332
+ "exchange_id": exchange_id,
16333
+ "generated_at_utc": _now_utc(),
16334
+ "current_project_root": str(repo_root),
16335
+ "source": source,
16336
+ "inventory": inventory,
16337
+ "relation": relation,
16338
+ "suggested_focus": suggested_focus,
16339
+ "artifacts": {
16340
+ "exchange_json": _path_for_state(paths["exchange_json"], repo_root),
16341
+ "summary_md": _path_for_state(paths["summary_md"], repo_root),
16342
+ "transfer_map_md": _path_for_state(paths["transfer_map_md"], repo_root),
16343
+ },
16344
+ "notes": [
16345
+ "Knowledge exchange is deeper than discovery scan output.",
16346
+ "Exchange artifacts are structured synthesis aids, not evidence by themselves.",
16347
+ "Local non-git directories can be bootstrapped into git when `--allow-git-init` is explicitly provided.",
16348
+ ],
16349
+ }
16350
+ _write_json(paths["exchange_json"], payload)
16351
+ _write_text(paths["summary_md"], _exchange_summary_markdown(payload))
16352
+ _write_text(paths["transfer_map_md"], _exchange_transfer_map_markdown(payload))
16353
+
16354
+ result = {
16355
+ "ok": True,
16356
+ "exchange_id": exchange_id,
16357
+ "source": source,
16358
+ "inventory": inventory,
16359
+ "relation": relation,
16360
+ "suggested_focus": suggested_focus,
16361
+ "artifacts": payload["artifacts"],
16362
+ "schema_path": "spec/v1/exchange-report.schema.json",
16363
+ }
16364
+ if args.json_output:
16365
+ _print_json(result)
16366
+ return 0
16367
+
16368
+ print(f"exchange_id={exchange_id}")
16369
+ print(f"source.mode={source.get('mode', '')}")
16370
+ print(f"source.local_path={source.get('local_path', '')}")
16371
+ print(f"source.git_present={str(bool(source.get('git_present'))).lower()}")
16372
+ print(f"source.git_initialized_by_orp={str(bool(source.get('git_initialized_by_orp'))).lower()}")
16373
+ print(f"artifacts.exchange_json={payload['artifacts']['exchange_json']}")
16374
+ print(f"artifacts.summary_md={payload['artifacts']['summary_md']}")
16375
+ print(f"artifacts.transfer_map_md={payload['artifacts']['transfer_map_md']}")
16376
+ return 0
16377
+
16378
+
16379
+ def _research_id() -> str:
16380
+ return "research-" + dt.datetime.now(dt.timezone.utc).strftime("%Y%m%d-%H%M%S-%f")
16381
+
16382
+
16383
+ def _research_root(repo_root: Path) -> Path:
16384
+ return repo_root / "orp" / "research"
16385
+
16386
+
16387
+ def _research_paths(repo_root: Path, run_id: str) -> dict[str, Path]:
16388
+ root = _research_root(repo_root) / run_id
16389
+ return {
16390
+ "root": root,
16391
+ "request_json": root / "REQUEST.json",
16392
+ "breakdown_json": root / "BREAKDOWN.json",
16393
+ "profile_json": root / "PROFILE.json",
16394
+ "answer_json": root / "ANSWER.json",
16395
+ "summary_md": root / "RUN_SUMMARY.md",
16396
+ "lanes_root": root / "lanes",
16397
+ "raw_root": root / "raw",
16398
+ }
16399
+
16400
+
16401
+ def _research_default_profile(profile_id: str = "openai-council") -> dict[str, Any]:
16402
+ profile_id = profile_id or "openai-council"
16403
+ return {
16404
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16405
+ "profile_id": profile_id,
16406
+ "label": "OpenAI research loop",
16407
+ "description": (
16408
+ "ORP-owned decomposition and synthesis across three explicit OpenAI API "
16409
+ "call moments: high-reasoning thinking, web synthesis, and Pro/Deep Research."
16410
+ ),
16411
+ "execution_policy": {
16412
+ "live_requires_execute": True,
16413
+ "process_only": True,
16414
+ "secrets_not_persisted": True,
16415
+ "default_timeout_sec": 120,
16416
+ },
16417
+ "call_moments": [
16418
+ {
16419
+ "moment_id": "plan",
16420
+ "label": "Local decomposition plan",
16421
+ "calls_api": False,
16422
+ "description": "Create ORP artifacts, prompts, and lane plan without resolving any API key.",
16423
+ },
16424
+ {
16425
+ "moment_id": "thinking_reasoning_high",
16426
+ "label": "Thinking / reasoning high",
16427
+ "calls_api": True,
16428
+ "secret_alias": "openai-primary",
16429
+ "env_var": "OPENAI_API_KEY",
16430
+ "description": "Call GPT-5.4 with high reasoning for the deliberate thinking pass.",
16431
+ },
16432
+ {
16433
+ "moment_id": "web_synthesis",
16434
+ "label": "Web synthesis",
16435
+ "calls_api": True,
16436
+ "secret_alias": "openai-primary",
16437
+ "env_var": "OPENAI_API_KEY",
16438
+ "description": "Call GPT-5.4 with web search for current public evidence and citations.",
16439
+ },
16440
+ {
16441
+ "moment_id": "pro_deep_research",
16442
+ "label": "Pro / Deep Research",
16443
+ "calls_api": True,
16444
+ "secret_alias": "openai-primary",
16445
+ "env_var": "OPENAI_API_KEY",
16446
+ "description": "Call the OpenAI Deep Research model for a longer agentic research report.",
16447
+ },
16448
+ ],
16449
+ "lanes": [
16450
+ {
16451
+ "lane_id": "openai_reasoning_high",
16452
+ "call_moment": "thinking_reasoning_high",
16453
+ "label": "OpenAI reasoning high",
16454
+ "provider": "openai",
16455
+ "model": "gpt-5.4",
16456
+ "adapter": "openai_responses",
16457
+ "role": "Deliberate high-reasoning pass from the provided context. Think hard, critique assumptions, and produce a decision-oriented answer.",
16458
+ "env_var": "OPENAI_API_KEY",
16459
+ "secret_alias": "openai-primary",
16460
+ "reasoning_effort": "high",
16461
+ "text_verbosity": "medium",
16462
+ "max_output_tokens": 4200,
16463
+ },
16464
+ {
16465
+ "lane_id": "openai_web_synthesis",
16466
+ "call_moment": "web_synthesis",
16467
+ "label": "OpenAI web synthesis",
16468
+ "provider": "openai",
16469
+ "model": "gpt-5.4",
16470
+ "adapter": "openai_responses",
16471
+ "role": "Recency-aware synthesis using OpenAI Responses web search with citations.",
16472
+ "env_var": "OPENAI_API_KEY",
16473
+ "secret_alias": "openai-primary",
16474
+ "reasoning_effort": "high",
16475
+ "text_verbosity": "medium",
16476
+ "web_search": True,
16477
+ "web_search_tool": "web_search",
16478
+ "search_context_size": "high",
16479
+ "external_web_access": True,
16480
+ "max_tool_calls": 8,
16481
+ "max_output_tokens": 3600,
16482
+ },
16483
+ {
16484
+ "lane_id": "openai_deep_research",
16485
+ "call_moment": "pro_deep_research",
16486
+ "label": "OpenAI Pro / Deep Research",
16487
+ "provider": "openai",
16488
+ "model": "o3-deep-research-2025-06-26",
16489
+ "adapter": "openai_responses",
16490
+ "role": "Pro Research style long-form investigation. Produce a structured, citation-rich report grounded in public sources.",
16491
+ "env_var": "OPENAI_API_KEY",
16492
+ "secret_alias": "openai-primary",
16493
+ "reasoning_summary": "auto",
16494
+ "web_search": True,
16495
+ "web_search_tool": "web_search_preview",
16496
+ "background": True,
16497
+ "max_tool_calls": 40,
16498
+ "max_output_tokens": 12000,
16499
+ },
16500
+ ],
16501
+ "synthesis": {
16502
+ "style": "answer_with_lane_evidence",
16503
+ "require_disagreements": True,
16504
+ "require_open_questions": True,
16505
+ },
16506
+ }
16507
+
16508
+
16509
+ def _research_normalize_profile(raw: dict[str, Any], *, fallback_profile_id: str) -> dict[str, Any]:
16510
+ base = _research_default_profile(fallback_profile_id)
16511
+ profile = {**base, **raw}
16512
+ profile["schema_version"] = str(profile.get("schema_version", RESEARCH_RUN_SCHEMA_VERSION)).strip() or RESEARCH_RUN_SCHEMA_VERSION
16513
+ profile["profile_id"] = str(profile.get("profile_id", fallback_profile_id)).strip() or fallback_profile_id
16514
+ lanes = profile.get("lanes")
16515
+ if not isinstance(lanes, list) or not lanes:
16516
+ lanes = base["lanes"]
16517
+ normalized_lanes: list[dict[str, Any]] = []
16518
+ for index, lane_raw in enumerate(lanes):
16519
+ if not isinstance(lane_raw, dict):
16520
+ continue
16521
+ lane = dict(lane_raw)
16522
+ lane_id = str(lane.get("lane_id", lane.get("id", ""))).strip() or f"lane_{index + 1}"
16523
+ lane["lane_id"] = _slug_token(lane_id, fallback=f"lane-{index + 1}").replace("-", "_")
16524
+ lane["label"] = str(lane.get("label", lane["lane_id"])).strip() or lane["lane_id"]
16525
+ lane["provider"] = str(lane.get("provider", "")).strip() or "custom"
16526
+ lane["model"] = str(lane.get("model", "")).strip() or lane["provider"]
16527
+ lane["adapter"] = str(lane.get("adapter", "planned")).strip() or "planned"
16528
+ lane["role"] = str(lane.get("role", "")).strip()
16529
+ lane["env_var"] = str(lane.get("env_var", "")).strip()
16530
+ lane["secret_alias"] = str(lane.get("secret_alias", "")).strip()
16531
+ lane["call_moment"] = str(lane.get("call_moment", lane["lane_id"])).strip() or lane["lane_id"]
16532
+ normalized_lanes.append(lane)
16533
+ profile["lanes"] = normalized_lanes
16534
+ return profile
16535
+
16536
+
16537
+ def _research_load_profile(args: argparse.Namespace, repo_root: Path) -> dict[str, Any]:
16538
+ profile_id = str(getattr(args, "profile", "") or "openai-council").strip() or "openai-council"
16539
+ profile_file = str(getattr(args, "profile_file", "") or "").strip()
16540
+ if not profile_file:
16541
+ return _research_normalize_profile({}, fallback_profile_id=profile_id)
16542
+ path = _resolve_cli_path(profile_file, repo_root)
16543
+ payload = _read_json_if_exists(path)
16544
+ if not payload:
16545
+ raise RuntimeError(f"missing or invalid research profile: {_path_for_state(path, repo_root)}")
16546
+ return _research_normalize_profile(payload, fallback_profile_id=profile_id)
16547
+
16548
+
16549
+ def _research_breakdown(question: str) -> dict[str, Any]:
16550
+ ladder = _agent_mode_breakdown(_agent_mode("granular-breakdown"), topic=question)
16551
+ return {
16552
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16553
+ "question": question,
16554
+ "mode": ladder.get("mode", {}),
16555
+ "sequence": ladder.get("sequence", []),
16556
+ "output_contract": ladder.get("output_contract", []),
16557
+ "prompt_enrichment": {
16558
+ "goal": "Answer the question with explicit assumptions, evidence boundaries, disagreements, and next verification.",
16559
+ "public_web_needed": True,
16560
+ "private_context_policy": "Do not assume private data unless it is included in the question or attached artifacts.",
16561
+ },
16562
+ "lanes": [
16563
+ {
16564
+ "lane": "openai_reasoning_high",
16565
+ "task": "Run a high-reasoning synthesis pass over tradeoffs and likely answer shape.",
16566
+ },
16567
+ {
16568
+ "lane": "openai_web_synthesis",
16569
+ "task": "Use web search when current public evidence matters and return citation-backed synthesis.",
16570
+ },
16571
+ {
16572
+ "lane": "openai_deep_research",
16573
+ "task": "Run a Pro/Deep Research investigation for a longer citation-rich report.",
16574
+ },
16575
+ ],
16576
+ }
16577
+
16578
+
16579
+ def _research_lane_prompt(question: str, lane: dict[str, Any], breakdown: dict[str, Any]) -> str:
16580
+ sequence_titles = [
16581
+ str(row.get("title", "")).strip()
16582
+ for row in breakdown.get("sequence", [])
16583
+ if isinstance(row, dict) and str(row.get("title", "")).strip()
16584
+ ]
16585
+ role = str(lane.get("role", "")).strip() or "Independent research lane."
16586
+ return "\n".join(
16587
+ [
16588
+ "You are one lane in an ORP OpenAI research loop.",
16589
+ f"Lane: {lane.get('lane_id', '')}",
16590
+ f"Provider/model: {lane.get('provider', '')}/{lane.get('model', '')}",
16591
+ f"Lane role: {role}",
16592
+ "",
16593
+ "Question:",
16594
+ question,
16595
+ "",
16596
+ "Use this decomposition ladder as the working frame:",
16597
+ ", ".join(sequence_titles) or "broad frame, boundary, lanes, subclaims, obligations, synthesis",
16598
+ "",
16599
+ "Return a concise but substantial lane report with:",
16600
+ "- answer or position",
16601
+ "- key evidence or reasoning",
16602
+ "- assumptions and uncertainty",
16603
+ "- disagreements or failure modes",
16604
+ "- sources or citations when the lane has source access",
16605
+ "",
16606
+ "Do not modify files. Do not perform actions outside answering this lane prompt.",
16607
+ ]
16608
+ )
16609
+
16610
+
16611
+ def _research_parse_lane_fixtures(raw_fixtures: Sequence[str], repo_root: Path) -> dict[str, Path]:
16612
+ fixtures: dict[str, Path] = {}
16613
+ for raw in raw_fixtures:
16614
+ text = str(raw or "").strip()
16615
+ if not text:
16616
+ continue
16617
+ if "=" not in text:
16618
+ raise RuntimeError("research lane fixtures must use lane_id=path")
16619
+ lane_id_raw, path_raw = text.split("=", 1)
16620
+ lane_id = _slug_token(lane_id_raw, fallback="lane").replace("-", "_")
16621
+ fixtures[lane_id] = _resolve_cli_path(path_raw.strip(), repo_root)
16622
+ return fixtures
16623
+
16624
+
16625
+ def _research_text_from_payload(payload: Any) -> str:
16626
+ if isinstance(payload, str):
16627
+ return payload.strip()
16628
+ if isinstance(payload, dict):
16629
+ for key in ("text", "answer", "summary", "content", "report"):
16630
+ value = payload.get(key)
16631
+ if isinstance(value, str) and value.strip():
16632
+ return value.strip()
16633
+ return ""
16634
+
16635
+
16636
+ def _research_lane_api_call_plan(
16637
+ lane: dict[str, Any],
16638
+ *,
16639
+ execute: bool,
16640
+ called: bool = False,
16641
+ secret_source: str = "",
16642
+ reason: str = "",
16643
+ request_body_keys: Sequence[str] | None = None,
16644
+ tools: Sequence[str] | None = None,
16645
+ ) -> dict[str, Any]:
16646
+ adapter = str(lane.get("adapter", "")).strip()
16647
+ provider = str(lane.get("provider", "")).strip()
16648
+ env_var = str(lane.get("env_var", "")).strip()
16649
+ secret_alias = str(lane.get("secret_alias", "")).strip()
16650
+ return {
16651
+ "call_moment": str(lane.get("call_moment", lane.get("lane_id", ""))).strip(),
16652
+ "calls_api": adapter in {"openai_responses", "anthropic_messages", "xai_chat_completions", "chimera_cli"},
16653
+ "called": bool(called),
16654
+ "execute_required": True,
16655
+ "execute": bool(execute),
16656
+ "provider": provider,
16657
+ "adapter": adapter,
16658
+ "model": str(lane.get("model", "")).strip(),
16659
+ "env_var": env_var,
16660
+ "secret_alias": secret_alias,
16661
+ "secret_resolution_order": [row for row in (f"env:{env_var}" if env_var else "", f"keychain:{secret_alias}" if secret_alias else "") if row],
16662
+ "secret_source": secret_source,
16663
+ "secret_value_persisted": False,
16664
+ "request_body_keys": sorted(str(row) for row in request_body_keys) if request_body_keys else [],
16665
+ "tools": [str(row) for row in tools] if tools else [],
16666
+ "reason": reason,
16667
+ }
16668
+
16669
+
16670
+ def _research_fixture_lane_result(
16671
+ lane: dict[str, Any],
16672
+ fixture_path: Path,
16673
+ *,
16674
+ started_at_utc: str,
16675
+ repo_root: Path,
16676
+ ) -> dict[str, Any]:
16677
+ if not fixture_path.exists():
16678
+ raise RuntimeError(f"missing research lane fixture: {_path_for_state(fixture_path, repo_root)}")
16679
+ text = fixture_path.read_text(encoding="utf-8")
16680
+ raw_payload: Any = text
16681
+ if fixture_path.suffix.lower() == ".json":
16682
+ try:
16683
+ raw_payload = json.loads(text)
16684
+ except Exception:
16685
+ raw_payload = text
16686
+ lane_text = _research_text_from_payload(raw_payload) or text.strip()
16687
+ finished_at_utc = _now_utc()
16688
+ result = {
16689
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16690
+ "lane_id": lane["lane_id"],
16691
+ "label": lane.get("label", lane["lane_id"]),
16692
+ "provider": lane.get("provider", ""),
16693
+ "model": lane.get("model", ""),
16694
+ "adapter": "fixture",
16695
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
16696
+ "api_call": _research_lane_api_call_plan(
16697
+ lane,
16698
+ execute=False,
16699
+ called=False,
16700
+ reason="Lane output loaded from fixture; no provider key was resolved.",
16701
+ ),
16702
+ "status": "complete",
16703
+ "source": "fixture",
16704
+ "started_at_utc": started_at_utc,
16705
+ "finished_at_utc": finished_at_utc,
16706
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
16707
+ "text": lane_text,
16708
+ "citations": raw_payload.get("citations", []) if isinstance(raw_payload, dict) and isinstance(raw_payload.get("citations"), list) else [],
16709
+ "fixture_path": _path_for_state(fixture_path, repo_root),
16710
+ "notes": ["Lane output loaded from an explicit fixture; no provider call was made."],
16711
+ }
16712
+ if isinstance(raw_payload, dict):
16713
+ for key in ("claims", "confidence", "disagreements", "open_questions"):
16714
+ if key in raw_payload:
16715
+ result[key] = raw_payload[key]
16716
+ return result
16717
+
16718
+
16719
+ def _research_secret_value_for_lane(lane: dict[str, Any]) -> tuple[str, str, str]:
16720
+ env_var = str(lane.get("env_var", "")).strip()
16721
+ if env_var:
16722
+ value = os.environ.get(env_var, "").strip()
16723
+ if value:
16724
+ return value, f"env:{env_var}", ""
16725
+
16726
+ secret_alias = str(lane.get("secret_alias", "")).strip()
16727
+ provider = str(lane.get("provider", "")).strip()
16728
+ if not secret_alias and not provider:
16729
+ return "", "", "no env var or secret alias configured"
16730
+
16731
+ try:
16732
+ entry = _select_keychain_entry(
16733
+ secret_ref=secret_alias,
16734
+ provider=provider,
16735
+ world_id="",
16736
+ idea_id="",
16737
+ )
16738
+ if entry is None:
16739
+ ref = secret_alias or provider
16740
+ return "", "", f"no matching local Keychain secret for {ref}"
16741
+ return _read_keychain_secret_value(entry).strip(), "keychain", ""
16742
+ except Exception as exc:
16743
+ return "", "", str(exc)
16744
+
16745
+
16746
+ def _research_chimera_result_text(stdout: str) -> tuple[str, dict[str, Any]]:
16747
+ deltas: list[str] = []
16748
+ final_text = ""
16749
+ session_id = ""
16750
+ session_path = ""
16751
+ event_count = 0
16752
+ usage: dict[str, Any] = {}
16753
+ for raw_line in stdout.splitlines():
16754
+ line = raw_line.strip()
16755
+ if not line:
16756
+ continue
16757
+ try:
16758
+ event = json.loads(line)
16759
+ except Exception:
16760
+ continue
16761
+ if not isinstance(event, dict):
16762
+ continue
16763
+ event_count += 1
16764
+ if "TextDelta" in event and isinstance(event["TextDelta"], dict):
16765
+ deltas.append(str(event["TextDelta"].get("text", "")))
16766
+ if "TurnComplete" in event and isinstance(event["TurnComplete"], dict):
16767
+ final_text = str(event["TurnComplete"].get("text") or final_text).strip()
16768
+ session_id = str(event["TurnComplete"].get("session_id", session_id)).strip()
16769
+ if "SessionReady" in event and isinstance(event["SessionReady"], dict):
16770
+ session_id = str(event["SessionReady"].get("session_id", session_id)).strip()
16771
+ if "SessionSaved" in event and isinstance(event["SessionSaved"], dict):
16772
+ session_id = str(event["SessionSaved"].get("session_id", session_id)).strip()
16773
+ session_path = str(event["SessionSaved"].get("path", "")).strip()
16774
+ if "Usage" in event and isinstance(event["Usage"], dict):
16775
+ usage = dict(event["Usage"])
16776
+
16777
+ event_type = str(event.get("type", "")).strip()
16778
+ if event_type == "text_delta":
16779
+ deltas.append(str(event.get("text", "")))
16780
+ if event_type == "turn_complete":
16781
+ final_text = str(event.get("text") or final_text).strip()
16782
+ session_id = str(event.get("session_id", session_id)).strip()
16783
+ if not final_text:
16784
+ final_text = "".join(deltas).strip()
16785
+ return final_text, {
16786
+ "event_count": event_count,
16787
+ "session_id": session_id,
16788
+ "session_path": session_path,
16789
+ "usage": usage,
16790
+ }
16791
+
16792
+
16793
+ def _research_run_chimera_lane(
16794
+ lane: dict[str, Any],
16795
+ prompt: str,
16796
+ *,
16797
+ repo_root: Path,
16798
+ chimera_bin: str,
16799
+ timeout_sec: int,
16800
+ started_at_utc: str,
16801
+ ) -> dict[str, Any]:
16802
+ bin_path = shutil.which(chimera_bin) if chimera_bin else None
16803
+ if bin_path is None and chimera_bin:
16804
+ candidate = Path(chimera_bin).expanduser()
16805
+ if candidate.exists():
16806
+ bin_path = str(candidate)
16807
+ if not bin_path:
16808
+ finished_at_utc = _now_utc()
16809
+ return {
16810
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16811
+ "lane_id": lane["lane_id"],
16812
+ "label": lane.get("label", lane["lane_id"]),
16813
+ "provider": lane.get("provider", ""),
16814
+ "model": lane.get("model", ""),
16815
+ "adapter": "chimera_cli",
16816
+ "status": "skipped",
16817
+ "started_at_utc": started_at_utc,
16818
+ "finished_at_utc": finished_at_utc,
16819
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
16820
+ "text": "",
16821
+ "notes": [f"Chimera binary not found: {chimera_bin}"],
16822
+ }
16823
+
16824
+ env = os.environ.copy()
16825
+ secret_value, secret_source, secret_issue = _research_secret_value_for_lane(lane)
16826
+ env_var = str(lane.get("env_var", "")).strip()
16827
+ if secret_value and env_var and not env.get(env_var):
16828
+ env[env_var] = secret_value
16829
+
16830
+ args = [
16831
+ bin_path,
16832
+ "--model",
16833
+ str(lane.get("model", "local")).strip() or "local",
16834
+ "--prompt",
16835
+ prompt,
16836
+ "--approval-mode",
16837
+ str(lane.get("chimera_approval_mode", "approve")).strip() or "approve",
16838
+ "--json",
16839
+ ]
16840
+ try:
16841
+ proc = subprocess.run(
16842
+ args,
16843
+ cwd=str(repo_root),
16844
+ capture_output=True,
16845
+ text=True,
16846
+ timeout=max(1, int(timeout_sec)),
16847
+ env=env,
16848
+ )
16849
+ except subprocess.TimeoutExpired as exc:
16850
+ finished_at_utc = _now_utc()
16851
+ return {
16852
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16853
+ "lane_id": lane["lane_id"],
16854
+ "label": lane.get("label", lane["lane_id"]),
16855
+ "provider": lane.get("provider", ""),
16856
+ "model": lane.get("model", ""),
16857
+ "adapter": "chimera_cli",
16858
+ "status": "failed",
16859
+ "started_at_utc": started_at_utc,
16860
+ "finished_at_utc": finished_at_utc,
16861
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
16862
+ "text": "",
16863
+ "error": f"chimera timed out after {timeout_sec}s",
16864
+ "stdout": (exc.stdout or "")[-4000:],
16865
+ "stderr": (exc.stderr or "")[-4000:],
16866
+ }
16867
+
16868
+ finished_at_utc = _now_utc()
16869
+ lane_text, meta = _research_chimera_result_text(proc.stdout)
16870
+ notes: list[str] = []
16871
+ if secret_source:
16872
+ notes.append(f"Secret supplied from {secret_source}; secret value was not persisted.")
16873
+ elif env_var and secret_issue:
16874
+ notes.append(f"No secret supplied for {env_var}: {secret_issue}")
16875
+ status = "complete" if proc.returncode == 0 and lane_text else "failed"
16876
+ if proc.returncode != 0:
16877
+ notes.append("Chimera exited non-zero.")
16878
+ return {
16879
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16880
+ "lane_id": lane["lane_id"],
16881
+ "label": lane.get("label", lane["lane_id"]),
16882
+ "provider": lane.get("provider", ""),
16883
+ "model": lane.get("model", ""),
16884
+ "adapter": "chimera_cli",
16885
+ "status": status,
16886
+ "started_at_utc": started_at_utc,
16887
+ "finished_at_utc": finished_at_utc,
16888
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
16889
+ "text": lane_text,
16890
+ "chimera": meta,
16891
+ "returncode": proc.returncode,
16892
+ "stderr_tail": proc.stderr[-4000:],
16893
+ "notes": notes,
16894
+ }
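Assuming the flags shown above (`--model`, `--prompt`, `--approval-mode`, `--json`) are what the installed Chimera CLI accepts, this sketch only previews the argv the adapter would build and checks whether a binary is reachable; nothing is executed:

import shlex
import shutil

lane = {"model": "local", "chimera_approval_mode": "approve"}
prompt = "Summarize the open questions in this repository."
args = [
    "chimera",
    "--model", lane.get("model", "local") or "local",
    "--prompt", prompt,
    "--approval-mode", lane.get("chimera_approval_mode", "approve") or "approve",
    "--json",
]
print(shlex.join(args))
print("binary found:", bool(shutil.which("chimera")))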
16895
+
16896
+
16897
+ def _research_extract_openai_text(payload: dict[str, Any]) -> tuple[str, list[dict[str, Any]], int]:
16898
+ texts: list[str] = []
16899
+ citations: list[dict[str, Any]] = []
16900
+ tool_calls = 0
16901
+ output = payload.get("output")
16902
+ if not isinstance(output, list):
16903
+ return "", citations, tool_calls
16904
+ for item in output:
16905
+ if not isinstance(item, dict):
16906
+ continue
16907
+ item_type = str(item.get("type", "")).strip()
16908
+ if item_type.endswith("_call"):
16909
+ tool_calls += 1
16910
+ if item_type != "message":
16911
+ continue
16912
+ content = item.get("content")
16913
+ if not isinstance(content, list):
16914
+ continue
16915
+ for part in content:
16916
+ if not isinstance(part, dict):
16917
+ continue
16918
+ if str(part.get("type", "")).strip() == "output_text":
16919
+ text = str(part.get("text", "")).strip()
16920
+ if text:
16921
+ texts.append(text)
16922
+ annotations = part.get("annotations")
16923
+ if isinstance(annotations, list):
16924
+ for annotation in annotations:
16925
+ if isinstance(annotation, dict):
16926
+ citations.append(
16927
+ {
16928
+ "type": str(annotation.get("type", "")).strip(),
16929
+ "title": str(annotation.get("title", "")).strip(),
16930
+ "url": str(annotation.get("url", "")).strip(),
16931
+ "start_index": annotation.get("start_index"),
16932
+ "end_index": annotation.get("end_index"),
16933
+ }
16934
+ )
16935
+ return "\n\n".join(texts).strip(), citations, tool_calls
16936
+
16937
+
16938
+ def _research_openai_output_types(payload: dict[str, Any]) -> list[str]:
16939
+ output = payload.get("output")
16940
+ if not isinstance(output, list):
16941
+ return []
16942
+ return [
16943
+ str(item.get("type", "")).strip()
16944
+ for item in output
16945
+ if isinstance(item, dict) and str(item.get("type", "")).strip()
16946
+ ]
16947
+
16948
+
16949
+ def _research_run_openai_lane(
16950
+ lane: dict[str, Any],
16951
+ prompt: str,
16952
+ *,
16953
+ timeout_sec: int,
16954
+ started_at_utc: str,
16955
+ ) -> dict[str, Any]:
16956
+ api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
16957
+ finished_at_utc = _now_utc()
16958
+ if not api_key:
16959
+ return {
16960
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
16961
+ "lane_id": lane["lane_id"],
16962
+ "label": lane.get("label", lane["lane_id"]),
16963
+ "provider": lane.get("provider", ""),
16964
+ "model": lane.get("model", ""),
16965
+ "adapter": "openai_responses",
16966
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
16967
+ "api_call": _research_lane_api_call_plan(
16968
+ lane,
16969
+ execute=True,
16970
+ called=False,
16971
+ reason=f"No OpenAI API key available: {secret_issue or 'missing OPENAI_API_KEY'}",
16972
+ ),
16973
+ "status": "skipped",
16974
+ "started_at_utc": started_at_utc,
16975
+ "finished_at_utc": finished_at_utc,
16976
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
16977
+ "text": "",
16978
+ "notes": [f"No OpenAI API key available: {secret_issue or 'missing OPENAI_API_KEY'}"],
16979
+ }
16980
+
16981
+ body: dict[str, Any] = {
16982
+ "model": str(lane.get("model", "gpt-5.4")).strip() or "gpt-5.4",
16983
+ "input": prompt,
16984
+ "background": bool(lane.get("background", False)),
16985
+ }
16986
+ tools: list[dict[str, Any]] = []
16987
+ raw_tools = lane.get("tools")
16988
+ if isinstance(raw_tools, list):
16989
+ tools.extend([dict(row) for row in raw_tools if isinstance(row, dict)])
16990
+ if bool(lane.get("web_search", False)):
16991
+ web_tool_type = str(lane.get("web_search_tool", "web_search")).strip() or "web_search"
16992
+ web_tool: dict[str, Any] = {
16993
+ "type": web_tool_type,
16994
+ "search_context_size": str(lane.get("search_context_size", "medium")).strip() or "medium",
16995
+ }
16996
+ if web_tool_type == "web_search" and "external_web_access" in lane:
16997
+ web_tool["external_web_access"] = bool(lane.get("external_web_access"))
16998
+ filters = lane.get("filters")
16999
+ if isinstance(filters, dict):
17000
+ web_tool["filters"] = filters
17001
+ tools.append(web_tool)
17002
+ if tools:
17003
+ body["tools"] = tools
17004
+ max_tool_calls = int(lane.get("max_tool_calls", 0) or 0)
17005
+ if max_tool_calls > 0:
17006
+ body["max_tool_calls"] = max_tool_calls
17007
+ max_output_tokens = int(lane.get("max_output_tokens", 0) or 0)
17008
+ if max_output_tokens > 0:
17009
+ body["max_output_tokens"] = max_output_tokens
17010
+ raw_reasoning = lane.get("reasoning")
17011
+ reasoning_effort = str(lane.get("reasoning_effort", "") or "").strip()
17012
+ if not reasoning_effort and isinstance(raw_reasoning, dict):
17013
+ reasoning_effort = str(raw_reasoning.get("effort", "") or "").strip()
17014
+ if reasoning_effort:
17015
+ body["reasoning"] = {"effort": reasoning_effort}
17016
+ reasoning_summary = str(lane.get("reasoning_summary", "") or "").strip()
17017
+ if reasoning_summary:
17018
+ reasoning_body = body.get("reasoning") if isinstance(body.get("reasoning"), dict) else {}
17019
+ reasoning_body = dict(reasoning_body)
17020
+ reasoning_body["summary"] = reasoning_summary
17021
+ body["reasoning"] = reasoning_body
17022
+ raw_text = lane.get("text")
17023
+ text_verbosity = str(lane.get("text_verbosity", "") or "").strip()
17024
+ if not text_verbosity and isinstance(raw_text, dict):
17025
+ text_verbosity = str(raw_text.get("verbosity", "") or "").strip()
17026
+ if text_verbosity:
17027
+ body["text"] = {"verbosity": text_verbosity}
17028
+ instructions = str(lane.get("instructions", "")).strip()
17029
+ if instructions:
17030
+ body["instructions"] = instructions
17031
+
17032
+ tool_types = [
17033
+ str(row.get("type", "")).strip()
17034
+ for row in body.get("tools", [])
17035
+ if isinstance(row, dict) and str(row.get("type", "")).strip()
17036
+ ]
17037
+ api_call = _research_lane_api_call_plan(
17038
+ lane,
17039
+ execute=True,
17040
+ called=True,
17041
+ secret_source=secret_source,
17042
+ request_body_keys=body.keys(),
17043
+ tools=tool_types,
17044
+ )
17045
+
17046
+ request = urlrequest.Request(
17047
+ "https://api.openai.com/v1/responses",
17048
+ data=json.dumps(body).encode("utf-8"),
17049
+ headers={
17050
+ "Authorization": f"Bearer {api_key}",
17051
+ "Content-Type": "application/json",
17052
+ },
17053
+ method="POST",
17054
+ )
17055
+ try:
17056
+ with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
17057
+ response_payload = json.loads(response.read().decode("utf-8"))
17058
+ except urlerror.HTTPError as exc:
17059
+ error_body = ""
17060
+ try:
17061
+ error_body = exc.read().decode("utf-8")
17062
+ except Exception:
17063
+ error_body = str(exc)
17064
+ finished_at_utc = _now_utc()
17065
+ return {
17066
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17067
+ "lane_id": lane["lane_id"],
17068
+ "label": lane.get("label", lane["lane_id"]),
17069
+ "provider": lane.get("provider", ""),
17070
+ "model": lane.get("model", ""),
17071
+ "adapter": "openai_responses",
17072
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
17073
+ "api_call": api_call,
17074
+ "status": "failed",
17075
+ "started_at_utc": started_at_utc,
17076
+ "finished_at_utc": finished_at_utc,
17077
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17078
+ "text": "",
17079
+ "error": error_body[-4000:],
17080
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17081
+ }
17082
+ except Exception as exc:
17083
+ finished_at_utc = _now_utc()
17084
+ return {
17085
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17086
+ "lane_id": lane["lane_id"],
17087
+ "label": lane.get("label", lane["lane_id"]),
17088
+ "provider": lane.get("provider", ""),
17089
+ "model": lane.get("model", ""),
17090
+ "adapter": "openai_responses",
17091
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
17092
+ "api_call": api_call,
17093
+ "status": "failed",
17094
+ "started_at_utc": started_at_utc,
17095
+ "finished_at_utc": finished_at_utc,
17096
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17097
+ "text": "",
17098
+ "error": str(exc),
17099
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17100
+ }
17101
+
17102
+ finished_at_utc = _now_utc()
17103
+ text, citations, tool_calls = _research_extract_openai_text(response_payload)
17104
+ response_status = str(response_payload.get("status", "")).strip()
17105
+ status = "complete" if response_status == "completed" and text else response_status or "complete"
17106
+ if status == "in_progress":
17107
+ text = text or "OpenAI deep research started in background mode; poll the response id outside ORP for completion."
17108
+ return {
17109
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17110
+ "lane_id": lane["lane_id"],
17111
+ "label": lane.get("label", lane["lane_id"]),
17112
+ "provider": lane.get("provider", ""),
17113
+ "model": lane.get("model", ""),
17114
+ "adapter": "openai_responses",
17115
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
17116
+ "api_call": api_call,
17117
+ "status": status,
17118
+ "started_at_utc": started_at_utc,
17119
+ "finished_at_utc": finished_at_utc,
17120
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17121
+ "text": text,
17122
+ "citations": citations,
17123
+ "provider_response_id": str(response_payload.get("id", "")).strip(),
17124
+ "provider_status": response_status,
17125
+ "provider_error": response_payload.get("error") if isinstance(response_payload.get("error"), dict) else None,
17126
+ "incomplete_details": response_payload.get("incomplete_details")
17127
+ if isinstance(response_payload.get("incomplete_details"), dict)
17128
+ else None,
17129
+ "output_types": _research_openai_output_types(response_payload),
17130
+ "tool_call_count": tool_calls,
17131
+ "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
17132
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17133
+ }
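For a lane configured with web search, a tool-call cap, reasoning effort, and text verbosity, the request body assembled above comes out roughly like this sketch. The field names mirror the code above; the model id is simply the adapter's default shown there, not a claim about what is currently available:

import json

lane = {
    "model": "gpt-5.4",
    "web_search": True,
    "search_context_size": "medium",
    "max_tool_calls": 8,
    "reasoning_effort": "high",
    "text_verbosity": "low",
}
body = {
    "model": lane["model"],
    "input": "What changed in the project's release process this quarter?",
    "background": False,
    "tools": [{"type": "web_search", "search_context_size": lane["search_context_size"]}],
    "max_tool_calls": lane["max_tool_calls"],
    "reasoning": {"effort": lane["reasoning_effort"]},
    "text": {"verbosity": lane["text_verbosity"]},
}
print(json.dumps(body, indent=2))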
17134
+
17135
+
17136
+ def _research_extract_anthropic_text(payload: dict[str, Any]) -> str:
17137
+ content = payload.get("content")
17138
+ if not isinstance(content, list):
17139
+ return ""
17140
+ parts: list[str] = []
17141
+ for part in content:
17142
+ if not isinstance(part, dict):
17143
+ continue
17144
+ if str(part.get("type", "")).strip() == "text":
17145
+ text = str(part.get("text", "")).strip()
17146
+ if text:
17147
+ parts.append(text)
17148
+ return "\n\n".join(parts).strip()
17149
+
17150
+
17151
+ def _research_run_anthropic_lane(
17152
+ lane: dict[str, Any],
17153
+ prompt: str,
17154
+ *,
17155
+ timeout_sec: int,
17156
+ started_at_utc: str,
17157
+ ) -> dict[str, Any]:
17158
+ api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
17159
+ finished_at_utc = _now_utc()
17160
+ if not api_key:
17161
+ return {
17162
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17163
+ "lane_id": lane["lane_id"],
17164
+ "label": lane.get("label", lane["lane_id"]),
17165
+ "provider": lane.get("provider", ""),
17166
+ "model": lane.get("model", ""),
17167
+ "adapter": "anthropic_messages",
17168
+ "status": "skipped",
17169
+ "started_at_utc": started_at_utc,
17170
+ "finished_at_utc": finished_at_utc,
17171
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17172
+ "text": "",
17173
+ "notes": [f"No Anthropic API key available: {secret_issue or 'missing ANTHROPIC_API_KEY'}"],
17174
+ }
17175
+
17176
+ model = str(lane.get("model", "claude-opus-4-7")).strip() or "claude-opus-4-7"
17177
+ body: dict[str, Any] = {
17178
+ "model": model,
17179
+ "max_tokens": int(lane.get("max_tokens", 4096) or 4096),
17180
+ "messages": [{"role": "user", "content": prompt}],
17181
+ }
17182
+ system = str(lane.get("system", "") or "").strip()
17183
+ if system:
17184
+ body["system"] = system
17185
+ if "temperature" in lane:
17186
+ try:
17187
+ body["temperature"] = float(lane.get("temperature"))
17188
+ except Exception:
17189
+ pass
17190
+ anthropic_version = str(lane.get("anthropic_version", "2023-06-01")).strip() or "2023-06-01"
17191
+ request = urlrequest.Request(
17192
+ "https://api.anthropic.com/v1/messages",
17193
+ data=json.dumps(body).encode("utf-8"),
17194
+ headers={
17195
+ "x-api-key": api_key,
17196
+ "anthropic-version": anthropic_version,
17197
+ "Content-Type": "application/json",
17198
+ },
17199
+ method="POST",
17200
+ )
17201
+ try:
17202
+ with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
17203
+ response_payload = json.loads(response.read().decode("utf-8"))
17204
+ except urlerror.HTTPError as exc:
17205
+ error_body = ""
17206
+ try:
17207
+ error_body = exc.read().decode("utf-8")
17208
+ except Exception:
17209
+ error_body = str(exc)
17210
+ finished_at_utc = _now_utc()
17211
+ return {
17212
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17213
+ "lane_id": lane["lane_id"],
17214
+ "label": lane.get("label", lane["lane_id"]),
17215
+ "provider": lane.get("provider", ""),
17216
+ "model": model,
17217
+ "adapter": "anthropic_messages",
17218
+ "status": "failed",
17219
+ "started_at_utc": started_at_utc,
17220
+ "finished_at_utc": finished_at_utc,
17221
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17222
+ "text": "",
17223
+ "error": error_body[-4000:],
17224
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17225
+ }
17226
+ except Exception as exc:
17227
+ finished_at_utc = _now_utc()
17228
+ return {
17229
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17230
+ "lane_id": lane["lane_id"],
17231
+ "label": lane.get("label", lane["lane_id"]),
17232
+ "provider": lane.get("provider", ""),
17233
+ "model": model,
17234
+ "adapter": "anthropic_messages",
17235
+ "status": "failed",
17236
+ "started_at_utc": started_at_utc,
17237
+ "finished_at_utc": finished_at_utc,
17238
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17239
+ "text": "",
17240
+ "error": str(exc),
17241
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17242
+ }
17243
+
17244
+ finished_at_utc = _now_utc()
17245
+ text = _research_extract_anthropic_text(response_payload)
17246
+ return {
17247
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17248
+ "lane_id": lane["lane_id"],
17249
+ "label": lane.get("label", lane["lane_id"]),
17250
+ "provider": lane.get("provider", ""),
17251
+ "model": str(response_payload.get("model", model)).strip() or model,
17252
+ "adapter": "anthropic_messages",
17253
+ "status": "complete" if text else "failed",
17254
+ "started_at_utc": started_at_utc,
17255
+ "finished_at_utc": finished_at_utc,
17256
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17257
+ "text": text,
17258
+ "provider_response_id": str(response_payload.get("id", "")).strip(),
17259
+ "stop_reason": str(response_payload.get("stop_reason", "")).strip(),
17260
+ "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
17261
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17262
+ }
17263
+
17264
+
17265
+ def _research_extract_chat_completion_text(payload: dict[str, Any]) -> str:
17266
+ choices = payload.get("choices")
17267
+ if not isinstance(choices, list):
17268
+ return ""
17269
+ parts: list[str] = []
17270
+ for choice in choices:
17271
+ if not isinstance(choice, dict):
17272
+ continue
17273
+ message = choice.get("message")
17274
+ if isinstance(message, dict):
17275
+ content = message.get("content")
17276
+ if isinstance(content, str) and content.strip():
17277
+ parts.append(content.strip())
17278
+ elif isinstance(content, list):
17279
+ for item in content:
17280
+ if isinstance(item, dict):
17281
+ text = str(item.get("text", "")).strip()
17282
+ if text:
17283
+ parts.append(text)
17284
+ return "\n\n".join(parts).strip()
17285
+
17286
+
17287
+ def _research_run_xai_lane(
17288
+ lane: dict[str, Any],
17289
+ prompt: str,
17290
+ *,
17291
+ timeout_sec: int,
17292
+ started_at_utc: str,
17293
+ ) -> dict[str, Any]:
17294
+ api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
17295
+ finished_at_utc = _now_utc()
17296
+ if not api_key:
17297
+ return {
17298
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17299
+ "lane_id": lane["lane_id"],
17300
+ "label": lane.get("label", lane["lane_id"]),
17301
+ "provider": lane.get("provider", ""),
17302
+ "model": lane.get("model", ""),
17303
+ "adapter": "xai_chat_completions",
17304
+ "status": "skipped",
17305
+ "started_at_utc": started_at_utc,
17306
+ "finished_at_utc": finished_at_utc,
17307
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17308
+ "text": "",
17309
+ "notes": [f"No xAI API key available: {secret_issue or 'missing XAI_API_KEY'}"],
17310
+ }
17311
+
17312
+ model = str(lane.get("model", "grok-4.20-reasoning")).strip() or "grok-4.20-reasoning"
17313
+ system = str(lane.get("system", "You are an independent research critique lane.")).strip()
17314
+ messages: list[dict[str, str]] = []
17315
+ if system:
17316
+ messages.append({"role": "system", "content": system})
17317
+ messages.append({"role": "user", "content": prompt})
17318
+ body: dict[str, Any] = {
17319
+ "model": model,
17320
+ "messages": messages,
17321
+ "stream": False,
17322
+ }
17323
+ max_tokens = int(lane.get("max_tokens", 0) or 0)
17324
+ if max_tokens > 0:
17325
+ body["max_tokens"] = max_tokens
17326
+ if "temperature" in lane:
17327
+ try:
17328
+ body["temperature"] = float(lane.get("temperature"))
17329
+ except Exception:
17330
+ pass
17331
+ base_url = str(lane.get("base_url", "https://api.x.ai/v1")).rstrip("/") or "https://api.x.ai/v1"
17332
+ request = urlrequest.Request(
17333
+ f"{base_url}/chat/completions",
17334
+ data=json.dumps(body).encode("utf-8"),
17335
+ headers={
17336
+ "Authorization": f"Bearer {api_key}",
17337
+ "Content-Type": "application/json",
17338
+ },
17339
+ method="POST",
17340
+ )
17341
+ try:
17342
+ with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
17343
+ response_payload = json.loads(response.read().decode("utf-8"))
17344
+ except urlerror.HTTPError as exc:
17345
+ error_body = ""
17346
+ try:
17347
+ error_body = exc.read().decode("utf-8")
17348
+ except Exception:
17349
+ error_body = str(exc)
17350
+ finished_at_utc = _now_utc()
17351
+ return {
17352
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17353
+ "lane_id": lane["lane_id"],
17354
+ "label": lane.get("label", lane["lane_id"]),
17355
+ "provider": lane.get("provider", ""),
17356
+ "model": model,
17357
+ "adapter": "xai_chat_completions",
17358
+ "status": "failed",
17359
+ "started_at_utc": started_at_utc,
17360
+ "finished_at_utc": finished_at_utc,
17361
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17362
+ "text": "",
17363
+ "error": error_body[-4000:],
17364
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17365
+ }
17366
+ except Exception as exc:
17367
+ finished_at_utc = _now_utc()
17368
+ return {
17369
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17370
+ "lane_id": lane["lane_id"],
17371
+ "label": lane.get("label", lane["lane_id"]),
17372
+ "provider": lane.get("provider", ""),
17373
+ "model": model,
17374
+ "adapter": "xai_chat_completions",
17375
+ "status": "failed",
17376
+ "started_at_utc": started_at_utc,
17377
+ "finished_at_utc": finished_at_utc,
17378
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17379
+ "text": "",
17380
+ "error": str(exc),
17381
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17382
+ }
17383
+
17384
+ finished_at_utc = _now_utc()
17385
+ text = _research_extract_chat_completion_text(response_payload)
17386
+ return {
17387
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17388
+ "lane_id": lane["lane_id"],
17389
+ "label": lane.get("label", lane["lane_id"]),
17390
+ "provider": lane.get("provider", ""),
17391
+ "model": str(response_payload.get("model", model)).strip() or model,
17392
+ "adapter": "xai_chat_completions",
17393
+ "status": "complete" if text else "failed",
17394
+ "started_at_utc": started_at_utc,
17395
+ "finished_at_utc": finished_at_utc,
17396
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17397
+ "text": text,
17398
+ "provider_response_id": str(response_payload.get("id", "")).strip(),
17399
+ "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
17400
+ "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
17401
+ }
17402
+
17403
+
17404
+ def _research_planned_lane(lane: dict[str, Any], *, started_at_utc: str, execute: bool, reason: str) -> dict[str, Any]:
17405
+ finished_at_utc = _now_utc()
17406
+ return {
17407
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17408
+ "lane_id": lane["lane_id"],
17409
+ "label": lane.get("label", lane["lane_id"]),
17410
+ "provider": lane.get("provider", ""),
17411
+ "model": lane.get("model", ""),
17412
+ "adapter": lane.get("adapter", "planned"),
17413
+ "call_moment": lane.get("call_moment", lane["lane_id"]),
17414
+ "api_call": _research_lane_api_call_plan(
17415
+ lane,
17416
+ execute=execute,
17417
+ called=False,
17418
+ reason=reason,
17419
+ ),
17420
+ "status": "planned" if not execute else "skipped",
17421
+ "started_at_utc": started_at_utc,
17422
+ "finished_at_utc": finished_at_utc,
17423
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17424
+ "text": "",
17425
+ "notes": [reason],
17426
+ }
17427
+
17428
+
17429
+ def _research_run_lane(
17430
+ lane: dict[str, Any],
17431
+ *,
17432
+ question: str,
17433
+ breakdown: dict[str, Any],
17434
+ repo_root: Path,
17435
+ execute: bool,
17436
+ fixtures: dict[str, Path],
17437
+ chimera_bin: str,
17438
+ timeout_sec: int,
17439
+ ) -> dict[str, Any]:
17440
+ started_at_utc = _now_utc()
17441
+ lane_id = str(lane.get("lane_id", "")).strip()
17442
+ if lane_id in fixtures:
17443
+ return _research_fixture_lane_result(lane, fixtures[lane_id], started_at_utc=started_at_utc, repo_root=repo_root)
17444
+ if not execute:
17445
+ return _research_planned_lane(
17446
+ lane,
17447
+ started_at_utc=started_at_utc,
17448
+ execute=False,
17449
+ reason="Dry run only. Re-run with --execute or provide --lane-fixture lane_id=path.",
17450
+ )
17451
+ prompt = _research_lane_prompt(question, lane, breakdown)
17452
+ adapter = str(lane.get("adapter", "")).strip()
17453
+ if adapter == "chimera_cli":
17454
+ return _research_run_chimera_lane(
17455
+ lane,
17456
+ prompt,
17457
+ repo_root=repo_root,
17458
+ chimera_bin=chimera_bin,
17459
+ timeout_sec=timeout_sec,
17460
+ started_at_utc=started_at_utc,
17461
+ )
17462
+ if adapter == "openai_responses":
17463
+ return _research_run_openai_lane(
17464
+ lane,
17465
+ prompt,
17466
+ timeout_sec=timeout_sec,
17467
+ started_at_utc=started_at_utc,
17468
+ )
17469
+ if adapter == "anthropic_messages":
17470
+ return _research_run_anthropic_lane(
17471
+ lane,
17472
+ prompt,
17473
+ timeout_sec=timeout_sec,
17474
+ started_at_utc=started_at_utc,
17475
+ )
17476
+ if adapter == "xai_chat_completions":
17477
+ return _research_run_xai_lane(
17478
+ lane,
17479
+ prompt,
17480
+ timeout_sec=timeout_sec,
17481
+ started_at_utc=started_at_utc,
17482
+ )
17483
+ return _research_planned_lane(
17484
+ lane,
17485
+ started_at_utc=started_at_utc,
17486
+ execute=True,
17487
+ reason=f"No live adapter implemented for `{adapter}`.",
17488
+ )
17489
+
17490
+
17491
+ def _research_status_from_lanes(lanes: list[dict[str, Any]]) -> str:
17492
+ statuses = {str(row.get("status", "")).strip() for row in lanes if isinstance(row, dict)}
17493
+ if not statuses:
17494
+ return "planned"
17495
+ if "failed" in statuses:
17496
+ return "partial"
17497
+ if statuses & {"queued", "in_progress"}:
17498
+ return "in_progress"
17499
+ if "complete" in statuses and statuses <= {"complete"}:
17500
+ return "complete"
17501
+ if "complete" in statuses:
17502
+ return "partial"
17503
+ if statuses <= {"planned"}:
17504
+ return "planned"
17505
+ return "partial"
17506
+
17507
+
17508
+ def _research_synthesize(question: str, lanes: list[dict[str, Any]], *, execute: bool) -> dict[str, Any]:
17509
+ complete_lanes = [lane for lane in lanes if lane.get("status") == "complete" and str(lane.get("text", "")).strip()]
17510
+ skipped = [lane for lane in lanes if lane.get("status") in {"planned", "skipped"}]
17511
+ failed = [lane for lane in lanes if lane.get("status") == "failed"]
17512
+ lines: list[str] = []
17513
+ if complete_lanes:
17514
+ lines.append(f"Question: {question}")
17515
+ lines.append("")
17516
+ lines.append("Synthesis from completed lanes:")
17517
+ for lane in complete_lanes:
17518
+ lane_label = str(lane.get("label", lane.get("lane_id", ""))).strip()
17519
+ text = str(lane.get("text", "")).strip()
17520
+ lines.append("")
17521
+ lines.append(f"[{lane_label}]")
17522
+ lines.append(text)
17523
+ else:
17524
+ lines.append(
17525
+ "No live research lane has completed yet. ORP created the durable decomposition, provider plan, and lane prompts; "
17526
+ "run again with --execute or attach lane fixtures to produce an answer."
17527
+ )
17528
+ next_actions: list[str] = []
17529
+ if not execute:
17530
+ next_actions.append("Run `orp research ask <question> --execute --json` when you are ready to spend live provider calls.")
17531
+ if skipped:
17532
+ next_actions.append("Attach completed external reports with `--lane-fixture lane_id=path` to synthesize without re-calling providers.")
17533
+ if failed:
17534
+ next_actions.append("Inspect failed lane JSON files under `orp/research/<run_id>/lanes/` and re-run only after credentials/adapters are fixed.")
17535
+ citations: list[dict[str, Any]] = []
17536
+ for lane in complete_lanes:
17537
+ lane_citations = lane.get("citations")
17538
+ if isinstance(lane_citations, list):
17539
+ citations.extend([row for row in lane_citations if isinstance(row, dict)])
17540
+ return {
17541
+ "answer": "\n".join(lines).strip(),
17542
+ "completed_lane_count": len(complete_lanes),
17543
+ "planned_or_skipped_lane_count": len(skipped),
17544
+ "failed_lane_count": len(failed),
17545
+ "confidence": "multi_lane" if len(complete_lanes) > 1 else ("single_lane" if complete_lanes else "planning_only"),
17546
+ "citations": citations,
17547
+ "next_actions": _unique_strings(next_actions),
17548
+ }
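The confidence label in the synthesis depends only on how many lanes completed with text; a one-function restatement for illustration:

def confidence(completed_lane_count: int) -> str:
    # Mirrors the synthesis rule above.
    if completed_lane_count > 1:
        return "multi_lane"
    return "single_lane" if completed_lane_count == 1 else "planning_only"

print([confidence(n) for n in (0, 1, 3)])  # ['planning_only', 'single_lane', 'multi_lane']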
17549
+
17550
+
17551
+ def _research_summary_markdown(payload: dict[str, Any]) -> str:
17552
+ lines: list[str] = []
17553
+ lines.append(f"# ORP Research Run `{payload.get('run_id', '')}`")
17554
+ lines.append("")
17555
+ lines.append(f"- status: `{payload.get('status', '')}`")
17556
+ lines.append(f"- question: {payload.get('question', '')}")
17557
+ lines.append(f"- execute: `{str(bool(payload.get('execute'))).lower()}`")
17558
+ lines.append(f"- profile: `{payload.get('profile', {}).get('profile_id', '')}`")
17559
+ lines.append("")
17560
+ lines.append("## Lanes")
17561
+ lines.append("")
17562
+ for lane in payload.get("lanes", []):
17563
+ if not isinstance(lane, dict):
17564
+ continue
17565
+ api_call = lane.get("api_call") if isinstance(lane.get("api_call"), dict) else {}
17566
+ lines.append(
17567
+ f"- `{lane.get('lane_id', '')}`: `{lane.get('status', '')}` "
17568
+ f"via `{lane.get('adapter', '')}` on `{lane.get('model', '')}` "
17569
+ f"at `{lane.get('call_moment', '')}` "
17570
+ f"(api_called: `{str(bool(api_call.get('called', False))).lower()}`)"
17571
+ )
17572
+ lines.append("")
17573
+ lines.append("## Synthesis")
17574
+ lines.append("")
17575
+ lines.append(str(payload.get("synthesis", {}).get("answer", "")).strip())
17576
+ next_actions = payload.get("synthesis", {}).get("next_actions", [])
17577
+ if isinstance(next_actions, list) and next_actions:
17578
+ lines.append("")
17579
+ lines.append("## Next Actions")
17580
+ lines.append("")
17581
+ for action in next_actions:
17582
+ lines.append(f"- {action}")
17583
+ lines.append("")
17584
+ lines.append("## Notes")
17585
+ lines.append("")
17586
+ lines.append("- Research runs are ORP process artifacts, not evidence by themselves.")
17587
+ lines.append("- Secret values are used only at execution time and are not written to artifacts.")
17588
+ return "\n".join(lines).rstrip() + "\n"
17589
+
17590
+
17591
+ def _research_load_answer(repo_root: Path, run_id: str) -> tuple[dict[str, Any], dict[str, Path]]:
17592
+ run_ref = str(run_id or "").strip()
17593
+ state = _read_json_if_exists(repo_root / "orp" / "state.json")
17594
+ if not run_ref or run_ref == "latest":
17595
+ run_ref = str(state.get("last_research_run_id", "")).strip()
17596
+ if not run_ref:
17597
+ raise RuntimeError("No research run id provided and no last research run is recorded.")
17598
+ paths = _research_paths(repo_root, run_ref)
17599
+ payload = _read_json_if_exists(paths["answer_json"])
17600
+ if not payload:
17601
+ raise RuntimeError(f"research run not found: {run_ref}")
17602
+ return payload, paths
15848
17603
 
15849
17604
 
15850
- def cmd_discover_profile_init(args: argparse.Namespace) -> int:
17605
+ def _research_update_state(repo_root: Path, payload: dict[str, Any]) -> None:
17606
+ state_path = repo_root / "orp" / "state.json"
17607
+ state = {**_default_state_payload(), **_read_json_if_exists(state_path)}
17608
+ run_id = str(payload.get("run_id", "")).strip()
17609
+ research_runs = state.get("research_runs") if isinstance(state.get("research_runs"), dict) else {}
17610
+ if run_id:
17611
+ research_runs[run_id] = {
17612
+ "run_id": run_id,
17613
+ "status": payload.get("status", ""),
17614
+ "question": payload.get("question", ""),
17615
+ "generated_at_utc": payload.get("generated_at_utc", ""),
17616
+ "answer_json": payload.get("artifacts", {}).get("answer_json", ""),
17617
+ "summary_md": payload.get("artifacts", {}).get("summary_md", ""),
17618
+ }
17619
+ state["last_research_run_id"] = run_id
17620
+ state["research_runs"] = research_runs
17621
+ _write_json(state_path, state)
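After a run, `orp/state.json` gains a `research_runs` index entry plus `last_research_run_id`. The sketch below shows the assumed shape with invented ids and artifact file names; the real paths come from `_research_paths`, which is outside this hunk:

import json

state_after_run = {
    "last_research_run_id": "research-20260101-0001",  # invented example id
    "research_runs": {
        "research-20260101-0001": {
            "run_id": "research-20260101-0001",
            "status": "planned",  # dry runs stay "planned" until lanes execute
            "question": "Which providers should back the critique lane?",
            "generated_at_utc": "2026-01-01T00:00:00Z",
            # File names are illustrative; actual names come from _research_paths.
            "answer_json": "orp/research/research-20260101-0001/answer.json",
            "summary_md": "orp/research/research-20260101-0001/summary.md",
        }
    },
}
print(json.dumps(state_after_run, indent=2))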
17622
+
17623
+
17624
+ def cmd_research_ask(args: argparse.Namespace) -> int:
15851
17625
  repo_root = Path(args.repo_root).resolve()
15852
- out_path = _resolve_cli_path(args.out or DEFAULT_DISCOVER_PROFILE, repo_root)
15853
- payload = _discover_profile_template(
15854
- profile_id=args.profile_id,
15855
- owner=args.owner or "",
15856
- owner_type=args.owner_type,
15857
- keywords=_coerce_string_list(args.keyword),
15858
- topics=_coerce_string_list(args.topic),
15859
- languages=_coerce_string_list(args.language),
15860
- areas=_coerce_string_list(args.area),
15861
- people=_coerce_string_list(args.person),
15862
- )
15863
- _write_json(out_path, payload)
17626
+ _ensure_dirs(repo_root)
17627
+ question = " ".join(str(part) for part in getattr(args, "question", [])).strip()
17628
+ if not question:
17629
+ raise RuntimeError("research question is required.")
17630
+ run_id = str(getattr(args, "run_id", "") or "").strip() or _research_id()
17631
+ execute = bool(getattr(args, "execute", False))
17632
+ timeout_sec = int(getattr(args, "timeout_sec", 120) or 120)
17633
+ profile = _research_load_profile(args, repo_root)
17634
+ breakdown = _research_breakdown(question)
17635
+ fixtures = _research_parse_lane_fixtures(getattr(args, "lane_fixture", []) or [], repo_root)
17636
+ paths = _research_paths(repo_root, run_id)
17637
+ started_at_utc = _now_utc()
17638
+
17639
+ request_payload = {
17640
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17641
+ "kind": "research_request",
17642
+ "run_id": run_id,
17643
+ "question": question,
17644
+ "profile_id": profile.get("profile_id", ""),
17645
+ "execute": execute,
17646
+ "created_at_utc": started_at_utc,
17647
+ "timeout_sec": timeout_sec,
17648
+ "call_moments": profile.get("call_moments", []) if isinstance(profile.get("call_moments"), list) else [],
17649
+ "lane_fixtures": {lane_id: _path_for_state(path, repo_root) for lane_id, path in fixtures.items()},
17650
+ }
17651
+ _write_json(paths["request_json"], request_payload)
17652
+ _write_json(paths["breakdown_json"], breakdown)
17653
+ _write_json(paths["profile_json"], profile)
17654
+
17655
+ lanes: list[dict[str, Any]] = []
17656
+ for lane in profile.get("lanes", []):
17657
+ if not isinstance(lane, dict):
17658
+ continue
17659
+ lane_result = _research_run_lane(
17660
+ lane,
17661
+ question=question,
17662
+ breakdown=breakdown,
17663
+ repo_root=repo_root,
17664
+ execute=execute,
17665
+ fixtures=fixtures,
17666
+ chimera_bin=str(getattr(args, "chimera_bin", "chimera") or "chimera"),
17667
+ timeout_sec=timeout_sec,
17668
+ )
17669
+ lanes.append(lane_result)
17670
+ _write_json(paths["lanes_root"] / f"{lane_result['lane_id']}.json", lane_result)
17671
+
17672
+ finished_at_utc = _now_utc()
17673
+ artifacts = {
17674
+ "request_json": _path_for_state(paths["request_json"], repo_root),
17675
+ "breakdown_json": _path_for_state(paths["breakdown_json"], repo_root),
17676
+ "profile_json": _path_for_state(paths["profile_json"], repo_root),
17677
+ "answer_json": _path_for_state(paths["answer_json"], repo_root),
17678
+ "summary_md": _path_for_state(paths["summary_md"], repo_root),
17679
+ "lanes_root": _path_for_state(paths["lanes_root"], repo_root),
17680
+ }
17681
+ payload = {
17682
+ "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
17683
+ "kind": "research_run",
17684
+ "run_id": run_id,
17685
+ "status": _research_status_from_lanes(lanes),
17686
+ "question": question,
17687
+ "execute": execute,
17688
+ "generated_at_utc": finished_at_utc,
17689
+ "started_at_utc": started_at_utc,
17690
+ "finished_at_utc": finished_at_utc,
17691
+ "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
17692
+ "profile": {
17693
+ "profile_id": profile.get("profile_id", ""),
17694
+ "label": profile.get("label", ""),
17695
+ "lane_count": len(profile.get("lanes", [])) if isinstance(profile.get("lanes"), list) else 0,
17696
+ },
17697
+ "call_moments": profile.get("call_moments", []) if isinstance(profile.get("call_moments"), list) else [],
17698
+ "breakdown": breakdown,
17699
+ "lanes": lanes,
17700
+ "synthesis": _research_synthesize(question, lanes, execute=execute),
17701
+ "artifacts": artifacts,
17702
+ "notes": [
17703
+ "Research runs are ORP process artifacts, not canonical evidence.",
17704
+ "Live provider calls require --execute; dry runs persist the decomposition and lane plan only.",
17705
+ "Secret values are not written to ORP artifacts.",
17706
+ ],
17707
+ }
17708
+ _write_json(paths["answer_json"], payload)
17709
+ _write_text(paths["summary_md"], _research_summary_markdown(payload))
17710
+ _research_update_state(repo_root, payload)
15864
17711
 
15865
17712
  result = {
15866
17713
  "ok": True,
15867
- "profile_path": _path_for_state(out_path, repo_root),
15868
- "profile_id": payload["profile_id"],
15869
- "owner_login": payload["discover"]["github"]["owner"]["login"],
15870
- "owner_type": payload["discover"]["github"]["owner"]["type"],
15871
- "notes": payload["notes"],
17714
+ "run_id": run_id,
17715
+ "status": payload["status"],
17716
+ "question": question,
17717
+ "execute": execute,
17718
+ "profile_id": profile.get("profile_id", ""),
17719
+ "lane_statuses": [
17720
+ {
17721
+ "lane_id": lane.get("lane_id", ""),
17722
+ "call_moment": lane.get("call_moment", ""),
17723
+ "status": lane.get("status", ""),
17724
+ "adapter": lane.get("adapter", ""),
17725
+ "model": lane.get("model", ""),
17726
+ "api_called": bool(lane.get("api_call", {}).get("called", False)) if isinstance(lane.get("api_call"), dict) else False,
17727
+ }
17728
+ for lane in lanes
17729
+ ],
17730
+ "synthesis": payload["synthesis"],
17731
+ "artifacts": artifacts,
17732
+ "schema_path": "spec/v1/research-run.schema.json",
15872
17733
  }
15873
17734
  if args.json_output:
15874
17735
  _print_json(result)
15875
17736
  return 0
15876
17737
 
15877
- print(f"profile_path={result['profile_path']}")
15878
- print(f"profile_id={result['profile_id']}")
15879
- print(f"owner_login={result['owner_login']}")
15880
- print(f"owner_type={result['owner_type']}")
15881
- print(f"next=orp discover github scan --profile {result['profile_path']}")
17738
+ print(f"run_id={run_id}")
17739
+ print(f"status={payload['status']}")
17740
+ print(f"answer_json={artifacts['answer_json']}")
17741
+ print(f"summary_md={artifacts['summary_md']}")
17742
+ for lane in lanes:
17743
+ print(f"lane.{lane.get('lane_id', '')}.status={lane.get('status', '')}")
15882
17744
  return 0
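Assuming the `orp` entry point is on PATH, a dry-run invocation of the new subcommand can be driven from Python and its JSON result read back; the `--profile` and `--json` flags match the `research ask` parser added later in this diff, and without `--execute` no live provider calls are made:

import json
import subprocess

cmd = [
    "orp", "research", "ask",
    "Which governance docs are stale?",
    "--profile", "openai-council",
    "--json",
]
proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
if proc.returncode == 0:
    result = json.loads(proc.stdout)
    print(result["status"], result["artifacts"]["summary_md"])
else:
    print(proc.stderr.strip())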
15883
17745
 
15884
17746
 
15885
- def cmd_discover_github_scan(args: argparse.Namespace) -> int:
17747
+ def cmd_research_status(args: argparse.Namespace) -> int:
15886
17748
  repo_root = Path(args.repo_root).resolve()
15887
- profile_path = _resolve_cli_path(args.profile or DEFAULT_DISCOVER_PROFILE, repo_root)
15888
- if not profile_path.exists():
15889
- raise RuntimeError(
15890
- f"missing discovery profile: {_path_for_state(profile_path, repo_root)}. "
15891
- "Run `orp discover profile init` first."
15892
- )
15893
-
15894
- repos_fixture = _resolve_cli_path(args.repos_fixture, repo_root) if args.repos_fixture else None
15895
- issues_fixture = _resolve_cli_path(args.issues_fixture, repo_root) if args.issues_fixture else None
15896
- scan_id = args.scan_id or _scan_id()
15897
- payload = _perform_github_discovery_scan(
15898
- repo_root=repo_root,
15899
- profile_path=profile_path,
15900
- scan_id=scan_id,
15901
- repos_fixture_path=repos_fixture,
15902
- issues_fixture_path=issues_fixture,
15903
- )
17749
+ payload, _ = _research_load_answer(repo_root, str(getattr(args, "run_id", "") or "latest"))
17750
+ result = {
17751
+ "ok": True,
17752
+ "run_id": payload.get("run_id", ""),
17753
+ "status": payload.get("status", ""),
17754
+ "question": payload.get("question", ""),
17755
+ "generated_at_utc": payload.get("generated_at_utc", ""),
17756
+ "lane_statuses": [
17757
+ {
17758
+ "lane_id": lane.get("lane_id", ""),
17759
+ "call_moment": lane.get("call_moment", ""),
17760
+ "status": lane.get("status", ""),
17761
+ "adapter": lane.get("adapter", ""),
17762
+ "model": lane.get("model", ""),
17763
+ "api_called": bool(lane.get("api_call", {}).get("called", False)) if isinstance(lane.get("api_call"), dict) else False,
17764
+ }
17765
+ for lane in payload.get("lanes", [])
17766
+ if isinstance(lane, dict)
17767
+ ],
17768
+ "artifacts": payload.get("artifacts", {}),
17769
+ }
15904
17770
  if args.json_output:
15905
- _print_json(payload)
17771
+ _print_json(result)
15906
17772
  return 0
15907
-
15908
- print(f"scan_id={payload['scan_id']}")
15909
- print(f"profile={payload['profile']['path']}")
15910
- print(f"owner={payload['owner']['login']}")
15911
- print(f"owner_type={payload['owner']['type']}")
15912
- print(f"scan_json={payload['artifacts']['scan_json']}")
15913
- print(f"summary_md={payload['artifacts']['summary_md']}")
15914
- if payload["repos"]:
15915
- top_repo = payload["repos"][0]["full_name"]
15916
- print(f"top_repo={top_repo}")
15917
- print(f"next=orp collaborate init --github-repo {top_repo}")
15918
- if payload["issues"]:
15919
- top_issue = payload["issues"][0]
15920
- print(f"top_issue={top_issue['repo']}#{top_issue['number']}")
17773
+ print(f"run_id={result['run_id']}")
17774
+ print(f"status={result['status']}")
17775
+ print(f"question={result['question']}")
17776
+ for lane in result["lane_statuses"]:
17777
+ print(f"lane.{lane.get('lane_id', '')}.status={lane.get('status', '')}")
15921
17778
  return 0
15922
17779
 
15923
17780
 
15924
- def cmd_exchange_repo_synthesize(args: argparse.Namespace) -> int:
17781
+ def cmd_research_show(args: argparse.Namespace) -> int:
15925
17782
  repo_root = Path(args.repo_root).resolve()
15926
- exchange_id = str(getattr(args, "exchange_id", "") or "").strip() or _exchange_id()
15927
- source = _exchange_source_payload(repo_root, args)
15928
- source_root = Path(str(source.get("local_path", "")).strip()).resolve()
15929
- inventory = _exchange_inventory(source_root)
15930
- relation = _exchange_relation(repo_root, source_root, inventory)
15931
- suggested_focus = _exchange_suggested_focus(inventory, relation)
15932
- paths = _exchange_paths(repo_root, exchange_id)
15933
-
15934
- payload = {
15935
- "schema_version": EXCHANGE_REPORT_SCHEMA_VERSION,
15936
- "kind": "exchange_report",
15937
- "exchange_id": exchange_id,
15938
- "generated_at_utc": _now_utc(),
15939
- "current_project_root": str(repo_root),
15940
- "source": source,
15941
- "inventory": inventory,
15942
- "relation": relation,
15943
- "suggested_focus": suggested_focus,
15944
- "artifacts": {
15945
- "exchange_json": _path_for_state(paths["exchange_json"], repo_root),
15946
- "summary_md": _path_for_state(paths["summary_md"], repo_root),
15947
- "transfer_map_md": _path_for_state(paths["transfer_map_md"], repo_root),
15948
- },
15949
- "notes": [
15950
- "Knowledge exchange is deeper than discovery scan output.",
15951
- "Exchange artifacts are structured synthesis aids, not evidence by themselves.",
15952
- "Local non-git directories can be bootstrapped into git when `--allow-git-init` is explicitly provided.",
15953
- ],
15954
- }
15955
- _write_json(paths["exchange_json"], payload)
15956
- _write_text(paths["summary_md"], _exchange_summary_markdown(payload))
15957
- _write_text(paths["transfer_map_md"], _exchange_transfer_map_markdown(payload))
15958
-
15959
- result = {
15960
- "ok": True,
15961
- "exchange_id": exchange_id,
15962
- "source": source,
15963
- "inventory": inventory,
15964
- "relation": relation,
15965
- "suggested_focus": suggested_focus,
15966
- "artifacts": payload["artifacts"],
15967
- "schema_path": "spec/v1/exchange-report.schema.json",
15968
- }
17783
+ payload, _ = _research_load_answer(repo_root, str(getattr(args, "run_id", "") or "latest"))
15969
17784
  if args.json_output:
15970
- _print_json(result)
17785
+ _print_json(payload)
15971
17786
  return 0
15972
-
15973
- print(f"exchange_id={exchange_id}")
15974
- print(f"source.mode={source.get('mode', '')}")
15975
- print(f"source.local_path={source.get('local_path', '')}")
15976
- print(f"source.git_present={str(bool(source.get('git_present'))).lower()}")
15977
- print(f"source.git_initialized_by_orp={str(bool(source.get('git_initialized_by_orp'))).lower()}")
15978
- print(f"artifacts.exchange_json={payload['artifacts']['exchange_json']}")
15979
- print(f"artifacts.summary_md={payload['artifacts']['summary_md']}")
15980
- print(f"artifacts.transfer_map_md={payload['artifacts']['transfer_map_md']}")
17787
+ print(str(payload.get("synthesis", {}).get("answer", "")).strip())
15981
17788
  return 0
15982
17789
 
15983
17790
 
@@ -19452,14 +21259,22 @@ def _resolve_secret_scope_from_args(
19452
21259
 
19453
21260
  def _resolve_secret_value_arg(args: argparse.Namespace, *, required: bool) -> tuple[bool, str]:
19454
21261
  value_from_stdin = bool(getattr(args, "value_stdin", False))
21262
+ value_from_env = bool(getattr(args, "from_env", False))
19455
21263
  raw_value = getattr(args, "value", None)
19456
- if value_from_stdin and raw_value is not None:
19457
- raise RuntimeError("Use either --value or --value-stdin, not both.")
21264
+ if sum([bool(value_from_stdin), bool(value_from_env), raw_value is not None]) > 1:
21265
+ raise RuntimeError("Use only one of --value, --value-stdin, or --from-env.")
19458
21266
 
19459
- provided = raw_value is not None or value_from_stdin
21267
+ provided = raw_value is not None or value_from_stdin or value_from_env
19460
21268
  value = str(raw_value).strip() if raw_value is not None else ""
19461
21269
  if value_from_stdin:
19462
21270
  value = _read_value_from_stdin()
21271
+ if value_from_env:
21272
+ env_var_name = str(getattr(args, "env_var_name", "") or "").strip()
21273
+ if not env_var_name:
21274
+ raise RuntimeError("--from-env requires --env-var-name.")
21275
+ value = os.environ.get(env_var_name, "").strip()
21276
+ if required and not value:
21277
+ raise RuntimeError(f"Environment variable {env_var_name} is empty or not set.")
19463
21278
 
19464
21279
  if required and not value:
19465
21280
  value = _prompt_value("Secret value", secret=True)
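The hunk above makes `--from-env` a third, mutually exclusive value source. A standalone sketch of the same precedence and validation, with a SimpleNamespace standing in for the parsed argparse namespace:

import os
from types import SimpleNamespace

def resolve_value(args) -> str:
    # At most one of --value, --value-stdin, --from-env may be used.
    sources = [args.value is not None, args.value_stdin, args.from_env]
    if sum(bool(flag) for flag in sources) > 1:
        raise RuntimeError("Use only one of --value, --value-stdin, or --from-env.")
    if args.from_env:
        if not args.env_var_name:
            raise RuntimeError("--from-env requires --env-var-name.")
        return os.environ.get(args.env_var_name, "").strip()
    if args.value_stdin:
        return "<read from stdin>"  # placeholder for the real stdin reader
    return (args.value or "").strip()

args = SimpleNamespace(value=None, value_stdin=False, from_env=True, env_var_name="OPENAI_API_KEY")
print(bool(resolve_value(args)))  # True when OPENAI_API_KEY is set in this shell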
@@ -19717,11 +21532,11 @@ def _keychain_comment_for_secret(secret: dict[str, Any]) -> str:
19717
21532
 
19718
21533
  def _normalize_secret_binding_summary(binding: dict[str, Any]) -> dict[str, Any]:
19719
21534
  return {
19720
- "binding_id": str(binding.get("id", "")).strip(),
19721
- "world_id": str(binding.get("worldId", "")).strip(),
19722
- "idea_id": str(binding.get("ideaId", "")).strip(),
21535
+ "binding_id": str(binding.get("binding_id", binding.get("id", ""))).strip(),
21536
+ "world_id": str(binding.get("world_id", binding.get("worldId", ""))).strip(),
21537
+ "idea_id": str(binding.get("idea_id", binding.get("ideaId", ""))).strip(),
19723
21538
  "purpose": str(binding.get("purpose", "")).strip(),
19724
- "primary": bool(binding.get("isPrimary", False)),
21539
+ "primary": bool(binding.get("primary", binding.get("isPrimary", False))),
19725
21540
  }
19726
21541
 
19727
21542
 
@@ -19998,6 +21813,48 @@ def _sync_secret_to_keychain(
19998
21813
  return _upsert_keychain_secret_registry_entry(entry)
19999
21814
 
20000
21815
 
21816
+ def _build_local_keychain_secret_from_args(args: argparse.Namespace, existing_entry: dict[str, Any] | None = None) -> dict[str, Any]:
21817
+ alias = str(getattr(args, "alias", "") or "").strip()
21818
+ provider = str(getattr(args, "provider", "") or "").strip()
21819
+ if not alias:
21820
+ raise RuntimeError("Secret alias is required.")
21821
+ if not provider:
21822
+ raise RuntimeError("Secret provider is required.")
21823
+
21824
+ if existing_entry:
21825
+ existing_provider = str(existing_entry.get("provider", "") or "").strip()
21826
+ if existing_provider and existing_provider != provider:
21827
+ raise RuntimeError(
21828
+ f"Local Keychain secret alias already exists with provider '{existing_provider}', not '{provider}'."
21829
+ )
21830
+
21831
+ label = str(getattr(args, "label", "") or "").strip() or str(existing_entry.get("label", "") if existing_entry else "").strip() or alias
21832
+ kind = str(getattr(args, "kind", "api_key") or "api_key").strip() or "api_key"
21833
+ username = getattr(args, "username", None)
21834
+ env_var_name = getattr(args, "env_var_name", None)
21835
+ now = _now_utc()
21836
+ return {
21837
+ "id": str(existing_entry.get("secret_id", "") if existing_entry else "").strip() or f"local-{uuid.uuid4().hex[:12]}",
21838
+ "alias": alias,
21839
+ "label": label,
21840
+ "provider": provider,
21841
+ "kind": kind,
21842
+ "username": str(username).strip() if username is not None else str(existing_entry.get("username", "") if existing_entry else "").strip(),
21843
+ "envVarName": str(env_var_name).strip() if env_var_name is not None else str(existing_entry.get("env_var_name", "") if existing_entry else "").strip(),
21844
+ "status": "active",
21845
+ "valueVersion": f"local:{now}",
21846
+ "valuePreview": "stored in local Keychain",
21847
+ "bindings": [
21848
+ _binding_payload_from_keychain_summary(row)
21849
+ for row in (existing_entry.get("bindings", []) if isinstance(existing_entry, dict) else [])
21850
+ if isinstance(row, dict)
21851
+ ],
21852
+ "lastUsedAt": "",
21853
+ "rotatedAt": now,
21854
+ "updatedAt": now,
21855
+ }
21856
+
21857
+
20001
21858
  def _try_get_secret_by_ref(args: argparse.Namespace, secret_ref: str) -> dict[str, Any] | None:
20002
21859
  ref = str(secret_ref or "").strip()
20003
21860
  if not ref:
@@ -21794,6 +23651,50 @@ def cmd_secrets_resolve(args: argparse.Namespace) -> int:
21794
23651
  return 0
21795
23652
 
21796
23653
 
23654
+ def cmd_secrets_keychain_add(args: argparse.Namespace) -> int:
23655
+ _ensure_keychain_supported()
23656
+ _, value = _resolve_secret_value_arg(args, required=True)
23657
+ alias = str(getattr(args, "alias", "") or "").strip()
23658
+ existing_entry = _select_keychain_entry(
23659
+ secret_ref=alias,
23660
+ provider="",
23661
+ world_id="",
23662
+ idea_id="",
23663
+ )
23664
+ secret = _build_local_keychain_secret_from_args(args, existing_entry)
23665
+ binding = _build_secret_binding_payload_from_args(args)
23666
+ entry = _build_keychain_registry_entry(secret, binding=binding)
23667
+ entry.update(_store_keychain_secret_value(secret, value))
23668
+ entry = _upsert_keychain_secret_registry_entry(entry)
23669
+ result = {
23670
+ "ok": True,
23671
+ "created": existing_entry is None,
23672
+ "secret": _secret_payload_from_keychain_entry(entry),
23673
+ "entry": entry,
23674
+ "registry_path": str(_keychain_secret_registry_path()),
23675
+ "keychain_service": str(entry.get("keychain_service", "")).strip(),
23676
+ "keychain_account": str(entry.get("keychain_account", "")).strip(),
23677
+ "source": "keychain",
23678
+ }
23679
+ if args.json_output:
23680
+ _print_json(result)
23681
+ else:
23682
+ _print_secret_human(
23683
+ result["secret"],
23684
+ include_bindings=True,
23685
+ source="keychain",
23686
+ )
23687
+ _print_pairs(
23688
+ [
23689
+ ("secret.created", str(result["created"]).lower()),
23690
+ ("keychain.service", result["keychain_service"]),
23691
+ ("keychain.account", result["keychain_account"]),
23692
+ ("registry.path", result["registry_path"]),
23693
+ ]
23694
+ )
23695
+ return 0
23696
+
23697
+
21797
23698
  def cmd_secrets_keychain_list(args: argparse.Namespace) -> int:
21798
23699
  provider = str(getattr(args, "provider", "") or "").strip()
21799
23700
  world_id, idea_id = _resolve_secret_scope_from_args(
@@ -24310,6 +26211,8 @@ def build_parser() -> argparse.ArgumentParser:
24310
26211
  " 3. Later run `orp secrets list` or `orp secrets resolve ...`\n\n"
24311
26212
  "Agent flow:\n"
24312
26213
  " - Pipe the value with `--value-stdin` instead of typing it interactively.\n\n"
26214
+ "Local flow:\n"
26215
+ " - Use `orp secrets keychain-add ...` to store a machine-local secret without the hosted API.\n\n"
24313
26216
  "Local macOS Keychain caching and hosted sync are optional layers on top."
24314
26217
  ),
24315
26218
  epilog=(
@@ -24317,6 +26220,7 @@ def build_parser() -> argparse.ArgumentParser:
24317
26220
  " orp secrets add --alias openai-primary --label \"OpenAI Primary\" --provider openai\n"
24318
26221
  " orp secrets add --alias huggingface-login --label \"Hugging Face Login\" --provider huggingface --kind password --username cody\n"
24319
26222
  " printf '%s' 'sk-...' | orp secrets add --alias openai-primary --label \"OpenAI Primary\" --provider openai --value-stdin\n"
26223
+ " printf '%s' 'sk-...' | orp secrets keychain-add --alias openai-primary --label \"OpenAI Primary\" --provider openai --env-var-name OPENAI_API_KEY --value-stdin\n"
24320
26224
  " orp secrets list\n"
24321
26225
  " orp secrets resolve openai-primary --reveal"
24322
26226
  ),
@@ -24425,6 +26329,46 @@ def build_parser() -> argparse.ArgumentParser:
24425
26329
  add_json_flag(s_secrets_ensure)
24426
26330
  s_secrets_ensure.set_defaults(func=cmd_secrets_ensure, json_output=False)
24427
26331
 
26332
+ s_secrets_keychain_add = secrets_sub.add_parser(
26333
+ "keychain-add",
26334
+ help="Save or update one secret directly in the local macOS Keychain registry",
26335
+ )
26336
+ s_secrets_keychain_add.add_argument("--alias", required=True, help="Stable secret alias")
26337
+ s_secrets_keychain_add.add_argument("--label", default="", help="Human label for the secret")
26338
+ s_secrets_keychain_add.add_argument("--provider", required=True, help="Provider slug, for example openai")
26339
+ s_secrets_keychain_add.add_argument(
26340
+ "--kind",
26341
+ choices=["api_key", "access_token", "password", "other"],
26342
+ default="api_key",
26343
+ help="Secret kind (default: api_key)",
26344
+ )
26345
+ s_secrets_keychain_add.add_argument(
26346
+ "--username",
26347
+ default=None,
26348
+ help="Optional username or login identifier that belongs with this credential",
26349
+ )
26350
+ s_secrets_keychain_add.add_argument("--env-var-name", default=None, help="Optional env var name, for example OPENAI_API_KEY")
26351
+ s_secrets_keychain_add.add_argument("--value", default=None, help="Secret value")
26352
+ s_secrets_keychain_add.add_argument(
26353
+ "--value-stdin",
26354
+ action="store_true",
26355
+ help="Read the secret value from stdin",
26356
+ )
26357
+ s_secrets_keychain_add.add_argument(
26358
+ "--from-env",
26359
+ action="store_true",
26360
+ help="Read the secret value from --env-var-name in the current process environment",
26361
+ )
26362
+ add_secret_scope_flags(s_secrets_keychain_add)
26363
+ s_secrets_keychain_add.add_argument("--purpose", default="", help="Optional project usage note when binding")
26364
+ s_secrets_keychain_add.add_argument(
26365
+ "--primary",
26366
+ action="store_true",
26367
+ help="Mark the local project binding as primary",
26368
+ )
26369
+ add_json_flag(s_secrets_keychain_add)
26370
+ s_secrets_keychain_add.set_defaults(func=cmd_secrets_keychain_add, json_output=False)
26371
+
24428
26372
  s_secrets_keychain_list = secrets_sub.add_parser(
24429
26373
  "keychain-list",
24430
26374
  help="List local macOS Keychain copies known to ORP on this machine",
@@ -25094,6 +27038,73 @@ def build_parser() -> argparse.ArgumentParser:
  add_json_flag(s_exchange_repo_synthesize)
  s_exchange_repo_synthesize.set_defaults(func=cmd_exchange_repo_synthesize, json_output=False)

+ s_research = sub.add_parser(
+ "research",
+ help="Durable OpenAI research-loop question decomposition and synthesis runs",
+ )
+ research_sub = s_research.add_subparsers(dest="research_cmd", required=True)
+
+ s_research_ask = research_sub.add_parser(
+ "ask",
+ help="Create a research council run; use --execute for live provider calls",
+ )
+ s_research_ask.add_argument("question", nargs="+", help="Question to decompose and answer")
+ s_research_ask.add_argument(
+ "--profile",
+ default="openai-council",
+ help="Research profile id (default: openai-council)",
+ )
+ s_research_ask.add_argument(
+ "--profile-file",
+ default="",
+ help="Optional JSON profile file overriding the built-in OpenAI model lanes",
+ )
+ s_research_ask.add_argument(
+ "--run-id",
+ default="",
+ help="Optional research run id override",
+ )
+ s_research_ask.add_argument(
+ "--execute",
+ action="store_true",
+ help="Allow live provider adapters to run; without this ORP writes the plan only",
+ )
+ s_research_ask.add_argument(
+ "--lane-fixture",
+ action="append",
+ default=[],
+ help="Load one lane result from lane_id=path instead of calling a provider (repeatable)",
+ )
+ s_research_ask.add_argument(
+ "--chimera-bin",
+ default="chimera",
+ help="Chimera CLI binary or path for custom chimera_cli lanes (default: chimera)",
+ )
+ s_research_ask.add_argument(
+ "--timeout-sec",
+ type=int,
+ default=120,
+ help="Per-lane live adapter timeout in seconds (default: 120)",
+ )
+ add_json_flag(s_research_ask)
+ s_research_ask.set_defaults(func=cmd_research_ask, json_output=False)
+
+ s_research_status = research_sub.add_parser(
+ "status",
+ help="Show status and lane summary for a research run",
+ )
+ s_research_status.add_argument("run_id", nargs="?", default="latest", help="Run id or latest (default: latest)")
+ add_json_flag(s_research_status)
+ s_research_status.set_defaults(func=cmd_research_status, json_output=False)
+
+ s_research_show = research_sub.add_parser(
+ "show",
+ help="Show a research run answer payload or human synthesis",
+ )
+ s_research_show.add_argument("run_id", nargs="?", default="latest", help="Run id or latest (default: latest)")
+ add_json_flag(s_research_show)
+ s_research_show.set_defaults(func=cmd_research_show, json_output=False)
+
  s_collab = sub.add_parser(
  "collaborate",
  help="Built-in repository collaboration setup and workflow operations",
@@ -25210,6 +27221,25 @@ def build_parser() -> argparse.ArgumentParser:
  )
  s_collab_run.set_defaults(func=cmd_collaborate_run, json_output=False)

+ s_project = sub.add_parser(
+ "project",
+ help="Local project context lens and evolution policy operations",
+ )
+ project_sub = s_project.add_subparsers(dest="project_cmd", required=True)
+ s_project_refresh = project_sub.add_parser(
+ "refresh",
+ help="Rescan this directory and refresh orp/project.json",
+ )
+ add_json_flag(s_project_refresh)
+ s_project_refresh.set_defaults(func=cmd_project_refresh, json_output=False)
+
+ s_project_show = project_sub.add_parser(
+ "show",
+ help="Show the current ORP project context lens",
+ )
+ add_json_flag(s_project_show)
+ s_project_show.set_defaults(func=cmd_project_show, json_output=False)
+
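
All of the new subcommands above follow the same dispatch pattern: each subparser binds its handler with set_defaults(func=..., json_output=False), and the add_json_flag helper (not shown in this diff) adds the JSON-output switch. A minimal, self-contained sketch of that pattern, with a plain --json flag standing in for add_json_flag; names here are illustrative, not taken from orp.py:

# Illustrative dispatch pattern only; add_json_flag and the real handlers are not part of this diff.
import argparse

def cmd_demo_show(args: argparse.Namespace) -> int:
    print({"json": args.json_output})
    return 0

def build_demo_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="demo")
    sub = parser.add_subparsers(dest="cmd", required=True)
    s_show = sub.add_parser("show", help="Show something")
    s_show.add_argument("--json", dest="json_output", action="store_true")
    s_show.set_defaults(func=cmd_demo_show, json_output=False)
    return parser

if __name__ == "__main__":
    ns = build_demo_parser().parse_args()
    raise SystemExit(ns.func(ns))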
  s_init = sub.add_parser("init", help="Make this repo ORP-governed with local-first git safety")
  s_init.add_argument(
  "--default-branch",