open-research-protocol 0.4.24 → 0.4.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +456 -0
- package/README.md +47 -13
- package/cli/orp.py +2998 -70
- package/docs/AGENT_RUNTIME_BORROWING_NOTES.md +68 -0
- package/docs/RESEARCH_COUNCIL.md +123 -0
- package/docs/START_HERE.md +4 -0
- package/package.json +2 -1
- package/packages/orp-workspace-launcher/src/index.js +3 -0
- package/packages/orp-workspace-launcher/src/ledger.js +192 -33
- package/packages/orp-workspace-launcher/src/orp.js +61 -1
- package/packages/orp-workspace-launcher/src/tabs.js +147 -4
- package/packages/orp-workspace-launcher/test/ledger.test.js +226 -0
- package/packages/orp-workspace-launcher/test/tabs.test.js +60 -0
- package/scripts/orp-mcp +205 -0
- package/spec/v1/project-context.schema.json +223 -0
- package/spec/v1/research-run.schema.json +245 -0
- package/cli/__pycache__/orp.cpython-311.pyc +0 -0
- package/scripts/__pycache__/orp-kernel-agent-pilot.cpython-311.pyc +0 -0
- package/scripts/__pycache__/orp-kernel-agent-replication.cpython-311.pyc +0 -0
- package/scripts/__pycache__/orp-kernel-benchmark.cpython-311.pyc +0 -0
- package/scripts/__pycache__/orp-kernel-canonical-continuation.cpython-311.pyc +0 -0
- package/scripts/__pycache__/orp-kernel-continuation-pilot.cpython-311.pyc +0 -0
package/cli/orp.py
CHANGED
|
@@ -134,8 +134,13 @@ DEFAULT_HOSTED_BASE_URL = "https://orp.earth"
|
|
|
134
134
|
KERNEL_SCHEMA_VERSION = "1.0.0"
|
|
135
135
|
FRONTIER_SCHEMA_VERSION = "1.0.0"
|
|
136
136
|
FRONTIER_BANDS = ("exact", "structured", "horizon")
|
|
137
|
+
FRONTIER_ACTIVE_STATUSES = {"active", "in_progress", "running"}
|
|
138
|
+
FRONTIER_PENDING_STATUSES = {"", "pending", "planned", "ready"}
|
|
139
|
+
FRONTIER_TERMINAL_STATUSES = {"complete", "completed", "done", "skipped", "terminal"}
|
|
137
140
|
YOUTUBE_SOURCE_SCHEMA_VERSION = "1.0.0"
|
|
138
141
|
EXCHANGE_REPORT_SCHEMA_VERSION = "1.0.0"
|
|
142
|
+
RESEARCH_RUN_SCHEMA_VERSION = "1.0.0"
|
|
143
|
+
PROJECT_CONTEXT_SCHEMA_VERSION = "1.0.0"
|
|
139
144
|
MAINTENANCE_STATE_SCHEMA_VERSION = "1.0.0"
|
|
140
145
|
SCHEDULE_REGISTRY_SCHEMA_VERSION = "1.0.0"
|
|
141
146
|
AGENDA_REGISTRY_SCHEMA_VERSION = "1.0.0"
|
|
@@ -5842,6 +5847,9 @@ def _default_state_payload() -> dict[str, Any]:
|
|
|
5842
5847
|
"last_erdos_sync": {},
|
|
5843
5848
|
"last_discover_scan_id": "",
|
|
5844
5849
|
"discovery_scans": {},
|
|
5850
|
+
"last_research_run_id": "",
|
|
5851
|
+
"research_runs": {},
|
|
5852
|
+
"project_context": {},
|
|
5845
5853
|
"governance": {},
|
|
5846
5854
|
}
|
|
5847
5855
|
|
|
@@ -5850,6 +5858,7 @@ def _ensure_dirs(repo_root: Path) -> None:
|
|
|
5850
5858
|
(repo_root / "orp" / "packets").mkdir(parents=True, exist_ok=True)
|
|
5851
5859
|
(repo_root / "orp" / "artifacts").mkdir(parents=True, exist_ok=True)
|
|
5852
5860
|
(repo_root / "orp" / "discovery" / "github").mkdir(parents=True, exist_ok=True)
|
|
5861
|
+
(repo_root / "orp" / "research").mkdir(parents=True, exist_ok=True)
|
|
5853
5862
|
(repo_root / "orp" / "checkpoints").mkdir(parents=True, exist_ok=True)
|
|
5854
5863
|
(repo_root / "orp" / "handoffs").mkdir(parents=True, exist_ok=True)
|
|
5855
5864
|
state_path = repo_root / "orp" / "state.json"
|
|
@@ -5869,10 +5878,12 @@ def _frontier_paths(repo_root: Path) -> dict[str, Path]:
|
|
|
5869
5878
|
"roadmap_json": root / "roadmap.json",
|
|
5870
5879
|
"checklist_json": root / "checklist.json",
|
|
5871
5880
|
"stack_json": root / "version-stack.json",
|
|
5881
|
+
"additional_json": root / "additional-items.json",
|
|
5872
5882
|
"state_md": root / "STATE.md",
|
|
5873
5883
|
"roadmap_md": root / "ROADMAP.md",
|
|
5874
5884
|
"checklist_md": root / "CHECKLIST.md",
|
|
5875
5885
|
"stack_md": root / "VERSION_STACK.md",
|
|
5886
|
+
"additional_md": root / "ADDITIONAL_ITEMS.md",
|
|
5876
5887
|
}
|
|
5877
5888
|
|
|
5878
5889
|
|
|
@@ -5923,6 +5934,18 @@ def _default_frontier_stack_payload(program_id: str, label: str) -> dict[str, An
|
|
|
5923
5934
|
}
|
|
5924
5935
|
|
|
5925
5936
|
|
|
5937
|
+
def _default_frontier_additional_payload(program_id: str = "", label: str = "") -> dict[str, Any]:
|
|
5938
|
+
return {
|
|
5939
|
+
"schema_version": FRONTIER_SCHEMA_VERSION,
|
|
5940
|
+
"kind": "orp_frontier_additional_items",
|
|
5941
|
+
"program_id": str(program_id).strip(),
|
|
5942
|
+
"label": str(label).strip(),
|
|
5943
|
+
"active_list_id": "",
|
|
5944
|
+
"active_item_id": "",
|
|
5945
|
+
"lists": [],
|
|
5946
|
+
}
|
|
5947
|
+
|
|
5948
|
+
|
|
5926
5949
|
def _frontier_load_stack(repo_root: Path) -> dict[str, Any]:
|
|
5927
5950
|
payload = _read_json_if_exists(_frontier_paths(repo_root)["stack_json"])
|
|
5928
5951
|
if not payload:
|
|
@@ -5976,6 +5999,485 @@ def _frontier_find_phase(
|
|
|
5976
5999
|
return None
|
|
5977
6000
|
|
|
5978
6001
|
|
|
6002
|
+
def _frontier_load_additional(repo_root: Path, stack: dict[str, Any] | None = None) -> dict[str, Any]:
|
|
6003
|
+
payload = _read_json_if_exists(_frontier_paths(repo_root)["additional_json"])
|
|
6004
|
+
if not payload:
|
|
6005
|
+
stack_payload = stack if isinstance(stack, dict) else _read_json_if_exists(_frontier_paths(repo_root)["stack_json"])
|
|
6006
|
+
return _default_frontier_additional_payload(
|
|
6007
|
+
str((stack_payload or {}).get("program_id", "")).strip(),
|
|
6008
|
+
str((stack_payload or {}).get("label", "")).strip(),
|
|
6009
|
+
)
|
|
6010
|
+
if not isinstance(payload.get("lists"), list):
|
|
6011
|
+
payload["lists"] = []
|
|
6012
|
+
payload["active_list_id"] = str(payload.get("active_list_id", "")).strip()
|
|
6013
|
+
payload["active_item_id"] = str(payload.get("active_item_id", "")).strip()
|
|
6014
|
+
return payload
|
|
6015
|
+
|
|
6016
|
+
|
|
6017
|
+
def _frontier_find_additional_list(payload: dict[str, Any], list_id: str) -> dict[str, Any] | None:
|
|
6018
|
+
lists = payload.get("lists")
|
|
6019
|
+
if not isinstance(lists, list):
|
|
6020
|
+
return None
|
|
6021
|
+
for row in lists:
|
|
6022
|
+
if isinstance(row, dict) and str(row.get("id", "")).strip() == list_id:
|
|
6023
|
+
return row
|
|
6024
|
+
return None
|
|
6025
|
+
|
|
6026
|
+
|
|
6027
|
+
def _frontier_find_additional_item(item_list: dict[str, Any], item_id: str) -> dict[str, Any] | None:
|
|
6028
|
+
items = item_list.get("items")
|
|
6029
|
+
if not isinstance(items, list):
|
|
6030
|
+
return None
|
|
6031
|
+
for item in items:
|
|
6032
|
+
if isinstance(item, dict) and str(item.get("id", "")).strip() == item_id:
|
|
6033
|
+
return item
|
|
6034
|
+
return None
|
|
6035
|
+
|
|
6036
|
+
|
|
6037
|
+
def _frontier_active_additional_item(payload: dict[str, Any]) -> tuple[dict[str, Any] | None, dict[str, Any] | None]:
|
|
6038
|
+
list_id = str(payload.get("active_list_id", "")).strip()
|
|
6039
|
+
item_id = str(payload.get("active_item_id", "")).strip()
|
|
6040
|
+
if not list_id or not item_id:
|
|
6041
|
+
return (None, None)
|
|
6042
|
+
item_list = _frontier_find_additional_list(payload, list_id)
|
|
6043
|
+
if not isinstance(item_list, dict):
|
|
6044
|
+
return (None, None)
|
|
6045
|
+
item = _frontier_find_additional_item(item_list, item_id)
|
|
6046
|
+
if not isinstance(item, dict):
|
|
6047
|
+
return (None, None)
|
|
6048
|
+
return (item_list, item)
|
|
6049
|
+
|
|
6050
|
+
|
|
6051
|
+
def _frontier_normalize_additional_statuses(payload: dict[str, Any]) -> None:
|
|
6052
|
+
lists = payload.get("lists")
|
|
6053
|
+
if not isinstance(lists, list):
|
|
6054
|
+
payload["lists"] = []
|
|
6055
|
+
return
|
|
6056
|
+
for item_list in lists:
|
|
6057
|
+
if not isinstance(item_list, dict):
|
|
6058
|
+
continue
|
|
6059
|
+
items = item_list.get("items")
|
|
6060
|
+
if not isinstance(items, list):
|
|
6061
|
+
item_list["items"] = []
|
|
6062
|
+
items = item_list["items"]
|
|
6063
|
+
for item in items:
|
|
6064
|
+
if isinstance(item, dict):
|
|
6065
|
+
item["status"] = str(item.get("status", "")).strip() or "pending"
|
|
6066
|
+
if items and all(str(item.get("status", "")).strip() in {"complete", "skipped"} for item in items if isinstance(item, dict)):
|
|
6067
|
+
item_list["status"] = "complete"
|
|
6068
|
+
else:
|
|
6069
|
+
item_list["status"] = str(item_list.get("status", "")).strip() or "pending"
|
|
6070
|
+
|
|
6071
|
+
|
|
6072
|
+
def _frontier_next_pending_additional_item(payload: dict[str, Any]) -> tuple[dict[str, Any] | None, dict[str, Any] | None]:
|
|
6073
|
+
_frontier_normalize_additional_statuses(payload)
|
|
6074
|
+
lists = payload.get("lists")
|
|
6075
|
+
if not isinstance(lists, list):
|
|
6076
|
+
return (None, None)
|
|
6077
|
+
for item_list in lists:
|
|
6078
|
+
if not isinstance(item_list, dict) or str(item_list.get("status", "")).strip() in {"complete", "skipped"}:
|
|
6079
|
+
continue
|
|
6080
|
+
items = item_list.get("items")
|
|
6081
|
+
if not isinstance(items, list):
|
|
6082
|
+
continue
|
|
6083
|
+
for item in items:
|
|
6084
|
+
if isinstance(item, dict) and str(item.get("status", "")).strip() in {"", "pending"}:
|
|
6085
|
+
return (item_list, item)
|
|
6086
|
+
return (None, None)
|
|
6087
|
+
|
|
6088
|
+
|
|
6089
|
+
def _frontier_status(raw: Any, *, default: str = "pending") -> str:
|
|
6090
|
+
text = str(raw or "").strip().lower().replace(" ", "_").replace("-", "_")
|
|
6091
|
+
return text or default
|
|
6092
|
+
|
|
6093
|
+
|
|
6094
|
+
def _frontier_status_is_active(raw: Any) -> bool:
|
|
6095
|
+
return _frontier_status(raw, default="") in FRONTIER_ACTIVE_STATUSES
|
|
6096
|
+
|
|
6097
|
+
|
|
6098
|
+
def _frontier_status_is_pending(raw: Any) -> bool:
|
|
6099
|
+
return _frontier_status(raw, default="") in FRONTIER_PENDING_STATUSES
|
|
6100
|
+
|
|
6101
|
+
|
|
6102
|
+
def _frontier_status_is_terminal(raw: Any) -> bool:
|
|
6103
|
+
return _frontier_status(raw, default="") in FRONTIER_TERMINAL_STATUSES
|
|
6104
|
+
|
|
6105
|
+
|
|
6106
|
+
def _frontier_diagnostic_ok(issues: list[dict[str, Any]], *, strict: bool = False) -> bool:
|
|
6107
|
+
if any(str(issue.get("severity", "")).strip() == "error" for issue in issues):
|
|
6108
|
+
return False
|
|
6109
|
+
if strict and any(str(issue.get("severity", "")).strip() == "warning" for issue in issues):
|
|
6110
|
+
return False
|
|
6111
|
+
return True
|
|
6112
|
+
|
|
6113
|
+
|
|
6114
|
+
def _frontier_stack_summary(stack: dict[str, Any] | None) -> dict[str, int]:
|
|
6115
|
+
summary = {
|
|
6116
|
+
"versions": 0,
|
|
6117
|
+
"milestones": 0,
|
|
6118
|
+
"phases": 0,
|
|
6119
|
+
}
|
|
6120
|
+
if not isinstance(stack, dict):
|
|
6121
|
+
return summary
|
|
6122
|
+
versions = stack.get("versions")
|
|
6123
|
+
if not isinstance(versions, list):
|
|
6124
|
+
return summary
|
|
6125
|
+
for version in versions:
|
|
6126
|
+
if not isinstance(version, dict):
|
|
6127
|
+
continue
|
|
6128
|
+
summary["versions"] += 1
|
|
6129
|
+
milestones = version.get("milestones")
|
|
6130
|
+
if not isinstance(milestones, list):
|
|
6131
|
+
continue
|
|
6132
|
+
for milestone in milestones:
|
|
6133
|
+
if not isinstance(milestone, dict):
|
|
6134
|
+
continue
|
|
6135
|
+
summary["milestones"] += 1
|
|
6136
|
+
phases = milestone.get("phases")
|
|
6137
|
+
if isinstance(phases, list):
|
|
6138
|
+
summary["phases"] += len([phase for phase in phases if isinstance(phase, dict)])
|
|
6139
|
+
return summary
|
|
6140
|
+
|
|
6141
|
+
|
|
6142
|
+
def _frontier_compact_additional_item(item_list: dict[str, Any], item: dict[str, Any]) -> dict[str, str]:
|
|
6143
|
+
return {
|
|
6144
|
+
"list_id": str(item_list.get("id", "")).strip(),
|
|
6145
|
+
"list_label": str(item_list.get("label", "")).strip(),
|
|
6146
|
+
"item_id": str(item.get("id", "")).strip(),
|
|
6147
|
+
"item_label": str(item.get("label", "")).strip(),
|
|
6148
|
+
"status": _frontier_status(item.get("status")),
|
|
6149
|
+
}
|
|
6150
|
+
|
|
6151
|
+
|
|
6152
|
+
def _frontier_additional_summary(payload: dict[str, Any]) -> dict[str, Any]:
|
|
6153
|
+
active_list_id = str(payload.get("active_list_id", "")).strip()
|
|
6154
|
+
active_item_id = str(payload.get("active_item_id", "")).strip()
|
|
6155
|
+
summary: dict[str, Any] = {
|
|
6156
|
+
"lists": 0,
|
|
6157
|
+
"items": 0,
|
|
6158
|
+
"pending_items": 0,
|
|
6159
|
+
"active_items": 0,
|
|
6160
|
+
"complete_items": 0,
|
|
6161
|
+
"skipped_items": 0,
|
|
6162
|
+
"open_items": 0,
|
|
6163
|
+
"active_list_id": active_list_id,
|
|
6164
|
+
"active_item_id": active_item_id,
|
|
6165
|
+
"active_pointer_partial": bool(active_list_id) != bool(active_item_id),
|
|
6166
|
+
"active_pointer_valid": False,
|
|
6167
|
+
"active_item_status": "",
|
|
6168
|
+
"next_pending": None,
|
|
6169
|
+
}
|
|
6170
|
+
lists = payload.get("lists")
|
|
6171
|
+
if not isinstance(lists, list):
|
|
6172
|
+
return summary
|
|
6173
|
+
summary["lists"] = len([row for row in lists if isinstance(row, dict)])
|
|
6174
|
+
for item_list in lists:
|
|
6175
|
+
if not isinstance(item_list, dict):
|
|
6176
|
+
continue
|
|
6177
|
+
items = item_list.get("items")
|
|
6178
|
+
if not isinstance(items, list):
|
|
6179
|
+
continue
|
|
6180
|
+
for item in items:
|
|
6181
|
+
if not isinstance(item, dict):
|
|
6182
|
+
continue
|
|
6183
|
+
status = _frontier_status(item.get("status"))
|
|
6184
|
+
summary["items"] += 1
|
|
6185
|
+
if _frontier_status_is_active(status):
|
|
6186
|
+
summary["active_items"] += 1
|
|
6187
|
+
summary["open_items"] += 1
|
|
6188
|
+
elif status == "skipped":
|
|
6189
|
+
summary["skipped_items"] += 1
|
|
6190
|
+
elif _frontier_status_is_terminal(status):
|
|
6191
|
+
summary["complete_items"] += 1
|
|
6192
|
+
else:
|
|
6193
|
+
summary["pending_items"] += 1
|
|
6194
|
+
summary["open_items"] += 1
|
|
6195
|
+
if summary["next_pending"] is None:
|
|
6196
|
+
summary["next_pending"] = _frontier_compact_additional_item(item_list, item)
|
|
6197
|
+
|
|
6198
|
+
if (
|
|
6199
|
+
active_list_id
|
|
6200
|
+
and active_item_id
|
|
6201
|
+
and str(item_list.get("id", "")).strip() == active_list_id
|
|
6202
|
+
and str(item.get("id", "")).strip() == active_item_id
|
|
6203
|
+
):
|
|
6204
|
+
summary["active_pointer_valid"] = True
|
|
6205
|
+
summary["active_item_status"] = status
|
|
6206
|
+
|
|
6207
|
+
return summary
|
|
6208
|
+
|
|
6209
|
+
|
|
6210
|
+
def _frontier_terminal_declared(state: dict[str, Any] | None, stack: dict[str, Any] | None) -> bool:
|
|
6211
|
+
state_obj = state if isinstance(state, dict) else {}
|
|
6212
|
+
stack_obj = stack if isinstance(stack, dict) else {}
|
|
6213
|
+
if bool(state_obj.get("terminal")) or bool(stack_obj.get("terminal")):
|
|
6214
|
+
return True
|
|
6215
|
+
completion_status = _frontier_status(
|
|
6216
|
+
state_obj.get("completion_status", state_obj.get("completionStatus", stack_obj.get("completion_status", ""))),
|
|
6217
|
+
default="",
|
|
6218
|
+
)
|
|
6219
|
+
return completion_status in {"complete", "completed", "done", "terminal"}
|
|
6220
|
+
|
|
6221
|
+
|
|
6222
|
+
def _frontier_build_continuation_payload(
|
|
6223
|
+
repo_root: Path,
|
|
6224
|
+
stack: dict[str, Any] | None,
|
|
6225
|
+
state: dict[str, Any] | None,
|
|
6226
|
+
*,
|
|
6227
|
+
include_structural_issues: bool = True,
|
|
6228
|
+
strict: bool = False,
|
|
6229
|
+
) -> dict[str, Any]:
|
|
6230
|
+
issues: list[dict[str, Any]] = []
|
|
6231
|
+
paths = _frontier_paths(repo_root)
|
|
6232
|
+
stack_obj = stack if isinstance(stack, dict) else None
|
|
6233
|
+
state_obj = state if isinstance(state, dict) else None
|
|
6234
|
+
|
|
6235
|
+
if stack_obj is None and include_structural_issues:
|
|
6236
|
+
issues.append({"severity": "error", "code": "missing_stack", "message": "frontier version stack is missing."})
|
|
6237
|
+
if state_obj is None and include_structural_issues:
|
|
6238
|
+
issues.append({"severity": "error", "code": "missing_state", "message": "frontier state is missing."})
|
|
6239
|
+
|
|
6240
|
+
additional = _frontier_load_additional(repo_root, stack_obj)
|
|
6241
|
+
additional_summary = _frontier_additional_summary(additional)
|
|
6242
|
+
stack_summary = _frontier_stack_summary(stack_obj)
|
|
6243
|
+
blockers = _coerce_string_list(state_obj.get("blocked_by") if isinstance(state_obj, dict) else [])
|
|
6244
|
+
terminal_declared = _frontier_terminal_declared(state_obj, stack_obj)
|
|
6245
|
+
active_primary = False
|
|
6246
|
+
active_primary_status = ""
|
|
6247
|
+
active_primary_kind = ""
|
|
6248
|
+
active_primary_id = ""
|
|
6249
|
+
next_action = str(state_obj.get("next_action", "")).strip() if isinstance(state_obj, dict) else ""
|
|
6250
|
+
suggested_next_command = ""
|
|
6251
|
+
|
|
6252
|
+
if stack_obj is not None and state_obj is not None:
|
|
6253
|
+
active_version = str(state_obj.get("active_version", "")).strip()
|
|
6254
|
+
active_milestone = str(state_obj.get("active_milestone", "")).strip()
|
|
6255
|
+
active_phase = str(state_obj.get("active_phase", "")).strip()
|
|
6256
|
+
version = _frontier_find_version(stack_obj, active_version) if active_version else None
|
|
6257
|
+
_, milestone = _frontier_find_milestone(stack_obj, active_milestone) if active_milestone else (None, None)
|
|
6258
|
+
phase = _frontier_find_phase(milestone, active_phase) if active_phase and isinstance(milestone, dict) else None
|
|
6259
|
+
|
|
6260
|
+
if isinstance(version, dict):
|
|
6261
|
+
version_status = _frontier_status(version.get("status", ""))
|
|
6262
|
+
if _frontier_status_is_terminal(version_status):
|
|
6263
|
+
issues.append(
|
|
6264
|
+
{
|
|
6265
|
+
"severity": "error",
|
|
6266
|
+
"code": "stale_active_version_complete",
|
|
6267
|
+
"message": f"active version `{active_version}` is marked `{version_status}`; advance the frontier or declare terminal completion.",
|
|
6268
|
+
}
|
|
6269
|
+
)
|
|
6270
|
+
elif active_version:
|
|
6271
|
+
active_primary = True
|
|
6272
|
+
active_primary_kind = "version"
|
|
6273
|
+
active_primary_id = active_version
|
|
6274
|
+
active_primary_status = version_status
|
|
6275
|
+
|
|
6276
|
+
if isinstance(milestone, dict):
|
|
6277
|
+
milestone_status = _frontier_status(milestone.get("status", ""))
|
|
6278
|
+
if _frontier_status_is_terminal(milestone_status):
|
|
6279
|
+
issues.append(
|
|
6280
|
+
{
|
|
6281
|
+
"severity": "error",
|
|
6282
|
+
"code": "stale_active_milestone_complete",
|
|
6283
|
+
"message": f"active milestone `{active_milestone}` is marked `{milestone_status}`; advance the frontier or declare terminal completion.",
|
|
6284
|
+
}
|
|
6285
|
+
)
|
|
6286
|
+
if active_primary_kind == "version":
|
|
6287
|
+
active_primary = False
|
|
6288
|
+
elif active_milestone:
|
|
6289
|
+
active_primary = True
|
|
6290
|
+
active_primary_kind = "milestone"
|
|
6291
|
+
active_primary_id = active_milestone
|
|
6292
|
+
active_primary_status = milestone_status
|
|
6293
|
+
|
|
6294
|
+
if isinstance(phase, dict):
|
|
6295
|
+
phase_status = _frontier_status(phase.get("status", ""))
|
|
6296
|
+
if _frontier_status_is_terminal(phase_status):
|
|
6297
|
+
issues.append(
|
|
6298
|
+
{
|
|
6299
|
+
"severity": "error",
|
|
6300
|
+
"code": "stale_active_phase_complete",
|
|
6301
|
+
"message": f"active phase `{active_phase}` is marked `{phase_status}`; record the handoff and move the frontier to the next phase or queue item.",
|
|
6302
|
+
}
|
|
6303
|
+
)
|
|
6304
|
+
active_primary = False
|
|
6305
|
+
elif active_phase:
|
|
6306
|
+
active_primary = True
|
|
6307
|
+
active_primary_kind = "phase"
|
|
6308
|
+
active_primary_id = active_phase
|
|
6309
|
+
active_primary_status = phase_status
|
|
6310
|
+
|
|
6311
|
+
if additional_summary["active_pointer_partial"]:
|
|
6312
|
+
issues.append(
|
|
6313
|
+
{
|
|
6314
|
+
"severity": "error",
|
|
6315
|
+
"code": "partial_active_additional_pointer",
|
|
6316
|
+
"message": "frontier additional queue has only one of active_list_id or active_item_id set.",
|
|
6317
|
+
}
|
|
6318
|
+
)
|
|
6319
|
+
|
|
6320
|
+
active_list_id = str(additional_summary["active_list_id"]).strip()
|
|
6321
|
+
active_item_id = str(additional_summary["active_item_id"]).strip()
|
|
6322
|
+
if active_list_id or active_item_id:
|
|
6323
|
+
active_list, active_item = _frontier_active_additional_item(additional)
|
|
6324
|
+
if active_list is None:
|
|
6325
|
+
issues.append(
|
|
6326
|
+
{"severity": "error", "code": "missing_active_additional_list", "message": f"active additional list `{active_list_id}` does not exist."}
|
|
6327
|
+
)
|
|
6328
|
+
elif active_item is None:
|
|
6329
|
+
issues.append(
|
|
6330
|
+
{
|
|
6331
|
+
"severity": "error",
|
|
6332
|
+
"code": "missing_active_additional_item",
|
|
6333
|
+
"message": f"active additional item `{active_item_id}` does not exist in list `{active_list_id}`.",
|
|
6334
|
+
}
|
|
6335
|
+
)
|
|
6336
|
+
else:
|
|
6337
|
+
active_status = _frontier_status(active_item.get("status", ""))
|
|
6338
|
+
if _frontier_status_is_terminal(active_status):
|
|
6339
|
+
issues.append(
|
|
6340
|
+
{
|
|
6341
|
+
"severity": "error",
|
|
6342
|
+
"code": "stale_active_additional_item",
|
|
6343
|
+
"message": f"active additional item `{active_list_id}/{active_item_id}` is marked `{active_status}`; complete the handoff and activate the next pending item.",
|
|
6344
|
+
}
|
|
6345
|
+
)
|
|
6346
|
+
elif not _frontier_status_is_active(active_status):
|
|
6347
|
+
issues.append(
|
|
6348
|
+
{
|
|
6349
|
+
"severity": "warning",
|
|
6350
|
+
"code": "active_additional_item_not_marked_active",
|
|
6351
|
+
"message": f"active additional item `{active_list_id}/{active_item_id}` is marked `{active_status}` instead of active.",
|
|
6352
|
+
}
|
|
6353
|
+
)
|
|
6354
|
+
if not next_action:
|
|
6355
|
+
next_action = _frontier_additional_item_summary(active_list, active_item)
|
|
6356
|
+
|
|
6357
|
+
if not active_item_id and int(additional_summary["pending_items"]) > 0:
|
|
6358
|
+
issues.append(
|
|
6359
|
+
{
|
|
6360
|
+
"severity": "warning",
|
|
6361
|
+
"code": "pending_additional_without_active_pointer",
|
|
6362
|
+
"message": "frontier additional queue has pending work but no active item; run `orp frontier additional activate-next` before delegating queue work.",
|
|
6363
|
+
}
|
|
6364
|
+
)
|
|
6365
|
+
suggested_next_command = "orp frontier additional activate-next --json"
|
|
6366
|
+
next_pending = additional_summary.get("next_pending")
|
|
6367
|
+
if isinstance(next_pending, dict) and not next_action:
|
|
6368
|
+
next_action = (
|
|
6369
|
+
f"Activate additional item {next_pending.get('list_id', '')}/{next_pending.get('item_id', '')}: "
|
|
6370
|
+
f"{next_pending.get('item_label', '')}"
|
|
6371
|
+
)
|
|
6372
|
+
|
|
6373
|
+
defined_work = int(stack_summary["versions"]) + int(stack_summary["milestones"]) + int(stack_summary["phases"]) + int(additional_summary["items"])
|
|
6374
|
+
if (
|
|
6375
|
+
defined_work > 0
|
|
6376
|
+
and not blockers
|
|
6377
|
+
and not terminal_declared
|
|
6378
|
+
and not active_primary
|
|
6379
|
+
and int(additional_summary["active_items"]) == 0
|
|
6380
|
+
and int(additional_summary["pending_items"]) == 0
|
|
6381
|
+
):
|
|
6382
|
+
issues.append(
|
|
6383
|
+
{
|
|
6384
|
+
"severity": "warning",
|
|
6385
|
+
"code": "no_frontier_continuation_or_terminal_declaration",
|
|
6386
|
+
"message": "frontier has defined work but no active/open continuation and no terminal completion declaration.",
|
|
6387
|
+
}
|
|
6388
|
+
)
|
|
6389
|
+
|
|
6390
|
+
summary = {
|
|
6391
|
+
"defined_work": defined_work,
|
|
6392
|
+
"blocked": bool(blockers),
|
|
6393
|
+
"terminal_declared": terminal_declared,
|
|
6394
|
+
"active_primary": active_primary,
|
|
6395
|
+
"active_primary_kind": active_primary_kind,
|
|
6396
|
+
"active_primary_id": active_primary_id,
|
|
6397
|
+
"active_primary_status": active_primary_status,
|
|
6398
|
+
"additional": additional_summary,
|
|
6399
|
+
}
|
|
6400
|
+
return {
|
|
6401
|
+
"ok": _frontier_diagnostic_ok(issues, strict=strict),
|
|
6402
|
+
"strict": strict,
|
|
6403
|
+
"issues": issues,
|
|
6404
|
+
"summary": summary,
|
|
6405
|
+
"next_action": next_action,
|
|
6406
|
+
"suggested_next_command": suggested_next_command,
|
|
6407
|
+
"paths": {key: _path_for_state(value, repo_root) for key, value in paths.items()},
|
|
6408
|
+
}
|
|
6409
|
+
|
|
6410
|
+
|
|
6411
|
+
def _frontier_continuation_payload(repo_root: Path, *, strict: bool = False) -> dict[str, Any]:
|
|
6412
|
+
paths = _frontier_paths(repo_root)
|
|
6413
|
+
stack = _read_json_if_exists(paths["stack_json"])
|
|
6414
|
+
state = _read_json_if_exists(paths["state_json"])
|
|
6415
|
+
return _frontier_build_continuation_payload(repo_root, stack, state, include_structural_issues=True, strict=strict)
|
|
6416
|
+
|
|
6417
|
+
|
|
6418
|
+
def _frontier_additional_item_summary(item_list: dict[str, Any], item: dict[str, Any]) -> str:
|
|
6419
|
+
bits = [
|
|
6420
|
+
f"ORP additional item {str(item_list.get('id', '')).strip()}/{str(item.get('id', '')).strip()}: {str(item.get('label', '')).strip()}",
|
|
6421
|
+
]
|
|
6422
|
+
goal = str(item.get("goal", "")).strip()
|
|
6423
|
+
if goal:
|
|
6424
|
+
bits.append(f"Goal: {goal}")
|
|
6425
|
+
criteria = _coerce_string_list(item.get("success_criteria"))
|
|
6426
|
+
if criteria:
|
|
6427
|
+
bits.append("Success: " + "; ".join(criteria))
|
|
6428
|
+
return " ".join(bits)
|
|
6429
|
+
|
|
6430
|
+
|
|
6431
|
+
def _render_frontier_additional_md(payload: dict[str, Any]) -> str:
|
|
6432
|
+
lines = [
|
|
6433
|
+
f"# Additional Frontier Items: {payload.get('label', '') or payload.get('program_id', '')}",
|
|
6434
|
+
"",
|
|
6435
|
+
f"- active_list_id: `{payload.get('active_list_id', '') or '(none)'}`",
|
|
6436
|
+
f"- active_item_id: `{payload.get('active_item_id', '') or '(none)'}`",
|
|
6437
|
+
"",
|
|
6438
|
+
]
|
|
6439
|
+
lists = payload.get("lists")
|
|
6440
|
+
if isinstance(lists, list) and lists:
|
|
6441
|
+
for item_list in lists:
|
|
6442
|
+
if not isinstance(item_list, dict):
|
|
6443
|
+
continue
|
|
6444
|
+
lines.append(
|
|
6445
|
+
f"## `{item_list.get('id', '')}` {item_list.get('label', '')} (`{item_list.get('status', '') or 'pending'}`)"
|
|
6446
|
+
)
|
|
6447
|
+
lines.append("")
|
|
6448
|
+
items = item_list.get("items")
|
|
6449
|
+
if isinstance(items, list) and items:
|
|
6450
|
+
for item in items:
|
|
6451
|
+
if not isinstance(item, dict):
|
|
6452
|
+
continue
|
|
6453
|
+
marker = "x" if str(item.get("status", "")).strip() == "complete" else " "
|
|
6454
|
+
lines.append(
|
|
6455
|
+
f"- [{marker}] `{item.get('id', '')}` {item.get('label', '')} (`{item.get('status', '') or 'pending'}`)"
|
|
6456
|
+
)
|
|
6457
|
+
goal = str(item.get("goal", "")).strip()
|
|
6458
|
+
if goal:
|
|
6459
|
+
lines.append(f" - goal: {goal}")
|
|
6460
|
+
else:
|
|
6461
|
+
lines.append("- `(no items)`")
|
|
6462
|
+
lines.append("")
|
|
6463
|
+
else:
|
|
6464
|
+
lines.append("- `(no additional lists yet)`")
|
|
6465
|
+
lines.append("")
|
|
6466
|
+
return "\n".join(lines)
|
|
6467
|
+
|
|
6468
|
+
|
|
6469
|
+
def _frontier_write_additional_views(repo_root: Path, payload: dict[str, Any]) -> dict[str, str]:
|
|
6470
|
+
paths = _frontier_paths(repo_root)
|
|
6471
|
+
paths["root"].mkdir(parents=True, exist_ok=True)
|
|
6472
|
+
_frontier_normalize_additional_statuses(payload)
|
|
6473
|
+
_write_json(paths["additional_json"], payload)
|
|
6474
|
+
_write_text(paths["additional_md"], _render_frontier_additional_md(payload) + "\n")
|
|
6475
|
+
return {
|
|
6476
|
+
"additional_json": _path_for_state(paths["additional_json"], repo_root),
|
|
6477
|
+
"additional_md": _path_for_state(paths["additional_md"], repo_root),
|
|
6478
|
+
}
|
|
6479
|
+
|
|
6480
|
+
|
|
5979
6481
|
def _frontier_set_current_frontier(stack: dict[str, Any], state: dict[str, Any]) -> None:
|
|
5980
6482
|
stack["current_frontier"] = {
|
|
5981
6483
|
"active_version": str(state.get("active_version", "")).strip(),
|
|
@@ -6179,6 +6681,11 @@ def _frontier_write_materialized_views(repo_root: Path, stack: dict[str, Any], s
|
|
|
6179
6681
|
_frontier_set_current_frontier(stack, state)
|
|
6180
6682
|
roadmap = _frontier_build_roadmap_payload(stack, state)
|
|
6181
6683
|
checklist = _frontier_build_checklist_payload(stack, state)
|
|
6684
|
+
additional = _frontier_load_additional(repo_root, stack)
|
|
6685
|
+
if not str(additional.get("program_id", "")).strip():
|
|
6686
|
+
additional["program_id"] = str(stack.get("program_id", "")).strip()
|
|
6687
|
+
if not str(additional.get("label", "")).strip():
|
|
6688
|
+
additional["label"] = str(stack.get("label", "")).strip()
|
|
6182
6689
|
_write_json(paths["state_json"], state)
|
|
6183
6690
|
_write_json(paths["stack_json"], stack)
|
|
6184
6691
|
_write_json(paths["roadmap_json"], roadmap)
|
|
@@ -6187,6 +6694,7 @@ def _frontier_write_materialized_views(repo_root: Path, stack: dict[str, Any], s
|
|
|
6187
6694
|
_write_text(paths["roadmap_md"], _render_frontier_roadmap_md(roadmap) + "\n")
|
|
6188
6695
|
_write_text(paths["checklist_md"], _render_frontier_checklist_md(checklist) + "\n")
|
|
6189
6696
|
_write_text(paths["stack_md"], _render_frontier_stack_md(stack) + "\n")
|
|
6697
|
+
_frontier_write_additional_views(repo_root, additional)
|
|
6190
6698
|
return {key: _path_for_state(value, repo_root) for key, value in paths.items() if key != "root"}
|
|
6191
6699
|
|
|
6192
6700
|
|
|
@@ -6245,14 +6753,44 @@ def _frontier_doctor_payload(repo_root: Path) -> dict[str, Any]:
|
|
|
6245
6753
|
)
|
|
6246
6754
|
if band == "exact":
|
|
6247
6755
|
exact_milestones += 1
|
|
6756
|
+
milestone_id = str(milestone_row.get("id", "")).strip()
|
|
6757
|
+
if active_milestone and milestone_id and milestone_id != active_milestone:
|
|
6758
|
+
issues.append(
|
|
6759
|
+
{
|
|
6760
|
+
"severity": "warning",
|
|
6761
|
+
"code": "exact_milestone_not_live",
|
|
6762
|
+
"message": f"milestone `{milestone_id}` is marked exact but the live milestone is `{active_milestone}`.",
|
|
6763
|
+
}
|
|
6764
|
+
)
|
|
6248
6765
|
if exact_milestones > 1:
|
|
6249
6766
|
issues.append(
|
|
6250
6767
|
{"severity": "warning", "code": "multiple_exact_milestones", "message": f"{exact_milestones} milestones are marked exact; the planning rule expects only one live exact milestone."}
|
|
6251
6768
|
)
|
|
6252
6769
|
|
|
6770
|
+
continuation = _frontier_build_continuation_payload(
|
|
6771
|
+
repo_root,
|
|
6772
|
+
stack,
|
|
6773
|
+
state,
|
|
6774
|
+
include_structural_issues=False,
|
|
6775
|
+
)
|
|
6776
|
+
issues.extend(continuation["issues"])
|
|
6777
|
+
else:
|
|
6778
|
+
continuation = _frontier_build_continuation_payload(
|
|
6779
|
+
repo_root,
|
|
6780
|
+
stack if isinstance(stack, dict) else None,
|
|
6781
|
+
state if isinstance(state, dict) else None,
|
|
6782
|
+
include_structural_issues=False,
|
|
6783
|
+
)
|
|
6784
|
+
|
|
6253
6785
|
return {
|
|
6254
|
-
"ok":
|
|
6786
|
+
"ok": _frontier_diagnostic_ok(issues),
|
|
6255
6787
|
"issues": issues,
|
|
6788
|
+
"continuation": {
|
|
6789
|
+
"ok": continuation["ok"],
|
|
6790
|
+
"summary": continuation["summary"],
|
|
6791
|
+
"next_action": continuation["next_action"],
|
|
6792
|
+
"suggested_next_command": continuation["suggested_next_command"],
|
|
6793
|
+
},
|
|
6256
6794
|
"paths": {key: _path_for_state(value, repo_root) for key, value in paths.items()},
|
|
6257
6795
|
}
|
|
6258
6796
|
|
|
@@ -9072,6 +9610,238 @@ def _effective_remote_context(
|
|
|
9072
9610
|
}
|
|
9073
9611
|
|
|
9074
9612
|
|
|
9613
|
+
def _project_context_path(repo_root: Path) -> Path:
|
|
9614
|
+
return repo_root / "orp" / "project.json"
|
|
9615
|
+
|
|
9616
|
+
|
|
9617
|
+
def _project_surface(path: Path, repo_root: Path, *, kind: str, role: str) -> dict[str, Any]:
|
|
9618
|
+
rel_path = _path_for_state(path, repo_root)
|
|
9619
|
+
exists = path.exists()
|
|
9620
|
+
size_bytes = path.stat().st_size if exists and path.is_file() else 0
|
|
9621
|
+
return {
|
|
9622
|
+
"path": rel_path,
|
|
9623
|
+
"kind": kind,
|
|
9624
|
+
"role": role,
|
|
9625
|
+
"exists": exists,
|
|
9626
|
+
"size_bytes": int(size_bytes),
|
|
9627
|
+
}
|
|
9628
|
+
|
|
9629
|
+
|
|
9630
|
+
def _project_authority_surfaces(repo_root: Path) -> list[dict[str, Any]]:
|
|
9631
|
+
candidates: list[tuple[str, str, str]] = [
|
|
9632
|
+
("AGENTS.md", "agent_guidance", "project_agent_rules"),
|
|
9633
|
+
("CLAUDE.md", "agent_guidance", "project_agent_rules"),
|
|
9634
|
+
("README.md", "overview", "project_overview"),
|
|
9635
|
+
("llms.txt", "llm_discovery", "machine_readable_discovery"),
|
|
9636
|
+
("orp.yml", "orp_config", "runtime_config"),
|
|
9637
|
+
("analysis/orp.kernel.task.yml", "kernel_artifact", "starter_task_contract"),
|
|
9638
|
+
("docs/START_HERE.md", "operator_docs", "starter_flow"),
|
|
9639
|
+
("docs/ROADMAP.md", "roadmap", "planning_authority"),
|
|
9640
|
+
("ROADMAP.md", "roadmap", "planning_authority"),
|
|
9641
|
+
("TODO.md", "task_notes", "planning_authority"),
|
|
9642
|
+
("package.json", "manifest", "javascript_manifest"),
|
|
9643
|
+
("pyproject.toml", "manifest", "python_manifest"),
|
|
9644
|
+
("Cargo.toml", "manifest", "rust_manifest"),
|
|
9645
|
+
("go.mod", "manifest", "go_manifest"),
|
|
9646
|
+
("Makefile", "command_surface", "build_commands"),
|
|
9647
|
+
("justfile", "command_surface", "build_commands"),
|
|
9648
|
+
]
|
|
9649
|
+
rows: list[dict[str, Any]] = []
|
|
9650
|
+
seen: set[str] = set()
|
|
9651
|
+
for rel_path, kind, role in candidates:
|
|
9652
|
+
path = repo_root / rel_path
|
|
9653
|
+
row = _project_surface(path, repo_root, kind=kind, role=role)
|
|
9654
|
+
if row["exists"] or kind in {"agent_guidance", "orp_config", "kernel_artifact"}:
|
|
9655
|
+
rows.append(row)
|
|
9656
|
+
seen.add(str(row["path"]))
|
|
9657
|
+
|
|
9658
|
+
docs_root = repo_root / "docs"
|
|
9659
|
+
if docs_root.exists() and docs_root.is_dir():
|
|
9660
|
+
doc_paths = sorted(
|
|
9661
|
+
path
|
|
9662
|
+
for path in docs_root.glob("*.md")
|
|
9663
|
+
if path.is_file()
|
|
9664
|
+
and _path_for_state(path, repo_root) not in seen
|
|
9665
|
+
and path.name.upper() not in {"README.MD"}
|
|
9666
|
+
)
|
|
9667
|
+
for path in doc_paths[:12]:
|
|
9668
|
+
lower_name = path.name.lower()
|
|
9669
|
+
kind = "project_doc"
|
|
9670
|
+
role = "supporting_context"
|
|
9671
|
+
if "roadmap" in lower_name or "plan" in lower_name:
|
|
9672
|
+
kind = "roadmap"
|
|
9673
|
+
role = "planning_authority"
|
|
9674
|
+
elif "research" in lower_name:
|
|
9675
|
+
kind = "research_doc"
|
|
9676
|
+
role = "research_context"
|
|
9677
|
+
elif "spec" in lower_name or "protocol" in lower_name:
|
|
9678
|
+
kind = "spec"
|
|
9679
|
+
role = "project_authority"
|
|
9680
|
+
rows.append(_project_surface(path, repo_root, kind=kind, role=role))
|
|
9681
|
+
|
|
9682
|
+
return rows
|
|
9683
|
+
|
|
9684
|
+
|
|
9685
|
+
def _project_directory_signals(repo_root: Path, surfaces: list[dict[str, Any]]) -> dict[str, Any]:
|
|
9686
|
+
surface_paths = {str(row.get("path", "")).strip() for row in surfaces if isinstance(row, dict)}
|
|
9687
|
+
source_dirs = [
|
|
9688
|
+
rel
|
|
9689
|
+
for rel in ("src", "lib", "app", "cli", "packages", "research", "analysis", "tests", "test", "docs")
|
|
9690
|
+
if (repo_root / rel).exists()
|
|
9691
|
+
]
|
|
9692
|
+
languages: list[str] = []
|
|
9693
|
+
if "package.json" in surface_paths:
|
|
9694
|
+
languages.append("javascript")
|
|
9695
|
+
if "pyproject.toml" in surface_paths:
|
|
9696
|
+
languages.append("python")
|
|
9697
|
+
if "Cargo.toml" in surface_paths:
|
|
9698
|
+
languages.append("rust")
|
|
9699
|
+
if "go.mod" in surface_paths:
|
|
9700
|
+
languages.append("go")
|
|
9701
|
+
if any((repo_root / rel).exists() for rel in ("lakefile.lean", "lakefile.toml")):
|
|
9702
|
+
languages.append("lean")
|
|
9703
|
+
return {
|
|
9704
|
+
"source_dirs": source_dirs,
|
|
9705
|
+
"languages_or_stacks": _unique_strings(languages),
|
|
9706
|
+
"has_tests": any((repo_root / rel).exists() for rel in ("tests", "test", "__tests__")),
|
|
9707
|
+
"has_docs": (repo_root / "docs").exists(),
|
|
9708
|
+
"has_orp_config": "orp.yml" in surface_paths,
|
|
9709
|
+
"authority_surface_count": len([row for row in surfaces if row.get("exists")]),
|
|
9710
|
+
}
|
|
9711
|
+
|
|
9712
|
+
|
|
9713
|
+
def _project_research_trigger_policy() -> dict[str, Any]:
|
|
9714
|
+
return {
|
|
9715
|
+
"default_timing": "after_local_decomposition_before_action",
|
|
9716
|
+
"provider_calls_are_explicit": True,
|
|
9717
|
+
"live_calls_require_execute": True,
|
|
9718
|
+
"secret_alias": "openai-primary",
|
|
9719
|
+
"env_var": "OPENAI_API_KEY",
|
|
9720
|
+
"call_moments": [
|
|
9721
|
+
{
|
|
9722
|
+
"moment_id": "plan",
|
|
9723
|
+
"calls_api": False,
|
|
9724
|
+
"when": "Always run first during project refresh or research ask; inspect local authority surfaces and decompose the question.",
|
|
9725
|
+
},
|
|
9726
|
+
{
|
|
9727
|
+
"moment_id": "thinking_reasoning_high",
|
|
9728
|
+
"calls_api": True,
|
|
9729
|
+
"lane": "openai_reasoning_high",
|
|
9730
|
+
"model": "gpt-5.4",
|
|
9731
|
+
"when": "Use when the directory has a decision gate, route choice, proof strategy, architecture tradeoff, or ambiguous next action.",
|
|
9732
|
+
},
|
|
9733
|
+
{
|
|
9734
|
+
"moment_id": "web_synthesis",
|
|
9735
|
+
"calls_api": True,
|
|
9736
|
+
"lane": "openai_web_synthesis",
|
|
9737
|
+
"model": "gpt-5.4",
|
|
9738
|
+
"when": "Use when the answer depends on current public facts, external docs, papers, project status, or citations.",
|
|
9739
|
+
},
|
|
9740
|
+
{
|
|
9741
|
+
"moment_id": "pro_deep_research",
|
|
9742
|
+
"calls_api": True,
|
|
9743
|
+
"lane": "openai_deep_research",
|
|
9744
|
+
"model": "o3-deep-research-2025-06-26",
|
|
9745
|
+
"when": "Use only after reasoning/web lanes expose a research-heavy gap, disagreement, source-quality issue, or literature-scale synthesis need.",
|
|
9746
|
+
"capability_note": "Requires an OpenAI organization verified for Deep Research model access.",
|
|
9747
|
+
},
|
|
9748
|
+
],
|
|
9749
|
+
"skip_research_when": [
|
|
9750
|
+
"the next action is already executable from local project authority",
|
|
9751
|
+
"the question is only a deterministic local status or file lookup",
|
|
9752
|
+
"the task is implementation-ready and no external/public evidence is needed",
|
|
9753
|
+
],
|
|
9754
|
+
"escalate_to_deep_research_when": [
|
|
9755
|
+
"web synthesis finds conflicting or weak public sources",
|
|
9756
|
+
"the project must compare multiple papers, standards, providers, or public claims",
|
|
9757
|
+
"the output needs a citation-rich report rather than a short decision memo",
|
|
9758
|
+
],
|
|
9759
|
+
}
|
|
9760
|
+
|
|
9761
|
+
|
|
9762
|
+
def _project_evolution_policy() -> dict[str, Any]:
|
|
9763
|
+
return {
|
|
9764
|
+
"refresh_surfaces": [
|
|
9765
|
+
"orp init",
|
|
9766
|
+
"orp project refresh",
|
|
9767
|
+
"after adding or changing roadmap/spec/agent-guidance files",
|
|
9768
|
+
"after installing a profile pack or changing command surfaces",
|
|
9769
|
+
"before a research loop whose answer depends on project state",
|
|
9770
|
+
],
|
|
9771
|
+
"evolution_loop": [
|
|
9772
|
+
"scan authority surfaces",
|
|
9773
|
+
"classify what is local, public, executable, or human-gated",
|
|
9774
|
+
"choose whether reasoning, web synthesis, or deep research is justified",
|
|
9775
|
+
"act only after the decision gate has enough evidence",
|
|
9776
|
+
"checkpoint the resulting project state",
|
|
9777
|
+
],
|
|
9778
|
+
"boundary": "ORP project context is process-only. It guides routing and research timing but is not evidence.",
|
|
9779
|
+
}
|
|
9780
|
+
|
|
9781
|
+
|
|
9782
|
+
def _project_context_payload(repo_root: Path, *, source: str) -> dict[str, Any]:
|
|
9783
|
+
context_path = _project_context_path(repo_root)
|
|
9784
|
+
existing = _read_json_if_exists(context_path)
|
|
9785
|
+
generated_at = _now_utc()
|
|
9786
|
+
initialized_at = str(existing.get("initialized_at_utc", "")).strip() or generated_at
|
|
9787
|
+
surfaces = _project_authority_surfaces(repo_root)
|
|
9788
|
+
signals = _project_directory_signals(repo_root, surfaces)
|
|
9789
|
+
research_policy = _project_research_trigger_policy()
|
|
9790
|
+
return {
|
|
9791
|
+
"schema_version": PROJECT_CONTEXT_SCHEMA_VERSION,
|
|
9792
|
+
"kind": "orp_project_context",
|
|
9793
|
+
"project": {
|
|
9794
|
+
"name": repo_root.name or "project",
|
|
9795
|
+
"root": str(repo_root),
|
|
9796
|
+
},
|
|
9797
|
+
"initialized_at_utc": initialized_at,
|
|
9798
|
+
"refreshed_at_utc": generated_at,
|
|
9799
|
+
"refresh_source": source,
|
|
9800
|
+
"authority_surfaces": surfaces,
|
|
9801
|
+
"directory_signals": signals,
|
|
9802
|
+
"research_policy": research_policy,
|
|
9803
|
+
"evolution_policy": _project_evolution_policy(),
|
|
9804
|
+
"next_actions": [
|
|
9805
|
+
"orp project refresh --json",
|
|
9806
|
+
"orp agents audit",
|
|
9807
|
+
"orp status --json",
|
|
9808
|
+
'orp research ask "<decision question>" --json',
|
|
9809
|
+
],
|
|
9810
|
+
"notes": [
|
|
9811
|
+
"This file is ORP process context for the local directory.",
|
|
9812
|
+
"It is refreshed as the project evolves and should not be cited as proof or canonical evidence.",
|
|
9813
|
+
"Provider research calls remain opt-in through `orp research ask --execute`.",
|
|
9814
|
+
],
|
|
9815
|
+
}
|
|
9816
|
+
|
|
9817
|
+
|
|
9818
|
+
def _write_project_context(repo_root: Path, *, source: str) -> tuple[dict[str, Any], str]:
|
|
9819
|
+
path = _project_context_path(repo_root)
|
|
9820
|
+
existed = path.exists()
|
|
9821
|
+
payload = _project_context_payload(repo_root, source=source)
|
|
9822
|
+
_write_json(path, payload)
|
|
9823
|
+
state_path = repo_root / "orp" / "state.json"
|
|
9824
|
+
state = {**_default_state_payload(), **_read_json_if_exists(state_path)}
|
|
9825
|
+
state["project_context"] = {
|
|
9826
|
+
"path": _path_for_state(path, repo_root),
|
|
9827
|
+
"schema_version": PROJECT_CONTEXT_SCHEMA_VERSION,
|
|
9828
|
+
"refreshed_at_utc": payload["refreshed_at_utc"],
|
|
9829
|
+
"refresh_source": source,
|
|
9830
|
+
"authority_surface_count": payload["directory_signals"]["authority_surface_count"],
|
|
9831
|
+
"research_default_timing": payload["research_policy"]["default_timing"],
|
|
9832
|
+
}
|
|
9833
|
+
_write_json(state_path, state)
|
|
9834
|
+
return payload, "updated" if existed else "created"
|
|
9835
|
+
|
|
9836
|
+
|
|
9837
|
+
def _load_project_context(repo_root: Path) -> dict[str, Any]:
|
|
9838
|
+
path = _project_context_path(repo_root)
|
|
9839
|
+
payload = _read_json_if_exists(path)
|
|
9840
|
+
if not payload:
|
|
9841
|
+
raise RuntimeError("No ORP project context found. Run `orp init` or `orp project refresh --json` first.")
|
|
9842
|
+
return payload
|
|
9843
|
+
|
|
9844
|
+
|
|
9075
9845
|
def _init_kernel_task_template(repo_name: str) -> str:
|
|
9076
9846
|
safe_name = str(repo_name or "").strip() or "my-project"
|
|
9077
9847
|
return (
|
|
@@ -10001,6 +10771,18 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
|
|
|
10001
10771
|
str(governance_state.get("checkpoint_log_path", "orp/checkpoints/CHECKPOINT_LOG.md")),
|
|
10002
10772
|
"orp/checkpoints/CHECKPOINT_LOG.md",
|
|
10003
10773
|
)
|
|
10774
|
+
project_context_path = _project_context_path(repo_root)
|
|
10775
|
+
project_context = _read_json_if_exists(project_context_path)
|
|
10776
|
+
project_context_signals = (
|
|
10777
|
+
project_context.get("directory_signals")
|
|
10778
|
+
if isinstance(project_context.get("directory_signals"), dict)
|
|
10779
|
+
else {}
|
|
10780
|
+
)
|
|
10781
|
+
project_research_policy = (
|
|
10782
|
+
project_context.get("research_policy")
|
|
10783
|
+
if isinstance(project_context.get("research_policy"), dict)
|
|
10784
|
+
else {}
|
|
10785
|
+
)
|
|
10004
10786
|
|
|
10005
10787
|
manifest = _read_json_if_exists(manifest_path)
|
|
10006
10788
|
orp_governed = bool(governance_state.get("orp_governed")) or bool(manifest.get("repo", {}).get("orp_governed"))
|
|
@@ -10090,6 +10872,9 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
|
|
|
10090
10872
|
warnings.append("checkpoint log is missing from ORP governance runtime.")
|
|
10091
10873
|
if not agent_policy_path.exists():
|
|
10092
10874
|
warnings.append("agent policy file is missing from ORP governance runtime.")
|
|
10875
|
+
if not project_context_path.exists():
|
|
10876
|
+
warnings.append("project context lens is missing from ORP governance runtime.")
|
|
10877
|
+
next_actions.append("orp project refresh --json")
|
|
10093
10878
|
|
|
10094
10879
|
if remote_context["mode"] == "local_only":
|
|
10095
10880
|
notes.append("local-first mode active; no remote is required.")
|
|
@@ -10178,6 +10963,15 @@ def _governance_status_payload(repo_root: Path, config_arg: str) -> dict[str, An
|
|
|
10178
10963
|
"handoff_exists": handoff_path.exists(),
|
|
10179
10964
|
"checkpoint_log_path": _path_for_state(checkpoint_log_path, repo_root),
|
|
10180
10965
|
"checkpoint_log_exists": checkpoint_log_path.exists(),
|
|
10966
|
+
"project_context": {
|
|
10967
|
+
"path": _path_for_state(project_context_path, repo_root),
|
|
10968
|
+
"exists": project_context_path.exists(),
|
|
10969
|
+
"schema_version": str(project_context.get("schema_version", "")).strip(),
|
|
10970
|
+
"refreshed_at_utc": str(project_context.get("refreshed_at_utc", "")).strip(),
|
|
10971
|
+
"refresh_source": str(project_context.get("refresh_source", "")).strip(),
|
|
10972
|
+
"authority_surface_count": int(project_context_signals.get("authority_surface_count", 0) or 0),
|
|
10973
|
+
"research_default_timing": str(project_research_policy.get("default_timing", "")).strip(),
|
|
10974
|
+
},
|
|
10181
10975
|
"git_runtime_path": _path_for_state(_git_runtime_path(repo_root) or Path(".git/orp/runtime.json"), repo_root),
|
|
10182
10976
|
"git": {
|
|
10183
10977
|
**git_snapshot,
|
|
@@ -10572,10 +11366,13 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10572
11366
|
"agent_loop": "docs/AGENT_LOOP.md",
|
|
10573
11367
|
"discover": "docs/DISCOVER.md",
|
|
10574
11368
|
"exchange": "docs/EXCHANGE.md",
|
|
11369
|
+
"research_council": "docs/RESEARCH_COUNCIL.md",
|
|
10575
11370
|
"profile_packs": "docs/PROFILE_PACKS.md",
|
|
11371
|
+
"research_mcp_server": "scripts/orp-mcp",
|
|
10576
11372
|
},
|
|
10577
11373
|
"artifacts": {
|
|
10578
11374
|
"state_json": "orp/state.json",
|
|
11375
|
+
"project_context_json": "orp/project.json",
|
|
10579
11376
|
"run_json": "orp/artifacts/<run_id>/RUN.json",
|
|
10580
11377
|
"run_summary_md": "orp/artifacts/<run_id>/RUN_SUMMARY.md",
|
|
10581
11378
|
"packet_json": "orp/packets/<packet_id>.json",
|
|
@@ -10585,6 +11382,9 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10585
11382
|
"exchange_json": "orp/exchange/<exchange_id>/EXCHANGE.json",
|
|
10586
11383
|
"exchange_summary_md": "orp/exchange/<exchange_id>/EXCHANGE_SUMMARY.md",
|
|
10587
11384
|
"exchange_transfer_map_md": "orp/exchange/<exchange_id>/TRANSFER_MAP.md",
|
|
11385
|
+
"research_answer_json": "orp/research/<run_id>/ANSWER.json",
|
|
11386
|
+
"research_summary_md": "orp/research/<run_id>/RUN_SUMMARY.md",
|
|
11387
|
+
"research_lanes_root": "orp/research/<run_id>/lanes/",
|
|
10588
11388
|
},
|
|
10589
11389
|
"schemas": {
|
|
10590
11390
|
"config": "spec/v1/orp.config.schema.json",
|
|
@@ -10594,6 +11394,8 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10594
11394
|
"kernel_extension": "spec/v1/kernel-extension.schema.json",
|
|
10595
11395
|
"youtube_source": "spec/v1/youtube-source.schema.json",
|
|
10596
11396
|
"exchange_report": "spec/v1/exchange-report.schema.json",
|
|
11397
|
+
"research_run": "spec/v1/research-run.schema.json",
|
|
11398
|
+
"project_context": "spec/v1/project-context.schema.json",
|
|
10597
11399
|
"profile_pack": "spec/v1/profile-pack.schema.json",
|
|
10598
11400
|
"link_project": "spec/v1/link-project.schema.json",
|
|
10599
11401
|
"link_session": "spec/v1/link-session.schema.json",
|
|
@@ -10684,6 +11486,14 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10684
11486
|
["agents", "audit"],
|
|
10685
11487
|
],
|
|
10686
11488
|
},
|
|
11489
|
+
{
|
|
11490
|
+
"id": "project",
|
|
11491
|
+
"description": "Local project context lens for authority surfaces, directory signals, research call timing, and explicit evolution as the repo changes.",
|
|
11492
|
+
"entrypoints": [
|
|
11493
|
+
["project", "refresh"],
|
|
11494
|
+
["project", "show"],
|
|
11495
|
+
],
|
|
11496
|
+
},
|
|
10687
11497
|
{
|
|
10688
11498
|
"id": "secrets",
|
|
10689
11499
|
"description": "Hosted secret store for global API key inventory, provider metadata, and project-scoped resolution.",
|
|
@@ -10757,6 +11567,15 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10757
11567
|
["exchange", "repo", "synthesize"],
|
|
10758
11568
|
],
|
|
10759
11569
|
},
|
|
11570
|
+
{
|
|
11571
|
+
"id": "research",
|
|
11572
|
+
"description": "Durable OpenAI research-loop runs with decomposition, explicit API call moments, provider lanes, and synthesis artifacts.",
|
|
11573
|
+
"entrypoints": [
|
|
11574
|
+
["research", "ask"],
|
|
11575
|
+
["research", "status"],
|
|
11576
|
+
["research", "show"],
|
|
11577
|
+
],
|
|
11578
|
+
},
|
|
10760
11579
|
{
|
|
10761
11580
|
"id": "collaborate",
|
|
10762
11581
|
"description": "Built-in repository collaboration setup and workflow execution.",
|
|
@@ -10769,12 +11588,16 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10769
11588
|
},
|
|
10770
11589
|
{
|
|
10771
11590
|
"id": "frontier",
|
|
10772
|
-
"description": "Version-stack, milestone, phase, and
|
|
11591
|
+
"description": "Version-stack, milestone, phase, additional-queue, and continuation-preflight control for long-running agent-first research programs.",
|
|
10773
11592
|
"entrypoints": [
|
|
10774
11593
|
["frontier", "init"],
|
|
10775
11594
|
["frontier", "state"],
|
|
10776
11595
|
["frontier", "roadmap"],
|
|
10777
11596
|
["frontier", "checklist"],
|
|
11597
|
+
["frontier", "continuation-status"],
|
|
11598
|
+
["frontier", "preflight-delegate"],
|
|
11599
|
+
["frontier", "additional", "list"],
|
|
11600
|
+
["frontier", "additional", "activate-next"],
|
|
10778
11601
|
["frontier", "stack"],
|
|
10779
11602
|
["frontier", "add-version"],
|
|
10780
11603
|
["frontier", "add-milestone"],
|
|
@@ -10849,6 +11672,8 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10849
11672
|
{"name": "agents_root_set", "path": ["agents", "root", "set"], "json_output": True},
|
|
10850
11673
|
{"name": "agents_sync", "path": ["agents", "sync"], "json_output": True},
|
|
10851
11674
|
{"name": "agents_audit", "path": ["agents", "audit"], "json_output": True},
|
|
11675
|
+
{"name": "project_refresh", "path": ["project", "refresh"], "json_output": True},
|
|
11676
|
+
{"name": "project_show", "path": ["project", "show"], "json_output": True},
|
|
10852
11677
|
{"name": "opportunities_list", "path": ["opportunities", "list"], "json_output": True},
|
|
10853
11678
|
{"name": "opportunities_show", "path": ["opportunities", "show"], "json_output": True},
|
|
10854
11679
|
{"name": "opportunities_focus", "path": ["opportunities", "focus"], "json_output": True},
|
|
@@ -10876,6 +11701,7 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10876
11701
|
{"name": "secrets_show", "path": ["secrets", "show"], "json_output": True},
|
|
10877
11702
|
{"name": "secrets_add", "path": ["secrets", "add"], "json_output": True},
|
|
10878
11703
|
{"name": "secrets_ensure", "path": ["secrets", "ensure"], "json_output": True},
|
|
11704
|
+
{"name": "secrets_keychain_add", "path": ["secrets", "keychain-add"], "json_output": True},
|
|
10879
11705
|
{"name": "secrets_keychain_list", "path": ["secrets", "keychain-list"], "json_output": True},
|
|
10880
11706
|
{"name": "secrets_keychain_show", "path": ["secrets", "keychain-show"], "json_output": True},
|
|
10881
11707
|
{"name": "secrets_sync_keychain", "path": ["secrets", "sync-keychain"], "json_output": True},
|
|
@@ -10932,6 +11758,9 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10932
11758
|
{"name": "discover_profile_init", "path": ["discover", "profile", "init"], "json_output": True},
|
|
10933
11759
|
{"name": "discover_github_scan", "path": ["discover", "github", "scan"], "json_output": True},
|
|
10934
11760
|
{"name": "exchange_repo_synthesize", "path": ["exchange", "repo", "synthesize"], "json_output": True},
|
|
11761
|
+
{"name": "research_ask", "path": ["research", "ask"], "json_output": True},
|
|
11762
|
+
{"name": "research_status", "path": ["research", "status"], "json_output": True},
|
|
11763
|
+
{"name": "research_show", "path": ["research", "show"], "json_output": True},
|
|
10935
11764
|
{"name": "collaborate_init", "path": ["collaborate", "init"], "json_output": True},
|
|
10936
11765
|
{"name": "collaborate_workflows", "path": ["collaborate", "workflows"], "json_output": True},
|
|
10937
11766
|
{"name": "collaborate_gates", "path": ["collaborate", "gates"], "json_output": True},
|
|
@@ -10942,6 +11771,13 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10942
11771
|
{"name": "frontier_state", "path": ["frontier", "state"], "json_output": True},
|
|
10943
11772
|
{"name": "frontier_roadmap", "path": ["frontier", "roadmap"], "json_output": True},
|
|
10944
11773
|
{"name": "frontier_checklist", "path": ["frontier", "checklist"], "json_output": True},
|
|
11774
|
+
{"name": "frontier_continuation_status", "path": ["frontier", "continuation-status"], "json_output": True},
|
|
11775
|
+
{"name": "frontier_preflight_delegate", "path": ["frontier", "preflight-delegate"], "json_output": True},
|
|
11776
|
+
{"name": "frontier_additional_list", "path": ["frontier", "additional", "list"], "json_output": True},
|
|
11777
|
+
{"name": "frontier_additional_add_list", "path": ["frontier", "additional", "add-list"], "json_output": True},
|
|
11778
|
+
{"name": "frontier_additional_add_item", "path": ["frontier", "additional", "add-item"], "json_output": True},
|
|
11779
|
+
{"name": "frontier_additional_activate_next", "path": ["frontier", "additional", "activate-next"], "json_output": True},
|
|
11780
|
+
{"name": "frontier_additional_complete_active", "path": ["frontier", "additional", "complete-active"], "json_output": True},
|
|
10945
11781
|
{"name": "frontier_stack", "path": ["frontier", "stack"], "json_output": True},
|
|
10946
11782
|
{"name": "frontier_add_version", "path": ["frontier", "add-version"], "json_output": True},
|
|
10947
11783
|
{"name": "frontier_add_milestone", "path": ["frontier", "add-milestone"], "json_output": True},
|
|
@@ -10972,8 +11808,10 @@ def _about_payload() -> dict[str, Any]:
|
|
|
10972
11808
|
"YouTube inspection is a built-in ORP ability exposed through `orp youtube inspect`, returning public metadata plus full transcript text and segments whenever public caption tracks are available.",
|
|
10973
11809
|
"Discovery profiles in ORP are portable search-intent files managed directly by ORP.",
|
|
10974
11810
|
"Knowledge exchange is a built-in ORP ability exposed through `orp exchange repo synthesize`, producing structured exchange artifacts and transfer maps for local or remote source repositories.",
|
|
11811
|
+
"Research council runs are built into ORP through `orp research ask`, `orp research status`, and `orp research show`, with dry-run decomposition by default and explicit `--execute` for live provider calls.",
|
|
11812
|
+
"Project context is built into ORP through `orp project refresh` and `orp project show`; it records local authority surfaces and research timing policy for the current directory without calling providers.",
|
|
10975
11813
|
"Collaboration is a built-in ORP ability exposed through `orp collaborate ...`.",
|
|
10976
|
-
"Frontier control is a built-in ORP ability exposed through `orp frontier ...`, separating the exact live point, the exact active milestone, the near structured checklist, and
|
|
11814
|
+
"Frontier control is a built-in ORP ability exposed through `orp frontier ...`, separating the exact live point, the exact active milestone, the near structured checklist, the additional work queue, and strict continuation preflight before delegation.",
|
|
10977
11815
|
"Agent modes are lightweight optional overlays for taste, perspective shifts, fresh movement, and intentional comprehension breakdowns; `orp mode breakdown granular-breakdown --json` gives agents a broad-to-atomic ladder for complex work, while `orp mode nudge granular-breakdown --json` gives a short reminder card.",
|
|
10978
11816
|
"Project/session linking is a built-in ORP ability exposed through `orp link ...` and stored machine-locally under `.git/orp/link/`.",
|
|
10979
11817
|
"Secrets are easiest to understand as saved credentials and related login metadata: humans usually run `orp secrets add ...` and paste the value at the prompt, agents usually pipe the value with `--value-stdin`, optional usernames can be stored alongside the secret when a service needs them, and local macOS Keychain caching plus hosted sync are optional layers on top.",
|
|
@@ -11110,6 +11948,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11110
11948
|
"label": "Audit AGENTS.md and CLAUDE.md so parent/child guidance stays in sync",
|
|
11111
11949
|
"command": "orp agents audit",
|
|
11112
11950
|
},
|
|
11951
|
+
{
|
|
11952
|
+
"label": "Refresh the local project context lens before research-heavy work",
|
|
11953
|
+
"command": "orp project refresh --json",
|
|
11954
|
+
},
|
|
11113
11955
|
{
|
|
11114
11956
|
"label": "Save a new API key or token interactively when you need one",
|
|
11115
11957
|
"command": 'orp secrets add --alias <alias> --label "<label>" --provider <provider>',
|
|
@@ -11154,11 +11996,19 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11154
11996
|
"command": "orp agents audit",
|
|
11155
11997
|
},
|
|
11156
11998
|
{
|
|
11157
|
-
"label": "Inspect the
|
|
11158
|
-
"command": "orp
|
|
11999
|
+
"label": "Inspect the local project context lens",
|
|
12000
|
+
"command": "orp project show --json",
|
|
11159
12001
|
},
|
|
11160
12002
|
{
|
|
11161
|
-
"label": "
|
|
12003
|
+
"label": "Refresh the local project context lens",
|
|
12004
|
+
"command": "orp project refresh --json",
|
|
12005
|
+
},
|
|
12006
|
+
{
|
|
12007
|
+
"label": "Inspect the saved service and data connections for this user",
|
|
12008
|
+
"command": "orp connections list",
|
|
12009
|
+
},
|
|
12010
|
+
{
|
|
12011
|
+
"label": "Run a Codex-backed agenda refresh using current workspace, GitHub, and opportunity context",
|
|
11162
12012
|
"command": "orp agenda refresh --json",
|
|
11163
12013
|
},
|
|
11164
12014
|
{
|
|
@@ -11261,6 +12111,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11261
12111
|
"label": "List locally cached Keychain-backed secrets on this Mac",
|
|
11262
12112
|
"command": "orp secrets keychain-list --json",
|
|
11263
12113
|
},
|
|
12114
|
+
{
|
|
12115
|
+
"label": "Save a key directly into the local ORP macOS Keychain store",
|
|
12116
|
+
"command": "orp secrets keychain-add --alias <alias> --provider <provider> --value-stdin --json",
|
|
12117
|
+
},
|
|
11264
12118
|
{
|
|
11265
12119
|
"label": "Sync one hosted secret into the local macOS Keychain",
|
|
11266
12120
|
"command": "orp secrets sync-keychain <alias-or-id> --json",
|
|
@@ -11293,6 +12147,10 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11293
12147
|
"label": "Deeply synthesize another repo or local project into exchange artifacts",
|
|
11294
12148
|
"command": "orp exchange repo synthesize /path/to/source --json",
|
|
11295
12149
|
},
|
|
12150
|
+
{
|
|
12151
|
+
"label": "Decompose a question into an OpenAI research-loop run",
|
|
12152
|
+
"command": 'orp research ask "What should we investigate?" --json',
|
|
12153
|
+
},
|
|
11296
12154
|
{
|
|
11297
12155
|
"label": "Inspect local repo governance status",
|
|
11298
12156
|
"command": "orp status --json",
|
|
@@ -11594,6 +12452,14 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11594
12452
|
"orp agents audit",
|
|
11595
12453
|
],
|
|
11596
12454
|
},
|
|
12455
|
+
{
|
|
12456
|
+
"id": "project",
|
|
12457
|
+
"description": "Local project context lens for authority surfaces, directory signals, research call timing, and explicit evolution as the repo changes.",
|
|
12458
|
+
"entrypoints": [
|
|
12459
|
+
"orp project refresh --json",
|
|
12460
|
+
"orp project show --json",
|
|
12461
|
+
],
|
|
12462
|
+
},
|
|
11597
12463
|
{
|
|
11598
12464
|
"id": "hosted",
|
|
11599
12465
|
"description": "Hosted identity, ideas, first-class workspace records, runner lanes, and control-plane status.",
|
|
@@ -11626,6 +12492,7 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11626
12492
|
"orp secrets show <alias-or-id> --json",
|
|
11627
12493
|
'orp secrets add --alias <alias> --label "<label>" --provider <provider>',
|
|
11628
12494
|
"orp secrets ensure --alias <alias> --provider <provider> --current-project --json",
|
|
12495
|
+
"orp secrets keychain-add --alias <alias> --provider <provider> --value-stdin --json",
|
|
11629
12496
|
"orp secrets keychain-list --json",
|
|
11630
12497
|
"orp secrets keychain-show <alias-or-id> --json",
|
|
11631
12498
|
"orp secrets sync-keychain <alias-or-id> --json",
|
|
@@ -11694,6 +12561,16 @@ def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
|
|
|
11694
12561
|
"orp exchange repo synthesize /path/to/source --json",
|
|
11695
12562
|
],
|
|
11696
12563
|
},
|
|
12564
|
+
{
|
|
12565
|
+
"id": "research",
|
|
12566
|
+
"description": "Durable OpenAI research-loop question answering that records the decomposition, API call moments, optional live calls, and synthesized answer under orp/research.",
|
|
12567
|
+
"entrypoints": [
|
|
12568
|
+
'orp research ask "What should we investigate?" --json',
|
|
12569
|
+
'orp research ask "What should we investigate?" --execute --json',
|
|
12570
|
+
"orp research status latest --json",
|
|
12571
|
+
"orp research show latest --json",
|
|
12572
|
+
],
|
|
12573
|
+
},
|
|
11697
12574
|
{
|
|
11698
12575
|
"id": "collaborate",
|
|
11699
12576
|
"description": "Built-in repository collaboration setup and workflow execution.",
|
|
@@ -11837,6 +12714,7 @@ def _render_home_screen(payload: dict[str, Any]) -> str:
|
|
|
11837
12714
|
"opportunities",
|
|
11838
12715
|
"connections",
|
|
11839
12716
|
"secrets",
|
|
12717
|
+
"project",
|
|
11840
12718
|
"governance",
|
|
11841
12719
|
"frontier",
|
|
11842
12720
|
"schedule",
|
|
@@ -12305,6 +13183,12 @@ def cmd_init(args: argparse.Namespace) -> int:
|
|
|
12305
13183
|
"action": str(row.get("action", "")).strip(),
|
|
12306
13184
|
}
|
|
12307
13185
|
|
|
13186
|
+
project_context, project_context_action = _write_project_context(repo_root, source="init")
|
|
13187
|
+
files["project_context"] = {
|
|
13188
|
+
"path": _path_for_state(_project_context_path(repo_root), repo_root),
|
|
13189
|
+
"action": project_context_action,
|
|
13190
|
+
}
|
|
13191
|
+
|
|
12308
13192
|
result = {
|
|
12309
13193
|
"ok": True,
|
|
12310
13194
|
"config_action": config_action,
|
|
@@ -12312,6 +13196,12 @@ def cmd_init(args: argparse.Namespace) -> int:
|
|
|
12312
13196
|
"runtime_root": str(repo_root / "orp"),
|
|
12313
13197
|
"files": files,
|
|
12314
13198
|
"agents": agents_sync,
|
|
13199
|
+
"project_context": {
|
|
13200
|
+
"path": _path_for_state(_project_context_path(repo_root), repo_root),
|
|
13201
|
+
"action": project_context_action,
|
|
13202
|
+
"authority_surface_count": project_context["directory_signals"]["authority_surface_count"],
|
|
13203
|
+
"research_default_timing": project_context["research_policy"]["default_timing"],
|
|
13204
|
+
},
|
|
12315
13205
|
"git": {
|
|
12316
13206
|
**git_snapshot,
|
|
12317
13207
|
"initialized_by_orp": bool(git_init_result["initialized"]),
|
|
@@ -12335,6 +13225,7 @@ def cmd_init(args: argparse.Namespace) -> int:
|
|
|
12335
13225
|
if git_init_result["initialized"]:
|
|
12336
13226
|
print(f"initialized git repository with default branch {default_branch}")
|
|
12337
13227
|
print("synced AGENTS.md and CLAUDE.md with ORP-managed blocks")
|
|
13228
|
+
print(f"project_context={_path_for_state(_project_context_path(repo_root), repo_root)}")
|
|
12338
13229
|
print(
|
|
12339
13230
|
"git_state="
|
|
12340
13231
|
+ ",".join(
|
|
@@ -12355,6 +13246,48 @@ def cmd_init(args: argparse.Namespace) -> int:
|
|
|
12355
13246
|
return 0
|
|
12356
13247
|
|
|
12357
13248
|
|
|
13249
|
+
def cmd_project_refresh(args: argparse.Namespace) -> int:
|
|
13250
|
+
repo_root = Path(args.repo_root).resolve()
|
|
13251
|
+
_ensure_dirs(repo_root)
|
|
13252
|
+
payload, action = _write_project_context(repo_root, source="project_refresh")
|
|
13253
|
+
result = {
|
|
13254
|
+
"ok": True,
|
|
13255
|
+
"action": action,
|
|
13256
|
+
"project_context_path": _path_for_state(_project_context_path(repo_root), repo_root),
|
|
13257
|
+
"project": payload.get("project", {}),
|
|
13258
|
+
"authority_surface_count": payload.get("directory_signals", {}).get("authority_surface_count", 0),
|
|
13259
|
+
"directory_signals": payload.get("directory_signals", {}),
|
|
13260
|
+
"research_policy": payload.get("research_policy", {}),
|
|
13261
|
+
"next_actions": payload.get("next_actions", []),
|
|
13262
|
+
}
|
|
13263
|
+
if args.json_output:
|
|
13264
|
+
_print_json(result)
|
|
13265
|
+
else:
|
|
13266
|
+
print(f"action={action}")
|
|
13267
|
+
print(f"project_context={result['project_context_path']}")
|
|
13268
|
+
print(f"authority_surface_count={result['authority_surface_count']}")
|
|
13269
|
+
print(f"research_default_timing={payload.get('research_policy', {}).get('default_timing', '')}")
|
|
13270
|
+
for next_action in result["next_actions"]:
|
|
13271
|
+
print(f"next={next_action}")
|
|
13272
|
+
return 0
|
|
13273
|
+
|
|
13274
|
+
|
|
13275
|
+
def cmd_project_show(args: argparse.Namespace) -> int:
|
|
13276
|
+
repo_root = Path(args.repo_root).resolve()
|
|
13277
|
+
payload = _load_project_context(repo_root)
|
|
13278
|
+
if args.json_output:
|
|
13279
|
+
_print_json(payload)
|
|
13280
|
+
else:
|
|
13281
|
+
print(f"project={payload.get('project', {}).get('name', '')}")
|
|
13282
|
+
print(f"root={payload.get('project', {}).get('root', '')}")
|
|
13283
|
+
print(f"refreshed_at_utc={payload.get('refreshed_at_utc', '')}")
|
|
13284
|
+
print(f"research_default_timing={payload.get('research_policy', {}).get('default_timing', '')}")
|
|
13285
|
+
for surface in payload.get("authority_surfaces", []):
|
|
13286
|
+
if isinstance(surface, dict) and surface.get("exists"):
|
|
13287
|
+
print(f"surface={surface.get('path', '')}:{surface.get('kind', '')}:{surface.get('role', '')}")
|
|
13288
|
+
return 0
|
|
13289
|
+
|
|
13290
|
+
|
|
12358
13291
|
def cmd_agents_root_show(args: argparse.Namespace) -> int:
|
|
12359
13292
|
payload = _agents_root_show_payload()
|
|
12360
13293
|
if args.json_output:
|
|
@@ -12406,6 +13339,7 @@ def _render_governance_status_text(payload: dict[str, Any]) -> str:
|
|
|
12406
13339
|
runtime = payload.get("runtime", {}) if isinstance(payload.get("runtime"), dict) else {}
|
|
12407
13340
|
validation = payload.get("validation", {}) if isinstance(payload.get("validation"), dict) else {}
|
|
12408
13341
|
readiness = payload.get("readiness", {}) if isinstance(payload.get("readiness"), dict) else {}
|
|
13342
|
+
project_context = payload.get("project_context", {}) if isinstance(payload.get("project_context"), dict) else {}
|
|
12409
13343
|
last_branch_action = (
|
|
12410
13344
|
runtime.get("last_branch_action")
|
|
12411
13345
|
if isinstance(runtime.get("last_branch_action"), dict)
|
|
@@ -12444,6 +13378,10 @@ def _render_governance_status_text(payload: dict[str, Any]) -> str:
|
|
|
12444
13378
|
f"paths.config={payload.get('config_path', '')}",
|
|
12445
13379
|
f"paths.handoff={payload.get('handoff_path', '')}",
|
|
12446
13380
|
f"paths.checkpoint_log={payload.get('checkpoint_log_path', '')}",
|
|
13381
|
+
f"paths.project_context={project_context.get('path', '')}",
|
|
13382
|
+
f"project_context.exists={'true' if project_context.get('exists') else 'false'}",
|
|
13383
|
+
f"project_context.refreshed_at={project_context.get('refreshed_at_utc', '') or '(never)'}",
|
|
13384
|
+
f"project_context.research_default_timing={project_context.get('research_default_timing', '') or '(unset)'}",
|
|
12447
13385
|
f"paths.git_runtime={payload.get('git_runtime_path', '')}",
|
|
12448
13386
|
f"readiness.local_ready={'true' if readiness.get('local_ready') else 'false'}",
|
|
12449
13387
|
f"readiness.remote_ready={'true' if readiness.get('remote_ready') else 'false'}",
|
|
@@ -13532,6 +14470,245 @@ def cmd_frontier_checklist(args: argparse.Namespace) -> int:
|
|
|
13532
14470
|
return 0
|
|
13533
14471
|
|
|
13534
14472
|
|
|
14473
|
+
def cmd_frontier_additional_list(args: argparse.Namespace) -> int:
|
|
14474
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14475
|
+
stack = _frontier_load_stack(repo_root)
|
|
14476
|
+
payload = _frontier_load_additional(repo_root, stack)
|
|
14477
|
+
lists = payload.get("lists")
|
|
14478
|
+
rows = lists if isinstance(lists, list) else []
|
|
14479
|
+
pending_items = 0
|
|
14480
|
+
active_items = 0
|
|
14481
|
+
complete_items = 0
|
|
14482
|
+
for item_list in rows:
|
|
14483
|
+
if not isinstance(item_list, dict):
|
|
14484
|
+
continue
|
|
14485
|
+
items = item_list.get("items")
|
|
14486
|
+
if not isinstance(items, list):
|
|
14487
|
+
continue
|
|
14488
|
+
for item in items:
|
|
14489
|
+
if not isinstance(item, dict):
|
|
14490
|
+
continue
|
|
14491
|
+
status = str(item.get("status", "")).strip() or "pending"
|
|
14492
|
+
if status == "active":
|
|
14493
|
+
active_items += 1
|
|
14494
|
+
elif status == "complete":
|
|
14495
|
+
complete_items += 1
|
|
14496
|
+
else:
|
|
14497
|
+
pending_items += 1
|
|
14498
|
+
result = {
|
|
14499
|
+
"ok": True,
|
|
14500
|
+
**payload,
|
|
14501
|
+
"summary": {
|
|
14502
|
+
"lists": len(rows),
|
|
14503
|
+
"pending_items": pending_items,
|
|
14504
|
+
"active_items": active_items,
|
|
14505
|
+
"complete_items": complete_items,
|
|
14506
|
+
},
|
|
14507
|
+
"paths": {
|
|
14508
|
+
"additional_json": _path_for_state(_frontier_paths(repo_root)["additional_json"], repo_root),
|
|
14509
|
+
"additional_md": _path_for_state(_frontier_paths(repo_root)["additional_md"], repo_root),
|
|
14510
|
+
},
|
|
14511
|
+
}
|
|
14512
|
+
if args.json_output:
|
|
14513
|
+
_print_json(result)
|
|
14514
|
+
else:
|
|
14515
|
+
print(f"lists={len(rows)}")
|
|
14516
|
+
print(f"active_list_id={payload.get('active_list_id', '') or '(none)'}")
|
|
14517
|
+
print(f"active_item_id={payload.get('active_item_id', '') or '(none)'}")
|
|
14518
|
+
print(f"pending_items={pending_items}")
|
|
14519
|
+
print(f"active_items={active_items}")
|
|
14520
|
+
print(f"complete_items={complete_items}")
|
|
14521
|
+
return 0
|
|
14522
|
+
|
|
14523
|
+
|
|
14524
|
+
def cmd_frontier_additional_add_list(args: argparse.Namespace) -> int:
|
|
14525
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14526
|
+
stack = _frontier_load_stack(repo_root)
|
|
14527
|
+
payload = _frontier_load_additional(repo_root, stack)
|
|
14528
|
+
list_id = str(args.id).strip()
|
|
14529
|
+
if _frontier_find_additional_list(payload, list_id) is not None:
|
|
14530
|
+
raise RuntimeError(f"frontier additional list `{list_id}` already exists.")
|
|
14531
|
+
item_list = {
|
|
14532
|
+
"id": list_id,
|
|
14533
|
+
"label": str(args.label).strip(),
|
|
14534
|
+
"status": str(args.status or "pending").strip() or "pending",
|
|
14535
|
+
"items": [],
|
|
14536
|
+
}
|
|
14537
|
+
lists = payload.get("lists")
|
|
14538
|
+
if not isinstance(lists, list):
|
|
14539
|
+
lists = []
|
|
14540
|
+
payload["lists"] = lists
|
|
14541
|
+
lists.append(item_list)
|
|
14542
|
+
written = _frontier_write_additional_views(repo_root, payload)
|
|
14543
|
+
result = {"ok": True, "list": item_list, "paths": written}
|
|
14544
|
+
if args.json_output:
|
|
14545
|
+
_print_json(result)
|
|
14546
|
+
else:
|
|
14547
|
+
print(f"list_id={list_id}")
|
|
14548
|
+
print(f"label={item_list['label']}")
|
|
14549
|
+
return 0
|
|
14550
|
+
|
|
14551
|
+
|
|
14552
|
+
def cmd_frontier_additional_add_item(args: argparse.Namespace) -> int:
|
|
14553
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14554
|
+
stack = _frontier_load_stack(repo_root)
|
|
14555
|
+
payload = _frontier_load_additional(repo_root, stack)
|
|
14556
|
+
list_id = str(args.list).strip()
|
|
14557
|
+
item_list = _frontier_find_additional_list(payload, list_id)
|
|
14558
|
+
if item_list is None:
|
|
14559
|
+
raise RuntimeError(f"frontier additional list `{list_id}` was not found.")
|
|
14560
|
+
item_id = str(args.id).strip()
|
|
14561
|
+
if _frontier_find_additional_item(item_list, item_id) is not None:
|
|
14562
|
+
raise RuntimeError(f"frontier additional item `{item_id}` already exists in list `{list_id}`.")
|
|
14563
|
+
item = {
|
|
14564
|
+
"id": item_id,
|
|
14565
|
+
"label": str(args.label).strip(),
|
|
14566
|
+
"status": str(args.status or "pending").strip() or "pending",
|
|
14567
|
+
"goal": str(args.goal or "").strip(),
|
|
14568
|
+
"depends_on": _coerce_string_list(getattr(args, "depends_on", [])),
|
|
14569
|
+
"requirements": _coerce_string_list(getattr(args, "requirement", [])),
|
|
14570
|
+
"success_criteria": _coerce_string_list(getattr(args, "success_criterion", [])),
|
|
14571
|
+
"plans": _coerce_string_list(getattr(args, "plan", [])),
|
|
14572
|
+
}
|
|
14573
|
+
items = item_list.get("items")
|
|
14574
|
+
if not isinstance(items, list):
|
|
14575
|
+
items = []
|
|
14576
|
+
item_list["items"] = items
|
|
14577
|
+
items.append(item)
|
|
14578
|
+
written = _frontier_write_additional_views(repo_root, payload)
|
|
14579
|
+
result = {"ok": True, "list_id": list_id, "item": item, "paths": written}
|
|
14580
|
+
if args.json_output:
|
|
14581
|
+
_print_json(result)
|
|
14582
|
+
else:
|
|
14583
|
+
print(f"list_id={list_id}")
|
|
14584
|
+
print(f"item_id={item_id}")
|
|
14585
|
+
print(f"label={item['label']}")
|
|
14586
|
+
return 0
|
|
14587
|
+
|
|
14588
|
+
|
|
14589
|
+
def cmd_frontier_additional_activate_next(args: argparse.Namespace) -> int:
|
|
14590
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14591
|
+
stack = _frontier_load_stack(repo_root)
|
|
14592
|
+
payload = _frontier_load_additional(repo_root, stack)
|
|
14593
|
+
active_list, active_item = _frontier_active_additional_item(payload)
|
|
14594
|
+
already_active = active_list is not None and active_item is not None and str(active_item.get("status", "")).strip() == "active"
|
|
14595
|
+
if not already_active:
|
|
14596
|
+
active_list, active_item = _frontier_next_pending_additional_item(payload)
|
|
14597
|
+
if active_list is not None and active_item is not None:
|
|
14598
|
+
active_list["status"] = "active"
|
|
14599
|
+
active_item["status"] = "active"
|
|
14600
|
+
payload["active_list_id"] = str(active_list.get("id", "")).strip()
|
|
14601
|
+
payload["active_item_id"] = str(active_item.get("id", "")).strip()
|
|
14602
|
+
else:
|
|
14603
|
+
payload["active_list_id"] = ""
|
|
14604
|
+
payload["active_item_id"] = ""
|
|
14605
|
+
written = _frontier_write_additional_views(repo_root, payload)
|
|
14606
|
+
activated = active_list is not None and active_item is not None
|
|
14607
|
+
result = {
|
|
14608
|
+
"ok": True,
|
|
14609
|
+
"activated": activated,
|
|
14610
|
+
"already_active": already_active,
|
|
14611
|
+
"active_list_id": payload.get("active_list_id", ""),
|
|
14612
|
+
"active_item_id": payload.get("active_item_id", ""),
|
|
14613
|
+
"list": active_list if activated else None,
|
|
14614
|
+
"item": active_item if activated else None,
|
|
14615
|
+
"next_action": _frontier_additional_item_summary(active_list, active_item) if activated else "",
|
|
14616
|
+
"paths": written,
|
|
14617
|
+
}
|
|
14618
|
+
if args.json_output:
|
|
14619
|
+
_print_json(result)
|
|
14620
|
+
else:
|
|
14621
|
+
print(f"activated={'true' if activated else 'false'}")
|
|
14622
|
+
if activated:
|
|
14623
|
+
print(f"active_list_id={result['active_list_id']}")
|
|
14624
|
+
print(f"active_item_id={result['active_item_id']}")
|
|
14625
|
+
print(f"next_action={result['next_action']}")
|
|
14626
|
+
return 0
|
|
14627
|
+
|
|
14628
|
+
|
|
14629
|
+
def cmd_frontier_additional_complete_active(args: argparse.Namespace) -> int:
|
|
14630
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14631
|
+
stack = _frontier_load_stack(repo_root)
|
|
14632
|
+
payload = _frontier_load_additional(repo_root, stack)
|
|
14633
|
+
active_list, active_item = _frontier_active_additional_item(payload)
|
|
14634
|
+
completed = active_list is not None and active_item is not None
|
|
14635
|
+
list_completed = False
|
|
14636
|
+
if completed:
|
|
14637
|
+
active_item["status"] = str(args.status or "complete").strip() or "complete"
|
|
14638
|
+
items = active_list.get("items")
|
|
14639
|
+
if isinstance(items, list) and all(
|
|
14640
|
+
str(item.get("status", "")).strip() in {"complete", "skipped"} for item in items if isinstance(item, dict)
|
|
14641
|
+
):
|
|
14642
|
+
active_list["status"] = "complete"
|
|
14643
|
+
list_completed = True
|
|
14644
|
+
payload["active_list_id"] = ""
|
|
14645
|
+
payload["active_item_id"] = ""
|
|
14646
|
+
written = _frontier_write_additional_views(repo_root, payload)
|
|
14647
|
+
result = {
|
|
14648
|
+
"ok": True,
|
|
14649
|
+
"completed": completed,
|
|
14650
|
+
"list_completed": list_completed,
|
|
14651
|
+
"list": active_list if completed else None,
|
|
14652
|
+
"item": active_item if completed else None,
|
|
14653
|
+
"paths": written,
|
|
14654
|
+
}
|
|
14655
|
+
if args.json_output:
|
|
14656
|
+
_print_json(result)
|
|
14657
|
+
else:
|
|
14658
|
+
print(f"completed={'true' if completed else 'false'}")
|
|
14659
|
+
print(f"list_completed={'true' if list_completed else 'false'}")
|
|
14660
|
+
return 0
|
|
14661
|
+
|
|
14662
|
+
|
|
14663
|
+
def _print_frontier_diagnostic_payload(payload: dict[str, Any]) -> None:
|
|
14664
|
+
print(f"ok={'true' if payload.get('ok') else 'false'}")
|
|
14665
|
+
print(f"next_action={payload.get('next_action', '') or '(none)'}")
|
|
14666
|
+
suggested = str(payload.get("suggested_next_command", "")).strip()
|
|
14667
|
+
if suggested:
|
|
14668
|
+
print(f"suggested_next_command={suggested}")
|
|
14669
|
+
summary = payload.get("summary")
|
|
14670
|
+
if isinstance(summary, dict):
|
|
14671
|
+
print(f"active_primary={summary.get('active_primary_kind', '') or '(none)'}:{summary.get('active_primary_id', '') or '(none)'}")
|
|
14672
|
+
additional = summary.get("additional")
|
|
14673
|
+
if isinstance(additional, dict):
|
|
14674
|
+
print(f"pending_additional_items={additional.get('pending_items', 0)}")
|
|
14675
|
+
print(f"active_additional={additional.get('active_list_id', '') or '(none)'}/{additional.get('active_item_id', '') or '(none)'}")
|
|
14676
|
+
for issue in payload.get("issues", []):
|
|
14677
|
+
if isinstance(issue, dict):
|
|
14678
|
+
print(f"issue={issue.get('severity','')}:{issue.get('code','')}:{issue.get('message','')}")
|
|
14679
|
+
|
|
14680
|
+
|
|
14681
|
+
def cmd_frontier_continuation_status(args: argparse.Namespace) -> int:
|
|
14682
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14683
|
+
strict = bool(getattr(args, "strict", False))
|
|
14684
|
+
payload = _frontier_continuation_payload(repo_root, strict=strict)
|
|
14685
|
+
if args.json_output:
|
|
14686
|
+
_print_json(payload)
|
|
14687
|
+
else:
|
|
14688
|
+
_print_frontier_diagnostic_payload(payload)
|
|
14689
|
+
return 0 if payload["ok"] else 1
|
|
14690
|
+
|
|
14691
|
+
|
|
14692
|
+
def cmd_frontier_preflight_delegate(args: argparse.Namespace) -> int:
|
|
14693
|
+
repo_root = Path(args.repo_root).resolve()
|
|
14694
|
+
payload = _frontier_doctor_payload(repo_root)
|
|
14695
|
+
payload["strict"] = True
|
|
14696
|
+
payload["ok"] = _frontier_diagnostic_ok(payload.get("issues", []), strict=True)
|
|
14697
|
+
continuation = payload.get("continuation") if isinstance(payload.get("continuation"), dict) else {}
|
|
14698
|
+
payload["next_action"] = continuation.get("next_action", "")
|
|
14699
|
+
payload["suggested_next_command"] = continuation.get("suggested_next_command", "")
|
|
14700
|
+
payload["summary"] = continuation.get("summary", {})
|
|
14701
|
+
payload["preflight"] = {
|
|
14702
|
+
"ready": bool(payload["ok"]),
|
|
14703
|
+
"purpose": "Block delegation when the frontier cannot prove a single safe continuation or terminal state.",
|
|
14704
|
+
}
|
|
14705
|
+
if args.json_output:
|
|
14706
|
+
_print_json(payload)
|
|
14707
|
+
else:
|
|
14708
|
+
_print_frontier_diagnostic_payload(payload)
|
|
14709
|
+
return 0 if payload["ok"] else 1
|
|
14710
|
+
|
|
14711
|
+
|
|
13535
14712
|
def cmd_frontier_add_version(args: argparse.Namespace) -> int:
|
|
13536
14713
|
repo_root = Path(args.repo_root).resolve()
|
|
13537
14714
|
stack = _frontier_load_stack(repo_root)
|
|
@@ -13739,6 +14916,9 @@ def cmd_frontier_render(args: argparse.Namespace) -> int:
|
|
|
13739
14916
|
def cmd_frontier_doctor(args: argparse.Namespace) -> int:
|
|
13740
14917
|
repo_root = Path(args.repo_root).resolve()
|
|
13741
14918
|
payload = _frontier_doctor_payload(repo_root)
|
|
14919
|
+
strict = bool(getattr(args, "strict", False))
|
|
14920
|
+
payload["strict"] = strict
|
|
14921
|
+
payload["ok"] = _frontier_diagnostic_ok(payload.get("issues", []), strict=strict)
|
|
13742
14922
|
fixes_applied: list[str] = []
|
|
13743
14923
|
if args.fix and payload["ok"]:
|
|
13744
14924
|
stack = _frontier_load_stack(repo_root)
|
|
@@ -15120,79 +16300,1491 @@ def cmd_discover_github_scan(args: argparse.Namespace) -> int:
|
|
|
15120
16300
|
_print_json(payload)
|
|
15121
16301
|
return 0
|
|
15122
16302
|
|
|
15123
|
-
print(f"scan_id={payload['scan_id']}")
|
|
15124
|
-
print(f"profile={payload['profile']['path']}")
|
|
15125
|
-
print(f"owner={payload['owner']['login']}")
|
|
15126
|
-
print(f"owner_type={payload['owner']['type']}")
|
|
15127
|
-
print(f"scan_json={payload['artifacts']['scan_json']}")
|
|
15128
|
-
print(f"summary_md={payload['artifacts']['summary_md']}")
|
|
15129
|
-
if payload["repos"]:
|
|
15130
|
-
top_repo = payload["repos"][0]["full_name"]
|
|
15131
|
-
print(f"top_repo={top_repo}")
|
|
15132
|
-
print(f"next=orp collaborate init --github-repo {top_repo}")
|
|
15133
|
-
if payload["issues"]:
|
|
15134
|
-
top_issue = payload["issues"][0]
|
|
15135
|
-
print(f"top_issue={top_issue['repo']}#{top_issue['number']}")
|
|
15136
|
-
return 0
|
|
16303
|
+
print(f"scan_id={payload['scan_id']}")
|
|
16304
|
+
print(f"profile={payload['profile']['path']}")
|
|
16305
|
+
print(f"owner={payload['owner']['login']}")
|
|
16306
|
+
print(f"owner_type={payload['owner']['type']}")
|
|
16307
|
+
print(f"scan_json={payload['artifacts']['scan_json']}")
|
|
16308
|
+
print(f"summary_md={payload['artifacts']['summary_md']}")
|
|
16309
|
+
if payload["repos"]:
|
|
16310
|
+
top_repo = payload["repos"][0]["full_name"]
|
|
16311
|
+
print(f"top_repo={top_repo}")
|
|
16312
|
+
print(f"next=orp collaborate init --github-repo {top_repo}")
|
|
16313
|
+
if payload["issues"]:
|
|
16314
|
+
top_issue = payload["issues"][0]
|
|
16315
|
+
print(f"top_issue={top_issue['repo']}#{top_issue['number']}")
|
|
16316
|
+
return 0
|
|
16317
|
+
|
|
16318
|
+
|
|
16319
|
+
def cmd_exchange_repo_synthesize(args: argparse.Namespace) -> int:
|
|
16320
|
+
repo_root = Path(args.repo_root).resolve()
|
|
16321
|
+
exchange_id = str(getattr(args, "exchange_id", "") or "").strip() or _exchange_id()
|
|
16322
|
+
source = _exchange_source_payload(repo_root, args)
|
|
16323
|
+
source_root = Path(str(source.get("local_path", "")).strip()).resolve()
|
|
16324
|
+
inventory = _exchange_inventory(source_root)
|
|
16325
|
+
relation = _exchange_relation(repo_root, source_root, inventory)
|
|
16326
|
+
suggested_focus = _exchange_suggested_focus(inventory, relation)
|
|
16327
|
+
paths = _exchange_paths(repo_root, exchange_id)
|
|
16328
|
+
|
|
16329
|
+
payload = {
|
|
16330
|
+
"schema_version": EXCHANGE_REPORT_SCHEMA_VERSION,
|
|
16331
|
+
"kind": "exchange_report",
|
|
16332
|
+
"exchange_id": exchange_id,
|
|
16333
|
+
"generated_at_utc": _now_utc(),
|
|
16334
|
+
"current_project_root": str(repo_root),
|
|
16335
|
+
"source": source,
|
|
16336
|
+
"inventory": inventory,
|
|
16337
|
+
"relation": relation,
|
|
16338
|
+
"suggested_focus": suggested_focus,
|
|
16339
|
+
"artifacts": {
|
|
16340
|
+
"exchange_json": _path_for_state(paths["exchange_json"], repo_root),
|
|
16341
|
+
"summary_md": _path_for_state(paths["summary_md"], repo_root),
|
|
16342
|
+
"transfer_map_md": _path_for_state(paths["transfer_map_md"], repo_root),
|
|
16343
|
+
},
|
|
16344
|
+
"notes": [
|
|
16345
|
+
"Knowledge exchange is deeper than discovery scan output.",
|
|
16346
|
+
"Exchange artifacts are structured synthesis aids, not evidence by themselves.",
|
|
16347
|
+
"Local non-git directories can be bootstrapped into git when `--allow-git-init` is explicitly provided.",
|
|
16348
|
+
],
|
|
16349
|
+
}
|
|
16350
|
+
_write_json(paths["exchange_json"], payload)
|
|
16351
|
+
_write_text(paths["summary_md"], _exchange_summary_markdown(payload))
|
|
16352
|
+
_write_text(paths["transfer_map_md"], _exchange_transfer_map_markdown(payload))
|
|
16353
|
+
|
|
16354
|
+
result = {
|
|
16355
|
+
"ok": True,
|
|
16356
|
+
"exchange_id": exchange_id,
|
|
16357
|
+
"source": source,
|
|
16358
|
+
"inventory": inventory,
|
|
16359
|
+
"relation": relation,
|
|
16360
|
+
"suggested_focus": suggested_focus,
|
|
16361
|
+
"artifacts": payload["artifacts"],
|
|
16362
|
+
"schema_path": "spec/v1/exchange-report.schema.json",
|
|
16363
|
+
}
|
|
16364
|
+
if args.json_output:
|
|
16365
|
+
_print_json(result)
|
|
16366
|
+
return 0
|
|
16367
|
+
|
|
16368
|
+
print(f"exchange_id={exchange_id}")
|
|
16369
|
+
print(f"source.mode={source.get('mode', '')}")
|
|
16370
|
+
print(f"source.local_path={source.get('local_path', '')}")
|
|
16371
|
+
print(f"source.git_present={str(bool(source.get('git_present'))).lower()}")
|
|
16372
|
+
print(f"source.git_initialized_by_orp={str(bool(source.get('git_initialized_by_orp'))).lower()}")
|
|
16373
|
+
print(f"artifacts.exchange_json={payload['artifacts']['exchange_json']}")
|
|
16374
|
+
print(f"artifacts.summary_md={payload['artifacts']['summary_md']}")
|
|
16375
|
+
print(f"artifacts.transfer_map_md={payload['artifacts']['transfer_map_md']}")
|
|
16376
|
+
return 0
|
|
16377
|
+
|
|
16378
|
+
|
|
16379
|
+
def _research_id() -> str:
|
|
16380
|
+
return "research-" + dt.datetime.now(dt.timezone.utc).strftime("%Y%m%d-%H%M%S-%f")
|
|
16381
|
+
|
|
16382
|
+
|
|
16383
|
+
def _research_root(repo_root: Path) -> Path:
|
|
16384
|
+
return repo_root / "orp" / "research"
|
|
16385
|
+
|
|
16386
|
+
|
|
16387
|
+
def _research_paths(repo_root: Path, run_id: str) -> dict[str, Path]:
|
|
16388
|
+
root = _research_root(repo_root) / run_id
|
|
16389
|
+
return {
|
|
16390
|
+
"root": root,
|
|
16391
|
+
"request_json": root / "REQUEST.json",
|
|
16392
|
+
"breakdown_json": root / "BREAKDOWN.json",
|
|
16393
|
+
"profile_json": root / "PROFILE.json",
|
|
16394
|
+
"answer_json": root / "ANSWER.json",
|
|
16395
|
+
"summary_md": root / "RUN_SUMMARY.md",
|
|
16396
|
+
"lanes_root": root / "lanes",
|
|
16397
|
+
"raw_root": root / "raw",
|
|
16398
|
+
}
|
|
16399
|
+
|
|
16400
|
+
|
|
16401
|
+
def _research_default_profile(profile_id: str = "openai-council") -> dict[str, Any]:
|
|
16402
|
+
profile_id = profile_id or "openai-council"
|
|
16403
|
+
return {
|
|
16404
|
+
"schema_version": RESEARCH_RUN_SCHEMA_VERSION,
|
|
16405
|
+
"profile_id": profile_id,
|
|
16406
|
+
"label": "OpenAI research loop",
|
|
16407
|
+
"description": (
|
|
16408
|
+
"ORP-owned decomposition and synthesis across three explicit OpenAI API "
|
|
16409
|
+
"call moments: high-reasoning thinking, web synthesis, and Pro/Deep Research."
|
|
16410
|
+
),
|
|
16411
|
+
"execution_policy": {
|
|
16412
|
+
"live_requires_execute": True,
|
|
16413
|
+
"process_only": True,
|
|
16414
|
+
"secrets_not_persisted": True,
|
|
16415
|
+
"default_timeout_sec": 120,
|
|
16416
|
+
},
|
|
16417
|
+
"call_moments": [
|
|
16418
|
+
{
|
|
16419
|
+
"moment_id": "plan",
|
|
16420
|
+
"label": "Local decomposition plan",
|
|
16421
|
+
"calls_api": False,
|
|
16422
|
+
"description": "Create ORP artifacts, prompts, and lane plan without resolving any API key.",
|
|
16423
|
+
},
|
|
16424
|
+
{
|
|
16425
|
+
"moment_id": "thinking_reasoning_high",
|
|
16426
|
+
"label": "Thinking / reasoning high",
|
|
16427
|
+
"calls_api": True,
|
|
16428
|
+
"secret_alias": "openai-primary",
|
|
16429
|
+
"env_var": "OPENAI_API_KEY",
|
|
16430
|
+
"description": "Call GPT-5.4 with high reasoning for the deliberate thinking pass.",
|
|
16431
|
+
},
|
|
16432
|
+
{
|
|
16433
|
+
"moment_id": "web_synthesis",
|
|
16434
|
+
"label": "Web synthesis",
|
|
16435
|
+
"calls_api": True,
|
|
16436
|
+
"secret_alias": "openai-primary",
|
|
16437
|
+
"env_var": "OPENAI_API_KEY",
|
|
16438
|
+
"description": "Call GPT-5.4 with web search for current public evidence and citations.",
|
|
16439
|
+
},
|
|
16440
|
+
{
|
|
16441
|
+
"moment_id": "pro_deep_research",
|
|
16442
|
+
"label": "Pro / Deep Research",
|
|
16443
|
+
"calls_api": True,
|
|
16444
|
+
"secret_alias": "openai-primary",
|
|
16445
|
+
"env_var": "OPENAI_API_KEY",
|
|
16446
|
+
"description": "Call the OpenAI Deep Research model for a longer agentic research report.",
|
|
16447
|
+
},
|
|
16448
|
+
],
|
|
16449
|
+
"lanes": [
|
|
16450
|
+
{
|
|
16451
|
+
"lane_id": "openai_reasoning_high",
|
|
16452
|
+
"call_moment": "thinking_reasoning_high",
|
|
16453
|
+
"label": "OpenAI reasoning high",
|
|
16454
|
+
"provider": "openai",
|
|
16455
|
+
"model": "gpt-5.4",
|
|
16456
|
+
"adapter": "openai_responses",
|
|
16457
|
+
"role": "Deliberate high-reasoning pass from the provided context. Think hard, critique assumptions, and produce a decision-oriented answer.",
|
|
16458
|
+
"env_var": "OPENAI_API_KEY",
|
|
16459
|
+
"secret_alias": "openai-primary",
|
|
16460
|
+
"reasoning_effort": "high",
|
|
16461
|
+
"text_verbosity": "medium",
|
|
16462
|
+
"max_output_tokens": 4200,
|
|
16463
|
+
},
|
|
16464
|
+
{
|
|
16465
|
+
"lane_id": "openai_web_synthesis",
|
|
16466
|
+
"call_moment": "web_synthesis",
|
|
16467
|
+
"label": "OpenAI web synthesis",
|
|
16468
|
+
"provider": "openai",
|
|
16469
|
+
"model": "gpt-5.4",
|
|
16470
|
+
"adapter": "openai_responses",
|
|
16471
|
+
"role": "Recency-aware synthesis using OpenAI Responses web search with citations.",
|
|
16472
|
+
"env_var": "OPENAI_API_KEY",
|
|
16473
|
+
"secret_alias": "openai-primary",
|
|
16474
|
+
"reasoning_effort": "high",
|
|
16475
|
+
"text_verbosity": "medium",
|
|
16476
|
+
"web_search": True,
|
|
16477
|
+
"web_search_tool": "web_search",
|
|
16478
|
+
"search_context_size": "high",
|
|
16479
|
+
"external_web_access": True,
|
|
16480
|
+
"max_tool_calls": 8,
|
|
16481
|
+
"max_output_tokens": 3600,
|
|
16482
|
+
},
|
|
16483
|
+
{
|
|
16484
|
+
"lane_id": "openai_deep_research",
|
|
16485
|
+
"call_moment": "pro_deep_research",
|
|
16486
|
+
"label": "OpenAI Pro / Deep Research",
|
|
16487
|
+
"provider": "openai",
|
|
16488
|
+
"model": "o3-deep-research-2025-06-26",
|
|
16489
|
+
"adapter": "openai_responses",
|
|
16490
|
+
"role": "Pro Research style long-form investigation. Produce a structured, citation-rich report grounded in public sources.",
|
|
16491
|
+
"env_var": "OPENAI_API_KEY",
|
|
16492
|
+
"secret_alias": "openai-primary",
|
|
16493
|
+
"reasoning_summary": "auto",
|
|
16494
|
+
"web_search": True,
|
|
16495
|
+
"web_search_tool": "web_search_preview",
|
|
16496
|
+
"background": True,
|
|
16497
|
+
"max_tool_calls": 40,
|
|
16498
|
+
"max_output_tokens": 12000,
|
|
16499
|
+
},
|
|
16500
|
+
],
|
|
16501
|
+
"synthesis": {
|
|
16502
|
+
"style": "answer_with_lane_evidence",
|
|
16503
|
+
"require_disagreements": True,
|
|
16504
|
+
"require_open_questions": True,
|
|
16505
|
+
},
|
|
16506
|
+
}
|
|
16507
|
+
|
|
16508
|
+
|
|
16509
|
+
def _research_normalize_profile(raw: dict[str, Any], *, fallback_profile_id: str) -> dict[str, Any]:
|
|
16510
|
+
base = _research_default_profile(fallback_profile_id)
|
|
16511
|
+
profile = {**base, **raw}
|
|
16512
|
+
profile["schema_version"] = str(profile.get("schema_version", RESEARCH_RUN_SCHEMA_VERSION)).strip() or RESEARCH_RUN_SCHEMA_VERSION
|
|
16513
|
+
profile["profile_id"] = str(profile.get("profile_id", fallback_profile_id)).strip() or fallback_profile_id
|
|
16514
|
+
lanes = profile.get("lanes")
|
|
16515
|
+
if not isinstance(lanes, list) or not lanes:
|
|
16516
|
+
lanes = base["lanes"]
|
|
16517
|
+
normalized_lanes: list[dict[str, Any]] = []
|
|
16518
|
+
for index, lane_raw in enumerate(lanes):
|
|
16519
|
+
if not isinstance(lane_raw, dict):
|
|
16520
|
+
continue
|
|
16521
|
+
lane = dict(lane_raw)
|
|
16522
|
+
lane_id = str(lane.get("lane_id", lane.get("id", ""))).strip() or f"lane_{index + 1}"
|
|
16523
|
+
lane["lane_id"] = _slug_token(lane_id, fallback=f"lane-{index + 1}").replace("-", "_")
|
|
16524
|
+
lane["label"] = str(lane.get("label", lane["lane_id"])).strip() or lane["lane_id"]
|
|
16525
|
+
lane["provider"] = str(lane.get("provider", "")).strip() or "custom"
|
|
16526
|
+
lane["model"] = str(lane.get("model", "")).strip() or lane["provider"]
|
|
16527
|
+
lane["adapter"] = str(lane.get("adapter", "planned")).strip() or "planned"
|
|
16528
|
+
lane["role"] = str(lane.get("role", "")).strip()
|
|
16529
|
+
lane["env_var"] = str(lane.get("env_var", "")).strip()
|
|
16530
|
+
lane["secret_alias"] = str(lane.get("secret_alias", "")).strip()
|
|
16531
|
+
lane["call_moment"] = str(lane.get("call_moment", lane["lane_id"])).strip() or lane["lane_id"]
|
|
16532
|
+
normalized_lanes.append(lane)
|
|
16533
|
+
profile["lanes"] = normalized_lanes
|
|
16534
|
+
return profile
|
|
16535
|
+
|
|
16536
|
+
|
|
16537
|
+
def _research_load_profile(args: argparse.Namespace, repo_root: Path) -> dict[str, Any]:
|
|
16538
|
+
profile_id = str(getattr(args, "profile", "") or "openai-council").strip() or "openai-council"
|
|
16539
|
+
profile_file = str(getattr(args, "profile_file", "") or "").strip()
|
|
16540
|
+
if not profile_file:
|
|
16541
|
+
return _research_normalize_profile({}, fallback_profile_id=profile_id)
|
|
16542
|
+
path = _resolve_cli_path(profile_file, repo_root)
|
|
16543
|
+
payload = _read_json_if_exists(path)
|
|
16544
|
+
if not payload:
|
|
16545
|
+
raise RuntimeError(f"missing or invalid research profile: {_path_for_state(path, repo_root)}")
|
|
16546
|
+
return _research_normalize_profile(payload, fallback_profile_id=profile_id)
|
|
16547
|
+
|
|
16548
|
+
|
|
16549
|
+
def _research_breakdown(question: str) -> dict[str, Any]:
    """Build the shared decomposition payload for a research run.

    Reuses the "granular-breakdown" agent mode as the working frame and
    attaches the fixed OpenAI lane plan plus prompt-enrichment policy.
    """
    frame = _agent_mode_breakdown(_agent_mode("granular-breakdown"), topic=question)
    lane_plan = [
        {
            "lane": "openai_reasoning_high",
            "task": "Run a high-reasoning synthesis pass over tradeoffs and likely answer shape.",
        },
        {
            "lane": "openai_web_synthesis",
            "task": "Use web search when current public evidence matters and return citation-backed synthesis.",
        },
        {
            "lane": "openai_deep_research",
            "task": "Run a Pro/Deep Research investigation for a longer citation-rich report.",
        },
    ]
    return {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "question": question,
        "mode": frame.get("mode", {}),
        "sequence": frame.get("sequence", []),
        "output_contract": frame.get("output_contract", []),
        "prompt_enrichment": {
            "goal": "Answer the question with explicit assumptions, evidence boundaries, disagreements, and next verification.",
            "public_web_needed": True,
            "private_context_policy": "Do not assume private data unless it is included in the question or attached artifacts.",
        },
        "lanes": lane_plan,
    }
|
|
16577
|
+
|
|
16578
|
+
|
|
16579
|
+
def _research_lane_prompt(question: str, lane: dict[str, Any], breakdown: dict[str, Any]) -> str:
|
|
16580
|
+
sequence_titles = [
|
|
16581
|
+
str(row.get("title", "")).strip()
|
|
16582
|
+
for row in breakdown.get("sequence", [])
|
|
16583
|
+
if isinstance(row, dict) and str(row.get("title", "")).strip()
|
|
16584
|
+
]
|
|
16585
|
+
role = str(lane.get("role", "")).strip() or "Independent research lane."
|
|
16586
|
+
return "\n".join(
|
|
16587
|
+
[
|
|
16588
|
+
"You are one lane in an ORP OpenAI research loop.",
|
|
16589
|
+
f"Lane: {lane.get('lane_id', '')}",
|
|
16590
|
+
f"Provider/model: {lane.get('provider', '')}/{lane.get('model', '')}",
|
|
16591
|
+
f"Lane role: {role}",
|
|
16592
|
+
"",
|
|
16593
|
+
"Question:",
|
|
16594
|
+
question,
|
|
16595
|
+
"",
|
|
16596
|
+
"Use this decomposition ladder as the working frame:",
|
|
16597
|
+
", ".join(sequence_titles) or "broad frame, boundary, lanes, subclaims, obligations, synthesis",
|
|
16598
|
+
"",
|
|
16599
|
+
"Return a concise but substantial lane report with:",
|
|
16600
|
+
"- answer or position",
|
|
16601
|
+
"- key evidence or reasoning",
|
|
16602
|
+
"- assumptions and uncertainty",
|
|
16603
|
+
"- disagreements or failure modes",
|
|
16604
|
+
"- sources or citations when the lane has source access",
|
|
16605
|
+
"",
|
|
16606
|
+
"Do not modify files. Do not perform actions outside answering this lane prompt.",
|
|
16607
|
+
]
|
|
16608
|
+
)
|
|
16609
|
+
|
|
16610
|
+
|
|
16611
|
+
def _research_parse_lane_fixtures(raw_fixtures: Sequence[str], repo_root: Path) -> dict[str, Path]:
    """Parse lane fixture arguments of the form ``lane_id=path``.

    Blank entries are skipped; lane ids are slugged and underscored to match
    normalized lane ids.

    Raises:
        RuntimeError: when an entry lacks the ``=`` separator.
    """
    parsed: dict[str, Path] = {}
    for entry in raw_fixtures:
        spec = str(entry or "").strip()
        if not spec:
            continue
        if "=" not in spec:
            raise RuntimeError("research lane fixtures must use lane_id=path")
        raw_id, raw_path = spec.split("=", 1)
        key = _slug_token(raw_id, fallback="lane").replace("-", "_")
        parsed[key] = _resolve_cli_path(raw_path.strip(), repo_root)
    return parsed
|
|
16623
|
+
|
|
16624
|
+
|
|
16625
|
+
def _research_text_from_payload(payload: Any) -> str:
|
|
16626
|
+
if isinstance(payload, str):
|
|
16627
|
+
return payload.strip()
|
|
16628
|
+
if isinstance(payload, dict):
|
|
16629
|
+
for key in ("text", "answer", "summary", "content", "report"):
|
|
16630
|
+
value = payload.get(key)
|
|
16631
|
+
if isinstance(value, str) and value.strip():
|
|
16632
|
+
return value.strip()
|
|
16633
|
+
return ""
|
|
16634
|
+
|
|
16635
|
+
|
|
16636
|
+
def _research_lane_api_call_plan(
|
|
16637
|
+
lane: dict[str, Any],
|
|
16638
|
+
*,
|
|
16639
|
+
execute: bool,
|
|
16640
|
+
called: bool = False,
|
|
16641
|
+
secret_source: str = "",
|
|
16642
|
+
reason: str = "",
|
|
16643
|
+
request_body_keys: Sequence[str] | None = None,
|
|
16644
|
+
tools: Sequence[str] | None = None,
|
|
16645
|
+
) -> dict[str, Any]:
|
|
16646
|
+
adapter = str(lane.get("adapter", "")).strip()
|
|
16647
|
+
provider = str(lane.get("provider", "")).strip()
|
|
16648
|
+
env_var = str(lane.get("env_var", "")).strip()
|
|
16649
|
+
secret_alias = str(lane.get("secret_alias", "")).strip()
|
|
16650
|
+
return {
|
|
16651
|
+
"call_moment": str(lane.get("call_moment", lane.get("lane_id", ""))).strip(),
|
|
16652
|
+
"calls_api": adapter in {"openai_responses", "anthropic_messages", "xai_chat_completions", "chimera_cli"},
|
|
16653
|
+
"called": bool(called),
|
|
16654
|
+
"execute_required": True,
|
|
16655
|
+
"execute": bool(execute),
|
|
16656
|
+
"provider": provider,
|
|
16657
|
+
"adapter": adapter,
|
|
16658
|
+
"model": str(lane.get("model", "")).strip(),
|
|
16659
|
+
"env_var": env_var,
|
|
16660
|
+
"secret_alias": secret_alias,
|
|
16661
|
+
"secret_resolution_order": [row for row in (f"env:{env_var}" if env_var else "", f"keychain:{secret_alias}" if secret_alias else "") if row],
|
|
16662
|
+
"secret_source": secret_source,
|
|
16663
|
+
"secret_value_persisted": False,
|
|
16664
|
+
"request_body_keys": sorted(str(row) for row in request_body_keys) if request_body_keys else [],
|
|
16665
|
+
"tools": [str(row) for row in tools] if tools else [],
|
|
16666
|
+
"reason": reason,
|
|
16667
|
+
}
|
|
16668
|
+
|
|
16669
|
+
|
|
16670
|
+
def _research_fixture_lane_result(
    lane: dict[str, Any],
    fixture_path: Path,
    *,
    started_at_utc: str,
    repo_root: Path,
) -> dict[str, Any]:
    """Build a lane result from an on-disk fixture instead of a provider call.

    ``.json`` fixtures are parsed (falling back to raw text on parse errors);
    other files are used verbatim. Optional structured fields from a dict
    fixture are copied straight into the result.

    Raises:
        RuntimeError: when the fixture file does not exist.
    """
    if not fixture_path.exists():
        raise RuntimeError(f"missing research lane fixture: {_path_for_state(fixture_path, repo_root)}")
    raw_text = fixture_path.read_text(encoding="utf-8")
    payload: Any = raw_text
    if fixture_path.suffix.lower() == ".json":
        try:
            payload = json.loads(raw_text)
        except Exception:
            # Malformed JSON fixtures degrade to plain text rather than fail.
            payload = raw_text
    lane_text = _research_text_from_payload(payload) or raw_text.strip()
    finished_at_utc = _now_utc()
    citations: list[Any] = []
    if isinstance(payload, dict) and isinstance(payload.get("citations"), list):
        citations = payload.get("citations", [])
    result = {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane["lane_id"],
        "label": lane.get("label", lane["lane_id"]),
        "provider": lane.get("provider", ""),
        "model": lane.get("model", ""),
        "adapter": "fixture",
        "call_moment": lane.get("call_moment", lane["lane_id"]),
        "api_call": _research_lane_api_call_plan(
            lane,
            execute=False,
            called=False,
            reason="Lane output loaded from fixture; no provider key was resolved.",
        ),
        "status": "complete",
        "source": "fixture",
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "text": lane_text,
        "citations": citations,
        "fixture_path": _path_for_state(fixture_path, repo_root),
        "notes": ["Lane output loaded from an explicit fixture; no provider call was made."],
    }
    # Carry optional structured fields through only when the fixture set them.
    if isinstance(payload, dict):
        for optional_key in ("claims", "confidence", "disagreements", "open_questions"):
            if optional_key in payload:
                result[optional_key] = payload[optional_key]
    return result
|
|
16717
|
+
|
|
16718
|
+
|
|
16719
|
+
def _research_secret_value_for_lane(lane: dict[str, Any]) -> tuple[str, str, str]:
|
|
16720
|
+
env_var = str(lane.get("env_var", "")).strip()
|
|
16721
|
+
if env_var:
|
|
16722
|
+
value = os.environ.get(env_var, "").strip()
|
|
16723
|
+
if value:
|
|
16724
|
+
return value, f"env:{env_var}", ""
|
|
16725
|
+
|
|
16726
|
+
secret_alias = str(lane.get("secret_alias", "")).strip()
|
|
16727
|
+
provider = str(lane.get("provider", "")).strip()
|
|
16728
|
+
if not secret_alias and not provider:
|
|
16729
|
+
return "", "", "no env var or secret alias configured"
|
|
16730
|
+
|
|
16731
|
+
try:
|
|
16732
|
+
entry = _select_keychain_entry(
|
|
16733
|
+
secret_ref=secret_alias,
|
|
16734
|
+
provider=provider,
|
|
16735
|
+
world_id="",
|
|
16736
|
+
idea_id="",
|
|
16737
|
+
)
|
|
16738
|
+
if entry is None:
|
|
16739
|
+
ref = secret_alias or provider
|
|
16740
|
+
return "", "", f"no matching local Keychain secret for {ref}"
|
|
16741
|
+
return _read_keychain_secret_value(entry).strip(), "keychain", ""
|
|
16742
|
+
except Exception as exc:
|
|
16743
|
+
return "", "", str(exc)
|
|
16744
|
+
|
|
16745
|
+
|
|
16746
|
+
def _research_chimera_result_text(stdout: str) -> tuple[str, dict[str, Any]]:
|
|
16747
|
+
deltas: list[str] = []
|
|
16748
|
+
final_text = ""
|
|
16749
|
+
session_id = ""
|
|
16750
|
+
session_path = ""
|
|
16751
|
+
event_count = 0
|
|
16752
|
+
usage: dict[str, Any] = {}
|
|
16753
|
+
for raw_line in stdout.splitlines():
|
|
16754
|
+
line = raw_line.strip()
|
|
16755
|
+
if not line:
|
|
16756
|
+
continue
|
|
16757
|
+
try:
|
|
16758
|
+
event = json.loads(line)
|
|
16759
|
+
except Exception:
|
|
16760
|
+
continue
|
|
16761
|
+
if not isinstance(event, dict):
|
|
16762
|
+
continue
|
|
16763
|
+
event_count += 1
|
|
16764
|
+
if "TextDelta" in event and isinstance(event["TextDelta"], dict):
|
|
16765
|
+
deltas.append(str(event["TextDelta"].get("text", "")))
|
|
16766
|
+
if "TurnComplete" in event and isinstance(event["TurnComplete"], dict):
|
|
16767
|
+
final_text = str(event["TurnComplete"].get("text") or final_text).strip()
|
|
16768
|
+
session_id = str(event["TurnComplete"].get("session_id", session_id)).strip()
|
|
16769
|
+
if "SessionReady" in event and isinstance(event["SessionReady"], dict):
|
|
16770
|
+
session_id = str(event["SessionReady"].get("session_id", session_id)).strip()
|
|
16771
|
+
if "SessionSaved" in event and isinstance(event["SessionSaved"], dict):
|
|
16772
|
+
session_id = str(event["SessionSaved"].get("session_id", session_id)).strip()
|
|
16773
|
+
session_path = str(event["SessionSaved"].get("path", "")).strip()
|
|
16774
|
+
if "Usage" in event and isinstance(event["Usage"], dict):
|
|
16775
|
+
usage = dict(event["Usage"])
|
|
16776
|
+
|
|
16777
|
+
event_type = str(event.get("type", "")).strip()
|
|
16778
|
+
if event_type == "text_delta":
|
|
16779
|
+
deltas.append(str(event.get("text", "")))
|
|
16780
|
+
if event_type == "turn_complete":
|
|
16781
|
+
final_text = str(event.get("text") or final_text).strip()
|
|
16782
|
+
session_id = str(event.get("session_id", session_id)).strip()
|
|
16783
|
+
if not final_text:
|
|
16784
|
+
final_text = "".join(deltas).strip()
|
|
16785
|
+
return final_text, {
|
|
16786
|
+
"event_count": event_count,
|
|
16787
|
+
"session_id": session_id,
|
|
16788
|
+
"session_path": session_path,
|
|
16789
|
+
"usage": usage,
|
|
16790
|
+
}
|
|
16791
|
+
|
|
16792
|
+
|
|
16793
|
+
def _research_run_chimera_lane(
    lane: dict[str, Any],
    prompt: str,
    *,
    repo_root: Path,
    chimera_bin: str,
    timeout_sec: int,
    started_at_utc: str,
) -> dict[str, Any]:
    """Run one research lane through the local Chimera CLI.

    Resolves the binary (PATH lookup, then a literal/expanded path), injects
    the lane secret into the child environment when available, runs the CLI
    with a hard timeout, and folds its NDJSON output into a lane-result dict.
    Returns a "skipped" result when the binary is missing and a "failed"
    result on timeout or a non-zero/empty run; never raises for those cases.
    """
    # Prefer a PATH lookup; fall back to treating chimera_bin as a file path.
    bin_path = shutil.which(chimera_bin) if chimera_bin else None
    if bin_path is None and chimera_bin:
        candidate = Path(chimera_bin).expanduser()
        if candidate.exists():
            bin_path = str(candidate)
    if not bin_path:
        # No binary: report a skipped lane rather than raising.
        # NOTE(review): unlike other adapters this result omits
        # "call_moment"/"api_call" keys — confirm consumers tolerate that.
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "chimera_cli",
            "status": "skipped",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "notes": [f"Chimera binary not found: {chimera_bin}"],
        }

    # Pass the resolved secret to the child via its env var, but never
    # overwrite a value the caller's environment already set.
    env = os.environ.copy()
    secret_value, secret_source, secret_issue = _research_secret_value_for_lane(lane)
    env_var = str(lane.get("env_var", "")).strip()
    if secret_value and env_var and not env.get(env_var):
        env[env_var] = secret_value

    args = [
        bin_path,
        "--model",
        str(lane.get("model", "local")).strip() or "local",
        "--prompt",
        prompt,
        "--approval-mode",
        str(lane.get("chimera_approval_mode", "approve")).strip() or "approve",
        "--json",
    ]
    try:
        # List-form argv (shell=False): the prompt is passed verbatim, no
        # shell quoting hazards.
        proc = subprocess.run(
            args,
            cwd=str(repo_root),
            capture_output=True,
            text=True,
            timeout=max(1, int(timeout_sec)),
            env=env,
        )
    except subprocess.TimeoutExpired as exc:
        # Timeout: surface tails of whatever output was captured (capped at
        # 4000 chars each) so the run record stays bounded.
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "chimera_cli",
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": f"chimera timed out after {timeout_sec}s",
            "stdout": (exc.stdout or "")[-4000:],
            "stderr": (exc.stderr or "")[-4000:],
        }

    finished_at_utc = _now_utc()
    lane_text, meta = _research_chimera_result_text(proc.stdout)
    notes: list[str] = []
    if secret_source:
        notes.append(f"Secret supplied from {secret_source}; secret value was not persisted.")
    elif env_var and secret_issue:
        notes.append(f"No secret supplied for {env_var}: {secret_issue}")
    # A run only counts as complete when it both exited cleanly AND produced text.
    status = "complete" if proc.returncode == 0 and lane_text else "failed"
    if proc.returncode != 0:
        notes.append("Chimera exited non-zero.")
    return {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane["lane_id"],
        "label": lane.get("label", lane["lane_id"]),
        "provider": lane.get("provider", ""),
        "model": lane.get("model", ""),
        "adapter": "chimera_cli",
        "status": status,
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "text": lane_text,
        "chimera": meta,
        "returncode": proc.returncode,
        "stderr_tail": proc.stderr[-4000:],
        "notes": notes,
    }
|
|
16895
|
+
|
|
16896
|
+
|
|
16897
|
+
def _research_extract_openai_text(payload: dict[str, Any]) -> tuple[str, list[dict[str, Any]], int]:
|
|
16898
|
+
texts: list[str] = []
|
|
16899
|
+
citations: list[dict[str, Any]] = []
|
|
16900
|
+
tool_calls = 0
|
|
16901
|
+
output = payload.get("output")
|
|
16902
|
+
if not isinstance(output, list):
|
|
16903
|
+
return "", citations, tool_calls
|
|
16904
|
+
for item in output:
|
|
16905
|
+
if not isinstance(item, dict):
|
|
16906
|
+
continue
|
|
16907
|
+
item_type = str(item.get("type", "")).strip()
|
|
16908
|
+
if item_type.endswith("_call"):
|
|
16909
|
+
tool_calls += 1
|
|
16910
|
+
if item_type != "message":
|
|
16911
|
+
continue
|
|
16912
|
+
content = item.get("content")
|
|
16913
|
+
if not isinstance(content, list):
|
|
16914
|
+
continue
|
|
16915
|
+
for part in content:
|
|
16916
|
+
if not isinstance(part, dict):
|
|
16917
|
+
continue
|
|
16918
|
+
if str(part.get("type", "")).strip() == "output_text":
|
|
16919
|
+
text = str(part.get("text", "")).strip()
|
|
16920
|
+
if text:
|
|
16921
|
+
texts.append(text)
|
|
16922
|
+
annotations = part.get("annotations")
|
|
16923
|
+
if isinstance(annotations, list):
|
|
16924
|
+
for annotation in annotations:
|
|
16925
|
+
if isinstance(annotation, dict):
|
|
16926
|
+
citations.append(
|
|
16927
|
+
{
|
|
16928
|
+
"type": str(annotation.get("type", "")).strip(),
|
|
16929
|
+
"title": str(annotation.get("title", "")).strip(),
|
|
16930
|
+
"url": str(annotation.get("url", "")).strip(),
|
|
16931
|
+
"start_index": annotation.get("start_index"),
|
|
16932
|
+
"end_index": annotation.get("end_index"),
|
|
16933
|
+
}
|
|
16934
|
+
)
|
|
16935
|
+
return "\n\n".join(texts).strip(), citations, tool_calls
|
|
16936
|
+
|
|
16937
|
+
|
|
16938
|
+
def _research_openai_output_types(payload: dict[str, Any]) -> list[str]:
|
|
16939
|
+
output = payload.get("output")
|
|
16940
|
+
if not isinstance(output, list):
|
|
16941
|
+
return []
|
|
16942
|
+
return [
|
|
16943
|
+
str(item.get("type", "")).strip()
|
|
16944
|
+
for item in output
|
|
16945
|
+
if isinstance(item, dict) and str(item.get("type", "")).strip()
|
|
16946
|
+
]
|
|
16947
|
+
|
|
16948
|
+
|
|
16949
|
+
def _research_run_openai_lane(
    lane: dict[str, Any],
    prompt: str,
    *,
    timeout_sec: int,
    started_at_utc: str,
) -> dict[str, Any]:
    """Run one research lane against the OpenAI Responses API.

    Resolves the lane secret, assembles the request body from lane config
    (tools, web search, reasoning, verbosity, instructions), POSTs to
    /v1/responses via urllib, and returns a lane-result dict. Missing keys
    yield a "skipped" result; HTTP and transport errors yield "failed"
    results — this function never raises for those cases. The secret is used
    only for the Authorization header and never written to the result.
    """
    api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
    finished_at_utc = _now_utc()
    if not api_key:
        # No usable key: record the planned (but uncalled) API call and skip.
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "openai_responses",
            "call_moment": lane.get("call_moment", lane["lane_id"]),
            "api_call": _research_lane_api_call_plan(
                lane,
                execute=True,
                called=False,
                reason=f"No OpenAI API key available: {secret_issue or 'missing OPENAI_API_KEY'}",
            ),
            "status": "skipped",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "notes": [f"No OpenAI API key available: {secret_issue or 'missing OPENAI_API_KEY'}"],
        }

    # --- Assemble the Responses API request body from lane configuration ---
    body: dict[str, Any] = {
        "model": str(lane.get("model", "gpt-5.4")).strip() or "gpt-5.4",
        "input": prompt,
        "background": bool(lane.get("background", False)),
    }
    tools: list[dict[str, Any]] = []
    raw_tools = lane.get("tools")
    if isinstance(raw_tools, list):
        # Shallow-copy tool dicts so later mutation cannot leak into the lane.
        tools.extend([dict(row) for row in raw_tools if isinstance(row, dict)])
    if bool(lane.get("web_search", False)):
        web_tool_type = str(lane.get("web_search_tool", "web_search")).strip() or "web_search"
        web_tool: dict[str, Any] = {
            "type": web_tool_type,
            "search_context_size": str(lane.get("search_context_size", "medium")).strip() or "medium",
        }
        # external_web_access is only forwarded for the plain web_search tool,
        # and only when the lane explicitly set it.
        if web_tool_type == "web_search" and "external_web_access" in lane:
            web_tool["external_web_access"] = bool(lane.get("external_web_access"))
        filters = lane.get("filters")
        if isinstance(filters, dict):
            web_tool["filters"] = filters
        tools.append(web_tool)
    if tools:
        body["tools"] = tools
    max_tool_calls = int(lane.get("max_tool_calls", 0) or 0)
    if max_tool_calls > 0:
        body["max_tool_calls"] = max_tool_calls
    max_output_tokens = int(lane.get("max_output_tokens", 0) or 0)
    if max_output_tokens > 0:
        body["max_output_tokens"] = max_output_tokens
    # Reasoning effort may be flat ("reasoning_effort") or nested under a
    # "reasoning" dict; the flat form wins when both are present.
    raw_reasoning = lane.get("reasoning")
    reasoning_effort = str(lane.get("reasoning_effort", "") or "").strip()
    if not reasoning_effort and isinstance(raw_reasoning, dict):
        reasoning_effort = str(raw_reasoning.get("effort", "") or "").strip()
    if reasoning_effort:
        body["reasoning"] = {"effort": reasoning_effort}
    reasoning_summary = str(lane.get("reasoning_summary", "") or "").strip()
    if reasoning_summary:
        # Merge summary into any reasoning dict already set above.
        reasoning_body = body.get("reasoning") if isinstance(body.get("reasoning"), dict) else {}
        reasoning_body = dict(reasoning_body)
        reasoning_body["summary"] = reasoning_summary
        body["reasoning"] = reasoning_body
    # Text verbosity follows the same flat-then-nested lookup as reasoning.
    raw_text = lane.get("text")
    text_verbosity = str(lane.get("text_verbosity", "") or "").strip()
    if not text_verbosity and isinstance(raw_text, dict):
        text_verbosity = str(raw_text.get("verbosity", "") or "").strip()
    if text_verbosity:
        body["text"] = {"verbosity": text_verbosity}
    instructions = str(lane.get("instructions", "")).strip()
    if instructions:
        body["instructions"] = instructions

    # Record the call plan (body keys + tool types only — no secret, no prompt).
    tool_types = [
        str(row.get("type", "")).strip()
        for row in body.get("tools", [])
        if isinstance(row, dict) and str(row.get("type", "")).strip()
    ]
    api_call = _research_lane_api_call_plan(
        lane,
        execute=True,
        called=True,
        secret_source=secret_source,
        request_body_keys=body.keys(),
        tools=tool_types,
    )

    request = urlrequest.Request(
        "https://api.openai.com/v1/responses",
        data=json.dumps(body).encode("utf-8"),
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )
    try:
        with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
            response_payload = json.loads(response.read().decode("utf-8"))
    except urlerror.HTTPError as exc:
        # HTTP errors: capture the (bounded) error body for the run record.
        error_body = ""
        try:
            error_body = exc.read().decode("utf-8")
        except Exception:
            error_body = str(exc)
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "openai_responses",
            "call_moment": lane.get("call_moment", lane["lane_id"]),
            "api_call": api_call,
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": error_body[-4000:],
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }
    except Exception as exc:
        # Transport/JSON errors and timeouts land here.
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "openai_responses",
            "call_moment": lane.get("call_moment", lane["lane_id"]),
            "api_call": api_call,
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": str(exc),
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }

    finished_at_utc = _now_utc()
    text, citations, tool_calls = _research_extract_openai_text(response_payload)
    response_status = str(response_payload.get("status", "")).strip()
    # "completed" + text maps to the internal "complete"; otherwise the
    # provider status passes through (NOTE(review): "completed" with empty
    # text passes through as "completed", not "complete" — confirm intended).
    status = "complete" if response_status == "completed" and text else response_status or "complete"
    if status == "in_progress":
        # Background deep-research runs return before producing output.
        text = text or "OpenAI deep research started in background mode; poll the response id outside ORP for completion."
    return {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane["lane_id"],
        "label": lane.get("label", lane["lane_id"]),
        "provider": lane.get("provider", ""),
        "model": lane.get("model", ""),
        "adapter": "openai_responses",
        "call_moment": lane.get("call_moment", lane["lane_id"]),
        "api_call": api_call,
        "status": status,
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "text": text,
        "citations": citations,
        "provider_response_id": str(response_payload.get("id", "")).strip(),
        "provider_status": response_status,
        "provider_error": response_payload.get("error") if isinstance(response_payload.get("error"), dict) else None,
        "incomplete_details": response_payload.get("incomplete_details")
        if isinstance(response_payload.get("incomplete_details"), dict)
        else None,
        "output_types": _research_openai_output_types(response_payload),
        "tool_call_count": tool_calls,
        "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
        "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
    }
|
|
17134
|
+
|
|
17135
|
+
|
|
17136
|
+
def _research_extract_anthropic_text(payload: dict[str, Any]) -> str:
|
|
17137
|
+
content = payload.get("content")
|
|
17138
|
+
if not isinstance(content, list):
|
|
17139
|
+
return ""
|
|
17140
|
+
parts: list[str] = []
|
|
17141
|
+
for part in content:
|
|
17142
|
+
if not isinstance(part, dict):
|
|
17143
|
+
continue
|
|
17144
|
+
if str(part.get("type", "")).strip() == "text":
|
|
17145
|
+
text = str(part.get("text", "")).strip()
|
|
17146
|
+
if text:
|
|
17147
|
+
parts.append(text)
|
|
17148
|
+
return "\n\n".join(parts).strip()
|
|
17149
|
+
|
|
17150
|
+
|
|
17151
|
+
def _research_run_anthropic_lane(
    lane: dict[str, Any],
    prompt: str,
    *,
    timeout_sec: int,
    started_at_utc: str,
) -> dict[str, Any]:
    """Run one research lane against the Anthropic Messages API.

    Resolves the lane secret, builds a single-user-message request from lane
    config (model, max_tokens, optional system/temperature), POSTs to
    /v1/messages via urllib, and returns a lane-result dict. A missing key
    yields a "skipped" result; HTTP and transport errors yield "failed"
    results — this function never raises for those cases. The secret is used
    only for the x-api-key header and never written to the result.
    """
    api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
    finished_at_utc = _now_utc()
    if not api_key:
        # No usable key: skip without calling the provider.
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "anthropic_messages",
            "status": "skipped",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "notes": [f"No Anthropic API key available: {secret_issue or 'missing ANTHROPIC_API_KEY'}"],
        }

    model = str(lane.get("model", "claude-opus-4-7")).strip() or "claude-opus-4-7"
    body: dict[str, Any] = {
        "model": model,
        "max_tokens": int(lane.get("max_tokens", 4096) or 4096),
        "messages": [{"role": "user", "content": prompt}],
    }
    system = str(lane.get("system", "") or "").strip()
    if system:
        body["system"] = system
    if "temperature" in lane:
        try:
            body["temperature"] = float(lane.get("temperature"))
        except Exception:
            # Unparseable temperature values are silently dropped from the body.
            pass
    anthropic_version = str(lane.get("anthropic_version", "2023-06-01")).strip() or "2023-06-01"
    request = urlrequest.Request(
        "https://api.anthropic.com/v1/messages",
        data=json.dumps(body).encode("utf-8"),
        headers={
            "x-api-key": api_key,
            "anthropic-version": anthropic_version,
            "Content-Type": "application/json",
        },
        method="POST",
    )
    try:
        with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
            response_payload = json.loads(response.read().decode("utf-8"))
    except urlerror.HTTPError as exc:
        # HTTP errors: capture the (bounded) error body for the run record.
        error_body = ""
        try:
            error_body = exc.read().decode("utf-8")
        except Exception:
            error_body = str(exc)
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": model,
            "adapter": "anthropic_messages",
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": error_body[-4000:],
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }
    except Exception as exc:
        # Transport/JSON errors and timeouts land here.
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": model,
            "adapter": "anthropic_messages",
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": str(exc),
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }

    finished_at_utc = _now_utc()
    text = _research_extract_anthropic_text(response_payload)
    # An empty extracted text is treated as failure even on HTTP 200.
    return {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane["lane_id"],
        "label": lane.get("label", lane["lane_id"]),
        "provider": lane.get("provider", ""),
        "model": str(response_payload.get("model", model)).strip() or model,
        "adapter": "anthropic_messages",
        "status": "complete" if text else "failed",
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "text": text,
        "provider_response_id": str(response_payload.get("id", "")).strip(),
        "stop_reason": str(response_payload.get("stop_reason", "")).strip(),
        "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
        "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
    }
|
|
17263
|
+
|
|
17264
|
+
|
|
17265
|
+
def _research_extract_chat_completion_text(payload: dict[str, Any]) -> str:
|
|
17266
|
+
choices = payload.get("choices")
|
|
17267
|
+
if not isinstance(choices, list):
|
|
17268
|
+
return ""
|
|
17269
|
+
parts: list[str] = []
|
|
17270
|
+
for choice in choices:
|
|
17271
|
+
if not isinstance(choice, dict):
|
|
17272
|
+
continue
|
|
17273
|
+
message = choice.get("message")
|
|
17274
|
+
if isinstance(message, dict):
|
|
17275
|
+
content = message.get("content")
|
|
17276
|
+
if isinstance(content, str) and content.strip():
|
|
17277
|
+
parts.append(content.strip())
|
|
17278
|
+
elif isinstance(content, list):
|
|
17279
|
+
for item in content:
|
|
17280
|
+
if isinstance(item, dict):
|
|
17281
|
+
text = str(item.get("text", "")).strip()
|
|
17282
|
+
if text:
|
|
17283
|
+
parts.append(text)
|
|
17284
|
+
return "\n\n".join(parts).strip()
|
|
17285
|
+
|
|
17286
|
+
|
|
17287
|
+
def _research_run_xai_lane(
    lane: dict[str, Any],
    prompt: str,
    *,
    timeout_sec: int,
    started_at_utc: str,
) -> dict[str, Any]:
    """Run one research lane against the xAI chat-completions API.

    Returns a lane result row and never raises: "skipped" when no API key can
    be resolved, "failed" on HTTP/transport errors or when no text could be
    extracted, "complete" otherwise. The API key is used only to build the
    request; only its source (not its value) is recorded in the notes.
    """
    api_key, secret_source, secret_issue = _research_secret_value_for_lane(lane)
    finished_at_utc = _now_utc()
    if not api_key:
        # No credential available: record a skipped lane instead of calling out.
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": lane.get("model", ""),
            "adapter": "xai_chat_completions",
            "status": "skipped",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "notes": [f"No xAI API key available: {secret_issue or 'missing XAI_API_KEY'}"],
        }

    model = str(lane.get("model", "grok-4.20-reasoning")).strip() or "grok-4.20-reasoning"
    system = str(lane.get("system", "You are an independent research critique lane.")).strip()
    messages: list[dict[str, str]] = []
    if system:
        messages.append({"role": "system", "content": system})
    messages.append({"role": "user", "content": prompt})
    body: dict[str, Any] = {
        "model": model,
        "messages": messages,
        "stream": False,
    }
    # Optional tuning knobs: only sent when the lane config supplies them.
    max_tokens = int(lane.get("max_tokens", 0) or 0)
    if max_tokens > 0:
        body["max_tokens"] = max_tokens
    if "temperature" in lane:
        try:
            body["temperature"] = float(lane.get("temperature"))
        except Exception:
            # Non-numeric temperature in config: silently omit rather than fail the lane.
            pass
    base_url = str(lane.get("base_url", "https://api.x.ai/v1")).rstrip("/") or "https://api.x.ai/v1"
    request = urlrequest.Request(
        f"{base_url}/chat/completions",
        data=json.dumps(body).encode("utf-8"),
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )
    try:
        with urlrequest.urlopen(request, timeout=max(1, int(timeout_sec))) as response:
            response_payload = json.loads(response.read().decode("utf-8"))
    except urlerror.HTTPError as exc:
        # HTTP-level failure: try to capture the provider's error body for the row.
        error_body = ""
        try:
            error_body = exc.read().decode("utf-8")
        except Exception:
            error_body = str(exc)
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": model,
            "adapter": "xai_chat_completions",
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            # Keep only the tail so huge provider error pages don't bloat artifacts.
            "error": error_body[-4000:],
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }
    except Exception as exc:
        # Transport/JSON failure (timeout, DNS, malformed body, ...).
        finished_at_utc = _now_utc()
        return {
            "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
            "lane_id": lane["lane_id"],
            "label": lane.get("label", lane["lane_id"]),
            "provider": lane.get("provider", ""),
            "model": model,
            "adapter": "xai_chat_completions",
            "status": "failed",
            "started_at_utc": started_at_utc,
            "finished_at_utc": finished_at_utc,
            "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
            "text": "",
            "error": str(exc),
            "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
        }

    finished_at_utc = _now_utc()
    text = _research_extract_chat_completion_text(response_payload)
    return {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane["lane_id"],
        "label": lane.get("label", lane["lane_id"]),
        "provider": lane.get("provider", ""),
        # Prefer the model the provider actually reports; fall back to the requested one.
        "model": str(response_payload.get("model", model)).strip() or model,
        "adapter": "xai_chat_completions",
        # An HTTP 200 with no extractable text still counts as a failed lane.
        "status": "complete" if text else "failed",
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "text": text,
        "provider_response_id": str(response_payload.get("id", "")).strip(),
        "usage": response_payload.get("usage") if isinstance(response_payload.get("usage"), dict) else {},
        "notes": [f"Secret supplied from {secret_source}; secret value was not persisted."],
    }
|
|
17402
|
+
|
|
17403
|
+
|
|
17404
|
+
def _research_planned_lane(lane: dict[str, Any], *, started_at_utc: str, execute: bool, reason: str) -> dict[str, Any]:
    """Build the result row for a lane that was planned or skipped, not called."""
    lane_id = lane["lane_id"]
    ended = _now_utc()
    plan = _research_lane_api_call_plan(
        lane,
        execute=execute,
        called=False,
        reason=reason,
    )
    row: dict[str, Any] = {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "lane_id": lane_id,
        "label": lane.get("label", lane_id),
        "provider": lane.get("provider", ""),
        "model": lane.get("model", ""),
        "adapter": lane.get("adapter", "planned"),
        "call_moment": lane.get("call_moment", lane_id),
        "api_call": plan,
        # Dry runs report "planned"; an execute pass that still could not run is "skipped".
        "status": "skipped" if execute else "planned",
        "started_at_utc": started_at_utc,
        "finished_at_utc": ended,
        "duration_ms": _duration_ms(started_at_utc, ended),
        "text": "",
        "notes": [reason],
    }
    return row
|
|
17427
|
+
|
|
17428
|
+
|
|
17429
|
+
def _research_run_lane(
    lane: dict[str, Any],
    *,
    question: str,
    breakdown: dict[str, Any],
    repo_root: Path,
    execute: bool,
    fixtures: dict[str, Path],
    chimera_bin: str,
    timeout_sec: int,
) -> dict[str, Any]:
    """Execute (or merely plan) a single research lane and return its result row.

    Precedence: an attached fixture always wins; a dry run (execute=False)
    produces a planned row; otherwise the lane's adapter is dispatched to the
    matching live runner, and unknown adapters yield a skipped/planned row.
    """
    begun = _now_utc()
    lane_id = str(lane.get("lane_id", "")).strip()

    # Fixtures short-circuit everything: reuse the attached report, no live call.
    if lane_id in fixtures:
        return _research_fixture_lane_result(lane, fixtures[lane_id], started_at_utc=begun, repo_root=repo_root)
    if not execute:
        return _research_planned_lane(
            lane,
            started_at_utc=begun,
            execute=False,
            reason="Dry run only. Re-run with --execute or provide --lane-fixture lane_id=path.",
        )

    lane_prompt = _research_lane_prompt(question, lane, breakdown)
    adapter = str(lane.get("adapter", "")).strip()

    # The chimera adapter needs extra context (repo root + binary), so it is
    # dispatched separately from the plain HTTP adapters below.
    if adapter == "chimera_cli":
        return _research_run_chimera_lane(
            lane,
            lane_prompt,
            repo_root=repo_root,
            chimera_bin=chimera_bin,
            timeout_sec=timeout_sec,
            started_at_utc=begun,
        )

    http_runners = {
        "openai_responses": _research_run_openai_lane,
        "anthropic_messages": _research_run_anthropic_lane,
        "xai_chat_completions": _research_run_xai_lane,
    }
    runner = http_runners.get(adapter)
    if runner is not None:
        return runner(lane, lane_prompt, timeout_sec=timeout_sec, started_at_utc=begun)

    return _research_planned_lane(
        lane,
        started_at_utc=begun,
        execute=True,
        reason=f"No live adapter implemented for `{adapter}`.",
    )
|
|
17489
|
+
|
|
17490
|
+
|
|
17491
|
+
def _research_status_from_lanes(lanes: list[dict[str, Any]]) -> str:
|
|
17492
|
+
statuses = {str(row.get("status", "")).strip() for row in lanes if isinstance(row, dict)}
|
|
17493
|
+
if not statuses:
|
|
17494
|
+
return "planned"
|
|
17495
|
+
if "failed" in statuses:
|
|
17496
|
+
return "partial"
|
|
17497
|
+
if statuses & {"queued", "in_progress"}:
|
|
17498
|
+
return "in_progress"
|
|
17499
|
+
if "complete" in statuses and statuses <= {"complete"}:
|
|
17500
|
+
return "complete"
|
|
17501
|
+
if "complete" in statuses:
|
|
17502
|
+
return "partial"
|
|
17503
|
+
if statuses <= {"planned"}:
|
|
17504
|
+
return "planned"
|
|
17505
|
+
return "partial"
|
|
17506
|
+
|
|
17507
|
+
|
|
17508
|
+
def _research_synthesize(question: str, lanes: list[dict[str, Any]], *, execute: bool) -> dict[str, Any]:
    """Fold lane results into a synthesis payload: answer text, counts,
    confidence label, merged citations, and de-duplicated next actions."""
    done: list[dict[str, Any]] = []
    waiting: list[dict[str, Any]] = []
    broken: list[dict[str, Any]] = []
    for lane in lanes:
        status = lane.get("status")
        # A "complete" lane only counts when it actually produced text.
        if status == "complete" and str(lane.get("text", "")).strip():
            done.append(lane)
        if status in {"planned", "skipped"}:
            waiting.append(lane)
        if status == "failed":
            broken.append(lane)

    body: list[str] = []
    if done:
        body.append(f"Question: {question}")
        body.append("")
        body.append("Synthesis from completed lanes:")
        for lane in done:
            header = str(lane.get("label", lane.get("lane_id", ""))).strip()
            body.extend(["", f"[{header}]", str(lane.get("text", "")).strip()])
    else:
        body.append(
            "No live research lane has completed yet. ORP created the durable decomposition, provider plan, and lane prompts; "
            "run again with --execute or attach lane fixtures to produce an answer."
        )

    guidance: list[str] = []
    if not execute:
        guidance.append("Run `orp research ask <question> --execute --json` when you are ready to spend live provider calls.")
    if waiting:
        guidance.append("Attach completed external reports with `--lane-fixture lane_id=path` to synthesize without re-calling providers.")
    if broken:
        guidance.append("Inspect failed lane JSON files under `orp/research/<run_id>/lanes/` and re-run only after credentials/adapters are fixed.")

    cites: list[dict[str, Any]] = []
    for lane in done:
        rows = lane.get("citations")
        if isinstance(rows, list):
            cites.extend(row for row in rows if isinstance(row, dict))

    if len(done) > 1:
        confidence = "multi_lane"
    elif done:
        confidence = "single_lane"
    else:
        confidence = "planning_only"

    return {
        "answer": "\n".join(body).strip(),
        "completed_lane_count": len(done),
        "planned_or_skipped_lane_count": len(waiting),
        "failed_lane_count": len(broken),
        "confidence": confidence,
        "citations": cites,
        "next_actions": _unique_strings(guidance),
    }
|
|
17549
|
+
|
|
17550
|
+
|
|
17551
|
+
def _research_summary_markdown(payload: dict[str, Any]) -> str:
|
|
17552
|
+
lines: list[str] = []
|
|
17553
|
+
lines.append(f"# ORP Research Run `{payload.get('run_id', '')}`")
|
|
17554
|
+
lines.append("")
|
|
17555
|
+
lines.append(f"- status: `{payload.get('status', '')}`")
|
|
17556
|
+
lines.append(f"- question: {payload.get('question', '')}")
|
|
17557
|
+
lines.append(f"- execute: `{str(bool(payload.get('execute'))).lower()}`")
|
|
17558
|
+
lines.append(f"- profile: `{payload.get('profile', {}).get('profile_id', '')}`")
|
|
17559
|
+
lines.append("")
|
|
17560
|
+
lines.append("## Lanes")
|
|
17561
|
+
lines.append("")
|
|
17562
|
+
for lane in payload.get("lanes", []):
|
|
17563
|
+
if not isinstance(lane, dict):
|
|
17564
|
+
continue
|
|
17565
|
+
api_call = lane.get("api_call") if isinstance(lane.get("api_call"), dict) else {}
|
|
17566
|
+
lines.append(
|
|
17567
|
+
f"- `{lane.get('lane_id', '')}`: `{lane.get('status', '')}` "
|
|
17568
|
+
f"via `{lane.get('adapter', '')}` on `{lane.get('model', '')}` "
|
|
17569
|
+
f"at `{lane.get('call_moment', '')}` "
|
|
17570
|
+
f"(api_called: `{str(bool(api_call.get('called', False))).lower()}`)"
|
|
17571
|
+
)
|
|
17572
|
+
lines.append("")
|
|
17573
|
+
lines.append("## Synthesis")
|
|
17574
|
+
lines.append("")
|
|
17575
|
+
lines.append(str(payload.get("synthesis", {}).get("answer", "")).strip())
|
|
17576
|
+
next_actions = payload.get("synthesis", {}).get("next_actions", [])
|
|
17577
|
+
if isinstance(next_actions, list) and next_actions:
|
|
17578
|
+
lines.append("")
|
|
17579
|
+
lines.append("## Next Actions")
|
|
17580
|
+
lines.append("")
|
|
17581
|
+
for action in next_actions:
|
|
17582
|
+
lines.append(f"- {action}")
|
|
17583
|
+
lines.append("")
|
|
17584
|
+
lines.append("## Notes")
|
|
17585
|
+
lines.append("")
|
|
17586
|
+
lines.append("- Research runs are ORP process artifacts, not evidence by themselves.")
|
|
17587
|
+
lines.append("- Secret values are used only at execution time and are not written to artifacts.")
|
|
17588
|
+
return "\n".join(lines).rstrip() + "\n"
|
|
17589
|
+
|
|
15137
17590
|
|
|
17591
|
+
def _research_load_answer(repo_root: Path, run_id: str) -> tuple[dict[str, Any], dict[str, Path]]:
    """Resolve a run reference and load its answer payload plus artifact paths.

    An empty reference or the literal "latest" falls back to
    `last_research_run_id` recorded in orp/state.json. Raises RuntimeError
    when no run can be resolved or its answer.json is missing/empty.
    """
    requested = str(run_id or "").strip()
    state = _read_json_if_exists(repo_root / "orp" / "state.json")
    if requested in ("", "latest"):
        requested = str(state.get("last_research_run_id", "")).strip()
    if not requested:
        raise RuntimeError("No research run id provided and no last research run is recorded.")
    paths = _research_paths(repo_root, requested)
    payload = _read_json_if_exists(paths["answer_json"])
    if not payload:
        raise RuntimeError(f"research run not found: {requested}")
    return payload, paths
|
|
15138
17603
|
|
|
15139
|
-
def cmd_exchange_repo_synthesize(args: argparse.Namespace) -> int:
|
|
15140
|
-
repo_root = Path(args.repo_root).resolve()
|
|
15141
|
-
exchange_id = str(getattr(args, "exchange_id", "") or "").strip() or _exchange_id()
|
|
15142
|
-
source = _exchange_source_payload(repo_root, args)
|
|
15143
|
-
source_root = Path(str(source.get("local_path", "")).strip()).resolve()
|
|
15144
|
-
inventory = _exchange_inventory(source_root)
|
|
15145
|
-
relation = _exchange_relation(repo_root, source_root, inventory)
|
|
15146
|
-
suggested_focus = _exchange_suggested_focus(inventory, relation)
|
|
15147
|
-
paths = _exchange_paths(repo_root, exchange_id)
|
|
15148
17604
|
|
|
17605
|
+
def _research_update_state(repo_root: Path, payload: dict[str, Any]) -> None:
    """Record a finished research run in orp/state.json.

    Merges the on-disk state over the default payload so older state files
    gain any newly-introduced keys, upserts a compact summary row for this
    run, and points `last_research_run_id` at it.
    """
    state_path = repo_root / "orp" / "state.json"
    state = {**_default_state_payload(), **_read_json_if_exists(state_path)}
    run_id = str(payload.get("run_id", "")).strip()
    # Tolerate a corrupted/legacy `research_runs` value by starting fresh.
    research_runs = state.get("research_runs") if isinstance(state.get("research_runs"), dict) else {}
    if run_id:
        research_runs[run_id] = {
            "run_id": run_id,
            "status": payload.get("status", ""),
            "question": payload.get("question", ""),
            "generated_at_utc": payload.get("generated_at_utc", ""),
            "answer_json": payload.get("artifacts", {}).get("answer_json", ""),
            "summary_md": payload.get("artifacts", {}).get("summary_md", ""),
        }
        # NOTE(review): guard placement assumed — only advance the "latest"
        # pointer when a non-empty run_id was recorded; confirm against callers.
        state["last_research_run_id"] = run_id
    state["research_runs"] = research_runs
    _write_json(state_path, state)
|
|
17622
|
+
|
|
17623
|
+
|
|
17624
|
+
def cmd_research_ask(args: argparse.Namespace) -> int:
    """`orp research ask`: decompose a question, run (or plan) provider lanes,
    synthesize an answer, and persist every artifact under orp/research/<run_id>/.

    Returns 0. Raises RuntimeError when no question was supplied. Without
    --execute this is a dry run: the request, breakdown, profile, and planned
    lanes are persisted but no live provider calls are made (fixtures still
    count as completed lanes).
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)
    # The question arrives as a list of CLI tokens; join them back together.
    question = " ".join(str(part) for part in getattr(args, "question", [])).strip()
    if not question:
        raise RuntimeError("research question is required.")
    run_id = str(getattr(args, "run_id", "") or "").strip() or _research_id()
    execute = bool(getattr(args, "execute", False))
    timeout_sec = int(getattr(args, "timeout_sec", 120) or 120)
    profile = _research_load_profile(args, repo_root)
    breakdown = _research_breakdown(question)
    fixtures = _research_parse_lane_fixtures(getattr(args, "lane_fixture", []) or [], repo_root)
    paths = _research_paths(repo_root, run_id)
    started_at_utc = _now_utc()

    # Persist the request/breakdown/profile up front so even a crash mid-run
    # leaves a durable record of what was asked.
    request_payload = {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "kind": "research_request",
        "run_id": run_id,
        "question": question,
        "profile_id": profile.get("profile_id", ""),
        "execute": execute,
        "created_at_utc": started_at_utc,
        "timeout_sec": timeout_sec,
        "call_moments": profile.get("call_moments", []) if isinstance(profile.get("call_moments"), list) else [],
        "lane_fixtures": {lane_id: _path_for_state(path, repo_root) for lane_id, path in fixtures.items()},
    }
    _write_json(paths["request_json"], request_payload)
    _write_json(paths["breakdown_json"], breakdown)
    _write_json(paths["profile_json"], profile)

    # Run lanes sequentially; each lane result is written as its own JSON file
    # as soon as it finishes.
    lanes: list[dict[str, Any]] = []
    for lane in profile.get("lanes", []):
        if not isinstance(lane, dict):
            continue
        lane_result = _research_run_lane(
            lane,
            question=question,
            breakdown=breakdown,
            repo_root=repo_root,
            execute=execute,
            fixtures=fixtures,
            chimera_bin=str(getattr(args, "chimera_bin", "chimera") or "chimera"),
            timeout_sec=timeout_sec,
        )
        lanes.append(lane_result)
        _write_json(paths["lanes_root"] / f"{lane_result['lane_id']}.json", lane_result)

    finished_at_utc = _now_utc()
    # Artifact paths are stored repo-relative so state survives checkouts moving.
    artifacts = {
        "request_json": _path_for_state(paths["request_json"], repo_root),
        "breakdown_json": _path_for_state(paths["breakdown_json"], repo_root),
        "profile_json": _path_for_state(paths["profile_json"], repo_root),
        "answer_json": _path_for_state(paths["answer_json"], repo_root),
        "summary_md": _path_for_state(paths["summary_md"], repo_root),
        "lanes_root": _path_for_state(paths["lanes_root"], repo_root),
    }
    payload = {
        "schema_version": RESEARCH_RUN_SCHEMA_VERSION,
        "kind": "research_run",
        "run_id": run_id,
        "status": _research_status_from_lanes(lanes),
        "question": question,
        "execute": execute,
        "generated_at_utc": finished_at_utc,
        "started_at_utc": started_at_utc,
        "finished_at_utc": finished_at_utc,
        "duration_ms": _duration_ms(started_at_utc, finished_at_utc),
        "profile": {
            "profile_id": profile.get("profile_id", ""),
            "label": profile.get("label", ""),
            "lane_count": len(profile.get("lanes", [])) if isinstance(profile.get("lanes"), list) else 0,
        },
        "call_moments": profile.get("call_moments", []) if isinstance(profile.get("call_moments"), list) else [],
        "breakdown": breakdown,
        "lanes": lanes,
        "synthesis": _research_synthesize(question, lanes, execute=execute),
        "artifacts": artifacts,
        "notes": [
            "Research runs are ORP process artifacts, not canonical evidence.",
            "Live provider calls require --execute; dry runs persist the decomposition and lane plan only.",
            "Secret values are not written to ORP artifacts.",
        ],
    }
    _write_json(paths["answer_json"], payload)
    _write_text(paths["summary_md"], _research_summary_markdown(payload))
    _research_update_state(repo_root, payload)

    # CLI result: a compact view of the run suitable for --json consumers.
    result = {
        "ok": True,
        "run_id": run_id,
        "status": payload["status"],
        "question": question,
        "execute": execute,
        "profile_id": profile.get("profile_id", ""),
        "lane_statuses": [
            {
                "lane_id": lane.get("lane_id", ""),
                "call_moment": lane.get("call_moment", ""),
                "status": lane.get("status", ""),
                "adapter": lane.get("adapter", ""),
                "model": lane.get("model", ""),
                "api_called": bool(lane.get("api_call", {}).get("called", False)) if isinstance(lane.get("api_call"), dict) else False,
            }
            for lane in lanes
        ],
        "synthesis": payload["synthesis"],
        "artifacts": artifacts,
        "schema_path": "spec/v1/research-run.schema.json",
    }
    if args.json_output:
        _print_json(result)
        return 0

    # Human-readable fallback: key=value lines plus one status line per lane.
    print(f"run_id={run_id}")
    print(f"status={payload['status']}")
    print(f"answer_json={artifacts['answer_json']}")
    print(f"summary_md={artifacts['summary_md']}")
    for lane in lanes:
        print(f"lane.{lane.get('lane_id', '')}.status={lane.get('status', '')}")
    return 0
|
|
17745
|
+
|
|
17746
|
+
|
|
17747
|
+
def cmd_research_status(args: argparse.Namespace) -> int:
    """`orp research status`: print a compact status view of a research run."""
    repo_root = Path(args.repo_root).resolve()
    payload, _ = _research_load_answer(repo_root, str(getattr(args, "run_id", "") or "latest"))

    def _lane_row(lane: dict[str, Any]) -> dict[str, Any]:
        # Summarize one lane; `api_call` may be absent or malformed.
        plan = lane.get("api_call")
        return {
            "lane_id": lane.get("lane_id", ""),
            "call_moment": lane.get("call_moment", ""),
            "status": lane.get("status", ""),
            "adapter": lane.get("adapter", ""),
            "model": lane.get("model", ""),
            "api_called": bool(plan.get("called", False)) if isinstance(plan, dict) else False,
        }

    result = {
        "ok": True,
        "run_id": payload.get("run_id", ""),
        "status": payload.get("status", ""),
        "question": payload.get("question", ""),
        "generated_at_utc": payload.get("generated_at_utc", ""),
        "lane_statuses": [_lane_row(lane) for lane in payload.get("lanes", []) if isinstance(lane, dict)],
        "artifacts": payload.get("artifacts", {}),
    }
    if args.json_output:
        _print_json(result)
        return 0
    print(f"run_id={result['run_id']}")
    print(f"status={result['status']}")
    print(f"question={result['question']}")
    for lane in result["lane_statuses"]:
        print(f"lane.{lane.get('lane_id', '')}.status={lane.get('status', '')}")
    return 0
|
|
17779
|
+
|
|
17780
|
+
|
|
17781
|
+
def cmd_research_show(args: argparse.Namespace) -> int:
    """`orp research show`: full payload with --json, otherwise the answer text."""
    repo_root = Path(args.repo_root).resolve()
    run_ref = str(getattr(args, "run_id", "") or "latest")
    payload, _ = _research_load_answer(repo_root, run_ref)
    if not args.json_output:
        answer = str(payload.get("synthesis", {}).get("answer", "")).strip()
        print(answer)
        return 0
    _print_json(payload)
    return 0
|
|
15197
17789
|
|
|
15198
17790
|
|
|
@@ -18667,14 +21259,22 @@ def _resolve_secret_scope_from_args(
|
|
|
18667
21259
|
|
|
18668
21260
|
def _resolve_secret_value_arg(args: argparse.Namespace, *, required: bool) -> tuple[bool, str]:
|
|
18669
21261
|
value_from_stdin = bool(getattr(args, "value_stdin", False))
|
|
21262
|
+
value_from_env = bool(getattr(args, "from_env", False))
|
|
18670
21263
|
raw_value = getattr(args, "value", None)
|
|
18671
|
-
if value_from_stdin
|
|
18672
|
-
raise RuntimeError("Use
|
|
21264
|
+
if sum([bool(value_from_stdin), bool(value_from_env), raw_value is not None]) > 1:
|
|
21265
|
+
raise RuntimeError("Use only one of --value, --value-stdin, or --from-env.")
|
|
18673
21266
|
|
|
18674
|
-
provided = raw_value is not None or value_from_stdin
|
|
21267
|
+
provided = raw_value is not None or value_from_stdin or value_from_env
|
|
18675
21268
|
value = str(raw_value).strip() if raw_value is not None else ""
|
|
18676
21269
|
if value_from_stdin:
|
|
18677
21270
|
value = _read_value_from_stdin()
|
|
21271
|
+
if value_from_env:
|
|
21272
|
+
env_var_name = str(getattr(args, "env_var_name", "") or "").strip()
|
|
21273
|
+
if not env_var_name:
|
|
21274
|
+
raise RuntimeError("--from-env requires --env-var-name.")
|
|
21275
|
+
value = os.environ.get(env_var_name, "").strip()
|
|
21276
|
+
if required and not value:
|
|
21277
|
+
raise RuntimeError(f"Environment variable {env_var_name} is empty or not set.")
|
|
18678
21278
|
|
|
18679
21279
|
if required and not value:
|
|
18680
21280
|
value = _prompt_value("Secret value", secret=True)
|
|
@@ -18932,11 +21532,11 @@ def _keychain_comment_for_secret(secret: dict[str, Any]) -> str:
|
|
|
18932
21532
|
|
|
18933
21533
|
def _normalize_secret_binding_summary(binding: dict[str, Any]) -> dict[str, Any]:
|
|
18934
21534
|
return {
|
|
18935
|
-
"binding_id": str(binding.get("id", "")).strip(),
|
|
18936
|
-
"world_id": str(binding.get("worldId", "")).strip(),
|
|
18937
|
-
"idea_id": str(binding.get("ideaId", "")).strip(),
|
|
21535
|
+
"binding_id": str(binding.get("binding_id", binding.get("id", ""))).strip(),
|
|
21536
|
+
"world_id": str(binding.get("world_id", binding.get("worldId", ""))).strip(),
|
|
21537
|
+
"idea_id": str(binding.get("idea_id", binding.get("ideaId", ""))).strip(),
|
|
18938
21538
|
"purpose": str(binding.get("purpose", "")).strip(),
|
|
18939
|
-
"primary": bool(binding.get("isPrimary", False)),
|
|
21539
|
+
"primary": bool(binding.get("primary", binding.get("isPrimary", False))),
|
|
18940
21540
|
}
|
|
18941
21541
|
|
|
18942
21542
|
|
|
@@ -19213,6 +21813,48 @@ def _sync_secret_to_keychain(
|
|
|
19213
21813
|
return _upsert_keychain_secret_registry_entry(entry)
|
|
19214
21814
|
|
|
19215
21815
|
|
|
21816
|
+
def _build_local_keychain_secret_from_args(args: argparse.Namespace, existing_entry: dict[str, Any] | None = None) -> dict[str, Any]:
    """Assemble a camelCase secret record for the local Keychain registry.

    Merges CLI arguments over an optional existing registry entry so that
    re-adding an alias preserves its id, label, username, env var name, and
    bindings. Raises RuntimeError when alias/provider are missing or when the
    alias is already registered under a different provider.
    """
    alias = str(getattr(args, "alias", "") or "").strip()
    provider = str(getattr(args, "provider", "") or "").strip()
    if not alias:
        raise RuntimeError("Secret alias is required.")
    if not provider:
        raise RuntimeError("Secret provider is required.")

    if existing_entry:
        existing_provider = str(existing_entry.get("provider", "") or "").strip()
        # An alias is pinned to one provider; refuse a silent provider swap.
        if existing_provider and existing_provider != provider:
            raise RuntimeError(
                f"Local Keychain secret alias already exists with provider '{existing_provider}', not '{provider}'."
            )

    # Precedence: explicit CLI value, then the existing entry's value, then a fallback.
    label = str(getattr(args, "label", "") or "").strip() or str(existing_entry.get("label", "") if existing_entry else "").strip() or alias
    kind = str(getattr(args, "kind", "api_key") or "api_key").strip() or "api_key"
    username = getattr(args, "username", None)
    env_var_name = getattr(args, "env_var_name", None)
    now = _now_utc()
    return {
        # Reuse the stored secret id on update; mint a fresh local id otherwise.
        "id": str(existing_entry.get("secret_id", "") if existing_entry else "").strip() or f"local-{uuid.uuid4().hex[:12]}",
        "alias": alias,
        "label": label,
        "provider": provider,
        "kind": kind,
        # `None` means "not passed on the CLI": keep whatever the entry already had.
        "username": str(username).strip() if username is not None else str(existing_entry.get("username", "") if existing_entry else "").strip(),
        "envVarName": str(env_var_name).strip() if env_var_name is not None else str(existing_entry.get("env_var_name", "") if existing_entry else "").strip(),
        "status": "active",
        "valueVersion": f"local:{now}",
        # The real value lives in the OS Keychain; the registry keeps a marker only.
        "valuePreview": "stored in local Keychain",
        "bindings": [
            _binding_payload_from_keychain_summary(row)
            for row in (existing_entry.get("bindings", []) if isinstance(existing_entry, dict) else [])
            if isinstance(row, dict)
        ],
        "lastUsedAt": "",
        "rotatedAt": now,
        "updatedAt": now,
    }
|
|
21856
|
+
|
|
21857
|
+
|
|
19216
21858
|
def _try_get_secret_by_ref(args: argparse.Namespace, secret_ref: str) -> dict[str, Any] | None:
|
|
19217
21859
|
ref = str(secret_ref or "").strip()
|
|
19218
21860
|
if not ref:
|
|
@@ -21009,6 +23651,50 @@ def cmd_secrets_resolve(args: argparse.Namespace) -> int:
|
|
|
21009
23651
|
return 0
|
|
21010
23652
|
|
|
21011
23653
|
|
|
23654
|
+
def cmd_secrets_keychain_add(args: argparse.Namespace) -> int:
|
|
23655
|
+
_ensure_keychain_supported()
|
|
23656
|
+
_, value = _resolve_secret_value_arg(args, required=True)
|
|
23657
|
+
alias = str(getattr(args, "alias", "") or "").strip()
|
|
23658
|
+
existing_entry = _select_keychain_entry(
|
|
23659
|
+
secret_ref=alias,
|
|
23660
|
+
provider="",
|
|
23661
|
+
world_id="",
|
|
23662
|
+
idea_id="",
|
|
23663
|
+
)
|
|
23664
|
+
secret = _build_local_keychain_secret_from_args(args, existing_entry)
|
|
23665
|
+
binding = _build_secret_binding_payload_from_args(args)
|
|
23666
|
+
entry = _build_keychain_registry_entry(secret, binding=binding)
|
|
23667
|
+
entry.update(_store_keychain_secret_value(secret, value))
|
|
23668
|
+
entry = _upsert_keychain_secret_registry_entry(entry)
|
|
23669
|
+
result = {
|
|
23670
|
+
"ok": True,
|
|
23671
|
+
"created": existing_entry is None,
|
|
23672
|
+
"secret": _secret_payload_from_keychain_entry(entry),
|
|
23673
|
+
"entry": entry,
|
|
23674
|
+
"registry_path": str(_keychain_secret_registry_path()),
|
|
23675
|
+
"keychain_service": str(entry.get("keychain_service", "")).strip(),
|
|
23676
|
+
"keychain_account": str(entry.get("keychain_account", "")).strip(),
|
|
23677
|
+
"source": "keychain",
|
|
23678
|
+
}
|
|
23679
|
+
if args.json_output:
|
|
23680
|
+
_print_json(result)
|
|
23681
|
+
else:
|
|
23682
|
+
_print_secret_human(
|
|
23683
|
+
result["secret"],
|
|
23684
|
+
include_bindings=True,
|
|
23685
|
+
source="keychain",
|
|
23686
|
+
)
|
|
23687
|
+
_print_pairs(
|
|
23688
|
+
[
|
|
23689
|
+
("secret.created", str(result["created"]).lower()),
|
|
23690
|
+
("keychain.service", result["keychain_service"]),
|
|
23691
|
+
("keychain.account", result["keychain_account"]),
|
|
23692
|
+
("registry.path", result["registry_path"]),
|
|
23693
|
+
]
|
|
23694
|
+
)
|
|
23695
|
+
return 0
|
|
23696
|
+
|
|
23697
|
+
|
|
21012
23698
|
def cmd_secrets_keychain_list(args: argparse.Namespace) -> int:
|
|
21013
23699
|
provider = str(getattr(args, "provider", "") or "").strip()
|
|
21014
23700
|
world_id, idea_id = _resolve_secret_scope_from_args(
|
|
@@ -23525,6 +26211,8 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
23525
26211
|
" 3. Later run `orp secrets list` or `orp secrets resolve ...`\n\n"
|
|
23526
26212
|
"Agent flow:\n"
|
|
23527
26213
|
" - Pipe the value with `--value-stdin` instead of typing it interactively.\n\n"
|
|
26214
|
+
"Local flow:\n"
|
|
26215
|
+
" - Use `orp secrets keychain-add ...` to store a machine-local secret without the hosted API.\n\n"
|
|
23528
26216
|
"Local macOS Keychain caching and hosted sync are optional layers on top."
|
|
23529
26217
|
),
|
|
23530
26218
|
epilog=(
|
|
@@ -23532,6 +26220,7 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
23532
26220
|
" orp secrets add --alias openai-primary --label \"OpenAI Primary\" --provider openai\n"
|
|
23533
26221
|
" orp secrets add --alias huggingface-login --label \"Hugging Face Login\" --provider huggingface --kind password --username cody\n"
|
|
23534
26222
|
" printf '%s' 'sk-...' | orp secrets add --alias openai-primary --label \"OpenAI Primary\" --provider openai --value-stdin\n"
|
|
26223
|
+
" printf '%s' 'sk-...' | orp secrets keychain-add --alias openai-primary --label \"OpenAI Primary\" --provider openai --env-var-name OPENAI_API_KEY --value-stdin\n"
|
|
23535
26224
|
" orp secrets list\n"
|
|
23536
26225
|
" orp secrets resolve openai-primary --reveal"
|
|
23537
26226
|
),
|
|
@@ -23640,6 +26329,46 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
23640
26329
|
add_json_flag(s_secrets_ensure)
|
|
23641
26330
|
s_secrets_ensure.set_defaults(func=cmd_secrets_ensure, json_output=False)
|
|
23642
26331
|
|
|
26332
|
+
s_secrets_keychain_add = secrets_sub.add_parser(
|
|
26333
|
+
"keychain-add",
|
|
26334
|
+
help="Save or update one secret directly in the local macOS Keychain registry",
|
|
26335
|
+
)
|
|
26336
|
+
s_secrets_keychain_add.add_argument("--alias", required=True, help="Stable secret alias")
|
|
26337
|
+
s_secrets_keychain_add.add_argument("--label", default="", help="Human label for the secret")
|
|
26338
|
+
s_secrets_keychain_add.add_argument("--provider", required=True, help="Provider slug, for example openai")
|
|
26339
|
+
s_secrets_keychain_add.add_argument(
|
|
26340
|
+
"--kind",
|
|
26341
|
+
choices=["api_key", "access_token", "password", "other"],
|
|
26342
|
+
default="api_key",
|
|
26343
|
+
help="Secret kind (default: api_key)",
|
|
26344
|
+
)
|
|
26345
|
+
s_secrets_keychain_add.add_argument(
|
|
26346
|
+
"--username",
|
|
26347
|
+
default=None,
|
|
26348
|
+
help="Optional username or login identifier that belongs with this credential",
|
|
26349
|
+
)
|
|
26350
|
+
s_secrets_keychain_add.add_argument("--env-var-name", default=None, help="Optional env var name, for example OPENAI_API_KEY")
|
|
26351
|
+
s_secrets_keychain_add.add_argument("--value", default=None, help="Secret value")
|
|
26352
|
+
s_secrets_keychain_add.add_argument(
|
|
26353
|
+
"--value-stdin",
|
|
26354
|
+
action="store_true",
|
|
26355
|
+
help="Read the secret value from stdin",
|
|
26356
|
+
)
|
|
26357
|
+
s_secrets_keychain_add.add_argument(
|
|
26358
|
+
"--from-env",
|
|
26359
|
+
action="store_true",
|
|
26360
|
+
help="Read the secret value from --env-var-name in the current process environment",
|
|
26361
|
+
)
|
|
26362
|
+
add_secret_scope_flags(s_secrets_keychain_add)
|
|
26363
|
+
s_secrets_keychain_add.add_argument("--purpose", default="", help="Optional project usage note when binding")
|
|
26364
|
+
s_secrets_keychain_add.add_argument(
|
|
26365
|
+
"--primary",
|
|
26366
|
+
action="store_true",
|
|
26367
|
+
help="Mark the local project binding as primary",
|
|
26368
|
+
)
|
|
26369
|
+
add_json_flag(s_secrets_keychain_add)
|
|
26370
|
+
s_secrets_keychain_add.set_defaults(func=cmd_secrets_keychain_add, json_output=False)
|
|
26371
|
+
|
|
23643
26372
|
s_secrets_keychain_list = secrets_sub.add_parser(
|
|
23644
26373
|
"keychain-list",
|
|
23645
26374
|
help="List local macOS Keychain copies known to ORP on this machine",
|
|
@@ -24309,6 +27038,73 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
24309
27038
|
add_json_flag(s_exchange_repo_synthesize)
|
|
24310
27039
|
s_exchange_repo_synthesize.set_defaults(func=cmd_exchange_repo_synthesize, json_output=False)
|
|
24311
27040
|
|
|
27041
|
+
s_research = sub.add_parser(
|
|
27042
|
+
"research",
|
|
27043
|
+
help="Durable OpenAI research-loop question decomposition and synthesis runs",
|
|
27044
|
+
)
|
|
27045
|
+
research_sub = s_research.add_subparsers(dest="research_cmd", required=True)
|
|
27046
|
+
|
|
27047
|
+
s_research_ask = research_sub.add_parser(
|
|
27048
|
+
"ask",
|
|
27049
|
+
help="Create a research council run; use --execute for live provider calls",
|
|
27050
|
+
)
|
|
27051
|
+
s_research_ask.add_argument("question", nargs="+", help="Question to decompose and answer")
|
|
27052
|
+
s_research_ask.add_argument(
|
|
27053
|
+
"--profile",
|
|
27054
|
+
default="openai-council",
|
|
27055
|
+
help="Research profile id (default: openai-council)",
|
|
27056
|
+
)
|
|
27057
|
+
s_research_ask.add_argument(
|
|
27058
|
+
"--profile-file",
|
|
27059
|
+
default="",
|
|
27060
|
+
help="Optional JSON profile file overriding the built-in OpenAI model lanes",
|
|
27061
|
+
)
|
|
27062
|
+
s_research_ask.add_argument(
|
|
27063
|
+
"--run-id",
|
|
27064
|
+
default="",
|
|
27065
|
+
help="Optional research run id override",
|
|
27066
|
+
)
|
|
27067
|
+
s_research_ask.add_argument(
|
|
27068
|
+
"--execute",
|
|
27069
|
+
action="store_true",
|
|
27070
|
+
help="Allow live provider adapters to run; without this ORP writes the plan only",
|
|
27071
|
+
)
|
|
27072
|
+
s_research_ask.add_argument(
|
|
27073
|
+
"--lane-fixture",
|
|
27074
|
+
action="append",
|
|
27075
|
+
default=[],
|
|
27076
|
+
help="Load one lane result from lane_id=path instead of calling a provider (repeatable)",
|
|
27077
|
+
)
|
|
27078
|
+
s_research_ask.add_argument(
|
|
27079
|
+
"--chimera-bin",
|
|
27080
|
+
default="chimera",
|
|
27081
|
+
help="Chimera CLI binary or path for custom chimera_cli lanes (default: chimera)",
|
|
27082
|
+
)
|
|
27083
|
+
s_research_ask.add_argument(
|
|
27084
|
+
"--timeout-sec",
|
|
27085
|
+
type=int,
|
|
27086
|
+
default=120,
|
|
27087
|
+
help="Per-lane live adapter timeout in seconds (default: 120)",
|
|
27088
|
+
)
|
|
27089
|
+
add_json_flag(s_research_ask)
|
|
27090
|
+
s_research_ask.set_defaults(func=cmd_research_ask, json_output=False)
|
|
27091
|
+
|
|
27092
|
+
s_research_status = research_sub.add_parser(
|
|
27093
|
+
"status",
|
|
27094
|
+
help="Show status and lane summary for a research run",
|
|
27095
|
+
)
|
|
27096
|
+
s_research_status.add_argument("run_id", nargs="?", default="latest", help="Run id or latest (default: latest)")
|
|
27097
|
+
add_json_flag(s_research_status)
|
|
27098
|
+
s_research_status.set_defaults(func=cmd_research_status, json_output=False)
|
|
27099
|
+
|
|
27100
|
+
s_research_show = research_sub.add_parser(
|
|
27101
|
+
"show",
|
|
27102
|
+
help="Show a research run answer payload or human synthesis",
|
|
27103
|
+
)
|
|
27104
|
+
s_research_show.add_argument("run_id", nargs="?", default="latest", help="Run id or latest (default: latest)")
|
|
27105
|
+
add_json_flag(s_research_show)
|
|
27106
|
+
s_research_show.set_defaults(func=cmd_research_show, json_output=False)
|
|
27107
|
+
|
|
24312
27108
|
s_collab = sub.add_parser(
|
|
24313
27109
|
"collaborate",
|
|
24314
27110
|
help="Built-in repository collaboration setup and workflow operations",
|
|
@@ -24425,6 +27221,25 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
24425
27221
|
)
|
|
24426
27222
|
s_collab_run.set_defaults(func=cmd_collaborate_run, json_output=False)
|
|
24427
27223
|
|
|
27224
|
+
s_project = sub.add_parser(
|
|
27225
|
+
"project",
|
|
27226
|
+
help="Local project context lens and evolution policy operations",
|
|
27227
|
+
)
|
|
27228
|
+
project_sub = s_project.add_subparsers(dest="project_cmd", required=True)
|
|
27229
|
+
s_project_refresh = project_sub.add_parser(
|
|
27230
|
+
"refresh",
|
|
27231
|
+
help="Rescan this directory and refresh orp/project.json",
|
|
27232
|
+
)
|
|
27233
|
+
add_json_flag(s_project_refresh)
|
|
27234
|
+
s_project_refresh.set_defaults(func=cmd_project_refresh, json_output=False)
|
|
27235
|
+
|
|
27236
|
+
s_project_show = project_sub.add_parser(
|
|
27237
|
+
"show",
|
|
27238
|
+
help="Show the current ORP project context lens",
|
|
27239
|
+
)
|
|
27240
|
+
add_json_flag(s_project_show)
|
|
27241
|
+
s_project_show.set_defaults(func=cmd_project_show, json_output=False)
|
|
27242
|
+
|
|
24428
27243
|
s_init = sub.add_parser("init", help="Make this repo ORP-governed with local-first git safety")
|
|
24429
27244
|
s_init.add_argument(
|
|
24430
27245
|
"--default-branch",
|
|
@@ -24592,6 +27407,114 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
24592
27407
|
add_json_flag(s_frontier_checklist)
|
|
24593
27408
|
s_frontier_checklist.set_defaults(func=cmd_frontier_checklist, json_output=False)
|
|
24594
27409
|
|
|
27410
|
+
s_frontier_continuation_status = frontier_sub.add_parser(
|
|
27411
|
+
"continuation-status",
|
|
27412
|
+
help="Show whether the frontier has a safe next continuation, blocker, or terminal declaration",
|
|
27413
|
+
)
|
|
27414
|
+
s_frontier_continuation_status.add_argument(
|
|
27415
|
+
"--strict",
|
|
27416
|
+
action="store_true",
|
|
27417
|
+
help="Return non-zero when continuation warnings are present",
|
|
27418
|
+
)
|
|
27419
|
+
add_json_flag(s_frontier_continuation_status)
|
|
27420
|
+
s_frontier_continuation_status.set_defaults(func=cmd_frontier_continuation_status, json_output=False)
|
|
27421
|
+
|
|
27422
|
+
s_frontier_preflight_delegate = frontier_sub.add_parser(
|
|
27423
|
+
"preflight-delegate",
|
|
27424
|
+
help="Fail delegation when frontier continuation is stale, ambiguous, or not activated",
|
|
27425
|
+
)
|
|
27426
|
+
add_json_flag(s_frontier_preflight_delegate)
|
|
27427
|
+
s_frontier_preflight_delegate.set_defaults(func=cmd_frontier_preflight_delegate, json_output=False)
|
|
27428
|
+
|
|
27429
|
+
s_frontier_additional = frontier_sub.add_parser(
|
|
27430
|
+
"additional",
|
|
27431
|
+
help="Manage queued additional Frontier work that should run after the active delegate objective",
|
|
27432
|
+
)
|
|
27433
|
+
frontier_additional_sub = s_frontier_additional.add_subparsers(dest="frontier_additional_cmd", required=True)
|
|
27434
|
+
|
|
27435
|
+
s_frontier_additional_list = frontier_additional_sub.add_parser(
|
|
27436
|
+
"list",
|
|
27437
|
+
help="Show additional Frontier work lists and their item states",
|
|
27438
|
+
)
|
|
27439
|
+
add_json_flag(s_frontier_additional_list)
|
|
27440
|
+
s_frontier_additional_list.set_defaults(func=cmd_frontier_additional_list, json_output=False)
|
|
27441
|
+
|
|
27442
|
+
s_frontier_additional_add_list = frontier_additional_sub.add_parser(
|
|
27443
|
+
"add-list",
|
|
27444
|
+
help="Add an additional work list that delegates may drain after the active objective",
|
|
27445
|
+
)
|
|
27446
|
+
s_frontier_additional_add_list.add_argument("--id", required=True, help="Additional list id")
|
|
27447
|
+
s_frontier_additional_add_list.add_argument("--label", required=True, help="Additional list label")
|
|
27448
|
+
s_frontier_additional_add_list.add_argument(
|
|
27449
|
+
"--status",
|
|
27450
|
+
default="pending",
|
|
27451
|
+
choices=["pending", "active", "complete"],
|
|
27452
|
+
help="List status (default: pending)",
|
|
27453
|
+
)
|
|
27454
|
+
add_json_flag(s_frontier_additional_add_list)
|
|
27455
|
+
s_frontier_additional_add_list.set_defaults(func=cmd_frontier_additional_add_list, json_output=False)
|
|
27456
|
+
|
|
27457
|
+
s_frontier_additional_add_item = frontier_additional_sub.add_parser(
|
|
27458
|
+
"add-item",
|
|
27459
|
+
help="Add one item to an additional Frontier work list",
|
|
27460
|
+
)
|
|
27461
|
+
s_frontier_additional_add_item.add_argument("--list", required=True, help="Parent additional list id")
|
|
27462
|
+
s_frontier_additional_add_item.add_argument("--id", required=True, help="Additional item id")
|
|
27463
|
+
s_frontier_additional_add_item.add_argument("--label", required=True, help="Additional item label")
|
|
27464
|
+
s_frontier_additional_add_item.add_argument(
|
|
27465
|
+
"--status",
|
|
27466
|
+
default="pending",
|
|
27467
|
+
choices=["pending", "active", "complete", "skipped"],
|
|
27468
|
+
help="Item status (default: pending)",
|
|
27469
|
+
)
|
|
27470
|
+
s_frontier_additional_add_item.add_argument("--goal", default="", help="Optional item goal")
|
|
27471
|
+
s_frontier_additional_add_item.add_argument(
|
|
27472
|
+
"--depends-on",
|
|
27473
|
+
action="append",
|
|
27474
|
+
default=[],
|
|
27475
|
+
help="Dependency item, phase, or milestone id (repeatable)",
|
|
27476
|
+
)
|
|
27477
|
+
s_frontier_additional_add_item.add_argument(
|
|
27478
|
+
"--requirement",
|
|
27479
|
+
action="append",
|
|
27480
|
+
default=[],
|
|
27481
|
+
help="Requirement identifier or note (repeatable)",
|
|
27482
|
+
)
|
|
27483
|
+
s_frontier_additional_add_item.add_argument(
|
|
27484
|
+
"--success-criterion",
|
|
27485
|
+
action="append",
|
|
27486
|
+
default=[],
|
|
27487
|
+
help="Item success criterion (repeatable)",
|
|
27488
|
+
)
|
|
27489
|
+
s_frontier_additional_add_item.add_argument(
|
|
27490
|
+
"--plan",
|
|
27491
|
+
action="append",
|
|
27492
|
+
default=[],
|
|
27493
|
+
help="Plan id or plan note (repeatable)",
|
|
27494
|
+
)
|
|
27495
|
+
add_json_flag(s_frontier_additional_add_item)
|
|
27496
|
+
s_frontier_additional_add_item.set_defaults(func=cmd_frontier_additional_add_item, json_output=False)
|
|
27497
|
+
|
|
27498
|
+
s_frontier_additional_activate_next = frontier_additional_sub.add_parser(
|
|
27499
|
+
"activate-next",
|
|
27500
|
+
help="Activate and print the next pending additional Frontier item",
|
|
27501
|
+
)
|
|
27502
|
+
add_json_flag(s_frontier_additional_activate_next)
|
|
27503
|
+
s_frontier_additional_activate_next.set_defaults(func=cmd_frontier_additional_activate_next, json_output=False)
|
|
27504
|
+
|
|
27505
|
+
s_frontier_additional_complete_active = frontier_additional_sub.add_parser(
|
|
27506
|
+
"complete-active",
|
|
27507
|
+
help="Mark the active additional Frontier item complete",
|
|
27508
|
+
)
|
|
27509
|
+
s_frontier_additional_complete_active.add_argument(
|
|
27510
|
+
"--status",
|
|
27511
|
+
default="complete",
|
|
27512
|
+
choices=["complete", "skipped"],
|
|
27513
|
+
help="Completion status for the active item (default: complete)",
|
|
27514
|
+
)
|
|
27515
|
+
add_json_flag(s_frontier_additional_complete_active)
|
|
27516
|
+
s_frontier_additional_complete_active.set_defaults(func=cmd_frontier_additional_complete_active, json_output=False)
|
|
27517
|
+
|
|
24595
27518
|
s_frontier_stack = frontier_sub.add_parser(
|
|
24596
27519
|
"stack",
|
|
24597
27520
|
help="Show the larger major-version stack",
|
|
@@ -24745,6 +27668,11 @@ def build_parser() -> argparse.ArgumentParser:
|
|
|
24745
27668
|
action="store_true",
|
|
24746
27669
|
help="Re-render materialized frontier views when the frontier is otherwise consistent",
|
|
24747
27670
|
)
|
|
27671
|
+
s_frontier_doctor.add_argument(
|
|
27672
|
+
"--strict",
|
|
27673
|
+
action="store_true",
|
|
27674
|
+
help="Return non-zero when frontier warnings are present",
|
|
27675
|
+
)
|
|
24748
27676
|
add_json_flag(s_frontier_doctor)
|
|
24749
27677
|
s_frontier_doctor.set_defaults(func=cmd_frontier_doctor, json_output=False)
|
|
24750
27678
|
|