open-research-protocol 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. package/AGENT_INTEGRATION.md +94 -0
  2. package/INSTALL.md +159 -0
  3. package/LICENSE +22 -0
  4. package/PROTOCOL.md +140 -0
  5. package/README.md +312 -0
  6. package/bin/orp.js +38 -0
  7. package/cli/orp.py +3595 -0
  8. package/cone/CONTEXT_LOG.md +33 -0
  9. package/docs/AGENT_LOOP.md +63 -0
  10. package/docs/CHOOSING_OR_IGNORING_INSTRUMENTS.md +128 -0
  11. package/docs/CODA_ORP_CONTRACT.md +222 -0
  12. package/docs/CORE_ABILITY_REFOCUS_CHECKLIST.md +62 -0
  13. package/docs/DISCOVER.md +69 -0
  14. package/docs/EXTERNAL_CONTRIBUTION_GOVERNANCE.md +275 -0
  15. package/docs/MATHLIB_COLLABORATION_FLOW_PROMPT.md +112 -0
  16. package/docs/NPM_RELEASE_CHECKLIST.md +55 -0
  17. package/docs/ORP_V1_ATOMIC_DISCOVERY_EVOLUTION.md +186 -0
  18. package/docs/OSS_CONTRIBUTION_AGENT_LOOP.md +69 -0
  19. package/docs/PRESENTATION_BOW.md +100 -0
  20. package/docs/PROFILE_PACKS.md +227 -0
  21. package/docs/SUNFLOWER_CODA_PR_GOVERNANCE_MAPPING.md +77 -0
  22. package/docs/WHY_INSTRUMENTS.md +118 -0
  23. package/examples/README.md +21 -0
  24. package/examples/example_claim.md +33 -0
  25. package/examples/example_failed.md +24 -0
  26. package/examples/example_verification.md +36 -0
  27. package/examples/orp.erdos-problems.catalog.yml +88 -0
  28. package/examples/orp.external-pr-governance.yml +223 -0
  29. package/examples/orp.sunflower-coda.atomic.yml +144 -0
  30. package/examples/orp.sunflower-coda.live-compare.yml +181 -0
  31. package/examples/orp.sunflower-coda.pr-governance.yml +253 -0
  32. package/examples/packet.problem_scope.example.json +123 -0
  33. package/examples/reports/README.md +16 -0
  34. package/examples/reports/sunflower_live_compare_20.RUN_SUMMARY.md +37 -0
  35. package/examples/reports/sunflower_live_compare_367.RUN_SUMMARY.md +37 -0
  36. package/examples/reports/sunflower_live_compare_857.RUN_SUMMARY.md +37 -0
  37. package/llms.txt +58 -0
  38. package/modules/instruments/ADVERSARIAL/README.md +109 -0
  39. package/modules/instruments/ADVERSARIAL/TEMPLATE.md +27 -0
  40. package/modules/instruments/COMPRESSION/README.md +112 -0
  41. package/modules/instruments/COMPRESSION/TEMPLATE.md +27 -0
  42. package/modules/instruments/INSTRUMENT_TEMPLATE.md +30 -0
  43. package/modules/instruments/ORBIT/README.md +124 -0
  44. package/modules/instruments/ORBIT/TEMPLATE.md +28 -0
  45. package/modules/instruments/README.md +179 -0
  46. package/package.json +54 -0
  47. package/packs/README.md +16 -0
  48. package/packs/erdos-open-problems/README.md +287 -0
  49. package/packs/erdos-open-problems/data/README.md +43 -0
  50. package/packs/erdos-open-problems/data/erdos_open_problems.md +697 -0
  51. package/packs/erdos-open-problems/data/erdos_problems.active.json +15561 -0
  52. package/packs/erdos-open-problems/data/erdos_problems.all.json +26289 -0
  53. package/packs/erdos-open-problems/data/erdos_problems.closed.json +10760 -0
  54. package/packs/erdos-open-problems/data/erdos_problems.open.json +15561 -0
  55. package/packs/erdos-open-problems/docs/SUNFLOWER_ADAPTER_DEPENDENCIES.md +63 -0
  56. package/packs/erdos-open-problems/pack.yml +131 -0
  57. package/packs/erdos-open-problems/profiles/erdos-problems-catalog-sync.yml.tmpl +99 -0
  58. package/packs/erdos-open-problems/profiles/sunflower-live-compare.yml.tmpl +188 -0
  59. package/packs/erdos-open-problems/profiles/sunflower-mathlib-pr-governance.yml.tmpl +253 -0
  60. package/packs/erdos-open-problems/profiles/sunflower-problem857-discovery-public-repo.yml.tmpl +152 -0
  61. package/packs/erdos-open-problems/profiles/sunflower-problem857-discovery.yml.tmpl +154 -0
  62. package/packs/external-pr-governance/README.md +116 -0
  63. package/packs/external-pr-governance/adapters/formal-conjectures/README.md +35 -0
  64. package/packs/external-pr-governance/adapters/mathlib/README.md +37 -0
  65. package/packs/external-pr-governance/pack.yml +146 -0
  66. package/packs/external-pr-governance/profiles/oss-feedback-hardening.yml.tmpl +92 -0
  67. package/packs/external-pr-governance/profiles/oss-pr-governance.yml.tmpl +233 -0
  68. package/packs/issue-smashers/README.md +92 -0
  69. package/packs/issue-smashers/adapters/formal-conjectures/README.md +17 -0
  70. package/packs/issue-smashers/adapters/generic-github/README.md +16 -0
  71. package/packs/issue-smashers/adapters/mathlib/README.md +32 -0
  72. package/packs/issue-smashers/bootstrap/README.md +19 -0
  73. package/packs/issue-smashers/bootstrap/setup-issue-smashers.sh +18 -0
  74. package/packs/issue-smashers/examples/issue-smashers.workspace.yml +24 -0
  75. package/packs/issue-smashers/pack.yml +178 -0
  76. package/packs/issue-smashers/profiles/issue-smashers-feedback-hardening.yml.tmpl +102 -0
  77. package/packs/issue-smashers/profiles/issue-smashers.yml.tmpl +258 -0
  78. package/scripts/npm-postinstall-check.js +31 -0
  79. package/scripts/orp +11 -0
  80. package/scripts/orp-agent-integrate.sh +197 -0
  81. package/scripts/orp-checkpoint.sh +184 -0
  82. package/scripts/orp-erdos-problems-sync.py +580 -0
  83. package/scripts/orp-init.sh +50 -0
  84. package/scripts/orp-pack-fetch.py +155 -0
  85. package/scripts/orp-pack-install.py +2273 -0
  86. package/scripts/orp-pack-render.py +188 -0
  87. package/spec/v1/LIFECYCLE_MAPPING.md +40 -0
  88. package/spec/v1/orp.config.schema.json +385 -0
  89. package/spec/v1/packet.schema.json +552 -0
  90. package/spec/v1/profile-pack.schema.json +95 -0
  91. package/templates/CLAIM.md +33 -0
  92. package/templates/FAILED_TOPIC.md +19 -0
  93. package/templates/ISSUE_TEMPLATE.md +22 -0
  94. package/templates/VERIFICATION_RECORD.md +34 -0
package/cli/orp.py ADDED
@@ -0,0 +1,3595 @@
1
+ #!/usr/bin/env python3
2
+ """ORP CLI.
3
+
4
+ Public shape:
5
+ - home
6
+ - about
7
+ - discover
8
+ - collaborate
9
+ - init
10
+ - gate run
11
+ - packet emit
12
+ - erdos sync
13
+ - report summary
14
+
15
+ Advanced/internal:
16
+ - pack list
17
+ - pack install
18
+ - pack fetch
19
+
20
+ Design goals:
21
+ - local-first
22
+ - low dependency surface
23
+ - deterministic artifact layout
24
+ - built-in abilities over mode switches
25
+ """
26
+
27
+ from __future__ import annotations
28
+
29
+ import argparse
30
+ import datetime as dt
31
+ import hashlib
32
+ import json
33
+ import os
34
+ from pathlib import Path
35
+ import re
36
+ import subprocess
37
+ import sys
38
+ from typing import Any
39
+ from urllib import parse as urlparse
40
+ from urllib import request as urlrequest
41
+
42
+
43
def _now_utc() -> str:
    """Current UTC time as an ISO-8601 string with a trailing ``Z``."""
    stamp = dt.datetime.now(dt.timezone.utc).replace(microsecond=0)
    return stamp.isoformat().replace("+00:00", "Z")
45
+
46
+
47
def _run_id() -> str:
    """Timestamped unique identifier for a gate run."""
    now = dt.datetime.now(dt.timezone.utc)
    return now.strftime("run-%Y%m%d-%H%M%S-%f")
49
+
50
+
51
def _read_text(path: Path) -> str:
    """Return the UTF-8 text content of *path*."""
    with path.open(encoding="utf-8") as handle:
        return handle.read()
53
+
54
+
55
def _write_json(path: Path, data: Any) -> None:
    """Serialize *data* as pretty-printed JSON to *path*, creating parent dirs."""
    path.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(data, indent=2, sort_keys=False) + "\n"
    path.write_text(payload, encoding="utf-8")
58
+
59
+
60
def _print_json(data: Any) -> None:
    """Pretty-print *data* as JSON on stdout."""
    rendered = json.dumps(data, indent=2, sort_keys=False)
    print(rendered)
62
+
63
+
64
def _tool_version() -> str:
    """Resolve the CLI version string.

    Order: the ORP_VERSION environment variable (if non-blank), then the
    "version" field of the sibling package.json, else "unknown".
    """
    override = os.environ.get("ORP_VERSION", "").strip()
    if override:
        return override

    manifest = Path(__file__).resolve().parent.parent / "package.json"
    if not manifest.exists():
        return "unknown"
    try:
        data = json.loads(manifest.read_text(encoding="utf-8"))
    except Exception:
        # Best-effort: a broken manifest must not crash the CLI.
        return "unknown"

    version = data.get("version")
    if isinstance(version, str) and version.strip():
        return version.strip()
    return "unknown"
82
+
83
+
84
def _tool_package_name() -> str:
    """Read the npm package name from the sibling package.json, else "unknown"."""
    manifest = Path(__file__).resolve().parent.parent / "package.json"
    if not manifest.exists():
        return "unknown"
    try:
        data = json.loads(manifest.read_text(encoding="utf-8"))
    except Exception:
        # Best-effort: a broken manifest must not crash the CLI.
        return "unknown"

    name = data.get("name")
    if isinstance(name, str) and name.strip():
        return name.strip()
    return "unknown"
98
+
99
+
100
# Resolved once at import time so every command reports a consistent identity.
ORP_TOOL_VERSION = _tool_version()
ORP_PACKAGE_NAME = _tool_package_name()
# Default artifact name and scan output root used by `orp discover`.
DEFAULT_DISCOVER_PROFILE = "orp.profile.default.json"
DEFAULT_DISCOVER_SCAN_ROOT = "orp/discovery/github"
104
+
105
# Built-in collaboration workflow lanes surfaced by `orp collaborate`.
# Each entry names the rendered profile, the config file it expects, and
# the ordered gate ids that the lane runs.
COLLABORATION_WORKFLOWS: list[dict[str, Any]] = [
    {
        "id": "watch_select",
        "profile": "issue_smashers_watch_select",
        "config": "orp.issue-smashers.yml",
        "description": "Select a candidate issue lane and record why it is worth pursuing.",
        "gate_ids": ["watch_select"],
    },
    {
        "id": "pre_open",
        "profile": "issue_smashers_pre_open",
        "config": "orp.issue-smashers.yml",
        "description": "Run viability and overlap checks before implementation or public PR work.",
        "gate_ids": ["viability_gate", "overlap_gate"],
    },
    {
        "id": "local_readiness",
        "profile": "issue_smashers_local_readiness",
        "config": "orp.issue-smashers.yml",
        "description": "Run local verification, freeze same-head readiness, and preflight PR text.",
        "gate_ids": ["local_gate", "ready_to_draft", "pr_body_preflight"],
    },
    {
        "id": "draft_transition",
        "profile": "issue_smashers_draft_transition",
        "config": "orp.issue-smashers.yml",
        "description": "Open or update the draft PR after readiness passes.",
        "gate_ids": ["draft_pr_transition"],
    },
    {
        "id": "draft_lifecycle",
        "profile": "issue_smashers_draft_lifecycle",
        "config": "orp.issue-smashers.yml",
        "description": "Check draft CI and ready-for-review status.",
        "gate_ids": ["draft_ci", "ready_for_review"],
    },
    {
        # Aggregate lane: runs every gate of the lanes above, in order.
        "id": "full_flow",
        "profile": "issue_smashers_full_flow",
        "config": "orp.issue-smashers.yml",
        "description": "End-to-end collaboration lifecycle from watch/select through ready-for-review.",
        "gate_ids": [
            "watch_select",
            "viability_gate",
            "overlap_gate",
            "local_gate",
            "ready_to_draft",
            "pr_body_preflight",
            "draft_pr_transition",
            "draft_ci",
            "ready_for_review",
        ],
    },
    {
        # Note: this lane uses a different config file than the others.
        "id": "feedback_hardening",
        "profile": "issue_smashers_feedback_hardening",
        "config": "orp.issue-smashers-feedback-hardening.yml",
        "description": "Turn maintainer feedback into validated guards and synced docs.",
        "gate_ids": ["feedback_record", "guard_validation", "docs_sync"],
    },
]
166
+
167
+
168
def _scan_id() -> str:
    """Timestamped unique identifier for a discovery scan."""
    now = dt.datetime.now(dt.timezone.utc)
    return now.strftime("scan-%Y%m%d-%H%M%S-%f")
170
+
171
+
172
def _write_text(path: Path, text: str) -> None:
    """Write *text* to *path* as UTF-8, creating parent directories first."""
    parent = path.parent
    parent.mkdir(parents=True, exist_ok=True)
    path.write_text(text, encoding="utf-8")
175
+
176
+
177
def _coerce_string_list(raw: Any) -> list[str]:
    """Coerce *raw* into a deduped list of non-empty, stripped strings.

    Anything that is not a list yields []; non-string items are dropped.
    """
    if not isinstance(raw, list):
        return []
    kept = [item.strip() for item in raw if isinstance(item, str) and item.strip()]
    return _unique_strings(kept)
187
+
188
+
189
def _resolve_cli_path(raw: str, repo_root: Path) -> Path:
    """Interpret a CLI path argument; relative paths resolve under *repo_root*."""
    candidate = Path(raw)
    if candidate.is_absolute():
        return candidate
    return (repo_root / candidate).resolve()
194
+
195
+
196
def _discover_profile_template(
    *,
    profile_id: str,
    owner: str,
    owner_type: str,
    keywords: list[str],
    topics: list[str],
    languages: list[str],
    areas: list[str],
    people: list[str],
) -> dict[str, Any]:
    """Build a fresh GitHub discovery-profile document.

    A blank *owner* falls back to the "YOUR_GITHUB_OWNER" placeholder so
    the emitted template is self-describing when users edit it by hand.
    The numeric ranking defaults here mirror those enforced later by
    `_normalize_discover_profile`.
    """
    owner_value = owner.strip() or "YOUR_GITHUB_OWNER"
    return {
        "schema_version": "1.0.0",
        "profile_id": profile_id,
        "notes": [
            "ORP owns the portable discovery profile format and scan artifacts.",
            "If Coda exists later, it can manage active profile selection on top of ORP.",
            "Discovery outputs are process-only recommendations, not evidence.",
        ],
        "discover": {
            "github": {
                "owner": {
                    "login": owner_value,
                    "type": owner_type,
                },
                "signals": {
                    "keywords": keywords,
                    "repo_topics": topics,
                    "languages": languages,
                    "areas": areas,
                    "people": people,
                },
                "filters": {
                    "include_repos": [],
                    "exclude_repos": [],
                    "issue_states": ["open"],
                    "labels_any": [],
                    "exclude_labels": [],
                    "updated_within_days": 180,
                },
                "ranking": {
                    "repo_sample_size": 30,
                    "max_repos": 12,
                    "max_issues": 24,
                    "max_people": 12,
                    "issues_per_repo": 30,
                },
            }
        },
    }
247
+
248
+
249
def _normalize_discover_profile(raw: dict[str, Any]) -> dict[str, Any]:
    """Coerce a raw discovery-profile document into the canonical shape.

    Every section is defensively re-typed: non-dict sections collapse to
    empty dicts, list fields go through `_coerce_string_list`, and numeric
    knobs fall back to their documented defaults when missing, unparsable,
    or non-positive.
    """
    discover = raw.get("discover")
    github = discover.get("github") if isinstance(discover, dict) else {}
    github = github if isinstance(github, dict) else {}
    owner = github.get("owner")
    owner = owner if isinstance(owner, dict) else {}
    signals = github.get("signals")
    signals = signals if isinstance(signals, dict) else {}
    filters = github.get("filters")
    filters = filters if isinstance(filters, dict) else {}
    ranking = github.get("ranking")
    ranking = ranking if isinstance(ranking, dict) else {}
    # Owner type is constrained to a known set; anything else becomes "auto".
    owner_type = str(owner.get("type", "auto")).strip().lower() or "auto"
    if owner_type not in {"auto", "user", "org"}:
        owner_type = "auto"

    def _positive_int(value: Any, default: int) -> int:
        # Accept anything int()-coercible; reject non-positive values.
        try:
            out = int(value)
        except Exception:
            return default
        return out if out > 0 else default

    issue_states = [state for state in _coerce_string_list(filters.get("issue_states")) if state in {"open", "closed", "all"}]
    if not issue_states:
        issue_states = ["open"]

    return {
        "schema_version": str(raw.get("schema_version", "1.0.0")).strip() or "1.0.0",
        "profile_id": str(raw.get("profile_id", "default")).strip() or "default",
        "notes": _coerce_string_list(raw.get("notes")),
        "github": {
            "owner": {
                "login": str(owner.get("login", "")).strip(),
                "type": owner_type,
            },
            "signals": {
                "keywords": _coerce_string_list(signals.get("keywords")),
                "repo_topics": _coerce_string_list(signals.get("repo_topics")),
                "languages": _coerce_string_list(signals.get("languages")),
                "areas": _coerce_string_list(signals.get("areas")),
                "people": _coerce_string_list(signals.get("people")),
            },
            "filters": {
                "include_repos": _coerce_string_list(filters.get("include_repos")),
                "exclude_repos": _coerce_string_list(filters.get("exclude_repos")),
                "issue_states": issue_states,
                "labels_any": _coerce_string_list(filters.get("labels_any")),
                "exclude_labels": _coerce_string_list(filters.get("exclude_labels")),
                "updated_within_days": _positive_int(filters.get("updated_within_days", 180), 180),
            },
            "ranking": {
                "repo_sample_size": _positive_int(ranking.get("repo_sample_size", 30), 30),
                "max_repos": _positive_int(ranking.get("max_repos", 12), 12),
                "max_issues": _positive_int(ranking.get("max_issues", 24), 24),
                "max_people": _positive_int(ranking.get("max_people", 12), 12),
                "issues_per_repo": _positive_int(ranking.get("issues_per_repo", 30), 30),
            },
        },
    }
308
+
309
+
310
def _github_token_context() -> dict[str, str]:
    """Locate a GitHub token, preferring GITHUB_TOKEN over GH_TOKEN.

    Returns {"token", "source"}; source is "anonymous" when none is set.
    """
    for candidate in ("GITHUB_TOKEN", "GH_TOKEN"):
        value = os.environ.get(candidate, "").strip()
        if value:
            return {"token": value, "source": candidate}
    return {"token": "", "source": "anonymous"}
316
+
317
+
318
def _github_headers(token: str) -> dict[str, str]:
    """Standard GitHub REST headers; adds bearer auth when *token* is non-empty."""
    base = {
        "Accept": "application/vnd.github+json",
        "User-Agent": "ORP-Discover/1.0",
        "X-GitHub-Api-Version": "2022-11-28",
    }
    if not token:
        return base
    return {**base, "Authorization": f"Bearer {token}"}
327
+
328
+
329
def _http_json_get(url: str, headers: dict[str, str]) -> Any:
    """GET *url* with *headers* and decode the JSON response body.

    Uses a 30s timeout; raises on network or decode failure.
    """
    req = urlrequest.Request(url, headers=headers)
    with urlrequest.urlopen(req, timeout=30) as response:
        body = response.read()
    return json.loads(body.decode("utf-8"))
334
+
335
+
336
def _github_detect_owner_type(owner_login: str, headers: dict[str, str]) -> str:
    """Classify a GitHub login as "org" or "user" via the users API.

    Defaults to "user" on any unexpected payload shape.
    """
    url = f"https://api.github.com/users/{urlparse.quote(owner_login)}"
    payload = _http_json_get(url, headers)
    kind = ""
    if isinstance(payload, dict):
        kind = str(payload.get("type", "")).strip().lower()
    return "org" if kind == "organization" else "user"
345
+
346
+
347
def _github_list_repos(owner_login: str, owner_type: str, limit: int, headers: dict[str, str]) -> list[dict[str, Any]]:
    """Page through an owner's repos, most recently updated first.

    Uses the org endpoint (public repos) for "org" owners and the user
    endpoint (owned repos) otherwise.  Stops at *limit* repos or when a
    short/empty page signals the end of results.
    """
    repos: list[dict[str, Any]] = []
    page = 1
    while len(repos) < limit:
        # per_page is capped at GitHub's maximum of 100.
        params = {"per_page": min(100, max(limit, 1)), "page": page, "sort": "updated"}
        if owner_type == "org":
            params["type"] = "public"
            url = f"https://api.github.com/orgs/{urlparse.quote(owner_login)}/repos?{urlparse.urlencode(params)}"
        else:
            params["type"] = "owner"
            url = f"https://api.github.com/users/{urlparse.quote(owner_login)}/repos?{urlparse.urlencode(params)}"
        payload = _http_json_get(url, headers)
        if not isinstance(payload, list) or not payload:
            break
        repos.extend([row for row in payload if isinstance(row, dict)])
        if len(payload) < params["per_page"]:
            # Short page: no further results exist.
            break
        page += 1
    return repos[:limit]
366
+
367
+
368
def _github_list_issues(owner_login: str, repo_name: str, states: list[str], per_repo_limit: int, headers: dict[str, str]) -> list[dict[str, Any]]:
    """List up to *per_repo_limit* issues for one repo, newest-updated first.

    Pull requests are filtered out (the GitHub issues endpoint returns
    both).  The API accepts only a single ``state`` query value, so the
    requested states are collapsed to the narrowest value that covers
    them.  Fix: a request for ``["open", "closed"]`` now maps to ``all``;
    previously it mapped to ``open`` and silently dropped closed issues.
    """
    requested = set(states)
    if "all" in requested or {"open", "closed"} <= requested:
        state = "all"
    elif requested == {"closed"}:
        state = "closed"
    else:
        state = "open"
    issues: list[dict[str, Any]] = []
    page = 1
    while len(issues) < per_repo_limit:
        params = {
            "state": state,
            "per_page": min(100, max(per_repo_limit, 1)),
            "page": page,
            "sort": "updated",
            "direction": "desc",
        }
        url = (
            f"https://api.github.com/repos/{urlparse.quote(owner_login)}/{urlparse.quote(repo_name)}"
            f"/issues?{urlparse.urlencode(params)}"
        )
        payload = _http_json_get(url, headers)
        if not isinstance(payload, list) or not payload:
            break
        # Rows carrying a "pull_request" key are PRs, not issues.
        cleaned = [row for row in payload if isinstance(row, dict) and "pull_request" not in row]
        issues.extend(cleaned)
        if len(payload) < params["per_page"]:
            # Short page: no further results exist.
            break
        page += 1
    return issues[:per_repo_limit]
393
+
394
+
395
+ def _days_since_iso(iso_text: str) -> int | None:
396
+ text = iso_text.strip()
397
+ if not text:
398
+ return None
399
+ try:
400
+ stamp = dt.datetime.fromisoformat(text.replace("Z", "+00:00"))
401
+ except Exception:
402
+ return None
403
+ now = dt.datetime.now(dt.timezone.utc)
404
+ delta = now - stamp.astimezone(dt.timezone.utc)
405
+ return max(0, int(delta.total_seconds() // 86400))
406
+
407
+
408
def _recency_sort_key(iso_text: str) -> int:
    """Sort key: days since the timestamp; unparsable values sort last."""
    days = _days_since_iso(iso_text)
    return 10**9 if days is None else days
413
+
414
+
415
def _text_contains_any(text: str, needles: list[str]) -> list[str]:
    """Return the needles whose lowercased form occurs in *text* (deduped).

    Matches case-insensitively but reports the original needle strings.
    """
    haystack = text.lower()
    hits: list[str] = []
    for candidate in needles:
        probe = candidate.strip().lower()
        if probe and probe in haystack:
            hits.append(candidate)
    return _unique_strings(hits)
423
+
424
+
425
def _score_repo(repo: dict[str, Any], profile: dict[str, Any]) -> tuple[int, list[str]]:
    """Score one repository against the profile's discovery signals.

    Returns (score, reasons).  A score of -1 marks an excluded repo and 0
    marks one filtered out by a non-matching include list.  Weights:
    include-list hit +100, matching topic +8 each, language +6,
    keyword +5 each, area +3 each, recent activity +2, with a floor of 1
    ("baseline_repo") so unmatched repos are never dropped entirely.
    """
    github = profile["github"]
    signals = github["signals"]
    filters = github["filters"]
    repo_name = str(repo.get("name", "")).strip()
    full_name = str(repo.get("full_name", "")).strip()
    description = str(repo.get("description", "") or "").strip()
    language = str(repo.get("language", "") or "").strip()
    topics = [str(item).strip() for item in repo.get("topics", []) if isinstance(item, str)]
    # One lowercase haystack covering every text field matched against.
    searchable = " ".join([repo_name, full_name, description, language, " ".join(topics)]).lower()
    reasons: list[str] = []
    score = 0

    if repo_name in filters["exclude_repos"] or full_name in filters["exclude_repos"]:
        return (-1, ["excluded_repo"])
    if filters["include_repos"] and repo_name not in filters["include_repos"] and full_name not in filters["include_repos"]:
        return (0, ["not_included"])
    if repo_name in filters["include_repos"] or full_name in filters["include_repos"]:
        score += 100
        reasons.append("include_repo")

    keyword_matches = _text_contains_any(searchable, signals["keywords"])
    score += 5 * len(keyword_matches)
    reasons.extend([f"keyword:{item}" for item in keyword_matches])

    area_matches = _text_contains_any(searchable, signals["areas"])
    score += 3 * len(area_matches)
    reasons.extend([f"area:{item}" for item in area_matches])

    if language and language in signals["languages"]:
        score += 6
        reasons.append(f"language:{language}")

    # Topic matching is case-insensitive but reports the original casing.
    topic_set = {topic.lower(): topic for topic in topics}
    for raw in signals["repo_topics"]:
        key = raw.lower()
        if key in topic_set:
            score += 8
            reasons.append(f"topic:{topic_set[key]}")

    updated_days = _days_since_iso(str(repo.get("updated_at", "") or ""))
    if updated_days is not None and updated_days <= int(github["filters"]["updated_within_days"]):
        score += 2
        reasons.append("recent_repo_activity")

    if score == 0:
        score = 1
        reasons.append("baseline_repo")
    return (score, _unique_strings(reasons))
474
+
475
+
476
def _score_issue(issue: dict[str, Any], repo_row: dict[str, Any], profile: dict[str, Any]) -> tuple[int, list[str]]:
    """Score one issue, seeded with its repository's score.

    Returns (score, reasons); -1 marks an issue excluded by label.
    Weights: keyword +6 each, area +5 each, matching label +4 each,
    matching person (assignee or author) +4 each, recent activity +1.
    """
    github = profile["github"]
    signals = github["signals"]
    filters = github["filters"]
    title = str(issue.get("title", "") or "").strip()
    body = str(issue.get("body", "") or "").strip()
    labels = [str(row.get("name", "")).strip() for row in issue.get("labels", []) if isinstance(row, dict)]
    assignees = [str(row.get("login", "")).strip() for row in issue.get("assignees", []) if isinstance(row, dict)]
    author = str((issue.get("user") or {}).get("login", "")).strip() if isinstance(issue.get("user"), dict) else ""
    searchable = " ".join([title, body, " ".join(labels)]).lower()
    reasons = [f"repo:{repo_row['full_name']}"]
    # Issues inherit their repository's relevance as the starting score.
    score = int(repo_row["score"])

    if any(label in filters["exclude_labels"] for label in labels):
        return (-1, ["excluded_label"])

    keyword_matches = _text_contains_any(searchable, signals["keywords"])
    score += 6 * len(keyword_matches)
    reasons.extend([f"keyword:{item}" for item in keyword_matches])

    area_matches = _text_contains_any(searchable, signals["areas"])
    score += 5 * len(area_matches)
    reasons.extend([f"area:{item}" for item in area_matches])

    label_matches = [label for label in labels if label in filters["labels_any"]]
    score += 4 * len(label_matches)
    reasons.extend([f"label:{item}" for item in label_matches])

    people_matches = [person for person in signals["people"] if person in assignees or person == author]
    score += 4 * len(people_matches)
    reasons.extend([f"person:{item}" for item in people_matches])

    updated_days = _days_since_iso(str(issue.get("updated_at", "") or ""))
    if updated_days is not None and updated_days <= int(filters["updated_within_days"]):
        score += 1
        reasons.append("recent_issue_activity")

    return (score, _unique_strings(reasons))
514
+
515
+
516
def _load_fixture_json(path: Path) -> Any:
    """Parse a JSON fixture file (UTF-8)."""
    with path.open(encoding="utf-8") as handle:
        return json.load(handle)
518
+
519
+
520
def _render_discover_scan_summary(payload: dict[str, Any]) -> str:
    """Render the Markdown summary document for one discovery scan payload.

    Top lists are truncated (10 repos, 12 issues, 10 people) and the
    reason/people columns are capped to keep the tables readable.
    Assumes *payload* carries the keys produced by the scan pipeline
    (scan_id, owner, profile, auth, counts, repos, issues, people).
    """
    lines: list[str] = []
    lines.append(f"# ORP GitHub Discovery Scan `{payload['scan_id']}`")
    lines.append("")
    lines.append("## Headline")
    lines.append("")
    lines.append(f"- owner: `{payload['owner']['login']}`")
    lines.append(f"- owner_type: `{payload['owner']['type']}`")
    lines.append(f"- profile_id: `{payload['profile']['profile_id']}`")
    lines.append(f"- auth: `{payload['auth']['source']}`")
    lines.append(f"- repos_considered: `{payload['counts']['repos_considered']}`")
    lines.append(f"- issues_considered: `{payload['counts']['issues_considered']}`")
    lines.append("")
    lines.append("## Top Repo Matches")
    lines.append("")
    lines.append("| Repo | Score | Why |")
    lines.append("|---|---:|---|")
    for row in payload.get("repos", [])[:10]:
        reasons = ", ".join(row.get("reasons", [])[:4]) or "baseline_repo"
        lines.append(f"| `{row['full_name']}` | {row['score']} | {reasons} |")
    lines.append("")
    lines.append("## Top Issue Matches")
    lines.append("")
    lines.append("| Issue | Repo | Score | People | Why |")
    lines.append("|---|---|---:|---|---|")
    for row in payload.get("issues", [])[:12]:
        people = ", ".join(row.get("people", [])[:3]) or "-"
        reasons = ", ".join(row.get("reasons", [])[:4]) or "repo_match"
        lines.append(
            f"| `#{row['number']} {row['title']}` | `{row['repo']}` | {row['score']} | {people} | {reasons} |"
        )
    lines.append("")
    lines.append("## Active People Signals")
    lines.append("")
    lines.append("| Login | Score | Issue Count | Repos |")
    lines.append("|---|---:|---:|---|")
    for row in payload.get("people", [])[:10]:
        repos = ", ".join(row.get("repos", [])[:3]) or "-"
        lines.append(f"| `{row['login']}` | {row['score']} | {row['matched_issue_count']} | {repos} |")
    lines.append("")
    lines.append("## Notes")
    lines.append("")
    lines.append("- Discovery scans are recommendation artifacts, not evidence.")
    lines.append("- GitHub public issue metadata shows authors, assignees, labels, and recency, but not full code ownership.")
    if payload.get("repos"):
        # Suggest the top-ranked repo as the collaboration handoff target.
        top_repo = payload["repos"][0]["full_name"]
        lines.append(f"- Suggested handoff: `orp collaborate init --github-repo {top_repo}`")
    return "\n".join(lines) + "\n"
568
+
569
+
570
def _orp_repo_root() -> Path:
    """Package root: the directory two levels above this module."""
    return Path(__file__).resolve().parents[1]
572
+
573
+
574
def _path_for_state(path: Path, repo_root: Path) -> str:
    """Render *path* relative to *repo_root* when possible, else as given."""
    try:
        relative = path.relative_to(repo_root)
    except Exception:
        return str(path)
    return str(relative)
579
+
580
+
581
def _load_config(path: Path) -> dict[str, Any]:
    """Load an ORP config document from JSON or YAML.

    YAML support requires PyYAML; raises RuntimeError when it is missing
    or when the document root is not a mapping.
    """
    text = _read_text(path)
    if path.suffix.lower() == ".json":
        return json.loads(text)

    # Anything else is treated as YAML (orp.yml / orp.yaml).
    try:
        import yaml  # type: ignore
    except Exception as exc:
        raise RuntimeError(
            "YAML config requires PyYAML. Install it or provide JSON config."
        ) from exc
    document = yaml.safe_load(text)
    if not isinstance(document, dict):
        raise RuntimeError(f"Config root must be an object: {path}")
    return document
597
+
598
+
599
def _read_json(path: Path) -> dict[str, Any]:
    """Parse the JSON document at *path* (assumed to be an object)."""
    return json.loads(_read_text(path))
601
+
602
+
603
def _ensure_dirs(repo_root: Path) -> None:
    """Create the orp/ workspace layout and seed state.json when absent."""
    workspace = repo_root / "orp"
    for subdir in (workspace / "packets", workspace / "artifacts", workspace / "discovery" / "github"):
        subdir.mkdir(parents=True, exist_ok=True)
    state_path = workspace / "state.json"
    if state_path.exists():
        return
    # Initial empty state document; keys mirror what the commands update.
    _write_json(
        state_path,
        {
            "last_run_id": "",
            "last_packet_id": "",
            "runs": {},
            "last_erdos_sync": {},
            "last_discover_scan_id": "",
            "discovery_scans": {},
        },
    )
620
+
621
+
622
def _replace_vars(s: str, values: dict[str, str]) -> str:
    """Substitute every ``{key}`` placeholder in *s* using plain replacement."""
    result = s
    for name, replacement in values.items():
        result = result.replace("{" + name + "}", replacement)
    return result
627
+
628
+
629
def _sha256_text(text: str) -> str:
    """Prefixed hex SHA-256 digest of *text* encoded as UTF-8."""
    digest = hashlib.sha256(text.encode("utf-8")).hexdigest()
    return "sha256:" + digest
633
+
634
+
635
def _unique_strings(values: list[str]) -> list[str]:
    """Strip, drop empties, and dedupe while preserving first-seen order."""
    seen: dict[str, None] = {}
    for raw in values:
        cleaned = str(raw).strip()
        if cleaned:
            # dict preserves insertion order; duplicates are ignored.
            seen.setdefault(cleaned, None)
    return list(seen)
645
+
646
+
647
def _resolve_config_paths(raw_paths: Any, repo_root: Path, vars_map: dict[str, str]) -> list[str]:
    """Expand template vars in config path entries and normalize them.

    Non-list input yields []; non-string entries are skipped.  Each path
    is rendered relative to *repo_root* when possible, then deduped.
    """
    if not isinstance(raw_paths, list):
        return []
    resolved: list[str] = []
    for entry in raw_paths:
        if not isinstance(entry, str):
            continue
        candidate = Path(_replace_vars(entry, vars_map))
        if not candidate.is_absolute():
            candidate = repo_root / candidate
        resolved.append(_path_for_state(candidate, repo_root))
    return _unique_strings(resolved)
659
+
660
+
661
+ def _eval_rule(text: str, must_contain: list[str] | None, must_not_contain: list[str] | None) -> tuple[bool, list[str]]:
662
+ issues: list[str] = []
663
+ if must_contain:
664
+ for needle in must_contain:
665
+ if needle not in text:
666
+ issues.append(f"missing required substring: {needle}")
667
+ if must_not_contain:
668
+ for needle in must_not_contain:
669
+ if needle in text:
670
+ issues.append(f"forbidden substring present: {needle}")
671
+ return (len(issues) == 0, issues)
672
+
673
+
674
def _collect_atomic_context(config: dict[str, Any], repo_root: Path, run: dict[str, Any] | None = None) -> dict[str, Any] | None:
    """Best-effort context about the atomic board referenced by *config*.

    Returns None when the ``atomic_board`` section is disabled, missing,
    or the board file is absent/unreadable — callers treat None as
    "no atomic context".  When *run* is provided, ready-queue details are
    scraped from the stdout of a "ready"-style gate in its results.
    """
    board_cfg = config.get("atomic_board")
    if not isinstance(board_cfg, dict) or not board_cfg.get("enabled"):
        return None

    board_path = board_cfg.get("board_path")
    if not isinstance(board_path, str):
        return None
    full = repo_root / board_path
    if not full.exists():
        return None

    try:
        board = _read_json(full)
    except Exception:
        # Unreadable board files degrade to "no context", not an error.
        return None

    route_status: dict[str, Any] = {}
    live = {}
    if isinstance(board, dict):
        candidate_live = board.get("live_snapshot", board.get("live", {}))
        if isinstance(candidate_live, dict):
            live = candidate_live
    route_rows = []
    if isinstance(live, dict):
        # Some boards store this as "routes", others as "route_status".
        route_rows = live.get("route_status", live.get("routes", []))
    if not route_rows and isinstance(board, dict):
        direct_rows = board.get("route_status", [])
        if isinstance(direct_rows, list):
            route_rows = direct_rows
    if isinstance(route_rows, list):
        for row in route_rows:
            if not isinstance(row, dict):
                continue
            route_name = str(row.get("route", "")).strip()
            if not route_name:
                continue
            route_status[route_name] = {
                "done": int(row.get("loose_done", 0)),
                "total": int(row.get("loose_total", 0)),
                "strict_done": int(row.get("strict_done", 0)),
                "strict_total": int(row.get("strict_total", 0)),
            }

    ticket_id = ""
    gate_id = ""
    atom_id = ""
    deps: list[str] = []
    ready_queue_size = 0

    # Best-effort extraction from run gate logs (typically a "*ready*" gate).
    if isinstance(run, dict):
        results = run.get("results", [])
        if isinstance(results, list):
            for gate_res in results:
                if not isinstance(gate_res, dict):
                    continue
                gid = str(gate_res.get("gate_id", ""))
                cmd = str(gate_res.get("command", ""))
                if "ready" not in gid.lower() and " ready" not in cmd and ".py ready" not in cmd:
                    continue
                stdout_rel = gate_res.get("stdout_path")
                if not isinstance(stdout_rel, str) or not stdout_rel:
                    continue
                stdout_path = repo_root / stdout_rel
                if not stdout_path.exists():
                    continue
                content = stdout_path.read_text(encoding="utf-8")
                m_count = re.search(r"^ready_atoms=(\d+)$", content, flags=re.MULTILINE)
                if m_count:
                    ready_queue_size = int(m_count.group(1))
                m_row = re.search(
                    r"^(?:atom|ready)=(?P<atom>\S+).*ticket=(?P<ticket>\S+)\s+gate=(?P<gate>\S+).*deps=(?P<deps>\S+)",
                    content,
                    flags=re.MULTILINE,
                )
                if m_row:
                    atom_id = m_row.group("atom")
                    ticket_id = m_row.group("ticket")
                    gate_id = m_row.group("gate")
                    dep_text = m_row.group("deps")
                    # "root" marks an atom with no dependencies.
                    if dep_text and dep_text != "root":
                        deps = [x for x in dep_text.split(",") if x]
                # First matching ready gate wins.
                break

    return {
        "board_id": str(board.get("board_id", "")),
        "problem_id": str(board.get("problem_id", "")),
        "ticket_id": ticket_id,
        "gate_id": gate_id,
        "atom_id": atom_id,
        "dependencies": deps,
        "ready_queue_size": ready_queue_size,
        "board_snapshot_path": board_path,
        "route_status": route_status,
        "starter_scaffold": bool(board.get("starter_scaffold", False)),
        "starter_note": str(board.get("starter_note", "")),
    }
773
+
774
+
775
def _collect_claim_context(
    config: dict[str, Any],
    run: dict[str, Any],
    evidence_paths: list[str],
) -> dict[str, Any]:
    """Build the claim context for a run.

    The claim id is "<project name>:<profile>" when a project name is
    configured, otherwise just the profile (defaulting to "default").
    Canonical artifacts combine every string evidence path found in the
    run's gate results with *evidence_paths*, deduplicated.
    """
    project_cfg = config.get("project")
    name = str(project_cfg.get("name", "")).strip() if isinstance(project_cfg, dict) else ""
    run_profile = str(run.get("profile", "")).strip() or "default"

    gathered: list[str] = []
    rows = run.get("results", [])
    for entry in rows if isinstance(rows, list) else []:
        if not isinstance(entry, dict):
            continue
        paths = entry.get("evidence_paths", [])
        if isinstance(paths, list):
            gathered.extend(p for p in paths if isinstance(p, str))
    gathered.extend(evidence_paths)

    return {
        "claim_id": f"{name}:{run_profile}" if name else run_profile,
        "canonical_artifacts": _unique_strings(gathered),
    }
803
+
804
+
805
+ def _config_epistemic_status(
806
+ config: dict[str, Any], repo_root: Path, vars_map: dict[str, str]
807
+ ) -> dict[str, Any]:
808
+ raw = config.get("epistemic_status")
809
+ if not isinstance(raw, dict):
810
+ return {
811
+ "overall": "",
812
+ "starter_scaffold": False,
813
+ "strongest_evidence_paths": [],
814
+ "notes": [],
815
+ }
816
+ notes = [str(x) for x in raw.get("notes", []) if isinstance(x, str)]
817
+ return {
818
+ "overall": str(raw.get("overall", "")).strip(),
819
+ "starter_scaffold": bool(raw.get("starter_scaffold", False)),
820
+ "include_last_erdos_sync": bool(raw.get("include_last_erdos_sync", False)),
821
+ "strongest_evidence_paths": _resolve_config_paths(
822
+ raw.get("strongest_evidence_paths", []), repo_root, vars_map
823
+ ),
824
+ "notes": notes,
825
+ }
826
+
827
+
828
def _last_erdos_sync_evidence_paths(state: dict[str, Any], repo_root: Path) -> list[str]:
    """Collect evidence file paths recorded by the last erdos sync in *state*.

    Looks at the fixed ``out_*`` keys plus each ``selected[].out`` entry,
    resolves relative paths against *repo_root*, and returns the deduplicated
    state-relative forms.
    """
    sync = state.get("last_erdos_sync")
    if not isinstance(sync, dict):
        return []

    collected: list[str] = []

    def _add(raw_value: Any) -> None:
        # Normalize one candidate path string into repo-relative state form.
        if isinstance(raw_value, str) and raw_value.strip():
            candidate = Path(raw_value.strip())
            resolved = candidate if candidate.is_absolute() else repo_root / candidate
            collected.append(_path_for_state(resolved, repo_root))

    for key in ["out_all", "out_open", "out_closed", "out_active", "out_open_list"]:
        _add(sync.get(key))

    rows = sync.get("selected", [])
    if isinstance(rows, list):
        for row in rows:
            if isinstance(row, dict):
                _add(row.get("out"))

    return _unique_strings(collected)
852
+
853
+
854
def _derive_epistemic_status(
    config: dict[str, Any],
    run_results: list[dict[str, Any]],
    state: dict[str, Any],
    repo_root: Path,
    vars_map: dict[str, str],
) -> dict[str, Any]:
    """Merge declared and observed epistemic status into one summary.

    Starts from the config-declared block (``_config_epistemic_status``), then
    classifies each gate result by its ``evidence_status`` field into
    stub / starter-scaffold / evidence / process-only buckets.  Evidence gates
    contribute their ``evidence_paths`` to the strongest-evidence list; any
    non-empty ``evidence_note`` is appended to the notes.

    The overall label prefers the explicitly declared value; otherwise it is
    derived: any stub/starter gate -> "starter_scaffold", any strongest
    evidence path -> "evidence_backed", else "process_only".
    """
    declared = _config_epistemic_status(config, repo_root, vars_map)
    stub_gates: list[str] = []
    starter_gates: list[str] = []
    evidence_gates: list[str] = []
    process_only_gates: list[str] = []
    # Seed from declared values so config-provided notes/paths are kept.
    notes = list(declared.get("notes", []))
    strongest_paths = list(declared.get("strongest_evidence_paths", []))

    for row in run_results:
        if not isinstance(row, dict):
            continue
        gate_id = str(row.get("gate_id", "")).strip()
        evidence_status = str(row.get("evidence_status", "")).strip()
        if evidence_status == "starter_stub":
            stub_gates.append(gate_id)
        elif evidence_status == "starter_scaffold":
            starter_gates.append(gate_id)
        elif evidence_status == "evidence":
            evidence_gates.append(gate_id)
            # Only evidence-status gates contribute evidence paths.
            strongest_paths.extend(
                [str(x) for x in row.get("evidence_paths", []) if isinstance(x, str)]
            )
        else:
            # Anything unrecognized (including empty) counts as process-only.
            process_only_gates.append(gate_id)

        note = str(row.get("evidence_note", "")).strip()
        if note:
            notes.append(note)

    if bool(declared.get("include_last_erdos_sync", False)):
        # Optionally fold in artifacts recorded by the last erdos sync.
        strongest_paths.extend(_last_erdos_sync_evidence_paths(state, repo_root))
    strongest_paths = _unique_strings(strongest_paths)
    notes = _unique_strings(notes)

    # An explicitly declared overall wins over the derived classification.
    declared_overall = str(declared.get("overall", "")).strip()
    if declared_overall:
        overall = declared_overall
    elif stub_gates or starter_gates:
        overall = "starter_scaffold"
    elif strongest_paths:
        overall = "evidence_backed"
    else:
        overall = "process_only"

    # Scaffold flag is sticky: declared OR observed stub/starter gates.
    starter_scaffold = bool(declared.get("starter_scaffold", False) or stub_gates or starter_gates)
    return {
        "overall": overall,
        "starter_scaffold": starter_scaffold,
        "stub_gates": _unique_strings(stub_gates),
        "starter_scaffold_gates": _unique_strings(starter_gates),
        "evidence_gates": _unique_strings(evidence_gates),
        "process_only_gates": _unique_strings(process_only_gates),
        "strongest_evidence_paths": strongest_paths,
        "notes": notes,
    }
916
+
917
+
918
def _discover_packs() -> tuple[Path, list[dict[str, str]]]:
    """Enumerate locally installed packs under ``<orp repo>/packs``.

    Returns the packs root path plus one metadata row per subdirectory that
    contains a ``pack.yml``.  Unreadable or non-mapping metadata degrades to
    defaults (directory name as id, version "unknown", empty name/description).
    """
    packs_root = _orp_repo_root() / "packs"
    discovered: list[dict[str, str]] = []
    if not packs_root.exists():
        return packs_root, discovered

    for entry in sorted(packs_root.iterdir()):
        if not entry.is_dir():
            continue
        meta_path = entry / "pack.yml"
        if not meta_path.exists():
            continue
        try:
            meta = _load_config(meta_path)
        except Exception:
            meta = {}
        # Normalize once instead of re-checking the type per field.
        if not isinstance(meta, dict):
            meta = {}
        discovered.append(
            {
                "id": str(meta.get("pack_id", entry.name)),
                "version": str(meta.get("version", "unknown")),
                "name": str(meta.get("name", "")),
                "description": str(meta.get("description", "")).strip(),
                "path": str(entry),
            }
        )
    return packs_root, discovered
948
+
949
+
950
def _about_payload() -> dict[str, Any]:
    """Build the machine-readable capability surface for `orp about`.

    Assembles a static description of the tool (identity, discovery docs,
    artifact locations, schema paths, abilities, commands, and notes) and
    attaches the locally discovered packs from ``_discover_packs()``.
    """
    _, packs = _discover_packs()
    return {
        "tool": {
            "name": "orp",
            "package": ORP_PACKAGE_NAME,
            "version": ORP_TOOL_VERSION,
            "description": "Open Research Protocol CLI for agent-friendly research workflows.",
            "agent_friendly": True,
        },
        # Pointers to human/agent-facing documentation files in the package.
        "discovery": {
            "llms_txt": "llms.txt",
            "readme": "README.md",
            "protocol": "PROTOCOL.md",
            "install": "INSTALL.md",
            "agent_integration": "AGENT_INTEGRATION.md",
            "agent_loop": "docs/AGENT_LOOP.md",
            "discover": "docs/DISCOVER.md",
            "profile_packs": "docs/PROFILE_PACKS.md",
        },
        # Template paths (with <run_id>/<packet_id>/<scan_id> placeholders)
        # for the artifacts ORP writes under a repository.
        "artifacts": {
            "state_json": "orp/state.json",
            "run_json": "orp/artifacts/<run_id>/RUN.json",
            "run_summary_md": "orp/artifacts/<run_id>/RUN_SUMMARY.md",
            "packet_json": "orp/packets/<packet_id>.json",
            "packet_md": "orp/packets/<packet_id>.md",
            "discovery_scan_json": "orp/discovery/github/<scan_id>/SCAN.json",
            "discovery_scan_md": "orp/discovery/github/<scan_id>/SCAN_SUMMARY.md",
        },
        "schemas": {
            "config": "spec/v1/orp.config.schema.json",
            "packet": "spec/v1/packet.schema.json",
            "profile_pack": "spec/v1/profile-pack.schema.json",
        },
        # High-level ability groups with their CLI entrypoint argument paths.
        "abilities": [
            {
                "id": "discover",
                "description": "Profile-based GitHub discovery for repos, issues, and people signals.",
                "entrypoints": [
                    ["discover", "profile", "init"],
                    ["discover", "github", "scan"],
                ],
            },
            {
                "id": "collaborate",
                "description": "Built-in repository collaboration setup and workflow execution.",
                "entrypoints": [
                    ["collaborate", "init"],
                    ["collaborate", "workflows"],
                    ["collaborate", "gates"],
                    ["collaborate", "run"],
                ],
            },
            {
                "id": "erdos",
                "description": "Domain-specific Erdos sync and open-problem workflow support.",
                "entrypoints": [
                    ["erdos", "sync"],
                ],
            },
            {
                "id": "packs",
                "description": "Advanced/internal pack discovery and install surface.",
                "entrypoints": [
                    ["pack", "list"],
                    ["pack", "install"],
                    ["pack", "fetch"],
                ],
            },
        ],
        # Flat command registry; json_output=True means the command supports --json.
        "commands": [
            {"name": "home", "path": ["home"], "json_output": True},
            {"name": "about", "path": ["about"], "json_output": True},
            {"name": "discover_profile_init", "path": ["discover", "profile", "init"], "json_output": True},
            {"name": "discover_github_scan", "path": ["discover", "github", "scan"], "json_output": True},
            {"name": "collaborate_init", "path": ["collaborate", "init"], "json_output": True},
            {"name": "collaborate_workflows", "path": ["collaborate", "workflows"], "json_output": True},
            {"name": "collaborate_gates", "path": ["collaborate", "gates"], "json_output": True},
            {"name": "collaborate_run", "path": ["collaborate", "run"], "json_output": True},
            {"name": "init", "path": ["init"], "json_output": True},
            {"name": "gate_run", "path": ["gate", "run"], "json_output": True},
            {"name": "packet_emit", "path": ["packet", "emit"], "json_output": True},
            {"name": "erdos_sync", "path": ["erdos", "sync"], "json_output": True},
            {"name": "pack_list", "path": ["pack", "list"], "json_output": True},
            {"name": "pack_install", "path": ["pack", "install"], "json_output": True},
            {"name": "pack_fetch", "path": ["pack", "fetch"], "json_output": True},
            {"name": "report_summary", "path": ["report", "summary"], "json_output": True},
        ],
        "notes": [
            "ORP files are process-only and are not evidence.",
            "Canonical evidence lives in repo artifact paths outside ORP docs.",
            "Default CLI output is human-readable; listed commands with json_output=true also support --json.",
            "Discovery profiles in ORP are portable search-intent files; higher-level wrappers like Coda can manage active-profile selection later.",
            "Collaboration is a built-in ORP ability exposed through `orp collaborate ...`.",
        ],
        "packs": packs,
    }
1047
+
1048
+
1049
def _collaboration_workflow_map() -> dict[str, dict[str, Any]]:
    """Index the built-in collaboration workflows by their string id."""
    mapping: dict[str, dict[str, Any]] = {}
    for workflow in COLLABORATION_WORKFLOWS:
        mapping[str(workflow["id"])] = workflow
    return mapping
1051
+
1052
+
1053
def _collaboration_workflow_payload(repo_root: Path) -> dict[str, Any]:
    """Describe the built-in collaboration workflows relative to *repo_root*.

    For each workflow the config file's existence is checked on disk;
    ``workspace_ready`` reflects whether ``orp.issue-smashers.yml`` exists.
    """
    described: list[dict[str, Any]] = []
    for workflow in COLLABORATION_WORKFLOWS:
        cfg_name = str(workflow.get("config", "")).strip()
        described.append(
            {
                "id": workflow["id"],
                "profile": workflow["profile"],
                "config": cfg_name,
                "config_exists": (repo_root / cfg_name).resolve().exists(),
                "description": workflow["description"],
                "gate_ids": list(workflow["gate_ids"]),
            }
        )
    return {
        "workspace_ready": (repo_root / "orp.issue-smashers.yml").exists(),
        "recommended_init_command": "orp collaborate init",
        "workflows": described,
    }
1073
+
1074
+
1075
+ def _git_home_context(repo_root: Path) -> dict[str, Any]:
1076
+ context = {
1077
+ "present": False,
1078
+ "branch": "",
1079
+ "commit": "",
1080
+ }
1081
+ try:
1082
+ inside = subprocess.check_output(
1083
+ ["git", "rev-parse", "--is-inside-work-tree"],
1084
+ cwd=str(repo_root),
1085
+ text=True,
1086
+ stderr=subprocess.DEVNULL,
1087
+ ).strip()
1088
+ context["present"] = inside == "true"
1089
+ except Exception:
1090
+ return context
1091
+
1092
+ if not context["present"]:
1093
+ return context
1094
+
1095
+ try:
1096
+ context["branch"] = subprocess.check_output(
1097
+ ["git", "rev-parse", "--abbrev-ref", "HEAD"],
1098
+ cwd=str(repo_root),
1099
+ text=True,
1100
+ stderr=subprocess.DEVNULL,
1101
+ ).strip()
1102
+ except Exception:
1103
+ pass
1104
+ try:
1105
+ context["commit"] = subprocess.check_output(
1106
+ ["git", "rev-parse", "--short", "HEAD"],
1107
+ cwd=str(repo_root),
1108
+ text=True,
1109
+ stderr=subprocess.DEVNULL,
1110
+ ).strip()
1111
+ except Exception:
1112
+ pass
1113
+ return context
1114
+
1115
+
1116
def _home_payload(repo_root: Path, config_arg: str) -> dict[str, Any]:
    """Build the machine-readable payload for `orp home`.

    Combines the static about surface, collaboration workflow status, repo
    and git context, runtime state (``orp/state.json``), and a context-aware
    list of suggested quick actions.  *config_arg* may be absolute or
    relative to *repo_root*.
    """
    about = _about_payload()
    collaboration = _collaboration_workflow_payload(repo_root)
    config_path = Path(config_arg)
    if not config_path.is_absolute():
        config_path = repo_root / config_path
    config_path = config_path.resolve()

    state_path = repo_root / "orp" / "state.json"
    state_exists = state_path.exists()
    state: dict[str, Any] = {}
    if state_exists:
        # Unreadable state degrades to an empty dict rather than failing home.
        try:
            state = _read_json(state_path)
        except Exception:
            state = {}

    git_context = _git_home_context(repo_root)
    last_run_id = str(state.get("last_run_id", "")).strip() if isinstance(state, dict) else ""
    last_packet_id = str(state.get("last_packet_id", "")).strip() if isinstance(state, dict) else ""
    # Either a state file or a bare orp/ dir counts as "initialized".
    runtime_initialized = state_exists or (repo_root / "orp").exists()

    quick_actions = [
        {
            "label": "Scaffold a discovery profile for GitHub scanning",
            "command": "orp discover profile init --json",
        },
        {
            "label": "Initialize collaboration scaffolding here",
            "command": "orp collaborate init",
        },
        {
            "label": "Inspect collaboration workflows",
            "command": "orp collaborate workflows --json",
        },
        {
            "label": "Inspect the full collaboration gate chain",
            "command": "orp collaborate gates --workflow full_flow --json",
        },
        {
            "label": "Run the full collaboration workflow",
            "command": "orp collaborate run --workflow full_flow --json",
        },
        {
            "label": "Inspect machine-readable capability surface",
            "command": "orp about --json",
        },
    ]
    # Context-sensitive additions: init first when uninitialized; gate run
    # when a config exists; summary when there is a previous run.
    if not runtime_initialized:
        quick_actions.insert(
            0,
            {
                "label": "Initialize base ORP runtime only",
                "command": "orp init",
            },
        )
    if config_path.exists():
        quick_actions.append(
            {
                "label": "Run the default profile",
                "command": "orp gate run --profile default --json",
            }
        )
    if last_run_id:
        quick_actions.append(
            {
                "label": "Summarize the last run",
                "command": "orp report summary --json",
            }
        )

    return {
        "tool": about["tool"],
        "repo": {
            "root_path": str(repo_root),
            "config_path": _path_for_state(config_path, repo_root),
            "config_exists": config_path.exists(),
            "git": git_context,
        },
        "runtime": {
            "initialized": runtime_initialized,
            "state_path": _path_for_state(state_path, repo_root),
            "state_exists": state_exists,
            "last_run_id": last_run_id,
            "last_packet_id": last_packet_id,
        },
        "abilities": [
            {
                "id": "discover",
                "description": "Profile-based GitHub discovery for repos, issues, and people signals.",
                "entrypoints": [
                    "orp discover profile init --json",
                    f"orp discover github scan --profile {DEFAULT_DISCOVER_PROFILE} --json",
                ],
            },
            {
                "id": "collaborate",
                "description": "Built-in repository collaboration setup and workflow execution.",
                "entrypoints": [
                    "orp collaborate init",
                    "orp collaborate workflows --json",
                    "orp collaborate run --workflow full_flow --json",
                ],
            },
            {
                "id": "erdos",
                "description": "Domain-specific Erdos sync and problem workflow support.",
                "entrypoints": [
                    "orp erdos sync --json",
                ],
            },
        ],
        "collaboration": collaboration,
        "packs": about["packs"],
        "discovery": about["discovery"],
        "quick_actions": quick_actions,
        "notes": about["notes"],
    }
1234
+
1235
+
1236
+ def _truncate(text: str, *, limit: int = 76) -> str:
1237
+ clean = re.sub(r"\s+", " ", text).strip()
1238
+ if len(clean) <= limit:
1239
+ return clean
1240
+ return clean[: limit - 3].rstrip() + "..."
1241
+
1242
+
1243
def _render_home_screen(payload: dict[str, Any]) -> str:
    """Render the `orp home` payload as a human-readable text screen.

    Sections: Repo, Runtime, Abilities, Collaboration (first 3 workflows),
    Advanced Bundles (packs), Discovery doc pointers, Quick Actions, and a
    closing tip about the --json variants.  All payload access is defensive:
    missing or mistyped sections render as empty/placeholder lines.
    """
    tool = payload.get("tool", {})
    repo = payload.get("repo", {})
    runtime = payload.get("runtime", {})
    abilities = payload.get("abilities", [])
    collaboration = payload.get("collaboration", {})
    packs = payload.get("packs", [])
    discovery = payload.get("discovery", {})
    quick_actions = payload.get("quick_actions", [])

    lines: list[str] = []
    lines.append(f"ORP {tool.get('version', 'unknown')}")
    lines.append("Open Research Protocol CLI")
    lines.append("")
    lines.append("Repo")
    lines.append(f"  root:   {repo.get('root_path', '')}")
    lines.append(
        f"  config: {repo.get('config_path', '')} ({'present' if repo.get('config_exists') else 'missing'})"
    )

    git_ctx = repo.get("git", {})
    if isinstance(git_ctx, dict) and git_ctx.get("present"):
        branch = str(git_ctx.get("branch", "")).strip() or "(no branch)"
        commit = str(git_ctx.get("commit", "")).strip() or "(no commits yet)"
        lines.append(f"  git:    yes, branch={branch}, commit={commit}")
    else:
        lines.append("  git:    no")

    lines.append("")
    lines.append("Runtime")
    lines.append(
        f"  initialized: {'yes' if runtime.get('initialized') else 'no'}"
    )
    lines.append(
        f"  state: {runtime.get('state_path', '')} ({'present' if runtime.get('state_exists') else 'missing'})"
    )
    last_run_id = str(runtime.get("last_run_id", "")).strip()
    last_packet_id = str(runtime.get("last_packet_id", "")).strip()
    lines.append(f"  last_run_id: {last_run_id or '(none)'}")
    lines.append(f"  last_packet_id: {last_packet_id or '(none)'}")

    lines.append("")
    lines.append("Abilities")
    if isinstance(abilities, list) and abilities:
        for row in abilities:
            if not isinstance(row, dict):
                continue
            ability_id = str(row.get("id", "")).strip()
            desc = _truncate(str(row.get("description", "")).strip())
            lines.append(f"  - {ability_id}")
            if desc:
                lines.append(f"      {desc}")
    lines.append("")
    lines.append("Collaboration")
    lines.append(
        f"  workspace_ready: {'yes' if collaboration.get('workspace_ready') else 'no'}"
    )
    lines.append(
        f"  fastest_setup: {collaboration.get('recommended_init_command', 'orp collaborate init')}"
    )
    # Only the first three workflows are shown inline; the rest via --json.
    workflows = collaboration.get("workflows", [])
    if isinstance(workflows, list):
        for row in workflows[:3]:
            if not isinstance(row, dict):
                continue
            lines.append(
                f"  - {row.get('id', '')}: {_truncate(str(row.get('description', '')).strip(), limit=64)}"
            )
        if len(workflows) > 3:
            lines.append("  - ... run `orp collaborate workflows --json` for the full list")

    lines.append("")
    lines.append("Advanced Bundles")
    if isinstance(packs, list) and packs:
        for pack in packs:
            if not isinstance(pack, dict):
                continue
            pack_id = str(pack.get("id", "")).strip()
            version = str(pack.get("version", "")).strip()
            desc = _truncate(str(pack.get("description", "")).strip())
            title = f"  - {pack_id}"
            if version:
                title += f" ({version})"
            lines.append(title)
            if desc:
                lines.append(f"      {desc}")
    else:
        lines.append("  (no local internal bundles discovered)")

    lines.append("")
    lines.append("Discovery")
    for key in ["readme", "protocol", "agent_integration", "agent_loop", "discover", "profile_packs"]:
        value = discovery.get(key)
        if isinstance(value, str) and value:
            lines.append(f"  {key}: {value}")

    lines.append("")
    lines.append("Quick Actions")
    if isinstance(quick_actions, list):
        for row in quick_actions:
            if not isinstance(row, dict):
                continue
            label = str(row.get("label", "")).strip()
            command = str(row.get("command", "")).strip()
            if not label or not command:
                continue
            lines.append(f"  - {label}")
            lines.append(f"      {command}")

    lines.append("")
    lines.append("Tip")
    lines.append("  Run `orp home --json` or `orp about --json` for machine-readable output.")
    lines.append("")
    return "\n".join(lines)
1357
+
1358
+
1359
def _perform_github_discovery_scan(
    *,
    repo_root: Path,
    profile_path: Path,
    scan_id: str,
    repos_fixture_path: Path | None,
    issues_fixture_path: Path | None,
) -> dict[str, Any]:
    """Run one profile-driven GitHub discovery scan and persist its artifacts.

    Pipeline: load/normalize the discovery profile, list the owner's repos
    (live API or *repos_fixture_path*), score and rank them, then list and
    score issues per top repo (live API or *issues_fixture_path*), and
    aggregate per-person signal from issue authors/assignees.  Writes
    SCAN.json + SCAN_SUMMARY.md under the scan directory and records the
    scan in orp/state.json.  Returns the SCAN.json payload.

    Raises RuntimeError when the profile owner login is unset/placeholder or
    a repos fixture is not a JSON array.
    """
    _ensure_dirs(repo_root)
    raw_profile = _read_json(profile_path)
    profile = _normalize_discover_profile(raw_profile)
    owner = profile["github"]["owner"]
    owner_login = owner["login"]
    if not owner_login or owner_login == "YOUR_GITHUB_OWNER":
        raise RuntimeError("discovery profile must set discover.github.owner.login")

    token_context = _github_token_context()
    headers = _github_headers(token_context["token"])
    owner_type = owner["type"]
    # "auto" owner type is resolved via the API (user vs org).
    if owner_type == "auto":
        owner_type = _github_detect_owner_type(owner_login, headers)

    # Fixtures (when provided) replace live API calls — used for offline tests.
    repos_fixture = _load_fixture_json(repos_fixture_path) if repos_fixture_path else None
    issues_fixture = _load_fixture_json(issues_fixture_path) if issues_fixture_path else None
    repo_limit = int(profile["github"]["ranking"]["repo_sample_size"])
    if repos_fixture is not None:
        if not isinstance(repos_fixture, list):
            raise RuntimeError("repos fixture must be a JSON array")
        repos_raw = [row for row in repos_fixture if isinstance(row, dict)]
    else:
        repos_raw = _github_list_repos(
            owner_login=owner_login,
            owner_type=owner_type,
            limit=repo_limit,
            headers=headers,
        )

    # --- Repo scoring: archived repos and negative scores are dropped. ---
    ranked_repos: list[dict[str, Any]] = []
    for repo in repos_raw:
        if bool(repo.get("archived")):
            continue
        score, reasons = _score_repo(repo, profile)
        if score < 0:
            continue
        row = {
            "name": str(repo.get("name", "")).strip(),
            "full_name": str(repo.get("full_name", "")).strip(),
            "url": str(repo.get("html_url", "")).strip(),
            "description": str(repo.get("description", "") or "").strip(),
            "language": str(repo.get("language", "") or "").strip(),
            "topics": [str(item).strip() for item in repo.get("topics", []) if isinstance(item, str)],
            "score": score,
            "reasons": reasons,
            "updated_at": str(repo.get("updated_at", "") or "").strip(),
            "open_issues_count": int(repo.get("open_issues_count") or 0),
        }
        ranked_repos.append(row)

    # Sort: highest score first (negated), then recency key, then name.
    ranked_repos = sorted(
        ranked_repos,
        key=lambda row: (
            -int(row["score"]),
            _recency_sort_key(str(row["updated_at"])),
            str(row["full_name"]),
        ),
        reverse=False,
    )
    top_repos = ranked_repos[: int(profile["github"]["ranking"]["max_repos"])]

    # --- Issue scoring per top repo, plus per-person aggregation. ---
    issue_rows: list[dict[str, Any]] = []
    people_map: dict[str, dict[str, Any]] = {}
    issues_per_repo = int(profile["github"]["ranking"]["issues_per_repo"])
    for repo_row in top_repos:
        repo_full_name = str(repo_row["full_name"])
        repo_name = str(repo_row["name"])
        if not repo_full_name or not repo_name:
            continue
        if isinstance(issues_fixture, dict):
            # Issues fixture is keyed by repo full_name.
            issues_raw = issues_fixture.get(repo_full_name, [])
            if not isinstance(issues_raw, list):
                issues_raw = []
        else:
            issues_raw = _github_list_issues(
                owner_login=owner_login,
                repo_name=repo_name,
                states=profile["github"]["filters"]["issue_states"],
                per_repo_limit=issues_per_repo,
                headers=headers,
            )
        for issue in issues_raw:
            if not isinstance(issue, dict):
                continue
            score, reasons = _score_issue(issue, repo_row, profile)
            if score < 0:
                continue
            labels = [str(row.get("name", "")).strip() for row in issue.get("labels", []) if isinstance(row, dict)]
            assignees = [str(row.get("login", "")).strip() for row in issue.get("assignees", []) if isinstance(row, dict)]
            author = str((issue.get("user") or {}).get("login", "")).strip() if isinstance(issue.get("user"), dict) else ""
            people = _unique_strings([author, *assignees])
            issue_row = {
                "repo": repo_full_name,
                "number": int(issue.get("number") or 0),
                "title": str(issue.get("title", "") or "").strip(),
                "url": str(issue.get("html_url", "")).strip(),
                "state": str(issue.get("state", "") or "").strip(),
                "labels": labels,
                "assignees": assignees,
                "author": author,
                "people": people,
                "updated_at": str(issue.get("updated_at", "") or "").strip(),
                "score": score,
                "reasons": reasons,
            }
            issue_rows.append(issue_row)
            # Every person on a matched issue accumulates its score.
            for login in people:
                if not login:
                    continue
                person = people_map.setdefault(
                    login,
                    {
                        "login": login,
                        "score": 0,
                        "matched_issue_count": 0,
                        "repos": set(),
                    },
                )
                person["score"] += score
                person["matched_issue_count"] += 1
                person["repos"].add(repo_full_name)

    issue_rows = sorted(
        issue_rows,
        key=lambda row: (
            -int(row["score"]),
            _recency_sort_key(str(row["updated_at"])),
            str(row["repo"]),
            int(row["number"]),
        ),
        reverse=False,
    )
    top_issues = issue_rows[: int(profile["github"]["ranking"]["max_issues"])]

    # Materialize people rows (repo sets become sorted lists) and rank them.
    people_rows: list[dict[str, Any]] = []
    for person in people_map.values():
        people_rows.append(
            {
                "login": str(person["login"]),
                "score": int(person["score"]),
                "matched_issue_count": int(person["matched_issue_count"]),
                "repos": sorted(str(repo) for repo in person["repos"]),
            }
        )
    people_rows = sorted(
        people_rows,
        key=lambda row: (-int(row["score"]), -int(row["matched_issue_count"]), str(row["login"])),
        reverse=False,
    )[: int(profile["github"]["ranking"]["max_people"])]

    # --- Persist scan artifacts and update state. ---
    out_root = repo_root / DEFAULT_DISCOVER_SCAN_ROOT / scan_id
    scan_json = out_root / "SCAN.json"
    summary_md = out_root / "SCAN_SUMMARY.md"
    payload = {
        "scan_id": scan_id,
        "generated_at_utc": _now_utc(),
        "profile": {
            "path": _path_for_state(profile_path, repo_root),
            "profile_id": profile["profile_id"],
        },
        "owner": {
            "login": owner_login,
            "type": owner_type,
        },
        "auth": {
            "source": token_context["source"],
            "authenticated": bool(token_context["token"]),
        },
        "counts": {
            "repos_fetched": len(repos_raw),
            "repos_considered": len(top_repos),
            "issues_considered": len(top_issues),
            "people_considered": len(people_rows),
        },
        "repos": top_repos,
        "issues": top_issues,
        "people": people_rows,
        "notes": [
            "Discovery scan output is process-only recommendation data.",
            "Use the top repo/issue matches to choose where collaboration should start.",
            "Coda can manage active profile selection later, but ORP owns the portable profile and artifact format.",
        ],
        "artifacts": {
            "scan_json": _path_for_state(scan_json, repo_root),
            "summary_md": _path_for_state(summary_md, repo_root),
        },
    }
    _write_json(scan_json, payload)
    _write_text(summary_md, _render_discover_scan_summary(payload))

    state_path = repo_root / "orp" / "state.json"
    state = _read_json(state_path) if state_path.exists() else {}
    if not isinstance(state, dict):
        state = {}
    state.setdefault("runs", {})
    state.setdefault("discovery_scans", {})
    state["last_discover_scan_id"] = scan_id
    state["discovery_scans"][scan_id] = {
        "scan_json": payload["artifacts"]["scan_json"],
        "summary_md": payload["artifacts"]["summary_md"],
        "profile_path": payload["profile"]["path"],
        "owner": owner_login,
    }
    _write_json(state_path, state)
    return payload
1572
+
1573
+
1574
def cmd_init(args: argparse.Namespace) -> int:
    """CLI handler for `orp init`.

    Ensures the ORP runtime directory layout exists under *args.repo_root*
    and writes a minimal starter config at *args.config* only when no config
    file is present (an existing config is never overwritten).  Emits a JSON
    or human-readable summary depending on *args.json_output*.  Returns 0.
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)

    config_path = repo_root / args.config
    config_action = "kept"
    if not config_path.exists():
        # Minimal starter config: one smoke gate wired to a default profile.
        starter = (
            'version: "1"\n'
            "project:\n"
            "  name: my-project\n"
            "  repo_root: .\n"
            "  canonical_paths:\n"
            "    code: src/\n"
            "    analysis: analysis/\n"
            "lifecycle:\n"
            "  claim_status_map:\n"
            "    Draft: draft\n"
            "    In review: ready\n"
            "    Verified: reviewed\n"
            "    Blocked: blocked\n"
            "    Retracted: retracted\n"
            "  atom_status_map:\n"
            "    todo: draft\n"
            "    in_progress: ready\n"
            "    blocked: blocked\n"
            "    done: reviewed\n"
            "gates:\n"
            "  - id: smoke\n"
            "    description: Basic smoke gate\n"
            "    phase: verification\n"
            "    command: echo ORP_SMOKE\n"
            "    pass:\n"
            "      exit_codes: [0]\n"
            "      stdout_must_contain:\n"
            "        - ORP_SMOKE\n"
            "profiles:\n"
            "  default:\n"
            "    description: Minimal starter profile\n"
            "    mode: discovery\n"
            "    packet_kind: problem_scope\n"
            "    gate_ids:\n"
            "      - smoke\n"
        )
        config_path.write_text(starter, encoding="utf-8")
        config_action = "created"

    result = {
        "config_action": config_action,
        "config_path": str(config_path),
        "runtime_root": str(repo_root / "orp"),
    }
    if args.json_output:
        _print_json(result)
    else:
        if config_action == "created":
            print(f"created {config_path}")
        else:
            print(f"kept existing {config_path}")
        print(f"initialized ORP runtime dirs under {repo_root / 'orp'}")
    return 0
1635
+
1636
+
1637
+ def _load_profile(config: dict[str, Any], name: str) -> dict[str, Any]:
1638
+ profiles = config.get("profiles")
1639
+ if not isinstance(profiles, dict):
1640
+ raise RuntimeError("config missing profiles object")
1641
+ profile = profiles.get(name)
1642
+ if not isinstance(profile, dict):
1643
+ raise RuntimeError(f"profile not found: {name}")
1644
+ return profile
1645
+
1646
+
1647
+ def _gate_map(config: dict[str, Any]) -> dict[str, dict[str, Any]]:
1648
+ raw = config.get("gates")
1649
+ if not isinstance(raw, list):
1650
+ raise RuntimeError("config missing gates list")
1651
+ out: dict[str, dict[str, Any]] = {}
1652
+ for row in raw:
1653
+ if not isinstance(row, dict):
1654
+ continue
1655
+ gid = row.get("id")
1656
+ if isinstance(gid, str):
1657
+ out[gid] = row
1658
+ return out
1659
+
1660
+
1661
def cmd_gate_run(args: argparse.Namespace) -> int:
    """Execute every gate of a profile, record artifacts, and persist state.

    Runs each gate's shell command in order, evaluates its pass rules
    (exit codes, stdout/stderr content rules, required files), writes
    per-gate logs plus a RUN.json record under ``orp/artifacts/<run_id>/``,
    updates ``orp/state.json``, and returns 0 on overall PASS, 1 otherwise.
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)

    # Load config + profile; a profile is essentially an ordered gate-id list.
    config_path = (repo_root / args.config).resolve()
    config = _load_config(config_path)
    profile = _load_profile(config, args.profile)
    gate_ids = profile.get("gate_ids")
    if not isinstance(gate_ids, list) or not all(isinstance(x, str) for x in gate_ids):
        raise RuntimeError("profile gate_ids must be list[str]")

    gid_to_gate = _gate_map(config)

    run_id = args.run_id or _run_id()
    started = _now_utc()
    run_artifacts = repo_root / "orp" / "artifacts" / run_id
    run_artifacts.mkdir(parents=True, exist_ok=True)

    run_results: list[dict[str, Any]] = []
    stop_now = False  # set once a failed gate requests stop/mark_blocked
    vars_map = {"run_id": run_id}  # substitution variables for commands/paths
    # NOTE(review): assumes config["runtime"], when present, is a mapping — confirm.
    shell = config.get("runtime", {}).get("shell", "/bin/bash")

    # Deterministic input hash for current config + profile
    det_hash = _sha256_text(json.dumps({"config": config, "profile": profile}, sort_keys=True))

    for gate_id in gate_ids:
        gate = gid_to_gate.get(gate_id)
        if gate is None:
            raise RuntimeError(f"unknown gate in profile: {gate_id}")

        # After a stop-triggering failure, remaining gates are recorded as
        # skipped instead of executed.
        if stop_now:
            run_results.append(
                {
                    "gate_id": gate_id,
                    "phase": gate.get("phase", "custom"),
                    "command": str(gate.get("command", "")),
                    "status": "skipped",
                    "exit_code": 0,
                    "duration_ms": 0,
                    "stdout_path": "",
                    "stderr_path": "",
                    "rule_issues": ["skipped after previous gate stop"],
                }
            )
            continue

        # Expand {run_id}-style variables into the gate command.
        cmd = _replace_vars(str(gate.get("command", "")), vars_map)
        workdir = gate.get("working_dir")
        cwd = repo_root / workdir if isinstance(workdir, str) else repo_root
        # Gate-level timeout wins over the runtime default (900s fallback).
        timeout_sec = int(gate.get("timeout_sec", config.get("runtime", {}).get("default_timeout_sec", 900)))

        t0 = dt.datetime.now(dt.timezone.utc)
        try:
            # Run through a login shell so user PATH/aliases-ish env applies.
            proc = subprocess.run(
                [str(shell), "-lc", cmd],
                cwd=str(cwd),
                capture_output=True,
                text=True,
                timeout=timeout_sec,
            )
            rc = int(proc.returncode)
            out = proc.stdout or ""
            err = proc.stderr or ""
            exec_status = "ok"
        except subprocess.TimeoutExpired as exc:
            # 124 mirrors coreutils `timeout`'s exit code convention.
            rc = 124
            out = exc.stdout or ""
            err = (exc.stderr or "") + f"\nERROR: gate timeout after {timeout_sec}s\n"
            exec_status = "timeout"

        t1 = dt.datetime.now(dt.timezone.utc)
        dur_ms = int((t1 - t0).total_seconds() * 1000)
        # Persist raw gate output for later inspection.
        stdout_path = run_artifacts / f"{gate_id}.stdout.log"
        stderr_path = run_artifacts / f"{gate_id}.stderr.log"
        stdout_path.write_text(out, encoding="utf-8")
        stderr_path.write_text(err, encoding="utf-8")

        pass_cfg = gate.get("pass", {})
        evidence_cfg = gate.get("evidence", {}) if isinstance(gate.get("evidence"), dict) else {}
        exit_codes = pass_cfg.get("exit_codes", [0]) if isinstance(pass_cfg, dict) else [0]
        if not isinstance(exit_codes, list):
            exit_codes = [0]

        # Evaluate each pass-rule family independently, collecting issues.
        ok_exit = rc in [int(x) for x in exit_codes]
        ok_out, out_issues = _eval_rule(
            out,
            pass_cfg.get("stdout_must_contain", []) if isinstance(pass_cfg, dict) else [],
            pass_cfg.get("stdout_must_not_contain", []) if isinstance(pass_cfg, dict) else [],
        )
        ok_err, err_issues = _eval_rule(
            err,
            pass_cfg.get("stderr_must_contain", []) if isinstance(pass_cfg, dict) else [],
            pass_cfg.get("stderr_must_not_contain", []) if isinstance(pass_cfg, dict) else [],
        )

        # File-existence rules, with variable expansion on each path.
        file_issues: list[str] = []
        fm_exist = pass_cfg.get("file_must_exist", []) if isinstance(pass_cfg, dict) else []
        if isinstance(fm_exist, list):
            for rel in fm_exist:
                if not isinstance(rel, str):
                    continue
                rel = _replace_vars(rel, vars_map)
                if not (repo_root / rel).exists():
                    file_issues.append(f"required file missing: {rel}")

        # A gate passes only when every rule family passes AND it did not time out.
        passed = ok_exit and ok_out and ok_err and (len(file_issues) == 0) and (exec_status == "ok")
        status = "pass" if passed else "fail"
        issues = []
        if not ok_exit:
            issues.append(f"exit code {rc} not in {exit_codes}")
        issues.extend(out_issues)
        issues.extend(err_issues)
        issues.extend(file_issues)
        if exec_status != "ok":
            issues.append(exec_status)

        # Optional evidence metadata attached to the gate result.
        evidence_paths = _resolve_config_paths(
            evidence_cfg.get("paths", []) if isinstance(evidence_cfg, dict) else [],
            repo_root,
            vars_map,
        )
        evidence_status = (
            str(evidence_cfg.get("status", "")).strip()
            if isinstance(evidence_cfg, dict)
            else ""
        ) or "process_only"
        evidence_note = (
            str(evidence_cfg.get("note", "")).strip()
            if isinstance(evidence_cfg, dict)
            else ""
        )

        run_results.append(
            {
                "gate_id": gate_id,
                "phase": gate.get("phase", "custom"),
                "command": cmd,
                "status": status,
                "exit_code": rc,
                "duration_ms": dur_ms,
                "stdout_path": str(stdout_path.relative_to(repo_root)),
                "stderr_path": str(stderr_path.relative_to(repo_root)),
                "rule_issues": issues,
                "evidence_paths": evidence_paths,
                "evidence_status": evidence_status,
                "evidence_note": evidence_note,
            }
        )

        # Failure policy: "stop" and "mark_blocked" both halt further execution.
        if not passed:
            on_fail = str(gate.get("on_fail", "stop"))
            if on_fail in {"stop", "mark_blocked"}:
                stop_now = True

    ended = _now_utc()
    gates_passed = sum(1 for g in run_results if g["status"] == "pass")
    gates_failed = sum(1 for g in run_results if g["status"] == "fail")
    gates_total = len(run_results)
    # Skipped gates do not count as failures; overall fails only on explicit fails.
    overall = "PASS" if gates_failed == 0 else "FAIL"

    run_record = {
        "run_id": run_id,
        "config_path": _path_for_state(config_path, repo_root),
        "profile": args.profile,
        "started_at_utc": started,
        "ended_at_utc": ended,
        "deterministic_input_hash": det_hash,
        "results": run_results,
        "summary": {
            "overall_result": overall,
            "gates_passed": gates_passed,
            "gates_failed": gates_failed,
            "gates_total": gates_total,
        },
    }

    # Derive the epistemic-status block before writing the record.
    state_path = repo_root / "orp" / "state.json"
    state = _read_json(state_path)
    run_record["epistemic_status"] = _derive_epistemic_status(
        config=config,
        run_results=run_results,
        state=state,
        repo_root=repo_root,
        vars_map=vars_map,
    )

    run_json_path = run_artifacts / "RUN.json"
    _write_json(run_json_path, run_record)

    # Register this run in the shared state file.
    runs = state.setdefault("runs", {})
    if isinstance(runs, dict):
        runs[run_id] = str(run_json_path.relative_to(repo_root))
    state["last_run_id"] = run_id
    _write_json(state_path, state)

    result = {
        "run_id": run_id,
        "overall": overall,
        "gates_passed": gates_passed,
        "gates_failed": gates_failed,
        "gates_total": gates_total,
        "run_record": str(run_json_path.relative_to(repo_root)),
    }
    if args.json_output:
        _print_json(result)
    else:
        print(f"run_id={run_id}")
        print(f"overall={overall} passed={gates_passed} failed={gates_failed} total={gates_total}")
        print(f"run_record={run_json_path.relative_to(repo_root)}")
    return 0 if overall == "PASS" else 1
1872
+
1873
+
1874
+ def _packet_id(kind: str, run_id: str) -> str:
1875
+ return f"pkt-{kind}-{run_id}"
1876
+
1877
+
1878
+ def _workflow_state_from_run(config: dict[str, Any], run: dict[str, Any]) -> tuple[str, str]:
1879
+ overall = run.get("summary", {}).get("overall_result", "INCONCLUSIVE")
1880
+ if overall == "PASS":
1881
+ return "reviewed", "done"
1882
+ if overall == "FAIL":
1883
+ return "blocked", "blocked"
1884
+ return "ready", "in_progress"
1885
+
1886
+
1887
def cmd_packet_emit(args: argparse.Namespace) -> int:
    """Emit a process-metadata packet (JSON + Markdown) for a completed run.

    Loads the run record (latest run unless ``--run-id`` is given), gathers
    git/atomic/claim/epistemic context, writes ``orp/packets/<packet_id>.json``
    and ``.md``, and records the packet id in ``orp/state.json``.
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)
    config_path = (repo_root / args.config).resolve()
    config = _load_config(config_path)
    profile = _load_profile(config, args.profile)
    # A profile may override the config's atomic_board; apply it on a copy.
    effective_config = dict(config)
    profile_atomic = profile.get("atomic_board")
    if isinstance(profile_atomic, dict):
        effective_config["atomic_board"] = profile_atomic

    state = _read_json(repo_root / "orp" / "state.json")
    run_id = args.run_id or state.get("last_run_id", "")
    if not isinstance(run_id, str) or not run_id:
        raise RuntimeError("no run_id found; run `orp gate run` first or pass --run-id")

    # Resolve the run record path from state, falling back to the canonical
    # artifacts location when the referenced file is gone.
    run_ref = state.get("runs", {}).get(run_id, f"orp/artifacts/{run_id}/RUN.json")
    run_json_path = repo_root / str(run_ref)
    if not run_json_path.exists():
        run_json_path = repo_root / "orp" / "artifacts" / run_id / "RUN.json"
    run = _read_json(run_json_path)

    # Packet kind precedence: CLI flag > profile > config default.
    kind = args.kind or profile.get("packet_kind") or config.get("packet", {}).get("default_kind", "problem_scope")
    if not isinstance(kind, str):
        kind = "problem_scope"

    packet_id = _packet_id(kind, run_id)
    wf_state, atom_status = _workflow_state_from_run(config, run)
    now = _now_utc()

    # Best-effort git context; each probe fails independently and silently.
    git_remote = ""
    git_branch = ""
    git_commit = ""
    git_present = False
    try:
        inside = subprocess.check_output(
            ["git", "rev-parse", "--is-inside-work-tree"],
            cwd=str(repo_root),
            text=True,
            stderr=subprocess.DEVNULL,
        ).strip()
        git_present = inside == "true"
    except Exception:
        git_present = False
    try:
        git_remote = subprocess.check_output(
            ["git", "remote", "get-url", "origin"],
            cwd=str(repo_root),
            text=True,
            stderr=subprocess.DEVNULL,
        ).strip()
    except Exception:
        pass
    try:
        git_branch = subprocess.check_output(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=str(repo_root),
            text=True,
            stderr=subprocess.DEVNULL,
        ).strip()
    except Exception:
        pass
    try:
        git_commit = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            cwd=str(repo_root),
            text=True,
            stderr=subprocess.DEVNULL,
        ).strip()
    except Exception:
        pass

    atomic_context = _collect_atomic_context(effective_config, repo_root, run=run)
    # Prefer the epistemic status already recorded on the run; re-derive if absent.
    epistemic_status = run.get("epistemic_status")
    if not isinstance(epistemic_status, dict):
        epistemic_status = _derive_epistemic_status(
            config=effective_config,
            run_results=run.get("results", []) if isinstance(run.get("results"), list) else [],
            state=state,
            repo_root=repo_root,
            vars_map={"run_id": run_id},
        )
    strongest_evidence_paths = [
        str(x) for x in epistemic_status.get("strongest_evidence_paths", []) if isinstance(x, str)
    ]
    claim_context = _collect_claim_context(effective_config, run, strongest_evidence_paths)

    packet = {
        "schema_version": "1.0.0",
        "packet_id": packet_id,
        "kind": kind,
        "created_at_utc": now,
        "protocol_boundary": {
            "process_only": True,
            "evidence_paths": strongest_evidence_paths,
            "note": "Packet is process metadata. Evidence remains in canonical artifact paths.",
        },
        "repo": {
            "root_path": str(repo_root),
            "git": {
                "present": git_present,
                "remote": git_remote,
                "branch": git_branch,
                "commit": git_commit,
            },
        },
        "run": {
            "run_id": run_id,
            "tool": {"name": "orp", "version": ORP_TOOL_VERSION},
            "deterministic_input_hash": run.get("deterministic_input_hash", ""),
            "started_at_utc": run.get("started_at_utc", now),
            "ended_at_utc": run.get("ended_at_utc", now),
            "duration_ms": _duration_ms(run.get("started_at_utc"), run.get("ended_at_utc")),
        },
        "lifecycle": {
            "workflow_state": wf_state,
            "atom_status": atom_status,
            "state_note": f"derived from run summary: {run.get('summary', {}).get('overall_result', 'INCONCLUSIVE')}",
        },
        "gates": run.get("results", []),
        "summary": run.get("summary", {"overall_result": "INCONCLUSIVE", "gates_passed": 0, "gates_failed": 0, "gates_total": 0}),
        "evidence_status": epistemic_status,
        "artifacts": {
            "packet_json_path": f"orp/packets/{packet_id}.json",
            "packet_md_path": f"orp/packets/{packet_id}.md",
            "artifact_root": f"orp/artifacts/{run_id}",
            "extra_paths": [],
        },
    }
    # Context sections are kind-specific.
    if kind in {"pr", "claim", "verification"}:
        packet["claim_context"] = claim_context
    if atomic_context is not None and kind in {"problem_scope", "atom_pass"}:
        packet["atomic_context"] = atomic_context

    packets_dir = repo_root / "orp" / "packets"
    packets_dir.mkdir(parents=True, exist_ok=True)
    packet_json_path = packets_dir / f"{packet_id}.json"
    _write_json(packet_json_path, packet)

    packet_md_path = packets_dir / f"{packet_id}.md"
    packet_md = _render_packet_md(packet)
    packet_md_path.write_text(packet_md, encoding="utf-8")

    state["last_packet_id"] = packet_id
    _write_json(repo_root / "orp" / "state.json", state)

    result = {
        "packet_id": packet_id,
        "packet_json": str(packet_json_path.relative_to(repo_root)),
        "packet_md": str(packet_md_path.relative_to(repo_root)),
        "packet_kind": kind,
        "run_id": run_id,
    }
    if args.json_output:
        _print_json(result)
    else:
        print(f"packet_id={packet_id}")
        print(f"packet_json={packet_json_path.relative_to(repo_root)}")
        print(f"packet_md={packet_md_path.relative_to(repo_root)}")
    return 0
2047
+
2048
+
2049
def cmd_pack_list(args: argparse.Namespace) -> int:
    """List discovered profile packs, as JSON or key=value lines."""
    packs_root, packs = _discover_packs()
    if args.json_output:
        _print_json(
            {
                "packs_root": str(packs_root),
                "packs_count": len(packs),
                "packs": packs,
            }
        )
        return 0

    for entry in packs:
        print(f"pack.id={entry.get('id', '')}")
        print(f"pack.version={entry.get('version', 'unknown')}")
        print(f"pack.path={entry.get('path', '')}")
        label = entry.get("name", "")
        if label:
            print(f"pack.name={label}")
        print("---")

    print(f"packs_root={packs_root}")
    print(f"packs.count={len(packs)}")
    return 0
2081
+
2082
+
2083
def cmd_discover_profile_init(args: argparse.Namespace) -> int:
    """Write a discovery-profile template JSON and report where it landed."""
    root = Path(args.repo_root).resolve()
    target = _resolve_cli_path(args.out or DEFAULT_DISCOVER_PROFILE, root)
    template = _discover_profile_template(
        profile_id=args.profile_id,
        owner=args.owner or "",
        owner_type=args.owner_type,
        keywords=_coerce_string_list(args.keyword),
        topics=_coerce_string_list(args.topic),
        languages=_coerce_string_list(args.language),
        areas=_coerce_string_list(args.area),
        people=_coerce_string_list(args.person),
    )
    _write_json(target, template)

    owner_info = template["discover"]["github"]["owner"]
    result = {
        "ok": True,
        "profile_path": _path_for_state(target, root),
        "profile_id": template["profile_id"],
        "owner_login": owner_info["login"],
        "owner_type": owner_info["type"],
        "notes": template["notes"],
    }
    if args.json_output:
        _print_json(result)
        return 0

    for key in ("profile_path", "profile_id", "owner_login", "owner_type"):
        print(f"{key}={result[key]}")
    print(f"next=orp discover github scan --profile {result['profile_path']}")
    return 0
2116
+
2117
+
2118
def cmd_discover_github_scan(args: argparse.Namespace) -> int:
    """Run a GitHub discovery scan against a stored discovery profile."""
    root = Path(args.repo_root).resolve()
    profile_file = _resolve_cli_path(args.profile or DEFAULT_DISCOVER_PROFILE, root)
    if not profile_file.exists():
        raise RuntimeError(
            f"missing discovery profile: {_path_for_state(profile_file, root)}. "
            "Run `orp discover profile init` first."
        )

    # Optional fixtures let the scan run offline/deterministically.
    repos_fx = _resolve_cli_path(args.repos_fixture, root) if args.repos_fixture else None
    issues_fx = _resolve_cli_path(args.issues_fixture, root) if args.issues_fixture else None
    scan_id = args.scan_id or _scan_id()
    payload = _perform_github_discovery_scan(
        repo_root=root,
        profile_path=profile_file,
        scan_id=scan_id,
        repos_fixture_path=repos_fx,
        issues_fixture_path=issues_fx,
    )
    if args.json_output:
        _print_json(payload)
        return 0

    artifacts = payload["artifacts"]
    print(f"scan_id={payload['scan_id']}")
    print(f"profile={payload['profile']['path']}")
    print(f"owner={payload['owner']['login']}")
    print(f"owner_type={payload['owner']['type']}")
    print(f"scan_json={artifacts['scan_json']}")
    print(f"summary_md={artifacts['summary_md']}")
    if payload["repos"]:
        best = payload["repos"][0]["full_name"]
        print(f"top_repo={best}")
        print(f"next=orp collaborate init --github-repo {best}")
    if payload["issues"]:
        lead = payload["issues"][0]
        print(f"top_issue={lead['repo']}#{lead['number']}")
    return 0
2155
+
2156
+
2157
def cmd_about(args: argparse.Namespace) -> int:
    """Print tool / discovery / schema metadata, as JSON or key=value lines."""
    payload = _about_payload()
    if args.json_output:
        _print_json(payload)
        return 0

    tool = payload["tool"]
    discovery = payload["discovery"]
    artifacts = payload["artifacts"]
    schemas = payload["schemas"]
    # Fixed output order; downstream tooling parses these lines.
    rows = (
        ("tool.name", tool["name"]),
        ("tool.package", tool["package"]),
        ("tool.version", tool["version"]),
        ("tool.agent_friendly", str(tool["agent_friendly"]).lower()),
        ("discovery.llms_txt", discovery["llms_txt"]),
        ("discovery.agent_integration", discovery["agent_integration"]),
        ("discovery.protocol", discovery["protocol"]),
        ("artifact.run_json", artifacts["run_json"]),
        ("artifact.packet_json", artifacts["packet_json"]),
        ("schema.config", schemas["config"]),
        ("schema.packet", schemas["packet"]),
        ("schema.profile_pack", schemas["profile_pack"]),
        ("packs.count", len(payload["packs"])),
    )
    for key, value in rows:
        print(f"{key}={value}")
    for pack in payload["packs"]:
        if isinstance(pack, dict):
            print(f"pack.id={pack.get('id', '')}")
            print(f"pack.version={pack.get('version', '')}")
    return 0
2182
+
2183
+
2184
def cmd_home(args: argparse.Namespace) -> int:
    """Show the ORP home screen, or its raw payload when --json is set."""
    payload = _home_payload(Path(args.repo_root).resolve(), args.config)
    if args.json_output:
        _print_json(payload)
    else:
        print(_render_home_screen(payload))
    return 0
2193
+
2194
+
2195
def cmd_collaborate_workflows(args: argparse.Namespace) -> int:
    """List the built-in collaboration workflows and workspace readiness."""
    payload = _collaboration_workflow_payload(Path(args.repo_root).resolve())
    if args.json_output:
        _print_json(payload)
        return 0

    ready = "yes" if payload["workspace_ready"] else "no"
    print(f"workspace_ready={ready}")
    print(f"recommended_init_command={payload['recommended_init_command']}")
    for wf in payload["workflows"]:
        print("---")
        print(f"workflow.id={wf['id']}")
        print(f"workflow.profile={wf['profile']}")
        print(f"workflow.config={wf['config']}")
        print(f"workflow.config_exists={str(bool(wf['config_exists'])).lower()}")
        print(f"workflow.description={wf['description']}")
        print(f"workflow.gates={','.join(wf['gate_ids'])}")
    return 0
2213
+
2214
+
2215
def cmd_collaborate_gates(args: argparse.Namespace) -> int:
    """Describe one collaboration workflow: its config, gates, and run command."""
    root = Path(args.repo_root).resolve()
    workflow = _collaboration_workflow_map().get(args.workflow)
    if workflow is None:
        raise RuntimeError(f"unknown collaboration workflow: {args.workflow}")

    cfg_name = str(workflow["config"])
    payload = {
        "workflow": str(workflow["id"]),
        "profile": str(workflow["profile"]),
        "config": cfg_name,
        "config_exists": (root / cfg_name).resolve().exists(),
        "description": str(workflow["description"]),
        "gate_ids": list(workflow["gate_ids"]),
        "recommended_run_command": f"orp collaborate run --workflow {workflow['id']}",
    }
    if args.json_output:
        _print_json(payload)
        return 0

    print(f"workflow.id={payload['workflow']}")
    print(f"profile={payload['profile']}")
    print(f"config={payload['config']}")
    print(f"config_exists={str(bool(payload['config_exists'])).lower()}")
    print(f"description={payload['description']}")
    print(f"recommended_run_command={payload['recommended_run_command']}")
    print("gates=" + ",".join(payload["gate_ids"]))
    return 0
2244
+
2245
+
2246
def cmd_collaborate_init(args: argparse.Namespace) -> int:
    """Install the issue-smashers collaboration pack into a target repo.

    Delegates the real work to ``scripts/orp-pack-install.py``, forwarding the
    CLI options as installer ``--var`` overrides, then reports the outcome in
    JSON or key=value form. Returns the installer's exit code.
    """
    root = Path(args.repo_root).resolve()
    target = Path(args.target_repo_root)
    if not target.is_absolute():
        target = (root / target).resolve()

    installer = Path(__file__).resolve().parent.parent / "scripts" / "orp-pack-install.py"
    if not installer.exists():
        raise RuntimeError(f"missing collaboration installer script: {installer}")

    forwarded: list[str] = ["--pack-id", "issue-smashers", "--target-repo-root", str(target)]
    if args.workspace_root:
        ws = args.workspace_root
        # Every workspace path var derives from the single workspace root.
        derived = (
            ("ISSUE_SMASHERS_ROOT", ws),
            ("ISSUE_SMASHERS_REPOS_DIR", f"{ws}/repos"),
            ("ISSUE_SMASHERS_WORKTREES_DIR", f"{ws}/worktrees"),
            ("ISSUE_SMASHERS_SCRATCH_DIR", f"{ws}/scratch"),
            ("ISSUE_SMASHERS_ARCHIVE_DIR", f"{ws}/archive"),
            ("WATCHLIST_FILE", f"{ws}/analysis/ISSUE_SMASHERS_WATCHLIST.json"),
            ("STATUS_FILE", f"{ws}/analysis/ISSUE_SMASHERS_STATUS.md"),
            ("WORKSPACE_RULES_FILE", f"{ws}/WORKSPACE_RULES.md"),
            ("DEFAULT_PR_BODY_FILE", f"{ws}/analysis/PR_DRAFT_BODY.md"),
        )
        for name, value in derived:
            forwarded.extend(["--var", f"{name}={value}"])
    if args.github_repo:
        forwarded.extend(["--var", f"TARGET_GITHUB_REPO={args.github_repo}"])
    if args.github_author:
        forwarded.extend(["--var", f"TARGET_GITHUB_AUTHOR={args.github_author}"])
    for raw in args.var or []:
        forwarded.extend(["--var", str(raw)])
    if args.report:
        forwarded.extend(["--report", args.report])
    if args.strict_deps:
        forwarded.append("--strict-deps")
    if not args.bootstrap:
        forwarded.append("--no-bootstrap")
    if args.overwrite_bootstrap:
        forwarded.append("--overwrite-bootstrap")

    proc = subprocess.run(
        [sys.executable, str(installer), *forwarded],
        cwd=str(root),
        capture_output=True,
        text=True,
    )
    parsed = _parse_pack_install_output(proc.stdout)
    ok = proc.returncode == 0
    if ok:
        _ensure_dirs(target)

    payload = {
        "ok": ok,
        "returncode": int(proc.returncode),
        "target_repo_root": str(target),
        "workspace_root": args.workspace_root or "issue-smashers",
        "config": "orp.issue-smashers.yml",
        "feedback_config": "orp.issue-smashers-feedback-hardening.yml",
        "report": parsed.get("report", "orp.issue-smashers.pack-install-report.md"),
        "rendered": parsed.get("rendered", {}),
        "bootstrap": parsed.get("bootstrap", {}),
        "implementation": {
            "internal_pack_id": "issue-smashers",
        },
    }
    if proc.stderr.strip():
        payload["stderr"] = proc.stderr.strip()

    if args.json_output:
        _print_json(payload)
    elif ok:
        print(f"target_repo_root={target}")
        print(f"workspace_root={payload['workspace_root']}")
        print(f"config={payload['config']}")
        print(f"feedback_config={payload['feedback_config']}")
        print(f"report={payload['report']}")
        print("next=orp collaborate workflows")
        print("next=orp collaborate run --workflow full_flow")
    else:
        _emit_subprocess_result(proc)
    return int(proc.returncode)
2332
+
2333
+
2334
def cmd_collaborate_run(args: argparse.Namespace) -> int:
    """Run one collaboration workflow by delegating to ``orp gate run``."""
    root = Path(args.repo_root).resolve()
    workflow = _collaboration_workflow_map().get(args.workflow)
    if workflow is None:
        raise RuntimeError(f"unknown collaboration workflow: {args.workflow}")

    cfg_name = str(workflow["config"])
    if not (root / cfg_name).resolve().exists():
        raise RuntimeError(
            f"missing collaboration config: {cfg_name}. Run `orp collaborate init` first."
        )

    # Synthesize the namespace cmd_gate_run expects and reuse it directly.
    return cmd_gate_run(
        argparse.Namespace(
            repo_root=str(root),
            config=cfg_name,
            profile=str(workflow["profile"]),
            run_id=args.run_id,
            json_output=bool(args.json_output),
        )
    )
2355
+
2356
+
2357
def cmd_pack_install(args: argparse.Namespace) -> int:
    """Forward a pack-install request to ``scripts/orp-pack-install.py``.

    Returns the installer's exit code; in JSON mode its stdout is parsed
    into a structured result, otherwise its output is relayed verbatim.
    """
    root = Path(args.repo_root).resolve()
    installer = Path(__file__).resolve().parent.parent / "scripts" / "orp-pack-install.py"
    if not installer.exists():
        raise RuntimeError(f"missing pack install script: {installer}")

    forwarded: list[str] = ["--pack-id", args.pack_id, "--target-repo-root", args.target_repo_root]
    for flag, value in (("--pack-path", args.pack_path), ("--orp-repo-root", args.orp_repo_root)):
        if value:
            forwarded.extend([flag, value])
    for comp in args.include or []:
        forwarded.extend(["--include", str(comp)])
    for raw in args.var or []:
        forwarded.extend(["--var", str(raw)])
    if args.report:
        forwarded.extend(["--report", args.report])
    if args.strict_deps:
        forwarded.append("--strict-deps")
    if not args.bootstrap:
        forwarded.append("--no-bootstrap")
    if args.overwrite_bootstrap:
        forwarded.append("--overwrite-bootstrap")

    proc = subprocess.run(
        [sys.executable, str(installer), *forwarded],
        cwd=str(root),
        capture_output=True,
        text=True,
    )
    if args.json_output:
        result = _parse_pack_install_output(proc.stdout)
        result["ok"] = proc.returncode == 0
        result["returncode"] = int(proc.returncode)
        if proc.stderr.strip():
            result["stderr"] = proc.stderr.strip()
        _print_json(result)
    else:
        _emit_subprocess_result(proc)
    return int(proc.returncode)
2398
+
2399
+
2400
+ def _parse_kv_lines(text: str) -> dict[str, str]:
2401
+ out: dict[str, str] = {}
2402
+ for raw in (text or "").splitlines():
2403
+ line = raw.strip()
2404
+ if "=" not in line:
2405
+ continue
2406
+ k, v = line.split("=", 1)
2407
+ out[k.strip()] = v.strip()
2408
+ return out
2409
+
2410
+
2411
+ def _coerce_scalar(value: str) -> Any:
2412
+ text = value.strip()
2413
+ if text == "":
2414
+ return ""
2415
+ lower = text.lower()
2416
+ if lower == "true":
2417
+ return True
2418
+ if lower == "false":
2419
+ return False
2420
+ if re.fullmatch(r"-?\d+", text):
2421
+ try:
2422
+ return int(text)
2423
+ except Exception:
2424
+ return text
2425
+ return text
2426
+
2427
+
2428
+ def _insert_dotted_value(target: dict[str, Any], dotted_key: str, value: Any) -> None:
2429
+ parts = [part for part in dotted_key.split(".") if part]
2430
+ if not parts:
2431
+ return
2432
+
2433
+ cur: dict[str, Any] = target
2434
+ for part in parts[:-1]:
2435
+ existing = cur.get(part)
2436
+ if not isinstance(existing, dict):
2437
+ existing = {}
2438
+ cur[part] = existing
2439
+ cur = existing
2440
+
2441
+ leaf = parts[-1]
2442
+ existing = cur.get(leaf)
2443
+ if existing is None:
2444
+ cur[leaf] = value
2445
+ return
2446
+ if isinstance(existing, list):
2447
+ existing.append(value)
2448
+ return
2449
+ cur[leaf] = [existing, value]
2450
+
2451
+
2452
def _parse_kv_tree(text: str) -> dict[str, Any]:
    """Parse ``dotted.key=value`` lines into a nested dict of coerced values.

    Lines without ``=`` are skipped; values go through `_coerce_scalar` and
    keys through `_insert_dotted_value` (repeats become lists).
    """
    tree: dict[str, Any] = {}
    for line in (text or "").splitlines():
        stripped = line.strip()
        if "=" not in stripped:
            continue
        key, _, value = stripped.partition("=")
        _insert_dotted_value(tree, key.strip(), _coerce_scalar(value))
    return tree
2461
+
2462
+
2463
+ def _split_csv_value(value: Any) -> list[str]:
2464
+ if isinstance(value, list):
2465
+ return [str(x) for x in value]
2466
+ if not isinstance(value, str):
2467
+ return []
2468
+ text = value.strip()
2469
+ if not text:
2470
+ return []
2471
+ return [part.strip() for part in text.split(",") if part.strip()]
2472
+
2473
+
2474
def _parse_pack_install_output(text: str) -> dict[str, Any]:
    """Parse pack-installer stdout into a tree, normalizing the component list."""
    tree = _parse_kv_tree(text)
    components = tree.get("included_components", "")
    tree["included_components"] = _split_csv_value(components)
    return tree
2478
+
2479
+
2480
def _parse_erdos_sync_output(text: str) -> dict[str, Any]:
    """Parse erdos-sync stdout (key=value lines) into a structured payload.

    Most keys are inserted via their dotted path. Keys under the
    ``selected.`` prefix are special-cased: ``selected.count`` and
    ``selected.missing`` become flat top-level fields, while repeated
    ``selected.<field>`` lines are grouped into per-problem rows — each
    ``selected.problem_id`` line starts a new row.
    """
    payload: dict[str, Any] = {}
    selected_rows: list[dict[str, Any]] = []
    current_selected: dict[str, Any] | None = None  # row being accumulated

    for raw in (text or "").splitlines():
        line = raw.strip()
        if "=" not in line:
            continue  # skip non key=value noise
        key, value = line.split("=", 1)
        key = key.strip()
        coerced = _coerce_scalar(value)

        if key == "selected.count":
            payload["selected_count"] = coerced
            continue
        if key == "selected.missing":
            payload["selected_missing"] = _split_csv_value(coerced)
            continue
        if key.startswith("selected."):
            field = key.split(".", 1)[1]
            # problem_id marks the start of the next row: flush the previous
            # row (if any) before opening a fresh one.
            if field == "problem_id":
                if current_selected:
                    selected_rows.append(current_selected)
                current_selected = {}
            if current_selected is None:
                # Tolerate selected.* fields arriving before any problem_id.
                current_selected = {}
            current_selected[field] = coerced
            continue

        _insert_dotted_value(payload, key, coerced)

    # Flush the trailing row; attach rows only when at least one was seen.
    if current_selected:
        selected_rows.append(current_selected)
    if selected_rows:
        payload["selected"] = selected_rows
    return payload
2517
+
2518
+
2519
+ def _emit_subprocess_result(proc: subprocess.CompletedProcess[str]) -> None:
2520
+ if proc.stdout:
2521
+ print(proc.stdout, end="")
2522
+ if proc.stderr:
2523
+ print(proc.stderr, file=sys.stderr, end="")
2524
+
2525
+
2526
def cmd_pack_fetch(args: argparse.Namespace) -> int:
    """Fetch a pack via the external fetch script, optionally installing it.

    Runs ``scripts/orp-pack-fetch.py`` in a subprocess; when
    ``--install-target`` is set, chains into ``scripts/orp-pack-install.py``
    using the ``pack_path`` (and ``pack_id``) reported on the fetch stdout.
    Returns the subprocess exit code; output is emitted as JSON or as the
    raw child output depending on ``--json``.

    Raises:
        RuntimeError: if either helper script is missing, or the fetch
            output did not include a ``pack_path``.
    """
    repo_root = Path(args.repo_root).resolve()
    # Both helper scripts live in <package root>/scripts, one level above this CLI file.
    fetch_script = Path(__file__).resolve().parent.parent / "scripts" / "orp-pack-fetch.py"
    install_script = Path(__file__).resolve().parent.parent / "scripts" / "orp-pack-install.py"
    if not fetch_script.exists():
        raise RuntimeError(f"missing pack fetch script: {fetch_script}")

    # Build the fetch command, forwarding only the options the caller set.
    fetch_cmd: list[str] = [sys.executable, str(fetch_script), "--source", args.source]
    if args.pack_id:
        fetch_cmd.extend(["--pack-id", args.pack_id])
    if args.ref:
        fetch_cmd.extend(["--ref", args.ref])
    if args.cache_root:
        fetch_cmd.extend(["--cache-root", args.cache_root])
    if args.name:
        fetch_cmd.extend(["--name", args.name])

    proc = subprocess.run(fetch_cmd, cwd=str(repo_root), capture_output=True, text=True)
    # The fetch script reports status as dotted KEY=VALUE lines on stdout.
    fetch_payload = _parse_kv_tree(proc.stdout)
    if proc.returncode != 0:
        # Fetch failed: report and propagate the child's exit code.
        if args.json_output:
            result: dict[str, Any] = {
                "ok": False,
                "returncode": int(proc.returncode),
                "fetch": fetch_payload,
            }
            if proc.stderr.strip():
                result["stderr"] = proc.stderr.strip()
            _print_json(result)
        else:
            _emit_subprocess_result(proc)
        return int(proc.returncode)

    if not args.install_target:
        # Fetch-only mode: succeed without invoking the installer.
        if args.json_output:
            result = {
                "ok": True,
                "returncode": 0,
                "fetch": fetch_payload,
            }
            _print_json(result)
        else:
            _emit_subprocess_result(proc)
        return 0
    if not install_script.exists():
        raise RuntimeError(f"missing pack install script: {install_script}")

    # Flat KEY=VALUE view of the fetch output (dotted keys not expanded),
    # used to read the literal pack_path/pack_id strings.
    kv = _parse_kv_lines(proc.stdout)
    pack_path = kv.get("pack_path", "").strip()
    if not pack_path:
        raise RuntimeError("pack fetch did not return pack_path")

    install_cmd: list[str] = [
        sys.executable,
        str(install_script),
        "--pack-path",
        pack_path,
        "--target-repo-root",
        args.install_target,
    ]
    # preserve discovered pack id for reporting consistency when available
    fetched_pack_id = kv.get("pack_id", "").strip()
    if fetched_pack_id:
        install_cmd.extend(["--pack-id", fetched_pack_id])
    if args.orp_repo_root:
        install_cmd.extend(["--orp-repo-root", args.orp_repo_root])
    for comp in args.include or []:
        install_cmd.extend(["--include", str(comp)])
    for raw in args.var or []:
        install_cmd.extend(["--var", str(raw)])
    if args.report:
        install_cmd.extend(["--report", args.report])
    if args.strict_deps:
        install_cmd.append("--strict-deps")
    if args.no_bootstrap:
        install_cmd.append("--no-bootstrap")
    if args.overwrite_bootstrap:
        install_cmd.append("--overwrite-bootstrap")

    proc_install = subprocess.run(install_cmd, cwd=str(repo_root), capture_output=True, text=True)
    if args.json_output:
        # Combine fetch and install reporting; stderr from each child is
        # surfaced under a distinct key when non-empty.
        result = {
            "ok": proc_install.returncode == 0,
            "returncode": int(proc_install.returncode),
            "fetch": fetch_payload,
            "install": _parse_pack_install_output(proc_install.stdout),
        }
        if proc.stderr.strip():
            result["fetch_stderr"] = proc.stderr.strip()
        if proc_install.stderr.strip():
            result["install_stderr"] = proc_install.stderr.strip()
        _print_json(result)
    else:
        _emit_subprocess_result(proc)
        _emit_subprocess_result(proc_install)
    return int(proc_install.returncode)
2622
+
2623
+
2624
def cmd_erdos_sync(args: argparse.Namespace) -> int:
    """Run the Erdos catalog sync script and record the result in state.

    Forwards the recognized CLI options (plus any trailing ``sync_args``)
    to ``scripts/orp-erdos-problems-sync.py``.  On success, the parsed
    sync output is stored under ``last_erdos_sync`` in ``orp/state.json``
    with a sync timestamp.  Returns the child's exit code.

    Raises:
        RuntimeError: if the sync script is missing.
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)
    script_path = Path(__file__).resolve().parent.parent / "scripts" / "orp-erdos-problems-sync.py"
    if not script_path.exists():
        raise RuntimeError(f"missing sync script: {script_path}")

    # Forward only the options the caller explicitly set (None means unset;
    # booleans forward as bare flags).
    forwarded: list[str] = []
    if args.source_url is not None:
        forwarded.extend(["--source-url", args.source_url])
    if args.input_html is not None:
        forwarded.extend(["--input-html", args.input_html])
    if args.write_html_snapshot is not None:
        forwarded.extend(["--write-html-snapshot", args.write_html_snapshot])
    if args.timeout_sec is not None:
        forwarded.extend(["--timeout-sec", str(args.timeout_sec)])
    if args.user_agent is not None:
        forwarded.extend(["--user-agent", args.user_agent])
    if args.active_status is not None:
        forwarded.extend(["--active-status", args.active_status])
    if args.allow_count_mismatch:
        forwarded.append("--allow-count-mismatch")
    if args.out_all is not None:
        forwarded.extend(["--out-all", args.out_all])
    if args.out_open is not None:
        forwarded.extend(["--out-open", args.out_open])
    if args.out_closed is not None:
        forwarded.extend(["--out-closed", args.out_closed])
    if args.out_active is not None:
        forwarded.extend(["--out-active", args.out_active])
    if args.out_open_list is not None:
        forwarded.extend(["--out-open-list", args.out_open_list])
    if args.open_list_max_statement_chars is not None:
        forwarded.extend(
            ["--open-list-max-statement-chars", str(args.open_list_max_statement_chars)]
        )
    for pid in args.problem_id or []:
        forwarded.extend(["--problem-id", str(pid)])
    if args.out_problem_dir is not None:
        forwarded.extend(["--out-problem-dir", args.out_problem_dir])

    # Append passthrough args captured by argparse.REMAINDER.
    forwarded.extend(list(args.sync_args or []))
    # NOTE(review): this only strips a leading "--" separator when no other
    # options were forwarded above; when options precede it, the "--" is
    # passed through to the child (argparse children treat it as an
    # end-of-options marker, so this appears harmless — confirm intent).
    if forwarded and forwarded[0] == "--":
        forwarded = forwarded[1:]

    cmd = [sys.executable, str(script_path), *forwarded]
    proc = subprocess.run(cmd, cwd=str(repo_root), capture_output=True, text=True)
    result = _parse_erdos_sync_output(proc.stdout)
    if proc.returncode == 0:
        # Persist the successful sync result for later commands to read.
        state_path = repo_root / "orp" / "state.json"
        state = _read_json(state_path)
        result["synced_at_utc"] = _now_utc()
        state["last_erdos_sync"] = result
        _write_json(state_path, state)

    if args.json_output:
        result["ok"] = proc.returncode == 0
        result["returncode"] = int(proc.returncode)
        if proc.stderr.strip():
            result["stderr"] = proc.stderr.strip()
        _print_json(result)
    else:
        _emit_subprocess_result(proc)
    return int(proc.returncode)
2688
+
2689
+
2690
def _resolve_run_json_path(
    *,
    repo_root: Path,
    run_id_arg: str,
    run_json_arg: str,
) -> tuple[str, Path]:
    """Locate a run's RUN.json together with its run id.

    An explicit ``run_json_arg`` path wins (run id read from the record,
    falling back to the parent directory name).  Otherwise the run id —
    explicit, or ``last_run_id`` from ``orp/state.json`` — is resolved
    through the state's ``runs`` map, then through the conventional
    ``orp/artifacts/<run_id>/RUN.json`` layout.

    Raises:
        RuntimeError: if no run id can be determined or no RUN.json exists.
    """
    if run_json_arg:
        explicit = Path(run_json_arg)
        if not explicit.is_absolute():
            explicit = repo_root / explicit
        explicit = explicit.resolve()
        if not explicit.exists():
            raise RuntimeError(f"run json not found: {explicit}")
        record = _read_json(explicit)
        resolved_id = str(record.get("run_id", "")).strip() or explicit.parent.name
        return resolved_id, explicit

    state_path = repo_root / "orp" / "state.json"
    state: dict[str, Any] = _read_json(state_path) if state_path.exists() else {}

    resolved_id = run_id_arg.strip() or str(state.get("last_run_id", "")).strip()
    if not resolved_id:
        raise RuntimeError("no run_id found; pass --run-id or --run-json")

    found: Path | None = None
    # First preference: the explicit path recorded in the state's runs map.
    runs_map = state.get("runs")
    if isinstance(runs_map, dict):
        ref = runs_map.get(resolved_id)
        if isinstance(ref, str) and ref:
            candidate = (repo_root / ref).resolve()
            if candidate.exists():
                found = candidate

    # Fallback: the conventional artifacts directory layout.
    if found is None:
        candidate = (repo_root / "orp" / "artifacts" / resolved_id / "RUN.json").resolve()
        if candidate.exists():
            found = candidate

    if found is None:
        raise RuntimeError(f"run json not found for run_id={resolved_id}")
    return resolved_id, found
2737
+
2738
+
2739
def _run_duration_ms_from_record(run: dict[str, Any]) -> int:
    """Best-effort run duration in ms.

    Prefers the wall-clock delta between the recorded start/end timestamps;
    falls back to summing per-gate ``duration_ms`` values when that delta
    is not positive.  Malformed rows contribute nothing.
    """
    wall = _duration_ms(run.get("started_at_utc"), run.get("ended_at_utc"))
    if wall > 0:
        return wall
    rows = run.get("results", [])
    if not isinstance(rows, list):
        return 0
    total = 0
    for entry in rows:
        if not isinstance(entry, dict):
            continue
        try:
            total += int(entry.get("duration_ms", 0))
        except Exception:
            # Non-numeric duration: skip this row rather than fail.
            continue
    return max(0, total)
2757
+
2758
+
2759
+ def _one_line(s: str, max_len: int = 88) -> str:
2760
+ collapsed = re.sub(r"\s+", " ", s).strip()
2761
+ if len(collapsed) <= max_len:
2762
+ return collapsed
2763
+ if max_len <= 3:
2764
+ return collapsed[:max_len]
2765
+ return collapsed[: max_len - 3].rstrip() + "..."
2766
+
2767
+
2768
def _render_run_summary_md(run: dict[str, Any]) -> str:
    """Render a RUN.json record as a human-readable Markdown summary.

    Sections are emitted in a fixed order: headline metadata, an
    explanation of the report, a gate-results table, optional failing
    conditions and epistemic status, evidence pointers, and a
    reproducibility footer.  Missing or malformed fields degrade to empty
    strings or empty sections rather than raising.
    """
    # Scalar headline fields, all stringified and stripped defensively.
    run_id = str(run.get("run_id", "")).strip()
    profile = str(run.get("profile", "")).strip()
    config_path = str(run.get("config_path", "")).strip()
    started = str(run.get("started_at_utc", "")).strip()
    ended = str(run.get("ended_at_utc", "")).strip()
    det_hash = str(run.get("deterministic_input_hash", "")).strip()

    summary = run.get("summary", {})
    if not isinstance(summary, dict):
        summary = {}
    # `or 0` guards against explicit None values in the record.
    overall = str(summary.get("overall_result", "INCONCLUSIVE")).strip() or "INCONCLUSIVE"
    passed = int(summary.get("gates_passed", 0) or 0)
    failed = int(summary.get("gates_failed", 0) or 0)
    total = int(summary.get("gates_total", 0) or 0)
    duration_ms = _run_duration_ms_from_record(run)

    results = run.get("results", [])
    if not isinstance(results, list):
        results = []

    lines: list[str] = []
    lines.append(f"# ORP Run Summary `{run_id}`")
    lines.append("")
    lines.append("## Headline")
    lines.append("")
    lines.append(f"- overall_result: `{overall}`")
    lines.append(f"- profile: `{profile}`")
    lines.append(f"- gates: `{passed} passed / {failed} failed / {total} total`")
    lines.append(f"- duration_ms: `{duration_ms}`")
    lines.append(f"- started_at_utc: `{started}`")
    lines.append(f"- ended_at_utc: `{ended}`")
    lines.append(f"- config_path: `{config_path}`")
    lines.append("")
    lines.append("## What This Report Shows")
    lines.append("")
    lines.append("- Which gates ran, in what order, and with what command.")
    lines.append("- Whether each gate passed or failed, with exit code and timing.")
    lines.append("- Where to inspect raw evidence (`stdout` / `stderr`) for each gate.")
    lines.append("- A deterministic input hash so teams can compare runs reliably.")
    lines.append("")
    lines.append("## Gate Results")
    lines.append("")
    lines.append("| Gate | Status | Exit | Duration ms | Command |")
    lines.append("|---|---:|---:|---:|---|")

    # One table row per well-formed gate result; commands are collapsed to
    # a single bounded line so the table stays readable.
    for row in results:
        if not isinstance(row, dict):
            continue
        gate_id = str(row.get("gate_id", ""))
        status = str(row.get("status", ""))
        exit_code = str(row.get("exit_code", ""))
        gate_dur = str(row.get("duration_ms", ""))
        command = _one_line(str(row.get("command", "")))
        lines.append(
            f"| `{gate_id}` | `{status}` | {exit_code} | {gate_dur} | `{command}` |"
        )

    # Section listing each failed gate with its recorded rule issues.
    failing_rows = [
        row
        for row in results
        if isinstance(row, dict) and str(row.get("status", "")).lower() == "fail"
    ]
    if failing_rows:
        lines.append("")
        lines.append("## Failing Conditions")
        lines.append("")
        for row in failing_rows:
            gate_id = str(row.get("gate_id", ""))
            lines.append(f"- `{gate_id}`")
            issues = row.get("rule_issues", [])
            if isinstance(issues, list) and issues:
                for issue in issues:
                    lines.append(f" - {issue}")
            else:
                lines.append(" - no explicit rule issues recorded")

    # Optional epistemic-status section: only rendered when present and non-empty.
    epistemic = run.get("epistemic_status", {})
    if isinstance(epistemic, dict) and epistemic:
        lines.append("")
        lines.append("## Epistemic Status")
        lines.append("")
        lines.append(f"- overall: `{str(epistemic.get('overall', '')).strip()}`")
        lines.append(f"- starter_scaffold: `{str(bool(epistemic.get('starter_scaffold', False))).lower()}`")

        # Only string entries are kept from each list-valued field.
        stub_gates = [str(x) for x in epistemic.get("stub_gates", []) if isinstance(x, str)]
        starter_gates = [
            str(x) for x in epistemic.get("starter_scaffold_gates", []) if isinstance(x, str)
        ]
        evidence_gates = [str(x) for x in epistemic.get("evidence_gates", []) if isinstance(x, str)]
        strongest_paths = [
            str(x) for x in epistemic.get("strongest_evidence_paths", []) if isinstance(x, str)
        ]
        notes = [str(x) for x in epistemic.get("notes", []) if isinstance(x, str)]

        lines.append(
            f"- stub_gates: `{', '.join(stub_gates) if stub_gates else '(none)'}`"
        )
        lines.append(
            f"- starter_scaffold_gates: `{', '.join(starter_gates) if starter_gates else '(none)'}`"
        )
        lines.append(
            f"- evidence_gates: `{', '.join(evidence_gates) if evidence_gates else '(none)'}`"
        )
        if strongest_paths:
            lines.append("- strongest_evidence_paths:")
            for path in strongest_paths:
                lines.append(f" - `{path}`")
        if notes:
            lines.append("- notes:")
            for note in notes:
                lines.append(f" - {note}")

    # Pointers to each gate's captured stdout/stderr artifacts.
    lines.append("")
    lines.append("## Evidence Pointers")
    lines.append("")
    for row in results:
        if not isinstance(row, dict):
            continue
        gate_id = str(row.get("gate_id", ""))
        stdout_path = str(row.get("stdout_path", "")).strip()
        stderr_path = str(row.get("stderr_path", "")).strip()
        lines.append(
            f"- `{gate_id}`: stdout=`{stdout_path or '(none)'}` stderr=`{stderr_path or '(none)'}`"
        )

    lines.append("")
    lines.append("## Reproducibility")
    lines.append("")
    lines.append(f"- deterministic_input_hash: `{det_hash}`")
    lines.append("- rerun with the same profile/config and compare this hash + gate outputs.")
    lines.append("")
    return "\n".join(lines)
2901
+
2902
+
2903
def cmd_report_summary(args: argparse.Namespace) -> int:
    """Render a RUN_SUMMARY.md for a recorded run and report where it went.

    The summary is written next to the run's RUN.json unless ``--out`` is
    given.  Status is emitted as JSON or ``key=value`` lines depending on
    ``--json``; ``--print-stdout`` additionally echoes the markdown.
    """
    repo_root = Path(args.repo_root).resolve()
    _ensure_dirs(repo_root)

    run_id, run_json_path = _resolve_run_json_path(
        repo_root=repo_root,
        run_id_arg=args.run_id,
        run_json_arg=args.run_json,
    )
    summary_md = _render_run_summary_md(_read_json(run_json_path))

    # Default destination is alongside the run record.
    if args.out:
        destination = Path(args.out)
        if not destination.is_absolute():
            destination = repo_root / destination
        destination = destination.resolve()
    else:
        destination = run_json_path.parent / "RUN_SUMMARY.md"

    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(summary_md, encoding="utf-8")

    result = {
        "run_id": run_id,
        "run_json": _path_for_state(run_json_path, repo_root),
        "summary_md": _path_for_state(destination, repo_root),
    }
    if args.print_stdout:
        result["summary_markdown"] = summary_md

    if args.json_output:
        _print_json(result)
        return 0

    print(f"run_id={run_id}")
    print(f"run_json={_path_for_state(run_json_path, repo_root)}")
    print(f"summary_md={_path_for_state(destination, repo_root)}")
    if args.print_stdout:
        print("---")
        print(summary_md)
    return 0
2944
+
2945
+
2946
+ def _duration_ms(started: Any, ended: Any) -> int:
2947
+ try:
2948
+ s = dt.datetime.fromisoformat(str(started).replace("Z", "+00:00"))
2949
+ e = dt.datetime.fromisoformat(str(ended).replace("Z", "+00:00"))
2950
+ return max(0, int((e - s).total_seconds() * 1000))
2951
+ except Exception:
2952
+ return 0
2953
+
2954
+
2955
+ def _render_packet_md(packet: dict[str, Any]) -> str:
2956
+ lines: list[str] = []
2957
+ lines.append(f"# ORP Packet `{packet.get('packet_id', '')}`")
2958
+ lines.append("")
2959
+ lines.append(f"- Kind: `{packet.get('kind', '')}`")
2960
+ lines.append(f"- Created (UTC): `{packet.get('created_at_utc', '')}`")
2961
+ lines.append(f"- Workflow state: `{packet.get('lifecycle', {}).get('workflow_state', '')}`")
2962
+ lines.append(f"- Overall result: `{packet.get('summary', {}).get('overall_result', '')}`")
2963
+ lines.append("")
2964
+ lines.append("## Gate Results")
2965
+ lines.append("")
2966
+ lines.append("| Gate | Phase | Status | Exit | Duration ms |")
2967
+ lines.append("|---|---|---:|---:|---:|")
2968
+ for gate in packet.get("gates", []):
2969
+ if not isinstance(gate, dict):
2970
+ continue
2971
+ lines.append(
2972
+ f"| `{gate.get('gate_id', '')}` | `{gate.get('phase', '')}` | `{gate.get('status', '')}` | "
2973
+ f"{gate.get('exit_code', '')} | {gate.get('duration_ms', '')} |"
2974
+ )
2975
+
2976
+ claim = packet.get("claim_context")
2977
+ if isinstance(claim, dict):
2978
+ lines.append("")
2979
+ lines.append("## Claim Context")
2980
+ lines.append("")
2981
+ lines.append(f"- Claim id: `{claim.get('claim_id', '')}`")
2982
+ artifacts = [str(x) for x in claim.get("canonical_artifacts", []) if isinstance(x, str)]
2983
+ if artifacts:
2984
+ lines.append("- Canonical artifacts:")
2985
+ for path in artifacts:
2986
+ lines.append(f" - `{path}`")
2987
+
2988
+ atomic = packet.get("atomic_context")
2989
+ if isinstance(atomic, dict):
2990
+ lines.append("")
2991
+ lines.append("## Atomic Context")
2992
+ lines.append("")
2993
+ lines.append(f"- Board: `{atomic.get('board_id', '')}`")
2994
+ lines.append(f"- Problem: `{atomic.get('problem_id', '')}`")
2995
+ lines.append(f"- Snapshot: `{atomic.get('board_snapshot_path', '')}`")
2996
+ if atomic.get("starter_scaffold"):
2997
+ lines.append(f"- Starter scaffold: `true`")
2998
+ starter_note = str(atomic.get("starter_note", "")).strip()
2999
+ if starter_note:
3000
+ lines.append(f"- Starter note: `{starter_note}`")
3001
+
3002
+ evidence_status = packet.get("evidence_status")
3003
+ if isinstance(evidence_status, dict):
3004
+ lines.append("")
3005
+ lines.append("## Evidence Status")
3006
+ lines.append("")
3007
+ lines.append(f"- Overall: `{evidence_status.get('overall', '')}`")
3008
+ lines.append(
3009
+ f"- Starter scaffold: `{str(bool(evidence_status.get('starter_scaffold', False))).lower()}`"
3010
+ )
3011
+ strongest_paths = [
3012
+ str(x)
3013
+ for x in evidence_status.get("strongest_evidence_paths", [])
3014
+ if isinstance(x, str)
3015
+ ]
3016
+ if strongest_paths:
3017
+ lines.append("- Strongest evidence paths:")
3018
+ for path in strongest_paths:
3019
+ lines.append(f" - `{path}`")
3020
+ stub_gates = [str(x) for x in evidence_status.get("stub_gates", []) if isinstance(x, str)]
3021
+ if stub_gates:
3022
+ lines.append(f"- Stub gates: `{', '.join(stub_gates)}`")
3023
+
3024
+ lines.append("")
3025
+ lines.append("## Boundary")
3026
+ lines.append("")
3027
+ lines.append("- This packet is process metadata only.")
3028
+ lines.append("- Evidence remains in canonical artifact paths.")
3029
+ lines.append("")
3030
+ return "\n".join(lines)
3031
+
3032
+
3033
+ def build_parser() -> argparse.ArgumentParser:
3034
+ p = argparse.ArgumentParser(description="ORP CLI")
3035
+ p.add_argument("--repo-root", default=".", help="Repository root (default: .)")
3036
+ p.add_argument("--config", default="orp.yml", help="Config path relative to repo root (default: orp.yml)")
3037
+ sub = p.add_subparsers(dest="cmd", required=False)
3038
+
3039
+ s_home = sub.add_parser(
3040
+ "home",
3041
+ help="Show ORP home screen with packs, repo status, and quick-start commands",
3042
+ )
3043
+ s_home.add_argument(
3044
+ "--json",
3045
+ dest="json_output",
3046
+ action="store_true",
3047
+ help="Print machine-readable JSON",
3048
+ )
3049
+ s_home.set_defaults(func=cmd_home, json_output=False)
3050
+
3051
+ s_about = sub.add_parser(
3052
+ "about",
3053
+ help="Describe ORP discovery surfaces and machine-friendly interfaces",
3054
+ )
3055
+ s_about.add_argument(
3056
+ "--json",
3057
+ dest="json_output",
3058
+ action="store_true",
3059
+ help="Print machine-readable JSON",
3060
+ )
3061
+ s_about.set_defaults(func=cmd_about, json_output=False)
3062
+
3063
+ s_discover = sub.add_parser(
3064
+ "discover",
3065
+ help="Profile-based GitHub discovery and recommendation operations",
3066
+ )
3067
+ discover_sub = s_discover.add_subparsers(dest="discover_cmd", required=True)
3068
+
3069
+ s_discover_profile = discover_sub.add_parser(
3070
+ "profile",
3071
+ help="Discovery profile scaffold operations",
3072
+ )
3073
+ discover_profile_sub = s_discover_profile.add_subparsers(dest="discover_profile_cmd", required=True)
3074
+ s_discover_profile_init = discover_profile_sub.add_parser(
3075
+ "init",
3076
+ help="Scaffold a GitHub discovery profile",
3077
+ )
3078
+ s_discover_profile_init.add_argument(
3079
+ "--out",
3080
+ default=DEFAULT_DISCOVER_PROFILE,
3081
+ help=f"Output profile path (default: {DEFAULT_DISCOVER_PROFILE})",
3082
+ )
3083
+ s_discover_profile_init.add_argument(
3084
+ "--profile-id",
3085
+ default="default",
3086
+ help="Profile id (default: default)",
3087
+ )
3088
+ s_discover_profile_init.add_argument(
3089
+ "--owner",
3090
+ default="",
3091
+ help="GitHub owner login to scan, for example SproutSeeds",
3092
+ )
3093
+ s_discover_profile_init.add_argument(
3094
+ "--owner-type",
3095
+ choices=["auto", "user", "org"],
3096
+ default="auto",
3097
+ help="GitHub owner type (default: auto)",
3098
+ )
3099
+ s_discover_profile_init.add_argument(
3100
+ "--keyword",
3101
+ action="append",
3102
+ default=[],
3103
+ help="Interest keyword (repeatable)",
3104
+ )
3105
+ s_discover_profile_init.add_argument(
3106
+ "--topic",
3107
+ action="append",
3108
+ default=[],
3109
+ help="Preferred repo topic (repeatable)",
3110
+ )
3111
+ s_discover_profile_init.add_argument(
3112
+ "--language",
3113
+ action="append",
3114
+ default=[],
3115
+ help="Preferred language (repeatable)",
3116
+ )
3117
+ s_discover_profile_init.add_argument(
3118
+ "--area",
3119
+ action="append",
3120
+ default=[],
3121
+ help="Preferred issue/repo area keyword, for example docs or compiler (repeatable)",
3122
+ )
3123
+ s_discover_profile_init.add_argument(
3124
+ "--person",
3125
+ action="append",
3126
+ default=[],
3127
+ help="Preferred person/login signal (repeatable)",
3128
+ )
3129
+ s_discover_profile_init.add_argument(
3130
+ "--json",
3131
+ dest="json_output",
3132
+ action="store_true",
3133
+ help="Print machine-readable JSON",
3134
+ )
3135
+ s_discover_profile_init.set_defaults(func=cmd_discover_profile_init, json_output=False)
3136
+
3137
+ s_discover_github = discover_sub.add_parser(
3138
+ "github",
3139
+ help="GitHub-owner discovery operations",
3140
+ )
3141
+ discover_github_sub = s_discover_github.add_subparsers(dest="discover_github_cmd", required=True)
3142
+ s_discover_github_scan = discover_github_sub.add_parser(
3143
+ "scan",
3144
+ help="Scan a GitHub owner space and rank repo/issue/person matches",
3145
+ )
3146
+ s_discover_github_scan.add_argument(
3147
+ "--profile",
3148
+ default=DEFAULT_DISCOVER_PROFILE,
3149
+ help=f"Discovery profile path (default: {DEFAULT_DISCOVER_PROFILE})",
3150
+ )
3151
+ s_discover_github_scan.add_argument(
3152
+ "--scan-id",
3153
+ default="",
3154
+ help="Optional scan id override",
3155
+ )
3156
+ s_discover_github_scan.add_argument(
3157
+ "--repos-fixture",
3158
+ default="",
3159
+ help="Advanced/testing: read repos from fixture JSON instead of GitHub API",
3160
+ )
3161
+ s_discover_github_scan.add_argument(
3162
+ "--issues-fixture",
3163
+ default="",
3164
+ help="Advanced/testing: read issues map fixture JSON instead of GitHub API",
3165
+ )
3166
+ s_discover_github_scan.add_argument(
3167
+ "--json",
3168
+ dest="json_output",
3169
+ action="store_true",
3170
+ help="Print machine-readable JSON",
3171
+ )
3172
+ s_discover_github_scan.set_defaults(func=cmd_discover_github_scan, json_output=False)
3173
+
3174
+ s_collab = sub.add_parser(
3175
+ "collaborate",
3176
+ help="Built-in repository collaboration setup and workflow operations",
3177
+ )
3178
+ collab_sub = s_collab.add_subparsers(dest="collaborate_cmd", required=True)
3179
+
3180
+ s_collab_init = collab_sub.add_parser(
3181
+ "init",
3182
+ help="Scaffold collaboration workspace and configs in the target repository",
3183
+ )
3184
+ s_collab_init.add_argument(
3185
+ "--target-repo-root",
3186
+ default=".",
3187
+ help="Repository root to scaffold (default: current --repo-root)",
3188
+ )
3189
+ s_collab_init.add_argument(
3190
+ "--workspace-root",
3191
+ default="issue-smashers",
3192
+ help="Workspace root relative to target repo (default: issue-smashers)",
3193
+ )
3194
+ s_collab_init.add_argument(
3195
+ "--github-repo",
3196
+ default="",
3197
+ help="Optional GitHub repo slug, for example owner/repo",
3198
+ )
3199
+ s_collab_init.add_argument(
3200
+ "--github-author",
3201
+ default="",
3202
+ help="Optional GitHub login used for coordination-aware gates",
3203
+ )
3204
+ s_collab_init.add_argument(
3205
+ "--var",
3206
+ action="append",
3207
+ default=[],
3208
+ help="Advanced internal template override KEY=VALUE (repeatable)",
3209
+ )
3210
+ s_collab_init.add_argument(
3211
+ "--report",
3212
+ default="",
3213
+ help="Optional install report output path",
3214
+ )
3215
+ s_collab_init.add_argument(
3216
+ "--strict-deps",
3217
+ action="store_true",
3218
+ help="Exit non-zero if dependency audit finds missing paths",
3219
+ )
3220
+ s_collab_init.add_argument(
3221
+ "--no-bootstrap",
3222
+ dest="bootstrap",
3223
+ action="store_false",
3224
+ help="Disable starter collaboration workspace scaffold",
3225
+ )
3226
+ s_collab_init.add_argument(
3227
+ "--overwrite-bootstrap",
3228
+ action="store_true",
3229
+ help="Allow overwriting existing scaffolded collaboration files",
3230
+ )
3231
+ s_collab_init.add_argument(
3232
+ "--json",
3233
+ dest="json_output",
3234
+ action="store_true",
3235
+ help="Print machine-readable JSON",
3236
+ )
3237
+ s_collab_init.set_defaults(func=cmd_collaborate_init, json_output=False, bootstrap=True)
3238
+
3239
+ s_collab_workflows = collab_sub.add_parser(
3240
+ "workflows",
3241
+ help="List built-in collaboration workflows and their backing configs",
3242
+ )
3243
+ s_collab_workflows.add_argument(
3244
+ "--json",
3245
+ dest="json_output",
3246
+ action="store_true",
3247
+ help="Print machine-readable JSON",
3248
+ )
3249
+ s_collab_workflows.set_defaults(func=cmd_collaborate_workflows, json_output=False)
3250
+
3251
+ s_collab_gates = collab_sub.add_parser(
3252
+ "gates",
3253
+ help="Show the gate chain for a collaboration workflow",
3254
+ )
3255
+ s_collab_gates.add_argument(
3256
+ "--workflow",
3257
+ default="full_flow",
3258
+ help="Workflow id (default: full_flow)",
3259
+ )
3260
+ s_collab_gates.add_argument(
3261
+ "--json",
3262
+ dest="json_output",
3263
+ action="store_true",
3264
+ help="Print machine-readable JSON",
3265
+ )
3266
+ s_collab_gates.set_defaults(func=cmd_collaborate_gates, json_output=False)
3267
+
3268
+ s_collab_run = collab_sub.add_parser(
3269
+ "run",
3270
+ help="Run a built-in collaboration workflow",
3271
+ )
3272
+ s_collab_run.add_argument(
3273
+ "--workflow",
3274
+ default="full_flow",
3275
+ help="Workflow id (default: full_flow)",
3276
+ )
3277
+ s_collab_run.add_argument(
3278
+ "--run-id",
3279
+ default="",
3280
+ help="Optional run id override",
3281
+ )
3282
+ s_collab_run.add_argument(
3283
+ "--json",
3284
+ dest="json_output",
3285
+ action="store_true",
3286
+ help="Print machine-readable JSON",
3287
+ )
3288
+ s_collab_run.set_defaults(func=cmd_collaborate_run, json_output=False)
3289
+
3290
+ s_init = sub.add_parser("init", help="Initialize runtime folders and starter config")
3291
+ s_init.add_argument(
3292
+ "--json",
3293
+ dest="json_output",
3294
+ action="store_true",
3295
+ help="Print machine-readable JSON",
3296
+ )
3297
+ s_init.set_defaults(func=cmd_init, json_output=False)
3298
+
3299
+ s_gate = sub.add_parser("gate", help="Gate operations")
3300
+ gate_sub = s_gate.add_subparsers(dest="gate_cmd", required=True)
3301
+ s_run = gate_sub.add_parser("run", help="Run configured gates for a profile")
3302
+ s_run.add_argument("--profile", required=True, help="Profile name from config")
3303
+ s_run.add_argument("--run-id", default="", help="Optional run id override")
3304
+ s_run.add_argument(
3305
+ "--json",
3306
+ dest="json_output",
3307
+ action="store_true",
3308
+ help="Print machine-readable JSON",
3309
+ )
3310
+ s_run.set_defaults(func=cmd_gate_run, json_output=False)
3311
+
3312
+ s_packet = sub.add_parser("packet", help="Packet operations")
3313
+ packet_sub = s_packet.add_subparsers(dest="packet_cmd", required=True)
3314
+ s_emit = packet_sub.add_parser("emit", help="Emit packet from latest or specified run")
3315
+ s_emit.add_argument("--profile", required=True, help="Profile name from config")
3316
+ s_emit.add_argument("--run-id", default="", help="Run id (defaults to last run)")
3317
+ s_emit.add_argument("--kind", default="", help="Packet kind override")
3318
+ s_emit.add_argument(
3319
+ "--json",
3320
+ dest="json_output",
3321
+ action="store_true",
3322
+ help="Print machine-readable JSON",
3323
+ )
3324
+ s_emit.set_defaults(func=cmd_packet_emit, json_output=False)
3325
+
3326
+ s_erdos = sub.add_parser("erdos", help="Erdos catalog operations")
3327
+ erdos_sub = s_erdos.add_subparsers(dest="erdos_cmd", required=True)
3328
+ s_erdos_sync = erdos_sub.add_parser("sync", help="Sync Erdos problems catalog")
3329
+ s_erdos_sync.add_argument("--source-url", default=None, help="Override source URL")
3330
+ s_erdos_sync.add_argument("--input-html", default=None, help="Read from local HTML file")
3331
+ s_erdos_sync.add_argument(
3332
+ "--write-html-snapshot",
3333
+ default=None,
3334
+ help="Write fetched HTML snapshot path",
3335
+ )
3336
+ s_erdos_sync.add_argument("--timeout-sec", type=int, default=None, help="HTTP timeout seconds")
3337
+ s_erdos_sync.add_argument("--user-agent", default=None, help="HTTP user-agent")
3338
+ s_erdos_sync.add_argument(
3339
+ "--active-status",
3340
+ choices=["open", "closed", "all"],
3341
+ default=None,
3342
+ help="Active subset (open|closed|all)",
3343
+ )
3344
+ s_erdos_sync.add_argument(
3345
+ "--allow-count-mismatch",
3346
+ action="store_true",
3347
+ help="Allow parsed count mismatch vs site banner",
3348
+ )
3349
+ s_erdos_sync.add_argument("--out-all", default=None, help="Output all-problems JSON path")
3350
+ s_erdos_sync.add_argument("--out-open", default=None, help="Output open-problems JSON path")
3351
+ s_erdos_sync.add_argument(
3352
+ "--out-closed", default=None, help="Output closed-problems JSON path"
3353
+ )
3354
+ s_erdos_sync.add_argument(
3355
+ "--out-active", default=None, help="Output active-problems JSON path"
3356
+ )
3357
+ s_erdos_sync.add_argument(
3358
+ "--out-open-list",
3359
+ default=None,
3360
+ help="Output open-problems markdown list path",
3361
+ )
3362
+ s_erdos_sync.add_argument(
3363
+ "--open-list-max-statement-chars",
3364
+ type=int,
3365
+ default=None,
3366
+ help="Open-list statement preview char cap",
3367
+ )
3368
+ s_erdos_sync.add_argument(
3369
+ "--problem-id",
3370
+ action="append",
3371
+ type=int,
3372
+ default=[],
3373
+ help="Problem id to print direct link/status for (repeatable)",
3374
+ )
3375
+ s_erdos_sync.add_argument(
3376
+ "--out-problem-dir",
3377
+ default=None,
3378
+ help="Write selected problem payloads to this directory",
3379
+ )
3380
+ s_erdos_sync.add_argument(
3381
+ "sync_args",
3382
+ nargs=argparse.REMAINDER,
3383
+ help="Additional args forwarded to scripts/orp-erdos-problems-sync.py",
3384
+ )
3385
+ s_erdos_sync.add_argument(
3386
+ "--json",
3387
+ dest="json_output",
3388
+ action="store_true",
3389
+ help="Print machine-readable JSON",
3390
+ )
3391
+ s_erdos_sync.set_defaults(func=cmd_erdos_sync, json_output=False)
3392
+
3393
+ s_pack = sub.add_parser("pack", help="Advanced/internal profile pack operations")
3394
+ pack_sub = s_pack.add_subparsers(dest="pack_cmd", required=True)
3395
+
3396
+ s_pack_list = pack_sub.add_parser("list", help="List available local ORP packs")
3397
+ s_pack_list.add_argument(
3398
+ "--json",
3399
+ dest="json_output",
3400
+ action="store_true",
3401
+ help="Print machine-readable JSON",
3402
+ )
3403
+ s_pack_list.set_defaults(func=cmd_pack_list, json_output=False)
3404
+
3405
+ s_pack_install = pack_sub.add_parser(
3406
+ "install",
3407
+ help="Install/render pack templates into a target repository with dependency audit",
3408
+ )
3409
+ s_pack_install.add_argument(
3410
+ "--pack-id",
3411
+ default="erdos-open-problems",
3412
+ help="Pack id under ORP packs/ (default: erdos-open-problems)",
3413
+ )
3414
+ s_pack_install.add_argument(
3415
+ "--pack-path",
3416
+ default="",
3417
+ help="Explicit pack root path containing pack.yml (overrides --pack-id lookup)",
3418
+ )
3419
+ s_pack_install.add_argument(
3420
+ "--target-repo-root",
3421
+ default=".",
3422
+ help="Target repository root for rendered config files (default: current directory)",
3423
+ )
3424
+ s_pack_install.add_argument(
3425
+ "--orp-repo-root",
3426
+ default="",
3427
+ help="Optional ORP repo root override (default: current ORP checkout)",
3428
+ )
3429
+ s_pack_install.add_argument(
3430
+ "--include",
3431
+ action="append",
3432
+ default=[],
3433
+ help=(
3434
+ "Component to install (repeatable). "
3435
+ "Valid values depend on the selected pack. "
3436
+ "Default when omitted: the pack's default install set."
3437
+ ),
3438
+ )
3439
+ s_pack_install.add_argument(
3440
+ "--var",
3441
+ action="append",
3442
+ default=[],
3443
+ help="Extra template variable KEY=VALUE (repeatable)",
3444
+ )
3445
+ s_pack_install.add_argument(
3446
+ "--report",
3447
+ default="",
3448
+ help="Install report output path (default depends on selected pack)",
3449
+ )
3450
+ s_pack_install.add_argument(
3451
+ "--strict-deps",
3452
+ action="store_true",
3453
+ help="Exit non-zero if dependency audit finds missing paths",
3454
+ )
3455
+ s_pack_install.add_argument(
3456
+ "--no-bootstrap",
3457
+ dest="bootstrap",
3458
+ action="store_false",
3459
+ help="Disable starter adapter scaffolding",
3460
+ )
3461
+ s_pack_install.add_argument(
3462
+ "--overwrite-bootstrap",
3463
+ action="store_true",
3464
+ help="Allow bootstrap to overwrite existing scaffolded files",
3465
+ )
3466
+ s_pack_install.set_defaults(bootstrap=True)
3467
+ s_pack_install.add_argument(
3468
+ "--json",
3469
+ dest="json_output",
3470
+ action="store_true",
3471
+ help="Print machine-readable JSON",
3472
+ )
3473
+ s_pack_install.set_defaults(func=cmd_pack_install, json_output=False)
3474
+
3475
+ s_pack_fetch = pack_sub.add_parser(
3476
+ "fetch",
3477
+ help="Fetch pack repo from git and optionally install into a target repo",
3478
+ )
3479
+ s_pack_fetch.add_argument("--source", required=True, help="Git URL or local git repo path")
3480
+ s_pack_fetch.add_argument(
3481
+ "--pack-id",
3482
+ default="",
3483
+ help="Pack id to select when source repo contains multiple packs",
3484
+ )
3485
+ s_pack_fetch.add_argument("--ref", default="", help="Optional branch/tag/commit checkout")
3486
+ s_pack_fetch.add_argument("--cache-root", default="", help="Local cache root (default: ~/.orp/packs)")
3487
+ s_pack_fetch.add_argument("--name", default="", help="Optional cache directory name override")
3488
+ s_pack_fetch.add_argument(
3489
+ "--install-target",
3490
+ default="",
3491
+ help="If set, install fetched pack into this target repo root",
3492
+ )
3493
+ s_pack_fetch.add_argument(
3494
+ "--orp-repo-root",
3495
+ default="",
3496
+ help="Optional ORP repo root override for install step",
3497
+ )
3498
+ s_pack_fetch.add_argument(
3499
+ "--include",
3500
+ action="append",
3501
+ default=[],
3502
+ help="Install component to include (repeatable, install mode only; valid values depend on the pack)",
3503
+ )
3504
+ s_pack_fetch.add_argument(
3505
+ "--var",
3506
+ action="append",
3507
+ default=[],
3508
+ help="Template variable KEY=VALUE (install mode only, repeatable)",
3509
+ )
3510
+ s_pack_fetch.add_argument(
3511
+ "--report",
3512
+ default="",
3513
+ help="Install report output path (install mode only)",
3514
+ )
3515
+ s_pack_fetch.add_argument(
3516
+ "--strict-deps",
3517
+ action="store_true",
3518
+ help="Fail install if dependency audit has missing paths",
3519
+ )
3520
+ s_pack_fetch.add_argument(
3521
+ "--no-bootstrap",
3522
+ action="store_true",
3523
+ help="Disable starter scaffolding during install",
3524
+ )
3525
+ s_pack_fetch.add_argument(
3526
+ "--overwrite-bootstrap",
3527
+ action="store_true",
3528
+ help="Allow overwriting starter scaffold files during install",
3529
+ )
3530
+ s_pack_fetch.add_argument(
3531
+ "--json",
3532
+ dest="json_output",
3533
+ action="store_true",
3534
+ help="Print machine-readable JSON",
3535
+ )
3536
+ s_pack_fetch.set_defaults(func=cmd_pack_fetch, json_output=False)
3537
+
3538
+ s_report = sub.add_parser("report", help="Run report operations")
3539
+ report_sub = s_report.add_subparsers(dest="report_cmd", required=True)
3540
+ s_report_summary = report_sub.add_parser(
3541
+ "summary",
3542
+ help="Render one-page markdown summary from RUN.json",
3543
+ )
3544
+ s_report_summary.add_argument(
3545
+ "--run-id",
3546
+ default="",
3547
+ help="Run id (defaults to last run in orp/state.json)",
3548
+ )
3549
+ s_report_summary.add_argument(
3550
+ "--run-json",
3551
+ default="",
3552
+ help="Explicit path to RUN.json (absolute or relative to --repo-root)",
3553
+ )
3554
+ s_report_summary.add_argument(
3555
+ "--out",
3556
+ default="",
3557
+ help="Output markdown path (default: alongside RUN.json as RUN_SUMMARY.md)",
3558
+ )
3559
+ s_report_summary.add_argument(
3560
+ "--print",
3561
+ dest="print_stdout",
3562
+ action="store_true",
3563
+ help="Also print markdown summary to stdout",
3564
+ )
3565
+ s_report_summary.add_argument(
3566
+ "--json",
3567
+ dest="json_output",
3568
+ action="store_true",
3569
+ help="Print machine-readable JSON",
3570
+ )
3571
+ s_report_summary.set_defaults(func=cmd_report_summary, json_output=False)
3572
+
3573
+ return p
3574
+
3575
+
3576
def main() -> int:
    """CLI entry point.

    Parses command-line arguments and dispatches to the selected
    subcommand handler. When no subcommand was given, falls back to the
    default "home" view using the global --repo-root/--config options.

    Returns:
        Process exit code produced by the dispatched command handler.
    """
    args = build_parser().parse_args()
    selected = getattr(args, "cmd", None)
    if selected:
        # A subcommand was chosen; its handler was bound via set_defaults(func=...).
        return args.func(args)
    # No subcommand: show the home view with non-JSON output.
    fallback = argparse.Namespace(
        repo_root=args.repo_root,
        config=args.config,
        json_output=False,
    )
    return cmd_home(fallback)
3588
+
3589
+
3590
if __name__ == "__main__":
    # Script entry: run the CLI and convert expected runtime failures
    # (raised as RuntimeError by command handlers) into a short stderr
    # message with exit status 2. SystemExit from a successful main()
    # passes through the except clause untouched.
    try:
        exit_code = main()
    except RuntimeError as exc:
        print(f"error: {exc}", file=sys.stderr)
        exit_code = 2
    raise SystemExit(exit_code)