agent-control-plane 0.1.14 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +323 -349
- package/bin/pr-risk.sh +28 -6
- package/hooks/heartbeat-hooks.sh +62 -22
- package/npm/bin/agent-control-plane.js +434 -12
- package/package.json +1 -1
- package/references/architecture.md +8 -0
- package/references/control-plane-map.md +6 -2
- package/references/release-checklist.md +0 -2
- package/tools/bin/agent-github-update-labels +6 -1
- package/tools/bin/agent-project-catch-up-issue-pr-links +118 -0
- package/tools/bin/agent-project-catch-up-merged-prs +77 -21
- package/tools/bin/agent-project-catch-up-scheduled-issue-retries +123 -0
- package/tools/bin/agent-project-cleanup-session +84 -0
- package/tools/bin/agent-project-heartbeat-loop +10 -3
- package/tools/bin/agent-project-reconcile-issue-session +45 -12
- package/tools/bin/agent-project-reconcile-pr-session +25 -0
- package/tools/bin/agent-project-run-claude-session +2 -2
- package/tools/bin/agent-project-run-codex-resilient +57 -2
- package/tools/bin/agent-project-run-kilo-session +346 -14
- package/tools/bin/agent-project-run-ollama-session +658 -0
- package/tools/bin/agent-project-run-openclaw-session +73 -25
- package/tools/bin/agent-project-run-opencode-session +354 -14
- package/tools/bin/agent-project-run-pi-session +479 -0
- package/tools/bin/agent-project-worker-status +38 -1
- package/tools/bin/flow-config-lib.sh +123 -3
- package/tools/bin/flow-resident-worker-lib.sh +1 -1
- package/tools/bin/flow-shell-lib.sh +7 -2
- package/tools/bin/heartbeat-recovery-preflight.sh +1 -0
- package/tools/bin/heartbeat-safe-auto.sh +105 -17
- package/tools/bin/install-project-launchd.sh +19 -2
- package/tools/bin/prepare-worktree.sh +4 -4
- package/tools/bin/profile-activate.sh +2 -2
- package/tools/bin/profile-adopt.sh +2 -2
- package/tools/bin/project-init.sh +1 -1
- package/tools/bin/project-runtimectl.sh +90 -7
- package/tools/bin/provider-cooldown-state.sh +14 -14
- package/tools/bin/render-flow-config.sh +30 -33
- package/tools/bin/run-codex-task.sh +53 -4
- package/tools/bin/scaffold-profile.sh +18 -3
- package/tools/bin/start-issue-worker.sh +4 -1
- package/tools/bin/start-pr-fix-worker.sh +33 -0
- package/tools/bin/start-pr-review-worker.sh +34 -0
- package/tools/bin/start-resident-issue-loop.sh +5 -4
- package/tools/bin/sync-agent-repo.sh +2 -2
- package/tools/bin/sync-dependency-baseline.sh +3 -3
- package/tools/bin/sync-shared-agent-home.sh +4 -1
- package/tools/dashboard/app.js +62 -0
- package/tools/dashboard/dashboard_snapshot.py +53 -4
- package/tools/dashboard/index.html +5 -1
- package/tools/dashboard/styles.css +97 -20
- package/tools/templates/pr-fix-template.md +4 -8
- package/tools/templates/pr-merge-repair-template.md +4 -8
- package/tools/templates/pr-review-template.md +2 -1
```diff
@@ -48,7 +48,12 @@ if [[ -z "$repo_slug" || -z "$number" ]]; then
 fi
 
 resource="issues/${number}"
-
+# Use caller-provided cached JSON if available to skip the GET call
+if [[ -n "${ACP_CACHED_ISSUE_JSON:-}" ]]; then
+  current_json="${ACP_CACHED_ISSUE_JSON}"
+else
+  current_json="$(flow_github_api_repo "${repo_slug}" "${resource}")"
+fi
 add_json="$(jq -R . <"$add_file" | jq -s .)"
 remove_json="$(jq -R . <"$remove_file" | jq -s .)"
 payload="$(
```
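The hunk above (which appears to belong to `agent-github-update-labels`, per the file list) lets a caller that has already fetched the issue hand the payload over via `ACP_CACHED_ISSUE_JSON`, skipping a second GET. A minimal sketch of that handoff, assuming the helpers behave as shown in this diff; the flag names on the updater call are assumptions, only the environment variable and the `flow_github_*` helpers appear in the changed code:

```bash
# Hedged sketch: fetch the issue once, then let agent-github-update-labels reuse the payload.
# Flag names below are assumed for illustration; ACP_CACHED_ISSUE_JSON is what the diff reads.
issue_json="$(flow_github_issue_view_json "$repo_slug" "$issue_id")"

ACP_CACHED_ISSUE_JSON="$issue_json" \
  agent-github-update-labels \
    --repo-slug "$repo_slug" --number "$issue_id" \
    --add-file "$add_file" --remove-file "$remove_file"
```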
```diff
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+usage() {
+  cat <<'EOF'
+Usage:
+  agent-project-catch-up-issue-pr-links --repo-slug <owner/repo> --state-root <path> --hook-file <path> [--limit <n>]
+
+Clear stale issue retry state when an issue already has a linked PR comment and
+that PR still exists (open, closed, or merged).
+EOF
+}
+
+repo_slug=""
+state_root=""
+hook_file=""
+limit="100"
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --repo-slug) repo_slug="${2:-}"; shift 2 ;;
+    --state-root) state_root="${2:-}"; shift 2 ;;
+    --hook-file) hook_file="${2:-}"; shift 2 ;;
+    --limit) limit="${2:-}"; shift 2 ;;
+    --help|-h) usage; exit 0 ;;
+    *) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
+  esac
+done
+
+if [[ -z "$repo_slug" || -z "$state_root" || -z "$hook_file" ]]; then
+  usage >&2
+  exit 1
+fi
+
+if [[ ! -f "$hook_file" ]]; then
+  echo "missing hook file: $hook_file" >&2
+  exit 1
+fi
+
+# shellcheck source=/dev/null
+source "$hook_file"
+
+if ! declare -F issue_clear_retry >/dev/null 2>&1; then
+  issue_clear_retry() { :; }
+fi
+
+ledger_dir="${state_root}/linked-pr-issue-catchup"
+retry_dir="${state_root}/retries/issues"
+mkdir -p "$ledger_dir" "$retry_dir"
+
+extract_latest_linked_pr() {
+  local issue_json="${1:-}"
+  ISSUE_JSON="$issue_json" python3 - <<'PY'
+import json
+import os
+import re
+
+issue = json.loads(os.environ.get("ISSUE_JSON", "{}") or "{}")
+latest = ""
+latest_at = ""
+for comment in issue.get("comments", []) or []:
+    body = comment.get("body") or ""
+    match = None
+    for candidate in re.finditer(r"Opened PR #(\d+)", body):
+        match = candidate
+    if not match:
+        continue
+    created = comment.get("createdAt") or ""
+    pr_number = match.group(1)
+    if created >= latest_at:
+        latest_at = created
+        latest = pr_number
+
+print(latest)
+PY
+}
+
+pr_exists() {
+  local pr_number="${1:?pr number required}"
+  local pr_json=""
+  pr_json="$(flow_github_pr_view_json "$repo_slug" "$pr_number" 2>/dev/null || true)"
+  [[ -n "$pr_json" && "$pr_json" != "{}" ]]
+}
+
+for retry_file in "$retry_dir"/*.env; do
+  [[ -f "$retry_file" ]] || continue
+  issue_id="$(basename "${retry_file%.env}")"
+  [[ -n "$issue_id" ]] || continue
+
+  retry_reason="$(awk -F= '/^LAST_REASON=/{print $2; exit}' "$retry_file" 2>/dev/null | tr -d '\r' || true)"
+  if [[ "$retry_reason" != "host-publish-failed" ]]; then
+    continue
+  fi
+
+  ledger_file="${ledger_dir}/${issue_id}.env"
+  if [[ -f "$ledger_file" ]]; then
+    continue
+  fi
+
+  issue_json="$(flow_github_issue_view_json "$repo_slug" "$issue_id" 2>/dev/null || true)"
+  [[ -n "$issue_json" && "$issue_json" != "{}" ]] || continue
+
+  linked_pr="$(extract_latest_linked_pr "$issue_json")"
+  [[ -n "$linked_pr" ]] || continue
+  if ! pr_exists "$linked_pr"; then
+    continue
+  fi
+
+  ISSUE_ID="$issue_id" issue_clear_retry || true
+
+  {
+    printf 'ISSUE_ID=%s\n' "$issue_id"
+    printf 'LINKED_PR=%s\n' "$linked_pr"
+    printf 'PROCESSED_AT=%s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+  } >"$ledger_file"
+
+  printf 'CATCHUP_LINKED_PR_ISSUE=%s\n' "$issue_id"
+done
```
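To make the new tool's state layout concrete, here is a hedged sketch of the files it reads and writes under `--state-root`; the issue and PR numbers and the timestamp are invented, while the key names, the directory names, and the `host-publish-failed` gate come directly from the script above:

```bash
# Input the catch-up pass scans: a per-issue retry record (only host-publish-failed is handled).
cat "${state_root}/retries/issues/123.env"
# LAST_REASON=host-publish-failed

# Output it writes once an "Opened PR #N" comment points at a PR that still exists:
cat "${state_root}/linked-pr-issue-catchup/123.env"
# ISSUE_ID=123
# LINKED_PR=456
# PROCESSED_AT=2026-02-03T00:00:00Z
```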
```diff
@@ -54,8 +54,9 @@ for hook_name in "${optional_hooks[@]}"; do
   fi
 done
 
-
-
+merged_ledger_dir="${state_root}/merged-pr-catchup"
+closed_ledger_dir="${state_root}/closed-pr-catchup"
+mkdir -p "$merged_ledger_dir" "$closed_ledger_dir"
 
 get_pr_risk_json() {
   local pr_number="${1:?pr number required}"
```
```diff
@@ -92,16 +93,68 @@ close_issue_if_needed() {
   fi
 }
 
-
-
-
-
+process_terminal_pr() {
+  local pr_number="${1:?pr number required}"
+  local linked_issue_id="${2:-}"
+  local merged_at="${3:-}"
+  local processed_state="${4:?processed state required}"
+  local ledger_dir=""
+  local ledger_file=""
+
+  case "$processed_state" in
+    merged) ledger_dir="$merged_ledger_dir" ;;
+    closed) ledger_dir="$closed_ledger_dir" ;;
+    *)
+      echo "unsupported terminal PR state: ${processed_state}" >&2
+      return 1
+      ;;
+  esac
 
   ledger_file="${ledger_dir}/${pr_number}.env"
   if [[ -f "$ledger_file" ]]; then
-
+    return 0
+  fi
+
+  PR_NUMBER="$pr_number" pr_clear_retry || true
+  if [[ "$processed_state" == "merged" ]]; then
+    close_issue_if_needed "$pr_number" "$linked_issue_id"
+    if ! PR_NUMBER="$pr_number" pr_after_merged "$pr_number"; then
+      printf 'CATCHUP_FAILED_PR=%s\n' "$pr_number" >&2
+      return 1
+    fi
+  else
+    if ! PR_NUMBER="$pr_number" pr_after_closed "$pr_number"; then
+      printf 'CATCHUP_FAILED_CLOSED_PR=%s\n' "$pr_number" >&2
+      return 1
+    fi
+  fi
+
+  if ! PR_NUMBER="$pr_number" pr_cleanup_merged_residue "$pr_number"; then
+    printf 'CATCHUP_FAILED_RESIDUE=%s\n' "$pr_number" >&2
+    return 1
   fi
 
+  {
+    printf 'PR_NUMBER=%s\n' "$pr_number"
+    printf 'ISSUE_ID=%s\n' "$linked_issue_id"
+    printf 'PR_STATE=%s\n' "$processed_state"
+    printf 'MERGED_AT=%s\n' "$merged_at"
+    printf 'PROCESSED_AT=%s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+  } >"$ledger_file"
+
+  if [[ "$processed_state" == "merged" ]]; then
+    printf 'CATCHUP_MERGED_PR=%s\n' "$pr_number"
+  else
+    printf 'CATCHUP_CLOSED_PR=%s\n' "$pr_number"
+  fi
+}
+
+merged_prs_json="$(flow_github_pr_list_json "$repo_slug" merged "$limit")"
+closed_prs_json="$(flow_github_pr_list_json "$repo_slug" closed "$limit")"
+
+while IFS= read -r pr_number; do
+  [[ -n "$pr_number" ]] || continue
+
   risk_json="$(get_pr_risk_json "$pr_number" 2>/dev/null || true)"
   if [[ -z "$risk_json" ]]; then
     continue
```
```diff
@@ -114,24 +167,27 @@ while IFS= read -r pr_number; do
 
   linked_issue_id="$(jq -r '.linkedIssueId // empty' <<<"$risk_json" 2>/dev/null || true)"
   merged_at="$(jq -r --arg pr "$pr_number" 'map(select((.number | tostring) == $pr)) | .[0].mergedAt // ""' <<<"$merged_prs_json")"
+  process_terminal_pr "$pr_number" "$linked_issue_id" "$merged_at" merged || true
+done < <(jq -r 'sort_by(.mergedAt) | reverse | .[].number' <<<"$merged_prs_json")
 
-
-
-
-
+while IFS= read -r pr_number; do
+  [[ -n "$pr_number" ]] || continue
+
+  merged_at="$(jq -r --arg pr "$pr_number" 'map(select((.number | tostring) == $pr)) | .[0].mergedAt // ""' <<<"$closed_prs_json")"
+  if [[ -n "$merged_at" ]]; then
     continue
   fi
-
-
+
+  risk_json="$(get_pr_risk_json "$pr_number" 2>/dev/null || true)"
+  if [[ -z "$risk_json" ]]; then
     continue
   fi
 
-
-
-
-
-    printf 'PROCESSED_AT=%s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
-  } >"$ledger_file"
+  is_managed="$(jq -r '.isManagedByAgent // .isAgentBranch // false' <<<"$risk_json" 2>/dev/null || printf 'false\n')"
+  if [[ "$is_managed" != "true" ]]; then
+    continue
+  fi
 
-
-
+  linked_issue_id="$(jq -r '.linkedIssueId // empty' <<<"$risk_json" 2>/dev/null || true)"
+  process_terminal_pr "$pr_number" "$linked_issue_id" "" closed || true
+done < <(jq -r 'sort_by(.createdAt) | reverse | .[].number' <<<"$closed_prs_json")
```
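The `process_terminal_pr` helper above leans on hook functions (`pr_clear_retry`, `pr_after_merged`, `pr_after_closed`, `pr_cleanup_merged_residue`) that are expected to come from the sourced hook file. A minimal no-op hook sketch suitable for a dry run; the bodies are assumptions, only the function names and the `PR_NUMBER` environment convention appear in the diff:

```bash
#!/usr/bin/env bash
# Hedged dry-run hook file: every hook logs and succeeds, so the catch-up pass
# only records ledger entries without side effects.
pr_clear_retry()            { echo "would clear retry state for PR ${PR_NUMBER:-?}"; }
pr_after_merged()           { echo "would run post-merge follow-up for PR ${1:-?}"; }
pr_after_closed()           { echo "would run post-close follow-up for PR ${1:-?}"; }
pr_cleanup_merged_residue() { echo "would clean residue for PR ${1:-?}"; }
```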
```diff
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+usage() {
+  cat <<'EOF'
+Usage:
+  agent-project-catch-up-scheduled-issue-retries --repo-slug <owner/repo> --state-root <path> --hook-file <path> [--limit <n>]
+
+Clear stale retry state for recurring scheduled reporting issues once GitHub
+already reflects the latest terminal status via labels and the issue is not
+currently running.
+EOF
+}
+
+repo_slug=""
+state_root=""
+hook_file=""
+limit="100"
+
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --repo-slug) repo_slug="${2:-}"; shift 2 ;;
+    --state-root) state_root="${2:-}"; shift 2 ;;
+    --hook-file) hook_file="${2:-}"; shift 2 ;;
+    --limit) limit="${2:-}"; shift 2 ;;
+    --help|-h) usage; exit 0 ;;
+    *) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
+  esac
+done
+
+if [[ -z "$repo_slug" || -z "$state_root" || -z "$hook_file" ]]; then
+  usage >&2
+  exit 1
+fi
+
+if [[ ! -f "$hook_file" ]]; then
+  echo "missing hook file: $hook_file" >&2
+  exit 1
+fi
+
+# shellcheck source=/dev/null
+source "$hook_file"
+
+if ! declare -F issue_clear_retry >/dev/null 2>&1; then
+  issue_clear_retry() { :; }
+fi
+
+ledger_dir="${state_root}/scheduled-issue-retry-catchup"
+retry_dir="${state_root}/retries/issues"
+mkdir -p "$ledger_dir" "$retry_dir"
+
+issue_has_terminal_scheduled_status() {
+  local issue_json="${1:-}"
+  ISSUE_JSON="$issue_json" python3 - <<'PY'
+import json
+import os
+import re
+import sys
+
+issue = json.loads(os.environ.get("ISSUE_JSON", "{}") or "{}")
+state = (issue.get("state") or "").upper()
+labels = {str(item.get("name") or "") for item in (issue.get("labels") or [])}
+body = issue.get("body") or ""
+
+has_schedule = bool(re.search(r'^\s*(?:Agent schedule|Schedule|Cadence)\s*:\s*(?:every\s+)?\d+\s*[mhd]\s*$', body, re.I | re.M))
+if not has_schedule and "agent-scheduled" not in labels:
+    sys.exit(1)
+
+if state != "OPEN":
+    sys.exit(1)
+
+if "agent-running" in labels:
+    sys.exit(1)
+
+terminal_status_labels = {
+    "health-ok",
+    "health-not-ok",
+    "checks-ok",
+    "checks-not-ok",
+    "smoke-ok",
+    "smoke-not-ok",
+}
+if not (labels & terminal_status_labels):
+    sys.exit(1)
+
+sys.exit(0)
+PY
+}
+
+processed=0
+for retry_file in "$retry_dir"/*.env; do
+  [[ -f "$retry_file" ]] || continue
+  if [[ "$processed" -ge "$limit" ]]; then
+    break
+  fi
+
+  issue_id="$(basename "${retry_file%.env}")"
+  [[ -n "$issue_id" ]] || continue
+
+  ledger_file="${ledger_dir}/${issue_id}.env"
+  if [[ -f "$ledger_file" ]]; then
+    continue
+  fi
+
+  issue_json="$(flow_github_issue_view_json "$repo_slug" "$issue_id" 2>/dev/null || true)"
+  [[ -n "$issue_json" && "$issue_json" != "{}" ]] || continue
+
+  if ! issue_has_terminal_scheduled_status "$issue_json"; then
+    continue
+  fi
+
+  retry_reason="$(awk -F= '/^LAST_REASON=/{print $2; exit}' "$retry_file" 2>/dev/null | tr -d '\r' || true)"
+  ISSUE_ID="$issue_id" issue_clear_retry || true
+
+  {
+    printf 'ISSUE_ID=%s\n' "$issue_id"
+    printf 'LAST_REASON=%s\n' "$retry_reason"
+    printf 'PROCESSED_AT=%s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+  } >"$ledger_file"
+
+  printf 'CATCHUP_SCHEDULED_ISSUE=%s\n' "$issue_id"
+  processed=$((processed + 1))
+done
```
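A hedged invocation sketch for this second catch-up tool; the state root, hook file location, and limit are illustrative (the hook file name matches `package/hooks/heartbeat-hooks.sh` from the file list above, but its install path is an assumption). The commented lines show issue-body markers the embedded schedule regex accepts:

```bash
# Illustrative invocation; paths and limit are assumptions.
agent-project-catch-up-scheduled-issue-retries \
  --repo-slug owner/repo \
  --state-root "$HOME/.agent-control-plane/state" \
  --hook-file "$HOME/.agent-control-plane/hooks/heartbeat-hooks.sh" \
  --limit 25

# Example issue-body lines matched by the schedule pattern above:
#   Agent schedule: every 30m
#   Cadence: 6h
```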
```diff
@@ -131,6 +131,24 @@ canonicalize_existing_dir() {
   )
 }
 
+canonicalize_dir_or_parent_join() {
+  local target="${1:-}"
+  local resolved=""
+
+  [[ -n "${target}" ]] || return 1
+
+  resolved="$(canonicalize_existing_dir "${target}" || true)"
+  if [[ -n "${resolved}" ]]; then
+    printf '%s\n' "${resolved}"
+    return 0
+  fi
+
+  (
+    cd "$(dirname "${target}")" 2>/dev/null
+    printf '%s/%s\n' "$(pwd -P)" "$(basename "${target}")"
+  ) || return 1
+}
+
 path_is_within_root() {
   local target="${1:-}"
   local root="${2:-}"
```
```diff
@@ -169,6 +187,8 @@ resident_worktree_protected() {
   local resident_worktree=""
   local resident_realpath=""
   local resolved_resident=""
+  local resident_lane_kind=""
+  local resident_last_status=""
 
   [[ -n "${candidate_path}" && -d "${candidate_path}" ]] || return 1
   resolved_candidate="$(canonicalize_existing_dir "${candidate_path}" || true)"
```
```diff
@@ -181,12 +201,19 @@ resident_worktree_protected() {
     [[ -f "${metadata_file}" ]] || continue
     resident_worktree=""
     resident_realpath=""
+    resident_lane_kind=""
+    resident_last_status=""
     set +u
     set -a
     # shellcheck source=/dev/null
     source "${metadata_file}"
     set +a
     set -u
+    resident_lane_kind="${RESIDENT_LANE_KIND:-}"
+    resident_last_status="${LAST_STATUS:-}"
+    if [[ "${resident_lane_kind}" != "recurring" && "${resident_last_status}" != "running" ]]; then
+      continue
+    fi
     resident_worktree="${WORKTREE_REALPATH:-${WORKTREE:-}}"
     [[ -n "${resident_worktree}" && -d "${resident_worktree}" ]] || continue
     resolved_resident="$(canonicalize_existing_dir "${resident_worktree}" || true)"
```
```diff
@@ -268,6 +295,59 @@ active_resident_run_worktree_protected() {
   return 1
 }
 
+clear_resident_worktree_realpath_references() {
+  local removed_path="${1:-}"
+  local resolved_removed=""
+  local resolved_state_root=""
+  local metadata_file=""
+  local resident_realpath=""
+  local tmp_file=""
+  local metadata_worktree_realpath=""
+
+  [[ -n "${removed_path}" ]] || return 0
+
+  resolved_removed="$(canonicalize_dir_or_parent_join "${removed_path}" || true)"
+  [[ -n "${resolved_removed}" ]] || return 0
+
+  resolved_state_root="$(derive_state_root || true)"
+  [[ -n "${resolved_state_root}" && -d "${resolved_state_root}/resident-workers/issues" ]] || return 0
+
+  for metadata_file in "${resolved_state_root}"/resident-workers/issues/*/metadata.env; do
+    [[ -f "${metadata_file}" ]] || continue
+    metadata_worktree_realpath=""
+    set +u
+    set -a
+    # shellcheck source=/dev/null
+    source "${metadata_file}"
+    set +a
+    set -u
+    resident_realpath="${WORKTREE_REALPATH:-${metadata_worktree_realpath:-}}"
+    [[ -n "${resident_realpath}" ]] || continue
+    resident_realpath="$(canonicalize_dir_or_parent_join "${resident_realpath}" || true)"
+    [[ -n "${resident_realpath}" ]] || continue
+    if [[ "${resident_realpath}" != "${resolved_removed}" ]]; then
+      continue
+    fi
+
+    tmp_file="${metadata_file}.tmp.$$"
+    awk '
+      BEGIN { replaced = 0 }
+      /^WORKTREE_REALPATH=/ {
+        print "WORKTREE_REALPATH='\'''\''"
+        replaced = 1
+        next
+      }
+      { print }
+      END {
+        if (replaced == 0) {
+          print "WORKTREE_REALPATH='\'''\''"
+        }
+      }
+    ' "${metadata_file}" >"${tmp_file}"
+    mv "${tmp_file}" "${metadata_file}"
+  done
+}
+
 cleanup_with_branch_tool() {
   local include_path="${1:-yes}"
   local -a cleanup_args
```
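To illustrate what `clear_resident_worktree_realpath_references` changes, a hedged before/after view of one resident worker's `metadata.env`; the issue id and paths are invented, and only the `WORKTREE_REALPATH`, `RESIDENT_LANE_KIND`, and `LAST_STATUS` keys are taken from the diff:

```bash
# Before: the metadata still points at a worktree that cleanup just removed.
cat "${state_root}/resident-workers/issues/123/metadata.env"
# RESIDENT_LANE_KIND=recurring
# LAST_STATUS=idle
# WORKTREE_REALPATH=/path/to/worktrees/issue-123

# After clear_resident_worktree_realpath_references /path/to/worktrees/issue-123:
# RESIDENT_LANE_KIND=recurring
# LAST_STATUS=idle
# WORKTREE_REALPATH=''
```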
```diff
@@ -355,6 +435,10 @@ if [[ -n "$session" && "$active_tmux_session" != "true" ]]; then
   )"
 fi
 
+if [[ "$skip_worktree_cleanup" != "true" && -n "$worktree_path" && ! -d "$worktree_path" ]]; then
+  clear_resident_worktree_realpath_references "$worktree_path"
+fi
+
 if [[ "$cleanup_status" != "0" && -z "$session" ]]; then
   [[ -n "$cleanup_error" ]] && printf '%s\n' "$cleanup_error" >&2
   exit "$cleanup_status"
```
```diff
@@ -558,6 +558,9 @@ cleanup_scheduler_caches() {
   if [[ -n "${pr_risk_cache_dir:-}" && -d "${pr_risk_cache_dir}" ]]; then
     rm -rf "${pr_risk_cache_dir}" || true
   fi
+  if declare -F heartbeat_invalidate_snapshot_cache >/dev/null 2>&1; then
+    heartbeat_invalidate_snapshot_cache
+  fi
 }
 
 stage_issue_launch() {
```
```diff
@@ -1482,9 +1485,13 @@ sync_open_agent_issues() {
       --session "${issue_prefix}${issue_id}"
     )"
     status="$(awk -F= '/^STATUS=/{print $2}' <<<"$status_out")"
-
-
-
+    case "$status" in
+      RUNNING)
+        ;;
+      *)
+        heartbeat_sync_issue_labels "$issue_id" >/dev/null || true
+        ;;
+    esac
   done <<<"$running_issue_ids_cache"
 }
 
```
```diff
@@ -359,6 +359,16 @@ infer_issue_runtime_failure_from_log() {
     return 0
   fi
 
+  if grep -Eiq 'stale-run no-agent-output-before-stall-threshold|no-agent-output-before-stall-threshold' "${log_file}" 2>/dev/null; then
+    printf 'no-agent-output-before-stall-threshold\n'
+    return 0
+  fi
+
+  if grep -Eiq 'stale-run no-agent-progress-before-stall-threshold|no-agent-progress-before-stall-threshold' "${log_file}" 2>/dev/null; then
+    printf 'no-agent-progress-before-stall-threshold\n'
+    return 0
+  fi
+
   if grep -Eiq 'Ignoring invalid cwd .* No such file or directory|/tmp is absolute|Custom tool call output is missing' "${log_file}" 2>/dev/null; then
     printf 'worker-environment-blocked\n'
     return 0
```
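The issue-side stall checks above (and the PR-side checks later in this diff) key off the same marker phrases. A small hedged self-check with an invented log file, showing how the first pattern maps a stalled run to its failure reason:

```bash
# Hedged self-check; the log file and its single line are invented for illustration.
log_file="$(mktemp)"
printf 'stale-run no-agent-output-before-stall-threshold\n' >"$log_file"

if grep -Eiq 'stale-run no-agent-output-before-stall-threshold|no-agent-output-before-stall-threshold' "$log_file"; then
  printf 'no-agent-output-before-stall-threshold\n'   # reason the classifier would report
fi
rm -f "$log_file"
```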
```diff
@@ -630,6 +640,9 @@ try {
 
 const changedFilesLower = gitChangedFiles.map((file) => file.toLowerCase());
 const repoHasScript = (scriptName) => Boolean(packageJson?.scripts && Object.prototype.hasOwnProperty.call(packageJson.scripts, scriptName));
+const rootTestScript = String(packageJson?.scripts?.test || '').trim();
+const rootTestScriptUsesNodeTest = /^node\s+--test(?:\s|$)/.test(rootTestScript);
+const rootTestScriptLooksWatchMode = /\B--watch(?:All)?(?:[=\s]|$)|(?:^|\s)vitest\s+watch(?:\s|$)/.test(rootTestScript);
 const commandLooksRunnable = (command) => {
   if (/^npm test(?:\s|$)?/.test(command)) return repoHasScript('test');
   if (/^pnpm test(?:\s|$)?/.test(command)) return repoHasScript('test');
```
```diff
@@ -638,6 +651,14 @@ const commandLooksRunnable = (command) => {
   if (/^node\s+--test(?:\s|$)/.test(command)) return true;
   return true;
 };
+const rootTestFallbackCommand = () => {
+  if (!rootTestScript) return '';
+  if (rootTestScriptUsesNodeTest) return 'npm test';
+  if (!rootTestScriptLooksWatchMode) return 'npm test';
+  if (/\bjest\b/i.test(rootTestScript)) return 'npx jest --runInBand --watchAll=false';
+  if (/\bvitest\b/i.test(rootTestScript)) return 'npx vitest run';
+  return '';
+};
 
 if (promptFile && fs.existsSync(promptFile)) {
   const lines = fs.readFileSync(promptFile, 'utf8').split(/\r?\n/).slice(0, 40);
```
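The `rootTestFallbackCommand` helper above queues a non-interactive test command only when nothing else was collected. A hedged bash restatement of the same selection policy, with invented sample `test` script strings; the JavaScript version in the diff is authoritative:

```bash
# Bash sketch mirroring rootTestFallbackCommand(); inputs are illustrative package.json "test" scripts.
pick_fallback_test_command() {
  local test_script="${1:-}"
  [[ -n "$test_script" ]] || return 0
  if [[ "$test_script" =~ ^node[[:space:]]+--test ]]; then printf 'npm test\n'; return; fi
  if [[ ! "$test_script" =~ --watch && ! "$test_script" =~ vitest[[:space:]]+watch ]]; then printf 'npm test\n'; return; fi
  if [[ "$test_script" =~ jest ]]; then printf 'npx jest --runInBand --watchAll=false\n'; return; fi
  if [[ "$test_script" =~ vitest ]]; then printf 'npx vitest run\n'; return; fi
}

pick_fallback_test_command 'vitest watch'   # -> npx vitest run
pick_fallback_test_command 'node --test'    # -> npm test
```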
```diff
@@ -668,15 +689,17 @@ if (promptFile && fs.existsSync(promptFile)) {
 }
 
 const changedTestFiles = [...new Set(gitChangedFiles.filter((file) => /\.(?:spec|test)\.[cm]?[jt]sx?$/.test(file)))];
-
-if (/^node\s+--test(?:\s|$)/.test(rootTestScript)) {
+if (rootTestScriptUsesNodeTest) {
   for (const file of changedTestFiles) {
     addCommand(`node --test ${file}`);
   }
 }
 
-if (commands.length === 0
-
+if (commands.length === 0) {
+  const fallbackCommand = rootTestFallbackCommand();
+  if (fallbackCommand) {
+    addCommand(fallbackCommand);
+  }
 }
 
 const filtered = commands.filter((command) => !recordedPassCommands.has(command));
```
```diff
@@ -987,6 +1010,17 @@ if (explicitFailureReason) {
   reason = 'scope-guard-blocked';
 } else if (/^# Blocker: Provider quota is currently exhausted$/im.test(body)) {
   reason = 'provider-quota-limit';
+} else if (
+  /blocked on external network access/i.test(body) &&
+  (/What I ran:/i.test(body) ||
+    /`pnpm audit`/i.test(body) ||
+    /`gh issue view`/i.test(body)) &&
+  (/failed with `ENOTFOUND`/i.test(body) ||
+    /Exact failure:/i.test(body) ||
+    /registry\.npmjs\.org/i.test(body) ||
+    /api\.github\.com/i.test(body))
+) {
+  reason = 'worker-preflight-network-blocked';
 } else if (
   /blocked on external network access/i.test(body) ||
   /could not perform a safe offline bump/i.test(body) ||
```
```diff
@@ -1361,19 +1395,18 @@ case "$status" in
     failure_reason="$(normalize_issue_failure_reason "${failure_reason:-worker-exit-failed}")"
     schedule_provider_quota_cooldown "${failure_reason}"
    normalize_issue_runner_state "failed" "${LAST_EXIT_CODE:-}" "${failure_reason}"
-    if [[ "${result_outcome:-}" == "blocked" && "${result_action:-}" == "host-comment-blocker" ]]
-      || [[ "${failure_reason}" == "provider-quota-limit" ]]; then
-      if [[ -z "${result_outcome:-}" ]]; then
-        result_outcome="blocked"
-      fi
-      if [[ -z "${result_action:-}" ]]; then
-        result_action="host-comment-blocker"
-      fi
+    if [[ "${result_outcome:-}" == "blocked" && "${result_action:-}" == "host-comment-blocker" ]]; then
       if [[ ! -s "${run_dir}/issue-comment.md" ]]; then
         write_issue_comment_artifact "$(build_issue_runtime_blocker_comment "${failure_reason}")" || true
       fi
       post_issue_comment_if_present
       issue_set_reconcile_summary "$status" "$result_outcome" "$result_action" "$failure_reason"
+    elif [[ "${failure_reason}" == "provider-quota-limit" ]]; then
+      if [[ ! -s "${run_dir}/issue-comment.md" ]]; then
+        write_issue_comment_artifact "$(build_issue_runtime_blocker_comment "${failure_reason}")" || true
+      fi
+      post_issue_comment_if_present
+      issue_set_reconcile_summary "$status" "" "" "$failure_reason"
     else
       issue_set_reconcile_summary "$status" "" "" "$failure_reason"
     fi
```
```diff
@@ -553,6 +553,31 @@ classify_pr_blocked_runtime_reason() {
     return 0
   fi
 
+  if [[ -f "$session_log_file" ]] && grep -Eiq 'no-codex-output-before-stall-threshold|no-codex-progress-before-stall-threshold' "$session_log_file" 2>/dev/null; then
+    printf 'codex-stalled\n'
+    return 0
+  fi
+
+  if [[ -f "$session_log_file" ]] && grep -Eiq 'no-agent-output-before-stall-threshold|no-agent-progress-before-stall-threshold' "$session_log_file" 2>/dev/null; then
+    printf 'agent-stalled\n'
+    return 0
+  fi
+
+  if [[ -f "$session_log_file" ]] && grep -Eiq 'provider-quota-limit|quota.*exhausted|rate.limit.*exceeded' "$session_log_file" 2>/dev/null; then
+    printf 'provider-quota-limit\n'
+    return 0
+  fi
+
+  if [[ -f "$pr_comment_file" ]] && grep -Eiq 'no-codex-output-before-stall-threshold|no-codex-progress-before-stall-threshold' "$pr_comment_file" 2>/dev/null; then
+    printf 'codex-stalled\n'
+    return 0
+  fi
+
+  if [[ -f "$pr_comment_file" ]] && grep -Eiq 'no-agent-output-before-stall-threshold|no-agent-progress-before-stall-threshold' "$pr_comment_file" 2>/dev/null; then
+    printf 'agent-stalled\n'
+    return 0
+  fi
+
   return 1
 }
 
```