agent-control-plane 0.4.9 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +109 -13
- package/npm/bin/agent-control-plane.js +1 -1
- package/package.json +39 -33
- package/tools/bin/debug-session.sh +106 -0
- package/tools/bin/flow-config-lib.sh +13 -3508
- package/tools/bin/flow-execution-lib.sh +243 -0
- package/tools/bin/flow-forge-lib.sh +1770 -0
- package/tools/bin/flow-profile-lib.sh +335 -0
- package/tools/bin/flow-provider-lib.sh +981 -0
- package/tools/bin/flow-runtime-doctor-linux.sh +136 -0
- package/tools/bin/flow-runtime-doctor.sh +5 -1
- package/tools/bin/flow-session-lib.sh +317 -0
- package/tools/bin/install-project-systemd.sh +255 -0
- package/tools/bin/project-runtimectl.sh +45 -0
- package/tools/bin/project-systemd-bootstrap.sh +74 -0
- package/tools/bin/uninstall-project-systemd.sh +87 -0
- package/tools/dashboard/app.js +238 -8
- package/tools/dashboard/issue_queue_state.py +101 -0
- package/tools/dashboard/requirements.txt +3 -0
- package/tools/dashboard/server.py +250 -30
- package/tools/dashboard/styles.css +526 -455
- package/tools/bin/agent-cleanup-worktree +0 -247
- package/tools/bin/agent-github-update-labels +0 -105
- package/tools/bin/agent-init-worktree +0 -216
- package/tools/bin/agent-project-archive-run +0 -52
- package/tools/bin/agent-project-capture-worker +0 -46
- package/tools/bin/agent-project-catch-up-issue-pr-links +0 -118
- package/tools/bin/agent-project-catch-up-merged-prs +0 -195
- package/tools/bin/agent-project-catch-up-scheduled-issue-retries +0 -123
- package/tools/bin/agent-project-cleanup-session +0 -513
- package/tools/bin/agent-project-detached-launch +0 -127
- package/tools/bin/agent-project-heartbeat-loop +0 -1029
- package/tools/bin/agent-project-open-issue-worktree +0 -89
- package/tools/bin/agent-project-open-pr-worktree +0 -80
- package/tools/bin/agent-project-publish-issue-pr +0 -468
- package/tools/bin/agent-project-reconcile-issue-session +0 -1409
- package/tools/bin/agent-project-reconcile-pr-session +0 -1288
- package/tools/bin/agent-project-retry-state +0 -158
- package/tools/bin/agent-project-run-claude-session +0 -805
- package/tools/bin/agent-project-run-codex-resilient +0 -963
- package/tools/bin/agent-project-run-codex-session +0 -435
- package/tools/bin/agent-project-run-kilo-session +0 -369
- package/tools/bin/agent-project-run-ollama-session +0 -658
- package/tools/bin/agent-project-run-openclaw-session +0 -1309
- package/tools/bin/agent-project-run-opencode-session +0 -377
- package/tools/bin/agent-project-run-pi-session +0 -479
- package/tools/bin/agent-project-sync-anchor-repo +0 -139
- package/tools/bin/agent-project-sync-source-repo-main +0 -163
- package/tools/bin/agent-project-worker-status +0 -188
- package/tools/bin/branch-verification-guard.sh +0 -364
- package/tools/bin/capture-worker.sh +0 -18
- package/tools/bin/cleanup-worktree.sh +0 -52
- package/tools/bin/codex-quota +0 -31
- package/tools/bin/create-follow-up-issue.sh +0 -114
- package/tools/bin/dashboard-launchd-bootstrap.sh +0 -50
- package/tools/bin/issue-publish-localization-guard.sh +0 -142
- package/tools/bin/issue-publish-scope-guard.sh +0 -242
- package/tools/bin/issue-requires-local-workspace-install.sh +0 -31
- package/tools/bin/issue-resource-class.sh +0 -12
- package/tools/bin/kick-scheduler.sh +0 -75
- package/tools/bin/label-follow-up-issues.sh +0 -14
- package/tools/bin/new-pr-worktree.sh +0 -50
- package/tools/bin/new-worktree.sh +0 -49
- package/tools/bin/pr-risk.sh +0 -12
- package/tools/bin/prepare-worktree.sh +0 -142
- package/tools/bin/provider-cooldown-state.sh +0 -204
- package/tools/bin/publish-issue-worker.sh +0 -31
- package/tools/bin/reconcile-bootstrap-lib.sh +0 -113
- package/tools/bin/reconcile-issue-worker.sh +0 -34
- package/tools/bin/reconcile-pr-worker.sh +0 -34
- package/tools/bin/record-verification.sh +0 -71
- package/tools/bin/render-flow-config.sh +0 -98
- package/tools/bin/resident-issue-controller-lib.sh +0 -448
- package/tools/bin/retry-state.sh +0 -31
- package/tools/bin/reuse-issue-worktree.sh +0 -121
- package/tools/bin/run-codex-bypass.sh +0 -3
- package/tools/bin/run-codex-safe.sh +0 -3
- package/tools/bin/run-codex-task.sh +0 -280
- package/tools/bin/serve-dashboard.sh +0 -5
- package/tools/bin/start-issue-worker.sh +0 -943
- package/tools/bin/start-pr-fix-worker.sh +0 -528
- package/tools/bin/start-pr-merge-repair-worker.sh +0 -8
- package/tools/bin/start-pr-review-worker.sh +0 -261
- package/tools/bin/start-resident-issue-loop.sh +0 -499
- package/tools/bin/update-github-labels.sh +0 -14
- package/tools/bin/worker-status.sh +0 -19
- package/tools/bin/workflow-catalog.sh +0 -77
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# flow-runtime-doctor-linux.sh - Linux-specific runtime validation for ACP
# Checks systemd services, Linux paths, and runtime health.
# Reads config via flow-config-lib.sh; purely diagnostic (writes nothing).
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=/dev/null
source "${SCRIPT_DIR}/flow-config-lib.sh"

FLOW_SKILL_DIR="$(resolve_flow_skill_dir "${BASH_SOURCE[0]}")"
CONTROL_PLANE_NAME="$(flow_canonical_skill_name)"
RUNTIME_HOME="$(resolve_runtime_home)"

echo "=== ACP Linux Runtime Doctor ==="
echo ""

# --- Systemd Checks ---
echo "--- Systemd Service Status ---"
if command -v systemctl &>/dev/null; then
  echo "systemctl: available"

  # Check user services (systemd --user): running beats installed-but-stopped.
  if systemctl --user is-active --quiet "${CONTROL_PLANE_NAME}.service" 2>/dev/null; then
    echo "service ${CONTROL_PLANE_NAME}: active (user)"
  elif systemctl --user is-enabled --quiet "${CONTROL_PLANE_NAME}.service" 2>/dev/null; then
    echo "service ${CONTROL_PLANE_NAME}: installed but not running (user)"
  else
    echo "service ${CONTROL_PLANE_NAME}: not installed (user)"
  fi

  # Check system services (if installed system-wide)
  if systemctl is-active --quiet "${CONTROL_PLANE_NAME}.service" 2>/dev/null; then
    echo "service ${CONTROL_PLANE_NAME}: active (system)"
  fi
else
  echo "systemctl: NOT available (not a systemd-based system?)"
fi
echo ""

# --- Linux Path Checks ---
echo "--- Linux Path Validation ---"
echo "RUNTIME_HOME=${RUNTIME_HOME}"
echo "FLOW_SKILL_DIR=${FLOW_SKILL_DIR}"

# Check XDG paths (Linux standard); fall back to the spec defaults.
XDG_CONFIG_HOME="${XDG_CONFIG_HOME:-$HOME/.config}"
XDG_RUNTIME_DIR="${XDG_RUNTIME_DIR:-/run/user/$(id -u)}"

echo "XDG_CONFIG_HOME=${XDG_CONFIG_HOME}"
echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}"

if [[ -d "${XDG_RUNTIME_DIR}" ]]; then
  echo "XDG_RUNTIME_DIR: exists"
else
  echo "XDG_RUNTIME_DIR: NOT FOUND (may cause issues with user services)"
fi
echo ""

# --- Process Checks ---
echo "--- Process Checks ---"
if command -v pgrep &>/dev/null; then
  AGENT_PIDS=$(pgrep -f "agent-control-plane" 2>/dev/null || true)
  if [[ -n "${AGENT_PIDS}" ]]; then
    echo "agent-control-plane processes running: yes (PIDs: ${AGENT_PIDS//$'\n'/ })"
    # pgrep emits one PID per line, but ps(1) documents the -p pidlist as
    # space/comma separated — join with commas so multiple PIDs resolve.
    ps -p "${AGENT_PIDS//$'\n'/,}" -o pid,ppid,cmd 2>/dev/null || true
  else
    echo "agent-control-plane processes running: no"
  fi
else
  echo "pgrep: NOT available"
fi
echo ""

# --- tmux Checks ---
echo "--- tmux Session Checks ---"
if command -v tmux &>/dev/null; then
  echo "tmux: available ($(tmux -V))"
  # grep -c still prints "0" on no match; || true swallows its exit 1.
  TMUX_SESSIONS=$(tmux ls 2>/dev/null | grep -c "agent-" || true)
  echo "agent- tmux sessions: ${TMUX_SESSIONS}"
  if [[ ${TMUX_SESSIONS} -gt 0 ]]; then
    tmux ls 2>/dev/null | grep "agent-" || true
  fi
else
  echo "tmux: NOT installed (required for ACP worker sessions)"
fi
echo ""

# --- Socket/Port Checks ---
echo "--- Socket/Port Checks ---"
if command -v ss &>/dev/null; then
  echo "Checking for dashboard port (3180)..."
  ss -tlnp 2>/dev/null | grep ":3180 " || echo "Port 3180: not in use"
elif command -v netstat &>/dev/null; then
  netstat -tlnp 2>/dev/null | grep ":3180 " || echo "Port 3180: not in use"
else
  echo "ss/netstat: NOT available, skipping port check"
fi
echo ""

# --- Log File Checks ---
echo "--- Log File Checks ---"
LOG_DIR="${RUNTIME_HOME}/logs"
if [[ -d "${LOG_DIR}" ]]; then
  echo "LOG_DIR=${LOG_DIR}: exists"
  LOG_COUNT=$(find "${LOG_DIR}" -name "*.log" 2>/dev/null | wc -l)
  echo "Log files: ${LOG_COUNT}"
else
  echo "LOG_DIR=${LOG_DIR}: NOT FOUND"
fi
echo ""

# --- Run Generic Doctor ---
echo "=== Generic Runtime Doctor ==="
if [[ -f "${SCRIPT_DIR}/flow-runtime-doctor.sh" ]]; then
  bash "${SCRIPT_DIR}/flow-runtime-doctor.sh"
else
  echo "flow-runtime-doctor.sh: NOT FOUND"
fi

echo ""
echo "=== Linux Doctor Complete ==="
echo ""
echo "=== NEXT STEPS ==="
if ! command -v systemctl &>/dev/null; then
  echo "NOT on systemd: Use macOS launchd or manual tmux for runtime."
elif ! systemctl --user is-active --quiet "${CONTROL_PLANE_NAME}.service" 2>/dev/null; then
  echo "Service not running. Start with:"
  echo "  systemctl --user start ${CONTROL_PLANE_NAME}.service"
  echo "  systemctl --user enable ${CONTROL_PLANE_NAME}.service  # autostart"
fi

if ! command -v tmux &>/dev/null; then
  echo "MISSING: tmux is required. Install:"
  echo "  Ubuntu/Debian: sudo apt install tmux"
  echo "  Alpine: apk add tmux"
fi
|
|
@@ -93,5 +93,9 @@ if [[ -n "${PROFILE_SELECTION_HINT}" ]]; then
|
|
|
93
93
|
fi
|
|
94
94
|
|
|
95
95
|
if [[ "${status}" != "ok" ]]; then
|
|
96
|
-
printf '
|
|
96
|
+
printf '\n=== ACTION REQUIRED ===\n'
|
|
97
|
+
printf 'Status: %s\n' "${status}"
|
|
98
|
+
printf 'Next step: Run sync to fix issues:\n'
|
|
99
|
+
printf ' bash %q %q %q\n' "${SYNC_SCRIPT}" "${SHARED_AGENT_HOME}" "${RUNTIME_HOME}"
|
|
100
|
+
printf '\nOr run: bash %s/tools/bin/setup.sh --resume\n' "${FLOW_SKILL_DIR}"
|
|
97
101
|
fi
|
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
# flow_provider_pool_state_get CONFIG_FILE POOL_NAME
#
# Resolve the full state of one provider pool and print it as KEY=VALUE
# lines on stdout (env-file format, consumed by flow_selected_provider_pool_env).
# VALID=yes/no  - whether the pool has all settings its backend requires
# READY=yes/no  - whether the pool may be attempted now (no active cooldown)
# Retry/cooldown state is read from
#   <state_root>/retries/providers/<backend>-<model>.env when present.
flow_provider_pool_state_get() {
  local config_file="${1:?config file required}"
  local pool_name="${2:?pool name required}"
  local backend=""
  local model=""
  local state_root=""
  local provider_key=""
  local state_file=""
  # Retry-state defaults used when no state file exists yet.
  local attempts="0"
  local next_attempt_epoch="0"
  local next_attempt_at=""
  local last_reason=""
  local updated_at=""
  local ready="yes"
  local valid="yes"
  local now_epoch=""
  # Per-backend settings; only the selected backend's group is validated.
  local safe_profile=""
  local bypass_profile=""
  local claude_model=""
  local claude_permission_mode=""
  local claude_effort=""
  local claude_timeout_seconds=""
  local claude_max_attempts=""
  local claude_retry_backoff_seconds=""
  local openclaw_model=""
  local openclaw_thinking=""
  local openclaw_timeout_seconds=""
  local ollama_model=""
  local ollama_base_url=""
  local ollama_timeout_seconds=""
  local pi_model=""
  local pi_thinking=""
  local pi_timeout_seconds=""
  local opencode_model=""
  local opencode_timeout_seconds=""
  local kilo_model=""
  local kilo_timeout_seconds=""

  # Pull every backend's settings up front; unused ones stay empty and are
  # still emitted below so consumers always see the full key set.
  backend="$(flow_provider_pool_backend "${config_file}" "${pool_name}")"
  safe_profile="$(flow_provider_pool_safe_profile "${config_file}" "${pool_name}")"
  bypass_profile="$(flow_provider_pool_bypass_profile "${config_file}" "${pool_name}")"
  claude_model="$(flow_provider_pool_claude_model "${config_file}" "${pool_name}")"
  claude_permission_mode="$(flow_provider_pool_claude_permission_mode "${config_file}" "${pool_name}")"
  claude_effort="$(flow_provider_pool_claude_effort "${config_file}" "${pool_name}")"
  claude_timeout_seconds="$(flow_provider_pool_claude_timeout_seconds "${config_file}" "${pool_name}")"
  claude_max_attempts="$(flow_provider_pool_claude_max_attempts "${config_file}" "${pool_name}")"
  claude_retry_backoff_seconds="$(flow_provider_pool_claude_retry_backoff_seconds "${config_file}" "${pool_name}")"
  openclaw_model="$(flow_provider_pool_openclaw_model "${config_file}" "${pool_name}")"
  openclaw_thinking="$(flow_provider_pool_openclaw_thinking "${config_file}" "${pool_name}")"
  openclaw_timeout_seconds="$(flow_provider_pool_openclaw_timeout_seconds "${config_file}" "${pool_name}")"
  ollama_model="$(flow_provider_pool_ollama_model "${config_file}" "${pool_name}")"
  ollama_base_url="$(flow_provider_pool_ollama_base_url "${config_file}" "${pool_name}")"
  ollama_timeout_seconds="$(flow_provider_pool_ollama_timeout_seconds "${config_file}" "${pool_name}")"
  pi_model="$(flow_provider_pool_pi_model "${config_file}" "${pool_name}")"
  pi_thinking="$(flow_provider_pool_pi_thinking "${config_file}" "${pool_name}")"
  pi_timeout_seconds="$(flow_provider_pool_pi_timeout_seconds "${config_file}" "${pool_name}")"
  opencode_model="$(flow_provider_pool_opencode_model "${config_file}" "${pool_name}")"
  opencode_timeout_seconds="$(flow_provider_pool_opencode_timeout_seconds "${config_file}" "${pool_name}")"
  kilo_model="$(flow_provider_pool_kilo_model "${config_file}" "${pool_name}")"
  kilo_timeout_seconds="$(flow_provider_pool_kilo_timeout_seconds "${config_file}" "${pool_name}")"
  model="$(flow_provider_pool_model_identity "${config_file}" "${pool_name}")"

  # A pool is valid only if its backend's required fields are all non-empty.
  # Unknown backends are invalid.
  case "${backend}" in
    codex)
      [[ -n "${safe_profile}" && -n "${bypass_profile}" ]] || valid="no"
      ;;
    claude)
      [[ -n "${claude_model}" && -n "${claude_permission_mode}" && -n "${claude_effort}" && -n "${claude_timeout_seconds}" && -n "${claude_max_attempts}" && -n "${claude_retry_backoff_seconds}" ]] || valid="no"
      ;;
    openclaw)
      [[ -n "${openclaw_model}" && -n "${openclaw_thinking}" && -n "${openclaw_timeout_seconds}" ]] || valid="no"
      ;;
    ollama)
      [[ -n "${ollama_model}" ]] || valid="no"
      ;;
    pi)
      [[ -n "${pi_model}" ]] || valid="no"
      ;;
    opencode)
      [[ -n "${opencode_model}" && -n "${opencode_timeout_seconds}" ]] || valid="no"
      ;;
    kilo)
      [[ -n "${kilo_model}" && -n "${kilo_timeout_seconds}" ]] || valid="no"
      ;;
    *)
      valid="no"
      ;;
  esac

  if [[ "${valid}" == "yes" && -n "${model}" ]]; then
    state_root="$(flow_resolve_state_root "${config_file}")"
    provider_key="$(flow_sanitize_provider_key "${backend}-${model}")"
    state_file="${state_root}/retries/providers/${provider_key}.env"

    if [[ -f "${state_file}" ]]; then
      # NOTE(review): set -a exports the sourced vars (ATTEMPTS, ...) into the
      # caller's environment and they persist after this call — confirm that
      # is intentional before relying on a clean environment downstream.
      set -a
      # shellcheck source=/dev/null
      source "${state_file}"
      set +a
      attempts="${ATTEMPTS:-0}"
      next_attempt_epoch="${NEXT_ATTEMPT_EPOCH:-0}"
      next_attempt_at="${NEXT_ATTEMPT_AT:-}"
      last_reason="${LAST_REASON:-}"
      updated_at="${UPDATED_AT:-}"
    fi

    # Cooldown gate: a numeric NEXT_ATTEMPT_EPOCH in the future blocks the pool.
    now_epoch="$(date +%s)"
    if [[ "${next_attempt_epoch}" =~ ^[0-9]+$ ]] && (( next_attempt_epoch > now_epoch )); then
      ready="no"
    fi
  else
    # Invalid pools (or pools with no model identity) are never ready.
    ready="no"
  fi

  # Emit the complete state as env-style lines; keys for backends other than
  # the selected one are emitted with empty values.
  printf 'POOL_NAME=%s\n' "${pool_name}"
  printf 'VALID=%s\n' "${valid}"
  printf 'BACKEND=%s\n' "${backend}"
  printf 'MODEL=%s\n' "${model}"
  printf 'PROVIDER_KEY=%s\n' "${provider_key}"
  printf 'ATTEMPTS=%s\n' "${attempts}"
  printf 'NEXT_ATTEMPT_EPOCH=%s\n' "${next_attempt_epoch}"
  printf 'NEXT_ATTEMPT_AT=%s\n' "${next_attempt_at}"
  printf 'READY=%s\n' "${ready}"
  printf 'LAST_REASON=%s\n' "${last_reason}"
  printf 'UPDATED_AT=%s\n' "${updated_at}"
  printf 'SAFE_PROFILE=%s\n' "${safe_profile}"
  printf 'BYPASS_PROFILE=%s\n' "${bypass_profile}"
  printf 'CLAUDE_MODEL=%s\n' "${claude_model}"
  printf 'CLAUDE_PERMISSION_MODE=%s\n' "${claude_permission_mode}"
  printf 'CLAUDE_EFFORT=%s\n' "${claude_effort}"
  printf 'CLAUDE_TIMEOUT_SECONDS=%s\n' "${claude_timeout_seconds}"
  printf 'CLAUDE_MAX_ATTEMPTS=%s\n' "${claude_max_attempts}"
  printf 'CLAUDE_RETRY_BACKOFF_SECONDS=%s\n' "${claude_retry_backoff_seconds}"
  printf 'OPENCLAW_MODEL=%s\n' "${openclaw_model}"
  printf 'OPENCLAW_THINKING=%s\n' "${openclaw_thinking}"
  printf 'OPENCLAW_TIMEOUT_SECONDS=%s\n' "${openclaw_timeout_seconds}"
  printf 'OLLAMA_MODEL=%s\n' "${ollama_model}"
  printf 'OLLAMA_BASE_URL=%s\n' "${ollama_base_url}"
  printf 'OLLAMA_TIMEOUT_SECONDS=%s\n' "${ollama_timeout_seconds}"
  printf 'PI_MODEL=%s\n' "${pi_model}"
  printf 'PI_THINKING=%s\n' "${pi_thinking}"
  printf 'PI_TIMEOUT_SECONDS=%s\n' "${pi_timeout_seconds}"
  printf 'OPENCODE_MODEL=%s\n' "${opencode_model}"
  printf 'OPENCODE_TIMEOUT_SECONDS=%s\n' "${opencode_timeout_seconds}"
  printf 'KILO_MODEL=%s\n' "${kilo_model}"
  printf 'KILO_TIMEOUT_SECONDS=%s\n' "${kilo_timeout_seconds}"
}
|
|
148
|
+
|
|
149
|
+
# flow_selected_provider_pool_env [CONFIG_FILE]
#
# Select a provider pool and print its state (KEY=VALUE lines from
# flow_provider_pool_state_get) plus two selection keys:
#   POOLS_EXHAUSTED=no  SELECTION_REASON=ready        - a pool is ready now
#   POOLS_EXHAUSTED=yes SELECTION_REASON=all-cooldown - all valid pools are
#     cooling down; the one with the earliest numeric NEXT_ATTEMPT_EPOCH wins.
# Returns 1 when pools are disabled or no valid pool exists.
flow_selected_provider_pool_env() {
  local config_file="${1:-}"
  local pool_name=""
  local candidate=""
  local candidate_valid=""
  local candidate_ready=""
  local candidate_next_epoch="0"
  local exhausted_candidate=""
  local exhausted_epoch=""

  # Default the config path relative to the caller's script location.
  if [[ -z "${config_file}" ]]; then
    config_file="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  fi

  if ! flow_provider_pools_enabled "${config_file}"; then
    return 1
  fi

  # Walk pools in configured order; the first READY pool wins immediately.
  while IFS= read -r pool_name; do
    [[ -n "${pool_name}" ]] || continue
    candidate="$(flow_provider_pool_state_get "${config_file}" "${pool_name}")"
    candidate_valid="$(awk -F= '/^VALID=/{print $2}' <<<"${candidate}")"
    [[ "${candidate_valid}" == "yes" ]] || continue

    candidate_ready="$(awk -F= '/^READY=/{print $2}' <<<"${candidate}")"
    if [[ "${candidate_ready}" == "yes" ]]; then
      printf '%s\n' "${candidate}"
      printf 'POOLS_EXHAUSTED=no\n'
      printf 'SELECTION_REASON=ready\n'
      return 0
    fi

    # Not ready: track the cooling-down pool with the soonest retry time.
    candidate_next_epoch="$(awk -F= '/^NEXT_ATTEMPT_EPOCH=/{print $2}' <<<"${candidate}")"
    if [[ -z "${exhausted_candidate}" ]]; then
      # First valid-but-cooling pool is kept unconditionally as the baseline.
      exhausted_candidate="${candidate}"
      exhausted_epoch="${candidate_next_epoch}"
      continue
    fi

    # Replace the baseline only when both epochs are numeric and this one
    # is strictly earlier.
    if [[ "${candidate_next_epoch}" =~ ^[0-9]+$ && "${exhausted_epoch}" =~ ^[0-9]+$ ]] && (( candidate_next_epoch < exhausted_epoch )); then
      exhausted_candidate="${candidate}"
      exhausted_epoch="${candidate_next_epoch}"
    fi
  done < <(flow_provider_pool_names "${config_file}")

  # No valid pool at all -> caller must handle the failure.
  [[ -n "${exhausted_candidate}" ]] || return 1

  printf '%s\n' "${exhausted_candidate}"
  printf 'POOLS_EXHAUSTED=yes\n'
  printf 'SELECTION_REASON=all-cooldown\n'
}
|
|
200
|
+
|
|
201
|
+
# Resolve the session-name prefix for issue work.
# Precedence: env override (ACP_ISSUE_SESSION_PREFIX, then legacy
# F_LOSNING_ISSUE_SESSION_PREFIX) > config key session_naming.issue_prefix
# > built-in default.
flow_resolve_issue_session_prefix() {
  local cfg="${1:-}"
  [[ -n "${cfg}" ]] || cfg="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  local fallback
  fallback="$(flow_default_issue_session_prefix "${cfg}")"
  flow_env_or_config "${cfg}" "ACP_ISSUE_SESSION_PREFIX F_LOSNING_ISSUE_SESSION_PREFIX" "session_naming.issue_prefix" "${fallback}"
}
|
|
210
|
+
|
|
211
|
+
# Resolve the session-name prefix for PR work.
# Precedence: env override (ACP_PR_SESSION_PREFIX, then legacy
# F_LOSNING_PR_SESSION_PREFIX) > config key session_naming.pr_prefix
# > built-in default.
flow_resolve_pr_session_prefix() {
  local cfg="${1:-}"
  [[ -n "${cfg}" ]] || cfg="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  local fallback
  fallback="$(flow_default_pr_session_prefix "${cfg}")"
  flow_env_or_config "${cfg}" "ACP_PR_SESSION_PREFIX F_LOSNING_PR_SESSION_PREFIX" "session_naming.pr_prefix" "${fallback}"
}
|
|
220
|
+
|
|
221
|
+
# Resolve the branch-name prefix for issue branches.
# Precedence: env override (ACP_ISSUE_BRANCH_PREFIX, then legacy
# F_LOSNING_ISSUE_BRANCH_PREFIX) > config key
# session_naming.issue_branch_prefix > built-in default.
flow_resolve_issue_branch_prefix() {
  local cfg="${1:-}"
  [[ -n "${cfg}" ]] || cfg="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  local fallback
  fallback="$(flow_default_issue_branch_prefix "${cfg}")"
  flow_env_or_config "${cfg}" "ACP_ISSUE_BRANCH_PREFIX F_LOSNING_ISSUE_BRANCH_PREFIX" "session_naming.issue_branch_prefix" "${fallback}"
}
|
|
230
|
+
|
|
231
|
+
# Resolve the branch-name prefix for PR worktree branches.
# Precedence: env override (ACP_PR_WORKTREE_BRANCH_PREFIX, then legacy
# F_LOSNING_PR_WORKTREE_BRANCH_PREFIX) > config key
# session_naming.pr_worktree_branch_prefix > built-in default.
flow_resolve_pr_worktree_branch_prefix() {
  local cfg="${1:-}"
  [[ -n "${cfg}" ]] || cfg="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  local fallback
  fallback="$(flow_default_pr_worktree_branch_prefix "${cfg}")"
  flow_env_or_config "${cfg}" "ACP_PR_WORKTREE_BRANCH_PREFIX F_LOSNING_PR_WORKTREE_BRANCH_PREFIX" "session_naming.pr_worktree_branch_prefix" "${fallback}"
}
|
|
240
|
+
|
|
241
|
+
# Resolve the whitespace-separated list of branch globs that identify
# ACP-managed PR branches.
# Precedence: env override (ACP_MANAGED_PR_BRANCH_GLOBS, then legacy
# F_LOSNING_MANAGED_PR_BRANCH_GLOBS) > config key
# session_naming.managed_pr_branch_globs > built-in default.
flow_resolve_managed_pr_branch_globs() {
  local cfg="${1:-}"
  [[ -n "${cfg}" ]] || cfg="$(resolve_flow_config_yaml "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")"
  local fallback
  fallback="$(flow_default_managed_pr_branch_globs "${cfg}")"
  flow_env_or_config "${cfg}" "ACP_MANAGED_PR_BRANCH_GLOBS F_LOSNING_MANAGED_PR_BRANCH_GLOBS" "session_naming.managed_pr_branch_globs" "${fallback}"
}
|
|
250
|
+
|
|
251
|
+
# flow_escape_regex STRING
# Print STRING with every regex metacharacter backslash-escaped, delegating
# to Python's re.escape for exact, well-tested semantics.
flow_escape_regex() {
  local literal="${1:-}"
  python3 - "${literal}" <<'PY'
import re
import sys

print(re.escape(sys.argv[1]))
PY
}
|
|
260
|
+
|
|
261
|
+
# flow_managed_pr_prefixes [CONFIG_FILE]
# Print one managed PR branch prefix per line, derived from the configured
# branch globs by stripping a single trailing '*'. Globs that reduce to an
# empty prefix are skipped.
flow_managed_pr_prefixes() {
  local cfg="${1:-}"
  local glob_list=""
  local glob=""
  local stem=""

  glob_list="$(flow_resolve_managed_pr_branch_globs "${cfg}")"
  # shellcheck disable=SC2086 — the glob list is intentionally word-split
  for glob in ${glob_list}; do
    stem="${glob%\*}"
    if [[ -n "${stem}" ]]; then
      printf '%s\n' "${stem}"
    fi
  done
}
|
|
274
|
+
|
|
275
|
+
# flow_managed_pr_prefixes_json [CONFIG_FILE]
# Print the managed PR branch prefixes as a JSON array of strings
# (e.g. ["agent/acp/"]); an empty prefix list yields [].
flow_managed_pr_prefixes_json() {
  local config_file="${1:-}"
  local prefixes=()
  local prefix=""

  while IFS= read -r prefix; do
    [[ -n "${prefix}" ]] || continue
    prefixes+=("${prefix}")
  done < <(flow_managed_pr_prefixes "${config_file}")

  # The ${arr[@]+...} guard is required: with zero prefixes, an unguarded
  # "${prefixes[@]}" trips `set -u` on bash < 4.4 (e.g. macOS /bin/bash 3.2).
  python3 - ${prefixes[@]+"${prefixes[@]}"} <<'PY'
import json
import sys

print(json.dumps(sys.argv[1:]))
PY
}
|
|
292
|
+
|
|
293
|
+
# flow_managed_issue_branch_regex [CONFIG_FILE]
#
# Print a regex (PCRE syntax: uses (?:...) and a (?<id>...) named group) that
# matches managed issue branches and captures the issue number, e.g.
#   ^(?:<prefix1>|<prefix2>)/issue-(?<id>[0-9]+)(?:-|$)
# NOTE(review): despite the "issue" name, the prefixes come from
# flow_managed_pr_prefixes (the PR branch globs) — confirm that sharing is
# intentional.
flow_managed_issue_branch_regex() {
  local config_file="${1:-}"
  local prefix=""
  local normalized_prefix=""
  local escaped_prefix=""
  local joined=""

  # Build an alternation of all managed prefixes, each regex-escaped and
  # with any trailing '/' removed (the pattern re-adds the separator).
  while IFS= read -r prefix; do
    [[ -n "${prefix}" ]] || continue
    normalized_prefix="${prefix%/}"
    escaped_prefix="$(flow_escape_regex "${normalized_prefix}")"
    if [[ -n "${joined}" ]]; then
      joined="${joined}|${escaped_prefix}"
    else
      joined="${escaped_prefix}"
    fi
  done < <(flow_managed_pr_prefixes "${config_file}")

  # No configured prefixes: fall back to "agent/<adapter id>".
  if [[ -z "${joined}" ]]; then
    joined="$(flow_escape_regex "agent/$(flow_resolve_adapter_id "${config_file}")")"
  fi

  printf '^(?:%s)/issue-(?<id>[0-9]+)(?:-|$)\n' "${joined}"
}
|
|
317
|
+
|