agent-control-plane 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +589 -0
- package/SKILL.md +149 -0
- package/assets/workflow-catalog.json +57 -0
- package/bin/audit-issue-routing.sh +74 -0
- package/bin/issue-resource-class.sh +58 -0
- package/bin/label-follow-up-issues.sh +114 -0
- package/bin/pr-risk.sh +532 -0
- package/bin/sync-pr-labels.sh +112 -0
- package/hooks/heartbeat-hooks.sh +573 -0
- package/hooks/issue-reconcile-hooks.sh +217 -0
- package/hooks/pr-reconcile-hooks.sh +225 -0
- package/npm/bin/agent-control-plane.js +1984 -0
- package/npm/public-bin/agent-control-plane +3 -0
- package/package.json +61 -0
- package/tools/bin/agent-cleanup-worktree +247 -0
- package/tools/bin/agent-github-update-labels +66 -0
- package/tools/bin/agent-init-worktree +216 -0
- package/tools/bin/agent-project-archive-run +52 -0
- package/tools/bin/agent-project-capture-worker +46 -0
- package/tools/bin/agent-project-catch-up-merged-prs +137 -0
- package/tools/bin/agent-project-cleanup-session +244 -0
- package/tools/bin/agent-project-detached-launch +107 -0
- package/tools/bin/agent-project-heartbeat-loop +2347 -0
- package/tools/bin/agent-project-open-issue-worktree +89 -0
- package/tools/bin/agent-project-open-pr-worktree +80 -0
- package/tools/bin/agent-project-publish-issue-pr +349 -0
- package/tools/bin/agent-project-reconcile-issue-session +1128 -0
- package/tools/bin/agent-project-reconcile-pr-session +1005 -0
- package/tools/bin/agent-project-retry-state +147 -0
- package/tools/bin/agent-project-run-claude-session +657 -0
- package/tools/bin/agent-project-run-codex-resilient +718 -0
- package/tools/bin/agent-project-run-codex-session +316 -0
- package/tools/bin/agent-project-run-kilo-session +27 -0
- package/tools/bin/agent-project-run-openclaw-session +984 -0
- package/tools/bin/agent-project-run-opencode-session +27 -0
- package/tools/bin/agent-project-sync-anchor-repo +128 -0
- package/tools/bin/agent-project-worker-status +143 -0
- package/tools/bin/audit-agent-worktrees.sh +310 -0
- package/tools/bin/audit-issue-routing.sh +11 -0
- package/tools/bin/audit-retained-layout.sh +58 -0
- package/tools/bin/audit-retained-overlap.sh +135 -0
- package/tools/bin/audit-retained-worktrees.sh +228 -0
- package/tools/bin/branch-verification-guard.sh +351 -0
- package/tools/bin/capture-worker.sh +18 -0
- package/tools/bin/check-skill-contracts.sh +324 -0
- package/tools/bin/cleanup-worktree.sh +44 -0
- package/tools/bin/codex-quota +31 -0
- package/tools/bin/create-follow-up-issue.sh +114 -0
- package/tools/bin/dashboard-launchd-bootstrap.sh +38 -0
- package/tools/bin/flow-config-lib.sh +2127 -0
- package/tools/bin/flow-resident-worker-lib.sh +683 -0
- package/tools/bin/flow-runtime-doctor.sh +97 -0
- package/tools/bin/flow-shell-lib.sh +266 -0
- package/tools/bin/heartbeat-recovery-preflight.sh +106 -0
- package/tools/bin/heartbeat-safe-auto.sh +551 -0
- package/tools/bin/install-dashboard-launchd.sh +152 -0
- package/tools/bin/install-project-launchd.sh +219 -0
- package/tools/bin/issue-publish-scope-guard.sh +242 -0
- package/tools/bin/issue-requires-local-workspace-install.sh +31 -0
- package/tools/bin/issue-resource-class.sh +12 -0
- package/tools/bin/kick-scheduler.sh +75 -0
- package/tools/bin/label-follow-up-issues.sh +14 -0
- package/tools/bin/new-pr-worktree.sh +50 -0
- package/tools/bin/new-worktree.sh +49 -0
- package/tools/bin/pr-risk.sh +12 -0
- package/tools/bin/prepare-worktree.sh +140 -0
- package/tools/bin/profile-activate.sh +109 -0
- package/tools/bin/profile-adopt.sh +219 -0
- package/tools/bin/profile-smoke.sh +461 -0
- package/tools/bin/project-init.sh +189 -0
- package/tools/bin/project-launchd-bootstrap.sh +54 -0
- package/tools/bin/project-remove.sh +155 -0
- package/tools/bin/project-runtime-supervisor.sh +56 -0
- package/tools/bin/project-runtimectl.sh +586 -0
- package/tools/bin/provider-cooldown-state.sh +166 -0
- package/tools/bin/publish-issue-worker.sh +31 -0
- package/tools/bin/reconcile-issue-worker.sh +34 -0
- package/tools/bin/reconcile-pr-worker.sh +34 -0
- package/tools/bin/record-verification.sh +71 -0
- package/tools/bin/render-architecture-infographics.sh +110 -0
- package/tools/bin/render-dashboard-demo-media.sh +333 -0
- package/tools/bin/render-dashboard-snapshot.py +16 -0
- package/tools/bin/render-flow-config.sh +86 -0
- package/tools/bin/retry-state.sh +31 -0
- package/tools/bin/reuse-issue-worktree.sh +75 -0
- package/tools/bin/run-codex-bypass.sh +3 -0
- package/tools/bin/run-codex-safe.sh +3 -0
- package/tools/bin/run-codex-task.sh +231 -0
- package/tools/bin/scaffold-profile.sh +374 -0
- package/tools/bin/serve-dashboard.sh +5 -0
- package/tools/bin/split-retained-slice.sh +124 -0
- package/tools/bin/start-issue-worker.sh +796 -0
- package/tools/bin/start-pr-fix-worker.sh +458 -0
- package/tools/bin/start-pr-merge-repair-worker.sh +8 -0
- package/tools/bin/start-pr-review-worker.sh +227 -0
- package/tools/bin/start-resident-issue-loop.sh +908 -0
- package/tools/bin/sync-agent-repo.sh +52 -0
- package/tools/bin/sync-dependency-baseline.sh +247 -0
- package/tools/bin/sync-pr-labels.sh +12 -0
- package/tools/bin/sync-recurring-issue-checklist.sh +274 -0
- package/tools/bin/sync-shared-agent-home.sh +214 -0
- package/tools/bin/sync-vscode-workspace.sh +157 -0
- package/tools/bin/test-smoke.sh +63 -0
- package/tools/bin/uninstall-project-launchd.sh +55 -0
- package/tools/bin/update-github-labels.sh +14 -0
- package/tools/bin/worker-status.sh +19 -0
- package/tools/bin/workflow-catalog.sh +77 -0
- package/tools/dashboard/app.js +286 -0
- package/tools/dashboard/dashboard_snapshot.py +466 -0
- package/tools/dashboard/index.html +41 -0
- package/tools/dashboard/server.py +64 -0
- package/tools/dashboard/styles.css +351 -0
- package/tools/templates/issue-prompt-template.md +109 -0
- package/tools/templates/pr-fix-template.md +120 -0
- package/tools/templates/pr-merge-repair-template.md +91 -0
- package/tools/templates/pr-review-template.md +62 -0
- package/tools/templates/scheduled-issue-prompt-template.md +62 -0
- package/tools/tests/test-agent-control-plane-npm-cli.sh +279 -0
- package/tools/tests/test-agent-github-update-labels-falls-back-to-repository-id.sh +56 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-clears-stale-sandbox-artifacts.sh +89 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-does-not-retry-provider-quota.sh +82 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-retries-transient-failures.sh +90 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-times-out.sh +73 -0
- package/tools/tests/test-agent-project-claude-session-wrapper.sh +103 -0
- package/tools/tests/test-agent-project-cleanup-session-orphan-fallback.sh +90 -0
- package/tools/tests/test-agent-project-cleanup-session-skip-worktree-cleanup.sh +90 -0
- package/tools/tests/test-agent-project-codex-live-thread-persist.sh +76 -0
- package/tools/tests/test-agent-project-codex-recovery.sh +731 -0
- package/tools/tests/test-agent-project-codex-session-wrapper-clears-stale-sandbox-artifacts.sh +105 -0
- package/tools/tests/test-agent-project-codex-session-wrapper.sh +97 -0
- package/tools/tests/test-agent-project-open-pr-worktree-config-prefix.sh +81 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-clears-stale-sandbox-artifacts.sh +109 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-infers-blocked-result-contract.sh +89 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-recovers-literal-env-artifacts.sh +113 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-recovers-version-mismatch.sh +135 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-resident.sh +179 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-reuses-existing-agent-after-add-race.sh +119 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-terminates-rate-limit-hang.sh +91 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper.sh +117 -0
- package/tools/tests/test-agent-project-publish-issue-pr-prunes-stale-worktree-entry.sh +148 -0
- package/tools/tests/test-agent-project-publish-issue-pr-reads-archived-session.sh +146 -0
- package/tools/tests/test-agent-project-publish-issue-pr-recovers-final-head.sh +145 -0
- package/tools/tests/test-agent-project-publish-issue-pr-reuses-existing-worktree.sh +147 -0
- package/tools/tests/test-agent-project-reconcile-failure-reason.sh +456 -0
- package/tools/tests/test-agent-project-reconcile-issue-archived-session-fallback.sh +96 -0
- package/tools/tests/test-agent-project-reconcile-issue-before-blocked.sh +90 -0
- package/tools/tests/test-agent-project-reconcile-issue-host-verification-recovery-uses-recovered-worktree.sh +212 -0
- package/tools/tests/test-agent-project-reconcile-issue-host-verification-recovery.sh +207 -0
- package/tools/tests/test-agent-project-reconcile-issue-provider-quota-schedules-provider-cooldown.sh +101 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-backfills-lane-metadata-from-worker-key.sh +113 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-clears-stale-failed-summary.sh +117 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-initializes-shared-agent-home.sh +55 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-normalizes-runner-state.sh +125 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-records-invalid-contract-summary.sh +118 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-skips-duplicate-blocked-comment.sh +144 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-standardizes-no-commits-blocker.sh +145 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-synthesizes-blocked-comment.sh +139 -0
- package/tools/tests/test-agent-project-reconcile-pr-blocked-host-recovery.sh +242 -0
- package/tools/tests/test-agent-project-reconcile-pr-guard-blocked-no-commit.sh +142 -0
- package/tools/tests/test-agent-project-reconcile-pr-provider-quota-schedules-provider-cooldown.sh +106 -0
- package/tools/tests/test-agent-project-reconcile-pr-session-initializes-shared-agent-home.sh +66 -0
- package/tools/tests/test-agent-project-reconcile-pr-updated-branch-noop.sh +129 -0
- package/tools/tests/test-audit-agent-worktrees-active-launch-skips-git-inspection.sh +69 -0
- package/tools/tests/test-audit-agent-worktrees-broken-worktree.sh +43 -0
- package/tools/tests/test-audit-agent-worktrees-pending-launch-owner.sh +46 -0
- package/tools/tests/test-audit-agent-worktrees-unreconciled-owner.sh +79 -0
- package/tools/tests/test-audit-issue-routing-managed-branch-globs.sh +56 -0
- package/tools/tests/test-branch-verification-guard-generated-artifacts.sh +72 -0
- package/tools/tests/test-branch-verification-guard-targeted-coverage.sh +125 -0
- package/tools/tests/test-codex-quota-manager-failure-driven-rotation.sh +178 -0
- package/tools/tests/test-codex-quota-wrapper.sh +37 -0
- package/tools/tests/test-contribution-docs.sh +18 -0
- package/tools/tests/test-control-plane-dashboard-runtime-smoke.sh +343 -0
- package/tools/tests/test-create-follow-up-issue.sh +73 -0
- package/tools/tests/test-dashboard-launchd-bootstrap.sh +55 -0
- package/tools/tests/test-flow-export-execution-env-exports-repo-id.sh +30 -0
- package/tools/tests/test-flow-export-github-cli-auth-env-prefers-git-credential.sh +48 -0
- package/tools/tests/test-flow-github-api-repo-fallback-preserves-input.sh +85 -0
- package/tools/tests/test-flow-github-api-repo-prefers-explicit-repository-id.sh +60 -0
- package/tools/tests/test-flow-github-issue-list-falls-back-to-repository-id.sh +64 -0
- package/tools/tests/test-flow-github-pr-list-falls-back-to-repository-id.sh +77 -0
- package/tools/tests/test-flow-resident-can-reuse-does-not-leak-metadata.sh +52 -0
- package/tools/tests/test-flow-resident-reap-stale-controllers.sh +63 -0
- package/tools/tests/test-flow-resolve-codex-quota-tools.sh +104 -0
- package/tools/tests/test-flow-runtime-doctor-profile-selection.sh +27 -0
- package/tools/tests/test-heartbeat-codex-pr-linked-issue-exclusion.sh +79 -0
- package/tools/tests/test-heartbeat-hooks-enqueue-resident-issue-for-idle-controller.sh +115 -0
- package/tools/tests/test-heartbeat-hooks-enqueue-resident-issue-for-live-lane-controller.sh +117 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop-claude.sh +96 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop-codex.sh +96 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop.sh +96 -0
- package/tools/tests/test-heartbeat-loop-auth-wait-does-not-consume-capacity.sh +170 -0
- package/tools/tests/test-heartbeat-loop-blocked-recovery-lane.sh +201 -0
- package/tools/tests/test-heartbeat-loop-blocked-recovery-vs-pr-reservation.sh +201 -0
- package/tools/tests/test-heartbeat-loop-idle-resident-controller-does-not-block-launches.sh +160 -0
- package/tools/tests/test-heartbeat-loop-pr-launch-dedup.sh +133 -0
- package/tools/tests/test-heartbeat-loop-provider-cooldown-suppresses-launches.sh +157 -0
- package/tools/tests/test-heartbeat-loop-reaps-stale-resident-controller.sh +181 -0
- package/tools/tests/test-heartbeat-loop-waiting-provider-resident-controller-does-not-block-launches.sh +160 -0
- package/tools/tests/test-heartbeat-ready-issues-blocked-recovery.sh +134 -0
- package/tools/tests/test-heartbeat-safe-auto-dynamic-concurrency.sh +162 -0
- package/tools/tests/test-heartbeat-safe-auto-no-tmux-sessions.sh +136 -0
- package/tools/tests/test-heartbeat-safe-auto-openclaw-skips-codex-quota.sh +139 -0
- package/tools/tests/test-heartbeat-safe-auto-quota-health-signal.sh +119 -0
- package/tools/tests/test-heartbeat-safe-auto-stale-shared-loop-pid-does-not-skip.sh +140 -0
- package/tools/tests/test-heartbeat-safe-auto-static-capacity-without-quota-cache.sh +142 -0
- package/tools/tests/test-heartbeat-safe-auto-zero-healthy-pools.sh +141 -0
- package/tools/tests/test-heartbeat-sync-issue-labels-empty-schedule.sh +65 -0
- package/tools/tests/test-heartbeat-sync-open-agent-prs-terminal-clears-running.sh +179 -0
- package/tools/tests/test-install-dashboard-launchd.sh +78 -0
- package/tools/tests/test-install-project-launchd-adds-tool-paths.sh +87 -0
- package/tools/tests/test-install-project-launchd.sh +110 -0
- package/tools/tests/test-issue-local-workspace-install-policy.sh +81 -0
- package/tools/tests/test-issue-publish-scope-guard-docs-signal.sh +70 -0
- package/tools/tests/test-issue-reconcile-hooks-success-clears-blocked.sh +36 -0
- package/tools/tests/test-kick-scheduler-requires-explicit-profile.sh +47 -0
- package/tools/tests/test-label-follow-up-issues-falls-back-to-repository-id.sh +132 -0
- package/tools/tests/test-manual-operator-entrypoints-require-explicit-profile.sh +64 -0
- package/tools/tests/test-package-funding-metadata.sh +21 -0
- package/tools/tests/test-package-public-metadata.sh +62 -0
- package/tools/tests/test-placeholder-worker-adapters.sh +38 -0
- package/tools/tests/test-pr-reconcile-hooks-refreshes-recurring-issue-checklist.sh +110 -0
- package/tools/tests/test-pr-risk-cohesive-mobile-locale-scope.sh +70 -0
- package/tools/tests/test-pr-risk-fix-label-semantics.sh +114 -0
- package/tools/tests/test-pr-risk-local-first-no-checks.sh +70 -0
- package/tools/tests/test-prepare-worktree-simple-repo-baseline.sh +67 -0
- package/tools/tests/test-profile-activate.sh +33 -0
- package/tools/tests/test-profile-adopt-allow-missing-repo.sh +68 -0
- package/tools/tests/test-profile-adopt-skip-workspace-sync-missing-file.sh +61 -0
- package/tools/tests/test-profile-adopt-syncs-anchor-and-workspace.sh +90 -0
- package/tools/tests/test-profile-smoke-collision.sh +44 -0
- package/tools/tests/test-profile-smoke-invalid-claude-config.sh +31 -0
- package/tools/tests/test-profile-smoke-invalid-provider-pool.sh +68 -0
- package/tools/tests/test-profile-smoke-repo-slug-mismatch.sh +36 -0
- package/tools/tests/test-profile-smoke.sh +45 -0
- package/tools/tests/test-project-init-force-and-skip-sync.sh +61 -0
- package/tools/tests/test-project-init-repo-slug-mismatch.sh +29 -0
- package/tools/tests/test-project-init.sh +66 -0
- package/tools/tests/test-project-launchd-bootstrap.sh +66 -0
- package/tools/tests/test-project-remove.sh +150 -0
- package/tools/tests/test-project-runtime-supervisor.sh +47 -0
- package/tools/tests/test-project-runtimectl-launchd.sh +115 -0
- package/tools/tests/test-project-runtimectl-missing-profile.sh +54 -0
- package/tools/tests/test-project-runtimectl-start-falls-back-to-bootstrap.sh +108 -0
- package/tools/tests/test-project-runtimectl-status-reports-supervisor-as-heartbeat-parent.sh +95 -0
- package/tools/tests/test-project-runtimectl-status-supervisor-running.sh +59 -0
- package/tools/tests/test-project-runtimectl-stop-cancels-pending-kick.sh +85 -0
- package/tools/tests/test-project-runtimectl-stop-clears-running-labels.sh +78 -0
- package/tools/tests/test-project-runtimectl.sh +212 -0
- package/tools/tests/test-provider-cooldown-state-prefers-runtime-worker-context.sh +39 -0
- package/tools/tests/test-provider-cooldown-state.sh +59 -0
- package/tools/tests/test-public-repo-docs.sh +159 -0
- package/tools/tests/test-reconcile-pr-worker-acp-config-routing.sh +75 -0
- package/tools/tests/test-render-dashboard-snapshot.sh +149 -0
- package/tools/tests/test-render-flow-config-demo-profile.sh +36 -0
- package/tools/tests/test-render-flow-config-provider-pool-fallback.sh +81 -0
- package/tools/tests/test-render-flow-config.sh +52 -0
- package/tools/tests/test-run-codex-task-claude-routing.sh +125 -0
- package/tools/tests/test-run-codex-task-codex-resident-routing.sh +108 -0
- package/tools/tests/test-run-codex-task-kilo-routing.sh +98 -0
- package/tools/tests/test-run-codex-task-openclaw-resident-routing.sh +117 -0
- package/tools/tests/test-run-codex-task-openclaw-routing.sh +113 -0
- package/tools/tests/test-run-codex-task-opencode-routing.sh +98 -0
- package/tools/tests/test-run-codex-task-provider-pool-fallback-routing.sh +146 -0
- package/tools/tests/test-scaffold-profile.sh +108 -0
- package/tools/tests/test-serve-dashboard.sh +93 -0
- package/tools/tests/test-start-issue-worker-blocked-context.sh +129 -0
- package/tools/tests/test-start-issue-worker-blocks-complete-recurring-checklist.sh +189 -0
- package/tools/tests/test-start-issue-worker-local-install-routing.sh +157 -0
- package/tools/tests/test-start-issue-worker-profile-template-routing.sh +149 -0
- package/tools/tests/test-start-issue-worker-recurring-resident-reuse-codex.sh +212 -0
- package/tools/tests/test-start-issue-worker-recurring-resident-reuse.sh +219 -0
- package/tools/tests/test-start-issue-worker-renders-verification-snippet.sh +155 -0
- package/tools/tests/test-start-issue-worker-resident-reuse-falls-back-to-new-worktree.sh +199 -0
- package/tools/tests/test-start-pr-fix-worker-host-blocker-context.sh +275 -0
- package/tools/tests/test-start-resident-issue-loop-adopts-next-recurring-issue.sh +185 -0
- package/tools/tests/test-start-resident-issue-loop-clears-pending-while-waiting-due.sh +152 -0
- package/tools/tests/test-start-resident-issue-loop-consumes-queued-lease.sh +186 -0
- package/tools/tests/test-start-resident-issue-loop-fails-over-provider-pool.sh +212 -0
- package/tools/tests/test-start-resident-issue-loop-immediate-cycles.sh +148 -0
- package/tools/tests/test-start-resident-issue-loop-waits-for-provider.sh +194 -0
- package/tools/tests/test-start-resident-issue-loop-waits-for-terminal-reconcile-status.sh +198 -0
- package/tools/tests/test-start-resident-issue-loop-yields-to-live-lane-controller.sh +145 -0
- package/tools/tests/test-sync-pr-labels-fix-lane-uses-repair-queued.sh +67 -0
- package/tools/tests/test-sync-recurring-issue-checklist-backfills-workflow-complete-blocker.sh +70 -0
- package/tools/tests/test-sync-recurring-issue-checklist.sh +95 -0
- package/tools/tests/test-sync-shared-agent-home-local-source-root.sh +66 -0
- package/tools/tests/test-sync-shared-agent-home-preserves-unrelated-workflow-catalog-skill.sh +47 -0
- package/tools/tests/test-test-smoke.sh +86 -0
- package/tools/tests/test-uninstall-project-launchd.sh +37 -0
- package/tools/tests/test-update-github-labels-prefers-sibling-helper.sh +49 -0
- package/tools/tests/test-workflow-catalog.sh +43 -0
- package/tools/vendor/codex-quota/LICENSE +21 -0
- package/tools/vendor/codex-quota/README.md +459 -0
- package/tools/vendor/codex-quota/codex-quota.js +261 -0
- package/tools/vendor/codex-quota/lib/claude-accounts.js +226 -0
- package/tools/vendor/codex-quota/lib/claude-oauth.js +174 -0
- package/tools/vendor/codex-quota/lib/claude-tokens.js +471 -0
- package/tools/vendor/codex-quota/lib/claude-usage.js +929 -0
- package/tools/vendor/codex-quota/lib/codex-accounts.js +205 -0
- package/tools/vendor/codex-quota/lib/codex-tokens.js +326 -0
- package/tools/vendor/codex-quota/lib/codex-usage.js +32 -0
- package/tools/vendor/codex-quota/lib/color.js +72 -0
- package/tools/vendor/codex-quota/lib/constants.js +57 -0
- package/tools/vendor/codex-quota/lib/container.js +143 -0
- package/tools/vendor/codex-quota/lib/display.js +1111 -0
- package/tools/vendor/codex-quota/lib/fs.js +63 -0
- package/tools/vendor/codex-quota/lib/handlers.js +2060 -0
- package/tools/vendor/codex-quota/lib/jwt.js +33 -0
- package/tools/vendor/codex-quota/lib/oauth.js +486 -0
- package/tools/vendor/codex-quota/lib/paths.js +34 -0
- package/tools/vendor/codex-quota/lib/prompts.js +44 -0
- package/tools/vendor/codex-quota/lib/sync.js +1438 -0
- package/tools/vendor/codex-quota/lib/token-match.js +96 -0
- package/tools/vendor/codex-quota-manager/scripts/auto-switch.sh +500 -0
- package/tools/vendor/codex-quota-manager/scripts/batch-add.sh +123 -0
|
@@ -0,0 +1,2347 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# agent-project-heartbeat-loop — shared scheduler loop for project adapters.
# Strict mode: abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail

# Print CLI usage to stdout.
# The heredoc delimiter is quoted ('EOF') so no expansion happens in the text.
usage() {
  cat <<'EOF'
Usage:
  agent-project-heartbeat-loop --repo-slug <owner/repo> --runs-root <path> --state-root <path> --issue-prefix <prefix> --pr-prefix <prefix> --hook-file <path> [options]

Shared scheduler loop for project adapters. The shared engine owns:
- completed worker reconciliation
- retry/cooldown gating
- concurrency accounting
- issue/PR launch ordering
- operational summary output

Options:
  --memory-dir <path>                    Optional memory log root
  --max-concurrent-workers <n>           Default 5
  --max-concurrent-heavy-workers <n>     Default 1
  --max-concurrent-pr-workers <n>        Default 5
  --max-recurring-issue-workers <n>      Default 1
  --max-concurrent-scheduled-issue-workers <n>   Default 0 (disabled)
  --max-concurrent-scheduled-heavy-workers <n>   Default 1
  --max-concurrent-blocked-recovery-issue-workers <n>   Default 0 (disabled)
  --blocked-recovery-cooldown-seconds <n>        Default 0 (disabled)
  --max-open-agent-prs-for-recurring <n>         Default 0 (disabled)
  --max-launches-per-pass <n>            Default matches max-concurrent-workers
  --heavy-running-label <name>           Default HEAVY_ISSUE
  --heavy-deferred-key <name>            Default HEAVY_DEFERRED
  --heavy-deferred-message <text>        Default heavy-queue message
  --help                                 Show this help
EOF
}
|
|
34
|
+
|
|
35
|
+
# Root of the shared agent home. Overridable via SHARED_AGENT_HOME; otherwise
# resolved as two directories above this script's location (tools/bin/../..).
shared_agent_home="${SHARED_AGENT_HOME:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"

# Required arguments — validated after flag parsing.
repo_slug=""
runs_root=""
state_root=""
memory_dir=""
issue_prefix=""
pr_prefix=""
hook_file=""

# Concurrency / gating defaults; see usage() for the matching flags.
max_concurrent_workers="5"
max_concurrent_heavy_workers="1"
max_concurrent_pr_workers="5"
max_recurring_issue_workers="1"
max_concurrent_scheduled_issue_workers="0"
max_concurrent_scheduled_heavy_workers="1"
max_concurrent_blocked_recovery_issue_workers="0"
blocked_recovery_cooldown_seconds="0"
max_open_agent_prs_for_recurring="0"
# Per-pass launch throttle (defaults to the worker cap).
max_launches_per_pass="$max_concurrent_workers"

# Heavy-issue labeling defaults.
heavy_running_label="HEAVY_ISSUE"
heavy_deferred_key="HEAVY_DEFERRED"
heavy_deferred_message="Heavy issues remain queued until the heavy slot is free."
|
|
56
|
+
|
|
57
|
+
# Parse CLI flags. Every option consumes a value except --help/-h; unknown
# arguments print the offending token and usage to stderr, then exit 1.
max_launches_per_pass_overridden="no"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --repo-slug) repo_slug="${2:-}"; shift 2 ;;
    --runs-root) runs_root="${2:-}"; shift 2 ;;
    --state-root) state_root="${2:-}"; shift 2 ;;
    --memory-dir) memory_dir="${2:-}"; shift 2 ;;
    --issue-prefix) issue_prefix="${2:-}"; shift 2 ;;
    --pr-prefix) pr_prefix="${2:-}"; shift 2 ;;
    --hook-file) hook_file="${2:-}"; shift 2 ;;
    --max-concurrent-workers) max_concurrent_workers="${2:-}"; shift 2 ;;
    --max-concurrent-heavy-workers) max_concurrent_heavy_workers="${2:-}"; shift 2 ;;
    --max-concurrent-pr-workers) max_concurrent_pr_workers="${2:-}"; shift 2 ;;
    --max-recurring-issue-workers) max_recurring_issue_workers="${2:-}"; shift 2 ;;
    --max-concurrent-scheduled-issue-workers) max_concurrent_scheduled_issue_workers="${2:-}"; shift 2 ;;
    --max-concurrent-scheduled-heavy-workers) max_concurrent_scheduled_heavy_workers="${2:-}"; shift 2 ;;
    --max-concurrent-blocked-recovery-issue-workers) max_concurrent_blocked_recovery_issue_workers="${2:-}"; shift 2 ;;
    --blocked-recovery-cooldown-seconds) blocked_recovery_cooldown_seconds="${2:-}"; shift 2 ;;
    --max-open-agent-prs-for-recurring) max_open_agent_prs_for_recurring="${2:-}"; shift 2 ;;
    --max-launches-per-pass) max_launches_per_pass="${2:-}"; max_launches_per_pass_overridden="yes"; shift 2 ;;
    --heavy-running-label) heavy_running_label="${2:-}"; shift 2 ;;
    --heavy-deferred-key) heavy_deferred_key="${2:-}"; shift 2 ;;
    --heavy-deferred-message) heavy_deferred_message="${2:-}"; shift 2 ;;
    --help|-h) usage; exit 0 ;;
    *) echo "Unknown argument: $1" >&2; usage >&2; exit 1 ;;
  esac
done

# Bug fix: usage() documents --max-launches-per-pass as defaulting to
# --max-concurrent-workers, but the default was captured *before* flag
# parsing, so only the built-in worker cap (5) ever applied when
# --max-concurrent-workers was overridden. Re-derive it here unless the
# operator set it explicitly.
if [[ "$max_launches_per_pass_overridden" != "yes" ]]; then
  max_launches_per_pass="$max_concurrent_workers"
fi

# All of these are mandatory; bail out with usage on stderr when any is empty.
if [[ -z "$repo_slug" || -z "$runs_root" || -z "$state_root" || -z "$issue_prefix" || -z "$pr_prefix" || -z "$hook_file" ]]; then
  usage >&2
  exit 1
fi
|
|
88
|
+
|
|
89
|
+
mkdir -p "$runs_root"

# Daily memory log file; remains empty (logging disabled) unless --memory-dir
# was supplied, in which case it is assigned below.
memory_file=""

# Per-pass lazy caches. Each *_cache variable holds a newline-separated list
# and its *_cache_loaded flag records whether the cache has been populated.
tmux_sessions_cache=""
tmux_sessions_cache_loaded="no"
auth_wait_workers_cache=""
auth_wait_workers_cache_loaded="no"
all_running_workers_cache=""
all_running_workers_cache_loaded="no"
running_issue_workers_cache=""
running_issue_workers_cache_loaded="no"
running_pr_workers_cache=""
running_pr_workers_cache_loaded="no"
completed_workers_cache=""
completed_workers_cache_loaded="no"
ready_issue_ids_cache=""
ready_issue_ids_cache_loaded="no"
open_agent_pr_ids_cache=""
open_agent_pr_ids_cache_loaded="no"
running_issue_ids_cache=""
running_issue_ids_cache_loaded="no"
exclusive_issue_ids_cache=""
exclusive_issue_ids_cache_loaded="no"
exclusive_pr_ids_cache=""
exclusive_pr_ids_cache_loaded="no"
blocked_recovery_issue_ids_cache=""
blocked_recovery_issue_ids_cache_loaded="no"
ordered_ready_issue_ids_cache=""
ordered_ready_issue_ids_cache_loaded="no"
due_scheduled_issue_ids_cache=""
due_scheduled_issue_ids_cache_loaded="no"
due_blocked_recovery_issue_ids_cache=""
due_blocked_recovery_issue_ids_cache_loaded="no"

# Attribute/risk cache directories; paths are assigned later in the loop.
issue_attr_cache_dir=""
pr_attr_cache_dir=""
pr_risk_cache_dir=""

# On-disk PR-risk cache shared across passes, with a configurable TTL in
# seconds (ACP_* env takes precedence over the legacy F_LOSNING_* env).
pr_risk_runtime_cache_dir="${state_root}/pr-risk-cache"
pr_risk_runtime_cache_ttl_seconds="${ACP_PR_RISK_CACHE_TTL_SECONDS:-${F_LOSNING_PR_RISK_CACHE_TTL_SECONDS:-300}}"

# Persistent scheduler state locations under the project state root.
recurring_rotation_dir="${state_root}/recurring"
recurring_rotation_file="${recurring_rotation_dir}/last-launched-issue-id"
scheduled_state_dir="${state_root}/scheduled-issues"
blocked_recovery_state_dir="${state_root}/blocked-recovery-issues"
pending_launch_dir="${state_root}/pending-launches"

# Memory logging: one markdown file per UTC-agnostic calendar day (date +%F).
if [[ -n "$memory_dir" ]]; then
  mkdir -p "$memory_dir"
  memory_file="${memory_dir}/$(date +%F).md"
  touch "$memory_file"
fi
mkdir -p "$recurring_rotation_dir"
mkdir -p "$scheduled_state_dir"
mkdir -p "$blocked_recovery_state_dir"
mkdir -p "$pending_launch_dir"
mkdir -p "$pr_risk_runtime_cache_dir"
|
|
141
|
+
|
|
142
|
+
# Append a timestamped bullet line to the daily memory log.
# No-op (returns 0) when memory logging is disabled ($memory_file empty).
record_memory() {
  local note="${1:?message required}"
  local stamp

  # Memory logging is optional; an empty $memory_file means it is disabled.
  if [[ -z "$memory_file" ]]; then
    return 0
  fi

  stamp="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
  printf -- "- %s %s\n" "$stamp" "$note" >>"$memory_file"
}
|
|
147
|
+
|
|
148
|
+
# Print a header line, followed by an optional body (only when non-empty).
print_block() {
  local title="${1:?header required}"
  local content="${2:-}"

  printf '%s\n' "$title"
  [[ -z "$content" ]] || printf '%s\n' "$content"
}
|
|
156
|
+
|
|
157
|
+
# Emit a machine-readable phase marker for the heartbeat loop output.
log_phase() {
  local phase="${1:?phase required}"
  printf 'HEARTBEAT_LOOP_PHASE=%s\n' "$phase"
}
|
|
160
|
+
|
|
161
|
+
# List all tmux session names, one per line, via the per-pass cache.
all_tmux_sessions() {
  # Populate the cache on first use, then emit the cached list.
  ensure_tmux_sessions_cache
  printf '%s\n' "${tmux_sessions_cache}"
}
|
|
165
|
+
|
|
166
|
+
# Return 0 when the session name belongs to this project's managed workers.
session_matches_prefix() {
  local name="${1:?session required}"

  # A managed session starts with either the issue or the PR worker prefix.
  case "$name" in
    "${issue_prefix}"*|"${pr_prefix}"*) return 0 ;;
    *) return 1 ;;
  esac
}
|
|
170
|
+
|
|
171
|
+
# Print the RUNNER_STATE recorded in a session's runner.env under $runs_root.
# Returns 1 when the state file does not exist.
session_runner_state() {
  local session="${1:?session required}"
  local state_file

  state_file="${runs_root}/${session}/runner.env"
  [[ -f "$state_file" ]] || return 1

  # First RUNNER_STATE= entry wins; awk exits after the first match.
  awk -F= '/^RUNNER_STATE=/{print $2; exit}' "$state_file"
}
|
|
179
|
+
|
|
180
|
+
# Return 0 when a session's runner is paused for auth refresh or an
# account switch (both states mean it is waiting, not doing work).
session_is_auth_waiting() {
  local session="${1:?session required}"
  local state

  state="$(session_runner_state "$session" || true)"

  case "$state" in
    waiting-auth-refresh|switching-account) return 0 ;;
    *) return 1 ;;
  esac
}
|
|
186
|
+
|
|
187
|
+
# List every running worker session, one per line, via the per-pass cache.
all_running_workers() {
  # Populate the cache on first use, then emit the cached list.
  ensure_all_running_workers_cache
  printf '%s\n' "${all_running_workers_cache}"
}
|
|
191
|
+
|
|
192
|
+
# List running issue-worker sessions, one per line, via the per-pass cache.
running_issue_workers() {
  # Populate the cache on first use, then emit the cached list.
  ensure_running_issue_workers_cache
  printf '%s\n' "${running_issue_workers_cache}"
}
|
|
196
|
+
|
|
197
|
+
# Resolve the live launcher PID recorded for <kind>-<item_id>.
# Prints the PID and returns 0 only when the recorded process is still
# alive; stale or empty pid files are pruned and the function returns 1.
pending_launch_pid() {
  local kind="${1:?kind required}"
  local item_id="${2:?item id required}"
  local marker recorded_pid

  marker="${pending_launch_dir}/${kind}-${item_id}.pid"
  [[ -f "$marker" ]] || return 1

  recorded_pid="$(tr -d '[:space:]' <"$marker" 2>/dev/null || true)"

  # An empty or whitespace-only marker is garbage; drop it.
  if [[ -z "$recorded_pid" ]]; then
    rm -f "$marker"
    return 1
  fi

  # kill -0 probes process liveness without sending a signal.
  if ! kill -0 "$recorded_pid" 2>/dev/null; then
    rm -f "$marker"
    return 1
  fi

  printf '%s\n' "$recorded_pid"
}
|
|
221
|
+
|
|
222
|
+
# True when an issue launch is still pending: the launcher pid file holds
# a live process and the worker tmux session has not appeared yet. Once
# the session exists the pid file is stale and gets removed.
pending_issue_launch_active() {
  local id="${1:?issue id required}"
  if ! tmux has-session -t "${issue_prefix}${id}" 2>/dev/null; then
    pending_launch_pid issue "$id" >/dev/null
    return
  fi
  # Session already exists: launch completed, drop the stale marker.
  rm -f "${pending_launch_dir}/issue-${id}.pid" 2>/dev/null || true
  return 1
}
|
|
230
|
+
|
|
231
|
+
# Print the path of the resident-worker controller.env for an issue.
resident_issue_controller_file() {
  local id="${1:?issue id required}"
  local path="${state_root}/resident-workers/issues/${id}/controller.env"
  printf '%s\n' "${path}"
}

# Print the CONTROLLER_STATE recorded for a resident issue worker
# (surrounding double quotes stripped). Returns 1 when the file is
# missing or carries no state.
resident_issue_controller_state() {
  local id="${1:?issue id required}"
  local ctl_file ctl_state

  ctl_file="$(resident_issue_controller_file "$id")"
  [[ -f "${ctl_file}" ]] || return 1

  ctl_state="$(awk -F= '/^CONTROLLER_STATE=/{print $2; exit}' "${ctl_file}" 2>/dev/null | tr -d '"' || true)"
  [[ -n "${ctl_state}" ]] || return 1
  printf '%s\n' "${ctl_state}"
}
|
|
247
|
+
|
|
248
|
+
# Decide whether a pending issue launch consumes scheduler capacity.
# It does not when the launch is inactive, or when a resident controller
# reports an idle/waiting state. An empty/unknown controller state counts.
pending_issue_launch_counts_toward_capacity() {
  local id="${1:?issue id required}"
  local ctl_state

  pending_issue_launch_active "${id}" || return 1

  ctl_state="$(resident_issue_controller_state "${id}" || true)"
  case "${ctl_state}" in
    idle | sleeping | waiting-due | waiting-open-pr | waiting-provider)
      return 1
      ;;
  esac
  return 0
}
|
|
267
|
+
|
|
268
|
+
# True when a PR launch is still pending: the launcher pid file holds a
# live process and the PR worker tmux session has not appeared yet. Once
# the session exists the pid file is stale and gets removed.
pending_pr_launch_active() {
  local pr="${1:?pr id required}"
  if ! tmux has-session -t "${pr_prefix}${pr}" 2>/dev/null; then
    pending_launch_pid pr "$pr" >/dev/null
    return
  fi
  # Session already exists: launch completed, drop the stale marker.
  rm -f "${pending_launch_dir}/pr-${pr}.pid" 2>/dev/null || true
  return 1
}
|
|
276
|
+
|
|
277
|
+
# Print every running PR-worker session, one per line. Auth-waiting
# sessions are excluded by the cache builder.
running_pr_workers() {
  ensure_running_pr_workers_cache
  printf '%s\n' "$running_pr_workers_cache"
}
|
|
281
|
+
|
|
282
|
+
# Memoize the tmux session listing (one name per line) for this cycle.
# When tmux fails or no server is running the cache becomes empty.
ensure_tmux_sessions_cache() {
  [[ "$tmux_sessions_cache_loaded" == "yes" ]] && return 0
  tmux_sessions_cache="$(tmux list-sessions -F '#S' 2>/dev/null || true)"
  tmux_sessions_cache_loaded="yes"
}
|
|
288
|
+
|
|
289
|
+
# Build (once per cycle) the newline-joined list of all tmux sessions
# whose name matches a control-plane worker prefix.
ensure_all_running_workers_cache() {
  local sess roster=""
  [[ "$all_running_workers_cache_loaded" == "yes" ]] && return 0
  ensure_tmux_sessions_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    session_matches_prefix "$sess" || continue
    roster+="${sess}"$'\n'
  done <<<"$tmux_sessions_cache"
  all_running_workers_cache="${roster%$'\n'}"
  all_running_workers_cache_loaded="yes"
}
|
|
305
|
+
|
|
306
|
+
# Print worker sessions currently paused for auth refresh, one per line.
auth_wait_workers() {
  ensure_auth_wait_workers_cache
  printf '%s\n' "$auth_wait_workers_cache"
}

# Build (once per cycle) the newline-joined list of prefix-matching tmux
# sessions whose runner state reports they are waiting on authentication.
ensure_auth_wait_workers_cache() {
  local sess collected=""
  [[ "$auth_wait_workers_cache_loaded" == "yes" ]] && return 0
  ensure_tmux_sessions_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    session_matches_prefix "$sess" || continue
    session_is_auth_waiting "$sess" || continue
    collected+="${sess}"$'\n'
  done <<<"$tmux_sessions_cache"
  auth_wait_workers_cache="${collected%$'\n'}"
  auth_wait_workers_cache_loaded="yes"
}
|
|
328
|
+
|
|
329
|
+
# Build (once per cycle) the list of issue-worker sessions, excluding
# sessions currently paused for auth refresh.
ensure_running_issue_workers_cache() {
  local sess acc=""
  [[ "$running_issue_workers_cache_loaded" == "yes" ]] && return 0
  ensure_tmux_sessions_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    [[ "$sess" == "${issue_prefix}"* ]] || continue
    session_is_auth_waiting "$sess" && continue
    acc+="${sess}"$'\n'
  done <<<"$tmux_sessions_cache"
  running_issue_workers_cache="${acc%$'\n'}"
  running_issue_workers_cache_loaded="yes"
}
|
|
348
|
+
|
|
349
|
+
# Build (once per cycle) the list of PR-worker sessions, excluding
# sessions currently paused for auth refresh.
ensure_running_pr_workers_cache() {
  local sess acc=""
  [[ "$running_pr_workers_cache_loaded" == "yes" ]] && return 0
  ensure_tmux_sessions_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    [[ "$sess" == "${pr_prefix}"* ]] || continue
    session_is_auth_waiting "$sess" && continue
    acc+="${sess}"$'\n'
  done <<<"$tmux_sessions_cache"
  running_pr_workers_cache="${acc%$'\n'}"
  running_pr_workers_cache_loaded="yes"
}
|
|
368
|
+
|
|
369
|
+
# Count the non-empty lines in a newline-separated worker listing.
# Prints "0" for an empty argument.
worker_count() {
  local listing="${1:-}"
  if [[ -z "$listing" ]]; then
    printf '0\n'
    return
  fi
  # Single awk pass replaces the sed | wc | tr pipeline.
  printf '%s\n' "$listing" | awk '!/^$/ {n++} END {print n+0}'
}
|
|
377
|
+
|
|
378
|
+
# Ask the shared retry-state tool whether <kind>/<item_id> is ready to be
# retried. True only when the tool's output contains READY=yes.
retry_ready() {
  local retry_kind="${1:?kind required}"
  local retry_id="${2:?item id required}"
  local tool_output ready_flag

  tool_output="$(
    "${shared_agent_home}/tools/bin/agent-project-retry-state" \
      --state-root "$state_root" \
      --kind "$retry_kind" \
      --item-id "$retry_id" \
      --action get
  )"
  ready_flag="$(printf '%s\n' "$tool_output" | awk -F= '/^READY=/{print $2}')"
  [[ "$ready_flag" == "yes" ]]
}
|
|
393
|
+
|
|
394
|
+
# Thin wrapper: query the shared provider cooldown state via the external
# helper script; its stdout and exit status pass through unchanged.
provider_cooldown_state() {
  "${shared_agent_home}/tools/bin/provider-cooldown-state.sh" get
}
|
|
397
|
+
|
|
398
|
+
# Extract the numeric issue id from an issue-worker tmux session name.
# Prints the id and returns 0; returns 1 for non-issue or malformed names.
issue_id_from_session() {
  local session="${1:?session required}"
  local issue_id=""
  if [[ "$session" == "${issue_prefix}"* ]]; then
    # Quote the prefix inside the expansion so glob metacharacters in it
    # are taken literally (unquoted it is treated as a pattern, SC2295).
    issue_id="${session#"${issue_prefix}"}"
  fi
  if [[ "$issue_id" =~ ^[0-9]+$ ]]; then
    printf '%s\n' "$issue_id"
    return 0
  fi
  return 1
}
|
|
410
|
+
|
|
411
|
+
# Extract the numeric PR id from a PR-worker tmux session name.
# Prints the id and returns 0; returns 1 for non-PR or malformed names.
pr_id_from_session() {
  local session="${1:?session required}"
  local pr_id=""
  if [[ "$session" == "${pr_prefix}"* ]]; then
    # Quote the prefix inside the expansion so glob metacharacters in it
    # are taken literally (unquoted it is treated as a pattern, SC2295).
    pr_id="${session#"${pr_prefix}"}"
  fi
  if [[ "$pr_id" =~ ^[0-9]+$ ]]; then
    printf '%s\n' "$pr_id"
    return 0
  fi
  return 1
}
|
|
423
|
+
|
|
424
|
+
# Abort early when the per-project hook implementation is absent; every
# scheduling decision below depends on the functions it defines.
if [[ ! -f "$hook_file" ]]; then
  echo "missing hook file: $hook_file" >&2
  exit 1
fi
# shellcheck source=/dev/null
source "$hook_file"

# Optional resident-worker helper library; skipped silently when the
# installed skill bundle does not ship it.
resident_lib="${shared_agent_home}/skills/openclaw/agent-control-plane/tools/bin/flow-resident-worker-lib.sh"
if [[ -f "${resident_lib}" ]]; then
  # shellcheck source=/dev/null
  source "${resident_lib}"
fi

# Best-effort: reap stale resident-issue state when the library provides
# the reaper; output and failures are deliberately ignored.
if declare -F flow_resident_issue_reap_stale_state >/dev/null 2>&1; then
  flow_resident_issue_reap_stale_state >/dev/null 2>&1 || true
fi
|
|
440
|
+
|
|
441
|
+
# Hook functions the project hook file MUST define; the heartbeat loop
# refuses to start without all of them.
required_hooks=(
  heartbeat_list_ready_issue_ids
  heartbeat_list_open_agent_pr_ids
  heartbeat_issue_is_heavy
  heartbeat_issue_is_recurring
  heartbeat_sync_pr_labels
  heartbeat_pr_risk_json
  heartbeat_mark_issue_running
  heartbeat_issue_launch_failed
  heartbeat_start_issue_worker
  heartbeat_mark_pr_running
  heartbeat_clear_pr_running
  heartbeat_start_pr_merge_repair_worker
  heartbeat_start_pr_review_worker
  heartbeat_start_pr_fix_worker
  heartbeat_start_pr_ci_refresh
  heartbeat_reconcile_issue
  heartbeat_reconcile_pr
)

# Fail fast with a clear message naming the first missing hook.
for hook_name in "${required_hooks[@]}"; do
  if ! declare -F "$hook_name" >/dev/null 2>&1; then
    echo "missing required heartbeat hook: $hook_name" >&2
    exit 1
  fi
done
|
|
467
|
+
|
|
468
|
+
# Optional hooks: install no-op (or "no"-printing) fallbacks for any the
# project hook file did not define, so later call sites need no guards.
if ! declare -F heartbeat_list_exclusive_issue_ids >/dev/null 2>&1; then
  heartbeat_list_exclusive_issue_ids() { :; }
fi

if ! declare -F heartbeat_list_exclusive_pr_ids >/dev/null 2>&1; then
  heartbeat_list_exclusive_pr_ids() { :; }
fi

if ! declare -F heartbeat_issue_is_exclusive >/dev/null 2>&1; then
  heartbeat_issue_is_exclusive() { printf 'no\n'; }
fi

if ! declare -F heartbeat_pr_is_exclusive >/dev/null 2>&1; then
  heartbeat_pr_is_exclusive() { printf 'no\n'; }
fi

if ! declare -F heartbeat_list_running_issue_ids >/dev/null 2>&1; then
  heartbeat_list_running_issue_ids() { :; }
fi

if ! declare -F heartbeat_list_blocked_recovery_issue_ids >/dev/null 2>&1; then
  heartbeat_list_blocked_recovery_issue_ids() { :; }
fi

if ! declare -F heartbeat_sync_issue_labels >/dev/null 2>&1; then
  heartbeat_sync_issue_labels() { :; }
fi
|
|
495
|
+
|
|
496
|
+
# Launch-in-progress bookkeeping: records which item is mid-launch so the
# EXIT/INT/TERM handler can roll its "running" marker back if the launch
# dies partway through.
launch_in_progress_kind=""
launch_in_progress_id=""
launch_in_progress_issue_is_heavy="no"
launch_in_progress_cleanup_enabled="no"
# Newline-separated PR ids already reserved for launch this cycle.
reserved_pr_launch_ids=""

# Reset all launch-in-progress markers (PR reservations are kept).
clear_launch_in_progress() {
  launch_in_progress_kind=""
  launch_in_progress_id=""
  launch_in_progress_issue_is_heavy="no"
  launch_in_progress_cleanup_enabled="no"
}
|
|
508
|
+
|
|
509
|
+
# Return 0 when the given PR already holds a launch reservation.
pr_launch_reserved() {
  local pr="${1:?pr number required}"
  [[ -n "$reserved_pr_launch_ids" ]] || return 1
  printf '%s\n' "$reserved_pr_launch_ids" | grep -Fxq "$pr"
}

# Record a launch reservation for the given PR; idempotent.
reserve_pr_launch() {
  local pr="${1:?pr number required}"
  pr_launch_reserved "$pr" && return 0
  reserved_pr_launch_ids+="${pr}"$'\n'
}
|
|
522
|
+
|
|
523
|
+
# Drop every memoized scheduler cache (string cache plus its _loaded flag)
# and remove the per-cycle attribute/risk temp directories, so the next
# heartbeat cycle re-reads fresh state.
cleanup_scheduler_caches() {
  local cache_name tmp_dir
  local -a cache_names=(
    tmux_sessions
    all_running_workers
    running_issue_workers
    running_pr_workers
    # Fix: auth_wait_workers was the only memoized cache not reset here,
    # so stale auth-wait data leaked across heartbeat cycles.
    auth_wait_workers
    completed_workers
    ready_issue_ids
    open_agent_pr_ids
    running_issue_ids
    exclusive_issue_ids
    exclusive_pr_ids
    blocked_recovery_issue_ids
    ordered_ready_issue_ids
    due_scheduled_issue_ids
    due_blocked_recovery_issue_ids
  )
  for cache_name in "${cache_names[@]}"; do
    printf -v "${cache_name}_cache" '%s' ''
    printf -v "${cache_name}_cache_loaded" '%s' 'no'
  done
  for tmp_dir in "${issue_attr_cache_dir:-}" "${pr_attr_cache_dir:-}" "${pr_risk_cache_dir:-}"; do
    if [[ -n "$tmp_dir" && -d "$tmp_dir" ]]; then
      rm -rf "$tmp_dir" || true
    fi
  done
}
|
|
562
|
+
|
|
563
|
+
# Stage an issue launch: record rollback markers for the exit trap, then
# mark the issue as running via the project hook.
stage_issue_launch() {
  local id="${1:?issue id required}"
  local heavy="${2:-no}"
  launch_in_progress_kind="issue"
  launch_in_progress_id="$id"
  launch_in_progress_issue_is_heavy="$heavy"
  launch_in_progress_cleanup_enabled="yes"
  heartbeat_mark_issue_running "$id" "$heavy"
}
|
|
572
|
+
|
|
573
|
+
# Stage a PR launch: reserve the PR for this cycle, record rollback
# markers for the exit trap, then mark the PR as running via the hook.
stage_pr_launch() {
  local pr="${1:?pr number required}"
  reserve_pr_launch "$pr"
  launch_in_progress_kind="pr"
  launch_in_progress_id="$pr"
  launch_in_progress_issue_is_heavy="no"
  launch_in_progress_cleanup_enabled="yes"
  heartbeat_mark_pr_running "$pr"
}
|
|
582
|
+
|
|
583
|
+
# EXIT/INT/TERM handler: always drop per-cycle caches, and if a launch
# was staged but never completed, undo its "running" marker so the item
# is retried on a later heartbeat. Rollback calls are best-effort (output
# and failures suppressed) because the process may already be dying.
rollback_launch_in_progress() {
  # Always cleanup caches on exit to avoid leaks
  cleanup_scheduler_caches
  # Only rollback labels if a launch was in progress and failed
  if [[ "${launch_in_progress_cleanup_enabled:-no}" == "yes" ]]; then
    case "${launch_in_progress_kind:-}" in
      issue)
        if [[ -n "${launch_in_progress_id:-}" ]]; then
          heartbeat_issue_launch_failed "${launch_in_progress_id}" >/dev/null 2>&1 || true
        fi
        ;;
      pr)
        if [[ -n "${launch_in_progress_id:-}" ]]; then
          heartbeat_clear_pr_running "${launch_in_progress_id}" >/dev/null 2>&1 || true
        fi
        ;;
    esac
  fi
  clear_launch_in_progress
}

trap rollback_launch_in_progress EXIT INT TERM
|
|
605
|
+
|
|
606
|
+
# Print a filesystem-safe prefix for temp cache directory names, derived
# from the issue prefix (falling back to the PR prefix, then a default).
# Slashes and whitespace become '-'; anything else non-[alnum_.-] is
# stripped; an all-stripped result falls back to the default.
cache_prefix() {
  local source_prefix="${issue_prefix:-${pr_prefix:-agent-control-plane}}"
  local safe
  safe="$(printf '%s' "${source_prefix}" | tr '/[:space:]' '-' | tr -cd '[:alnum:]_.-')"
  printf '%s\n' "${safe:-agent-control-plane}"
}
|
|
617
|
+
|
|
618
|
+
# Create the per-cycle issue-attribute cache dir on first use.
# (-d on an empty/unset path is false, covering both original checks.)
ensure_issue_attr_cache_dir() {
  [[ -d "${issue_attr_cache_dir:-}" ]] \
    || issue_attr_cache_dir="$(mktemp -d "${TMPDIR:-/tmp}/$(cache_prefix)-issue-attrs.XXXXXX")"
}

# Create the per-cycle PR-attribute cache dir on first use.
ensure_pr_attr_cache_dir() {
  [[ -d "${pr_attr_cache_dir:-}" ]] \
    || pr_attr_cache_dir="$(mktemp -d "${TMPDIR:-/tmp}/$(cache_prefix)-pr-attrs.XXXXXX")"
}

# Create the per-cycle PR-risk cache dir on first use.
ensure_pr_risk_cache_dir() {
  [[ -d "${pr_risk_cache_dir:-}" ]] \
    || pr_risk_cache_dir="$(mktemp -d "${TMPDIR:-/tmp}/$(cache_prefix)-pr-risk.XXXXXX")"
}
|
|
635
|
+
|
|
636
|
+
pr_risk_runtime_cache_fresh() {
|
|
637
|
+
local cache_file="${1:?cache file required}"
|
|
638
|
+
local modified_at now age
|
|
639
|
+
[[ -f "$cache_file" ]] || return 1
|
|
640
|
+
modified_at="$(stat -f '%m' "$cache_file" 2>/dev/null || true)"
|
|
641
|
+
[[ "$modified_at" =~ ^[0-9]+$ ]] || return 1
|
|
642
|
+
now="$(date +%s)"
|
|
643
|
+
age=$((now - modified_at))
|
|
644
|
+
(( age >= 0 && age <= pr_risk_runtime_cache_ttl_seconds ))
|
|
645
|
+
}
|
|
646
|
+
|
|
647
|
+
# Memoized per-issue attribute lookup (heavy / recurring / scheduled /
# schedule_interval_seconds / exclusive). The first call per cycle
# delegates to the matching heartbeat_* hook and caches the answer as a
# file under ${issue_attr_cache_dir}; later calls read the cached file.
# Unknown attribute names print an error to stderr and return 1.
cached_issue_attr() {
  local attr="${1:?attr name required}"
  local id="${2:?issue id required}"
  local memo_file value

  ensure_issue_attr_cache_dir
  memo_file="${issue_attr_cache_dir}/${id}.${attr}"
  if [[ -f "${memo_file}" ]]; then
    cat "${memo_file}"
    return 0
  fi

  case "${attr}" in
    heavy) value="$(heartbeat_issue_is_heavy "${id}")" ;;
    recurring) value="$(heartbeat_issue_is_recurring "${id}")" ;;
    scheduled) value="$(heartbeat_issue_is_scheduled "${id}")" ;;
    schedule_interval_seconds) value="$(heartbeat_issue_schedule_interval_seconds "${id}")" ;;
    exclusive) value="$(heartbeat_issue_is_exclusive "${id}")" ;;
    *)
      echo "unsupported issue cache attr: ${attr}" >&2
      return 1
      ;;
  esac

  printf '%s\n' "${value}" >"${memo_file}"
  printf '%s\n' "${value}"
}
|
|
684
|
+
|
|
685
|
+
# Memoized wrapper around heartbeat_pr_is_exclusive; the answer is cached
# per cycle as a file under ${pr_attr_cache_dir}.
cached_pr_is_exclusive() {
  local pr="${1:?pr number required}"
  local memo_file answer

  ensure_pr_attr_cache_dir
  memo_file="${pr_attr_cache_dir}/${pr}.exclusive"
  if [[ -f "${memo_file}" ]]; then
    cat "${memo_file}"
    return 0
  fi

  answer="$(heartbeat_pr_is_exclusive "${pr}")"
  printf '%s\n' "${answer}" >"${memo_file}"
  printf '%s\n' "${answer}"
}
|
|
700
|
+
|
|
701
|
+
# Memoized PR risk JSON. Lookup order: per-cycle cache file, then the
# cross-cycle runtime cache (only when fresh), then the
# heartbeat_pr_risk_json hook; hook results feed both cache layers.
cached_pr_risk_json() {
  local pr="${1:?pr number required}"
  local cycle_file persistent_file payload

  ensure_pr_risk_cache_dir
  cycle_file="${pr_risk_cache_dir}/${pr}.json"
  persistent_file="${pr_risk_runtime_cache_dir}/${pr}.json"

  if [[ -f "${cycle_file}" ]]; then
    cat "${cycle_file}"
    return 0
  fi

  if pr_risk_runtime_cache_fresh "${persistent_file}"; then
    cp "${persistent_file}" "${cycle_file}"
    cat "${cycle_file}"
    return 0
  fi

  payload="$(heartbeat_pr_risk_json "${pr}")"
  printf '%s\n' "${payload}" >"${cycle_file}"
  printf '%s\n' "${payload}" >"${persistent_file}"
  printf '%s\n' "${payload}"
}
|
|
724
|
+
|
|
725
|
+
# Count running issue workers whose issue is flagged heavy.
running_heavy_issue_workers() {
  local sess id heavy_total=0
  ensure_running_issue_workers_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    id="$(issue_id_from_session "$sess" || true)"
    [[ -n "$id" ]] || continue
    if [[ "$(cached_issue_attr heavy "$id")" == "yes" ]]; then
      heavy_total=$((heavy_total + 1))
    fi
  done <<<"$running_issue_workers_cache"
  printf '%s\n' "$heavy_total"
}
|
|
739
|
+
|
|
740
|
+
# Shared scanner: emit each issue id that has a pending-launch pid file
# but no live issue-worker tmux session yet, one id per line. Extracted
# because the same scan was copy-pasted across every pending_* counter.
_pending_issue_launch_candidate_ids() {
  local pid_file id
  for pid_file in "${pending_launch_dir}"/issue-*.pid; do
    [[ -f "$pid_file" ]] || continue
    id="${pid_file##*/issue-}"
    id="${id%.pid}"
    [[ -n "$id" ]] || continue
    if tmux has-session -t "${issue_prefix}${id}" 2>/dev/null; then
      continue
    fi
    printf '%s\n' "$id"
  done
}

# Shared scanner: same as above, for PR pending-launch pid files.
_pending_pr_launch_candidate_ids() {
  local pid_file id
  for pid_file in "${pending_launch_dir}"/pr-*.pid; do
    [[ -f "$pid_file" ]] || continue
    id="${pid_file##*/pr-}"
    id="${id%.pid}"
    [[ -n "$id" ]] || continue
    if tmux has-session -t "${pr_prefix}${id}" 2>/dev/null; then
      continue
    fi
    printf '%s\n' "$id"
  done
}

# Count pending issue launches that consume scheduler capacity.
pending_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id"; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count pending PR launches with a live launcher process.
pending_pr_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_pr_launch_active "$id"; then
      count=$((count + 1))
    fi
  done < <(_pending_pr_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for heavy issues.
pending_heavy_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" \
      && [[ "$(cached_issue_attr heavy "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for scheduled issues.
pending_scheduled_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" \
      && [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for scheduled AND heavy issues.
pending_scheduled_heavy_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" \
      && [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]] \
      && [[ "$(cached_issue_attr heavy "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for recurring (non-scheduled)
# issues.
pending_recurring_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" \
      && [[ "$(cached_issue_attr scheduled "$id")" != "yes" ]] \
      && [[ "$(cached_issue_attr recurring "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for issues that have
# blocked-recovery state on disk.
pending_blocked_recovery_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" && blocked_recovery_issue_has_state "$id"; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count capacity-consuming pending launches for exclusive issues.
pending_exclusive_issue_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_issue_launch_counts_toward_capacity "$id" \
      && [[ "$(cached_issue_attr exclusive "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_issue_launch_candidate_ids)
  printf '%s\n' "$count"
}

# Count active pending launches for exclusive PRs.
pending_exclusive_pr_launch_count() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if pending_pr_launch_active "$id" && [[ "$(cached_pr_is_exclusive "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_pending_pr_launch_candidate_ids)
  printf '%s\n' "$count"
}
|
|
896
|
+
|
|
897
|
+
# Shared scanner: emit the numeric issue id behind each running
# issue-worker session, one per line. Extracted because the same
# session-to-id loop was copy-pasted across every running_* counter.
_running_issue_worker_ids() {
  local sess id
  ensure_running_issue_workers_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    id="$(issue_id_from_session "$sess" || true)"
    [[ -n "$id" ]] || continue
    printf '%s\n' "$id"
  done <<<"$running_issue_workers_cache"
}

# Shared scanner: emit the numeric PR id behind each running PR-worker
# session, one per line.
_running_pr_worker_ids() {
  local sess id
  ensure_running_pr_workers_cache
  while IFS= read -r sess; do
    [[ -n "$sess" ]] || continue
    id="$(pr_id_from_session "$sess" || true)"
    [[ -n "$id" ]] || continue
    printf '%s\n' "$id"
  done <<<"$running_pr_workers_cache"
}

# Count running issue workers that are neither scheduled nor recurring.
running_non_recurring_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]] && continue
    if [[ "$(cached_issue_attr recurring "$id")" != "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}

# Count running issue workers for recurring (non-scheduled) issues.
running_recurring_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]] && continue
    if [[ "$(cached_issue_attr recurring "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}

# Count running issue workers whose issue has blocked-recovery state.
running_blocked_recovery_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if blocked_recovery_issue_has_state "$id"; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}

# Count running issue workers flagged exclusive.
running_exclusive_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if [[ "$(cached_issue_attr exclusive "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}

# Count running PR workers flagged exclusive.
running_exclusive_pr_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if [[ "$(cached_pr_is_exclusive "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_pr_worker_ids)
  printf '%s\n' "$count"
}

# Count running issue workers for scheduled issues.
running_scheduled_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}

# Count running issue workers for scheduled AND heavy issues.
running_scheduled_heavy_issue_workers() {
  local id count=0
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    if [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]] \
      && [[ "$(cached_issue_attr heavy "$id")" == "yes" ]]; then
      count=$((count + 1))
    fi
  done < <(_running_issue_worker_ids)
  printf '%s\n' "$count"
}
|
|
1009
|
+
|
|
1010
|
+
# Count ready issues that are neither scheduled nor recurring.
ready_non_recurring_issue_count() {
  local id tally=0
  ensure_ready_issue_ids_cache
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    [[ "$(cached_issue_attr scheduled "$id")" == "yes" ]] && continue
    [[ "$(cached_issue_attr recurring "$id")" == "yes" ]] && continue
    tally=$((tally + 1))
  done <<<"$ready_issue_ids_cache"
  printf '%s\n' "$tally"
}
|
|
1025
|
+
|
|
1026
|
+
# Print cached issue ids flagged for blocked-recovery handling.
blocked_recovery_issue_ids() {
  ensure_blocked_recovery_issue_ids_cache
  printf '%s\n' "$blocked_recovery_issue_ids_cache"
}

# Print the cached ordered list of ready issue ids.
ordered_ready_issue_ids() {
  ensure_ordered_ready_issue_ids_cache
  printf '%s\n' "$ordered_ready_issue_ids_cache"
}

# Print cached scheduled issue ids that are currently due.
due_scheduled_issue_ids() {
  ensure_due_scheduled_issue_ids_cache
  printf '%s\n' "$due_scheduled_issue_ids_cache"
}

# Print cached blocked-recovery issue ids that are currently due.
due_blocked_recovery_issue_ids() {
  ensure_due_blocked_recovery_issue_ids_cache
  printf '%s\n' "$due_blocked_recovery_issue_ids_cache"
}
|
|
1045
|
+
|
|
1046
|
+
ensure_due_scheduled_issue_ids_cache() {
|
|
1047
|
+
if [[ "$due_scheduled_issue_ids_cache_loaded" != "yes" ]]; then
|
|
1048
|
+
due_scheduled_issue_ids_cache="$(build_due_scheduled_issue_ids_cache)"
|
|
1049
|
+
due_scheduled_issue_ids_cache_loaded="yes"
|
|
1050
|
+
fi
|
|
1051
|
+
}
|
|
1052
|
+
|
|
1053
|
+
ensure_due_blocked_recovery_issue_ids_cache() {
|
|
1054
|
+
if [[ "$due_blocked_recovery_issue_ids_cache_loaded" != "yes" ]]; then
|
|
1055
|
+
due_blocked_recovery_issue_ids_cache="$(build_due_blocked_recovery_issue_ids_cache)"
|
|
1056
|
+
due_blocked_recovery_issue_ids_cache_loaded="yes"
|
|
1057
|
+
fi
|
|
1058
|
+
}
|
|
1059
|
+
|
|
1060
|
+
build_due_scheduled_issue_ids_cache() {
  # Emit the scheduled, currently-due ready issue ids, ordered by their due
  # epoch (ties broken by numeric issue id).
  # Fix: dropped the unused `now_epoch` local (a `date +%s` call per build
  # whose result was never read).
  local issue_id due_epoch
  ensure_ready_issue_ids_cache
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    if [[ "$(cached_issue_attr scheduled "$issue_id")" != "yes" ]]; then
      continue
    fi
    if ! scheduled_issue_is_due "$issue_id"; then
      continue
    fi
    due_epoch="$(scheduled_issue_due_epoch "$issue_id")"
    # Malformed epochs sort first (as 0) rather than breaking the sort.
    if ! [[ "${due_epoch:-}" =~ ^[0-9]+$ ]]; then
      due_epoch=0
    fi
    printf '%s\t%s\n' "$due_epoch" "$issue_id"
  done <<<"$ready_issue_ids_cache" | sort -n -k1,1 -k2,2n | cut -f2
}
|
|
1079
|
+
|
|
1080
|
+
build_due_blocked_recovery_issue_ids_cache() {
  # Emit due blocked-recovery issue ids ordered by due epoch, then id.
  local id epoch

  # Lane disabled entirely when no worker slots are allotted.
  if (( max_concurrent_blocked_recovery_issue_workers <= 0 )); then
    return 0
  fi

  ensure_blocked_recovery_issue_ids_cache
  while IFS= read -r id; do
    [[ -n "$id" ]] || continue
    blocked_recovery_issue_is_due "$id" || continue
    epoch="$(blocked_recovery_issue_due_epoch "$id")"
    [[ "${epoch:-}" =~ ^[0-9]+$ ]] || epoch=0
    printf '%s\t%s\n' "$epoch" "$id"
  done <<<"$blocked_recovery_issue_ids_cache" | sort -n -k1,1 -k2,2n | cut -f2
}
|
|
1099
|
+
|
|
1100
|
+
build_ordered_ready_issue_ids_cache() {
  # Emit ready (non-scheduled) issue ids in launch-priority order:
  # non-recurring ids first (cache order), then recurring ids rotated so the
  # most recently launched recurring issue is considered LAST.
  # Fix: the previous rotation dropped the last-launched recurring id
  # entirely (the tail loop broke before printing it), which permanently
  # starved a repository whose only recurring issue had already run once.
  local issue_id is_recurring last_recurring_issue seen_last="no"
  local -a recurring_ids=()
  ensure_ready_issue_ids_cache
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    if [[ "$(cached_issue_attr scheduled "$issue_id")" == "yes" ]]; then
      continue
    fi
    is_recurring="$(cached_issue_attr recurring "$issue_id")"
    if [[ "$is_recurring" != "yes" ]]; then
      printf '%s\n' "$issue_id"
    else
      recurring_ids+=("$issue_id")
    fi
  done <<<"$ready_issue_ids_cache"

  if (( ${#recurring_ids[@]} == 0 )); then
    return 0
  fi

  last_recurring_issue="$(last_launched_recurring_issue_id || true)"

  # First pass: recurring ids strictly after the last-launched one.
  if [[ -n "$last_recurring_issue" ]]; then
    for issue_id in "${recurring_ids[@]}"; do
      if [[ "$seen_last" == "yes" ]]; then
        printf '%s\n' "$issue_id"
      fi
      if [[ "$issue_id" == "$last_recurring_issue" ]]; then
        seen_last="yes"
      fi
    done
  fi

  # Second pass: ids from the start of the list; when the last-launched id is
  # present it is emitted here as the final entry (rotated to the tail).
  for issue_id in "${recurring_ids[@]}"; do
    printf '%s\n' "$issue_id"
    if [[ "$seen_last" == "yes" && "$issue_id" == "$last_recurring_issue" ]]; then
      break
    fi
  done
}
|
|
1140
|
+
|
|
1141
|
+
completed_workers() {
  # Print cached session names of workers that reached a terminal state.
  ensure_completed_workers_cache
  printf '%s\n' "$completed_workers_cache"
}

ensure_completed_workers_cache() {
  # Scan $runs_root for worker run directories whose session name matches our
  # prefix, has not been reconciled yet, and whose status tool reports a
  # terminal state (SUCCEEDED/FAILED); collect those session names.
  local dir session issue_id status_line status
  if [[ "$completed_workers_cache_loaded" == "yes" ]]; then
    return 0
  fi
  completed_workers_cache=""
  for dir in "$runs_root"/*; do
    [[ -d "$dir" ]] || continue
    session="${dir##*/}"
    session_matches_prefix "$session" || continue
    # reconciled.ok marks runs already processed by a previous pass.
    [[ -f "$dir/reconciled.ok" ]] && continue
    if [[ "$session" == "${issue_prefix}"* ]]; then
      issue_id="$(issue_id_from_session "$session" || true)"
      # Issue runs mid-relaunch are not "completed" — skip them.
      if [[ -n "${issue_id}" ]] && pending_issue_launch_active "${issue_id}"; then
        continue
      fi
    fi
    # Ask the external status tool; || true keeps a tool failure non-fatal
    # (status then falls back to UNKNOWN below).
    status_line="$(
      "${shared_agent_home}/tools/bin/agent-project-worker-status" \
        --runs-root "$runs_root" \
        --session "$session" \
        | awk -F= '/^STATUS=/{print $2}' || true
    )"
    status="${status_line:-UNKNOWN}"
    if [[ "$status" == "SUCCEEDED" || "$status" == "FAILED" ]]; then
      completed_workers_cache+="${session}"$'\n'
    fi
  done
  # Trim the trailing newline so downstream while-read loops see no blank tail.
  completed_workers_cache="${completed_workers_cache%$'\n'}"
  completed_workers_cache_loaded="yes"
}
|
|
1177
|
+
|
|
1178
|
+
ready_issue_ids() {
  # Print the cached set of issue ids ready to be worked on.
  ensure_ready_issue_ids_cache
  printf '%s\n' "$ready_issue_ids_cache"
}

ensure_ready_issue_ids_cache() {
  # Populate ready_issue_ids_cache once via heartbeat_list_ready_issue_ids.
  if [[ "$ready_issue_ids_cache_loaded" != "yes" ]]; then
    ready_issue_ids_cache="$(heartbeat_list_ready_issue_ids)"
    ready_issue_ids_cache_loaded="yes"
  fi
}
|
|
1189
|
+
|
|
1190
|
+
last_launched_recurring_issue_id() {
  # Print the recorded rotation marker (whitespace stripped); prints nothing
  # when no recurring launch has been recorded yet.
  [[ -f "$recurring_rotation_file" ]] || return 0
  tr -d '[:space:]' <"$recurring_rotation_file"
}

record_recurring_issue_launch() {
  # Persist the id of the recurring issue that was just launched.
  local launched_id="${1:?issue id required}"
  printf '%s\n' "$launched_id" >"$recurring_rotation_file"
}
|
|
1200
|
+
|
|
1201
|
+
scheduled_state_file() {
  # Path of the per-issue scheduled-lane state file.
  local id="${1:?issue id required}"
  printf '%s/%s.env\n' "$scheduled_state_dir" "$id"
}
|
|
1205
|
+
|
|
1206
|
+
scheduled_issue_due_epoch() {
  # Print NEXT_DUE_EPOCH from the issue's state file, or 0 when the file is
  # missing or the stored value is not a plain non-negative integer.
  local id="${1:?issue id required}"
  local file value=""
  file="$(scheduled_state_file "$id")"

  if [[ -f "$file" ]]; then
    value="$(awk -F= '/^NEXT_DUE_EPOCH=/{print $2}' "$file" 2>/dev/null | tr -d '[:space:]' || true)"
  fi

  if [[ "${value:-}" =~ ^[0-9]+$ ]]; then
    printf '%s\n' "$value"
  else
    printf '0\n'
  fi
}
|
|
1223
|
+
|
|
1224
|
+
scheduled_issue_is_due() {
  # Succeed when the issue has a valid positive schedule interval and its
  # next due time is unset (0) or already in the past.
  local id="${1:?issue id required}"
  local interval due now

  interval="$(cached_issue_attr schedule_interval_seconds "$id")"
  [[ "${interval:-}" =~ ^[1-9][0-9]*$ ]] || return 1

  due="$(scheduled_issue_due_epoch "$id")"
  now="$(date +%s)"
  # Not due only when the stored epoch is a valid integer strictly in the
  # future; everything else (0, past, malformed) counts as due.
  if [[ "${due:-}" =~ ^[0-9]+$ ]] && (( due > now )); then
    return 1
  fi
  return 0
}
|
|
1239
|
+
|
|
1240
|
+
record_scheduled_issue_launch() {
  # Record a launch of a scheduled issue: stamp LAST_STARTED_* and advance
  # NEXT_DUE_EPOCH to the first interval boundary strictly after now.
  local issue_id="${1:?issue id required}"
  local interval_seconds state_file now_epoch due_epoch next_due_epoch

  interval_seconds="$(cached_issue_attr schedule_interval_seconds "$issue_id")"
  # Issues without a valid positive interval carry no schedule state.
  if ! [[ "${interval_seconds:-}" =~ ^[1-9][0-9]*$ ]]; then
    return 0
  fi

  now_epoch="$(date +%s)"
  due_epoch="$(scheduled_issue_due_epoch "$issue_id")"
  if ! [[ "${due_epoch:-}" =~ ^[0-9]+$ ]] || (( due_epoch <= 0 )); then
    # No prior schedule: start one interval from now.
    next_due_epoch=$((now_epoch + interval_seconds))
  elif (( due_epoch > now_epoch )); then
    # Launched early: keep the existing future due time.
    next_due_epoch="$due_epoch"
  else
    # Advance by whole intervals in O(1) instead of looping one interval at a
    # time (the loop was O((now-due)/interval) after long outages).
    next_due_epoch=$((due_epoch + interval_seconds * ((now_epoch - due_epoch) / interval_seconds + 1)))
  fi

  state_file="$(scheduled_state_file "$issue_id")"
  # NOTE: `date -u -r <epoch>` is BSD/macOS syntax; GNU date would need
  # `-d @<epoch>` — this file uses -r throughout, presumably targeting macOS.
  cat >"$state_file" <<EOF
INTERVAL_SECONDS=${interval_seconds}
LAST_STARTED_EPOCH=${now_epoch}
LAST_STARTED_AT=$(date -u -r "$now_epoch" +"%Y-%m-%dT%H:%M:%SZ")
NEXT_DUE_EPOCH=${next_due_epoch}
NEXT_DUE_AT=$(date -u -r "$next_due_epoch" +"%Y-%m-%dT%H:%M:%SZ")
UPDATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
EOF
}
|
|
1270
|
+
|
|
1271
|
+
record_scheduled_issue_result() {
  # Rewrite the issue's scheduled state file with the outcome of its latest
  # run, preserving the previously recorded interval/start/next-due values.
  local issue_id="${1:?issue id required}"
  local result_status="${2:-unknown}"
  local state_file interval_seconds last_started_epoch next_due_epoch now_epoch

  state_file="$(scheduled_state_file "$issue_id")"
  interval_seconds="$(cached_issue_attr schedule_interval_seconds "$issue_id")"
  last_started_epoch="$(awk -F= '/^LAST_STARTED_EPOCH=/{print $2}' "$state_file" 2>/dev/null | tr -d '[:space:]' || true)"
  next_due_epoch="$(awk -F= '/^NEXT_DUE_EPOCH=/{print $2}' "$state_file" 2>/dev/null | tr -d '[:space:]' || true)"
  now_epoch="$(date +%s)"

  # Normalize anything non-numeric to 0 so the heredoc below always emits
  # well-formed integers.
  if ! [[ "${interval_seconds:-}" =~ ^[1-9][0-9]*$ ]]; then
    interval_seconds=0
  fi
  if ! [[ "${last_started_epoch:-}" =~ ^[0-9]+$ ]]; then
    last_started_epoch=0
  fi
  if ! [[ "${next_due_epoch:-}" =~ ^[0-9]+$ ]]; then
    next_due_epoch=0
  fi

  # The *_AT fields are left empty when their epoch is 0 (no timestamp known).
  # NOTE: `date -u -r <epoch>` is BSD/macOS syntax; GNU date would need
  # `-d @<epoch>` — presumably this script targets macOS. TODO confirm.
  cat >"$state_file" <<EOF
INTERVAL_SECONDS=${interval_seconds}
LAST_STARTED_EPOCH=${last_started_epoch}
LAST_STARTED_AT=$(if [[ "$last_started_epoch" =~ ^[0-9]+$ ]] && (( last_started_epoch > 0 )); then date -u -r "$last_started_epoch" +"%Y-%m-%dT%H:%M:%SZ"; fi)
LAST_RESULT_STATUS=${result_status}
LAST_RESULT_EPOCH=${now_epoch}
LAST_RESULT_AT=$(date -u -r "$now_epoch" +"%Y-%m-%dT%H:%M:%SZ")
NEXT_DUE_EPOCH=${next_due_epoch}
NEXT_DUE_AT=$(if [[ "$next_due_epoch" =~ ^[0-9]+$ ]] && (( next_due_epoch > 0 )); then date -u -r "$next_due_epoch" +"%Y-%m-%dT%H:%M:%SZ"; fi)
UPDATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
EOF
}
|
|
1304
|
+
|
|
1305
|
+
blocked_recovery_state_file() {
  # Path of the per-issue blocked-recovery state file.
  local id="${1:?issue id required}"
  printf '%s/%s.env\n' "$blocked_recovery_state_dir" "$id"
}

blocked_recovery_issue_has_state() {
  # True when blocked-recovery state has been recorded for the issue.
  local id="${1:?issue id required}"
  local file
  file="$(blocked_recovery_state_file "$id")"
  [[ -f "$file" ]]
}
|
|
1314
|
+
|
|
1315
|
+
blocked_recovery_issue_due_epoch() {
  # Print NEXT_DUE_EPOCH from the issue's blocked-recovery state file, or 0
  # when the file is missing or the value is not a plain integer.
  local id="${1:?issue id required}"
  local file value=""
  file="$(blocked_recovery_state_file "$id")"

  if [[ -f "$file" ]]; then
    value="$(awk -F= '/^NEXT_DUE_EPOCH=/{print $2}' "$file" 2>/dev/null | tr -d '[:space:]' || true)"
  fi

  if [[ "${value:-}" =~ ^[0-9]+$ ]]; then
    printf '%s\n' "$value"
  else
    printf '0\n'
  fi
}
|
|
1332
|
+
|
|
1333
|
+
blocked_recovery_issue_is_due() {
  # Due when no valid cooldown is configured, or when the recorded cooldown
  # expiry (0 meaning "unset") is not in the future.
  local id="${1:?issue id required}"
  local due now

  if ! [[ "${blocked_recovery_cooldown_seconds:-}" =~ ^[1-9][0-9]*$ ]]; then
    return 0
  fi

  due="$(blocked_recovery_issue_due_epoch "$id")"
  now="$(date +%s)"
  if [[ "${due:-}" =~ ^[0-9]+$ ]] && (( due > now )); then
    return 1
  fi
  return 0
}
|
|
1347
|
+
|
|
1348
|
+
record_blocked_recovery_issue_launch() {
  # Stamp a blocked-recovery launch and, when a cooldown is configured,
  # schedule the next eligible attempt (NEXT_DUE_EPOCH stays 0 otherwise).
  local issue_id="${1:?issue id required}"
  local state_file now_epoch next_due_epoch next_due_at

  now_epoch="$(date +%s)"
  next_due_epoch=0
  next_due_at=""
  if [[ "${blocked_recovery_cooldown_seconds:-}" =~ ^[1-9][0-9]*$ ]]; then
    next_due_epoch=$((now_epoch + blocked_recovery_cooldown_seconds))
    # NOTE: `date -u -r <epoch>` is BSD/macOS syntax (GNU date uses -d @epoch).
    next_due_at="$(date -u -r "$next_due_epoch" +"%Y-%m-%dT%H:%M:%SZ")"
  fi

  state_file="$(blocked_recovery_state_file "$issue_id")"
  cat >"$state_file" <<EOF
LANE=blocked-recovery
LAST_STARTED_EPOCH=${now_epoch}
LAST_STARTED_AT=$(date -u -r "$now_epoch" +"%Y-%m-%dT%H:%M:%SZ")
NEXT_DUE_EPOCH=${next_due_epoch}
NEXT_DUE_AT=${next_due_at}
UPDATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
EOF
}
|
|
1370
|
+
|
|
1371
|
+
clear_blocked_recovery_issue_state() {
  # Drop any recorded blocked-recovery state for the issue (no-op if absent).
  local id="${1:?issue id required}"
  local file
  file="$(blocked_recovery_state_file "$id")"
  rm -f -- "$file"
}
|
|
1375
|
+
|
|
1376
|
+
open_agent_pr_ids() {
  # Print cached PR numbers of open agent-managed PRs.
  ensure_open_agent_pr_ids_cache
  printf '%s\n' "$open_agent_pr_ids_cache"
}

ensure_open_agent_pr_ids_cache() {
  # Populate the open-agent-PR cache once per process run.
  if [[ "$open_agent_pr_ids_cache_loaded" != "yes" ]]; then
    open_agent_pr_ids_cache="$(heartbeat_list_open_agent_pr_ids)"
    open_agent_pr_ids_cache_loaded="yes"
  fi
}

running_issue_ids() {
  # Print cached ids of issues currently tracked as running.
  ensure_running_issue_ids_cache
  printf '%s\n' "$running_issue_ids_cache"
}

exclusive_issue_ids() {
  # Print cached ids of issues flagged for exclusive execution.
  ensure_exclusive_issue_ids_cache
  printf '%s\n' "$exclusive_issue_ids_cache"
}

exclusive_pr_ids() {
  # Print cached PR numbers flagged for exclusive execution.
  ensure_exclusive_pr_ids_cache
  printf '%s\n' "$exclusive_pr_ids_cache"
}

ensure_running_issue_ids_cache() {
  # Populate the running-issue cache once per process run.
  if [[ "$running_issue_ids_cache_loaded" != "yes" ]]; then
    running_issue_ids_cache="$(heartbeat_list_running_issue_ids)"
    running_issue_ids_cache_loaded="yes"
  fi
}

ensure_exclusive_issue_ids_cache() {
  # Populate the exclusive-issue cache once per process run.
  if [[ "$exclusive_issue_ids_cache_loaded" != "yes" ]]; then
    exclusive_issue_ids_cache="$(heartbeat_list_exclusive_issue_ids)"
    exclusive_issue_ids_cache_loaded="yes"
  fi
}

ensure_exclusive_pr_ids_cache() {
  # Populate the exclusive-PR cache once per process run.
  if [[ "$exclusive_pr_ids_cache_loaded" != "yes" ]]; then
    exclusive_pr_ids_cache="$(heartbeat_list_exclusive_pr_ids)"
    exclusive_pr_ids_cache_loaded="yes"
  fi
}

ensure_ordered_ready_issue_ids_cache() {
  # Populate the ordered-ready cache once per process run.
  if [[ "$ordered_ready_issue_ids_cache_loaded" != "yes" ]]; then
    ordered_ready_issue_ids_cache="$(build_ordered_ready_issue_ids_cache)"
    ordered_ready_issue_ids_cache_loaded="yes"
  fi
}

ensure_blocked_recovery_issue_ids_cache() {
  # Populate the blocked-recovery cache once per process run.
  if [[ "$blocked_recovery_issue_ids_cache_loaded" != "yes" ]]; then
    blocked_recovery_issue_ids_cache="$(heartbeat_list_blocked_recovery_issue_ids)"
    blocked_recovery_issue_ids_cache_loaded="yes"
  fi
}
|
|
1437
|
+
|
|
1438
|
+
sync_open_agent_issues() {
  # Reconcile tracked "running" issue ids against reality: for ids whose tmux
  # session is gone, either re-mark capacity for in-flight relaunches or
  # resync labels when the worker left no usable status behind.
  local issue_id status_out status
  ensure_running_issue_ids_cache
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    # A live tmux session means the worker is still running; nothing to do.
    if tmux has-session -t "${issue_prefix}${issue_id}" 2>/dev/null; then
      continue
    fi
    if pending_issue_launch_active "$issue_id"; then
      # Mid-launch: keep it counted as running if it consumes capacity.
      if pending_issue_launch_counts_toward_capacity "$issue_id"; then
        heartbeat_mark_issue_running "$issue_id" "$(cached_issue_attr heavy "$issue_id")" >/dev/null || true
      fi
      continue
    fi
    status_out="$(
      "${shared_agent_home}/tools/bin/agent-project-worker-status" \
        --runs-root "$runs_root" \
        --session "${issue_prefix}${issue_id}"
    )"
    status="$(awk -F= '/^STATUS=/{print $2}' <<<"$status_out")"
    # UNKNOWN: no run dir / no verdict — push labels back in sync.
    # NOTE(review): unlike the PR path below, this does not clear the running
    # marker for terminal states — presumably the completed-worker
    # reconciliation pass handles that; confirm against heartbeat hooks.
    if [[ "$status" == "UNKNOWN" ]]; then
      heartbeat_sync_issue_labels "$issue_id" >/dev/null || true
    fi
  done <<<"$running_issue_ids_cache"
}

sync_open_agent_prs() {
  # Same sweep for open agent PRs: any PR without a live tmux session and not
  # mid-launch gets its running marker cleared and labels resynced, except
  # when the status tool still reports RUNNING.
  local pr_number status_out status
  ensure_open_agent_pr_ids_cache
  while IFS= read -r pr_number; do
    [[ -n "$pr_number" ]] || continue
    if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
      continue
    fi
    if pending_pr_launch_active "$pr_number"; then
      heartbeat_mark_pr_running "$pr_number" >/dev/null || true
      continue
    fi
    status_out="$(
      "${shared_agent_home}/tools/bin/agent-project-worker-status" \
        --runs-root "$runs_root" \
        --session "${pr_prefix}${pr_number}"
    )"
    status="$(awk -F= '/^STATUS=/{print $2}' <<<"$status_out")"
    case "$status" in
      UNKNOWN)
        heartbeat_clear_pr_running "$pr_number" >/dev/null || true
        heartbeat_sync_pr_labels "$pr_number" >/dev/null || true
        ;;
      RUNNING)
        # Status says RUNNING despite no tmux session; leave it for the
        # completed-worker reconciliation to sort out.
        ;;
      *)
        heartbeat_clear_pr_running "$pr_number" >/dev/null || true
        heartbeat_sync_pr_labels "$pr_number" >/dev/null || true
        ;;
    esac
  done <<<"$open_agent_pr_ids_cache"
}
|
|
1496
|
+
|
|
1497
|
+
next_pr_candidate_json() {
  # Print the risk JSON of the highest-priority launchable PR. Lanes are
  # scanned in fixed priority order; within a lane, PRs keep cache order.
  local target_lane pr_number risk_json lane
  ensure_open_agent_pr_ids_cache
  for target_lane in double-check-2 double-check-1 automerge merge-repair fix ci-refresh; do
    while IFS= read -r pr_number; do
      [[ -n "$pr_number" ]] || continue
      # Skip PRs already running, reserved, mid-launch, or in retry backoff.
      if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
        continue
      fi
      if pr_launch_reserved "$pr_number"; then
        continue
      fi
      if pending_pr_launch_active "$pr_number"; then
        continue
      fi
      if ! retry_ready pr "$pr_number"; then
        continue
      fi
      risk_json="$(cached_pr_risk_json "$pr_number")"
      lane="$(jq -r '.agentLane' <<<"$risk_json")"
      if [[ "$lane" == "$target_lane" ]]; then
        printf '%s\n' "$risk_json"
        return 0
      fi
    done <<<"$open_agent_pr_ids_cache"
  done
}

next_priority_review_pr_candidate_json() {
  # Like next_pr_candidate_json, but restricted to the two review lanes.
  local target_lane pr_number risk_json lane
  ensure_open_agent_pr_ids_cache
  for target_lane in double-check-2 double-check-1; do
    while IFS= read -r pr_number; do
      [[ -n "$pr_number" ]] || continue
      if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
        continue
      fi
      if pr_launch_reserved "$pr_number"; then
        continue
      fi
      if pending_pr_launch_active "$pr_number"; then
        continue
      fi
      if ! retry_ready pr "$pr_number"; then
        continue
      fi
      risk_json="$(cached_pr_risk_json "$pr_number")"
      lane="$(jq -r '.agentLane' <<<"$risk_json")"
      if [[ "$lane" == "$target_lane" ]]; then
        printf '%s\n' "$risk_json"
        return 0
      fi
    done <<<"$open_agent_pr_ids_cache"
  done
}
|
|
1552
|
+
|
|
1553
|
+
eligible_pr_backlog_count() {
  # Count launchable PRs (not running/reserved/mid-launch/backing-off) whose
  # lane the agent can pick up autonomously.
  local pr_number risk_json lane count=0
  ensure_open_agent_pr_ids_cache
  while IFS= read -r pr_number; do
    [[ -n "$pr_number" ]] || continue
    if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
      continue
    fi
    if pr_launch_reserved "$pr_number"; then
      continue
    fi
    if pending_pr_launch_active "$pr_number"; then
      continue
    fi
    if ! retry_ready pr "$pr_number"; then
      continue
    fi
    risk_json="$(cached_pr_risk_json "$pr_number")"
    lane="$(jq -r '.agentLane' <<<"$risk_json")"
    case "$lane" in
      double-check-1|double-check-2|automerge|merge-repair|fix)
        count=$((count + 1))
        ;;
    esac
  done <<<"$open_agent_pr_ids_cache"
  printf '%s\n' "$count"
}

priority_review_backlog_count() {
  # Same launchability filter, counting only review-lane PRs.
  local pr_number risk_json lane count=0
  ensure_open_agent_pr_ids_cache
  while IFS= read -r pr_number; do
    [[ -n "$pr_number" ]] || continue
    if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
      continue
    fi
    if pr_launch_reserved "$pr_number"; then
      continue
    fi
    if pending_pr_launch_active "$pr_number"; then
      continue
    fi
    if ! retry_ready pr "$pr_number"; then
      continue
    fi
    risk_json="$(cached_pr_risk_json "$pr_number")"
    lane="$(jq -r '.agentLane' <<<"$risk_json")"
    case "$lane" in
      double-check-1|double-check-2)
        count=$((count + 1))
        ;;
    esac
  done <<<"$open_agent_pr_ids_cache"
  printf '%s\n' "$count"
}
|
|
1608
|
+
|
|
1609
|
+
next_exclusive_pr_candidate_json() {
  # Like next_pr_candidate_json but over the exclusive-PR cache, and never
  # yielding PRs routed to human review.
  local target_lane pr_number risk_json lane
  ensure_exclusive_pr_ids_cache
  for target_lane in double-check-2 double-check-1 automerge merge-repair fix ci-refresh; do
    while IFS= read -r pr_number; do
      [[ -n "$pr_number" ]] || continue
      if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
        continue
      fi
      if pr_launch_reserved "$pr_number"; then
        continue
      fi
      if pending_pr_launch_active "$pr_number"; then
        continue
      fi
      if ! retry_ready pr "$pr_number"; then
        continue
      fi
      risk_json="$(cached_pr_risk_json "$pr_number")"
      lane="$(jq -r '.agentLane' <<<"$risk_json")"
      # Skip PRs requiring human review; they should not hold exclusive lock
      if [[ "$lane" == "human-review" ]]; then
        continue
      fi
      if [[ "$lane" == "$target_lane" ]]; then
        printf '%s\n' "$risk_json"
        return 0
      fi
    done <<<"$exclusive_pr_ids_cache"
  done
}
|
|
1640
|
+
|
|
1641
|
+
next_exclusive_issue_id() {
  # Print the first exclusive issue id that is not already running, not in
  # the middle of a launch, and past its retry backoff.
  local candidate
  ensure_exclusive_issue_ids_cache
  while IFS= read -r candidate; do
    [[ -n "$candidate" ]] || continue
    tmux has-session -t "${issue_prefix}${candidate}" 2>/dev/null && continue
    pending_issue_launch_active "$candidate" && continue
    retry_ready issue "$candidate" || continue
    printf '%s\n' "$candidate"
    return 0
  done <<<"$exclusive_issue_ids_cache"
}
|
|
1659
|
+
|
|
1660
|
+
count_pr_lane() {
  # Count open agent PRs whose risk JSON assigns them to target_lane.
  local target_lane="${1:?target lane required}"
  local pr_number risk_json lane count=0
  ensure_open_agent_pr_ids_cache
  while IFS= read -r pr_number; do
    [[ -n "$pr_number" ]] || continue
    risk_json="$(cached_pr_risk_json "$pr_number")"
    lane="$(jq -r '.agentLane' <<<"$risk_json")"
    if [[ "$lane" == "$target_lane" ]]; then
      count=$((count + 1))
    fi
  done <<<"$open_agent_pr_ids_cache"
  printf '%s\n' "$count"
}

human_review_pr_ids() {
  # Print the PR numbers routed to the human-review lane.
  local pr_number risk_json lane
  ensure_open_agent_pr_ids_cache
  while IFS= read -r pr_number; do
    [[ -n "$pr_number" ]] || continue
    risk_json="$(cached_pr_risk_json "$pr_number")"
    lane="$(jq -r '.agentLane' <<<"$risk_json")"
    if [[ "$lane" == "human-review" ]]; then
      printf '%s\n' "$pr_number"
    fi
  done <<<"$open_agent_pr_ids_cache"
}
|
|
1687
|
+
|
|
1688
|
+
log_phase "reconcile-completed-workers:start"
|
|
1689
|
+
ensure_completed_workers_cache
|
|
1690
|
+
while IFS= read -r completed_session; do
|
|
1691
|
+
[[ -n "$completed_session" ]] || continue
|
|
1692
|
+
case "$completed_session" in
|
|
1693
|
+
"${issue_prefix}"*)
|
|
1694
|
+
if reconcile_out="$(heartbeat_reconcile_issue "$completed_session" 2>&1)"; then
|
|
1695
|
+
record_memory "reconciled issue worker ${completed_session}"
|
|
1696
|
+
print_block "RECONCILED_SESSION=${completed_session}" "$reconcile_out"
|
|
1697
|
+
else
|
|
1698
|
+
record_memory "failed to reconcile issue worker ${completed_session}"
|
|
1699
|
+
print_block "RECONCILE_FAILED_SESSION=${completed_session}" "$reconcile_out"
|
|
1700
|
+
fi
|
|
1701
|
+
;;
|
|
1702
|
+
"${pr_prefix}"*)
|
|
1703
|
+
if reconcile_out="$(heartbeat_reconcile_pr "$completed_session" 2>&1)"; then
|
|
1704
|
+
record_memory "reconciled PR worker ${completed_session}"
|
|
1705
|
+
print_block "RECONCILED_SESSION=${completed_session}" "$reconcile_out"
|
|
1706
|
+
else
|
|
1707
|
+
completed_pr_number="${completed_session#${pr_prefix}}"
|
|
1708
|
+
if [[ -n "$completed_pr_number" ]]; then
|
|
1709
|
+
heartbeat_clear_pr_running "$completed_pr_number" >/dev/null || true
|
|
1710
|
+
heartbeat_sync_pr_labels "$completed_pr_number" >/dev/null || true
|
|
1711
|
+
fi
|
|
1712
|
+
record_memory "failed to reconcile PR worker ${completed_session}"
|
|
1713
|
+
print_block "RECONCILE_FAILED_SESSION=${completed_session}" "$reconcile_out"
|
|
1714
|
+
fi
|
|
1715
|
+
;;
|
|
1716
|
+
*)
|
|
1717
|
+
echo "unknown completed worker session: ${completed_session}" >&2
|
|
1718
|
+
exit 1
|
|
1719
|
+
;;
|
|
1720
|
+
esac
|
|
1721
|
+
done <<<"$completed_workers_cache"
|
|
1722
|
+
log_phase "reconcile-completed-workers:end"
|
|
1723
|
+
|
|
1724
|
+
log_phase "sync-open-agent-issues:start"
|
|
1725
|
+
sync_open_agent_issues
|
|
1726
|
+
log_phase "sync-open-agent-issues:end"
|
|
1727
|
+
log_phase "sync-open-agent-prs:start"
|
|
1728
|
+
sync_open_agent_prs
|
|
1729
|
+
log_phase "sync-open-agent-prs:end"
|
|
1730
|
+
|
|
1731
|
+
log_phase "snapshot-running-counts:start"
|
|
1732
|
+
running_workers_now="$(all_running_workers)"
|
|
1733
|
+
auth_wait_workers_now="$(auth_wait_workers)"
|
|
1734
|
+
running_issue_workers_now="$(running_issue_workers)"
|
|
1735
|
+
running_pr_workers_now="$(running_pr_workers)"
|
|
1736
|
+
pending_issue_count="$(pending_issue_launch_count)"
|
|
1737
|
+
pending_pr_count="$(pending_pr_launch_count)"
|
|
1738
|
+
pending_heavy_issue_count="$(pending_heavy_issue_launch_count)"
|
|
1739
|
+
pending_scheduled_issue_count="$(pending_scheduled_issue_launch_count)"
|
|
1740
|
+
pending_scheduled_heavy_issue_count="$(pending_scheduled_heavy_issue_launch_count)"
|
|
1741
|
+
pending_recurring_issue_count="$(pending_recurring_issue_launch_count)"
|
|
1742
|
+
pending_blocked_recovery_issue_count="$(pending_blocked_recovery_issue_launch_count)"
|
|
1743
|
+
pending_exclusive_issue_count="$(pending_exclusive_issue_launch_count)"
|
|
1744
|
+
pending_exclusive_pr_count="$(pending_exclusive_pr_launch_count)"
|
|
1745
|
+
auth_wait_worker_count="$(worker_count "$auth_wait_workers_now")"
|
|
1746
|
+
running_issue_count="$(worker_count "$running_issue_workers_now")"
|
|
1747
|
+
# Snapshot currently-running worker counts for this heartbeat pass, then fold
# in the pending_* counts (launches staged but not yet visible as sessions) so
# the capacity checks below cannot over-launch during the staging window.
running_pr_count="$(worker_count "$running_pr_workers_now")"
running_total_count=$((running_issue_count + running_pr_count))
running_issue_count=$((running_issue_count + pending_issue_count))
running_pr_count=$((running_pr_count + pending_pr_count))
running_total_count=$((running_total_count + pending_issue_count + pending_pr_count))
# Per-category running counts come from dedicated helpers (defined earlier in
# this script); each presumably inspects live sessions — verify there.
running_heavy_issue_count="$(running_heavy_issue_workers)"
running_non_recurring_issue_count="$(running_non_recurring_issue_workers)"
running_recurring_issue_count="$(running_recurring_issue_workers)"
running_scheduled_issue_count="$(running_scheduled_issue_workers)"
running_scheduled_heavy_issue_count="$(running_scheduled_heavy_issue_workers)"
running_blocked_recovery_issue_count="$(running_blocked_recovery_issue_workers)"
running_exclusive_issue_count="$(running_exclusive_issue_workers)"
running_exclusive_pr_count="$(running_exclusive_pr_workers)"
# Fold staged (pending) launches into each category as well.
running_heavy_issue_count=$((running_heavy_issue_count + pending_heavy_issue_count))
running_scheduled_issue_count=$((running_scheduled_issue_count + pending_scheduled_issue_count))
running_scheduled_heavy_issue_count=$((running_scheduled_heavy_issue_count + pending_scheduled_heavy_issue_count))
running_recurring_issue_count=$((running_recurring_issue_count + pending_recurring_issue_count))
running_blocked_recovery_issue_count=$((running_blocked_recovery_issue_count + pending_blocked_recovery_issue_count))
# Non-recurring pending issues = all pending issues minus recurring/scheduled.
running_non_recurring_issue_count=$((running_non_recurring_issue_count + pending_issue_count - pending_recurring_issue_count - pending_scheduled_issue_count))
running_exclusive_issue_count=$((running_exclusive_issue_count + pending_exclusive_issue_count))
running_exclusive_pr_count=$((running_exclusive_pr_count + pending_exclusive_pr_count))
# Open agent-PR and ready-issue backlog sizes used by the recurring-issue
# throttle and launch planning further down.
ensure_open_agent_pr_ids_cache
open_agent_pr_count="$(worker_count "$open_agent_pr_ids_cache")"
ready_non_recurring_count="$(ready_non_recurring_issue_count)"
log_phase "snapshot-running-counts:end"
|
|
1772
|
+
|
|
1773
|
+
# Determine the exclusive-queue lock state for this pass:
#   no      - nothing exclusive is running or queued
#   running - an exclusive worker is already active (or staged)
#   pending - an exclusive item is first in line and may be launched below
exclusive_lock_mode="no"
exclusive_lock_kind=""
exclusive_lock_item=""
exclusive_waiting_reason=""
ensure_exclusive_pr_ids_cache
ensure_exclusive_issue_ids_cache
# Take the first line (highest-priority item) of each newline-separated cache.
exclusive_pr_item_id="${exclusive_pr_ids_cache%%$'\n'*}"
exclusive_issue_item_id="${exclusive_issue_ids_cache%%$'\n'*}"
# Normalize an empty cache to an empty item id.
[[ "$exclusive_pr_item_id" == "$exclusive_pr_ids_cache" && -z "$exclusive_pr_ids_cache" ]] && exclusive_pr_item_id=""
[[ "$exclusive_issue_item_id" == "$exclusive_issue_ids_cache" && -z "$exclusive_issue_ids_cache" ]] && exclusive_issue_item_id=""
exclusive_pr_risk_json=""

if (( running_exclusive_issue_count + running_exclusive_pr_count > 0 )); then
  exclusive_lock_mode="running"
elif [[ -n "$exclusive_pr_item_id" ]]; then
  # PRs take precedence over issues for the exclusive slot.
  exclusive_lock_mode="pending"
  exclusive_lock_kind="pr"
  exclusive_lock_item="$exclusive_pr_item_id"
  exclusive_pr_risk_json="$(cached_pr_risk_json "$exclusive_pr_item_id")"
elif [[ -n "$exclusive_issue_item_id" ]]; then
  exclusive_lock_mode="pending"
  exclusive_lock_kind="issue"
  exclusive_lock_item="$exclusive_issue_item_id"
fi
|
|
1797
|
+
|
|
1798
|
+
# Initialize per-pass launch accounting and capture provider cooldown state.
launched_issue_count=0
launched_pr_count=0
launch_budget_remaining="$max_launches_per_pass"
heavy_deferred_count=0
retry_deferred_issue_count=0
provider_launch_suppressed="no"
provider_cooldown_backend=""
provider_cooldown_model=""
provider_cooldown_until=""
provider_cooldown_reason=""
issue_capacity_limit="$max_concurrent_workers"
reserved_pr_slots=0
pr_backlog_eligible_count=0
priority_review_backlog_count=0
# Cap on fast-path double-check launches per pass (env-overridable; ACP_* wins
# over the legacy F_LOSNING_* name, default 3).
priority_review_launches_remaining="${ACP_MAX_DOUBLE_CHECK_FAST_LAUNCHES:-${F_LOSNING_MAX_DOUBLE_CHECK_FAST_LAUNCHES:-3}}"

# BUG FIX: the original guarded this with `$(provider_cooldown_state ... || true)`,
# which forces the assignment (and therefore the `if`) to always succeed, so the
# parse ran even when the helper failed. Dropping `|| true` makes the condition
# reflect the helper's real exit status; on failure the cooldown vars keep the
# empty defaults set above, which is the same observable outcome as parsing
# empty output.
if provider_cooldown_out="$(provider_cooldown_state 2>/dev/null)"; then
  # Helper emits KEY=VALUE lines; pull out the fields we report/act on.
  provider_cooldown_ready="$(awk -F= '/^READY=/{print $2}' <<<"$provider_cooldown_out")"
  provider_cooldown_backend="$(awk -F= '/^BACKEND=/{print $2}' <<<"$provider_cooldown_out")"
  provider_cooldown_model="$(awk -F= '/^MODEL=/{print $2}' <<<"$provider_cooldown_out")"
  provider_cooldown_until="$(awk -F= '/^NEXT_ATTEMPT_AT=/{print $2}' <<<"$provider_cooldown_out")"
  provider_cooldown_reason="$(awk -F= '/^LAST_REASON=/{print $2}' <<<"$provider_cooldown_out")"
  if [[ "${provider_cooldown_ready}" == "no" ]]; then
    # Provider is cooling down: suppress every launch for this pass.
    provider_launch_suppressed="yes"
    launch_budget_remaining=0
  fi
fi
|
|
1825
|
+
|
|
1826
|
+
# Stage and launch the worker for one PR candidate, dispatching on its agent
# lane. Expects a JSON object with .number and .agentLane. Mutates the
# pass-level launch counters; terminates the whole pass (exit 1) if the
# launch helper fails or the lane is unsupported.
launch_pr_candidate_json() {
  local pr_candidate_json="${1:?pr candidate json required}"
  local pr_number pr_lane launch_out
  local launcher ok_memo fail_memo

  pr_number="$(jq -r '.number' <<<"$pr_candidate_json")"
  pr_lane="$(jq -r '.agentLane' <<<"$pr_candidate_json")"
  stage_pr_launch "$pr_number"

  # Resolve the lane-specific launcher and memory strings up front so the
  # failure/success handling below is shared across all lanes.
  case "$pr_lane" in
    double-check-1|double-check-2|automerge)
      launcher=heartbeat_start_pr_review_worker
      fail_memo="failed to launch PR review worker for #${pr_number}"
      ok_memo="launched PR review worker for #${pr_number}"
      ;;
    merge-repair)
      launcher=heartbeat_start_pr_merge_repair_worker
      fail_memo="failed to launch PR merge-repair worker for #${pr_number}"
      ok_memo="launched PR merge-repair worker for #${pr_number}"
      ;;
    ci-refresh)
      launcher=heartbeat_start_pr_ci_refresh
      fail_memo="failed to trigger PR ci-refresh for #${pr_number}"
      ok_memo="triggered PR ci-refresh for #${pr_number}"
      ;;
    fix)
      launcher=heartbeat_start_pr_fix_worker
      fail_memo="failed to launch PR fix worker for #${pr_number}"
      ok_memo="launched PR fix worker for #${pr_number}"
      ;;
    *)
      launch_out="Unsupported PR lane: ${pr_lane}"
      heartbeat_clear_pr_running "$pr_number" || true
      clear_launch_in_progress
      print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
      exit 1
      ;;
  esac

  if ! launch_out="$("$launcher" "$pr_number" 2>&1)"; then
    heartbeat_clear_pr_running "$pr_number" || true
    clear_launch_in_progress
    record_memory "$fail_memo"
    print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
    exit 1
  fi
  if [[ "$pr_lane" == "ci-refresh" ]]; then
    # A ci-refresh trigger leaves no worker session behind, so release the
    # running marker immediately.
    heartbeat_clear_pr_running "$pr_number" || true
  fi
  record_memory "$ok_memo"

  clear_launch_in_progress
  print_block "LAUNCHED_PR=${pr_number}" "$(printf 'LANE=%s\n%s' "$pr_lane" "$launch_out")"
  # ci-refresh does not occupy a worker slot; every other lane does.
  if [[ "$pr_lane" != "ci-refresh" ]]; then
    running_total_count=$((running_total_count + 1))
    running_pr_count=$((running_pr_count + 1))
  fi
  launched_pr_count=$((launched_pr_count + 1))
  if (( launch_budget_remaining > 0 )); then
    launch_budget_remaining=$((launch_budget_remaining - 1))
  fi
}
|
|
1896
|
+
|
|
1897
|
+
# If an exclusive item is pending, try to launch it (and nothing else this
# pass). When it cannot launch, record why in exclusive_waiting_reason; the
# reporting blocks at the bottom of the pass surface that reason.
if [[ "$exclusive_lock_mode" == "pending" && "$exclusive_lock_kind" == "pr" ]]; then
  pr_number="$exclusive_lock_item"
  pr_lane="$(jq -r '.agentLane' <<<"$exclusive_pr_risk_json")"
  if pending_pr_launch_active "$pr_number"; then
    exclusive_waiting_reason="launch-pending"
  elif [[ "$pr_lane" == "human-review" ]]; then
    # Do not lock exclusive for PRs requiring human review; treat as normal PR
    exclusive_waiting_reason=""
    exclusive_lock_mode="no"
    exclusive_lock_kind=""
    exclusive_lock_item=""
  elif [[ "$pr_lane" != "double-check-1" && "$pr_lane" != "double-check-2" && "$pr_lane" != "automerge" && "$pr_lane" != "merge-repair" && "$pr_lane" != "ci-refresh" && "$pr_lane" != "fix" ]]; then
    # Lane is not launchable by the heartbeat; hold the lock and report it.
    exclusive_waiting_reason="lane-${pr_lane}"
  elif [[ "$provider_launch_suppressed" == "yes" ]]; then
    exclusive_waiting_reason="provider-cooldown"
  elif ! retry_ready pr "$pr_number"; then
    exclusive_waiting_reason="retry-cooldown"
  elif (( running_pr_count >= max_concurrent_pr_workers )); then
    exclusive_waiting_reason="pr-capacity-full"
  elif (( running_total_count >= max_concurrent_workers )); then
    exclusive_waiting_reason="capacity-full"
  else
    # Launch the exclusive PR. Any launch failure aborts the whole pass.
    stage_pr_launch "$pr_number"
    case "$pr_lane" in
      double-check-1|double-check-2|automerge)
        if ! launch_out="$(heartbeat_start_pr_review_worker "$pr_number" 2>&1)"; then
          heartbeat_clear_pr_running "$pr_number" || true
          clear_launch_in_progress
          record_memory "failed to launch exclusive PR review worker for #${pr_number}"
          print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
          exit 1
        fi
        record_memory "launched exclusive PR review worker for #${pr_number}"
        ;;
      merge-repair)
        if ! launch_out="$(heartbeat_start_pr_merge_repair_worker "$pr_number" 2>&1)"; then
          heartbeat_clear_pr_running "$pr_number" || true
          clear_launch_in_progress
          record_memory "failed to launch exclusive PR merge-repair worker for #${pr_number}"
          print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
          exit 1
        fi
        record_memory "launched exclusive PR merge-repair worker for #${pr_number}"
        ;;
      ci-refresh)
        if ! launch_out="$(heartbeat_start_pr_ci_refresh "$pr_number" 2>&1)"; then
          heartbeat_clear_pr_running "$pr_number" || true
          clear_launch_in_progress
          record_memory "failed to trigger exclusive PR ci-refresh for #${pr_number}"
          print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
          exit 1
        fi
        # ci-refresh leaves no worker session; release the running marker now.
        heartbeat_clear_pr_running "$pr_number" || true
        record_memory "triggered exclusive PR ci-refresh for #${pr_number}"
        ;;
      fix)
        if ! launch_out="$(heartbeat_start_pr_fix_worker "$pr_number" 2>&1)"; then
          heartbeat_clear_pr_running "$pr_number" || true
          clear_launch_in_progress
          record_memory "failed to launch exclusive PR fix worker for #${pr_number}"
          print_block "LAUNCH_FAILED_PR=${pr_number}" "$launch_out"
          exit 1
        fi
        record_memory "launched exclusive PR fix worker for #${pr_number}"
        ;;
    esac
    clear_launch_in_progress
    print_block "LAUNCHED_PR=${pr_number}" "$(printf 'LANE=%s\nEXCLUSIVE=yes\n%s' "$pr_lane" "$launch_out")"
    # ci-refresh does not occupy a worker slot; every other lane does.
    if [[ "$pr_lane" != "ci-refresh" ]]; then
      running_total_count=$((running_total_count + 1))
      running_pr_count=$((running_pr_count + 1))
      running_exclusive_pr_count=$((running_exclusive_pr_count + 1))
    fi
    launched_pr_count=$((launched_pr_count + 1))
    if (( launch_budget_remaining > 0 )); then
      launch_budget_remaining=$((launch_budget_remaining - 1))
    fi
    exclusive_lock_mode="running"
    exclusive_waiting_reason=""
  fi
elif [[ "$exclusive_lock_mode" == "pending" && "$exclusive_lock_kind" == "issue" ]]; then
  issue_id="$exclusive_lock_item"
  is_heavy="$(cached_issue_attr heavy "$issue_id")"
  is_recurring="$(cached_issue_attr recurring "$issue_id")"
  if pending_issue_launch_active "$issue_id"; then
    exclusive_waiting_reason="launch-pending"
  elif [[ "$provider_launch_suppressed" == "yes" ]]; then
    exclusive_waiting_reason="provider-cooldown"
  elif ! retry_ready issue "$issue_id"; then
    exclusive_waiting_reason="retry-cooldown"
  elif (( running_total_count >= max_concurrent_workers )); then
    exclusive_waiting_reason="capacity-full"
  elif [[ "$is_heavy" == "yes" ]] && (( running_heavy_issue_count >= max_concurrent_heavy_workers )); then
    exclusive_waiting_reason="heavy-slot-busy"
  else
    # Launch the exclusive issue worker; failure aborts the whole pass.
    stage_issue_launch "$issue_id" "$is_heavy"
    if ! launch_out="$(heartbeat_start_issue_worker "$issue_id" 2>&1)"; then
      heartbeat_issue_launch_failed "$issue_id" "$is_heavy" || true
      clear_launch_in_progress
      record_memory "failed to launch exclusive issue worker for #${issue_id}"
      print_block "LAUNCH_FAILED_ISSUE=${issue_id}" "$launch_out"
      exit 1
    fi

    clear_launch_in_progress
    record_memory "launched exclusive issue worker for #${issue_id}"
    print_block "LAUNCHED_ISSUE=${issue_id}" "$(printf 'EXCLUSIVE=yes\n%s' "$launch_out")"
    launched_issue_count=$((launched_issue_count + 1))
    if (( launch_budget_remaining > 0 )); then
      launch_budget_remaining=$((launch_budget_remaining - 1))
    fi
    # Update every counter the later planning/reporting code reads.
    running_total_count=$((running_total_count + 1))
    running_issue_count=$((running_issue_count + 1))
    running_exclusive_issue_count=$((running_exclusive_issue_count + 1))
    if [[ "$is_heavy" == "yes" ]]; then
      running_heavy_issue_count=$((running_heavy_issue_count + 1))
    fi
    if [[ "$is_recurring" == "yes" ]]; then
      running_recurring_issue_count=$((running_recurring_issue_count + 1))
      record_recurring_issue_launch "$issue_id"
    else
      running_non_recurring_issue_count=$((running_non_recurring_issue_count + 1))
    fi
    exclusive_lock_mode="running"
    exclusive_waiting_reason=""
  fi
fi
|
|
2024
|
+
|
|
2025
|
+
# Main launch planning: only runs when no exclusive item holds the queue.
# Order of phases: due scheduled issues -> priority-review PR fast path ->
# one PR before issue work -> PR-slot reservation -> blocked-recovery issues
# -> ordinary ready issues -> fill remaining capacity with PRs.
if [[ "$exclusive_lock_mode" == "no" ]]; then
  # Defensive re-defaulting; both vars are normally already set above.
  launch_budget_remaining="${launch_budget_remaining:-$max_launches_per_pass}"
  priority_review_launches_remaining="${priority_review_launches_remaining:-${ACP_MAX_DOUBLE_CHECK_FAST_LAUNCHES:-${F_LOSNING_MAX_DOUBLE_CHECK_FAST_LAUNCHES:-3}}}"
  log_phase "launch-planning:start"
  log_phase "launch-planning:scheduled-issue-loop:start"
  ensure_due_scheduled_issue_ids_cache
  # Phase 1: launch scheduled issues that are due, within the scheduled caps.
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    if (( launch_budget_remaining <= 0 )); then
      break
    fi
    if pending_issue_launch_active "$issue_id"; then
      continue
    fi
    if (( running_scheduled_issue_count >= max_concurrent_scheduled_issue_workers )); then
      break
    fi

    is_heavy="$(cached_issue_attr heavy "$issue_id")"
    if [[ "$is_heavy" == "yes" ]] && (( running_scheduled_heavy_issue_count >= max_concurrent_scheduled_heavy_workers )); then
      heavy_deferred_count=$((heavy_deferred_count + 1))
      continue
    fi

    stage_issue_launch "$issue_id" "$is_heavy"
    if ! launch_out="$(heartbeat_start_issue_worker "$issue_id" 2>&1)"; then
      heartbeat_issue_launch_failed "$issue_id" "$is_heavy" || true
      clear_launch_in_progress
      record_memory "failed to launch scheduled issue worker for #${issue_id}"
      print_block "LAUNCH_FAILED_SCHEDULED_ISSUE=${issue_id}" "$launch_out"
      exit 1
    fi

    clear_launch_in_progress
    record_scheduled_issue_launch "$issue_id"
    record_memory "launched scheduled issue worker for #${issue_id}"
    print_block "LAUNCHED_SCHEDULED_ISSUE=${issue_id}" "$(printf 'SCHEDULED=yes\n%s' "$launch_out")"
    launched_issue_count=$((launched_issue_count + 1))
    if (( launch_budget_remaining > 0 )); then
      launch_budget_remaining=$((launch_budget_remaining - 1))
    fi
    running_total_count=$((running_total_count + 1))
    running_issue_count=$((running_issue_count + 1))
    running_scheduled_issue_count=$((running_scheduled_issue_count + 1))
    if [[ "$is_heavy" == "yes" ]]; then
      running_heavy_issue_count=$((running_heavy_issue_count + 1))
      running_scheduled_heavy_issue_count=$((running_scheduled_heavy_issue_count + 1))
    fi
  done <<<"$due_scheduled_issue_ids_cache"
  log_phase "launch-planning:scheduled-issue-loop:end"
  # Fast-path independent review lanes so high-risk PRs do not wait behind
  # issue work or lower-priority PR lanes in short heartbeat windows.
  log_phase "launch-planning:priority-review:start"
  while (( priority_review_launches_remaining > 0 )) && (( launch_budget_remaining > 0 )) && (( running_pr_count < max_concurrent_pr_workers )) && (( running_total_count < max_concurrent_workers )); do
    pr_candidate_json="$(next_priority_review_pr_candidate_json || true)"
    [[ -n "$pr_candidate_json" ]] || break
    launch_pr_candidate_json "$pr_candidate_json"
    priority_review_launches_remaining=$((priority_review_launches_remaining - 1))
  done
  log_phase "launch-planning:priority-review:end"
  log_phase "launch-planning:first-pr:start"
  if (( launch_budget_remaining > 0 )) && (( running_pr_count < max_concurrent_pr_workers )) && (( running_total_count < max_concurrent_workers )); then
    pr_candidate_json="$(next_pr_candidate_json || true)"
    if [[ -n "$pr_candidate_json" ]]; then
      # Launch a PR before issue work so short heartbeat windows do not starve PR lanes.
      launch_pr_candidate_json "$pr_candidate_json"
    fi
  fi
  log_phase "launch-planning:first-pr:end"

  # Reserve worker slots for the PR backlog so issue launches cannot consume
  # all capacity: baseline 4, raised to the priority-review backlog size, then
  # clamped to the actual backlog and to remaining PR/total capacity.
  log_phase "launch-planning:backlog-counts:start"
  pr_backlog_eligible_count="$(eligible_pr_backlog_count)"
  priority_review_backlog_count="$(priority_review_backlog_count)"
  if (( pr_backlog_eligible_count > 0 )); then
    reserved_pr_slots=4
    if (( priority_review_backlog_count > reserved_pr_slots )); then
      reserved_pr_slots="$priority_review_backlog_count"
    fi
    if (( reserved_pr_slots > pr_backlog_eligible_count )); then
      reserved_pr_slots="$pr_backlog_eligible_count"
    fi
    if (( reserved_pr_slots > max_concurrent_pr_workers - running_pr_count )); then
      reserved_pr_slots=$((max_concurrent_pr_workers - running_pr_count))
    fi
    if (( reserved_pr_slots > max_concurrent_workers - running_total_count )); then
      reserved_pr_slots=$((max_concurrent_workers - running_total_count))
    fi
    if (( reserved_pr_slots < 0 )); then
      reserved_pr_slots=0
    fi
  fi
  issue_capacity_limit=$((max_concurrent_workers - reserved_pr_slots))
  # Never let the reservation report a limit below what is already running.
  if (( issue_capacity_limit < running_total_count )); then
    issue_capacity_limit="$running_total_count"
  fi
  log_phase "launch-planning:backlog-counts:end"

  # Phase: blocked-recovery issues that are due for another attempt.
  log_phase "launch-planning:blocked-recovery-loop:start"
  ensure_due_blocked_recovery_issue_ids_cache
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    if pending_issue_launch_active "$issue_id"; then
      continue
    fi
    if (( max_concurrent_blocked_recovery_issue_workers <= 0 )); then
      break
    fi
    if (( launch_budget_remaining <= 0 )); then
      break
    fi
    if (( running_total_count >= max_concurrent_workers )); then
      break
    fi
    if (( running_blocked_recovery_issue_count >= max_concurrent_blocked_recovery_issue_workers )); then
      break
    fi
    if ! retry_ready issue "$issue_id"; then
      retry_deferred_issue_count=$((retry_deferred_issue_count + 1))
      continue
    fi

    is_heavy="$(cached_issue_attr heavy "$issue_id")"
    is_recurring="$(cached_issue_attr recurring "$issue_id")"
    if [[ "$is_recurring" == "yes" ]]; then
      # Throttle recurring work by concurrency and by open agent-PR backlog.
      if (( running_recurring_issue_count >= max_recurring_issue_workers )); then
        continue
      fi
      if (( max_open_agent_prs_for_recurring > 0 )) && (( open_agent_pr_count >= max_open_agent_prs_for_recurring )); then
        continue
      fi
    fi
    if [[ "$is_heavy" == "yes" ]] && (( running_heavy_issue_count >= max_concurrent_heavy_workers )); then
      heavy_deferred_count=$((heavy_deferred_count + 1))
      continue
    fi

    stage_issue_launch "$issue_id" "$is_heavy"
    if ! launch_out="$(heartbeat_start_issue_worker "$issue_id" 2>&1)"; then
      heartbeat_issue_launch_failed "$issue_id" "$is_heavy" || true
      clear_launch_in_progress
      clear_blocked_recovery_issue_state "$issue_id"
      record_memory "failed to launch blocked-recovery issue worker for #${issue_id}"
      print_block "LAUNCH_FAILED_BLOCKED_RECOVERY_ISSUE=${issue_id}" "$launch_out"
      exit 1
    fi

    record_blocked_recovery_issue_launch "$issue_id"
    clear_launch_in_progress
    record_memory "launched blocked-recovery issue worker for #${issue_id}"
    print_block "LAUNCHED_BLOCKED_RECOVERY_ISSUE=${issue_id}" "$(printf 'BLOCKED_RECOVERY=yes\n%s' "$launch_out")"
    launched_issue_count=$((launched_issue_count + 1))
    if (( launch_budget_remaining > 0 )); then
      launch_budget_remaining=$((launch_budget_remaining - 1))
    fi
    running_total_count=$((running_total_count + 1))
    running_issue_count=$((running_issue_count + 1))
    running_blocked_recovery_issue_count=$((running_blocked_recovery_issue_count + 1))
    if [[ "$is_heavy" == "yes" ]]; then
      running_heavy_issue_count=$((running_heavy_issue_count + 1))
    fi
    if [[ "$is_recurring" == "yes" ]]; then
      running_recurring_issue_count=$((running_recurring_issue_count + 1))
      record_recurring_issue_launch "$issue_id"
    else
      running_non_recurring_issue_count=$((running_non_recurring_issue_count + 1))
    fi
  done <<<"$due_blocked_recovery_issue_ids_cache"
  log_phase "launch-planning:blocked-recovery-loop:end"

  # Phase: ordinary ready issues, bounded by the reserved issue capacity.
  log_phase "launch-planning:issue-loop:start"
  ensure_ordered_ready_issue_ids_cache
  while IFS= read -r issue_id; do
    [[ -n "$issue_id" ]] || continue
    if pending_issue_launch_active "$issue_id"; then
      continue
    fi
    if (( launch_budget_remaining <= 0 )); then
      break
    fi
    if (( running_total_count >= issue_capacity_limit )); then
      break
    fi
    if ! retry_ready issue "$issue_id"; then
      retry_deferred_issue_count=$((retry_deferred_issue_count + 1))
      continue
    fi

    # Scheduled issues are handled exclusively by the scheduled loop above.
    if [[ "$(cached_issue_attr scheduled "$issue_id")" == "yes" ]]; then
      continue
    fi

    is_heavy="$(cached_issue_attr heavy "$issue_id")"
    is_recurring="$(cached_issue_attr recurring "$issue_id")"
    if [[ "$is_recurring" == "yes" ]]; then
      if (( running_recurring_issue_count >= max_recurring_issue_workers )); then
        continue
      fi
      if (( max_open_agent_prs_for_recurring > 0 )) && (( open_agent_pr_count >= max_open_agent_prs_for_recurring )); then
        continue
      fi
    fi
    if [[ "$is_heavy" == "yes" ]] && (( running_heavy_issue_count >= max_concurrent_heavy_workers )); then
      heavy_deferred_count=$((heavy_deferred_count + 1))
      continue
    fi

    stage_issue_launch "$issue_id" "$is_heavy"
    if ! launch_out="$(heartbeat_start_issue_worker "$issue_id" 2>&1)"; then
      heartbeat_issue_launch_failed "$issue_id" "$is_heavy" || true
      clear_launch_in_progress
      record_memory "failed to launch issue worker for #${issue_id}"
      print_block "LAUNCH_FAILED_ISSUE=${issue_id}" "$launch_out"
      exit 1
    fi

    clear_launch_in_progress
    record_memory "launched issue worker for #${issue_id}"
    print_block "LAUNCHED_ISSUE=${issue_id}" "$launch_out"
    launched_issue_count=$((launched_issue_count + 1))
    if (( launch_budget_remaining > 0 )); then
      launch_budget_remaining=$((launch_budget_remaining - 1))
    fi
    running_total_count=$((running_total_count + 1))
    running_issue_count=$((running_issue_count + 1))
    if [[ "$is_heavy" == "yes" ]]; then
      running_heavy_issue_count=$((running_heavy_issue_count + 1))
    fi
    if [[ "$is_recurring" == "yes" ]]; then
      running_recurring_issue_count=$((running_recurring_issue_count + 1))
      record_recurring_issue_launch "$issue_id"
    else
      running_non_recurring_issue_count=$((running_non_recurring_issue_count + 1))
    fi
  done <<<"$ordered_ready_issue_ids_cache"
  log_phase "launch-planning:issue-loop:end"

  # Final phase: spend any remaining budget/capacity on further PR candidates.
  log_phase "launch-planning:pr-fill:start"
  while (( launch_budget_remaining > 0 )) && (( running_pr_count < max_concurrent_pr_workers )) && (( running_total_count < max_concurrent_workers )); do
    pr_candidate_json="$(next_pr_candidate_json || true)"
    [[ -n "$pr_candidate_json" ]] || break
    launch_pr_candidate_json "$pr_candidate_json"
  done
  log_phase "launch-planning:pr-fill:end"
  log_phase "launch-planning:end"
fi
|
|
2270
|
+
|
|
2271
|
+
# Post-launch summary inputs: collect human-review PRs and count agent PRs in
# launchable lanes that are idle (no tmux session) only because of retry
# cooldown, so the report can distinguish "deferred" from "nothing to do".
log_phase "post-launch-summary:start"
human_review_ids="$(human_review_pr_ids)"
human_review_count="$(worker_count "$human_review_ids")"
retry_deferred_pr_count=0
ensure_open_agent_pr_ids_cache
while IFS= read -r pr_number; do
  [[ -n "$pr_number" ]] || continue
  # Skip PRs that already have a live worker session.
  if tmux has-session -t "${pr_prefix}${pr_number}" 2>/dev/null; then
    continue
  fi
  risk_json="$(cached_pr_risk_json "$pr_number")"
  lane="$(jq -r '.agentLane' <<<"$risk_json")"
  # Only lanes the heartbeat itself launches count toward the deferred total.
  if [[ "$lane" != "fix" && "$lane" != "merge-repair" && "$lane" != "ci-refresh" && "$lane" != "automerge" && "$lane" != "double-check-1" && "$lane" != "double-check-2" ]]; then
    continue
  fi
  if ! retry_ready pr "$pr_number"; then
    retry_deferred_pr_count=$((retry_deferred_pr_count + 1))
  fi
done <<<"$open_agent_pr_ids_cache"
log_phase "post-launch-summary:end"
|
|
2291
|
+
|
|
2292
|
+
# Full status report when anything is running, launched, or waiting on auth;
# prints the counts, per-session status, and any lock/deferral notices, then
# ends the pass successfully.
if (( launched_issue_count > 0 || running_total_count > 0 || auth_wait_worker_count > 0 )); then
  print_block "RUNNING_COUNTS" "$(printf 'TOTAL=%s\nISSUE=%s\nPR=%s\nAUTH_WAITING_WORKERS=%s\nSCHEDULED_ISSUE=%s\nBLOCKED_RECOVERY_ISSUE=%s\nLAUNCHED_ISSUES=%s\nLAUNCHED_PRS=%s\nLAUNCH_BUDGET_REMAINING=%s\nPR_BACKLOG_ELIGIBLE=%s\nPR_RESERVED_SLOTS=%s\nISSUE_CAPACITY_LIMIT=%s\n%s=%s' "$running_total_count" "$running_issue_count" "$running_pr_count" "$auth_wait_worker_count" "$running_scheduled_issue_count" "$running_blocked_recovery_issue_count" "$launched_issue_count" "$launched_pr_count" "$launch_budget_remaining" "$pr_backlog_eligible_count" "$reserved_pr_slots" "$issue_capacity_limit" "$heavy_running_label" "$running_heavy_issue_count")"
  # One status block per running worker session.
  ensure_all_running_workers_cache
  while IFS= read -r running_session; do
    [[ -n "$running_session" ]] || continue
    status_out="$(
      "${shared_agent_home}/tools/bin/agent-project-worker-status" \
        --runs-root "$runs_root" \
        --session "$running_session"
    )"
    print_block "RUNNING_SESSION=${running_session}" "$status_out"
  done <<<"$all_running_workers_cache"
  if [[ "$exclusive_lock_mode" != "no" ]]; then
    print_block "EXCLUSIVE_QUEUE_LOCK" "$(printf 'STATE=%s\nKIND=%s\nITEM_ID=%s\nREASON=%s' "$exclusive_lock_mode" "${exclusive_lock_kind:-}" "${exclusive_lock_item:-}" "${exclusive_waiting_reason:-active}")"
  fi
  if (( human_review_count > 0 )); then
    print_block "PENDING_HUMAN_REVIEW=${human_review_count}" "$(printf 'High-risk PRs are green and intentionally idle pending human review.\nPRS=%s' "$(printf '%s\n' "$human_review_ids" | paste -sd, -)")"
  fi
  if (( heavy_deferred_count > 0 )); then
    print_block "${heavy_deferred_key}=${heavy_deferred_count}" "$heavy_deferred_message"
  fi
  if (( retry_deferred_issue_count > 0 || retry_deferred_pr_count > 0 )); then
    print_block "RETRY_DEFERRED" "$(printf 'ISSUES=%s\nPRS=%s' "$retry_deferred_issue_count" "$retry_deferred_pr_count")"
  fi
  if [[ "$provider_launch_suppressed" == "yes" ]]; then
    print_block "PROVIDER_COOLDOWN" "$(printf 'BACKEND=%s\nMODEL=%s\nNEXT_ATTEMPT_AT=%s\nLAST_REASON=%s' "${provider_cooldown_backend:-}" "${provider_cooldown_model:-}" "${provider_cooldown_until:-}" "${provider_cooldown_reason:-provider-quota-limit}")"
  fi
  exit 0
fi
|
|
2321
|
+
|
|
2322
|
+
# Idle-pass reporting: nothing is running or launched. Report the single most
# relevant reason for idleness (first match wins, each exits 0); if none
# applies, the heartbeat is genuinely idle and healthy.
if [[ "$exclusive_lock_mode" != "no" ]]; then
  print_block "EXCLUSIVE_QUEUE_LOCK" "$(printf 'STATE=%s\nKIND=%s\nITEM_ID=%s\nREASON=%s' "$exclusive_lock_mode" "${exclusive_lock_kind:-}" "${exclusive_lock_item:-}" "${exclusive_waiting_reason:-active}")"
  exit 0
fi

if (( human_review_count > 0 )); then
  print_block "PENDING_HUMAN_REVIEW=${human_review_count}" "$(printf 'High-risk PRs are green and intentionally idle pending human review.\nPRS=%s' "$(printf '%s\n' "$human_review_ids" | paste -sd, -)")"
  exit 0
fi

if (( heavy_deferred_count > 0 )); then
  print_block "${heavy_deferred_key}=${heavy_deferred_count}" "$heavy_deferred_message"
  exit 0
fi

if (( retry_deferred_issue_count > 0 || retry_deferred_pr_count > 0 )); then
  print_block "RETRY_DEFERRED" "$(printf 'ISSUES=%s\nPRS=%s' "$retry_deferred_issue_count" "$retry_deferred_pr_count")"
  exit 0
fi

if [[ "$provider_launch_suppressed" == "yes" ]]; then
  print_block "PROVIDER_COOLDOWN" "$(printf 'BACKEND=%s\nMODEL=%s\nNEXT_ATTEMPT_AT=%s\nLAST_REASON=%s' "${provider_cooldown_backend:-}" "${provider_cooldown_model:-}" "${provider_cooldown_until:-}" "${provider_cooldown_reason:-provider-quota-limit}")"
  exit 0
fi

# No work, no deferrals, no locks: report a clean heartbeat.
echo "HEARTBEAT_OK"
|