agent-control-plane 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +589 -0
- package/SKILL.md +149 -0
- package/assets/workflow-catalog.json +57 -0
- package/bin/audit-issue-routing.sh +74 -0
- package/bin/issue-resource-class.sh +58 -0
- package/bin/label-follow-up-issues.sh +114 -0
- package/bin/pr-risk.sh +532 -0
- package/bin/sync-pr-labels.sh +112 -0
- package/hooks/heartbeat-hooks.sh +573 -0
- package/hooks/issue-reconcile-hooks.sh +217 -0
- package/hooks/pr-reconcile-hooks.sh +225 -0
- package/npm/bin/agent-control-plane.js +1984 -0
- package/npm/public-bin/agent-control-plane +3 -0
- package/package.json +61 -0
- package/tools/bin/agent-cleanup-worktree +247 -0
- package/tools/bin/agent-github-update-labels +66 -0
- package/tools/bin/agent-init-worktree +216 -0
- package/tools/bin/agent-project-archive-run +52 -0
- package/tools/bin/agent-project-capture-worker +46 -0
- package/tools/bin/agent-project-catch-up-merged-prs +137 -0
- package/tools/bin/agent-project-cleanup-session +244 -0
- package/tools/bin/agent-project-detached-launch +107 -0
- package/tools/bin/agent-project-heartbeat-loop +2347 -0
- package/tools/bin/agent-project-open-issue-worktree +89 -0
- package/tools/bin/agent-project-open-pr-worktree +80 -0
- package/tools/bin/agent-project-publish-issue-pr +349 -0
- package/tools/bin/agent-project-reconcile-issue-session +1128 -0
- package/tools/bin/agent-project-reconcile-pr-session +1005 -0
- package/tools/bin/agent-project-retry-state +147 -0
- package/tools/bin/agent-project-run-claude-session +657 -0
- package/tools/bin/agent-project-run-codex-resilient +718 -0
- package/tools/bin/agent-project-run-codex-session +316 -0
- package/tools/bin/agent-project-run-kilo-session +27 -0
- package/tools/bin/agent-project-run-openclaw-session +984 -0
- package/tools/bin/agent-project-run-opencode-session +27 -0
- package/tools/bin/agent-project-sync-anchor-repo +128 -0
- package/tools/bin/agent-project-worker-status +143 -0
- package/tools/bin/audit-agent-worktrees.sh +310 -0
- package/tools/bin/audit-issue-routing.sh +11 -0
- package/tools/bin/audit-retained-layout.sh +58 -0
- package/tools/bin/audit-retained-overlap.sh +135 -0
- package/tools/bin/audit-retained-worktrees.sh +228 -0
- package/tools/bin/branch-verification-guard.sh +351 -0
- package/tools/bin/capture-worker.sh +18 -0
- package/tools/bin/check-skill-contracts.sh +324 -0
- package/tools/bin/cleanup-worktree.sh +44 -0
- package/tools/bin/codex-quota +31 -0
- package/tools/bin/create-follow-up-issue.sh +114 -0
- package/tools/bin/dashboard-launchd-bootstrap.sh +38 -0
- package/tools/bin/flow-config-lib.sh +2127 -0
- package/tools/bin/flow-resident-worker-lib.sh +683 -0
- package/tools/bin/flow-runtime-doctor.sh +97 -0
- package/tools/bin/flow-shell-lib.sh +266 -0
- package/tools/bin/heartbeat-recovery-preflight.sh +106 -0
- package/tools/bin/heartbeat-safe-auto.sh +551 -0
- package/tools/bin/install-dashboard-launchd.sh +152 -0
- package/tools/bin/install-project-launchd.sh +219 -0
- package/tools/bin/issue-publish-scope-guard.sh +242 -0
- package/tools/bin/issue-requires-local-workspace-install.sh +31 -0
- package/tools/bin/issue-resource-class.sh +12 -0
- package/tools/bin/kick-scheduler.sh +75 -0
- package/tools/bin/label-follow-up-issues.sh +14 -0
- package/tools/bin/new-pr-worktree.sh +50 -0
- package/tools/bin/new-worktree.sh +49 -0
- package/tools/bin/pr-risk.sh +12 -0
- package/tools/bin/prepare-worktree.sh +140 -0
- package/tools/bin/profile-activate.sh +109 -0
- package/tools/bin/profile-adopt.sh +219 -0
- package/tools/bin/profile-smoke.sh +461 -0
- package/tools/bin/project-init.sh +189 -0
- package/tools/bin/project-launchd-bootstrap.sh +54 -0
- package/tools/bin/project-remove.sh +155 -0
- package/tools/bin/project-runtime-supervisor.sh +56 -0
- package/tools/bin/project-runtimectl.sh +586 -0
- package/tools/bin/provider-cooldown-state.sh +166 -0
- package/tools/bin/publish-issue-worker.sh +31 -0
- package/tools/bin/reconcile-issue-worker.sh +34 -0
- package/tools/bin/reconcile-pr-worker.sh +34 -0
- package/tools/bin/record-verification.sh +71 -0
- package/tools/bin/render-architecture-infographics.sh +110 -0
- package/tools/bin/render-dashboard-demo-media.sh +333 -0
- package/tools/bin/render-dashboard-snapshot.py +16 -0
- package/tools/bin/render-flow-config.sh +86 -0
- package/tools/bin/retry-state.sh +31 -0
- package/tools/bin/reuse-issue-worktree.sh +75 -0
- package/tools/bin/run-codex-bypass.sh +3 -0
- package/tools/bin/run-codex-safe.sh +3 -0
- package/tools/bin/run-codex-task.sh +231 -0
- package/tools/bin/scaffold-profile.sh +374 -0
- package/tools/bin/serve-dashboard.sh +5 -0
- package/tools/bin/split-retained-slice.sh +124 -0
- package/tools/bin/start-issue-worker.sh +796 -0
- package/tools/bin/start-pr-fix-worker.sh +458 -0
- package/tools/bin/start-pr-merge-repair-worker.sh +8 -0
- package/tools/bin/start-pr-review-worker.sh +227 -0
- package/tools/bin/start-resident-issue-loop.sh +908 -0
- package/tools/bin/sync-agent-repo.sh +52 -0
- package/tools/bin/sync-dependency-baseline.sh +247 -0
- package/tools/bin/sync-pr-labels.sh +12 -0
- package/tools/bin/sync-recurring-issue-checklist.sh +274 -0
- package/tools/bin/sync-shared-agent-home.sh +214 -0
- package/tools/bin/sync-vscode-workspace.sh +157 -0
- package/tools/bin/test-smoke.sh +63 -0
- package/tools/bin/uninstall-project-launchd.sh +55 -0
- package/tools/bin/update-github-labels.sh +14 -0
- package/tools/bin/worker-status.sh +19 -0
- package/tools/bin/workflow-catalog.sh +77 -0
- package/tools/dashboard/app.js +286 -0
- package/tools/dashboard/dashboard_snapshot.py +466 -0
- package/tools/dashboard/index.html +41 -0
- package/tools/dashboard/server.py +64 -0
- package/tools/dashboard/styles.css +351 -0
- package/tools/templates/issue-prompt-template.md +109 -0
- package/tools/templates/pr-fix-template.md +120 -0
- package/tools/templates/pr-merge-repair-template.md +91 -0
- package/tools/templates/pr-review-template.md +62 -0
- package/tools/templates/scheduled-issue-prompt-template.md +62 -0
- package/tools/tests/test-agent-control-plane-npm-cli.sh +279 -0
- package/tools/tests/test-agent-github-update-labels-falls-back-to-repository-id.sh +56 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-clears-stale-sandbox-artifacts.sh +89 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-does-not-retry-provider-quota.sh +82 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-retries-transient-failures.sh +90 -0
- package/tools/tests/test-agent-project-claude-session-wrapper-times-out.sh +73 -0
- package/tools/tests/test-agent-project-claude-session-wrapper.sh +103 -0
- package/tools/tests/test-agent-project-cleanup-session-orphan-fallback.sh +90 -0
- package/tools/tests/test-agent-project-cleanup-session-skip-worktree-cleanup.sh +90 -0
- package/tools/tests/test-agent-project-codex-live-thread-persist.sh +76 -0
- package/tools/tests/test-agent-project-codex-recovery.sh +731 -0
- package/tools/tests/test-agent-project-codex-session-wrapper-clears-stale-sandbox-artifacts.sh +105 -0
- package/tools/tests/test-agent-project-codex-session-wrapper.sh +97 -0
- package/tools/tests/test-agent-project-open-pr-worktree-config-prefix.sh +81 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-clears-stale-sandbox-artifacts.sh +109 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-infers-blocked-result-contract.sh +89 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-recovers-literal-env-artifacts.sh +113 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-recovers-version-mismatch.sh +135 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-resident.sh +179 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-reuses-existing-agent-after-add-race.sh +119 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper-terminates-rate-limit-hang.sh +91 -0
- package/tools/tests/test-agent-project-openclaw-session-wrapper.sh +117 -0
- package/tools/tests/test-agent-project-publish-issue-pr-prunes-stale-worktree-entry.sh +148 -0
- package/tools/tests/test-agent-project-publish-issue-pr-reads-archived-session.sh +146 -0
- package/tools/tests/test-agent-project-publish-issue-pr-recovers-final-head.sh +145 -0
- package/tools/tests/test-agent-project-publish-issue-pr-reuses-existing-worktree.sh +147 -0
- package/tools/tests/test-agent-project-reconcile-failure-reason.sh +456 -0
- package/tools/tests/test-agent-project-reconcile-issue-archived-session-fallback.sh +96 -0
- package/tools/tests/test-agent-project-reconcile-issue-before-blocked.sh +90 -0
- package/tools/tests/test-agent-project-reconcile-issue-host-verification-recovery-uses-recovered-worktree.sh +212 -0
- package/tools/tests/test-agent-project-reconcile-issue-host-verification-recovery.sh +207 -0
- package/tools/tests/test-agent-project-reconcile-issue-provider-quota-schedules-provider-cooldown.sh +101 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-backfills-lane-metadata-from-worker-key.sh +113 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-clears-stale-failed-summary.sh +117 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-initializes-shared-agent-home.sh +55 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-normalizes-runner-state.sh +125 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-records-invalid-contract-summary.sh +118 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-skips-duplicate-blocked-comment.sh +144 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-standardizes-no-commits-blocker.sh +145 -0
- package/tools/tests/test-agent-project-reconcile-issue-session-synthesizes-blocked-comment.sh +139 -0
- package/tools/tests/test-agent-project-reconcile-pr-blocked-host-recovery.sh +242 -0
- package/tools/tests/test-agent-project-reconcile-pr-guard-blocked-no-commit.sh +142 -0
- package/tools/tests/test-agent-project-reconcile-pr-provider-quota-schedules-provider-cooldown.sh +106 -0
- package/tools/tests/test-agent-project-reconcile-pr-session-initializes-shared-agent-home.sh +66 -0
- package/tools/tests/test-agent-project-reconcile-pr-updated-branch-noop.sh +129 -0
- package/tools/tests/test-audit-agent-worktrees-active-launch-skips-git-inspection.sh +69 -0
- package/tools/tests/test-audit-agent-worktrees-broken-worktree.sh +43 -0
- package/tools/tests/test-audit-agent-worktrees-pending-launch-owner.sh +46 -0
- package/tools/tests/test-audit-agent-worktrees-unreconciled-owner.sh +79 -0
- package/tools/tests/test-audit-issue-routing-managed-branch-globs.sh +56 -0
- package/tools/tests/test-branch-verification-guard-generated-artifacts.sh +72 -0
- package/tools/tests/test-branch-verification-guard-targeted-coverage.sh +125 -0
- package/tools/tests/test-codex-quota-manager-failure-driven-rotation.sh +178 -0
- package/tools/tests/test-codex-quota-wrapper.sh +37 -0
- package/tools/tests/test-contribution-docs.sh +18 -0
- package/tools/tests/test-control-plane-dashboard-runtime-smoke.sh +343 -0
- package/tools/tests/test-create-follow-up-issue.sh +73 -0
- package/tools/tests/test-dashboard-launchd-bootstrap.sh +55 -0
- package/tools/tests/test-flow-export-execution-env-exports-repo-id.sh +30 -0
- package/tools/tests/test-flow-export-github-cli-auth-env-prefers-git-credential.sh +48 -0
- package/tools/tests/test-flow-github-api-repo-fallback-preserves-input.sh +85 -0
- package/tools/tests/test-flow-github-api-repo-prefers-explicit-repository-id.sh +60 -0
- package/tools/tests/test-flow-github-issue-list-falls-back-to-repository-id.sh +64 -0
- package/tools/tests/test-flow-github-pr-list-falls-back-to-repository-id.sh +77 -0
- package/tools/tests/test-flow-resident-can-reuse-does-not-leak-metadata.sh +52 -0
- package/tools/tests/test-flow-resident-reap-stale-controllers.sh +63 -0
- package/tools/tests/test-flow-resolve-codex-quota-tools.sh +104 -0
- package/tools/tests/test-flow-runtime-doctor-profile-selection.sh +27 -0
- package/tools/tests/test-heartbeat-codex-pr-linked-issue-exclusion.sh +79 -0
- package/tools/tests/test-heartbeat-hooks-enqueue-resident-issue-for-idle-controller.sh +115 -0
- package/tools/tests/test-heartbeat-hooks-enqueue-resident-issue-for-live-lane-controller.sh +117 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop-claude.sh +96 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop-codex.sh +96 -0
- package/tools/tests/test-heartbeat-hooks-start-resident-issue-loop.sh +96 -0
- package/tools/tests/test-heartbeat-loop-auth-wait-does-not-consume-capacity.sh +170 -0
- package/tools/tests/test-heartbeat-loop-blocked-recovery-lane.sh +201 -0
- package/tools/tests/test-heartbeat-loop-blocked-recovery-vs-pr-reservation.sh +201 -0
- package/tools/tests/test-heartbeat-loop-idle-resident-controller-does-not-block-launches.sh +160 -0
- package/tools/tests/test-heartbeat-loop-pr-launch-dedup.sh +133 -0
- package/tools/tests/test-heartbeat-loop-provider-cooldown-suppresses-launches.sh +157 -0
- package/tools/tests/test-heartbeat-loop-reaps-stale-resident-controller.sh +181 -0
- package/tools/tests/test-heartbeat-loop-waiting-provider-resident-controller-does-not-block-launches.sh +160 -0
- package/tools/tests/test-heartbeat-ready-issues-blocked-recovery.sh +134 -0
- package/tools/tests/test-heartbeat-safe-auto-dynamic-concurrency.sh +162 -0
- package/tools/tests/test-heartbeat-safe-auto-no-tmux-sessions.sh +136 -0
- package/tools/tests/test-heartbeat-safe-auto-openclaw-skips-codex-quota.sh +139 -0
- package/tools/tests/test-heartbeat-safe-auto-quota-health-signal.sh +119 -0
- package/tools/tests/test-heartbeat-safe-auto-stale-shared-loop-pid-does-not-skip.sh +140 -0
- package/tools/tests/test-heartbeat-safe-auto-static-capacity-without-quota-cache.sh +142 -0
- package/tools/tests/test-heartbeat-safe-auto-zero-healthy-pools.sh +141 -0
- package/tools/tests/test-heartbeat-sync-issue-labels-empty-schedule.sh +65 -0
- package/tools/tests/test-heartbeat-sync-open-agent-prs-terminal-clears-running.sh +179 -0
- package/tools/tests/test-install-dashboard-launchd.sh +78 -0
- package/tools/tests/test-install-project-launchd-adds-tool-paths.sh +87 -0
- package/tools/tests/test-install-project-launchd.sh +110 -0
- package/tools/tests/test-issue-local-workspace-install-policy.sh +81 -0
- package/tools/tests/test-issue-publish-scope-guard-docs-signal.sh +70 -0
- package/tools/tests/test-issue-reconcile-hooks-success-clears-blocked.sh +36 -0
- package/tools/tests/test-kick-scheduler-requires-explicit-profile.sh +47 -0
- package/tools/tests/test-label-follow-up-issues-falls-back-to-repository-id.sh +132 -0
- package/tools/tests/test-manual-operator-entrypoints-require-explicit-profile.sh +64 -0
- package/tools/tests/test-package-funding-metadata.sh +21 -0
- package/tools/tests/test-package-public-metadata.sh +62 -0
- package/tools/tests/test-placeholder-worker-adapters.sh +38 -0
- package/tools/tests/test-pr-reconcile-hooks-refreshes-recurring-issue-checklist.sh +110 -0
- package/tools/tests/test-pr-risk-cohesive-mobile-locale-scope.sh +70 -0
- package/tools/tests/test-pr-risk-fix-label-semantics.sh +114 -0
- package/tools/tests/test-pr-risk-local-first-no-checks.sh +70 -0
- package/tools/tests/test-prepare-worktree-simple-repo-baseline.sh +67 -0
- package/tools/tests/test-profile-activate.sh +33 -0
- package/tools/tests/test-profile-adopt-allow-missing-repo.sh +68 -0
- package/tools/tests/test-profile-adopt-skip-workspace-sync-missing-file.sh +61 -0
- package/tools/tests/test-profile-adopt-syncs-anchor-and-workspace.sh +90 -0
- package/tools/tests/test-profile-smoke-collision.sh +44 -0
- package/tools/tests/test-profile-smoke-invalid-claude-config.sh +31 -0
- package/tools/tests/test-profile-smoke-invalid-provider-pool.sh +68 -0
- package/tools/tests/test-profile-smoke-repo-slug-mismatch.sh +36 -0
- package/tools/tests/test-profile-smoke.sh +45 -0
- package/tools/tests/test-project-init-force-and-skip-sync.sh +61 -0
- package/tools/tests/test-project-init-repo-slug-mismatch.sh +29 -0
- package/tools/tests/test-project-init.sh +66 -0
- package/tools/tests/test-project-launchd-bootstrap.sh +66 -0
- package/tools/tests/test-project-remove.sh +150 -0
- package/tools/tests/test-project-runtime-supervisor.sh +47 -0
- package/tools/tests/test-project-runtimectl-launchd.sh +115 -0
- package/tools/tests/test-project-runtimectl-missing-profile.sh +54 -0
- package/tools/tests/test-project-runtimectl-start-falls-back-to-bootstrap.sh +108 -0
- package/tools/tests/test-project-runtimectl-status-reports-supervisor-as-heartbeat-parent.sh +95 -0
- package/tools/tests/test-project-runtimectl-status-supervisor-running.sh +59 -0
- package/tools/tests/test-project-runtimectl-stop-cancels-pending-kick.sh +85 -0
- package/tools/tests/test-project-runtimectl-stop-clears-running-labels.sh +78 -0
- package/tools/tests/test-project-runtimectl.sh +212 -0
- package/tools/tests/test-provider-cooldown-state-prefers-runtime-worker-context.sh +39 -0
- package/tools/tests/test-provider-cooldown-state.sh +59 -0
- package/tools/tests/test-public-repo-docs.sh +159 -0
- package/tools/tests/test-reconcile-pr-worker-acp-config-routing.sh +75 -0
- package/tools/tests/test-render-dashboard-snapshot.sh +149 -0
- package/tools/tests/test-render-flow-config-demo-profile.sh +36 -0
- package/tools/tests/test-render-flow-config-provider-pool-fallback.sh +81 -0
- package/tools/tests/test-render-flow-config.sh +52 -0
- package/tools/tests/test-run-codex-task-claude-routing.sh +125 -0
- package/tools/tests/test-run-codex-task-codex-resident-routing.sh +108 -0
- package/tools/tests/test-run-codex-task-kilo-routing.sh +98 -0
- package/tools/tests/test-run-codex-task-openclaw-resident-routing.sh +117 -0
- package/tools/tests/test-run-codex-task-openclaw-routing.sh +113 -0
- package/tools/tests/test-run-codex-task-opencode-routing.sh +98 -0
- package/tools/tests/test-run-codex-task-provider-pool-fallback-routing.sh +146 -0
- package/tools/tests/test-scaffold-profile.sh +108 -0
- package/tools/tests/test-serve-dashboard.sh +93 -0
- package/tools/tests/test-start-issue-worker-blocked-context.sh +129 -0
- package/tools/tests/test-start-issue-worker-blocks-complete-recurring-checklist.sh +189 -0
- package/tools/tests/test-start-issue-worker-local-install-routing.sh +157 -0
- package/tools/tests/test-start-issue-worker-profile-template-routing.sh +149 -0
- package/tools/tests/test-start-issue-worker-recurring-resident-reuse-codex.sh +212 -0
- package/tools/tests/test-start-issue-worker-recurring-resident-reuse.sh +219 -0
- package/tools/tests/test-start-issue-worker-renders-verification-snippet.sh +155 -0
- package/tools/tests/test-start-issue-worker-resident-reuse-falls-back-to-new-worktree.sh +199 -0
- package/tools/tests/test-start-pr-fix-worker-host-blocker-context.sh +275 -0
- package/tools/tests/test-start-resident-issue-loop-adopts-next-recurring-issue.sh +185 -0
- package/tools/tests/test-start-resident-issue-loop-clears-pending-while-waiting-due.sh +152 -0
- package/tools/tests/test-start-resident-issue-loop-consumes-queued-lease.sh +186 -0
- package/tools/tests/test-start-resident-issue-loop-fails-over-provider-pool.sh +212 -0
- package/tools/tests/test-start-resident-issue-loop-immediate-cycles.sh +148 -0
- package/tools/tests/test-start-resident-issue-loop-waits-for-provider.sh +194 -0
- package/tools/tests/test-start-resident-issue-loop-waits-for-terminal-reconcile-status.sh +198 -0
- package/tools/tests/test-start-resident-issue-loop-yields-to-live-lane-controller.sh +145 -0
- package/tools/tests/test-sync-pr-labels-fix-lane-uses-repair-queued.sh +67 -0
- package/tools/tests/test-sync-recurring-issue-checklist-backfills-workflow-complete-blocker.sh +70 -0
- package/tools/tests/test-sync-recurring-issue-checklist.sh +95 -0
- package/tools/tests/test-sync-shared-agent-home-local-source-root.sh +66 -0
- package/tools/tests/test-sync-shared-agent-home-preserves-unrelated-workflow-catalog-skill.sh +47 -0
- package/tools/tests/test-test-smoke.sh +86 -0
- package/tools/tests/test-uninstall-project-launchd.sh +37 -0
- package/tools/tests/test-update-github-labels-prefers-sibling-helper.sh +49 -0
- package/tools/tests/test-workflow-catalog.sh +43 -0
- package/tools/vendor/codex-quota/LICENSE +21 -0
- package/tools/vendor/codex-quota/README.md +459 -0
- package/tools/vendor/codex-quota/codex-quota.js +261 -0
- package/tools/vendor/codex-quota/lib/claude-accounts.js +226 -0
- package/tools/vendor/codex-quota/lib/claude-oauth.js +174 -0
- package/tools/vendor/codex-quota/lib/claude-tokens.js +471 -0
- package/tools/vendor/codex-quota/lib/claude-usage.js +929 -0
- package/tools/vendor/codex-quota/lib/codex-accounts.js +205 -0
- package/tools/vendor/codex-quota/lib/codex-tokens.js +326 -0
- package/tools/vendor/codex-quota/lib/codex-usage.js +32 -0
- package/tools/vendor/codex-quota/lib/color.js +72 -0
- package/tools/vendor/codex-quota/lib/constants.js +57 -0
- package/tools/vendor/codex-quota/lib/container.js +143 -0
- package/tools/vendor/codex-quota/lib/display.js +1111 -0
- package/tools/vendor/codex-quota/lib/fs.js +63 -0
- package/tools/vendor/codex-quota/lib/handlers.js +2060 -0
- package/tools/vendor/codex-quota/lib/jwt.js +33 -0
- package/tools/vendor/codex-quota/lib/oauth.js +486 -0
- package/tools/vendor/codex-quota/lib/paths.js +34 -0
- package/tools/vendor/codex-quota/lib/prompts.js +44 -0
- package/tools/vendor/codex-quota/lib/sync.js +1438 -0
- package/tools/vendor/codex-quota/lib/token-match.js +96 -0
- package/tools/vendor/codex-quota-manager/scripts/auto-switch.sh +500 -0
- package/tools/vendor/codex-quota-manager/scripts/batch-add.sh +123 -0
|
@@ -0,0 +1,908 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# start-resident-issue-loop.sh — resident controller loop for a single GitHub
# issue. Resolves profile/config, exports execution env, and initializes the
# global controller/provider bookkeeping state used by the loop body below.
#
# Usage: start-resident-issue-loop.sh ISSUE_ID [safe|bypass]
set -euo pipefail

# Directory containing this script; sibling libs are sourced relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=/dev/null
source "${SCRIPT_DIR}/flow-config-lib.sh"
# shellcheck source=/dev/null
source "${SCRIPT_DIR}/flow-resident-worker-lib.sh"

# Positional args: required issue id, optional mode (defaults to "safe").
ISSUE_ID="${1:?usage: start-resident-issue-loop.sh ISSUE_ID [safe|bypass]}"
MODE="${2:-safe}"

# Refuse to run without an explicitly selected profile (exit 64 = usage error).
FLOW_SKILL_DIR="$(resolve_flow_skill_dir "${BASH_SOURCE[0]}")"
if ! flow_require_explicit_profile_selection "${FLOW_SKILL_DIR}" "start-resident-issue-loop.sh"; then
  exit 64
fi

# Resolve the profile config and export execution environment from it.
CONFIG_YAML="$(resolve_flow_config_yaml "${BASH_SOURCE[0]}")"
flow_export_execution_env "${CONFIG_YAML}"
flow_export_project_env_aliases

# Derived paths and identifiers for this issue's session.
FLOW_TOOLS_DIR="${FLOW_SKILL_DIR}/tools/bin"
HOOK_FILE="${FLOW_SKILL_DIR}/hooks/heartbeat-hooks.sh"
REPO_SLUG="$(flow_resolve_repo_slug "${CONFIG_YAML}")"
ISSUE_SESSION_PREFIX="$(flow_resolve_issue_session_prefix "${CONFIG_YAML}")"
SESSION="${ISSUE_SESSION_PREFIX}${ISSUE_ID}"
STATE_ROOT="$(flow_resolve_state_root "${CONFIG_YAML}")"
# Env overrides: ACP_* preferred, legacy F_LOSNING_* fallback, then default.
PENDING_LAUNCH_DIR="${ACP_PENDING_LAUNCH_DIR:-${F_LOSNING_PENDING_LAUNCH_DIR:-${STATE_ROOT}/pending-launches}}"
SCHEDULED_STATE_DIR="${STATE_ROOT}/scheduled-issues"
CONTROLLER_FILE="$(flow_resident_issue_controller_file "${CONFIG_YAML}" "${ISSUE_ID}")"
RESIDENT_META_FILE="$(flow_resident_issue_meta_file "${CONFIG_YAML}" "${ISSUE_ID}")"
CODING_WORKER="${ACP_CODING_WORKER:-${F_LOSNING_CODING_WORKER:-codex}}"
# Controller loop tuning pulled from config.
MAX_IMMEDIATE_CYCLES="$(flow_resident_issue_controller_max_immediate_cycles "${CONFIG_YAML}")"
POLL_SECONDS="$(flow_resident_issue_controller_poll_seconds "${CONFIG_YAML}")"
IDLE_TIMEOUT_SECONDS="$(flow_resident_issue_controller_idle_timeout_seconds "${CONFIG_YAML}")"
# Mutable controller state (updated by the loop body; read when recording
# controller metadata).
CONTROLLER_LOOP_COUNT="0"
CONTROLLER_STATE="starting"
CONTROLLER_REASON=""
NEXT_WAKE_EPOCH=""
NEXT_WAKE_AT=""
IDLE_WAIT_STARTED_EPOCH=""
PROVIDER_WAITED="no"
# Currently adopted resident worker, if any.
ACTIVE_RESIDENT_WORKER_KEY=""
ACTIVE_RESIDENT_META_FILE=""
ACTIVE_RESIDENT_LANE_KIND=""
ACTIVE_RESIDENT_LANE_VALUE=""
# Currently selected provider (pool/backend/model/key) and selection metadata.
ACTIVE_PROVIDER_POOL_NAME=""
ACTIVE_PROVIDER_BACKEND=""
ACTIVE_PROVIDER_MODEL=""
ACTIVE_PROVIDER_KEY=""
ACTIVE_PROVIDER_SELECTION_REASON=""
ACTIVE_PROVIDER_NEXT_ATTEMPT_EPOCH=""
ACTIVE_PROVIDER_NEXT_ATTEMPT_AT=""
ACTIVE_PROVIDER_LAST_REASON=""
# Last provider recorded to state vs. last provider actually launched —
# tracked separately so switches can be detected and counted.
LAST_RECORDED_PROVIDER_POOL_NAME=""
LAST_RECORDED_PROVIDER_BACKEND=""
LAST_RECORDED_PROVIDER_MODEL=""
LAST_RECORDED_PROVIDER_KEY=""
LAST_LAUNCHED_PROVIDER_POOL_NAME=""
LAST_LAUNCHED_PROVIDER_BACKEND=""
LAST_LAUNCHED_PROVIDER_MODEL=""
LAST_LAUNCHED_PROVIDER_KEY=""
# Audit trail of the most recent provider switch/failover (from -> to).
LAST_PROVIDER_SWITCH_AT=""
LAST_PROVIDER_SWITCH_REASON=""
LAST_PROVIDER_FROM_POOL_NAME=""
LAST_PROVIDER_FROM_BACKEND=""
LAST_PROVIDER_FROM_MODEL=""
LAST_PROVIDER_FROM_KEY=""
LAST_PROVIDER_TO_POOL_NAME=""
LAST_PROVIDER_TO_BACKEND=""
LAST_PROVIDER_TO_MODEL=""
LAST_PROVIDER_TO_KEY=""
LAST_PROVIDER_FAILOVER_AT=""
# Provider switch/wait counters (accumulated across the controller's life).
PROVIDER_SWITCH_COUNT="0"
PROVIDER_FAILOVER_COUNT="0"
PROVIDER_WAIT_COUNT="0"
PROVIDER_WAIT_TOTAL_SECONDS="0"
PROVIDER_LAST_WAIT_SECONDS="0"
PROVIDER_LAST_WAIT_STARTED_AT=""
PROVIDER_LAST_WAIT_COMPLETED_AT=""

mkdir -p "${SCHEDULED_STATE_DIR}" "${PENDING_LAUNCH_DIR}"

# Optionally source heartbeat hooks; functions defined there (e.g.
# heartbeat_open_agent_pr_issue_ids) are probed with `declare -F` before use.
if [[ -f "${HOOK_FILE}" ]]; then
  # shellcheck source=/dev/null
  source "${HOOK_FILE}"
fi
|
|
88
|
+
|
|
89
|
+
# Fetch the GitHub issue payload (JSON on stdout) for the given issue number.
# Globals: REPO_SLUG (read).
issue_json_for() {
  local wanted_issue
  wanted_issue="${1:?issue id required}"
  flow_github_issue_view_json "${REPO_SLUG}" "${wanted_issue}"
}
|
|
93
|
+
|
|
94
|
+
# Convenience wrapper: fetch the JSON payload for this controller's own issue.
# Globals: ISSUE_ID (read).
issue_json() {
  issue_json_for "${ISSUE_ID}"
}
|
|
97
|
+
|
|
98
|
+
# Succeed iff the given issue JSON payload has state "OPEN".
# $1 - issue JSON (optional; empty/missing is treated as {}, i.e. not open).
issue_json_is_open() {
  local payload="${1-}"
  [[ -n "${payload}" ]] || payload='{}'
  jq -e '(.state // "") == "OPEN"' >/dev/null <<<"${payload}"
}
|
|
105
|
+
|
|
106
|
+
# Succeed iff the given issue JSON payload carries the "agent-keep-open" label.
# $1 - issue JSON (optional; empty/missing is treated as {}, i.e. no labels).
issue_json_is_keep_open() {
  local payload="${1-}"
  [[ -n "${payload}" ]] || payload='{}'
  jq -e 'any(.labels[]?; .name == "agent-keep-open")' >/dev/null <<<"${payload}"
}
|
|
113
|
+
|
|
114
|
+
# Parse a recurrence cadence out of an issue's body and print it in seconds.
# $1 - issue JSON payload (optional; empty/missing is treated as {}).
# Outputs: interval in seconds on stdout; "0" when no cadence line is found.
# The issue body is scanned for a line like "Agent schedule: every 6h"
# ("Schedule:" and "Cadence:" are accepted too; units m/h/d, case-insensitive).
# Parsing is delegated to node so the JSON body never needs shell re-quoting;
# the payload is passed via the ISSUE_JSON environment variable.
issue_schedule_interval_seconds_from_json() {
  local issue_payload="${1-}"
  if [[ -z "${issue_payload}" ]]; then
    issue_payload='{}'
  fi
  ISSUE_JSON="${issue_payload}" node <<'EOF'
const issue = JSON.parse(process.env.ISSUE_JSON || '{}');
const body = String(issue.body || '');
const match = body.match(/^\s*(?:Agent schedule|Schedule|Cadence)\s*:\s*(?:every\s+)?(\d+)\s*([mhd])\s*$/im);
if (!match) {
  process.stdout.write('0\n');
  process.exit(0);
}
const value = Number(match[1]);
const unit = String(match[2] || '').toLowerCase();
const multiplier = { m: 60, h: 3600, d: 86400 }[unit] || 0;
const seconds = Number.isFinite(value) && value > 0 ? value * multiplier : 0;
process.stdout.write(`${seconds}\n`);
EOF
}
|
|
134
|
+
|
|
135
|
+
# Succeed iff the payload declares a positive schedule interval.
issue_json_is_scheduled() {
  local seconds
  seconds="$(issue_schedule_interval_seconds_from_json "${1-}")"
  [[ "${seconds}" =~ ^[1-9][0-9]*$ ]]
}
+
# Succeed iff the currently-owned issue has an open agent PR.
issue_has_open_agent_pr() {
  local current_issue="${ISSUE_ID}"
  issue_id_has_open_agent_pr "${current_issue}"
}
+
# Succeed iff the given issue id appears in the hook-provided list of issues
# that already have an open agent PR. Without the hook, report "no open PR".
issue_id_has_open_agent_pr() {
  local target_issue="${1:?issue id required}"
  local ids_json

  declare -F heartbeat_open_agent_pr_issue_ids >/dev/null 2>&1 || return 1

  # Fall back to an empty list when the hook errors; partial hook output
  # followed by the fallback would be invalid JSON and jq then fails closed.
  ids_json="$(heartbeat_open_agent_pr_issue_ids 2>/dev/null || printf '[]\n')"
  jq -e --arg issueId "${target_issue}" 'index($issueId) != null' >/dev/null <<<"${ids_json}"
}
+
# Print the path of the pending-launch PID file for an issue.
issue_pending_file() {
  local target_issue="${1:?issue id required}"
  printf '%s/issue-%s.pid\n' "${PENDING_LAUNCH_DIR}" "${target_issue}"
}
+
# Remove the pending-launch marker for an issue (default: the owned issue).
# A missing id is a no-op rather than an error.
controller_unregister_pending_issue() {
  local target_issue="${1:-${ISSUE_ID:-}}"
  if [[ -z "${target_issue}" ]]; then
    return 0
  fi
  rm -f "$(issue_pending_file "${target_issue}")"
}
+
# Record this controller's PID as the pending launcher for the owned issue.
# No-op when no issue is currently owned.
controller_register_pending_issue() {
  if [[ -z "${ISSUE_ID:-}" ]]; then
    return 0
  fi
  printf '%s\n' "$$" >"$(issue_pending_file "${ISSUE_ID}")"
}
+
# Succeed iff the issue is recurring. Prefers the project hook when defined;
# otherwise falls back to checking for the "agent-keep-open" label.
issue_id_is_recurring() {
  local target_issue="${1:?issue id required}"
  local verdict

  if declare -F heartbeat_issue_is_recurring >/dev/null 2>&1; then
    verdict="$(heartbeat_issue_is_recurring "${target_issue}" 2>/dev/null || printf 'no\n')"
    [[ "${verdict}" == "yes" ]]
    return $?
  fi

  issue_json_is_keep_open "$(issue_json_for "${target_issue}" 2>/dev/null || printf '{}\n')"
}
+
# Succeed iff the issue is scheduled. Prefers the project hook when defined;
# otherwise parses a schedule line out of the fetched issue body.
issue_id_is_scheduled() {
  local target_issue="${1:?issue id required}"
  local verdict

  if declare -F heartbeat_issue_is_scheduled >/dev/null 2>&1; then
    verdict="$(heartbeat_issue_is_scheduled "${target_issue}" 2>/dev/null || printf 'no\n')"
    [[ "${verdict}" == "yes" ]]
    return $?
  fi

  issue_json_is_scheduled "$(issue_json_for "${target_issue}" 2>/dev/null || printf '{}\n')"
}
+
# Re-derive the execution environment from the YAML config. Every cached
# worker/provider variable is cleared first (both the ACP_* names and the
# legacy F_LOSNING_* aliases) so flow_export_execution_env repopulates them
# from scratch rather than inheriting stale values from a previous cycle.
controller_refresh_execution_context() {
  unset \
    ACP_CODING_WORKER F_LOSNING_CODING_WORKER \
    ACP_CODEX_PROFILE_SAFE F_LOSNING_CODEX_PROFILE_SAFE \
    ACP_CODEX_PROFILE_BYPASS F_LOSNING_CODEX_PROFILE_BYPASS \
    ACP_CLAUDE_MODEL F_LOSNING_CLAUDE_MODEL \
    ACP_CLAUDE_PERMISSION_MODE F_LOSNING_CLAUDE_PERMISSION_MODE \
    ACP_CLAUDE_EFFORT F_LOSNING_CLAUDE_EFFORT \
    ACP_CLAUDE_TIMEOUT_SECONDS F_LOSNING_CLAUDE_TIMEOUT_SECONDS \
    ACP_CLAUDE_MAX_ATTEMPTS F_LOSNING_CLAUDE_MAX_ATTEMPTS \
    ACP_CLAUDE_RETRY_BACKOFF_SECONDS F_LOSNING_CLAUDE_RETRY_BACKOFF_SECONDS \
    ACP_OPENCLAW_MODEL F_LOSNING_OPENCLAW_MODEL \
    ACP_OPENCLAW_THINKING F_LOSNING_OPENCLAW_THINKING \
    ACP_OPENCLAW_TIMEOUT_SECONDS F_LOSNING_OPENCLAW_TIMEOUT_SECONDS \
    ACP_ACTIVE_PROVIDER_POOL_NAME F_LOSNING_ACTIVE_PROVIDER_POOL_NAME \
    ACP_ACTIVE_PROVIDER_BACKEND F_LOSNING_ACTIVE_PROVIDER_BACKEND \
    ACP_ACTIVE_PROVIDER_MODEL F_LOSNING_ACTIVE_PROVIDER_MODEL \
    ACP_ACTIVE_PROVIDER_KEY F_LOSNING_ACTIVE_PROVIDER_KEY \
    ACP_PROVIDER_POOLS_EXHAUSTED F_LOSNING_PROVIDER_POOLS_EXHAUSTED \
    ACP_PROVIDER_POOL_SELECTION_REASON F_LOSNING_PROVIDER_POOL_SELECTION_REASON \
    ACP_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH \
    ACP_PROVIDER_POOL_NEXT_ATTEMPT_AT F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_AT \
    ACP_PROVIDER_POOL_LAST_REASON F_LOSNING_PROVIDER_POOL_LAST_REASON
  flow_export_execution_env "${CONFIG_YAML}"
  flow_export_project_env_aliases
  # Backend defaults to codex when neither the ACP nor the legacy variable
  # was exported by the config.
  CODING_WORKER="${ACP_CODING_WORKER:-${F_LOSNING_CODING_WORKER:-codex}}"
  controller_capture_active_provider_context
}
+
# Compute the resident lane identity for the owned issue.
# $1: "yes" when the issue is scheduled; $2: the schedule interval in seconds.
# Scheduled issues get a per-interval lane; everything else shares the
# general recurring lane.
controller_refresh_issue_lane_context() {
  local scheduled_flag="${1:-no}"
  local interval="${2:-0}"

  if [[ "${scheduled_flag}" == "yes" ]]; then
    ACTIVE_RESIDENT_LANE_KIND="scheduled"
    ACTIVE_RESIDENT_LANE_VALUE="${interval}"
  else
    ACTIVE_RESIDENT_LANE_KIND="recurring"
    ACTIVE_RESIDENT_LANE_VALUE="general"
  fi

  ACTIVE_RESIDENT_WORKER_KEY="$(flow_resident_issue_lane_key "${CODING_WORKER}" "${MODE}" "${ACTIVE_RESIDENT_LANE_KIND}" "${ACTIVE_RESIDENT_LANE_VALUE}")"
  ACTIVE_RESIDENT_META_FILE="$(flow_resident_issue_lane_meta_file "${CONFIG_YAML}" "${ACTIVE_RESIDENT_WORKER_KEY}")"
}
+
# Print the live peer controller record for this lane (excluding ourselves,
# identified by "$$"). Fails when no lane key is set or no live peer exists.
controller_live_lane_peer() {
  if [[ -z "${ACTIVE_RESIDENT_WORKER_KEY}" ]]; then
    return 1
  fi
  flow_resident_live_issue_controller_for_key "${CONFIG_YAML}" "${ACTIVE_RESIDENT_WORKER_KEY}" "$$" || return 1
}
+
# If another live controller already owns this lane, yield to it.
# Returns 0 (caller should stop) after setting CONTROLLER_REASON; when the
# peer handles a different issue, our issue is re-enqueued for later pickup.
# Returns 1 when no live peer exists.
controller_yield_to_live_lane_peer() {
  local peer_record=""
  local peer_issue=""
  local peer_state=""

  peer_record="$(controller_live_lane_peer || true)"
  if [[ -z "${peer_record}" ]]; then
    return 1
  fi

  peer_issue="$(awk -F= '/^ISSUE_ID=/{print $2; exit}' <<<"${peer_record}")"
  peer_state="$(awk -F= '/^CONTROLLER_STATE=/{print $2; exit}' <<<"${peer_record}")"

  if [[ -n "${peer_issue}" && "${peer_issue}" != "${ISSUE_ID}" ]]; then
    # The lane is busy with a different issue: park ours in the queue.
    flow_resident_issue_enqueue "${CONFIG_YAML}" "${ISSUE_ID}" "resident-live-lane" >/dev/null || true
    CONTROLLER_REASON="live-lane-controller-${peer_issue}-${peer_state:-running}"
  else
    CONTROLLER_REASON="duplicate-live-lane-controller"
  fi

  return 0
}
+
# Snapshot the exported provider selection into ACTIVE_PROVIDER_* variables.
# Each value prefers the ACP_* export and falls back to the legacy
# F_LOSNING_* alias; the backend additionally falls back to CODING_WORKER.
controller_capture_active_provider_context() {
  ACTIVE_PROVIDER_POOL_NAME="${ACP_ACTIVE_PROVIDER_POOL_NAME:-${F_LOSNING_ACTIVE_PROVIDER_POOL_NAME:-}}"
  ACTIVE_PROVIDER_BACKEND="${ACP_ACTIVE_PROVIDER_BACKEND:-${F_LOSNING_ACTIVE_PROVIDER_BACKEND:-${CODING_WORKER:-}}}"
  ACTIVE_PROVIDER_MODEL="${ACP_ACTIVE_PROVIDER_MODEL:-${F_LOSNING_ACTIVE_PROVIDER_MODEL:-}}"
  ACTIVE_PROVIDER_KEY="${ACP_ACTIVE_PROVIDER_KEY:-${F_LOSNING_ACTIVE_PROVIDER_KEY:-}}"
  ACTIVE_PROVIDER_SELECTION_REASON="${ACP_PROVIDER_POOL_SELECTION_REASON:-${F_LOSNING_PROVIDER_POOL_SELECTION_REASON:-}}"
  ACTIVE_PROVIDER_NEXT_ATTEMPT_EPOCH="${ACP_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH:-${F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH:-}}"
  ACTIVE_PROVIDER_NEXT_ATTEMPT_AT="${ACP_PROVIDER_POOL_NEXT_ATTEMPT_AT:-${F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_AT:-}}"
  ACTIVE_PROVIDER_LAST_REASON="${ACP_PROVIDER_POOL_LAST_REASON:-${F_LOSNING_PROVIDER_POOL_LAST_REASON:-}}"

  # When no pool-provided model exists, derive one from the backend's own
  # model/profile export (codex uses its "safe" profile as the model name).
  if [[ -z "${ACTIVE_PROVIDER_MODEL}" ]]; then
    case "${ACTIVE_PROVIDER_BACKEND}" in
      openclaw)
        ACTIVE_PROVIDER_MODEL="${ACP_OPENCLAW_MODEL:-${F_LOSNING_OPENCLAW_MODEL:-}}"
        ;;
      claude)
        ACTIVE_PROVIDER_MODEL="${ACP_CLAUDE_MODEL:-${F_LOSNING_CLAUDE_MODEL:-}}"
        ;;
      codex)
        ACTIVE_PROVIDER_MODEL="${ACP_CODEX_PROFILE_SAFE:-${F_LOSNING_CODEX_PROFILE_SAFE:-}}"
        ;;
    esac
  fi

  # Synthesize a provider key from backend+model when none was exported.
  if [[ -z "${ACTIVE_PROVIDER_KEY}" && -n "${ACTIVE_PROVIDER_BACKEND}" && -n "${ACTIVE_PROVIDER_MODEL}" ]]; then
    ACTIVE_PROVIDER_KEY="$(flow_sanitize_provider_key "${ACTIVE_PROVIDER_BACKEND}-${ACTIVE_PROVIDER_MODEL}")"
  fi
}
+
# Copy the ACTIVE_PROVIDER_* snapshot into the LAST_RECORDED_* baseline used
# by controller_track_provider_selection to detect provider switches.
controller_set_recorded_provider_from_active() {
  LAST_RECORDED_PROVIDER_POOL_NAME="${ACTIVE_PROVIDER_POOL_NAME}"
  LAST_RECORDED_PROVIDER_BACKEND="${ACTIVE_PROVIDER_BACKEND}"
  LAST_RECORDED_PROVIDER_MODEL="${ACTIVE_PROVIDER_MODEL}"
  LAST_RECORDED_PROVIDER_KEY="${ACTIVE_PROVIDER_KEY}"
}
+
# Remember which provider a worker was actually launched with, and seed the
# recorded baseline on the very first launch so switch detection has a
# reference point.
controller_mark_provider_launched() {
  LAST_LAUNCHED_PROVIDER_POOL_NAME="${ACTIVE_PROVIDER_POOL_NAME}"
  LAST_LAUNCHED_PROVIDER_BACKEND="${ACTIVE_PROVIDER_BACKEND}"
  LAST_LAUNCHED_PROVIDER_MODEL="${ACTIVE_PROVIDER_MODEL}"
  LAST_LAUNCHED_PROVIDER_KEY="${ACTIVE_PROVIDER_KEY}"

  [[ -n "${LAST_RECORDED_PROVIDER_KEY}" ]] || controller_set_recorded_provider_from_active
}
+
# Detect and record a provider switch. No-ops when nothing is selected,
# when this is the first observation (baseline is seeded instead), or when
# the selected provider is unchanged. A "provider-failover" reason also
# bumps the failover counters.
controller_track_provider_selection() {
  local switch_reason="${1:-provider-selection}"
  local stamp=""

  [[ -n "${ACTIVE_PROVIDER_KEY}" ]] || return 0

  if [[ -z "${LAST_RECORDED_PROVIDER_KEY}" ]]; then
    controller_set_recorded_provider_from_active
    return 0
  fi

  [[ "${ACTIVE_PROVIDER_KEY}" != "${LAST_RECORDED_PROVIDER_KEY}" ]] || return 0

  stamp="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
  PROVIDER_SWITCH_COUNT=$((PROVIDER_SWITCH_COUNT + 1))
  LAST_PROVIDER_SWITCH_AT="${stamp}"
  LAST_PROVIDER_SWITCH_REASON="${switch_reason}"
  LAST_PROVIDER_FROM_POOL_NAME="${LAST_RECORDED_PROVIDER_POOL_NAME}"
  LAST_PROVIDER_FROM_BACKEND="${LAST_RECORDED_PROVIDER_BACKEND}"
  LAST_PROVIDER_FROM_MODEL="${LAST_RECORDED_PROVIDER_MODEL}"
  LAST_PROVIDER_FROM_KEY="${LAST_RECORDED_PROVIDER_KEY}"
  LAST_PROVIDER_TO_POOL_NAME="${ACTIVE_PROVIDER_POOL_NAME}"
  LAST_PROVIDER_TO_BACKEND="${ACTIVE_PROVIDER_BACKEND}"
  LAST_PROVIDER_TO_MODEL="${ACTIVE_PROVIDER_MODEL}"
  LAST_PROVIDER_TO_KEY="${ACTIVE_PROVIDER_KEY}"

  if [[ "${switch_reason}" == "provider-failover" ]]; then
    PROVIDER_FAILOVER_COUNT=$((PROVIDER_FAILOVER_COUNT + 1))
    LAST_PROVIDER_FAILOVER_AT="${stamp}"
  fi

  controller_set_recorded_provider_from_active
}
+
# Print the id of the next ready issue this resident controller may adopt:
# a recurring, non-scheduled issue other than the one we already own, with
# no open agent PR. Fails when no candidate (or no listing hook) exists.
select_next_recurring_issue_id() {
  local ready_id=""

  declare -F heartbeat_list_ready_issue_ids >/dev/null 2>&1 || return 1

  while IFS= read -r ready_id; do
    [[ -n "${ready_id}" ]] || continue
    [[ "${ready_id}" != "${ISSUE_ID}" ]] || continue
    # Skip issues parked behind an open agent PR, non-recurring issues,
    # and scheduled issues (those run in their own lane).
    if issue_id_has_open_agent_pr "${ready_id}"; then
      continue
    fi
    if ! issue_id_is_recurring "${ready_id}"; then
      continue
    fi
    if issue_id_is_scheduled "${ready_id}"; then
      continue
    fi
    printf '%s\n' "${ready_id}"
    return 0
  done < <(heartbeat_list_ready_issue_ids 2>/dev/null || true)

  return 1
}
+
# Switch this controller's ownership to a new issue: release the previous
# issue's pending marker and controller file, then reset all per-issue state
# (session name, state-file paths, loop counter, wake timers, lane context).
controller_adopt_issue() {
  local next_issue_id="${1:?issue id required}"
  local previous_issue_id="${ISSUE_ID:-}"
  local previous_controller_file="${CONTROLLER_FILE:-}"

  if [[ -n "${previous_issue_id}" && "${previous_issue_id}" != "${next_issue_id}" ]]; then
    controller_unregister_pending_issue "${previous_issue_id}"
    if [[ -n "${previous_controller_file}" && -f "${previous_controller_file}" ]]; then
      rm -f "${previous_controller_file}"
    fi
  fi

  ISSUE_ID="${next_issue_id}"
  SESSION="${ISSUE_SESSION_PREFIX}${ISSUE_ID}"
  CONTROLLER_FILE="$(flow_resident_issue_controller_file "${CONFIG_YAML}" "${ISSUE_ID}")"
  RESIDENT_META_FILE="$(flow_resident_issue_meta_file "${CONFIG_YAML}" "${ISSUE_ID}")"
  CONTROLLER_LOOP_COUNT="0"
  NEXT_WAKE_EPOCH=""
  NEXT_WAKE_AT=""
  IDLE_WAIT_STARTED_EPOCH=""
  # Lane context is recomputed lazily by controller_refresh_issue_lane_context.
  ACTIVE_RESIDENT_WORKER_KEY=""
  ACTIVE_RESIDENT_META_FILE=""
  ACTIVE_RESIDENT_LANE_KIND=""
  ACTIVE_RESIDENT_LANE_VALUE=""
}
+
# Tell the heartbeat layer (when its hooks exist) that the owned issue is
# now running, passing along whether the issue is classified as "heavy".
controller_mark_issue_running() {
  local heavy_flag="no"

  if declare -F heartbeat_issue_is_heavy >/dev/null 2>&1; then
    heavy_flag="$(heartbeat_issue_is_heavy "${ISSUE_ID}" 2>/dev/null || printf 'no\n')"
  fi

  if declare -F heartbeat_mark_issue_running >/dev/null 2>&1; then
    heartbeat_mark_issue_running "${ISSUE_ID}" "${heavy_flag}" >/dev/null 2>&1 || true
  fi
}
+
# Tell the heartbeat layer (when its hook exists) that the launch of the
# owned issue's worker failed, so its running-state can be rolled back.
controller_rollback_issue_launch() {
  declare -F heartbeat_issue_launch_failed >/dev/null 2>&1 || return 0
  heartbeat_issue_launch_failed "${ISSUE_ID}" >/dev/null 2>&1 || true
}
+
# Try to pick up another issue for this resident controller: first via the
# queue claim helper, then (if the queue is empty) via the ready-issue scan.
# On success the controller re-targets the new issue and reports state
# "adopting-issue"; fails (return 1) when nothing is available.
controller_adopt_next_recurring_issue() {
  local next_issue_id=""
  local claim_out=""
  local claim_file=""

  claim_out="$(flow_resident_issue_claim_next "${CONFIG_YAML}" "${SESSION}" "${ISSUE_ID}" || true)"
  next_issue_id="$(awk -F= '/^ISSUE_ID=/{print $2}' <<<"${claim_out}")"
  claim_file="$(awk -F= '/^CLAIM_FILE=/{print $2}' <<<"${claim_out}")"
  if [[ -z "${next_issue_id}" ]]; then
    next_issue_id="$(select_next_recurring_issue_id || true)"
  fi
  [[ -n "${next_issue_id}" ]] || return 1

  controller_adopt_issue "${next_issue_id}"
  # NOTE(review): claim_file may be empty when the id came from the fallback
  # scan; presumably flow_resident_issue_release_claim tolerates that — verify.
  flow_resident_issue_release_claim "${claim_file}"
  CONTROLLER_REASON="adopted-next-recurring-issue"
  controller_write_state "adopting-issue" ""
  return 0
}
+
# Idle-wait for a new issue to adopt, polling every POLL_SECONDS, for at
# most IDLE_TIMEOUT_SECONDS (measured from the first idle cycle; the start
# epoch persists across calls via IDLE_WAIT_STARTED_EPOCH). Returns 0 when
# an issue was adopted, 1 on timeout or when idle-waiting is disabled.
controller_wait_for_leased_issue() {
  local idle_timeout="${IDLE_TIMEOUT_SECONDS:-0}"
  local now_epoch=""

  # Non-numeric values disable idle waiting entirely.
  case "${idle_timeout}" in
    ''|*[!0-9]*) idle_timeout="0" ;;
  esac

  if [[ "${idle_timeout}" -le 0 ]]; then
    return 1
  fi

  if [[ -z "${IDLE_WAIT_STARTED_EPOCH}" ]]; then
    IDLE_WAIT_STARTED_EPOCH="$(date +%s)"
  fi

  while true; do
    if controller_adopt_next_recurring_issue; then
      return 0
    fi

    now_epoch="$(date +%s)"
    if (( now_epoch - IDLE_WAIT_STARTED_EPOCH >= idle_timeout )); then
      CONTROLLER_REASON="idle-timeout"
      return 1
    fi

    controller_write_state "idle" ""
    sleep "${POLL_SECONDS}"
  done
}
+
# Persist the controller's full state snapshot to CONTROLLER_FILE and keep
# the pending-launch marker in sync with the new state.
# $1: state name (required); $2: reason (defaults to current CONTROLLER_REASON).
controller_write_state() {
  local state="${1:?state required}"
  local reason="${2:-${CONTROLLER_REASON}}"

  CONTROLLER_STATE="${state}"
  CONTROLLER_REASON="${reason}"
  flow_resident_write_metadata "${CONTROLLER_FILE}" \
    "ISSUE_ID=${ISSUE_ID}" \
    "SESSION=${SESSION}" \
    "CONTROLLER_PID=$$" \
    "CONTROLLER_MODE=${MODE}" \
    "CONTROLLER_LOOP_COUNT=${CONTROLLER_LOOP_COUNT}" \
    "CONTROLLER_STATE=${CONTROLLER_STATE}" \
    "CONTROLLER_REASON=${CONTROLLER_REASON}" \
    "ACTIVE_RESIDENT_WORKER_KEY=${ACTIVE_RESIDENT_WORKER_KEY}" \
    "ACTIVE_RESIDENT_LANE_KIND=${ACTIVE_RESIDENT_LANE_KIND}" \
    "ACTIVE_RESIDENT_LANE_VALUE=${ACTIVE_RESIDENT_LANE_VALUE}" \
    "ACTIVE_PROVIDER_POOL_NAME=${ACTIVE_PROVIDER_POOL_NAME}" \
    "ACTIVE_PROVIDER_BACKEND=${ACTIVE_PROVIDER_BACKEND}" \
    "ACTIVE_PROVIDER_MODEL=${ACTIVE_PROVIDER_MODEL}" \
    "ACTIVE_PROVIDER_KEY=${ACTIVE_PROVIDER_KEY}" \
    "ACTIVE_PROVIDER_SELECTION_REASON=${ACTIVE_PROVIDER_SELECTION_REASON}" \
    "ACTIVE_PROVIDER_NEXT_ATTEMPT_EPOCH=${ACTIVE_PROVIDER_NEXT_ATTEMPT_EPOCH}" \
    "ACTIVE_PROVIDER_NEXT_ATTEMPT_AT=${ACTIVE_PROVIDER_NEXT_ATTEMPT_AT}" \
    "ACTIVE_PROVIDER_LAST_REASON=${ACTIVE_PROVIDER_LAST_REASON}" \
    "LAST_LAUNCHED_PROVIDER_POOL_NAME=${LAST_LAUNCHED_PROVIDER_POOL_NAME}" \
    "LAST_LAUNCHED_PROVIDER_BACKEND=${LAST_LAUNCHED_PROVIDER_BACKEND}" \
    "LAST_LAUNCHED_PROVIDER_MODEL=${LAST_LAUNCHED_PROVIDER_MODEL}" \
    "LAST_LAUNCHED_PROVIDER_KEY=${LAST_LAUNCHED_PROVIDER_KEY}" \
    "PROVIDER_SWITCH_COUNT=${PROVIDER_SWITCH_COUNT}" \
    "PROVIDER_FAILOVER_COUNT=${PROVIDER_FAILOVER_COUNT}" \
    "LAST_PROVIDER_SWITCH_AT=${LAST_PROVIDER_SWITCH_AT}" \
    "LAST_PROVIDER_SWITCH_REASON=${LAST_PROVIDER_SWITCH_REASON}" \
    "LAST_PROVIDER_FROM_POOL_NAME=${LAST_PROVIDER_FROM_POOL_NAME}" \
    "LAST_PROVIDER_FROM_BACKEND=${LAST_PROVIDER_FROM_BACKEND}" \
    "LAST_PROVIDER_FROM_MODEL=${LAST_PROVIDER_FROM_MODEL}" \
    "LAST_PROVIDER_FROM_KEY=${LAST_PROVIDER_FROM_KEY}" \
    "LAST_PROVIDER_TO_POOL_NAME=${LAST_PROVIDER_TO_POOL_NAME}" \
    "LAST_PROVIDER_TO_BACKEND=${LAST_PROVIDER_TO_BACKEND}" \
    "LAST_PROVIDER_TO_MODEL=${LAST_PROVIDER_TO_MODEL}" \
    "LAST_PROVIDER_TO_KEY=${LAST_PROVIDER_TO_KEY}" \
    "LAST_PROVIDER_FAILOVER_AT=${LAST_PROVIDER_FAILOVER_AT}" \
    "PROVIDER_WAIT_COUNT=${PROVIDER_WAIT_COUNT}" \
    "PROVIDER_WAIT_TOTAL_SECONDS=${PROVIDER_WAIT_TOTAL_SECONDS}" \
    "PROVIDER_LAST_WAIT_SECONDS=${PROVIDER_LAST_WAIT_SECONDS}" \
    "PROVIDER_LAST_WAIT_STARTED_AT=${PROVIDER_LAST_WAIT_STARTED_AT}" \
    "PROVIDER_LAST_WAIT_COMPLETED_AT=${PROVIDER_LAST_WAIT_COMPLETED_AT}" \
    "NEXT_WAKE_EPOCH=${NEXT_WAKE_EPOCH}" \
    "NEXT_WAKE_AT=${NEXT_WAKE_AT}" \
    "UPDATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")"

  # Keep the pending-launch PID marker consistent with the new state:
  # dropped on "stopped", registered while the state counts as pending,
  # dropped otherwise.
  if [[ "${CONTROLLER_STATE}" == "stopped" ]]; then
    controller_unregister_pending_issue "${ISSUE_ID}"
  elif flow_resident_issue_controller_counts_as_pending "${CONTROLLER_STATE}"; then
    controller_register_pending_issue
  else
    controller_unregister_pending_issue "${ISSUE_ID}"
  fi
}
+
# Print the LAST_FAILURE_REASON value (double quotes stripped) from the
# active lane's metadata file, falling back to the per-issue metadata file.
# Fails when no metadata file exists.
controller_last_failure_reason() {
  local meta="${ACTIVE_RESIDENT_META_FILE:-${RESIDENT_META_FILE:-}}"
  if [[ -z "${meta}" || ! -f "${meta}" ]]; then
    return 1
  fi
  awk -F= '/^LAST_FAILURE_REASON=/{print $2; exit}' "${meta}" 2>/dev/null | tr -d '"' || true
}
+
# Print the provider cooldown state as KEY=VALUE lines. When the state
# script is missing or produces no output, report "READY=yes" so callers
# fail open. The script runs with every cached provider/worker variable
# scrubbed from its environment so it recomputes state from scratch.
controller_provider_state() {
  local provider_state_script="${FLOW_TOOLS_DIR}/provider-cooldown-state.sh"
  local provider_state=""

  if [[ ! -x "${provider_state_script}" ]]; then
    printf 'READY=yes\n'
    return 0
  fi

  provider_state="$(
    env \
      -u ACP_CODING_WORKER -u F_LOSNING_CODING_WORKER \
      -u ACP_CODEX_PROFILE_SAFE -u F_LOSNING_CODEX_PROFILE_SAFE \
      -u ACP_CODEX_PROFILE_BYPASS -u F_LOSNING_CODEX_PROFILE_BYPASS \
      -u ACP_CLAUDE_MODEL -u F_LOSNING_CLAUDE_MODEL \
      -u ACP_CLAUDE_PERMISSION_MODE -u F_LOSNING_CLAUDE_PERMISSION_MODE \
      -u ACP_CLAUDE_EFFORT -u F_LOSNING_CLAUDE_EFFORT \
      -u ACP_CLAUDE_TIMEOUT_SECONDS -u F_LOSNING_CLAUDE_TIMEOUT_SECONDS \
      -u ACP_CLAUDE_MAX_ATTEMPTS -u F_LOSNING_CLAUDE_MAX_ATTEMPTS \
      -u ACP_CLAUDE_RETRY_BACKOFF_SECONDS -u F_LOSNING_CLAUDE_RETRY_BACKOFF_SECONDS \
      -u ACP_OPENCLAW_MODEL -u F_LOSNING_OPENCLAW_MODEL \
      -u ACP_OPENCLAW_THINKING -u F_LOSNING_OPENCLAW_THINKING \
      -u ACP_OPENCLAW_TIMEOUT_SECONDS -u F_LOSNING_OPENCLAW_TIMEOUT_SECONDS \
      -u ACP_ACTIVE_PROVIDER_POOL_NAME -u F_LOSNING_ACTIVE_PROVIDER_POOL_NAME \
      -u ACP_ACTIVE_PROVIDER_BACKEND -u F_LOSNING_ACTIVE_PROVIDER_BACKEND \
      -u ACP_ACTIVE_PROVIDER_MODEL -u F_LOSNING_ACTIVE_PROVIDER_MODEL \
      -u ACP_ACTIVE_PROVIDER_KEY -u F_LOSNING_ACTIVE_PROVIDER_KEY \
      -u ACP_PROVIDER_POOLS_EXHAUSTED -u F_LOSNING_PROVIDER_POOLS_EXHAUSTED \
      -u ACP_PROVIDER_POOL_SELECTION_REASON -u F_LOSNING_PROVIDER_POOL_SELECTION_REASON \
      -u ACP_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH -u F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_EPOCH \
      -u ACP_PROVIDER_POOL_NEXT_ATTEMPT_AT -u F_LOSNING_PROVIDER_POOL_NEXT_ATTEMPT_AT \
      -u ACP_PROVIDER_POOL_LAST_REASON -u F_LOSNING_PROVIDER_POOL_LAST_REASON \
      "${provider_state_script}" get 2>/dev/null || true
  )"
  if [[ -z "${provider_state}" ]]; then
    printf 'READY=yes\n'
    return 0
  fi

  printf '%s\n' "${provider_state}"
}
+
# Block until the provider cooldown state reports READY=yes, recording wait
# telemetry (count, total/last durations, start/completion timestamps).
# Sets PROVIDER_WAITED=yes when at least one wait cycle occurred. Returns 1
# when the state is not ready but exposes no valid NEXT_ATTEMPT_EPOCH
# (i.e. there is nothing to wait for).
controller_wait_for_provider_capacity() {
  local provider_state=""
  local provider_ready=""
  local provider_next_epoch=""
  local provider_next_at=""
  local now_epoch=""
  local remaining=""
  local sleep_seconds=""
  local wait_started_epoch=""
  local wait_completed_epoch=""

  PROVIDER_WAITED="no"

  while true; do
    provider_state="$(controller_provider_state)"
    provider_ready="$(flow_kv_get "${provider_state}" "READY")"
    if [[ "${provider_ready}" == "yes" ]]; then
      # If we actually waited, close out the telemetry for this wait.
      if [[ -n "${wait_started_epoch}" ]]; then
        wait_completed_epoch="$(date +%s)"
        if (( wait_completed_epoch >= wait_started_epoch )); then
          PROVIDER_LAST_WAIT_SECONDS=$((wait_completed_epoch - wait_started_epoch))
          PROVIDER_WAIT_TOTAL_SECONDS=$((PROVIDER_WAIT_TOTAL_SECONDS + PROVIDER_LAST_WAIT_SECONDS))
          # NOTE(review): `date -u -r EPOCH` is the BSD/macOS form; GNU date
          # treats -r as a reference *file* — confirm the target platforms.
          PROVIDER_LAST_WAIT_COMPLETED_AT="$(date -u -r "${wait_completed_epoch}" +"%Y-%m-%dT%H:%M:%SZ")"
        fi
      fi
      NEXT_WAKE_EPOCH=""
      NEXT_WAKE_AT=""
      return 0
    fi

    provider_next_epoch="$(flow_kv_get "${provider_state}" "NEXT_ATTEMPT_EPOCH")"
    provider_next_at="$(flow_kv_get "${provider_state}" "NEXT_ATTEMPT_AT")"
    if ! [[ "${provider_next_epoch}" =~ ^[0-9]+$ ]] || [[ "${provider_next_epoch}" == "0" ]]; then
      return 1
    fi

    # First not-ready cycle: start the wait clock and bump the wait counter.
    if [[ -z "${wait_started_epoch}" ]]; then
      wait_started_epoch="$(date +%s)"
      PROVIDER_WAIT_COUNT=$((PROVIDER_WAIT_COUNT + 1))
      PROVIDER_LAST_WAIT_STARTED_AT="$(date -u -r "${wait_started_epoch}" +"%Y-%m-%dT%H:%M:%SZ")"
    fi

    PROVIDER_WAITED="yes"
    NEXT_WAKE_EPOCH="${provider_next_epoch}"
    NEXT_WAKE_AT="${provider_next_at}"
    CONTROLLER_REASON="provider-cooldown"
    controller_write_state "waiting-provider" ""

    # Sleep for POLL_SECONDS (default 60 when invalid), but no longer than
    # the time remaining until the next attempt, and at least 1 second.
    now_epoch="$(date +%s)"
    remaining=$((provider_next_epoch - now_epoch))
    sleep_seconds="${POLL_SECONDS}"
    if ! [[ "${sleep_seconds}" =~ ^[1-9][0-9]*$ ]]; then
      sleep_seconds="60"
    fi
    if (( remaining > 0 && remaining < sleep_seconds )); then
      sleep_seconds="${remaining}"
    fi
    if (( sleep_seconds <= 0 )); then
      sleep_seconds="1"
    fi
    sleep "${sleep_seconds}"
  done
}
+
# Format an epoch as a UTC ISO-8601 timestamp, on both GNU coreutils date
# (-d @epoch) and BSD/macOS date (-r epoch). The original used only the BSD
# `-r` form, which GNU date interprets as "reference file" and fails on.
# The GNU probe checks its output so BSD's unrelated `-d dst` flag cannot
# be mistaken for a match.
record_scheduled_epoch_to_utc() {
  local epoch="${1:?epoch required}"
  if [ "$(date -u -d "@0" +%s 2>/dev/null)" = "0" ]; then
    date -u -d "@${epoch}" +"%Y-%m-%dT%H:%M:%SZ"
  else
    date -u -r "${epoch}" +"%Y-%m-%dT%H:%M:%SZ"
  fi
}

# Record when a scheduled issue should next run: sets NEXT_WAKE_EPOCH /
# NEXT_WAKE_AT and persists the schedule to SCHEDULED_STATE_DIR/<issue>.env.
# $1: interval in seconds; anything that is not a positive integer is a no-op.
record_scheduled_next_due() {
  local interval_seconds="${1:-0}"
  local state_file now_epoch next_due_epoch

  if ! [[ "${interval_seconds}" =~ ^[1-9][0-9]*$ ]]; then
    return 0
  fi

  now_epoch="$(date +%s)"
  next_due_epoch=$((now_epoch + interval_seconds))
  NEXT_WAKE_EPOCH="${next_due_epoch}"
  NEXT_WAKE_AT="$(record_scheduled_epoch_to_utc "${next_due_epoch}")"
  state_file="${SCHEDULED_STATE_DIR}/${ISSUE_ID}.env"
  cat >"${state_file}" <<EOF
INTERVAL_SECONDS=${interval_seconds}
LAST_STARTED_EPOCH=${now_epoch}
LAST_STARTED_AT=$(record_scheduled_epoch_to_utc "${now_epoch}")
NEXT_DUE_EPOCH=${next_due_epoch}
NEXT_DUE_AT=${NEXT_WAKE_AT}
UPDATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
EOF
}
+
# Persist a terminal "stopped" state; used by the EXIT trap.
controller_cleanup() {
  local final_reason="${CONTROLLER_REASON:-stopped}"
  controller_write_state "stopped" "${final_reason}"
}
+
# On any exit path, persist a final "stopped" state (reason defaults to
# "terminated" when none was set by the failing code path).
trap 'CONTROLLER_REASON="${CONTROLLER_REASON:-terminated}"; controller_cleanup' EXIT
# SIGINT/SIGTERM exit cleanly with status 0 so the EXIT trap above records
# the reason "interrupted" rather than "terminated".
trap 'CONTROLLER_REASON="interrupted"; exit 0' INT TERM
+
# Resolve the configured worker backend once up front. When resident issue
# workers are disabled in the config, or the backend is not supported for
# resident operation, hand off to the one-shot worker launcher and never
# return (exec replaces this process).
controller_refresh_execution_context
if ! flow_resident_issue_backend_supported "${CODING_WORKER}" || ! flow_is_truthy "$(flow_resident_issue_workers_enabled "${CONFIG_YAML}")"; then
  exec "${FLOW_TOOLS_DIR}/start-issue-worker.sh" "${ISSUE_ID}" "${MODE}"
fi
+
# Wait for one full worker cycle: the tmux session appearing, running to
# completion, and the reconcile wrapper reporting a terminal status.
# Returns 1 (with CONTROLLER_REASON set) when the session never appears,
# reconcile fails/reports a non-terminal status, or reconcile times out.
wait_for_worker_cycle() {
  local appear_attempts attempt=0 saw_session="no"
  local reconcile_out=""
  local reconcile_status=""
  local reconcile_attempts=0
  local reconcile_max_attempts=20

  # Poll quickly during launch so short-lived test shims and fast workers do not
  # get misclassified as launch-no-session before the controller ever sees them.
  appear_attempts=100
  while (( attempt < appear_attempts )); do
    if tmux has-session -t "${SESSION}" 2>/dev/null; then
      saw_session="yes"
      break
    fi
    sleep 0.1
    attempt=$((attempt + 1))
  done

  if [[ "${saw_session}" != "yes" ]]; then
    return 1
  fi

  # Block until the worker's tmux session exits.
  controller_write_state "waiting-worker" ""
  while tmux has-session -t "${SESSION}" 2>/dev/null; do
    sleep 2
  done

  # Run the reconcile wrapper until it reports a terminal status, retrying
  # while the worker is still finalizing (STATUS=RUNNING).
  controller_write_state "reconciling" ""
  while (( reconcile_attempts < reconcile_max_attempts )); do
    if ! reconcile_out="$(bash "${FLOW_TOOLS_DIR}/reconcile-issue-worker.sh" "${SESSION}" 2>&1)"; then
      printf '%s\n' "${reconcile_out}" >&2
      CONTROLLER_REASON="reconcile-failed"
      return 1
    fi

    reconcile_status="$(awk -F= '/^STATUS=/{print $2; exit}' <<<"${reconcile_out}")"
    case "${reconcile_status}" in
      SUCCEEDED|FAILED)
        return 0
        ;;
      "")
        # Older test shims may not print STATUS. The real reconcile wrapper always
        # does, so treat blank STATUS as successful test completion.
        return 0
        ;;
      RUNNING)
        controller_write_state "reconciling" "worker-still-finalizing"
        sleep 1
        ;;
      *)
        printf '%s\n' "${reconcile_out}" >&2
        CONTROLLER_REASON="reconcile-non-terminal-${reconcile_status}"
        return 1
        ;;
    esac

    reconcile_attempts=$((reconcile_attempts + 1))
  done

  CONTROLLER_REASON="reconcile-timeout"
  return 1
}
+
# Sleep until the target epoch, waking every POLL_SECONDS (default 60 when
# invalid) to refresh the "waiting-due" state. Returns immediately when the
# target is invalid or already in the past.
sleep_until_next_due() {
  local due_epoch="${1:-0}"
  local current_epoch seconds_left nap

  while true; do
    current_epoch="$(date +%s)"
    if ! [[ "${due_epoch}" =~ ^[0-9]+$ ]] || (( due_epoch <= current_epoch )); then
      return 0
    fi
    seconds_left=$((due_epoch - current_epoch))
    nap="${POLL_SECONDS}"
    if ! [[ "${nap}" =~ ^[1-9][0-9]*$ ]]; then
      nap="60"
    fi
    if (( seconds_left < nap )); then
      nap="${seconds_left}"
    fi
    controller_write_state "waiting-due" ""
    sleep "${nap}"
  done
}
+
while true; do
|
|
763
|
+
issue_payload="$(issue_json 2>/dev/null || printf '{}\n')"
|
|
764
|
+
if ! issue_json_is_open "${issue_payload}"; then
|
|
765
|
+
if controller_adopt_next_recurring_issue; then
|
|
766
|
+
continue
|
|
767
|
+
fi
|
|
768
|
+
CONTROLLER_REASON="issue-closed"
|
|
769
|
+
if controller_wait_for_leased_issue; then
|
|
770
|
+
continue
|
|
771
|
+
fi
|
|
772
|
+
break
|
|
773
|
+
fi
|
|
774
|
+
|
|
775
|
+
is_keep_open="no"
|
|
776
|
+
if issue_json_is_keep_open "${issue_payload}"; then
|
|
777
|
+
is_keep_open="yes"
|
|
778
|
+
fi
|
|
779
|
+
|
|
780
|
+
schedule_interval_seconds="$(issue_schedule_interval_seconds_from_json "${issue_payload}")"
|
|
781
|
+
is_scheduled="no"
|
|
782
|
+
if [[ "${schedule_interval_seconds}" =~ ^[1-9][0-9]*$ ]]; then
|
|
783
|
+
is_scheduled="yes"
|
|
784
|
+
fi
|
|
785
|
+
controller_refresh_execution_context
|
|
786
|
+
controller_refresh_issue_lane_context "${is_scheduled}" "${schedule_interval_seconds}"
|
|
787
|
+
controller_track_provider_selection "provider-selection"
|
|
788
|
+
|
|
789
|
+
if controller_yield_to_live_lane_peer; then
|
|
790
|
+
break
|
|
791
|
+
fi
|
|
792
|
+
|
|
793
|
+
if [[ "${is_keep_open}" != "yes" && "${is_scheduled}" != "yes" ]]; then
|
|
794
|
+
if controller_adopt_next_recurring_issue; then
|
|
795
|
+
continue
|
|
796
|
+
fi
|
|
797
|
+
CONTROLLER_REASON="resident-ineligible"
|
|
798
|
+
if controller_wait_for_leased_issue; then
|
|
799
|
+
continue
|
|
800
|
+
fi
|
|
801
|
+
break
|
|
802
|
+
fi
|
|
803
|
+
|
|
804
|
+
if issue_has_open_agent_pr; then
|
|
805
|
+
if controller_adopt_next_recurring_issue; then
|
|
806
|
+
continue
|
|
807
|
+
fi
|
|
808
|
+
CONTROLLER_REASON="open-agent-pr"
|
|
809
|
+
if controller_wait_for_leased_issue; then
|
|
810
|
+
continue
|
|
811
|
+
fi
|
|
812
|
+
controller_write_state "waiting-open-pr" ""
|
|
813
|
+
break
|
|
814
|
+
fi
|
|
815
|
+
|
|
816
|
+
if [[ "${is_scheduled}" == "yes" && -n "${NEXT_WAKE_EPOCH}" ]]; then
|
|
817
|
+
sleep_until_next_due "${NEXT_WAKE_EPOCH}"
|
|
818
|
+
fi
|
|
819
|
+
|
|
820
|
+
NEXT_WAKE_EPOCH=""
|
|
821
|
+
NEXT_WAKE_AT=""
|
|
822
|
+
if ! controller_wait_for_provider_capacity; then
|
|
823
|
+
CONTROLLER_REASON="provider-unavailable"
|
|
824
|
+
break
|
|
825
|
+
fi
|
|
826
|
+
if [[ "${PROVIDER_WAITED}" == "yes" ]]; then
|
|
827
|
+
CONTROLLER_REASON="provider-ready"
|
|
828
|
+
continue
|
|
829
|
+
fi
|
|
830
|
+
controller_write_state "launching" ""
|
|
831
|
+
controller_mark_issue_running
|
|
832
|
+
if ! bash "${FLOW_TOOLS_DIR}/start-issue-worker.sh" "${ISSUE_ID}" "${MODE}" >/dev/null; then
|
|
833
|
+
controller_rollback_issue_launch
|
|
834
|
+
CONTROLLER_REASON="launch-failed"
|
|
835
|
+
break
|
|
836
|
+
fi
|
|
837
|
+
controller_mark_provider_launched
|
|
838
|
+
|
|
839
|
+
if ! wait_for_worker_cycle; then
|
|
840
|
+
CONTROLLER_REASON="launch-no-session"
|
|
841
|
+
break
|
|
842
|
+
fi
|
|
843
|
+
|
|
844
|
+
CONTROLLER_LOOP_COUNT=$((CONTROLLER_LOOP_COUNT + 1))
|
|
845
|
+
|
|
846
|
+
if [[ "$(controller_last_failure_reason || true)" == "provider-quota-limit" ]]; then
|
|
847
|
+
controller_refresh_execution_context
|
|
848
|
+
controller_refresh_issue_lane_context "${is_scheduled}" "${schedule_interval_seconds}"
|
|
849
|
+
controller_track_provider_selection "provider-failover"
|
|
850
|
+
if ! controller_wait_for_provider_capacity; then
|
|
851
|
+
CONTROLLER_REASON="provider-unavailable"
|
|
852
|
+
break
|
|
853
|
+
fi
|
|
854
|
+
CONTROLLER_REASON="provider-failover"
|
|
855
|
+
continue
|
|
856
|
+
fi
|
|
857
|
+
|
|
858
|
+
issue_payload="$(issue_json 2>/dev/null || printf '{}\n')"
|
|
859
|
+
if ! issue_json_is_open "${issue_payload}"; then
|
|
860
|
+
if controller_adopt_next_recurring_issue; then
|
|
861
|
+
continue
|
|
862
|
+
fi
|
|
863
|
+
CONTROLLER_REASON="issue-closed"
|
|
864
|
+
if controller_wait_for_leased_issue; then
|
|
865
|
+
continue
|
|
866
|
+
fi
|
|
867
|
+
break
|
|
868
|
+
fi
|
|
869
|
+
if jq -e 'any(.labels[]?; .name == "agent-blocked")' >/dev/null <<<"${issue_payload}"; then
|
|
870
|
+
if controller_adopt_next_recurring_issue; then
|
|
871
|
+
continue
|
|
872
|
+
fi
|
|
873
|
+
CONTROLLER_REASON="issue-blocked"
|
|
874
|
+
if controller_wait_for_leased_issue; then
|
|
875
|
+
continue
|
|
876
|
+
fi
|
|
877
|
+
break
|
|
878
|
+
fi
|
|
879
|
+
if issue_has_open_agent_pr; then
|
|
880
|
+
if controller_adopt_next_recurring_issue; then
|
|
881
|
+
continue
|
|
882
|
+
fi
|
|
883
|
+
CONTROLLER_REASON="open-agent-pr"
|
|
884
|
+
if controller_wait_for_leased_issue; then
|
|
885
|
+
continue
|
|
886
|
+
fi
|
|
887
|
+
break
|
|
888
|
+
fi
|
|
889
|
+
|
|
890
|
+
if [[ "${is_scheduled}" == "yes" ]]; then
|
|
891
|
+
record_scheduled_next_due "${schedule_interval_seconds}"
|
|
892
|
+
controller_write_state "sleeping" ""
|
|
893
|
+
continue
|
|
894
|
+
fi
|
|
895
|
+
|
|
896
|
+
if [[ "${MAX_IMMEDIATE_CYCLES}" =~ ^[1-9][0-9]*$ ]] && (( CONTROLLER_LOOP_COUNT >= MAX_IMMEDIATE_CYCLES )); then
|
|
897
|
+
if controller_adopt_next_recurring_issue; then
|
|
898
|
+
continue
|
|
899
|
+
fi
|
|
900
|
+
CONTROLLER_REASON="max-immediate-cycles"
|
|
901
|
+
if controller_wait_for_leased_issue; then
|
|
902
|
+
continue
|
|
903
|
+
fi
|
|
904
|
+
break
|
|
905
|
+
fi
|
|
906
|
+
|
|
907
|
+
controller_write_state "idle" ""
|
|
908
|
+
done
|