shipwright-cli 3.1.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +22 -8
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/config/defaults.json +25 -2
- package/config/policy.json +1 -1
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +8 -6
- package/dashboard/public/styles.css +176 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +117 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +12 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/metrics.ts +69 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +14 -2
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +25 -4
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +119 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +180 -5
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +101 -3
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +104 -1138
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -711
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +161 -2901
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -8
- package/scripts/sw-adaptive.sh +8 -7
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +15 -6
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +45 -20
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +107 -5
- package/scripts/sw-daemon.sh +71 -11
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +71 -20
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +378 -5
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +12 -7
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +13 -4
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +9 -4
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +18 -4
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +10 -3
- package/scripts/sw-incident.sh +273 -5
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +44 -7
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +436 -1076
- package/scripts/sw-memory.sh +357 -3
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +483 -27
- package/scripts/sw-otel.sh +15 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +7 -1
- package/scripts/sw-pipeline-vitals.sh +12 -6
- package/scripts/sw-pipeline.sh +54 -2653
- package/scripts/sw-pm.sh +16 -8
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +17 -5
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +17 -4
- package/scripts/sw-quality.sh +14 -6
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +14 -5
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +173 -6
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +14 -6
- package/scripts/sw-stream.sh +13 -4
- package/scripts/sw-swarm.sh +20 -7
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +7 -31
- package/scripts/sw-testgen.sh +17 -6
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +37 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +3 -2
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +5 -2
- package/scripts/sw-widgets.sh +9 -4
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -3,6 +3,19 @@
|
|
|
3
3
|
[[ -n "${_DAEMON_DISPATCH_LOADED:-}" ]] && return 0
|
|
4
4
|
_DAEMON_DISPATCH_LOADED=1
|
|
5
5
|
|
|
6
|
+
# Defaults for variables normally set by sw-daemon.sh (safe under set -u).
|
|
7
|
+
DAEMON_DIR="${DAEMON_DIR:-${HOME}/.shipwright}"
|
|
8
|
+
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
|
|
9
|
+
REPO_DIR="${REPO_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
|
|
10
|
+
STATE_FILE="${STATE_FILE:-${DAEMON_DIR}/daemon-state.json}"
|
|
11
|
+
LOG_DIR="${LOG_DIR:-${DAEMON_DIR}/logs}"
|
|
12
|
+
WORKTREE_DIR="${WORKTREE_DIR:-${REPO_DIR}/.claude/worktrees}"
|
|
13
|
+
BASE_BRANCH="${BASE_BRANCH:-main}"
|
|
14
|
+
PIPELINE_TEMPLATE="${PIPELINE_TEMPLATE:-autonomous}"
|
|
15
|
+
EFFORT_LEVEL="${EFFORT_LEVEL:-}"
|
|
16
|
+
FALLBACK_MODEL="${FALLBACK_MODEL:-sonnet}"
|
|
17
|
+
NO_GITHUB="${NO_GITHUB:-false}"
|
|
18
|
+
|
|
6
19
|
# ─── Org-Wide Repo Management ─────────────────────────────────────────────
|
|
7
20
|
|
|
8
21
|
daemon_ensure_repo() {
|
|
@@ -35,6 +48,51 @@ daemon_spawn_pipeline() {
|
|
|
35
48
|
shift 3 2>/dev/null || true
|
|
36
49
|
local extra_pipeline_args=("$@") # Optional extra args passed to sw-pipeline.sh
|
|
37
50
|
|
|
51
|
+
# ── Input validation: Validate issue number is strictly numeric ──
|
|
52
|
+
if [[ ! "$issue_num" =~ ^[0-9]+$ ]]; then
|
|
53
|
+
daemon_log ERROR "Invalid issue number format: ${issue_num}"
|
|
54
|
+
return 1
|
|
55
|
+
fi
|
|
56
|
+
|
|
57
|
+
# Ensure numeric conversion for safety
|
|
58
|
+
issue_num=$(printf '%d' "$issue_num" 2>/dev/null) || {
|
|
59
|
+
daemon_log ERROR "Issue number conversion failed: ${issue_num}"
|
|
60
|
+
return 1
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
# ── Worktree path validation ──
|
|
64
|
+
# Ensure WORKTREE_DIR is absolute and not a symlink
|
|
65
|
+
if [[ -z "$WORKTREE_DIR" ]]; then
|
|
66
|
+
daemon_log ERROR "WORKTREE_DIR is not set"
|
|
67
|
+
return 1
|
|
68
|
+
fi
|
|
69
|
+
|
|
70
|
+
if [[ ! "$WORKTREE_DIR" = /* ]]; then
|
|
71
|
+
daemon_log ERROR "WORKTREE_DIR is not absolute: ${WORKTREE_DIR}"
|
|
72
|
+
return 1
|
|
73
|
+
fi
|
|
74
|
+
|
|
75
|
+
if [[ -L "$WORKTREE_DIR" ]]; then
|
|
76
|
+
daemon_log ERROR "WORKTREE_DIR is a symlink: ${WORKTREE_DIR}"
|
|
77
|
+
return 1
|
|
78
|
+
fi
|
|
79
|
+
|
|
80
|
+
# Self-healing: ensure worktree directory exists before resolving
|
|
81
|
+
if [[ ! -d "$WORKTREE_DIR" ]]; then
|
|
82
|
+
daemon_log INFO "Auto-creating worktree directory: ${WORKTREE_DIR}"
|
|
83
|
+
mkdir -p "$WORKTREE_DIR" || {
|
|
84
|
+
daemon_log ERROR "Failed to create WORKTREE_DIR: ${WORKTREE_DIR}"
|
|
85
|
+
return 1
|
|
86
|
+
}
|
|
87
|
+
fi
|
|
88
|
+
|
|
89
|
+
# Verify resolved path is within repo/daemon directory
|
|
90
|
+
local resolved_wt_dir
|
|
91
|
+
resolved_wt_dir=$(cd "$WORKTREE_DIR" 2>/dev/null && pwd) || {
|
|
92
|
+
daemon_log ERROR "Could not resolve WORKTREE_DIR path: ${WORKTREE_DIR}"
|
|
93
|
+
return 1
|
|
94
|
+
}
|
|
95
|
+
|
|
38
96
|
daemon_log INFO "Spawning pipeline for issue #${issue_num}: ${issue_title}"
|
|
39
97
|
|
|
40
98
|
# ── Budget gate: hard-stop if daily budget exhausted ──
|
|
@@ -90,7 +148,7 @@ daemon_spawn_pipeline() {
|
|
|
90
148
|
overall_risk=$(echo "$risk_result" | jq -r '.overall_risk // 50' 2>/dev/null || echo "50")
|
|
91
149
|
if [[ "$overall_risk" -gt 80 ]]; then
|
|
92
150
|
daemon_log WARN "HIGH RISK (${overall_risk}%) predicted for issue #${issue_num} — upgrading model"
|
|
93
|
-
export CLAUDE_MODEL="opus"
|
|
151
|
+
export CLAUDE_MODEL="$(_smart_model high_risk opus)"
|
|
94
152
|
elif [[ "$overall_risk" -lt 30 ]]; then
|
|
95
153
|
daemon_log INFO "LOW RISK (${overall_risk}%) predicted for issue #${issue_num}"
|
|
96
154
|
fi
|
|
@@ -169,6 +227,12 @@ daemon_spawn_pipeline() {
|
|
|
169
227
|
if [[ -n "$MODEL" ]]; then
|
|
170
228
|
pipeline_args+=("--model" "$MODEL")
|
|
171
229
|
fi
|
|
230
|
+
if [[ -n "$EFFORT_LEVEL" ]]; then
|
|
231
|
+
pipeline_args+=("--effort" "$EFFORT_LEVEL")
|
|
232
|
+
fi
|
|
233
|
+
if [[ -n "$FALLBACK_MODEL" ]]; then
|
|
234
|
+
pipeline_args+=("--fallback-model" "$FALLBACK_MODEL")
|
|
235
|
+
fi
|
|
172
236
|
if [[ "$NO_GITHUB" == "true" ]]; then
|
|
173
237
|
pipeline_args+=("--no-github")
|
|
174
238
|
fi
|
|
@@ -186,12 +250,15 @@ daemon_spawn_pipeline() {
|
|
|
186
250
|
pipeline_args+=("${extra_pipeline_args[@]}")
|
|
187
251
|
fi
|
|
188
252
|
|
|
253
|
+
# Ensure issue type is available for skill injection in pipeline
|
|
254
|
+
export INTELLIGENCE_ISSUE_TYPE="${INTELLIGENCE_ISSUE_TYPE:-backend}"
|
|
255
|
+
|
|
189
256
|
# Run pipeline in work directory (background)
|
|
190
257
|
# Ignore SIGHUP so tmux attach/detach and process group changes don't kill the pipeline
|
|
191
258
|
echo -e "\n\n===== Pipeline run $(date -u +%Y-%m-%dT%H:%M:%SZ) =====" >> "$LOG_DIR/issue-${issue_num}.log" 2>/dev/null || true
|
|
192
259
|
(
|
|
193
260
|
trap '' HUP
|
|
194
|
-
cd "$work_dir"
|
|
261
|
+
cd "$work_dir" || exit 1
|
|
195
262
|
exec "$SCRIPT_DIR/sw-pipeline.sh" "${pipeline_args[@]}"
|
|
196
263
|
) >> "$LOG_DIR/issue-${issue_num}.log" 2>&1 200>&- &
|
|
197
264
|
local pid=$!
|
|
@@ -282,27 +349,48 @@ daemon_reap_completed() {
|
|
|
282
349
|
|
|
283
350
|
# Check if process is still running
|
|
284
351
|
if kill -0 "$pid" 2>/dev/null; then
|
|
285
|
-
# Guard against PID reuse
|
|
286
|
-
#
|
|
287
|
-
|
|
352
|
+
# Guard against PID reuse using two-tier detection:
|
|
353
|
+
# Tier 1 (always): Verify PID command matches pipeline process
|
|
354
|
+
# Tier 2 (>1 hour): Force-reap if process doesn't match
|
|
355
|
+
local _proc_cmd _is_pipeline=false
|
|
356
|
+
_proc_cmd=$(ps -p "$pid" -o command= 2>/dev/null || true)
|
|
357
|
+
if [[ -n "$_proc_cmd" ]] && echo "$_proc_cmd" | grep -qE 'sw-pipeline|sw-loop|claude|shipwright' 2>/dev/null; then
|
|
358
|
+
_is_pipeline=true
|
|
359
|
+
fi
|
|
360
|
+
|
|
361
|
+
# Also verify via process start time if available (detects PID reuse)
|
|
362
|
+
local _proc_start _started_at _start_e _age_s _pid_reused=false
|
|
288
363
|
_started_at=$(echo "$job" | jq -r '.started_at // empty')
|
|
289
364
|
if [[ -n "$_started_at" ]]; then
|
|
290
365
|
_start_e=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$_started_at" +%s 2>/dev/null || date -d "$_started_at" +%s 2>/dev/null || echo "0")
|
|
291
366
|
_age_s=$(( $(now_epoch) - ${_start_e:-0} ))
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
367
|
+
# On macOS/Linux, check process start time to detect PID reuse
|
|
368
|
+
_proc_start=$(ps -p "$pid" -o lstart= 2>/dev/null || true)
|
|
369
|
+
if [[ -n "$_proc_start" && -n "$_started_at" ]]; then
|
|
370
|
+
local _proc_start_epoch
|
|
371
|
+
_proc_start_epoch=$(date -j -f "%a %b %d %H:%M:%S %Y" "$_proc_start" +%s 2>/dev/null || date -d "$_proc_start" +%s 2>/dev/null || echo "0")
|
|
372
|
+
# If process started more than 60s after our job, PID was reused
|
|
373
|
+
if [[ "${_proc_start_epoch:-0}" -gt 0 && "${_start_e:-0}" -gt 0 ]]; then
|
|
374
|
+
local _start_diff=$(( _proc_start_epoch - _start_e ))
|
|
375
|
+
if [[ "$_start_diff" -gt 60 || "$_start_diff" -lt -60 ]]; then
|
|
376
|
+
_pid_reused=true
|
|
377
|
+
daemon_log WARN "PID reuse detected for job #${issue_num}: PID $pid started ${_start_diff}s off from job start"
|
|
378
|
+
fi
|
|
302
379
|
fi
|
|
303
|
-
else
|
|
304
|
-
continue
|
|
305
380
|
fi
|
|
381
|
+
else
|
|
382
|
+
_age_s=0
|
|
383
|
+
fi
|
|
384
|
+
|
|
385
|
+
# Decision: reap if PID was reused OR (not a pipeline process AND running > 1 hour)
|
|
386
|
+
if [[ "$_pid_reused" == "true" ]]; then
|
|
387
|
+
daemon_log WARN "Stale job #${issue_num}: PID $pid reused by another process — force-reaping"
|
|
388
|
+
emit_event "daemon.stale_dead" "issue=$issue_num" "pid=$pid" "elapsed_s=${_age_s:-0}" "reason=pid_reuse"
|
|
389
|
+
# Fall through to reap logic
|
|
390
|
+
elif [[ "$_is_pipeline" == "false" && "${_age_s:-0}" -gt 3600 ]]; then
|
|
391
|
+
daemon_log WARN "Stale job #${issue_num}: PID $pid running ${_age_s}s but not a pipeline process — force-reaping"
|
|
392
|
+
emit_event "daemon.stale_dead" "issue=$issue_num" "pid=$pid" "elapsed_s=$_age_s" "reason=wrong_process"
|
|
393
|
+
# Fall through to reap logic
|
|
306
394
|
else
|
|
307
395
|
continue
|
|
308
396
|
fi
|
|
@@ -401,6 +489,7 @@ daemon_reap_completed() {
|
|
|
401
489
|
# Trigger learning after pipeline reap
|
|
402
490
|
if type optimize_full_analysis &>/dev/null; then
|
|
403
491
|
optimize_full_analysis &>/dev/null &
|
|
492
|
+
wait $! 2>/dev/null || true
|
|
404
493
|
fi
|
|
405
494
|
|
|
406
495
|
# Clean up progress tracking for this job
|
|
@@ -3,6 +3,22 @@
|
|
|
3
3
|
[[ -n "${_DAEMON_FAILURE_LOADED:-}" ]] && return 0
|
|
4
4
|
_DAEMON_FAILURE_LOADED=1
|
|
5
5
|
|
|
6
|
+
# Defaults for variables normally set by sw-daemon.sh (safe under set -u).
|
|
7
|
+
DAEMON_DIR="${DAEMON_DIR:-${HOME}/.shipwright}"
|
|
8
|
+
STATE_FILE="${STATE_FILE:-${DAEMON_DIR}/daemon-state.json}"
|
|
9
|
+
PAUSE_FLAG="${PAUSE_FLAG:-${DAEMON_DIR}/daemon-pause.flag}"
|
|
10
|
+
LOG_DIR="${LOG_DIR:-${DAEMON_DIR}/logs}"
|
|
11
|
+
REPO_DIR="${REPO_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
|
|
12
|
+
PIPELINE_TEMPLATE="${PIPELINE_TEMPLATE:-autonomous}"
|
|
13
|
+
MODEL="${MODEL:-opus}"
|
|
14
|
+
WATCH_LABEL="${WATCH_LABEL:-shipwright}"
|
|
15
|
+
ON_SUCCESS_REMOVE_LABEL="${ON_SUCCESS_REMOVE_LABEL:-shipwright}"
|
|
16
|
+
ON_SUCCESS_ADD_LABEL="${ON_SUCCESS_ADD_LABEL:-pipeline/complete}"
|
|
17
|
+
ON_FAILURE_ADD_LABEL="${ON_FAILURE_ADD_LABEL:-pipeline/failed}"
|
|
18
|
+
ON_FAILURE_LOG_LINES="${ON_FAILURE_LOG_LINES:-50}"
|
|
19
|
+
NO_GITHUB="${NO_GITHUB:-false}"
|
|
20
|
+
EVENTS_FILE="${EVENTS_FILE:-${DAEMON_DIR}/events.jsonl}"
|
|
21
|
+
|
|
6
22
|
classify_failure() {
|
|
7
23
|
local issue_num="$1"
|
|
8
24
|
if [[ -z "${LOG_DIR:-}" ]]; then
|
|
@@ -127,9 +143,9 @@ record_failure_class() {
|
|
|
127
143
|
--argjson count "$consecutive" \
|
|
128
144
|
'{reason: $reason, timestamp: $ts, resume_after: $resume, consecutive_count: $count}')
|
|
129
145
|
local _tmp_pause
|
|
130
|
-
_tmp_pause=$(mktemp "${TMPDIR:-/tmp}/sw-pause.XXXXXX")
|
|
146
|
+
_tmp_pause=$(mktemp "${TMPDIR:-/tmp}/sw-pause.XXXXXX") || { daemon_log ERROR "mktemp failed for pause flag"; return 0; }
|
|
131
147
|
echo "$pause_json" > "$_tmp_pause"
|
|
132
|
-
mv "$_tmp_pause" "$PAUSE_FLAG"
|
|
148
|
+
mv "$_tmp_pause" "$PAUSE_FLAG" || rm -f "$_tmp_pause"
|
|
133
149
|
emit_event "daemon.auto_pause" "reason=consecutive_failures" "class=$failure_class" "count=$consecutive" "resume_after=$resume_after"
|
|
134
150
|
fi
|
|
135
151
|
}
|
|
@@ -185,8 +201,16 @@ daemon_on_failure() {
|
|
|
185
201
|
# ── Auto-retry with strategy escalation ──
|
|
186
202
|
if [[ "${RETRY_ESCALATION:-true}" == "true" ]]; then
|
|
187
203
|
local retry_count
|
|
188
|
-
|
|
189
|
-
|
|
204
|
+
# Read retry count from SQLite first (durable across daemon restarts),
|
|
205
|
+
# then fall back to JSON state file
|
|
206
|
+
local _db_file="${DAEMON_DIR}/shipwright.db"
|
|
207
|
+
if command -v sqlite3 >/dev/null 2>&1 && [[ -f "$_db_file" ]]; then
|
|
208
|
+
retry_count=$(sqlite3 -cmd ".timeout 3000" "$_db_file" \
|
|
209
|
+
"SELECT COALESCE(MAX(retry_count), 0) FROM daemon_state WHERE issue_number = ${issue_num};" 2>/dev/null || echo "0")
|
|
210
|
+
else
|
|
211
|
+
retry_count=$(jq -r --arg num "$issue_num" \
|
|
212
|
+
'.retry_counts[$num] // 0' "$STATE_FILE" 2>/dev/null || echo "0")
|
|
213
|
+
fi
|
|
190
214
|
|
|
191
215
|
# Non-retryable failures — skip retry entirely
|
|
192
216
|
case "$failure_class" in
|
|
@@ -216,6 +240,12 @@ daemon_on_failure() {
|
|
|
216
240
|
--arg num "$issue_num" --argjson count "$retry_count" \
|
|
217
241
|
'.retry_counts[$num] = $count'
|
|
218
242
|
|
|
243
|
+
# Also persist to SQLite (survives daemon restarts/state resets)
|
|
244
|
+
if command -v sqlite3 >/dev/null 2>&1 && [[ -f "$_db_file" ]]; then
|
|
245
|
+
sqlite3 -cmd ".timeout 3000" "$_db_file" \
|
|
246
|
+
"UPDATE daemon_state SET retry_count = ${retry_count} WHERE issue_number = ${issue_num} AND status = 'active';" 2>/dev/null || true
|
|
247
|
+
fi
|
|
248
|
+
|
|
219
249
|
daemon_log WARN "Auto-retry #${retry_count}/${effective_max} for issue #${issue_num} (class: ${failure_class})"
|
|
220
250
|
emit_event "daemon.retry" "issue=$issue_num" "retry=$retry_count" "max=$effective_max" "class=$failure_class"
|
|
221
251
|
|
|
@@ -3,6 +3,25 @@
|
|
|
3
3
|
[[ -n "${_DAEMON_PATROL_LOADED:-}" ]] && return 0
|
|
4
4
|
_DAEMON_PATROL_LOADED=1
|
|
5
5
|
|
|
6
|
+
# Ensure NO_GITHUB is set (parent sw-daemon.sh / sw-pipeline.sh normally sets it,
|
|
7
|
+
# but under set -u bare references crash if this file is sourced standalone).
|
|
8
|
+
NO_GITHUB="${NO_GITHUB:-false}"
|
|
9
|
+
DAEMON_DIR="${DAEMON_DIR:-${HOME}/.shipwright}"
|
|
10
|
+
EVENTS_FILE="${EVENTS_FILE:-${HOME}/.shipwright/events.jsonl}"
|
|
11
|
+
|
|
12
|
+
# Defaults for patrol configuration (normally set by sw-daemon.sh, but must be
|
|
13
|
+
# safe under set -u when this file is sourced in other contexts like tests).
|
|
14
|
+
PATROL_INTERVAL="${PATROL_INTERVAL:-3600}"
|
|
15
|
+
PATROL_MAX_ISSUES="${PATROL_MAX_ISSUES:-5}"
|
|
16
|
+
PATROL_LABEL="${PATROL_LABEL:-auto-patrol}"
|
|
17
|
+
PATROL_DRY_RUN="${PATROL_DRY_RUN:-false}"
|
|
18
|
+
PATROL_AUTO_WATCH="${PATROL_AUTO_WATCH:-false}"
|
|
19
|
+
PATROL_FAILURES_THRESHOLD="${PATROL_FAILURES_THRESHOLD:-3}"
|
|
20
|
+
PATROL_DORA_ENABLED="${PATROL_DORA_ENABLED:-true}"
|
|
21
|
+
PATROL_UNTESTED_ENABLED="${PATROL_UNTESTED_ENABLED:-true}"
|
|
22
|
+
PATROL_RETRY_ENABLED="${PATROL_RETRY_ENABLED:-true}"
|
|
23
|
+
PATROL_RETRY_THRESHOLD="${PATROL_RETRY_THRESHOLD:-2}"
|
|
24
|
+
|
|
6
25
|
# ─── Decision Engine Signal Mode ─────────────────────────────────────────────
|
|
7
26
|
# When DECISION_ENGINE_ENABLED=true, patrol writes candidates to the pending
|
|
8
27
|
# signals file instead of creating GitHub issues directly. The decision engine
|
|
@@ -853,8 +872,8 @@ Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
|
|
|
853
872
|
usage_count=${usage_count:-0}
|
|
854
873
|
|
|
855
874
|
local line_count
|
|
856
|
-
line_count=$(wc -l < "$script" 2>/dev/null | tr -d ' ' ||
|
|
857
|
-
line_count
|
|
875
|
+
line_count=$(wc -l < "$script" 2>/dev/null | tr -d ' ' || true)
|
|
876
|
+
line_count="${line_count:-0}"
|
|
858
877
|
|
|
859
878
|
untested_entries="${untested_entries}${usage_count}|${basename}|${line_count}\n"
|
|
860
879
|
findings=$((findings + 1))
|
|
@@ -1154,7 +1173,9 @@ Patrol pre-filter findings to confirm: ${patrol_findings_summary}"
|
|
|
1154
1173
|
|
|
1155
1174
|
daemon_log INFO "Patrol complete: ${total_findings} findings, ${issues_created} issues created"
|
|
1156
1175
|
|
|
1157
|
-
# Adapt patrol limits based on hit rate
|
|
1158
|
-
adapt_patrol_limits
|
|
1176
|
+
# Adapt patrol limits based on hit rate (requires daemon-adaptive.sh)
|
|
1177
|
+
if type adapt_patrol_limits >/dev/null 2>&1; then
|
|
1178
|
+
adapt_patrol_limits "$total_findings" "$PATROL_MAX_ISSUES"
|
|
1179
|
+
fi
|
|
1159
1180
|
}
|
|
1160
1181
|
|
|
@@ -0,0 +1,361 @@
|
|
|
1
|
+
# daemon-poll-github.sh — GitHub API polling for daemon-poll.sh
|
|
2
|
+
# Source from daemon-poll.sh. Requires daemon-health, state, dispatch, failure, patrol.
|
|
3
|
+
[[ -n "${_DAEMON_POLL_GITHUB_LOADED:-}" ]] && return 0
|
|
4
|
+
_DAEMON_POLL_GITHUB_LOADED=1
|
|
5
|
+
|
|
6
|
+
# Defaults for variables normally set by sw-daemon.sh (safe under set -u).
|
|
7
|
+
DAEMON_DIR="${DAEMON_DIR:-${HOME}/.shipwright}"
|
|
8
|
+
STATE_FILE="${STATE_FILE:-${DAEMON_DIR}/daemon-state.json}"
|
|
9
|
+
PAUSE_FLAG="${PAUSE_FLAG:-${DAEMON_DIR}/daemon-pause.flag}"
|
|
10
|
+
SHUTDOWN_FLAG="${SHUTDOWN_FLAG:-${DAEMON_DIR}/daemon.shutdown}"
|
|
11
|
+
EVENTS_FILE="${EVENTS_FILE:-${DAEMON_DIR}/events.jsonl}"
|
|
12
|
+
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
|
|
13
|
+
NO_GITHUB="${NO_GITHUB:-false}"
|
|
14
|
+
POLL_INTERVAL="${POLL_INTERVAL:-60}"
|
|
15
|
+
MAX_PARALLEL="${MAX_PARALLEL:-4}"
|
|
16
|
+
WATCH_LABEL="${WATCH_LABEL:-shipwright}"
|
|
17
|
+
WATCH_MODE="${WATCH_MODE:-repo}"
|
|
18
|
+
PIPELINE_TEMPLATE="${PIPELINE_TEMPLATE:-autonomous}"
|
|
19
|
+
ISSUE_LIMIT="${ISSUE_LIMIT:-100}"
|
|
20
|
+
SLACK_WEBHOOK="${SLACK_WEBHOOK:-}"
|
|
21
|
+
BACKOFF_SECS="${BACKOFF_SECS:-0}"
|
|
22
|
+
POLL_CYCLE_COUNT="${POLL_CYCLE_COUNT:-0}"
|
|
23
|
+
|
|
24
|
+
#######################################
# daemon_poll_issues — poll GitHub for open issues carrying WATCH_LABEL and
# dispatch one pipeline per issue, in triage-score order.
# Flow: pause-flag / rate-limit guards → fetch issues (org search or repo
# list, with exponential backoff on API failure) → triage scoring + optional
# dependency-aware reordering → per-issue dispatch (priority lane, capacity
# queue, template selection) → queue drain → record last_poll.
# Globals read: NO_GITHUB, PAUSE_FLAG, WATCH_MODE, WATCH_LABEL, ORG,
#   REPO_FILTER, ISSUE_LIMIT, MAX_PARALLEL, PRIORITY_LANE, PRIORITY_LANE_MAX,
#   PRIORITY_STRATEGY, ADAPTIVE_THRESHOLDS_ENABLED, GH_RETRY_ENABLED,
#   SCRIPT_DIR, STATE_FILE, SPAWN_STAGGER_SECONDS, GH_BACKOFF_UNTIL.
#   NOTE(review): ORG, REPO_FILTER and PRIORITY_LANE are expanded without
#   :- guards — presumably sw-daemon.sh always sets them; verify under set -u.
# Globals written: BACKOFF_SECS (grown on API error, reset on success);
#   PIPELINE_TEMPLATE (temporarily overridden around each spawn, restored).
# Outputs: daemon_log lines; emit_event records; side effects via
#   daemon_spawn_pipeline / enqueue_issue / locked_state_update helpers.
# Returns: 0; early-returns whenever polling must be skipped.
#######################################
daemon_poll_issues() {
  if [[ "$NO_GITHUB" == "true" ]]; then
    daemon_log INFO "Polling skipped (--no-github)"
    return
  fi

  # Check for pause flag (set by dashboard, disk_low, or consecutive-failure backoff)
  local pause_file="${PAUSE_FLAG:-$HOME/.shipwright/daemon-pause.flag}"
  if [[ -f "$pause_file" ]]; then
    local resume_after
    resume_after=$(jq -r '.resume_after // empty' "$pause_file" 2>/dev/null || true)
    if [[ -n "$resume_after" ]]; then
      local now_epoch resume_epoch
      now_epoch=$(date +%s)
      # BSD date (-j -f) first, then GNU date (-d); 0 signals "unparseable"
      resume_epoch=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$resume_after" +%s 2>/dev/null || \
        date -d "$resume_after" +%s 2>/dev/null || echo 0)
      if [[ "$resume_epoch" -gt 0 ]] && [[ "$now_epoch" -ge "$resume_epoch" ]]; then
        rm -f "$pause_file"
        daemon_log INFO "Auto-resuming after backoff (resume_after passed)"
      else
        daemon_log INFO "Daemon paused until ${resume_after} — skipping poll"
        return
      fi
    else
      daemon_log INFO "Daemon paused — skipping poll"
      return
    fi
  fi

  # Circuit breaker: skip poll if in backoff window
  if gh_rate_limited; then
    daemon_log INFO "Polling skipped (rate-limit backoff until $(epoch_to_iso "$GH_BACKOFF_UNTIL"))"
    return
  fi

  local issues_json

  # Select gh command wrapper: gh_retry for critical poll calls when enabled
  local gh_cmd="gh"
  if [[ "${GH_RETRY_ENABLED:-true}" == "true" ]]; then
    gh_cmd="gh_retry gh"
  fi

  if [[ "$WATCH_MODE" == "org" && -n "$ORG" ]]; then
    # Org-wide mode: search issues across all org repos
    issues_json=$($gh_cmd search issues \
      --label "$WATCH_LABEL" \
      --owner "$ORG" \
      --state open \
      --json repository,number,title,labels,body,createdAt \
      --limit "${ISSUE_LIMIT:-100}" 2>/dev/null) || {
      # Handle rate limiting with exponential backoff (30s doubling, cap 300s)
      if [[ $BACKOFF_SECS -eq 0 ]]; then
        BACKOFF_SECS=30
      elif [[ $BACKOFF_SECS -lt 300 ]]; then
        BACKOFF_SECS=$((BACKOFF_SECS * 2))
        if [[ $BACKOFF_SECS -gt 300 ]]; then
          BACKOFF_SECS=300
        fi
      fi
      daemon_log WARN "GitHub API error (org search) — backing off ${BACKOFF_SECS}s"
      gh_record_failure
      sleep "$BACKOFF_SECS"
      return
    }

    # Filter by repo_filter regex if set
    if [[ -n "$REPO_FILTER" ]]; then
      issues_json=$(echo "$issues_json" | jq -c --arg filter "$REPO_FILTER" \
        '[.[] | select(.repository.nameWithOwner | test($filter))]')
    fi
  else
    # Standard single-repo mode
    issues_json=$($gh_cmd issue list \
      --label "$WATCH_LABEL" \
      --state open \
      --json number,title,labels,body,createdAt \
      --limit 100 2>/dev/null) || {
      # Handle rate limiting with exponential backoff (30s doubling, cap 300s)
      if [[ $BACKOFF_SECS -eq 0 ]]; then
        BACKOFF_SECS=30
      elif [[ $BACKOFF_SECS -lt 300 ]]; then
        BACKOFF_SECS=$((BACKOFF_SECS * 2))
        if [[ $BACKOFF_SECS -gt 300 ]]; then
          BACKOFF_SECS=300
        fi
      fi
      daemon_log WARN "GitHub API error — backing off ${BACKOFF_SECS}s"
      gh_record_failure
      sleep "$BACKOFF_SECS"
      return
    }
  fi

  # Reset backoff on success
  BACKOFF_SECS=0
  gh_record_success

  local issue_count
  issue_count=$(echo "$issues_json" | jq 'length' 2>/dev/null || echo 0)

  if [[ "$issue_count" -eq 0 ]]; then
    return
  fi

  local mode_label="repo"
  [[ "$WATCH_MODE" == "org" ]] && mode_label="org:${ORG}"
  daemon_log INFO "Found ${issue_count} issue(s) with label '${WATCH_LABEL}' (${mode_label})"
  emit_event "daemon.poll" "issues_found=$issue_count" "active=$(get_active_count)" "mode=$WATCH_MODE"

  # Score each issue using intelligent triage and sort by descending score
  local scored_issues=()
  local dep_graph=""  # "issue:dep1,dep2" entries for dependency ordering
  while IFS= read -r issue; do
    local num score
    num=$(echo "$issue" | jq -r '.number')
    score=$(triage_score_issue "$issue" 2>/dev/null | tail -1)
    # Strip anything non-numeric (colors, stray text) from the score
    score=$(printf '%s' "$score" | tr -cd '[:digit:]')
    [[ -z "$score" ]] && score=50
    # For org mode, include repo name in the scored entry
    local repo_name=""
    if [[ "$WATCH_MODE" == "org" ]]; then
      repo_name=$(echo "$issue" | jq -r '.repository.nameWithOwner // ""')
    fi
    scored_issues+=("${score}|${num}|${repo_name}")

    # Issue dependency detection (adaptive: extract "depends on #X", "blocked by #X")
    if [[ "${ADAPTIVE_THRESHOLDS_ENABLED:-false}" == "true" ]]; then
      local issue_text
      issue_text=$(echo "$issue" | jq -r '(.title // "") + " " + (.body // "")')
      local deps
      deps=$(extract_issue_dependencies "$issue_text")
      if [[ -n "$deps" ]]; then
        local dep_nums
        dep_nums=$(echo "$deps" | tr -d '#' | tr '\n' ',' | sed 's/,$//')
        dep_graph="${dep_graph}${num}:${dep_nums}\n"
        daemon_log INFO "Issue #${num} depends on: ${deps//$'\n'/, }"
      fi
    fi
  done < <(echo "$issues_json" | jq -c '.[]')

  # Sort by score — strategy determines ascending vs descending
  local sorted_order
  if [[ "${PRIORITY_STRATEGY:-quick-wins-first}" == "complex-first" ]]; then
    # Complex-first: lower score (more complex) first
    sorted_order=$(printf '%s\n' "${scored_issues[@]}" | sort -t'|' -k1,1 -n -k2,2 -n)
  else
    # Quick-wins-first (default): higher score (simpler) first, lowest issue# first on ties
    sorted_order=$(printf '%s\n' "${scored_issues[@]}" | sort -t'|' -k1,1 -rn -k2,2 -n)
  fi

  # Dependency-aware reordering: move dependencies before dependents
  if [[ -n "$dep_graph" && "${ADAPTIVE_THRESHOLDS_ENABLED:-false}" == "true" ]]; then
    local reordered=""
    local scheduled=""
    # Multiple passes to resolve transitive dependencies (max 3)
    local pass=0
    while [[ $pass -lt 3 ]]; do
      local changed=false
      local new_order=""
      while IFS='|' read -r s_score s_num s_repo; do
        [[ -z "$s_num" ]] && continue
        # Check if this issue has unscheduled dependencies
        local issue_deps
        issue_deps=$(echo -e "$dep_graph" | grep "^${s_num}:" | head -1 | cut -d: -f2 || true)
        if [[ -n "$issue_deps" ]]; then
          # Check if all deps are scheduled (or not in our issue set)
          local all_deps_ready=true
          local IFS_SAVE="$IFS"
          IFS=','
          for dep in $issue_deps; do
            dep="${dep## }"
            dep="${dep%% }"
            # Is this dep in our scored set and not yet scheduled?
            if echo "$sorted_order" | grep -q "|${dep}|" && ! echo "$scheduled" | grep -q "|${dep}|"; then
              all_deps_ready=false
              break
            fi
          done
          IFS="$IFS_SAVE"
          if [[ "$all_deps_ready" == "false" ]]; then
            # Defer this issue — append at end
            new_order="${new_order}${s_score}|${s_num}|${s_repo}\n"
            changed=true
            continue
          fi
        fi
        reordered="${reordered}${s_score}|${s_num}|${s_repo}\n"
        scheduled="${scheduled}|${s_num}|"
      done <<< "$sorted_order"
      # Append deferred issues
      reordered="${reordered}${new_order}"
      sorted_order=$(echo -e "$reordered" | grep -v '^$')
      reordered=""
      scheduled=""
      if [[ "$changed" == "false" ]]; then
        break
      fi
      pass=$((pass + 1))
    done
  fi

  local active_count
  active_count=$(locked_get_active_count)

  # Process each issue in triage order (here-string loop runs in the current
  # shell, so variable updates inside it persist)
  while IFS='|' read -r score issue_num repo_name; do
    [[ -z "$issue_num" ]] && continue

    local issue_key
    issue_key="$issue_num"
    [[ -n "$repo_name" ]] && issue_key="${repo_name}:${issue_num}"

    local issue_title labels_csv
    issue_title=$(echo "$issues_json" | jq -r --argjson n "$issue_num" --arg repo "$repo_name" '.[] | select(.number == $n) | select($repo == "" or (.repository.nameWithOwner // "") == $repo) | .title')
    labels_csv=$(echo "$issues_json" | jq -r --argjson n "$issue_num" --arg repo "$repo_name" '.[] | select(.number == $n) | select($repo == "" or (.repository.nameWithOwner // "") == $repo) | [.labels[].name] | join(",")')

    # Cache title in state for dashboard visibility (use issue_key for org mode)
    if [[ -n "$issue_title" ]]; then
      locked_state_update --arg num "$issue_key" --arg title "$issue_title" \
        '.titles[$num] = $title'
    fi

    # Skip if already inflight
    if daemon_is_inflight "$issue_key"; then
      continue
    fi

    # Distributed claim (skip if no machines registered)
    if [[ -f "$HOME/.shipwright/machines.json" ]]; then
      local machine_name
      # NOTE(review): if jq succeeds but no machine has role "primary",
      # machine_name is empty (the hostname fallback only fires when jq
      # itself fails) — presumably claim_issue tolerates this; verify.
      machine_name=$(jq -r '.machines[] | select(.role == "primary") | .name' "$HOME/.shipwright/machines.json" 2>/dev/null || hostname -s)
      if ! claim_issue "$issue_num" "$machine_name"; then
        daemon_log INFO "Issue #${issue_num} claimed by another machine — skipping"
        continue
      fi
    fi

    # Priority lane: bypass queue for critical issues
    if [[ "$PRIORITY_LANE" == "true" ]]; then
      local priority_active
      priority_active=$(get_priority_active_count)
      if is_priority_issue "$labels_csv" && [[ "$priority_active" -lt "$PRIORITY_LANE_MAX" ]]; then
        daemon_log WARN "PRIORITY LANE: issue #${issue_num} bypassing queue (${labels_csv})"
        emit_event "daemon.priority_lane" "issue=$issue_num" "score=$score"

        local template
        template=$(select_pipeline_template "$labels_csv" "$score" 2>/dev/null | tail -1)
        # Strip ANSI color codes and any other stray characters
        template=$(printf '%s' "$template" | sed $'s/\x1b\\[[0-9;]*m//g' | tr -cd '[:alnum:]-_')
        [[ -z "$template" ]] && template="$PIPELINE_TEMPLATE"
        daemon_log INFO "Triage: issue #${issue_num} scored ${score}, template=${template} [PRIORITY]"

        local orig_template="$PIPELINE_TEMPLATE"
        PIPELINE_TEMPLATE="$template"
        daemon_spawn_pipeline "$issue_num" "$issue_title" "$repo_name"
        PIPELINE_TEMPLATE="$orig_template"
        track_priority_job "$issue_num"
        continue
      fi
    fi

    # Check capacity
    active_count=$(locked_get_active_count)
    if [[ "$active_count" -ge "$MAX_PARALLEL" ]]; then
      enqueue_issue "$issue_key"
      continue
    fi

    # Auto-select pipeline template: PM recommendation (if available) else labels + triage score.
    # FIX: initialize to empty each iteration — a bare `local template` keeps an
    # existing local's value in bash, so a stale template from the previous
    # issue would leak into this one (and the first `-z` test would trip set -u).
    local template=""
    if [[ "$NO_GITHUB" != "true" ]] && [[ -x "$SCRIPT_DIR/sw-pm.sh" ]]; then
      local pm_rec
      pm_rec=$(bash "$SCRIPT_DIR/sw-pm.sh" recommend --json "$issue_num" 2>/dev/null) || true
      if [[ -n "$pm_rec" ]]; then
        template=$(echo "$pm_rec" | jq -r '.team_composition.template // empty' 2>/dev/null) || true
        # Capability self-assessment: low confidence → upgrade to full template
        local confidence
        confidence=$(echo "$pm_rec" | jq -r '.team_composition.confidence_percent // 100' 2>/dev/null) || true
        if [[ -n "$confidence" && "$confidence" != "null" && "$confidence" -lt 60 ]]; then
          daemon_log INFO "Low PM confidence (${confidence}%) — upgrading to full template"
          template="full"
        fi
      fi
    fi
    if [[ -z "$template" ]]; then
      template=$(select_pipeline_template "$labels_csv" "$score" 2>/dev/null | tail -1)
    fi
    # Strip ANSI color codes and any other stray characters
    template=$(printf '%s' "$template" | sed $'s/\x1b\\[[0-9;]*m//g' | tr -cd '[:alnum:]-_')
    [[ -z "$template" ]] && template="$PIPELINE_TEMPLATE"
    daemon_log INFO "Triage: issue #${issue_num} scored ${score}, template=${template}"

    # Spawn pipeline (template selection applied via PIPELINE_TEMPLATE override)
    local orig_template="$PIPELINE_TEMPLATE"
    PIPELINE_TEMPLATE="$template"
    daemon_spawn_pipeline "$issue_num" "$issue_title" "$repo_name"
    PIPELINE_TEMPLATE="$orig_template"

    # Stagger delay between spawns to avoid API contention
    local stagger_delay="${SPAWN_STAGGER_SECONDS:-15}"
    if [[ "$stagger_delay" -gt 0 ]]; then
      sleep "$stagger_delay"
    fi
  done <<< "$sorted_order"

  # ── Drain queue if we have capacity (prevents deadlock when queue is
  # populated but no active jobs exist to trigger dequeue) ──
  local drain_active
  drain_active=$(locked_get_active_count)
  while [[ "$drain_active" -lt "$MAX_PARALLEL" ]]; do
    local drain_issue_key
    drain_issue_key=$(dequeue_next)
    [[ -z "$drain_issue_key" ]] && break
    # Org-mode keys look like "owner/repo:123"; split into repo + number
    local drain_issue_num="$drain_issue_key" drain_repo=""
    [[ "$drain_issue_key" == *:* ]] && drain_repo="${drain_issue_key%%:*}" && drain_issue_num="${drain_issue_key##*:}"
    local drain_title
    drain_title=$(jq -r --arg n "$drain_issue_key" '.titles[$n] // ""' "$STATE_FILE" 2>/dev/null || true)

    local drain_labels drain_score drain_template
    drain_labels=$(echo "$issues_json" | jq -r --argjson n "$drain_issue_num" --arg repo "$drain_repo" \
      '.[] | select(.number == $n) | select($repo == "" or (.repository.nameWithOwner // "") == $repo) | [.labels[].name] | join(",")' 2>/dev/null || echo "")
    # FIX: head -1 — the "|num|" pattern can match multiple sorted entries
    # (e.g. issue 12 matches "|12|" and "|112|"-adjacent fields), and a
    # multi-line score would corrupt the template lookup below.
    drain_score=$(echo "$sorted_order" | grep "|${drain_issue_num}|" | head -1 | cut -d'|' -f1 || echo "50")
    drain_template=$(select_pipeline_template "$drain_labels" "${drain_score:-50}" 2>/dev/null | tail -1)
    # Strip ANSI color codes and any other stray characters
    drain_template=$(printf '%s' "$drain_template" | sed $'s/\x1b\\[[0-9;]*m//g' | tr -cd '[:alnum:]-_')
    [[ -z "$drain_template" ]] && drain_template="$PIPELINE_TEMPLATE"

    daemon_log INFO "Draining queue: issue #${drain_issue_num}${drain_repo:+, repo=${drain_repo}}, template=${drain_template}"
    local orig_template="$PIPELINE_TEMPLATE"
    PIPELINE_TEMPLATE="$drain_template"
    daemon_spawn_pipeline "$drain_issue_num" "$drain_title" "$drain_repo"
    PIPELINE_TEMPLATE="$orig_template"
    drain_active=$(locked_get_active_count)
  done

  # Update last poll
  update_state_field "last_poll" "$(now_iso)"
}
|
|
360
|
+
|
|
361
|
+
# ─── Health Check ─────────────────────────────────────────────────────────────
|