shipwright-cli 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +926 -0
- package/claude-code/CLAUDE.md.shipwright +125 -0
- package/claude-code/hooks/notify-idle.sh +35 -0
- package/claude-code/hooks/pre-compact-save.sh +57 -0
- package/claude-code/hooks/task-completed.sh +170 -0
- package/claude-code/hooks/teammate-idle.sh +68 -0
- package/claude-code/settings.json.template +184 -0
- package/completions/_shipwright +140 -0
- package/completions/shipwright.bash +89 -0
- package/completions/shipwright.fish +107 -0
- package/docs/KNOWN-ISSUES.md +199 -0
- package/docs/TIPS.md +331 -0
- package/docs/definition-of-done.example.md +16 -0
- package/docs/patterns/README.md +139 -0
- package/docs/patterns/audit-loop.md +149 -0
- package/docs/patterns/bug-hunt.md +183 -0
- package/docs/patterns/feature-implementation.md +159 -0
- package/docs/patterns/refactoring.md +183 -0
- package/docs/patterns/research-exploration.md +144 -0
- package/docs/patterns/test-generation.md +173 -0
- package/package.json +49 -0
- package/scripts/adapters/docker-deploy.sh +50 -0
- package/scripts/adapters/fly-deploy.sh +41 -0
- package/scripts/adapters/iterm2-adapter.sh +122 -0
- package/scripts/adapters/railway-deploy.sh +34 -0
- package/scripts/adapters/tmux-adapter.sh +87 -0
- package/scripts/adapters/vercel-deploy.sh +35 -0
- package/scripts/adapters/wezterm-adapter.sh +103 -0
- package/scripts/cct +242 -0
- package/scripts/cct-cleanup.sh +172 -0
- package/scripts/cct-cost.sh +590 -0
- package/scripts/cct-daemon.sh +3189 -0
- package/scripts/cct-doctor.sh +328 -0
- package/scripts/cct-fix.sh +478 -0
- package/scripts/cct-fleet.sh +904 -0
- package/scripts/cct-init.sh +282 -0
- package/scripts/cct-logs.sh +273 -0
- package/scripts/cct-loop.sh +1332 -0
- package/scripts/cct-memory.sh +1148 -0
- package/scripts/cct-pipeline.sh +3844 -0
- package/scripts/cct-prep.sh +1352 -0
- package/scripts/cct-ps.sh +168 -0
- package/scripts/cct-reaper.sh +390 -0
- package/scripts/cct-session.sh +284 -0
- package/scripts/cct-status.sh +169 -0
- package/scripts/cct-templates.sh +242 -0
- package/scripts/cct-upgrade.sh +422 -0
- package/scripts/cct-worktree.sh +405 -0
- package/scripts/postinstall.mjs +96 -0
- package/templates/pipelines/autonomous.json +71 -0
- package/templates/pipelines/cost-aware.json +95 -0
- package/templates/pipelines/deployed.json +79 -0
- package/templates/pipelines/enterprise.json +114 -0
- package/templates/pipelines/fast.json +63 -0
- package/templates/pipelines/full.json +104 -0
- package/templates/pipelines/hotfix.json +63 -0
- package/templates/pipelines/standard.json +91 -0
- package/tmux/claude-teams-overlay.conf +109 -0
- package/tmux/templates/architecture.json +19 -0
- package/tmux/templates/bug-fix.json +24 -0
- package/tmux/templates/code-review.json +24 -0
- package/tmux/templates/devops.json +19 -0
- package/tmux/templates/documentation.json +19 -0
- package/tmux/templates/exploration.json +19 -0
- package/tmux/templates/feature-dev.json +24 -0
- package/tmux/templates/full-stack.json +24 -0
- package/tmux/templates/migration.json +24 -0
- package/tmux/templates/refactor.json +19 -0
- package/tmux/templates/security-audit.json +24 -0
- package/tmux/templates/testing.json +24 -0
- package/tmux/tmux.conf +167 -0
package/scripts/cct-fleet.sh
@@ -0,0 +1,904 @@
#!/usr/bin/env bash
# ╔═══════════════════════════════════════════════════════════════════════════╗
# ║  shipwright fleet — Multi-Repo Daemon Orchestrator                          ║
# ║  Spawns daemons across repos · Fleet dashboard · Aggregate metrics          ║
# ╚═══════════════════════════════════════════════════════════════════════════╝
set -euo pipefail

VERSION="1.7.0"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# ─── Colors (matches Seth's tmux theme) ─────────────────────────────────────
CYAN='\033[38;2;0;212;255m'      # #00d4ff — primary accent
PURPLE='\033[38;2;124;58;237m'   # #7c3aed — secondary
BLUE='\033[38;2;0;102;255m'      # #0066ff — tertiary
GREEN='\033[38;2;74;222;128m'    # success
YELLOW='\033[38;2;250;204;21m'   # warning
RED='\033[38;2;248;113;113m'     # error
DIM='\033[2m'
BOLD='\033[1m'
RESET='\033[0m'

# ─── Output Helpers ─────────────────────────────────────────────────────────
info()    { echo -e "${CYAN}${BOLD}▸${RESET} $*"; }
success() { echo -e "${GREEN}${BOLD}✓${RESET} $*"; }
warn()    { echo -e "${YELLOW}${BOLD}⚠${RESET} $*"; }
error()   { echo -e "${RED}${BOLD}✗${RESET} $*" >&2; }

now_iso()   { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
now_epoch() { date +%s; }

epoch_to_iso() {
  local epoch="$1"
  date -u -r "$epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
    date -u -d "@$epoch" +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || \
    echo "1970-01-01T00:00:00Z"
}

format_duration() {
  local secs="$1"
  if [[ "$secs" -ge 3600 ]]; then
    printf "%dh %dm %ds" $((secs/3600)) $((secs%3600/60)) $((secs%60))
  elif [[ "$secs" -ge 60 ]]; then
    printf "%dm %ds" $((secs/60)) $((secs%60))
  else
    printf "%ds" "$secs"
  fi
}
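# A quick sketch of what the helpers above return (illustrative values,
# assuming either BSD or GNU date is available for epoch_to_iso):
#   epoch_to_iso 1700000000    → 2023-11-14T22:13:20Z
#   format_duration 3725       → 1h 2m 5s
#   format_duration 95         → 1m 35s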
# ─── Structured Event Log ───────────────────────────────────────────────────
EVENTS_FILE="${HOME}/.claude-teams/events.jsonl"

emit_event() {
  local event_type="$1"
  shift
  local json_fields=""
  for kv in "$@"; do
    local key="${kv%%=*}"
    local val="${kv#*=}"
    if [[ "$val" =~ ^-?[0-9]+\.?[0-9]*$ ]]; then
      json_fields="${json_fields},\"${key}\":${val}"
    else
      val="${val//\"/\\\"}"
      json_fields="${json_fields},\"${key}\":\"${val}\""
    fi
  done
  mkdir -p "${HOME}/.claude-teams"
  echo "{\"ts\":\"$(now_iso)\",\"ts_epoch\":$(now_epoch),\"type\":\"${event_type}\"${json_fields}}" >> "$EVENTS_FILE"
}
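# For illustration (hypothetical values): a call such as
#   emit_event "fleet.example" "repos=3" "note=smoke test"
# appends one JSONL line to ~/.claude-teams/events.jsonl, with numeric values
# emitted bare and strings quoted/escaped:
#   {"ts":"2024-01-01T00:00:00Z","ts_epoch":1704067200,"type":"fleet.example","repos":3,"note":"smoke test"}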
# ─── Defaults ───────────────────────────────────────────────────────────────
FLEET_DIR="$HOME/.claude-teams"
FLEET_STATE="$FLEET_DIR/fleet-state.json"
CONFIG_PATH=""

# ─── CLI Argument Parsing ───────────────────────────────────────────────────
SUBCOMMAND="${1:-help}"
shift 2>/dev/null || true

while [[ $# -gt 0 ]]; do
  case "$1" in
    --config)
      CONFIG_PATH="${2:-}"
      shift 2
      ;;
    --config=*)
      CONFIG_PATH="${1#--config=}"
      shift
      ;;
    --help|-h)
      SUBCOMMAND="help"
      shift
      ;;
    --period)
      METRICS_PERIOD="${2:-7}"
      shift 2
      ;;
    --period=*)
      METRICS_PERIOD="${1#--period=}"
      shift
      ;;
    --json)
      JSON_OUTPUT=true
      shift
      ;;
    *)
      break
      ;;
  esac
done

METRICS_PERIOD="${METRICS_PERIOD:-7}"
JSON_OUTPUT="${JSON_OUTPUT:-false}"

# ─── Help ───────────────────────────────────────────────────────────────────
show_help() {
  echo ""
  echo -e "${PURPLE}${BOLD}━━━ shipwright fleet v${VERSION} ━━━${RESET}"
  echo ""
  echo -e "${BOLD}USAGE${RESET}"
  echo -e " ${CYAN}shipwright fleet${RESET} <command> [options]"
  echo ""
  echo -e "${BOLD}COMMANDS${RESET}"
  echo -e " ${CYAN}start${RESET}                          Start daemons for all configured repos"
  echo -e " ${CYAN}stop${RESET}                           Stop all fleet daemons"
  echo -e " ${CYAN}status${RESET}                         Show fleet dashboard"
  echo -e " ${CYAN}metrics${RESET} [--period N] [--json]  Aggregate DORA metrics across repos"
  echo -e " ${CYAN}init${RESET}                           Generate fleet-config.json"
  echo -e " ${CYAN}help${RESET}                           Show this help"
  echo ""
  echo -e "${BOLD}OPTIONS${RESET}"
  echo -e " ${CYAN}--config${RESET} <path>   Path to fleet-config.json ${DIM}(default: .claude/fleet-config.json)${RESET}"
  echo ""
  echo -e "${BOLD}EXAMPLES${RESET}"
  echo -e " ${DIM}shipwright fleet init${RESET}                          # Generate config"
  echo -e " ${DIM}shipwright fleet start${RESET}                         # Start all daemons"
  echo -e " ${DIM}shipwright fleet start --config my-fleet.json${RESET}  # Custom config"
  echo -e " ${DIM}shipwright fleet status${RESET}                        # Fleet dashboard"
  echo -e " ${DIM}shipwright fleet metrics --period 30${RESET}           # 30-day aggregate"
  echo -e " ${DIM}shipwright fleet stop${RESET}                          # Stop everything"
  echo ""
  echo -e "${BOLD}CONFIG FILE${RESET} ${DIM}(.claude/fleet-config.json)${RESET}"
  echo -e ' {
    "repos": [
      { "path": "/path/to/api", "template": "autonomous", "max_parallel": 2 },
      { "path": "/path/to/web", "template": "standard" }
    ],
    "defaults": {
      "watch_label": "ready-to-build",
      "pipeline_template": "autonomous",
      "max_parallel": 2,
      "model": "opus"
    },
    "shared_events": true
  }'
  echo ""
}

# ─── Config Loading ─────────────────────────────────────────────────────────
load_fleet_config() {
  local config_file="${CONFIG_PATH:-.claude/fleet-config.json}"

  if [[ ! -f "$config_file" ]]; then
    error "Fleet config not found: $config_file"
    info "Run ${CYAN}shipwright fleet init${RESET} to generate one"
    exit 1
  fi

  info "Loading fleet config: ${DIM}${config_file}${RESET}" >&2

  # Validate JSON
  if ! jq empty "$config_file" 2>/dev/null; then
    error "Invalid JSON in $config_file"
    exit 1
  fi

  # Check repos array exists
  local repo_count
  repo_count=$(jq '.repos | length' "$config_file")
  if [[ "$repo_count" -eq 0 ]]; then
    error "No repos configured in $config_file"
    exit 1
  fi

  echo "$config_file"
}

# ─── Session Name ───────────────────────────────────────────────────────────
session_name_for_repo() {
  local repo_path="$1"
  local basename
  basename=$(basename "$repo_path")
  echo "shipwright-fleet-${basename}"
}
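# Example: session_name_for_repo "/Users/me/code/api" → "shipwright-fleet-api".
# One observation (not documented behavior): because only the basename is used,
# two fleet repos that share a directory name would map to the same tmux session.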
# ─── Worker Pool Rebalancer ─────────────────────────────────────────────────
# Runs in background, redistributes MAX_PARALLEL across repos based on demand
fleet_rebalance() {
  local config_file="$1"
  local interval
  interval=$(jq -r '.worker_pool.rebalance_interval_seconds // 120' "$config_file")
  local total_workers
  total_workers=$(jq -r '.worker_pool.total_workers // 12' "$config_file")

  local shutdown_flag="$HOME/.claude-teams/fleet-rebalancer.shutdown"
  rm -f "$shutdown_flag"

  while true; do
    sleep "$interval"

    # Check for shutdown signal or missing state
    if [[ -f "$shutdown_flag" ]] || [[ ! -f "$FLEET_STATE" ]]; then
      break
    fi

    local repo_names
    repo_names=$(jq -r '.repos | keys[]' "$FLEET_STATE" 2>/dev/null || true)
    if [[ -z "$repo_names" ]]; then
      continue
    fi

    # Collect demand per repo using indexed arrays (bash 3.2 compatible)
    local repo_list=()
    local demand_list=()
    local total_demand=0
    local repo_count=0

    while IFS= read -r repo_name; do
      local repo_path
      repo_path=$(jq -r --arg r "$repo_name" '.repos[$r].path' "$FLEET_STATE" 2>/dev/null || true)
      [[ -z "$repo_path" || "$repo_path" == "null" ]] && continue

      # Read daemon state — try repo-local state first
      local active=0 queued=0
      local daemon_state="$repo_path/.claude-teams/daemon-state.json"
      if [[ ! -f "$daemon_state" ]]; then
        # Fall back to shared state, filtered by repo
        daemon_state="$HOME/.claude-teams/daemon-state.json"
      fi
      if [[ -f "$daemon_state" ]]; then
        active=$(jq -r '.active_jobs | length // 0' "$daemon_state" 2>/dev/null || echo 0)
        queued=$(jq -r '.queued | length // 0' "$daemon_state" 2>/dev/null || echo 0)
        # Validate numeric
        [[ ! "$active" =~ ^[0-9]+$ ]] && active=0
        [[ ! "$queued" =~ ^[0-9]+$ ]] && queued=0
      fi

      local demand=$((active + queued))
      repo_list+=("$repo_name")
      demand_list+=("$demand")
      total_demand=$((total_demand + demand))
      repo_count=$((repo_count + 1))
    done <<< "$repo_names"

    if [[ "$repo_count" -eq 0 ]]; then
      continue
    fi

    # Distribute workers proportionally with budget enforcement
    local allocated_total=0
    local alloc_list=()

    local i
    for i in $(seq 0 $((repo_count - 1))); do
      local new_max
      if [[ "$total_demand" -eq 0 ]]; then
        new_max=$(( total_workers / repo_count ))
      else
        local repo_demand="${demand_list[$i]}"
        new_max=$(awk -v d="$repo_demand" -v td="$total_demand" -v tw="$total_workers" \
          'BEGIN { v = (d / td) * tw; if (v < 1) v = 1; printf "%.0f", v }')
      fi
      [[ "$new_max" -lt 1 ]] && new_max=1
      alloc_list+=("$new_max")
      allocated_total=$((allocated_total + new_max))
    done

    # Budget correction: if we over-allocated, reduce the largest allocations
    while [[ "$allocated_total" -gt "$total_workers" ]]; do
      local max_idx=0
      local max_val="${alloc_list[0]}"
      for i in $(seq 1 $((repo_count - 1))); do
        if [[ "${alloc_list[$i]}" -gt "$max_val" ]]; then
          max_val="${alloc_list[$i]}"
          max_idx=$i
        fi
      done
      # Don't reduce below 1
      if [[ "${alloc_list[$max_idx]}" -le 1 ]]; then
        break
      fi
      alloc_list[$max_idx]=$(( ${alloc_list[$max_idx]} - 1 ))
      allocated_total=$((allocated_total - 1))
    done

    # Write updated configs
    local reload_needed=false
    for i in $(seq 0 $((repo_count - 1))); do
      local repo_name="${repo_list[$i]}"
      local new_max="${alloc_list[$i]}"
      local repo_path
      repo_path=$(jq -r --arg r "$repo_name" '.repos[$r].path' "$FLEET_STATE" 2>/dev/null || true)
      [[ -z "$repo_path" || "$repo_path" == "null" ]] && continue

      local fleet_config="$repo_path/.claude/.fleet-daemon-config.json"
      if [[ -f "$fleet_config" ]]; then
        local tmp_cfg="${fleet_config}.tmp.$$"
        jq --argjson mp "$new_max" '.max_parallel = $mp' "$fleet_config" > "$tmp_cfg" \
          && mv "$tmp_cfg" "$fleet_config"
        reload_needed=true
      fi
    done

    # Signal daemons to reload
    if [[ "$reload_needed" == "true" ]]; then
      touch "$HOME/.claude-teams/fleet-reload.flag"
      emit_event "fleet.rebalance" \
        "total_workers=$total_workers" \
        "total_demand=$total_demand" \
        "repo_count=$repo_count" \
        "allocated=$allocated_total"
    fi
  done
}
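# A worked pass through the allocator above, assuming total_workers=12 and
# per-repo demand (active + queued) of api=5, web=1, docs=0 (total_demand=6):
#   api:  (5/6)*12 = 10    web: (1/6)*12 = 2    docs: 0 → floored to 1
#   allocated_total = 13 > 12, so the budget loop trims the largest: api 10 → 9
#   final allocation: api=9, web=2, docs=1 — exactly the 12-worker budget.
# The per-repo formula, runnable standalone:
#   awk -v d=5 -v td=6 -v tw=12 'BEGIN { v = (d/td)*tw; if (v < 1) v = 1; printf "%.0f\n", v }'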
# ─── Fleet Start ────────────────────────────────────────────────────────────

fleet_start() {
  echo -e "${PURPLE}${BOLD}━━━ shipwright fleet v${VERSION} — start ━━━${RESET}"
  echo ""

  if ! command -v tmux &>/dev/null; then
    error "tmux is required for fleet mode"
    exit 1
  fi

  if ! command -v jq &>/dev/null; then
    error "jq is required. Install: brew install jq"
    exit 1
  fi

  local config_file
  config_file=$(load_fleet_config)

  local repo_count
  repo_count=$(jq '.repos | length' "$config_file")

  # Read defaults
  local default_label default_template default_max_parallel default_model
  default_label=$(jq -r '.defaults.watch_label // "ready-to-build"' "$config_file")
  default_template=$(jq -r '.defaults.pipeline_template // "autonomous"' "$config_file")
  default_max_parallel=$(jq -r '.defaults.max_parallel // 2' "$config_file")
  default_model=$(jq -r '.defaults.model // "opus"' "$config_file")
  local shared_events
  shared_events=$(jq -r '.shared_events // true' "$config_file")

  mkdir -p "$FLEET_DIR"

  # Initialize fleet state
  local fleet_state_tmp="${FLEET_STATE}.tmp.$$"
  echo '{"started_at":"'"$(now_iso)"'","repos":{}}' > "$fleet_state_tmp"

  local started=0
  local skipped=0

  for i in $(seq 0 $((repo_count - 1))); do
    local repo_path repo_template repo_max_parallel repo_label repo_model
    repo_path=$(jq -r ".repos[$i].path" "$config_file")
    repo_template=$(jq -r ".repos[$i].template // \"$default_template\"" "$config_file")
    repo_max_parallel=$(jq -r ".repos[$i].max_parallel // $default_max_parallel" "$config_file")
    repo_label=$(jq -r ".repos[$i].watch_label // \"$default_label\"" "$config_file")
    repo_model=$(jq -r ".repos[$i].model // \"$default_model\"" "$config_file")

    local repo_name
    repo_name=$(basename "$repo_path")
    local session_name
    session_name=$(session_name_for_repo "$repo_path")

    # Validate repo path
    if [[ ! -d "$repo_path" ]]; then
      warn "Repo not found: $repo_path — skipping"
      skipped=$((skipped + 1))
      continue
    fi

    if [[ ! -d "$repo_path/.git" ]]; then
      warn "Not a git repo: $repo_path — skipping"
      skipped=$((skipped + 1))
      continue
    fi

    # Check for existing session
    if tmux has-session -t "$session_name" 2>/dev/null; then
      warn "Session already exists: ${session_name} — skipping"
      skipped=$((skipped + 1))
      continue
    fi

    # Generate per-repo daemon config with overrides
    local repo_config_dir="$repo_path/.claude"
    mkdir -p "$repo_config_dir"
    local repo_daemon_config="$repo_config_dir/daemon-config.json"

    # Only generate if fleet is managing the config (don't overwrite user configs)
    local fleet_managed_config="$repo_config_dir/.fleet-daemon-config.json"
    jq -n \
      --arg label "$repo_label" \
      --argjson poll 60 \
      --argjson max_parallel "$repo_max_parallel" \
      --arg template "$repo_template" \
      --arg model "$repo_model" \
      '{
        watch_label: $label,
        poll_interval: $poll,
        max_parallel: $max_parallel,
        pipeline_template: $template,
        model: $model,
        skip_gates: true,
        on_success: { remove_label: $label, add_label: "pipeline/complete" },
        on_failure: { add_label: "pipeline/failed", comment_log_lines: 50 }
      }' > "$fleet_managed_config"

    # Determine which config the daemon should use
    local daemon_config_flag=""
    if [[ -f "$repo_daemon_config" ]]; then
      # Use existing user config — don't override
      daemon_config_flag="--config $repo_daemon_config"
    else
      daemon_config_flag="--config $fleet_managed_config"
    fi

    # Spawn daemon in detached tmux session
    tmux new-session -d -s "$session_name" \
      "cd '$repo_path' && '$SCRIPT_DIR/cct-daemon.sh' start $daemon_config_flag"

    # Record in fleet state
    local tmp2="${fleet_state_tmp}.2"
    jq --arg repo "$repo_name" \
      --arg path "$repo_path" \
      --arg session "$session_name" \
      --arg template "$repo_template" \
      --argjson max_parallel "$repo_max_parallel" \
      --arg started_at "$(now_iso)" \
      '.repos[$repo] = {
        path: $path,
        session: $session,
        template: $template,
        max_parallel: $max_parallel,
        started_at: $started_at
      }' "$fleet_state_tmp" > "$tmp2" && mv "$tmp2" "$fleet_state_tmp"

    success "Started ${CYAN}${repo_name}${RESET} → tmux session ${DIM}${session_name}${RESET}"
    started=$((started + 1))
  done

  # Atomic write of fleet state
  mv "$fleet_state_tmp" "$FLEET_STATE"

  # Start worker pool rebalancer if enabled
  local pool_enabled
  pool_enabled=$(jq -r '.worker_pool.enabled // false' "$config_file")
  if [[ "$pool_enabled" == "true" ]]; then
    local pool_total
    pool_total=$(jq -r '.worker_pool.total_workers // 12' "$config_file")
    fleet_rebalance "$config_file" &
    local rebalancer_pid=$!

    # Record rebalancer PID in fleet state
    local tmp_rs="${FLEET_STATE}.tmp.$$"
    jq --argjson pid "$rebalancer_pid" '.rebalancer_pid = $pid' "$FLEET_STATE" > "$tmp_rs" \
      && mv "$tmp_rs" "$FLEET_STATE"

    success "Worker pool: ${CYAN}${pool_total} total workers${RESET} (rebalancer PID: ${rebalancer_pid})"
  fi

  echo ""
  echo -e "${PURPLE}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo -e " Fleet: ${GREEN}${started} started${RESET}"
  [[ "$skipped" -gt 0 ]] && echo -e " ${YELLOW}${skipped} skipped${RESET}"
  echo ""
  echo -e " ${DIM}View dashboard:${RESET}  ${CYAN}shipwright fleet status${RESET}"
  echo -e " ${DIM}View metrics:${RESET}    ${CYAN}shipwright fleet metrics${RESET}"
  echo -e " ${DIM}Stop all:${RESET}        ${CYAN}shipwright fleet stop${RESET}"
  echo ""

  emit_event "fleet.started" "repos=$started" "skipped=$skipped"
}
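# For reference, a fleet-managed daemon config produced by the jq template above
# would look like this for a repo that inherits the stock defaults (illustrative):
#   {
#     "watch_label": "ready-to-build",
#     "poll_interval": 60,
#     "max_parallel": 2,
#     "pipeline_template": "autonomous",
#     "model": "opus",
#     "skip_gates": true,
#     "on_success": { "remove_label": "ready-to-build", "add_label": "pipeline/complete" },
#     "on_failure": { "add_label": "pipeline/failed", "comment_log_lines": 50 }
#   }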
# ─── Fleet Stop ─────────────────────────────────────────────────────────────

fleet_stop() {
  echo -e "${PURPLE}${BOLD}━━━ shipwright fleet v${VERSION} — stop ━━━${RESET}"
  echo ""

  if [[ ! -f "$FLEET_STATE" ]]; then
    error "No fleet state found — is the fleet running?"
    info "Start with: ${CYAN}shipwright fleet start${RESET}"
    exit 1
  fi

  local repo_names
  repo_names=$(jq -r '.repos | keys[]' "$FLEET_STATE" 2>/dev/null || true)

  if [[ -z "$repo_names" ]]; then
    warn "No repos in fleet state"
    rm -f "$FLEET_STATE"
    return 0
  fi

  # Signal rebalancer to stop
  touch "$HOME/.claude-teams/fleet-rebalancer.shutdown"

  # Kill rebalancer if running
  local rebalancer_pid
  rebalancer_pid=$(jq -r '.rebalancer_pid // empty' "$FLEET_STATE" 2>/dev/null || true)
  if [[ -n "$rebalancer_pid" ]]; then
    kill "$rebalancer_pid" 2>/dev/null || true
    wait "$rebalancer_pid" 2>/dev/null || true
    success "Stopped worker pool rebalancer (PID: ${rebalancer_pid})"
  fi

  # Clean up flags
  rm -f "$HOME/.claude-teams/fleet-reload.flag"
  rm -f "$HOME/.claude-teams/fleet-rebalancer.shutdown"

  local stopped=0
  while IFS= read -r repo_name; do
    local session_name
    session_name=$(jq -r --arg r "$repo_name" '.repos[$r].session' "$FLEET_STATE")
    local repo_path
    repo_path=$(jq -r --arg r "$repo_name" '.repos[$r].path' "$FLEET_STATE")

    # Try graceful shutdown via the daemon's shutdown flag
    local daemon_dir="$HOME/.claude-teams"
    local shutdown_flag="$daemon_dir/daemon.shutdown"

    # Send shutdown signal to the daemon process inside the tmux session
    if tmux has-session -t "$session_name" 2>/dev/null; then
      # Send Ctrl-C to the tmux session for graceful shutdown
      tmux send-keys -t "$session_name" C-c 2>/dev/null || true
      sleep 1

      # Kill the session if still alive
      if tmux has-session -t "$session_name" 2>/dev/null; then
        tmux kill-session -t "$session_name" 2>/dev/null || true
      fi
      success "Stopped ${CYAN}${repo_name}${RESET}"
      stopped=$((stopped + 1))
    else
      warn "Session not found: ${session_name} — already stopped?"
    fi

    # Clean up fleet-managed config
    local fleet_managed_config="$repo_path/.claude/.fleet-daemon-config.json"
    rm -f "$fleet_managed_config" 2>/dev/null || true

  done <<< "$repo_names"

  rm -f "$FLEET_STATE"

  echo ""
  echo -e " Fleet: ${GREEN}${stopped} stopped${RESET}"
  echo ""

  emit_event "fleet.stopped" "repos=$stopped"
}
# ─── Fleet Status ───────────────────────────────────────────────────────────

fleet_status() {
  echo ""
  echo -e "${PURPLE}${BOLD}━━━ shipwright fleet v${VERSION} — dashboard ━━━${RESET}"
  echo -e " ${DIM}$(now_iso)${RESET}"
  echo ""

  if [[ ! -f "$FLEET_STATE" ]]; then
    warn "No fleet running"
    info "Start with: ${CYAN}shipwright fleet start${RESET}"
    return 0
  fi

  local repo_names
  repo_names=$(jq -r '.repos | keys[]' "$FLEET_STATE" 2>/dev/null || true)

  if [[ -z "$repo_names" ]]; then
    warn "Fleet state is empty"
    return 0
  fi

  # Show worker pool info if enabled
  local pool_enabled="false"
  local config_file_path="${CONFIG_PATH:-.claude/fleet-config.json}"
  if [[ -f "$config_file_path" ]]; then
    pool_enabled=$(jq -r '.worker_pool.enabled // false' "$config_file_path" 2>/dev/null || echo "false")
  fi

  if [[ "$pool_enabled" == "true" ]]; then
    local pool_total rebalancer_pid
    pool_total=$(jq -r '.worker_pool.total_workers // 12' "$config_file_path" 2>/dev/null || echo "12")
    rebalancer_pid=$(jq -r '.rebalancer_pid // "N/A"' "$FLEET_STATE" 2>/dev/null || echo "N/A")
    echo -e " ${BOLD}Worker Pool:${RESET} ${CYAN}${pool_total} total workers${RESET} ${DIM}rebalancer PID: ${rebalancer_pid}${RESET}"
    echo ""
  fi

  # Header
  printf " ${BOLD}%-20s %-10s %-10s %-10s %-10s %-20s${RESET}\n" \
    "REPO" "STATUS" "ACTIVE" "QUEUED" "DONE" "LAST POLL"
  echo -e " ${DIM}────────────────────────────────────────────────────────────────────────────────${RESET}"

  while IFS= read -r repo_name; do
    local session_name repo_path
    session_name=$(jq -r --arg r "$repo_name" '.repos[$r].session' "$FLEET_STATE")
    repo_path=$(jq -r --arg r "$repo_name" '.repos[$r].path' "$FLEET_STATE")

    # Check tmux session
    local status_icon status_text
    if tmux has-session -t "$session_name" 2>/dev/null; then
      status_icon="${GREEN}●${RESET}"
      status_text="running"
    else
      status_icon="${RED}●${RESET}"
      status_text="stopped"
    fi

    # Try to read daemon state from the repo's daemon state file
    local active="-" queued="-" done="-" last_poll="-"
    local daemon_state="$HOME/.claude-teams/daemon-state.json"
    if [[ -f "$daemon_state" ]]; then
      active=$(jq -r '.active_jobs // 0' "$daemon_state" 2>/dev/null || echo "-")
      queued=$(jq -r '.queued // 0' "$daemon_state" 2>/dev/null || echo "-")
      done=$(jq -r '.completed // 0' "$daemon_state" 2>/dev/null || echo "-")
      last_poll=$(jq -r '.last_poll // "-"' "$daemon_state" 2>/dev/null || echo "-")
      # Shorten timestamp
      if [[ "$last_poll" != "-" && "$last_poll" != "null" ]]; then
        last_poll="${last_poll:11:8}"
      else
        last_poll="-"
      fi
    fi

    printf " ${status_icon} %-19s %-10s %-10s %-10s %-10s %-20s\n" \
      "$repo_name" "$status_text" "$active" "$queued" "$done" "$last_poll"

  done <<< "$repo_names"

  echo ""

  # Summary
  local total running=0
  total=$(echo "$repo_names" | wc -l | tr -d ' ')
  while IFS= read -r repo_name; do
    local session_name
    session_name=$(jq -r --arg r "$repo_name" '.repos[$r].session' "$FLEET_STATE")
    if tmux has-session -t "$session_name" 2>/dev/null; then
      running=$((running + 1))
    fi
  done <<< "$repo_names"

  echo -e " ${BOLD}Total:${RESET} ${total} repos ${GREEN}${running} running${RESET} ${DIM}$((total - running)) stopped${RESET}"
  echo ""
  echo -e "${PURPLE}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo ""
}
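# Sample dashboard rows as the printf calls above would render them
# (illustrative values):
#   REPO                 STATUS     ACTIVE     QUEUED     DONE       LAST POLL
#   ● api                running    1          2          14         12:04:31
#   ● web                stopped    -          -          -          -
# Note that ACTIVE/QUEUED/DONE are read from the shared
# ~/.claude-teams/daemon-state.json here, so every row reflects the same file.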
# ─── Fleet Metrics ──────────────────────────────────────────────────────────

fleet_metrics() {
  local period_days="$METRICS_PERIOD"
  local json_output="$JSON_OUTPUT"

  if [[ ! -f "$EVENTS_FILE" ]]; then
    error "No events file found at $EVENTS_FILE"
    info "Events are generated when running ${CYAN}shipwright daemon${RESET} or ${CYAN}shipwright pipeline${RESET}"
    exit 1
  fi

  if ! command -v jq &>/dev/null; then
    error "jq is required. Install: brew install jq"
    exit 1
  fi

  local cutoff_epoch
  cutoff_epoch=$(( $(now_epoch) - (period_days * 86400) ))

  # Filter events within period
  local period_events
  period_events=$(jq -c "select(.ts_epoch >= $cutoff_epoch)" "$EVENTS_FILE" 2>/dev/null)

  if [[ -z "$period_events" ]]; then
    warn "No events in the last ${period_days} day(s)"
    return 0
  fi

  # Get unique repos from events (fall back to "default" if no repo field)
  local repos
  repos=$(echo "$period_events" | jq -r '.repo // "default"' | sort -u)

  if [[ "$json_output" == "true" ]]; then
    # JSON output: per-repo metrics
    local json_result='{"period":"'"${period_days}d"'","repos":{}}'

    while IFS= read -r repo; do
      local repo_events
      if [[ "$repo" == "default" ]]; then
        repo_events=$(echo "$period_events" | jq -c 'select(.repo == null or .repo == "default")')
      else
        repo_events=$(echo "$period_events" | jq -c --arg r "$repo" 'select(.repo == $r)')
      fi

      [[ -z "$repo_events" ]] && continue

      local completed successes failures
      completed=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed")] | length')
      successes=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "success")] | length')
      failures=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "failure")] | length')

      local deploy_freq="0"
      [[ "$period_days" -gt 0 ]] && deploy_freq=$(echo "$successes $period_days" | awk '{printf "%.1f", $1 / ($2 / 7)}')

      local cfr="0"
      [[ "$completed" -gt 0 ]] && cfr=$(echo "$failures $completed" | awk '{printf "%.1f", ($1 / $2) * 100}')

      json_result=$(echo "$json_result" | jq \
        --arg repo "$repo" \
        --argjson completed "$completed" \
        --argjson successes "$successes" \
        --argjson failures "$failures" \
        --argjson deploy_freq "${deploy_freq}" \
        --arg cfr "$cfr" \
        '.repos[$repo] = {
          completed: $completed,
          successes: $successes,
          failures: $failures,
          deploy_freq_per_week: $deploy_freq,
          change_failure_rate_pct: ($cfr | tonumber)
        }')
    done <<< "$repos"

    # Aggregate totals
    local total_completed total_successes total_failures
    total_completed=$(echo "$period_events" | jq -s '[.[] | select(.type == "pipeline.completed")] | length')
    total_successes=$(echo "$period_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "success")] | length')
    total_failures=$(echo "$period_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "failure")] | length')

    json_result=$(echo "$json_result" | jq \
      --argjson total "$total_completed" \
      --argjson successes "$total_successes" \
      --argjson failures "$total_failures" \
      '.aggregate = { completed: $total, successes: $successes, failures: $failures }')

    echo "$json_result" | jq .
    return 0
  fi

  # Dashboard output
  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Fleet Metrics ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo -e " Period: last ${period_days} day(s) ${DIM}$(now_iso)${RESET}"
  echo ""

  # Per-repo breakdown
  echo -e "${BOLD} PER-REPO BREAKDOWN${RESET}"
  printf " %-20s %8s %8s %8s %12s %8s\n" "REPO" "DONE" "PASS" "FAIL" "FREQ/wk" "CFR"
  echo -e " ${DIM}──────────────────────────────────────────────────────────────────────${RESET}"

  local grand_completed=0 grand_successes=0 grand_failures=0

  while IFS= read -r repo; do
    local repo_events
    if [[ "$repo" == "default" ]]; then
      repo_events=$(echo "$period_events" | jq -c 'select(.repo == null or .repo == "default")')
    else
      repo_events=$(echo "$period_events" | jq -c --arg r "$repo" 'select(.repo == $r)')
    fi

    [[ -z "$repo_events" ]] && continue

    local completed successes failures
    completed=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed")] | length')
    successes=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "success")] | length')
    failures=$(echo "$repo_events" | jq -s '[.[] | select(.type == "pipeline.completed" and .result == "failure")] | length')

    local deploy_freq="0"
    [[ "$period_days" -gt 0 ]] && deploy_freq=$(echo "$successes $period_days" | awk '{printf "%.1f", $1 / ($2 / 7)}')

    local cfr="0"
    [[ "$completed" -gt 0 ]] && cfr=$(echo "$failures $completed" | awk '{printf "%.1f", ($1 / $2) * 100}')

    printf " %-20s %8s %8s %8s %12s %7s%%\n" \
      "$repo" "$completed" "${successes}" "${failures}" "$deploy_freq" "$cfr"

    grand_completed=$((grand_completed + completed))
    grand_successes=$((grand_successes + successes))
    grand_failures=$((grand_failures + failures))
  done <<< "$repos"

  echo -e " ${DIM}──────────────────────────────────────────────────────────────────────${RESET}"

  local grand_freq="0"
  [[ "$period_days" -gt 0 ]] && grand_freq=$(echo "$grand_successes $period_days" | awk '{printf "%.1f", $1 / ($2 / 7)}')
  local grand_cfr="0"
  [[ "$grand_completed" -gt 0 ]] && grand_cfr=$(echo "$grand_failures $grand_completed" | awk '{printf "%.1f", ($1 / $2) * 100}')

  printf " ${BOLD}%-20s %8s %8s %8s %12s %7s%%${RESET}\n" \
    "TOTAL" "$grand_completed" "$grand_successes" "$grand_failures" "$grand_freq" "$grand_cfr"
  echo ""

  echo -e "${PURPLE}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo ""
}
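# The two derived metrics above, worked through with illustrative numbers for a
# 30-day period with 15 completed pipelines, 12 successes, and 3 failures:
#   deploy_freq_per_week    = successes / (period_days / 7) = 12 / (30/7) = 2.8
#   change_failure_rate_pct = (failures / completed) * 100  = (3/15) * 100 = 20.0
# The same arithmetic, runnable standalone:
#   awk 'BEGIN { printf "%.1f %.1f\n", 12 / (30/7), (3/15) * 100 }'   # → 2.8 20.0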
# ─── Fleet Init ─────────────────────────────────────────────────────────────

fleet_init() {
  local config_dir=".claude"
  local config_file="${config_dir}/fleet-config.json"

  if [[ -f "$config_file" ]]; then
    warn "Config file already exists: $config_file"
    info "Delete it first if you want to regenerate"
    return 0
  fi

  mkdir -p "$config_dir"

  # Scan for sibling git repos
  local parent_dir
  parent_dir=$(dirname "$(pwd)")
  local detected_repos=()

  while IFS= read -r dir; do
    [[ -d "$dir/.git" ]] && detected_repos+=("$dir")
  done < <(find "$parent_dir" -maxdepth 1 -type d ! -name ".*" 2>/dev/null | sort)

  # Build repos array JSON
  local repos_json="[]"
  for repo in "${detected_repos[@]}"; do
    repos_json=$(echo "$repos_json" | jq --arg path "$repo" '. + [{"path": $path}]')
  done

  jq -n --argjson repos "$repos_json" '{
    repos: $repos,
    defaults: {
      watch_label: "ready-to-build",
      pipeline_template: "autonomous",
      max_parallel: 2,
      model: "opus"
    },
    shared_events: true,
    worker_pool: {
      enabled: false,
      total_workers: 12,
      rebalance_interval_seconds: 120
    }
  }' > "$config_file"

  success "Generated fleet config: ${config_file}"
  echo ""
  echo -e " Detected ${CYAN}${#detected_repos[@]}${RESET} repo(s) in parent directory"
  echo ""

  if [[ "${#detected_repos[@]}" -gt 0 ]]; then
    for repo in "${detected_repos[@]}"; do
      echo -e " ${DIM}•${RESET} $(basename "$repo") ${DIM}$repo${RESET}"
    done
    echo ""
  fi

  echo -e "${DIM}Edit the config to add/remove repos and set overrides, then run:${RESET}"
  echo -e " ${CYAN}shipwright fleet start${RESET}"
}
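# A freshly generated config for a workspace with two sibling repos would look
# roughly like this (paths illustrative):
#   {
#     "repos": [ { "path": "/Users/me/code/api" }, { "path": "/Users/me/code/web" } ],
#     "defaults": { "watch_label": "ready-to-build", "pipeline_template": "autonomous",
#                   "max_parallel": 2, "model": "opus" },
#     "shared_events": true,
#     "worker_pool": { "enabled": false, "total_workers": 12, "rebalance_interval_seconds": 120 }
#   }
# Caveat worth knowing: under bash 3.2 with `set -u`, expanding the empty
# "${detected_repos[@]}" array (no sibling repos found) aborts the script.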
# ─── Command Router ─────────────────────────────────────────────────────────

case "$SUBCOMMAND" in
  start)
    fleet_start
    ;;
  stop)
    fleet_stop
    ;;
  status)
    fleet_status
    ;;
  metrics)
    fleet_metrics
    ;;
  init)
    fleet_init
    ;;
  help|--help|-h)
    show_help
    ;;
  *)
    error "Unknown command: ${SUBCOMMAND}"
    echo ""
    show_help
    exit 1
    ;;
esac