shipwright-cli 1.7.1 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +90 -0
- package/.claude/agents/devops-engineer.md +142 -0
- package/.claude/agents/pipeline-agent.md +80 -0
- package/.claude/agents/shell-script-specialist.md +150 -0
- package/.claude/agents/test-specialist.md +196 -0
- package/.claude/hooks/post-tool-use.sh +45 -0
- package/.claude/hooks/pre-tool-use.sh +25 -0
- package/.claude/hooks/session-started.sh +37 -0
- package/README.md +212 -814
- package/claude-code/CLAUDE.md.shipwright +54 -0
- package/claude-code/hooks/notify-idle.sh +2 -2
- package/claude-code/hooks/session-start.sh +24 -0
- package/claude-code/hooks/task-completed.sh +6 -2
- package/claude-code/settings.json.template +12 -0
- package/dashboard/public/app.js +4422 -0
- package/dashboard/public/index.html +816 -0
- package/dashboard/public/styles.css +4755 -0
- package/dashboard/server.ts +4315 -0
- package/docs/KNOWN-ISSUES.md +18 -10
- package/docs/TIPS.md +38 -26
- package/docs/patterns/README.md +33 -23
- package/package.json +9 -5
- package/scripts/adapters/iterm2-adapter.sh +1 -1
- package/scripts/adapters/tmux-adapter.sh +52 -23
- package/scripts/adapters/wezterm-adapter.sh +26 -14
- package/scripts/lib/compat.sh +200 -0
- package/scripts/lib/helpers.sh +72 -0
- package/scripts/postinstall.mjs +72 -13
- package/scripts/{cct → sw} +118 -22
- package/scripts/sw-adversarial.sh +274 -0
- package/scripts/sw-architecture-enforcer.sh +330 -0
- package/scripts/sw-checkpoint.sh +468 -0
- package/scripts/sw-cleanup.sh +359 -0
- package/scripts/sw-connect.sh +619 -0
- package/scripts/{cct-cost.sh → sw-cost.sh} +368 -34
- package/scripts/sw-daemon.sh +5574 -0
- package/scripts/sw-dashboard.sh +477 -0
- package/scripts/sw-developer-simulation.sh +252 -0
- package/scripts/sw-docs.sh +635 -0
- package/scripts/sw-doctor.sh +907 -0
- package/scripts/{cct-fix.sh → sw-fix.sh} +10 -6
- package/scripts/{cct-fleet.sh → sw-fleet.sh} +498 -22
- package/scripts/sw-github-checks.sh +521 -0
- package/scripts/sw-github-deploy.sh +533 -0
- package/scripts/sw-github-graphql.sh +972 -0
- package/scripts/sw-heartbeat.sh +293 -0
- package/scripts/{cct-init.sh → sw-init.sh} +144 -11
- package/scripts/sw-intelligence.sh +1196 -0
- package/scripts/sw-jira.sh +643 -0
- package/scripts/sw-launchd.sh +364 -0
- package/scripts/sw-linear.sh +648 -0
- package/scripts/{cct-logs.sh → sw-logs.sh} +72 -2
- package/scripts/sw-loop.sh +2217 -0
- package/scripts/{cct-memory.sh → sw-memory.sh} +514 -36
- package/scripts/sw-patrol-meta.sh +417 -0
- package/scripts/sw-pipeline-composer.sh +455 -0
- package/scripts/sw-pipeline-vitals.sh +1096 -0
- package/scripts/sw-pipeline.sh +7593 -0
- package/scripts/sw-predictive.sh +820 -0
- package/scripts/{cct-prep.sh → sw-prep.sh} +339 -49
- package/scripts/{cct-ps.sh → sw-ps.sh} +9 -6
- package/scripts/{cct-reaper.sh → sw-reaper.sh} +10 -6
- package/scripts/sw-remote.sh +687 -0
- package/scripts/sw-self-optimize.sh +1048 -0
- package/scripts/sw-session.sh +541 -0
- package/scripts/sw-setup.sh +234 -0
- package/scripts/sw-status.sh +796 -0
- package/scripts/{cct-templates.sh → sw-templates.sh} +9 -4
- package/scripts/sw-tmux.sh +591 -0
- package/scripts/sw-tracker-jira.sh +277 -0
- package/scripts/sw-tracker-linear.sh +292 -0
- package/scripts/sw-tracker.sh +409 -0
- package/scripts/{cct-upgrade.sh → sw-upgrade.sh} +103 -46
- package/scripts/{cct-worktree.sh → sw-worktree.sh} +3 -0
- package/templates/pipelines/autonomous.json +35 -6
- package/templates/pipelines/cost-aware.json +21 -0
- package/templates/pipelines/deployed.json +40 -6
- package/templates/pipelines/enterprise.json +16 -2
- package/templates/pipelines/fast.json +19 -0
- package/templates/pipelines/full.json +28 -2
- package/templates/pipelines/hotfix.json +19 -0
- package/templates/pipelines/standard.json +31 -0
- package/tmux/{claude-teams-overlay.conf → shipwright-overlay.conf} +27 -9
- package/tmux/templates/accessibility.json +34 -0
- package/tmux/templates/api-design.json +35 -0
- package/tmux/templates/architecture.json +1 -0
- package/tmux/templates/bug-fix.json +9 -0
- package/tmux/templates/code-review.json +1 -0
- package/tmux/templates/compliance.json +36 -0
- package/tmux/templates/data-pipeline.json +36 -0
- package/tmux/templates/debt-paydown.json +34 -0
- package/tmux/templates/devops.json +1 -0
- package/tmux/templates/documentation.json +1 -0
- package/tmux/templates/exploration.json +1 -0
- package/tmux/templates/feature-dev.json +1 -0
- package/tmux/templates/full-stack.json +8 -0
- package/tmux/templates/i18n.json +34 -0
- package/tmux/templates/incident-response.json +36 -0
- package/tmux/templates/migration.json +1 -0
- package/tmux/templates/observability.json +35 -0
- package/tmux/templates/onboarding.json +33 -0
- package/tmux/templates/performance.json +35 -0
- package/tmux/templates/refactor.json +1 -0
- package/tmux/templates/release.json +35 -0
- package/tmux/templates/security-audit.json +8 -0
- package/tmux/templates/spike.json +34 -0
- package/tmux/templates/testing.json +1 -0
- package/tmux/tmux.conf +98 -9
- package/scripts/cct-cleanup.sh +0 -172
- package/scripts/cct-daemon.sh +0 -3189
- package/scripts/cct-doctor.sh +0 -414
- package/scripts/cct-loop.sh +0 -1332
- package/scripts/cct-pipeline.sh +0 -3844
- package/scripts/cct-session.sh +0 -284
- package/scripts/cct-status.sh +0 -169
|
@@ -0,0 +1,796 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# ╔═══════════════════════════════════════════════════════════════════════════╗
|
|
3
|
+
# ║ sw-status.sh — Dashboard showing Claude Code team status ║
|
|
4
|
+
# ║ ║
|
|
5
|
+
# ║ Shows running teams, agent windows, and task progress. ║
|
|
6
|
+
# ╚═══════════════════════════════════════════════════════════════════════════╝
|
|
7
|
+
VERSION="1.10.0"
|
|
8
|
+
set -euo pipefail
|
|
9
|
+
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
10
|
+
|
|
11
|
+
# ─── Colors ──────────────────────────────────────────────────────────────────
|
|
12
|
+
CYAN='\033[38;2;0;212;255m'
|
|
13
|
+
PURPLE='\033[38;2;124;58;237m'
|
|
14
|
+
BLUE='\033[38;2;0;102;255m'
|
|
15
|
+
GREEN='\033[38;2;74;222;128m'
|
|
16
|
+
YELLOW='\033[38;2;250;204;21m'
|
|
17
|
+
RED='\033[38;2;248;113;113m'
|
|
18
|
+
DIM='\033[2m'
|
|
19
|
+
BOLD='\033[1m'
|
|
20
|
+
RESET='\033[0m'
|
|
21
|
+
|
|
22
|
+
# ─── Cross-platform compatibility ──────────────────────────────────────────
|
|
23
|
+
_COMPAT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/lib/compat.sh"
|
|
24
|
+
# shellcheck source=lib/compat.sh
|
|
25
|
+
[[ -f "$_COMPAT" ]] && source "$_COMPAT"
|
|
26
|
+
|
|
27
|
+
# ─── Argument Parsing ─────────────────────────────────────────────────────────
|
|
28
|
+
JSON_OUTPUT="false"
|
|
29
|
+
while [[ $# -gt 0 ]]; do
|
|
30
|
+
case "$1" in
|
|
31
|
+
--json) JSON_OUTPUT="true"; shift ;;
|
|
32
|
+
--help|-h)
|
|
33
|
+
echo "Usage: shipwright status [--json]"
|
|
34
|
+
echo ""
|
|
35
|
+
echo "Options:"
|
|
36
|
+
echo " --json Output structured JSON instead of formatted text"
|
|
37
|
+
echo " --help Show this help message"
|
|
38
|
+
exit 0
|
|
39
|
+
;;
|
|
40
|
+
*) echo "Unknown option: $1" >&2; exit 1 ;;
|
|
41
|
+
esac
|
|
42
|
+
done
|
|
43
|
+
|
|
44
|
+
# ─── JSON Output Mode ─────────────────────────────────────────────────────────
|
|
45
|
+
if [[ "$JSON_OUTPUT" == "true" ]]; then
|
|
46
|
+
if ! command -v jq &>/dev/null; then
|
|
47
|
+
echo "Error: jq is required for --json output" >&2
|
|
48
|
+
exit 1
|
|
49
|
+
fi
|
|
50
|
+
|
|
51
|
+
# -- tmux windows --
|
|
52
|
+
WINDOWS_JSON="[]"
|
|
53
|
+
if command -v tmux &>/dev/null; then
|
|
54
|
+
WINDOWS_JSON=$(tmux list-windows -a -F '#{session_name}:#{window_index}|#{window_name}|#{window_panes}|#{window_active}' 2>/dev/null | \
|
|
55
|
+
while IFS='|' read -r sw wn pc act; do
|
|
56
|
+
is_claude="false"
|
|
57
|
+
echo "$wn" | grep -qi "claude" && is_claude="true"
|
|
58
|
+
is_active="false"
|
|
59
|
+
[[ "$act" == "1" ]] && is_active="true"
|
|
60
|
+
printf '%s\n' "{\"session_window\":\"$sw\",\"name\":\"$wn\",\"panes\":$pc,\"active\":$is_active,\"claude\":$is_claude}"
|
|
61
|
+
done | jq -s '.' 2>/dev/null) || WINDOWS_JSON="[]"
|
|
62
|
+
fi
|
|
63
|
+
|
|
64
|
+
# -- team configs --
|
|
65
|
+
TEAMS_JSON="[]"
|
|
66
|
+
_teams_dir="${HOME}/.claude/teams"
|
|
67
|
+
if [[ -d "$_teams_dir" ]]; then
|
|
68
|
+
TEAMS_JSON=$(find "$_teams_dir" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort | \
|
|
69
|
+
while IFS= read -r td; do
|
|
70
|
+
[[ -z "$td" ]] && continue
|
|
71
|
+
tn="$(basename "$td")"
|
|
72
|
+
cf="${td}/config.json"
|
|
73
|
+
if [[ -f "$cf" ]]; then
|
|
74
|
+
mc=$(jq '.members | length' "$cf" 2>/dev/null || echo 0)
|
|
75
|
+
printf '%s\n' "{\"name\":\"$tn\",\"members\":$mc,\"has_config\":true}"
|
|
76
|
+
else
|
|
77
|
+
printf '%s\n' "{\"name\":\"$tn\",\"members\":0,\"has_config\":false}"
|
|
78
|
+
fi
|
|
79
|
+
done | jq -s '.' 2>/dev/null) || TEAMS_JSON="[]"
|
|
80
|
+
fi
|
|
81
|
+
|
|
82
|
+
# -- task lists --
|
|
83
|
+
TASKS_JSON="[]"
|
|
84
|
+
_tasks_dir="${HOME}/.claude/tasks"
|
|
85
|
+
if [[ -d "$_tasks_dir" ]]; then
|
|
86
|
+
_tasks_tmp=""
|
|
87
|
+
while IFS= read -r td; do
|
|
88
|
+
[[ -z "$td" ]] && continue
|
|
89
|
+
tn="$(basename "$td")"
|
|
90
|
+
_total=0; _completed=0; _in_progress=0; _pending=0
|
|
91
|
+
while IFS= read -r tf; do
|
|
92
|
+
[[ -z "$tf" ]] && continue
|
|
93
|
+
_total=$((_total + 1))
|
|
94
|
+
_st=$(jq -r '.status // "unknown"' "$tf" 2>/dev/null || echo "unknown")
|
|
95
|
+
case "$_st" in
|
|
96
|
+
completed) _completed=$((_completed + 1)) ;;
|
|
97
|
+
in_progress) _in_progress=$((_in_progress + 1)) ;;
|
|
98
|
+
pending) _pending=$((_pending + 1)) ;;
|
|
99
|
+
esac
|
|
100
|
+
done < <(find "$td" -type f -name '*.json' 2>/dev/null)
|
|
101
|
+
_tasks_tmp="${_tasks_tmp}{\"team\":\"$tn\",\"total\":$_total,\"completed\":$_completed,\"in_progress\":$_in_progress,\"pending\":$_pending}
|
|
102
|
+
"
|
|
103
|
+
done < <(find "$_tasks_dir" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
|
|
104
|
+
if [[ -n "$_tasks_tmp" ]]; then
|
|
105
|
+
TASKS_JSON=$(printf '%s' "$_tasks_tmp" | jq -s '.' 2>/dev/null) || TASKS_JSON="[]"
|
|
106
|
+
fi
|
|
107
|
+
fi
|
|
108
|
+
|
|
109
|
+
# -- daemon --
|
|
110
|
+
DAEMON_JSON="null"
|
|
111
|
+
_state_file="${HOME}/.shipwright/daemon-state.json"
|
|
112
|
+
_pid_file="${HOME}/.shipwright/daemon.pid"
|
|
113
|
+
if [[ -f "$_state_file" ]]; then
|
|
114
|
+
_d_running="false"
|
|
115
|
+
_d_pid="null"
|
|
116
|
+
if [[ -f "$_pid_file" ]]; then
|
|
117
|
+
_d_pid_val=$(cat "$_pid_file" 2>/dev/null || true)
|
|
118
|
+
if [[ -n "$_d_pid_val" ]] && kill -0 "$_d_pid_val" 2>/dev/null; then
|
|
119
|
+
_d_running="true"
|
|
120
|
+
_d_pid="$_d_pid_val"
|
|
121
|
+
fi
|
|
122
|
+
fi
|
|
123
|
+
_active=$(jq -c '.active_jobs // []' "$_state_file" 2>/dev/null || echo "[]")
|
|
124
|
+
_queued=$(jq -c '.queued // []' "$_state_file" 2>/dev/null || echo "[]")
|
|
125
|
+
_completed=$(jq -c '[.completed // [] | reverse | .[:20][]]' "$_state_file" 2>/dev/null || echo "[]")
|
|
126
|
+
_started_at=$(jq -r '.started_at // null' "$_state_file" 2>/dev/null || echo "null")
|
|
127
|
+
_last_poll=$(jq -r '.last_poll // null' "$_state_file" 2>/dev/null || echo "null")
|
|
128
|
+
DAEMON_JSON=$(jq -n \
|
|
129
|
+
--argjson running "$_d_running" \
|
|
130
|
+
--argjson pid "$_d_pid" \
|
|
131
|
+
--argjson active_jobs "$_active" \
|
|
132
|
+
--argjson queued "$_queued" \
|
|
133
|
+
--argjson recent_completions "$_completed" \
|
|
134
|
+
--arg started_at "$_started_at" \
|
|
135
|
+
--arg last_poll "$_last_poll" \
|
|
136
|
+
'{running:$running, pid:$pid, started_at:$started_at, last_poll:$last_poll, active_jobs:$active_jobs, queued:$queued, recent_completions:$recent_completions}') || DAEMON_JSON="null"
|
|
137
|
+
fi
|
|
138
|
+
|
|
139
|
+
# -- issue tracker --
|
|
140
|
+
TRACKER_JSON="null"
|
|
141
|
+
_tracker_cfg="${HOME}/.shipwright/tracker-config.json"
|
|
142
|
+
if [[ -f "$_tracker_cfg" ]]; then
|
|
143
|
+
_provider=$(jq -r '.provider // "none"' "$_tracker_cfg" 2>/dev/null || echo "none")
|
|
144
|
+
if [[ "$_provider" != "none" && -n "$_provider" ]]; then
|
|
145
|
+
_url="null"
|
|
146
|
+
[[ "$_provider" == "jira" ]] && _url=$(jq -r '.jira.base_url // null' "$_tracker_cfg" 2>/dev/null || echo "null")
|
|
147
|
+
TRACKER_JSON=$(jq -n --arg provider "$_provider" --arg url "$_url" '{provider:$provider, url:$url}') || TRACKER_JSON="null"
|
|
148
|
+
fi
|
|
149
|
+
fi
|
|
150
|
+
|
|
151
|
+
# -- heartbeats --
|
|
152
|
+
HEARTBEATS_JSON="[]"
|
|
153
|
+
_hb_dir="${HOME}/.shipwright/heartbeats"
|
|
154
|
+
if [[ -d "$_hb_dir" ]]; then
|
|
155
|
+
HEARTBEATS_JSON=$(find "$_hb_dir" -name '*.json' -type f 2>/dev/null | \
|
|
156
|
+
while IFS= read -r hf; do
|
|
157
|
+
[[ -z "$hf" ]] && continue
|
|
158
|
+
_jid="$(basename "$hf" .json)"
|
|
159
|
+
_stage=$(jq -r '.stage // "unknown"' "$hf" 2>/dev/null || echo "unknown")
|
|
160
|
+
_ts=$(jq -r '.timestamp // null' "$hf" 2>/dev/null || echo "null")
|
|
161
|
+
_iter=$(jq -r '.iteration // 0' "$hf" 2>/dev/null || echo "0")
|
|
162
|
+
printf '%s\n' "{\"job_id\":\"$_jid\",\"stage\":\"$_stage\",\"timestamp\":\"$_ts\",\"iteration\":$_iter}"
|
|
163
|
+
done | jq -s '.' 2>/dev/null) || HEARTBEATS_JSON="[]"
|
|
164
|
+
fi
|
|
165
|
+
|
|
166
|
+
# -- remote machines --
|
|
167
|
+
MACHINES_JSON="[]"
|
|
168
|
+
_machines_file="${HOME}/.shipwright/machines.json"
|
|
169
|
+
if [[ -f "$_machines_file" ]]; then
|
|
170
|
+
MACHINES_JSON=$(jq -c '.machines // []' "$_machines_file" 2>/dev/null) || MACHINES_JSON="[]"
|
|
171
|
+
fi
|
|
172
|
+
|
|
173
|
+
# -- connected developers --
|
|
174
|
+
DEVELOPERS_JSON="null"
|
|
175
|
+
_team_cfg="${HOME}/.shipwright/team-config.json"
|
|
176
|
+
if [[ -f "$_team_cfg" ]]; then
|
|
177
|
+
_dash_url=$(jq -r '.dashboard_url // ""' "$_team_cfg" 2>/dev/null || true)
|
|
178
|
+
if [[ -n "$_dash_url" ]] && command -v curl &>/dev/null; then
|
|
179
|
+
_api_resp=$(curl -s --max-time 3 "${_dash_url}/api/status" 2>/dev/null || echo "")
|
|
180
|
+
if [[ -n "$_api_resp" ]] && echo "$_api_resp" | jq empty 2>/dev/null; then
|
|
181
|
+
_online=$(echo "$_api_resp" | jq '.total_online // 0' 2>/dev/null || echo "0")
|
|
182
|
+
_devs=$(echo "$_api_resp" | jq -c '.developers // []' 2>/dev/null || echo "[]")
|
|
183
|
+
DEVELOPERS_JSON=$(jq -n --argjson reachable true --argjson total_online "$_online" --argjson developers "$_devs" \
|
|
184
|
+
'{reachable:$reachable, total_online:$total_online, developers:$developers}') || DEVELOPERS_JSON="null"
|
|
185
|
+
else
|
|
186
|
+
DEVELOPERS_JSON='{"reachable":false,"total_online":0,"developers":[]}'
|
|
187
|
+
fi
|
|
188
|
+
fi
|
|
189
|
+
fi
|
|
190
|
+
|
|
191
|
+
# -- assemble and output --
|
|
192
|
+
jq -n \
|
|
193
|
+
--arg version "$VERSION" \
|
|
194
|
+
--arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
|
|
195
|
+
--argjson tmux_windows "$WINDOWS_JSON" \
|
|
196
|
+
--argjson teams "$TEAMS_JSON" \
|
|
197
|
+
--argjson task_lists "$TASKS_JSON" \
|
|
198
|
+
--argjson daemon "$DAEMON_JSON" \
|
|
199
|
+
--argjson issue_tracker "$TRACKER_JSON" \
|
|
200
|
+
--argjson heartbeats "$HEARTBEATS_JSON" \
|
|
201
|
+
--argjson remote_machines "$MACHINES_JSON" \
|
|
202
|
+
--argjson connected_developers "$DEVELOPERS_JSON" \
|
|
203
|
+
'{
|
|
204
|
+
version: $version,
|
|
205
|
+
timestamp: $timestamp,
|
|
206
|
+
tmux_windows: $tmux_windows,
|
|
207
|
+
teams: $teams,
|
|
208
|
+
task_lists: $task_lists,
|
|
209
|
+
daemon: $daemon,
|
|
210
|
+
issue_tracker: $issue_tracker,
|
|
211
|
+
heartbeats: $heartbeats,
|
|
212
|
+
remote_machines: $remote_machines,
|
|
213
|
+
connected_developers: $connected_developers
|
|
214
|
+
}'
|
|
215
|
+
exit 0
|
|
216
|
+
fi
|
|
217
|
+
|
|
218
|
+
# ─── Header ──────────────────────────────────────────────────────────────────
|
|
219
|
+
|
|
220
|
+
echo ""
|
|
221
|
+
echo -e "${CYAN}${BOLD} Shipwright — Status Dashboard${RESET}"
|
|
222
|
+
echo -e "${DIM} $(date '+%Y-%m-%d %H:%M:%S')${RESET}"
|
|
223
|
+
echo -e "${DIM} ══════════════════════════════════════════${RESET}"
|
|
224
|
+
echo ""
|
|
225
|
+
|
|
226
|
+
# ─── 1. Tmux Windows ────────────────────────────────────────────────────────
|
|
227
|
+
|
|
228
|
+
echo -e "${PURPLE}${BOLD} TMUX WINDOWS${RESET}"
|
|
229
|
+
echo -e "${DIM} ──────────────────────────────────────────${RESET}"
|
|
230
|
+
|
|
231
|
+
# Get all windows, highlight Claude-related ones
|
|
232
|
+
HAS_CLAUDE_WINDOWS=false
|
|
233
|
+
while IFS= read -r line; do
|
|
234
|
+
session_window="$(echo "$line" | cut -d'|' -f1)"
|
|
235
|
+
window_name="$(echo "$line" | cut -d'|' -f2)"
|
|
236
|
+
pane_count="$(echo "$line" | cut -d'|' -f3)"
|
|
237
|
+
active="$(echo "$line" | cut -d'|' -f4)"
|
|
238
|
+
|
|
239
|
+
if echo "$window_name" | grep -qi "claude"; then
|
|
240
|
+
HAS_CLAUDE_WINDOWS=true
|
|
241
|
+
if [[ "$active" == "1" ]]; then
|
|
242
|
+
status_icon="${GREEN}●${RESET}"
|
|
243
|
+
status_label="${GREEN}active${RESET}"
|
|
244
|
+
else
|
|
245
|
+
status_icon="${YELLOW}●${RESET}"
|
|
246
|
+
status_label="${YELLOW}idle${RESET}"
|
|
247
|
+
fi
|
|
248
|
+
echo -e " ${status_icon} ${BOLD}${window_name}${RESET} ${DIM}${session_window}${RESET} panes:${pane_count} ${status_label}"
|
|
249
|
+
fi
|
|
250
|
+
done < <(tmux list-windows -a -F '#{session_name}:#{window_index}|#{window_name}|#{window_panes}|#{window_active}' 2>/dev/null || true)
|
|
251
|
+
|
|
252
|
+
if ! $HAS_CLAUDE_WINDOWS; then
|
|
253
|
+
echo -e " ${DIM}No Claude team windows found.${RESET}"
|
|
254
|
+
echo -e " ${DIM}Start one with: ${CYAN}shipwright session <name>${RESET}"
|
|
255
|
+
fi
|
|
256
|
+
|
|
257
|
+
# ─── 2. Team Configurations ─────────────────────────────────────────────────
|
|
258
|
+
|
|
259
|
+
echo ""
|
|
260
|
+
echo -e "${PURPLE}${BOLD} TEAM CONFIGS${RESET} ${DIM}~/.claude/teams/${RESET}"
|
|
261
|
+
echo -e "${DIM} ──────────────────────────────────────────${RESET}"
|
|
262
|
+
|
|
263
|
+
TEAMS_DIR="${HOME}/.claude/teams"
|
|
264
|
+
HAS_TEAMS=false
|
|
265
|
+
|
|
266
|
+
if [[ -d "$TEAMS_DIR" ]]; then
|
|
267
|
+
while IFS= read -r team_dir; do
|
|
268
|
+
[[ -z "$team_dir" ]] && continue
|
|
269
|
+
HAS_TEAMS=true
|
|
270
|
+
team_name="$(basename "$team_dir")"
|
|
271
|
+
|
|
272
|
+
# Try to read config.json for member info
|
|
273
|
+
config_file="${team_dir}/config.json"
|
|
274
|
+
if [[ -f "$config_file" ]]; then
|
|
275
|
+
# Count members from JSON (look for "name" keys in members array)
|
|
276
|
+
member_count=$(grep -c '"name"' "$config_file" 2>/dev/null || true)
|
|
277
|
+
member_count="${member_count:-0}"
|
|
278
|
+
# Extract member names
|
|
279
|
+
member_names=$(grep '"name"' "$config_file" 2>/dev/null | sed 's/.*"name"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/' | tr '\n' ', ' | sed 's/,$//' | sed 's/,/, /g')
|
|
280
|
+
|
|
281
|
+
echo -e " ${GREEN}●${RESET} ${BOLD}${team_name}${RESET} ${DIM}members:${member_count}${RESET}"
|
|
282
|
+
if [[ -n "$member_names" ]]; then
|
|
283
|
+
echo -e " ${DIM}└─ ${member_names}${RESET}"
|
|
284
|
+
fi
|
|
285
|
+
else
|
|
286
|
+
# Directory exists but no config — possibly orphaned
|
|
287
|
+
echo -e " ${RED}●${RESET} ${BOLD}${team_name}${RESET} ${DIM}(no config — possibly orphaned)${RESET}"
|
|
288
|
+
fi
|
|
289
|
+
done < <(find "$TEAMS_DIR" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
|
|
290
|
+
fi
|
|
291
|
+
|
|
292
|
+
if ! $HAS_TEAMS; then
|
|
293
|
+
echo -e " ${DIM}No team configs found.${RESET}"
|
|
294
|
+
fi
|
|
295
|
+
|
|
296
|
+
# ─── 3. Task Lists ──────────────────────────────────────────────────────────
|
|
297
|
+
|
|
298
|
+
echo ""
|
|
299
|
+
echo -e "${PURPLE}${BOLD} TASK LISTS${RESET} ${DIM}~/.claude/tasks/${RESET}"
|
|
300
|
+
echo -e "${DIM} ──────────────────────────────────────────${RESET}"
|
|
301
|
+
|
|
302
|
+
TASKS_DIR="${HOME}/.claude/tasks"
|
|
303
|
+
HAS_TASKS=false
|
|
304
|
+
|
|
305
|
+
if [[ -d "$TASKS_DIR" ]]; then
|
|
306
|
+
while IFS= read -r task_dir; do
|
|
307
|
+
[[ -z "$task_dir" ]] && continue
|
|
308
|
+
HAS_TASKS=true
|
|
309
|
+
task_team="$(basename "$task_dir")"
|
|
310
|
+
|
|
311
|
+
# Count tasks by status
|
|
312
|
+
total=0
|
|
313
|
+
completed=0
|
|
314
|
+
in_progress=0
|
|
315
|
+
pending=0
|
|
316
|
+
|
|
317
|
+
while IFS= read -r task_file; do
|
|
318
|
+
[[ -z "$task_file" ]] && continue
|
|
319
|
+
total=$((total + 1))
|
|
320
|
+
status=$(grep -o '"status"[[:space:]]*:[[:space:]]*"[^"]*"' "$task_file" 2>/dev/null | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
|
|
321
|
+
case "$status" in
|
|
322
|
+
completed) completed=$((completed + 1)) ;;
|
|
323
|
+
in_progress) in_progress=$((in_progress + 1)) ;;
|
|
324
|
+
pending) pending=$((pending + 1)) ;;
|
|
325
|
+
esac
|
|
326
|
+
done < <(find "$task_dir" -type f -name '*.json' 2>/dev/null)
|
|
327
|
+
|
|
328
|
+
# Build progress bar
|
|
329
|
+
if [[ $total -gt 0 ]]; then
|
|
330
|
+
pct=$((completed * 100 / total))
|
|
331
|
+
bar_width=20
|
|
332
|
+
filled=$((pct * bar_width / 100))
|
|
333
|
+
empty=$((bar_width - filled))
|
|
334
|
+
bar="${GREEN}"
|
|
335
|
+
for ((i=0; i<filled; i++)); do bar+="█"; done
|
|
336
|
+
bar+="${DIM}"
|
|
337
|
+
for ((i=0; i<empty; i++)); do bar+="░"; done
|
|
338
|
+
bar+="${RESET}"
|
|
339
|
+
|
|
340
|
+
echo -e " ${BLUE}●${RESET} ${BOLD}${task_team}${RESET} ${bar} ${pct}% ${DIM}(${completed}/${total} done)${RESET}"
|
|
341
|
+
|
|
342
|
+
# Show breakdown if there are active tasks
|
|
343
|
+
details=""
|
|
344
|
+
[[ $in_progress -gt 0 ]] && details+="${GREEN}${in_progress} active${RESET} "
|
|
345
|
+
[[ $pending -gt 0 ]] && details+="${YELLOW}${pending} pending${RESET} "
|
|
346
|
+
[[ $completed -gt 0 ]] && details+="${DIM}${completed} done${RESET}"
|
|
347
|
+
[[ -n "$details" ]] && echo -e " ${DIM}└─${RESET} ${details}"
|
|
348
|
+
else
|
|
349
|
+
echo -e " ${DIM}●${RESET} ${BOLD}${task_team}${RESET} ${DIM}(no tasks)${RESET}"
|
|
350
|
+
fi
|
|
351
|
+
done < <(find "$TASKS_DIR" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
|
|
352
|
+
fi
|
|
353
|
+
|
|
354
|
+
if ! $HAS_TASKS; then
|
|
355
|
+
echo -e " ${DIM}No task lists found.${RESET}"
|
|
356
|
+
fi
|
|
357
|
+
|
|
358
|
+
# ─── 4. Daemon Pipelines ──────────────────────────────────────────────────
|
|
359
|
+
|
|
360
|
+
DAEMON_DIR="${HOME}/.shipwright"
|
|
361
|
+
STATE_FILE="${DAEMON_DIR}/daemon-state.json"
|
|
362
|
+
PID_FILE="${DAEMON_DIR}/daemon.pid"
|
|
363
|
+
EVENTS_FILE="${DAEMON_DIR}/events.jsonl"
|
|
364
|
+
HAS_DAEMON=false
|
|
365
|
+
|
|
366
|
+
if [[ -f "$STATE_FILE" ]]; then
|
|
367
|
+
# Check daemon process
|
|
368
|
+
daemon_pid=""
|
|
369
|
+
daemon_running=false
|
|
370
|
+
if [[ -f "$PID_FILE" ]]; then
|
|
371
|
+
daemon_pid=$(cat "$PID_FILE" 2>/dev/null || true)
|
|
372
|
+
if [[ -n "$daemon_pid" ]] && kill -0 "$daemon_pid" 2>/dev/null; then
|
|
373
|
+
daemon_running=true
|
|
374
|
+
fi
|
|
375
|
+
fi
|
|
376
|
+
|
|
377
|
+
active_count=$(jq -r '.active_jobs | length' "$STATE_FILE" 2>/dev/null || echo 0)
|
|
378
|
+
queue_count=$(jq -r '.queued | length' "$STATE_FILE" 2>/dev/null || echo 0)
|
|
379
|
+
completed_count=$(jq -r '.completed | length' "$STATE_FILE" 2>/dev/null || echo 0)
|
|
380
|
+
|
|
381
|
+
if $daemon_running || [[ "$active_count" -gt 0 ]] || [[ "$queue_count" -gt 0 ]] || [[ "$completed_count" -gt 0 ]]; then
|
|
382
|
+
HAS_DAEMON=true
|
|
383
|
+
echo ""
|
|
384
|
+
echo -e "${PURPLE}${BOLD} DAEMON PIPELINES${RESET} ${DIM}~/.shipwright/${RESET}"
|
|
385
|
+
echo -e "${DIM} ──────────────────────────────────────────${RESET}"
|
|
386
|
+
|
|
387
|
+
# ── Daemon Health ──
|
|
388
|
+
if $daemon_running; then
|
|
389
|
+
started_at=$(jq -r '.started_at // "unknown"' "$STATE_FILE" 2>/dev/null)
|
|
390
|
+
last_poll=$(jq -r '.last_poll // "never"' "$STATE_FILE" 2>/dev/null)
|
|
391
|
+
# Calculate uptime
|
|
392
|
+
uptime_str=""
|
|
393
|
+
if [[ "$started_at" != "unknown" && "$started_at" != "null" ]]; then
|
|
394
|
+
start_epoch=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$started_at" +%s 2>/dev/null || echo 0)
|
|
395
|
+
if [[ "$start_epoch" -gt 0 ]]; then
|
|
396
|
+
now_e=$(date +%s)
|
|
397
|
+
elapsed=$((now_e - start_epoch))
|
|
398
|
+
if [[ "$elapsed" -ge 3600 ]]; then
|
|
399
|
+
uptime_str=$(printf "%dh %dm" $((elapsed/3600)) $((elapsed%3600/60)))
|
|
400
|
+
elif [[ "$elapsed" -ge 60 ]]; then
|
|
401
|
+
uptime_str=$(printf "%dm %ds" $((elapsed/60)) $((elapsed%60)))
|
|
402
|
+
else
|
|
403
|
+
uptime_str=$(printf "%ds" "$elapsed")
|
|
404
|
+
fi
|
|
405
|
+
fi
|
|
406
|
+
fi
|
|
407
|
+
echo -e " ${GREEN}●${RESET} ${BOLD}Running${RESET} ${DIM}PID:${daemon_pid}${RESET} ${DIM}up:${uptime_str:-?}${RESET} ${DIM}poll:${last_poll}${RESET}"
|
|
408
|
+
else
|
|
409
|
+
echo -e " ${RED}●${RESET} ${BOLD}Stopped${RESET}"
|
|
410
|
+
fi
|
|
411
|
+
|
|
412
|
+
# ── Active Jobs ──
|
|
413
|
+
if [[ "$active_count" -gt 0 ]]; then
|
|
414
|
+
echo ""
|
|
415
|
+
echo -e " ${BOLD}Active Jobs (${active_count})${RESET}"
|
|
416
|
+
while IFS= read -r job; do
|
|
417
|
+
[[ -z "$job" ]] && continue
|
|
418
|
+
a_issue=$(echo "$job" | jq -r '.issue')
|
|
419
|
+
a_title=$(echo "$job" | jq -r '.title // ""')
|
|
420
|
+
a_worktree=$(echo "$job" | jq -r '.worktree // ""')
|
|
421
|
+
a_started=$(echo "$job" | jq -r '.started_at // ""')
|
|
422
|
+
a_goal=$(echo "$job" | jq -r '.goal // ""')
|
|
423
|
+
|
|
424
|
+
# Look up title from title cache if empty
|
|
425
|
+
if [[ -z "$a_title" ]]; then
|
|
426
|
+
a_title=$(jq -r --arg n "$a_issue" '.titles[$n] // ""' "$STATE_FILE" 2>/dev/null || true)
|
|
427
|
+
fi
|
|
428
|
+
|
|
429
|
+
# Time elapsed
|
|
430
|
+
age_str=""
|
|
431
|
+
if [[ -n "$a_started" && "$a_started" != "null" ]]; then
|
|
432
|
+
s_epoch=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$a_started" +%s 2>/dev/null || echo 0)
|
|
433
|
+
if [[ "$s_epoch" -gt 0 ]]; then
|
|
434
|
+
now_e=$(date +%s)
|
|
435
|
+
el=$((now_e - s_epoch))
|
|
436
|
+
if [[ "$el" -ge 3600 ]]; then
|
|
437
|
+
age_str=$(printf "%dh %dm" $((el/3600)) $((el%3600/60)))
|
|
438
|
+
elif [[ "$el" -ge 60 ]]; then
|
|
439
|
+
age_str=$(printf "%dm %ds" $((el/60)) $((el%60)))
|
|
440
|
+
else
|
|
441
|
+
age_str=$(printf "%ds" "$el")
|
|
442
|
+
fi
|
|
443
|
+
fi
|
|
444
|
+
fi
|
|
445
|
+
|
|
446
|
+
# Read enriched pipeline state from worktree
|
|
447
|
+
stage_str=""
|
|
448
|
+
stage_desc=""
|
|
449
|
+
stage_progress=""
|
|
450
|
+
goal_from_state=""
|
|
451
|
+
if [[ -n "$a_worktree" && -f "${a_worktree}/.claude/pipeline-state.md" ]]; then
|
|
452
|
+
ps_file="${a_worktree}/.claude/pipeline-state.md"
|
|
453
|
+
stage_str=$(grep -E '^current_stage:' "$ps_file" 2>/dev/null | head -1 | sed 's/^current_stage:[[:space:]]*//' || true)
|
|
454
|
+
stage_desc=$(grep -E '^current_stage_description:' "$ps_file" 2>/dev/null | head -1 | sed 's/^current_stage_description:[[:space:]]*"//;s/"$//' || true)
|
|
455
|
+
stage_progress=$(grep -E '^stage_progress:' "$ps_file" 2>/dev/null | head -1 | sed 's/^stage_progress:[[:space:]]*"//;s/"$//' || true)
|
|
456
|
+
goal_from_state=$(grep -E '^goal:' "$ps_file" 2>/dev/null | head -1 | sed 's/^goal:[[:space:]]*"//;s/"$//' || true)
|
|
457
|
+
fi
|
|
458
|
+
|
|
459
|
+
# Use goal from state file if not in daemon job data
|
|
460
|
+
display_goal="${a_goal:-$goal_from_state}"
|
|
461
|
+
|
|
462
|
+
# Title line
|
|
463
|
+
echo -e " ${CYAN}#${a_issue}${RESET} ${BOLD}${a_title}${RESET}"
|
|
464
|
+
|
|
465
|
+
# Goal line (if different from title)
|
|
466
|
+
if [[ -n "$display_goal" && "$display_goal" != "$a_title" ]]; then
|
|
467
|
+
echo -e " ${DIM}Delivering: ${display_goal}${RESET}"
|
|
468
|
+
fi
|
|
469
|
+
|
|
470
|
+
# Stage + description line
|
|
471
|
+
if [[ -n "$stage_str" ]]; then
|
|
472
|
+
stage_icon="🔄"
|
|
473
|
+
stage_line=" ${stage_icon} ${BLUE}${stage_str}${RESET}"
|
|
474
|
+
[[ -n "$stage_desc" ]] && stage_line="${stage_line} ${DIM}— ${stage_desc}${RESET}"
|
|
475
|
+
echo -e "$stage_line"
|
|
476
|
+
fi
|
|
477
|
+
|
|
478
|
+
# Inline progress bar from stage_progress
|
|
479
|
+
if [[ -n "$stage_progress" ]]; then
|
|
480
|
+
progress_bar=""
|
|
481
|
+
entry=""
|
|
482
|
+
# Parse space-separated "stage:status" pairs
|
|
483
|
+
for entry in $stage_progress; do
|
|
484
|
+
s_name="${entry%%:*}"
|
|
485
|
+
s_stat="${entry#*:}"
|
|
486
|
+
s_icon=""
|
|
487
|
+
case "$s_stat" in
|
|
488
|
+
complete) s_icon="✅" ;;
|
|
489
|
+
running) s_icon="🔄" ;;
|
|
490
|
+
failed) s_icon="❌" ;;
|
|
491
|
+
*) s_icon="⬜" ;;
|
|
492
|
+
esac
|
|
493
|
+
if [[ -n "$progress_bar" ]]; then
|
|
494
|
+
progress_bar="${progress_bar} → ${s_icon}${s_name}"
|
|
495
|
+
else
|
|
496
|
+
progress_bar="${s_icon}${s_name}"
|
|
497
|
+
fi
|
|
498
|
+
done
|
|
499
|
+
echo -e " ${DIM}${progress_bar}${RESET}"
|
|
500
|
+
fi
|
|
501
|
+
|
|
502
|
+
# Elapsed time
|
|
503
|
+
[[ -n "$age_str" ]] && echo -e " ${DIM}Elapsed: ${age_str}${RESET}"
|
|
504
|
+
done < <(jq -c '.active_jobs[]' "$STATE_FILE" 2>/dev/null)
|
|
505
|
+
fi
|
|
506
|
+
|
|
507
|
+
# ── Queued Issues ──
|
|
508
|
+
if [[ "$queue_count" -gt 0 ]]; then
|
|
509
|
+
echo ""
|
|
510
|
+
echo -e " ${BOLD}Queued (${queue_count})${RESET}"
|
|
511
|
+
while read -r q_num; do
|
|
512
|
+
[[ -z "$q_num" ]] && continue
|
|
513
|
+
q_title=$(jq -r --arg n "$q_num" '.titles[$n] // ""' "$STATE_FILE" 2>/dev/null || true)
|
|
514
|
+
title_display=""
|
|
515
|
+
[[ -n "$q_title" ]] && title_display=" ${q_title}"
|
|
516
|
+
echo -e " ${YELLOW}#${q_num}${RESET}${title_display}"
|
|
517
|
+
done < <(jq -r '.queued[]' "$STATE_FILE" 2>/dev/null)
|
|
518
|
+
fi
|
|
519
|
+
|
|
520
|
+
# ── Recent Completions ──
|
|
521
|
+
if [[ "$completed_count" -gt 0 ]]; then
|
|
522
|
+
echo ""
|
|
523
|
+
echo -e " ${BOLD}Recent Completions${RESET}"
|
|
524
|
+
while IFS=$'\t' read -r c_num c_result c_dur c_at; do
|
|
525
|
+
[[ -z "$c_num" ]] && continue
|
|
526
|
+
if [[ "$c_result" == "success" ]]; then
|
|
527
|
+
c_icon="${GREEN}✓${RESET}"
|
|
528
|
+
else
|
|
529
|
+
c_icon="${RED}✗${RESET}"
|
|
530
|
+
fi
|
|
531
|
+
echo -e " ${c_icon} ${CYAN}#${c_num}${RESET} ${c_result} ${DIM}(${c_dur})${RESET}"
|
|
532
|
+
done < <(jq -r '.completed | reverse | .[:5][] | "\(.issue)\t\(.result)\t\(.duration // "—")\t\(.completed_at // "")"' "$STATE_FILE" 2>/dev/null)
|
|
533
|
+
fi
|
|
534
|
+
|
|
535
|
+
# ── Recent Activity (from events.jsonl) ──
# Shows the last 8 interesting daemon events (spawns, reaps, stage
# transitions, polls) with an HH:MM UTC timestamp per line.
if [[ -f "$EVENTS_FILE" ]]; then
  # Get last 8 relevant events (spawns, stage changes, completions).
  # Only scan the tail of the log so this stays fast on large files.
  recent_events=$(tail -200 "$EVENTS_FILE" 2>/dev/null | \
    grep -E '"type":"(daemon\.spawn|daemon\.reap|stage\.(started|completed)|daemon\.poll)"' 2>/dev/null | \
    tail -8 || true)
  if [[ -n "$recent_events" ]]; then
    echo ""
    echo -e " ${BOLD}Recent Activity${RESET}"
    while IFS= read -r evt; do
      [[ -z "$evt" ]] && continue
      evt_ts=$(echo "$evt" | jq -r '.ts // ""' 2>/dev/null)
      evt_type=$(echo "$evt" | jq -r '.type // ""' 2>/dev/null)
      evt_issue=$(echo "$evt" | jq -r '.issue // ""' 2>/dev/null)

      # Format timestamp as HH:MM. Try BSD date (-j -f, macOS) first,
      # then GNU date (-d, Linux); previously only the BSD form was
      # attempted, so timestamps rendered empty on Linux.
      evt_time=""
      if [[ -n "$evt_ts" && "$evt_ts" != "null" ]]; then
        evt_time=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$evt_ts" +"%H:%M" 2>/dev/null \
          || TZ=UTC date -d "$evt_ts" +"%H:%M" 2>/dev/null \
          || echo "")
      fi

      case "$evt_type" in
        daemon.spawn)
          echo -e " ${DIM}${evt_time}${RESET} ${GREEN}↳${RESET} Spawned pipeline for #${evt_issue}"
          ;;
        daemon.reap)
          evt_result=$(echo "$evt" | jq -r '.result // ""' 2>/dev/null)
          evt_dur=$(echo "$evt" | jq -r '.duration_s // 0' 2>/dev/null)
          # Humanize duration: h/m above an hour, m/s above a minute, else s.
          dur_display=""
          if [[ "$evt_dur" -gt 0 ]] 2>/dev/null; then
            if [[ "$evt_dur" -ge 3600 ]]; then
              dur_display=$(printf " (%dh %dm)" $((evt_dur/3600)) $((evt_dur%3600/60)))
            elif [[ "$evt_dur" -ge 60 ]]; then
              dur_display=$(printf " (%dm %ds)" $((evt_dur/60)) $((evt_dur%60)))
            else
              dur_display=$(printf " (%ds)" "$evt_dur")
            fi
          fi
          if [[ "$evt_result" == "success" ]]; then
            echo -e " ${DIM}${evt_time}${RESET} ${GREEN}●${RESET} #${evt_issue} completed${dur_display}"
          else
            echo -e " ${DIM}${evt_time}${RESET} ${RED}●${RESET} #${evt_issue} failed${dur_display}"
          fi
          ;;
        stage.started)
          evt_stage=$(echo "$evt" | jq -r '.stage // ""' 2>/dev/null)
          echo -e " ${DIM}${evt_time}${RESET} ${BLUE}●${RESET} #${evt_issue} started ${evt_stage}"
          ;;
        stage.completed)
          evt_stage=$(echo "$evt" | jq -r '.stage // ""' 2>/dev/null)
          echo -e " ${DIM}${evt_time}${RESET} ${DIM}●${RESET} #${evt_issue} completed ${evt_stage}"
          ;;
        daemon.poll)
          evt_found=$(echo "$evt" | jq -r '.issues_found // 0' 2>/dev/null)
          echo -e " ${DIM}${evt_time} ⟳ Polled — ${evt_found} issue(s) found${RESET}"
          ;;
      esac
    done <<< "$recent_events"
  fi
fi
|
|
595
|
+
fi
|
|
596
|
+
fi
|
|
597
|
+
|
|
598
|
+
# ─── Issue Tracker ─────────────────────────────────────────────────────────
# Shows which tracker provider (Linear/Jira) is configured, if any.

TRACKER_CONFIG="${HOME}/.shipwright/tracker-config.json"
if [[ -f "$TRACKER_CONFIG" ]]; then
  TRACKER_PROVIDER=$(jq -r '.provider // "none"' "$TRACKER_CONFIG" 2>/dev/null || echo "none")
  if [[ -n "$TRACKER_PROVIDER" && "$TRACKER_PROVIDER" != "none" ]]; then
    echo ""
    echo -e "${PURPLE}${BOLD} ISSUE TRACKER${RESET}"
    echo -e "${DIM} ──────────────────────────────────────────${RESET}"
    if [[ "$TRACKER_PROVIDER" == "linear" ]]; then
      echo -e " ${GREEN}●${RESET} ${BOLD}Linear${RESET} ${DIM}(run shipwright linear status for details)${RESET}"
    elif [[ "$TRACKER_PROVIDER" == "jira" ]]; then
      JIRA_URL=$(jq -r '.jira.base_url // ""' "$TRACKER_CONFIG" 2>/dev/null || true)
      echo -e " ${GREEN}●${RESET} ${BOLD}Jira${RESET} ${DIM}${JIRA_URL}${RESET} ${DIM}(run shipwright jira status for details)${RESET}"
    fi
  fi
fi
|
|
618
|
+
|
|
619
|
+
# ─── Agent Heartbeats ──────────────────────────────────────────────────────
# Renders one entry per heartbeat file in ~/.shipwright/heartbeats: liveness
# (pid check), issue/stage/iteration, staleness of the last update, and
# memory use. Sets HAS_HEARTBEATS for the footer summary.

HEARTBEAT_DIR="$HOME/.shipwright/heartbeats"
HAS_HEARTBEATS=false

if [[ -d "$HEARTBEAT_DIR" ]]; then
  # Count heartbeat files up front so the header can show the total.
  hb_count=0
  for hb_file in "${HEARTBEAT_DIR}"/*.json; do
    [[ -f "$hb_file" ]] || continue
    hb_count=$((hb_count + 1))
  done

  if [[ "$hb_count" -gt 0 ]]; then
    HAS_HEARTBEATS=true
    echo ""
    echo -e "${PURPLE}${BOLD} AGENT HEARTBEATS${RESET} ${DIM}(${hb_count} active)${RESET}"
    echo -e "${DIM} ──────────────────────────────────────────${RESET}"

    for hb_file in "${HEARTBEAT_DIR}"/*.json; do
      [[ -f "$hb_file" ]] || continue
      local_job_id="$(basename "$hb_file" .json)"
      hb_pid=$(jq -r '.pid // ""' "$hb_file" 2>/dev/null || true)
      hb_stage=$(jq -r '.stage // ""' "$hb_file" 2>/dev/null || true)
      hb_issue=$(jq -r '.issue // ""' "$hb_file" 2>/dev/null || true)
      hb_iter=$(jq -r '.iteration // ""' "$hb_file" 2>/dev/null || true)
      hb_activity=$(jq -r '.last_activity // ""' "$hb_file" 2>/dev/null || true)
      hb_updated=$(jq -r '.updated_at // ""' "$hb_file" 2>/dev/null || true)
      hb_mem=$(jq -r '.memory_mb // 0' "$hb_file" 2>/dev/null || true)

      # Check if process is still alive (kill -0 probes without signaling)
      hb_alive=false
      if [[ -n "$hb_pid" && "$hb_pid" != "null" ]] && kill -0 "$hb_pid" 2>/dev/null; then
        hb_alive=true
      fi

      # Calculate age of the last heartbeat update. Try BSD date (-j -f,
      # macOS) first, then GNU date (-d, Linux); previously only the BSD
      # form was attempted, so staleness never rendered on Linux.
      hb_age_str=""
      if [[ -n "$hb_updated" && "$hb_updated" != "null" ]]; then
        hb_epoch=$(TZ=UTC date -j -f "%Y-%m-%dT%H:%M:%SZ" "$hb_updated" +%s 2>/dev/null \
          || TZ=UTC date -d "$hb_updated" +%s 2>/dev/null \
          || echo 0)
        if [[ "$hb_epoch" -gt 0 ]]; then
          now_e=$(date +%s)
          hb_age=$((now_e - hb_epoch))
          # No update for 2 minutes ⇒ flag the agent as stale.
          if [[ "$hb_age" -ge 120 ]]; then
            hb_age_str="${RED}${hb_age}s ago (STALE)${RESET}"
          else
            hb_age_str="${DIM}${hb_age}s ago${RESET}"
          fi
        fi
      fi

      if $hb_alive; then
        hb_icon="${GREEN}●${RESET}"
      else
        hb_icon="${RED}●${RESET}"
      fi

      echo -e " ${hb_icon} ${BOLD}${local_job_id}${RESET} ${DIM}pid:${hb_pid}${RESET}"
      detail_line=" "
      [[ -n "$hb_issue" && "$hb_issue" != "null" && "$hb_issue" != "0" ]] && detail_line+="${CYAN}#${hb_issue}${RESET} "
      [[ -n "$hb_stage" && "$hb_stage" != "null" ]] && detail_line+="${BLUE}${hb_stage}${RESET} "
      [[ -n "$hb_iter" && "$hb_iter" != "null" ]] && detail_line+="${DIM}iter:${hb_iter}${RESET} "
      [[ -n "$hb_age_str" ]] && detail_line+="${hb_age_str} "
      # Guard -gt with an integer check: memory_mb may be a float or "null",
      # which would make [[ -gt ]] spray a syntax error to stderr.
      [[ "${hb_mem:-0}" =~ ^[0-9]+$ && "$hb_mem" -gt 0 ]] && detail_line+="${DIM}${hb_mem}MB${RESET}"
      echo -e "$detail_line"
      [[ -n "$hb_activity" && "$hb_activity" != "null" ]] && echo -e " ${DIM}${hb_activity}${RESET}"
    done
  fi
fi
|
|
687
|
+
|
|
688
|
+
# ─── Remote Machines ──────────────────────────────────────────────────────
# Lists machines registered in ~/.shipwright/machines.json with their
# advertised capacity (cores, memory, worker slots).

MACHINES_FILE="$HOME/.shipwright/machines.json"
if [[ -f "$MACHINES_FILE" ]]; then
  machine_count=$(jq '.machines | length' "$MACHINES_FILE" 2>/dev/null || echo 0)
  if [[ "$machine_count" -gt 0 ]]; then
    echo ""
    echo -e "${PURPLE}${BOLD} REMOTE MACHINES${RESET} ${DIM}(${machine_count} registered)${RESET}"
    echo -e "${DIM} ──────────────────────────────────────────${RESET}"

    while IFS= read -r m_json; do
      [[ -n "$m_json" ]] || continue
      m_name=$(jq -r '.name // ""' <<<"$m_json")
      m_host=$(jq -r '.host // ""' <<<"$m_json")
      m_cores=$(jq -r '.cores // "?"' <<<"$m_json")
      m_mem=$(jq -r '.memory_gb // "?"' <<<"$m_json")
      m_workers=$(jq -r '.max_workers // "?"' <<<"$m_json")

      echo -e " ${BLUE}●${RESET} ${BOLD}${m_name}${RESET} ${DIM}${m_host}${RESET} ${DIM}cores:${m_cores} mem:${m_mem}GB workers:${m_workers}${RESET}"
    done < <(jq -c '.machines[]' "$MACHINES_FILE" 2>/dev/null)
  fi
fi
|
|
710
|
+
|
|
711
|
+
# ─── Connected Developers ─────────────────────────────────────────────────
# Queries the team dashboard's /api/team endpoint and renders a presence
# table; degrades gracefully when the dashboard, curl, or jq are missing.

if command -v curl &>/dev/null && command -v jq &>/dev/null; then
  # Dashboard URL comes from team config when present, else the default port.
  TEAM_CONFIG="${HOME}/.shipwright/team-config.json"
  DASHBOARD_URL=""
  if [[ -f "$TEAM_CONFIG" ]]; then
    DASHBOARD_URL=$(jq -r '.dashboard_url // ""' "$TEAM_CONFIG" 2>/dev/null || true)
  fi
  [[ -z "$DASHBOARD_URL" ]] && DASHBOARD_URL="http://localhost:8767"

  # Try to reach the dashboard /api/team endpoint with 3s timeout
  api_response=$(curl -s --max-time 3 "$DASHBOARD_URL/api/team" 2>/dev/null || true)

  # Proceed only if the body is non-empty and parses as JSON.
  if [[ -n "$api_response" ]] && jq empty <<<"$api_response" 2>/dev/null; then
    echo ""
    echo -e "${PURPLE}${BOLD} CONNECTED DEVELOPERS${RESET}"
    echo -e "${DIM} ──────────────────────────────────────────${RESET}"

    total_online=$(jq -r '.total_online // 0' <<<"$api_response" 2>/dev/null)
    dev_count=$(jq '.developers | length' <<<"$api_response" 2>/dev/null || echo 0)

    if [[ "$dev_count" -gt 0 ]]; then
      while IFS= read -r dev_row; do
        [[ -n "$dev_row" ]] || continue

        dev_id=$(jq -r '.developer_id // "?"' <<<"$dev_row")
        dev_machine=$(jq -r '.machine_name // "?"' <<<"$dev_row")
        dev_status=$(jq -r '.status // "offline"' <<<"$dev_row")
        active_jobs=$(jq '.active_jobs | length' <<<"$dev_row" 2>/dev/null || echo 0)
        queued=$(jq '.queued | length' <<<"$dev_row" 2>/dev/null || echo 0)

        # Presence indicator and colored label
        if [[ "$dev_status" == "online" ]]; then
          status_icon="${GREEN}●${RESET}"
          status_label="${GREEN}online${RESET}"
        elif [[ "$dev_status" == "idle" ]]; then
          status_icon="${YELLOW}●${RESET}"
          status_label="${YELLOW}idle${RESET}"
        else
          status_icon="${DIM}●${RESET}"
          status_label="${DIM}offline${RESET}"
        fi

        echo -e " ${status_icon} ${BOLD}${dev_id}${RESET} ${DIM}${dev_machine}${RESET} ${status_label} ${DIM}active:${active_jobs} queued:${queued}${RESET}"
      done < <(jq -c '.developers[]' <<<"$api_response" 2>/dev/null)

      # Display total online count
      echo -e " ${DIM}────────────────────────────────────────────${RESET}"
      echo -e " ${DIM}Total online: ${GREEN}${total_online}${RESET}${DIM} / ${dev_count}${RESET}"
    else
      echo -e " ${DIM}No developers connected${RESET}"
    fi
  else
    # Dashboard not reachable — show dim message
    echo ""
    echo -e "${PURPLE}${BOLD} CONNECTED DEVELOPERS${RESET}"
    echo -e "${DIM} ──────────────────────────────────────────${RESET}"
    echo -e " ${DIM}Dashboard not reachable (${DASHBOARD_URL})${RESET}"
  fi
elif [[ -f "$HOME/.shipwright/team-config.json" ]] || [[ -f "$HOME/.shipwright/daemon-state.json" ]]; then
  # Shipwright config exists but curl/jq are missing — explain why the
  # section is empty instead of silently skipping it.
  echo ""
  echo -e "${PURPLE}${BOLD} CONNECTED DEVELOPERS${RESET}"
  echo -e "${DIM} ──────────────────────────────────────────${RESET}"
  echo -e " ${DIM}curl or jq not available to check dashboard${RESET}"
fi
|
|
786
|
+
|
|
787
|
+
# ─── Footer ──────────────────────────────────────────────────────────────────
# Closing hint: cleanup/new-session commands when anything is active,
# otherwise a getting-started prompt.

echo ""
echo -e "${DIM} ──────────────────────────────────────────${RESET}"
# NB: the HAS_* flags are executed as commands ("true"/"false"), so an
# unset flag counts as active — preserved from the original logic.
footer_msg=" ${DIM}No active teams. Start one:${RESET} ${CYAN}shipwright session <name>${RESET}"
if $HAS_CLAUDE_WINDOWS || $HAS_TEAMS || $HAS_TASKS || $HAS_DAEMON || ${HAS_HEARTBEATS:-false}; then
  footer_msg=" ${DIM}Clean up:${RESET} ${CYAN}shipwright cleanup${RESET} ${DIM}|${RESET} ${DIM}New session:${RESET} ${CYAN}shipwright session <name>${RESET}"
fi
echo -e "$footer_msg"
echo ""
|