codeswarm 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.codeswarm/skills/prd_template.md +98 -0
- package/AGENT_TIPS.md +206 -0
- package/BROWSER_TESTING.md +177 -0
- package/COORDINATOR.md +151 -0
- package/LICENSE +21 -0
- package/README.md +253 -0
- package/TASK_PROTOCOL.md +111 -0
- package/WORKFLOWS.md +174 -0
- package/bin/codeswarm.js +15 -0
- package/config.yaml +55 -0
- package/coordinator.sh +1762 -0
- package/dashboard/package-lock.json +1036 -0
- package/dashboard/package.json +14 -0
- package/dashboard/public/index.html +758 -0
- package/dashboard/server.js +444 -0
- package/docs/prd-example.md +90 -0
- package/docs/prd-template.md +45 -0
- package/orchestrate.sh +467 -0
- package/package.json +62 -0
- package/playwright.config.ts +19 -0
- package/setup.sh +142 -0
package/coordinator.sh
ADDED
|
@@ -0,0 +1,1762 @@
|
|
|
1
|
+
#!/usr/bin/env bash
#
# coordinator.sh — Dynamic Multi-Agent Coordinator v7.0
#
# DESIGN:
# Planner-driven loop. The PLANNER agent is the brain — it decides
# what happens next by writing DIRECTIVE files. The coordinator reads
# directives and dispatches work to executor/reviewer agents.
#
# --prd file.md → Use PRD (auto-detects format, normalizes if needed)
# --plan file.md → Use existing plan (auto-detects PRD format too)
# --task "..." → Planner creates PRD from scratch
#
# Planner writes directives: EXECUTE, REVIEW, APPROVE, SKIP, DONE
# Coordinator dispatches, collects results, feeds back to planner.
#
# PRD MODE: When input is a PRD (has user stories + acceptance criteria),
# the planner references acceptance criteria in EXECUTE/REVIEW prompts,
# and reviewers verify each criterion explicitly.
#

# NOTE: the header block above doubles as the --help text (`head -16 | tail -14`
# prints lines 3-16), so its line layout must not change.
# Strict mode: exit on error (-e), error on unset vars (-u), and propagate
# pipeline failures (pipefail). nullglob makes unmatched globs expand to
# nothing — required by the skill-discovery loops over *.md.
set -euo pipefail
shopt -s nullglob
|
|
24
|
+
|
|
25
|
+
# ─── Colors ─────────────────────────────────────────────
# ANSI escape sequences used with `echo -e`; NC resets all attributes.
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
BLUE='\033[0;34m'; CYAN='\033[0;36m'; MAGENTA='\033[0;35m'
BOLD='\033[1m'; DIM='\033[2m'; NC='\033[0m'

# ─── Defaults ───────────────────────────────────────────
# All of these can be overridden by the CLI flags parsed below.
PROJECT=""
TASK=""
PLAN_FILE=""          # --plan: use existing plan file
PRD_FILE=""           # --prd: use PRD (Product Requirements Document)
PRD_SOURCE="none"     # none, prd, plan, task — how we got the task file
PLANNER="codex"       # agent that drives the directive loop
EXECUTOR="claude"     # agent that implements backend subtasks
REVIEWERS="gemini"    # comma-separated: "gemini,amp"
MAX_ROUNDS=30         # safety limit for planner loop
MAX_RETRIES=3         # retries per subtask before planner moves on
DRY_RUN=false
CONTEXT_FILES=""      # comma-separated extra files inlined into prompts
SKILL_FILES=""        # comma-separated extra skill files (see discover_skills)
REPLAN=false
SPLIT_VIEW=false      # --split: show agents in tmux split panes
USE_TMUX=false        # --tmux: use tmux sessions (default: direct bg processes)

# Frontend mode
FRONTEND=false
FRONTEND_DEV=""       # --frontend-dev: agent for frontend implementation
FRONTEND_REVIEWER=""  # --frontend-reviewer: agent(s) for frontend review (comma-separated)
BACKEND_CMD=""        # --backend-cmd: command to start backend (e.g. "mvn spring-boot:run")
FRONTEND_CMD=""       # --frontend-cmd: command to start frontend (e.g. "ng serve")
BACKEND_URL=""        # --backend-url: URL to check backend readiness (e.g. http://localhost:8096)
FRONTEND_URL=""       # --frontend-url: URL to check frontend readiness (e.g. http://localhost:4200)
DASHBOARD=false       # --dashboard: auto-start live monitoring dashboard
DASHBOARD_PORT=3777
|
|
58
|
+
|
|
59
|
+
# ─── Parse Arguments ────────────────────────────────────
# NOTE(review): value-taking flags read "$2" without checking it exists; a
# trailing bare flag aborts with an unbound-variable error under `set -u`.
while [[ $# -gt 0 ]]; do
  case $1 in
    --project) PROJECT="$2"; shift 2 ;;
    --task) TASK="$2"; shift 2 ;;
    --plan) PLAN_FILE="$2"; shift 2 ;;
    --prd) PRD_FILE="$2"; shift 2 ;;
    --planner) PLANNER="$2"; shift 2 ;;
    --executor) EXECUTOR="$2"; shift 2 ;;
    --reviewer|--reviewers) REVIEWERS="$2"; shift 2 ;;
    --max-rounds) MAX_ROUNDS="$2"; shift 2 ;;
    --max-retries) MAX_RETRIES="$2"; shift 2 ;;
    --context) CONTEXT_FILES="$2"; shift 2 ;;
    --skills) SKILL_FILES="$2"; shift 2 ;;
    --replan) REPLAN=true; shift ;;
    --split) SPLIT_VIEW=true; shift ;;
    --frontend) FRONTEND=true; shift ;;
    --frontend-dev) FRONTEND_DEV="$2"; FRONTEND=true; shift 2 ;;
    --frontend-reviewer|--frontend-reviewers) FRONTEND_REVIEWER="$2"; FRONTEND=true; shift 2 ;;
    --backend-cmd) BACKEND_CMD="$2"; shift 2 ;;
    --frontend-cmd) FRONTEND_CMD="$2"; shift 2 ;;
    --backend-url) BACKEND_URL="$2"; shift 2 ;;
    --frontend-url) FRONTEND_URL="$2"; shift 2 ;;
    --dashboard) DASHBOARD=true; shift ;;
    --dashboard-port) DASHBOARD=true; DASHBOARD_PORT="$2"; shift 2 ;;
    --tmux) USE_TMUX=true; shift ;;
    --dry-run) DRY_RUN=true; shift ;;
    # Prints lines 3-16 of this file, i.e. the header comment block.
    -h|--help) head -16 "$0" | tail -14; exit 0 ;;
    *) echo -e "${RED}Unknown: $1${NC}"; exit 1 ;;
  esac
done

[[ -z "$PROJECT" ]] && { echo -e "${RED}--project required${NC}"; exit 1; }
[[ -z "$TASK" && -z "$PLAN_FILE" && -z "$PRD_FILE" ]] && { echo -e "${RED}Either --task, --plan, or --prd required${NC}"; exit 1; }
# Fail with a clear message when the project path is bad. Previously this was
# `PROJECT=$(cd "$PROJECT" 2>/dev/null && pwd)`, which — under `set -e` — made
# the script die silently (no output at all) on a nonexistent directory.
[[ -d "$PROJECT" ]] || { echo -e "${RED}Project directory not found: ${PROJECT}${NC}"; exit 1; }
PROJECT=$(cd "$PROJECT" && pwd)   # canonicalize to an absolute path
|
|
94
|
+
|
|
95
|
+
# ─── Session Setup ──────────────────────────────────────
# All artifacts live under <project>/.codeswarm; each run gets its own
# timestamped session directory holding prompts, logs and planner directives.
ARTIFACTS="${PROJECT}/.codeswarm"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
SESSION_ID="session_${TIMESTAMP}"
SESSION_DIR="${ARTIFACTS}/sessions/${SESSION_ID}"
TASK_FILE="${ARTIFACTS}/task.md"          # shared task file (written later in the flow)
STATE_FILE="${SESSION_DIR}/state.md"      # planner-visible state between rounds
DIRECTIVES_DIR="${SESSION_DIR}/directives"

mkdir -p "$SESSION_DIR"
mkdir -p "$DIRECTIVES_DIR"
SKILLS_DIR="${SESSION_DIR}/skills"        # per-agent discovered-skill caches
mkdir -p "$SKILLS_DIR"

# ─── Tmux (opt-in only: --tmux flag) ─────────────────────
# tmux strips shell environment (PATH, conda, API keys) which breaks agents.
# Only enable with explicit --tmux flag. Default: direct background processes.
TMUX_SESSION="codeswarm-${TIMESTAMP}"
if $USE_TMUX && command -v tmux &>/dev/null; then
  # `|| true`: the session may already exist; that's fine.
  tmux new-session -d -s "$TMUX_SESSION" -n "coord" -x 200 -y 50 2>/dev/null || true
else
  # Silently downgrade to direct background processes when tmux is missing.
  USE_TMUX=false
fi
|
|
118
|
+
|
|
119
|
+
# ─── Logging ──────────────────────────────────────────────
# Each helper prints a colored line to the terminal and appends the plain
# (uncolored) text to the session log file.
LOG_FILE="${SESSION_DIR}/coordinator.log"

log() {
  echo -e "${CYAN}[$(date +%H:%M:%S)] $1${NC}"
  echo "[$(date +%H:%M:%S)] $1" >> "$LOG_FILE"
}

success() {
  echo -e "${GREEN}✓ $1${NC}"
  echo "✓ $1" >> "$LOG_FILE"
}

warn() {
  echo -e "${YELLOW}⚠ $1${NC}"
  echo "⚠ $1" >> "$LOG_FILE"
}

error() {
  echo -e "${RED}✗ $1${NC}"
  echo "✗ $1" >> "$LOG_FILE"
}
|
|
125
|
+
|
|
126
|
+
# ═══════════════════════════════════════════════════════════
# RETRY WRAPPER — embedded into generated agent scripts
# Handles transient API errors (e.g. "No messages returned")
# ═══════════════════════════════════════════════════════════
# This string holds the source of a `_retry` function; it is pasted verbatim
# into each generated .run_*.sh script (and eval'd where needed) so retries
# work inside isolated tmux sessions too. Usage: _retry MAX DELAY cmd args...
# Waits DELAY*attempt seconds between attempts (linear backoff).
RETRY_FUNC='
_retry() {
  local max_retries="$1"; shift
  local retry_delay="$1"; shift
  local attempt
  local exit_code=0
  for (( attempt = 1; attempt <= max_retries; attempt++ )); do
    exit_code=0
    if "$@"; then
      return 0
    else
      exit_code=$?
    fi
    if (( attempt < max_retries )); then
      local wait_secs=$(( retry_delay * attempt ))
      echo ""
      echo "[codeswarm] ⚠️ Attempt ${attempt}/${max_retries} failed (exit ${exit_code}). Retrying in ${wait_secs}s..."
      echo ""
      sleep $wait_secs
    fi
  done
  echo "[codeswarm] ✗ All ${max_retries} attempts failed (last exit ${exit_code})"
  return $exit_code
}
'
|
|
153
|
+
|
|
154
|
+
# ═══════════════════════════════════════════════════════════
# SEND PROMPT TO AGENT (with watchdog heartbeat)
# ═══════════════════════════════════════════════════════════
STALE_TIMEOUT=${STALE_TIMEOUT:-300}   # 5 min no output → kill (env-overridable)
HEARTBEAT_INTERVAL=30                 # seconds between watchdog polls

MSG_SEQ=0   # monotonically increasing counter; keys every per-call artifact file

#######################################
# Dispatch one prompt to a CLI agent and supervise it to completion.
# Globals:   SESSION_DIR, PROJECT, TMUX_SESSION, USE_TMUX, SPLIT_VIEW, DRY_RUN,
#            RETRY_FUNC, STALE_TIMEOUT, HEARTBEAT_INTERVAL, MSG_SEQ (incremented),
#            color vars; calls log/warn/error/success.
# Arguments: $1 agent name (claude|gemini|codex|amp|opencode)
#            $2 prompt text (written to a file; the agent is told to read it)
#            $3 optional human-readable label for logging
# Returns:   the agent's exit code; 1 if the watchdog killed it or the agent
#            is unknown.
#######################################
send_to_agent() {
  local agent_name="$1"
  local prompt="$2"
  local label="${3:-}"

  MSG_SEQ=$((MSG_SEQ + 1))
  local seq=$(printf '%03d' $MSG_SEQ)
  local prompt_file="${SESSION_DIR}/prompt_${seq}_${agent_name}.md"
  local log_file="${SESSION_DIR}/log_${seq}_${agent_name}.md"
  local exitcode_file="${SESSION_DIR}/.exitcode_${seq}"   # agent writes $? here
  local done_flag="${SESSION_DIR}/.done_${seq}"           # touched when the tmux run ends

  echo "$prompt" > "$prompt_file"
  touch "$log_file"
  log "🚀 ${BOLD}${agent_name}${NC} ${label:+— ${label}}"

  if $DRY_RUN; then
    echo "# Dry run — prompt at: ${prompt_file}" > "$log_file"
    return 0
  fi

  # ── Build agent command ──────────────────────────────
  # Short CLI prompt telling agent to read the prompt file (agents use their own tools to read)
  # NO stdout redirect — output goes to terminal; pipe-pane captures it to log_file
  local short="Read ALL instructions from the file ${prompt_file} and execute them completely. When done, print: done"
  local agent_cmd=""
  local agent_retries=${AGENT_RETRY_COUNT:-3}
  local agent_retry_delay=${AGENT_RETRY_DELAY:-5}
  # NOTE(review): ${short} is interpolated inside single quotes; session paths
  # containing a single quote would break the generated command — confirm
  # SESSION_DIR can never contain one.
  case "$agent_name" in
    claude)
      # Pattern from ralphy: direct execution; --verbose for better error
      # diagnostics. _retry handles transient "No messages returned" errors.
      agent_cmd="_retry ${agent_retries} ${agent_retry_delay} claude --dangerously-skip-permissions --verbose -p '${short}'; echo \$? > '${exitcode_file}'"
      ;;
    gemini)
      agent_cmd="_retry ${agent_retries} ${agent_retry_delay} gemini -p '${short}' --approval-mode yolo; echo \$? > '${exitcode_file}'"
      ;;
    codex)
      agent_cmd="_retry ${agent_retries} ${agent_retry_delay} codex exec --yolo '${short}'; echo \$? > '${exitcode_file}'"
      ;;
    amp)
      # Pattern from ralph project: --dangerously-allow-all + -x (execute mode)
      agent_cmd="_retry ${agent_retries} ${agent_retry_delay} amp --dangerously-allow-all -x '${short}'; echo \$? > '${exitcode_file}'"
      ;;
    opencode)
      # Pattern from ralphy: env var for permissions, 'run' subcommand
      agent_cmd="_retry ${agent_retries} ${agent_retry_delay} env OPENCODE_PERMISSION='{\"*\":\"allow\"}' opencode run '${short}'; echo \$? > '${exitcode_file}'"
      ;;
    *)
      error "Unknown agent: $agent_name (supported: claude, gemini, codex, amp, opencode)"; return 1 ;;
  esac

  # ── Execute with watchdog heartbeat ──────────────────
  local start_time=$(date +%s)
  local last_size=0              # last observed log size (growth == liveness)
  local stale_since=$start_time  # last moment output was seen

  if $USE_TMUX; then
    # Each agent gets its own tmux session for isolation
    local agent_session="${TMUX_SESSION}-${agent_name}-${seq}"
    tmux new-session -d -s "$agent_session" -x 200 -y 50 2>/dev/null || true

    # Write command to a script file to avoid tmux send-keys garbling long commands
    # Embeds _retry function so it works in isolated tmux sessions
    local cmd_script="${SESSION_DIR}/.run_${seq}_${agent_name}.sh"
    cat > "$cmd_script" <<CMDEOF
#!/usr/bin/env bash
${RETRY_FUNC}
cd '${PROJECT}'
${agent_cmd}
touch '${done_flag}'
CMDEOF
    chmod +x "$cmd_script"

    echo -e " ${CYAN}📺 tmux attach -t ${agent_session}${NC}"

    # If --split, create a tail pane in the coordinator session
    if $SPLIT_VIEW; then
      tmux split-window -t "${TMUX_SESSION}:coord" -v -l 15 \
        "echo '─── ${agent_name} (${label:-}) ───'; tail -f '${log_file}'" 2>/dev/null || true
    fi

    # pipe-pane captures terminal output to log file
    tmux pipe-pane -o -t "$agent_session" "cat >> '${log_file}'"
    tmux send-keys -t "$agent_session" "bash '${cmd_script}'" Enter

    # Poll until the script touches done_flag, watching log growth for liveness.
    while [[ ! -f "$done_flag" ]]; do
      sleep $HEARTBEAT_INTERVAL

      local elapsed=$(( $(date +%s) - start_time ))
      local mins=$(( elapsed / 60 ))
      local secs=$(( elapsed % 60 ))
      local current_size=$(wc -c < "$log_file" 2>/dev/null | tr -d ' ' || echo "0")

      # Check if agent process is still alive in tmux (some agents buffer output)
      local agent_proc_alive=false
      if tmux list-sessions 2>/dev/null | grep -q "$agent_session"; then
        agent_proc_alive=true
      fi

      if [[ "$current_size" -gt "$last_size" ]]; then
        # Log grew → the agent produced output; reset the staleness clock.
        stale_since=$(date +%s)
        last_size=$current_size
        echo -e "${DIM} 💚 ${agent_name} alive — ${mins}m${secs}s, ${current_size} bytes${NC}"
      elif $agent_proc_alive; then
        # Process alive but no output — agent is thinking (claude --print buffers)
        echo -e "${DIM} 🧠 ${agent_name} working — ${mins}m${secs}s (process alive, waiting for output)${NC}"
        # Only kill if total elapsed exceeds 15 min (hard timeout)
        if [[ $elapsed -ge 900 ]]; then
          warn "${agent_name} hard timeout — ${mins}m elapsed. Killing..."
          tmux send-keys -t "$agent_session" C-c 2>/dev/null || true
          sleep 2
          tmux kill-session -t "$agent_session" 2>/dev/null || true
          echo "WATCHDOG_KILLED" > "$exitcode_file"
          break
        fi
      else
        # Session gone and no new output: count down to the stale kill.
        local stale_duration=$(( $(date +%s) - stale_since ))
        local stale_mins=$(( stale_duration / 60 ))

        if [[ $stale_duration -ge $STALE_TIMEOUT ]]; then
          warn "${agent_name} stuck — no output for ${stale_mins}m. Killing..."
          tmux send-keys -t "$agent_session" C-c 2>/dev/null || true
          sleep 2
          tmux kill-session -t "$agent_session" 2>/dev/null || true
          echo "WATCHDOG_KILLED" > "$exitcode_file"
          break
        else
          echo -e "${YELLOW} ⚠️ ${agent_name} stale — ${stale_mins}m no output (kill at $((STALE_TIMEOUT / 60))m)${NC}"
        fi
      fi
    done
    rm -f "$done_flag"
    # Clean up agent session
    tmux kill-session -t "$agent_session" 2>/dev/null || true
  else
    # Foreground (no tmux): write script file + run as background process
    local cmd_script="${SESSION_DIR}/.run_${seq}_${agent_name}.sh"
    cat > "$cmd_script" <<CMDEOF
#!/usr/bin/env bash
${RETRY_FUNC}
cd '${PROJECT}'
${agent_cmd}
CMDEOF
    chmod +x "$cmd_script"
    # Use script(1) to allocate a PTY — agents like claude need a terminal to work.
    # NOTE(review): this is the BSD/macOS argument order (script -q FILE CMD...);
    # GNU script wants `script -q -c "CMD" FILE` — confirm the target platform.
    script -q "$log_file" bash "$cmd_script" &
    local agent_pid=$!

    # Same heartbeat/staleness logic, keyed on the background PID instead of
    # a done-flag file. No 15-minute hard timeout in this branch.
    while kill -0 $agent_pid 2>/dev/null; do
      sleep $HEARTBEAT_INTERVAL
      local elapsed=$(( $(date +%s) - start_time ))
      local current_size=$(wc -c < "$log_file" 2>/dev/null | tr -d ' ' || echo "0")
      if [[ "$current_size" -gt "$last_size" ]]; then
        stale_since=$(date +%s)
        last_size=$current_size
        echo -e "${DIM} 💚 ${agent_name} alive — $((elapsed/60))m$((elapsed%60))s, ${current_size} bytes${NC}"
      else
        local stale_duration=$(( $(date +%s) - stale_since ))
        if [[ $stale_duration -ge $STALE_TIMEOUT ]]; then
          warn "${agent_name} stuck — killing..."
          kill $agent_pid 2>/dev/null || true; sleep 2; kill -9 $agent_pid 2>/dev/null || true
          echo "WATCHDOG_KILLED" > "$exitcode_file"
          break
        fi
      fi
    done
    wait $agent_pid 2>/dev/null || true
  fi

  local elapsed=$(( $(date +%s) - start_time ))
  log "${agent_name} finished in $((elapsed / 60))m$((elapsed % 60))s"

  # Recover the agent's exit code (written by the generated script), or the
  # WATCHDOG_KILLED sentinel written above.
  local exit_code=0
  if [[ -f "$exitcode_file" ]]; then
    exit_code=$(cat "$exitcode_file" | tr -d '[:space:]')
    rm -f "$exitcode_file"
  fi
  if [[ "$exit_code" == "WATCHDOG_KILLED" ]]; then
    error "${agent_name} killed by watchdog"
    echo "--- WATCHDOG KILLED ---" >> "$log_file"
    return 1
  fi
  # Non-numeric/missing exit code: treat a non-empty log as success, else failure.
  [[ "$exit_code" =~ ^[0-9]+$ ]] || { [[ -s "$log_file" ]] && exit_code=0 || exit_code=1; }

  # Post-execution: detect transient errors even with exit 0 (e.g. claude "No messages returned")
  if [[ $exit_code -eq 0 ]] && grep -qE "No messages returned|ECONNRESET|ETIMEDOUT" "$log_file" 2>/dev/null; then
    local post_retries=${AGENT_RETRY_COUNT:-3}
    local post_delay=${AGENT_RETRY_DELAY:-5}
    for (( _pr=1; _pr<post_retries; _pr++ )); do
      local wait_secs=$(( post_delay * _pr ))
      warn "${agent_name} transient error detected in output. Retry ${_pr}/$((post_retries-1)) in ${wait_secs}s..."
      sleep $wait_secs
      # Re-run via the same script file
      local cmd_script="${SESSION_DIR}/.run_${seq}_${agent_name}.sh"
      if [[ -f "$cmd_script" ]]; then
        > "$log_file" # truncate log for fresh attempt
        if $USE_TMUX; then
          local agent_session="${TMUX_SESSION}-${agent_name}-${seq}-r${_pr}"
          tmux new-session -d -s "$agent_session" -x 200 -y 50 2>/dev/null || true
          tmux pipe-pane -o -t "$agent_session" "cat >> '${log_file}'"
          tmux send-keys -t "$agent_session" "bash '${cmd_script}'" Enter
          # NOTE(review): unlike the first run, this wait has no watchdog —
          # if the retry never touches done_flag this loop spins forever.
          while [[ ! -f "$done_flag" ]]; do sleep $HEARTBEAT_INTERVAL; done
          rm -f "$done_flag"
          tmux kill-session -t "$agent_session" 2>/dev/null || true
        else
          script -q "$log_file" bash "$cmd_script" &
          local retry_pid=$!
          wait $retry_pid 2>/dev/null || true
        fi
        # Check if retry succeeded
        if ! grep -qE "No messages returned|ECONNRESET|ETIMEDOUT" "$log_file" 2>/dev/null; then
          log "${agent_name} retry ${_pr} succeeded"
          exit_code=0
          break
        fi
      fi
    done
  fi

  if [[ $exit_code -ne 0 ]]; then
    warn "${agent_name} exited ${exit_code} — check: ${log_file}"
  else
    success "${agent_name} done"
  fi
  return $exit_code
}
|
|
389
|
+
|
|
390
|
+
# ─── Read context files ─────────────────────────────────
# Inline every readable --context file into CONTEXT_CONTENT, each wrapped in
# === FILE: … === / === END === markers for the agent prompts.
CONTEXT_CONTENT=""
if [[ -n "$CONTEXT_FILES" ]]; then
  IFS=',' read -ra _CTX_ENTRIES <<< "$CONTEXT_FILES"
  for entry in "${_CTX_ENTRIES[@]}"; do
    entry=$(echo "$entry" | xargs)   # trim surrounding whitespace
    resolved=""
    # A path valid relative to the current dir wins over the project-relative one.
    if [[ -f "$entry" ]]; then
      resolved="$entry"
    elif [[ -f "$PROJECT/$entry" ]]; then
      resolved="$PROJECT/$entry"
    fi
    if [[ -n "$resolved" ]]; then
      CONTEXT_CONTENT="${CONTEXT_CONTENT}
=== FILE: ${entry} ===
$(cat "$resolved")
=== END ==="
    fi
  done
fi
|
|
407
|
+
|
|
408
|
+
# ─── Skill Discovery (file-based, bash 3.2 compatible) ──
# Print one skill file wrapped in === SKILL … === markers; prints nothing
# (and returns 0) when the file is missing or empty.
load_skill_file() {
  local file="$1" tag="$2"
  [[ -f "$file" && -s "$file" ]] || return 0
  printf '\n=== SKILL: %s (%s) ===\n%s\n=== END SKILL ===\n' \
    "$tag" "$file" "$(cat "$file")"
}
|
|
419
|
+
|
|
420
|
+
# Build the skill/instruction context for one agent and cache it at
# ${SKILLS_DIR}/<agent>.txt. Sources, in order:
#   1. project-level AGENTS.md and <AGENT>.md
#   2. project .agentic/skills/*.md
#   3. the agent's own home config dir (~/.claude, ~/.gemini, ~/.codex, ~/.config/amp)
#   4. files passed explicitly via --skills
discover_skills() {
  local agent_name="$1"
  local agent_upper=$(echo "$agent_name" | tr '[:lower:]' '[:upper:]')
  local content=""

  content+=$(load_skill_file "${PROJECT}/AGENTS.md" "project/AGENTS.md")
  content+=$(load_skill_file "${PROJECT}/${agent_upper}.md" "project/${agent_upper}.md")

  if [[ -d "${PROJECT}/.agentic/skills" ]]; then
    for sf in "${PROJECT}/.agentic/skills/"*.md; do
      content+=$(load_skill_file "$sf" ".agentic/skills/$(basename "$sf")")
    done
  fi

  local home_dirs=(
    "${HOME}/.claude"
    "${HOME}/.gemini"
    "${HOME}/.codex"
    "${HOME}/.config/amp"
  )
  for hd in "${home_dirs[@]}"; do
    if [[ -d "$hd" ]]; then
      local agent_base=$(basename "$hd")
      # BUGFIX: strip a leading dot so "~/.claude" maps to agent "claude".
      # Previously ".claude" uppercased to ".CLAUDE" and never equalled
      # "CLAUDE", so global skills were silently skipped for every dot-dir
      # agent (only ~/.config/amp, whose basename has no dot, ever matched).
      agent_base=${agent_base#.}
      local agent_base_upper=$(echo "$agent_base" | tr '[:lower:]' '[:upper:]')
      if [[ "$agent_upper" == "$agent_base_upper" ]]; then
        content+=$(load_skill_file "${hd}/${agent_base_upper}.md" "global/${agent_base_upper}.md")
        content+=$(load_skill_file "${hd}/instructions.md" "global/${agent_base}/instructions.md")
      fi
    fi
  done

  if [[ -n "$SKILL_FILES" ]]; then
    IFS=',' read -ra SFILES <<< "$SKILL_FILES"
    for sf in "${SFILES[@]}"; do
      sf=$(echo "$sf" | xargs)                      # trim whitespace
      [[ -f "$PROJECT/$sf" ]] && sf="$PROJECT/$sf"  # prefer project-relative path
      content+=$(load_skill_file "$sf" "explicit/$(basename "$sf")")
    done
  fi

  echo "$content" > "${SKILLS_DIR}/${agent_name}.txt"
}
|
|
462
|
+
|
|
463
|
+
# Return (on stdout) the cached skills block for an agent, lazily running
# discover_skills on first use. Emits nothing when the cache is empty.
get_skills_for() {
  local who="$1"
  local cache="${SKILLS_DIR}/${who}.txt"
  # Populate the per-agent cache on first request.
  if [[ ! -f "$cache" ]]; then
    discover_skills "$who"
  fi
  local body=""
  if [[ -f "$cache" ]]; then
    body=$(cat "$cache")
  fi
  if [[ -n "$body" ]]; then
    printf '\nSKILLS & INSTRUCTIONS:\n%s\n' "$body"
  fi
}
|
|
479
|
+
|
|
480
|
+
# ─── Parse reviewers ────────────────────────────────────
# Split the comma-separated --reviewers value and trim each entry.
IFS=',' read -ra REVIEWER_LIST <<< "$REVIEWERS"
for i in "${!REVIEWER_LIST[@]}"; do
  REVIEWER_LIST[$i]=$(xargs <<< "${REVIEWER_LIST[$i]}")
done

# ─── Parse frontend reviewers ──────────────────────────
FRONTEND_REVIEWER_LIST=()
if [[ -n "$FRONTEND_REVIEWER" ]]; then
  IFS=',' read -ra FRONTEND_REVIEWER_LIST <<< "$FRONTEND_REVIEWER"
  for i in "${!FRONTEND_REVIEWER_LIST[@]}"; do
    FRONTEND_REVIEWER_LIST[$i]=$(xargs <<< "${FRONTEND_REVIEWER_LIST[$i]}")
  done
fi

# Pre-discover skills for every agent that may be prompted this session so
# the per-agent caches exist before the planner loop starts.
discover_skills "$PLANNER"
discover_skills "$EXECUTOR"
for reviewer in "${REVIEWER_LIST[@]}"; do
  discover_skills "$reviewer"
done
if [[ -n "$FRONTEND_DEV" ]]; then
  discover_skills "$FRONTEND_DEV"
fi
if [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]]; then
  for fe_reviewer in "${FRONTEND_REVIEWER_LIST[@]}"; do
    discover_skills "$fe_reviewer"
  done
fi
|
|
509
|
+
|
|
510
|
+
# ─── Service management (frontend mode) ────────────────
BACKEND_SESSION=""
FRONTEND_SESSION=""

# Poll a URL with curl until it answers (return 0) or the timeout elapses
# (return 1). Prints a progress line with one dot per failed probe.
#   $1 url   $2 display name   $3 timeout seconds (default 120)
wait_for_url() {
  local url="$1" name="$2" timeout="${3:-120}"
  local began
  began=$(date +%s)
  echo -ne " ⏳ Waiting for ${name} at ${url}"
  while ! curl -sf -o /dev/null --max-time 3 "$url" 2>/dev/null; do
    if (( $(date +%s) - began >= timeout )); then
      echo -e " ${RED}✗ timeout after ${timeout}s${NC}"
      return 1
    fi
    echo -n "."
    sleep 3
  done
  echo -e " ${GREEN}✓${NC}"
  return 0
}
|
|
534
|
+
|
|
535
|
+
# Start the backend and/or frontend dev servers (frontend mode) in dedicated
# tmux sessions, then wait for readiness. No-op (returns 0) when neither
# --backend-cmd nor --frontend-cmd was given. Returns 1 if a readiness URL
# never comes up. Writes BACKEND_SESSION/FRONTEND_SESSION for stop_services.
# NOTE(review): services always run via tmux even without --tmux — confirm
# tmux is installed whenever --backend-cmd/--frontend-cmd are used.
start_services() {
  if [[ -z "$BACKEND_CMD" && -z "$FRONTEND_CMD" ]]; then
    return 0
  fi

  log "🖥️ Starting services for frontend testing..."

  if [[ -n "$BACKEND_CMD" ]]; then
    BACKEND_SESSION="${TMUX_SESSION}-backend"
    tmux new-session -d -s "$BACKEND_SESSION" -x 200 -y 50 2>/dev/null || true
    # pipe-pane mirrors the service's terminal output into the session log.
    tmux pipe-pane -o -t "$BACKEND_SESSION" "cat >> '${SESSION_DIR}/backend.log'"
    tmux send-keys -t "$BACKEND_SESSION" "cd '${PROJECT}' && ${BACKEND_CMD}" Enter
    echo -e " ${CYAN}📺 tmux attach -t ${BACKEND_SESSION}${NC}"

    if [[ -n "$BACKEND_URL" ]]; then
      wait_for_url "$BACKEND_URL" "backend" 180 || { error "Backend failed to start"; return 1; }
    else
      # Without a health URL, fall back to a fixed grace period.
      log "No --backend-url, waiting 30s for backend startup..."
      sleep 30
    fi
  fi

  if [[ -n "$FRONTEND_CMD" ]]; then
    FRONTEND_SESSION="${TMUX_SESSION}-frontend"
    tmux new-session -d -s "$FRONTEND_SESSION" -x 200 -y 50 2>/dev/null || true
    tmux pipe-pane -o -t "$FRONTEND_SESSION" "cat >> '${SESSION_DIR}/frontend.log'"
    tmux send-keys -t "$FRONTEND_SESSION" "cd '${PROJECT}' && ${FRONTEND_CMD}" Enter
    echo -e " ${CYAN}📺 tmux attach -t ${FRONTEND_SESSION}${NC}"

    if [[ -n "$FRONTEND_URL" ]]; then
      wait_for_url "$FRONTEND_URL" "frontend" 120 || { error "Frontend failed to start"; return 1; }
    else
      log "No --frontend-url, waiting 20s for frontend startup..."
      sleep 20
    fi
  fi

  success "Services running"
}
|
|
574
|
+
|
|
575
|
+
# Tear down the backend/frontend tmux sessions started by start_services.
# Safe to call when no services were started; errors are ignored.
stop_services() {
  local sess
  for sess in "$BACKEND_SESSION" "$FRONTEND_SESSION"; do
    if [[ -n "$sess" ]]; then
      tmux kill-session -t "$sess" 2>/dev/null || true
    fi
  done
}
|
|
579
|
+
|
|
580
|
+
# ─── Banner ──────────────────────────────────────────────
echo ""
echo -e "${BOLD}${MAGENTA}"
echo " ╔═══════════════════════════════════════════════════════╗"
echo " ║ 🧠 DYNAMIC MULTI-AGENT COORDINATOR v7.0 ║"
echo " ╚═══════════════════════════════════════════════════════╝"
echo -e "${NC}"
echo -e " ${BOLD}Task:${NC} ${TASK:-'(from plan file)'}"
echo -e " ${BOLD}Project:${NC} $PROJECT"
echo -e " ${BOLD}Planner:${NC} ${PLANNER} ${BOLD}(brain — decides everything)${NC}"
echo -e " ${BOLD}Executor:${NC} ${EXECUTOR} (backend implements)"
echo -e " ${BOLD}Reviewers:${NC} ${REVIEWER_LIST[*]} (backend review)"
if $FRONTEND; then
  [[ -n "$FRONTEND_DEV" ]] && echo -e " ${BOLD}FE Dev:${NC} ${FRONTEND_DEV} (frontend implements)"
  [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]] && echo -e " ${BOLD}FE Review:${NC} ${FRONTEND_REVIEWER_LIST[*]} (frontend review)"
  [[ -n "$BACKEND_CMD" ]] && echo -e " ${BOLD}Backend:${NC} ${BACKEND_CMD}"
  [[ -n "$FRONTEND_CMD" ]] && echo -e " ${BOLD}Frontend:${NC} ${FRONTEND_CMD}"
fi
[[ -n "$PLAN_FILE" ]] && echo -e " ${BOLD}Plan:${NC} ${PLAN_FILE}"
$USE_TMUX && echo -e " ${BOLD}Tmux:${NC} tmux attach -t ${TMUX_SESSION}"
$SPLIT_VIEW && echo -e " ${BOLD}Split:${NC} agent output visible in coordinator tmux"
$DASHBOARD && echo -e " ${BOLD}Dashboard:${NC} http://localhost:${DASHBOARD_PORT}"
echo ""

# Write session metadata for dashboard
# Reviewer lists are rendered as JSON arrays by joining quoted names.
# NOTE(review): values are interpolated into JSON unescaped — a project path
# containing a double quote or backslash would produce invalid JSON.
FRL_JSON="[]"
if [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]]; then
  FRL_JSON=$(printf '"%s",' "${FRONTEND_REVIEWER_LIST[@]}" | sed 's/,$//')
  FRL_JSON="[${FRL_JSON}]"
fi
REV_JSON=$(printf '"%s",' "${REVIEWER_LIST[@]}" | sed 's/,$//')
cat > "${SESSION_DIR}/metadata.json" <<METAEOF
{
  "planner": "${PLANNER}",
  "executor": "${EXECUTOR}",
  "reviewers": [${REV_JSON}],
  "frontendDev": "${FRONTEND_DEV:-}",
  "frontendReviewers": ${FRL_JSON},
  "project": "${PROJECT}",
  "timestamp": "${TIMESTAMP}",
  "frontend": $($FRONTEND && echo true || echo false)
}
METAEOF

# ═══════════════════════════════════════════════════════════
# START DASHBOARD (if enabled)
# ═══════════════════════════════════════════════════════════
DASHBOARD_PID=""
if $DASHBOARD; then
  # server.js lives next to this script, under dashboard/.
  DASHBOARD_SCRIPT="$(cd "$(dirname "$0")" && pwd)/dashboard/server.js"
  if [[ -f "$DASHBOARD_SCRIPT" ]]; then
    # Kill any existing process on the dashboard port.
    # NOTE(review): kill -9 anything bound to the port is aggressive — it will
    # take out unrelated processes listening there.
    lsof -ti:"$DASHBOARD_PORT" | xargs kill -9 2>/dev/null || true
    sleep 0.5
    node "$DASHBOARD_SCRIPT" --project "$PROJECT" --port "$DASHBOARD_PORT" &
    DASHBOARD_PID=$!
    log "📊 Dashboard started at http://localhost:${DASHBOARD_PORT} (PID: ${DASHBOARD_PID})"
    # Try to open in browser (macOS `open`; silently skipped elsewhere)
    if command -v open &>/dev/null; then
      open "http://localhost:${DASHBOARD_PORT}" 2>/dev/null || true
    fi
  else
    warn "Dashboard not found at ${DASHBOARD_SCRIPT} — run: cd dashboard && npm install"
  fi
fi
|
|
645
|
+
|
|
646
|
+
# ═══════════════════════════════════════════════════════════
# PRD HELPERS
# ═══════════════════════════════════════════════════════════

# Detect if a file is in PRD format (has user stories with acceptance criteria)
# Returns 0 when the file exists and contains both "### US-<n>" story headers
# and an "Acceptance Criteria" section; 1 otherwise.
is_prd_format() {
  local candidate="$1"
  [[ -f "$candidate" ]] || return 1
  grep -q '^### US-[0-9]' "$candidate" 2>/dev/null || return 1
  grep -q 'Acceptance Criteria' "$candidate" 2>/dev/null || return 1
  return 0
}
|
|
660
|
+
|
|
661
|
+
# Detect if a file is a JSON PRD (ralph-style prd.json)
# Returns 0 only when the file exists, has a .json extension, and contains
# a "userStories" key; 1 otherwise.
is_json_prd() {
  local candidate="$1"
  [[ -f "$candidate" ]] || return 1
  case "$candidate" in
    *.json) ;;
    *) return 1 ;;
  esac
  grep -q '"userStories"' "$candidate" 2>/dev/null || return 1
  return 0
}
|
|
671
|
+
|
|
672
|
+
# Convert a JSON PRD (ralph-style prd.json) to markdown PRD, then to task.md
|
|
673
|
+
# Convert a ralph-style JSON PRD ($1) into the standardized task.md ($2).
# Works in two passes: first renders an intermediate markdown PRD into
# ${SESSION_DIR}/json_to_md_prd.md, then delegates to prd_to_task_md.
# Requires jq; returns 1 (after calling error) when jq is missing.
# NOTE(review): spawns ~7 jq processes per story — fine for small PRDs,
# slow for very large ones.
prd_from_json() {
  local json_file="$1"
  local output_file="$2"
  local md_prd="${SESSION_DIR}/json_to_md_prd.md"

  # Check for jq
  if ! command -v jq &>/dev/null; then
    error "jq is required for JSON PRD parsing. Install: brew install jq"
    return 1
  fi

  # Top-level metadata with defaults via jq's alternative operator.
  local project_name
  project_name=$(jq -r '.project // "Project"' "$json_file")
  local description
  description=$(jq -r '.description // ""' "$json_file")

  {
    echo "# PRD: ${project_name}"
    echo ""
    echo "## Overview"
    echo "${description}"
    echo ""
    echo "## User Stories"
    echo ""

    # Iterate over each user story
    local count
    count=$(jq '.userStories | length' "$json_file")
    for ((si=0; si<count; si++)); do
      local id title desc priority notes passes
      id=$(jq -r ".userStories[$si].id" "$json_file")
      title=$(jq -r ".userStories[$si].title" "$json_file")
      desc=$(jq -r ".userStories[$si].description // \"\"" "$json_file")
      # Default priority is the story's 1-based position in the array.
      priority=$(jq -r ".userStories[$si].priority // $((si+1))" "$json_file")
      notes=$(jq -r ".userStories[$si].notes // \"\"" "$json_file")
      passes=$(jq -r ".userStories[$si].passes // false" "$json_file")

      echo "### ${id}: ${title} [priority: ${priority}]"
      [[ -n "$desc" ]] && echo "**Description:** ${desc}"
      echo "**Acceptance Criteria:**"

      # Iterate acceptance criteria; a story that already "passes" gets
      # all of its criteria pre-checked ([x]) in the markdown.
      local ac_count
      ac_count=$(jq ".userStories[$si].acceptanceCriteria | length" "$json_file")
      for ((ci=0; ci<ac_count; ci++)); do
        local criterion
        criterion=$(jq -r ".userStories[$si].acceptanceCriteria[$ci]" "$json_file")
        if [[ "$passes" == "true" ]]; then
          echo "- [x] ${criterion}"
        else
          echo "- [ ] ${criterion}"
        fi
      done

      [[ -n "$notes" && "$notes" != "" ]] && echo "**Notes:** ${notes}"
      echo ""
    done
  } > "$md_prd"

  # Now convert the generated markdown PRD to task.md
  prd_to_task_md "$md_prd" "$output_file"
}
|
|
735
|
+
|
|
736
|
+
# Load PRD skill template if available
|
|
737
|
+
# Emit the project's PRD-generation skill wrapped in delimiter banners,
# for inclusion in planner prompts. Prints nothing (and returns 0) when
# the project has no skill file installed.
# NOTE(review): reads ${PROJECT}/.agentic/skills/prd_template.md, while
# this package ships the template under .codeswarm/skills/ — confirm the
# expected install path.
load_prd_skill() {
  local skill_path="${PROJECT}/.agentic/skills/prd_template.md"
  [[ -f "$skill_path" ]] || return 0
  printf '\n=== PRD GENERATION SKILL ===\n%s\n=== END PRD SKILL ===\n' "$(cat "$skill_path")"
}
|
|
746
|
+
|
|
747
|
+
# Convert a PRD file into the standardized task.md format
|
|
748
|
+
# Extracts user stories as subtasks, preserves acceptance criteria inline
|
|
749
|
+
prd_to_task_md() {
|
|
750
|
+
local prd_file="$1"
|
|
751
|
+
local output_file="$2"
|
|
752
|
+
|
|
753
|
+
# Extract title from first H1
|
|
754
|
+
local title
|
|
755
|
+
title=$(grep -m1 '^# ' "$prd_file" | sed 's/^# //' | sed 's/^PRD: //')
|
|
756
|
+
|
|
757
|
+
# Extract overview
|
|
758
|
+
local overview=""
|
|
759
|
+
overview=$(sed -n '/^## Overview/,/^## /{ /^## Overview/d; /^## /d; p; }' "$prd_file" | head -5 | sed '/^$/d')
|
|
760
|
+
|
|
761
|
+
# Build task.md
|
|
762
|
+
{
|
|
763
|
+
echo "# Task: ${title}"
|
|
764
|
+
echo ""
|
|
765
|
+
echo "## Summary"
|
|
766
|
+
echo "${overview}"
|
|
767
|
+
echo ""
|
|
768
|
+
echo "## Subtasks"
|
|
769
|
+
echo ""
|
|
770
|
+
|
|
771
|
+
# Parse each user story into a subtask
|
|
772
|
+
local story_num=0
|
|
773
|
+
local in_story=false
|
|
774
|
+
local story_id="" story_title="" story_desc="" story_files="" story_deps="" story_notes=""
|
|
775
|
+
local in_ac=false
|
|
776
|
+
local ac_lines=""
|
|
777
|
+
|
|
778
|
+
while IFS= read -r line; do
|
|
779
|
+
# New user story header: ### US-001: Title [priority: N]
|
|
780
|
+
if [[ "$line" =~ ^###\ US-([0-9]+):\ (.+) ]]; then
|
|
781
|
+
# Flush previous story
|
|
782
|
+
if [[ $story_num -gt 0 ]]; then
|
|
783
|
+
echo "- [ ] **[${story_id}] ${story_title}**"
|
|
784
|
+
[[ -n "$story_files" ]] && echo " - Files: ${story_files}"
|
|
785
|
+
[[ -n "$story_desc" ]] && echo " - Do: ${story_desc}"
|
|
786
|
+
if [[ -n "$ac_lines" ]]; then
|
|
787
|
+
echo " - Acceptance Criteria:"
|
|
788
|
+
echo "$ac_lines" | while IFS= read -r acl; do
|
|
789
|
+
[[ -n "$acl" ]] && echo " ${acl}"
|
|
790
|
+
done
|
|
791
|
+
fi
|
|
792
|
+
echo " - Verify: Check all acceptance criteria pass"
|
|
793
|
+
[[ -n "$story_deps" ]] && echo " - Dependencies: ${story_deps}"
|
|
794
|
+
[[ -n "$story_notes" ]] && echo " - Notes: ${story_notes}"
|
|
795
|
+
echo ""
|
|
796
|
+
fi
|
|
797
|
+
|
|
798
|
+
story_num=$((story_num + 1))
|
|
799
|
+
# Extract title (strip [priority: N] and [status: ...] suffixes)
|
|
800
|
+
story_title=$(echo "$line" | sed 's/^### US-[0-9]*: //' | sed 's/ \[priority:.*//; s/ \[status:.*//')
|
|
801
|
+
story_id=$(echo "$line" | grep -o 'US-[0-9]*')
|
|
802
|
+
story_desc="" story_files="" story_deps="" story_notes=""
|
|
803
|
+
in_ac=false ac_lines=""
|
|
804
|
+
in_story=true
|
|
805
|
+
continue
|
|
806
|
+
fi
|
|
807
|
+
|
|
808
|
+
# Stop at section breaks
|
|
809
|
+
if [[ "$line" =~ ^---$ ]] || [[ "$line" =~ ^##\ && ! "$line" =~ ^###\ ]]; then
|
|
810
|
+
in_story=false
|
|
811
|
+
fi
|
|
812
|
+
|
|
813
|
+
if $in_story; then
|
|
814
|
+
if [[ "$line" =~ ^\*\*Description:\*\* ]]; then
|
|
815
|
+
story_desc=$(echo "$line" | sed 's/^\*\*Description:\*\* //')
|
|
816
|
+
in_ac=false
|
|
817
|
+
elif [[ "$line" =~ ^\*\*Files:\*\* ]]; then
|
|
818
|
+
story_files=$(echo "$line" | sed 's/^\*\*Files:\*\* //')
|
|
819
|
+
in_ac=false
|
|
820
|
+
elif [[ "$line" =~ ^\*\*Dependencies:\*\* ]]; then
|
|
821
|
+
story_deps=$(echo "$line" | sed 's/^\*\*Dependencies:\*\* //')
|
|
822
|
+
in_ac=false
|
|
823
|
+
elif [[ "$line" =~ ^\*\*Notes:\*\* ]]; then
|
|
824
|
+
story_notes=$(echo "$line" | sed 's/^\*\*Notes:\*\* //')
|
|
825
|
+
in_ac=false
|
|
826
|
+
elif [[ "$line" =~ ^\*\*Acceptance\ Criteria:\*\* ]]; then
|
|
827
|
+
in_ac=true
|
|
828
|
+
elif $in_ac && [[ "$line" =~ ^-\ \[.\] ]]; then
|
|
829
|
+
ac_lines+="${line}
|
|
830
|
+
"
|
|
831
|
+
fi
|
|
832
|
+
fi
|
|
833
|
+
done < "$prd_file"
|
|
834
|
+
|
|
835
|
+
# Flush last story
|
|
836
|
+
if [[ $story_num -gt 0 ]]; then
|
|
837
|
+
echo "- [ ] **[${story_id}] ${story_title}**"
|
|
838
|
+
[[ -n "$story_files" ]] && echo " - Files: ${story_files}"
|
|
839
|
+
[[ -n "$story_desc" ]] && echo " - Do: ${story_desc}"
|
|
840
|
+
if [[ -n "$ac_lines" ]]; then
|
|
841
|
+
echo " - Acceptance Criteria:"
|
|
842
|
+
echo "$ac_lines" | while IFS= read -r acl; do
|
|
843
|
+
[[ -n "$acl" ]] && echo " ${acl}"
|
|
844
|
+
done
|
|
845
|
+
fi
|
|
846
|
+
echo " - Verify: Check all acceptance criteria pass"
|
|
847
|
+
[[ -n "$story_deps" ]] && echo " - Dependencies: ${story_deps}"
|
|
848
|
+
[[ -n "$story_notes" ]] && echo " - Notes: ${story_notes}"
|
|
849
|
+
echo ""
|
|
850
|
+
fi
|
|
851
|
+
|
|
852
|
+
echo ""
|
|
853
|
+
echo "---"
|
|
854
|
+
echo ""
|
|
855
|
+
echo "# Original PRD"
|
|
856
|
+
echo ""
|
|
857
|
+
cat "$prd_file"
|
|
858
|
+
} > "$output_file"
|
|
859
|
+
}
|
|
860
|
+
|
|
861
|
+
# ═══════════════════════════════════════════════════════════
|
|
862
|
+
# PHASE 1: LOAD OR CREATE PLAN (supports --prd, --plan, --task)
|
|
863
|
+
# ═══════════════════════════════════════════════════════════
|
|
864
|
+
|
|
865
|
+
if [[ -n "$PRD_FILE" ]]; then
|
|
866
|
+
# ── PRD mode: load and normalize PRD ─────────────────
|
|
867
|
+
if [[ ! -f "$PRD_FILE" ]]; then
|
|
868
|
+
[[ -f "$PROJECT/$PRD_FILE" ]] && PRD_FILE="$PROJECT/$PRD_FILE"
|
|
869
|
+
fi
|
|
870
|
+
if [[ ! -f "$PRD_FILE" ]]; then
|
|
871
|
+
error "PRD file not found: $PRD_FILE"
|
|
872
|
+
exit 1
|
|
873
|
+
fi
|
|
874
|
+
|
|
875
|
+
PRD_SOURCE="prd"
|
|
876
|
+
# Copy original PRD to session for reference
|
|
877
|
+
cp "$PRD_FILE" "${SESSION_DIR}/original_prd.$(echo "$PRD_FILE" | sed 's/.*\.//')" 2>/dev/null || true
|
|
878
|
+
|
|
879
|
+
if is_json_prd "$PRD_FILE"; then
|
|
880
|
+
# JSON PRD format (ralph-style prd.json) → convert to task.md
|
|
881
|
+
log "📋 JSON PRD detected — converting user stories to subtasks..."
|
|
882
|
+
prd_from_json "$PRD_FILE" "$TASK_FILE"
|
|
883
|
+
success "JSON PRD converted to task.md ($(grep -c '^\- \[ \]' "$TASK_FILE" 2>/dev/null || echo 0) subtasks)"
|
|
884
|
+
elif is_prd_format "$PRD_FILE"; then
|
|
885
|
+
# Already in markdown PRD format → convert directly to task.md
|
|
886
|
+
log "📋 Markdown PRD detected — converting user stories to subtasks..."
|
|
887
|
+
prd_to_task_md "$PRD_FILE" "$TASK_FILE"
|
|
888
|
+
success "PRD converted to task.md ($(grep -c '^\- \[ \]' "$TASK_FILE" 2>/dev/null || echo 0) subtasks)"
|
|
889
|
+
else
|
|
890
|
+
# Not in PRD format → planner normalizes it to PRD, then to task.md
|
|
891
|
+
log "📋 Input is not in PRD format — planner will normalize..."
|
|
892
|
+
PLANNER_SKILLS=$(get_skills_for "$PLANNER")
|
|
893
|
+
|
|
894
|
+
send_to_agent "$PLANNER" "You are a planner. Convert the following document into a structured PRD (Product Requirements Document).
|
|
895
|
+
|
|
896
|
+
Read the project at ${PROJECT} to understand the codebase, then read the input document.
|
|
897
|
+
${PLANNER_SKILLS}
|
|
898
|
+
${CONTEXT_CONTENT:+
|
|
899
|
+
CONTEXT:
|
|
900
|
+
${CONTEXT_CONTENT}
|
|
901
|
+
}
|
|
902
|
+
INPUT DOCUMENT: ${PRD_FILE}
|
|
903
|
+
(Read this file first to understand the requirements)
|
|
904
|
+
|
|
905
|
+
IMPORTANT: Write the normalized PRD to this exact file: ${SESSION_DIR}/normalized_prd.md
|
|
906
|
+
|
|
907
|
+
The PRD MUST use this exact format:
|
|
908
|
+
|
|
909
|
+
# PRD: <Project Title>
|
|
910
|
+
|
|
911
|
+
## Overview
|
|
912
|
+
<2-3 sentence description>
|
|
913
|
+
|
|
914
|
+
## Tech Stack
|
|
915
|
+
<language, framework, database>
|
|
916
|
+
|
|
917
|
+
## User Stories
|
|
918
|
+
|
|
919
|
+
### US-001: <Title> [priority: 1]
|
|
920
|
+
**Description:** <what and why>
|
|
921
|
+
**Files:** \`path/to/file1\`, \`path/to/file2\`
|
|
922
|
+
**Acceptance Criteria:**
|
|
923
|
+
- [ ] <specific, testable criterion — e.g. 'mvn compile passes'>
|
|
924
|
+
- [ ] <specific, testable criterion>
|
|
925
|
+
**Dependencies:** none | US-XXX
|
|
926
|
+
**Notes:** <patterns to follow, gotchas>
|
|
927
|
+
|
|
928
|
+
### US-002: <Title> [priority: 2]
|
|
929
|
+
...
|
|
930
|
+
|
|
931
|
+
Guidelines:
|
|
932
|
+
- Read existing code first to understand patterns and conventions
|
|
933
|
+
- Each user story should be completable by one agent in ~10 minutes
|
|
934
|
+
- Order by dependency (schema first, then entities, services, controllers)
|
|
935
|
+
- Be specific: name files, classes, methods, table names
|
|
936
|
+
- Include 'mvn compile passes' (or equivalent build check) in every story's acceptance criteria
|
|
937
|
+
- Reference existing implementations as patterns to follow
|
|
938
|
+
- When done, print: done" \
|
|
939
|
+
"normalizing input → PRD format"
|
|
940
|
+
|
|
941
|
+
NORM_PRD="${SESSION_DIR}/normalized_prd.md"
|
|
942
|
+
if [[ -f "$NORM_PRD" ]] && [[ -s "$NORM_PRD" ]]; then
|
|
943
|
+
prd_to_task_md "$NORM_PRD" "$TASK_FILE"
|
|
944
|
+
success "PRD normalized and converted to task.md ($(grep -c '^\- \[ \]' "$TASK_FILE" 2>/dev/null || echo 0) subtasks)"
|
|
945
|
+
else
|
|
946
|
+
# Fallback: planner may have written directly to task.md or the file itself was usable
|
|
947
|
+
warn "Planner did not produce normalized PRD — falling back to direct plan creation"
|
|
948
|
+
PRD_SOURCE="task"
|
|
949
|
+
# Let the planner create a standard plan from the input
|
|
950
|
+
send_to_agent "$PLANNER" "You are a planner. Read the project at ${PROJECT} and the requirements in ${PRD_FILE}. Create a structured implementation plan.
|
|
951
|
+
${PLANNER_SKILLS}
|
|
952
|
+
|
|
953
|
+
IMPORTANT: Write the plan to this exact file: ${TASK_FILE}
|
|
954
|
+
|
|
955
|
+
The plan MUST use this exact markdown format:
|
|
956
|
+
# Task: <one-line title>
|
|
957
|
+
|
|
958
|
+
## Summary
|
|
959
|
+
<2-3 sentence overview>
|
|
960
|
+
|
|
961
|
+
## Subtasks
|
|
962
|
+
|
|
963
|
+
- [ ] **SUBTASK 1: <title>**
|
|
964
|
+
- Files: <specific file paths to create or modify>
|
|
965
|
+
- Do: <concrete steps>
|
|
966
|
+
- Verify: <how to confirm it works>
|
|
967
|
+
|
|
968
|
+
- [ ] **SUBTASK 2: <title>**
|
|
969
|
+
...
|
|
970
|
+
|
|
971
|
+
When done, print: done" \
|
|
972
|
+
"creating plan from PRD → task.md"
|
|
973
|
+
fi
|
|
974
|
+
fi
|
|
975
|
+
|
|
976
|
+
elif [[ -n "$PLAN_FILE" ]]; then
|
|
977
|
+
# ── Use existing plan file ──────────────────────────
|
|
978
|
+
if [[ ! -f "$PLAN_FILE" ]]; then
|
|
979
|
+
[[ -f "$PROJECT/$PLAN_FILE" ]] && PLAN_FILE="$PROJECT/$PLAN_FILE"
|
|
980
|
+
fi
|
|
981
|
+
if [[ ! -f "$PLAN_FILE" ]]; then
|
|
982
|
+
error "Plan file not found: $PLAN_FILE"
|
|
983
|
+
exit 1
|
|
984
|
+
fi
|
|
985
|
+
|
|
986
|
+
PRD_SOURCE="plan"
|
|
987
|
+
# Check if the "plan" is actually a PRD
|
|
988
|
+
if is_prd_format "$PLAN_FILE"; then
|
|
989
|
+
log "📋 Plan file is in PRD format — converting user stories to subtasks..."
|
|
990
|
+
cp "$PLAN_FILE" "${SESSION_DIR}/original_prd.md"
|
|
991
|
+
prd_to_task_md "$PLAN_FILE" "$TASK_FILE"
|
|
992
|
+
PRD_SOURCE="prd"
|
|
993
|
+
success "PRD converted to task.md ($(grep -c '^\- \[ \]' "$TASK_FILE" 2>/dev/null || echo 0) subtasks)"
|
|
994
|
+
else
|
|
995
|
+
cp "$PLAN_FILE" "$TASK_FILE"
|
|
996
|
+
success "Plan loaded from: $PLAN_FILE"
|
|
997
|
+
fi
|
|
998
|
+
|
|
999
|
+
elif [[ -f "$TASK_FILE" ]] && [[ -s "$TASK_FILE" ]] && ! $REPLAN; then
|
|
1000
|
+
# ── Resume existing plan ─────────────────────────────
|
|
1001
|
+
PRD_SOURCE="plan"
|
|
1002
|
+
log "Existing task.md found — resuming"
|
|
1003
|
+
|
|
1004
|
+
else
|
|
1005
|
+
# ── Planner creates plan from task description ──────
|
|
1006
|
+
PRD_SOURCE="task"
|
|
1007
|
+
log "📋 Sending task to planner — generating PRD first..."
|
|
1008
|
+
PLANNER_SKILLS=$(get_skills_for "$PLANNER")
|
|
1009
|
+
PRD_SKILL=$(load_prd_skill)
|
|
1010
|
+
|
|
1011
|
+
send_to_agent "$PLANNER" "You are a planner. Read the project at ${PROJECT} and create a structured PRD (Product Requirements Document).
|
|
1012
|
+
${PLANNER_SKILLS}
|
|
1013
|
+
${PRD_SKILL}
|
|
1014
|
+
${CONTEXT_CONTENT:+
|
|
1015
|
+
CONTEXT:
|
|
1016
|
+
${CONTEXT_CONTENT}
|
|
1017
|
+
}
|
|
1018
|
+
GOAL: ${TASK}
|
|
1019
|
+
|
|
1020
|
+
IMPORTANT: Write the PRD to this exact file: ${SESSION_DIR}/generated_prd.md
|
|
1021
|
+
|
|
1022
|
+
The PRD MUST use this exact format:
|
|
1023
|
+
|
|
1024
|
+
# PRD: <Project Title>
|
|
1025
|
+
|
|
1026
|
+
## Overview
|
|
1027
|
+
<2-3 sentence description>
|
|
1028
|
+
|
|
1029
|
+
## Tech Stack
|
|
1030
|
+
<language, framework, database>
|
|
1031
|
+
|
|
1032
|
+
## User Stories
|
|
1033
|
+
|
|
1034
|
+
### US-001: <Title> [priority: 1]
|
|
1035
|
+
**Description:** <what and why>
|
|
1036
|
+
**Files:** \`path/to/file1\`, \`path/to/file2\`
|
|
1037
|
+
**Acceptance Criteria:**
|
|
1038
|
+
- [ ] <specific, testable criterion — e.g. 'mvn compile passes'>
|
|
1039
|
+
- [ ] <specific, testable criterion>
|
|
1040
|
+
**Dependencies:** none | US-XXX
|
|
1041
|
+
**Notes:** <patterns to follow, gotchas>
|
|
1042
|
+
|
|
1043
|
+
### US-002: <Title> [priority: 2]
|
|
1044
|
+
...
|
|
1045
|
+
|
|
1046
|
+
Guidelines:
|
|
1047
|
+
- Read existing code first to understand patterns and conventions
|
|
1048
|
+
- Each user story should be completable by one agent in ~10 minutes
|
|
1049
|
+
- Order by dependency (schema first, then entities, services, controllers)
|
|
1050
|
+
- Be specific: name files, classes, methods, table names
|
|
1051
|
+
- Include 'build passes' in every story's acceptance criteria
|
|
1052
|
+
- Reference existing implementations as patterns to follow
|
|
1053
|
+
- When done, print: done" \
|
|
1054
|
+
"creating PRD from task description"
|
|
1055
|
+
|
|
1056
|
+
GEN_PRD="${SESSION_DIR}/generated_prd.md"
|
|
1057
|
+
if [[ -f "$GEN_PRD" ]] && [[ -s "$GEN_PRD" ]]; then
|
|
1058
|
+
prd_to_task_md "$GEN_PRD" "$TASK_FILE"
|
|
1059
|
+
PRD_SOURCE="prd"
|
|
1060
|
+
success "PRD created and converted to task.md ($(grep -c '^\- \[ \]' "$TASK_FILE" 2>/dev/null || echo 0) subtasks)"
|
|
1061
|
+
else
|
|
1062
|
+
# Fallback: check if planner wrote directly to task.md
|
|
1063
|
+
if [[ -f "$TASK_FILE" ]] && [[ -s "$TASK_FILE" ]]; then
|
|
1064
|
+
success "task.md created (planner wrote directly)"
|
|
1065
|
+
else
|
|
1066
|
+
error "Planner did not create task.md or PRD!"
|
|
1067
|
+
exit 1
|
|
1068
|
+
fi
|
|
1069
|
+
fi
|
|
1070
|
+
fi
|
|
1071
|
+
|
|
1072
|
+
# Ensure task.md exists at this point
|
|
1073
|
+
if [[ ! -f "$TASK_FILE" ]] || [[ ! -s "$TASK_FILE" ]]; then
|
|
1074
|
+
error "No task.md found after Phase 1 — cannot proceed"
|
|
1075
|
+
exit 1
|
|
1076
|
+
fi
|
|
1077
|
+
|
|
1078
|
+
# ═══════════════════════════════════════════════════════════
|
|
1079
|
+
# START SERVICES (if frontend mode)
|
|
1080
|
+
# ═══════════════════════════════════════════════════════════
|
|
1081
|
+
if $FRONTEND; then
|
|
1082
|
+
start_services || { error "Service startup failed — aborting"; exit 1; }
|
|
1083
|
+
fi
|
|
1084
|
+
|
|
1085
|
+
# ═══════════════════════════════════════════════════════════
|
|
1086
|
+
# INITIALIZE SESSION STATE
|
|
1087
|
+
# ═══════════════════════════════════════════════════════════
|
|
1088
|
+
|
|
1089
|
+
# Parse subtasks — supports multiple plan formats
|
|
1090
|
+
ALL_LINES=()
|
|
1091
|
+
|
|
1092
|
+
# Format 1: checkbox items - [ ] or - [x] or - [/]
|
|
1093
|
+
while IFS= read -r line; do
|
|
1094
|
+
ALL_LINES+=("$line")
|
|
1095
|
+
done < <(grep -n '\- \[.\]' "$TASK_FILE" 2>/dev/null || true)
|
|
1096
|
+
|
|
1097
|
+
# Format 2: if no checkboxes, look for section headers and convert
|
|
1098
|
+
if [[ ${#ALL_LINES[@]} -eq 0 ]]; then
|
|
1099
|
+
log "No checkbox subtasks found — scanning for section headers..."
|
|
1100
|
+
|
|
1101
|
+
# Try patterns in order of specificity
|
|
1102
|
+
SECTION_PATTERN=""
|
|
1103
|
+
if grep -q '^### \(Phase\|Step\|Part\|Stage\|Task\) [0-9]' "$TASK_FILE" 2>/dev/null; then
|
|
1104
|
+
SECTION_PATTERN='^### \(Phase\|Step\|Part\|Stage\|Task\) [0-9]'
|
|
1105
|
+
elif grep -q '^## [0-9][0-9]*\.' "$TASK_FILE" 2>/dev/null; then
|
|
1106
|
+
SECTION_PATTERN='^## [0-9][0-9]*\.'
|
|
1107
|
+
elif grep -q '^### [0-9][0-9]*\.' "$TASK_FILE" 2>/dev/null; then
|
|
1108
|
+
SECTION_PATTERN='^### [0-9][0-9]*\.'
|
|
1109
|
+
fi
|
|
1110
|
+
|
|
1111
|
+
if [[ -n "$SECTION_PATTERN" ]]; then
|
|
1112
|
+
# Build a new task.md: checkbox index at top + original plan below
|
|
1113
|
+
ORIG_CONTENT=$(cat "$TASK_FILE")
|
|
1114
|
+
|
|
1115
|
+
# Write header
|
|
1116
|
+
{
|
|
1117
|
+
echo "## Subtasks"
|
|
1118
|
+
echo ""
|
|
1119
|
+
# Write each checkbox on its own line
|
|
1120
|
+
while IFS= read -r hdr; do
|
|
1121
|
+
title=$(echo "$hdr" | sed 's/^#\{2,3\} //')
|
|
1122
|
+
echo "- [ ] **${title}**"
|
|
1123
|
+
done < <(grep "$SECTION_PATTERN" <<< "$ORIG_CONTENT")
|
|
1124
|
+
echo ""
|
|
1125
|
+
echo "---"
|
|
1126
|
+
echo ""
|
|
1127
|
+
echo "$ORIG_CONTENT"
|
|
1128
|
+
} > "$TASK_FILE"
|
|
1129
|
+
|
|
1130
|
+
# Re-parse
|
|
1131
|
+
while IFS= read -r line; do
|
|
1132
|
+
ALL_LINES+=("$line")
|
|
1133
|
+
done < <(grep -n '\- \[.\]' "$TASK_FILE" 2>/dev/null || true)
|
|
1134
|
+
log "Converted ${#ALL_LINES[@]} section headers into subtasks"
|
|
1135
|
+
fi
|
|
1136
|
+
fi
|
|
1137
|
+
|
|
1138
|
+
TOTAL=${#ALL_LINES[@]}
|
|
1139
|
+
|
|
1140
|
+
[[ $TOTAL -eq 0 ]] && { error "No subtasks found in plan — expected '- [ ]' checkboxes or '### Phase N:' sections"; exit 1; }
|
|
1141
|
+
log "Found ${BOLD}${TOTAL}${NC} subtasks"
|
|
1142
|
+
|
|
1143
|
+
# Helpers
|
|
1144
|
+
# Map subtask index $1 to its checkbox state: "done" ([x]),
# "in_progress" ([/]) or "pending" (anything else).
# ALL_LINES entries come from `grep -n` ("<lineno>:- [x] ..."); the line
# number prefix is stripped before inspecting the marker.
get_subtask_status() {
  local entry="${ALL_LINES[$1]}"
  local body="${entry#*:}"
  case "$body" in
    *"[x]"*) echo "done" ;;
    *"[/]"*) echo "in_progress" ;;
    *)       echo "pending" ;;
  esac
}
|
|
1150
|
+
|
|
1151
|
+
# Print the task.md lines belonging to subtask index $1: from its own
# checkbox line up to (but not including) the next subtask's checkbox
# line, or through end-of-file for the last subtask. Relies on the
# `grep -n` line-number prefixes stored in ALL_LINES and on TOTAL.
get_block() {
  local i=$1
  local first last
  first="${ALL_LINES[$i]%%:*}"
  if (( i + 1 < TOTAL )); then
    last="${ALL_LINES[$((i + 1))]%%:*}"
    last=$(( last - 1 ))
  else
    last=$(wc -l < "$TASK_FILE" | tr -d ' ')
  fi
  sed -n "${first},${last}p" "$TASK_FILE"
}
|
|
1163
|
+
|
|
1164
|
+
# Extract a subtask's human-readable title from its block text ($1):
# keep only the first line, strip the leading "- [x] **" checkbox prefix
# and everything from the closing "**" onward, then trim whitespace.
get_title() {
  local first_line=${1%%$'\n'*}
  printf '%s\n' "$first_line" \
    | sed 's/^- \[.\] \*\*//; s/\*\*.*//' \
    | xargs
}
|
|
1167
|
+
|
|
1168
|
+
# Build initial state
|
|
1169
|
+
# Render the session-state snapshot: a timestamp header, one checkbox
# line per subtask (marker mirrors done/in_progress/pending), and an
# empty "## History" section that later rounds append to. The string is
# built with literal \n escapes and expanded by the final `echo -e`.
build_state() {
  local out="# Session State\n"
  out+="Updated: $(date '+%Y-%m-%d %H:%M:%S')\n\n"
  out+="## Subtask Status\n"
  local j
  for ((j = 0; j < TOTAL; j++)); do
    local idx=$((j + 1))
    local st block title
    st=$(get_subtask_status "$j")
    block=$(get_block "$j")
    title=$(get_title "$block")
    local marker=" "
    [[ "$st" == "done" ]] && marker="x"
    [[ "$st" == "in_progress" ]] && marker="/"
    out+="- [${marker}] Subtask ${idx}: ${title} (${st})\n"
  done
  out+="\n## History\n"
  echo -e "$out"
}
|
|
1186
|
+
|
|
1187
|
+
echo -e "$(build_state)" > "$STATE_FILE"
|
|
1188
|
+
|
|
1189
|
+
# ═══════════════════════════════════════════════════════════
|
|
1190
|
+
# PHASE 2: PLANNER-DRIVEN DYNAMIC LOOP
|
|
1191
|
+
# ═══════════════════════════════════════════════════════════
|
|
1192
|
+
# The planner decides what to do next by writing DIRECTIVES.
|
|
1193
|
+
# Coordinator reads directive, dispatches, feeds result back.
|
|
1194
|
+
#
|
|
1195
|
+
ROUND=0
|
|
1196
|
+
COMPLETED=0
|
|
1197
|
+
FAILED=0
|
|
1198
|
+
|
|
1199
|
+
# Count already-done subtasks
|
|
1200
|
+
for ((i=0; i<TOTAL; i++)); do
|
|
1201
|
+
[[ "$(get_subtask_status $i)" == "done" ]] && COMPLETED=$((COMPLETED + 1))
|
|
1202
|
+
done
|
|
1203
|
+
[[ $COMPLETED -gt 0 ]] && log "${COMPLETED} subtask(s) already done — resuming"
|
|
1204
|
+
|
|
1205
|
+
while [[ $ROUND -lt $MAX_ROUNDS ]] && [[ $COMPLETED -lt $TOTAL ]]; do
|
|
1206
|
+
ROUND=$((ROUND + 1))
|
|
1207
|
+
DIRECTIVE_FILE="${DIRECTIVES_DIR}/directive_$(printf '%03d' $ROUND).md"
|
|
1208
|
+
|
|
1209
|
+
echo ""
|
|
1210
|
+
echo -e "${BOLD}${BLUE}╔══════════════════════════════════════════╗${NC}"
|
|
1211
|
+
echo -e "${BOLD}${BLUE}║ 🔄 ROUND ${ROUND} — Planner deciding... ║${NC}"
|
|
1212
|
+
echo -e "${BOLD}${BLUE}╚══════════════════════════════════════════╝${NC}"
|
|
1213
|
+
|
|
1214
|
+
# ─── Refresh state ────────────────────────────────
|
|
1215
|
+
# Re-parse task.md (may have been edited by agents)
|
|
1216
|
+
ALL_LINES=()
|
|
1217
|
+
while IFS= read -r line; do
|
|
1218
|
+
ALL_LINES+=("$line")
|
|
1219
|
+
done < <(grep -n '\- \[.\]' "$TASK_FILE" 2>/dev/null || true)
|
|
1220
|
+
TOTAL=${#ALL_LINES[@]}
|
|
1221
|
+
|
|
1222
|
+
# Truncate state history to last 10 rounds to prevent unbounded growth
|
|
1223
|
+
state_lines=$(wc -l < "$STATE_FILE" 2>/dev/null | tr -d ' ' || echo "0")
|
|
1224
|
+
if [[ $state_lines -gt 60 ]]; then
|
|
1225
|
+
# Keep header (first 15 lines) + last 30 lines of history
|
|
1226
|
+
{ head -15 "$STATE_FILE"; echo "... (earlier rounds truncated) ..."; tail -30 "$STATE_FILE"; } > "${STATE_FILE}.tmp"
|
|
1227
|
+
mv "${STATE_FILE}.tmp" "$STATE_FILE"
|
|
1228
|
+
fi
|
|
1229
|
+
CURRENT_STATE=$(cat "$STATE_FILE")
|
|
1230
|
+
|
|
1231
|
+
# Gather recent results (safe: check files exist before ls to avoid nullglob listing cwd)
|
|
1232
|
+
RECENT_LOGS=""
|
|
1233
|
+
LOG_FILES=("${SESSION_DIR}"/log_*.md)
|
|
1234
|
+
if [[ ${#LOG_FILES[@]} -gt 0 ]] && [[ -f "${LOG_FILES[0]}" ]]; then
|
|
1235
|
+
for recent in $(ls -t "${LOG_FILES[@]}" 2>/dev/null | head -3); do
|
|
1236
|
+
RECENT_LOGS+="
|
|
1237
|
+
=== $(basename "$recent") ===
|
|
1238
|
+
$(tail -30 "$recent" 2>/dev/null)
|
|
1239
|
+
=== END ==="
|
|
1240
|
+
done
|
|
1241
|
+
fi
|
|
1242
|
+
|
|
1243
|
+
# Build subtask summary for planner
|
|
1244
|
+
SUBTASK_SUMMARY=""
|
|
1245
|
+
for ((j=0; j<TOTAL; j++)); do
|
|
1246
|
+
local_num=$((j + 1))
|
|
1247
|
+
local_status=$(get_subtask_status $j)
|
|
1248
|
+
local_block=$(get_block $j)
|
|
1249
|
+
local_title=$(get_title "$local_block")
|
|
1250
|
+
case "$local_status" in
|
|
1251
|
+
done) SUBTASK_SUMMARY+=" ✅ ${local_num}. ${local_title}\n" ;;
|
|
1252
|
+
in_progress) SUBTASK_SUMMARY+=" 🔄 ${local_num}. ${local_title}\n" ;;
|
|
1253
|
+
pending) SUBTASK_SUMMARY+=" ⬜ ${local_num}. ${local_title}\n" ;;
|
|
1254
|
+
esac
|
|
1255
|
+
done
|
|
1256
|
+
|
|
1257
|
+
# ─── Ask planner what to do next ──────────────────
|
|
1258
|
+
PLANNER_SKILLS=$(get_skills_for "$PLANNER")
|
|
1259
|
+
|
|
1260
|
+
send_to_agent "$PLANNER" "You are the planner. Decide what happens next. Round ${ROUND}/${MAX_ROUNDS}.
|
|
1261
|
+
|
|
1262
|
+
PROJECT: ${PROJECT}
|
|
1263
|
+
PLAN: ${TASK_FILE}
|
|
1264
|
+
${PLANNER_SKILLS}
|
|
1265
|
+
|
|
1266
|
+
PROGRESS (${COMPLETED}/${TOTAL} done):
|
|
1267
|
+
$(echo -e "$SUBTASK_SUMMARY")
|
|
1268
|
+
${RECENT_LOGS:+
|
|
1269
|
+
RECENT RESULTS (last 3 agent outputs — check for errors/success):
|
|
1270
|
+
${RECENT_LOGS}
|
|
1271
|
+
}
|
|
1272
|
+
Read ${TASK_FILE} for full subtask details. Read recent logs above to understand what happened.
|
|
1273
|
+
|
|
1274
|
+
Write your decision to: ${DIRECTIVE_FILE}
|
|
1275
|
+
|
|
1276
|
+
FORMAT (first line MUST start with ACTION:):
|
|
1277
|
+
ACTION: EXECUTE|REVIEW|APPROVE|SKIP|DONE|FRONTEND_EXECUTE|FRONTEND_REVIEW
|
|
1278
|
+
SUBTASK: <number>
|
|
1279
|
+
INSTRUCTIONS: <specific instructions for the agent — cite files, patterns, classes>
|
|
1280
|
+
REASON: <why this action>
|
|
1281
|
+
|
|
1282
|
+
AVAILABLE AGENTS:
|
|
1283
|
+
- Backend executor: ${EXECUTOR} — implements backend code (Java/Spring Boot)
|
|
1284
|
+
- Backend reviewers: ${REVIEWER_LIST[*]} — reviews backend code$(
|
|
1285
|
+
if $FRONTEND; then
|
|
1286
|
+
echo "
|
|
1287
|
+
- Frontend dev: ${FRONTEND_DEV:-not assigned} — implements frontend code (Angular)
|
|
1288
|
+
- Frontend reviewers: $(if [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]]; then echo "${FRONTEND_REVIEWER_LIST[*]}"; else echo 'not assigned'; fi) — reviews frontend in browser
|
|
1289
|
+
- Services: backend=${BACKEND_URL:-not running}, frontend=${FRONTEND_URL:-not running}"
|
|
1290
|
+
fi)
|
|
1291
|
+
|
|
1292
|
+
Quick reference:
|
|
1293
|
+
- EXECUTE N: Send subtask to backend executor (${EXECUTOR}). Give SPECIFIC instructions.
|
|
1294
|
+
- REVIEW N: Send to backend reviewers (${REVIEWER_LIST[*]}). Tell them what to focus on.$(
|
|
1295
|
+
if $FRONTEND; then
|
|
1296
|
+
echo "
|
|
1297
|
+
- FRONTEND_EXECUTE N: Send subtask to frontend dev (${FRONTEND_DEV:-?}). For Angular/UI work.
|
|
1298
|
+
The frontend dev should: implement components, services, routes, templates.
|
|
1299
|
+
Include: use chrome-devtools-mcp to test at ${FRONTEND_URL:-http://localhost:4200}.
|
|
1300
|
+
- FRONTEND_REVIEW N: Send to frontend reviewers ($(if [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]]; then echo "${FRONTEND_REVIEWER_LIST[*]}"; else echo '?'; fi)).
|
|
1301
|
+
Reviewers use chrome-devtools-mcp to: navigate pages, check console errors, inspect network requests, take screenshots."
|
|
1302
|
+
fi)
|
|
1303
|
+
- APPROVE N: Mark subtask done. Use after successful execution or review.
|
|
1304
|
+
- SKIP N: Skip subtask with reason.
|
|
1305
|
+
- DONE: All work is complete.
|
|
1306
|
+
|
|
1307
|
+
Strategy tips:
|
|
1308
|
+
- For the FIRST pending subtask: use EXECUTE with detailed instructions
|
|
1309
|
+
- After EXECUTE succeeds: ALWAYS send to REVIEW next. Do NOT skip review. Reviewers catch bugs.
|
|
1310
|
+
- After REVIEW passes (reviewer says APPROVE): then APPROVE the subtask
|
|
1311
|
+
- After REVIEW fails (reviewer says REJECT): EXECUTE again with the reviewer's feedback
|
|
1312
|
+
- If execution failed: read the error in recent logs, then EXECUTE again with fix instructions
|
|
1313
|
+
- If stuck after 2+ failures: SKIP with reason
|
|
1314
|
+
- ONLY write ACTION: DONE when ALL ${TOTAL}/${TOTAL} subtasks are completed. Currently ${COMPLETED}/${TOTAL} done — DO NOT write DONE yet unless all are complete$(
|
|
1315
|
+
if [[ "$PRD_SOURCE" == "prd" ]]; then
|
|
1316
|
+
echo "
|
|
1317
|
+
|
|
1318
|
+
PRD MODE — Each subtask has Acceptance Criteria. When writing EXECUTE instructions:
|
|
1319
|
+
- Reference the acceptance criteria from the task.md for that subtask
|
|
1320
|
+
- Tell the executor to verify EACH acceptance criterion before finishing
|
|
1321
|
+
- Include the specific files, classes, and patterns from the PRD
|
|
1322
|
+
When writing REVIEW instructions:
|
|
1323
|
+
- Tell reviewers to verify EACH acceptance criterion explicitly
|
|
1324
|
+
- Ask reviewers to report which criteria pass and which fail
|
|
1325
|
+
- Include file/line references for any failures"
|
|
1326
|
+
fi)$(
|
|
1327
|
+
if $FRONTEND; then
|
|
1328
|
+
echo "
|
|
1329
|
+
- For UI/Angular subtasks: use FRONTEND_EXECUTE instead of EXECUTE
|
|
1330
|
+
- After FRONTEND_EXECUTE: use FRONTEND_REVIEW to verify in browser (console logs, network, screenshots)
|
|
1331
|
+
- Backend subtasks FIRST, then frontend subtasks that depend on backend APIs"
|
|
1332
|
+
fi)
|
|
1333
|
+
|
|
1334
|
+
Write the directive to ${DIRECTIVE_FILE}, then print: done" \
|
|
1335
|
+
"planner round ${ROUND}"
|
|
1336
|
+
|
|
1337
|
+
# ─── Parse directive ────────────────────────────────
|
|
1338
|
+
# Strategy: check directive file first, then fall back to extracting from agent log output
|
|
1339
|
+
if [[ ! -f "$DIRECTIVE_FILE" ]] || [[ ! -s "$DIRECTIVE_FILE" ]]; then
|
|
1340
|
+
PLANNER_LOG="${SESSION_DIR}/log_$(printf '%03d' $MSG_SEQ)_${PLANNER}.md"
|
|
1341
|
+
if [[ -f "$PLANNER_LOG" ]]; then
|
|
1342
|
+
# Strip ANSI escape codes from pipe-pane output before searching
|
|
1343
|
+
clean_log=$(sed 's/\x1b\[[0-9;]*[a-zA-Z]//g' "$PLANNER_LOG" 2>/dev/null)
|
|
1344
|
+
|
|
1345
|
+
# Extract directive from log output — skip the FORMAT template line (contains |)
|
|
1346
|
+
# Look for ACTION: with a single valid action (no pipes)
|
|
1347
|
+
if echo "$clean_log" | grep -q '^ACTION: [A-Z_]*$' 2>/dev/null; then
|
|
1348
|
+
# Found a clean ACTION line — grab it and the next few lines
|
|
1349
|
+
echo "$clean_log" | grep -A 5 '^ACTION: [A-Z_]*$' | head -6 > "$DIRECTIVE_FILE" 2>/dev/null || true
|
|
1350
|
+
fi
|
|
1351
|
+
|
|
1352
|
+
# Fallback: extract from ```md code blocks (codex often wraps output in code blocks)
|
|
1353
|
+
if [[ ! -s "$DIRECTIVE_FILE" ]]; then
|
|
1354
|
+
block_content=$(echo "$clean_log" | sed -n '/^```/,/^```/p' | sed '1d;$d')
|
|
1355
|
+
if echo "$block_content" | grep -q '^ACTION:' 2>/dev/null; then
|
|
1356
|
+
echo "$block_content" > "$DIRECTIVE_FILE" 2>/dev/null || true
|
|
1357
|
+
fi
|
|
1358
|
+
fi
|
|
1359
|
+
|
|
1360
|
+
# Last resort: find any ACTION: line that doesn't contain | (template)
|
|
1361
|
+
if [[ ! -s "$DIRECTIVE_FILE" ]]; then
|
|
1362
|
+
echo "$clean_log" | grep 'ACTION:' | grep -v '|' | head -1 > "$DIRECTIVE_FILE" 2>/dev/null || true
|
|
1363
|
+
if [[ -s "$DIRECTIVE_FILE" ]]; then
|
|
1364
|
+
# Also grab SUBTASK/INSTRUCTIONS/REASON lines nearby
|
|
1365
|
+
action_line=$(echo "$clean_log" | grep -n 'ACTION:' | grep -v '|' | head -1 | cut -d: -f1)
|
|
1366
|
+
if [[ -n "$action_line" ]]; then
|
|
1367
|
+
echo "$clean_log" | tail -n +"$action_line" | head -10 > "$DIRECTIVE_FILE" 2>/dev/null || true
|
|
1368
|
+
fi
|
|
1369
|
+
fi
|
|
1370
|
+
fi
|
|
1371
|
+
fi
|
|
1372
|
+
fi
|
|
1373
|
+
|
|
1374
|
+
if [[ ! -f "$DIRECTIVE_FILE" ]] || [[ ! -s "$DIRECTIVE_FILE" ]]; then
|
|
1375
|
+
warn "Planner didn't write directive file. Asking again..."
|
|
1376
|
+
continue
|
|
1377
|
+
fi
|
|
1378
|
+
|
|
1379
|
+
# Strip ANSI codes from directive file (in case extracted from pipe-pane log)
|
|
1380
|
+
sed -i '' 's/\x1b\[[0-9;]*[a-zA-Z]//g' "$DIRECTIVE_FILE" 2>/dev/null || true
|
|
1381
|
+
|
|
1382
|
+
# Extract fields
|
|
1383
|
+
ACTION=$(grep -m1 'ACTION:' "$DIRECTIVE_FILE" 2>/dev/null | sed 's/.*ACTION: *//' | xargs || echo "")
|
|
1384
|
+
SUBTASK_NUM=$(grep -m1 'SUBTASK:' "$DIRECTIVE_FILE" 2>/dev/null | sed 's/.*SUBTASK: *//' | tr -dc '0-9' || echo "")
|
|
1385
|
+
DIRECTIVE_AGENT=$(grep -m1 'AGENT:' "$DIRECTIVE_FILE" 2>/dev/null | sed 's/.*AGENT: *//' | xargs || echo "")
|
|
1386
|
+
DIRECTIVE_INSTRUCTIONS=$(sed -n '/INSTRUCTIONS:/,/^---$\|^$/p' "$DIRECTIVE_FILE" 2>/dev/null | sed '1s/.*INSTRUCTIONS: *//' | head -20 || echo "")
|
|
1387
|
+
DIRECTIVE_REASON=$(grep -m1 'REASON:' "$DIRECTIVE_FILE" 2>/dev/null | sed 's/.*REASON: *//' | xargs || echo "")
|
|
1388
|
+
|
|
1389
|
+
log "📋 Directive: ${BOLD}${ACTION}${NC}${SUBTASK_NUM:+ subtask #${SUBTASK_NUM}}"
|
|
1390
|
+
|
|
1391
|
+
# Append to state history
|
|
1392
|
+
echo "- Round ${ROUND}: ${ACTION}${SUBTASK_NUM:+ subtask #${SUBTASK_NUM}} ${DIRECTIVE_REASON:+— ${DIRECTIVE_REASON}}" >> "$STATE_FILE"
|
|
1393
|
+
|
|
1394
|
+
# ─── Dispatch directive ─────────────────────────────
|
|
1395
|
+
case "$ACTION" in
|
|
1396
|
+
|
|
1397
|
+
EXECUTE)
|
|
1398
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "EXECUTE needs SUBTASK number"; continue; }
|
|
1399
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1400
|
+
BLOCK=$(get_block $IDX)
|
|
1401
|
+
TITLE=$(get_title "$BLOCK")
|
|
1402
|
+
EXEC_AGENT="${DIRECTIVE_AGENT:-$EXECUTOR}"
|
|
1403
|
+
EXEC_SKILLS=$(get_skills_for "$EXEC_AGENT")
|
|
1404
|
+
|
|
1405
|
+
echo -e "${BOLD}━━━ 🔨 EXECUTE subtask #${SUBTASK_NUM}: ${TITLE} ━━━${NC}"
|
|
1406
|
+
|
|
1407
|
+
send_to_agent "$EXEC_AGENT" "You are an executor. Implement the following subtask in project ${PROJECT}.
|
|
1408
|
+
${EXEC_SKILLS}
|
|
1409
|
+
|
|
1410
|
+
SUBTASK #${SUBTASK_NUM}: ${TITLE}
|
|
1411
|
+
|
|
1412
|
+
${BLOCK}
|
|
1413
|
+
${DIRECTIVE_INSTRUCTIONS:+
|
|
1414
|
+
PLANNER INSTRUCTIONS:
|
|
1415
|
+
${DIRECTIVE_INSTRUCTIONS}
|
|
1416
|
+
}
|
|
1417
|
+
Steps:
|
|
1418
|
+
1. Read existing project code to understand patterns and conventions
|
|
1419
|
+
2. Implement exactly what the spec says
|
|
1420
|
+
3. Run the build command (e.g. mvn compile, npm run build) to verify no errors$(
|
|
1421
|
+
if [[ "$PRD_SOURCE" == "prd" ]]; then
|
|
1422
|
+
echo "
|
|
1423
|
+
4. Verify EACH acceptance criterion listed above — the subtask is NOT done until all criteria pass
|
|
1424
|
+
5. Do NOT edit task.md or any .agentic/ files
|
|
1425
|
+
6. When done, print: done"
|
|
1426
|
+
else
|
|
1427
|
+
echo "
|
|
1428
|
+
4. Do NOT edit task.md or any .agentic/ files
|
|
1429
|
+
5. When done, print: done"
|
|
1430
|
+
fi)" \
|
|
1431
|
+
"execute subtask #${SUBTASK_NUM}"
|
|
1432
|
+
|
|
1433
|
+
# Mark in-progress
|
|
1434
|
+
LINE_NUM="${ALL_LINES[$IDX]%%:*}"
|
|
1435
|
+
sed -i '' "${LINE_NUM}s/- \[ \]/- [\/]/" "$TASK_FILE" 2>/dev/null || true
|
|
1436
|
+
;;
|
|
1437
|
+
|
|
1438
|
+
REVIEW)
|
|
1439
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "REVIEW needs SUBTASK number"; continue; }
|
|
1440
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1441
|
+
BLOCK=$(get_block $IDX)
|
|
1442
|
+
TITLE=$(get_title "$BLOCK")
|
|
1443
|
+
|
|
1444
|
+
echo -e "${BOLD}━━━ 🔍 REVIEW subtask #${SUBTASK_NUM}: ${TITLE} ━━━${NC}"
|
|
1445
|
+
|
|
1446
|
+
# Get changed files for context
|
|
1447
|
+
CHANGED_FILES=$(cd "$PROJECT" && git diff --name-only HEAD 2>/dev/null | head -20 || echo "(git not available)")
|
|
1448
|
+
|
|
1449
|
+
log "📝 Sending to ${#REVIEWER_LIST[@]} reviewer(s) in parallel..."
|
|
1450
|
+
|
|
1451
|
+
REVIEW_REPORTS=()
|
|
1452
|
+
REVIEW_PIDS=()
|
|
1453
|
+
for rev_agent in "${REVIEWER_LIST[@]}"; do
|
|
1454
|
+
REVIEW_REPORT="${SESSION_DIR}/review_r${ROUND}_${SUBTASK_NUM}_${rev_agent}.md"
|
|
1455
|
+
REV_SKILLS=$(get_skills_for "$rev_agent")
|
|
1456
|
+
|
|
1457
|
+
send_to_agent "$rev_agent" "You are a code reviewer. Review subtask #${SUBTASK_NUM} in project ${PROJECT}.
|
|
1458
|
+
${REV_SKILLS}
|
|
1459
|
+
|
|
1460
|
+
SUBTASK: ${TITLE}
|
|
1461
|
+
|
|
1462
|
+
EXPECTED:
|
|
1463
|
+
${BLOCK}
|
|
1464
|
+
|
|
1465
|
+
CHANGED FILES: ${CHANGED_FILES}
|
|
1466
|
+
${DIRECTIVE_INSTRUCTIONS:+
|
|
1467
|
+
FOCUS: ${DIRECTIVE_INSTRUCTIONS}
|
|
1468
|
+
}
|
|
1469
|
+
Write your review report to: ${REVIEW_REPORT}
|
|
1470
|
+
|
|
1471
|
+
Report format:
|
|
1472
|
+
BUILD_STATUS: Pass/Fail (run the build command to check)
|
|
1473
|
+
ISSUES: <specific problems with file:line references, or 'None'>$(
|
|
1474
|
+
if [[ "$PRD_SOURCE" == "prd" ]]; then
|
|
1475
|
+
echo "
|
|
1476
|
+
ACCEPTANCE_CRITERIA:
|
|
1477
|
+
- [ ] or [x] <criterion 1> — <pass/fail with reason>
|
|
1478
|
+
- [ ] or [x] <criterion 2> — <pass/fail with reason>
|
|
1479
|
+
..."
|
|
1480
|
+
fi)
|
|
1481
|
+
ASSESSMENT: <1-2 sentence quality summary>$(
|
|
1482
|
+
if [[ "$PRD_SOURCE" == "prd" ]]; then
|
|
1483
|
+
echo "
|
|
1484
|
+
|
|
1485
|
+
IMPORTANT: This subtask has Acceptance Criteria (see EXPECTED block above).
|
|
1486
|
+
You MUST verify EACH acceptance criterion explicitly and report pass/fail for each one.
|
|
1487
|
+
A subtask only passes review if ALL acceptance criteria are met."
|
|
1488
|
+
fi)
|
|
1489
|
+
|
|
1490
|
+
Rules: Report only — do NOT approve/reject. Do NOT edit task.md. When done, print: done" \
|
|
1491
|
+
"review #${SUBTASK_NUM} by ${rev_agent}" &
|
|
1492
|
+
REVIEW_PIDS+=($!)
|
|
1493
|
+
|
|
1494
|
+
REVIEW_REPORTS+=("$REVIEW_REPORT")
|
|
1495
|
+
done
|
|
1496
|
+
|
|
1497
|
+
for rpid in "${REVIEW_PIDS[@]}"; do wait "$rpid" 2>/dev/null || true; done
|
|
1498
|
+
log "All reviewers finished"
|
|
1499
|
+
|
|
1500
|
+
# Consolidate reviews into state
|
|
1501
|
+
REVIEWS_SUMMARY=""
|
|
1502
|
+
for rr in "${REVIEW_REPORTS[@]}"; do
|
|
1503
|
+
rev_name=$(basename "$rr" .md)
|
|
1504
|
+
if [[ -f "$rr" ]] && [[ -s "$rr" ]]; then
|
|
1505
|
+
REVIEWS_SUMMARY+="
|
|
1506
|
+
=== ${rev_name} ===
|
|
1507
|
+
$(cat "$rr")
|
|
1508
|
+
=== END ==="
|
|
1509
|
+
fi
|
|
1510
|
+
done
|
|
1511
|
+
|
|
1512
|
+
# Append reviews to state for planner's next round
|
|
1513
|
+
echo -e "\n### Reviews for subtask #${SUBTASK_NUM} (round ${ROUND}):\n${REVIEWS_SUMMARY}" >> "$STATE_FILE"
|
|
1514
|
+
;;
|
|
1515
|
+
|
|
1516
|
+
FRONTEND_EXECUTE)
|
|
1517
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "FRONTEND_EXECUTE needs SUBTASK number"; continue; }
|
|
1518
|
+
[[ -z "$FRONTEND_DEV" ]] && { warn "No --frontend-dev agent assigned"; continue; }
|
|
1519
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1520
|
+
BLOCK=$(get_block $IDX)
|
|
1521
|
+
TITLE=$(get_title "$BLOCK")
|
|
1522
|
+
FE_SKILLS=$(get_skills_for "$FRONTEND_DEV")
|
|
1523
|
+
|
|
1524
|
+
echo -e "${BOLD}━━━ 🎨 FRONTEND_EXECUTE subtask #${SUBTASK_NUM}: ${TITLE} ━━━${NC}"
|
|
1525
|
+
|
|
1526
|
+
send_to_agent "$FRONTEND_DEV" "You are a frontend developer. Implement the following subtask in project ${PROJECT}.
|
|
1527
|
+
${FE_SKILLS}
|
|
1528
|
+
|
|
1529
|
+
SUBTASK #${SUBTASK_NUM}: ${TITLE}
|
|
1530
|
+
|
|
1531
|
+
${BLOCK}
|
|
1532
|
+
${DIRECTIVE_INSTRUCTIONS:+
|
|
1533
|
+
PLANNER INSTRUCTIONS:
|
|
1534
|
+
${DIRECTIVE_INSTRUCTIONS}
|
|
1535
|
+
}
|
|
1536
|
+
ENVIRONMENT:
|
|
1537
|
+
- Backend API: ${BACKEND_URL:-http://localhost:8080}
|
|
1538
|
+
- Frontend: ${FRONTEND_URL:-http://localhost:4200}
|
|
1539
|
+
- Both services are already running.
|
|
1540
|
+
|
|
1541
|
+
Steps:
|
|
1542
|
+
1. Read existing Angular code to understand patterns, component structure, and services
|
|
1543
|
+
2. Implement the Angular components, services, routes, and templates as specified
|
|
1544
|
+
3. After code changes, the frontend will hot-reload automatically
|
|
1545
|
+
4. Use chrome-devtools-mcp to verify your work:
|
|
1546
|
+
a. navigate_page to ${FRONTEND_URL:-http://localhost:4200}/your-route
|
|
1547
|
+
b. list_console_messages — check for errors
|
|
1548
|
+
c. list_network_requests — verify API calls succeed
|
|
1549
|
+
d. take_screenshot — visual confirmation
|
|
1550
|
+
e. fill_form / click — test interactive elements
|
|
1551
|
+
5. Do NOT edit task.md or any .agentic/ files
|
|
1552
|
+
6. When done, print: done" \
|
|
1553
|
+
"frontend execute subtask #${SUBTASK_NUM}"
|
|
1554
|
+
|
|
1555
|
+
# Mark in-progress
|
|
1556
|
+
LINE_NUM="${ALL_LINES[$IDX]%%:*}"
|
|
1557
|
+
sed -i '' "${LINE_NUM}s/- \[ \]/- [\/]/" "$TASK_FILE" 2>/dev/null || true
|
|
1558
|
+
;;
|
|
1559
|
+
|
|
1560
|
+
FRONTEND_REVIEW)
|
|
1561
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "FRONTEND_REVIEW needs SUBTASK number"; continue; }
|
|
1562
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1563
|
+
BLOCK=$(get_block $IDX)
|
|
1564
|
+
TITLE=$(get_title "$BLOCK")
|
|
1565
|
+
|
|
1566
|
+
echo -e "${BOLD}━━━ 🔍🎨 FRONTEND_REVIEW subtask #${SUBTASK_NUM}: ${TITLE} ━━━${NC}"
|
|
1567
|
+
|
|
1568
|
+
CHANGED_FILES=$(cd "$PROJECT" && git diff --name-only HEAD 2>/dev/null | head -20 || echo "(git not available)")
|
|
1569
|
+
|
|
1570
|
+
# Use frontend reviewers if available, otherwise fall back to backend reviewers
|
|
1571
|
+
FE_REV_LIST=()
|
|
1572
|
+
if [[ ${#FRONTEND_REVIEWER_LIST[@]} -gt 0 ]]; then
|
|
1573
|
+
FE_REV_LIST=("${FRONTEND_REVIEWER_LIST[@]}")
|
|
1574
|
+
fi
|
|
1575
|
+
[[ ${#FE_REV_LIST[@]} -eq 0 ]] && FE_REV_LIST=("${REVIEWER_LIST[@]}")
|
|
1576
|
+
|
|
1577
|
+
log "🎨 Sending to ${#FE_REV_LIST[@]} frontend reviewer(s) in parallel..."
|
|
1578
|
+
|
|
1579
|
+
REVIEW_REPORTS=()
|
|
1580
|
+
FE_REVIEW_PIDS=()
|
|
1581
|
+
for rev_agent in "${FE_REV_LIST[@]}"; do
|
|
1582
|
+
REVIEW_REPORT="${SESSION_DIR}/fe_review_r${ROUND}_${SUBTASK_NUM}_${rev_agent}.md"
|
|
1583
|
+
REV_SKILLS=$(get_skills_for "$rev_agent")
|
|
1584
|
+
|
|
1585
|
+
send_to_agent "$rev_agent" "You are a frontend reviewer. Review subtask #${SUBTASK_NUM} in project ${PROJECT}.
|
|
1586
|
+
${REV_SKILLS}
|
|
1587
|
+
|
|
1588
|
+
SUBTASK: ${TITLE}
|
|
1589
|
+
|
|
1590
|
+
EXPECTED:
|
|
1591
|
+
${BLOCK}
|
|
1592
|
+
|
|
1593
|
+
CHANGED FILES: ${CHANGED_FILES}
|
|
1594
|
+
${DIRECTIVE_INSTRUCTIONS:+
|
|
1595
|
+
FOCUS: ${DIRECTIVE_INSTRUCTIONS}
|
|
1596
|
+
}
|
|
1597
|
+
ENVIRONMENT:
|
|
1598
|
+
- Frontend running at: ${FRONTEND_URL:-http://localhost:4200}
|
|
1599
|
+
- Backend running at: ${BACKEND_URL:-http://localhost:8080}
|
|
1600
|
+
|
|
1601
|
+
Your review process (use chrome-devtools-mcp):
|
|
1602
|
+
1. navigate_page to the relevant page
|
|
1603
|
+
2. list_console_messages — report ANY errors or warnings
|
|
1604
|
+
3. list_network_requests — check API calls (status codes, failed requests)
|
|
1605
|
+
4. take_screenshot — capture the current state of the page
|
|
1606
|
+
5. Test interactions: fill_form, click buttons, verify behavior
|
|
1607
|
+
6. Check the Angular code: component structure, services, templates
|
|
1608
|
+
|
|
1609
|
+
Write your review report to: ${REVIEW_REPORT}
|
|
1610
|
+
|
|
1611
|
+
Report format:
|
|
1612
|
+
CONSOLE_ERRORS: <list of console errors, or 'None'>
|
|
1613
|
+
NETWORK_ISSUES: <failed API calls or unexpected responses, or 'None'>
|
|
1614
|
+
UI_STATUS: <does it render correctly? form validation? responsive?>
|
|
1615
|
+
CODE_QUALITY: <Angular best practices, component structure>
|
|
1616
|
+
SCREENSHOT: <describe what the page looks like>
|
|
1617
|
+
ASSESSMENT: <1-2 sentence overall quality summary>
|
|
1618
|
+
|
|
1619
|
+
Rules: Report only — do NOT approve/reject. Do NOT edit task.md. When done, print: done" \
|
|
1620
|
+
"frontend review #${SUBTASK_NUM} by ${rev_agent}" &
|
|
1621
|
+
FE_REVIEW_PIDS+=($!)
|
|
1622
|
+
|
|
1623
|
+
REVIEW_REPORTS+=("$REVIEW_REPORT")
|
|
1624
|
+
done
|
|
1625
|
+
|
|
1626
|
+
for rpid in "${FE_REVIEW_PIDS[@]}"; do wait "$rpid" 2>/dev/null || true; done
|
|
1627
|
+
log "All frontend reviewers finished"
|
|
1628
|
+
|
|
1629
|
+
# Consolidate frontend reviews into state
|
|
1630
|
+
REVIEWS_SUMMARY=""
|
|
1631
|
+
for rr in "${REVIEW_REPORTS[@]}"; do
|
|
1632
|
+
rev_name=$(basename "$rr" .md)
|
|
1633
|
+
if [[ -f "$rr" ]] && [[ -s "$rr" ]]; then
|
|
1634
|
+
REVIEWS_SUMMARY+="
|
|
1635
|
+
=== ${rev_name} ===
|
|
1636
|
+
$(cat "$rr")
|
|
1637
|
+
=== END ==="
|
|
1638
|
+
fi
|
|
1639
|
+
done
|
|
1640
|
+
|
|
1641
|
+
echo -e "\n### Frontend Reviews for subtask #${SUBTASK_NUM} (round ${ROUND}):\n${REVIEWS_SUMMARY}" >> "$STATE_FILE"
|
|
1642
|
+
;;
|
|
1643
|
+
|
|
1644
|
+
APPROVE)
|
|
1645
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "APPROVE needs SUBTASK number"; continue; }
|
|
1646
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1647
|
+
BLOCK=$(get_block $IDX)
|
|
1648
|
+
TITLE=$(get_title "$BLOCK")
|
|
1649
|
+
|
|
1650
|
+
# Mark done in task.md
|
|
1651
|
+
LINE_NUM="${ALL_LINES[$IDX]%%:*}"
|
|
1652
|
+
sed -i '' "${LINE_NUM}s/- \[.\]/- [x]/" "$TASK_FILE" 2>/dev/null || true
|
|
1653
|
+
|
|
1654
|
+
COMPLETED=$((COMPLETED + 1))
|
|
1655
|
+
success "Subtask #${SUBTASK_NUM} APPROVED ✓ (${COMPLETED}/${TOTAL})"
|
|
1656
|
+
|
|
1657
|
+
# Git commit
|
|
1658
|
+
if command -v git &>/dev/null && git -C "$PROJECT" rev-parse --is-inside-work-tree &>/dev/null 2>&1; then
|
|
1659
|
+
(cd "$PROJECT" && git add -A && git commit -m "feat: ${TITLE}" --no-verify 2>/dev/null) || true
|
|
1660
|
+
fi
|
|
1661
|
+
;;
|
|
1662
|
+
|
|
1663
|
+
SKIP)
|
|
1664
|
+
[[ -z "$SUBTASK_NUM" ]] && { warn "SKIP needs SUBTASK number"; continue; }
|
|
1665
|
+
IDX=$((SUBTASK_NUM - 1))
|
|
1666
|
+
|
|
1667
|
+
# Mark as done (skipped)
|
|
1668
|
+
LINE_NUM="${ALL_LINES[$IDX]%%:*}"
|
|
1669
|
+
sed -i '' "${LINE_NUM}s/- \[.\]/- [x]/" "$TASK_FILE" 2>/dev/null || true
|
|
1670
|
+
|
|
1671
|
+
COMPLETED=$((COMPLETED + 1))
|
|
1672
|
+
warn "Subtask #${SUBTASK_NUM} SKIPPED — ${DIRECTIVE_REASON:-no reason given}"
|
|
1673
|
+
;;
|
|
1674
|
+
|
|
1675
|
+
DONE)
|
|
1676
|
+
success "Planner says: ALL DONE — ${DIRECTIVE_REASON:-all subtasks completed}"
|
|
1677
|
+
break
|
|
1678
|
+
;;
|
|
1679
|
+
|
|
1680
|
+
*)
|
|
1681
|
+
warn "Unknown action: '${ACTION}' — asking planner again..."
|
|
1682
|
+
echo "- Round ${ROUND}: ERROR — unknown action '${ACTION}'" >> "$STATE_FILE"
|
|
1683
|
+
;;
|
|
1684
|
+
esac
|
|
1685
|
+
done
|
|
1686
|
+
|
|
1687
|
+
if [[ $ROUND -ge $MAX_ROUNDS ]] && [[ $COMPLETED -lt $TOTAL ]]; then
|
|
1688
|
+
warn "Max rounds (${MAX_ROUNDS}) reached with ${COMPLETED}/${TOTAL} done"
|
|
1689
|
+
fi
|
|
1690
|
+
|
|
1691
|
+
# ═══════════════════════════════════════════════════════════
|
|
1692
|
+
# CLEANUP SERVICES & DASHBOARD
|
|
1693
|
+
# ═══════════════════════════════════════════════════════════
|
|
1694
|
+
if $FRONTEND; then
|
|
1695
|
+
log "Stopping services..."
|
|
1696
|
+
stop_services
|
|
1697
|
+
fi
|
|
1698
|
+
if [[ -n "$DASHBOARD_PID" ]]; then
|
|
1699
|
+
log "Stopping dashboard (PID: ${DASHBOARD_PID})..."
|
|
1700
|
+
kill "$DASHBOARD_PID" 2>/dev/null || true
|
|
1701
|
+
fi
|
|
1702
|
+
|
|
1703
|
+
# ═══════════════════════════════════════════════════════════
|
|
1704
|
+
# FINAL REPORT
|
|
1705
|
+
# ═══════════════════════════════════════════════════════════
|
|
1706
|
+
echo ""
|
|
1707
|
+
echo -e "${BOLD}${MAGENTA}┌───────────────────────────────────────┐${NC}"
|
|
1708
|
+
echo -e "${BOLD}${MAGENTA}│ 📊 FINAL REPORT │${NC}"
|
|
1709
|
+
echo -e "${BOLD}${MAGENTA}└───────────────────────────────────────┘${NC}"
|
|
1710
|
+
echo -e " ${GREEN}Completed:${NC} ${COMPLETED}/${TOTAL}"
|
|
1711
|
+
echo -e " ${CYAN}Rounds:${NC} ${ROUND}"
|
|
1712
|
+
[[ $FAILED -gt 0 ]] && echo -e " ${RED}Failed:${NC} ${FAILED}"
|
|
1713
|
+
echo -e " ${DIM}Session: ${SESSION_DIR}${NC}"
|
|
1714
|
+
echo ""
|
|
1715
|
+
|
|
1716
|
+
# Show task.md status
|
|
1717
|
+
grep '\- \[' "$TASK_FILE" 2>/dev/null | while IFS= read -r line; do
|
|
1718
|
+
if echo "$line" | grep -q '\[x\]'; then
|
|
1719
|
+
echo -e " ${GREEN}${line}${NC}"
|
|
1720
|
+
else
|
|
1721
|
+
echo -e " ${RED}${line}${NC}"
|
|
1722
|
+
fi
|
|
1723
|
+
done
|
|
1724
|
+
echo ""
|
|
1725
|
+
|
|
1726
|
+
# ─── Archive completed plans ────────────────────────────
|
|
1727
|
+
if [[ $COMPLETED -eq $TOTAL ]]; then
|
|
1728
|
+
echo -e "${GREEN}${BOLD}✓ All ${TOTAL} subtasks completed!${NC}"
|
|
1729
|
+
|
|
1730
|
+
TASKS_DIR="${PROJECT}/docs/tasks"
|
|
1731
|
+
mkdir -p "$TASKS_DIR"
|
|
1732
|
+
|
|
1733
|
+
TASK_TITLE=$(grep -m1 '^# Task:' "$TASK_FILE" 2>/dev/null | sed 's/^# Task: *//' | xargs)
|
|
1734
|
+
[[ -z "$TASK_TITLE" ]] && TASK_TITLE="${TASK:-task}"
|
|
1735
|
+
|
|
1736
|
+
SLUG=$(echo "$TASK_TITLE" \
|
|
1737
|
+
| tr '[:upper:]' '[:lower:]' \
|
|
1738
|
+
| sed 's/[^a-z0-9 _-]//g' \
|
|
1739
|
+
| sed 's/ */ /g' \
|
|
1740
|
+
| sed 's/ /_/g' \
|
|
1741
|
+
| cut -c1-60)
|
|
1742
|
+
[[ -z "$SLUG" ]] && SLUG="task"
|
|
1743
|
+
|
|
1744
|
+
ARCHIVE_NAME="$(date +%Y%m%d)_${SLUG}.md"
|
|
1745
|
+
ARCHIVE_PATH="${TASKS_DIR}/${ARCHIVE_NAME}"
|
|
1746
|
+
|
|
1747
|
+
if [[ -f "$ARCHIVE_PATH" ]]; then
|
|
1748
|
+
COUNTER=2
|
|
1749
|
+
while [[ -f "${TASKS_DIR}/$(date +%Y%m%d)_${SLUG}_${COUNTER}.md" ]]; do
|
|
1750
|
+
COUNTER=$((COUNTER + 1))
|
|
1751
|
+
done
|
|
1752
|
+
ARCHIVE_NAME="$(date +%Y%m%d)_${SLUG}_${COUNTER}.md"
|
|
1753
|
+
ARCHIVE_PATH="${TASKS_DIR}/${ARCHIVE_NAME}"
|
|
1754
|
+
fi
|
|
1755
|
+
|
|
1756
|
+
mv "$TASK_FILE" "$ARCHIVE_PATH"
|
|
1757
|
+
success "Archived → docs/tasks/${ARCHIVE_NAME}"
|
|
1758
|
+
else
|
|
1759
|
+
echo -e "${YELLOW}${BOLD}⚠ ${COMPLETED}/${TOTAL} done. Re-run to continue.${NC}"
|
|
1760
|
+
echo -e " ${DIM}task.md kept for resume. Use --replan to start fresh.${NC}"
|
|
1761
|
+
fi
|
|
1762
|
+
echo ""
|