agentic-loop 3.17.4 → 3.18.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/ralph/code-check.sh +4 -15
- package/ralph/loop.sh +17 -0
- package/ralph/prd-check.sh +325 -162
- package/ralph/setup.sh +27 -0
- package/ralph/utils.sh +21 -2
- package/ralph/verify/api.sh +1 -0
- package/templates/config/elixir.json +1 -1
- package/templates/config/fastmcp.json +1 -1
- package/templates/config/fullstack.json +1 -1
- package/templates/config/go.json +1 -1
- package/templates/config/minimal.json +1 -1
- package/templates/config/node.json +1 -1
- package/templates/config/python.json +1 -1
- package/templates/config/rust.json +1 -1
package/package.json
CHANGED
package/ralph/code-check.sh
CHANGED
@@ -247,7 +247,7 @@ _detect_structural_errors() {
   local error_content
   error_content=$(cat "$context_file")

-  # Schema/column errors -
+  # Schema/column errors - detect and flag for Claude context
   # Only show if not already detected (avoid duplicate markers on retry)
   if echo "$error_content" | grep -qiE "(column.*does not exist|relation.*does not exist|no such column|unknown column|undefined column)" && \
     ! grep -q ">>> STRUCTURAL ISSUE: Database schema mismatch" "$context_file" 2>/dev/null; then
@@ -256,28 +256,17 @@ _detect_structural_errors() {
     echo ""
     echo " The test database is missing columns/tables that the code expects."
     echo " This usually happens when:"
-    echo " - Migrations were added but test DB wasn't
+    echo " - Migrations were added but test DB wasn't updated"
     echo " - Models were modified without running migrations"
     echo ""
-    echo "
-    local reset_cmd
-    reset_cmd=$(get_config '.commands.resetDb' "")
-    if [[ -n "$reset_cmd" ]]; then
-      echo " $reset_cmd"
-    else
-      echo " # Add to .ralph/config.json:"
-      echo " {\"commands\": {\"resetDb\": \"npm run db:reset:test\"}}"
-      echo ""
-      echo " # Or run manually:"
-      echo " dropdb test_db && createdb test_db && alembic upgrade head"
-    fi
+    echo " Run pending migrations to fix the schema."
     echo ""

     # Append suggestion to failure context for Claude
     {
       echo ""
       echo ">>> STRUCTURAL ISSUE: Database schema mismatch"
-      echo ">>> ACTION NEEDED:
+      echo ">>> ACTION NEEDED: Run pending migrations to update the schema"
       echo ">>> This is NOT a code bug - the test DB is missing schema changes"
     } >> "$context_file"
   fi
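
For reference, the detect-and-flag logic above reduces to two greps: one for the schema-mismatch signature and one guarding against a marker that was already appended on an earlier retry. A minimal standalone sketch of that pattern (the file path and the early-exit guard are illustrative, not the package's exact code):

    #!/usr/bin/env bash
    context_file="${1:-/tmp/failure-context.txt}"   # hypothetical path for the demo
    [[ -f "$context_file" ]] || exit 0

    if grep -qiE '(column.*does not exist|no such column|unknown column)' "$context_file" \
       && ! grep -q '>>> STRUCTURAL ISSUE: Database schema mismatch' "$context_file"; then
      {
        echo ""
        echo ">>> STRUCTURAL ISSUE: Database schema mismatch"
        echo ">>> ACTION NEEDED: Run pending migrations to update the schema"
      } >> "$context_file"   # appended once; the second grep keeps retries from duplicating it
    fi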
package/ralph/loop.sh
CHANGED
@@ -88,6 +88,23 @@ preflight_checks() {
     fi
   done

+  # Check for timeout utility (critical for session enforcement)
+  printf " Timeout utility... "
+  if command -v timeout &>/dev/null; then
+    print_success "ok (timeout)"
+  elif command -v gtimeout &>/dev/null; then
+    print_success "ok (gtimeout)"
+  else
+    print_warning "not found (using bash fallback)"
+    echo " Session timeouts use a bash fallback. For better reliability:"
+    if [[ "$(uname)" == "Darwin" ]]; then
+      echo " brew install coreutils"
+    else
+      echo " Install GNU coreutils"
+    fi
+    ((warnings++))
+  fi
+
   echo ""
   if [[ $warnings -gt 0 ]]; then
     print_warning "$warnings pre-loop warning(s) - loop may fail on connectivity issues"
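
The bash fallback this warning refers to is the watchdog added to package/ralph/utils.sh further down in this diff. A trimmed, standalone sketch of that pattern, mainly to show the exit-code convention it mimics (the function name here is made up; GNU timeout also returns 124 when it has to kill the command):

    run_with_bash_timeout() {
      local seconds="$1"; shift
      "$@" &
      local cmd_pid=$!
      ( sleep "$seconds" && kill -TERM "$cmd_pid" 2>/dev/null ) &
      local watchdog_pid=$!
      wait "$cmd_pid"
      local exit_code=$?
      kill "$watchdog_pid" 2>/dev/null
      wait "$watchdog_pid" 2>/dev/null
      [[ $exit_code -eq 143 ]] && return 124   # 143 = 128 + SIGTERM, mapped to timeout's 124
      return "$exit_code"
    }

    run_with_bash_timeout 1 sleep 5; echo "timed out: $?"   # 124
    run_with_bash_timeout 5 true;    echo "finished:  $?"   # 0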
package/ralph/prd-check.sh
CHANGED
@@ -46,11 +46,7 @@
 # LIST ENDPOINTS (get all, index):
 # - Has pagination criteria (limit, page params)
 #
-#
-# - Has prerequisites array with DB reset command
-# - Prevents infinite retries on schema mismatch errors
-#
-# CUSTOM CHECKS (.ralph/checks/prd/ or ~/.config/ralph/checks/prd/):
+# CUSTOM CHECKS (.ralph/checks/prd/):
 # - User-provided scripts that receive story JSON on stdin
 # - Output issue descriptions to stdout (one per line)
 # - Excluded from auto-fix (reported for manual review)
@@ -63,12 +59,21 @@
 # ============================================================================
 # AUTO-FIX
 # ============================================================================
-# When issues are found,
+# When issues are found, a two-tier fix runs automatically:
+#
+# Tier 1 — Mechanical fixes (instant, no LLM):
+# - Missing mcp on frontend → ["playwright", "devtools"]
+# - Bare pytest → prefixed with detected runner (uv/poetry/pipenv)
+# - Missing camelCase note → standard text appended to .notes
+# - Server-only testSteps → offline fallback appended
 #
-#
-#
-#
-#
+# Tier 2 — Parallel Claude subagents (one per story, concurrent):
+# - For issues needing creative input (apiContract, prose testSteps, etc.)
+# - Each story gets a small prompt with just its JSON + specific issues
+# - All stories fix in parallel (wall-clock = time for 1 story)
+# - Results merged back via update_json; failures left unchanged
+#
+# Timestamped backup preserved before any modifications.
 #
 # If Claude is unavailable or fix fails, loop continues with warnings.
 #
@@ -81,7 +86,6 @@
 # .api.baseUrl - API base URL (enables API config validation)
 # .api.healthEndpoint - Health check path (default: /health, empty to disable)
 # .ralph/checks/prd/check-* - Project-level custom checks (per-story)
-# ~/.config/ralph/checks/prd/ - User-global custom checks (per-story)
 # .checks.custom.<name> - Enable/disable individual custom checks
 #
 # ============================================================================
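
As a concrete illustration of the Tier 1 "bare pytest" rewrite listed above: the jq gsub used later in this file prefixes a bare pytest invocation with the detected runner while leaving other steps alone, and the script skips stories that already use a runner-prefixed pytest, so nothing is double-prefixed. A standalone run on sample data, assuming a jq build with regex support:

    echo '["pytest tests/ -x", "curl -s http://localhost:8000/health"]' \
      | jq -c '[.[] | gsub("(?<pre>^|[; ])pytest "; "\(.pre)uv run pytest ")]'
    # ["uv run pytest tests/ -x","curl -s http://localhost:8000/health"]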
@@ -220,9 +224,13 @@ validate_prd() {
     echo ""
   fi

-  # Validate API smoke test configuration (skip in fast/cached mode)
+  # Validate API smoke test configuration in background (skip in fast/cached mode)
+  # Capture output to a temp file to avoid garbled terminal output
+  local api_check_pid="" api_check_output=""
   if [[ "$dry_run" != "true" ]]; then
-
+    api_check_output=$(create_temp_file ".api-check.out")
+    _validate_api_config "$config" > "$api_check_output" 2>&1 &
+    api_check_pid=$!
   fi

   # Replace hardcoded paths with config placeholders
@@ -232,6 +240,12 @@ validate_prd() {
   # dry_run flag — when "true", skip auto-fix
   _validate_and_fix_stories "$prd_file" "$dry_run" || return 1

+  # Wait for background API health check and print its output
+  if [[ -n "$api_check_pid" ]]; then
+    wait "$api_check_pid" 2>/dev/null
+    [[ -s "$api_check_output" ]] && cat "$api_check_output"
+  fi
+
   return 0
 }

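
The idea in these two hunks is to start the slow network check in the background, capture its output in a temp file, and print it only after the foreground validation finishes, so the two output streams never interleave. A minimal sketch of that pattern, with a sleep standing in for the real health check:

    out=$(mktemp)
    ( sleep 1; echo "API health: ok" ) > "$out" 2>&1 &   # background check, output captured
    check_pid=$!

    echo "validating stories in the foreground..."

    wait "$check_pid"
    [[ -s "$out" ]] && cat "$out"   # printed in one piece, after the foreground work
    rm -f "$out"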
@@ -325,7 +339,7 @@ _validate_and_fix_stories() {
   local cnt_no_tests=0 cnt_backend_curl=0 cnt_backend_contract=0
   local cnt_frontend_tsc=0 cnt_frontend_url=0 cnt_frontend_context=0 cnt_frontend_mcp=0
   local cnt_auth_security=0 cnt_list_pagination=0 cnt_prose_steps=0
-  local
+  local cnt_naming_convention=0 cnt_bare_pytest=0
   local cnt_server_only=0
   local cnt_custom=0

@@ -447,19 +461,6 @@ _validate_and_fix_stories() {
       fi
     fi

-    # Check 6: Migration stories need DB prerequisites
-    # If story creates migration files or modifies models, it needs resetDb prerequisite
-    local story_files
-    story_files=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | (.files.create // []) + (.files.modify // []) | join(" ")' "$prd_file")
-    if echo "$story_files" | grep -qiE "(alembic/versions|migrations/|\.migration\.|models\.py|models/|schema\.)"; then
-      local has_prereq
-      has_prereq=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .prerequisites // [] | length' "$prd_file")
-      if [[ "$has_prereq" == "0" ]]; then
-        story_issues+="migration story needs prerequisites (DB reset), "
-        cnt_migration_prereq=$((cnt_migration_prereq + 1))
-      fi
-    fi
-
     # Check 7: Frontend stories consuming APIs need naming convention notes
     # If story is frontend/general AND mentions API/fetch/axios, ensure notes include camelCase guidance
     if [[ "$story_type" == "frontend" || "$story_type" == "general" ]]; then
@@ -504,8 +505,8 @@ _validate_and_fix_stories() {
     # Snapshot built-in issues before custom checks append
     local builtin_story_issues="$story_issues"

-    # Check 8: User-defined custom checks (.ralph/checks/prd/
-    if [[ -d ".ralph/checks/prd" ]]
+    # Check 8: User-defined custom checks (.ralph/checks/prd/)
+    if [[ -d ".ralph/checks/prd" ]]; then
       local story_json
       story_json=$(jq --arg id "$story_id" '.stories[] | select(.id==$id)' "$prd_file")
       local custom_output
@@ -544,7 +545,6 @@ _validate_and_fix_stories() {
   [[ $cnt_frontend_mcp -gt 0 ]] && echo " ${cnt_frontend_mcp}x frontend: add mcp browser tools"
   [[ $cnt_auth_security -gt 0 ]] && echo " ${cnt_auth_security}x auth: add security criteria"
   [[ $cnt_list_pagination -gt 0 ]] && echo " ${cnt_list_pagination}x list: add pagination"
-  [[ $cnt_migration_prereq -gt 0 ]] && echo " ${cnt_migration_prereq}x migration: add prerequisites (DB reset)"
   [[ $cnt_naming_convention -gt 0 ]] && echo " ${cnt_naming_convention}x API consumer: add camelCase transformation note"
   [[ $cnt_bare_pytest -gt 0 ]] && echo " ${cnt_bare_pytest}x use 'uv run pytest' not bare 'pytest'"
   [[ $cnt_server_only -gt 0 ]] && echo " ${cnt_server_only}x all testSteps need live server (add offline fallback)"
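
Check 8 pipes each story's JSON into every executable .ralph/checks/prd/check-* script and treats each non-empty stdout line as an issue; the story id and PRD path are also passed as arguments, and a non-zero exit is reported as a failed check. A sketch of what such a custom check could look like, with the check name and the owner field invented for the example:

    #!/usr/bin/env bash
    # .ralph/checks/prd/check-owner (hypothetical): flag stories that have no owner field.
    story=$(cat)                                    # the story JSON arrives on stdin
    owner=$(echo "$story" | jq -r '.owner // empty')
    if [[ -z "$owner" ]]; then
      echo "missing owner field"                    # each non-empty stdout line becomes an issue
    fi
    exit 0                                          # a non-zero exit is reported as a failed check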
@@ -555,12 +555,32 @@ _validate_and_fix_stories() {
       return 0
     fi

-    #
-
-
+    # Create backup before any modifications
+    local backup_file="${prd_file}.$(date +%Y%m%d-%H%M%S).bak"
+    cp "$prd_file" "$backup_file"
+
+    # Tier 1: Instant mechanical fixes (no LLM needed)
+    _apply_mechanical_fixes "$prd_file"
+
+    # Re-check what's still broken after mechanical fixes
+    # validate_stories_quick returns "ID: issue, ID: issue, ..." on one line
+    # Group into one line per story for _fix_stories_parallel
+    local remaining_raw
+    remaining_raw=$(validate_stories_quick "$prd_file")
+    local remaining_grouped=""
+    [[ -n "$remaining_raw" ]] && remaining_grouped=$(_group_issues_by_story "$remaining_raw")
+
+    if [[ -n "$remaining_grouped" ]]; then
+      # Tier 2: Parallel Claude subagents for creative fixes
+      if command -v claude &>/dev/null; then
+        _fix_stories_parallel "$prd_file" "$remaining_grouped" "$backup_file"
+      else
+        print_warning "Claude CLI not found - mechanical fixes applied, but some stories need manual review"
+        echo " Backup at: $backup_file"
+        return 0
+      fi
     else
-
-      return 1
+      print_success "All issues resolved with mechanical fixes (backup at $backup_file)"
     fi
   else
     print_success "Test coverage looks good"
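
The grouping step referenced here (the helper is defined in the next hunk) turns the flat "ID: issue, ID: issue, ..." string from validate_stories_quick into one line per story, preserving first-seen order, so each fix job gets exactly one story's issues. An equivalent standalone illustration of that transformation, using awk only and sample input:

    raw="S1: missing curl tests, S1: missing apiContract, S2: missing testUrl, "

    echo "$raw" | tr ',' '\n' | awk '
      {
        sub(/^ +/, "")                          # trim the space left after each comma
        p = index($0, ": "); if (p == 0) next   # skip empty or malformed entries
        id = substr($0, 1, p - 1); issue = substr($0, p + 2)
        if (id in issues) issues[id] = issues[id] ", " issue
        else { order[++n] = id; issues[id] = issue }
      }
      END { for (i = 1; i <= n; i++) print order[i] ": " issues[order[i]] }'
    # S1: missing curl tests, missing apiContract
    # S2: missing testUrl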
@@ -579,49 +599,176 @@ _run_custom_prd_checks() {
   local custom_issues=""
   local custom_log="$RALPH_DIR/last_custom_check.log"

-  local
-  [[ -d "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  local check_dir=".ralph/checks/prd"
+  [[ ! -d "$check_dir" ]] && return 0
+
+  for check_script in "$check_dir"/check-*; do
+    [[ ! -f "$check_script" || ! -x "$check_script" ]] && continue
+
+    local check_key
+    check_key=$(basename "$check_script")
+    check_key="${check_key%.*}"
+    # Read directly instead of get_config — jq's // operator treats false as falsy
+    local enabled="true"
+    if [[ -f "$RALPH_DIR/config.json" ]]; then
+      local raw
+      raw=$(jq -r --arg key "$check_key" '.checks.custom[$key]' "$RALPH_DIR/config.json" 2>/dev/null)
+      [[ -n "$raw" && "$raw" != "null" ]] && enabled="$raw"
+    fi
+    [[ "$enabled" == "false" ]] && continue
+
+    # Run check — capture stdout for issues, stderr to log for debugging
+    local output=""
+    if ! output=$(echo "$story_json" | run_with_timeout 30 "$check_script" "$story_id" "$prd_file" 2>>"$custom_log"); then
+      # Script failed to execute — warn, don't silently swallow
+      print_warning "Custom check '$check_key' failed for story $story_id (see .ralph/last_custom_check.log)"
+    fi
+
+    if [[ -n "$output" ]]; then
+      while IFS= read -r line; do
+        [[ -n "$line" ]] && custom_issues+="${line}, "
+      done <<< "$output"
+    fi
+  done
+
+  echo "$custom_issues"
+}
+
+# Group flat "ID: issue, ID: issue, ..." string into one line per story
+# Input: "S1: missing curl tests, S1: missing apiContract, S2: missing testUrl, "
+# Output: "S1: missing curl tests, missing apiContract\nS2: missing testUrl"
+_group_issues_by_story() {
+  local raw="$1"
+  # Split on ", " boundaries that precede a story ID pattern (word: )
+  # Use awk to accumulate issues per story ID
+  echo "$raw" | tr ',' '\n' | sed 's/^ *//' | while IFS= read -r entry; do
+    [[ -z "$entry" ]] && continue
+    if [[ "$entry" =~ ^([A-Za-z0-9._-]+):\ (.+) ]]; then
+      echo "${BASH_REMATCH[1]} ${BASH_REMATCH[2]}"
+    fi
+  done | awk -F'\t' '{
+    if (seen[$1]) {
+      issues[$1] = issues[$1] ", " $2
+    } else {
+      seen[$1] = 1
+      issues[$1] = $2
+      order[++n] = $1
+    }
+  } END {
+    for (i = 1; i <= n; i++) {
+      print order[i] ": " issues[order[i]]
+    }
+  }'
+}
+
+# Apply instant mechanical fixes using jq (no LLM needed)
+# Fixes: missing mcp, bare pytest, missing camelCase note, missing migration prerequisites,
+# server-only testSteps
+_apply_mechanical_fixes() {
+  local prd_file="$1"
+  local fixed=0
+
+  # Detect Python runner once for bare pytest fixes
+  local py_runner
+  py_runner=$(detect_python_runner ".")
+
+  local story_ids
+  story_ids=$(jq -r '.stories[] | select(.passes != true) | .id' "$prd_file" 2>/dev/null)
+
+  while IFS= read -r story_id; do
+    [[ -z "$story_id" ]] && continue
+
+    local story_type
+    story_type=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .type // "unknown"' "$prd_file")
+
+    # Fix: Frontend missing mcp → set to ["playwright", "devtools"]
+    if [[ "$story_type" == "frontend" ]]; then
+      local mcp_len
+      mcp_len=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .mcp // [] | length' "$prd_file")
+      if [[ "$mcp_len" == "0" ]]; then
+        update_json "$prd_file" --arg id "$story_id" \
+          '(.stories[] | select(.id==$id) | .mcp) = ["playwright", "devtools"]' && fixed=$((fixed + 1))
       fi
-
+    fi

-
-
-
-
-
+    # Fix: Bare pytest → prefix with detected runner
+    if [[ -n "$py_runner" ]]; then
+      local test_steps_raw
+      test_steps_raw=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .testSteps // [] | join("\n")' "$prd_file")
+      if echo "$test_steps_raw" | grep -qE '(^|[; ])pytest ' && ! echo "$test_steps_raw" | grep -qE "(uv run|poetry run|pipenv run) pytest"; then
+        update_json "$prd_file" --arg id "$story_id" --arg runner "$py_runner" \
+          '(.stories[] | select(.id==$id) | .testSteps) |= [.[]? | gsub("(?<pre>^|[; ])pytest "; "\(.pre)\($runner) pytest ")]' && fixed=$((fixed + 1))
      fi
+    fi

-
-
-
-
+    # Fix: Frontend/general API consumer missing camelCase note
+    if [[ "$story_type" == "frontend" || "$story_type" == "general" ]]; then
+      local story_desc
+      story_desc=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | (.title + " " + (.acceptanceCriteria // [] | join(" ")) + " " + (.notes // ""))' "$prd_file")
+      if echo "$story_desc" | grep -qiE "(api|fetch|axios|endpoint|backend|response)"; then
+        local story_notes
+        story_notes=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .notes // ""' "$prd_file")
+        if ! echo "$story_notes" | grep -qiE "(camelCase|snake_case|naming)"; then
+          local camel_note="Transform API responses from snake_case to camelCase. Create typed interfaces with camelCase properties."
+          if [[ -z "$story_notes" ]]; then
+            update_json "$prd_file" --arg id "$story_id" --arg note "$camel_note" \
+              '(.stories[] | select(.id==$id) | .notes) = $note' && fixed=$((fixed + 1))
+          else
+            update_json "$prd_file" --arg id "$story_id" --arg note "$camel_note" \
+              '(.stories[] | select(.id==$id) | .notes) += (" " + $note)' && fixed=$((fixed + 1))
+          fi
+        fi
      fi
-
-    done
+    fi

-
+    # Fix: All testSteps are server-dependent → append offline test step
+    local test_steps
+    test_steps=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .testSteps // [] | join(" ")' "$prd_file")
+    if [[ -n "$test_steps" ]]; then
+      local has_offline=false has_server=false
+      local step_list
+      step_list=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .testSteps[]?' "$prd_file")
+      while IFS= read -r single_step; do
+        [[ -z "$single_step" ]] && continue
+        if echo "$single_step" | grep -qE "^(curl |wget |http )"; then
+          has_server=true
+        else
+          has_offline=true
+        fi
+      done <<< "$step_list"
+
+      if [[ "$has_server" == "true" && "$has_offline" == "false" ]]; then
+        # Pick an offline step based on story type and project tooling
+        local offline_step="npx tsc --noEmit"
+        if [[ "$story_type" == "backend" ]]; then
+          if [[ -n "$py_runner" ]]; then
+            offline_step="$py_runner pytest tests/unit/"
+          elif [[ -f "go.mod" ]]; then
+            offline_step="go test ./..."
+          else
+            offline_step="npm test"
+          fi
+        fi
+        update_json "$prd_file" --arg id "$story_id" --arg step "$offline_step" \
+          '(.stories[] | select(.id==$id) | .testSteps) += [$step]' && fixed=$((fixed + 1))
+      fi
+    fi
+
+  done <<< "$story_ids"
+
+  if [[ $fixed -gt 0 ]]; then
+    echo " Applied $fixed mechanical fixes (no LLM needed)"
+  fi
+
+  return 0
 }

-#
-
+# Fix stories with remaining issues using parallel Claude subagents (one per story)
+# $1: prd_file $2: newline-separated "story_id: issues" lines $3: backup file path
+_fix_stories_parallel() {
   local prd_file="$1"
   local issues="$2"
+  local backup_file="$3"

   # Read config values for context
   local config_file="$RALPH_DIR/config.json"
@@ -631,102 +778,129 @@ _fix_stories_with_claude() {
     frontend_url=$(jq -r '.urls.frontend // .playwright.baseUrl // "http://localhost:3000"' "$config_file" 2>/dev/null)
   fi

-
+  # Parse issues into per-story fix jobs
+  local pids=()
+  local story_ids_to_fix=()
+  local output_files=()
+
+  while IFS= read -r line; do
+    [[ -z "$line" ]] && continue
+    local sid="${line%%:*}"
+    local story_issues="${line#*: }"
+    [[ -z "$sid" || -z "$story_issues" ]] && continue
+
+    # Extract this story's JSON
+    local story_json
+    story_json=$(jq --arg id "$sid" '.stories[] | select(.id==$id)' "$prd_file" 2>/dev/null)
+    [[ -z "$story_json" ]] && continue

-
-
+    # Build a small per-story prompt
+    local prompt_file
+    prompt_file=$(create_temp_file ".prompt.txt")
+    local output_file
+    output_file=$(create_temp_file ".fix.json")

-
+    cat > "$prompt_file" <<PROMPT_EOF
+Fix this story's issues. Output ONLY the fixed story JSON object (not the full PRD).
+
+STORY JSON:
+$story_json
+
+ISSUES TO FIX:
+$story_issues
+
+CONFIG VALUES:
 - Backend URL: $backend_url (use as {config.urls.backend} in testSteps)
 - Frontend URL: $frontend_url (use as {config.urls.frontend} in testUrl)

 RULES:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-This prevents wasted retries when the server isn't running.
-
-CURRENT PRD:
-$(cat "$prd_file")
-
-Output ONLY the fixed JSON, no explanation. Start with { and end with }."
-
-local raw_response
-raw_response=$(echo "$fix_prompt" | run_with_timeout "$CODE_REVIEW_TIMEOUT_SECONDS" claude -p 2>/dev/null)
-
-# Extract JSON from response (Claude often wraps in markdown code fences)
-local fixed_prd
-# First strip markdown code fences if present
-fixed_prd=$(echo "$raw_response" | sed 's/^```json//; s/^```$//' | sed -n '/^[[:space:]]*{/,/^[[:space:]]*}[[:space:]]*$/p' | head -1000)
-
-# If sed extraction failed, try removing fences and using raw
-if [[ -z "$fixed_prd" ]]; then
-fixed_prd=$(echo "$raw_response" | sed 's/^```json//; s/^```//; s/```$//')
+- Backend stories MUST have testSteps with curl commands hitting real endpoints
+  Example: curl -s -X POST {config.urls.backend}/api/users -d '...' | jq -e '.id'
+- Backend stories MUST have apiContract with endpoint, request, response
+- Frontend stories MUST have testUrl set to {config.urls.frontend}/[page-path]
+- Frontend stories MUST have contextFiles array
+- Auth stories MUST have security acceptanceCriteria (bcrypt, rate limiting)
+- List endpoints MUST have pagination acceptanceCriteria (?page=N&limit=N)
+- Stories with only curl testSteps MUST also have an offline test step (npm test, tsc --noEmit, pytest)
+- Keep ALL existing fields. Only add/fix what's missing.
+
+Output ONLY the fixed story JSON object. Start with { and end with }.
+PROMPT_EOF
+
+    # Background a Claude call for this story
+    ( run_with_timeout 60 claude -p < "$prompt_file" > "$output_file" 2>/dev/null ) &
+    pids+=($!)
+    story_ids_to_fix+=("$sid")
+    output_files+=("$output_file")
+  done <<< "$issues"
+
+  local job_count=${#pids[@]}
+  if [[ $job_count -eq 0 ]]; then
+    return 0
   fi

-
-local backup_file="${prd_file}.$(date +%Y%m%d-%H%M%S).bak"
-cp "$prd_file" "$backup_file"
-
-# Get original story count for validation
-local orig_story_count
-orig_story_count=$(jq '.stories | length' "$prd_file" 2>/dev/null || echo "0")
-
-# Validate the response is valid JSON with required structure
-if echo "$fixed_prd" | jq -e '.stories' >/dev/null 2>&1; then
-# Critical: Check story count is preserved (not just that .stories exists)
-local new_story_count
-new_story_count=$(echo "$fixed_prd" | jq '.stories | length' 2>/dev/null || echo "0")
-if [[ "$new_story_count" -lt "$orig_story_count" ]]; then
-print_warning "Fixed PRD has fewer stories ($orig_story_count -> $new_story_count) - keeping original"
-echo " Backup preserved at: $backup_file"
-return 0
-fi
+  echo " Fixing $job_count stories in parallel..."

-
-
-
-
-
-
-
-
+  # Wait for all background jobs
+  for pid in "${pids[@]}"; do
+    wait "$pid" 2>/dev/null
+  done
+
+  # Merge results back into PRD
+  local merged=0 failed=0
+  for i in "${!story_ids_to_fix[@]}"; do
+    local sid="${story_ids_to_fix[$i]}"
+    local output_file="${output_files[$i]}"
+
+    [[ ! -s "$output_file" ]] && { failed=$((failed + 1)); continue; }
+
+    # Extract JSON from response (strip markdown fences if present)
+    local raw_response
+    raw_response=$(cat "$output_file")
+    local fixed_story
+    fixed_story=$(echo "$raw_response" | sed 's/^```json//; s/^```$//' | sed -n '/^[[:space:]]*{/,/^[[:space:]]*}[[:space:]]*$/p')
+
+    if [[ -z "$fixed_story" ]]; then
+      fixed_story=$(echo "$raw_response" | sed 's/^```json//; s/^```//; s/```$//')
     fi

-#
-
-
+    # Validate it's a valid JSON object with an id field matching this story
+    local response_id
+    response_id=$(echo "$fixed_story" | jq -r '.id // empty' 2>/dev/null)
+    if [[ "$response_id" != "$sid" ]]; then
+      # Try to salvage: if valid JSON, force the correct id
+      if echo "$fixed_story" | jq -e '.' >/dev/null 2>&1; then
+        fixed_story=$(echo "$fixed_story" | jq --arg id "$sid" '.id = $id')
+        response_id="$sid"
+      else
+        failed=$((failed + 1))
+        continue
+      fi
+    fi

-#
-local
-
-if
-
+    # Merge fixed story back into PRD using update_json
+    local fixed_story_escaped
+    fixed_story_escaped=$(echo "$fixed_story" | jq -c '.')
+    if update_json "$prd_file" --arg id "$sid" --argjson fixed "$fixed_story_escaped" \
+      '(.stories[] | select(.id==$id)) = $fixed'; then
+      merged=$((merged + 1))
+    else
+      failed=$((failed + 1))
     fi
-
-
-
-
+  done
+
+  if [[ $merged -gt 0 ]]; then
+    print_success "Fixed $merged stories with Claude (backup at $backup_file)"
+  fi
+  if [[ $failed -gt 0 ]]; then
+    print_warning "$failed stories could not be auto-fixed — review with /prd"
+  fi
+
+  # Final validation pass
+  local remaining_issues
+  remaining_issues=$(validate_stories_quick "$prd_file")
+  if [[ -n "$remaining_issues" ]]; then
+    echo " Some stories may still need manual review"
   fi
 }

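
Each subagent returns a single story object, and the merge step swaps it into the PRD by id; the core of it is one jq assignment (update_json presumably wraps the same filter with temp-file handling). A standalone sketch of that merge with sample data:

    prd=$(mktemp)
    echo '{"stories":[{"id":"S1","title":"Login API"},{"id":"S2","title":"Signup page"}]}' > "$prd"

    fixed='{"id":"S1","title":"Login API","testSteps":["curl -s {config.urls.backend}/api/login | jq -e .id"]}'

    tmp=$(mktemp)
    jq --argjson fixed "$fixed" \
       '(.stories[] | select(.id == $fixed.id)) = $fixed' "$prd" > "$tmp" && mv "$tmp" "$prd"

    jq -c '.stories[0]' "$prd"   # S1 now carries the added testSteps; S2 is untouched
    rm -f "$prd"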
@@ -800,17 +974,6 @@ validate_stories_quick() {
       fi
     fi

-    # Check 6: Migration stories need prerequisites
-    local story_files
-    story_files=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | (.files.create // []) + (.files.modify // []) | join(" ")' "$prd_file")
-    if echo "$story_files" | grep -qiE "(alembic/versions|migrations/|\.migration\.|models\.py|models/|schema\.)"; then
-      local has_prereq
-      has_prereq=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .prerequisites // [] | length' "$prd_file")
-      if [[ "$has_prereq" == "0" ]]; then
-        issues+="$story_id: migration needs prerequisites, "
-      fi
-    fi
-
     # Check 7: Frontend/general stories consuming APIs need naming convention notes
     if [[ "$story_type" == "frontend" || "$story_type" == "general" ]]; then
       local story_desc
package/ralph/setup.sh
CHANGED
@@ -92,6 +92,33 @@ ralph_setup() {
   local pkg_root
   pkg_root="$(cd "$RALPH_LIB/.." && pwd)"

+  # Install timeout utility if missing (critical for session enforcement)
+  if ! command -v timeout &>/dev/null && ! command -v gtimeout &>/dev/null; then
+    if [[ "$(uname)" == "Darwin" ]]; then
+      if command -v brew &>/dev/null; then
+        echo ""
+        echo " Installing coreutils (provides gtimeout for session enforcement)..."
+        if brew install coreutils 2>/dev/null; then
+          print_success " coreutils installed"
+        else
+          print_warning " Failed to install coreutils — session timeouts will use a bash fallback"
+          echo " Try manually: brew install coreutils"
+        fi
+        echo ""
+      else
+        echo ""
+        print_warning "No timeout utility found — session timeouts will use a bash fallback"
+        echo " Install Homebrew (https://brew.sh), then: brew install coreutils"
+        echo ""
+      fi
+    else
+      echo ""
+      print_warning "No timeout utility found — session timeouts will use a bash fallback"
+      echo " Install GNU coreutils for reliable timeout enforcement"
+      echo ""
+    fi
+  fi
+
   # Run all setup steps
   setup_ralph_dir "$pkg_root"
   setup_custom_checks
package/ralph/utils.sh
CHANGED
@@ -316,8 +316,27 @@ run_with_timeout() {
   elif command -v gtimeout &>/dev/null; then
     gtimeout "$seconds" "$@"
   else
-    #
-
+    # Bash-native fallback: background the command, kill after timeout.
+    # Capture stdin to a temp file so the backgrounded process can read it
+    # (backgrounded commands lose access to the pipeline's stdin).
+    local stdin_file
+    stdin_file=$(mktemp)
+    cat > "$stdin_file"
+    "$@" < "$stdin_file" &
+    local cmd_pid=$!
+    rm -f "$stdin_file"
+    ( sleep "$seconds" && kill -TERM "$cmd_pid" 2>/dev/null ) &
+    local watchdog_pid=$!
+    wait "$cmd_pid" 2>/dev/null
+    local exit_code=$?
+    kill "$watchdog_pid" 2>/dev/null
+    wait "$watchdog_pid" 2>/dev/null
+    # If the process received SIGTERM, return 124 (same as GNU timeout).
+    # Note: this cannot distinguish our watchdog from other SIGTERM sources.
+    if [[ $exit_code -eq 143 ]]; then # 143 = 128 + 15 (SIGTERM)
+      return 124
+    fi
+    return "$exit_code"
   fi
 }

package/ralph/verify/api.sh
CHANGED
@@ -140,6 +140,7 @@ run_frontend_smoke_test() {
   # 3. Story-specific testUrl from PRD
   local test_url
   test_url=$(jq -r --arg id "$story" '.stories[] | select(.id==$id) | .testUrl // empty' "$RALPH_DIR/prd.json" 2>/dev/null)
+  test_url=$(_expand_config_vars "$test_url")
   if [[ -n "$test_url" ]]; then
     # testUrl can be full URL or just path
     if [[ "$test_url" =~ ^https?:// ]]; then
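
The only change here is that testUrl now passes through _expand_config_vars before use, so {config.urls.*} placeholders in story testUrls presumably resolve the same way they do elsewhere. The helper's implementation is not part of this diff; the sketch below shows what that kind of expansion typically looks like, with the function body, config path, and defaults all assumed:

    expand_config_vars() {
      local value="$1" frontend backend
      frontend=$(jq -r '.urls.frontend // empty' .ralph/config.json 2>/dev/null)
      backend=$(jq -r '.urls.backend // empty' .ralph/config.json 2>/dev/null)
      value="${value//\{config.urls.frontend\}/${frontend:-http://localhost:3000}}"
      value="${value//\{config.urls.backend\}/${backend:-http://localhost:8000}}"
      echo "$value"
    }

    expand_config_vars "{config.urls.frontend}/login"   # -> http://localhost:3000/login with no config override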
package/templates/config/go.json
CHANGED