agentic-loop 3.2.11 → 3.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/ralph.sh +7 -0
- package/package.json +1 -1
- package/ralph/loop.sh +70 -6
- package/ralph/test.sh +181 -0
- package/ralph/verify/tests.sh +100 -0
- package/ralph/verify.sh +5 -2
- package/templates/PROMPT.md +15 -3
package/bin/ralph.sh
CHANGED
```diff
@@ -56,6 +56,7 @@ source "$RALPH_LIB/loop.sh"
 source "$RALPH_LIB/verify.sh"
 source "$RALPH_LIB/prd.sh"
 source "$RALPH_LIB/signs.sh"
+source "$RALPH_LIB/test.sh"
 
 # Run auto-config if config.json was just created
 if [[ "${_ralph_needs_autoconfig:-}" == "true" ]]; then
@@ -104,6 +105,12 @@ main() {
             fi
             run_verification "$1"
             ;;
+        test)
+            ralph_test "$@"
+            ;;
+        coverage)
+            ralph_test_coverage "$@"
+            ;;
         sign)
            ralph_sign "$@"
            ;;
```
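The new `test` and `coverage` arms route to the functions defined in `package/ralph/test.sh` below. A hypothetical nightly CI invocation might look like this; the `ralph` entry-point name and the CI wiring are assumptions, not part of this diff:

```bash
# Hypothetical nightly job; entry-point name is an assumption:
ralph test all       # unit tests + every PRD testStep (default mode)
ralph test unit      # unit tests only
ralph test prd       # only the testSteps recorded in the PRD
ralph coverage       # best-effort coverage report (pytest-cov or npm run test:coverage)
```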
package/package.json
CHANGED
package/ralph/loop.sh
CHANGED
```diff
@@ -182,6 +182,7 @@ run_loop() {
     local total_attempts=0
     local skipped_stories=()
     local start_time
+    local session_started=false  # Track if we've started a Claude session
     start_time=$(date +%s)
 
     while [[ $iteration -lt $max_iterations ]]; do
@@ -278,7 +279,7 @@ run_loop() {
 
         # Temporarily disable errexit to capture build_prompt errors
         set +e
-        build_prompt "$story" "$failure_context" > "$prompt_file" 2>&1
+        build_prompt "$story" "$failure_context" "$session_started" > "$prompt_file" 2>&1
         local build_status=$?
         set -e
 
@@ -323,18 +324,27 @@ run_loop() {
         local timeout_seconds
         timeout_seconds=$(get_config '.maxSessionSeconds' "$DEFAULT_TIMEOUT_SECONDS")
 
-        # Run Claude
-
+        # Run Claude - first story gets fresh session, subsequent continue the session
+        local claude_cmd="claude -p --dangerously-skip-permissions --verbose"
+        if [[ "$session_started" == "true" ]]; then
+            claude_cmd="claude --continue -p --dangerously-skip-permissions --verbose"
+        fi
+
+        if ! cat "$prompt_file" | run_with_timeout "$timeout_seconds" $claude_cmd; then
             print_warning "Claude session ended (timeout or error)"
             log_progress "$story" "TIMEOUT" "Claude session ended after ${timeout_seconds}s"
             rm -f "$prompt_file"
 
+            # Session may be broken - reset for next attempt
+            session_started=false
+
             # If running specific story, exit on failure
             [[ -n "$specific_story" ]] && return 1
             continue
         fi
 
         rm -f "$prompt_file"
+        session_started=true  # Mark session as active for subsequent stories
 
         # 5. Run migrations BEFORE verification (tests need DB schema)
         if ! run_migrations_if_needed "$pre_story_sha"; then
```
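Taken together, the three hunks above form a small session-reuse state machine around `session_started`. A minimal paraphrase of that flow, not the actual loop body, where the `stories` array and loop shape are illustrative:

```bash
# Paraphrase of the session-reuse logic in run_loop() (illustrative only):
session_started=false
for story in "${stories[@]}"; do
    claude_cmd="claude -p --dangerously-skip-permissions --verbose"
    if [[ "$session_started" == "true" ]]; then
        claude_cmd="claude --continue -p --dangerously-skip-permissions --verbose"
    fi
    if run_with_timeout "$timeout_seconds" $claude_cmd < "$prompt_file"; then
        session_started=true    # reuse this Claude session for the next story
    else
        session_started=false   # session may be broken, start fresh on retry
    fi
done
```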
```diff
@@ -611,6 +621,52 @@ _inject_architecture() {
     echo "- Scripts go in scripts/, docs go in docs/"
 }
 
+# Helper: Build delta prompt for continuing session
+# Minimal context - just new story + any failure info
+_build_delta_prompt() {
+    local story="$1"
+    local story_json="$2"
+    local failure_context="${3:-}"
+
+    echo ""
+    echo "---"
+    echo ""
+
+    # If this is a retry (failure context exists), note it
+    if [[ -n "$failure_context" ]]; then
+        echo "## Retry: Fix the errors below"
+        echo ""
+        echo '```'
+        echo "$failure_context"
+        echo '```'
+        echo ""
+    else
+        # New story - note previous completion
+        local completed_count
+        completed_count=$(jq '[.stories[] | select(.passes==true)] | length' "$RALPH_DIR/prd.json" 2>/dev/null || echo "0")
+        if [[ "$completed_count" -gt 0 ]]; then
+            echo "## Previous stories complete. Moving to next story."
+            echo ""
+            # Suggest compact if we've done several stories
+            if [[ "$completed_count" -ge 3 ]]; then
+                echo "*Consider running /compact if context feels heavy.*"
+                echo ""
+            fi
+        fi
+    fi
+
+    echo "## Current Story"
+    echo ""
+    echo '```json'
+    echo "$story_json"
+    echo '```'
+
+    # Include file guidance for the new story
+    _inject_file_guidance "$story_json"
+    _inject_story_scale "$story_json"
+    _inject_styleguide "$story_json"
+}
+
 # Helper: Inject failure context from previous iteration
 _inject_failure_context() {
     local failure_context="$1"
```
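To make the delta concrete: for a continuing session with no failure context and three or more completed stories, `_build_delta_prompt` emits roughly the following. The story JSON is illustrative and abbreviated, and the trailing `_inject_*` output is omitted:

````
---

## Previous stories complete. Moving to next story.

*Consider running /compact if context feels heavy.*

## Current Story

```json
{ "id": "US-042", "title": "Example story", "passes": false }
```
````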
```diff
@@ -653,17 +709,25 @@ _inject_developer_dna() {
 }
 
 # Build the prompt with story context injected
+# Usage: build_prompt <story_id> [failure_context] [is_continuation]
 build_prompt() {
     local story="$1"
     local failure_context="${2:-}"
-
-    # Read base PROMPT.md
-    cat "$PROMPT_FILE"
+    local is_continuation="${3:-false}"
 
     # Get story JSON once
     local story_json
     story_json=$(jq --arg id "$story" '.stories[] | select(.id==$id)' "$RALPH_DIR/prd.json")
 
+    if [[ "$is_continuation" == "true" ]]; then
+        # Delta prompt for continuing session - just new story context
+        _build_delta_prompt "$story" "$story_json" "$failure_context"
+        return
+    fi
+
+    # Full prompt for fresh session
+    cat "$PROMPT_FILE"
+
     # Inject all sections
     _inject_story_context "$story_json"
     _inject_file_guidance "$story_json"
```
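A sketch of the two call paths the new third argument enables; the argument values shown are illustrative:

```bash
# Fresh session: full PROMPT.md plus all injected sections
build_prompt "$story" "$failure_context" "false" > "$prompt_file"
# Continuing session: minimal delta prompt via _build_delta_prompt
build_prompt "$story" "$failure_context" "true" > "$prompt_file"
```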
package/ralph/test.sh
ADDED
```bash
#!/usr/bin/env bash
# shellcheck shell=bash
# test.sh - Comprehensive test runner for nightly builds
#
# Runs full test suite + all PRD testSteps from completed stories.
# Use this in nightly CI jobs, not on every PR.

# Run comprehensive tests (for nightly CI)
ralph_test() {
    local mode="${1:-all}"

    echo ""
    print_info "=== Ralph Nightly Test Suite ==="
    echo ""

    local failed=0
    local total=0
    local passed=0

    case "$mode" in
        all)
            run_full_test_suite || failed=1
            run_all_prd_tests || failed=1
            ;;
        unit)
            run_full_test_suite || failed=1
            ;;
        prd)
            run_all_prd_tests || failed=1
            ;;
        *)
            echo "Usage: ralph test [all|unit|prd]"
            echo ""
            echo "Modes:"
            echo "  all   - Run unit tests + all PRD testSteps (default)"
            echo "  unit  - Run only unit tests"
            echo "  prd   - Run only PRD testSteps from completed stories"
            return 1
            ;;
    esac

    echo ""
    if [[ $failed -eq 0 ]]; then
        print_success "=== All nightly tests passed ==="
        return 0
    else
        print_error "=== Nightly tests failed ==="
        return 1
    fi
}

# Run the full test suite
run_full_test_suite() {
    echo "--- Unit Tests ---"
    echo ""

    local test_cmd
    test_cmd=$(get_config '.checks.testCommand' "")

    if [[ -z "$test_cmd" ]]; then
        # Auto-detect test command
        if [[ -f "package.json" ]] && grep -q '"test"' package.json; then
            test_cmd="npm test"
        elif [[ -f "pytest.ini" ]] || [[ -f "pyproject.toml" ]]; then
            test_cmd="pytest -v"
        elif [[ -f "Cargo.toml" ]]; then
            test_cmd="cargo test"
        elif [[ -f "go.mod" ]]; then
            test_cmd="go test -v ./..."
        else
            print_warning "No test command found, skipping unit tests"
            return 0
        fi
    fi

    echo "Running: $test_cmd"
    echo ""

    if eval "$test_cmd"; then
        print_success "Unit tests passed"
        return 0
    else
        print_error "Unit tests failed"
        return 1
    fi
}

# Run all PRD testSteps from all stories (completed and incomplete)
run_all_prd_tests() {
    echo ""
    echo "--- PRD Test Steps ---"
    echo ""

    if [[ ! -f "$RALPH_DIR/prd.json" ]]; then
        print_warning "No PRD found, skipping PRD tests"
        return 0
    fi

    local failed=0
    local total=0
    local passed=0

    # Get all stories
    local stories
    stories=$(jq -r '.stories[].id' "$RALPH_DIR/prd.json" 2>/dev/null)

    if [[ -z "$stories" ]]; then
        echo "No stories found in PRD"
        return 0
    fi

    while IFS= read -r story_id; do
        [[ -z "$story_id" ]] && continue

        local story_title
        story_title=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .title' "$RALPH_DIR/prd.json")

        echo "[$story_id] $story_title"

        local test_steps
        test_steps=$(jq -r --arg id "$story_id" '.stories[] | select(.id==$id) | .testSteps[]?' "$RALPH_DIR/prd.json" 2>/dev/null)

        if [[ -z "$test_steps" ]]; then
            echo "  (no testSteps)"
            continue
        fi

        while IFS= read -r step; do
            [[ -z "$step" ]] && continue
            ((total++))

            echo -n "  $step... "

            if eval "$step" >/dev/null 2>&1; then
                print_success "passed"
                ((passed++))
            else
                print_error "failed"
                ((failed++))
            fi
        done <<< "$test_steps"

        echo ""
    done <<< "$stories"

    echo "PRD Tests: $passed/$total passed"

    [[ $failed -gt 0 ]] && return 1
    return 0
}

# Generate test coverage report
ralph_test_coverage() {
    echo ""
    print_info "=== Test Coverage Report ==="
    echo ""

    # Python coverage
    if [[ -f "pytest.ini" ]] || [[ -f "pyproject.toml" ]]; then
        local backend_dir
        backend_dir=$(get_config '.directories.backend' ".")

        echo "Running pytest with coverage..."
        if (cd "$backend_dir" && pytest --cov --cov-report=term-missing 2>/dev/null); then
            return 0
        else
            print_warning "Coverage report failed (pytest-cov may not be installed)"
            return 1
        fi
    fi

    # JS/TS coverage
    if [[ -f "package.json" ]] && grep -q '"test:coverage"' package.json; then
        echo "Running npm test:coverage..."
        npm run test:coverage
        return $?
    fi

    print_warning "No coverage tool detected"
    return 0
}
```
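`run_all_prd_tests` treats each `testSteps` entry as a shell command and runs it through `eval`, so steps need to be self-contained commands that exit non-zero on failure. An illustrative fragment of the shape the jq queries expect; the ids, titles, URLs, and commands below are made up:

```bash
# Illustrative shape of "$RALPH_DIR/prd.json" (values are examples only);
# jq . just pretty-prints the sample from stdin.
jq . <<'EOF'
{
  "stories": [
    {
      "id": "US-001",
      "title": "Health check endpoint",
      "passes": true,
      "testSteps": [
        "curl -sf http://localhost:8000/health",
        "pytest tests/test_health.py -q"
      ]
    }
  ]
}
EOF
```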
package/ralph/verify/tests.sh
CHANGED
```diff
@@ -2,6 +2,106 @@
 # shellcheck shell=bash
 # tests.sh - Test verification module for ralph
 
+# Check that new/modified source files have corresponding test files
+# This catches the case where Claude writes code but forgets tests
+verify_test_files_exist() {
+    local story_type="${RALPH_STORY_TYPE:-general}"
+
+    # Skip for frontend stories (handled differently with .test.tsx pattern)
+    [[ "$story_type" == "frontend" ]] && return 0
+
+    echo -n "  Test files exist for new code... "
+
+    # Get list of modified Python files (excluding tests themselves)
+    local modified_files
+    modified_files=$(git diff --name-only HEAD~1 2>/dev/null | grep '\.py$' | grep -v 'test_' | grep -v '_test\.py' | grep -v '/tests/' || true)
+
+    # If no Python files modified, skip
+    if [[ -z "$modified_files" ]]; then
+        print_success "skipped (no new Python files)"
+        return 0
+    fi
+
+    local missing_tests=()
+    local checked=0
+
+    while IFS= read -r src_file; do
+        [[ -z "$src_file" ]] && continue
+        [[ ! -f "$src_file" ]] && continue
+
+        # Skip __init__.py, migrations, config files
+        [[ "$src_file" == *"__init__.py" ]] && continue
+        [[ "$src_file" == *"/migrations/"* ]] && continue
+        [[ "$src_file" == *"/alembic/"* ]] && continue
+        [[ "$src_file" == *"config"* ]] && continue
+        [[ "$src_file" == *"settings"* ]] && continue
+
+        ((checked++))
+
+        # Determine expected test file location
+        local base_name dir_name test_file
+        base_name=$(basename "$src_file" .py)
+        dir_name=$(dirname "$src_file")
+
+        # Common patterns: tests/test_foo.py or foo_test.py
+        local possible_tests=(
+            "$dir_name/tests/test_${base_name}.py"
+            "$dir_name/test_${base_name}.py"
+            "${dir_name}/tests/${base_name}_test.py"
+            "tests/test_${base_name}.py"
+            "tests/${base_name}_test.py"
+        )
+
+        # Check for backend dir patterns
+        local backend_dir
+        backend_dir=$(get_config '.directories.backend' "")
+        if [[ -n "$backend_dir" ]]; then
+            possible_tests+=(
+                "$backend_dir/tests/test_${base_name}.py"
+                "$backend_dir/tests/${base_name}_test.py"
+            )
+        fi
+
+        local found=false
+        for test_path in "${possible_tests[@]}"; do
+            if [[ -f "$test_path" ]]; then
+                found=true
+                break
+            fi
+        done
+
+        if [[ "$found" == "false" ]]; then
+            missing_tests+=("$src_file")
+        fi
+    done <<< "$modified_files"
+
+    if [[ ${#missing_tests[@]} -eq 0 ]]; then
+        print_success "passed ($checked files checked)"
+        return 0
+    else
+        print_error "missing tests"
+        echo ""
+        echo "  The following files need test files:"
+        for file in "${missing_tests[@]}"; do
+            local base_name
+            base_name=$(basename "$file" .py)
+            echo "    $file → test_${base_name}.py"
+        done
+        echo ""
+        echo "  Create test files for new code before completing the story."
+
+        # Save for failure context
+        {
+            echo "Missing test files for new code:"
+            for file in "${missing_tests[@]}"; do
+                echo "  $file"
+            done
+        } > "$RALPH_DIR/last_test_existence_failure.log"
+
+        return 1
+    fi
+}
+
 # Run unit tests
 run_unit_tests() {
     local log_file
```
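The check only recognizes a handful of conventional layouts. A sketch of mappings that would satisfy it for a new module; the paths and the backend directory name are made up:

```bash
# Layouts accepted by verify_test_files_exist for a new app/services/billing.py
# (paths are illustrative):
#   app/services/tests/test_billing.py
#   app/services/test_billing.py
#   tests/test_billing.py
#   tests/billing_test.py
#   backend/tests/test_billing.py   # when .directories.backend is set to "backend"
mkdir -p app/services/tests
touch app/services/tests/test_billing.py   # any one of the above keeps the check green
```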
package/ralph/verify.sh
CHANGED
```diff
@@ -33,12 +33,15 @@ run_verification() {
     fi
 
     # ========================================
-    # STEP 2:
+    # STEP 2: Verify tests exist + run them
     # ========================================
     if [[ $failed -eq 0 ]]; then
         echo ""
         echo "  [2/3] Running tests..."
-
+        # First check that test files exist for new code
+        if ! verify_test_files_exist; then
+            failed=1
+        elif ! run_unit_tests; then
             failed=1
         fi
     fi
```
package/templates/PROMPT.md
CHANGED
```diff
@@ -22,9 +22,21 @@ For each story, you must:
 
 ### 2. Write Tests
 
-
-
-
+**Every new code file MUST have a corresponding test file.**
+
+For **backend** stories (Python/API):
+- New file `foo.py` → create `tests/test_foo.py`
+- Test each public function/method
+- Test error cases (invalid input, missing data, API failures)
+- Test edge cases (empty lists, None values, boundary conditions)
+- Use pytest fixtures for database/API mocking
+
+For **frontend** stories (TypeScript/React):
+- New component `Foo.tsx` → create `Foo.test.tsx`
+- Test rendering, user interactions, error states
+- Test loading states and empty states
+
+**Do NOT skip tests.** If you create code without tests, verification will fail.
 
 ### 3. Verify It Actually Works
 
```