claude-fsd 1.5.22 → 1.5.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -0
- package/bin/claudefsd +11 -0
- package/bin/claudefsd-dev +67 -5
- package/package.json +1 -1
package/README.md
CHANGED
@@ -113,6 +113,7 @@ Generates a comprehensive development plan based on:
 - Analyzes all open tasks in PLAN.md
 - Intelligently executes tasks (sequentially or in parallel)
 - Updates the plan to track progress
+- Runs unit tests before declaring completion (if docs/unit-test exists)
 - Repeats until all tasks are complete or **<ALL DONE>** is detected
 
 ## Monitoring Progress (Like Tesla FSD)

@@ -158,6 +159,7 @@ your-project/
 │   ├── QUESTIONS.md           # Interview questions and answers
 │   ├── REQUIREMENTS.md        # Consolidated requirements from interview
 │   ├── IDEAS.md               # Future ideas and improvements
+│   ├── unit-test              # Executable test script (exit 0 = pass)
 │   └── INTERVIEW-SESSION.json # Interview session metadata
 ├── logs/                      # Logs from each AI session
 └── [your code files]
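The README's new layout entry documents the contract for docs/unit-test: an executable script whose exit code is the pass/fail signal. A minimal sketch of what a project might put there once it has real tests (the `npm test` command is an assumed example of a project's test runner, not something this package ships):

```bash
#!/bin/bash
# docs/unit-test - example project test runner (hypothetical; adapt to your stack)
# claudefsd-dev treats exit 0 as "tests pass"; any non-zero exit keeps the dev loop running.
set -e

npm test   # assumed test command, for illustration only
```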
package/bin/claudefsd
CHANGED
@@ -246,6 +246,17 @@ if [ $# -eq 0 ]; then
     echo "- Success criterion 1" >> "$WORKING_DIR/BRIEF.md"
     echo "- Success criterion 2" >> "$WORKING_DIR/BRIEF.md"
 
+    # Create placeholder unit-test script
+    cat > "$WORKING_DIR/unit-test" << 'EOF'
+#!/bin/bash
+# Unit tests for the project
+# Add your tests here. Exit 0 for success, non-zero for failure.
+
+echo "No tests defined yet - placeholder passes"
+exit 0
+EOF
+    chmod +x "$WORKING_DIR/unit-test"
+
     open_with_editor "$WORKING_DIR/BRIEF.md"
     echo
     echo -e "${GREEN}Brief created! Run claudefsd again to start the interview.${NC}"
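The placeholder written above always exits 0, so a freshly created project is not blocked by the new test gate until real tests replace it. A quick way to check the convention locally (a sketch assuming the default docs/ working directory):

```bash
# Invoke the script the same way the dev loop does and inspect its exit code.
bash docs/unit-test
echo "exit code: $?"   # 0 = pass; non-zero keeps claudefsd-dev iterating
```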
package/bin/claudefsd-dev
CHANGED
@@ -71,6 +71,9 @@ LOOP_COUNTER=0
 CONSECUTIVE_FAST_ITERATIONS=0
 MIN_ITERATION_TIME=300 # 5 minutes in seconds
 
+# Unit test failure tracking
+UNIT_TEST_FAILURE_INFO=""
+
 while true; do
     # Record iteration start time
     ITERATION_START_TIME=$(date +%s)

@@ -78,6 +81,8 @@ while true; do
     # Increment loop counter
     LOOP_COUNTER=$((LOOP_COUNTER + 1))
 
+    # Unit test failure info will persist from previous iteration if tests failed
+
     mkdir -p logs
     # Use a temporary directory for tmp files
     mkdir -p tmp

@@ -110,7 +115,19 @@
     fi
 
     # Build the development prompt combining intelligent task selection with parallel execution
-    DEVELOPMENT_PROMPT="$MEGATHINKING_MODE
+    DEVELOPMENT_PROMPT="$MEGATHINKING_MODE"
+
+    # Add unit test failure info if present
+    if [ -n "$UNIT_TEST_FAILURE_INFO" ]; then
+        DEVELOPMENT_PROMPT="$DEVELOPMENT_PROMPT
+==== CRITICAL: UNIT TEST FAILURE FROM PREVIOUS ITERATION ====
+$UNIT_TEST_FAILURE_INFO
+==== END UNIT TEST FAILURE INFO ====
+
+"
+    fi
+
+    DEVELOPMENT_PROMPT="$DEVELOPMENT_PROMPT
 You are an elite AI developer working in an automated development environment. Your job is to IMPLEMENT tasks from the project plan, not just analyze them. You can either implement tasks directly yourself or coordinate parallel Task agents for independent work.
 
 **PROJECT FILES TO READ AND ANALYZE:**"

@@ -284,10 +301,55 @@ Be thorough but concise in your verification."
 
     # Check if verifier has confirmed all tasks are complete (only in output section)
     if sed -n '/=== OUTPUT ===/,$p' $VERIFIER_LOGFILE | grep -q "^<VERIFIED_ALL_DONE>$"; then
-        echo -e "\033[
-        echo -e "\033[
-        echo -e "\033[
-
+        echo -e "\033[33m==================================================================\033[0m"
+        echo -e "\033[33m== VERIFIER THINKS WE'RE DONE - CHECKING UNIT TESTS...\033[0m"
+        echo -e "\033[33m==================================================================\033[0m"
+
+        # Check if unit test script exists
+        if [ -f "$WORKING_DIR/unit-test" ]; then
+            echo -e "\033[36mFound $WORKING_DIR/unit-test - running unit tests...\033[0m"
+            # Generate unit test log filename with working-dir token if set
+            if [ "$WORKING_DIR" != "docs" ]; then
+                WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
+                UNIT_TEST_LOGFILE="logs/unit-test-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
+            else
+                UNIT_TEST_LOGFILE="logs/unit-test-$(date +%Y%m%d_%H%M%S).txt"
+            fi
+
+            # Run unit tests and capture exit code
+            if bash "$WORKING_DIR/unit-test" 2>&1 | tee $UNIT_TEST_LOGFILE; then
+                echo -e "\033[32m==================================================================\033[0m"
+                echo -e "\033[32m== UNIT TESTS PASSED!\033[0m"
+                echo -e "\033[32m== PROJECT COMPLETE - ALL TASKS VERIFIED AND TESTED!\033[0m"
+                echo -e "\033[32m==================================================================\033[0m"
+                # Clear any previous unit test failure info
+                UNIT_TEST_FAILURE_INFO=""
+                exit 0
+            else
+                echo -e "\033[31m==================================================================\033[0m"
+                echo -e "\033[31m== UNIT TESTS FAILED!\033[0m"
+                echo -e "\033[31m==================================================================\033[0m"
+                echo -e "\033[31mUnit test output saved to: $UNIT_TEST_LOGFILE\033[0m"
+                echo -e "\033[31mContinuing development loop to fix failing tests...\033[0m"
+
+                # Store unit test failure info for next iteration
+                UNIT_TEST_FAILURE_INFO="UNIT TEST FAILURE DETECTED!
+The verifier thought all tasks were complete, but unit tests are failing.
+Unit test output is available in: $UNIT_TEST_LOGFILE
+You MUST fix these failing tests before the project can be considered complete."
+            fi
+        else
+            # No unit tests found, accept verifier's judgment
+            echo -e "\033[33mNo $WORKING_DIR/unit-test script found - accepting verifier's judgment\033[0m"
+            echo -e "\033[32m==================================================================\033[0m"
+            echo -e "\033[32m== PROJECT COMPLETE - ALL TASKS VERIFIED!\033[0m"
+            echo -e "\033[32m==================================================================\033[0m"
+            exit 0
+        fi
+    else
+        # Verifier doesn't think we're done, so clear any unit test failure info
+        # (we're back to working on regular tasks)
+        UNIT_TEST_FAILURE_INFO=""
     fi
 
     # Show timestamp after verifier run