claude-fsd 1.5.24 → 1.5.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -2
- package/bin/claudefsd +0 -11
- package/bin/claudefsd-analyze-brief +1 -8
- package/bin/claudefsd-analyze-brief-personas +1 -8
- package/bin/claudefsd-create-plan +1 -17
- package/bin/claudefsd-dev +7 -80
- package/package.json +1 -1
package/README.md
CHANGED
@@ -113,7 +113,6 @@ Generates a comprehensive development plan based on:
 - Analyzes all open tasks in PLAN.md
 - Intelligently executes tasks (sequentially or in parallel)
 - Updates the plan to track progress
-- Runs unit tests before declaring completion (if docs/unit-test exists)
 - Repeats until all tasks are complete or **<ALL DONE>** is detected
 
 ## Monitoring Progress (Like Tesla FSD)
@@ -159,7 +158,6 @@ your-project/
 │   ├── QUESTIONS.md            # Interview questions and answers
 │   ├── REQUIREMENTS.md         # Consolidated requirements from interview
 │   ├── IDEAS.md                # Future ideas and improvements
-│   ├── unit-test               # Executable test script (exit 0 = pass)
 │   └── INTERVIEW-SESSION.json  # Interview session metadata
 ├── logs/                       # Logs from each AI session
 └── [your code files]
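Both README edits reflect the same change: the unit-test gate is dropped from the dev loop. For orientation, the loop described above has roughly this shape (a sketch with invented placeholder names; the real logic lives in bin/claudefsd-dev):

# Sketch of the loop the README describes (placeholder function names):
while true; do
    run_developer_session    # implement open tasks from PLAN.md
    run_verifier_session     # review progress; may emit the sentinel
    if grep -q "<ALL DONE>" "$VERIFIER_LOGFILE"; then
        break                # all tasks complete
    fi
done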
package/bin/claudefsd
CHANGED
@@ -246,17 +246,6 @@ if [ $# -eq 0 ]; then
         echo "- Success criterion 1" >> "$WORKING_DIR/BRIEF.md"
         echo "- Success criterion 2" >> "$WORKING_DIR/BRIEF.md"
 
-        # Create placeholder unit-test script
-        cat > "$WORKING_DIR/unit-test" << 'EOF'
-#!/bin/bash
-# Unit tests for the project
-# Add your tests here. Exit 0 for success, non-zero for failure.
-
-echo "No tests defined yet - placeholder passes"
-exit 0
-EOF
-        chmod +x "$WORKING_DIR/unit-test"
-
         open_with_editor "$WORKING_DIR/BRIEF.md"
         echo
         echo -e "${GREEN}Brief created! Run claudefsd again to start the interview.${NC}"
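The placeholder script removed here encoded the contract the old README documented: $WORKING_DIR/unit-test is an executable whose exit status reports pass (0) or fail (non-zero). For reference, a minimal caller honoring that contract (illustrative only, not code from either version):

# Illustrative caller for the removed unit-test contract (not shipped code)
if [ -x "$WORKING_DIR/unit-test" ]; then
    if "$WORKING_DIR/unit-test"; then
        echo "unit tests passed"
    else
        echo "unit tests failed" >&2
    fi
fi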
package/bin/claudefsd-analyze-brief
CHANGED

@@ -32,14 +32,7 @@ if [ "$EDITOR" == "" ]; then
     EDITOR="nano"
 fi
 
-
-if [ "$WORKING_DIR" != "docs" ]; then
-    # Extract a clean token from the working directory path
-    WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
-    LOGFILE="logs/claude-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
-else
-    LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
-fi
+LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
 
 echo -e "\033[32m==================================================================\033[0m"
 echo -e "\033[32m== ANALYZING PROJECT BRIEF\033[0m"
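The branch dropped here (and, identically, in the sibling bin scripts below) sanitized a custom working-directory path into a log-filename token. A standalone illustration of that sed pipeline, using an invented input path (GNU sed, since the scripts rely on \+):

WORKING_DIR="work dirs/feature_x"   # example input path (invented)
WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
echo "$WORKING_DIR_TOKEN"           # prints: work-dirs-feature-x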
package/bin/claudefsd-analyze-brief-personas
CHANGED

@@ -54,14 +54,7 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-
-if [ "$WORKING_DIR" != "docs" ]; then
-    # Extract a clean token from the working directory path
-    WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
-    LOGFILE="logs/claude-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
-else
-    LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
-fi
+LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
 
 echo -e "\033[32m==================================================================\033[0m"
 echo -e "\033[32m== ANALYZING PROJECT BRIEF WITH EXPERT PERSONAS\033[0m"
package/bin/claudefsd-create-plan
CHANGED

@@ -44,14 +44,7 @@ if [ ! -f "$WORKING_DIR/QUESTIONS.md" ] && [ ! -f "$WORKING_DIR/REQUIREMENTS.md"
     exit 1
 fi
 
-
-if [ "$WORKING_DIR" != "docs" ]; then
-    # Extract a clean token from the working directory path
-    WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
-    LOGFILE="logs/claude-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
-else
-    LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
-fi
+LOGFILE="logs/claude-$(date +%Y%m%d_%H%M%S).txt"
 
 echo -e "\033[32m==================================================================\033[0m"
 echo -e "\033[32m== CREATING PLAN FROM PROJECT INPUTS\033[0m"
@@ -97,15 +90,6 @@ INFRASTRUCTURE PROPORTIONALITY RULES:
 - Follow existing claude-fsd pattern: separate focused scripts, not monoliths
 - FAIL LOUD - simple error handling, exit on failure
 - Choose infrastructure complexity appropriate to solution size
-
-WEB TESTING REQUIREMENTS:
-- If the project includes ANY web interface (including dashboards, web apps, or APIs with UI):
-- MUST include web testing tasks in the plan
-- Use playwright MCP for browser automation and testing
-- Create $WORKING_DIR/WEBTESTS.md to document web test scenarios
-- Test both structure/functionality AND visual appearance with screenshots
-- Include tests for: page rendering, user interactions, form submissions, navigation
-- Web tests should cover happy paths and key error scenarios
 "
 
 # run BA's
package/bin/claudefsd-dev
CHANGED
@@ -69,10 +69,7 @@ LOOP_COUNTER=0
 
 # Failure detection variables
 CONSECUTIVE_FAST_ITERATIONS=0
-MIN_ITERATION_TIME=
-
-# Unit test failure tracking
-UNIT_TEST_FAILURE_INFO=""
+MIN_ITERATION_TIME=300  # 5 minutes in seconds
 
 while true; do
     # Record iteration start time
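MIN_ITERATION_TIME pairs with CONSECUTIVE_FAST_ITERATIONS and the per-iteration timestamps to flag suspiciously fast loops. The check itself sits outside this hunk; a sketch of its presumable shape, assuming consecutive under-five-minute iterations count as a failure signal:

# Assumed shape of the fast-iteration check (the real code is outside this hunk):
ITERATION_DURATION=$((ITERATION_END_TIME - ITERATION_START_TIME))
if [ "$ITERATION_DURATION" -lt "$MIN_ITERATION_TIME" ]; then
    CONSECUTIVE_FAST_ITERATIONS=$((CONSECUTIVE_FAST_ITERATIONS + 1))
else
    CONSECUTIVE_FAST_ITERATIONS=0
fi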
@@ -81,21 +78,11 @@ while true; do
     # Increment loop counter
     LOOP_COUNTER=$((LOOP_COUNTER + 1))
 
-    # Unit test failure info will persist from previous iteration if tests failed
-
     mkdir -p logs
     # Use a temporary directory for tmp files
     mkdir -p tmp
     export TMPDIR=tmp/
-
-    # Generate log filename with working-dir token if set
-    if [ "$WORKING_DIR" != "docs" ]; then
-        # Extract a clean token from the working directory path
-        WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
-        LOGFILE="logs/claude-dev-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
-    else
-        LOGFILE="logs/claude-dev-$(date +%Y%m%d_%H%M%S).txt"
-    fi
+    LOGFILE="logs/claude-dev-$(date +%Y%m%d_%H%M%S).txt"
 
     echo "Logging to ${LOGFILE} ..."
 
@@ -115,19 +102,7 @@ while true; do
     fi
 
     # Build the development prompt combining intelligent task selection with parallel execution
-    DEVELOPMENT_PROMPT="$MEGATHINKING_MODE"
-
-    # Add unit test failure info if present
-    if [ -n "$UNIT_TEST_FAILURE_INFO" ]; then
-        DEVELOPMENT_PROMPT="$DEVELOPMENT_PROMPT
-==== CRITICAL: UNIT TEST FAILURE FROM PREVIOUS ITERATION ====
-$UNIT_TEST_FAILURE_INFO
-==== END UNIT TEST FAILURE INFO ====
-
-"
-    fi
-
-    DEVELOPMENT_PROMPT="$DEVELOPMENT_PROMPT
+    DEVELOPMENT_PROMPT="$MEGATHINKING_MODE
 You are an elite AI developer working in an automated development environment. Your job is to IMPLEMENT tasks from the project plan, not just analyze them. You can either implement tasks directly yourself or coordinate parallel Task agents for independent work.
 
 **PROJECT FILES TO READ AND ANALYZE:**"
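The deleted block relied on bash multi-line string appends to splice failure context into DEVELOPMENT_PROMPT. A self-contained demo of that append pattern (all text invented):

# Demo of the multi-line append pattern DEVELOPMENT_PROMPT used (invented text):
PROMPT="base instructions"
EXTRA="remember the failing tests"
if [ -n "$EXTRA" ]; then
    PROMPT="$PROMPT
==== EXTRA CONTEXT ====
$EXTRA"
fi
echo "$PROMPT"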
@@ -301,59 +276,11 @@ Be thorough but concise in your verification."
 
     # Check if verifier has confirmed all tasks are complete (only in output section)
     if sed -n '/=== OUTPUT ===/,$p' $VERIFIER_LOGFILE | grep -q "^<VERIFIED_ALL_DONE>$"; then
-        echo -e "\033[
-        echo -e "\033[
-        echo -e "\033[
-
-        # Check if unit test script exists
-        if [ -f "$WORKING_DIR/unit-test" ]; then
-            echo -e "\033[36mFound $WORKING_DIR/unit-test - running unit tests...\033[0m"
-            # Generate unit test log filename with working-dir token if set
-            if [ "$WORKING_DIR" != "docs" ]; then
-                WORKING_DIR_TOKEN=$(echo "$WORKING_DIR" | sed 's/[^a-zA-Z0-9]/-/g' | sed 's/^-\+//;s/-\+$//')
-                UNIT_TEST_LOGFILE="logs/unit-test-${WORKING_DIR_TOKEN}-$(date +%Y%m%d_%H%M%S).txt"
-            else
-                UNIT_TEST_LOGFILE="logs/unit-test-$(date +%Y%m%d_%H%M%S).txt"
-            fi
-
-            # Run unit tests and capture exit code
-            if "$WORKING_DIR/unit-test" 2>&1 | tee $UNIT_TEST_LOGFILE; then
-                echo -e "\033[32m==================================================================\033[0m"
-                echo -e "\033[32m== UNIT TESTS PASSED!\033[0m"
-                echo -e "\033[32m== PROJECT COMPLETE - ALL TASKS VERIFIED AND TESTED!\033[0m"
-                echo -e "\033[32m==================================================================\033[0m"
-                # Clear any previous unit test failure info
-                UNIT_TEST_FAILURE_INFO=""
-                exit 0
-            else
-                echo -e "\033[31m==================================================================\033[0m"
-                echo -e "\033[31m== UNIT TESTS FAILED!\033[0m"
-                echo -e "\033[31m==================================================================\033[0m"
-                echo -e "\033[31mUnit test output saved to: $UNIT_TEST_LOGFILE\033[0m"
-                echo -e "\033[31mContinuing development loop to fix failing tests...\033[0m"
-
-                # Store unit test failure info for next iteration
-                UNIT_TEST_FAILURE_INFO="UNIT TEST FAILURE DETECTED!
-The verifier thought all tasks were complete, but unit tests are failing.
-Unit test output is available in: $UNIT_TEST_LOGFILE
-You MUST fix these failing tests before the project can be considered complete."
-            fi
-        else
-            # No unit tests found, accept verifier's judgment
-            echo -e "\033[33mNo $WORKING_DIR/unit-test script found - accepting verifier's judgment\033[0m"
-            echo -e "\033[32m==================================================================\033[0m"
-            echo -e "\033[32m== PROJECT COMPLETE - ALL TASKS VERIFIED!\033[0m"
-            echo -e "\033[32m==================================================================\033[0m"
-            exit 0
-        fi
-    else
-        # Verifier doesn't think we're done, so clear any unit test failure info
-        # (we're back to working on regular tasks)
-        UNIT_TEST_FAILURE_INFO=""
+        echo -e "\033[32m==================================================================\033[0m"
+        echo -e "\033[32m== PROJECT COMPLETE - ALL TASKS VERIFIED!\033[0m"
+        echo -e "\033[32m==================================================================\033[0m"
+        exit 0
     fi
-
-    # Show timestamp after verifier run
-    echo -e "\033[36mVerifier completed at: $(date)\033[0m"
 
     # Calculate iteration duration and check for failure patterns
     ITERATION_END_TIME=$(date +%s)
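The unchanged completion check above restricts the sentinel search to the verifier's output section. A standalone demo of that sed/grep combination against an invented log file:

# Demo of the sentinel check (log contents invented):
printf 'analysis...\n=== OUTPUT ===\n<VERIFIED_ALL_DONE>\n' > tmp/verifier-demo.txt
if sed -n '/=== OUTPUT ===/,$p' tmp/verifier-demo.txt | grep -q "^<VERIFIED_ALL_DONE>$"; then
    echo "verifier reported all tasks done"
fi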