safeword 0.6.9 → 0.7.1
- package/dist/{check-OYYSYHFP.js → check-JWMAZLUO.js} +73 -57
- package/dist/check-JWMAZLUO.js.map +1 -0
- package/dist/{chunk-ZS3Z3Q37.js → chunk-CO3LARDH.js} +297 -65
- package/dist/chunk-CO3LARDH.js.map +1 -0
- package/dist/{chunk-LNSEDZIW.js → chunk-R4SBJKFJ.js} +159 -152
- package/dist/chunk-R4SBJKFJ.js.map +1 -0
- package/dist/{sync-BFMXZEHM.js → chunk-YNVT2S3D.js} +11 -40
- package/dist/chunk-YNVT2S3D.js.map +1 -0
- package/dist/cli.js +6 -6
- package/dist/{diff-325TIZ63.js → diff-2XITKG3T.js} +51 -53
- package/dist/diff-2XITKG3T.js.map +1 -0
- package/dist/index.d.ts +1 -0
- package/dist/{reset-ZGJIKMUW.js → reset-DOGYUFAG.js} +3 -3
- package/dist/{setup-GAMXTFM2.js → setup-UHSXXN6I.js} +17 -20
- package/dist/setup-UHSXXN6I.js.map +1 -0
- package/dist/sync-QPNODJBJ.js +9 -0
- package/dist/sync-QPNODJBJ.js.map +1 -0
- package/dist/{upgrade-X4GREJXN.js → upgrade-GTY3MG6A.js} +10 -7
- package/dist/upgrade-GTY3MG6A.js.map +1 -0
- package/package.json +1 -1
- package/templates/SAFEWORD.md +52 -15
- package/templates/commands/architecture.md +1 -1
- package/templates/commands/lint.md +1 -0
- package/templates/commands/quality-review.md +1 -1
- package/templates/cursor/rules/safeword-core.mdc +5 -0
- package/templates/doc-templates/architecture-template.md +1 -1
- package/templates/doc-templates/task-spec-template.md +151 -0
- package/templates/doc-templates/ticket-template.md +2 -4
- package/templates/guides/architecture-guide.md +2 -2
- package/templates/guides/code-philosophy.md +1 -1
- package/templates/guides/context-files-guide.md +3 -3
- package/templates/guides/design-doc-guide.md +2 -2
- package/templates/guides/development-workflow.md +2 -2
- package/templates/guides/learning-extraction.md +9 -9
- package/templates/guides/tdd-best-practices.md +39 -38
- package/templates/guides/test-definitions-guide.md +15 -14
- package/templates/hooks/cursor/after-file-edit.sh +66 -0
- package/templates/hooks/cursor/stop.sh +50 -0
- package/templates/hooks/post-tool-lint.sh +19 -5
- package/templates/hooks/prompt-questions.sh +1 -1
- package/templates/hooks/session-lint-check.sh +1 -1
- package/templates/hooks/session-verify-agents.sh +1 -1
- package/templates/hooks/session-version.sh +1 -1
- package/templates/hooks/stop-quality.sh +1 -1
- package/templates/markdownlint-cli2.jsonc +18 -19
- package/templates/scripts/bisect-test-pollution.sh +87 -0
- package/templates/scripts/bisect-zombie-processes.sh +129 -0
- package/templates/scripts/lint-md.sh +16 -0
- package/templates/skills/safeword-quality-reviewer/SKILL.md +3 -3
- package/templates/skills/safeword-systematic-debugger/SKILL.md +246 -0
- package/templates/skills/safeword-tdd-enforcer/SKILL.md +221 -0
- package/dist/check-OYYSYHFP.js.map +0 -1
- package/dist/chunk-LNSEDZIW.js.map +0 -1
- package/dist/chunk-ZS3Z3Q37.js.map +0 -1
- package/dist/diff-325TIZ63.js.map +0 -1
- package/dist/setup-GAMXTFM2.js.map +0 -1
- package/dist/sync-BFMXZEHM.js.map +0 -1
- package/dist/upgrade-X4GREJXN.js.map +0 -1
- package/dist/{reset-ZGJIKMUW.js.map → reset-DOGYUFAG.js.map} +0 -0
package/templates/scripts/bisect-test-pollution.sh (new file)

@@ -0,0 +1,87 @@
+#!/bin/bash
+# Bisect test pollution: Find which test creates unwanted files or shared state
+#
+# Use when: Tests pass individually but fail together, tests leave files behind,
+# tests affect each other, test isolation problems, shared state between tests
+#
+# Usage: ./bisect-test-pollution.sh <file_to_check> <name_pattern> [search_dir]
+# Example: ./bisect-test-pollution.sh '.git' '*.test.ts' src
+# Example: ./bisect-test-pollution.sh '.git' '*.test.ts' (searches current dir)
+
+set -e
+
+if [ $# -lt 2 ]; then
+  echo "Usage: $0 <file_to_check> <name_pattern> [search_dir]"
+  echo "Example: $0 '.git' '*.test.ts' src"
+  echo "Example: $0 '.git' '*.test.ts'"
+  echo ""
+  echo "Runs tests one-by-one to find which creates <file_to_check>"
+  echo "Override test command: TEST_CMD='pnpm test' $0 ..."
+  exit 1
+fi
+
+# Detect package manager from lockfile
+detect_runner() {
+  if [ -f "pnpm-lock.yaml" ]; then echo "pnpm"
+  elif [ -f "yarn.lock" ]; then echo "yarn"
+  elif [ -f "bun.lockb" ]; then echo "bun"
+  else echo "npm"
+  fi
+}
+
+# Allow override via environment variable
+RUNNER="${TEST_CMD:-$(detect_runner) test}"
+
+POLLUTION_CHECK="$1"
+NAME_PATTERN="$2"
+SEARCH_DIR="${3:-.}"
+
+echo "Searching for test that creates: $POLLUTION_CHECK"
+echo "Test pattern: $NAME_PATTERN in $SEARCH_DIR"
+echo ""
+
+# Get list of test files using find (portable across bash versions)
+TEST_FILES=$(find "$SEARCH_DIR" -type f -name "$NAME_PATTERN" 2>/dev/null | sort)
+TOTAL=$(echo "$TEST_FILES" | grep -c . || echo 0)
+
+if [ "$TOTAL" -eq 0 ]; then
+  echo "No test files found matching: $NAME_PATTERN in $SEARCH_DIR"
+  exit 1
+fi
+
+echo "Found $TOTAL test files"
+echo ""
+
+COUNT=0
+for TEST_FILE in $TEST_FILES; do
+  COUNT=$((COUNT + 1))
+
+  # Skip if pollution already exists
+  if [ -e "$POLLUTION_CHECK" ]; then
+    echo "Pollution already exists before test $COUNT/$TOTAL"
+    echo "Clean it up first: rm -rf $POLLUTION_CHECK"
+    exit 1
+  fi
+
+  echo "[$COUNT/$TOTAL] Testing: $TEST_FILE"
+
+  # Run the test
+  $RUNNER "$TEST_FILE" > /dev/null 2>&1 || true
+
+  # Check if pollution appeared
+  if [ -e "$POLLUTION_CHECK" ]; then
+    echo ""
+    echo "FOUND POLLUTER!"
+    echo "  Test: $TEST_FILE"
+    echo "  Created: $POLLUTION_CHECK"
+    echo ""
+    echo "To investigate:"
+    echo "  $RUNNER $TEST_FILE  # Run just this test"
+    echo "  cat $TEST_FILE      # Review test code"
+    exit 1
+  fi
+done
+
+echo ""
+echo "No polluter found - all tests clean!"
+exit 0
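
Usage sketch (not part of the diff): the argument order and the TEST_CMD override come from the script's own help text; the pollution path, test command, and glob below are hypothetical.

```bash
# Find which Vitest file leaves a stray .tmp-cache/ directory behind,
# forcing a specific test command instead of the lockfile-detected runner.
TEST_CMD='pnpm vitest run' \
  ./.safeword/scripts/bisect-test-pollution.sh '.tmp-cache' '*.test.ts' src
```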

package/templates/scripts/bisect-zombie-processes.sh (new file)

@@ -0,0 +1,129 @@
+#!/bin/bash
+# Bisect zombie processes: Find which test leaves processes behind
+#
+# Use when: Tests leave processes running, playwright browsers not cleaned up,
+# port stays in use after tests, zombie node processes after test suite,
+# can't find which test is leaving processes behind, chromium processes accumulate
+#
+# Usage: ./bisect-zombie-processes.sh <process_pattern> <name_pattern> [search_dir]
+# Example: ./bisect-zombie-processes.sh 'chromium' '*.test.ts' tests
+# Example: ./bisect-zombie-processes.sh 'node.*:3000' '*.spec.ts' e2e
+# Example: ./bisect-zombie-processes.sh 'playwright' '*.test.ts'
+
+set -e
+
+if [ $# -lt 2 ]; then
+  echo "Usage: $0 <process_pattern> <name_pattern> [search_dir]"
+  echo "Example: $0 'chromium' '*.test.ts' tests"
+  echo "Example: $0 'playwright' '*.test.ts'"
+  echo ""
+  echo "Runs tests one-by-one to find which leaves <process_pattern> running"
+  echo "Override test command: TEST_CMD='pnpm test' $0 ..."
+  exit 1
+fi
+
+# Detect package manager from lockfile
+detect_runner() {
+  if [ -f "pnpm-lock.yaml" ]; then echo "pnpm"
+  elif [ -f "yarn.lock" ]; then echo "yarn"
+  elif [ -f "bun.lockb" ]; then echo "bun"
+  else echo "npm"
+  fi
+}
+
+# Allow override via environment variable
+RUNNER="${TEST_CMD:-$(detect_runner) test}"
+
+PROCESS_PATTERN="$1"
+NAME_PATTERN="$2"
+SEARCH_DIR="${3:-.}"
+
+echo "Searching for test that leaves process behind: $PROCESS_PATTERN"
+echo "Test pattern: $NAME_PATTERN in $SEARCH_DIR"
+echo ""
+
+# Function to count matching processes
+count_procs() {
+  pgrep -f "$PROCESS_PATTERN" 2>/dev/null | wc -l | tr -d ' '
+}
+
+# Function to kill matching processes
+kill_procs() {
+  pkill -9 -f "$PROCESS_PATTERN" 2>/dev/null || true
+}
+
+# Get list of test files using find (portable across bash versions)
+TEST_FILES=$(find "$SEARCH_DIR" -type f -name "$NAME_PATTERN" 2>/dev/null | sort)
+TOTAL=$(echo "$TEST_FILES" | grep -c . || echo 0)
+
+if [ "$TOTAL" -eq 0 ]; then
+  echo "No test files found matching: $NAME_PATTERN in $SEARCH_DIR"
+  exit 1
+fi
+
+echo "Found $TOTAL test files"
+echo ""
+
+# Clean up any existing matching processes first
+INITIAL_COUNT=$(count_procs)
+if [ "$INITIAL_COUNT" -gt 0 ]; then
+  echo "Found $INITIAL_COUNT existing '$PROCESS_PATTERN' processes - cleaning up first..."
+  kill_procs
+  sleep 2
+
+  REMAINING=$(count_procs)
+  if [ "$REMAINING" -gt 0 ]; then
+    echo "WARNING: Could not kill all processes. $REMAINING still running."
+    echo "You may need to manually kill them first."
+    exit 1
+  fi
+  echo "Cleanup complete."
+  echo ""
+fi
+
+COUNT=0
+for TEST_FILE in $TEST_FILES; do
+  COUNT=$((COUNT + 1))
+
+  # Get baseline process count
+  BEFORE=$(count_procs)
+
+  if [ "$BEFORE" -gt 0 ]; then
+    echo "Zombie process found before test $COUNT/$TOTAL"
+    echo "Clean up first or check previous test"
+    exit 1
+  fi
+
+  echo "[$COUNT/$TOTAL] Testing: $TEST_FILE"
+
+  # Run the test
+  $RUNNER "$TEST_FILE" > /dev/null 2>&1 || true
+
+  # Small delay for processes to settle
+  sleep 1
+
+  # Check if zombie processes appeared
+  AFTER=$(count_procs)
+  if [ "$AFTER" -gt 0 ]; then
+    echo ""
+    echo "FOUND ZOMBIE SPAWNER!"
+    echo "  Test: $TEST_FILE"
+    echo "  Process pattern: $PROCESS_PATTERN"
+    echo "  Processes left behind: $AFTER"
+    echo ""
+    echo "Running processes:"
+    pgrep -f "$PROCESS_PATTERN" | head -5 | while read -r pid; do
+      ps -p "$pid" -o pid,command= 2>/dev/null | head -c 100
+      echo ""
+    done
+    echo ""
+    echo "To investigate:"
+    echo "  $RUNNER $TEST_FILE  # Run just this test"
+    echo "  cat $TEST_FILE      # Review test code for missing cleanup"
+    exit 1
+  fi
+done
+
+echo ""
+echo "No zombie spawner found - all tests clean up properly!"
+exit 0
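
For example (the process pattern, glob, and search directory are taken from the script's own header examples; the Playwright test command is an assumption):

```bash
# Find which e2e spec leaves a dev server bound to port 3000 after it finishes.
TEST_CMD='pnpm playwright test' \
  ./.safeword/scripts/bisect-zombie-processes.sh 'node.*:3000' '*.spec.ts' e2e
```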

package/templates/scripts/lint-md.sh (new file)

@@ -0,0 +1,16 @@
+#!/bin/bash
+# Markdown linter wrapper with helpful hints for MD040 errors
+# Usage: ./lint-md.sh [markdownlint-cli2 args]
+
+set -euo pipefail
+
+# Run markdownlint and add context to MD040 errors
+pnpm markdownlint-cli2 "$@" 2>&1 | while IFS= read -r line; do
+  echo "$line"
+  if [[ "$line" == *"MD040"* ]]; then
+    echo "  💡 Language hints: typescript|bash|json|yaml for code, 'text' for templates/pseudocode, 'plaintext' for directory trees"
+  fi
+done
+
+# Preserve exit code from markdownlint
+exit "${PIPESTATUS[0]}"
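
A minimal invocation sketch, assuming the template is installed under .safeword/scripts/ like the bisect helpers; the globs are illustrative (markdownlint-cli2 takes positional globs, with a leading # to ignore a path):

```bash
# Lint all Markdown files except node_modules and surface the MD040 hint on bare fences.
./.safeword/scripts/lint-md.sh "**/*.md" "#node_modules"
```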

package/templates/skills/safeword-quality-reviewer/SKILL.md

@@ -96,7 +96,7 @@ Read relevant standards:
 
 **CRITICAL**: This is your main differentiator from automatic hook. ALWAYS check versions.
 
-```
+```text
 WebSearch: "[library name] latest stable version 2025"
 WebSearch: "[library name] security vulnerabilities"
 ```
@@ -116,7 +116,7 @@ WebSearch: "[library name] security vulnerabilities"
 
 **CRITICAL**: This is your main differentiator from automatic hook. ALWAYS verify against current docs.
 
-```
+```text
 WebFetch: https://react.dev (for React)
 WebFetch: https://vitejs.dev (for Vite)
 WebFetch: https://www.electronjs.org/docs (for Electron)
@@ -134,7 +134,7 @@ WebFetch: https://www.electronjs.org/docs (for Electron)
 
 **Simple question** ("is it correct?"):
 
-```
+```text
 **Correctness:** ✓ Logic is sound, edge cases handled, no obvious errors.
 ```
 
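
To check the effect of these fence-language fixes, one illustrative command (run from the package repository, not part of the diff) would be re-linting the file; MD040 should no longer fire on the fences above:

```bash
# markdownlint-cli2 treats the path as a glob and applies the repo's config.
pnpm markdownlint-cli2 "package/templates/skills/safeword-quality-reviewer/SKILL.md"
```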

package/templates/skills/safeword-systematic-debugger/SKILL.md (new file)

@@ -0,0 +1,246 @@
+---
+name: systematic-debugger
+description: Four-phase debugging framework that ensures root cause identification before fixes. Use when encountering bugs, test failures, unexpected behavior, or when previous fix attempts failed. Enforces investigate-first discipline ('debug this', 'fix this error', 'test is failing', 'not working').
+allowed-tools: '*'
+---
+
+# Systematic Debugger
+
+Find root cause before fixing. Symptom fixes are failure.
+
+**Iron Law:** NO FIXES WITHOUT ROOT CAUSE INVESTIGATION FIRST
+
+## When to Use
+
+Answer IN ORDER. Stop at first match:
+
+1. Bug, error, or test failure? → Use this skill
+2. Unexpected behavior? → Use this skill
+3. Previous fix didn't work? → Use this skill (especially important)
+4. Performance problem? → Use this skill
+5. None of above? → Skip this skill
+
+**Use especially when:**
+
+- Under time pressure (emergencies make guessing tempting)
+- "Quick fix" seems obvious (red flag)
+- Already tried 1+ fixes that didn't work
+
+## The Four Phases
+
+Complete each phase before proceeding.
+
+### Phase 1: Root Cause Investigation
+
+**BEFORE attempting ANY fix:**
+
+**1. Read Error Messages Completely**
+
+```text
+Don't skip past errors. They often contain the exact solution.
+- Full stack trace (note line numbers, file paths)
+- Error codes and messages
+- Warnings that preceded the error
+```
+
+**2. Reproduce Consistently**
+
+| Can reproduce?  | Action                                               |
+| --------------- | ---------------------------------------------------- |
+| Yes, every time | Proceed to step 3                                    |
+| Sometimes       | Gather more data - when does it happen vs not?       |
+| Never           | Cannot debug what you cannot reproduce - gather logs |
+
+**3. Check Recent Changes**
+
+```bash
+git diff HEAD~5        # Recent code changes
+git log --oneline -10  # Recent commits
+```
+
+What changed that could cause this? Dependencies? Config? Environment?
+
+**4. Trace Data Flow (Root Cause Tracing)**
+
+When error is deep in call stack:
+
+```text
+Symptom: Error at line 50 in utils.js
+  ↑ Called by handler.js:120
+  ↑ Called by router.js:45
+  ↑ Called by app.js:10  ← ROOT CAUSE: bad input here
+```
+
+**Technique:**
+
+1. Find where error occurs (symptom)
+2. Ask: "What called this with bad data?"
+3. Trace up until you find the SOURCE
+4. Fix at source, not at symptom
+
+**5. Multi-Component Systems**
+
+When system has multiple layers (API → service → database):
+
+```bash
+# Log at EACH boundary before proposing fixes
+echo "=== Layer 1 (API): request=$REQUEST ==="
+echo "=== Layer 2 (Service): input=$INPUT ==="
+echo "=== Layer 3 (DB): query=$QUERY ==="
+```
+
+Run once to find WHERE it breaks. Then investigate that layer.
+
+### Phase 2: Pattern Analysis
+
+**1. Find Working Examples**
+
+Locate similar working code in same codebase. What works that's similar?
+
+**2. Identify Differences**
+
+| Working code     | Broken code    | Could this matter? |
+| ---------------- | -------------- | ------------------ |
+| Uses async/await | Uses callbacks | Yes - timing       |
+| Validates input  | No validation  | Yes - bad data     |
+
+List ALL differences. Don't assume "that can't matter."
+
+### Phase 3: Hypothesis Testing
+
+**1. Form Single Hypothesis**
+
+Write it down: "I think X is the root cause because Y"
+
+Be specific:
+
+- ❌ "Something's wrong with the database"
+- ✅ "Connection pool exhausted because connections aren't released in error path"
+
+**2. Test Minimally**
+
+| Rule                     | Why                    |
+| ------------------------ | ---------------------- |
+| ONE change at a time     | Isolate what works     |
+| Smallest possible change | Avoid side effects     |
+| Don't bundle fixes       | Can't tell what helped |
+
+**3. Evaluate Result**
+
+| Result          | Action                                  |
+| --------------- | --------------------------------------- |
+| Fixed           | Phase 4 (verify)                        |
+| Not fixed       | NEW hypothesis (return to 3.1)          |
+| Partially fixed | Found one issue, continue investigating |
+
+### Phase 4: Implementation
+
+**1. Create Failing Test**
+
+Before fixing, write test that fails due to the bug:
+
+```javascript
+it('handles empty input without crashing', () => {
+  // This test should FAIL before fix, PASS after
+  expect(() => processData('')).not.toThrow();
+});
+```
+
+**2. Implement Fix**
+
+- Address ROOT CAUSE identified in Phase 1
+- ONE change
+- No "while I'm here" improvements
+
+**3. Verify**
+
+- [ ] New test passes
+- [ ] Existing tests still pass
+- [ ] Issue actually resolved (not just test passing)
+
+**4. If Fix Doesn't Work**
+
+| Fix attempts | Action                                   |
+| ------------ | ---------------------------------------- |
+| 1-2          | Return to Phase 1 with new information   |
+| 3+           | STOP - Question architecture (see below) |
+
+**5. After 3+ Failed Fixes: Question Architecture**
+
+Pattern indicating architectural problem:
+
+- Each fix reveals new coupling/shared state
+- Fixes require "massive refactoring"
+- Each fix creates new symptoms elsewhere
+
+**STOP and ask:**
+
+- Is this pattern fundamentally sound?
+- Should we refactor vs. continue patching?
+- Discuss with user before more fix attempts
+
+## Red Flags - STOP Immediately
+
+If you catch yourself thinking:
+
+| Thought                                        | Reality                           |
+| ---------------------------------------------- | --------------------------------- |
+| "Quick fix for now, investigate later"         | Investigate NOW or you never will |
+| "Just try changing X"                          | That's guessing, not debugging    |
+| "I'll add multiple fixes and test"             | Can't isolate what worked         |
+| "I don't fully understand but this might work" | You need to understand first      |
+| "One more fix attempt" (after 2+ failures)     | 3+ failures = wrong approach      |
+
+**ALL mean: STOP. Return to Phase 1.**
+
+## Finding Test Pollution
+
+When tests pass individually but fail together (test isolation problem, tests affect each other, tests leave files behind), use bisection:
+
+```bash
+./.safeword/scripts/bisect-test-pollution.sh '.git' '*.test.ts' src
+```
+
+See: @./.safeword/scripts/bisect-test-pollution.sh
+
+## Debug Logging
+
+When adding diagnostic logging:
+
+```javascript
+// ❌ BAD
+console.log('here');
+console.log(data);
+
+// ✅ GOOD
+console.log('validateUser', { expected: 'admin', actual: user.role });
+console.log('processOrder', JSON.stringify({ input, output }, null, 2));
+```
+
+Log **expected vs actual**. Remove after fixing.
+
+## Quick Reference
+
+| Phase             | Key Question                          | Success Criteria                   |
+| ----------------- | ------------------------------------- | ---------------------------------- |
+| 1. Root Cause     | "WHY is this happening?"              | Understand cause, not just symptom |
+| 2. Pattern        | "What's different from working code?" | Identified key differences         |
+| 3. Hypothesis     | "Is my theory correct?"               | Confirmed or formed new theory     |
+| 4. Implementation | "Does the fix work?"                  | Test passes, issue resolved        |
+
+## Finding Zombie Process Spawners
+
+When tests leave processes behind (playwright browsers not cleaned up, port stays in use, zombie node processes, chromium accumulating), use bisection to find the culprit:
+
+```bash
+./.safeword/scripts/bisect-zombie-processes.sh 'chromium' '*.test.ts' tests
+./.safeword/scripts/bisect-zombie-processes.sh 'playwright' '*.spec.ts' e2e
+```
+
+See: @./.safeword/scripts/bisect-zombie-processes.sh
+
+## Related Resources
+
+- Process cleanup guide: @./.safeword/guides/zombie-process-cleanup.md
+- Debug logging style: @./.safeword/guides/code-philosophy.md
+- TDD for fix verification: @./.safeword/guides/tdd-best-practices.md