anvil-dev-framework 0.1.7 → 0.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +71 -22
- package/VERSION +1 -1
- package/docs/ANV-263-hook-logging-investigation.md +116 -0
- package/docs/command-reference.md +398 -17
- package/docs/session-workflow.md +62 -9
- package/docs/system-architecture.md +584 -0
- package/global/api/__pycache__/ralph_api.cpython-314.pyc +0 -0
- package/global/api/openapi.yaml +357 -0
- package/global/api/ralph_api.py +528 -0
- package/global/commands/anvil-settings.md +47 -19
- package/global/commands/audit.md +163 -0
- package/global/commands/checklist.md +180 -0
- package/global/commands/coderabbit-fix.md +282 -0
- package/global/commands/efficiency.md +356 -0
- package/global/commands/evidence.md +117 -33
- package/global/commands/hud.md +24 -0
- package/global/commands/insights.md +101 -3
- package/global/commands/orient.md +22 -21
- package/global/commands/patterns.md +115 -0
- package/global/commands/ralph.md +47 -1
- package/global/commands/token-budget.md +214 -0
- package/global/commands/weekly-review.md +21 -1
- package/global/config/notifications.yaml.template +50 -0
- package/global/hooks/ralph_stop.sh +33 -1
- package/global/hooks/statusline.sh +67 -2
- package/global/lib/__pycache__/coderabbit_metrics.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/command_tracker.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/context_optimizer.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/git_utils.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/issue_models.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/linear_provider.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/optimization_applier.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/ralph_state.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/ralph_webhooks.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/state_manager.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/token_analyzer.cpython-314.pyc +0 -0
- package/global/lib/__pycache__/token_metrics.cpython-314.pyc +0 -0
- package/global/lib/coderabbit_metrics.py +647 -0
- package/global/lib/command_tracker.py +147 -0
- package/global/lib/context_optimizer.py +323 -0
- package/global/lib/linear_provider.py +210 -16
- package/global/lib/log_rotation.py +287 -0
- package/global/lib/optimization_applier.py +582 -0
- package/global/lib/ralph_events.py +398 -0
- package/global/lib/ralph_notifier.py +366 -0
- package/global/lib/ralph_state.py +264 -24
- package/global/lib/ralph_webhooks.py +470 -0
- package/global/lib/state_manager.py +121 -0
- package/global/lib/token_analyzer.py +1383 -0
- package/global/lib/token_metrics.py +919 -0
- package/global/tests/__pycache__/test_command_tracker.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_context_optimizer.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_doc_coverage.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_git_utils.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_issue_models.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_linear_filtering.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_linear_provider.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_local_provider.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_optimization_applier.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_analyzer.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_analyzer_phase6.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/__pycache__/test_token_metrics.cpython-314-pytest-9.0.2.pyc +0 -0
- package/global/tests/test_command_tracker.py +172 -0
- package/global/tests/test_context_optimizer.py +321 -0
- package/global/tests/test_linear_filtering.py +319 -0
- package/global/tests/test_linear_provider.py +40 -1
- package/global/tests/test_optimization_applier.py +508 -0
- package/global/tests/test_token_analyzer.py +735 -0
- package/global/tests/test_token_analyzer_phase6.py +537 -0
- package/global/tests/test_token_metrics.py +829 -0
- package/global/tools/README.md +153 -0
- package/global/tools/__pycache__/anvil-hud.cpython-314.pyc +0 -0
- package/global/tools/__pycache__/orient_linear.cpython-314.pyc +0 -0
- package/global/tools/__pycache__/ralph-watch.cpython-314.pyc +0 -0
- package/global/tools/anvil-hud.py +86 -1
- package/global/tools/anvil-memory/src/__tests__/ccs/context-monitor.test.ts +472 -0
- package/global/tools/anvil-memory/src/__tests__/ccs/fixtures.ts +405 -0
- package/global/tools/anvil-memory/src/__tests__/ccs/index.ts +36 -0
- package/global/tools/anvil-memory/src/__tests__/ccs/prompt-generator.test.ts +653 -0
- package/global/tools/anvil-memory/src/__tests__/ccs/ralph-stop.test.ts +727 -0
- package/global/tools/anvil-memory/src/__tests__/ccs/test-utils.ts +340 -0
- package/global/tools/anvil-memory/src/__tests__/commands.test.ts +218 -0
- package/global/tools/anvil-memory/src/commands/context.ts +322 -0
- package/global/tools/anvil-memory/src/db.ts +108 -0
- package/global/tools/anvil-memory/src/index.ts +2 -8
- package/global/tools/orient_linear.py +159 -0
- package/global/tools/ralph-watch +423 -0
- package/package.json +2 -1
- package/project/.anvil-project.yaml.template +93 -0
- package/project/CLAUDE.md.template +343 -0
- package/project/agents/README.md +119 -0
- package/project/agents/cross-layer-debugger.md +217 -0
- package/project/agents/security-code-reviewer.md +162 -0
- package/project/constitution.md.template +235 -0
- package/project/coordination.md +103 -0
- package/project/docs/background-tasks.md +258 -0
- package/project/docs/skills-frontmatter.md +243 -0
- package/project/examples/README.md +106 -0
- package/project/examples/api-route-template.ts +171 -0
- package/project/examples/component-template.tsx +110 -0
- package/project/examples/hook-template.ts +152 -0
- package/project/examples/service-template.ts +207 -0
- package/project/examples/test-template.test.tsx +249 -0
- package/project/hooks/README.md +491 -0
- package/project/hooks/__pycache__/notification.cpython-314.pyc +0 -0
- package/project/hooks/__pycache__/post_tool_use.cpython-314.pyc +0 -0
- package/project/hooks/__pycache__/pre_tool_use.cpython-314.pyc +0 -0
- package/project/hooks/__pycache__/session_start.cpython-314.pyc +0 -0
- package/project/hooks/__pycache__/stop.cpython-314.pyc +0 -0
- package/project/hooks/notification.py +183 -0
- package/project/hooks/permission_request.py +438 -0
- package/project/hooks/post_tool_use.py +397 -0
- package/project/hooks/pre_compact.py +126 -0
- package/project/hooks/pre_tool_use.py +454 -0
- package/project/hooks/session_start.py +656 -0
- package/project/hooks/stop.py +356 -0
- package/project/hooks/subagent_start.py +223 -0
- package/project/hooks/subagent_stop.py +215 -0
- package/project/hooks/user_prompt_submit.py +110 -0
- package/project/hooks/utils/llm/anth.py +114 -0
- package/project/hooks/utils/llm/oai.py +114 -0
- package/project/hooks/utils/tts/elevenlabs_tts.py +63 -0
- package/project/hooks/utils/tts/mlx_audio_tts.py +86 -0
- package/project/hooks/utils/tts/openai_tts.py +92 -0
- package/project/hooks/utils/tts/pyttsx3_tts.py +75 -0
- package/project/linear.yaml.template +23 -0
- package/project/product.md.template +238 -0
- package/project/retros/README.md +126 -0
- package/project/rules/README.md +90 -0
- package/project/rules/debugging.md +139 -0
- package/project/rules/security-review.md +115 -0
- package/project/settings.yaml.template +185 -0
- package/project/specs/SPEC-ANV-72-hud-kanban.md +525 -0
- package/project/templates/api-python/CLAUDE.md +547 -0
- package/project/templates/generic/CLAUDE.md +260 -0
- package/project/templates/saas/CLAUDE.md +478 -0
- package/project/tests/README.md +140 -0
- package/project/tests/__pycache__/test_transcript_parser.cpython-314-pytest-9.0.2.pyc +0 -0
- package/project/tests/fixtures/sample-transcript.jsonl +21 -0
- package/project/tests/test-hooks.sh +259 -0
- package/project/tests/test-lib.sh +248 -0
- package/project/tests/test-statusline.sh +165 -0
- package/project/tests/test_transcript_parser.py +323 -0
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
#
|
|
3
|
+
# Test Harness for Hook Scripts
|
|
4
|
+
#
|
|
5
|
+
# Tests Anvil hook scripts (pre_tool_use.py, session_start.py, etc.)
|
|
6
|
+
# Uses heredocs for JSON fixtures to avoid shell escaping issues.
|
|
7
|
+
#
|
|
8
|
+
# Usage:
|
|
9
|
+
# ./test-hooks.sh # Run all tests
|
|
10
|
+
# ./test-hooks.sh pre_tool_use # Test specific hook
|
|
11
|
+
# ./test-hooks.sh --verbose # Run with verbose output
|
|
12
|
+
#
|
|
13
|
+
|
|
14
|
+
set -e
|
|
15
|
+
|
|
16
|
+
# Load test library
|
|
17
|
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
|
18
|
+
source "$SCRIPT_DIR/test-lib.sh"
|
|
19
|
+
|
|
20
|
+
# Path to hooks
|
|
21
|
+
HOOKS_DIR="$PROJECT_ROOT/project/hooks"
|
|
22
|
+
|
|
23
|
+
# Check hooks directory exists
|
|
24
|
+
if [ ! -d "$HOOKS_DIR" ]; then
|
|
25
|
+
echo "Error: hooks directory not found at $HOOKS_DIR"
|
|
26
|
+
exit 1
|
|
27
|
+
fi
|
|
28
|
+
|
|
29
|
+
# ============================================================================
|
|
30
|
+
# JSON Fixtures for pre_tool_use.py
|
|
31
|
+
# ============================================================================
|
|
32
|
+
|
|
33
|
+
read -r -d '' FIX_SAFE_BASH << 'EOF' || true
|
|
34
|
+
{
|
|
35
|
+
"tool_name": "Bash",
|
|
36
|
+
"tool_input": {
|
|
37
|
+
"command": "ls -la"
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
EOF
|
|
41
|
+
|
|
42
|
+
read -r -d '' FIX_DANGEROUS_RM << 'EOF' || true
|
|
43
|
+
{
|
|
44
|
+
"tool_name": "Bash",
|
|
45
|
+
"tool_input": {
|
|
46
|
+
"command": "rm -rf /"
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
EOF
|
|
50
|
+
|
|
51
|
+
read -r -d '' FIX_DANGEROUS_RM_HOME << 'EOF' || true
|
|
52
|
+
{
|
|
53
|
+
"tool_name": "Bash",
|
|
54
|
+
"tool_input": {
|
|
55
|
+
"command": "rm -rf ~"
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
EOF
|
|
59
|
+
|
|
60
|
+
read -r -d '' FIX_ENV_READ << 'EOF' || true
|
|
61
|
+
{
|
|
62
|
+
"tool_name": "Read",
|
|
63
|
+
"tool_input": {
|
|
64
|
+
"file_path": "/project/.env"
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
EOF
|
|
68
|
+
|
|
69
|
+
read -r -d '' FIX_ENV_SAMPLE << 'EOF' || true
|
|
70
|
+
{
|
|
71
|
+
"tool_name": "Read",
|
|
72
|
+
"tool_input": {
|
|
73
|
+
"file_path": "/project/.env.sample"
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
EOF
|
|
77
|
+
|
|
78
|
+
read -r -d '' FIX_EDIT_FILE << 'EOF' || true
|
|
79
|
+
{
|
|
80
|
+
"tool_name": "Edit",
|
|
81
|
+
"tool_input": {
|
|
82
|
+
"file_path": "/project/src/app.ts",
|
|
83
|
+
"old_string": "foo",
|
|
84
|
+
"new_string": "bar"
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
EOF
|
|
88
|
+
|
|
89
|
+
read -r -d '' FIX_ASK_USER << 'EOF' || true
|
|
90
|
+
{
|
|
91
|
+
"tool_name": "AskUserQuestion",
|
|
92
|
+
"tool_input": {
|
|
93
|
+
"question": "Which option do you prefer?"
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
EOF
|
|
97
|
+
|
|
98
|
+
# ============================================================================
|
|
99
|
+
# JSON Fixtures for session_start.py
|
|
100
|
+
# ============================================================================
|
|
101
|
+
|
|
102
|
+
read -r -d '' FIX_SESSION_START << 'EOF' || true
|
|
103
|
+
{
|
|
104
|
+
"session_id": "test-session-12345",
|
|
105
|
+
"cwd": "/Users/test/project"
|
|
106
|
+
}
|
|
107
|
+
EOF
|
|
108
|
+
|
|
109
|
+
# ============================================================================
|
|
110
|
+
# Test Functions - pre_tool_use.py
|
|
111
|
+
# ============================================================================
|
|
112
|
+
|
|
113
|
+
#
# test_pre_tool_use_safe_bash - Verify a benign Bash command is allowed.
#
# Globals:   HOOKS_DIR (read), FIX_SAFE_BASH (read)
# Returns:   0 if the hook exits 0 (allowed), 1 otherwise
#
test_pre_tool_use_safe_bash() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    [ -f "$hook" ] || { echo "Hook not found: $hook"; return 1; }

    # Capture the hook's real exit code. The previous form
    # `result=$(...) || true; local exit_code=$?` always observed 0 —
    # the status of `true` — so this assertion could never fail.
    set +e
    echo "$FIX_SAFE_BASH" | python3 "$hook" > /dev/null 2>&1
    local exit_code=$?
    set -e

    # Exit code 0 means the tool call is allowed
    [ "$exit_code" -eq 0 ] || { echo "Expected exit 0, got $exit_code"; return 1; }
}
|
|
125
|
+
|
|
126
|
+
#
# test_pre_tool_use_blocks_rm_rf - Verify `rm -rf /` is rejected.
#
# Globals:   HOOKS_DIR (read), FIX_DANGEROUS_RM (read)
# Returns:   0 if the hook exits 2 (blocked), 1 otherwise
#
test_pre_tool_use_blocks_rm_rf() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    if [ ! -f "$hook" ]; then
        echo "Hook not found: $hook"
        return 1
    fi

    # The hook signals "blocked" with exit code 2; suspend -e while
    # probing the exit status.
    local rc
    set +e
    echo "$FIX_DANGEROUS_RM" | python3 "$hook" 2>&1
    rc=$?
    set -e

    if [ "$rc" -ne 2 ]; then
        echo "Expected exit 2, got $rc"
        return 1
    fi
}
|
|
139
|
+
|
|
140
|
+
#
# test_pre_tool_use_blocks_rm_home - Verify `rm -rf ~` is rejected.
#
# Globals:   HOOKS_DIR (read), FIX_DANGEROUS_RM_HOME (read)
# Returns:   0 if the hook exits 2 (blocked), 1 otherwise
#
test_pre_tool_use_blocks_rm_home() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    if [ ! -f "$hook" ]; then
        echo "Hook not found: $hook"
        return 1
    fi

    # Deleting the home directory must be blocked (exit code 2).
    local rc
    set +e
    echo "$FIX_DANGEROUS_RM_HOME" | python3 "$hook" 2>&1
    rc=$?
    set -e

    if [ "$rc" -ne 2 ]; then
        echo "Expected exit 2, got $rc"
        return 1
    fi
}
|
|
152
|
+
|
|
153
|
+
#
# test_pre_tool_use_blocks_env_read - Verify reading .env is rejected.
#
# Globals:   HOOKS_DIR (read), FIX_ENV_READ (read)
# Returns:   0 if the hook exits 2 (blocked), 1 otherwise
#
test_pre_tool_use_blocks_env_read() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    if [ ! -f "$hook" ]; then
        echo "Hook not found: $hook"
        return 1
    fi

    # Secrets files like .env must be blocked (exit code 2).
    local rc
    set +e
    echo "$FIX_ENV_READ" | python3 "$hook" 2>&1
    rc=$?
    set -e

    if [ "$rc" -ne 2 ]; then
        echo "Expected exit 2, got $rc"
        return 1
    fi
}
|
|
165
|
+
|
|
166
|
+
#
# test_pre_tool_use_allows_env_sample - Reading .env.sample is permitted.
#
# Globals:   HOOKS_DIR (read), FIX_ENV_SAMPLE (read)
# Returns:   0 if the hook exits 0 (allowed), 1 otherwise
#
test_pre_tool_use_allows_env_sample() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    [ -f "$hook" ] || { echo "Hook not found: $hook"; return 1; }

    # Capture the hook's real exit code. The previous form
    # `result=$(...) || true; local exit_code=$?` always observed 0 —
    # the status of `true` — so this assertion could never fail.
    set +e
    echo "$FIX_ENV_SAMPLE" | python3 "$hook" > /dev/null 2>&1
    local exit_code=$?
    set -e

    [ "$exit_code" -eq 0 ] || { echo "Expected exit 0, got $exit_code"; return 1; }
}
|
|
177
|
+
|
|
178
|
+
#
# test_pre_tool_use_allows_edit - A normal file edit is permitted.
#
# Globals:   HOOKS_DIR (read), FIX_EDIT_FILE (read)
# Returns:   0 if the hook exits 0 (allowed), 1 otherwise
#
test_pre_tool_use_allows_edit() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    [ -f "$hook" ] || { echo "Hook not found: $hook"; return 1; }

    # Capture the hook's real exit code. The previous form
    # `result=$(...) || true; local exit_code=$?` always observed 0 —
    # the status of `true` — so this assertion could never fail.
    set +e
    echo "$FIX_EDIT_FILE" | python3 "$hook" > /dev/null 2>&1
    local exit_code=$?
    set -e

    [ "$exit_code" -eq 0 ] || { echo "Expected exit 0, got $exit_code"; return 1; }
}
|
|
189
|
+
|
|
190
|
+
#
# test_pre_tool_use_performance - Hook must answer within 100ms.
#
# Globals:   HOOKS_DIR (read), FIX_SAFE_BASH (read)
# Returns:   0 if the hook ran in under 100ms, 1 otherwise
#
test_pre_tool_use_performance() {
    local hook="$HOOKS_DIR/pre_tool_use.py"
    [ -f "$hook" ] || { echo "Hook not found: $hook"; return 1; }

    local start end elapsed
    start=$(python3 -c "import time; print(int(time.time() * 1000))")
    # Guard with || true: a nonzero hook exit should surface as a failed
    # test, not abort the entire harness via the script-level `set -e`.
    echo "$FIX_SAFE_BASH" | python3 "$hook" > /dev/null 2>&1 || true
    end=$(python3 -c "import time; print(int(time.time() * 1000))")

    elapsed=$((end - start))

    # Should complete in under 100ms
    if [ "$elapsed" -lt 100 ]; then
        return 0
    else
        echo "  Performance: ${elapsed}ms (target: <100ms)"
        return 1
    fi
}
|
|
209
|
+
|
|
210
|
+
# ============================================================================
|
|
211
|
+
# Test Functions - session_start.py
|
|
212
|
+
# ============================================================================
|
|
213
|
+
|
|
214
|
+
#
# test_session_start_runs - session_start.py should exit 0 on valid input.
#
# Globals:   HOOKS_DIR (read), FIX_SESSION_START (read)
# Returns:   0 if the hook exits 0, 1 otherwise
#
test_session_start_runs() {
    local hook="$HOOKS_DIR/session_start.py"
    [ -f "$hook" ] || { echo "Hook not found: $hook"; return 1; }

    # Capture the hook's real exit code. The previous form
    # `result=$(...) || true; local exit_code=$?` always observed 0 —
    # the status of `true` — so this assertion could never fail.
    set +e
    echo "$FIX_SESSION_START" | python3 "$hook" > /dev/null 2>&1
    local exit_code=$?
    set -e

    # Exit code 0 means success
    [ "$exit_code" -eq 0 ] || { echo "Expected exit 0, got $exit_code"; return 1; }
}
|
|
226
|
+
|
|
227
|
+
# ============================================================================
|
|
228
|
+
# Run Tests
|
|
229
|
+
# ============================================================================
|
|
230
|
+
|
|
231
|
+
echo "Hook Test Harness"
|
|
232
|
+
echo "================="
|
|
233
|
+
echo "Hooks Dir: $HOOKS_DIR"
|
|
234
|
+
echo ""
|
|
235
|
+
|
|
236
|
+
# Parse arguments
|
|
237
|
+
HOOK_FILTER="${1:-}"
|
|
238
|
+
|
|
239
|
+
if [ -z "$HOOK_FILTER" ] || [ "$HOOK_FILTER" = "--verbose" ] || [ "$HOOK_FILTER" = "pre_tool_use" ]; then
|
|
240
|
+
echo "Testing: pre_tool_use.py"
|
|
241
|
+
echo "------------------------"
|
|
242
|
+
run_test "safe_bash_allowed" test_pre_tool_use_safe_bash
|
|
243
|
+
run_test "blocks_rm_rf_root" test_pre_tool_use_blocks_rm_rf
|
|
244
|
+
run_test "blocks_rm_rf_home" test_pre_tool_use_blocks_rm_home
|
|
245
|
+
run_test "blocks_env_read" test_pre_tool_use_blocks_env_read
|
|
246
|
+
run_test "allows_env_sample" test_pre_tool_use_allows_env_sample
|
|
247
|
+
run_test "allows_normal_edit" test_pre_tool_use_allows_edit
|
|
248
|
+
run_test "performance" test_pre_tool_use_performance
|
|
249
|
+
echo ""
|
|
250
|
+
fi
|
|
251
|
+
|
|
252
|
+
if [ -z "$HOOK_FILTER" ] || [ "$HOOK_FILTER" = "--verbose" ] || [ "$HOOK_FILTER" = "session_start" ]; then
|
|
253
|
+
echo "Testing: session_start.py"
|
|
254
|
+
echo "-------------------------"
|
|
255
|
+
run_test "session_start_runs" test_session_start_runs
|
|
256
|
+
echo ""
|
|
257
|
+
fi
|
|
258
|
+
|
|
259
|
+
print_summary
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
#
|
|
3
|
+
# Anvil Test Library - Shared utilities for test harnesses
|
|
4
|
+
#
|
|
5
|
+
# Usage: source this file in test scripts
|
|
6
|
+
# source "$(dirname "$0")/test-lib.sh"
|
|
7
|
+
#
|
|
8
|
+
# Provides:
|
|
9
|
+
# - run_test: Execute a test with pass/fail output
|
|
10
|
+
# - assert_contains: Check if output contains expected pattern
|
|
11
|
+
# - assert_equals: Check if values are equal
|
|
12
|
+
# - assert_exit_code: Check command exit code
|
|
13
|
+
# - setup_test_env: Create isolated test environment
|
|
14
|
+
# - cleanup_test_env: Clean up test environment
|
|
15
|
+
#
|
|
16
|
+
|
|
17
|
+
set -e
|
|
18
|
+
|
|
19
|
+
# Colors for output
|
|
20
|
+
RED='\033[0;31m'
|
|
21
|
+
GREEN='\033[0;32m'
|
|
22
|
+
YELLOW='\033[1;33m'
|
|
23
|
+
NC='\033[0m' # No Color
|
|
24
|
+
|
|
25
|
+
# Test counters
|
|
26
|
+
TESTS_RUN=0
|
|
27
|
+
TESTS_PASSED=0
|
|
28
|
+
TESTS_FAILED=0
|
|
29
|
+
|
|
30
|
+
# Get script directory for relative paths
|
|
31
|
+
TEST_LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
32
|
+
PROJECT_ROOT="$(cd "$TEST_LIB_DIR/../.." && pwd)"
|
|
33
|
+
|
|
34
|
+
#
|
|
35
|
+
# run_test - Execute a test function with pass/fail reporting
|
|
36
|
+
#
|
|
37
|
+
# Arguments:
|
|
38
|
+
# $1 - Test name (displayed in output)
|
|
39
|
+
# $2 - Test function or command to run
|
|
40
|
+
#
|
|
41
|
+
# Example:
|
|
42
|
+
# run_test "basic_json_input" test_basic_json
|
|
43
|
+
#
|
|
44
|
+
#
# run_test - Execute a test function and record pass/fail.
#
# Arguments:
#   $1 - Test name (displayed in output)
#   $2 - Test function or command to run
#
# Globals: increments TESTS_RUN plus TESTS_PASSED or TESTS_FAILED.
# Returns: the test's pass (0) / fail (1) status.
#
run_test() {
    local label="$1"
    local cmd="$2"

    TESTS_RUN=$((TESTS_RUN + 1))

    # eval lets callers pass either a bare function name or a command
    # string; the condition also keeps set -e from aborting on failure.
    if eval "$cmd" 2>&1; then
        TESTS_PASSED=$((TESTS_PASSED + 1))
        echo -e "${GREEN}✅ $label${NC}"
        return 0
    fi

    TESTS_FAILED=$((TESTS_FAILED + 1))
    echo -e "${RED}❌ $label${NC}"
    return 1
}
|
|
61
|
+
|
|
62
|
+
#
|
|
63
|
+
# assert_contains - Check if output contains expected pattern
|
|
64
|
+
#
|
|
65
|
+
# Arguments:
|
|
66
|
+
# $1 - Actual output
|
|
67
|
+
# $2 - Expected pattern (grep regex, use -E for extended regex)
|
|
68
|
+
# $3 - (optional) Failure message
|
|
69
|
+
#
|
|
70
|
+
# Returns: 0 if contains, 1 if not
|
|
71
|
+
#
|
|
72
|
+
# Note: For extended regex, pass -E as first arg:
|
|
73
|
+
# assert_contains "$output" "pattern" "message" # Basic regex
|
|
74
|
+
# assert_contains "$output" "pat|tern" "message" # Extended (auto-detected)
|
|
75
|
+
#
|
|
76
|
+
#
# assert_contains - Check if output contains expected pattern.
#
# Arguments:
#   $1 - Actual output
#   $2 - Expected pattern (basic grep regex; extended regex is
#        auto-detected when the pattern contains | ( ) + ?)
#   $3 - (optional) Failure message
#
# Returns: 0 if the pattern is found, 1 otherwise
#
assert_contains() {
    local actual="$1"
    local expected="$2"
    local message="${3:-Expected pattern not found}"

    # Use extended regex if pattern contains ERE metacharacters
    local grep_flags="-q"
    if printf '%s' "$expected" | grep -qE '\||\(|\)|\+|\?'; then
        grep_flags="-qE"
    fi

    # `--` ends option parsing so a pattern beginning with '-' is not
    # misinterpreted as a grep option (e.g. pattern "-v").
    if printf '%s\n' "$actual" | grep $grep_flags -- "$expected"; then
        return 0
    else
        echo -e "  ${YELLOW}$message${NC}"
        echo "  Expected pattern: $expected"
        echo "  Actual output: $actual"
        return 1
    fi
}
|
|
96
|
+
|
|
97
|
+
#
|
|
98
|
+
# assert_not_contains - Check if output does NOT contain pattern
|
|
99
|
+
#
|
|
100
|
+
# Arguments:
|
|
101
|
+
# $1 - Actual output
|
|
102
|
+
# $2 - Pattern that should NOT be present
|
|
103
|
+
# $3 - (optional) Failure message
|
|
104
|
+
#
|
|
105
|
+
#
# assert_not_contains - Check if output does NOT contain pattern.
#
# Arguments:
#   $1 - Actual output
#   $2 - Pattern that should NOT be present (basic grep regex)
#   $3 - (optional) Failure message
#
# Returns: 0 if the pattern is absent, 1 if found
#
assert_not_contains() {
    local actual="$1"
    local pattern="$2"
    local message="${3:-Unexpected pattern found}"

    # `--` ends option parsing so a pattern beginning with '-' is not
    # misinterpreted as a grep option.
    if printf '%s\n' "$actual" | grep -q -- "$pattern"; then
        echo -e "  ${YELLOW}$message${NC}"
        echo "  Unexpected pattern: $pattern"
        echo "  Actual output: $actual"
        return 1
    else
        return 0
    fi
}
|
|
119
|
+
|
|
120
|
+
#
|
|
121
|
+
# assert_equals - Check if two values are equal
|
|
122
|
+
#
|
|
123
|
+
# Arguments:
|
|
124
|
+
# $1 - Actual value
|
|
125
|
+
# $2 - Expected value
|
|
126
|
+
# $3 - (optional) Failure message
|
|
127
|
+
#
|
|
128
|
+
#
# assert_equals - Check two values for string equality.
#
# Arguments:
#   $1 - Actual value
#   $2 - Expected value
#   $3 - (optional) Failure message
#
# Returns: 0 if equal, 1 otherwise (with a diagnostic on stdout)
#
assert_equals() {
    local got="$1"
    local want="$2"
    local message="${3:-Values not equal}"

    [ "$got" = "$want" ] && return 0

    echo -e "  ${YELLOW}$message${NC}"
    echo "  Expected: $want"
    echo "  Actual: $got"
    return 1
}
|
|
142
|
+
|
|
143
|
+
#
|
|
144
|
+
# assert_exit_code - Check command exit code
|
|
145
|
+
#
|
|
146
|
+
# Arguments:
|
|
147
|
+
# $1 - Expected exit code
|
|
148
|
+
# $2... - Command to run
|
|
149
|
+
#
|
|
150
|
+
# Example:
|
|
151
|
+
# assert_exit_code 0 echo "hello"
|
|
152
|
+
# assert_exit_code 2 python3 script.py # Expect exit 2
|
|
153
|
+
#
|
|
154
|
+
#
# assert_exit_code - Run a command and check its exit code.
#
# Arguments:
#   $1 - Expected exit code
#   $2... - Command and its arguments
#
# Example:
#   assert_exit_code 0 echo "hello"
#   assert_exit_code 2 python3 script.py    # Expect exit 2
#
assert_exit_code() {
    local expected="$1"
    shift

    # Execute "$@" directly instead of `local cmd="$@"` + eval: that
    # flattened the arguments into one string (SC2124), breaking any
    # command whose arguments contain spaces or quoting.
    local actual
    set +e
    "$@" >/dev/null 2>&1
    actual=$?
    set -e

    if [ "$actual" -eq "$expected" ]; then
        return 0
    else
        echo -e "  ${YELLOW}Wrong exit code${NC}"
        echo "  Expected: $expected"
        echo "  Actual: $actual"
        echo "  Command: $*"
        return 1
    fi
}
|
|
174
|
+
|
|
175
|
+
#
|
|
176
|
+
# setup_test_env - Create isolated test environment
|
|
177
|
+
#
|
|
178
|
+
# Creates a temporary directory and sets up mock environment variables
|
|
179
|
+
#
|
|
180
|
+
# Returns: Path to temp directory (also sets TEST_TEMP_DIR)
|
|
181
|
+
#
|
|
182
|
+
#
# setup_test_env - Create an isolated, throwaway test environment.
#
# Creates a temp directory with a mock layout and exports mock
# CLAUDE_* environment variables.
#
# Outputs: path to the temp directory (also sets TEST_TEMP_DIR)
#
setup_test_env() {
    TEST_TEMP_DIR="$(mktemp -d)"

    # Mock environment variables
    export CLAUDE_SESSION_ID="test-session-$(date +%s)"
    export CLAUDE_MODEL="claude-opus-4-5-20251101"

    # Mock directory structure
    mkdir -p "$TEST_TEMP_DIR/logs" "$TEST_TEMP_DIR/.claude"

    echo "$TEST_TEMP_DIR"
}
|
|
195
|
+
|
|
196
|
+
#
|
|
197
|
+
# cleanup_test_env - Clean up test environment
|
|
198
|
+
#
|
|
199
|
+
#
# cleanup_test_env - Remove the temp directory and mock variables
# created by setup_test_env.
#
cleanup_test_env() {
    # ${VAR:-} keeps [ -d ] safe even if the variable was never set
    if [ -d "${TEST_TEMP_DIR:-}" ]; then
        rm -rf "$TEST_TEMP_DIR"
    fi
    unset TEST_TEMP_DIR CLAUDE_SESSION_ID CLAUDE_MODEL
}
|
|
207
|
+
|
|
208
|
+
#
|
|
209
|
+
# print_summary - Print test run summary
|
|
210
|
+
#
|
|
211
|
+
#
# print_summary - Print the final test tally.
#
# Globals: TESTS_RUN, TESTS_PASSED, TESTS_FAILED (read)
# Returns: 0 if every test passed, 1 if any failed.
#
print_summary() {
    local bar="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    echo "$bar"
    echo "Test Summary"
    echo "$bar"
    echo "Total: $TESTS_RUN"
    echo -e "Passed: ${GREEN}$TESTS_PASSED${NC}"
    if [ "$TESTS_FAILED" -gt 0 ]; then
        echo -e "Failed: ${RED}$TESTS_FAILED${NC}"
    else
        echo "Failed: $TESTS_FAILED"
    fi
    echo "$bar"

    # Exit status mirrors overall pass/fail
    [ "$TESTS_FAILED" -eq 0 ]
}
|
|
230
|
+
|
|
231
|
+
#
|
|
232
|
+
# json_fixture - Create JSON fixture using heredoc (avoids escaping issues)
|
|
233
|
+
#
|
|
234
|
+
# Usage:
|
|
235
|
+
# json_fixture FIXTURE_NAME << 'EOF'
|
|
236
|
+
# {"key": "value"}
|
|
237
|
+
# EOF
|
|
238
|
+
#
|
|
239
|
+
#
# json_fixture - Load a heredoc from stdin into a named variable
# (avoids shell-escaping issues with inline JSON).
#
# Usage:
#   json_fixture FIXTURE_NAME << 'EOF'
#   {"key": "value"}
#   EOF
#
json_fixture() {
    local name="$1"
    # `read` accepts a variable name directly, so the eval the original
    # used is unnecessary — and removing it closes a code-injection
    # hazard if $name were ever attacker-controlled. read returns
    # nonzero at EOF (no NUL delimiter found), hence || true.
    read -r -d '' "$name" || true
}
|
|
243
|
+
|
|
244
|
+
# Export functions for use in subshells
|
|
245
|
+
export -f run_test assert_contains assert_not_contains assert_equals assert_exit_code
|
|
246
|
+
export -f setup_test_env cleanup_test_env print_summary json_fixture
|
|
247
|
+
export TESTS_RUN TESTS_PASSED TESTS_FAILED
|
|
248
|
+
export TEST_LIB_DIR PROJECT_ROOT
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
#
|
|
3
|
+
# Test Harness for statusline.sh
|
|
4
|
+
#
|
|
5
|
+
# Tests the Anvil statusline script with various JSON inputs.
|
|
6
|
+
# Uses heredocs for JSON fixtures to avoid shell escaping issues.
|
|
7
|
+
#
|
|
8
|
+
# Usage:
|
|
9
|
+
# ./test-statusline.sh # Run all tests
|
|
10
|
+
# ./test-statusline.sh --verbose # Run with verbose output
|
|
11
|
+
#
|
|
12
|
+
|
|
13
|
+
set -e
|
|
14
|
+
|
|
15
|
+
# Load test library
|
|
16
|
+
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
|
17
|
+
source "$SCRIPT_DIR/test-lib.sh"
|
|
18
|
+
|
|
19
|
+
# Path to statusline script
|
|
20
|
+
STATUSLINE="$PROJECT_ROOT/global/hooks/statusline.sh"
|
|
21
|
+
|
|
22
|
+
# Check statusline exists
|
|
23
|
+
if [ ! -f "$STATUSLINE" ]; then
|
|
24
|
+
echo "Error: statusline.sh not found at $STATUSLINE"
|
|
25
|
+
exit 1
|
|
26
|
+
fi
|
|
27
|
+
|
|
28
|
+
# ============================================================================
|
|
29
|
+
# JSON Fixtures (heredocs avoid escaping issues)
|
|
30
|
+
# ============================================================================
|
|
31
|
+
|
|
32
|
+
read -r -d '' FIXTURE_BASIC << 'EOF' || true
|
|
33
|
+
{
|
|
34
|
+
"cwd": "/Users/test/project",
|
|
35
|
+
"model": {
|
|
36
|
+
"display_name": "Claude Opus 4.5"
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
EOF
|
|
40
|
+
|
|
41
|
+
read -r -d '' FIXTURE_WITH_MESSAGE << 'EOF' || true
|
|
42
|
+
{
|
|
43
|
+
"cwd": "/Users/test/project",
|
|
44
|
+
"status_message": "Building feature",
|
|
45
|
+
"model": {
|
|
46
|
+
"display_name": "Claude 3.5 Sonnet"
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
EOF
|
|
50
|
+
|
|
51
|
+
read -r -d '' FIXTURE_HAIKU << 'EOF' || true
|
|
52
|
+
{
|
|
53
|
+
"cwd": "/Users/test/project",
|
|
54
|
+
"model": {
|
|
55
|
+
"display_name": "Claude 3 Haiku"
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
EOF
|
|
59
|
+
|
|
60
|
+
read -r -d '' FIXTURE_EMPTY << 'EOF' || true
|
|
61
|
+
{}
|
|
62
|
+
EOF
|
|
63
|
+
|
|
64
|
+
read -r -d '' FIXTURE_NO_MODEL << 'EOF' || true
|
|
65
|
+
{
|
|
66
|
+
"cwd": "/Users/test/project",
|
|
67
|
+
"conversation": {}
|
|
68
|
+
}
|
|
69
|
+
EOF
|
|
70
|
+
|
|
71
|
+
# ============================================================================
|
|
72
|
+
# Test Functions
|
|
73
|
+
# ============================================================================
|
|
74
|
+
|
|
75
|
+
#
# test_basic_output - Statusline should emit output naming the model.
#
# Globals: STATUSLINE (read), FIXTURE_BASIC (read)
#
test_basic_output() {
    local out
    out=$(echo "$FIXTURE_BASIC" | "$STATUSLINE" 2>&1)

    # Must produce something
    if [ -z "$out" ]; then
        return 1
    fi

    # Must mention the model; '|' in the pattern triggers extended
    # regex auto-detection in assert_contains.
    assert_contains "$out" "Opus|opus|Claude|claude" "Should contain model name"
}
|
|
86
|
+
|
|
87
|
+
#
# test_with_status_message - Statusline should show the Sonnet model
# when a status_message is also present in the input.
#
# Globals: STATUSLINE (read), FIXTURE_WITH_MESSAGE (read)
#
test_with_status_message() {
    local out
    out=$(echo "$FIXTURE_WITH_MESSAGE" | "$STATUSLINE" 2>&1)

    # Model name must appear in the rendered line
    assert_contains "$out" "Sonnet|sonnet" "Should contain Sonnet model"
}
|
|
94
|
+
|
|
95
|
+
#
# test_haiku_model - Statusline should show the Haiku model name.
#
# Globals: STATUSLINE (read), FIXTURE_HAIKU (read)
#
test_haiku_model() {
    local out
    out=$(echo "$FIXTURE_HAIKU" | "$STATUSLINE" 2>&1)

    # Model name must appear in the rendered line
    assert_contains "$out" "Haiku|haiku" "Should contain Haiku model"
}
|
|
102
|
+
|
|
103
|
+
#
# test_empty_input - Statusline must not crash on an empty JSON object.
#
# NOTE(review): as written this test can never fail — `|| true`
# swallows any crash and empty output is accepted. It documents intent
# rather than asserting it; kept behavior-identical here.
#
test_empty_input() {
    local out
    # Should not crash on empty input
    out=$(echo "$FIXTURE_EMPTY" | "$STATUSLINE" 2>&1) || true

    # Empty output is acceptable for empty input
    if [ -z "$out" ]; then
        return 0
    fi
}
|
|
111
|
+
|
|
112
|
+
#
# test_no_model - Statusline must handle a missing model field.
#
# The original body unconditionally returned 0 and asserted nothing.
# Per its own comment the contract is "does not crash": accept a clean
# exit OR a non-empty fallback line, and fail on a silent crash.
#
# Globals: STATUSLINE (read), FIXTURE_NO_MODEL (read)
#
test_no_model() {
    local out rc
    set +e
    out=$(echo "$FIXTURE_NO_MODEL" | "$STATUSLINE" 2>&1)
    rc=$?
    set -e

    # Graceful handling: exit 0, or at least some fallback output
    if [ "$rc" -eq 0 ] || [ -n "$out" ]; then
        return 0
    fi
    echo "Statusline crashed silently with no model field (exit $rc)"
    return 1
}
|
|
120
|
+
|
|
121
|
+
#
# test_invalid_json - Statusline must handle malformed JSON input.
#
# The original body unconditionally returned 0 and asserted nothing.
# Per its own comment the contract is "exit code 0 or non-empty
# fallback": enforce exactly that, and fail on a silent crash.
#
# Globals: STATUSLINE (read)
#
test_invalid_json() {
    local out rc
    set +e
    out=$(echo "not valid json" | "$STATUSLINE" 2>&1)
    rc=$?
    set -e

    # Graceful handling: exit 0, or at least some fallback output
    if [ "$rc" -eq 0 ] || [ -n "$out" ]; then
        return 0
    fi
    echo "Statusline crashed silently on invalid JSON (exit $rc)"
    return 1
}
|
|
129
|
+
|
|
130
|
+
#
# test_performance - Statusline must render within 200ms.
#
# Globals: STATUSLINE (read), FIXTURE_BASIC (read)
#
test_performance() {
    local t0 t1 took

    # Millisecond wall-clock via python3 (portable vs. date +%N)
    t0=$(python3 -c "import time; print(int(time.time() * 1000))")
    echo "$FIXTURE_BASIC" | "$STATUSLINE" > /dev/null 2>&1
    t1=$(python3 -c "import time; print(int(time.time() * 1000))")

    took=$((t1 - t0))

    # Budget: under 200ms
    if [ "$took" -ge 200 ]; then
        echo "  Performance: ${took}ms (target: <200ms)"
        return 1
    fi
    return 0
}
|
|
147
|
+
|
|
148
|
+
# ============================================================================
|
|
149
|
+
# Run Tests
|
|
150
|
+
# ============================================================================
|
|
151
|
+
|
|
152
|
+
echo "Statusline Test Harness"
|
|
153
|
+
echo "======================="
|
|
154
|
+
echo "Script: $STATUSLINE"
|
|
155
|
+
echo ""
|
|
156
|
+
|
|
157
|
+
run_test "basic_output" test_basic_output
|
|
158
|
+
run_test "with_status_message" test_with_status_message
|
|
159
|
+
run_test "haiku_model" test_haiku_model
|
|
160
|
+
run_test "empty_input" test_empty_input
|
|
161
|
+
run_test "no_model" test_no_model
|
|
162
|
+
run_test "invalid_json" test_invalid_json
|
|
163
|
+
run_test "performance" test_performance
|
|
164
|
+
|
|
165
|
+
print_summary
|