@sandrinio/vbounce 1.5.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +108 -18
- package/bin/vbounce.mjs +291 -146
- package/brains/AGENTS.md +12 -10
- package/brains/CHANGELOG.md +99 -1
- package/brains/CLAUDE.md +29 -22
- package/brains/GEMINI.md +47 -9
- package/brains/SETUP.md +11 -5
- package/brains/claude-agents/architect.md +22 -6
- package/brains/claude-agents/developer.md +2 -2
- package/brains/claude-agents/devops.md +3 -0
- package/brains/claude-agents/qa.md +25 -9
- package/brains/copilot/copilot-instructions.md +49 -0
- package/brains/cursor-rules/vbounce-process.mdc +9 -7
- package/brains/windsurf/.windsurfrules +30 -0
- package/package.json +2 -4
- package/scripts/close_sprint.mjs +94 -0
- package/scripts/complete_story.mjs +113 -0
- package/scripts/doctor.mjs +144 -0
- package/scripts/init_gate_config.sh +151 -0
- package/scripts/init_sprint.mjs +121 -0
- package/scripts/pre_gate_common.sh +576 -0
- package/scripts/pre_gate_runner.sh +176 -0
- package/scripts/prep_arch_context.mjs +178 -0
- package/scripts/prep_qa_context.mjs +134 -0
- package/scripts/prep_sprint_context.mjs +118 -0
- package/scripts/prep_sprint_summary.mjs +154 -0
- package/scripts/sprint_trends.mjs +160 -0
- package/scripts/suggest_improvements.mjs +200 -0
- package/scripts/update_state.mjs +132 -0
- package/scripts/validate_bounce_readiness.mjs +125 -0
- package/scripts/validate_report.mjs +39 -2
- package/scripts/validate_sprint_plan.mjs +117 -0
- package/scripts/validate_state.mjs +99 -0
- package/skills/agent-team/SKILL.md +56 -21
- package/skills/agent-team/references/cleanup.md +42 -0
- package/skills/agent-team/references/delivery-sync.md +43 -0
- package/skills/agent-team/references/git-strategy.md +52 -0
- package/skills/agent-team/references/mid-sprint-triage.md +71 -0
- package/skills/agent-team/references/report-naming.md +34 -0
- package/skills/doc-manager/SKILL.md +5 -4
- package/skills/improve/SKILL.md +27 -1
- package/skills/lesson/SKILL.md +23 -0
- package/templates/delivery_plan.md +1 -1
- package/templates/hotfix.md +1 -1
- package/templates/sprint.md +65 -13
- package/templates/sprint_report.md +8 -1
- package/templates/story.md +1 -1
- package/scripts/pre_bounce_sync.sh +0 -37
- package/scripts/vbounce_ask.mjs +0 -98
- package/scripts/vbounce_index.mjs +0 -184
|
@@ -0,0 +1,576 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# pre_gate_common.sh — Shared gate check functions for V-Bounce OS
# Sourced by pre_gate_runner.sh. Never run directly.

set -euo pipefail

# ── Colors & formatting ──────────────────────────────────────────────
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

# Check tallies and accumulated result rows.
PASS_COUNT=0
FAIL_COUNT=0
SKIP_COUNT=0
RESULTS=""
# Fix: PLAIN_RESULTS (the color-free row buffer used by
# record_result_plain/write_report) was never initialized; under
# `set -u`, expanding "$PLAIN_RESULTS" in write_report aborted the
# script when no plain result had been recorded yet.
PLAIN_RESULTS=""
|
19
|
+
record_result() {
  # Tally one gate-check outcome and append a colored table row.
  # $1 id, $2 status (PASS|FAIL|SKIP; anything else is ignored), $3 detail.
  local id="$1" status="$2" detail="$3"
  local color
  case "$status" in
    PASS) color="$GREEN";  PASS_COUNT=$((PASS_COUNT + 1)) ;;
    FAIL) color="$RED";    FAIL_COUNT=$((FAIL_COUNT + 1)) ;;
    SKIP) color="$YELLOW"; SKIP_COUNT=$((SKIP_COUNT + 1)) ;;
    *) return 0 ;;  # unknown status: no-op, matching original behavior
  esac
  RESULTS+="| ${id} | ${color}${status}${NC} | ${detail} |"$'\n'
}
|
|
27
|
+
|
|
28
|
+
record_result_plain() {
  # Append a color-free table row (for the markdown report file).
  local id="$1" status="$2" detail="$3"
  local row="| ${id} | ${status} | ${detail} |"
  PLAIN_RESULTS+="${row}"$'\n'
}
|
|
32
|
+
|
|
33
|
+
print_summary() {
  # Render the accumulated colored rows plus a PASS/FAIL/SKIP tally to stdout.
  # %b interprets the backslash escape sequences in the color codes,
  # matching the original `echo -e` behavior.
  printf '\n'
  printf '%b\n' "${CYAN}── Gate Check Results ──${NC}"
  printf '%s\n' "| Check | Status | Detail |" "|-------|--------|--------|"
  printf '%b\n' "$RESULTS"
  printf '\n'
  printf '%b\n' "PASS: ${GREEN}${PASS_COUNT}${NC} FAIL: ${RED}${FAIL_COUNT}${NC} SKIP: ${YELLOW}${SKIP_COUNT}${NC}"
}
|
|
42
|
+
|
|
43
|
+
write_report() {
  # Write the plain (color-free) markdown gate report to output_path,
  # creating parent directories as needed. Reads the WORKTREE_PATH and
  # GATE_TYPE globals set by the runner.
  local output_path="$1"
  mkdir -p "$(dirname "$output_path")"
  {
    printf '%s\n' "# Pre-Gate Scan Results"
    printf 'Date: %s\n' "$(date -u '+%Y-%m-%d %H:%M UTC')"
    printf 'Target: %s\n' "${WORKTREE_PATH}"
    printf 'Gate: %s\n' "${GATE_TYPE}"
    printf '\n'
    printf '%s\n' "| Check | Status | Detail |" "|-------|--------|--------|"
    printf '%b\n' "$PLAIN_RESULTS"
    printf '\n'
    printf 'PASS: %s FAIL: %s SKIP: %s\n' "${PASS_COUNT}" "${FAIL_COUNT}" "${SKIP_COUNT}"
  } > "$output_path"
}
|
|
59
|
+
|
|
60
|
+
# ── Stack detection helpers ──────────────────────────────────────────
|
|
61
|
+
|
|
62
|
+
detect_test_cmd() {
  # Print the stack-appropriate test command for dir, or "" if none.
  local dir="$1"
  if [[ -f "${dir}/package.json" ]]; then
    local test_script
    # Fix: pass the path as an argument instead of interpolating "$dir"
    # into the JS source — paths containing quotes/backslashes no longer
    # break (or inject into) the one-liner, and relative dirs work since
    # readFileSync resolves against the cwd (unlike require()).
    test_script=$(node -e 'try{const p=JSON.parse(require("fs").readFileSync(process.argv[1],"utf8"));console.log((p.scripts&&p.scripts.test)||"")}catch(e){}' "${dir}/package.json" 2>/dev/null || echo "")
    # Ignore npm's placeholder test script.
    if [[ -n "$test_script" && "$test_script" != "echo \"Error: no test specified\" && exit 1" ]]; then
      echo "npm test"
      return
    fi
  fi
  if [[ -f "${dir}/pytest.ini" || -f "${dir}/pyproject.toml" || -f "${dir}/setup.cfg" ]]; then
    echo "pytest"
    return
  fi
  if [[ -f "${dir}/Cargo.toml" ]]; then
    echo "cargo test"
    return
  fi
  if [[ -f "${dir}/go.mod" ]]; then
    echo "go test ./..."
    return
  fi
  echo ""
}
|
|
86
|
+
|
|
87
|
+
detect_build_cmd() {
  # Print the stack-appropriate build command for dir, or "" if none.
  local dir="$1"
  if [[ -f "${dir}/package.json" ]]; then
    local build_script
    # Fix: pass the path via argv instead of interpolating "$dir" into
    # the JS source (quote-safe, injection-safe, relative-path-safe).
    build_script=$(node -e 'try{const p=JSON.parse(require("fs").readFileSync(process.argv[1],"utf8"));console.log((p.scripts&&p.scripts.build)||"")}catch(e){}' "${dir}/package.json" 2>/dev/null || echo "")
    if [[ -n "$build_script" ]]; then
      echo "npm run build"
      return
    fi
  fi
  if [[ -f "${dir}/Cargo.toml" ]]; then
    echo "cargo build"
    return
  fi
  if [[ -f "${dir}/go.mod" ]]; then
    echo "go build ./..."
    return
  fi
  echo ""
}
|
|
107
|
+
|
|
108
|
+
detect_lint_cmd() {
  # Print the stack-appropriate lint command for dir, or "" if none.
  local dir="$1"
  if [[ -f "${dir}/package.json" ]]; then
    local lint_script
    # Fix: pass the path via argv instead of interpolating "$dir" into
    # the JS source (quote-safe, injection-safe, relative-path-safe).
    lint_script=$(node -e 'try{const p=JSON.parse(require("fs").readFileSync(process.argv[1],"utf8"));console.log((p.scripts&&p.scripts.lint)||"")}catch(e){}' "${dir}/package.json" 2>/dev/null || echo "")
    if [[ -n "$lint_script" ]]; then
      echo "npm run lint"
      return
    fi
  fi
  # ruff only when both the tool and a pyproject are present.
  if command -v ruff &>/dev/null && [[ -f "${dir}/pyproject.toml" ]]; then
    echo "ruff check ."
    return
  fi
  if [[ -f "${dir}/Cargo.toml" ]]; then
    echo "cargo clippy"
    return
  fi
  echo ""
}
|
|
128
|
+
|
|
129
|
+
detect_source_glob() {
  # Map the project's stack markers to a filename glob for source files.
  # Falls back to "*" when no stack is recognized.
  local dir="$1"
  if [[ -f "${dir}/tsconfig.json" ]]; then
    echo "*.{ts,tsx}"
  elif [[ -f "${dir}/package.json" ]]; then
    echo "*.{js,jsx}"
  elif [[ -f "${dir}/pyproject.toml" || -f "${dir}/setup.py" || -f "${dir}/setup.cfg" ]]; then
    echo "*.py"
  elif [[ -f "${dir}/Cargo.toml" ]]; then
    echo "*.rs"
  elif [[ -f "${dir}/go.mod" ]]; then
    echo "*.go"
  else
    echo "*"
  fi
}
|
|
153
|
+
|
|
154
|
+
detect_dep_file() {
  # Map a lockfile (or canonical dep file) to the manifest a reviewer
  # should diff. Order matters: first match wins.
  local dir="$1"
  local pair lock manifest
  for pair in \
    "package-lock.json:package.json" \
    "yarn.lock:package.json" \
    "pnpm-lock.yaml:package.json" \
    "requirements.txt:requirements.txt" \
    "Pipfile.lock:Pipfile" \
    "pyproject.toml:pyproject.toml" \
    "Cargo.lock:Cargo.toml" \
    "go.sum:go.mod"; do
    lock="${pair%%:*}"
    manifest="${pair#*:}"
    if [[ -f "${dir}/${lock}" ]]; then
      echo "$manifest"
      return
    fi
  done
  echo ""
}
|
|
166
|
+
|
|
167
|
+
detect_test_pattern() {
  # BRE (basic regex, \| alternation) identifying test files for the stack.
  local dir="$1"
  if [[ -f "${dir}/tsconfig.json" || -f "${dir}/package.json" ]]; then
    echo '\.test\.\|\.spec\.\|__tests__'
  elif [[ -f "${dir}/pyproject.toml" || -f "${dir}/setup.py" ]]; then
    echo 'test_\|_test\.py'
  elif [[ -f "${dir}/Cargo.toml" ]]; then
    echo '_test\.rs\|tests/'
  else
    echo '\.test\.\|\.spec\.\|test_'
  fi
}
|
|
183
|
+
|
|
184
|
+
detect_doc_comment_pattern() {
  # BRE matching the stack's doc-comment opener (JSDoc, docstring, rustdoc).
  local dir="$1"
  if [[ -f "${dir}/tsconfig.json" || -f "${dir}/package.json" ]]; then
    echo '/\*\*'
  elif [[ -f "${dir}/pyproject.toml" || -f "${dir}/setup.py" ]]; then
    echo '"""'
  elif [[ -f "${dir}/Cargo.toml" ]]; then
    echo '///'
  else
    echo '/\*\*\|"""\|///'
  fi
}
|
|
200
|
+
|
|
201
|
+
detect_export_pattern() {
  # BRE matching the stack's exported/public symbol declarations.
  local dir="$1"
  if [[ -f "${dir}/tsconfig.json" || -f "${dir}/package.json" ]]; then
    echo 'export '
  elif [[ -f "${dir}/pyproject.toml" || -f "${dir}/setup.py" ]]; then
    echo '^def \|^class '
  elif [[ -f "${dir}/Cargo.toml" ]]; then
    echo '^pub '
  elif [[ -f "${dir}/go.mod" ]]; then
    echo '^func [A-Z]'
  else
    echo 'export \|^def \|^class \|^pub \|^func [A-Z]'
  fi
}
|
|
221
|
+
|
|
222
|
+
detect_debug_pattern() {
  # BRE matching leftover debug statements for the stack.
  local dir="$1"
  if [[ -f "${dir}/tsconfig.json" || -f "${dir}/package.json" ]]; then
    echo 'console\.log\|console\.debug'
  elif [[ -f "${dir}/pyproject.toml" || -f "${dir}/setup.py" ]]; then
    echo 'print(\|breakpoint()'
  elif [[ -f "${dir}/Cargo.toml" ]]; then
    echo 'dbg!\|println!'
  elif [[ -f "${dir}/go.mod" ]]; then
    echo 'fmt\.Print'
  else
    echo 'console\.log\|print(\|dbg!\|fmt\.Print'
  fi
}
|
|
242
|
+
|
|
243
|
+
# ── Get modified files from git diff ─────────────────────────────────
|
|
244
|
+
|
|
245
|
+
get_modified_files() {
  # Print files changed relative to base_branch (merge-base diff), or to
  # the previous commit when no base branch is given. Emits "" when git
  # cannot produce a diff.
  # Fix: use `git -C "$dir"` instead of a bare `cd "$dir"` — the cd
  # mutated the caller's working directory when the function was not
  # invoked in a subshell, and could abort the script under `set -e`
  # if the directory was missing.
  local dir="$1"
  local base_branch="${2:-}"
  if [[ -n "$base_branch" ]]; then
    git -C "$dir" diff --name-only "$base_branch"...HEAD -- . 2>/dev/null \
      || git -C "$dir" diff --name-only HEAD~1 -- . 2>/dev/null \
      || echo ""
  else
    git -C "$dir" diff --name-only HEAD~1 -- . 2>/dev/null || echo ""
  fi
}
|
|
255
|
+
|
|
256
|
+
# ── Universal check functions ────────────────────────────────────────
|
|
257
|
+
|
|
258
|
+
check_tests_exist() {
  # Verify each modified source file has a matching test file somewhere
  # in the tree (filename containing the source stem and matching the
  # stack's test-file pattern).
  local dir="$1" modified_files="$2"
  local test_pattern
  test_pattern=$(detect_test_pattern "$dir")
  # Fix: dropped the `source_glob=$(detect_source_glob ...)` local that
  # was computed but never used.

  if [[ -z "$modified_files" ]]; then
    record_result "tests_exist" "SKIP" "No modified files detected"
    record_result_plain "tests_exist" "SKIP" "No modified files detected"
    return
  fi

  local missing=0
  local checked=0
  local file stem
  while IFS= read -r file; do
    [[ -z "$file" ]] && continue
    # Skip test files themselves, configs, docs
    if echo "$file" | grep -qE '(\.test\.|\.spec\.|__tests__|test_|_test\.|\.md$|\.json$|\.yml$|\.yaml$|\.config\.)'; then
      continue
    fi
    # Only check source files
    if ! echo "$file" | grep -qE "\.(ts|tsx|js|jsx|py|rs|go)$"; then
      continue
    fi
    checked=$((checked + 1))
    # "stem" (renamed from `basename`, which shadowed the command name)
    # is the filename without its final extension.
    stem=$(basename "$file" | sed 's/\.[^.]*$//')
    # Look for a corresponding test file anywhere in the tree
    if ! find "$dir" -name "*${stem}*" 2>/dev/null | grep -q "$test_pattern"; then
      missing=$((missing + 1))
    fi
  done <<< "$modified_files"

  if [[ $checked -eq 0 ]]; then
    record_result "tests_exist" "SKIP" "No source files in diff"
    record_result_plain "tests_exist" "SKIP" "No source files in diff"
  elif [[ $missing -eq 0 ]]; then
    record_result "tests_exist" "PASS" "${checked} source files have tests"
    record_result_plain "tests_exist" "PASS" "${checked} source files have tests"
  else
    record_result "tests_exist" "FAIL" "${missing}/${checked} source files missing tests"
    record_result_plain "tests_exist" "FAIL" "${missing}/${checked} source files missing tests"
  fi
}
|
|
303
|
+
|
|
304
|
+
check_tests_pass() {
  # Run the detected test command in dir; SKIP when the stack has none.
  local dir="$1"
  local test_cmd
  test_cmd=$(detect_test_cmd "$dir")

  if [[ -z "$test_cmd" ]]; then
    record_result "tests_pass" "SKIP" "No test runner detected"
    record_result_plain "tests_pass" "SKIP" "No test runner detected"
    return
  fi

  # Subshell keeps the caller's cwd intact; output is discarded — the
  # table row is the only signal.
  local status="FAIL" detail="${test_cmd} failed"
  if (cd "$dir" && eval "$test_cmd" > /dev/null 2>&1); then
    status="PASS"
    detail="$test_cmd"
  fi
  record_result "tests_pass" "$status" "$detail"
  record_result_plain "tests_pass" "$status" "$detail"
}
|
|
323
|
+
|
|
324
|
+
check_build() {
  # Run the detected build command in dir; SKIP when the stack has none.
  local dir="$1"
  local build_cmd
  build_cmd=$(detect_build_cmd "$dir")

  if [[ -z "$build_cmd" ]]; then
    record_result "build" "SKIP" "No build command detected"
    record_result_plain "build" "SKIP" "No build command detected"
    return
  fi

  local status="FAIL" detail="${build_cmd} failed"
  if (cd "$dir" && eval "$build_cmd" > /dev/null 2>&1); then
    status="PASS"
    detail="$build_cmd"
  fi
  record_result "build" "$status" "$detail"
  record_result_plain "build" "$status" "$detail"
}
|
|
343
|
+
|
|
344
|
+
check_lint() {
  # Run the detected lint command in dir; SKIP when no linter is found.
  local dir="$1"
  local lint_cmd
  lint_cmd=$(detect_lint_cmd "$dir")

  if [[ -z "$lint_cmd" ]]; then
    record_result "lint" "SKIP" "No linter detected"
    record_result_plain "lint" "SKIP" "No linter detected"
    return
  fi

  local status="FAIL" detail="${lint_cmd} failed"
  if (cd "$dir" && eval "$lint_cmd" > /dev/null 2>&1); then
    status="PASS"
    detail="$lint_cmd"
  fi
  record_result "lint" "$status" "$detail"
  record_result_plain "lint" "$status" "$detail"
}
|
|
363
|
+
|
|
364
|
+
check_no_debug_output() {
  # Flag modified files that still contain stack-specific debug statements.
  local dir="$1" modified_files="$2"
  local debug_pattern
  debug_pattern=$(detect_debug_pattern "$dir")

  if [[ -z "$modified_files" ]]; then
    record_result "no_debug_output" "SKIP" "No modified files"
    record_result_plain "no_debug_output" "SKIP" "No modified files"
    return
  fi

  local found=0
  local file
  while IFS= read -r file; do
    [[ -z "$file" ]] && continue
    [[ ! -f "${dir}/${file}" ]] && continue
    # Skip test files and configs
    if echo "$file" | grep -qE '(\.test\.|\.spec\.|__tests__|test_|_test\.|\.config\.|\.md$|\.json$)'; then
      continue
    fi
    # Fix: the detect_debug_pattern patterns use BRE alternation ('\|'),
    # so they must be matched with plain grep (BRE). The original used
    # `grep -qE`, under which '\|' is a literal pipe (and the Python
    # pattern 'print(\|…' is an invalid ERE), so the check never fired.
    if grep -q "$debug_pattern" "${dir}/${file}" 2>/dev/null; then
      found=$((found + 1))
    fi
  done <<< "$modified_files"

  if [[ $found -eq 0 ]]; then
    record_result "no_debug_output" "PASS" "No debug statements in modified files"
    record_result_plain "no_debug_output" "PASS" "No debug statements in modified files"
  else
    record_result "no_debug_output" "FAIL" "${found} files contain debug statements"
    record_result_plain "no_debug_output" "FAIL" "${found} files contain debug statements"
  fi
}
|
|
396
|
+
|
|
397
|
+
check_no_todo_fixme() {
  # Fail when any modified file still carries TODO/FIXME/HACK/XXX markers.
  local dir="$1" modified_files="$2"

  if [[ -z "$modified_files" ]]; then
    record_result "no_todo_fixme" "SKIP" "No modified files"
    record_result_plain "no_todo_fixme" "SKIP" "No modified files"
    return
  fi

  local hits=0 path
  while IFS= read -r path; do
    if [[ -n "$path" && -f "${dir}/${path}" ]] \
        && grep -qiE '(TODO|FIXME|HACK|XXX)' "${dir}/${path}" 2>/dev/null; then
      hits=$((hits + 1))
    fi
  done <<< "$modified_files"

  local status="PASS" detail="No TODO/FIXME in modified files"
  if (( hits > 0 )); then
    status="FAIL"
    detail="${hits} files contain TODO/FIXME"
  fi
  record_result "no_todo_fixme" "$status" "$detail"
  record_result_plain "no_todo_fixme" "$status" "$detail"
}
|
|
423
|
+
|
|
424
|
+
check_exports_have_docs() {
  # Heuristic: each modified source file should contain at least as many
  # doc-comment openers as exported symbols.
  local dir="$1" modified_files="$2"
  local export_pattern doc_pattern
  export_pattern=$(detect_export_pattern "$dir")
  doc_pattern=$(detect_doc_comment_pattern "$dir")

  if [[ -z "$modified_files" ]]; then
    record_result "exports_have_docs" "SKIP" "No modified files"
    record_result_plain "exports_have_docs" "SKIP" "No modified files"
    return
  fi

  local missing=0
  local total=0
  local file exports_in_file docs_in_file
  while IFS= read -r file; do
    [[ -z "$file" ]] && continue
    [[ ! -f "${dir}/${file}" ]] && continue
    # Skip test files and configs
    if echo "$file" | grep -qE '(\.test\.|\.spec\.|__tests__|test_|_test\.|\.config\.|\.md$|\.json$)'; then
      continue
    fi
    if ! echo "$file" | grep -qE "\.(ts|tsx|js|jsx|py|rs|go)$"; then
      continue
    fi
    # Fix: `$(grep -c … || echo 0)` produced a two-line "0\n0" when grep
    # found no match (grep -c already prints 0 AND exits 1), which broke
    # the arithmetic comparisons below. `|| true` keeps grep's own count
    # while tolerating its non-zero exit; default to 0 when the file is
    # unreadable (grep exit 2, no output).
    exports_in_file=$(grep -c "$export_pattern" "${dir}/${file}" 2>/dev/null || true)
    exports_in_file=${exports_in_file:-0}
    if [[ $exports_in_file -gt 0 ]]; then
      total=$((total + exports_in_file))
      docs_in_file=$(grep -c "$doc_pattern" "${dir}/${file}" 2>/dev/null || true)
      docs_in_file=${docs_in_file:-0}
      if [[ $docs_in_file -lt $exports_in_file ]]; then
        missing=$((missing + (exports_in_file - docs_in_file)))
      fi
    fi
  done <<< "$modified_files"

  if [[ $total -eq 0 ]]; then
    record_result "exports_have_docs" "SKIP" "No exports in modified files"
    record_result_plain "exports_have_docs" "SKIP" "No exports in modified files"
  elif [[ $missing -eq 0 ]]; then
    record_result "exports_have_docs" "PASS" "${total} exports documented"
    record_result_plain "exports_have_docs" "PASS" "${total} exports documented"
  else
    record_result "exports_have_docs" "FAIL" "${missing}/${total} exports missing doc comments"
    record_result_plain "exports_have_docs" "FAIL" "${missing}/${total} exports missing doc comments"
  fi
}
|
|
472
|
+
|
|
473
|
+
check_no_new_dependencies() {
  # Flag any change to the project's dependency manifest on this branch.
  # A modified manifest is a FAIL so a human reviews the new deps.
  local dir="$1" base_branch="${2:-}"
  local dep_file
  dep_file=$(detect_dep_file "$dir")

  if [[ -z "$dep_file" ]]; then
    record_result "no_new_deps" "SKIP" "No dependency file detected"
    record_result_plain "no_new_deps" "SKIP" "No dependency file detected"
    return
  fi

  if [[ -z "$base_branch" ]]; then
    record_result "no_new_deps" "SKIP" "No base branch to compare"
    record_result_plain "no_new_deps" "SKIP" "No base branch to compare"
    return
  fi

  # Fix: use `git -C` instead of a bare `cd "$dir"` that permanently
  # changed the caller's working directory; also dropped the `added`
  # line count that was computed but never used in the report.
  local diff_output
  diff_output=$(git -C "$dir" diff "$base_branch"...HEAD -- "$dep_file" 2>/dev/null || echo "")

  if [[ -z "$diff_output" ]]; then
    record_result "no_new_deps" "PASS" "No changes to ${dep_file}"
    record_result_plain "no_new_deps" "PASS" "No changes to ${dep_file}"
  else
    record_result "no_new_deps" "FAIL" "${dep_file} modified — review new dependencies"
    record_result_plain "no_new_deps" "FAIL" "${dep_file} modified — review new dependencies"
  fi
}
|
|
504
|
+
|
|
505
|
+
check_file_size_limit() {
  # Fail when any modified source file exceeds max_lines (default 500).
  local dir="$1" modified_files="$2" max_lines="${3:-500}"

  if [[ -z "$modified_files" ]]; then
    record_result "file_size" "SKIP" "No modified files"
    record_result_plain "file_size" "SKIP" "No modified files"
    return
  fi

  local big=0 listing="" path line_count
  while IFS= read -r path; do
    [[ -n "$path" && -f "${dir}/${path}" ]] || continue
    # Only count recognized source-file extensions.
    echo "$path" | grep -qE "\.(ts|tsx|js|jsx|py|rs|go|swift|kt|java)$" || continue
    line_count=$(wc -l < "${dir}/${path}" | tr -d ' ')
    if (( line_count > max_lines )); then
      big=$((big + 1))
      listing="${listing}${path}(${line_count}L) "
    fi
  done <<< "$modified_files"

  if (( big == 0 )); then
    record_result "file_size" "PASS" "All files under ${max_lines} lines"
    record_result_plain "file_size" "PASS" "All files under ${max_lines} lines"
  else
    record_result "file_size" "FAIL" "${big} files over ${max_lines}L: ${listing}"
    record_result_plain "file_size" "FAIL" "${big} files over ${max_lines}L: ${listing}"
  fi
}
|
|
538
|
+
|
|
539
|
+
# ── Custom check runner ──────────────────────────────────────────────
|
|
540
|
+
|
|
541
|
+
run_custom_check() {
  # Run an arbitrary project-configured command as a gate check.
  # NOTE(review): cmd is eval'd — assumes the gate config is trusted.
  local dir="$1" id="$2" cmd="$3" description="${4:-Custom check}"

  local outcome="FAIL"
  if (cd "$dir" && eval "$cmd" > /dev/null 2>&1); then
    outcome="PASS"
  fi
  record_result "$id" "$outcome" "$description"
  record_result_plain "$id" "$outcome" "$description"
}
|
|
552
|
+
|
|
553
|
+
run_custom_grep_check() {
  # Count files under dir (excluding node_modules/.git) whose name matches
  # glob and whose content matches pattern; PASS/FAIL depends on whether
  # the pattern is required (should_find=true) or forbidden (default).
  # NOTE(review): glob is passed to `find -name`, which does not expand
  # brace patterns like "*.{ts,tsx}" — callers must supply plain globs.
  local dir="$1" id="$2" pattern="$3" glob="$4" should_find="${5:-false}"

  local count
  count=$(find "$dir" -name "$glob" -not -path '*/node_modules/*' -not -path '*/.git/*' -exec grep -l "$pattern" {} \; 2>/dev/null | wc -l | tr -d ' ')

  local status detail
  if [[ "$should_find" == "true" ]]; then
    if [[ $count -gt 0 ]]; then
      status="PASS"; detail="Pattern found in ${count} files"
    else
      status="FAIL"; detail="Expected pattern not found"
    fi
  else
    if [[ $count -eq 0 ]]; then
      status="PASS"; detail="Pattern not found (good)"
    else
      status="FAIL"; detail="Unwanted pattern in ${count} files"
    fi
  fi
  record_result "$id" "$status" "$detail"
  record_result_plain "$id" "$status" "$detail"
}
|