start-vibing 4.3.4 → 4.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. package/package.json +2 -2
  2. package/template/.claude/agents/sd-audit.md +32 -0
  3. package/template/.claude/commands/e2e-audit.md +16 -0
  4. package/template/.claude/hooks/e2e-audit-session-start.sh +4 -0
  5. package/template/.claude/settings.json +4 -0
  6. package/template/.claude/skills/e2e-audit/SKILL.md +216 -0
  7. package/template/.claude/skills/e2e-audit/findings.schema.json +98 -0
  8. package/template/.claude/skills/e2e-audit/references/api-contract-playbook.md +66 -0
  9. package/template/.claude/skills/e2e-audit/references/auth-setup-playbook.md +78 -0
  10. package/template/.claude/skills/e2e-audit/references/coverage-gap-playbook.md +95 -0
  11. package/template/.claude/skills/e2e-audit/references/post-run-feedback-playbook.md +80 -0
  12. package/template/.claude/skills/e2e-audit/scripts/detect-stack.sh +205 -0
  13. package/template/.claude/skills/e2e-audit/scripts/detect-uncovered.sh +137 -0
  14. package/template/.claude/skills/e2e-audit/scripts/discover-api-surface.sh +242 -0
  15. package/template/.claude/skills/e2e-audit/scripts/discover-routes.sh +163 -0
  16. package/template/.claude/skills/e2e-audit/scripts/inventory-existing-tests.sh +161 -0
  17. package/template/.claude/skills/e2e-audit/scripts/verify-audit.sh +88 -0
  18. package/template/.claude/skills/e2e-audit/templates/auth-setup.ts.tpl +24 -0
  19. package/template/.claude/skills/e2e-audit/templates/base-fixture.ts.tpl +75 -0
  20. package/template/.claude/skills/e2e-audit/templates/findings-report.md.tpl +54 -0
  21. package/template/.claude/skills/e2e-audit/templates/post-run-feedback.md.tpl +36 -0
  22. package/template/.claude/skills/super-design/SKILL.md +42 -4
  23. package/template/.claude/skills/super-design/scripts/discover-surfaces.sh +197 -0
  24. package/template/.claude/skills/super-design/scripts/extract-project-rules.sh +240 -0
  25. package/template/.claude/skills/super-design/scripts/verify-audit.sh +34 -1
@@ -0,0 +1,163 @@
1
+ #!/usr/bin/env bash
2
+ # discover-routes.sh — enumerate every user-facing page route from the source tree.
3
+ #
4
+ # Output: JSON array on stdout. Each item:
5
+ # { "path": "/users/[id]", "kind": "page" | "layout" | "route-group" | "parallel" | "intercepting",
6
+ # "file": "src/app/users/[id]/page.tsx", "dynamic": true, "catch_all": false }
7
+ #
8
+ # Supports next (app + pages router), remix, sveltekit, nuxt, astro.
9
+ # Non-framework repos emit an empty array.
10
+ set -euo pipefail
11
+
12
+ command -v jq >/dev/null || { echo "jq required" >&2; exit 2; }
13
+
14
# Detect the web framework from package.json dependencies.
# Prints one of: next | remix | sveltekit | nuxt | astro | unknown.
# Probe order matters: the first matching dependency wins.
detect_fw() {
  local dep
  for dep in next '@remix-run/react' '@sveltejs/kit' nuxt astro; do
    if jq -e --arg d "$dep" '.dependencies[$d] // .devDependencies[$d]' package.json >/dev/null 2>&1; then
      case "$dep" in
        next)               echo "next" ;;
        '@remix-run/react') echo "remix" ;;
        '@sveltejs/kit')    echo "sveltekit" ;;
        nuxt)               echo "nuxt" ;;
        astro)              echo "astro" ;;
      esac
      return
    fi
  done
  echo "unknown"
}
23
+
24
+ FW="$(detect_fw)"
25
+ OUT='[]'
26
+
27
# Append one route record to the global OUT JSON array.
# args: path kind file dynamic catch_all
emit() {
  local route_path="$1" route_kind="$2" src_file="$3" is_dynamic="$4" is_catch_all="$5"
  OUT="$(
    jq --arg p "$route_path" --arg k "$route_kind" --arg f "$src_file" \
       --argjson d "$is_dynamic" --argjson c "$is_catch_all" \
       '. + [{path:$p, kind:$k, file:$f, dynamic:$d, catch_all:$c}]' <<<"$OUT"
  )"
}
33
+
34
# Translate a Next/Remix/Nuxt/SvelteKit/Astro file path into a URL path.
# Strips: src/app, app, src/pages, pages, src/routes, app/routes.
# Drops: route-group segments like (marketing), private _segments (Nuxt/SvelteKit),
# converts [param] into a URL segment that keeps the bracket notation.
path_to_url() {
  local p="$1"
  # strip known leading prefixes
  p="${p#src/app/}"; p="${p#app/}"
  p="${p#src/pages/}"; p="${p#pages/}"
  p="${p#src/routes/}"; p="${p#app/routes/}"
  p="${p#routes/}"
  # Root-level entry files (app/page.tsx, pages/index.tsx, routes/+page.svelte,
  # pages/index.vue, ...) have no directory component left after the prefix
  # strips, so the "/page.tsx"-style suffix strips below (which require a
  # leading slash) never match. They used to fall through to the bare
  # extension strip and yield bogus "/page" / "/index" routes; map them to
  # the root route explicitly instead.
  case "$p" in
    page.tsx|page.ts|page.jsx|page.js|index.tsx|index.ts|index.jsx|index.js|index.vue|index.astro|+page.svelte|+layout.svelte)
      echo "/"
      return
      ;;
  esac
  # drop trailing filename
  p="${p%/page.tsx}"; p="${p%/page.ts}"; p="${p%/page.jsx}"; p="${p%/page.js}"
  p="${p%/+page.svelte}"; p="${p%/+layout.svelte}"
  p="${p%/index.tsx}"; p="${p%/index.ts}"; p="${p%/index.jsx}"; p="${p%/index.js}"
  p="${p%/index.vue}"; p="${p%/index.astro}"
  p="${p%.tsx}"; p="${p%.ts}"; p="${p%.jsx}"; p="${p%.js}"
  p="${p%.vue}"; p="${p%.astro}"; p="${p%.svelte}"
  # remove Next route-group segments (parentheses)
  p="$(echo "$p" | awk -F/ '{
    out=""; for(i=1;i<=NF;i++){
      if($i ~ /^\(.*\)$/) continue;
      out = out (out==""?"":"/") $i;
    }
    print out
  }')"
  # remove private Nuxt/SvelteKit underscore segments
  p="$(echo "$p" | awk -F/ '{
    out=""; for(i=1;i<=NF;i++){
      if($i ~ /^_/) continue;
      out = out (out==""?"":"/") $i;
    }
    print out
  }')"
  # remix: convert dot-delimited route files to slashes, $param -> :param
  if [[ "$FW" == "remix" ]]; then
    p="$(echo "$p" | sed 's/\./\//g; s/\$/:/g')"
  fi
  # sveltekit/nuxt: [[optional]] / [...rest] already close enough; leave as-is
  echo "/${p}"
}
75
+
76
# Classify a URL path produced by path_to_url.
# args: url file  (file is unused today; kept so call sites stay stable)
# Prints: "<dynamic> <catch_all>" — two booleans on one line.
classify() {
  local url="$1" file="$2"
  local dyn=false catch=false
  # dynamic: [param] (Next/SvelteKit/Nuxt/Astro) or :param (Remix post-rewrite)
  [[ "$url" == *"["*"]"* || "$url" == *":"* ]] && dyn=true
  # catch-all: [...rest] and optional [[...rest]]. The previous extra check for
  # a literal "$$" could never match — path_to_url rewrites every "$" to ":"
  # before classify runs, so it was dead code and has been dropped.
  [[ "$url" == *"[..."*"]"* ]] && catch=true
  echo "$dyn $catch"
}
84
+
85
# --- Next.js app router -----------------------------------------------------
if [[ "$FW" == "next" ]]; then
  # page.{tsx,ts,jsx,js} files are the routable pages
  while IFS= read -r src; do
    [[ -z "$src" ]] && continue
    url="$(path_to_url "$src")"
    read -r dyn cat < <(classify "$url" "$src")
    emit "$url" "page" "$src" "$dyn" "$cat"
  done < <(find app src/app -type f \( -name 'page.tsx' -o -name 'page.ts' -o -name 'page.jsx' -o -name 'page.js' \) 2>/dev/null)

  # layouts are emitted as static (never dynamic / catch-all)
  while IFS= read -r src; do
    [[ -z "$src" ]] && continue
    emit "$(path_to_url "$src")" "layout" "$src" false false
  done < <(find app src/app -type f \( -name 'layout.tsx' -o -name 'layout.ts' \) 2>/dev/null)

  # parallel route slots (@slot directories)
  # NOTE(review): "${d#*app/}" yields a path WITHOUT a leading slash, unlike
  # the page/layout entries above — confirm downstream consumers expect that.
  while IFS= read -r slot_dir; do
    [[ -z "$slot_dir" ]] && continue
    emit "${slot_dir#*app/}" "parallel" "$slot_dir" false false
  done < <(find app src/app -type d -name '@*' 2>/dev/null)

  # intercepting routes: (.)x, (..)x, (...)x directories
  while IFS= read -r icpt_dir; do
    [[ -z "$icpt_dir" ]] && continue
    emit "${icpt_dir#*app/}" "intercepting" "$icpt_dir" false false
  done < <(find app src/app -type d \( -name '(.)*' -o -name '(..)*' -o -name '(...)*' \) 2>/dev/null)

  # Pages router (legacy); skip framework internals and API handlers
  while IFS= read -r src; do
    [[ -z "$src" ]] && continue
    case "$src" in
      */_app.*|*/_document.*|*/_error.*|*/api/*) continue ;;
    esac
    url="$(path_to_url "$src")"
    read -r dyn cat < <(classify "$url" "$src")
    emit "$url" "page" "$src" "$dyn" "$cat"
  done < <(find pages src/pages -type f \( -name '*.tsx' -o -name '*.ts' -o -name '*.jsx' -o -name '*.js' \) 2>/dev/null)
fi
122
+
123
# --- Remix / SvelteKit / Nuxt / Astro ----------------------------------------
# All four frameworks share the same shape: find routable files under the
# framework's route directories, translate each into a URL, classify, emit.
scan_pages() {
  # args: everything is forwarded to find (dirs + predicates)
  local src url dyn cat
  while IFS= read -r src; do
    [[ -z "$src" ]] && continue
    url="$(path_to_url "$src")"
    read -r dyn cat < <(classify "$url" "$src")
    emit "$url" "page" "$src" "$dyn" "$cat"
  done < <(find "$@" 2>/dev/null)
}

case "$FW" in
  remix)     scan_pages app/routes src/routes -type f \( -name '*.tsx' -o -name '*.ts' \) ;;
  sveltekit) scan_pages src/routes -type f -name '+page.svelte' ;;
  nuxt)      scan_pages pages src/pages -type f -name '*.vue' ;;
  astro)     scan_pages src/pages -type f \( -name '*.astro' -o -name '*.tsx' -o -name '*.ts' \) ;;
esac

# de-duplicate on (path, file) and print the final JSON array
echo "$OUT" | jq '. | unique_by([.path, .file])'
@@ -0,0 +1,161 @@
1
+ #!/usr/bin/env bash
2
+ # inventory-existing-tests.sh — catalog every E2E test the project already has,
3
+ # so the audit can reuse setup and detect DRIFT between audit runs.
4
+ #
5
+ # Why: (1) If the project has tests/e2e/, we must not reinvent the fixtures,
6
+ # auth storage state, or page objects. (2) Between audit runs the test layout
7
+ # may change (test deleted, fixture renamed, new storageState file), and the
8
+ # skill should warn the user when that happens instead of silently producing
9
+ # stale findings.
10
+ #
11
+ # Output: JSON object on stdout:
12
+ # {
13
+ # "runner": "playwright" | "cypress" | "vitest-browser" | "none",
14
+ # "config_file": "playwright.config.ts" | "cypress.config.ts" | null,
15
+ # "test_dirs": ["tests/e2e", "e2e"],
16
+ # "test_files": [{ "file": "tests/e2e/login.spec.ts", "test_count": 5, "describe_count": 1 }],
17
+ # "fixtures": [{ "file": "...", "fixtures_defined": ["authenticatedPage", "apiErrors"] }],
18
+ # "page_objects": [{ "file": "...", "class": "LoginPage" }],
19
+ # "storage_states": ["tests/e2e/.auth/owner.json", ...],
20
+ # "has_global_setup": true | false,
21
+ # "hash": "<sha256 of file-list + sizes>" // for drift detection
22
+ # }
23
+ set -euo pipefail
24
+
25
+ command -v jq >/dev/null || { echo "jq required" >&2; exit 2; }
26
+
27
RUNNER="none"
CFG="null"            # JSON-encoded filename ("\"playwright.config.ts\"") or literal null
TDIRS='[]'
TFILES='[]'
FIXTURES='[]'
PAGES='[]'
STATES='[]'
HAS_GLOBAL_SETUP=false

# --- detect runner + config -------------------------------------------------
# First config file found wins; Playwright takes precedence over Cypress.
for cand in playwright.config.ts playwright.config.js playwright.config.mjs; do
  [[ -f "$cand" ]] || continue
  RUNNER="playwright"; CFG="\"$cand\""
  break
done
if [[ "$RUNNER" == "none" ]]; then
  for cand in cypress.config.ts cypress.config.js; do
    [[ -f "$cand" ]] || continue
    RUNNER="cypress"; CFG="\"$cand\""
    break
  done
fi
# vitest-browser has no dedicated config file convention; probe package.json.
if [[ "$RUNNER" == "none" ]] && jq -e '.dependencies["@vitest/browser"] // .devDependencies["@vitest/browser"]' package.json >/dev/null 2>&1; then
  RUNNER="vitest-browser"
fi

# --- detect test dirs -------------------------------------------------------
CANDIDATE_DIRS=(tests/e2e tests/playwright tests/integration e2e playwright-tests cypress/e2e cypress/integration)
for d in "${CANDIDATE_DIRS[@]}"; do
  [[ -d "$d" ]] || continue
  TDIRS="$(jq --arg d "$d" '. + [$d]' <<<"$TDIRS")"
done
56
+
57
# --- list test files + count tests -----------------------------------------
SPEC_GLOBS=()
if [[ "$RUNNER" == "playwright" ]]; then
  SPEC_GLOBS=('*.spec.ts' '*.spec.tsx' '*.spec.js' '*.test.ts' '*.test.tsx')
elif [[ "$RUNNER" == "cypress" ]]; then
  SPEC_GLOBS=('*.cy.ts' '*.cy.js' '*.spec.ts' '*.spec.js')
elif [[ "$RUNNER" == "vitest-browser" ]]; then
  SPEC_GLOBS=('*.test.ts' '*.test.tsx')
fi

# Count lines matching an ERE in a file, printing a single integer.
# NB: `grep -c` prints "0" AND exits 1 when nothing matches, so the previous
# `$(grep -c ... || echo 0)` captured "0\n0" on zero-match files — invalid
# input for `jq --argjson`, which then aborted the script under `set -e`.
# Capture grep's own count and only fall back when it produced no output.
count_pattern() {
  # args: ERE pattern, file
  local n
  n="$(grep -cE "$1" "$2" 2>/dev/null)" || true
  echo "${n:-0}"
}

# Walk every detected test dir and record per-file test/describe counts
# into the global TFILES array.
inventory_spec_files() {
  local d g f tc dc
  (( ${#SPEC_GLOBS[@]} > 0 )) || return 0
  while IFS= read -r d; do
    [[ -z "$d" ]] && continue
    for g in "${SPEC_GLOBS[@]}"; do
      while IFS= read -r f; do
        [[ -z "$f" ]] && continue
        tc="$(count_pattern '^[[:space:]]*(test|it)\(' "$f")"
        dc="$(count_pattern '^[[:space:]]*describe\(' "$f")"
        TFILES="$(jq --arg f "$f" --argjson t "$tc" --argjson dc "$dc" \
          '. + [{file:$f, test_count:$t, describe_count:$dc}]' <<<"$TFILES")"
      done < <(find "$d" -type f -name "$g" 2>/dev/null)
    done
  done < <(jq -r '.[]' <<<"$TDIRS")
}
inventory_spec_files
81
+
82
# --- detect fixtures --------------------------------------------------------
# Playwright: files that use `test.extend<>`, or export a test with extend.
# Heuristic: every `name:` key in a candidate file is treated as a potential
# fixture name, capped at 30 per file (coarse, but enough for reuse hints).
while IFS= read -r dir; do
  [[ -z "$dir" ]] && continue
  while IFS= read -r cand; do
    [[ -z "$cand" ]] && continue
    names='[]'
    while IFS= read -r fixture_name; do
      [[ -z "$fixture_name" ]] && continue
      names="$(jq --arg n "$fixture_name" '. + [$n]' <<<"$names")"
    done < <(grep -oE "([A-Za-z_][A-Za-z0-9_]*)[[:space:]]*:" "$cand" 2>/dev/null \
      | awk '{sub(":",""); print $1}' | sort -u | head -30)
    if [[ "$(jq 'length' <<<"$names")" -gt 0 ]]; then
      FIXTURES="$(jq --arg fi "$cand" --argjson df "$names" \
        '. + [{file:$fi, fixtures_defined:$df}]' <<<"$FIXTURES")"
    fi
  done < <(grep -rl "test\.extend\|base\.extend\|defineConfig" "$dir" --include='*.ts' --include='*.tsx' 2>/dev/null | head -40)
done < <(jq -r '.[]' <<<"$TDIRS")
100
+
101
# --- detect page objects ---------------------------------------------------
# Scan test dirs for `class FooPage` declarations (page-object pattern) and
# record them in the global PAGES array.
# Uses grep -E throughout: the previous BRE `\+` repetition escape is a GNU
# extension that BSD/macOS grep does not support, so the scan silently found
# nothing there.
scan_page_objects() {
  local d hit f cls
  while IFS= read -r d; do
    [[ -z "$d" ]] && continue
    while IFS= read -r hit; do
      [[ -z "$hit" ]] && continue
      f="${hit%%:*}"
      cls="$(echo "${hit#*:}" | grep -oE "class[[:space:]]+[A-Z][A-Za-z0-9_]*Page" | awk '{print $2}' | head -1)"
      if [[ -n "$cls" ]]; then
        PAGES="$(jq --arg f "$f" --arg c "$cls" '. + [{file:$f, class:$c}]' <<<"$PAGES")"
      fi
    done < <(grep -rHnE "class[[:space:]]+[A-Z][A-Za-z0-9_]*Page" "$d" --include='*.ts' --include='*.tsx' 2>/dev/null)
  done < <(jq -r '.[]' <<<"$TDIRS")
}
scan_page_objects
111
+
112
# --- detect storage states --------------------------------------------------
# Saved Playwright storageState JSON blobs (persisted auth sessions); the
# list is capped at 40 entries.
while IFS= read -r dir; do
  [[ -z "$dir" ]] && continue
  while IFS= read -r state_file; do
    [[ -z "$state_file" ]] && continue
    STATES="$(jq --arg f "$state_file" '. + [$f]' <<<"$STATES")"
  done < <(find "$dir" -type f \( -path '*/.auth/*.json' -o -name 'storageState*.json' -o -path '*/storage/*.json' \) 2>/dev/null | head -40)
done < <(jq -r '.[]' <<<"$TDIRS")
120
+
121
# --- global setup ----------------------------------------------------------
# CFG is either the literal string "null" or a JSON-encoded filename; decode
# it with `jq -r` before probing the config file for setup hooks.
if [[ "$CFG" != "null" ]]; then
  resolved_cfg="$(jq -r . <<<"$CFG")"
  if grep -Eq "globalSetup|globalTeardown" "$resolved_cfg" 2>/dev/null; then
    HAS_GLOBAL_SETUP=true
  fi
fi
128
+
129
# --- drift hash -------------------------------------------------------------
# Fingerprint = sha256 over sorted "file|size" lines for every spec file.
# Editing, adding, or removing any test flips the hash; the skill compares
# hashes between audit runs to alert the user about drift.
HASH=""
if command -v sha256sum >/dev/null 2>&1; then
  HASH="$(
    jq -r '.[] | .file' <<<"$TFILES" 2>/dev/null \
      | sort \
      | while IFS= read -r spec; do
          bytes=$(wc -c <"$spec" 2>/dev/null | awk '{print $1}')
          printf '%s|%s\n' "$spec" "${bytes:-0}"
        done \
      | sha256sum | awk '{print $1}'
  )"
fi
[[ -z "$HASH" ]] && HASH="unknown"
139
+
140
# --- assemble ---------------------------------------------------------------
# One jq -n invocation builds the final report: plain strings go in via
# --arg, values that are already JSON (arrays, booleans, null) via --argjson.
jq -n \
  --arg runner "$RUNNER" \
  --argjson config_file "$CFG" \
  --argjson test_dirs "$TDIRS" \
  --argjson test_files "$TFILES" \
  --argjson fixtures "$FIXTURES" \
  --argjson page_objects "$PAGES" \
  --argjson storage_states "$STATES" \
  --argjson has_global_setup "$HAS_GLOBAL_SETUP" \
  --arg hash "$HASH" \
  '{runner: $runner,
    config_file: $config_file,
    test_dirs: $test_dirs,
    test_files: $test_files,
    fixtures: $fixtures,
    page_objects: $page_objects,
    storage_states: $storage_states,
    has_global_setup: $has_global_setup,
    hash: $hash}'
@@ -0,0 +1,88 @@
1
+ #!/usr/bin/env bash
2
+ # Usage: verify-audit.sh [--strict] <session_dir>
3
+ #
4
+ # Verifies artifacts produced by an e2e-audit session:
5
+ # 1. stack.json, routes.json, api-surface.json, existing-tests.json, uncovered.json exist and parse.
6
+ # 2. findings.json exists, is a JSON array, and every item has a SHOT+TRACE+ASSERT+SOURCE quad.
7
+ # 3. Every referenced screenshot / trace / source file resolves to a non-empty file.
8
+ # 4. If post-run-feedback.json exists, its problems[] reference valid finding IDs.
9
+ # 5. Warn when uncovered surfaces exist but no finding of rule=coverage-gap was emitted.
10
+ #
11
+ # Exit codes:
12
+ # 0 OK
13
+ # 1 verification failure (or warnings in --strict)
14
+ # 2 missing prerequisites
15
+ set -euo pipefail
16
+
17
STRICT=0
if [ "${1:-}" = "--strict" ]; then
  STRICT=1
  shift
fi

SESSION_DIR="${1:?usage: verify-audit.sh [--strict] <session_dir>}"

# Every discovery artifact plus findings.json must exist and parse as JSON.
for artifact in stack.json routes.json api-surface.json existing-tests.json uncovered.json findings.json; do
  if [ ! -f "$SESSION_DIR/$artifact" ]; then
    echo "FATAL: missing $SESSION_DIR/$artifact" >&2
    exit 2
  fi
  if ! jq -e . "$SESSION_DIR/$artifact" >/dev/null 2>&1; then
    echo "FATAL: $SESSION_DIR/$artifact is not valid JSON" >&2
    exit 1
  fi
done
26
+
27
FINDINGS="$SESSION_DIR/findings.json"
jq -e 'type == "array"' "$FINDINGS" >/dev/null \
  || { echo "FATAL: findings.json must be a JSON array" >&2; exit 1; }

# warn() counts problems instead of failing; --strict converts a non-zero
# count into exit 1 at the end of the script.
WARNINGS=0
warn() {
  echo "WARN: $*" >&2
  WARNINGS=$((WARNINGS + 1))
}
34
+
35
# Every finding has id + rule + severity + files_affected + the evidence quad.
# NOTE: the while body runs in a pipeline subshell; its `exit 1` fails the
# pipeline, which `set -e -o pipefail` then promotes to a script-level exit.
jq -c '.[]' "$FINDINGS" | while read -r finding; do
  id="$(jq -r '.id // empty' <<<"$finding")"
  rule="$(jq -r '.rule // empty' <<<"$finding")"
  [ -z "$id" ] && { echo "FAIL: finding missing id: $finding" >&2; exit 1; }
  [ -z "$rule" ] && { echo "FAIL: $id missing rule" >&2; exit 1; }

  # Meta findings (coverage-gap, test-drift, stack-detect, post-run-feedback)
  # are aggregate and do not require SHOT/TRACE evidence.
  case "$rule" in
    coverage-gap-*|test-drift|stack-detect|post-run-feedback|uncovered-*)
      continue
      ;;
  esac

  shot="$(jq -r '.screenshot_path // empty' <<<"$finding")"
  trace="$(jq -r '.trace_path // empty' <<<"$finding")"
  source_file="$(jq -r '.source_file // empty' <<<"$finding")"
  assert="$(jq -r '.assertion // empty' <<<"$finding")"

  [ -n "$shot" ] || { echo "FAIL: $id missing screenshot_path" >&2; exit 1; }
  [ -n "$trace" ] || { echo "FAIL: $id missing trace_path" >&2; exit 1; }
  [ -n "$source_file" ] || { echo "FAIL: $id missing source_file" >&2; exit 1; }
  [ -n "$assert" ] || { echo "FAIL: $id missing assertion" >&2; exit 1; }

  # Evidence must resolve to non-empty files. Paths are checked relative to
  # the CWD — NOTE(review): confirm they should not be $SESSION_DIR-relative.
  [ -s "$shot" ] || { echo "FAIL: $id screenshot_path does not resolve: $shot" >&2; exit 1; }
  [ -s "$trace" ] || { echo "FAIL: $id trace_path does not resolve: $trace" >&2; exit 1; }
  [ -f "$source_file" ] || { echo "FAIL: $id source_file does not exist: $source_file" >&2; exit 1; }
done
64
+
65
# coverage-gap meta findings should exist if uncovered.json has non-empty arrays.
# (jq's `length` on a missing/null key is 0, so absent keys are harmless.)
UNC_TOTAL=$(jq '
  (.uncovered_routes | length)
  + (.uncovered_http | length)
  + (.uncovered_trpc | length)
  + (.uncovered_actions | length)
' "$SESSION_DIR/uncovered.json")
COV_FINDINGS=$(jq '[.[] | select(.rule | startswith("coverage-gap"))] | length' "$FINDINGS")
if [ "$UNC_TOTAL" -gt 0 ] && [ "$COV_FINDINGS" -eq 0 ]; then
  warn "uncovered.json has $UNC_TOTAL uncovered surfaces but no coverage-gap finding was emitted"
fi
76
+
77
# post-run-feedback sanity
if [ -f "$SESSION_DIR/post-run-feedback.json" ]; then
  jq -e 'type == "object" and has("problems")' "$SESSION_DIR/post-run-feedback.json" >/dev/null \
    || { echo "FAIL: post-run-feedback.json malformed" >&2; exit 1; }
fi

COUNT=$(jq 'length' "$FINDINGS")
if [ "$STRICT" -eq 1 ] && [ "$WARNINGS" -gt 0 ]; then
  echo "STRICT: $COUNT findings verified, $WARNINGS warning(s)" >&2
  exit 1
fi
# NB: WARNINGS is always set — it is the non-empty string "0" when no warn()
# fired — so the previous `${WARNINGS:+ ...}` suffix printed "(0 warning(s))"
# on every clean run. Mention warnings only when at least one was emitted.
if [ "$WARNINGS" -gt 0 ]; then
  echo "OK: $COUNT findings verified ($WARNINGS warning(s))"
else
  echo "OK: $COUNT findings verified"
fi
@@ -0,0 +1,24 @@
1
+ // {{SESSION_DIR}}/auth.setup.ts — one storageState per role
2
+ // Run once via Playwright `projects[{ name: 'setup', testMatch: /.*\.setup\.ts/ }]`.
3
+ // NEVER reads .env* directly; credentials come from process.env set at invoke time.
4
+ import { test as setup } from '@playwright/test';
5
+
6
+ const ROLES = ['owner', 'admin', 'member'] as const;
7
+
8
+ for (const role of ROLES) {
9
+ setup(`authenticate ${role}`, async ({ page }) => {
10
+ const email = process.env[`E2E_${role.toUpperCase()}_EMAIL`];
11
+ const password = process.env[`E2E_${role.toUpperCase()}_PASSWORD`];
12
+ if (!email || !password) {
13
+ setup.skip(true, `missing E2E_${role.toUpperCase()}_EMAIL / _PASSWORD`);
14
+ return;
15
+ }
16
+ await page.goto('/signin');
17
+ await page.getByLabel(/email/i).fill(email);
18
+ await page.getByLabel(/password/i).fill(password);
19
+ await page.getByRole('button', { name: /sign in|log in|continue/i }).click();
20
+ // Adjust the wait URL to your post-login destination.
21
+ await page.waitForURL(/\/(dashboard|home|app|overview)/, { timeout: 30_000 });
22
+ await page.context().storageState({ path: `.e2e-audit/current/auth/${role}.json` });
23
+ });
24
+ }
@@ -0,0 +1,75 @@
1
// {{SESSION_DIR}}/fixtures/base.ts — e2e-audit base fixture
// Renders a Playwright test with three fixtures:
//   1. apiErrors: asserts no unexpected 4xx/5xx on API paths during the test
//   2. consoleErrors: fails when any console.error fires (with ignore list)
//   3. authenticatedPage: uses storageState from $STORAGE_STATE
//
// Use: import { test, expect } from './fixtures/base';
import { test as base, expect, type Page } from '@playwright/test';

// Responses whose URL matches any of these are never treated as API failures.
const IGNORED_API_PATTERNS: (string | RegExp)[] = [
  /\/api\/auth\//, // next-auth heartbeats
  /\/_next\//,
  /\/__nextjs_/,
  /hot-update/,
  /\.well-known\//,
];

// console.error noise that should not fail a test (dev-tooling chatter).
const IGNORED_CONSOLE_PATTERNS: (string | RegExp)[] = [
  /React DevTools/,
  /Download the React DevTools/,
  /\[Fast Refresh\]/,
];

type ApiError = { url: string; method: string; status: number; body: string };

export const test = base.extend<{
  apiErrors: ApiError[];
  consoleErrors: string[];
  authenticatedPage: Page;
}>({
  // Buffers failing API responses during the test body, then throws in the
  // fixture's teardown phase (after `use`) so the test is marked failed.
  // NOTE(review): like any non-auto fixture, this only runs when a test
  // destructures `apiErrors` — confirm that is the intended opt-in behavior.
  apiErrors: async ({ page }, use) => {
    const errors: ApiError[] = [];
    page.on('response', async (res) => {
      const url = res.url();
      // Only URLs that look like API calls are screened.
      const isApi = /\/api\/|\/v1\/|\/trpc\//.test(url);
      if (!isApi) return;
      if (IGNORED_API_PATTERNS.some((p) => (typeof p === 'string' ? url.includes(p) : p.test(url)))) return;
      if (res.status() < 400) return;
      let body = '';
      // The body may already be disposed (e.g. after navigation); best-effort.
      try { body = (await res.text()).slice(0, 400); } catch {}
      errors.push({ url, method: res.request().method(), status: res.status(), body });
    });
    await use(errors);
    if (errors.length > 0) {
      const lines = errors.map((e) => ` ${e.method} ${e.url} → ${e.status} ${e.body.replace(/\s+/g, ' ')}`);
      throw new Error(`API errors during test:\n${lines.join('\n')}`);
    }
  },

  // Buffers console.error and uncaught page errors, failing the test in
  // teardown when any non-ignored message was seen.
  consoleErrors: async ({ page }, use) => {
    const errors: string[] = [];
    page.on('console', (msg) => {
      if (msg.type() !== 'error') return;
      const text = msg.text();
      if (IGNORED_CONSOLE_PATTERNS.some((p) => (typeof p === 'string' ? text.includes(p) : p.test(text)))) return;
      errors.push(text);
    });
    page.on('pageerror', (err) => errors.push(`pageerror: ${err.message}`));
    await use(errors);
    if (errors.length > 0) {
      throw new Error(`Console errors during test:\n${errors.map((e) => ` ${e}`).join('\n')}`);
    }
  },

  // A fresh browser context pre-loaded with a saved auth session.
  // E2E_STORAGE_STATE overrides the default owner-role state file.
  authenticatedPage: async ({ browser }, use) => {
    const context = await browser.newContext({
      storageState: process.env.E2E_STORAGE_STATE || '.e2e-audit/current/auth/owner.json',
    });
    const page = await context.newPage();
    await use(page);
    await context.close();
  },
});

export { expect };
@@ -0,0 +1,54 @@
1
+ # e2e-audit findings — {{SESSION_ID}}
2
+
3
+ Generated: {{GENERATED_AT}}
4
+ Session dir: `{{SESSION_DIR}}`
5
+ Base ref: `{{BASE_REF}}`
6
+
7
+ ## Summary
8
+
9
+ - Total findings: **{{TOTAL}}**
10
+ - Critical / High / Medium / Low / Info: **{{CRIT}} / {{HIGH}} / {{MED}} / {{LOW}} / {{INFO}}**
11
+ - Coverage gaps (meta): **{{COVERAGE_GAPS}}**
12
+ - Existing specs: **{{SPECS_TOTAL}}** · drift status: **{{DRIFT_STATUS}}**
13
+
14
+ ## Critical + High
15
+
16
+ {{#each critical_high}}
17
+ ### {{id}} — {{summary}}
18
+
19
+ - Rule: `{{rule}}` · Severity: **{{severity}}** · Category: `{{category}}`
20
+ - Files: {{files_affected}}
21
+ - Assertion: `{{assertion}}`
22
+ - Evidence: [screenshot]({{screenshot_path}}) · [trace]({{trace_path}}) · source: `{{source_file}}`
23
+ {{#if http}}
24
+ - HTTP: `{{http.method}} {{http.path}} → {{http.status}}`
25
+ ```
26
+ {{http.response_snippet}}
27
+ ```
28
+ {{/if}}
29
+ {{#if detail}}
30
+ - Detail: {{detail}}
31
+ {{/if}}
32
+
33
+ {{/each}}
34
+
35
+ ## Coverage gaps
36
+
37
+ {{#each coverage_gaps}}
38
+ ### {{id}} — {{summary}}
39
+
40
+ {{detail}}
41
+
42
+ Suggested next step: {{#if suggested_fix}}`{{suggested_fix.kind}}` → {{suggested_fix.files}}{{else}}add specs covering the items above{{/if}}
43
+
44
+ {{/each}}
45
+
46
+ ## Other findings
47
+
48
+ {{#each others}}
49
+ - **{{id}}** [{{severity}}] `{{rule}}` — {{summary}}
50
+ {{/each}}
51
+
52
+ ---
53
+
54
+ _Generated by `e2e-audit` v0.2.0. Verify with `bash .claude/skills/e2e-audit/scripts/verify-audit.sh {{SESSION_DIR}}`._
@@ -0,0 +1,36 @@
1
+ # Post-run feedback — {{SESSION_ID}}
2
+
3
+ Duration: **{{DURATION_S}}s** · Tests: **{{TESTS_PASSED}}/{{TESTS_TOTAL}}** passed · **{{TESTS_FLAKY}}** flaky · **{{TESTS_FAILED}}** failed
4
+
5
+ ## Verdict
6
+
7
+ {{VERDICT}}
8
+
9
+ ## Top problems
10
+
11
+ {{#each problems}}
12
+ - **[{{severity}}] {{kind}}** — {{where}} (count: {{count}})
13
+ {{#if sample_trace}}
14
+ - Trace: `{{sample_trace}}`
15
+ {{/if}}
16
+ {{#if sample_log_tail}}
17
+ - Log tail:
18
+ ```
19
+ {{sample_log_tail}}
20
+ ```
21
+ {{/if}}
22
+ {{#if sample}}
23
+ - Sample: `{{sample}}`
24
+ {{/if}}
25
+ {{/each}}
26
+
27
+ ## Coverage still uncovered after this run
28
+
29
+ - Routes: **{{UNCOVERED.routes}}**
30
+ - HTTP handlers: **{{UNCOVERED.http}}**
31
+ - tRPC procedures: **{{UNCOVERED.trpc}}**
32
+ - Server actions: **{{UNCOVERED.actions}}**
33
+
34
+ ## Suggested next actions
35
+
36
+ {{NEXT_ACTIONS}}