start-vibing 4.3.4 → 4.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -2
- package/template/.claude/agents/sd-audit.md +32 -0
- package/template/.claude/commands/e2e-audit.md +16 -0
- package/template/.claude/hooks/e2e-audit-session-start.sh +4 -0
- package/template/.claude/settings.json +4 -0
- package/template/.claude/skills/e2e-audit/SKILL.md +216 -0
- package/template/.claude/skills/e2e-audit/findings.schema.json +98 -0
- package/template/.claude/skills/e2e-audit/references/api-contract-playbook.md +66 -0
- package/template/.claude/skills/e2e-audit/references/auth-setup-playbook.md +78 -0
- package/template/.claude/skills/e2e-audit/references/coverage-gap-playbook.md +95 -0
- package/template/.claude/skills/e2e-audit/references/post-run-feedback-playbook.md +80 -0
- package/template/.claude/skills/e2e-audit/scripts/detect-stack.sh +205 -0
- package/template/.claude/skills/e2e-audit/scripts/detect-uncovered.sh +137 -0
- package/template/.claude/skills/e2e-audit/scripts/discover-api-surface.sh +242 -0
- package/template/.claude/skills/e2e-audit/scripts/discover-routes.sh +163 -0
- package/template/.claude/skills/e2e-audit/scripts/inventory-existing-tests.sh +161 -0
- package/template/.claude/skills/e2e-audit/scripts/verify-audit.sh +88 -0
- package/template/.claude/skills/e2e-audit/templates/auth-setup.ts.tpl +24 -0
- package/template/.claude/skills/e2e-audit/templates/base-fixture.ts.tpl +75 -0
- package/template/.claude/skills/e2e-audit/templates/findings-report.md.tpl +54 -0
- package/template/.claude/skills/e2e-audit/templates/post-run-feedback.md.tpl +36 -0
- package/template/.claude/skills/super-design/SKILL.md +42 -4
- package/template/.claude/skills/super-design/scripts/discover-surfaces.sh +197 -0
- package/template/.claude/skills/super-design/scripts/extract-project-rules.sh +240 -0
- package/template/.claude/skills/super-design/scripts/verify-audit.sh +34 -1
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# post-run-feedback-playbook (e2e-audit 0.2.0)
|
|
2
|
+
|
|
3
|
+
> How to consolidate every signal emitted during a Playwright run into one feedback document that precedes findings.json.
|
|
4
|
+
|
|
5
|
+
## Why a separate document?
|
|
6
|
+
|
|
7
|
+
Findings are atomic and per-problem; the user needs a 30-second summary of "what broke during this run" before drilling into specifics. `post-run-feedback.md` answers that. `findings.json` answers "what do I fix."
|
|
8
|
+
|
|
9
|
+
## Signals to consolidate
|
|
10
|
+
|
|
11
|
+
Pull from these inputs:
|
|
12
|
+
|
|
13
|
+
1. Playwright JSON reporter output (`--reporter=json`).
|
|
14
|
+
2. `$SESSION_DIR/logs/dev-server.log` (last-N-lines per crash).
|
|
15
|
+
3. Console + pageerror logs captured by fixtures.
|
|
16
|
+
4. Network responses matching 4xx/5xx on API paths.
|
|
17
|
+
5. Dev-server PID status at run end (exited unexpectedly?).
|
|
18
|
+
|
|
19
|
+
## Classification
|
|
20
|
+
|
|
21
|
+
| kind | trigger | severity |
|
|
22
|
+
| ------------------ | ------------------------------------------------------------ | -------- |
|
|
23
|
+
| `api-4xx` | unexpected 4xx on a test's expected-success request | high |
|
|
24
|
+
| `api-5xx` | any 5xx response on API path | critical |
|
|
25
|
+
| `server-crash` | 5xx AND `Content-Type: text/html` on API path | critical |
|
|
26
|
+
| `console-error` | `page.on('console')` level=error, de-duped by message stem | medium |
|
|
27
|
+
| `pageerror` | `page.on('pageerror')` raised | high |
|
|
28
|
+
| `rbac-bypass` | protected endpoint returned 200 to wrong role | critical |
|
|
29
|
+
| `auth-flow-broken` | redirect loop or 401 on happy-path login | critical |
|
|
30
|
+
| `dev-server-log` | unhandledRejection / uncaughtException in dev-server.log | high |
|
|
31
|
+
| `test-timeout` | test exceeded Playwright timeout | medium |
|
|
32
|
+
| `flake`           | test failed initially but passed on retry (suggests flakiness) | low      |
|
|
33
|
+
|
|
34
|
+
## Output shape
|
|
35
|
+
|
|
36
|
+
`post-run-feedback.json`:
|
|
37
|
+
|
|
38
|
+
```jsonc
|
|
39
|
+
{
|
|
40
|
+
"session": "<abs path to session dir>",
|
|
41
|
+
"duration_s": 128,
|
|
42
|
+
"tests_total": 42,
|
|
43
|
+
"tests_passed": 39,
|
|
44
|
+
"tests_failed": 3,
|
|
45
|
+
"tests_flaky": 1,
|
|
46
|
+
"problems": [
|
|
47
|
+
{
|
|
48
|
+
"kind": "server-crash",
|
|
49
|
+
"where": "POST /api/users",
|
|
50
|
+
"count": 2,
|
|
51
|
+
"severity": "critical",
|
|
52
|
+
"sample_trace": "traces/users-create-1.zip",
|
|
53
|
+
"sample_log_tail": "TypeError: Cannot read properties of undefined (reading 'id')\n at ..."
|
|
54
|
+
}
|
|
55
|
+
],
|
|
56
|
+
"uncovered_carried_forward": {
|
|
57
|
+
"routes": 4, "http": 2, "trpc": 9, "actions": 1
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
`post-run-feedback.md` renders the same data as a human-readable document with:
|
|
63
|
+
|
|
64
|
+
1. A one-line verdict ("3 tests failed, 2 critical problems, 16 uncovered surfaces — requires attention").
|
|
65
|
+
2. Top 5 problems by severity.
|
|
66
|
+
3. Link to the trace zip for each.
|
|
67
|
+
4. Next-step recommendations (add spec / fix handler / update RBAC middleware).
|
|
68
|
+
|
|
69
|
+
## De-duplication
|
|
70
|
+
|
|
71
|
+
- `console-error`: group by the first 120 chars of `text` (to collapse stack variation across retries).
|
|
72
|
+
- `api-4xx`/`api-5xx`: group by `(method, path, status)`.
|
|
73
|
+
- `pageerror`: group by the error message line.
|
|
74
|
+
|
|
75
|
+
## What NOT to include
|
|
76
|
+
|
|
77
|
+
- Full trace bytes — only file paths.
|
|
78
|
+
- Full stack traces for every occurrence — only a single `sample_log_tail` per group.
|
|
79
|
+
- Source code excerpts — that belongs in a finding's `source_quote`, not the feedback.
|
|
80
|
+
- Secret values (tokens, session cookies) — actively scrub any `Authorization:` header fragments out of `sample_log_tail`.
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# detect-stack.sh — identify framework, test runner, package manager, dev server,
# and environment files so downstream scripts + the skill prompt can adapt.
#
# Output: JSON object on stdout:
# {
#   "framework": "next" | "remix" | "sveltekit" | "nuxt" | "astro" | "express" | "hono" | "fastify" | "unknown",
#   "router_style": "app-router" | "pages-router" | "mixed" | "file-routes" | "n/a",
#   "trpc": true | false,
#   "trpc_version": "10" | "11" | "unknown" | null,
#   "graphql": true | false,
#   "orm": ["prisma" | "drizzle" | "mongoose" | "typeorm" | "kysely", ...],
#   "auth": ["next-auth" | "authjs" | "clerk" | "auth0" | "lucia" | "better-auth" | "supabase" | "custom", ...],
#   "test_runner": "playwright" | "cypress" | "vitest-browser" | "jest-puppeteer" | "none",
#   "playwright_config": "playwright.config.ts" | null,
#   "package_manager": "bun" | "pnpm" | "yarn" | "npm",
#   "dev_command": "bun run dev" | "pnpm dev" | ...,
#   "dev_port": <number> | null,
#   "base_url": "http://localhost:<port>",
#   "env_files": [".env.local", ".env.test", ...],
#   "src_root": "src" | "app" | ".",
#   "has_middleware": true | false,
#   "middleware_file": "middleware.ts" | "src/middleware.ts" | null
# }
#
# Best-effort. Absence of jq is fatal (downstream scripts require it).
set -euo pipefail

command -v jq >/dev/null || { echo "jq required" >&2; exit 2; }

# read_pkg [filter] — run a jq filter (default: identity) over package.json.
# Prints "null" when package.json is absent or the filter fails.
read_pkg() {
  [[ -f package.json ]] || { echo "null"; return; }
  jq -r "${1:-.}" package.json 2>/dev/null || echo "null"
}

# has_dep <name> — succeed iff <name> is listed in dependencies or devDependencies.
has_dep() {
  local name="$1"
  [[ -f package.json ]] || return 1
  jq -e --arg n "$name" '(.dependencies[$n]? // .devDependencies[$n]? // empty)' package.json >/dev/null 2>&1
}

# dep_version <name> — declared version range with leading range operators
# (^, ~, >=, spaces) stripped, so "^10.45.0" → "10.45.0". Empty when absent.
dep_version() {
  local name="$1"
  [[ -f package.json ]] || { echo ""; return; }
  jq -r --arg n "$name" '.dependencies[$n]? // .devDependencies[$n]? // ""' package.json 2>/dev/null | sed 's/^[^0-9]*//'
}

# --- framework --------------------------------------------------------------
FRAMEWORK="unknown"
ROUTER_STYLE="n/a"
if has_dep next; then
  FRAMEWORK="next"
  # Next.js may use the app router, the pages router, or both at once.
  if [[ -d app || -d src/app ]] && [[ -d pages || -d src/pages ]]; then ROUTER_STYLE="mixed"
  elif [[ -d app || -d src/app ]]; then ROUTER_STYLE="app-router"
  elif [[ -d pages || -d src/pages ]]; then ROUTER_STYLE="pages-router"
  fi
elif has_dep @remix-run/react || has_dep @remix-run/node; then FRAMEWORK="remix"; ROUTER_STYLE="file-routes"
elif has_dep @sveltejs/kit; then FRAMEWORK="sveltekit"; ROUTER_STYLE="file-routes"
elif has_dep nuxt; then FRAMEWORK="nuxt"; ROUTER_STYLE="file-routes"
elif has_dep astro; then FRAMEWORK="astro"; ROUTER_STYLE="file-routes"
elif has_dep hono; then FRAMEWORK="hono"
elif has_dep fastify; then FRAMEWORK="fastify"
elif has_dep express; then FRAMEWORK="express"
fi

# --- trpc / graphql ---------------------------------------------------------
# TRPC_VER holds a ready-to-use JSON literal (quoted string or null) so it can
# be passed straight to `jq --argjson` during assembly.
TRPC=false; TRPC_VER="null"
if has_dep @trpc/server; then
  TRPC=true
  v="$(dep_version @trpc/server)"
  if [[ "$v" =~ ^10\. ]]; then TRPC_VER='"10"'
  elif [[ "$v" =~ ^11\. ]]; then TRPC_VER='"11"'
  else TRPC_VER='"unknown"'
  fi
fi
GRAPHQL=false
if has_dep graphql || has_dep @apollo/server || has_dep @apollo/client || has_dep graphql-yoga; then
  GRAPHQL=true
fi

# --- ORMs -------------------------------------------------------------------
orm_arr='[]'
for candidate in @prisma/client drizzle-orm mongoose typeorm kysely; do
  if has_dep "$candidate"; then
    # BUGFIX: "${candidate##*/}" turned "@prisma/client" into "client",
    # contradicting the documented contract ("prisma"). Map it explicitly.
    case "$candidate" in
      @prisma/client) short="prisma" ;;
      *) short="${candidate##*/}"; short="${short%-orm}" ;;
    esac
    orm_arr="$(jq --arg o "$short" '. + [$o]' <<<"$orm_arr")"
  fi
done

# --- auth providers ---------------------------------------------------------
auth_arr='[]'
add_auth() { auth_arr="$(jq --arg o "$1" '. + [$o]' <<<"$auth_arr")"; }
has_dep next-auth && add_auth next-auth
has_dep @auth/core && add_auth authjs
has_dep @clerk/nextjs && add_auth clerk
has_dep @clerk/clerk-sdk-node && add_auth clerk
has_dep @auth0/nextjs-auth0 && add_auth auth0
has_dep lucia && add_auth lucia
has_dep better-auth && add_auth better-auth
has_dep @supabase/supabase-js && add_auth supabase
# Both clerk packages may be present; collapse duplicates.
auth_arr="$(jq 'unique' <<<"$auth_arr")"

# --- test runner ------------------------------------------------------------
TEST_RUNNER="none"
PLAYWRIGHT_CFG="null"   # JSON literal: quoted filename, or null when not found
if has_dep @playwright/test; then
  TEST_RUNNER="playwright"
  for c in playwright.config.ts playwright.config.js playwright.config.mjs; do
    [[ -f "$c" ]] && PLAYWRIGHT_CFG="\"$c\"" && break
  done
elif has_dep cypress; then TEST_RUNNER="cypress"
elif has_dep @vitest/browser; then TEST_RUNNER="vitest-browser"
elif has_dep jest-puppeteer; then TEST_RUNNER="jest-puppeteer"
fi

# --- package manager + dev command -----------------------------------------
# Lockfile presence decides the package manager.
if [[ -f bun.lockb || -f bun.lock ]]; then PM="bun"; DEV_CMD="bun run dev"
elif [[ -f pnpm-lock.yaml ]]; then PM="pnpm"; DEV_CMD="pnpm dev"
elif [[ -f yarn.lock ]]; then PM="yarn"; DEV_CMD="yarn dev"
else PM="npm"; DEV_CMD="npm run dev"
fi

# No `scripts.dev`? Fall back to `scripts.start` if that exists.
# (jq failures are swallowed so a malformed package.json does not abort a
# best-effort detector running under `set -e`.)
if [[ -f package.json ]]; then
  HAS_DEV="$(jq -r '.scripts.dev // empty' package.json 2>/dev/null || true)"
  if [[ -z "$HAS_DEV" ]]; then
    if jq -re '.scripts.start' package.json >/dev/null 2>&1; then
      DEV_CMD="${DEV_CMD% dev} start"   # "pnpm dev" → "pnpm start"; "bun run dev" → "bun run start"
    fi
  fi
fi

# --- dev port ---------------------------------------------------------------
DEV_PORT="null"   # JSON literal; replaced with a number when detected
# Common places: package.json scripts.dev "-p 3000" / "--port 3000" / "PORT=3000".
if [[ -f package.json ]]; then
  script="$(jq -r '.scripts.dev // ""' package.json 2>/dev/null || true)"
  if [[ -n "$script" ]]; then
    p="$(echo "$script" | grep -oE -- '--port[= ]+[0-9]+|-p[= ]+[0-9]+|PORT=[0-9]+' | grep -oE '[0-9]+' | head -1 || true)"
    [[ -n "$p" ]] && DEV_PORT="$p"
  fi
fi
# Fallback defaults per framework.
if [[ "$DEV_PORT" == "null" ]]; then
  case "$FRAMEWORK" in
    next|express|hono|fastify|remix|nuxt) DEV_PORT=3000 ;;
    sveltekit) DEV_PORT=5173 ;;
    astro) DEV_PORT=4321 ;;
  esac
fi
# BUGFIX: DEV_PORT holds the *string* "null" for unknown frameworks, so the
# old ${DEV_PORT:-3000} expansion produced "http://localhost:null" (":-" only
# fires on unset/empty). Keep dev_port as JSON null but give the URL a real
# default port.
if [[ "$DEV_PORT" == "null" ]]; then
  BASE_URL="http://localhost:3000"
else
  BASE_URL="http://localhost:${DEV_PORT}"
fi

# --- env files --------------------------------------------------------------
env_arr='[]'
for f in .env .env.local .env.development .env.development.local .env.test .env.test.local; do
  [[ -f "$f" ]] && env_arr="$(jq --arg n "$f" '. + [$n]' <<<"$env_arr")"
done

# --- src root + middleware --------------------------------------------------
SRC_ROOT="."
[[ -d src ]] && SRC_ROOT="src"
# A root-level app/ (without src/app) means sources live at the repo root.
[[ -d app && ! -d src/app ]] && SRC_ROOT="."
HAS_MW=false; MW_FILE="null"
for f in middleware.ts middleware.js src/middleware.ts src/middleware.js; do
  if [[ -f "$f" ]]; then HAS_MW=true; MW_FILE="\"$f\""; break; fi
done

# --- assemble ---------------------------------------------------------------
jq -n \
  --arg framework "$FRAMEWORK" \
  --arg router_style "$ROUTER_STYLE" \
  --argjson trpc "$TRPC" \
  --argjson trpc_version "$TRPC_VER" \
  --argjson graphql "$GRAPHQL" \
  --argjson orm "$orm_arr" \
  --argjson auth "$auth_arr" \
  --arg test_runner "$TEST_RUNNER" \
  --argjson playwright_config "$PLAYWRIGHT_CFG" \
  --arg package_manager "$PM" \
  --arg dev_command "$DEV_CMD" \
  --argjson dev_port "${DEV_PORT:-null}" \
  --arg base_url "$BASE_URL" \
  --argjson env_files "$env_arr" \
  --arg src_root "$SRC_ROOT" \
  --argjson has_middleware "$HAS_MW" \
  --argjson middleware_file "$MW_FILE" \
  '{
    framework: $framework,
    router_style: $router_style,
    trpc: $trpc,
    trpc_version: $trpc_version,
    graphql: $graphql,
    orm: $orm,
    auth: $auth,
    test_runner: $test_runner,
    playwright_config: $playwright_config,
    package_manager: $package_manager,
    dev_command: $dev_command,
    dev_port: $dev_port,
    base_url: $base_url,
    env_files: $env_files,
    src_root: $src_root,
    has_middleware: $has_middleware,
    middleware_file: $middleware_file
  }'
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# detect-uncovered.sh — intersect (current branch diff) × (existing tests) and
# emit every NEW or CHANGED surface that has no test coverage.
#
# Inputs (all produced earlier in the pipeline):
#   $1  routes.json          (from discover-routes.sh)
#   $2  api-surface.json     (from discover-api-surface.sh)
#   $3  existing-tests.json  (from inventory-existing-tests.sh)
#   $4  base-ref             (default: origin/main)
#
# Output: JSON object on stdout:
# {
#   "base_ref": "origin/main",
#   "diff_files": ["src/app/users/page.tsx", ...],
#   "changed_routes":  [{route from routes.json}, ...],
#   "changed_http":    [{route from api-surface.http_routes}, ...],
#   "changed_trpc":    [{proc from api-surface.trpc_procedures}, ...],
#   "changed_actions": [{action from api-surface.server_actions}, ...],
#   "uncovered_routes":  [...],   // changed AND no test references its URL or file
#   "uncovered_http":    [...],
#   "uncovered_trpc":    [...],
#   "uncovered_actions": [...]
# }
set -euo pipefail

command -v jq >/dev/null || { echo "jq required" >&2; exit 2; }

ROUTES_JSON="${1:?usage: detect-uncovered.sh routes.json api-surface.json existing-tests.json [base-ref]}"
API_JSON="${2:?api-surface.json required}"
TESTS_JSON="${3:?existing-tests.json required}"
BASE_REF="${4:-origin/main}"

for f in "$ROUTES_JSON" "$API_JSON" "$TESTS_JSON"; do
  [[ -f "$f" ]] || { echo "missing: $f" >&2; exit 2; }
done

# --- 1. branch diff ---------------------------------------------------------
# Union of committed (merge-base..HEAD), staged, and unstaged changes.
DIFF_FILES='[]'
if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
  # If the base ref doesn't exist locally, fall back to HEAD~10..HEAD.
  if git rev-parse --verify "$BASE_REF" >/dev/null 2>&1; then
    MERGE_BASE="$(git merge-base "$BASE_REF" HEAD 2>/dev/null || echo "$BASE_REF")"
  else
    MERGE_BASE="$(git rev-parse HEAD~10 2>/dev/null || git rev-parse HEAD)"
  fi
  # Collect every changed path once and let jq build the unique array in a
  # single pass (the original appended one element per jq invocation, which
  # is quadratic in the number of changed files).
  DIFF_FILES="$(
    {
      git diff --name-only "$MERGE_BASE"...HEAD 2>/dev/null || true
      git diff --name-only --cached 2>/dev/null || true
      git diff --name-only 2>/dev/null || true
    } | jq -Rn '[inputs | select(length > 0)] | unique'
  )"
fi

# --- 2. filter each inventory by membership in diff -------------------------
# Reads a JSON array on stdin; keeps entries whose .file is in the branch diff.
filter_by_file() {
  jq --argjson diff "$DIFF_FILES" '[ .[] | select(.file as $f | $diff | index($f) != null) ]'
}

CHANGED_ROUTES="$(jq '.' "$ROUTES_JSON" | filter_by_file)"
CHANGED_HTTP="$(jq '.http_routes' "$API_JSON" | filter_by_file)"
CHANGED_TRPC="$(jq '.trpc_procedures' "$API_JSON" | filter_by_file)"
CHANGED_ACTIONS="$(jq '.server_actions' "$API_JSON" | filter_by_file)"

# --- 3. load test corpus contents for string-search coverage ----------------
# Build a single concatenated lowercase blob of all test-file contents;
# a surface is "covered" if its URL path OR its source file path appears in
# any test. mktemp + trap replace the old predictable /tmp/...-$$ name, and
# guarantee cleanup even when a later command aborts under `set -e`.
TEST_FILES="$(jq -r '.test_files[]?.file // empty' "$TESTS_JSON")"
TEST_BLOB="$(mktemp "${TMPDIR:-/tmp}/e2e-audit-testblob.XXXXXX")"
trap 'rm -f "$TEST_BLOB"' EXIT
while IFS= read -r tf; do
  [[ -z "$tf" ]] && continue
  [[ -f "$tf" ]] && tr '[:upper:]' '[:lower:]' <"$tf" >>"$TEST_BLOB" || true
done <<<"$TEST_FILES"

# is_covered <needle>... — succeed if any non-empty needle appears in the blob,
# either literally or with dynamic segments wildcarded (/users/[id] → /users/[^/]+).
is_covered() {
  local n loose
  for n in "$@"; do
    [[ -z "$n" ]] && continue
    loose="$(echo "$n" | sed -E 's#\[[^]]+\]#[^/]+#g' | tr '[:upper:]' '[:lower:]')"
    # Cheap literal check first (immune to regex metacharacters in the path).
    if grep -qF "$(echo "$n" | tr '[:upper:]' '[:lower:]')" "$TEST_BLOB" 2>/dev/null; then return 0; fi
    # Regex-ish check with dynamic wildcards; grep errors on odd
    # metacharacters fall through to "not covered".
    if grep -Eq "$loose" "$TEST_BLOB" 2>/dev/null; then return 0; fi
  done
  return 1
}

# --- 4. compute uncovered for each category ---------------------------------
# compute_uncovered <changed-json> <primary-field> <secondary-field>
# Emits the subset of <changed-json> entries where neither field's value is
# referenced by any test.
compute_uncovered() {
  local changed_json="$1"
  local name_field="$2"       # e.g. "path" or "name"
  local secondary_field="$3"  # e.g. "file"
  local out='[]' item primary secondary
  while IFS= read -r item; do
    [[ -z "$item" ]] && continue
    primary="$(jq -r --arg k "$name_field" '.[$k] // ""' <<<"$item")"
    secondary="$(jq -r --arg k "$secondary_field" '.[$k] // ""' <<<"$item")"
    if ! is_covered "$primary" "$secondary"; then
      out="$(jq --argjson o "$item" '. + [$o]' <<<"$out")"
    fi
  done < <(jq -c '.[]' <<<"$changed_json")
  echo "$out"
}

UNC_ROUTES="$(compute_uncovered "$CHANGED_ROUTES" "path" "file")"
UNC_HTTP="$(compute_uncovered "$CHANGED_HTTP" "path" "file")"
UNC_TRPC="$(compute_uncovered "$CHANGED_TRPC" "name" "file")"
UNC_ACTIONS="$(compute_uncovered "$CHANGED_ACTIONS" "name" "file")"

# --- 5. assemble ------------------------------------------------------------
jq -n \
  --arg base_ref "$BASE_REF" \
  --argjson diff_files "$DIFF_FILES" \
  --argjson changed_routes "$CHANGED_ROUTES" \
  --argjson changed_http "$CHANGED_HTTP" \
  --argjson changed_trpc "$CHANGED_TRPC" \
  --argjson changed_actions "$CHANGED_ACTIONS" \
  --argjson uncovered_routes "$UNC_ROUTES" \
  --argjson uncovered_http "$UNC_HTTP" \
  --argjson uncovered_trpc "$UNC_TRPC" \
  --argjson uncovered_actions "$UNC_ACTIONS" \
  '{
    base_ref: $base_ref,
    diff_files: $diff_files,
    changed_routes: $changed_routes,
    changed_http: $changed_http,
    changed_trpc: $changed_trpc,
    changed_actions: $changed_actions,
    uncovered_routes: $uncovered_routes,
    uncovered_http: $uncovered_http,
    uncovered_trpc: $uncovered_trpc,
    uncovered_actions: $uncovered_actions
  }'
|
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# discover-api-surface.sh — enumerate every network-facing surface a browser
|
|
3
|
+
# or third-party client can hit: REST/HTTP route handlers, tRPC procedures,
|
|
4
|
+
# GraphQL resolvers, and server actions.
|
|
5
|
+
#
|
|
6
|
+
# Why: Playwright deduction misses endpoints that aren't touched by the
|
|
7
|
+
# flow you tested. A procedure that's only called from Settings > Advanced
|
|
8
|
+
# > Danger Zone, or an API route called only by a webhook, won't show up
|
|
9
|
+
# in a traffic log. This script reads the SOURCE and emits the full
|
|
10
|
+
# contract so the skill can report which endpoints HAVE no tests.
|
|
11
|
+
#
|
|
12
|
+
# Output: JSON object on stdout:
|
|
13
|
+
# {
|
|
14
|
+
# "http_routes": [{ "method": "POST", "path": "/api/users", "file": "...", "auth": "protected"|"public"|"unknown", "zod_schema_found": true|false }],
|
|
15
|
+
# "trpc_procedures": [{ "name": "users.create", "kind": "query"|"mutation"|"subscription", "file": "...", "auth": "protected"|"public"|"unknown", "input_schema_found": true|false }],
|
|
16
|
+
# "graphql": { "found": true|false, "files": [...] },
|
|
17
|
+
# "server_actions": [{ "name": "createUser", "file": "...", "directive": "'use server'"|"file-scoped" }],
|
|
18
|
+
# "middleware": { "file": "middleware.ts"|null, "has_auth_guard": true|false, "matches_public_patterns": [...] }
|
|
19
|
+
# }
|
|
20
|
+
#
|
|
21
|
+
# No AST. False positives are OK — skill cross-references this with tests.
|
|
22
|
+
set -euo pipefail
|
|
23
|
+
|
|
24
|
+
command -v jq >/dev/null || { echo "jq required" >&2; exit 2; }
|
|
25
|
+
|
|
26
|
+
HTTP='[]'
|
|
27
|
+
TRPC='[]'
|
|
28
|
+
GQL_FOUND=false
|
|
29
|
+
GQL_FILES='[]'
|
|
30
|
+
ACTIONS='[]'
|
|
31
|
+
MW_FILE="null"
|
|
32
|
+
MW_AUTH=false
|
|
33
|
+
MW_PUBLIC='[]'
|
|
34
|
+
|
|
35
|
+
# --- 1. HTTP ROUTE HANDLERS -------------------------------------------------
|
|
36
|
+
# Next.js app router: app/**/route.{ts,tsx,js} with exported METHOD handlers.
|
|
37
|
+
# Next.js pages router: pages/api/**/*.{ts,tsx,js} with default export.
|
|
38
|
+
# Remix: resource routes (files in app/routes with a loader/action but no default).
|
|
39
|
+
# Express/Hono/Fastify: app.get/post/put/delete/patch etc.
|
|
40
|
+
METHODS='GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS'
|
|
41
|
+
|
|
42
|
+
# Next.js app router route.ts
|
|
43
|
+
while IFS= read -r f; do
|
|
44
|
+
[[ -z "$f" ]] && continue
|
|
45
|
+
# URL path: strip app/src/app prefix and /route.* suffix; keep [param].
|
|
46
|
+
p="$f"; p="${p#src/app/}"; p="${p#app/}"; p="${p%/route.ts}"; p="${p%/route.tsx}"; p="${p%/route.js}"
|
|
47
|
+
# strip route-group segments
|
|
48
|
+
p="$(echo "$p" | awk -F/ '{out=""; for(i=1;i<=NF;i++){if($i~/^\(.*\)$/)continue; out=out(out==""?"":"/")$i} print out}')"
|
|
49
|
+
url="/$p"
|
|
50
|
+
# which methods are exported?
|
|
51
|
+
for m in GET POST PUT PATCH DELETE HEAD OPTIONS; do
|
|
52
|
+
if grep -Eq "export[[:space:]]+(async[[:space:]]+)?function[[:space:]]+$m\\b|export[[:space:]]+const[[:space:]]+$m[[:space:]]*=" "$f" 2>/dev/null; then
|
|
53
|
+
# zod schema mentioned in file?
|
|
54
|
+
zod=false
|
|
55
|
+
grep -Eq "z\\.object\\(|safeParse|\\.parse\\(|zodResolver" "$f" 2>/dev/null && zod=true
|
|
56
|
+
# auth hint: look for session/auth/protected keywords
|
|
57
|
+
auth="unknown"
|
|
58
|
+
if grep -Eq "getServerSession|auth\\(\\)|requireAuth|requireSession|getSession|currentUser\\(|userId" "$f" 2>/dev/null; then auth="protected"
|
|
59
|
+
elif grep -Eq "// public|@public" "$f" 2>/dev/null; then auth="public"
|
|
60
|
+
fi
|
|
61
|
+
HTTP="$(jq --arg m "$m" --arg p "$url" --arg f "$f" --arg a "$auth" --argjson z "$zod" \
|
|
62
|
+
'. + [{method:$m, path:$p, file:$f, auth:$a, zod_schema_found:$z}]' <<<"$HTTP")"
|
|
63
|
+
fi
|
|
64
|
+
done
|
|
65
|
+
done < <(find app src/app -type f \( -name 'route.ts' -o -name 'route.tsx' -o -name 'route.js' \) 2>/dev/null)
|
|
66
|
+
|
|
67
|
+
# Next.js pages/api
|
|
68
|
+
while IFS= read -r f; do
|
|
69
|
+
[[ -z "$f" ]] && continue
|
|
70
|
+
p="$f"; p="${p#src/pages/}"; p="${p#pages/}"
|
|
71
|
+
p="${p%.ts}"; p="${p%.tsx}"; p="${p%.js}"; p="${p%.jsx}"
|
|
72
|
+
# index.ts -> directory path
|
|
73
|
+
p="${p%/index}"
|
|
74
|
+
url="/$p"
|
|
75
|
+
auth="unknown"
|
|
76
|
+
if grep -Eq "getServerSession|requireAuth|getSession|authenticate\\(|req\\.session" "$f" 2>/dev/null; then auth="protected"; fi
|
|
77
|
+
zod=false
|
|
78
|
+
grep -Eq "z\\.object\\(|safeParse|\\.parse\\(" "$f" 2>/dev/null && zod=true
|
|
79
|
+
HTTP="$(jq --arg p "$url" --arg f "$f" --arg a "$auth" --argjson z "$zod" \
|
|
80
|
+
'. + [{method:"ANY", path:$p, file:$f, auth:$a, zod_schema_found:$z}]' <<<"$HTTP")"
|
|
81
|
+
done < <(find pages/api src/pages/api -type f \( -name '*.ts' -o -name '*.tsx' -o -name '*.js' \) 2>/dev/null)
|
|
82
|
+
|
|
83
|
+
# Express / Hono / Fastify style (app.get/post/...)
|
|
84
|
+
while IFS= read -r hit; do
|
|
85
|
+
[[ -z "$hit" ]] && continue
|
|
86
|
+
f="${hit%%:*}"; rest="${hit#*:}"
|
|
87
|
+
line="${rest%%:*}"
|
|
88
|
+
match="${rest#*:}"
|
|
89
|
+
m="$(echo "$match" | grep -oE "\\.(get|post|put|patch|delete|head|options)\\(" | head -1 | tr -d '.(' | tr '[:lower:]' '[:upper:]')"
|
|
90
|
+
url="$(echo "$match" | grep -oE "[\"'][^\"']+[\"']" | head -1 | tr -d "\"'")"
|
|
91
|
+
[[ -z "$m" || -z "$url" ]] && continue
|
|
92
|
+
[[ "$url" =~ ^/ ]] || continue
|
|
93
|
+
HTTP="$(jq --arg m "$m" --arg p "$url" --arg f "$f" \
|
|
94
|
+
'. + [{method:$m, path:$p, file:$f, auth:"unknown", zod_schema_found:false}]' <<<"$HTTP")"
|
|
95
|
+
done < <(grep -rEHn --include='*.ts' --include='*.tsx' --include='*.js' --include='*.mjs' \
|
|
96
|
+
"\\.(get|post|put|patch|delete|head|options)\\([\"'][^\"']+[\"']" \
|
|
97
|
+
src server app 2>/dev/null | head -500 || true)
|
|
98
|
+
|
|
99
|
+
# --- 2. tRPC PROCEDURES -----------------------------------------------------
|
|
100
|
+
# Look for files that import `router` or `procedure` and enumerate calls:
|
|
101
|
+
# foo: publicProcedure.input(...).query(...)
|
|
102
|
+
# bar: protectedProcedure.mutation(...)
|
|
103
|
+
#
|
|
104
|
+
# We don't build a router namespace tree; we emit procedure names as seen.
|
|
105
|
+
# Nested router paths can be computed by the skill from the file layout.
|
|
106
|
+
TRPC_FILES="$(grep -rl -E "createTRPCRouter|publicProcedure|protectedProcedure|router\\(\\{" \
|
|
107
|
+
--include='*.ts' --include='*.tsx' \
|
|
108
|
+
src server 2>/dev/null || true)"
|
|
109
|
+
|
|
110
|
+
# NOTE(review): a first-pass pipeline was removed here. It ran the per-file awk
# extraction, piped each "name|kind|file|auth|input" line through a separate jq
# invocation (each starting from an empty array, so no aggregate ever formed)
# into the predictable temp file /tmp/e2e-audit-trpc-$$.jsonl, then read that
# file back only to discard every line. Net effect: wasted work plus a
# world-readable temp file. The single-pass rebuild below is the authoritative
# tRPC extraction and produces the identical $TRPC array.
|
|
152
|
+
|
|
153
|
+
# Simpler, correct approach for tRPC — rebuild as JSON array in a single pass.
|
|
154
|
+
TRPC='[]'
|
|
155
|
+
# Rebuild the tRPC procedure inventory ($TRPC, a JSON array) in one pass over
# every candidate router file listed in $TRPC_FILES (one path per line).
while IFS= read -r f; do
  [[ -z "$f" ]] && continue
  # Inner loop consumes pipe-delimited records emitted by the awk scan below:
  #   name|kind|file|auth|input_found
  while IFS='|' read -r name kind file auth input_found; do
    [[ -z "$name" ]] && continue
    # Append one object per procedure. input_found is the literal string
    # "true"/"false" from awk, which --argjson parses into a JSON boolean.
    # jq reads the current array from the herestring, so the loop's stdin
    # (the process substitution) is not consumed by jq.
    TRPC="$(jq --arg n "$name" --arg k "$kind" --arg fi "$file" --arg a "$auth" --argjson i "$input_found" \
      '. + [{name:$n, kind:$k, file:$fi, auth:$a, input_schema_found:$i}]' <<<"$TRPC")"
  done < <(
    # Accumulate source lines into `chain` from the first *Procedure mention
    # until a .query/.mutation/.subscription call terminates the builder
    # chain; then classify the chain and emit one record.
    # NOTE(review): 3-arg match() and \w/\s in regexes are GNU awk
    # extensions; on BSD/macOS awk this fails and the error is silenced by
    # 2>/dev/null, yielding zero records — confirm gawk is the target.
    awk -v file="$f" '
      /(\w+Procedure)/ { chain = chain " " $0 }
      /\.query\s*\(|\.mutation\s*\(|\.subscription\s*\(/ {
        chain = chain " " $0
        name = "_"
        # Procedure name = identifier preceding ": somethingProcedure";
        # falls back to "_" when the declaration shape is unrecognized.
        if (match(chain, /([A-Za-z_][A-Za-z0-9_]*)[[:space:]]*:[[:space:]]*[A-Za-z_][A-Za-z0-9_]*Procedure/, m)) name = m[1]
        # Kind: whichever terminator appears in the chain; anything that is
        # neither query nor mutation is treated as a subscription.
        if (chain ~ /\.query[[:space:]]*\(/) kind = "query"
        else if (chain ~ /\.mutation[[:space:]]*\(/) kind = "mutation"
        else kind = "subscription"
        # Auth: inferred from the conventional tRPC base-procedure names;
        # custom bases report "unknown".
        if (chain ~ /protectedProcedure/) auth = "protected"
        else if (chain ~ /publicProcedure/) auth = "public"
        else auth = "unknown"
        input_found = (chain ~ /\.input[[:space:]]*\(/) ? "true" : "false"
        printf "%s|%s|%s|%s|%s\n", name, kind, file, auth, input_found
        chain = ""
      }
    ' "$f" 2>/dev/null
  )
done <<<"$TRPC_FILES"
|
|
181
|
+
|
|
182
|
+
# --- 3. GRAPHQL -------------------------------------------------------------
# Detect a GraphQL layer by grepping for common schema/resolver markers in the
# usual source roots. Only presence and the file list are recorded; the result
# is capped at 50 files and absent directories are tolerated via 2>/dev/null.
GQL_HITS="$(grep -rl -E 'typeDefs|buildSchema|gql`|@ObjectType|@Resolver|createSchema' \
  --include='*.ts' --include='*.tsx' --include='*.graphql' --include='*.gql' \
  src server schema 2>/dev/null | head -50 || true)"
if [[ -n "$GQL_HITS" ]]; then
  GQL_FOUND=true
  # Turn the newline-separated path list into a JSON array of strings.
  GQL_FILES="$(jq -Rn '[inputs]' <<<"$GQL_HITS")"
fi
|
|
190
|
+
|
|
191
|
+
# --- 4. SERVER ACTIONS (Next.js) --------------------------------------------
# Two forms: 'use server' directive at top of file, or 'use server' inline in a
# function body. We emit both kinds into the $ACTIONS JSON array.
# NOTE(review): arrow-function exports (export const foo = async () => {...})
# are not detected by either scan — confirm whether they matter here.
while IFS= read -r f; do
  [[ -z "$f" ]] && continue
  if grep -Eq "^['\"]use server['\"]" "$f" 2>/dev/null; then
    # File-scoped: every exported (async) function is an action. awk's $NF is
    # the last field of the matched declaration, i.e. the bare function name.
    # Fix: dropped the trailing `sed 's/^function[[:space:]]*//'` — it was a
    # no-op for normal names, but because [[:space:]]* matches zero spaces it
    # corrupted any name starting with "function" (e.g. functionX -> X).
    while IFS= read -r name; do
      [[ -z "$name" ]] && continue
      ACTIONS="$(jq --arg n "$name" --arg fi "$f" '. + [{name:$n, file:$fi, directive:"file-scoped"}]' <<<"$ACTIONS")"
    done < <(grep -Eo "export[[:space:]]+(async[[:space:]]+)?function[[:space:]]+[A-Za-z_][A-Za-z0-9_]*" "$f" \
      | awk '{print $NF}')
  fi
  # Inline: async function () { 'use server'; ... } — take the line preceding
  # each directive (grep -B1) and pull a function name off it, if present.
  # grep's "--" group separators are filtered out by the second grep.
  while IFS= read -r hit; do
    name="$(echo "$hit" | grep -oE "function[[:space:]]+[A-Za-z_][A-Za-z0-9_]*" | awk '{print $2}')"
    [[ -z "$name" ]] && continue
    ACTIONS="$(jq --arg n "$name" --arg fi "$f" '. + [{name:$n, file:$fi, directive:"'\''use server'\''"}]' <<<"$ACTIONS")"
  done < <(grep -B1 -E "^[[:space:]]*['\"]use server['\"]" "$f" 2>/dev/null | grep -E "function[[:space:]]+[A-Za-z_]" || true)
done < <(find src app server -type f \( -name '*.ts' -o -name '*.tsx' \) 2>/dev/null)
|
|
211
|
+
|
|
212
|
+
# --- 5. MIDDLEWARE (Next.js) -----------------------------------------------
# Probe the conventional middleware locations; the first existing file wins.
for cand in middleware.ts middleware.js src/middleware.ts src/middleware.js; do
  [[ -f "$cand" ]] || continue
  # MW_FILE is stored pre-quoted so it can be passed via --argjson later.
  MW_FILE="\"$cand\""
  # Heuristic: any auth/session/redirect-to-signin reference marks the
  # middleware as an auth guard.
  if grep -Eq "auth|getToken|getSession|currentUser|getServerSession|redirect\\(.*sign[_-]?in" "$cand" 2>/dev/null; then
    MW_AUTH=true
  fi
  # Extract matcher patterns that look like public paths. This grabs every
  # quoted absolute path in the file (deduped, capped at 40), not only the
  # `config.matcher` entries.
  while IFS= read -r pat; do
    [[ -n "$pat" ]] || continue
    MW_PUBLIC="$(jq --arg p "$pat" '. + [$p]' <<<"$MW_PUBLIC")"
  done < <(grep -oE "['\"]/[^'\"]*['\"]" "$cand" | tr -d "'\"" | sort -u | head -40)
  break
done
|
|
225
|
+
|
|
226
|
+
# --- ASSEMBLE ---------------------------------------------------------------
# Merge every discovery section into the final JSON report on stdout. All
# variables are passed with --argjson, so each must already hold valid JSON:
# the arrays built up above, boolean flags, and MW_FILE as a pre-quoted string
# (presumably initialized to null before the middleware scan — confirm the
# default set earlier in this script). unique_by drops duplicate entries that
# overlapping scans may have emitted more than once.
jq -n \
  --argjson http "$HTTP" \
  --argjson trpc "$TRPC" \
  --argjson graphql_found "$GQL_FOUND" \
  --argjson graphql_files "$GQL_FILES" \
  --argjson actions "$ACTIONS" \
  --argjson mw_file "$MW_FILE" \
  --argjson mw_auth "$MW_AUTH" \
  --argjson mw_public "$MW_PUBLIC" \
  '{
    http_routes: ($http | unique_by([.method, .path, .file])),
    trpc_procedures: ($trpc | unique_by([.name, .kind, .file])),
    graphql: { found: $graphql_found, files: $graphql_files },
    server_actions: ($actions | unique_by([.name, .file])),
    middleware: { file: $mw_file, has_auth_guard: $mw_auth, matches_public_patterns: $mw_public }
  }'
|