@soulbatical/tetra-dev-toolkit 1.17.2 → 1.17.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Tetra Check Peers — Validate peer dependency compatibility across consumer projects
|
|
5
|
+
*
|
|
6
|
+
* Scans all known consumer projects and checks if their installed versions
|
|
7
|
+
* are compatible with tetra packages' peerDependencies.
|
|
8
|
+
*
|
|
9
|
+
* Usage:
|
|
10
|
+
* tetra-check-peers # Check all consumers
|
|
11
|
+
* tetra-check-peers --fix # Show npm commands to fix mismatches
|
|
12
|
+
* tetra-check-peers --strict # Fail on any mismatch (for CI/prepublish)
|
|
13
|
+
* tetra-check-peers --json # JSON output
|
|
14
|
+
*
|
|
15
|
+
* Add to prepublishOnly to catch breaking peer dep changes before publish.
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import { readFileSync, readdirSync, existsSync } from 'fs'
import { join, basename, dirname } from 'path'
import { execSync } from 'child_process'
|
|
21
|
+
|
|
22
|
+
// ─── Config ──────────────────────────────────────────────
|
|
23
|
+
|
|
24
|
+
// Root folder that contains every consumer project as a direct subdirectory.
// NOTE(review): the '~' fallback is a literal string — Node does not expand
// tilde — so discovery silently finds nothing when HOME is unset; confirm
// whether that is intended.
const PROJECTS_ROOT = join(process.env.HOME || '~', 'projecten')
// packages/ folder of the tetra monorepo inside PROJECTS_ROOT
const TETRA_ROOT = join(PROJECTS_ROOT, 'tetra', 'packages')

// Tetra packages that have peerDependencies
const TETRA_PACKAGES = ['core', 'ui', 'dev-toolkit', 'schemas']
|
|
29
|
+
|
|
30
|
+
// ─── Helpers ─────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
function readJson(path) {
  // Read and parse a JSON file. Any failure — missing file, permission
  // error, malformed JSON — yields null; callers treat null as "absent".
  let raw
  try {
    raw = readFileSync(path, 'utf-8')
  } catch {
    return null
  }
  try {
    return JSON.parse(raw)
  } catch {
    return null
  }
}
|
|
39
|
+
|
|
40
|
+
function satisfiesRange(installed, range) {
  // Lightweight semver range check without external dependencies.
  // Supports: exact "2.93.3", caret "^2.93.3", tilde "~2.93.3", star "*",
  // ">= 8.0.0", and OR-ranges like "^18.0.0 || ^19.0.0".
  // NOT supported (intentional, to stay dependency-free): pre-release tags,
  // hyphen ranges, "<"/"<=", x-ranges.
  if (!installed || !range) return false
  if (range === '*') return true

  // Strip leading range operators: ^ ~ >= <= > < and whitespace.
  const cleanVersion = (v) => v.replace(/^[\^~>=<\s]+/, '').trim()
  const parseVersion = (v) => {
    const parts = cleanVersion(v).split('.').map(Number)
    return { major: parts[0] || 0, minor: parts[1] || 0, patch: parts[2] || 0 }
  }

  const inst = parseVersion(installed)
  const req = parseVersion(range)

  // Lexicographic major.minor.patch comparison: a >= b
  const gte = (a, b) => {
    if (a.major !== b.major) return a.major > b.major
    if (a.minor !== b.minor) return a.minor > b.minor
    return a.patch >= b.patch
  }

  if (range.startsWith('>=')) {
    return gte(inst, req)
  }

  if (range.startsWith('^') || range.includes('||')) {
    // Caret: same major, >= minor.patch within that major.
    // For OR-ranges ("^18.0.0 || ^19.0.0"), any satisfied part wins.
    const parts = range.split('||').map((p) => p.trim())
    return parts.some((part) => {
      const r = parseVersion(part)
      if (inst.major !== r.major) return false
      // FIX: semver treats "^0.x.y" like "~0.x.y" — for 0.x versions the
      // minor digit is the breaking-change digit, so require same minor.
      // Previously 0.3.0 was wrongly accepted for "^0.2.3".
      if (r.major === 0) {
        return inst.minor === r.minor && inst.patch >= r.patch
      }
      if (inst.minor > r.minor) return true
      return inst.minor === r.minor && inst.patch >= r.patch
    })
  }

  if (range.startsWith('~')) {
    // Tilde: same major.minor, >= patch
    if (inst.major !== req.major || inst.minor !== req.minor) return false
    return inst.patch >= req.patch
  }

  // Exact version match
  return inst.major === req.major && inst.minor === req.minor && inst.patch === req.patch
}
|
|
87
|
+
|
|
88
|
+
// ─── Discovery ───────────────────────────────────────────
|
|
89
|
+
|
|
90
|
+
function discoverTetraPeerDeps() {
  // Collect the peerDependencies declared by each tetra package.
  // Returns { [pkgName]: { version, peerDependencies, peerDependenciesMeta } },
  // skipping packages whose package.json is unreadable or declares no peers.
  const result = {}

  for (const pkg of TETRA_PACKAGES) {
    const manifest = readJson(join(TETRA_ROOT, pkg, 'package.json'))
    const peers = manifest?.peerDependencies
    if (!peers) continue

    result[manifest.name] = {
      version: manifest.version,
      peerDependencies: peers,
      peerDependenciesMeta: manifest.peerDependenciesMeta || {},
    }
  }

  return result
}
|
|
106
|
+
|
|
107
|
+
function discoverConsumers() {
  // Scan every project directory under PROJECTS_ROOT and collect each
  // package.json (root, backend/, frontend/) that depends on at least one
  // @soulbatical/tetra-* package.
  //
  // FIX: previously this shelled out to `ls -d ${PROJECTS_ROOT}/*/` via
  // execSync, which breaks on Windows, on paths containing spaces or glob
  // metacharacters, and forks a shell for a pure-filesystem task.
  // fs.readdirSync with withFileTypes does the same job portably.
  const consumers = []

  try {
    const entries = readdirSync(PROJECTS_ROOT, { withFileTypes: true })

    for (const entry of entries) {
      // Follow symlinked project dirs too (the old `ls` glob did).
      if (!entry.isDirectory() && !entry.isSymbolicLink()) continue

      const projectName = entry.name
      // Skip the tetra monorepo itself and hidden/underscore-prefixed dirs.
      if (projectName === 'tetra' || projectName.startsWith('.') || projectName.startsWith('_')) continue

      const dir = join(PROJECTS_ROOT, projectName)

      // Check root, backend/, frontend/ package.json
      const locations = [
        { path: join(dir, 'package.json'), label: projectName },
        { path: join(dir, 'backend', 'package.json'), label: `${projectName}/backend` },
        { path: join(dir, 'frontend', 'package.json'), label: `${projectName}/frontend` },
      ]

      for (const loc of locations) {
        const pkg = readJson(loc.path)
        if (!pkg) continue

        const allDeps = { ...pkg.dependencies, ...pkg.devDependencies }

        // Only keep package.json files that actually use a tetra package
        const tetraDeps = Object.fromEntries(
          Object.entries(allDeps).filter(([name]) => name.startsWith('@soulbatical/tetra-'))
        )
        if (Object.keys(tetraDeps).length === 0) continue

        consumers.push({
          label: loc.label,
          path: loc.path,
          dependencies: allDeps,
          tetraDeps,
        })
      }
    }
  } catch {
    // PROJECTS_ROOT may not exist on this machine — return an empty list.
  }

  return consumers
}
|
|
151
|
+
|
|
152
|
+
// ─── Check ───────────────────────────────────────────────
|
|
153
|
+
|
|
154
|
+
function checkCompatibility(tetraPeers, consumers) {
  // Cross-check every consumer against the peerDependencies of each tetra
  // package it installs. Produces a flat list of issue records:
  //  - missing required (non-optional) peer  → severity 'error'
  //  - version mismatch on a range           → severity 'error'
  //  - version mismatch on an exact pin      → severity 'warning' + suggestion
  const issues = []

  for (const consumer of consumers) {
    for (const [tetraPkg, tetraInfo] of Object.entries(tetraPeers)) {
      // Skip tetra packages this consumer does not install
      if (!consumer.tetraDeps[tetraPkg]) continue

      for (const [peerDep, requiredRange] of Object.entries(tetraInfo.peerDependencies)) {
        // Shared fields for any issue on this peer dep (spread first so the
        // key order — and therefore the --json output — stays stable).
        const base = {
          consumer: consumer.label,
          tetraPackage: tetraPkg,
          dependency: peerDep,
          required: requiredRange,
        }
        const fix = `npm install ${peerDep}@"${requiredRange}"`
        const installedVersion = consumer.dependencies[peerDep]

        if (!installedVersion) {
          // Absent peers are only a problem when not marked optional
          const optional = tetraInfo.peerDependenciesMeta[peerDep]?.optional
          if (!optional) {
            issues.push({ ...base, installed: 'MISSING', severity: 'error', fix })
          }
          continue
        }

        // Consumer side may itself be a range ("^2.93.3") — strip operators
        const cleanInstalled = installedVersion.replace(/^[\^~>=<\s]+/, '')
        if (satisfiesRange(cleanInstalled, requiredRange)) continue

        // Exact pins (no leading ^ ~ >) are downgraded to warnings, with a
        // hint to loosen the pin on the tetra side.
        const isExactPin = !/^[\^~>]/.test(requiredRange)
        issues.push({
          ...base,
          installed: installedVersion,
          severity: isExactPin ? 'warning' : 'error',
          isExactPin,
          fix,
          suggestion: isExactPin
            ? `Consider using "^${requiredRange}" in ${tetraPkg} peerDependencies for flexibility`
            : null,
        })
      }
    }
  }

  return issues
}
|
|
210
|
+
|
|
211
|
+
// ─── Output ──────────────────────────────────────────────
|
|
212
|
+
|
|
213
|
+
function formatTerminal(issues, consumers, tetraPeers, options) {
  // Render the human-readable compatibility report. Pure function:
  // returns one string; options.fix adds the npm install command per issue.
  const bar = '═══════════════════════════════════════════════════════════════'
  const lines = [
    '',
    bar,
    ' 🔗 Tetra Check Peers — Peer Dependency Compatibility',
    bar,
    '',
  ]

  // Summary header
  const tetraPackages = Object.entries(tetraPeers)
    .map(([name, info]) => `${name}@${info.version}`)
    .join(', ')
  lines.push(` Tetra packages: ${tetraPackages}`)
  lines.push(` Consumers found: ${consumers.length}`)
  lines.push(` Issues found: ${issues.length}`)
  lines.push('')

  // Happy path: nothing to report
  if (issues.length === 0) {
    lines.push(' ✅ All consumer projects are compatible with current peer dependencies')
    lines.push('')
    lines.push(bar)
    return lines.join('\n')
  }

  // Group issues per consumer (insertion order preserved)
  const grouped = new Map()
  for (const issue of issues) {
    const bucket = grouped.get(issue.consumer) ?? []
    bucket.push(issue)
    grouped.set(issue.consumer, bucket)
  }

  for (const [consumer, consumerIssues] of grouped) {
    lines.push(` 📦 ${consumer}`)
    for (const issue of consumerIssues) {
      const icon = issue.severity === 'error' ? '❌' : '⚠️'
      lines.push(` ${icon} ${issue.dependency}: installed ${issue.installed}, needs ${issue.required}`)
      if (issue.suggestion) {
        lines.push(` 💡 ${issue.suggestion}`)
      }
      if (options.fix) {
        lines.push(` → ${issue.fix}`)
      }
    }
    lines.push('')
  }

  // Dedicated section for exact version pins — the most common root cause
  const exactPins = issues.filter((i) => i.isExactPin)
  if (exactPins.length > 0) {
    const uniquePins = [...new Set(exactPins.map((i) => `${i.dependency} (${i.required} in ${i.tetraPackage})`))]
    lines.push(' 💡 EXACT VERSION PINS detected — these cause most compatibility issues:')
    for (const pin of uniquePins) {
      lines.push(` → ${pin}`)
    }
    lines.push(' Consider using "^x.y.z" ranges instead of exact versions in peerDependencies')
    lines.push('')
  }

  lines.push(bar)
  return lines.join('\n')
}
|
|
275
|
+
|
|
276
|
+
// ─── Main ────────────────────────────────────────────────
|
|
277
|
+
|
|
278
|
+
// CLI flags:
//   --fix    print the npm install command for each issue
//   --strict exit non-zero when hard errors exist (CI / prepublishOnly)
//   --json   machine-readable output
const cliArgs = process.argv.slice(2)
const hasFlag = (name) => cliArgs.includes(name)
const options = {
  fix: hasFlag('--fix'),
  strict: hasFlag('--strict'),
  json: hasFlag('--json'),
}

const tetraPeers = discoverTetraPeerDeps()
const consumers = discoverConsumers()
const issues = checkCompatibility(tetraPeers, consumers)

if (options.json) {
  const payload = { tetraPeers, consumers: consumers.map((c) => c.label), issues }
  console.log(JSON.stringify(payload, null, 2))
} else {
  console.log(formatTerminal(issues, consumers, tetraPeers, options))
}

// Exit code: only strict mode fails the process, and only on hard errors
// (exact-pin warnings never block).
const hasErrors = issues.some((i) => i.severity === 'error')
if (options.strict && hasErrors) {
  process.exit(1)
}
|
|
@@ -194,15 +194,30 @@ function parseMigrations(projectRoot) {
|
|
|
194
194
|
|| content.match(/(?:TEXT\[\]|text\[\])\s*:=\s*ARRAY\s*\[\s*'([^[\]]+)'\s*\]/i)
|
|
195
195
|
if (arrayMatch) {
|
|
196
196
|
const loopTables = arrayMatch[1].split(/'\s*,\s*'/).map(t => t.trim())
|
|
197
|
-
|
|
197
|
+
// Match EXECUTE format('CREATE POLICY ...', ...) — multi-line, with escaped quotes ('')
|
|
198
|
+
// PL/pgSQL escapes single quotes as '' inside strings, so we must allow '' within the match
|
|
199
|
+
// The format string ends with a single ' (not '') followed by , or ;
|
|
200
|
+
const execLines = [...content.matchAll(/EXECUTE\s+format\s*\(\s*'(CREATE\s+POLICY(?:[^']|'')*?)'\s*,/gi)]
|
|
198
201
|
for (const exec of execLines) {
|
|
199
|
-
|
|
202
|
+
// Unescape PL/pgSQL doubled quotes back to single quotes for analysis
|
|
203
|
+
const stmt = exec[1].replace(/''/g, "'")
|
|
200
204
|
const forOp = stmt.match(/FOR\s+(SELECT|INSERT|UPDATE|DELETE|ALL)/i)
|
|
201
205
|
const operation = forOp ? forOp[1].toUpperCase() : 'ALL'
|
|
202
206
|
const hasUsing = /\bUSING\b/i.test(stmt)
|
|
203
207
|
const hasWithCheck = /WITH\s+CHECK/i.test(stmt)
|
|
204
|
-
|
|
205
|
-
|
|
208
|
+
|
|
209
|
+
// Extract the full USING/WITH CHECK clause (may be multi-line with nested parens)
|
|
210
|
+
let usingCondition = ''
|
|
211
|
+
let withCheckCondition = ''
|
|
212
|
+
if (hasUsing) {
|
|
213
|
+
const uMatch = stmt.match(/USING\s*\(\s*([\s\S]*?)\s*\)\s*(?:WITH\s+CHECK|$)/i)
|
|
214
|
+
|| stmt.match(/USING\s*\(\s*([\s\S]*?)\s*\)\s*$/i)
|
|
215
|
+
usingCondition = uMatch ? uMatch[1].trim() : ''
|
|
216
|
+
}
|
|
217
|
+
if (hasWithCheck) {
|
|
218
|
+
const wcMatch = stmt.match(/WITH\s+CHECK\s*\(\s*([\s\S]*?)\s*\)\s*$/i)
|
|
219
|
+
withCheckCondition = wcMatch ? wcMatch[1].trim() : ''
|
|
220
|
+
}
|
|
206
221
|
|
|
207
222
|
for (const table of loopTables) {
|
|
208
223
|
if (!tables.has(table)) tables.set(table, { rlsEnabled: false, policies: [], rpcFunctions: new Map() })
|
|
@@ -211,8 +226,8 @@ function parseMigrations(projectRoot) {
|
|
|
211
226
|
tables.get(table).policies.push({
|
|
212
227
|
name: policyName,
|
|
213
228
|
operation,
|
|
214
|
-
using:
|
|
215
|
-
withCheck:
|
|
229
|
+
using: usingCondition,
|
|
230
|
+
withCheck: withCheckCondition,
|
|
216
231
|
file: relFile
|
|
217
232
|
})
|
|
218
233
|
}
|
|
@@ -281,117 +296,95 @@ function isWideOpen(using) {
|
|
|
281
296
|
}
|
|
282
297
|
|
|
283
298
|
/**
|
|
284
|
-
*
|
|
299
|
+
* Allowed RLS policy patterns — whitelist approach.
|
|
285
300
|
*
|
|
286
|
-
*
|
|
287
|
-
*
|
|
288
|
-
* Everything
|
|
301
|
+
* Derived from sparkbuddy-live production DB (562 policies analyzed).
|
|
302
|
+
* These are the ONLY structural patterns allowed in USING/WITH CHECK clauses.
|
|
303
|
+
* Everything else is rejected. To add a new pattern: add it here with justification.
|
|
289
304
|
*
|
|
290
|
-
*
|
|
305
|
+
* Categories:
|
|
306
|
+
* 1. Org isolation: auth_admin_organizations(), auth_user_organizations(), auth_org_id()
|
|
307
|
+
* 2. User isolation: auth.uid(), auth_current_user_id()
|
|
308
|
+
* 3. Role gates: auth.role() = 'authenticated' or 'anon' (NOT 'service_role')
|
|
309
|
+
* 4. Data filters: column = literal, IS NULL, boolean checks
|
|
310
|
+
* 5. Parent checks: IN (SELECT ...), EXISTS (SELECT ...)
|
|
311
|
+
* 6. Open access: true, false
|
|
312
|
+
* 7. Legacy: auth.jwt() -> 'app_metadata', current_setting('app.*')
|
|
291
313
|
*/
|
|
292
|
-
const
|
|
293
|
-
// Org isolation
|
|
294
|
-
/auth_admin_organizations\s*\(\)/i,
|
|
295
|
-
/auth_user_organizations\s*\(\)/i,
|
|
296
|
-
/auth_org_id\s*\(\)/i,
|
|
297
|
-
|
|
298
|
-
// User isolation
|
|
299
|
-
/auth\.uid\s*\(\)/i,
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
/
|
|
304
|
-
/
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
/
|
|
310
|
-
/
|
|
311
|
-
/
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
/
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
/
|
|
323
|
-
/
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
/\bIN\s*\(\s*SELECT\b/i,
|
|
330
|
-
/\bEXISTS\s*\(\s*SELECT\b/i,
|
|
331
|
-
// Literals and operators
|
|
332
|
-
/true/i,
|
|
333
|
-
/false/i,
|
|
334
|
-
/null/i,
|
|
335
|
-
/now\s*\(\)/i,
|
|
336
|
-
/\bAND\b/i,
|
|
337
|
-
/\bOR\b/i,
|
|
338
|
-
/\bNOT\b/i,
|
|
339
|
-
/\bIS\b/i,
|
|
340
|
-
/\bANY\b/i,
|
|
341
|
-
/\bARRAY\b/i,
|
|
342
|
-
// Legacy JWT pattern (sparkbuddy initial schema)
|
|
343
|
-
/auth\.jwt\s*\(\)/i,
|
|
344
|
-
/app_metadata/i,
|
|
345
|
-
// current_setting for app context (NOT role/jwt.claims bypass)
|
|
346
|
-
/current_setting\s*\(\s*'app\./i,
|
|
314
|
+
const ALLOWED_RLS_PATTERNS = [
|
|
315
|
+
// 1. Org isolation helper functions
|
|
316
|
+
{ pattern: /auth_admin_organizations\s*\(\)/i, label: 'org-admin isolation' },
|
|
317
|
+
{ pattern: /auth_user_organizations\s*\(\)/i, label: 'org-user isolation' },
|
|
318
|
+
{ pattern: /auth_org_id\s*\(\)/i, label: 'org isolation' },
|
|
319
|
+
|
|
320
|
+
// 2. User isolation
|
|
321
|
+
{ pattern: /auth\.uid\s*\(\)/i, label: 'user isolation' },
|
|
322
|
+
{ pattern: /auth_current_user_id\s*\(\)/i, label: 'user isolation' },
|
|
323
|
+
|
|
324
|
+
// 3. Role gates (ONLY authenticated and anon — never service_role)
|
|
325
|
+
{ pattern: /auth\.role\s*\(\)\s*=\s*'authenticated'/i, label: 'authenticated role gate' },
|
|
326
|
+
{ pattern: /auth\.role\s*\(\)\s*=\s*'anon'/i, label: 'anon role gate' },
|
|
327
|
+
|
|
328
|
+
// 4. Column comparisons and data filters (any column = any value is fine)
|
|
329
|
+
// This is inherently safe — it filters data, doesn't bypass auth
|
|
330
|
+
{ pattern: /\w+\s*=\s*/i, label: 'column comparison' },
|
|
331
|
+
{ pattern: /\w+\s+IS\s+(NOT\s+)?NULL/i, label: 'null check' },
|
|
332
|
+
{ pattern: /\w+\s+IN\s*\(/i, label: 'IN check' },
|
|
333
|
+
{ pattern: /\w+\s*=\s*ANY\s*\(/i, label: 'ANY check' },
|
|
334
|
+
|
|
335
|
+
// 5. Parent table checks
|
|
336
|
+
{ pattern: /EXISTS\s*\(\s*SELECT/i, label: 'exists subquery' },
|
|
337
|
+
|
|
338
|
+
// 6. Open access
|
|
339
|
+
{ pattern: /^\s*true\s*$/i, label: 'public access' },
|
|
340
|
+
{ pattern: /^\s*\(true\)\s*$/i, label: 'public access' },
|
|
341
|
+
{ pattern: /^\s*false\s*$/i, label: 'deny all' },
|
|
342
|
+
|
|
343
|
+
// 7. Legacy JWT and app context
|
|
344
|
+
{ pattern: /auth\.jwt\s*\(\)/i, label: 'legacy JWT' },
|
|
345
|
+
{ pattern: /current_setting\s*\(\s*'app\./i, label: 'app context' },
|
|
346
|
+
|
|
347
|
+
// 8. Custom helper functions (e.g. is_org_member(), is_product_publicly_accessible())
|
|
348
|
+
// These are project-specific SECURITY DEFINER helpers — safe as long as
|
|
349
|
+
// the function itself is audited (which is done by the RPC Security Mode check)
|
|
350
|
+
{ pattern: /\b\w+\s*\([^)]*\)/i, label: 'function call' },
|
|
347
351
|
]
|
|
348
352
|
|
|
349
353
|
/**
|
|
350
|
-
* Patterns
|
|
351
|
-
*
|
|
354
|
+
* Patterns BANNED from RLS policies — these bypass tenant isolation.
|
|
355
|
+
* Service role already bypasses RLS at the Supabase layer automatically.
|
|
356
|
+
* Adding these to policies creates a false sense of security and opens
|
|
357
|
+
* cross-tenant data leakage vectors.
|
|
358
|
+
*
|
|
359
|
+
* Derived from sparkbuddy-live analysis: 2 policies with auth.role()='service_role'
|
|
360
|
+
* were identified as tech debt (redirects, translations) — not a pattern to follow.
|
|
352
361
|
*/
|
|
353
362
|
const BANNED_RLS_PATTERNS = [
|
|
354
363
|
{ pattern: /service_role/i, label: 'service_role bypass — service role already bypasses RLS at the Supabase layer' },
|
|
355
|
-
{ pattern: /
|
|
364
|
+
{ pattern: /auth\.role\s*\(\)\s*=\s*'service_role'/i, label: 'auth.role() service_role bypass — service role already bypasses RLS automatically' },
|
|
365
|
+
{ pattern: /current_setting\s*\(\s*'role'/i, label: 'PostgreSQL role check — bypasses tenant isolation' },
|
|
356
366
|
{ pattern: /current_setting\s*\(\s*'request\.jwt\.claims'/i, label: 'JWT claims role check — bypasses tenant isolation' },
|
|
357
367
|
{ pattern: /session_user/i, label: 'session_user check — bypasses tenant isolation' },
|
|
358
368
|
{ pattern: /current_user\s*=/i, label: 'current_user check — bypasses tenant isolation' },
|
|
359
|
-
{ pattern: /auth\.role\s*\(\)/i, label: 'auth.role() check — bypasses tenant isolation' },
|
|
360
369
|
{ pattern: /pg_has_role/i, label: 'pg_has_role — bypasses tenant isolation' },
|
|
361
370
|
]
|
|
362
371
|
|
|
363
372
|
/**
|
|
364
373
|
* Validate an RLS clause against the whitelist.
|
|
365
|
-
* Returns null if valid, or a description
|
|
374
|
+
* Returns null if valid, or a description string if banned/unrecognized.
|
|
366
375
|
*/
|
|
367
376
|
function validateRlsClause(clause) {
|
|
368
377
|
if (!clause || !clause.trim()) return null
|
|
369
378
|
|
|
370
|
-
// First check for explicitly banned patterns
|
|
379
|
+
// First: check for explicitly banned patterns (these are always wrong)
|
|
371
380
|
for (const { pattern, label } of BANNED_RLS_PATTERNS) {
|
|
372
381
|
if (pattern.test(clause)) return label
|
|
373
382
|
}
|
|
374
383
|
|
|
375
|
-
//
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
// Remove numbers
|
|
380
|
-
stripped = stripped.replace(/\b\d+\b/g, '')
|
|
381
|
-
// Remove known-safe identifiers and functions
|
|
382
|
-
for (const atom of ALLOWED_RLS_ATOMS) {
|
|
383
|
-
stripped = stripped.replace(new RegExp(atom.source, 'gi'), '')
|
|
384
|
-
}
|
|
385
|
-
// Remove SQL syntax noise (parens, commas, operators, quotes, casts, aliases)
|
|
386
|
-
stripped = stripped.replace(/[(),"=<>!:.|{}\-\+\*\/\s]/g, '')
|
|
387
|
-
stripped = stripped.replace(/\b(AS|FROM|WHERE|SELECT|JOIN|ON|LIMIT|uuid|text|boolean|integer|jsonb|json|public|ARRAY|FOR)\b/gi, '')
|
|
388
|
-
// Remove table/column qualifiers that look like identifiers (lowercase + underscore)
|
|
389
|
-
stripped = stripped.replace(/\b[a-z_][a-z0-9_]*\b/gi, '')
|
|
390
|
-
stripped = stripped.trim()
|
|
391
|
-
|
|
392
|
-
// If anything substantial remains, it's an unrecognized pattern
|
|
393
|
-
if (stripped.length > 0) {
|
|
394
|
-
return `Unrecognized RLS clause content: "${clause.substring(0, 120)}". Only whitelisted patterns are allowed.`
|
|
384
|
+
// Second: verify clause contains at least one allowed pattern
|
|
385
|
+
const hasAllowedPattern = ALLOWED_RLS_PATTERNS.some(({ pattern }) => pattern.test(clause))
|
|
386
|
+
if (!hasAllowedPattern) {
|
|
387
|
+
return `Unrecognized RLS clause: "${clause.substring(0, 150)}". Only whitelisted patterns are allowed (org/user isolation, role gates, data filters, subqueries). See ALLOWED_RLS_PATTERNS in config-rls-alignment.js.`
|
|
395
388
|
}
|
|
396
389
|
|
|
397
390
|
return null
|
|
@@ -63,6 +63,9 @@ const ALLOWED_FILE_PATTERNS = [
|
|
|
63
63
|
/BaseCronService/,
|
|
64
64
|
// Internal service-to-service routes (API key auth, no user JWT)
|
|
65
65
|
/internalRoutes/,
|
|
66
|
+
// Billing routers — hybrid files with both authenticated admin routes and unauthenticated webhook handlers
|
|
67
|
+
// systemDB is needed for Tetra BillingService config callbacks (getSystemDB, getWebhookDB)
|
|
68
|
+
/billingRouter/,
|
|
66
69
|
]
|
|
67
70
|
|
|
68
71
|
/**
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@soulbatical/tetra-dev-toolkit",
|
|
3
|
-
"version": "1.17.
|
|
3
|
+
"version": "1.17.4",
|
|
4
4
|
"publishConfig": {
|
|
5
5
|
"access": "restricted"
|
|
6
6
|
},
|
|
@@ -31,7 +31,8 @@
|
|
|
31
31
|
"tetra-dev-token": "./bin/tetra-dev-token.js",
|
|
32
32
|
"tetra-check-rls": "./bin/tetra-check-rls.js",
|
|
33
33
|
"tetra-migration-lint": "./bin/tetra-migration-lint.js",
|
|
34
|
-
"tetra-db-push": "./bin/tetra-db-push.js"
|
|
34
|
+
"tetra-db-push": "./bin/tetra-db-push.js",
|
|
35
|
+
"tetra-check-peers": "./bin/tetra-check-peers.js"
|
|
35
36
|
},
|
|
36
37
|
"files": [
|
|
37
38
|
"bin/",
|