@soulbatical/tetra-dev-toolkit 1.17.2 → 1.17.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,299 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * Tetra Check Peers — Validate peer dependency compatibility across consumer projects
5
+ *
6
+ * Scans all known consumer projects and checks if their installed versions
7
+ * are compatible with tetra packages' peerDependencies.
8
+ *
9
+ * Usage:
10
+ * tetra-check-peers # Check all consumers
11
+ * tetra-check-peers --fix # Show npm commands to fix mismatches
12
+ * tetra-check-peers --strict # Fail on any mismatch (for CI/prepublish)
13
+ * tetra-check-peers --json # JSON output
14
+ *
15
+ * Add to prepublishOnly to catch breaking peer dep changes before publish.
16
+ */
17
+
18
import { execSync } from 'child_process'
import { existsSync, readFileSync, readdirSync } from 'fs'
import { basename, dirname, join } from 'path'
21
+
22
// ─── Config ──────────────────────────────────────────────

// Root folder holding all local projects (consumers + the tetra monorepo).
// NOTE(review): when $HOME is unset this falls back to a literal '~', which
// Node does NOT expand — discovery would then find nothing. Confirm intended.
const PROJECTS_ROOT = join(process.env.HOME || '~', 'projecten')
// The tetra monorepo's packages/ directory, scanned for peerDependencies
const TETRA_ROOT = join(PROJECTS_ROOT, 'tetra', 'packages')

// Tetra packages that have peerDependencies
const TETRA_PACKAGES = ['core', 'ui', 'dev-toolkit', 'schemas']
29
+
30
+ // ─── Helpers ─────────────────────────────────────────────
31
+
32
/**
 * Read and parse a JSON file.
 *
 * @param {string} path - Path to the JSON file.
 * @returns {object|null} Parsed content, or null when the file is missing,
 *   unreadable, or not valid JSON.
 */
function readJson(path) {
  try {
    const raw = readFileSync(path, 'utf-8')
    return JSON.parse(raw)
  } catch {
    // Treat any read/parse failure as "no manifest here"
    return null
  }
}
39
+
40
/**
 * Minimal semver range matcher (no external dependencies).
 *
 * Supported range syntax:
 *   "*"            — any version
 *   "1.2.3"        — exact match
 *   "^1.2.3"       — caret: left-most non-zero component is fixed (semver rule)
 *   "~1.2.3"       — tilde: same major.minor, patch may be higher
 *   ">= 1.2.3"     — at least the given version
 *   "a || b"       — alternatives; each part keeps its OWN operator
 *
 * Fixes over the previous version:
 *   - Caret now follows the semver 0.x rule: ^0.2.3 means >=0.2.3 <0.3.0 and
 *     ^0.0.3 means exactly 0.0.3 (0.x minors / 0.0.x patches are breaking).
 *   - "||" alternatives are evaluated per part instead of forcing every part
 *     through caret logic (e.g. "18.0.0 || 19.0.0" is exact, not caret;
 *     ">=8 || ^9.1.0" works).
 *
 * @param {string} installed - Concrete version, e.g. "2.93.3" (no operator).
 * @param {string} range - Required range from peerDependencies.
 * @returns {boolean} True when `installed` satisfies `range`.
 */
function satisfiesRange(installed, range) {
  if (!installed || !range) return false

  // "1.2.3" / "^1.2.3" / ">= 1.2.3" → { major, minor, patch }; missing or
  // non-numeric parts become 0
  const parseVersion = (v) => {
    const nums = v.replace(/^[\^~>=<\s]+/, '').trim().split('.').map(Number)
    return { major: nums[0] || 0, minor: nums[1] || 0, patch: nums[2] || 0 }
  }
  // Three-way comparison: negative, zero, or positive
  const compare = (a, b) => a.major - b.major || a.minor - b.minor || a.patch - b.patch

  const inst = parseVersion(installed)

  // Check one range part (no "||" inside)
  const satisfiesPart = (rawPart) => {
    const part = rawPart.trim()
    if (part === '*') return true

    const req = parseVersion(part)

    if (part.startsWith('>=')) return compare(inst, req) >= 0

    if (part.startsWith('^')) {
      // Caret pins the left-most non-zero component:
      //   ^1.2.3 → >=1.2.3 <2.0.0
      //   ^0.2.3 → >=0.2.3 <0.3.0
      //   ^0.0.3 → =0.0.3
      if (req.major > 0) return inst.major === req.major && compare(inst, req) >= 0
      if (req.minor > 0) {
        return inst.major === 0 && inst.minor === req.minor && inst.patch >= req.patch
      }
      return compare(inst, req) === 0
    }

    if (part.startsWith('~')) {
      // Tilde: same major.minor, patch may only move up
      return inst.major === req.major && inst.minor === req.minor && inst.patch >= req.patch
    }

    // No operator → exact version match
    return compare(inst, req) === 0
  }

  // "||" alternatives: satisfied when any part matches
  return range.split('||').some(satisfiesPart)
}
87
+
88
+ // ─── Discovery ───────────────────────────────────────────
89
+
90
/**
 * Collect the peerDependencies declared by each tetra package.
 *
 * Reads TETRA_ROOT/<dir>/package.json for every entry in TETRA_PACKAGES and
 * keeps only packages that actually declare peerDependencies.
 *
 * @returns {object} Map of published package name →
 *   { version, peerDependencies, peerDependenciesMeta }.
 */
function discoverTetraPeerDeps() {
  const peers = {}

  for (const dir of TETRA_PACKAGES) {
    const manifest = readJson(join(TETRA_ROOT, dir, 'package.json'))
    if (!manifest?.peerDependencies) continue // missing manifest or no peers

    peers[manifest.name] = {
      version: manifest.version,
      peerDependencies: manifest.peerDependencies,
      peerDependenciesMeta: manifest.peerDependenciesMeta || {},
    }
  }

  return peers
}
106
+
107
/**
 * Find projects under PROJECTS_ROOT that depend on any @soulbatical/tetra-*
 * package. For each project the root, backend/ and frontend/ package.json
 * files are inspected.
 *
 * Fix: uses fs.readdirSync instead of shelling out to `ls -d` via execSync —
 * portable (works on Windows), no child process, and safe for directory
 * names containing spaces or other shell-special characters.
 *
 * @returns {Array<{label: string, path: string, dependencies: object,
 *   tetraDeps: object}>} One record per package.json that uses tetra.
 */
function discoverConsumers() {
  const consumers = []

  let entries
  try {
    entries = readdirSync(PROJECTS_ROOT, { withFileTypes: true })
  } catch {
    // PROJECTS_ROOT missing or unreadable — nothing to report
    return consumers
  }

  for (const entry of entries) {
    // Accept real directories and symlinks (a symlinked project dir still
    // resolves when we read its package.json below)
    if (!entry.isDirectory() && !entry.isSymbolicLink()) continue

    const projectName = entry.name
    if (projectName === 'tetra' || projectName.startsWith('.') || projectName.startsWith('_')) continue

    const dir = join(PROJECTS_ROOT, projectName)

    // Check root, backend/, frontend/ package.json
    const locations = [
      { path: join(dir, 'package.json'), label: projectName },
      { path: join(dir, 'backend', 'package.json'), label: `${projectName}/backend` },
      { path: join(dir, 'frontend', 'package.json'), label: `${projectName}/frontend` },
    ]

    for (const loc of locations) {
      const pkg = readJson(loc.path)
      if (!pkg) continue

      const allDeps = { ...pkg.dependencies, ...pkg.devDependencies }

      // Keep only package.json files that actually use a tetra package
      const tetraDeps = Object.fromEntries(
        Object.entries(allDeps).filter(([name]) => name.startsWith('@soulbatical/tetra-'))
      )
      if (Object.keys(tetraDeps).length === 0) continue

      consumers.push({
        label: loc.label,
        path: loc.path,
        dependencies: allDeps,
        tetraDeps,
      })
    }
  }

  return consumers
}
151
+
152
+ // ─── Check ───────────────────────────────────────────────
153
+
154
/**
 * Compare each consumer's installed versions against the peerDependencies of
 * every tetra package it uses.
 *
 * @param {object} tetraPeers - Output of discoverTetraPeerDeps().
 * @param {Array<object>} consumers - Output of discoverConsumers().
 * @returns {Array<object>} One issue record per missing or mismatched peer
 *   dependency (severity 'error' or 'warning', with a ready-to-run fix).
 */
function checkCompatibility(tetraPeers, consumers) {
  const issues = []

  for (const consumer of consumers) {
    for (const [tetraPkg, tetraInfo] of Object.entries(tetraPeers)) {
      // Skip tetra packages this consumer doesn't depend on
      if (!consumer.tetraDeps[tetraPkg]) continue

      for (const [peerDep, requiredRange] of Object.entries(tetraInfo.peerDependencies)) {
        const isOptional = tetraInfo.peerDependenciesMeta[peerDep]?.optional
        const installedVersion = consumer.dependencies[peerDep]
        const fix = `npm install ${peerDep}@"${requiredRange}"`

        if (!installedVersion) {
          // A missing peer dep is only a problem when it isn't optional
          if (!isOptional) {
            issues.push({
              consumer: consumer.label,
              tetraPackage: tetraPkg,
              dependency: peerDep,
              required: requiredRange,
              installed: 'MISSING',
              severity: 'error',
              fix,
            })
          }
          continue
        }

        // Consumer manifests hold ranges ("^2.93.3") — strip the operator
        // before comparing against the required range
        const cleanInstalled = installedVersion.replace(/^[\^~>=<\s]+/, '')
        if (satisfiesRange(cleanInstalled, requiredRange)) continue

        // An exact pin in peerDependencies is usually a problem in the tetra
        // package itself, so it's reported as a warning with a suggestion
        const isExactPin = !/^[\^~>]/.test(requiredRange)
        issues.push({
          consumer: consumer.label,
          tetraPackage: tetraPkg,
          dependency: peerDep,
          required: requiredRange,
          installed: installedVersion,
          severity: isExactPin ? 'warning' : 'error',
          isExactPin,
          fix,
          suggestion: isExactPin
            ? `Consider using "^${requiredRange}" in ${tetraPkg} peerDependencies for flexibility`
            : null,
        })
      }
    }
  }

  return issues
}
210
+
211
+ // ─── Output ──────────────────────────────────────────────
212
+
213
/**
 * Render the compatibility report as human-readable terminal output.
 *
 * @param {Array<object>} issues - Issue records from checkCompatibility().
 * @param {Array<object>} consumers - Consumer records (only .length is used).
 * @param {object} tetraPeers - Map of tetra package name → { version, ... }.
 * @param {object} options - CLI options; options.fix appends npm commands.
 * @returns {string} Multi-line report text.
 */
function formatTerminal(issues, consumers, tetraPeers, options) {
  const RULE = '═══════════════════════════════════════════════════════════════'
  const out = []

  out.push('')
  out.push(RULE)
  out.push(' 🔗 Tetra Check Peers — Peer Dependency Compatibility')
  out.push(RULE)
  out.push('')

  // Header summary: scanned packages and totals
  const packageList = Object.entries(tetraPeers)
    .map(([name, info]) => `${name}@${info.version}`)
    .join(', ')
  out.push(` Tetra packages: ${packageList}`)
  out.push(` Consumers found: ${consumers.length}`)
  out.push(` Issues found: ${issues.length}`)
  out.push('')

  if (issues.length === 0) {
    out.push(' ✅ All consumer projects are compatible with current peer dependencies')
    out.push('')
    out.push(RULE)
    return out.join('\n')
  }

  // One section per consumer, listing each of its issues
  const byConsumer = new Map()
  for (const issue of issues) {
    if (!byConsumer.has(issue.consumer)) byConsumer.set(issue.consumer, [])
    byConsumer.get(issue.consumer).push(issue)
  }

  for (const [consumer, consumerIssues] of byConsumer) {
    out.push(` 📦 ${consumer}`)
    for (const issue of consumerIssues) {
      const icon = issue.severity === 'error' ? '❌' : '⚠️'
      out.push(` ${icon} ${issue.dependency}: installed ${issue.installed}, needs ${issue.required}`)
      if (issue.suggestion) out.push(` 💡 ${issue.suggestion}`)
      if (options.fix) out.push(` → ${issue.fix}`)
    }
    out.push('')
  }

  // Call out exact version pins once, deduplicated across consumers
  const exactPins = issues.filter((i) => i.isExactPin)
  if (exactPins.length > 0) {
    const uniquePins = [...new Set(exactPins.map((i) => `${i.dependency} (${i.required} in ${i.tetraPackage})`))]
    out.push(' 💡 EXACT VERSION PINS detected — these cause most compatibility issues:')
    for (const pin of uniquePins) out.push(` → ${pin}`)
    out.push(' Consider using "^x.y.z" ranges instead of exact versions in peerDependencies')
    out.push('')
  }

  out.push(RULE)
  return out.join('\n')
}
275
+
276
// ─── Main ────────────────────────────────────────────────

// CLI flags:
//   --fix    print the npm command that resolves each issue
//   --strict exit non-zero when any error-severity issue exists (CI/prepublish)
//   --json   machine-readable output instead of the terminal report
const cliArgs = process.argv.slice(2)
const options = {
  fix: cliArgs.includes('--fix'),
  strict: cliArgs.includes('--strict'),
  json: cliArgs.includes('--json'),
}

const tetraPeers = discoverTetraPeerDeps()
const consumers = discoverConsumers()
const issues = checkCompatibility(tetraPeers, consumers)

if (options.json) {
  console.log(JSON.stringify({ tetraPeers, consumers: consumers.map((c) => c.label), issues }, null, 2))
} else {
  console.log(formatTerminal(issues, consumers, tetraPeers, options))
}

// Exit code: only --strict turns error-severity issues into a failing run
const hasErrors = issues.some((i) => i.severity === 'error')
if (options.strict && hasErrors) {
  process.exit(1)
}
@@ -281,117 +281,95 @@ function isWideOpen(using) {
281
281
  }
282
282
 
283
283
  /**
284
- * Whitelist-based RLS policy validation.
284
+ * Allowed RLS policy patterns — whitelist approach.
285
285
  *
286
- * ONLY these building blocks are allowed in USING/WITH CHECK clauses.
287
- * Derived from sparkbuddy-live (the reference implementation).
288
- * Everything that doesn't match is rejected as unrecognized.
286
+ * Derived from sparkbuddy-live production DB (562 policies analyzed).
287
+ * These are the ONLY structural patterns allowed in USING/WITH CHECK clauses.
288
+ * Everything else is rejected. To add a new pattern: add it here with justification.
289
289
  *
290
- * To add a new pattern: add it here AND document why it's safe.
290
+ * Categories:
291
+ * 1. Org isolation: auth_admin_organizations(), auth_user_organizations(), auth_org_id()
292
+ * 2. User isolation: auth.uid(), auth_current_user_id()
293
+ * 3. Role gates: auth.role() = 'authenticated' or 'anon' (NOT 'service_role')
294
+ * 4. Data filters: column = literal, IS NULL, boolean checks
295
+ * 5. Parent checks: IN (SELECT ...), EXISTS (SELECT ...)
296
+ * 6. Open access: true, false
297
+ * 7. Legacy: auth.jwt() -> 'app_metadata', current_setting('app.*')
291
298
  */
292
- const ALLOWED_RLS_ATOMS = [
293
- // Org isolation via helper functions
294
- /auth_admin_organizations\s*\(\)/i,
295
- /auth_user_organizations\s*\(\)/i,
296
- /auth_org_id\s*\(\)/i,
297
- /auth_current_user_id\s*\(\)/i,
298
- // User isolation
299
- /auth\.uid\s*\(\)/i,
300
- // Column comparisons (org_id, user_id, etc.)
301
- /organization_id/i,
302
- /organizationid/i,
303
- /active_organization_id/i,
304
- /user_id/i,
305
- /userid/i,
306
- /owner_id/i,
307
- /created_by/i,
308
- /creator_id/i,
309
- /shared_by/i,
310
- /sparkbuddy_user_id/i,
311
- /user_public_id/i,
312
- // Boolean/status column checks (for public/active filtering)
313
- /is_active/i,
314
- /is_public/i,
315
- /is_template/i,
316
- /is_published/i,
317
- /anonymous_access/i,
318
- /allow_cross_org_usage/i,
319
- /visibility_level/i,
320
- /published_status/i,
321
- /post_type/i,
322
- /status/i,
323
- /active/i,
324
- /invitation_token/i,
325
- /original_testimonial_id/i,
326
- /expires_at/i,
327
- /session_id/i,
328
- // Subqueries to parent tables
329
- /\bIN\s*\(\s*SELECT\b/i,
330
- /\bEXISTS\s*\(\s*SELECT\b/i,
331
- // Literals and operators
332
- /true/i,
333
- /false/i,
334
- /null/i,
335
- /now\s*\(\)/i,
336
- /\bAND\b/i,
337
- /\bOR\b/i,
338
- /\bNOT\b/i,
339
- /\bIS\b/i,
340
- /\bANY\b/i,
341
- /\bARRAY\b/i,
342
- // Legacy JWT pattern (sparkbuddy initial schema)
343
- /auth\.jwt\s*\(\)/i,
344
- /app_metadata/i,
345
- // current_setting for app context (NOT role/jwt.claims bypass)
346
- /current_setting\s*\(\s*'app\./i,
299
+ const ALLOWED_RLS_PATTERNS = [
300
+ // 1. Org isolation helper functions
301
+ { pattern: /auth_admin_organizations\s*\(\)/i, label: 'org-admin isolation' },
302
+ { pattern: /auth_user_organizations\s*\(\)/i, label: 'org-user isolation' },
303
+ { pattern: /auth_org_id\s*\(\)/i, label: 'org isolation' },
304
+
305
+ // 2. User isolation
306
+ { pattern: /auth\.uid\s*\(\)/i, label: 'user isolation' },
307
+ { pattern: /auth_current_user_id\s*\(\)/i, label: 'user isolation' },
308
+
309
+ // 3. Role gates (ONLY authenticated and anon — never service_role)
310
+ { pattern: /auth\.role\s*\(\)\s*=\s*'authenticated'/i, label: 'authenticated role gate' },
311
+ { pattern: /auth\.role\s*\(\)\s*=\s*'anon'/i, label: 'anon role gate' },
312
+
313
+ // 4. Column comparisons and data filters (any column = any value is fine)
314
+ // This is inherently safe — it filters data, doesn't bypass auth
315
+ { pattern: /\w+\s*=\s*/i, label: 'column comparison' },
316
+ { pattern: /\w+\s+IS\s+(NOT\s+)?NULL/i, label: 'null check' },
317
+ { pattern: /\w+\s+IN\s*\(/i, label: 'IN check' },
318
+ { pattern: /\w+\s*=\s*ANY\s*\(/i, label: 'ANY check' },
319
+
320
+ // 5. Parent table checks
321
+ { pattern: /EXISTS\s*\(\s*SELECT/i, label: 'exists subquery' },
322
+
323
+ // 6. Open access
324
+ { pattern: /^\s*true\s*$/i, label: 'public access' },
325
+ { pattern: /^\s*\(true\)\s*$/i, label: 'public access' },
326
+ { pattern: /^\s*false\s*$/i, label: 'deny all' },
327
+
328
+ // 7. Legacy JWT and app context
329
+ { pattern: /auth\.jwt\s*\(\)/i, label: 'legacy JWT' },
330
+ { pattern: /current_setting\s*\(\s*'app\./i, label: 'app context' },
331
+
332
+ // 8. Custom helper functions (e.g. is_org_member(), is_product_publicly_accessible())
333
+ // These are project-specific SECURITY DEFINER helpers — safe as long as
334
+ // the function itself is audited (which is done by the RPC Security Mode check)
335
+ { pattern: /\b\w+\s*\([^)]*\)/i, label: 'function call' },
347
336
  ]
348
337
 
349
338
  /**
350
- * Patterns that are NEVER allowed in RLS policies, regardless of context.
351
- * These bypass RLS and defeat the purpose of having policies at all.
339
+ * Patterns BANNED from RLS policies — these bypass tenant isolation.
340
+ * Service role already bypasses RLS at the Supabase layer automatically.
341
+ * Adding these to policies creates a false sense of security and opens
342
+ * cross-tenant data leakage vectors.
343
+ *
344
+ * Derived from sparkbuddy-live analysis: 2 policies with auth.role()='service_role'
345
+ * were identified as tech debt (redirects, translations) — not a pattern to follow.
352
346
  */
353
347
  const BANNED_RLS_PATTERNS = [
354
348
  { pattern: /service_role/i, label: 'service_role bypass — service role already bypasses RLS at the Supabase layer' },
355
- { pattern: /current_setting\s*\(\s*'role'\s*(?:,\s*true\s*)?\)/i, label: 'PostgreSQL role check — bypasses tenant isolation' },
349
+ { pattern: /auth\.role\s*\(\)\s*=\s*'service_role'/i, label: 'auth.role() service_role bypass service role already bypasses RLS automatically' },
350
+ { pattern: /current_setting\s*\(\s*'role'/i, label: 'PostgreSQL role check — bypasses tenant isolation' },
356
351
  { pattern: /current_setting\s*\(\s*'request\.jwt\.claims'/i, label: 'JWT claims role check — bypasses tenant isolation' },
357
352
  { pattern: /session_user/i, label: 'session_user check — bypasses tenant isolation' },
358
353
  { pattern: /current_user\s*=/i, label: 'current_user check — bypasses tenant isolation' },
359
- { pattern: /auth\.role\s*\(\)/i, label: 'auth.role() check — bypasses tenant isolation' },
360
354
  { pattern: /pg_has_role/i, label: 'pg_has_role — bypasses tenant isolation' },
361
355
  ]
362
356
 
363
357
  /**
364
358
  * Validate an RLS clause against the whitelist.
365
- * Returns null if valid, or a description of what's wrong.
359
+ * Returns null if valid, or a description string if banned/unrecognized.
366
360
  */
367
361
  function validateRlsClause(clause) {
368
362
  if (!clause || !clause.trim()) return null
369
363
 
370
- // First check for explicitly banned patterns
364
+ // First: check for explicitly banned patterns (these are always wrong)
371
365
  for (const { pattern, label } of BANNED_RLS_PATTERNS) {
372
366
  if (pattern.test(clause)) return label
373
367
  }
374
368
 
375
- // Strip known-safe tokens and see if anything suspicious remains
376
- let stripped = clause
377
- // Remove string literals
378
- stripped = stripped.replace(/'[^']*'/g, '')
379
- // Remove numbers
380
- stripped = stripped.replace(/\b\d+\b/g, '')
381
- // Remove known-safe identifiers and functions
382
- for (const atom of ALLOWED_RLS_ATOMS) {
383
- stripped = stripped.replace(new RegExp(atom.source, 'gi'), '')
384
- }
385
- // Remove SQL syntax noise (parens, commas, operators, quotes, casts, aliases)
386
- stripped = stripped.replace(/[(),"=<>!:.|{}\-\+\*\/\s]/g, '')
387
- stripped = stripped.replace(/\b(AS|FROM|WHERE|SELECT|JOIN|ON|LIMIT|uuid|text|boolean|integer|jsonb|json|public|ARRAY|FOR)\b/gi, '')
388
- // Remove table/column qualifiers that look like identifiers (lowercase + underscore)
389
- stripped = stripped.replace(/\b[a-z_][a-z0-9_]*\b/gi, '')
390
- stripped = stripped.trim()
391
-
392
- // If anything substantial remains, it's an unrecognized pattern
393
- if (stripped.length > 0) {
394
- return `Unrecognized RLS clause content: "${clause.substring(0, 120)}". Only whitelisted patterns are allowed.`
369
+ // Second: verify clause contains at least one allowed pattern
370
+ const hasAllowedPattern = ALLOWED_RLS_PATTERNS.some(({ pattern }) => pattern.test(clause))
371
+ if (!hasAllowedPattern) {
372
+ return `Unrecognized RLS clause: "${clause.substring(0, 150)}". Only whitelisted patterns are allowed (org/user isolation, role gates, data filters, subqueries). See ALLOWED_RLS_PATTERNS in config-rls-alignment.js.`
395
373
  }
396
374
 
397
375
  return null
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@soulbatical/tetra-dev-toolkit",
3
- "version": "1.17.2",
3
+ "version": "1.17.3",
4
4
  "publishConfig": {
5
5
  "access": "restricted"
6
6
  },