@cubis/foundry 0.3.10 → 0.3.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. package/Ai Agent Workflow/powers/database-skills/POWER.md +15 -2
  2. package/Ai Agent Workflow/powers/database-skills/SKILL.md +26 -2
  3. package/Ai Agent Workflow/powers/database-skills/engines/mongodb/POWER.md +10 -0
  4. package/Ai Agent Workflow/powers/database-skills/engines/mysql/POWER.md +10 -0
  5. package/Ai Agent Workflow/powers/database-skills/engines/neki/POWER.md +10 -0
  6. package/Ai Agent Workflow/powers/database-skills/engines/postgres/POWER.md +10 -0
  7. package/Ai Agent Workflow/powers/database-skills/engines/redis/POWER.md +10 -0
  8. package/Ai Agent Workflow/powers/database-skills/engines/sqlite/POWER.md +10 -0
  9. package/Ai Agent Workflow/powers/database-skills/engines/supabase/POWER.md +10 -0
  10. package/Ai Agent Workflow/powers/database-skills/engines/vitess/POWER.md +10 -0
  11. package/Ai Agent Workflow/powers/database-skills/steering/readme.md +18 -6
  12. package/Ai Agent Workflow/skills/database-skills/LATEST_VERSIONS.md +36 -0
  13. package/Ai Agent Workflow/skills/database-skills/README.md +11 -2
  14. package/Ai Agent Workflow/skills/database-skills/SKILL.md +85 -20
  15. package/Ai Agent Workflow/skills/database-skills/skills/mongodb/SKILL.md +29 -7
  16. package/Ai Agent Workflow/skills/database-skills/skills/mongodb/references/aggregation.md +153 -0
  17. package/Ai Agent Workflow/skills/database-skills/skills/mongodb/references/modeling.md +95 -4
  18. package/Ai Agent Workflow/skills/database-skills/skills/mongodb/references/mongoose-nestjs.md +133 -4
  19. package/Ai Agent Workflow/skills/database-skills/skills/mysql/SKILL.md +33 -7
  20. package/Ai Agent Workflow/skills/database-skills/skills/mysql/references/locking-ddl.md +103 -4
  21. package/Ai Agent Workflow/skills/database-skills/skills/mysql/references/query-indexing.md +103 -4
  22. package/Ai Agent Workflow/skills/database-skills/skills/mysql/references/replication.md +142 -0
  23. package/Ai Agent Workflow/skills/database-skills/skills/neki/SKILL.md +18 -7
  24. package/Ai Agent Workflow/skills/database-skills/skills/neki/references/architecture.md +135 -4
  25. package/Ai Agent Workflow/skills/database-skills/skills/neki/references/operations.md +76 -4
  26. package/Ai Agent Workflow/skills/database-skills/skills/postgres/SKILL.md +31 -7
  27. package/Ai Agent Workflow/skills/database-skills/skills/postgres/references/connection-pooling.md +142 -0
  28. package/Ai Agent Workflow/skills/database-skills/skills/postgres/references/migrations.md +126 -0
  29. package/Ai Agent Workflow/skills/database-skills/skills/postgres/references/performance-ops.md +116 -4
  30. package/Ai Agent Workflow/skills/database-skills/skills/postgres/references/schema-indexing.md +78 -4
  31. package/Ai Agent Workflow/skills/database-skills/skills/redis/SKILL.md +28 -7
  32. package/Ai Agent Workflow/skills/database-skills/skills/redis/references/cache-patterns.md +153 -4
  33. package/Ai Agent Workflow/skills/database-skills/skills/redis/references/data-modeling.md +152 -0
  34. package/Ai Agent Workflow/skills/database-skills/skills/redis/references/operations.md +143 -4
  35. package/Ai Agent Workflow/skills/database-skills/skills/sqlite/SKILL.md +28 -7
  36. package/Ai Agent Workflow/skills/database-skills/skills/sqlite/references/local-first.md +94 -4
  37. package/Ai Agent Workflow/skills/database-skills/skills/sqlite/references/performance.md +104 -4
  38. package/Ai Agent Workflow/skills/database-skills/skills/supabase/SKILL.md +27 -7
  39. package/Ai Agent Workflow/skills/database-skills/skills/supabase/references/performance-operations.md +94 -4
  40. package/Ai Agent Workflow/skills/database-skills/skills/supabase/references/rls-auth.md +105 -4
  41. package/Ai Agent Workflow/skills/database-skills/skills/vitess/SKILL.md +27 -7
  42. package/Ai Agent Workflow/skills/database-skills/skills/vitess/references/operational-safety.md +104 -4
  43. package/Ai Agent Workflow/skills/database-skills/skills/vitess/references/sharding-routing.md +124 -4
  44. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/antigravity/agents/backend-specialist.md +1 -1
  45. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/antigravity/agents/database-architect.md +8 -1
  46. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/antigravity/agents/performance-optimizer.md +2 -0
  47. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/antigravity/workflows/database.md +11 -6
  48. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/codex/agents/backend-specialist.md +1 -1
  49. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/codex/agents/database-architect.md +8 -1
  50. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/codex/agents/performance-optimizer.md +2 -0
  51. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/codex/workflows/database.md +11 -6
  52. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/copilot/agents/backend-specialist.md +1 -1
  53. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/copilot/agents/database-architect.md +8 -1
  54. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/copilot/agents/performance-optimizer.md +2 -0
  55. package/Ai Agent Workflow/workflows/agent-environment-setup/platforms/copilot/workflows/database.md +11 -6
  56. package/package.json +1 -1
@@ -1,5 +1,95 @@
1
- # SQLite Local-First
1
+ # SQLite Local-First Design
2
2
 
3
- - Fit for mobile, desktop, edge, and embedded services.
4
- - Design sync and conflict handling explicitly when multi-device.
5
- - Keep schema migrations deterministic.
3
+ ## When SQLite is the right fit
4
+
5
+ SQLite is the right choice when:
6
+ - Each user has their own private data (mobile app, desktop app, browser extension, edge function).
7
+ - The application needs to work offline with local durability.
8
+ - Data sync is done at the application layer, not the database layer.
9
+ - Simplicity and zero-configuration matter.
10
+
11
+ SQLite is **not** the right fit for:
12
+ - High-concurrency write workloads with multiple simultaneous writers.
13
+ - Data shared across multiple users in real-time (use Postgres or MySQL instead).
14
+ - Analytics on large datasets (use ClickHouse or DuckDB instead).
15
+
16
+ ## Journal modes — choose WAL for mobile/desktop
17
+
18
+ The default Rollback Journal blocks all reads during a write. Switch to WAL (Write-Ahead Log) for concurrent reads:
19
+
20
+ ```sql
21
+ PRAGMA journal_mode = WAL;
22
+ -- Persist across connections (set once after DB creation)
23
+ ```
24
+
25
+ Benefits of WAL mode:
26
+ - Multiple simultaneous readers while a write is in progress.
27
+ - Reads don't block writes; writes don't block reads.
28
+ - Slightly better write performance for sequential inserts.
29
+
30
+ Set `PRAGMA synchronous = NORMAL` with WAL (safe default, faster than `FULL`):
31
+ ```sql
32
+ PRAGMA synchronous = NORMAL;
33
+ ```
34
+
35
+ ## Migration patterns
36
+
37
+ Migrations must be deterministic and reversible. Use a migrations table to track applied versions:
38
+
39
+ ```sql
40
+ CREATE TABLE IF NOT EXISTS _migrations (
41
+ id INTEGER PRIMARY KEY,
42
+ name TEXT NOT NULL,
43
+ applied_at TEXT NOT NULL DEFAULT (datetime('now'))
44
+ );
45
+ ```
46
+
47
+ Migration rules:
48
+ - **Never modify an applied migration** — create a new one instead.
49
+ - **Always test on real device storage** — SD cards, old iPhones, and constrained flash behave differently than dev machines.
50
+ - **Include a down migration** (rollback SQL) alongside every up migration.
51
+ - Run migrations inside a transaction so partial failure is safe:
52
+ ```sql
53
+ BEGIN;
54
+ -- migration SQL here
55
+ INSERT INTO _migrations (id, name) VALUES (3, 'add_notes_column');
56
+ COMMIT;
57
+ ```
58
+
59
+ ## Multi-device sync and conflict handling
60
+
61
+ SQLite has no built-in sync. Design conflict handling explicitly before writing any app code.
62
+
63
+ Common strategies:
64
+ - **Last-write-wins (LWW)**: each row has `updated_at`. On sync, higher timestamp wins. Simple but loses concurrent edits.
65
+ - **CRDTs**: use conflict-free replicated data types for counters, sets, and ordered lists. Complex but correct.
66
+ - **Event sourcing**: store immutable event log, derive state. Sync events instead of rows.
67
+ - **Vector clocks**: each device has a logical clock; merge based on causal ordering.
68
+
69
+ Always include a `sync_id` (UUID, globally unique) and `device_id` on every synced row. Never rely on local SQLite `ROWID` or autoincrement as a sync key.
70
+
71
+ ## Offline-first checklist
72
+
73
+ - [ ] WAL mode enabled on database open.
74
+ - [ ] All writes go through a local queue that syncs when connectivity is available.
75
+ - [ ] Conflict resolution strategy defined before writing sync logic.
76
+ - [ ] Migrations run before any app reads/writes.
77
+ - [ ] Migration history table exists.
78
+ - [ ] UI shows sync status (pending, syncing, error).
79
+ - [ ] Sync tested under: airplane mode, partial connectivity, app killed mid-sync.
80
+
81
+ ## Appropriate use cases
82
+
83
+ | Use | Verdict |
84
+ | --- | --- |
85
+ | Mobile app local data | ✅ Excellent |
86
+ | Desktop app settings and cache | ✅ Excellent |
87
+ | Embedded IoT / edge data | ✅ Excellent |
88
+ | Read-heavy web server cache | ✅ Good (read-only or low-write) |
89
+ | Multi-user web app backend | ❌ Use Postgres |
90
+ | High-concurrency writes | ❌ Use Postgres or MySQL |
91
+
92
+ ## Sources
93
+ - SQLite appropriate uses: https://sqlite.org/whentouse.html
94
+ - Atomic commit behavior: https://sqlite.org/atomiccommit.html
95
+ - WAL mode: https://sqlite.org/wal.html
@@ -1,5 +1,105 @@
1
- # SQLite Performance
1
+ # SQLite Performance Techniques
2
2
 
3
- - Use indexes for repeated filters.
4
- - Run ANALYZE after significant data shifts.
5
- - Profile query plans before adding complexity.
3
+ ## EXPLAIN QUERY PLAN
4
+
5
+ Always check the planner before optimizing.
6
+
7
+ ```sql
8
+ EXPLAIN QUERY PLAN
9
+ SELECT * FROM orders WHERE user_id = 1 ORDER BY created_at DESC LIMIT 20;
10
+ ```
11
+
12
+ Key output to read:
13
+ - `SCAN orders` = full table scan — missing or unused index.
14
+ - `SEARCH orders USING INDEX idx_orders_user (user_id=?)` = index seek — good.
15
+ - `USE TEMP B-TREE FOR ORDER BY` = sort can't use the index — add a covering index with the sort column.
16
+
17
+ ## Index design
18
+
19
+ SQLite uses B-tree indexes. Same leftmost-prefix rules as other SQL databases.
20
+
21
+ ```sql
22
+ -- Equality first, sort last
23
+ CREATE INDEX idx_orders_user_created ON orders (user_id, created_at DESC);
24
+
25
+ -- Partial index for hot filtered subsets
26
+ CREATE INDEX idx_orders_pending ON orders (user_id, created_at)
27
+ WHERE status = 'pending';
28
+
29
+ -- Covering index: include projected columns to avoid table fetch
30
+ CREATE INDEX idx_orders_covering ON orders (user_id, status, created_at);
31
+ -- SQLite doesn't have INCLUDE, so list all needed columns in the key
32
+ ```
33
+
34
+ SQLite does not support `INCLUDE` columns — put all needed columns in the key if you want index-only reads.
35
+
36
+ ## Write performance — batch in transactions
37
+
38
+ Every `INSERT`/`UPDATE`/`DELETE` outside a transaction is its own `fsync`. For bulk operations this is catastrophically slow.
39
+
40
+ ```sql
41
+ -- BAD: 1000 individual fsyncs
42
+ INSERT INTO logs VALUES (...);
43
+ INSERT INTO logs VALUES (...);
44
+ -- × 1000
45
+
46
+ -- GOOD: one fsync
47
+ BEGIN;
48
+ INSERT INTO logs VALUES (...);
49
+ INSERT INTO logs VALUES (...);
50
+ -- × 1000
51
+ COMMIT;
52
+ ```
53
+
54
+ Rule of thumb: batch 100–10,000 rows per transaction. Benchmark for your hardware.
55
+
56
+ ## WAL and checkpoint tuning
57
+
58
+ With WAL mode (see local-first.md), the WAL file grows until a checkpoint writes it back to the main DB file.
59
+
60
+ ```sql
61
+ PRAGMA wal_autocheckpoint = 1000; -- checkpoint after 1000 pages (default)
62
+ -- Lower = more frequent checkpoints (less WAL growth, slightly more I/O)
63
+ -- Higher = less frequent (better write throughput, larger WAL file)
64
+ ```
65
+
66
+ Manual checkpoint before a backup:
67
+ ```sql
68
+ PRAGMA wal_checkpoint(TRUNCATE); -- flush WAL and truncate the file
69
+ ```
70
+
71
+ ## Memory and cache tuning
72
+
73
+ ```sql
74
+ PRAGMA cache_size = -32000; -- 32 MB page cache (negative = KB, default = 2MB)
75
+ PRAGMA mmap_size = 268435456; -- 256 MB memory-mapped I/O (faster reads on large DBs)
76
+ PRAGMA temp_store = MEMORY; -- keep temp tables in RAM instead of disk
77
+ ```
78
+
79
+ Apply these on every new connection — these pragmas are per-connection and are not persisted in the database file.
80
+
81
+ ## Seek pagination
82
+
83
+ ```sql
84
+ -- BAD: OFFSET scans and discards N rows
85
+ SELECT * FROM orders ORDER BY id LIMIT 20 OFFSET 10000;
86
+
87
+ -- GOOD: seek on last seen id
88
+ SELECT * FROM orders WHERE id > :last_seen_id ORDER BY id LIMIT 20;
89
+ ```
90
+
91
+ ## Connection management (multi-threaded apps)
92
+
93
+ SQLite supports one writer at a time. For multi-threaded apps:
94
+ - Use a single write connection with serialized writes.
95
+ - Use a read connection pool (WAL mode allows concurrent readers).
96
+ - Set `PRAGMA busy_timeout = 5000` to wait instead of failing immediately on a locked DB:
97
+ ```sql
98
+ PRAGMA busy_timeout = 5000; -- wait up to 5s before returning SQLITE_BUSY
99
+ ```
100
+
101
+ ## Sources
102
+ - EXPLAIN QUERY PLAN: https://sqlite.org/eqp.html
103
+ - Query planner: https://sqlite.org/queryplanner.html
104
+ - WAL mode: https://sqlite.org/wal.html
105
+ - PRAGMA reference: https://sqlite.org/pragma.html
@@ -1,15 +1,35 @@
1
1
  ---
2
2
  name: supabase
3
- description: Supabase/Postgres patterns for RLS, auth-aware schema, pooling, and safe migrations.
3
+ description: Supabase/Postgres patterns for RLS, indexing, query optimization, pooling, and safe migrations.
4
4
  ---
5
5
 
6
6
  # Supabase
7
7
 
8
- Load references as needed:
8
+ ## Optimization workflow
9
+
10
+ 1. Confirm RLS policy shape and index policy predicates.
11
+ 2. Analyze query plans and use index advisor workflow.
12
+ 3. Choose proper connection mode (direct/session/transaction pooler).
13
+ 4. Use keyset pagination for heavy list endpoints.
14
+ 5. Include migration/rollback with version-awareness (managed vs self-hosted differences).
15
+
16
+ ## Indexing and RLS techniques
17
+
18
+ - Index columns referenced by RLS policies.
19
+ - Index join/filter columns used by API queries.
20
+ - Remove redundant indexes that inflate write cost.
21
+
22
+ ## Pagination techniques
23
+
24
+ - Prefer keyset pagination on stable, indexed sort columns.
25
+ - Avoid blind offset scans on large tables.
26
+
27
+ ## Operational guardrails
28
+
29
+ - Validate Supavisor mode based on runtime behavior.
30
+ - Confirm compatibility when moving between managed and self-hosted environments.
31
+
32
+ ## References
33
+
9
34
  - `references/rls-auth.md`
10
35
  - `references/performance-operations.md`
11
-
12
- Key rules:
13
- - Enforce tenant/user boundaries with RLS.
14
- - Index policy predicates and hot query paths.
15
- - Validate policies in staging before production rollout.
@@ -1,5 +1,95 @@
1
- # Supabase Performance and Operations
1
+ # Supabase — Query Performance and Connection Operations
2
2
 
3
- - Use pooled connections from app runtimes.
4
- - Track slow queries and remove N+1 behavior.
5
- - Keep migrations versioned and reversible.
3
+ ## Query optimization workflow
4
+
5
+ 1. Identify slow queries via Supabase Dashboard → Database → Query Performance (uses `pg_stat_statements`).
6
+ 2. Run `EXPLAIN (ANALYZE, BUFFERS)` on the slow query to read the actual plan.
7
+ 3. Check for: `Seq Scan` on large tables, `Sort` without index, high `Rows Removed by Filter` ratio.
8
+ 4. Add targeted index and re-test.
9
+
10
+ ```sql
11
+ -- From the SQL editor in Supabase Dashboard
12
+ EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT)
13
+ SELECT * FROM orders WHERE user_id = 'abc' ORDER BY created_at DESC LIMIT 20;
14
+ ```
15
+
16
+ ## Database Advisors
17
+
18
+ Supabase runs automated advisors that flag common issues. Check Dashboard → Database → Advisors for:
19
+ - **Unused indexes**: indexes with zero scans — drop them.
20
+ - **Unindexed foreign keys**: FKs without an index cause full scans on joins and cascades.
21
+ - **Seq scans on large tables**: tables being full-scanned when an index would help.
22
+ - **Bloated tables**: high dead tuple ratio — may need `VACUUM`.
23
+
24
+ ```sql
25
+ -- Manual unused index check
26
+ SELECT schemaname, relname, indexrelname, idx_scan
27
+ FROM pg_stat_user_indexes
28
+ WHERE idx_scan = 0 AND schemaname = 'public'
29
+ ORDER BY relname;
30
+ ```
31
+
32
+ ## Connection modes and when to use each
33
+
34
+ Supabase provides three connection methods:
35
+
36
+ | Mode | How | Use for |
37
+ | --- | --- | --- |
38
+ | **Direct** | `postgresql://...` port 5432 | Long-lived connections (background jobs, migrations) |
39
+ | **Transaction pooler** (Supavisor, PgBouncer-compatible) | Port 6543 | Serverless functions, edge functions, short-lived requests |
40
+ | **Session pooler** | Port 5432 alt | When you need session-level features (prepared statements, `SET`) |
41
+
42
+ **Serverless / Edge Functions**: always use the transaction pooler (6543). Direct connections from serverless cold starts exhaust Postgres connection limits fast.
43
+
44
+ ```ts
45
+ // Supabase JS — uses transaction pooler automatically when using the client library
46
+ const supabase = createClient(url, anonKey);
47
+
48
+ // Direct connection for migrations / background jobs (Prisma, Drizzle, etc.)
49
+ DATABASE_URL=postgresql://user:pass@db.xxx.supabase.co:5432/postgres
50
+ // Transaction pooler for Prisma in serverless
51
+ DATABASE_URL=postgresql://user:pass@db.xxx.supabase.co:6543/postgres?pgbouncer=true
52
+ ```
53
+
54
+ ## Prepared statements and pooling
55
+
56
+ PgBouncer in transaction mode does **not** support prepared statements — they are per-session. If your ORM uses prepared statements (Prisma does by default):
57
+
58
+ ```
59
+ # Prisma — disable prepared statements when using transaction pooler
60
+ DATABASE_URL="...?pgbouncer=true&connection_limit=1"
61
+ # Or in schema.prisma:
62
+ datasource db {
63
+ url = env("DATABASE_URL")
64
+ directUrl = env("DIRECT_URL") # direct connection for migrations
65
+ }
66
+ ```
67
+
68
+ ## Realtime performance considerations
69
+
70
+ Supabase Realtime uses Postgres logical replication. High-volume tables with Realtime enabled generate significant WAL traffic.
71
+
72
+ - Enable Realtime only on tables that clients actually subscribe to.
73
+ - Filter subscriptions as tightly as possible: `channel.on('postgres_changes', { filter: 'user_id=eq.123' }, ...)`.
74
+ - Monitor replication slot lag in Dashboard → Database → Replication.
75
+
76
+ ## Extensions useful for performance
77
+
78
+ ```sql
79
+ -- Query statistics (enabled by default in Supabase)
80
+ SELECT * FROM pg_stat_statements ORDER BY total_exec_time DESC LIMIT 10;
81
+
82
+ -- Index usage stats
83
+ SELECT * FROM pg_stat_user_indexes ORDER BY idx_scan ASC;
84
+
85
+ -- Table bloat estimate
86
+ SELECT relname, n_dead_tup, n_live_tup, last_autovacuum
87
+ FROM pg_stat_user_tables
88
+ ORDER BY n_dead_tup DESC;
89
+ ```
90
+
91
+ ## Sources
92
+ - Query optimization guide: https://supabase.com/docs/guides/database/query-optimization
93
+ - Database advisors: https://supabase.com/docs/guides/database/database-advisors
94
+ - Connecting to Postgres (poolers): https://supabase.com/docs/guides/database/connecting-to-postgres
95
+ - pg_stat_statements: https://www.postgresql.org/docs/current/pgstatstatements.html
@@ -1,5 +1,106 @@
1
- # Supabase RLS and Auth
1
+ # Supabase Row Level Security and Auth Performance
2
2
 
3
- - Keep policies simple and explicit.
4
- - Validate read and write paths for each role.
5
- - Avoid exposing privileged credentials client-side.
3
+ ## Enable RLS on every exposed table
4
+
5
+ Any table exposed via the Supabase Data API (PostgREST) or Realtime must have RLS enabled. Without it, all authenticated users can access all rows.
6
+
7
+ ```sql
8
+ ALTER TABLE orders ENABLE ROW LEVEL SECURITY;
9
+ ALTER TABLE orders FORCE ROW LEVEL SECURITY; -- applies to table owner too
10
+ ```
11
+
12
+ ## Policy fundamentals
13
+
14
+ A policy is a `WHERE` predicate automatically appended to every query on the table.
15
+
16
+ ```sql
17
+ -- Users can only see their own orders
18
+ CREATE POLICY "users can read own orders"
19
+ ON orders FOR SELECT
20
+ USING (user_id = auth.uid());
21
+
22
+ -- Users can only insert rows for themselves
23
+ CREATE POLICY "users can insert own orders"
24
+ ON orders FOR INSERT
25
+ WITH CHECK (user_id = auth.uid());
26
+
27
+ -- Separate policies for read vs write
28
+ -- SELECT: use USING()
29
+ -- INSERT: use WITH CHECK()
30
+ -- UPDATE: use both USING() (which rows to target) + WITH CHECK() (what values are allowed)
31
+ -- DELETE: use USING()
32
+ ```
33
+
34
+ ## auth.uid() and auth.jwt()
35
+
36
+ Supabase injects the authenticated user context via:
37
+ - `auth.uid()` — UUID of the current user (`auth.users.id`).
38
+ - `auth.jwt()` — full JWT payload as JSONB. Access claims: `auth.jwt() ->> 'role'`, `(auth.jwt() -> 'app_metadata') ->> 'org_id'`. Use `app_metadata` (not `user_metadata`) in policies — users can update their own `user_metadata`, so it must never gate access.
39
+
40
+ ```sql
41
+ -- Role-based policy using JWT claims
42
+ CREATE POLICY "admins can read all orders"
43
+ ON orders FOR SELECT
44
+ USING (auth.jwt() ->> 'user_role' = 'admin');
45
+
46
+ -- Multi-tenant: org_id from app_metadata (not user-editable)
47
+ CREATE POLICY "org members can read org orders"
48
+ ON orders FOR SELECT
49
+ USING (org_id = (auth.jwt() -> 'app_metadata' ->> 'org_id')::uuid);
50
+ ```
51
+
52
+ ## Index policy predicate columns
53
+
54
+ **RLS policies are evaluated per row.** If the policy predicate isn't indexed, every query scans the full table before filtering.
55
+
56
+ ```sql
57
+ -- Policy uses user_id — must have an index on it
58
+ CREATE INDEX idx_orders_user_id ON orders (user_id);
59
+
60
+ -- Policy uses org_id — same rule
61
+ CREATE INDEX idx_orders_org_id ON orders (org_id);
62
+ ```
63
+
64
+ For composite queries: `(user_id, status)`, `(org_id, created_at)`.
65
+
66
+ ## Bypassing RLS for service-role operations
67
+
68
+ Backend code using the `service_role` key bypasses RLS — useful for admin operations, but dangerous if leaked. Never expose `service_role` to client code.
69
+
70
+ ```sql
71
+ -- Bypass RLS in a function that runs with the function owner's privileges (SECURITY DEFINER)
72
+ CREATE FUNCTION admin_get_all_orders()
73
+ RETURNS SETOF orders
74
+ SECURITY DEFINER -- runs as function owner (bypasses RLS)
75
+ SET search_path = public
76
+ LANGUAGE sql AS $$
77
+ SELECT * FROM orders;
78
+ $$;
79
+ ```
80
+
81
+ ## Measuring policy overhead
82
+
83
+ ```sql
84
+ -- Compare query time with and without RLS
85
+ EXPLAIN (ANALYZE, BUFFERS)
86
+ SELECT * FROM orders WHERE status = 'pending' LIMIT 20;
87
+
88
+ -- Check if planner inlines auth.uid() correctly (it should appear in the plan)
89
+ ```
90
+
91
+ Benchmark list endpoints that have RLS policies after every policy change — policy complexity directly impacts query time.
92
+
93
+ ## Common RLS mistakes
94
+
95
+ | Mistake | Fix |
96
+ | --- | --- |
97
+ | No policy defined but RLS enabled | Default deny — no rows returned; add explicit policies |
98
+ | Policy predicate column not indexed | Add index on policy column |
99
+ | Using `auth.uid()` in `WITH CHECK` but not `USING` | UPDATE policy needs both |
100
+ | Relying solely on app-layer filtering | Always enforce with RLS even if app also filters |
101
+ | `service_role` key used in client | Switch to `anon` or `authenticated` key with RLS |
102
+
103
+ ## Sources
104
+ - Row Level Security: https://supabase.com/docs/guides/database/postgres/row-level-security
105
+ - Supabase auth helpers: https://supabase.com/docs/guides/auth
106
+ - PostgreSQL RLS docs: https://www.postgresql.org/docs/current/ddl-rowsecurity.html
@@ -1,15 +1,35 @@
1
1
  ---
2
2
  name: vitess
3
- description: Vitess and sharded MySQL planning, VSchema, routing, and online schema migration concerns.
3
+ description: Vitess sharding strategy, VSchema/vindex design, routing, and scale operations.
4
4
  ---
5
5
 
6
6
  # Vitess
7
7
 
8
- Load references as needed:
8
+ ## Primary focus
9
+
10
+ - Choose a strong primary vindex.
11
+ - Keep hot queries shard-local.
12
+ - Minimize cross-shard joins and transactions.
13
+
14
+ ## Optimization workflow
15
+
16
+ 1. Identify dominant access patterns and routing keys.
17
+ 2. Design VSchema/vindex for locality and balanced distribution.
18
+ 3. Validate routing/fan-out behavior on critical queries.
19
+ 4. Use seek pagination anchored on routing key and tie-breaker.
20
+ 5. Stage topology/resharding changes with rollback checkpoints.
21
+
22
+ ## Pagination techniques
23
+
24
+ - Prefer shard-aware keyset pagination.
25
+ - Avoid broad fan-out + offset combinations.
26
+
27
+ ## Operational guardrails
28
+
29
+ - Treat resharding as staged production operation.
30
+ - Monitor fan-out, replica lag, and failover behavior during topology changes.
31
+
32
+ ## References
33
+
9
34
  - `references/sharding-routing.md`
10
35
  - `references/operational-safety.md`
11
-
12
- Key rules:
13
- - Model entity ownership before keyspace/shard strategy.
14
- - Avoid cross-shard patterns unless explicitly required.
15
- - Plan online schema changes with observability checkpoints.
@@ -1,5 +1,105 @@
1
- # Vitess Operational Safety
1
+ # Vitess — Online DDL and Operational Safety
2
2
 
3
- - Stage schema changes and monitor replication lag.
4
- - Include fallback plan for routing or migration regressions.
5
- - Verify SLA metrics after each rollout phase.
3
+ ## Online DDL strategies
4
+
5
+ Vitess manages schema changes as tracked, non-blocking, revertible migrations. This is the **only recommended approach** for production schema changes.
6
+
7
+ Set DDL strategy per session or globally:
8
+ ```sql
9
+ SET @@ddl_strategy = 'vitess';
10
+ ```
11
+
12
+ | Strategy | Description |
13
+ | --- | --- |
14
+ | `vitess` (**recommended**) | VReplication-based. Non-blocking, revertible, failover-safe. |
15
+ | `online` | Alias for `vitess`. |
16
+ | `mysql` | Native MySQL DDL managed by Vitess scheduler. Blocking depends on the DDL. |
17
+ | `direct` | Unmanaged — direct DDL to MySQL. Not trackable. Avoid in production. |
18
+
19
+ ## Running a migration
20
+
21
+ ```sql
22
+ SET @@ddl_strategy = 'vitess';
23
+ ALTER TABLE orders ADD COLUMN notes TEXT; -- returns migration UUID immediately
24
+ ```
25
+
26
+ ```bash
27
+ vtctldclient ApplySchema --ddl-strategy "vitess" \
28
+ --sql "ALTER TABLE orders ADD COLUMN notes TEXT" commerce
29
+ ```
30
+
31
+ ## Migration lifecycle
32
+
33
+ ```
34
+ queued → ready → running → complete
35
+ ↘ failed
36
+ ↘ cancelled
37
+ ```
38
+
39
+ ## Monitor and control migrations
40
+
41
+ ```sql
42
+ SHOW VITESS_MIGRATIONS; -- all migrations
43
+ SHOW VITESS_MIGRATIONS LIKE '<uuid>'; -- specific migration
44
+ ```
45
+
46
+ Key columns to watch: `migration_status`, `progress`, `started_timestamp`, `completed_timestamp`, `message`.
47
+
48
+ ```sql
49
+ ALTER VITESS_MIGRATION '<uuid>' CANCEL; -- cancel pending
50
+ ALTER VITESS_MIGRATION '<uuid>' RETRY; -- retry failed
51
+ ALTER VITESS_MIGRATION '<uuid>' COMPLETE; -- complete a postponed migration
52
+ ALTER VITESS_MIGRATION '<uuid>' LAUNCH; -- launch a postponed migration
53
+ REVERT VITESS_MIGRATION '<uuid>'; -- revert a completed migration (non-destructive)
54
+ ```
55
+
56
+ ## Key DDL strategy flags
57
+
58
+ Append flags to strategy string:
59
+ ```sql
60
+ SET @@ddl_strategy = 'vitess --postpone-completion --allow-concurrent';
61
+ ```
62
+
63
+ | Flag | Effect |
64
+ | --- | --- |
65
+ | `--postpone-launch` | Queue migration but don't start automatically |
66
+ | `--postpone-completion` | Run migration but don't cut over — you control timing |
67
+ | `--allow-concurrent` | Allow multiple migrations to run simultaneously |
68
+ | `--declarative` | Provide desired `CREATE TABLE`; Vitess computes the ALTER |
69
+ | `--prefer-instant-ddl` | Use MySQL INSTANT DDL when possible |
70
+ | `--singleton` | Only one migration on this table at a time |
71
+
72
+ ## Declarative migrations
73
+
74
+ Supply the desired schema; Vitess computes the diff and runs the minimal ALTER:
75
+ ```sql
76
+ SET @@ddl_strategy = 'vitess --declarative';
77
+ CREATE TABLE demo (id BIGINT UNSIGNED NOT NULL, status VARCHAR(32), PRIMARY KEY (id));
78
+ ```
79
+
80
+ ## Throttling and failover safety
81
+
82
+ - The **tablet throttler** automatically slows migrations when replication lag is high.
83
+ Enable: `vtctldclient UpdateThrottlerConfig --enable <keyspace>`
84
+ - VReplication-based migrations **auto-resume** after primary reparenting (new primary must come up within 10 min).
85
+
86
+ ## Operational guardrails
87
+
88
+ - **Stage topology and resharding changes** in maintenance windows; keep blast radius small per operation.
89
+ - **Watch fan-out and replication lag** as release gates — do not proceed if lag is elevated.
90
+ - **Prepare explicit rollback procedures per shard move**: `MoveTables SwitchTraffic --reverse`, then `MoveTables Complete` on the old keyspace.
91
+ - Validate with `VDiff` before completing table moves: `vtctldclient VDiff <keyspace> <workflow>`.
92
+ - Test resharding end-to-end on staging with production-like data volume before production.
93
+
94
+ ## Best practices
95
+
96
+ 1. Always use `vitess` strategy in production — never `direct`.
97
+ 2. Use `--postpone-completion` for critical tables to control cut-over timing precisely.
98
+ 3. Enable the tablet throttler to prevent replication lag buildup.
99
+ 4. Use declarative migrations for desired-state schema management.
100
+ 5. Monitor all running migrations with `SHOW VITESS_MIGRATIONS` before and after deployments.
101
+
102
+ ## Sources
103
+ - Online DDL guide: https://vitess.io/docs/user-guides/schema-changes/
104
+ - VReplication reference: https://vitess.io/docs/reference/vreplication/
105
+ - Release notes / lifecycle: https://vitess.io/docs/releases/