opencodekit 0.15.8 → 0.15.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +9 -7
- package/dist/template/.opencode/AGENTS.md +7 -2
- package/dist/template/.opencode/agent/general.md +1 -0
- package/dist/template/.opencode/agent/looker.md +1 -0
- package/dist/template/.opencode/agent/painter.md +218 -0
- package/dist/template/.opencode/agent/plan.md +3 -0
- package/dist/template/.opencode/agent/vision.md +1 -0
- package/dist/template/.opencode/command/edit-image.md +1 -2
- package/dist/template/.opencode/command/generate-icon.md +1 -2
- package/dist/template/.opencode/command/generate-image.md +1 -2
- package/dist/template/.opencode/command/generate-pattern.md +1 -2
- package/dist/template/.opencode/command/generate-storyboard.md +1 -2
- package/dist/template/.opencode/command/restore-image.md +1 -2
- package/dist/template/.opencode/dcp.jsonc +12 -10
- package/dist/template/.opencode/opencode.json +707 -704
- package/dist/template/.opencode/package.json +1 -1
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/AGENTS.md +1490 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/SKILL.md +57 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-full-text-search.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-jsonb-indexing.md +49 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-idle-timeout.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-limits.md +44 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-pooling.md +41 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-prepared-statements.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-batch-inserts.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-n-plus-one.md +53 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-pagination.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-upsert.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-advisory.md +56 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-deadlock-prevention.md +68 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-short-transactions.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-skip-locked.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-explain-analyze.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-pg-stat-statements.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-vacuum-analyze.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-composite-indexes.md +44 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-covering-indexes.md +40 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-index-types.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-missing-indexes.md +43 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-partial-indexes.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-data-types.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-foreign-key-indexes.md +59 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-lowercase-identifiers.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-partitioning.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-primary-keys.md +61 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-privileges.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-rls-basics.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-rls-performance.md +57 -0
- package/package.json +1 -1
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-short-transactions.md
ADDED

@@ -0,0 +1,50 @@
---
title: Keep Transactions Short to Reduce Lock Contention
impact: MEDIUM-HIGH
impactDescription: 3-5x throughput improvement, fewer deadlocks
tags: transactions, locking, contention, performance
---

## Keep Transactions Short to Reduce Lock Contention

Long-running transactions hold locks that block other queries. Keep transactions as short as possible.

**Incorrect (long transaction with external calls):**

```sql
begin;
select * from orders where id = 1 for update; -- Lock acquired

-- Application makes HTTP call to payment API (2-5 seconds)
-- Other queries on this row are blocked!

update orders set status = 'paid' where id = 1;
commit; -- Lock held for entire duration
```

**Correct (minimal transaction scope):**

```sql
-- Validate data and call APIs outside transaction
-- Application: response = await paymentAPI.charge(...)

-- Only hold lock for the actual update
begin;
update orders
set status = 'paid', payment_id = $1
where id = $2 and status = 'pending'
returning *;
commit; -- Lock held for milliseconds
```

Use `statement_timeout` to prevent runaway transactions:

```sql
-- Abort queries running longer than 30 seconds (session-wide)
set statement_timeout = '30s';

-- Or per-transaction: set local lasts only until commit/rollback
set local statement_timeout = '5s';
```

Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html)
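`statement_timeout` does not catch a transaction that sits idle while the application makes its external call. A minimal sketch of a complementary guard, using the standard `idle_in_transaction_session_timeout` setting (the value is illustrative):

```sql
-- Abort any session that stays idle inside an open transaction,
-- e.g. while waiting on an external payment API
set idle_in_transaction_session_timeout = '10s';
```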
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-skip-locked.md
ADDED

@@ -0,0 +1,54 @@
---
title: Use SKIP LOCKED for Non-Blocking Queue Processing
impact: MEDIUM-HIGH
impactDescription: 10x throughput for worker queues
tags: skip-locked, queue, workers, concurrency
---

## Use SKIP LOCKED for Non-Blocking Queue Processing

When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting.

**Incorrect (workers block each other):**

```sql
-- Worker 1 and Worker 2 both try to get next job
begin;
select * from jobs where status = 'pending' order by created_at limit 1 for update;
-- Worker 2 waits for Worker 1's lock to release!
```

**Correct (SKIP LOCKED for parallel processing):**

```sql
-- Each worker skips locked rows and gets the next available
begin;
select * from jobs
where status = 'pending'
order by created_at
limit 1
for update skip locked;

-- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting)

update jobs set status = 'processing' where id = $1;
commit;
```

Complete queue pattern:

```sql
-- Atomic claim-and-update in one statement
update jobs
set status = 'processing', worker_id = $1, started_at = now()
where id = (
  select id from jobs
  where status = 'pending'
  order by created_at
  limit 1
  for update skip locked
)
returning *;
```

Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE)
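A possible follow-up once the worker finishes, sketched against the same `jobs` table; the `finished_at` column and the double-completion guard are assumptions, not part of the original pattern:

```sql
-- Mark the claimed job done so it is never picked up again;
-- matching on worker_id guards against a stale worker finishing someone else's job
update jobs
set status = 'completed', finished_at = now()
where id = $1 and worker_id = $2 and status = 'processing';
```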
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-explain-analyze.md
ADDED

@@ -0,0 +1,45 @@
---
title: Use EXPLAIN ANALYZE to Diagnose Slow Queries
impact: LOW-MEDIUM
impactDescription: Identify exact bottlenecks in query execution
tags: explain, analyze, diagnostics, query-plan
---

## Use EXPLAIN ANALYZE to Diagnose Slow Queries

EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks.

**Incorrect (guessing at performance issues):**

```sql
-- Query is slow, but why?
select * from orders where customer_id = 123 and status = 'pending';
-- "It must be missing an index" - but which one?
```

**Correct (use EXPLAIN ANALYZE):**

```sql
explain (analyze, buffers, format text)
select * from orders where customer_id = 123 and status = 'pending';

-- Output reveals the issue:
-- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1)
--   Filter: ((customer_id = 123) AND (status = 'pending'::text))
--   Rows Removed by Filter: 999950
--   Buffers: shared hit=5000 read=15000
-- Planning Time: 0.150 ms
-- Execution Time: 450.500 ms
```

Key things to look for:

```sql
-- Seq Scan on large tables = missing index
-- Rows Removed by Filter = poor selectivity or missing index
-- Buffers: read >> hit = data not cached, needs more memory
-- Nested Loop with high loops = consider different join strategy
-- Sort Method: external merge = work_mem too low
```

Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect)
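Because `EXPLAIN ANALYZE` really executes the statement, a cautious way to profile writes is to wrap them in a transaction and roll back; a sketch reusing the `orders` table from the examples above:

```sql
begin;
explain (analyze, buffers)
update orders set status = 'cancelled' where customer_id = 123;
rollback; -- discard the update; only the plan and timings were wanted
```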
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-pg-stat-statements.md
ADDED

@@ -0,0 +1,55 @@
---
title: Enable pg_stat_statements for Query Analysis
impact: LOW-MEDIUM
impactDescription: Identify top resource-consuming queries
tags: pg-stat-statements, monitoring, statistics, performance
---

## Enable pg_stat_statements for Query Analysis

pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries.

**Incorrect (no visibility into query patterns):**

```sql
-- Database is slow, but which queries are the problem?
-- No way to know without pg_stat_statements
```

**Correct (enable and query pg_stat_statements):**

```sql
-- Enable the extension
create extension if not exists pg_stat_statements;

-- Find slowest queries by total time
select
  calls,
  round(total_exec_time::numeric, 2) as total_time_ms,
  round(mean_exec_time::numeric, 2) as mean_time_ms,
  query
from pg_stat_statements
order by total_exec_time desc
limit 10;

-- Find most frequent queries
select calls, query
from pg_stat_statements
order by calls desc
limit 10;

-- Reset statistics after optimization
select pg_stat_statements_reset();
```

Key metrics to monitor:

```sql
-- Queries with high mean time (candidates for optimization)
select query, mean_exec_time, calls
from pg_stat_statements
where mean_exec_time > 100 -- > 100ms average
order by mean_exec_time desc;
```

Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements)
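pg_stat_statements also records block I/O per query. A sketch that surfaces queries doing heavy disk reads, using the standard `shared_blks_hit` / `shared_blks_read` columns:

```sql
-- Queries with a poor cache hit ratio are candidates for indexing or more memory
select
  query,
  calls,
  round(100.0 * shared_blks_hit
        / nullif(shared_blks_hit + shared_blks_read, 0), 1) as cache_hit_pct
from pg_stat_statements
order by shared_blks_read desc
limit 10;
```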
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-vacuum-analyze.md
ADDED

@@ -0,0 +1,55 @@
---
title: Maintain Table Statistics with VACUUM and ANALYZE
impact: MEDIUM
impactDescription: 2-10x better query plans with accurate statistics
tags: vacuum, analyze, statistics, maintenance, autovacuum
---

## Maintain Table Statistics with VACUUM and ANALYZE

Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics.

**Incorrect (stale statistics):**

```sql
-- Table has 1M rows but stats say 1000
-- Query planner chooses wrong strategy
explain select * from orders where status = 'pending';
-- Shows: Seq Scan (because stats show small table)
-- Actually: Index Scan would be much faster
```

**Correct (maintain fresh statistics):**

```sql
-- Manually analyze after large data changes
analyze orders;

-- Analyze specific columns used in WHERE clauses
analyze orders (status, created_at);

-- Check when tables were last analyzed
select
  relname,
  last_vacuum,
  last_autovacuum,
  last_analyze,
  last_autoanalyze
from pg_stat_user_tables
order by last_analyze nulls first;
```

Autovacuum tuning for busy tables:

```sql
-- Increase frequency for high-churn tables
alter table orders set (
  autovacuum_vacuum_scale_factor = 0.05,  -- Vacuum at 5% dead tuples (default 20%)
  autovacuum_analyze_scale_factor = 0.02  -- Analyze at 2% changes (default 10%)
);

-- Check autovacuum status
select * from pg_stat_progress_vacuum;
```

Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations)
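To decide which tables deserve the tighter autovacuum settings above, dead-tuple counts from `pg_stat_user_tables` are a reasonable signal; a sketch:

```sql
-- Tables with many dead tuples relative to live rows are vacuum candidates
select
  relname,
  n_live_tup,
  n_dead_tup,
  round(100.0 * n_dead_tup / nullif(n_live_tup + n_dead_tup, 0), 1) as dead_pct
from pg_stat_user_tables
order by n_dead_tup desc
limit 10;
```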
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-composite-indexes.md
ADDED

@@ -0,0 +1,44 @@
---
title: Create Composite Indexes for Multi-Column Queries
impact: HIGH
impactDescription: 5-10x faster multi-column queries
tags: indexes, composite-index, multi-column, query-optimization
---

## Create Composite Indexes for Multi-Column Queries

When queries filter on multiple columns, a composite index is more efficient than separate single-column indexes.

**Incorrect (separate indexes require bitmap scan):**

```sql
-- Two separate indexes
create index orders_status_idx on orders (status);
create index orders_created_idx on orders (created_at);

-- Query must combine both indexes (slower)
select * from orders where status = 'pending' and created_at > '2024-01-01';
```

**Correct (composite index):**

```sql
-- Single composite index (leftmost column first for equality checks)
create index orders_status_created_idx on orders (status, created_at);

-- Query uses one efficient index scan
select * from orders where status = 'pending' and created_at > '2024-01-01';
```

**Column order matters** - place equality columns first, range columns last:

```sql
-- Good: status (=) before created_at (>)
create index idx on orders (status, created_at);

-- Works for: WHERE status = 'pending'
-- Works for: WHERE status = 'pending' AND created_at > '2024-01-01'
-- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule)
```

Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html)
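A side benefit worth noting, sketched against the same composite index: with an equality filter on the leading column, the index can also supply the sort order on the trailing column, avoiding an explicit sort step:

```sql
-- The (status, created_at) index returns rows already ordered by created_at
-- within a single status value, so no separate Sort node is needed
select * from orders
where status = 'pending'
order by created_at desc
limit 20;
```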
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-covering-indexes.md
ADDED

@@ -0,0 +1,40 @@
---
title: Use Covering Indexes to Avoid Table Lookups
impact: MEDIUM-HIGH
impactDescription: 2-5x faster queries by eliminating heap fetches
tags: indexes, covering-index, include, index-only-scan
---

## Use Covering Indexes to Avoid Table Lookups

Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely.

**Incorrect (index scan + heap fetch):**

```sql
create index users_email_idx on users (email);

-- Must fetch name and created_at from table heap
select email, name, created_at from users where email = 'user@example.com';
```

**Correct (index-only scan with INCLUDE):**

```sql
-- Include non-searchable columns in the index
create index users_email_idx on users (email) include (name, created_at);

-- All columns served from index, no table access needed
select email, name, created_at from users where email = 'user@example.com';
```

Use INCLUDE for columns you SELECT but don't filter on:

```sql
-- Searching by status, but also need customer_id and total
create index orders_status_idx on orders (status) include (customer_id, total);

select status, customer_id, total from orders where status = 'shipped';
```

Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html)
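A sketch of how to confirm the planner is actually using an index-only scan; keeping `Heap Fetches` near zero depends on the visibility map, so recently vacuumed tables matter:

```sql
explain (analyze)
select email, name, created_at from users where email = 'user@example.com';
-- Look for: Index Only Scan using users_email_idx ... Heap Fetches: 0
-- Many heap fetches usually mean the table needs a VACUUM to refresh the visibility map
```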
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-index-types.md
ADDED

@@ -0,0 +1,45 @@
---
title: Choose the Right Index Type for Your Data
impact: HIGH
impactDescription: 10-100x improvement with correct index type
tags: indexes, btree, gin, brin, hash, index-types
---

## Choose the Right Index Type for Your Data

Different index types excel at different query patterns. The default B-tree isn't always optimal.

**Incorrect (B-tree for JSONB containment):**

```sql
-- B-tree cannot optimize containment operators
create index products_attrs_idx on products (attributes);
select * from products where attributes @> '{"color": "red"}';
-- Full table scan - B-tree doesn't support @> operator
```

**Correct (GIN for JSONB):**

```sql
-- GIN supports @>, ?, ?&, ?| operators
create index products_attrs_idx on products using gin (attributes);
select * from products where attributes @> '{"color": "red"}';
```

Index type guide:

```sql
-- B-tree (default): =, <, >, BETWEEN, IN, IS NULL
create index users_created_idx on users (created_at);

-- GIN: arrays, JSONB, full-text search
create index posts_tags_idx on posts using gin (tags);

-- BRIN: large time-series tables (10-100x smaller)
create index events_time_idx on events using brin (created_at);

-- Hash: equality-only (slightly faster than B-tree for =)
create index sessions_token_idx on sessions using hash (token);
```

Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html)
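For the full-text case mentioned in the guide, the GIN index is usually built over a `tsvector` expression; a minimal sketch assuming a `posts.body` text column:

```sql
-- Expression index for full-text search; queries must use the same expression
create index posts_body_fts_idx on posts using gin (to_tsvector('english', body));

select * from posts
where to_tsvector('english', body) @@ to_tsquery('english', 'index & performance');
```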
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-missing-indexes.md
ADDED

@@ -0,0 +1,43 @@
---
title: Add Indexes on WHERE and JOIN Columns
impact: CRITICAL
impactDescription: 100-1000x faster queries on large tables
tags: indexes, performance, sequential-scan, query-optimization
---

## Add Indexes on WHERE and JOIN Columns

Queries filtering or joining on unindexed columns cause full table scans, which keep getting slower as the table grows.

**Incorrect (sequential scan on large table):**

```sql
-- No index on customer_id causes full table scan
select * from orders where customer_id = 123;

-- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85)
```

**Correct (index scan):**

```sql
-- Create index on frequently filtered column
create index orders_customer_id_idx on orders (customer_id);

select * from orders where customer_id = 123;

-- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85)
```

For JOIN columns, always index the foreign key side:

```sql
-- Index the referencing column
create index orders_customer_id_idx on orders (customer_id);

select c.name, o.total
from customers c
join orders o on o.customer_id = c.id;
```

Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization)
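To find candidates across the whole schema rather than one query at a time, `pg_stat_user_tables` tracks sequential versus index scans; a sketch (the row-count threshold is arbitrary):

```sql
-- Large tables read mostly by sequential scan are likely missing an index
select relname, seq_scan, idx_scan, n_live_tup
from pg_stat_user_tables
where seq_scan > coalesce(idx_scan, 0)
  and n_live_tup > 10000
order by seq_scan desc;
```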
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-partial-indexes.md
ADDED

@@ -0,0 +1,45 @@
---
title: Use Partial Indexes for Filtered Queries
impact: HIGH
impactDescription: 5-20x smaller indexes, faster writes and queries
tags: indexes, partial-index, query-optimization, storage
---

## Use Partial Indexes for Filtered Queries

Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition.

**Incorrect (full index includes irrelevant rows):**

```sql
-- Index includes all rows, even soft-deleted ones
create index users_email_idx on users (email);

-- Query always filters active users
select * from users where email = 'user@example.com' and deleted_at is null;
```

**Correct (partial index matches query filter):**

```sql
-- Index only includes active users
create index users_active_email_idx on users (email)
where deleted_at is null;

-- Query uses the smaller, faster index
select * from users where email = 'user@example.com' and deleted_at is null;
```

Common use cases for partial indexes:

```sql
-- Only pending orders (status rarely changes once completed)
create index orders_pending_idx on orders (created_at)
where status = 'pending';

-- Only non-null values
create index products_sku_idx on products (sku)
where sku is not null;
```

Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html)
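Partial indexes can also enforce uniqueness over just the subset of rows that matters; a sketch extending the soft-delete example above:

```sql
-- Enforce unique emails only among non-deleted users
create unique index users_active_email_unique_idx on users (email)
where deleted_at is null;
```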
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-data-types.md
ADDED

@@ -0,0 +1,46 @@
---
title: Choose Appropriate Data Types
impact: HIGH
impactDescription: 50% storage reduction, faster comparisons
tags: data-types, schema, storage, performance
---

## Choose Appropriate Data Types

Using the right data types reduces storage, improves query performance, and prevents bugs.

**Incorrect (wrong data types):**

```sql
create table users (
  id int,               -- Will overflow at 2.1 billion
  email varchar(255),   -- Unnecessary length limit
  created_at timestamp, -- Missing timezone info
  is_active varchar(5), -- String for boolean
  price varchar(20)     -- String for numeric
);
```

**Correct (appropriate data types):**

```sql
create table users (
  id bigint generated always as identity primary key, -- 9 quintillion max
  email text,                     -- No artificial limit, same performance as varchar
  created_at timestamptz,         -- Always store timezone-aware timestamps
  is_active boolean default true, -- 1 byte vs variable string length
  price numeric(10,2)             -- Exact decimal arithmetic
);
```

Key guidelines:

```sql
-- IDs: use bigint, not int (future-proofing)
-- Strings: use text, not varchar(n) unless constraint needed
-- Time: use timestamptz, not timestamp
-- Money: use numeric, not float (precision matters)
-- Enums: use text with check constraint or create enum type
```

Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html)
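A sketch of the "text with check constraint" option from the guidelines; the column and allowed values are illustrative:

```sql
create table orders (
  id bigint generated always as identity primary key,
  status text not null default 'pending'
    check (status in ('pending', 'paid', 'shipped', 'cancelled'))
);
```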
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-foreign-key-indexes.md
ADDED

@@ -0,0 +1,59 @@
---
title: Index Foreign Key Columns
impact: HIGH
impactDescription: 10-100x faster JOINs and CASCADE operations
tags: foreign-key, indexes, joins, schema
---

## Index Foreign Key Columns

Postgres does not automatically index foreign key columns. Missing indexes cause slow JOINs and CASCADE operations.

**Incorrect (unindexed foreign key):**

```sql
create table orders (
  id bigint generated always as identity primary key,
  customer_id bigint references customers(id) on delete cascade,
  total numeric(10,2)
);

-- No index on customer_id!
-- JOINs and ON DELETE CASCADE both require full table scan
select * from orders where customer_id = 123; -- Seq Scan
delete from customers where id = 123; -- Locks table, scans all orders
```

**Correct (indexed foreign key):**

```sql
create table orders (
  id bigint generated always as identity primary key,
  customer_id bigint references customers(id) on delete cascade,
  total numeric(10,2)
);

-- Always index the FK column
create index orders_customer_id_idx on orders (customer_id);

-- Now JOINs and cascades are fast
select * from orders where customer_id = 123; -- Index Scan
delete from customers where id = 123; -- Uses index, fast cascade
```

Find missing FK indexes:

```sql
select
  conrelid::regclass as table_name,
  a.attname as fk_column
from pg_constraint c
join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey)
where c.contype = 'f'
and not exists (
  select 1 from pg_index i
  where i.indrelid = c.conrelid and a.attnum = any(i.indkey)
);
```

Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK)
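When the missing index has to be added to a large, busy table, building it concurrently avoids blocking writes; a sketch (note that `create index concurrently` cannot run inside a transaction block):

```sql
-- Build the FK index without taking a write-blocking lock
create index concurrently orders_customer_id_idx on orders (customer_id);
```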
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-lowercase-identifiers.md
ADDED

@@ -0,0 +1,55 @@
---
title: Use Lowercase Identifiers for Compatibility
impact: MEDIUM
impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants
tags: naming, identifiers, case-sensitivity, schema, conventions
---

## Use Lowercase Identifiers for Compatibility

PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them.

**Incorrect (mixed-case identifiers):**

```sql
-- Quoted identifiers preserve case but require quotes everywhere
CREATE TABLE "Users" (
  "userId" bigint PRIMARY KEY,
  "firstName" text,
  "lastName" text
);

-- Must always quote or queries fail
SELECT "firstName" FROM "Users" WHERE "userId" = 1;

-- This fails - Users becomes users without quotes
SELECT firstName FROM Users;
-- ERROR: relation "users" does not exist
```

**Correct (lowercase snake_case):**

```sql
-- Unquoted lowercase identifiers are portable and tool-friendly
CREATE TABLE users (
  user_id bigint PRIMARY KEY,
  first_name text,
  last_name text
);

-- Works without quotes, recognized by all tools
SELECT first_name FROM users WHERE user_id = 1;
```

Common sources of mixed-case identifiers:

```sql
-- ORMs often generate quoted camelCase - configure them to use snake_case
-- Migrations from other databases may preserve original casing
-- Some GUI tools quote identifiers by default - disable this

-- If stuck with mixed-case, create views as a compatibility layer
CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users";
```

Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS)
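To audit an existing schema for identifiers that would need quoting, a sketch against `information_schema`, limited to the `public` schema as an assumption:

```sql
-- Tables and columns containing uppercase characters
SELECT table_name, column_name
FROM information_schema.columns
WHERE table_schema = 'public'
  AND (table_name <> lower(table_name) OR column_name <> lower(column_name))
ORDER BY table_name, column_name;
```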