opencodekit 0.15.8 → 0.15.10
- package/dist/index.js +9 -7
- package/dist/template/.opencode/AGENTS.md +7 -2
- package/dist/template/.opencode/agent/general.md +1 -0
- package/dist/template/.opencode/agent/looker.md +1 -0
- package/dist/template/.opencode/agent/painter.md +218 -0
- package/dist/template/.opencode/agent/plan.md +3 -0
- package/dist/template/.opencode/agent/vision.md +1 -0
- package/dist/template/.opencode/command/edit-image.md +1 -2
- package/dist/template/.opencode/command/generate-icon.md +1 -2
- package/dist/template/.opencode/command/generate-image.md +1 -2
- package/dist/template/.opencode/command/generate-pattern.md +1 -2
- package/dist/template/.opencode/command/generate-storyboard.md +1 -2
- package/dist/template/.opencode/command/restore-image.md +1 -2
- package/dist/template/.opencode/dcp.jsonc +12 -10
- package/dist/template/.opencode/opencode.json +707 -704
- package/dist/template/.opencode/package.json +1 -1
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/AGENTS.md +1490 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/SKILL.md +57 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-full-text-search.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-jsonb-indexing.md +49 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-idle-timeout.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-limits.md +44 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-pooling.md +41 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-prepared-statements.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-batch-inserts.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-n-plus-one.md +53 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-pagination.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-upsert.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-advisory.md +56 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-deadlock-prevention.md +68 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-short-transactions.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-skip-locked.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-explain-analyze.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-pg-stat-statements.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/monitor-vacuum-analyze.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-composite-indexes.md +44 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-covering-indexes.md +40 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-index-types.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-missing-indexes.md +43 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/query-partial-indexes.md +45 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-data-types.md +46 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-foreign-key-indexes.md +59 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-lowercase-identifiers.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-partitioning.md +55 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/schema-primary-keys.md +61 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-privileges.md +54 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-rls-basics.md +50 -0
- package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/security-rls-performance.md +57 -0
- package/package.json +1 -1
package/dist/template/.opencode/skill/supabase-postgres-best-practices/SKILL.md
ADDED
@@ -0,0 +1,57 @@
+---
+name: supabase-postgres-best-practices
+description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations.
+license: MIT
+metadata:
+  author: supabase
+  version: "1.0.0"
+---
+
+# Supabase Postgres Best Practices
+
+Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design.
+
+## When to Apply
+
+Reference these guidelines when:
+- Writing SQL queries or designing schemas
+- Implementing indexes or query optimization
+- Reviewing database performance issues
+- Configuring connection pooling or scaling
+- Optimizing for Postgres-specific features
+- Working with Row-Level Security (RLS)
+
+## Rule Categories by Priority
+
+| Priority | Category | Impact | Prefix |
+|----------|----------|--------|--------|
+| 1 | Query Performance | CRITICAL | `query-` |
+| 2 | Connection Management | CRITICAL | `conn-` |
+| 3 | Security & RLS | CRITICAL | `security-` |
+| 4 | Schema Design | HIGH | `schema-` |
+| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` |
+| 6 | Data Access Patterns | MEDIUM | `data-` |
+| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` |
+| 8 | Advanced Features | LOW | `advanced-` |
+
+## How to Use
+
+Read individual rule files for detailed explanations and SQL examples:
+
+```
+rules/query-missing-indexes.md
+rules/query-partial-indexes.md
+rules/_sections.md
+```
+
+Each rule file contains:
+- Brief explanation of why it matters
+- Incorrect SQL example with explanation
+- Correct SQL example with explanation
+- Optional EXPLAIN output or metrics
+- Additional context and references
+- Supabase-specific notes (when applicable)
+
+## Full Compiled Document
+
+For the complete guide with all rules expanded: `AGENTS.md`
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-full-text-search.md
ADDED
@@ -0,0 +1,55 @@
+---
+title: Use tsvector for Full-Text Search
+impact: MEDIUM
+impactDescription: 100x faster than LIKE, with ranking support
+tags: full-text-search, tsvector, gin, search
+---
+
+## Use tsvector for Full-Text Search
+
+LIKE with leading wildcards can't use B-tree indexes. Full-text search with tsvector is orders of magnitude faster.
+
+**Incorrect (LIKE pattern matching):**
+
+```sql
+-- Cannot use index, scans all rows
+select * from articles where content like '%postgresql%';
+
+-- Case-insensitive makes it worse
+select * from articles where lower(content) like '%postgresql%';
+```
+
+**Correct (full-text search with tsvector):**
+
+```sql
+-- Add tsvector column and index
+alter table articles add column search_vector tsvector
+  generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored;
+
+create index articles_search_idx on articles using gin (search_vector);
+
+-- Fast full-text search
+select * from articles
+where search_vector @@ to_tsquery('english', 'postgresql & performance');
+
+-- With ranking
+select *, ts_rank(search_vector, query) as rank
+from articles, to_tsquery('english', 'postgresql') query
+where search_vector @@ query
+order by rank desc;
+```
+
+Search multiple terms:
+
+```sql
+-- AND: both terms required
+to_tsquery('postgresql & performance')
+
+-- OR: either term
+to_tsquery('postgresql | mysql')
+
+-- Prefix matching
+to_tsquery('post:*')
+```
+
+Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search)
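A quick way to confirm the GIN index is actually picked up is an `EXPLAIN` check. A minimal sketch, reusing the `articles` table and `articles_search_idx` from the hunk above; exact plan nodes will vary with table size and statistics:

```sql
explain (analyze, buffers)
select * from articles
where search_vector @@ to_tsquery('english', 'postgresql & performance');
-- On a populated table, expect a Bitmap Index Scan on articles_search_idx;
-- a Seq Scan here means the index is missing or the planner found it cheaper.
```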
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/advanced-jsonb-indexing.md
ADDED
@@ -0,0 +1,49 @@
+---
+title: Index JSONB Columns for Efficient Querying
+impact: MEDIUM
+impactDescription: 10-100x faster JSONB queries with proper indexing
+tags: jsonb, gin, indexes, json
+---
+
+## Index JSONB Columns for Efficient Querying
+
+JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries.
+
+**Incorrect (no index on JSONB):**
+
+```sql
+create table products (
+  id bigint primary key,
+  attributes jsonb
+);
+
+-- Full table scan for every query
+select * from products where attributes @> '{"color": "red"}';
+select * from products where attributes->>'brand' = 'Nike';
+```
+
+**Correct (GIN index for JSONB):**
+
+```sql
+-- GIN index for containment operators (@>, ?, ?&, ?|)
+create index products_attrs_gin on products using gin (attributes);
+
+-- Now containment queries use the index
+select * from products where attributes @> '{"color": "red"}';
+
+-- For specific key lookups, use expression index
+create index products_brand_idx on products ((attributes->>'brand'));
+select * from products where attributes->>'brand' = 'Nike';
+```
+
+Choose the right operator class:
+
+```sql
+-- jsonb_ops (default): supports all operators, larger index
+create index idx1 on products using gin (attributes);
+
+-- jsonb_path_ops: only @> operator, but 2-3x smaller index
+create index idx2 on products using gin (attributes jsonb_path_ops);
+```
+
+Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING)
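Worth noting when choosing between the two operator classes above: the key-existence operators are only accelerated by the default `jsonb_ops` index. A small sketch against the same `products` table; if you only ever query with `@>`, `jsonb_path_ops` is the cheaper choice:

```sql
-- Existence operators use the default jsonb_ops GIN index,
-- but NOT a jsonb_path_ops index (that class only supports @>)
select * from products where attributes ? 'discount';              -- key exists
select * from products where attributes ?| array['color', 'size']; -- any key exists
```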
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-idle-timeout.md
ADDED
@@ -0,0 +1,46 @@
+---
+title: Configure Idle Connection Timeouts
+impact: HIGH
+impactDescription: Reclaim 30-50% of connection slots from idle clients
+tags: connections, timeout, idle, resource-management
+---
+
+## Configure Idle Connection Timeouts
+
+Idle connections waste resources. Configure timeouts to automatically reclaim them.
+
+**Incorrect (connections held indefinitely):**
+
+```sql
+-- No timeout configured
+show idle_in_transaction_session_timeout; -- 0 (disabled)
+
+-- Connections stay open forever, even when idle
+select pid, state, state_change, query
+from pg_stat_activity
+where state = 'idle in transaction';
+-- Shows transactions idle for hours, holding locks
+```
+
+**Correct (automatic cleanup of idle connections):**
+
+```sql
+-- Terminate connections idle in transaction after 30 seconds
+alter system set idle_in_transaction_session_timeout = '30s';
+
+-- Terminate completely idle connections after 10 minutes
+alter system set idle_session_timeout = '10min';
+
+-- Reload configuration
+select pg_reload_conf();
+```
+
+For pooled connections, configure at the pooler level:
+
+```ini
+# pgbouncer.ini
+server_idle_timeout = 60
+client_idle_timeout = 300
+```
+
+Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT)
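For backends that are already stuck idle in transaction, a one-off cleanup with `pg_terminate_backend` complements the timeouts above. A sketch; the 5-minute cutoff is an arbitrary illustration:

```sql
-- One-off cleanup: kill sessions that have been idle in transaction too long
select pg_terminate_backend(pid)
from pg_stat_activity
where state = 'idle in transaction'
  and state_change < now() - interval '5 minutes';
```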
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-limits.md
ADDED
@@ -0,0 +1,44 @@
+---
+title: Set Appropriate Connection Limits
+impact: CRITICAL
+impactDescription: Prevent database crashes and memory exhaustion
+tags: connections, max-connections, limits, stability
+---
+
+## Set Appropriate Connection Limits
+
+Too many connections exhaust memory and degrade performance. Set limits based on available resources.
+
+**Incorrect (unlimited or excessive connections):**
+
+```sql
+-- Default max_connections = 100, but often increased blindly
+show max_connections; -- 500 (way too high for 4GB RAM)
+
+-- Each connection uses 1-3MB RAM
+-- 500 connections * 2MB = 1GB just for connections!
+-- Out of memory errors under load
+```
+
+**Correct (calculate based on resources):**
+
+```sql
+-- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved
+-- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max
+-- But practically, 100-200 is better for query performance
+
+-- Recommended settings for 4GB RAM
+alter system set max_connections = 100;
+
+-- Also set work_mem appropriately
+-- work_mem * max_connections should not exceed 25% of RAM
+alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max
+```
+
+Monitor connection usage:
+
+```sql
+select count(*), state from pg_stat_activity group by state;
+```
+
+Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management)
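To sanity-check the memory math above against a live instance, the current values can be read from `pg_settings`. A sketch; `boot_val` shows the compiled-in default for comparison:

```sql
-- Compare current values against the defaults before tuning
select name, setting, unit, boot_val
from pg_settings
where name in ('max_connections', 'work_mem');
```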
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-pooling.md
ADDED
@@ -0,0 +1,41 @@
+---
+title: Use Connection Pooling for All Applications
+impact: CRITICAL
+impactDescription: Handle 10-100x more concurrent users
+tags: connection-pooling, pgbouncer, performance, scalability
+---
+
+## Use Connection Pooling for All Applications
+
+Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load.
+
+**Incorrect (new connection per request):**
+
+```sql
+-- Each request creates a new connection
+-- Application code: db.connect() per request
+-- Result: 500 concurrent users = 500 connections = crashed database
+
+-- Check current connections
+select count(*) from pg_stat_activity; -- 487 connections!
+```
+
+**Correct (connection pooling):**
+
+```sql
+-- Use a pooler like PgBouncer between app and database
+-- Application connects to pooler, pooler reuses a small pool to Postgres
+
+-- Configure pool_size based on: (CPU cores * 2) + spindle_count
+-- Example for 4 cores: pool_size = 10
+
+-- Result: 500 concurrent users share 10 actual connections
+select count(*) from pg_stat_activity; -- 10 connections
+```
+
+Pool modes:
+
+- **Transaction mode**: connection returned after each transaction (best for most apps)
+- **Session mode**: connection held for entire session (needed for prepared statements, temp tables)
+
+Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler)
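To verify the pooler is actually containing the server-side connection count, watching `pg_stat_activity` by state is a reasonable check. A sketch; the `backend_type = 'client backend'` filter excludes autovacuum and other internal workers:

```sql
-- Real server connections and what they are doing; with transaction-mode
-- pooling this should hover near pool_size even under heavy client load
select state, count(*)
from pg_stat_activity
where backend_type = 'client backend'
group by state
order by count(*) desc;
```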
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/conn-prepared-statements.md
ADDED
@@ -0,0 +1,46 @@
+---
+title: Use Prepared Statements Correctly with Pooling
+impact: HIGH
+impactDescription: Avoid prepared statement conflicts in pooled environments
+tags: prepared-statements, connection-pooling, transaction-mode
+---
+
+## Use Prepared Statements Correctly with Pooling
+
+Prepared statements are tied to individual database connections. In transaction-mode pooling, connections are shared, causing conflicts.
+
+**Incorrect (named prepared statements with transaction pooling):**
+
+```sql
+-- Named prepared statement
+prepare get_user as select * from users where id = $1;
+
+-- In transaction-mode pooling, the next request may get a different connection
+execute get_user(123);
+-- ERROR: prepared statement "get_user" does not exist
+```
+
+**Correct (use unnamed statements or session mode):**
+
+```sql
+-- Option 1: Use unnamed prepared statements (most ORMs do this automatically)
+-- The query is prepared and executed in a single protocol message
+
+-- Option 2: Deallocate after use in transaction mode
+prepare get_user as select * from users where id = $1;
+execute get_user(123);
+deallocate get_user;
+
+-- Option 3: Use session mode pooling (port 5432 vs 6543)
+-- Connection is held for entire session, prepared statements persist
+```
+
+Check your driver settings:
+
+```sql
+-- Many drivers use prepared statements by default
+-- Node.js pg: { prepare: false } to disable
+-- JDBC: prepareThreshold=0 to disable
+```
+
+Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes)
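When debugging the "does not exist" error above, it can help to see which named statements the current session actually holds. A sketch using the built-in `pg_prepared_statements` view:

```sql
-- List named prepared statements owned by this session
prepare get_user as select * from users where id = $1;
select name, statement, prepare_time from pg_prepared_statements;
-- Through a transaction-mode pooler this list can differ between requests,
-- which is exactly why named statements break there.
```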
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-batch-inserts.md
ADDED
@@ -0,0 +1,54 @@
+---
+title: Batch INSERT Statements for Bulk Data
+impact: MEDIUM
+impactDescription: 10-50x faster bulk inserts
+tags: batch, insert, bulk, performance, copy
+---
+
+## Batch INSERT Statements for Bulk Data
+
+Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY.
+
+**Incorrect (individual inserts):**
+
+```sql
+-- Each insert is a separate transaction and round trip
+insert into events (user_id, action) values (1, 'click');
+insert into events (user_id, action) values (1, 'view');
+insert into events (user_id, action) values (2, 'click');
+-- ... 1000 more individual inserts
+
+-- 1000 inserts = 1000 round trips = slow
+```
+
+**Correct (batch insert):**
+
+```sql
+-- Multiple rows in single statement
+insert into events (user_id, action) values
+  (1, 'click'),
+  (1, 'view'),
+  (2, 'click'),
+  -- ... up to ~1000 rows per batch
+  (999, 'view');
+
+-- One round trip for 1000 rows
+```
+
+For large imports, use COPY:
+
+```sql
+-- COPY is fastest for bulk loading
+copy events (user_id, action, created_at)
+from '/path/to/data.csv'
+with (format csv, header true);
+
+-- Or from stdin in application
+copy events (user_id, action) from stdin with (format csv);
+1,click
+1,view
+2,click
+\.
+```
+
+Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html)
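A middle ground between multi-row VALUES and COPY, when the rows come from application parameters, is `unnest` over arrays: one parameterized statement and one round trip regardless of row count. A sketch against the same `events` table; the array bindings are illustrative:

```sql
-- Application binds two equal-length arrays: user ids and actions
insert into events (user_id, action)
select *
from unnest($1::bigint[], $2::text[]) as t(user_id, action);
-- e.g. $1 = {1,1,2}, $2 = {click,view,click}
```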
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-n-plus-one.md
ADDED
@@ -0,0 +1,53 @@
+---
+title: Eliminate N+1 Queries with Batch Loading
+impact: MEDIUM-HIGH
+impactDescription: 10-100x fewer database round trips
+tags: n-plus-one, batch, performance, queries
+---
+
+## Eliminate N+1 Queries with Batch Loading
+
+N+1 queries execute one query per item in a loop. Batch them into a single query using arrays or JOINs.
+
+**Incorrect (N+1 queries):**
+
+```sql
+-- First query: get all users
+select id from users where active = true; -- Returns 100 IDs
+
+-- Then N queries, one per user
+select * from orders where user_id = 1;
+select * from orders where user_id = 2;
+select * from orders where user_id = 3;
+-- ... 97 more queries!
+
+-- Total: 101 round trips to database
+```
+
+**Correct (single batch query):**
+
+```sql
+-- Collect IDs and query once with ANY
+select * from orders where user_id = any(array[1, 2, 3, ...]);
+
+-- Or use JOIN instead of loop
+select u.id, u.name, o.*
+from users u
+left join orders o on o.user_id = u.id
+where u.active = true;
+
+-- Total: 1 round trip
+```
+
+Application pattern:
+
+```sql
+-- Instead of looping in application code:
+-- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id)
+
+-- Pass array parameter:
+select * from orders where user_id = any($1::bigint[]);
+-- Application passes: [1, 2, 3, 4, 5, ...]
+```
+
+Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization)
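When the application wants each user's orders nested rather than a flat join, the batch can also be collapsed with JSON aggregation, still in one round trip. A sketch assuming the same `users`/`orders` shape as above:

```sql
-- One row per user, orders aggregated into a JSON array
select u.id, u.name,
       coalesce(
         jsonb_agg(to_jsonb(o)) filter (where o.id is not null),
         '[]'::jsonb
       ) as orders
from users u
left join orders o on o.user_id = u.id
where u.active = true
group by u.id, u.name;
```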
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-pagination.md
ADDED
@@ -0,0 +1,50 @@
+---
+title: Use Cursor-Based Pagination Instead of OFFSET
+impact: MEDIUM-HIGH
+impactDescription: Consistent O(1) performance regardless of page depth
+tags: pagination, cursor, keyset, offset, performance
+---
+
+## Use Cursor-Based Pagination Instead of OFFSET
+
+OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1).
+
+**Incorrect (OFFSET pagination):**
+
+```sql
+-- Page 1: scans 20 rows
+select * from products order by id limit 20 offset 0;
+
+-- Page 100: scans 2000 rows to skip 1980
+select * from products order by id limit 20 offset 1980;
+
+-- Page 10000: scans 200,000 rows!
+select * from products order by id limit 20 offset 199980;
+```
+
+**Correct (cursor/keyset pagination):**
+
+```sql
+-- Page 1: get first 20
+select * from products order by id limit 20;
+-- Application stores last_id = 20
+
+-- Page 2: start after last ID
+select * from products where id > 20 order by id limit 20;
+-- Uses index, always fast regardless of page depth
+
+-- Page 10000: same speed as page 1
+select * from products where id > 199980 order by id limit 20;
+```
+
+For multi-column sorting:
+
+```sql
+-- Cursor must include all sort columns
+select * from products
+where (created_at, id) > ('2024-01-15 10:00:00', 12345)
+order by created_at, id
+limit 20;
+```
+
+Reference: [Pagination](https://supabase.com/docs/guides/database/pagination)
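Keyset pagination is only O(1) when an index matches the sort order; for the multi-column cursor above that means a composite index over the same columns. A sketch; the index name is illustrative:

```sql
-- Composite index matching the (created_at, id) cursor and ORDER BY
create index products_created_at_id_idx on products (created_at, id);
-- The row comparison (created_at, id) > (...) can then be satisfied
-- by an index range scan instead of a sort.
```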
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/data-upsert.md
ADDED
@@ -0,0 +1,50 @@
+---
+title: Use UPSERT for Insert-or-Update Operations
+impact: MEDIUM
+impactDescription: Atomic operation, eliminates race conditions
+tags: upsert, on-conflict, insert, update
+---
+
+## Use UPSERT for Insert-or-Update Operations
+
+Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts.
+
+**Incorrect (check-then-insert race condition):**
+
+```sql
+-- Race condition: two requests check simultaneously
+select * from settings where user_id = 123 and key = 'theme';
+-- Both find nothing
+
+-- Both try to insert
+insert into settings (user_id, key, value) values (123, 'theme', 'dark');
+-- One succeeds, one fails with duplicate key error!
+```
+
+**Correct (atomic UPSERT):**
+
+```sql
+-- Single atomic operation
+insert into settings (user_id, key, value)
+values (123, 'theme', 'dark')
+on conflict (user_id, key)
+do update set value = excluded.value, updated_at = now();
+
+-- Returns the inserted/updated row
+insert into settings (user_id, key, value)
+values (123, 'theme', 'dark')
+on conflict (user_id, key)
+do update set value = excluded.value
+returning *;
+```
+
+Insert-or-ignore pattern:
+
+```sql
+-- Insert only if not exists (no update)
+insert into page_views (page_id, user_id)
+values (1, 123)
+on conflict (page_id, user_id) do nothing;
+```
+
+Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT)
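ON CONFLICT composes with multi-row VALUES, so a whole batch can be upserted atomically in one statement. A sketch reusing the `settings` table from the rule above:

```sql
-- Batch upsert: each row independently inserts or updates
insert into settings (user_id, key, value)
values
  (123, 'theme', 'dark'),
  (123, 'language', 'en'),
  (456, 'theme', 'light')
on conflict (user_id, key)
do update set value = excluded.value;
```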
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-advisory.md
ADDED
@@ -0,0 +1,56 @@
+---
+title: Use Advisory Locks for Application-Level Locking
+impact: MEDIUM
+impactDescription: Efficient coordination without row-level lock overhead
+tags: advisory-locks, coordination, application-locks
+---
+
+## Use Advisory Locks for Application-Level Locking
+
+Advisory locks provide application-level coordination without requiring database rows to lock.
+
+**Incorrect (creating rows just for locking):**
+
+```sql
+-- Creating dummy rows to lock on
+create table resource_locks (
+  resource_name text primary key
+);
+
+insert into resource_locks values ('report_generator');
+
+-- Lock by selecting the row
+select * from resource_locks where resource_name = 'report_generator' for update;
+```
+
+**Correct (advisory locks):**
+
+```sql
+-- Session-level advisory lock (released on disconnect or unlock)
+select pg_advisory_lock(hashtext('report_generator'));
+-- ... do exclusive work ...
+select pg_advisory_unlock(hashtext('report_generator'));
+
+-- Transaction-level lock (released on commit/rollback)
+begin;
+select pg_advisory_xact_lock(hashtext('daily_report'));
+-- ... do work ...
+commit; -- Lock automatically released
+```
+
+Try-lock for non-blocking operations:
+
+```sql
+-- Returns immediately with true/false instead of waiting
+select pg_try_advisory_lock(hashtext('resource_name'));
+
+-- Use in application (pseudocode):
+-- if (acquired) {
+--   do work, then release:
+--   select pg_advisory_unlock(hashtext('resource_name'));
+-- } else {
+--   skip or retry later
+-- }
+```
+
+Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS)
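One caveat with the `hashtext` pattern above: it maps arbitrary strings onto a single 32-bit key space, so unrelated lock names can collide. The two-key form gives a crude namespace. A sketch, where `42` is an arbitrary application-chosen class id:

```sql
-- Two-key form: (class id, object id) reduces accidental collisions
begin;
select pg_advisory_xact_lock(42, hashtext('daily_report'));
-- ... do work ...
commit;
```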
package/dist/template/.opencode/skill/supabase-postgres-best-practices/rules/lock-deadlock-prevention.md
ADDED
@@ -0,0 +1,68 @@
+---
+title: Prevent Deadlocks with Consistent Lock Ordering
+impact: MEDIUM-HIGH
+impactDescription: Eliminate deadlock errors, improve reliability
+tags: deadlocks, locking, transactions, ordering
+---
+
+## Prevent Deadlocks with Consistent Lock Ordering
+
+Deadlocks occur when transactions lock resources in different orders. Always
+acquire locks in a consistent order.
+
+**Incorrect (inconsistent lock ordering):**
+
+```sql
+-- Transaction A                     -- Transaction B
+begin;                               begin;
+update accounts                      update accounts
+set balance = balance - 100          set balance = balance - 50
+where id = 1;                        where id = 2;  -- B locks row 2
+
+update accounts                      update accounts
+set balance = balance + 100          set balance = balance + 50
+where id = 2;  -- A waits for B      where id = 1;  -- B waits for A
+
+-- DEADLOCK! Both waiting for each other
+```
+
+**Correct (lock rows in consistent order first):**
+
+```sql
+-- Explicitly acquire locks in ID order before updating
+begin;
+select * from accounts where id in (1, 2) order by id for update;
+
+-- Now perform updates in any order - locks already held
+update accounts set balance = balance - 100 where id = 1;
+update accounts set balance = balance + 100 where id = 2;
+commit;
+```
+
+Alternative: use a single statement to update atomically:

+```sql
+-- Single statement acquires all locks atomically
+begin;
+update accounts
+set balance = balance + case id
+  when 1 then -100
+  when 2 then 100
+end
+where id in (1, 2);
+commit;
+```
+
+Detect deadlocks in logs:
+
+```sql
+-- Check for recent deadlocks
+select * from pg_stat_database where deadlocks > 0;
+
+-- Enable deadlock logging
+set log_lock_waits = on;
+set deadlock_timeout = '1s';
+```
+
+Reference:
+[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS)
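Where waiting is unacceptable, the explicit lock acquisition above can also fail fast instead of queuing. A sketch using `NOWAIT` (errors immediately) and `lock_timeout` (errors after a bound); the 2-second bound is illustrative:

```sql
-- Fail immediately if another transaction already holds the locks
begin;
select * from accounts where id in (1, 2) order by id for update nowait;
-- errors with "could not obtain lock on row ..." if contended
rollback;

-- Or bound the wait instead of failing instantly
begin;
set local lock_timeout = '2s';
select * from accounts where id in (1, 2) order by id for update;
commit;
```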
|