opencastle 0.33.9 → 0.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/init.d.ts.map +1 -1
- package/dist/cli/init.js +39 -17
- package/dist/cli/init.js.map +1 -1
- package/dist/cli/stack-config.d.ts.map +1 -1
- package/dist/cli/stack-config.js +5 -0
- package/dist/cli/stack-config.js.map +1 -1
- package/dist/cli/types.d.ts +1 -1
- package/dist/cli/types.d.ts.map +1 -1
- package/dist/orchestrator/plugins/cloudflare/config.d.ts +3 -0
- package/dist/orchestrator/plugins/cloudflare/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/cloudflare/config.js +23 -0
- package/dist/orchestrator/plugins/cloudflare/config.js.map +1 -0
- package/dist/orchestrator/plugins/coolify/config.d.ts +3 -0
- package/dist/orchestrator/plugins/coolify/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/coolify/config.js +28 -0
- package/dist/orchestrator/plugins/coolify/config.js.map +1 -0
- package/dist/orchestrator/plugins/drizzle/config.d.ts +3 -0
- package/dist/orchestrator/plugins/drizzle/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/drizzle/config.js +15 -0
- package/dist/orchestrator/plugins/drizzle/config.js.map +1 -0
- package/dist/orchestrator/plugins/expo/config.d.ts +3 -0
- package/dist/orchestrator/plugins/expo/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/expo/config.js +23 -0
- package/dist/orchestrator/plugins/expo/config.js.map +1 -0
- package/dist/orchestrator/plugins/index.d.ts.map +1 -1
- package/dist/orchestrator/plugins/index.js +12 -0
- package/dist/orchestrator/plugins/index.js.map +1 -1
- package/dist/orchestrator/plugins/sentry/config.d.ts +3 -0
- package/dist/orchestrator/plugins/sentry/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/sentry/config.js +28 -0
- package/dist/orchestrator/plugins/sentry/config.js.map +1 -0
- package/dist/orchestrator/plugins/stripe/config.d.ts +3 -0
- package/dist/orchestrator/plugins/stripe/config.d.ts.map +1 -0
- package/dist/orchestrator/plugins/stripe/config.js +42 -0
- package/dist/orchestrator/plugins/stripe/config.js.map +1 -0
- package/dist/orchestrator/plugins/types.d.ts +1 -1
- package/dist/orchestrator/plugins/types.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/cli/init.ts +43 -22
- package/src/cli/stack-config.ts +5 -0
- package/src/cli/types.ts +1 -1
- package/src/dashboard/dist/data/convoys/demo-api-v2.json +3 -3
- package/src/dashboard/dist/data/convoys/demo-auth-revamp.json +4 -4
- package/src/dashboard/dist/data/convoys/demo-dashboard-ui.json +12 -12
- package/src/dashboard/dist/data/convoys/demo-data-pipeline.json +3 -3
- package/src/dashboard/dist/data/convoys/demo-deploy-ci.json +1 -1
- package/src/dashboard/dist/data/convoys/demo-docs-update.json +3 -3
- package/src/dashboard/dist/data/convoys/demo-perf-opt.json +4 -4
- package/src/dashboard/node_modules/.vite/deps/_metadata.json +6 -6
- package/src/dashboard/public/data/convoys/demo-api-v2.json +3 -3
- package/src/dashboard/public/data/convoys/demo-auth-revamp.json +4 -4
- package/src/dashboard/public/data/convoys/demo-dashboard-ui.json +12 -12
- package/src/dashboard/public/data/convoys/demo-data-pipeline.json +3 -3
- package/src/dashboard/public/data/convoys/demo-deploy-ci.json +1 -1
- package/src/dashboard/public/data/convoys/demo-docs-update.json +3 -3
- package/src/dashboard/public/data/convoys/demo-perf-opt.json +4 -4
- package/src/orchestrator/customizations/agents/skill-matrix.json +24 -4
- package/src/orchestrator/customizations/agents/skill-matrix.md +5 -0
- package/src/orchestrator/plugins/cloudflare/SKILL.md +111 -0
- package/src/orchestrator/plugins/cloudflare/config.ts +24 -0
- package/src/orchestrator/plugins/cloudflare/references/deployment.md +147 -0
- package/src/orchestrator/plugins/cloudflare/references/storage.md +118 -0
- package/src/orchestrator/plugins/cloudflare/references/workers.md +135 -0
- package/src/orchestrator/plugins/convex/SKILL.md +62 -20
- package/src/orchestrator/plugins/convex/references/auth-auth0.md +116 -0
- package/src/orchestrator/plugins/convex/references/auth-clerk.md +113 -0
- package/src/orchestrator/plugins/convex/references/auth-convex-auth.md +143 -0
- package/src/orchestrator/plugins/convex/references/auth-setup.md +87 -0
- package/src/orchestrator/plugins/convex/references/auth-workos.md +114 -0
- package/src/orchestrator/plugins/convex/references/components-advanced.md +134 -0
- package/src/orchestrator/plugins/convex/references/components.md +171 -0
- package/src/orchestrator/plugins/convex/references/function-budget.md +232 -0
- package/src/orchestrator/plugins/convex/references/hot-path-rules.md +371 -0
- package/src/orchestrator/plugins/convex/references/migrations-component.md +170 -0
- package/src/orchestrator/plugins/convex/references/migrations.md +259 -0
- package/src/orchestrator/plugins/convex/references/occ-conflicts.md +126 -0
- package/src/orchestrator/plugins/convex/references/performance-audit.md +80 -0
- package/src/orchestrator/plugins/convex/references/quickstart.md +176 -0
- package/src/orchestrator/plugins/convex/references/subscription-cost.md +252 -0
- package/src/orchestrator/plugins/coolify/SKILL.md +134 -0
- package/src/orchestrator/plugins/coolify/config.ts +29 -0
- package/src/orchestrator/plugins/coolify/references/applications.md +65 -0
- package/src/orchestrator/plugins/coolify/references/ci-cd-webhooks.md +73 -0
- package/src/orchestrator/plugins/coolify/references/databases-services.md +57 -0
- package/src/orchestrator/plugins/coolify/references/docker-compose.md +121 -0
- package/src/orchestrator/plugins/coolify/references/infrastructure.md +77 -0
- package/src/orchestrator/plugins/drizzle/SKILL.md +123 -0
- package/src/orchestrator/plugins/drizzle/config.ts +16 -0
- package/src/orchestrator/plugins/drizzle/references/migrations.md +112 -0
- package/src/orchestrator/plugins/drizzle/references/query-patterns.md +127 -0
- package/src/orchestrator/plugins/drizzle/references/schema-patterns.md +105 -0
- package/src/orchestrator/plugins/expo/SKILL.md +114 -0
- package/src/orchestrator/plugins/expo/config.ts +24 -0
- package/src/orchestrator/plugins/expo/references/eas-build.md +73 -0
- package/src/orchestrator/plugins/expo/references/native-modules.md +71 -0
- package/src/orchestrator/plugins/expo/references/routing.md +83 -0
- package/src/orchestrator/plugins/index.ts +12 -0
- package/src/orchestrator/plugins/linear/SKILL.md +21 -3
- package/src/orchestrator/plugins/sentry/SKILL.md +94 -0
- package/src/orchestrator/plugins/sentry/config.ts +29 -0
- package/src/orchestrator/plugins/sentry/references/error-patterns.md +112 -0
- package/src/orchestrator/plugins/sentry/references/performance.md +66 -0
- package/src/orchestrator/plugins/sentry/references/sdk-setup.md +108 -0
- package/src/orchestrator/plugins/stripe/SKILL.md +138 -0
- package/src/orchestrator/plugins/stripe/config.ts +43 -0
- package/src/orchestrator/plugins/stripe/references/api-patterns.md +57 -0
- package/src/orchestrator/plugins/stripe/references/projects-setup.md +30 -0
- package/src/orchestrator/plugins/stripe/references/upgrade-guide.md +105 -0
- package/src/orchestrator/plugins/types.ts +1 -1
- package/src/orchestrator/skills/backbone-scaffolding/EXAMPLES.md +1 -1
- package/src/orchestrator/skills/backbone-scaffolding/SKILL.md +32 -16
- package/src/orchestrator/plugins/convex/REFERENCE.md +0 -9
|
@@ -0,0 +1,371 @@
|
|
|
1
|
+
# Hot Path Rules
|
|
2
|
+
|
|
3
|
+
Use these rules when the top-level workflow points to read amplification, denormalization, index rollout, reactive query cost, or invalidation-heavy writes.
|
|
4
|
+
|
|
5
|
+
## Contents
|
|
6
|
+
|
|
7
|
+
- Core Principle
|
|
8
|
+
- Consistency Rule
|
|
9
|
+
- 1. Push Filters To Storage (indexes, migration rule, redundant indexes)
|
|
10
|
+
- 2. Minimize Data Sources (denormalization, fallback rule)
|
|
11
|
+
- 3. Minimize Row Size (digest tables)
|
|
12
|
+
- 4. Skip No-Op Writes
|
|
13
|
+
- 5. Match Consistency To Read Patterns (high-read/low-write, high-read/high-write)
|
|
14
|
+
- Convex-Specific Notes (reactive queries, point-in-time reads, triggers, aggregates, backfills)
|
|
15
|
+
- Verification
|
|
16
|
+
|
|
17
|
+
## Core Principle
|
|
18
|
+
|
|
19
|
+
Every byte read or written multiplies with concurrency.
|
|
20
|
+
|
|
21
|
+
Think:
|
|
22
|
+
|
|
23
|
+
`cost x calls_per_second x 86400`
|
|
24
|
+
|
|
25
|
+
In Convex, every write can also fan out into reactive invalidation, replication work, and downstream sync.
|
|
26
|
+
|
|
27
|
+
## Consistency Rule
|
|
28
|
+
|
|
29
|
+
If you fix a hot-path pattern for one function, audit sibling functions touching the same tables for the same pattern.
|
|
30
|
+
|
|
31
|
+
Do this especially for:
|
|
32
|
+
|
|
33
|
+
- multiple list queries over the same table
|
|
34
|
+
- multiple writers to the same table
|
|
35
|
+
- public browse and search queries over the same records
|
|
36
|
+
- helper functions reused by more than one endpoint
|
|
37
|
+
|
|
38
|
+
## 1. Push Filters To Storage
|
|
39
|
+
|
|
40
|
+
Both JavaScript `.filter()` and the Convex query `.filter()` method after a DB scan mean you already paid for the read. The Convex `.filter()` method has the same performance as filtering in JS; it does not push the predicate to the storage layer. Only `.withIndex()` and `.withSearchIndex()` actually reduce the documents scanned.
|
|
41
|
+
|
|
42
|
+
Prefer:
|
|
43
|
+
|
|
44
|
+
- `.withIndex(...)`
|
|
45
|
+
- `.withSearchIndex(...)` for text search
|
|
46
|
+
- narrower tables
|
|
47
|
+
- summary tables
|
|
48
|
+
|
|
49
|
+
before accepting a scan-plus-filter pattern.
|
|
50
|
+
|
|
51
|
+
```ts
|
|
52
|
+
// Bad: scans then filters in JavaScript
|
|
53
|
+
export const listOpen = query({
|
|
54
|
+
args: {},
|
|
55
|
+
handler: async (ctx) => {
|
|
56
|
+
const tasks = await ctx.db.query("tasks").collect();
|
|
57
|
+
return tasks.filter((task) => task.status === "open");
|
|
58
|
+
},
|
|
59
|
+
});
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
```ts
|
|
63
|
+
// Also bad: Convex .filter() does not push to storage either
|
|
64
|
+
export const listOpen = query({
|
|
65
|
+
args: {},
|
|
66
|
+
handler: async (ctx) => {
|
|
67
|
+
return await ctx.db
|
|
68
|
+
.query("tasks")
|
|
69
|
+
.filter((q) => q.eq(q.field("status"), "open"))
|
|
70
|
+
.collect();
|
|
71
|
+
},
|
|
72
|
+
});
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
```ts
|
|
76
|
+
// Good: use an index so storage does the filtering
|
|
77
|
+
export const listOpen = query({
|
|
78
|
+
args: {},
|
|
79
|
+
handler: async (ctx) => {
|
|
80
|
+
return await ctx.db
|
|
81
|
+
.query("tasks")
|
|
82
|
+
.withIndex("by_status", (q) => q.eq("status", "open"))
|
|
83
|
+
.collect();
|
|
84
|
+
},
|
|
85
|
+
});
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
### Migration rule for indexes
|
|
89
|
+
|
|
90
|
+
New indexes on partially backfilled fields can create correctness bugs during rollout.
|
|
91
|
+
|
|
92
|
+
Important Convex detail:
|
|
93
|
+
|
|
94
|
+
`undefined !== false`
|
|
95
|
+
|
|
96
|
+
If an older document is missing a field entirely, it will not match a compound index entry that expects `false`.
|
|
97
|
+
|
|
98
|
+
Do not trust old comments saying a field is "not backfilled" or "already backfilled". Verify.
|
|
99
|
+
|
|
100
|
+
If correctness depends on handling old and new states during rollout, do not improvise a partial-backfill workaround in the hot path. Use a migration-safe rollout and consult `skills/convex-migration-helper/SKILL.md`.
|
|
101
|
+
|
|
102
|
+
```ts
|
|
103
|
+
// Bad: optional booleans can miss older rows where the field is undefined
|
|
104
|
+
const projects = await ctx.db
|
|
105
|
+
.query("projects")
|
|
106
|
+
.withIndex("by_archived_and_updated", (q) => q.eq("isArchived", false))
|
|
107
|
+
.order("desc")
|
|
108
|
+
.take(20);
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
```ts
|
|
112
|
+
// Good: switch hot-path reads only after the rollout is migration-safe
|
|
113
|
+
// See the migration helper skill for dual-read / backfill / cutover patterns.
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
### Check for redundant indexes
|
|
117
|
+
|
|
118
|
+
Indexes like `by_foo` and `by_foo_and_bar` are usually redundant. You only need `by_foo_and_bar`, since you can query it with just the `foo` condition and omit `bar`. Extra indexes add storage cost and write overhead on every insert, patch, and delete.
|
|
119
|
+
|
|
120
|
+
```ts
|
|
121
|
+
// Bad: two indexes where one would do
|
|
122
|
+
defineTable({ team: v.id("teams"), user: v.id("users") })
|
|
123
|
+
.index("by_team", ["team"])
|
|
124
|
+
.index("by_team_and_user", ["team", "user"])
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
```ts
|
|
128
|
+
// Good: single compound index serves both query patterns
|
|
129
|
+
defineTable({ team: v.id("teams"), user: v.id("users") })
|
|
130
|
+
.index("by_team_and_user", ["team", "user"])
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
Exception: `.index("by_foo", ["foo"])` is really an index on `foo` + `_creationTime`, while `.index("by_foo_and_bar", ["foo", "bar"])` is on `foo` + `bar` + `_creationTime`. If you need results sorted by `foo` then `_creationTime`, you need the single-field index because the compound one would sort by `bar` first.
|
|
134
|
+
|
|
135
|
+
## 2. Minimize Data Sources
|
|
136
|
+
|
|
137
|
+
Trace every read.
|
|
138
|
+
|
|
139
|
+
If a function resolves a foreign key for a tiny display field and a denormalized copy already exists, prefer the denormalized field on the hot path.
|
|
140
|
+
|
|
141
|
+
### When to denormalize
|
|
142
|
+
|
|
143
|
+
Denormalize when all of these are true:
|
|
144
|
+
|
|
145
|
+
- the path is hot
|
|
146
|
+
- the joined document is much larger than the field you need
|
|
147
|
+
- many readers are paying that join cost repeatedly
|
|
148
|
+
|
|
149
|
+
Useful mental model:
|
|
150
|
+
|
|
151
|
+
`join_cost = rows_per_page x foreign_doc_size x pages_per_second`
|
|
152
|
+
|
|
153
|
+
Small-table joins are often fine. Large-document joins for tiny fields on hot list pages are usually not.
|
|
154
|
+
|
|
155
|
+
### Fallback rule
|
|
156
|
+
|
|
157
|
+
Denormalized data is an optimization. Live data is the correctness path.
|
|
158
|
+
|
|
159
|
+
Rules:
|
|
160
|
+
|
|
161
|
+
- If the denormalized field is missing or null, fall back to the live read
|
|
162
|
+
- Do not show placeholders instead of falling back
|
|
163
|
+
- In lookup maps, only include fully populated entries
|
|
164
|
+
|
|
165
|
+
```ts
|
|
166
|
+
// Bad: missing denormalized data becomes a placeholder and blocks correctness
|
|
167
|
+
const ownerName = project.ownerName ?? "Unknown owner";
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
```ts
|
|
171
|
+
// Good: denormalized data is an optimization, not the only source of truth
|
|
172
|
+
const ownerName =
|
|
173
|
+
project.ownerName ??
|
|
174
|
+
(await ctx.db.get(project.ownerId))?.name ??
|
|
175
|
+
null;
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
Bad lookup map pattern:
|
|
179
|
+
|
|
180
|
+
```ts
|
|
181
|
+
const ownersById = {
|
|
182
|
+
[project.ownerId]: { ownerName: null },
|
|
183
|
+
};
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
That blocks fallback because the map says "I have data" when it does not.
|
|
187
|
+
|
|
188
|
+
Good lookup map pattern:
|
|
189
|
+
|
|
190
|
+
```ts
|
|
191
|
+
const ownersById =
|
|
192
|
+
project.ownerName !== undefined && project.ownerName !== null
|
|
193
|
+
? { [project.ownerId]: { ownerName: project.ownerName } }
|
|
194
|
+
: {};
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
### No denormalized copy yet
|
|
198
|
+
|
|
199
|
+
Prefer adding fields to an existing summary, companion, or digest table instead of bloating the primary hot-path table.
|
|
200
|
+
|
|
201
|
+
If introducing the new field or table requires a staged rollout, backfill, or old/new-shape handling, use the migration helper skill for the rollout plan.
|
|
202
|
+
|
|
203
|
+
Rollout order:
|
|
204
|
+
|
|
205
|
+
1. Update schema
|
|
206
|
+
2. Update write path
|
|
207
|
+
3. Backfill
|
|
208
|
+
4. Switch read path
|
|
209
|
+
|
|
210
|
+
## 3. Minimize Row Size
|
|
211
|
+
|
|
212
|
+
Hot list pages should read the smallest document shape that still answers the UI.
|
|
213
|
+
|
|
214
|
+
Prefer summary or digest tables over full source tables when:
|
|
215
|
+
|
|
216
|
+
- the list page only needs a subset of fields
|
|
217
|
+
- source documents are large
|
|
218
|
+
- the query is high volume
|
|
219
|
+
|
|
220
|
+
An 800-byte summary row is materially cheaper than a 3 KB full document on a hot page.
|
|
221
|
+
|
|
222
|
+
Digest tables are a tradeoff, not a default:
|
|
223
|
+
|
|
224
|
+
- Worth it when the path is clearly hot, the source rows are much larger than the UI needs, or many readers are repeatedly paying the same join and payload cost
|
|
225
|
+
- Probably not worth it when an indexed read on the source table is already cheap enough, the table is still small, or the extra write and migration complexity would dominate the benefit
|
|
226
|
+
|
|
227
|
+
```ts
|
|
228
|
+
// Bad: list page reads source docs, then joins owner data per row
|
|
229
|
+
const projects = await ctx.db
|
|
230
|
+
.query("projects")
|
|
231
|
+
.withIndex("by_public", (q) => q.eq("isPublic", true))
|
|
232
|
+
.collect();
|
|
233
|
+
```
|
|
234
|
+
|
|
235
|
+
```ts
|
|
236
|
+
// Good: list page reads the smaller digest shape first
|
|
237
|
+
const projects = await ctx.db
|
|
238
|
+
.query("projectDigests")
|
|
239
|
+
.withIndex("by_public_and_updated", (q) => q.eq("isPublic", true))
|
|
240
|
+
.order("desc")
|
|
241
|
+
.take(20);
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
## 4. Skip No-Op Writes
|
|
245
|
+
|
|
246
|
+
No-op writes still cost work in Convex:
|
|
247
|
+
|
|
248
|
+
- invalidation
|
|
249
|
+
- replication
|
|
250
|
+
- trigger execution
|
|
251
|
+
- downstream sync
|
|
252
|
+
|
|
253
|
+
Before `patch` or `replace`, compare against the existing document and skip the write if nothing changed.
|
|
254
|
+
|
|
255
|
+
Apply this across sibling writers too. One careful writer does not help much if three other mutations still patch unconditionally.
|
|
256
|
+
|
|
257
|
+
```ts
|
|
258
|
+
// Bad: patching unchanged values still triggers invalidation and downstream work
|
|
259
|
+
await ctx.db.patch(settings._id, {
|
|
260
|
+
theme: args.theme,
|
|
261
|
+
locale: args.locale,
|
|
262
|
+
});
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
```ts
|
|
266
|
+
// Good: only write when something actually changed
|
|
267
|
+
if (settings.theme !== args.theme || settings.locale !== args.locale) {
|
|
268
|
+
await ctx.db.patch(settings._id, {
|
|
269
|
+
theme: args.theme,
|
|
270
|
+
locale: args.locale,
|
|
271
|
+
});
|
|
272
|
+
}
|
|
273
|
+
```
|
|
274
|
+
|
|
275
|
+
## 5. Match Consistency To Read Patterns
|
|
276
|
+
|
|
277
|
+
Choose read strategy based on traffic shape.
|
|
278
|
+
|
|
279
|
+
### High-read, low-write
|
|
280
|
+
|
|
281
|
+
Examples:
|
|
282
|
+
|
|
283
|
+
- public browse pages
|
|
284
|
+
- search results
|
|
285
|
+
- landing pages
|
|
286
|
+
- directory listings
|
|
287
|
+
|
|
288
|
+
Prefer:
|
|
289
|
+
|
|
290
|
+
- point-in-time reads where appropriate
|
|
291
|
+
- explicit refresh
|
|
292
|
+
- local state for pagination
|
|
293
|
+
- caching where appropriate
|
|
294
|
+
|
|
295
|
+
Do not treat subscriptions as automatically wrong here. Prefer point-in-time reads only when the product does not need live freshness and the reactive cost is material. See `subscription-cost.md` for detailed patterns.
|
|
296
|
+
|
|
297
|
+
### High-read, high-write
|
|
298
|
+
|
|
299
|
+
Examples:
|
|
300
|
+
|
|
301
|
+
- collaborative editors
|
|
302
|
+
- live dashboards
|
|
303
|
+
- presence-heavy views
|
|
304
|
+
|
|
305
|
+
Reactive queries may be worth the ongoing cost.
|
|
306
|
+
|
|
307
|
+
## Convex-Specific Notes
|
|
308
|
+
|
|
309
|
+
### Reactive queries
|
|
310
|
+
|
|
311
|
+
Every `ctx.db.get()` and `ctx.db.query()` contributes to the invalidation set for the query.
|
|
312
|
+
|
|
313
|
+
On the client:
|
|
314
|
+
|
|
315
|
+
- `useQuery` creates a live subscription
|
|
316
|
+
- `usePaginatedQuery` creates a live subscription per page
|
|
317
|
+
|
|
318
|
+
For low-freshness flows, consider a point-in-time read instead of a live subscription only when the product does not need updates pushed automatically.
|
|
319
|
+
|
|
320
|
+
### Point-in-time reads
|
|
321
|
+
|
|
322
|
+
Framework helpers, server-rendered fetches, or one-shot client reads can avoid ongoing subscription cost when live updates are not useful.
|
|
323
|
+
|
|
324
|
+
Use them for:
|
|
325
|
+
|
|
326
|
+
- aggregate snapshots
|
|
327
|
+
- reports
|
|
328
|
+
- low-churn listings
|
|
329
|
+
- pages where explicit refresh is fine
|
|
330
|
+
|
|
331
|
+
### Triggers and fan-out
|
|
332
|
+
|
|
333
|
+
Triggers fire on every write, including writes that did not materially change the document.
|
|
334
|
+
|
|
335
|
+
When a write exists only to keep derived state in sync:
|
|
336
|
+
|
|
337
|
+
- diff before patching
|
|
338
|
+
- move expensive non-blocking work to `ctx.scheduler.runAfter` when appropriate
|
|
339
|
+
|
|
340
|
+
### Aggregates
|
|
341
|
+
|
|
342
|
+
Reactive global counts invalidate frequently on busy tables.
|
|
343
|
+
|
|
344
|
+
Prefer:
|
|
345
|
+
|
|
346
|
+
- one-shot aggregate fetches
|
|
347
|
+
- periodic recomputation
|
|
348
|
+
- precomputed summary rows
|
|
349
|
+
|
|
350
|
+
for global stats that do not need live updates every second.
|
|
351
|
+
|
|
352
|
+
### Backfills
|
|
353
|
+
|
|
354
|
+
For larger backfills, use cursor-based, self-scheduling `internalMutation` jobs or the migrations component.
|
|
355
|
+
|
|
356
|
+
Deploy code that can handle both states before running the backfill.
|
|
357
|
+
|
|
358
|
+
During the gap:
|
|
359
|
+
|
|
360
|
+
- writes should populate the new shape
|
|
361
|
+
- reads should fall back safely
|
|
362
|
+
|
|
363
|
+
## Verification
|
|
364
|
+
|
|
365
|
+
Before closing the audit, confirm:
|
|
366
|
+
|
|
367
|
+
1. Same results as before, no dropped records
|
|
368
|
+
2. The removed table or lookup is no longer in the hot-path read set
|
|
369
|
+
3. Tests or validation cover fallback behavior
|
|
370
|
+
4. Migration safety is preserved while fields or indexes are unbackfilled
|
|
371
|
+
5. Sibling functions were fixed consistently
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
# Migrations Component Reference
|
|
2
|
+
|
|
3
|
+
Complete guide to the [`@convex-dev/migrations`](https://www.convex.dev/components/migrations) component for batched, resumable Convex data migrations.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @convex-dev/migrations
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Setup
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
// convex/convex.config.ts
|
|
15
|
+
import { defineApp } from "convex/server";
|
|
16
|
+
import migrations from "@convex-dev/migrations/convex.config.js";
|
|
17
|
+
|
|
18
|
+
const app = defineApp();
|
|
19
|
+
app.use(migrations);
|
|
20
|
+
export default app;
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
```typescript
|
|
24
|
+
// convex/migrations.ts
|
|
25
|
+
import { Migrations } from "@convex-dev/migrations";
|
|
26
|
+
import { components } from "./_generated/api.js";
|
|
27
|
+
import { DataModel } from "./_generated/dataModel.js";
|
|
28
|
+
|
|
29
|
+
export const migrations = new Migrations<DataModel>(components.migrations);
|
|
30
|
+
export const run = migrations.runner();
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
The `DataModel` type parameter is optional but provides type safety for migration definitions.
|
|
34
|
+
|
|
35
|
+
## Define a Migration
|
|
36
|
+
|
|
37
|
+
The `migrateOne` function processes a single document. The component handles batching and pagination automatically.
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
// convex/migrations.ts
|
|
41
|
+
export const addDefaultRole = migrations.define({
|
|
42
|
+
table: "users",
|
|
43
|
+
migrateOne: async (ctx, user) => {
|
|
44
|
+
if (user.role === undefined) {
|
|
45
|
+
await ctx.db.patch(user._id, { role: "user" });
|
|
46
|
+
}
|
|
47
|
+
},
|
|
48
|
+
});
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
Shorthand: if you return an object, it is applied as a patch automatically.
|
|
52
|
+
|
|
53
|
+
```typescript
|
|
54
|
+
export const clearDeprecatedField = migrations.define({
|
|
55
|
+
table: "users",
|
|
56
|
+
migrateOne: () => ({ legacyField: undefined }),
|
|
57
|
+
});
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
## Run a Migration
|
|
61
|
+
|
|
62
|
+
From the CLI:
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
# Define a one-off runner in convex/migrations.ts:
|
|
66
|
+
# export const runIt = migrations.runner(internal.migrations.addDefaultRole);
|
|
67
|
+
npx convex run migrations:runIt
|
|
68
|
+
|
|
69
|
+
# Or use the general-purpose runner
|
|
70
|
+
npx convex run migrations:run '{"fn": "migrations:addDefaultRole"}'
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
Programmatically from another Convex function:
|
|
74
|
+
|
|
75
|
+
```typescript
|
|
76
|
+
await migrations.runOne(ctx, internal.migrations.addDefaultRole);
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
## Run Multiple Migrations in Order
|
|
80
|
+
|
|
81
|
+
```typescript
|
|
82
|
+
export const runAll = migrations.runner([
|
|
83
|
+
internal.migrations.addDefaultRole,
|
|
84
|
+
internal.migrations.clearDeprecatedField,
|
|
85
|
+
internal.migrations.normalizeEmails,
|
|
86
|
+
]);
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
```bash
|
|
90
|
+
npx convex run migrations:runAll
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
If one fails, it stops and will not continue to the next. Call it again to retry from where it left off. Completed migrations are skipped automatically.
|
|
94
|
+
|
|
95
|
+
## Dry Run
|
|
96
|
+
|
|
97
|
+
Test a migration before committing changes:
|
|
98
|
+
|
|
99
|
+
```bash
|
|
100
|
+
npx convex run migrations:runIt '{"dryRun": true}'
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
This runs one batch and then rolls back, so you can see what it would do without changing any data.
|
|
104
|
+
|
|
105
|
+
## Check Migration Status
|
|
106
|
+
|
|
107
|
+
```bash
|
|
108
|
+
npx convex run --component migrations lib:getStatus --watch
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Cancel a Running Migration
|
|
112
|
+
|
|
113
|
+
```bash
|
|
114
|
+
npx convex run --component migrations lib:cancel '{"name": "migrations:addDefaultRole"}'
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
Or programmatically:
|
|
118
|
+
|
|
119
|
+
```typescript
|
|
120
|
+
await migrations.cancel(ctx, internal.migrations.addDefaultRole);
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## Run Migrations on Deploy
|
|
124
|
+
|
|
125
|
+
Chain migration execution after deploying:
|
|
126
|
+
|
|
127
|
+
```bash
|
|
128
|
+
npx convex deploy --cmd 'npm run build' && npx convex run migrations:runAll --prod
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
## Configuration Options
|
|
132
|
+
|
|
133
|
+
### Custom Batch Size
|
|
134
|
+
|
|
135
|
+
If documents are large or the table has heavy write traffic, reduce the batch size to avoid transaction limits or OCC conflicts:
|
|
136
|
+
|
|
137
|
+
```typescript
|
|
138
|
+
export const migrateHeavyTable = migrations.define({
|
|
139
|
+
table: "largeDocuments",
|
|
140
|
+
batchSize: 10,
|
|
141
|
+
migrateOne: async (ctx, doc) => {
|
|
142
|
+
// migration logic
|
|
143
|
+
},
|
|
144
|
+
});
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
### Migrate a Subset Using an Index
|
|
148
|
+
|
|
149
|
+
Process only matching documents instead of the full table:
|
|
150
|
+
|
|
151
|
+
```typescript
|
|
152
|
+
export const fixEmptyNames = migrations.define({
|
|
153
|
+
table: "users",
|
|
154
|
+
customRange: (query) =>
|
|
155
|
+
query.withIndex("by_name", (q) => q.eq("name", "")),
|
|
156
|
+
migrateOne: () => ({ name: "<unknown>" }),
|
|
157
|
+
});
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
### Parallelize Within a Batch
|
|
161
|
+
|
|
162
|
+
By default each document in a batch is processed serially. Enable parallel processing if your migration logic does not depend on ordering:
|
|
163
|
+
|
|
164
|
+
```typescript
|
|
165
|
+
export const clearField = migrations.define({
|
|
166
|
+
table: "myTable",
|
|
167
|
+
parallelize: true,
|
|
168
|
+
migrateOne: () => ({ optionalField: undefined }),
|
|
169
|
+
});
|
|
170
|
+
```
|