thevoidforge 21.0.11 → 21.0.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.claude/commands/ai.md +69 -0
- package/dist/.claude/commands/architect.md +121 -0
- package/dist/.claude/commands/assemble.md +201 -0
- package/dist/.claude/commands/assess.md +75 -0
- package/dist/.claude/commands/blueprint.md +135 -0
- package/dist/.claude/commands/build.md +116 -0
- package/dist/.claude/commands/campaign.md +201 -0
- package/dist/.claude/commands/cultivation.md +166 -0
- package/dist/.claude/commands/current.md +128 -0
- package/dist/.claude/commands/dangerroom.md +74 -0
- package/dist/.claude/commands/debrief.md +178 -0
- package/dist/.claude/commands/deploy.md +99 -0
- package/dist/.claude/commands/devops.md +143 -0
- package/dist/.claude/commands/gauntlet.md +140 -0
- package/dist/.claude/commands/git.md +104 -0
- package/dist/.claude/commands/grow.md +146 -0
- package/dist/.claude/commands/imagine.md +126 -0
- package/dist/.claude/commands/portfolio.md +50 -0
- package/dist/.claude/commands/prd.md +113 -0
- package/dist/.claude/commands/qa.md +107 -0
- package/dist/.claude/commands/review.md +151 -0
- package/dist/.claude/commands/security.md +100 -0
- package/dist/.claude/commands/test.md +96 -0
- package/dist/.claude/commands/thumper.md +116 -0
- package/dist/.claude/commands/treasury.md +100 -0
- package/dist/.claude/commands/ux.md +118 -0
- package/dist/.claude/commands/vault.md +189 -0
- package/dist/.claude/commands/void.md +108 -0
- package/dist/CHANGELOG.md +1918 -0
- package/dist/CLAUDE.md +250 -0
- package/dist/HOLOCRON.md +856 -0
- package/dist/VERSION.md +123 -0
- package/dist/docs/NAMING_REGISTRY.md +478 -0
- package/dist/docs/methods/AI_INTELLIGENCE.md +276 -0
- package/dist/docs/methods/ASSEMBLER.md +142 -0
- package/dist/docs/methods/BACKEND_ENGINEER.md +165 -0
- package/dist/docs/methods/BUILD_JOURNAL.md +185 -0
- package/dist/docs/methods/BUILD_PROTOCOL.md +426 -0
- package/dist/docs/methods/CAMPAIGN.md +568 -0
- package/dist/docs/methods/CONTEXT_MANAGEMENT.md +189 -0
- package/dist/docs/methods/DEEP_CURRENT.md +184 -0
- package/dist/docs/methods/DEVOPS_ENGINEER.md +295 -0
- package/dist/docs/methods/FIELD_MEDIC.md +261 -0
- package/dist/docs/methods/FORGE_ARTIST.md +108 -0
- package/dist/docs/methods/FORGE_KEEPER.md +268 -0
- package/dist/docs/methods/GAUNTLET.md +344 -0
- package/dist/docs/methods/GROWTH_STRATEGIST.md +466 -0
- package/dist/docs/methods/HEARTBEAT.md +168 -0
- package/dist/docs/methods/MCP_INTEGRATION.md +139 -0
- package/dist/docs/methods/MUSTER.md +148 -0
- package/dist/docs/methods/PRD_GENERATOR.md +186 -0
- package/dist/docs/methods/PRODUCT_DESIGN_FRONTEND.md +250 -0
- package/dist/docs/methods/QA_ENGINEER.md +337 -0
- package/dist/docs/methods/RELEASE_MANAGER.md +145 -0
- package/dist/docs/methods/SECURITY_AUDITOR.md +320 -0
- package/dist/docs/methods/SUB_AGENTS.md +335 -0
- package/dist/docs/methods/SYSTEMS_ARCHITECT.md +171 -0
- package/dist/docs/methods/TESTING.md +359 -0
- package/dist/docs/methods/THUMPER.md +175 -0
- package/dist/docs/methods/TIME_VAULT.md +120 -0
- package/dist/docs/methods/TREASURY.md +184 -0
- package/dist/docs/methods/TROUBLESHOOTING.md +265 -0
- package/dist/docs/patterns/README.md +52 -0
- package/dist/docs/patterns/ad-billing-adapter.ts +537 -0
- package/dist/docs/patterns/ad-platform-adapter.ts +421 -0
- package/dist/docs/patterns/ai-classifier.ts +195 -0
- package/dist/docs/patterns/ai-eval.ts +272 -0
- package/dist/docs/patterns/ai-orchestrator.ts +341 -0
- package/dist/docs/patterns/ai-router.ts +194 -0
- package/dist/docs/patterns/ai-tool-schema.ts +237 -0
- package/dist/docs/patterns/api-route.ts +241 -0
- package/dist/docs/patterns/backtest-engine.ts +499 -0
- package/dist/docs/patterns/browser-review.ts +292 -0
- package/dist/docs/patterns/combobox.tsx +300 -0
- package/dist/docs/patterns/component.tsx +262 -0
- package/dist/docs/patterns/daemon-process.ts +338 -0
- package/dist/docs/patterns/data-pipeline.ts +297 -0
- package/dist/docs/patterns/database-migration.ts +466 -0
- package/dist/docs/patterns/e2e-test.ts +629 -0
- package/dist/docs/patterns/error-handling.ts +312 -0
- package/dist/docs/patterns/execution-safety.ts +601 -0
- package/dist/docs/patterns/financial-transaction.ts +342 -0
- package/dist/docs/patterns/funding-plan.ts +462 -0
- package/dist/docs/patterns/game-entity.ts +137 -0
- package/dist/docs/patterns/game-loop.ts +113 -0
- package/dist/docs/patterns/game-state.ts +143 -0
- package/dist/docs/patterns/job-queue.ts +225 -0
- package/dist/docs/patterns/kongo-integration.ts +164 -0
- package/dist/docs/patterns/middleware.ts +363 -0
- package/dist/docs/patterns/mobile-screen.tsx +139 -0
- package/dist/docs/patterns/mobile-service.ts +167 -0
- package/dist/docs/patterns/multi-tenant.ts +382 -0
- package/dist/docs/patterns/oauth-token-lifecycle.ts +223 -0
- package/dist/docs/patterns/outbound-rate-limiter.ts +260 -0
- package/dist/docs/patterns/prompt-template.ts +195 -0
- package/dist/docs/patterns/revenue-source-adapter.ts +311 -0
- package/dist/docs/patterns/service.ts +224 -0
- package/dist/docs/patterns/sse-endpoint.ts +118 -0
- package/dist/docs/patterns/stablecoin-adapter.ts +511 -0
- package/dist/docs/patterns/third-party-script.ts +68 -0
- package/dist/scripts/thumper/gom-jabbar.sh +241 -0
- package/dist/scripts/thumper/relay.sh +610 -0
- package/dist/scripts/thumper/scan.sh +359 -0
- package/dist/scripts/thumper/thumper.sh +190 -0
- package/dist/scripts/thumper/water-rings.sh +76 -0
- package/dist/wizard/ui/index.html +1 -1
- package/package.json +1 -1
- package/dist/tsconfig.tsbuildinfo +0 -1
|
@@ -0,0 +1,466 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pattern: Database Migration Safety
|
|
3
|
+
*
|
|
4
|
+
* Key principles:
|
|
5
|
+
* - Backward-compatible by default — add columns as nullable, backfill, then constrain
|
|
6
|
+
* - Safe column removal — stop reading in code first, deploy, THEN drop column
|
|
7
|
+
* - Large table operations use batched processing — never full-table locks on >100k rows
|
|
8
|
+
* - Every migration has a tested rollback (down migration)
|
|
9
|
+
* - Data backfills are idempotent — safe to re-run if interrupted mid-batch
|
|
10
|
+
* - Zero-downtime deployments require migration/code ordering discipline
|
|
11
|
+
*
|
|
12
|
+
* Agents: Banner (database), Kusanagi (deploy), Stark (services)
|
|
13
|
+
*
|
|
14
|
+
* Framework adaptations:
|
|
15
|
+
* Prisma (Node.js): This file — raw SQL for batched ops, Prisma Migrate for schema
|
|
16
|
+
* Alembic (Python/FastAPI): op.add_column(), op.batch_alter_table(), bulk_insert()
|
|
17
|
+
* ActiveRecord (Rails): add_column with default, change_column_null, in_batches
|
|
18
|
+
* Django: AddField(null=True), RunPython for data migrations, SeparateDatabaseAndState
|
|
19
|
+
*
|
|
20
|
+
* === Alembic Deep Dive ===
|
|
21
|
+
*
|
|
22
|
+
* # alembic/versions/001_add_display_name.py
|
|
23
|
+
* from alembic import op
|
|
24
|
+
* import sqlalchemy as sa
|
|
25
|
+
*
|
|
26
|
+
* def upgrade():
|
|
27
|
+
* # Step 1: Add nullable column
|
|
28
|
+
* op.add_column('users', sa.Column('display_name', sa.String(255), nullable=True))
|
|
29
|
+
*
|
|
30
|
+
* def downgrade():
|
|
31
|
+
* op.drop_column('users', 'display_name')
|
|
32
|
+
*
|
|
33
|
+
* # Step 2 (separate migration): Backfill + constrain
|
|
34
|
+
* # alembic/versions/002_backfill_display_name.py
|
|
35
|
+
* from alembic import op
|
|
36
|
+
* from sqlalchemy import text
|
|
37
|
+
*
|
|
38
|
+
* def upgrade():
|
|
39
|
+
* conn = op.get_bind()
|
|
40
|
+
* while True:
|
|
41
|
+
* result = conn.execute(text(
|
|
42
|
+
* "UPDATE users SET display_name = name "
|
|
43
|
+
* "WHERE display_name IS NULL LIMIT 1000"
|
|
44
|
+
* ))
|
|
45
|
+
* if result.rowcount == 0:
|
|
46
|
+
* break
|
|
47
|
+
* op.alter_column('users', 'display_name', nullable=False)
|
|
48
|
+
*
|
|
49
|
+
* def downgrade():
|
|
50
|
+
* op.alter_column('users', 'display_name', nullable=True)
|
|
51
|
+
*
|
|
52
|
+
* === ActiveRecord (Rails) Deep Dive ===
|
|
53
|
+
*
|
|
54
|
+
* # db/migrate/001_add_display_name_to_users.rb
|
|
55
|
+
* class AddDisplayNameToUsers < ActiveRecord::Migration[7.1]
|
|
56
|
+
* def change
|
|
57
|
+
* add_column :users, :display_name, :string, null: true
|
|
58
|
+
* end
|
|
59
|
+
* end
|
|
60
|
+
*
|
|
61
|
+
* # db/migrate/002_backfill_display_name.rb
|
|
62
|
+
* class BackfillDisplayName < ActiveRecord::Migration[7.1]
|
|
63
|
+
* disable_ddl_transaction! # Required for batched operations
|
|
64
|
+
*
|
|
65
|
+
* def up
|
|
66
|
+
* User.where(display_name: nil).in_batches(of: 1000) do |batch|
|
|
67
|
+
* batch.update_all("display_name = name")
|
|
68
|
+
* end
|
|
69
|
+
* change_column_null :users, :display_name, false
|
|
70
|
+
* end
|
|
71
|
+
*
|
|
72
|
+
* def down
|
|
73
|
+
* change_column_null :users, :display_name, true
|
|
74
|
+
* end
|
|
75
|
+
* end
|
|
76
|
+
*
|
|
77
|
+
* === Django Deep Dive ===
|
|
78
|
+
*
|
|
79
|
+
* # migrations/0001_add_display_name.py
|
|
80
|
+
* from django.db import migrations, models
|
|
81
|
+
*
|
|
82
|
+
* class Migration(migrations.Migration):
|
|
83
|
+
* operations = [
|
|
84
|
+
* migrations.AddField(
|
|
85
|
+
* model_name='user',
|
|
86
|
+
* name='display_name',
|
|
87
|
+
* field=models.CharField(max_length=255, null=True),
|
|
88
|
+
* ),
|
|
89
|
+
* ]
|
|
90
|
+
*
|
|
91
|
+
* # migrations/0002_backfill_display_name.py
|
|
92
|
+
* from django.db import migrations, models
|
|
93
|
+
*
|
|
94
|
+
* def backfill_display_name(apps, schema_editor):
|
|
95
|
+
* User = apps.get_model('users', 'User')
|
|
96
|
+
* batch_size = 1000
|
|
97
|
+
* while True:
|
|
98
|
+
* ids = list(
|
|
99
|
+
* User.objects.filter(display_name__isnull=True)
|
|
100
|
+
* .values_list('id', flat=True)[:batch_size]
|
|
101
|
+
* )
|
|
102
|
+
* if not ids:
|
|
103
|
+
* break
|
|
104
|
+
* User.objects.filter(id__in=ids).update(display_name=models.F('name'))
|
|
105
|
+
*
|
|
106
|
+
* class Migration(migrations.Migration):
|
|
107
|
+
* operations = [
|
|
108
|
+
* migrations.RunPython(backfill_display_name, migrations.RunPython.noop),
|
|
109
|
+
* migrations.AlterField(
|
|
110
|
+
* model_name='user',
|
|
111
|
+
* name='display_name',
|
|
112
|
+
* field=models.CharField(max_length=255, null=False),
|
|
113
|
+
* ),
|
|
114
|
+
* ]
|
|
115
|
+
*
|
|
116
|
+
* See /docs/patterns/service.ts for service layer conventions.
|
|
117
|
+
*/
|
|
118
|
+
|
|
119
|
+
// ── Types ────────────────────────────────────────────────
|
|
120
|
+
|
|
121
|
+
/**
 * A single migration step with forward and rollback operations.
 * Steps are executed by runMigrations() in list order (reversed for rollback).
 */
interface MigrationStep {
  /** Unique identifier, e.g. "20260324_001_add_display_name" */
  id: string;
  /** Human-readable description of what this migration does */
  description: string;
  /** Forward migration — applies the schema or data change */
  up: (ctx: MigrationContext) => Promise<void>;
  /** Rollback migration — reverses the forward change; must be tested like up() */
  down: (ctx: MigrationContext) => Promise<void>;
  /** If true, this migration uses batched processing for large tables */
  batched?: boolean;
  /** If true, this migration is a data-only change (no schema DDL) */
  dataOnly?: boolean;
}
|
|
136
|
+
|
|
137
|
+
/**
 * Minimal execution context handed to each migration step: raw SQL access
 * plus structured logging. Kept intentionally small so steps stay portable
 * across migration runners.
 */
interface MigrationContext {
  /** Execute raw SQL — use for DDL and batched operations */
  execute: (sql: string, params?: unknown[]) => Promise<{ rowCount: number }>;
  /** Structured logger — (event name, JSON-serializable payload) */
  log: (event: string, data: Record<string, unknown>) => void;
}
|
|
143
|
+
|
|
144
|
+
/**
 * Configuration for chunked data backfill operations.
 * Consumed by batchProcess(); setClause/whereClause are spliced into the
 * generated UPDATE statement verbatim, so they must come from trusted code.
 */
interface BackfillConfig {
  /** Table to backfill */
  table: string;
  /** SET clause for the UPDATE statement */
  setClause: string;
  /** WHERE clause to identify rows needing backfill */
  whereClause: string;
  /** Rows per batch — keep under 5000 to avoid lock contention */
  batchSize: number;
  /** Optional: column to checkpoint progress (must be indexed) */
  checkpointColumn?: string;
  /** Optional: resume from this checkpoint value */
  resumeFrom?: string | number;
  /** Delay between batches in ms — gives replicas time to catch up */
  delayMs?: number;
}
|
|
161
|
+
|
|
162
|
+
// ── Backward-Compatible Column Addition ──────────────────
|
|
163
|
+
|
|
164
|
+
/**
|
|
165
|
+
* Safe pattern: Add a column in two migrations.
|
|
166
|
+
*
|
|
167
|
+
* Migration 1: Add column as nullable (no default — avoids full table rewrite)
|
|
168
|
+
* Migration 2: Backfill data + add NOT NULL constraint
|
|
169
|
+
*
|
|
170
|
+
* WHY two migrations: A single ALTER TABLE ... ADD COLUMN ... NOT NULL
|
|
171
|
+
* on a table with millions of rows will lock the table for the duration
|
|
172
|
+
* of the rewrite. Splitting into nullable-add + backfill + constrain
|
|
173
|
+
* keeps each lock short.
|
|
174
|
+
*/
|
|
175
|
+
const addDisplayNameStep1: MigrationStep = {
|
|
176
|
+
id: '20260324_001_add_display_name_nullable',
|
|
177
|
+
description: 'Add display_name column as nullable (safe for zero-downtime)',
|
|
178
|
+
async up(ctx) {
|
|
179
|
+
await ctx.execute(`
|
|
180
|
+
ALTER TABLE users
|
|
181
|
+
ADD COLUMN display_name VARCHAR(255) NULL
|
|
182
|
+
`);
|
|
183
|
+
ctx.log('migration.column_added', { table: 'users', column: 'display_name', nullable: true });
|
|
184
|
+
},
|
|
185
|
+
async down(ctx) {
|
|
186
|
+
await ctx.execute(`ALTER TABLE users DROP COLUMN display_name`);
|
|
187
|
+
ctx.log('migration.column_dropped', { table: 'users', column: 'display_name' });
|
|
188
|
+
},
|
|
189
|
+
};
|
|
190
|
+
|
|
191
|
+
const addDisplayNameStep2: MigrationStep = {
|
|
192
|
+
id: '20260324_002_backfill_display_name',
|
|
193
|
+
description: 'Backfill display_name from name, then add NOT NULL constraint',
|
|
194
|
+
batched: true,
|
|
195
|
+
dataOnly: true,
|
|
196
|
+
async up(ctx) {
|
|
197
|
+
// Backfill in batches — see batchProcess() below
|
|
198
|
+
await batchProcess(ctx, {
|
|
199
|
+
table: 'users',
|
|
200
|
+
setClause: 'display_name = name',
|
|
201
|
+
whereClause: 'display_name IS NULL',
|
|
202
|
+
batchSize: 1000,
|
|
203
|
+
delayMs: 100,
|
|
204
|
+
});
|
|
205
|
+
|
|
206
|
+
// Only constrain AFTER all rows are backfilled
|
|
207
|
+
await ctx.execute(`ALTER TABLE users ALTER COLUMN display_name SET NOT NULL`);
|
|
208
|
+
ctx.log('migration.constraint_added', { table: 'users', column: 'display_name', constraint: 'NOT NULL' });
|
|
209
|
+
},
|
|
210
|
+
async down(ctx) {
|
|
211
|
+
await ctx.execute(`ALTER TABLE users ALTER COLUMN display_name DROP NOT NULL`);
|
|
212
|
+
ctx.log('migration.constraint_removed', { table: 'users', column: 'display_name', constraint: 'NOT NULL' });
|
|
213
|
+
},
|
|
214
|
+
};
|
|
215
|
+
|
|
216
|
+
// ── Safe Column Removal (Two-Phase) ─────────────────────
|
|
217
|
+
|
|
218
|
+
/**
|
|
219
|
+
* Removing a column requires TWO deploys:
|
|
220
|
+
*
|
|
221
|
+
* Deploy 1: Update code to stop reading/writing the column.
|
|
222
|
+
* - Remove from SELECT queries, ORM select/include lists
|
|
223
|
+
* - Remove from INSERT/UPDATE statements
|
|
224
|
+
* - Deploy this code change FIRST
|
|
225
|
+
*
|
|
226
|
+
* Deploy 2: Drop the column in a migration (this step).
|
|
227
|
+
* - Only safe AFTER Deploy 1 is live and stable
|
|
228
|
+
* - If you drop before Deploy 1, existing instances will crash
|
|
229
|
+
*
|
|
230
|
+
* For Prisma: remove the field from schema.prisma in Deploy 1,
|
|
231
|
+
* run `prisma generate` (updates client), then `prisma migrate`
|
|
232
|
+
* to drop the column in Deploy 2.
|
|
233
|
+
*/
|
|
234
|
+
const dropLegacyAvatarUrl: MigrationStep = {
|
|
235
|
+
id: '20260324_003_drop_legacy_avatar_url',
|
|
236
|
+
description: 'Drop avatar_url column (code already stopped reading it in v16.0)',
|
|
237
|
+
async up(ctx) {
|
|
238
|
+
await ctx.execute(`ALTER TABLE users DROP COLUMN IF EXISTS avatar_url`);
|
|
239
|
+
ctx.log('migration.column_dropped', { table: 'users', column: 'avatar_url' });
|
|
240
|
+
},
|
|
241
|
+
async down(ctx) {
|
|
242
|
+
// Restore the column as nullable — data is gone, but schema is recoverable
|
|
243
|
+
await ctx.execute(`ALTER TABLE users ADD COLUMN avatar_url TEXT NULL`);
|
|
244
|
+
ctx.log('migration.column_restored', { table: 'users', column: 'avatar_url', note: 'data not recoverable' });
|
|
245
|
+
},
|
|
246
|
+
};
|
|
247
|
+
|
|
248
|
+
// ── Batched Processing for Large Tables ─────────────────
|
|
249
|
+
|
|
250
|
+
/**
|
|
251
|
+
* Process rows in chunks to avoid full-table locks.
|
|
252
|
+
*
|
|
253
|
+
* Key behaviors:
|
|
254
|
+
* - Updates batchSize rows per iteration
|
|
255
|
+
* - Logs progress after each batch (structured JSON)
|
|
256
|
+
* - Supports checkpoint/resume via checkpointColumn
|
|
257
|
+
* - Idempotent: re-running processes only remaining unprocessed rows
|
|
258
|
+
* - Optional delay between batches for replica catch-up
|
|
259
|
+
*/
|
|
260
|
+
async function batchProcess(
|
|
261
|
+
ctx: MigrationContext,
|
|
262
|
+
config: BackfillConfig
|
|
263
|
+
): Promise<{ totalProcessed: number }> {
|
|
264
|
+
const { table, setClause, whereClause, batchSize, checkpointColumn, resumeFrom, delayMs } = config;
|
|
265
|
+
let totalProcessed = 0;
|
|
266
|
+
let checkpoint = resumeFrom;
|
|
267
|
+
|
|
268
|
+
while (true) {
|
|
269
|
+
// Build WHERE clause with optional checkpoint for resumability
|
|
270
|
+
let fullWhere = whereClause;
|
|
271
|
+
if (checkpointColumn && checkpoint !== undefined) {
|
|
272
|
+
fullWhere = `${whereClause} AND ${checkpointColumn} > ${typeof checkpoint === 'string' ? `'${checkpoint}'` : checkpoint}`;
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
// Use a subquery to limit the batch — avoids locking rows beyond the batch
|
|
276
|
+
const sql = `
|
|
277
|
+
UPDATE ${table}
|
|
278
|
+
SET ${setClause}
|
|
279
|
+
WHERE id IN (
|
|
280
|
+
SELECT id FROM ${table}
|
|
281
|
+
WHERE ${fullWhere}
|
|
282
|
+
ORDER BY ${checkpointColumn ?? 'id'}
|
|
283
|
+
LIMIT ${batchSize}
|
|
284
|
+
)
|
|
285
|
+
`;
|
|
286
|
+
|
|
287
|
+
const result = await ctx.execute(sql);
|
|
288
|
+
|
|
289
|
+
if (result.rowCount === 0) {
|
|
290
|
+
break; // All rows processed
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
totalProcessed += result.rowCount;
|
|
294
|
+
|
|
295
|
+
ctx.log('migration.batch_processed', {
|
|
296
|
+
table,
|
|
297
|
+
batchSize: result.rowCount,
|
|
298
|
+
totalProcessed,
|
|
299
|
+
});
|
|
300
|
+
|
|
301
|
+
// Optional delay — prevents replica lag on high-traffic tables
|
|
302
|
+
if (delayMs && delayMs > 0) {
|
|
303
|
+
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
ctx.log('migration.backfill_complete', { table, totalProcessed });
|
|
308
|
+
return { totalProcessed };
|
|
309
|
+
}
|
|
310
|
+
|
|
311
|
+
// ── Zero-Downtime Migration Safety Validation ───────────
|
|
312
|
+
|
|
313
|
+
/** Outcome of validateMigrationSafety(). */
interface MigrationSafetyResult {
  /** True when no blocking violations were found (warnings alone do not clear this). */
  safe: boolean;
  /** Blocking problems — the migration should not ship as written. */
  violations: string[];
  /** Non-blocking concerns worth reviewing before running. */
  warnings: string[];
}
|
|
318
|
+
|
|
319
|
+
/**
|
|
320
|
+
* Validate a migration SQL string against zero-downtime rules.
|
|
321
|
+
*
|
|
322
|
+
* Run this in CI or as a pre-migration hook. Catches the most common
|
|
323
|
+
* migration mistakes that cause downtime or data loss.
|
|
324
|
+
*/
|
|
325
|
+
function validateMigrationSafety(sql: string): MigrationSafetyResult {
|
|
326
|
+
const violations: string[] = [];
|
|
327
|
+
const warnings: string[] = [];
|
|
328
|
+
const upperSql = sql.toUpperCase();
|
|
329
|
+
|
|
330
|
+
// Rule 1: No bare NOT NULL without DEFAULT on ADD COLUMN
|
|
331
|
+
if (/ADD\s+COLUMN\s+\w+\s+\w+.*NOT\s+NULL/i.test(sql) && !/DEFAULT/i.test(sql)) {
|
|
332
|
+
violations.push(
|
|
333
|
+
'ADD COLUMN with NOT NULL but no DEFAULT — will fail on existing rows. ' +
|
|
334
|
+
'Add as nullable first, backfill, then add constraint in a separate migration.'
|
|
335
|
+
);
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
// Rule 2: No DROP COLUMN without IF EXISTS
|
|
339
|
+
if (/DROP\s+COLUMN\s+(?!IF\s+EXISTS)/i.test(sql)) {
|
|
340
|
+
warnings.push(
|
|
341
|
+
'DROP COLUMN without IF EXISTS — migration will fail if column already dropped. ' +
|
|
342
|
+
'Use DROP COLUMN IF EXISTS for idempotent migrations.'
|
|
343
|
+
);
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
// Rule 3: No RENAME COLUMN (breaks running code)
|
|
347
|
+
if (/RENAME\s+COLUMN/i.test(sql)) {
|
|
348
|
+
violations.push(
|
|
349
|
+
'RENAME COLUMN causes downtime — old code references the old name. ' +
|
|
350
|
+
'Instead: add new column, backfill, update code, drop old column.'
|
|
351
|
+
);
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
// Rule 4: No ALTER TYPE on large tables without checking
|
|
355
|
+
if (/ALTER\s+(COLUMN\s+)?\w+\s+(SET\s+DATA\s+)?TYPE/i.test(sql)) {
|
|
356
|
+
warnings.push(
|
|
357
|
+
'ALTER TYPE may rewrite the entire table. Verify table size and consider ' +
|
|
358
|
+
'add-new-column + backfill + swap strategy for large tables.'
|
|
359
|
+
);
|
|
360
|
+
}
|
|
361
|
+
|
|
362
|
+
// Rule 5: No CREATE INDEX without CONCURRENTLY (PostgreSQL)
|
|
363
|
+
if (/CREATE\s+INDEX\s+(?!CONCURRENTLY)/i.test(sql) && !/CREATE\s+UNIQUE\s+INDEX\s+CONCURRENTLY/i.test(sql)) {
|
|
364
|
+
if (!/CONCURRENTLY/i.test(sql)) {
|
|
365
|
+
warnings.push(
|
|
366
|
+
'CREATE INDEX without CONCURRENTLY locks writes for the duration. ' +
|
|
367
|
+
'Use CREATE INDEX CONCURRENTLY for zero-downtime.'
|
|
368
|
+
);
|
|
369
|
+
}
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
// Rule 6: No LOCK TABLE
|
|
373
|
+
if (upperSql.includes('LOCK TABLE')) {
|
|
374
|
+
violations.push(
|
|
375
|
+
'Explicit LOCK TABLE will block queries. Use row-level locking or batched operations.'
|
|
376
|
+
);
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
// Rule 7: UPDATE/DELETE without LIMIT or batching hint
|
|
380
|
+
if (/(?:UPDATE|DELETE\s+FROM)\s+\w+\s+SET/.test(sql) || /DELETE\s+FROM\s+\w+\s+WHERE/.test(sql)) {
|
|
381
|
+
if (!/LIMIT/i.test(sql) && !/IN\s*\(\s*SELECT/i.test(sql)) {
|
|
382
|
+
warnings.push(
|
|
383
|
+
'UPDATE/DELETE without LIMIT or subquery batch — may lock entire table. ' +
|
|
384
|
+
'Use batchProcess() for large operations.'
|
|
385
|
+
);
|
|
386
|
+
}
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
return {
|
|
390
|
+
safe: violations.length === 0,
|
|
391
|
+
violations,
|
|
392
|
+
warnings,
|
|
393
|
+
};
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
// ── Migration Runner (simplified) ───────────────────────
|
|
397
|
+
|
|
398
|
+
/**
|
|
399
|
+
* Execute a list of migration steps in order.
|
|
400
|
+
* In production, use your framework's migration runner (Prisma Migrate,
|
|
401
|
+
* Alembic, ActiveRecord, Django). This shows the pattern.
|
|
402
|
+
*/
|
|
403
|
+
async function runMigrations(
|
|
404
|
+
steps: MigrationStep[],
|
|
405
|
+
ctx: MigrationContext,
|
|
406
|
+
direction: 'up' | 'down' = 'up'
|
|
407
|
+
): Promise<void> {
|
|
408
|
+
const ordered = direction === 'down' ? [...steps].reverse() : steps;
|
|
409
|
+
|
|
410
|
+
for (const step of ordered) {
|
|
411
|
+
ctx.log('migration.start', { id: step.id, direction, description: step.description });
|
|
412
|
+
|
|
413
|
+
try {
|
|
414
|
+
if (direction === 'up') {
|
|
415
|
+
await step.up(ctx);
|
|
416
|
+
} else {
|
|
417
|
+
await step.down(ctx);
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
ctx.log('migration.complete', { id: step.id, direction });
|
|
421
|
+
} catch (error) {
|
|
422
|
+
ctx.log('migration.failed', {
|
|
423
|
+
id: step.id,
|
|
424
|
+
direction,
|
|
425
|
+
error: error instanceof Error ? error.message : String(error),
|
|
426
|
+
});
|
|
427
|
+
throw error; // Stop on first failure — do not continue with partial state
|
|
428
|
+
}
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
// ── Prisma-Specific Notes ───────────────────────────────
|
|
433
|
+
/*
|
|
434
|
+
Prisma Migrate generates SQL from schema.prisma changes.
|
|
435
|
+
For safe migrations with Prisma:
|
|
436
|
+
|
|
437
|
+
1. Add column as optional first:
|
|
438
|
+
model User {
|
|
439
|
+
displayName String? // nullable
|
|
440
|
+
}
|
|
441
|
+
Run: npx prisma migrate dev --name add_display_name
|
|
442
|
+
|
|
443
|
+
2. Backfill via raw SQL (Prisma doesn't generate data migrations):
|
|
444
|
+
import { PrismaClient } from '@prisma/client'
|
|
445
|
+
const prisma = new PrismaClient()
|
|
446
|
+
// Use $executeRaw for batched updates
|
|
447
|
+
let updated = 1
|
|
448
|
+
while (updated > 0) {
|
|
449
|
+
updated = await prisma.$executeRaw`
|
|
450
|
+
UPDATE users SET display_name = name
|
|
451
|
+
WHERE display_name IS NULL
|
|
452
|
+
LIMIT 1000
|
|
453
|
+
`
|
|
454
|
+
}
|
|
455
|
+
|
|
456
|
+
3. Make column required:
|
|
457
|
+
model User {
|
|
458
|
+
displayName String // non-nullable
|
|
459
|
+
}
|
|
460
|
+
Run: npx prisma migrate dev --name require_display_name
|
|
461
|
+
|
|
462
|
+
For column removal:
|
|
463
|
+
1. Remove field from schema.prisma, run prisma generate (updates client)
|
|
464
|
+
2. Deploy code (app no longer reads/writes the column)
|
|
465
|
+
3. Run prisma migrate dev --name drop_avatar_url (generates DROP COLUMN)
|
|
466
|
+
*/
|