drizzle-multitenant 1.0.10 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +4441 -742
- package/dist/cli/index.js.map +1 -1
- package/dist/{context-DoHx79MS.d.ts → context-Vki959ri.d.ts} +1 -1
- package/dist/index.d.ts +5 -4
- package/dist/index.js +3392 -742
- package/dist/index.js.map +1 -1
- package/dist/integrations/express.d.ts +3 -3
- package/dist/integrations/fastify.d.ts +3 -3
- package/dist/integrations/nestjs/index.d.ts +1 -1
- package/dist/integrations/nestjs/index.js +565 -91
- package/dist/integrations/nestjs/index.js.map +1 -1
- package/dist/migrator/index.d.ts +986 -281
- package/dist/migrator/index.js +2799 -664
- package/dist/migrator/index.js.map +1 -1
- package/dist/migrator-BDgFzSh8.d.ts +824 -0
- package/dist/{types-B5eSRLFW.d.ts → types-BhK96FPC.d.ts} +115 -1
- package/package.json +1 -1
package/dist/migrator/index.js
CHANGED
|
@@ -2,6 +2,7 @@ import { readdir, readFile } from 'fs/promises';
|
|
|
2
2
|
import { join, basename } from 'path';
|
|
3
3
|
import { createHash } from 'crypto';
|
|
4
4
|
import { Pool } from 'pg';
|
|
5
|
+
import { drizzle } from 'drizzle-orm/node-postgres';
|
|
5
6
|
|
|
6
7
|
// src/migrator/migrator.ts
|
|
7
8
|
|
|
@@ -114,246 +115,108 @@ function getFormatConfig(format, tableName = "__drizzle_migrations") {
|
|
|
114
115
|
};
|
|
115
116
|
}
|
|
116
117
|
}
|
|
117
|
-
|
|
118
|
-
// src/migrator/migrator.ts
|
|
119
118
|
var DEFAULT_MIGRATIONS_TABLE = "__drizzle_migrations";
|
|
120
|
-
var
|
|
121
|
-
constructor(
|
|
122
|
-
this.
|
|
123
|
-
this.
|
|
124
|
-
this.migrationsTable = migratorConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE;
|
|
119
|
+
var SchemaManager = class {
|
|
120
|
+
constructor(config, migrationsTable) {
|
|
121
|
+
this.config = config;
|
|
122
|
+
this.migrationsTable = migrationsTable ?? DEFAULT_MIGRATIONS_TABLE;
|
|
125
123
|
}
|
|
126
124
|
migrationsTable;
|
|
127
125
|
/**
|
|
128
|
-
*
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
const migrations = await this.loadMigrations();
|
|
139
|
-
const results = [];
|
|
140
|
-
let aborted = false;
|
|
141
|
-
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
142
|
-
const batch = tenantIds.slice(i, i + concurrency);
|
|
143
|
-
const batchResults = await Promise.all(
|
|
144
|
-
batch.map(async (tenantId) => {
|
|
145
|
-
if (aborted) {
|
|
146
|
-
return this.createSkippedResult(tenantId);
|
|
147
|
-
}
|
|
148
|
-
try {
|
|
149
|
-
onProgress?.(tenantId, "starting");
|
|
150
|
-
const result = await this.migrateTenant(tenantId, migrations, { dryRun, onProgress });
|
|
151
|
-
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
152
|
-
return result;
|
|
153
|
-
} catch (error) {
|
|
154
|
-
onProgress?.(tenantId, "failed");
|
|
155
|
-
const action = onError?.(tenantId, error);
|
|
156
|
-
if (action === "abort") {
|
|
157
|
-
aborted = true;
|
|
158
|
-
}
|
|
159
|
-
return this.createErrorResult(tenantId, error);
|
|
160
|
-
}
|
|
161
|
-
})
|
|
162
|
-
);
|
|
163
|
-
results.push(...batchResults);
|
|
164
|
-
}
|
|
165
|
-
if (aborted) {
|
|
166
|
-
const remaining = tenantIds.slice(results.length);
|
|
167
|
-
for (const tenantId of remaining) {
|
|
168
|
-
results.push(this.createSkippedResult(tenantId));
|
|
169
|
-
}
|
|
170
|
-
}
|
|
171
|
-
return this.aggregateResults(results);
|
|
172
|
-
}
|
|
173
|
-
/**
|
|
174
|
-
* Migrate a single tenant
|
|
175
|
-
*/
|
|
176
|
-
async migrateTenant(tenantId, migrations, options = {}) {
|
|
177
|
-
const startTime = Date.now();
|
|
178
|
-
const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
|
|
179
|
-
const appliedMigrations = [];
|
|
180
|
-
const pool = await this.createPool(schemaName);
|
|
181
|
-
try {
|
|
182
|
-
await this.migratorConfig.hooks?.beforeTenant?.(tenantId);
|
|
183
|
-
const format = await this.getOrDetectFormat(pool, schemaName);
|
|
184
|
-
await this.ensureMigrationsTable(pool, schemaName, format);
|
|
185
|
-
const allMigrations = migrations ?? await this.loadMigrations();
|
|
186
|
-
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
187
|
-
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
188
|
-
const pending = allMigrations.filter(
|
|
189
|
-
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
190
|
-
);
|
|
191
|
-
if (options.dryRun) {
|
|
192
|
-
return {
|
|
193
|
-
tenantId,
|
|
194
|
-
schemaName,
|
|
195
|
-
success: true,
|
|
196
|
-
appliedMigrations: pending.map((m) => m.name),
|
|
197
|
-
durationMs: Date.now() - startTime,
|
|
198
|
-
format: format.format
|
|
199
|
-
};
|
|
200
|
-
}
|
|
201
|
-
for (const migration of pending) {
|
|
202
|
-
const migrationStart = Date.now();
|
|
203
|
-
options.onProgress?.(tenantId, "migrating", migration.name);
|
|
204
|
-
await this.migratorConfig.hooks?.beforeMigration?.(tenantId, migration.name);
|
|
205
|
-
await this.applyMigration(pool, schemaName, migration, format);
|
|
206
|
-
await this.migratorConfig.hooks?.afterMigration?.(
|
|
207
|
-
tenantId,
|
|
208
|
-
migration.name,
|
|
209
|
-
Date.now() - migrationStart
|
|
210
|
-
);
|
|
211
|
-
appliedMigrations.push(migration.name);
|
|
212
|
-
}
|
|
213
|
-
const result = {
|
|
214
|
-
tenantId,
|
|
215
|
-
schemaName,
|
|
216
|
-
success: true,
|
|
217
|
-
appliedMigrations,
|
|
218
|
-
durationMs: Date.now() - startTime,
|
|
219
|
-
format: format.format
|
|
220
|
-
};
|
|
221
|
-
await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
|
|
222
|
-
return result;
|
|
223
|
-
} catch (error) {
|
|
224
|
-
const result = {
|
|
225
|
-
tenantId,
|
|
226
|
-
schemaName,
|
|
227
|
-
success: false,
|
|
228
|
-
appliedMigrations,
|
|
229
|
-
error: error.message,
|
|
230
|
-
durationMs: Date.now() - startTime
|
|
231
|
-
};
|
|
232
|
-
await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
|
|
233
|
-
return result;
|
|
234
|
-
} finally {
|
|
235
|
-
await pool.end();
|
|
236
|
-
}
|
|
237
|
-
}
|
|
238
|
-
/**
|
|
239
|
-
* Migrate specific tenants
|
|
126
|
+
* Get the schema name for a tenant ID using the configured template
|
|
127
|
+
*
|
|
128
|
+
* @param tenantId - The tenant identifier
|
|
129
|
+
* @returns The PostgreSQL schema name
|
|
130
|
+
*
|
|
131
|
+
* @example
|
|
132
|
+
* ```typescript
|
|
133
|
+
* const schemaName = schemaManager.getSchemaName('tenant-123');
|
|
134
|
+
* // Returns: 'tenant_tenant-123' (depends on schemaNameTemplate)
|
|
135
|
+
* ```
|
|
240
136
|
*/
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
const results = [];
|
|
244
|
-
const { concurrency = 10, onProgress, onError } = options;
|
|
245
|
-
for (let i = 0; i < tenantIds.length; i += concurrency) {
|
|
246
|
-
const batch = tenantIds.slice(i, i + concurrency);
|
|
247
|
-
const batchResults = await Promise.all(
|
|
248
|
-
batch.map(async (tenantId) => {
|
|
249
|
-
try {
|
|
250
|
-
onProgress?.(tenantId, "starting");
|
|
251
|
-
const result = await this.migrateTenant(tenantId, migrations, { dryRun: options.dryRun ?? false, onProgress });
|
|
252
|
-
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
253
|
-
return result;
|
|
254
|
-
} catch (error) {
|
|
255
|
-
onProgress?.(tenantId, "failed");
|
|
256
|
-
onError?.(tenantId, error);
|
|
257
|
-
return this.createErrorResult(tenantId, error);
|
|
258
|
-
}
|
|
259
|
-
})
|
|
260
|
-
);
|
|
261
|
-
results.push(...batchResults);
|
|
262
|
-
}
|
|
263
|
-
return this.aggregateResults(results);
|
|
137
|
+
getSchemaName(tenantId) {
|
|
138
|
+
return this.config.isolation.schemaNameTemplate(tenantId);
|
|
264
139
|
}
|
|
265
140
|
/**
|
|
266
|
-
*
|
|
141
|
+
* Create a PostgreSQL pool for a specific schema
|
|
142
|
+
*
|
|
143
|
+
* The pool is configured with `search_path` set to the schema,
|
|
144
|
+
* allowing queries to run in tenant isolation.
|
|
145
|
+
*
|
|
146
|
+
* @param schemaName - The PostgreSQL schema name
|
|
147
|
+
* @returns A configured Pool instance
|
|
148
|
+
*
|
|
149
|
+
* @example
|
|
150
|
+
* ```typescript
|
|
151
|
+
* const pool = await schemaManager.createPool('tenant_123');
|
|
152
|
+
* try {
|
|
153
|
+
* await pool.query('SELECT * FROM users'); // Queries tenant_123.users
|
|
154
|
+
* } finally {
|
|
155
|
+
* await pool.end();
|
|
156
|
+
* }
|
|
157
|
+
* ```
|
|
267
158
|
*/
|
|
268
|
-
async
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
}
|
|
275
|
-
return statuses;
|
|
159
|
+
async createPool(schemaName) {
|
|
160
|
+
return new Pool({
|
|
161
|
+
connectionString: this.config.connection.url,
|
|
162
|
+
...this.config.connection.poolConfig,
|
|
163
|
+
options: `-c search_path="${schemaName}",public`
|
|
164
|
+
});
|
|
276
165
|
}
|
|
277
166
|
/**
|
|
278
|
-
*
|
|
167
|
+
* Create a PostgreSQL pool without schema-specific search_path
|
|
168
|
+
*
|
|
169
|
+
* Used for operations that need to work across schemas or
|
|
170
|
+
* before a schema exists (like creating the schema itself).
|
|
171
|
+
*
|
|
172
|
+
* @returns A Pool instance connected to the database
|
|
279
173
|
*/
|
|
280
|
-
async
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
const tableExists = await this.migrationsTableExists(pool, schemaName);
|
|
286
|
-
if (!tableExists) {
|
|
287
|
-
return {
|
|
288
|
-
tenantId,
|
|
289
|
-
schemaName,
|
|
290
|
-
appliedCount: 0,
|
|
291
|
-
pendingCount: allMigrations.length,
|
|
292
|
-
pendingMigrations: allMigrations.map((m) => m.name),
|
|
293
|
-
status: allMigrations.length > 0 ? "behind" : "ok",
|
|
294
|
-
format: null
|
|
295
|
-
// New tenant, no table yet
|
|
296
|
-
};
|
|
297
|
-
}
|
|
298
|
-
const format = await this.getOrDetectFormat(pool, schemaName);
|
|
299
|
-
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
300
|
-
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
301
|
-
const pending = allMigrations.filter(
|
|
302
|
-
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
303
|
-
);
|
|
304
|
-
return {
|
|
305
|
-
tenantId,
|
|
306
|
-
schemaName,
|
|
307
|
-
appliedCount: applied.length,
|
|
308
|
-
pendingCount: pending.length,
|
|
309
|
-
pendingMigrations: pending.map((m) => m.name),
|
|
310
|
-
status: pending.length > 0 ? "behind" : "ok",
|
|
311
|
-
format: format.format
|
|
312
|
-
};
|
|
313
|
-
} catch (error) {
|
|
314
|
-
return {
|
|
315
|
-
tenantId,
|
|
316
|
-
schemaName,
|
|
317
|
-
appliedCount: 0,
|
|
318
|
-
pendingCount: 0,
|
|
319
|
-
pendingMigrations: [],
|
|
320
|
-
status: "error",
|
|
321
|
-
error: error.message,
|
|
322
|
-
format: null
|
|
323
|
-
};
|
|
324
|
-
} finally {
|
|
325
|
-
await pool.end();
|
|
326
|
-
}
|
|
174
|
+
async createRootPool() {
|
|
175
|
+
return new Pool({
|
|
176
|
+
connectionString: this.config.connection.url,
|
|
177
|
+
...this.config.connection.poolConfig
|
|
178
|
+
});
|
|
327
179
|
}
|
|
328
180
|
/**
|
|
329
|
-
* Create a new tenant schema
|
|
181
|
+
* Create a new tenant schema in the database
|
|
182
|
+
*
|
|
183
|
+
* @param tenantId - The tenant identifier
|
|
184
|
+
* @returns Promise that resolves when schema is created
|
|
185
|
+
*
|
|
186
|
+
* @example
|
|
187
|
+
* ```typescript
|
|
188
|
+
* await schemaManager.createSchema('new-tenant');
|
|
189
|
+
* ```
|
|
330
190
|
*/
|
|
331
|
-
async
|
|
332
|
-
const
|
|
333
|
-
const
|
|
334
|
-
const pool = new Pool({
|
|
335
|
-
connectionString: this.tenantConfig.connection.url,
|
|
336
|
-
...this.tenantConfig.connection.poolConfig
|
|
337
|
-
});
|
|
191
|
+
async createSchema(tenantId) {
|
|
192
|
+
const schemaName = this.getSchemaName(tenantId);
|
|
193
|
+
const pool = await this.createRootPool();
|
|
338
194
|
try {
|
|
339
195
|
await pool.query(`CREATE SCHEMA IF NOT EXISTS "${schemaName}"`);
|
|
340
|
-
if (migrate) {
|
|
341
|
-
await this.migrateTenant(tenantId);
|
|
342
|
-
}
|
|
343
196
|
} finally {
|
|
344
197
|
await pool.end();
|
|
345
198
|
}
|
|
346
199
|
}
|
|
347
200
|
/**
|
|
348
|
-
* Drop a tenant schema
|
|
201
|
+
* Drop a tenant schema from the database
|
|
202
|
+
*
|
|
203
|
+
* @param tenantId - The tenant identifier
|
|
204
|
+
* @param options - Drop options (cascade, force)
|
|
205
|
+
* @returns Promise that resolves when schema is dropped
|
|
206
|
+
*
|
|
207
|
+
* @example
|
|
208
|
+
* ```typescript
|
|
209
|
+
* // Drop with CASCADE (removes all objects)
|
|
210
|
+
* await schemaManager.dropSchema('old-tenant', { cascade: true });
|
|
211
|
+
*
|
|
212
|
+
* // Drop with RESTRICT (fails if objects exist)
|
|
213
|
+
* await schemaManager.dropSchema('old-tenant', { cascade: false });
|
|
214
|
+
* ```
|
|
349
215
|
*/
|
|
350
|
-
async
|
|
216
|
+
async dropSchema(tenantId, options = {}) {
|
|
351
217
|
const { cascade = true } = options;
|
|
352
|
-
const schemaName = this.
|
|
353
|
-
const pool =
|
|
354
|
-
connectionString: this.tenantConfig.connection.url,
|
|
355
|
-
...this.tenantConfig.connection.poolConfig
|
|
356
|
-
});
|
|
218
|
+
const schemaName = this.getSchemaName(tenantId);
|
|
219
|
+
const pool = await this.createRootPool();
|
|
357
220
|
try {
|
|
358
221
|
const cascadeSql = cascade ? "CASCADE" : "RESTRICT";
|
|
359
222
|
await pool.query(`DROP SCHEMA IF EXISTS "${schemaName}" ${cascadeSql}`);
|
|
@@ -362,14 +225,21 @@ var Migrator = class {
|
|
|
362
225
|
}
|
|
363
226
|
}
|
|
364
227
|
/**
|
|
365
|
-
* Check if a tenant schema exists
|
|
228
|
+
* Check if a tenant schema exists in the database
|
|
229
|
+
*
|
|
230
|
+
* @param tenantId - The tenant identifier
|
|
231
|
+
* @returns True if schema exists, false otherwise
|
|
232
|
+
*
|
|
233
|
+
* @example
|
|
234
|
+
* ```typescript
|
|
235
|
+
* if (await schemaManager.schemaExists('tenant-123')) {
|
|
236
|
+
* console.log('Tenant schema exists');
|
|
237
|
+
* }
|
|
238
|
+
* ```
|
|
366
239
|
*/
|
|
367
|
-
async
|
|
368
|
-
const schemaName = this.
|
|
369
|
-
const pool =
|
|
370
|
-
connectionString: this.tenantConfig.connection.url,
|
|
371
|
-
...this.tenantConfig.connection.poolConfig
|
|
372
|
-
});
|
|
240
|
+
async schemaExists(tenantId) {
|
|
241
|
+
const schemaName = this.getSchemaName(tenantId);
|
|
242
|
+
const pool = await this.createRootPool();
|
|
373
243
|
try {
|
|
374
244
|
const result = await pool.query(
|
|
375
245
|
`SELECT 1 FROM information_schema.schemata WHERE schema_name = $1`,
|
|
@@ -381,225 +251,790 @@ var Migrator = class {
|
|
|
381
251
|
}
|
|
382
252
|
}
|
|
383
253
|
/**
|
|
384
|
-
*
|
|
385
|
-
*
|
|
254
|
+
* List all schemas matching a pattern
|
|
255
|
+
*
|
|
256
|
+
* @param pattern - SQL LIKE pattern to filter schemas (optional)
|
|
257
|
+
* @returns Array of schema names
|
|
258
|
+
*
|
|
259
|
+
* @example
|
|
260
|
+
* ```typescript
|
|
261
|
+
* // List all tenant schemas
|
|
262
|
+
* const schemas = await schemaManager.listSchemas('tenant_%');
|
|
263
|
+
* ```
|
|
386
264
|
*/
|
|
387
|
-
async
|
|
388
|
-
const
|
|
389
|
-
const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
|
|
390
|
-
const markedMigrations = [];
|
|
391
|
-
const pool = await this.createPool(schemaName);
|
|
265
|
+
async listSchemas(pattern) {
|
|
266
|
+
const pool = await this.createRootPool();
|
|
392
267
|
try {
|
|
393
|
-
|
|
394
|
-
const
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
398
|
-
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
399
|
-
const pending = allMigrations.filter(
|
|
400
|
-
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
268
|
+
const query = pattern ? `SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE $1 ORDER BY schema_name` : `SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast') ORDER BY schema_name`;
|
|
269
|
+
const result = await pool.query(
|
|
270
|
+
query,
|
|
271
|
+
pattern ? [pattern] : []
|
|
401
272
|
);
|
|
402
|
-
|
|
403
|
-
const migrationStart = Date.now();
|
|
404
|
-
options.onProgress?.(tenantId, "migrating", migration.name);
|
|
405
|
-
await this.migratorConfig.hooks?.beforeMigration?.(tenantId, migration.name);
|
|
406
|
-
await this.recordMigration(pool, schemaName, migration, format);
|
|
407
|
-
await this.migratorConfig.hooks?.afterMigration?.(
|
|
408
|
-
tenantId,
|
|
409
|
-
migration.name,
|
|
410
|
-
Date.now() - migrationStart
|
|
411
|
-
);
|
|
412
|
-
markedMigrations.push(migration.name);
|
|
413
|
-
}
|
|
414
|
-
const result = {
|
|
415
|
-
tenantId,
|
|
416
|
-
schemaName,
|
|
417
|
-
success: true,
|
|
418
|
-
appliedMigrations: markedMigrations,
|
|
419
|
-
durationMs: Date.now() - startTime,
|
|
420
|
-
format: format.format
|
|
421
|
-
};
|
|
422
|
-
await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
|
|
423
|
-
return result;
|
|
424
|
-
} catch (error) {
|
|
425
|
-
const result = {
|
|
426
|
-
tenantId,
|
|
427
|
-
schemaName,
|
|
428
|
-
success: false,
|
|
429
|
-
appliedMigrations: markedMigrations,
|
|
430
|
-
error: error.message,
|
|
431
|
-
durationMs: Date.now() - startTime
|
|
432
|
-
};
|
|
433
|
-
await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
|
|
434
|
-
return result;
|
|
273
|
+
return result.rows.map((row) => row.schema_name);
|
|
435
274
|
} finally {
|
|
436
275
|
await pool.end();
|
|
437
276
|
}
|
|
438
277
|
}
|
|
439
278
|
/**
|
|
440
|
-
*
|
|
441
|
-
*
|
|
279
|
+
* Ensure the migrations table exists with the correct format
|
|
280
|
+
*
|
|
281
|
+
* Creates the migrations tracking table if it doesn't exist,
|
|
282
|
+
* using the appropriate column types based on the format.
|
|
283
|
+
*
|
|
284
|
+
* @param pool - Database pool to use
|
|
285
|
+
* @param schemaName - The schema to create the table in
|
|
286
|
+
* @param format - The detected/configured table format
|
|
287
|
+
*
|
|
288
|
+
* @example
|
|
289
|
+
* ```typescript
|
|
290
|
+
* const pool = await schemaManager.createPool('tenant_123');
|
|
291
|
+
* await schemaManager.ensureMigrationsTable(pool, 'tenant_123', format);
|
|
292
|
+
* ```
|
|
442
293
|
*/
|
|
443
|
-
async
|
|
444
|
-
const {
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
294
|
+
async ensureMigrationsTable(pool, schemaName, format) {
|
|
295
|
+
const { identifier, timestamp, timestampType } = format.columns;
|
|
296
|
+
const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
|
|
297
|
+
const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
|
|
298
|
+
await pool.query(`
|
|
299
|
+
CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
|
|
300
|
+
id SERIAL PRIMARY KEY,
|
|
301
|
+
${identifierCol},
|
|
302
|
+
${timestampCol}
|
|
303
|
+
)
|
|
304
|
+
`);
|
|
305
|
+
}
|
|
306
|
+
/**
|
|
307
|
+
* Check if the migrations table exists in a schema
|
|
308
|
+
*
|
|
309
|
+
* @param pool - Database pool to use
|
|
310
|
+
* @param schemaName - The schema to check
|
|
311
|
+
* @returns True if migrations table exists
|
|
312
|
+
*
|
|
313
|
+
* @example
|
|
314
|
+
* ```typescript
|
|
315
|
+
* const pool = await schemaManager.createPool('tenant_123');
|
|
316
|
+
* if (await schemaManager.migrationsTableExists(pool, 'tenant_123')) {
|
|
317
|
+
* console.log('Migrations table exists');
|
|
318
|
+
* }
|
|
319
|
+
* ```
|
|
320
|
+
*/
|
|
321
|
+
async migrationsTableExists(pool, schemaName) {
|
|
322
|
+
const result = await pool.query(
|
|
323
|
+
`SELECT 1 FROM information_schema.tables
|
|
324
|
+
WHERE table_schema = $1 AND table_name = $2`,
|
|
325
|
+
[schemaName, this.migrationsTable]
|
|
326
|
+
);
|
|
327
|
+
return result.rowCount !== null && result.rowCount > 0;
|
|
328
|
+
}
|
|
329
|
+
/**
|
|
330
|
+
* Get the configured migrations table name
|
|
331
|
+
*
|
|
332
|
+
* @returns The migrations table name
|
|
333
|
+
*/
|
|
334
|
+
getMigrationsTableName() {
|
|
335
|
+
return this.migrationsTable;
|
|
336
|
+
}
|
|
337
|
+
};
|
|
338
|
+
function createSchemaManager(config, migrationsTable) {
|
|
339
|
+
return new SchemaManager(config, migrationsTable);
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
// src/migrator/drift/column-analyzer.ts
|
|
343
|
+
async function introspectColumns(pool, schemaName, tableName) {
|
|
344
|
+
const result = await pool.query(
|
|
345
|
+
`SELECT
|
|
346
|
+
column_name,
|
|
347
|
+
data_type,
|
|
348
|
+
udt_name,
|
|
349
|
+
is_nullable,
|
|
350
|
+
column_default,
|
|
351
|
+
character_maximum_length,
|
|
352
|
+
numeric_precision,
|
|
353
|
+
numeric_scale,
|
|
354
|
+
ordinal_position
|
|
355
|
+
FROM information_schema.columns
|
|
356
|
+
WHERE table_schema = $1 AND table_name = $2
|
|
357
|
+
ORDER BY ordinal_position`,
|
|
358
|
+
[schemaName, tableName]
|
|
359
|
+
);
|
|
360
|
+
return result.rows.map((row) => ({
|
|
361
|
+
name: row.column_name,
|
|
362
|
+
dataType: row.data_type,
|
|
363
|
+
udtName: row.udt_name,
|
|
364
|
+
isNullable: row.is_nullable === "YES",
|
|
365
|
+
columnDefault: row.column_default,
|
|
366
|
+
characterMaximumLength: row.character_maximum_length,
|
|
367
|
+
numericPrecision: row.numeric_precision,
|
|
368
|
+
numericScale: row.numeric_scale,
|
|
369
|
+
ordinalPosition: row.ordinal_position
|
|
370
|
+
}));
|
|
371
|
+
}
|
|
372
|
+
function normalizeDefault(value) {
|
|
373
|
+
if (value === null) return null;
|
|
374
|
+
return value.replace(/^'(.+)'::.+$/, "$1").replace(/^(.+)::.+$/, "$1").trim();
|
|
375
|
+
}
|
|
376
|
+
function compareColumns(reference, target) {
|
|
377
|
+
const drifts = [];
|
|
378
|
+
const refColMap = new Map(reference.map((c) => [c.name, c]));
|
|
379
|
+
const targetColMap = new Map(target.map((c) => [c.name, c]));
|
|
380
|
+
for (const refCol of reference) {
|
|
381
|
+
const targetCol = targetColMap.get(refCol.name);
|
|
382
|
+
if (!targetCol) {
|
|
383
|
+
drifts.push({
|
|
384
|
+
column: refCol.name,
|
|
385
|
+
type: "missing",
|
|
386
|
+
expected: refCol.dataType,
|
|
387
|
+
description: `Column "${refCol.name}" (${refCol.dataType}) is missing`
|
|
388
|
+
});
|
|
389
|
+
continue;
|
|
390
|
+
}
|
|
391
|
+
if (refCol.udtName !== targetCol.udtName) {
|
|
392
|
+
drifts.push({
|
|
393
|
+
column: refCol.name,
|
|
394
|
+
type: "type_mismatch",
|
|
395
|
+
expected: refCol.udtName,
|
|
396
|
+
actual: targetCol.udtName,
|
|
397
|
+
description: `Column "${refCol.name}" type mismatch: expected "${refCol.udtName}", got "${targetCol.udtName}"`
|
|
398
|
+
});
|
|
399
|
+
}
|
|
400
|
+
if (refCol.isNullable !== targetCol.isNullable) {
|
|
401
|
+
drifts.push({
|
|
402
|
+
column: refCol.name,
|
|
403
|
+
type: "nullable_mismatch",
|
|
404
|
+
expected: refCol.isNullable,
|
|
405
|
+
actual: targetCol.isNullable,
|
|
406
|
+
description: `Column "${refCol.name}" nullable mismatch: expected ${refCol.isNullable ? "NULL" : "NOT NULL"}, got ${targetCol.isNullable ? "NULL" : "NOT NULL"}`
|
|
407
|
+
});
|
|
408
|
+
}
|
|
409
|
+
const normalizedRefDefault = normalizeDefault(refCol.columnDefault);
|
|
410
|
+
const normalizedTargetDefault = normalizeDefault(targetCol.columnDefault);
|
|
411
|
+
if (normalizedRefDefault !== normalizedTargetDefault) {
|
|
412
|
+
drifts.push({
|
|
413
|
+
column: refCol.name,
|
|
414
|
+
type: "default_mismatch",
|
|
415
|
+
expected: refCol.columnDefault,
|
|
416
|
+
actual: targetCol.columnDefault,
|
|
417
|
+
description: `Column "${refCol.name}" default mismatch: expected "${refCol.columnDefault ?? "none"}", got "${targetCol.columnDefault ?? "none"}"`
|
|
418
|
+
});
|
|
419
|
+
}
|
|
420
|
+
}
|
|
421
|
+
for (const targetCol of target) {
|
|
422
|
+
if (!refColMap.has(targetCol.name)) {
|
|
423
|
+
drifts.push({
|
|
424
|
+
column: targetCol.name,
|
|
425
|
+
type: "extra",
|
|
426
|
+
actual: targetCol.dataType,
|
|
427
|
+
description: `Extra column "${targetCol.name}" (${targetCol.dataType}) not in reference`
|
|
428
|
+
});
|
|
429
|
+
}
|
|
430
|
+
}
|
|
431
|
+
return drifts;
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
// src/migrator/drift/index-analyzer.ts
|
|
435
|
+
async function introspectIndexes(pool, schemaName, tableName) {
|
|
436
|
+
const indexResult = await pool.query(
|
|
437
|
+
`SELECT indexname, indexdef
|
|
438
|
+
FROM pg_indexes
|
|
439
|
+
WHERE schemaname = $1 AND tablename = $2
|
|
440
|
+
ORDER BY indexname`,
|
|
441
|
+
[schemaName, tableName]
|
|
442
|
+
);
|
|
443
|
+
const indexDetails = await pool.query(
|
|
444
|
+
`SELECT
|
|
445
|
+
i.relname as indexname,
|
|
446
|
+
a.attname as column_name,
|
|
447
|
+
ix.indisunique as is_unique,
|
|
448
|
+
ix.indisprimary as is_primary
|
|
449
|
+
FROM pg_class t
|
|
450
|
+
JOIN pg_index ix ON t.oid = ix.indrelid
|
|
451
|
+
JOIN pg_class i ON i.oid = ix.indexrelid
|
|
452
|
+
JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey)
|
|
453
|
+
JOIN pg_namespace n ON n.oid = t.relnamespace
|
|
454
|
+
WHERE n.nspname = $1 AND t.relname = $2
|
|
455
|
+
ORDER BY i.relname, a.attnum`,
|
|
456
|
+
[schemaName, tableName]
|
|
457
|
+
);
|
|
458
|
+
const indexColumnsMap = /* @__PURE__ */ new Map();
|
|
459
|
+
for (const row of indexDetails.rows) {
|
|
460
|
+
const existing = indexColumnsMap.get(row.indexname);
|
|
461
|
+
if (existing) {
|
|
462
|
+
existing.columns.push(row.column_name);
|
|
463
|
+
} else {
|
|
464
|
+
indexColumnsMap.set(row.indexname, {
|
|
465
|
+
columns: [row.column_name],
|
|
466
|
+
isUnique: row.is_unique,
|
|
467
|
+
isPrimary: row.is_primary
|
|
468
|
+
});
|
|
469
|
+
}
|
|
470
|
+
}
|
|
471
|
+
return indexResult.rows.map((row) => {
|
|
472
|
+
const details = indexColumnsMap.get(row.indexname);
|
|
473
|
+
return {
|
|
474
|
+
name: row.indexname,
|
|
475
|
+
columns: details?.columns ?? [],
|
|
476
|
+
isUnique: details?.isUnique ?? false,
|
|
477
|
+
isPrimary: details?.isPrimary ?? false,
|
|
478
|
+
definition: row.indexdef
|
|
479
|
+
};
|
|
480
|
+
});
|
|
481
|
+
}
|
|
482
|
+
function compareIndexes(reference, target) {
|
|
483
|
+
const drifts = [];
|
|
484
|
+
const refIndexMap = new Map(reference.map((i) => [i.name, i]));
|
|
485
|
+
const targetIndexMap = new Map(target.map((i) => [i.name, i]));
|
|
486
|
+
for (const refIndex of reference) {
|
|
487
|
+
const targetIndex = targetIndexMap.get(refIndex.name);
|
|
488
|
+
if (!targetIndex) {
|
|
489
|
+
drifts.push({
|
|
490
|
+
index: refIndex.name,
|
|
491
|
+
type: "missing",
|
|
492
|
+
expected: refIndex.definition,
|
|
493
|
+
description: `Index "${refIndex.name}" is missing`
|
|
494
|
+
});
|
|
495
|
+
continue;
|
|
496
|
+
}
|
|
497
|
+
const refCols = refIndex.columns.sort().join(",");
|
|
498
|
+
const targetCols = targetIndex.columns.sort().join(",");
|
|
499
|
+
if (refCols !== targetCols || refIndex.isUnique !== targetIndex.isUnique) {
|
|
500
|
+
drifts.push({
|
|
501
|
+
index: refIndex.name,
|
|
502
|
+
type: "definition_mismatch",
|
|
503
|
+
expected: refIndex.definition,
|
|
504
|
+
actual: targetIndex.definition,
|
|
505
|
+
description: `Index "${refIndex.name}" definition differs`
|
|
506
|
+
});
|
|
507
|
+
}
|
|
508
|
+
}
|
|
509
|
+
for (const targetIndex of target) {
|
|
510
|
+
if (!refIndexMap.has(targetIndex.name)) {
|
|
511
|
+
drifts.push({
|
|
512
|
+
index: targetIndex.name,
|
|
513
|
+
type: "extra",
|
|
514
|
+
actual: targetIndex.definition,
|
|
515
|
+
description: `Extra index "${targetIndex.name}" not in reference`
|
|
516
|
+
});
|
|
517
|
+
}
|
|
518
|
+
}
|
|
519
|
+
return drifts;
|
|
520
|
+
}
|
|
521
|
+
|
|
522
|
+
// src/migrator/drift/constraint-analyzer.ts
|
|
523
|
+
async function introspectConstraints(pool, schemaName, tableName) {
|
|
524
|
+
const result = await pool.query(
|
|
525
|
+
`SELECT
|
|
526
|
+
tc.constraint_name,
|
|
527
|
+
tc.constraint_type,
|
|
528
|
+
kcu.column_name,
|
|
529
|
+
ccu.table_schema as foreign_table_schema,
|
|
530
|
+
ccu.table_name as foreign_table_name,
|
|
531
|
+
ccu.column_name as foreign_column_name,
|
|
532
|
+
cc.check_clause
|
|
533
|
+
FROM information_schema.table_constraints tc
|
|
534
|
+
LEFT JOIN information_schema.key_column_usage kcu
|
|
535
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
536
|
+
AND tc.table_schema = kcu.table_schema
|
|
537
|
+
LEFT JOIN information_schema.constraint_column_usage ccu
|
|
538
|
+
ON tc.constraint_name = ccu.constraint_name
|
|
539
|
+
AND tc.constraint_type = 'FOREIGN KEY'
|
|
540
|
+
LEFT JOIN information_schema.check_constraints cc
|
|
541
|
+
ON tc.constraint_name = cc.constraint_name
|
|
542
|
+
AND tc.constraint_type = 'CHECK'
|
|
543
|
+
WHERE tc.table_schema = $1 AND tc.table_name = $2
|
|
544
|
+
ORDER BY tc.constraint_name, kcu.ordinal_position`,
|
|
545
|
+
[schemaName, tableName]
|
|
546
|
+
);
|
|
547
|
+
const constraintMap = /* @__PURE__ */ new Map();
|
|
548
|
+
for (const row of result.rows) {
|
|
549
|
+
const existing = constraintMap.get(row.constraint_name);
|
|
550
|
+
if (existing) {
|
|
551
|
+
if (row.column_name && !existing.columns.includes(row.column_name)) {
|
|
552
|
+
existing.columns.push(row.column_name);
|
|
553
|
+
}
|
|
554
|
+
if (row.foreign_column_name && existing.foreignColumns && !existing.foreignColumns.includes(row.foreign_column_name)) {
|
|
555
|
+
existing.foreignColumns.push(row.foreign_column_name);
|
|
556
|
+
}
|
|
557
|
+
} else {
|
|
558
|
+
const constraint = {
|
|
559
|
+
name: row.constraint_name,
|
|
560
|
+
type: row.constraint_type,
|
|
561
|
+
columns: row.column_name ? [row.column_name] : []
|
|
562
|
+
};
|
|
563
|
+
if (row.foreign_table_name) {
|
|
564
|
+
constraint.foreignTable = row.foreign_table_name;
|
|
565
|
+
}
|
|
566
|
+
if (row.foreign_column_name) {
|
|
567
|
+
constraint.foreignColumns = [row.foreign_column_name];
|
|
568
|
+
}
|
|
569
|
+
if (row.check_clause) {
|
|
570
|
+
constraint.checkExpression = row.check_clause;
|
|
571
|
+
}
|
|
572
|
+
constraintMap.set(row.constraint_name, constraint);
|
|
573
|
+
}
|
|
574
|
+
}
|
|
575
|
+
return Array.from(constraintMap.values());
|
|
576
|
+
}
|
|
577
|
+
function compareConstraints(reference, target) {
|
|
578
|
+
const drifts = [];
|
|
579
|
+
const refConstraintMap = new Map(reference.map((c) => [c.name, c]));
|
|
580
|
+
const targetConstraintMap = new Map(target.map((c) => [c.name, c]));
|
|
581
|
+
for (const refConstraint of reference) {
|
|
582
|
+
const targetConstraint = targetConstraintMap.get(refConstraint.name);
|
|
583
|
+
if (!targetConstraint) {
|
|
584
|
+
drifts.push({
|
|
585
|
+
constraint: refConstraint.name,
|
|
586
|
+
type: "missing",
|
|
587
|
+
expected: `${refConstraint.type} on (${refConstraint.columns.join(", ")})`,
|
|
588
|
+
description: `Constraint "${refConstraint.name}" (${refConstraint.type}) is missing`
|
|
589
|
+
});
|
|
590
|
+
continue;
|
|
591
|
+
}
|
|
592
|
+
const refCols = refConstraint.columns.sort().join(",");
|
|
593
|
+
const targetCols = targetConstraint.columns.sort().join(",");
|
|
594
|
+
if (refConstraint.type !== targetConstraint.type || refCols !== targetCols) {
|
|
595
|
+
drifts.push({
|
|
596
|
+
constraint: refConstraint.name,
|
|
597
|
+
type: "definition_mismatch",
|
|
598
|
+
expected: `${refConstraint.type} on (${refConstraint.columns.join(", ")})`,
|
|
599
|
+
actual: `${targetConstraint.type} on (${targetConstraint.columns.join(", ")})`,
|
|
600
|
+
description: `Constraint "${refConstraint.name}" definition differs`
|
|
601
|
+
});
|
|
602
|
+
}
|
|
603
|
+
}
|
|
604
|
+
for (const targetConstraint of target) {
|
|
605
|
+
if (!refConstraintMap.has(targetConstraint.name)) {
|
|
606
|
+
drifts.push({
|
|
607
|
+
constraint: targetConstraint.name,
|
|
608
|
+
type: "extra",
|
|
609
|
+
actual: `${targetConstraint.type} on (${targetConstraint.columns.join(", ")})`,
|
|
610
|
+
description: `Extra constraint "${targetConstraint.name}" (${targetConstraint.type}) not in reference`
|
|
611
|
+
});
|
|
612
|
+
}
|
|
613
|
+
}
|
|
614
|
+
return drifts;
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
// src/migrator/drift/drift-detector.ts
|
|
618
|
+
var DEFAULT_MIGRATIONS_TABLE2 = "__drizzle_migrations";
|
|
619
|
+
var DriftDetector = class {
|
|
620
|
+
constructor(tenantConfig, schemaManager, driftConfig) {
|
|
621
|
+
this.tenantConfig = tenantConfig;
|
|
622
|
+
this.schemaManager = schemaManager;
|
|
623
|
+
this.driftConfig = driftConfig;
|
|
624
|
+
this.migrationsTable = driftConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE2;
|
|
625
|
+
}
|
|
626
|
+
migrationsTable;
|
|
627
|
+
/**
|
|
628
|
+
* Get the schema name for a tenant ID
|
|
629
|
+
*/
|
|
630
|
+
getSchemaName(tenantId) {
|
|
631
|
+
return this.tenantConfig.isolation.schemaNameTemplate(tenantId);
|
|
632
|
+
}
|
|
633
|
+
/**
|
|
634
|
+
* Create a pool for a schema
|
|
635
|
+
*/
|
|
636
|
+
async createPool(schemaName) {
|
|
637
|
+
return this.schemaManager.createPool(schemaName);
|
|
638
|
+
}
|
|
639
|
+
/**
|
|
640
|
+
* Detect schema drift across all tenants.
|
|
641
|
+
*
|
|
642
|
+
* Compares each tenant's schema against a reference tenant (first tenant by default).
|
|
643
|
+
* Returns a comprehensive report of all differences found.
|
|
644
|
+
*
|
|
645
|
+
* @param options - Detection options
|
|
646
|
+
* @returns Schema drift status with details for each tenant
|
|
647
|
+
*
|
|
648
|
+
* @example
|
|
649
|
+
* ```typescript
|
|
650
|
+
* // Basic usage - compare all tenants against the first one
|
|
651
|
+
* const status = await detector.detectDrift();
|
|
652
|
+
*
|
|
653
|
+
* // Use a specific tenant as reference
|
|
654
|
+
* const status = await detector.detectDrift({
|
|
655
|
+
* referenceTenant: 'golden-tenant',
|
|
656
|
+
* });
|
|
657
|
+
*
|
|
658
|
+
* // Check specific tenants only
|
|
659
|
+
* const status = await detector.detectDrift({
|
|
660
|
+
* tenantIds: ['tenant-1', 'tenant-2'],
|
|
661
|
+
* });
|
|
662
|
+
*
|
|
663
|
+
* // Skip index and constraint comparison for faster checks
|
|
664
|
+
* const status = await detector.detectDrift({
|
|
665
|
+
* includeIndexes: false,
|
|
666
|
+
* includeConstraints: false,
|
|
667
|
+
* });
|
|
668
|
+
* ```
|
|
669
|
+
*/
|
|
670
|
+
async detectDrift(options = {}) {
|
|
671
|
+
const startTime = Date.now();
|
|
672
|
+
const {
|
|
673
|
+
concurrency = 10,
|
|
674
|
+
includeIndexes = true,
|
|
675
|
+
includeConstraints = true,
|
|
676
|
+
excludeTables = [this.migrationsTable],
|
|
677
|
+
onProgress
|
|
678
|
+
} = options;
|
|
679
|
+
const tenantIds = options.tenantIds ?? await this.driftConfig.tenantDiscovery();
|
|
680
|
+
if (tenantIds.length === 0) {
|
|
681
|
+
return {
|
|
682
|
+
referenceTenant: "",
|
|
683
|
+
total: 0,
|
|
684
|
+
noDrift: 0,
|
|
685
|
+
withDrift: 0,
|
|
686
|
+
error: 0,
|
|
687
|
+
details: [],
|
|
688
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
|
|
689
|
+
durationMs: Date.now() - startTime
|
|
690
|
+
};
|
|
691
|
+
}
|
|
692
|
+
const referenceTenant = options.referenceTenant ?? tenantIds[0];
|
|
693
|
+
onProgress?.(referenceTenant, "starting");
|
|
694
|
+
onProgress?.(referenceTenant, "introspecting");
|
|
695
|
+
const referenceSchema = await this.introspectSchema(referenceTenant, {
|
|
696
|
+
includeIndexes,
|
|
697
|
+
includeConstraints,
|
|
698
|
+
excludeTables
|
|
699
|
+
});
|
|
700
|
+
if (!referenceSchema) {
|
|
701
|
+
return {
|
|
702
|
+
referenceTenant,
|
|
703
|
+
total: tenantIds.length,
|
|
704
|
+
noDrift: 0,
|
|
705
|
+
withDrift: 0,
|
|
706
|
+
error: tenantIds.length,
|
|
707
|
+
details: tenantIds.map((id) => ({
|
|
708
|
+
tenantId: id,
|
|
709
|
+
schemaName: this.getSchemaName(id),
|
|
710
|
+
hasDrift: false,
|
|
711
|
+
tables: [],
|
|
712
|
+
issueCount: 0,
|
|
713
|
+
error: id === referenceTenant ? "Failed to introspect reference tenant" : "Reference tenant introspection failed"
|
|
714
|
+
})),
|
|
715
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
|
|
716
|
+
durationMs: Date.now() - startTime
|
|
717
|
+
};
|
|
718
|
+
}
|
|
719
|
+
onProgress?.(referenceTenant, "completed");
|
|
720
|
+
const tenantsToCheck = tenantIds.filter((id) => id !== referenceTenant);
|
|
721
|
+
const results = [];
|
|
722
|
+
results.push({
|
|
723
|
+
tenantId: referenceTenant,
|
|
724
|
+
schemaName: referenceSchema.schemaName,
|
|
725
|
+
hasDrift: false,
|
|
726
|
+
tables: [],
|
|
727
|
+
issueCount: 0
|
|
728
|
+
});
|
|
729
|
+
for (let i = 0; i < tenantsToCheck.length; i += concurrency) {
|
|
730
|
+
const batch = tenantsToCheck.slice(i, i + concurrency);
|
|
731
|
+
const batchResults = await Promise.all(
|
|
732
|
+
batch.map(async (tenantId) => {
|
|
733
|
+
try {
|
|
734
|
+
onProgress?.(tenantId, "starting");
|
|
735
|
+
onProgress?.(tenantId, "introspecting");
|
|
736
|
+
const tenantSchema = await this.introspectSchema(tenantId, {
|
|
737
|
+
includeIndexes,
|
|
738
|
+
includeConstraints,
|
|
739
|
+
excludeTables
|
|
740
|
+
});
|
|
741
|
+
if (!tenantSchema) {
|
|
742
|
+
onProgress?.(tenantId, "failed");
|
|
743
|
+
return {
|
|
744
|
+
tenantId,
|
|
745
|
+
schemaName: this.getSchemaName(tenantId),
|
|
746
|
+
hasDrift: false,
|
|
747
|
+
tables: [],
|
|
748
|
+
issueCount: 0,
|
|
749
|
+
error: "Failed to introspect schema"
|
|
750
|
+
};
|
|
751
|
+
}
|
|
752
|
+
onProgress?.(tenantId, "comparing");
|
|
753
|
+
const drift = this.compareSchemas(referenceSchema, tenantSchema, {
|
|
754
|
+
includeIndexes,
|
|
755
|
+
includeConstraints
|
|
756
|
+
});
|
|
757
|
+
onProgress?.(tenantId, "completed");
|
|
758
|
+
return drift;
|
|
464
759
|
} catch (error) {
|
|
465
760
|
onProgress?.(tenantId, "failed");
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
761
|
+
return {
|
|
762
|
+
tenantId,
|
|
763
|
+
schemaName: this.getSchemaName(tenantId),
|
|
764
|
+
hasDrift: false,
|
|
765
|
+
tables: [],
|
|
766
|
+
issueCount: 0,
|
|
767
|
+
error: error.message
|
|
768
|
+
};
|
|
471
769
|
}
|
|
472
770
|
})
|
|
473
771
|
);
|
|
474
772
|
results.push(...batchResults);
|
|
475
773
|
}
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
774
|
+
return {
|
|
775
|
+
referenceTenant,
|
|
776
|
+
total: results.length,
|
|
777
|
+
noDrift: results.filter((r) => !r.hasDrift && !r.error).length,
|
|
778
|
+
withDrift: results.filter((r) => r.hasDrift && !r.error).length,
|
|
779
|
+
error: results.filter((r) => !!r.error).length,
|
|
780
|
+
details: results,
|
|
781
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
|
|
782
|
+
durationMs: Date.now() - startTime
|
|
783
|
+
};
|
|
483
784
|
}
|
|
484
785
|
/**
|
|
485
|
-
*
|
|
486
|
-
*
|
|
786
|
+
* Compare a specific tenant against a reference tenant.
|
|
787
|
+
*
|
|
788
|
+
* @param tenantId - Tenant to check
|
|
789
|
+
* @param referenceTenantId - Tenant to use as reference
|
|
790
|
+
* @param options - Introspection options
|
|
791
|
+
* @returns Drift details for the tenant
|
|
792
|
+
*
|
|
793
|
+
* @example
|
|
794
|
+
* ```typescript
|
|
795
|
+
* const drift = await detector.compareTenant('tenant-123', 'golden-tenant');
|
|
796
|
+
* if (drift.hasDrift) {
|
|
797
|
+
* console.log(`Found ${drift.issueCount} issues`);
|
|
798
|
+
* }
|
|
799
|
+
* ```
|
|
487
800
|
*/
|
|
488
|
-
async
|
|
489
|
-
const
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
801
|
+
async compareTenant(tenantId, referenceTenantId, options = {}) {
|
|
802
|
+
const {
|
|
803
|
+
includeIndexes = true,
|
|
804
|
+
includeConstraints = true,
|
|
805
|
+
excludeTables = [this.migrationsTable]
|
|
806
|
+
} = options;
|
|
807
|
+
const referenceSchema = await this.introspectSchema(referenceTenantId, {
|
|
808
|
+
includeIndexes,
|
|
809
|
+
includeConstraints,
|
|
810
|
+
excludeTables
|
|
811
|
+
});
|
|
812
|
+
if (!referenceSchema) {
|
|
813
|
+
return {
|
|
814
|
+
tenantId,
|
|
815
|
+
schemaName: this.getSchemaName(tenantId),
|
|
816
|
+
hasDrift: false,
|
|
817
|
+
tables: [],
|
|
818
|
+
issueCount: 0,
|
|
819
|
+
error: "Failed to introspect reference tenant"
|
|
820
|
+
};
|
|
494
821
|
}
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
822
|
+
const tenantSchema = await this.introspectSchema(tenantId, {
|
|
823
|
+
includeIndexes,
|
|
824
|
+
includeConstraints,
|
|
825
|
+
excludeTables
|
|
826
|
+
});
|
|
827
|
+
if (!tenantSchema) {
|
|
828
|
+
return {
|
|
829
|
+
tenantId,
|
|
830
|
+
schemaName: this.getSchemaName(tenantId),
|
|
831
|
+
hasDrift: false,
|
|
832
|
+
tables: [],
|
|
833
|
+
issueCount: 0,
|
|
834
|
+
error: "Failed to introspect tenant schema"
|
|
835
|
+
};
|
|
836
|
+
}
|
|
837
|
+
return this.compareSchemas(referenceSchema, tenantSchema, {
|
|
838
|
+
includeIndexes,
|
|
839
|
+
includeConstraints
|
|
840
|
+
});
|
|
502
841
|
}
|
|
503
842
|
/**
|
|
504
|
-
*
|
|
843
|
+
* Introspect a tenant's schema structure.
|
|
844
|
+
*
|
|
845
|
+
* Retrieves all tables, columns, indexes, and constraints
|
|
846
|
+
* for a tenant's schema.
|
|
847
|
+
*
|
|
848
|
+
* @param tenantId - Tenant to introspect
|
|
849
|
+
* @param options - Introspection options
|
|
850
|
+
* @returns Schema structure or null if introspection fails
|
|
851
|
+
*
|
|
852
|
+
* @example
|
|
853
|
+
* ```typescript
|
|
854
|
+
* const schema = await detector.introspectSchema('tenant-123');
|
|
855
|
+
* if (schema) {
|
|
856
|
+
* console.log(`Found ${schema.tables.length} tables`);
|
|
857
|
+
* for (const table of schema.tables) {
|
|
858
|
+
* console.log(` ${table.name}: ${table.columns.length} columns`);
|
|
859
|
+
* }
|
|
860
|
+
* }
|
|
861
|
+
* ```
|
|
505
862
|
*/
|
|
506
|
-
async
|
|
507
|
-
const schemaName = this.
|
|
863
|
+
async introspectSchema(tenantId, options = {}) {
|
|
864
|
+
const schemaName = this.getSchemaName(tenantId);
|
|
508
865
|
const pool = await this.createPool(schemaName);
|
|
509
866
|
try {
|
|
510
|
-
const
|
|
511
|
-
const migrationNames = new Set(allMigrations.map((m) => m.name));
|
|
512
|
-
const migrationHashes = new Set(allMigrations.map((m) => m.hash));
|
|
513
|
-
const tableExists = await this.migrationsTableExists(pool, schemaName);
|
|
514
|
-
if (!tableExists) {
|
|
515
|
-
return {
|
|
516
|
-
tenantId,
|
|
517
|
-
schemaName,
|
|
518
|
-
missing: allMigrations.map((m) => m.name),
|
|
519
|
-
orphans: [],
|
|
520
|
-
inSync: allMigrations.length === 0,
|
|
521
|
-
format: null
|
|
522
|
-
};
|
|
523
|
-
}
|
|
524
|
-
const format = await this.getOrDetectFormat(pool, schemaName);
|
|
525
|
-
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
526
|
-
const appliedIdentifiers = new Set(applied.map((m) => m.identifier));
|
|
527
|
-
const missing = allMigrations.filter((m) => !this.isMigrationApplied(m, appliedIdentifiers, format)).map((m) => m.name);
|
|
528
|
-
const orphans = applied.filter((m) => {
|
|
529
|
-
if (format.columns.identifier === "name") {
|
|
530
|
-
return !migrationNames.has(m.identifier);
|
|
531
|
-
}
|
|
532
|
-
return !migrationHashes.has(m.identifier) && !migrationNames.has(m.identifier);
|
|
533
|
-
}).map((m) => m.identifier);
|
|
534
|
-
return {
|
|
535
|
-
tenantId,
|
|
536
|
-
schemaName,
|
|
537
|
-
missing,
|
|
538
|
-
orphans,
|
|
539
|
-
inSync: missing.length === 0 && orphans.length === 0,
|
|
540
|
-
format: format.format
|
|
541
|
-
};
|
|
542
|
-
} catch (error) {
|
|
867
|
+
const tables = await this.introspectTables(pool, schemaName, options);
|
|
543
868
|
return {
|
|
544
869
|
tenantId,
|
|
545
870
|
schemaName,
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
inSync: false,
|
|
549
|
-
format: null,
|
|
550
|
-
error: error.message
|
|
871
|
+
tables,
|
|
872
|
+
introspectedAt: /* @__PURE__ */ new Date()
|
|
551
873
|
};
|
|
874
|
+
} catch {
|
|
875
|
+
return null;
|
|
552
876
|
} finally {
|
|
553
877
|
await pool.end();
|
|
554
878
|
}
|
|
555
879
|
}
|
|
556
880
|
/**
|
|
557
|
-
*
|
|
881
|
+
* Compare two schema snapshots.
|
|
882
|
+
*
|
|
883
|
+
* This method compares pre-introspected schema snapshots,
|
|
884
|
+
* useful when you already have the schema data available.
|
|
885
|
+
*
|
|
886
|
+
* @param reference - Reference (expected) schema
|
|
887
|
+
* @param target - Target (actual) schema
|
|
888
|
+
* @param options - Comparison options
|
|
889
|
+
* @returns Drift details
|
|
890
|
+
*
|
|
891
|
+
* @example
|
|
892
|
+
* ```typescript
|
|
893
|
+
* const refSchema = await detector.introspectSchema('golden-tenant');
|
|
894
|
+
* const targetSchema = await detector.introspectSchema('tenant-123');
|
|
895
|
+
*
|
|
896
|
+
* if (refSchema && targetSchema) {
|
|
897
|
+
* const drift = detector.compareSchemas(refSchema, targetSchema);
|
|
898
|
+
* console.log(`Drift detected: ${drift.hasDrift}`);
|
|
899
|
+
* }
|
|
900
|
+
* ```
|
|
558
901
|
*/
|
|
559
|
-
|
|
560
|
-
const
|
|
561
|
-
const
|
|
562
|
-
|
|
563
|
-
const
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
902
|
+
compareSchemas(reference, target, options = {}) {
|
|
903
|
+
const { includeIndexes = true, includeConstraints = true } = options;
|
|
904
|
+
const tableDrifts = [];
|
|
905
|
+
let totalIssues = 0;
|
|
906
|
+
const refTableMap = new Map(reference.tables.map((t) => [t.name, t]));
|
|
907
|
+
const targetTableMap = new Map(target.tables.map((t) => [t.name, t]));
|
|
908
|
+
for (const refTable of reference.tables) {
|
|
909
|
+
const targetTable = targetTableMap.get(refTable.name);
|
|
910
|
+
if (!targetTable) {
|
|
911
|
+
tableDrifts.push({
|
|
912
|
+
table: refTable.name,
|
|
913
|
+
status: "missing",
|
|
914
|
+
columns: refTable.columns.map((c) => ({
|
|
915
|
+
column: c.name,
|
|
916
|
+
type: "missing",
|
|
917
|
+
expected: c.dataType,
|
|
918
|
+
description: `Column "${c.name}" (${c.dataType}) is missing`
|
|
919
|
+
})),
|
|
920
|
+
indexes: [],
|
|
921
|
+
constraints: []
|
|
922
|
+
});
|
|
923
|
+
totalIssues += refTable.columns.length;
|
|
924
|
+
continue;
|
|
576
925
|
}
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
926
|
+
const columnDrifts = compareColumns(refTable.columns, targetTable.columns);
|
|
927
|
+
const indexDrifts = includeIndexes ? compareIndexes(refTable.indexes, targetTable.indexes) : [];
|
|
928
|
+
const constraintDrifts = includeConstraints ? compareConstraints(refTable.constraints, targetTable.constraints) : [];
|
|
929
|
+
const issues = columnDrifts.length + indexDrifts.length + constraintDrifts.length;
|
|
930
|
+
totalIssues += issues;
|
|
931
|
+
if (issues > 0) {
|
|
932
|
+
tableDrifts.push({
|
|
933
|
+
table: refTable.name,
|
|
934
|
+
status: "drifted",
|
|
935
|
+
columns: columnDrifts,
|
|
936
|
+
indexes: indexDrifts,
|
|
937
|
+
constraints: constraintDrifts
|
|
938
|
+
});
|
|
586
939
|
}
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
940
|
+
}
|
|
941
|
+
for (const targetTable of target.tables) {
|
|
942
|
+
if (!refTableMap.has(targetTable.name)) {
|
|
943
|
+
tableDrifts.push({
|
|
944
|
+
table: targetTable.name,
|
|
945
|
+
status: "extra",
|
|
946
|
+
columns: targetTable.columns.map((c) => ({
|
|
947
|
+
column: c.name,
|
|
948
|
+
type: "extra",
|
|
949
|
+
actual: c.dataType,
|
|
950
|
+
description: `Extra column "${c.name}" (${c.dataType}) not in reference`
|
|
951
|
+
})),
|
|
952
|
+
indexes: [],
|
|
953
|
+
constraints: []
|
|
954
|
+
});
|
|
955
|
+
totalIssues += targetTable.columns.length;
|
|
956
|
+
}
|
|
957
|
+
}
|
|
958
|
+
return {
|
|
959
|
+
tenantId: target.tenantId,
|
|
960
|
+
schemaName: target.schemaName,
|
|
961
|
+
hasDrift: totalIssues > 0,
|
|
962
|
+
tables: tableDrifts,
|
|
963
|
+
issueCount: totalIssues
|
|
964
|
+
};
|
|
965
|
+
}
|
|
966
|
+
/**
|
|
967
|
+
* Introspect all tables in a schema
|
|
968
|
+
*/
|
|
969
|
+
async introspectTables(pool, schemaName, options) {
|
|
970
|
+
const { includeIndexes = true, includeConstraints = true, excludeTables = [] } = options;
|
|
971
|
+
const tablesResult = await pool.query(
|
|
972
|
+
`SELECT table_name
|
|
973
|
+
FROM information_schema.tables
|
|
974
|
+
WHERE table_schema = $1
|
|
975
|
+
AND table_type = 'BASE TABLE'
|
|
976
|
+
ORDER BY table_name`,
|
|
977
|
+
[schemaName]
|
|
978
|
+
);
|
|
979
|
+
const tables = [];
|
|
980
|
+
for (const row of tablesResult.rows) {
|
|
981
|
+
if (excludeTables.includes(row.table_name)) {
|
|
982
|
+
continue;
|
|
596
983
|
}
|
|
984
|
+
const columns = await introspectColumns(pool, schemaName, row.table_name);
|
|
985
|
+
const indexes = includeIndexes ? await introspectIndexes(pool, schemaName, row.table_name) : [];
|
|
986
|
+
const constraints = includeConstraints ? await introspectConstraints(pool, schemaName, row.table_name) : [];
|
|
987
|
+
tables.push({
|
|
988
|
+
name: row.table_name,
|
|
989
|
+
columns,
|
|
990
|
+
indexes,
|
|
991
|
+
constraints
|
|
992
|
+
});
|
|
993
|
+
}
|
|
994
|
+
return tables;
|
|
995
|
+
}
|
|
996
|
+
};
|
|
997
|
+
var Seeder = class {
|
|
998
|
+
constructor(config, deps) {
|
|
999
|
+
this.config = config;
|
|
1000
|
+
this.deps = deps;
|
|
1001
|
+
}
|
|
1002
|
+
/**
|
|
1003
|
+
* Seed a single tenant with initial data
|
|
1004
|
+
*
|
|
1005
|
+
* Creates a database connection for the tenant, executes the seed function,
|
|
1006
|
+
* and properly cleans up the connection afterward.
|
|
1007
|
+
*
|
|
1008
|
+
* @param tenantId - The tenant identifier
|
|
1009
|
+
* @param seedFn - Function that seeds the database
|
|
1010
|
+
* @returns Result of the seeding operation
|
|
1011
|
+
*
|
|
1012
|
+
* @example
|
|
1013
|
+
* ```typescript
|
|
1014
|
+
* const result = await seeder.seedTenant('tenant-123', async (db, tenantId) => {
|
|
1015
|
+
* await db.insert(users).values([
|
|
1016
|
+
* { name: 'Admin', email: `admin@${tenantId}.com` },
|
|
1017
|
+
* ]);
|
|
1018
|
+
* });
|
|
1019
|
+
*
|
|
1020
|
+
* if (result.success) {
|
|
1021
|
+
* console.log(`Seeded ${result.tenantId} in ${result.durationMs}ms`);
|
|
1022
|
+
* }
|
|
1023
|
+
* ```
|
|
1024
|
+
*/
|
|
1025
|
+
async seedTenant(tenantId, seedFn) {
|
|
1026
|
+
const startTime = Date.now();
|
|
1027
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1028
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1029
|
+
try {
|
|
1030
|
+
const db = drizzle(pool, {
|
|
1031
|
+
schema: this.deps.tenantSchema
|
|
1032
|
+
});
|
|
1033
|
+
await seedFn(db, tenantId);
|
|
597
1034
|
return {
|
|
598
1035
|
tenantId,
|
|
599
1036
|
schemaName,
|
|
600
1037
|
success: true,
|
|
601
|
-
markedMigrations,
|
|
602
|
-
removedOrphans: [],
|
|
603
1038
|
durationMs: Date.now() - startTime
|
|
604
1039
|
};
|
|
605
1040
|
} catch (error) {
|
|
@@ -607,8 +1042,6 @@ var Migrator = class {
|
|
|
607
1042
|
tenantId,
|
|
608
1043
|
schemaName,
|
|
609
1044
|
success: false,
|
|
610
|
-
markedMigrations,
|
|
611
|
-
removedOrphans: [],
|
|
612
1045
|
error: error.message,
|
|
613
1046
|
durationMs: Date.now() - startTime
|
|
614
1047
|
};
|
|
@@ -617,11 +1050,365 @@ var Migrator = class {
|
|
|
617
1050
|
}
|
|
618
1051
|
}
|
|
619
1052
|
/**
|
|
620
|
-
*
|
|
1053
|
+
* Seed all tenants with initial data in parallel
|
|
1054
|
+
*
|
|
1055
|
+
* Discovers all tenants and seeds them in batches with configurable concurrency.
|
|
1056
|
+
* Supports progress callbacks and abort-on-error behavior.
|
|
1057
|
+
*
|
|
1058
|
+
* @param seedFn - Function that seeds each database
|
|
1059
|
+
* @param options - Seeding options
|
|
1060
|
+
* @returns Aggregate results of all seeding operations
|
|
1061
|
+
*
|
|
1062
|
+
* @example
|
|
1063
|
+
* ```typescript
|
|
1064
|
+
* const results = await seeder.seedAll(
|
|
1065
|
+
* async (db, tenantId) => {
|
|
1066
|
+
* await db.insert(settings).values({ key: 'initialized', value: 'true' });
|
|
1067
|
+
* },
|
|
1068
|
+
* {
|
|
1069
|
+
* concurrency: 5,
|
|
1070
|
+
* onProgress: (id, status) => console.log(`${id}: ${status}`),
|
|
1071
|
+
* }
|
|
1072
|
+
* );
|
|
1073
|
+
*
|
|
1074
|
+
* console.log(`Succeeded: ${results.succeeded}/${results.total}`);
|
|
1075
|
+
* ```
|
|
621
1076
|
*/
|
|
622
|
-
async
|
|
1077
|
+
async seedAll(seedFn, options = {}) {
|
|
623
1078
|
const { concurrency = 10, onProgress, onError } = options;
|
|
624
|
-
const tenantIds = await this.
|
|
1079
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
1080
|
+
const results = [];
|
|
1081
|
+
let aborted = false;
|
|
1082
|
+
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
1083
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
1084
|
+
const batchResults = await Promise.all(
|
|
1085
|
+
batch.map(async (tenantId) => {
|
|
1086
|
+
if (aborted) {
|
|
1087
|
+
return this.createSkippedResult(tenantId);
|
|
1088
|
+
}
|
|
1089
|
+
try {
|
|
1090
|
+
onProgress?.(tenantId, "starting");
|
|
1091
|
+
onProgress?.(tenantId, "seeding");
|
|
1092
|
+
const result = await this.seedTenant(tenantId, seedFn);
|
|
1093
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
1094
|
+
return result;
|
|
1095
|
+
} catch (error) {
|
|
1096
|
+
onProgress?.(tenantId, "failed");
|
|
1097
|
+
const action = onError?.(tenantId, error);
|
|
1098
|
+
if (action === "abort") {
|
|
1099
|
+
aborted = true;
|
|
1100
|
+
}
|
|
1101
|
+
return this.createErrorResult(tenantId, error);
|
|
1102
|
+
}
|
|
1103
|
+
})
|
|
1104
|
+
);
|
|
1105
|
+
results.push(...batchResults);
|
|
1106
|
+
}
|
|
1107
|
+
if (aborted) {
|
|
1108
|
+
const remaining = tenantIds.slice(results.length);
|
|
1109
|
+
for (const tenantId of remaining) {
|
|
1110
|
+
results.push(this.createSkippedResult(tenantId));
|
|
1111
|
+
}
|
|
1112
|
+
}
|
|
1113
|
+
return this.aggregateResults(results);
|
|
1114
|
+
}
|
|
1115
|
+
/**
|
|
1116
|
+
* Seed specific tenants with initial data
|
|
1117
|
+
*
|
|
1118
|
+
* Seeds only the specified tenants in batches with configurable concurrency.
|
|
1119
|
+
*
|
|
1120
|
+
* @param tenantIds - List of tenant IDs to seed
|
|
1121
|
+
* @param seedFn - Function that seeds each database
|
|
1122
|
+
* @param options - Seeding options
|
|
1123
|
+
* @returns Aggregate results of seeding operations
|
|
1124
|
+
*
|
|
1125
|
+
* @example
|
|
1126
|
+
* ```typescript
|
|
1127
|
+
* const results = await seeder.seedTenants(
|
|
1128
|
+
* ['tenant-1', 'tenant-2', 'tenant-3'],
|
|
1129
|
+
* async (db) => {
|
|
1130
|
+
* await db.insert(config).values({ setup: true });
|
|
1131
|
+
* },
|
|
1132
|
+
* { concurrency: 2 }
|
|
1133
|
+
* );
|
|
1134
|
+
* ```
|
|
1135
|
+
*/
|
|
1136
|
+
async seedTenants(tenantIds, seedFn, options = {}) {
|
|
1137
|
+
const { concurrency = 10, onProgress, onError } = options;
|
|
1138
|
+
const results = [];
|
|
1139
|
+
for (let i = 0; i < tenantIds.length; i += concurrency) {
|
|
1140
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
1141
|
+
const batchResults = await Promise.all(
|
|
1142
|
+
batch.map(async (tenantId) => {
|
|
1143
|
+
try {
|
|
1144
|
+
onProgress?.(tenantId, "starting");
|
|
1145
|
+
onProgress?.(tenantId, "seeding");
|
|
1146
|
+
const result = await this.seedTenant(tenantId, seedFn);
|
|
1147
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
1148
|
+
return result;
|
|
1149
|
+
} catch (error) {
|
|
1150
|
+
onProgress?.(tenantId, "failed");
|
|
1151
|
+
onError?.(tenantId, error);
|
|
1152
|
+
return this.createErrorResult(tenantId, error);
|
|
1153
|
+
}
|
|
1154
|
+
})
|
|
1155
|
+
);
|
|
1156
|
+
results.push(...batchResults);
|
|
1157
|
+
}
|
|
1158
|
+
return this.aggregateResults(results);
|
|
1159
|
+
}
|
|
1160
|
+
/**
|
|
1161
|
+
* Create a skipped result for aborted seeding
|
|
1162
|
+
*/
|
|
1163
|
+
createSkippedResult(tenantId) {
|
|
1164
|
+
return {
|
|
1165
|
+
tenantId,
|
|
1166
|
+
schemaName: this.deps.schemaNameTemplate(tenantId),
|
|
1167
|
+
success: false,
|
|
1168
|
+
error: "Skipped due to abort",
|
|
1169
|
+
durationMs: 0
|
|
1170
|
+
};
|
|
1171
|
+
}
|
|
1172
|
+
/**
|
|
1173
|
+
* Create an error result for failed seeding
|
|
1174
|
+
*/
|
|
1175
|
+
createErrorResult(tenantId, error) {
|
|
1176
|
+
return {
|
|
1177
|
+
tenantId,
|
|
1178
|
+
schemaName: this.deps.schemaNameTemplate(tenantId),
|
|
1179
|
+
success: false,
|
|
1180
|
+
error: error.message,
|
|
1181
|
+
durationMs: 0
|
|
1182
|
+
};
|
|
1183
|
+
}
|
|
1184
|
+
/**
|
|
1185
|
+
* Aggregate individual results into a summary
|
|
1186
|
+
*/
|
|
1187
|
+
aggregateResults(results) {
|
|
1188
|
+
return {
|
|
1189
|
+
total: results.length,
|
|
1190
|
+
succeeded: results.filter((r) => r.success).length,
|
|
1191
|
+
failed: results.filter(
|
|
1192
|
+
(r) => !r.success && r.error !== "Skipped due to abort"
|
|
1193
|
+
).length,
|
|
1194
|
+
skipped: results.filter((r) => r.error === "Skipped due to abort").length,
|
|
1195
|
+
details: results
|
|
1196
|
+
};
|
|
1197
|
+
}
|
|
1198
|
+
};
|
|
1199
|
+
function createSeeder(config, dependencies) {
|
|
1200
|
+
return new Seeder(config, dependencies);
|
|
1201
|
+
}
|
|
1202
|
+
|
|
1203
|
+
// src/migrator/sync/sync-manager.ts
|
|
1204
|
+
var SyncManager = class {
|
|
1205
|
+
constructor(config, deps) {
|
|
1206
|
+
this.config = config;
|
|
1207
|
+
this.deps = deps;
|
|
1208
|
+
}
|
|
1209
|
+
/**
|
|
1210
|
+
* Get sync status for all tenants
|
|
1211
|
+
*
|
|
1212
|
+
* Detects divergences between migrations on disk and tracking in database.
|
|
1213
|
+
* A tenant is "in sync" when all disk migrations are tracked and no orphan records exist.
|
|
1214
|
+
*
|
|
1215
|
+
* @returns Aggregate sync status for all tenants
|
|
1216
|
+
*
|
|
1217
|
+
* @example
|
|
1218
|
+
* ```typescript
|
|
1219
|
+
* const status = await syncManager.getSyncStatus();
|
|
1220
|
+
* console.log(`Total: ${status.total}, In sync: ${status.inSync}, Out of sync: ${status.outOfSync}`);
|
|
1221
|
+
*
|
|
1222
|
+
* for (const tenant of status.details.filter(d => !d.inSync)) {
|
|
1223
|
+
* console.log(`${tenant.tenantId}: missing=${tenant.missing.length}, orphans=${tenant.orphans.length}`);
|
|
1224
|
+
* }
|
|
1225
|
+
* ```
|
|
1226
|
+
*/
|
|
1227
|
+
async getSyncStatus() {
|
|
1228
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
1229
|
+
const migrations = await this.deps.loadMigrations();
|
|
1230
|
+
const statuses = [];
|
|
1231
|
+
for (const tenantId of tenantIds) {
|
|
1232
|
+
statuses.push(await this.getTenantSyncStatus(tenantId, migrations));
|
|
1233
|
+
}
|
|
1234
|
+
return {
|
|
1235
|
+
total: statuses.length,
|
|
1236
|
+
inSync: statuses.filter((s) => s.inSync && !s.error).length,
|
|
1237
|
+
outOfSync: statuses.filter((s) => !s.inSync && !s.error).length,
|
|
1238
|
+
error: statuses.filter((s) => !!s.error).length,
|
|
1239
|
+
details: statuses
|
|
1240
|
+
};
|
|
1241
|
+
}
|
|
1242
|
+
/**
|
|
1243
|
+
* Get sync status for a specific tenant
|
|
1244
|
+
*
|
|
1245
|
+
* Compares migrations on disk with records in the database.
|
|
1246
|
+
* Identifies missing migrations (on disk but not tracked) and
|
|
1247
|
+
* orphan records (tracked but not on disk).
|
|
1248
|
+
*
|
|
1249
|
+
* @param tenantId - The tenant identifier
|
|
1250
|
+
* @param migrations - Optional pre-loaded migrations (avoids reloading from disk)
|
|
1251
|
+
* @returns Sync status for the tenant
|
|
1252
|
+
*
|
|
1253
|
+
* @example
|
|
1254
|
+
* ```typescript
|
|
1255
|
+
* const status = await syncManager.getTenantSyncStatus('tenant-123');
|
|
1256
|
+
* if (status.missing.length > 0) {
|
|
1257
|
+
* console.log(`Missing: ${status.missing.join(', ')}`);
|
|
1258
|
+
* }
|
|
1259
|
+
* if (status.orphans.length > 0) {
|
|
1260
|
+
* console.log(`Orphans: ${status.orphans.join(', ')}`);
|
|
1261
|
+
* }
|
|
1262
|
+
* ```
|
|
1263
|
+
*/
|
|
1264
|
+
async getTenantSyncStatus(tenantId, migrations) {
|
|
1265
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1266
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1267
|
+
try {
|
|
1268
|
+
const allMigrations = migrations ?? await this.deps.loadMigrations();
|
|
1269
|
+
const migrationNames = new Set(allMigrations.map((m) => m.name));
|
|
1270
|
+
const migrationHashes = new Set(allMigrations.map((m) => m.hash));
|
|
1271
|
+
const tableExists = await this.deps.migrationsTableExists(pool, schemaName);
|
|
1272
|
+
if (!tableExists) {
|
|
1273
|
+
return {
|
|
1274
|
+
tenantId,
|
|
1275
|
+
schemaName,
|
|
1276
|
+
missing: allMigrations.map((m) => m.name),
|
|
1277
|
+
orphans: [],
|
|
1278
|
+
inSync: allMigrations.length === 0,
|
|
1279
|
+
format: null
|
|
1280
|
+
};
|
|
1281
|
+
}
|
|
1282
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1283
|
+
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
1284
|
+
const appliedIdentifiers = new Set(applied.map((m) => m.identifier));
|
|
1285
|
+
const missing = allMigrations.filter((m) => !this.isMigrationApplied(m, appliedIdentifiers, format)).map((m) => m.name);
|
|
1286
|
+
const orphans = applied.filter((m) => {
|
|
1287
|
+
if (format.columns.identifier === "name") {
|
|
1288
|
+
return !migrationNames.has(m.identifier);
|
|
1289
|
+
}
|
|
1290
|
+
return !migrationHashes.has(m.identifier) && !migrationNames.has(m.identifier);
|
|
1291
|
+
}).map((m) => m.identifier);
|
|
1292
|
+
return {
|
|
1293
|
+
tenantId,
|
|
1294
|
+
schemaName,
|
|
1295
|
+
missing,
|
|
1296
|
+
orphans,
|
|
1297
|
+
inSync: missing.length === 0 && orphans.length === 0,
|
|
1298
|
+
format: format.format
|
|
1299
|
+
};
|
|
1300
|
+
} catch (error) {
|
|
1301
|
+
return {
|
|
1302
|
+
tenantId,
|
|
1303
|
+
schemaName,
|
|
1304
|
+
missing: [],
|
|
1305
|
+
orphans: [],
|
|
1306
|
+
inSync: false,
|
|
1307
|
+
format: null,
|
|
1308
|
+
error: error.message
|
|
1309
|
+
};
|
|
1310
|
+
} finally {
|
|
1311
|
+
await pool.end();
|
|
1312
|
+
}
|
|
1313
|
+
}
|
|
1314
|
+
/**
|
|
1315
|
+
* Mark missing migrations as applied for a tenant
|
|
1316
|
+
*
|
|
1317
|
+
* Records migrations that exist on disk but are not tracked in the database.
|
|
1318
|
+
* Useful for syncing tracking state with already-applied migrations.
|
|
1319
|
+
*
|
|
1320
|
+
* @param tenantId - The tenant identifier
|
|
1321
|
+
* @returns Result of the mark operation
|
|
1322
|
+
*
|
|
1323
|
+
* @example
|
|
1324
|
+
* ```typescript
|
|
1325
|
+
* const result = await syncManager.markMissing('tenant-123');
|
|
1326
|
+
* if (result.success) {
|
|
1327
|
+
* console.log(`Marked ${result.markedMigrations.length} migrations as applied`);
|
|
1328
|
+
* }
|
|
1329
|
+
* ```
|
|
1330
|
+
*/
|
|
1331
|
+
async markMissing(tenantId) {
|
|
1332
|
+
const startTime = Date.now();
|
|
1333
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1334
|
+
const markedMigrations = [];
|
|
1335
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1336
|
+
try {
|
|
1337
|
+
const syncStatus = await this.getTenantSyncStatus(tenantId);
|
|
1338
|
+
if (syncStatus.error) {
|
|
1339
|
+
return {
|
|
1340
|
+
tenantId,
|
|
1341
|
+
schemaName,
|
|
1342
|
+
success: false,
|
|
1343
|
+
markedMigrations: [],
|
|
1344
|
+
removedOrphans: [],
|
|
1345
|
+
error: syncStatus.error,
|
|
1346
|
+
durationMs: Date.now() - startTime
|
|
1347
|
+
};
|
|
1348
|
+
}
|
|
1349
|
+
if (syncStatus.missing.length === 0) {
|
|
1350
|
+
return {
|
|
1351
|
+
tenantId,
|
|
1352
|
+
schemaName,
|
|
1353
|
+
success: true,
|
|
1354
|
+
markedMigrations: [],
|
|
1355
|
+
removedOrphans: [],
|
|
1356
|
+
durationMs: Date.now() - startTime
|
|
1357
|
+
};
|
|
1358
|
+
}
|
|
1359
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1360
|
+
await this.deps.ensureMigrationsTable(pool, schemaName, format);
|
|
1361
|
+
const allMigrations = await this.deps.loadMigrations();
|
|
1362
|
+
const missingSet = new Set(syncStatus.missing);
|
|
1363
|
+
for (const migration of allMigrations) {
|
|
1364
|
+
if (missingSet.has(migration.name)) {
|
|
1365
|
+
await this.recordMigration(pool, schemaName, migration, format);
|
|
1366
|
+
markedMigrations.push(migration.name);
|
|
1367
|
+
}
|
|
1368
|
+
}
|
|
1369
|
+
return {
|
|
1370
|
+
tenantId,
|
|
1371
|
+
schemaName,
|
|
1372
|
+
success: true,
|
|
1373
|
+
markedMigrations,
|
|
1374
|
+
removedOrphans: [],
|
|
1375
|
+
durationMs: Date.now() - startTime
|
|
1376
|
+
};
|
|
1377
|
+
} catch (error) {
|
|
1378
|
+
return {
|
|
1379
|
+
tenantId,
|
|
1380
|
+
schemaName,
|
|
1381
|
+
success: false,
|
|
1382
|
+
markedMigrations,
|
|
1383
|
+
removedOrphans: [],
|
|
1384
|
+
error: error.message,
|
|
1385
|
+
durationMs: Date.now() - startTime
|
|
1386
|
+
};
|
|
1387
|
+
} finally {
|
|
1388
|
+
await pool.end();
|
|
1389
|
+
}
|
|
1390
|
+
}
|
|
1391
|
+
/**
|
|
1392
|
+
* Mark missing migrations as applied for all tenants
|
|
1393
|
+
*
|
|
1394
|
+
* Processes all tenants in parallel with configurable concurrency.
|
|
1395
|
+
* Supports progress callbacks and abort-on-error behavior.
|
|
1396
|
+
*
|
|
1397
|
+
* @param options - Sync options
|
|
1398
|
+
* @returns Aggregate results of all mark operations
|
|
1399
|
+
*
|
|
1400
|
+
* @example
|
|
1401
|
+
* ```typescript
|
|
1402
|
+
* const results = await syncManager.markAllMissing({
|
|
1403
|
+
* concurrency: 10,
|
|
1404
|
+
* onProgress: (id, status) => console.log(`${id}: ${status}`),
|
|
1405
|
+
* });
|
|
1406
|
+
* console.log(`Succeeded: ${results.succeeded}/${results.total}`);
|
|
1407
|
+
* ```
|
|
1408
|
+
*/
|
|
1409
|
+
async markAllMissing(options = {}) {
|
|
1410
|
+
const { concurrency = 10, onProgress, onError } = options;
|
|
1411
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
625
1412
|
const results = [];
|
|
626
1413
|
let aborted = false;
|
|
627
1414
|
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
@@ -650,101 +1437,1587 @@ var Migrator = class {
|
|
|
650
1437
|
}
|
|
651
1438
|
return this.aggregateSyncResults(results);
|
|
652
1439
|
}
|
|
1440
|
+
/**
|
|
1441
|
+
* Remove orphan migration records for a tenant
|
|
1442
|
+
*
|
|
1443
|
+
* Deletes records from the migrations table that don't have
|
|
1444
|
+
* corresponding files on disk.
|
|
1445
|
+
*
|
|
1446
|
+
* @param tenantId - The tenant identifier
|
|
1447
|
+
* @returns Result of the clean operation
|
|
1448
|
+
*
|
|
1449
|
+
* @example
|
|
1450
|
+
* ```typescript
|
|
1451
|
+
* const result = await syncManager.cleanOrphans('tenant-123');
|
|
1452
|
+
* if (result.success) {
|
|
1453
|
+
* console.log(`Removed ${result.removedOrphans.length} orphan records`);
|
|
1454
|
+
* }
|
|
1455
|
+
* ```
|
|
1456
|
+
*/
|
|
1457
|
+
async cleanOrphans(tenantId) {
|
|
1458
|
+
const startTime = Date.now();
|
|
1459
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1460
|
+
const removedOrphans = [];
|
|
1461
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1462
|
+
try {
|
|
1463
|
+
const syncStatus = await this.getTenantSyncStatus(tenantId);
|
|
1464
|
+
if (syncStatus.error) {
|
|
1465
|
+
return {
|
|
1466
|
+
tenantId,
|
|
1467
|
+
schemaName,
|
|
1468
|
+
success: false,
|
|
1469
|
+
markedMigrations: [],
|
|
1470
|
+
removedOrphans: [],
|
|
1471
|
+
error: syncStatus.error,
|
|
1472
|
+
durationMs: Date.now() - startTime
|
|
1473
|
+
};
|
|
1474
|
+
}
|
|
1475
|
+
if (syncStatus.orphans.length === 0) {
|
|
1476
|
+
return {
|
|
1477
|
+
tenantId,
|
|
1478
|
+
schemaName,
|
|
1479
|
+
success: true,
|
|
1480
|
+
markedMigrations: [],
|
|
1481
|
+
removedOrphans: [],
|
|
1482
|
+
durationMs: Date.now() - startTime
|
|
1483
|
+
};
|
|
1484
|
+
}
|
|
1485
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1486
|
+
const identifierColumn = format.columns.identifier;
|
|
1487
|
+
for (const orphan of syncStatus.orphans) {
|
|
1488
|
+
await pool.query(
|
|
1489
|
+
`DELETE FROM "${schemaName}"."${format.tableName}" WHERE "${identifierColumn}" = $1`,
|
|
1490
|
+
[orphan]
|
|
1491
|
+
);
|
|
1492
|
+
removedOrphans.push(orphan);
|
|
1493
|
+
}
|
|
1494
|
+
return {
|
|
1495
|
+
tenantId,
|
|
1496
|
+
schemaName,
|
|
1497
|
+
success: true,
|
|
1498
|
+
markedMigrations: [],
|
|
1499
|
+
removedOrphans,
|
|
1500
|
+
durationMs: Date.now() - startTime
|
|
1501
|
+
};
|
|
1502
|
+
} catch (error) {
|
|
1503
|
+
return {
|
|
1504
|
+
tenantId,
|
|
1505
|
+
schemaName,
|
|
1506
|
+
success: false,
|
|
1507
|
+
markedMigrations: [],
|
|
1508
|
+
removedOrphans,
|
|
1509
|
+
error: error.message,
|
|
1510
|
+
durationMs: Date.now() - startTime
|
|
1511
|
+
};
|
|
1512
|
+
} finally {
|
|
1513
|
+
await pool.end();
|
|
1514
|
+
}
|
|
1515
|
+
}
|
|
1516
|
+
/**
|
|
1517
|
+
* Remove orphan migration records for all tenants
|
|
1518
|
+
*
|
|
1519
|
+
* Processes all tenants in parallel with configurable concurrency.
|
|
1520
|
+
* Supports progress callbacks and abort-on-error behavior.
|
|
1521
|
+
*
|
|
1522
|
+
* @param options - Sync options
|
|
1523
|
+
* @returns Aggregate results of all clean operations
|
|
1524
|
+
*
|
|
1525
|
+
* @example
|
|
1526
|
+
* ```typescript
|
|
1527
|
+
* const results = await syncManager.cleanAllOrphans({
|
|
1528
|
+
* concurrency: 10,
|
|
1529
|
+
* onProgress: (id, status) => console.log(`${id}: ${status}`),
|
|
1530
|
+
* });
|
|
1531
|
+
* console.log(`Succeeded: ${results.succeeded}/${results.total}`);
|
|
1532
|
+
* ```
|
|
1533
|
+
*/
|
|
1534
|
+
async cleanAllOrphans(options = {}) {
|
|
1535
|
+
const { concurrency = 10, onProgress, onError } = options;
|
|
1536
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
1537
|
+
const results = [];
|
|
1538
|
+
let aborted = false;
|
|
1539
|
+
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
1540
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
1541
|
+
const batchResults = await Promise.all(
|
|
1542
|
+
batch.map(async (tenantId) => {
|
|
1543
|
+
if (aborted) {
|
|
1544
|
+
return this.createSkippedSyncResult(tenantId);
|
|
1545
|
+
}
|
|
1546
|
+
try {
|
|
1547
|
+
onProgress?.(tenantId, "starting");
|
|
1548
|
+
const result = await this.cleanOrphans(tenantId);
|
|
1549
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
1550
|
+
return result;
|
|
1551
|
+
} catch (error) {
|
|
1552
|
+
onProgress?.(tenantId, "failed");
|
|
1553
|
+
const action = onError?.(tenantId, error);
|
|
1554
|
+
if (action === "abort") {
|
|
1555
|
+
aborted = true;
|
|
1556
|
+
}
|
|
1557
|
+
return this.createErrorSyncResult(tenantId, error);
|
|
1558
|
+
}
|
|
1559
|
+
})
|
|
1560
|
+
);
|
|
1561
|
+
results.push(...batchResults);
|
|
1562
|
+
}
|
|
1563
|
+
return this.aggregateSyncResults(results);
|
|
1564
|
+
}
|
|
1565
|
+
// ============================================================================
|
|
1566
|
+
// Private Helper Methods
|
|
1567
|
+
// ============================================================================
|
|
1568
|
+
/**
|
|
1569
|
+
* Get applied migrations for a schema
|
|
1570
|
+
*/
|
|
1571
|
+
async getAppliedMigrations(pool, schemaName, format) {
|
|
1572
|
+
const identifierColumn = format.columns.identifier;
|
|
1573
|
+
const timestampColumn = format.columns.timestamp;
|
|
1574
|
+
const result = await pool.query(
|
|
1575
|
+
`SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
|
|
1576
|
+
FROM "${schemaName}"."${format.tableName}"
|
|
1577
|
+
ORDER BY id`
|
|
1578
|
+
);
|
|
1579
|
+
return result.rows.map((row) => {
|
|
1580
|
+
const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
|
|
1581
|
+
return {
|
|
1582
|
+
identifier: row.identifier,
|
|
1583
|
+
appliedAt
|
|
1584
|
+
};
|
|
1585
|
+
});
|
|
1586
|
+
}
|
|
1587
|
+
/**
|
|
1588
|
+
* Check if a migration has been applied
|
|
1589
|
+
*/
|
|
1590
|
+
isMigrationApplied(migration, appliedIdentifiers, format) {
|
|
1591
|
+
if (format.columns.identifier === "name") {
|
|
1592
|
+
return appliedIdentifiers.has(migration.name);
|
|
1593
|
+
}
|
|
1594
|
+
return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
|
|
1595
|
+
}
|
|
1596
|
+
/**
|
|
1597
|
+
* Record a migration as applied (without executing SQL)
|
|
1598
|
+
*/
|
|
1599
|
+
async recordMigration(pool, schemaName, migration, format) {
|
|
1600
|
+
const { identifier, timestamp, timestampType } = format.columns;
|
|
1601
|
+
const identifierValue = identifier === "name" ? migration.name : migration.hash;
|
|
1602
|
+
const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
|
|
1603
|
+
await pool.query(
|
|
1604
|
+
`INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
|
|
1605
|
+
[identifierValue, timestampValue]
|
|
1606
|
+
);
|
|
1607
|
+
}
|
|
1608
|
+
/**
|
|
1609
|
+
* Create a skipped sync result for aborted operations
|
|
1610
|
+
*/
|
|
1611
|
+
createSkippedSyncResult(tenantId) {
|
|
1612
|
+
return {
|
|
1613
|
+
tenantId,
|
|
1614
|
+
schemaName: this.deps.schemaNameTemplate(tenantId),
|
|
1615
|
+
success: false,
|
|
1616
|
+
markedMigrations: [],
|
|
1617
|
+
removedOrphans: [],
|
|
1618
|
+
error: "Skipped due to abort",
|
|
1619
|
+
durationMs: 0
|
|
1620
|
+
};
|
|
1621
|
+
}
|
|
1622
|
+
/**
|
|
1623
|
+
* Create an error sync result for failed operations
|
|
1624
|
+
*/
|
|
1625
|
+
createErrorSyncResult(tenantId, error) {
|
|
1626
|
+
return {
|
|
1627
|
+
tenantId,
|
|
1628
|
+
schemaName: this.deps.schemaNameTemplate(tenantId),
|
|
1629
|
+
success: false,
|
|
1630
|
+
markedMigrations: [],
|
|
1631
|
+
removedOrphans: [],
|
|
1632
|
+
error: error.message,
|
|
1633
|
+
durationMs: 0
|
|
1634
|
+
};
|
|
1635
|
+
}
|
|
1636
|
+
/**
|
|
1637
|
+
* Aggregate individual sync results into a summary
|
|
1638
|
+
*/
|
|
1639
|
+
aggregateSyncResults(results) {
|
|
1640
|
+
return {
|
|
1641
|
+
total: results.length,
|
|
1642
|
+
succeeded: results.filter((r) => r.success).length,
|
|
1643
|
+
failed: results.filter((r) => !r.success).length,
|
|
1644
|
+
details: results
|
|
1645
|
+
};
|
|
1646
|
+
}
|
|
1647
|
+
};
|
|
1648
|
+
function createSyncManager(config, dependencies) {
|
|
1649
|
+
return new SyncManager(config, dependencies);
|
|
1650
|
+
}
|
|
1651
|
+
|
|
1652
|
+
// src/migrator/executor/migration-executor.ts
|
|
1653
|
+
var MigrationExecutor = class {
|
|
1654
|
+
constructor(config, deps) {
|
|
1655
|
+
this.config = config;
|
|
1656
|
+
this.deps = deps;
|
|
1657
|
+
}
|
|
1658
|
+
/**
|
|
1659
|
+
* Migrate a single tenant
|
|
1660
|
+
*
|
|
1661
|
+
* Applies all pending migrations to the tenant's schema.
|
|
1662
|
+
* Creates the migrations table if it doesn't exist.
|
|
1663
|
+
*
|
|
1664
|
+
* @param tenantId - The tenant identifier
|
|
1665
|
+
* @param migrations - Optional pre-loaded migrations (avoids reloading from disk)
|
|
1666
|
+
* @param options - Migration options (dryRun, onProgress)
|
|
1667
|
+
* @returns Migration result with applied migrations and duration
|
|
1668
|
+
*
|
|
1669
|
+
* @example
|
|
1670
|
+
* ```typescript
|
|
1671
|
+
* const result = await executor.migrateTenant('tenant-123', undefined, {
|
|
1672
|
+
* dryRun: false,
|
|
1673
|
+
* onProgress: (id, status, name) => console.log(`${id}: ${status} ${name}`),
|
|
1674
|
+
* });
|
|
1675
|
+
*
|
|
1676
|
+
* if (result.success) {
|
|
1677
|
+
* console.log(`Applied ${result.appliedMigrations.length} migrations`);
|
|
1678
|
+
* }
|
|
1679
|
+
* ```
|
|
1680
|
+
*/
|
|
1681
|
+
async migrateTenant(tenantId, migrations, options = {}) {
|
|
1682
|
+
const startTime = Date.now();
|
|
1683
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1684
|
+
const appliedMigrations = [];
|
|
1685
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1686
|
+
try {
|
|
1687
|
+
await this.config.hooks?.beforeTenant?.(tenantId);
|
|
1688
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1689
|
+
await this.deps.ensureMigrationsTable(pool, schemaName, format);
|
|
1690
|
+
const allMigrations = migrations ?? await this.deps.loadMigrations();
|
|
1691
|
+
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
1692
|
+
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
1693
|
+
const pending = allMigrations.filter(
|
|
1694
|
+
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
1695
|
+
);
|
|
1696
|
+
if (options.dryRun) {
|
|
1697
|
+
return {
|
|
1698
|
+
tenantId,
|
|
1699
|
+
schemaName,
|
|
1700
|
+
success: true,
|
|
1701
|
+
appliedMigrations: pending.map((m) => m.name),
|
|
1702
|
+
durationMs: Date.now() - startTime,
|
|
1703
|
+
format: format.format
|
|
1704
|
+
};
|
|
1705
|
+
}
|
|
1706
|
+
for (const migration of pending) {
|
|
1707
|
+
const migrationStart = Date.now();
|
|
1708
|
+
options.onProgress?.(tenantId, "migrating", migration.name);
|
|
1709
|
+
await this.config.hooks?.beforeMigration?.(tenantId, migration.name);
|
|
1710
|
+
await this.applyMigration(pool, schemaName, migration, format);
|
|
1711
|
+
await this.config.hooks?.afterMigration?.(
|
|
1712
|
+
tenantId,
|
|
1713
|
+
migration.name,
|
|
1714
|
+
Date.now() - migrationStart
|
|
1715
|
+
);
|
|
1716
|
+
appliedMigrations.push(migration.name);
|
|
1717
|
+
}
|
|
1718
|
+
const result = {
|
|
1719
|
+
tenantId,
|
|
1720
|
+
schemaName,
|
|
1721
|
+
success: true,
|
|
1722
|
+
appliedMigrations,
|
|
1723
|
+
durationMs: Date.now() - startTime,
|
|
1724
|
+
format: format.format
|
|
1725
|
+
};
|
|
1726
|
+
await this.config.hooks?.afterTenant?.(tenantId, result);
|
|
1727
|
+
return result;
|
|
1728
|
+
} catch (error) {
|
|
1729
|
+
const result = {
|
|
1730
|
+
tenantId,
|
|
1731
|
+
schemaName,
|
|
1732
|
+
success: false,
|
|
1733
|
+
appliedMigrations,
|
|
1734
|
+
error: error.message,
|
|
1735
|
+
durationMs: Date.now() - startTime
|
|
1736
|
+
};
|
|
1737
|
+
await this.config.hooks?.afterTenant?.(tenantId, result);
|
|
1738
|
+
return result;
|
|
1739
|
+
} finally {
|
|
1740
|
+
await pool.end();
|
|
1741
|
+
}
|
|
1742
|
+
}
|
|
1743
|
+
/**
|
|
1744
|
+
* Mark migrations as applied without executing SQL
|
|
1745
|
+
*
|
|
1746
|
+
* Useful for syncing tracking state with already-applied migrations
|
|
1747
|
+
* or when migrations were applied manually.
|
|
1748
|
+
*
|
|
1749
|
+
* @param tenantId - The tenant identifier
|
|
1750
|
+
* @param options - Options with progress callback
|
|
1751
|
+
* @returns Result with list of marked migrations
|
|
1752
|
+
*
|
|
1753
|
+
* @example
|
|
1754
|
+
* ```typescript
|
|
1755
|
+
* const result = await executor.markAsApplied('tenant-123', {
|
|
1756
|
+
* onProgress: (id, status, name) => console.log(`${id}: marking ${name}`),
|
|
1757
|
+
* });
|
|
1758
|
+
*
|
|
1759
|
+
* console.log(`Marked ${result.appliedMigrations.length} migrations`);
|
|
1760
|
+
* ```
|
|
1761
|
+
*/
|
|
1762
|
+
async markAsApplied(tenantId, options = {}) {
|
|
1763
|
+
const startTime = Date.now();
|
|
1764
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1765
|
+
const markedMigrations = [];
|
|
1766
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1767
|
+
try {
|
|
1768
|
+
await this.config.hooks?.beforeTenant?.(tenantId);
|
|
1769
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1770
|
+
await this.deps.ensureMigrationsTable(pool, schemaName, format);
|
|
1771
|
+
const allMigrations = await this.deps.loadMigrations();
|
|
1772
|
+
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
1773
|
+
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
1774
|
+
const pending = allMigrations.filter(
|
|
1775
|
+
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
1776
|
+
);
|
|
1777
|
+
for (const migration of pending) {
|
|
1778
|
+
const migrationStart = Date.now();
|
|
1779
|
+
options.onProgress?.(tenantId, "migrating", migration.name);
|
|
1780
|
+
await this.config.hooks?.beforeMigration?.(tenantId, migration.name);
|
|
1781
|
+
await this.recordMigration(pool, schemaName, migration, format);
|
|
1782
|
+
await this.config.hooks?.afterMigration?.(
|
|
1783
|
+
tenantId,
|
|
1784
|
+
migration.name,
|
|
1785
|
+
Date.now() - migrationStart
|
|
1786
|
+
);
|
|
1787
|
+
markedMigrations.push(migration.name);
|
|
1788
|
+
}
|
|
1789
|
+
const result = {
|
|
1790
|
+
tenantId,
|
|
1791
|
+
schemaName,
|
|
1792
|
+
success: true,
|
|
1793
|
+
appliedMigrations: markedMigrations,
|
|
1794
|
+
durationMs: Date.now() - startTime,
|
|
1795
|
+
format: format.format
|
|
1796
|
+
};
|
|
1797
|
+
await this.config.hooks?.afterTenant?.(tenantId, result);
|
|
1798
|
+
return result;
|
|
1799
|
+
} catch (error) {
|
|
1800
|
+
const result = {
|
|
1801
|
+
tenantId,
|
|
1802
|
+
schemaName,
|
|
1803
|
+
success: false,
|
|
1804
|
+
appliedMigrations: markedMigrations,
|
|
1805
|
+
error: error.message,
|
|
1806
|
+
durationMs: Date.now() - startTime
|
|
1807
|
+
};
|
|
1808
|
+
await this.config.hooks?.afterTenant?.(tenantId, result);
|
|
1809
|
+
return result;
|
|
1810
|
+
} finally {
|
|
1811
|
+
await pool.end();
|
|
1812
|
+
}
|
|
1813
|
+
}
|
|
1814
|
+
/**
|
|
1815
|
+
* Get migration status for a specific tenant
|
|
1816
|
+
*
|
|
1817
|
+
* Returns information about applied and pending migrations.
|
|
1818
|
+
*
|
|
1819
|
+
* @param tenantId - The tenant identifier
|
|
1820
|
+
* @param migrations - Optional pre-loaded migrations
|
|
1821
|
+
* @returns Migration status with counts and pending list
|
|
1822
|
+
*
|
|
1823
|
+
* @example
|
|
1824
|
+
* ```typescript
|
|
1825
|
+
* const status = await executor.getTenantStatus('tenant-123');
|
|
1826
|
+
* if (status.status === 'behind') {
|
|
1827
|
+
* console.log(`Pending: ${status.pendingMigrations.join(', ')}`);
|
|
1828
|
+
* }
|
|
1829
|
+
* ```
|
|
1830
|
+
*/
|
|
1831
|
+
async getTenantStatus(tenantId, migrations) {
|
|
1832
|
+
const schemaName = this.deps.schemaNameTemplate(tenantId);
|
|
1833
|
+
const pool = await this.deps.createPool(schemaName);
|
|
1834
|
+
try {
|
|
1835
|
+
const allMigrations = migrations ?? await this.deps.loadMigrations();
|
|
1836
|
+
const tableExists = await this.deps.migrationsTableExists(pool, schemaName);
|
|
1837
|
+
if (!tableExists) {
|
|
1838
|
+
return {
|
|
1839
|
+
tenantId,
|
|
1840
|
+
schemaName,
|
|
1841
|
+
appliedCount: 0,
|
|
1842
|
+
pendingCount: allMigrations.length,
|
|
1843
|
+
pendingMigrations: allMigrations.map((m) => m.name),
|
|
1844
|
+
status: allMigrations.length > 0 ? "behind" : "ok",
|
|
1845
|
+
format: null
|
|
1846
|
+
};
|
|
1847
|
+
}
|
|
1848
|
+
const format = await this.deps.getOrDetectFormat(pool, schemaName);
|
|
1849
|
+
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
1850
|
+
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
1851
|
+
const pending = allMigrations.filter(
|
|
1852
|
+
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
1853
|
+
);
|
|
1854
|
+
return {
|
|
1855
|
+
tenantId,
|
|
1856
|
+
schemaName,
|
|
1857
|
+
appliedCount: applied.length,
|
|
1858
|
+
pendingCount: pending.length,
|
|
1859
|
+
pendingMigrations: pending.map((m) => m.name),
|
|
1860
|
+
status: pending.length > 0 ? "behind" : "ok",
|
|
1861
|
+
format: format.format
|
|
1862
|
+
};
|
|
1863
|
+
} catch (error) {
|
|
1864
|
+
return {
|
|
1865
|
+
tenantId,
|
|
1866
|
+
schemaName,
|
|
1867
|
+
appliedCount: 0,
|
|
1868
|
+
pendingCount: 0,
|
|
1869
|
+
pendingMigrations: [],
|
|
1870
|
+
status: "error",
|
|
1871
|
+
error: error.message,
|
|
1872
|
+
format: null
|
|
1873
|
+
};
|
|
1874
|
+
} finally {
|
|
1875
|
+
await pool.end();
|
|
1876
|
+
}
|
|
1877
|
+
}
|
|
1878
|
+
// ============================================================================
|
|
1879
|
+
// IMigrationExecutor Interface Methods
|
|
1880
|
+
// ============================================================================
|
|
1881
|
+
/**
|
|
1882
|
+
* Execute a single migration on a schema
|
|
1883
|
+
*/
|
|
1884
|
+
async executeMigration(pool, schemaName, migration, format, options) {
|
|
1885
|
+
if (options?.markOnly) {
|
|
1886
|
+
options.onProgress?.("recording");
|
|
1887
|
+
await this.recordMigration(pool, schemaName, migration, format);
|
|
1888
|
+
} else {
|
|
1889
|
+
options?.onProgress?.("applying");
|
|
1890
|
+
await this.applyMigration(pool, schemaName, migration, format);
|
|
1891
|
+
}
|
|
1892
|
+
}
|
|
1893
|
+
/**
|
|
1894
|
+
* Execute multiple migrations on a schema
|
|
1895
|
+
*/
|
|
1896
|
+
async executeMigrations(pool, schemaName, migrations, format, options) {
|
|
1897
|
+
const appliedNames = [];
|
|
1898
|
+
for (const migration of migrations) {
|
|
1899
|
+
await this.executeMigration(pool, schemaName, migration, format, options);
|
|
1900
|
+
appliedNames.push(migration.name);
|
|
1901
|
+
}
|
|
1902
|
+
return appliedNames;
|
|
1903
|
+
}
|
|
1904
|
+
/**
|
|
1905
|
+
* Record a migration as applied without executing SQL
|
|
1906
|
+
*/
|
|
1907
|
+
async recordMigration(pool, schemaName, migration, format) {
|
|
1908
|
+
const { identifier, timestamp, timestampType } = format.columns;
|
|
1909
|
+
const identifierValue = identifier === "name" ? migration.name : migration.hash;
|
|
1910
|
+
const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
|
|
1911
|
+
await pool.query(
|
|
1912
|
+
`INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
|
|
1913
|
+
[identifierValue, timestampValue]
|
|
1914
|
+
);
|
|
1915
|
+
}
|
|
1916
|
+
/**
|
|
1917
|
+
* Get list of applied migrations for a tenant
|
|
1918
|
+
*/
|
|
1919
|
+
async getAppliedMigrations(pool, schemaName, format) {
|
|
1920
|
+
const identifierColumn = format.columns.identifier;
|
|
1921
|
+
const timestampColumn = format.columns.timestamp;
|
|
1922
|
+
const result = await pool.query(
|
|
1923
|
+
`SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
|
|
1924
|
+
FROM "${schemaName}"."${format.tableName}"
|
|
1925
|
+
ORDER BY id`
|
|
1926
|
+
);
|
|
1927
|
+
return result.rows.map((row) => {
|
|
1928
|
+
const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
|
|
1929
|
+
return {
|
|
1930
|
+
identifier: row.identifier,
|
|
1931
|
+
// Set name or hash based on format
|
|
1932
|
+
...format.columns.identifier === "name" ? { name: row.identifier } : { hash: row.identifier },
|
|
1933
|
+
appliedAt
|
|
1934
|
+
};
|
|
1935
|
+
});
|
|
1936
|
+
}
|
|
1937
|
+
/**
|
|
1938
|
+
* Get pending migrations (not yet applied)
|
|
1939
|
+
*/
|
|
1940
|
+
async getPendingMigrations(pool, schemaName, allMigrations, format) {
|
|
1941
|
+
const applied = await this.getAppliedMigrations(pool, schemaName, format);
|
|
1942
|
+
const appliedSet = new Set(applied.map((m) => m.identifier));
|
|
1943
|
+
return allMigrations.filter(
|
|
1944
|
+
(m) => !this.isMigrationApplied(m, appliedSet, format)
|
|
1945
|
+
);
|
|
1946
|
+
}
|
|
1947
|
+
// ============================================================================
|
|
1948
|
+
// Private Helper Methods
|
|
1949
|
+
// ============================================================================
|
|
1950
|
+
/**
|
|
1951
|
+
* Check if a migration has been applied
|
|
1952
|
+
*/
|
|
1953
|
+
isMigrationApplied(migration, appliedIdentifiers, format) {
|
|
1954
|
+
if (format.columns.identifier === "name") {
|
|
1955
|
+
return appliedIdentifiers.has(migration.name);
|
|
1956
|
+
}
|
|
1957
|
+
return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
|
|
1958
|
+
}
|
|
1959
|
+
/**
|
|
1960
|
+
* Apply a migration to a schema (execute SQL + record)
|
|
1961
|
+
*/
|
|
1962
|
+
async applyMigration(pool, schemaName, migration, format) {
|
|
1963
|
+
const client = await pool.connect();
|
|
1964
|
+
try {
|
|
1965
|
+
await client.query("BEGIN");
|
|
1966
|
+
await client.query(migration.sql);
|
|
1967
|
+
const { identifier, timestamp, timestampType } = format.columns;
|
|
1968
|
+
const identifierValue = identifier === "name" ? migration.name : migration.hash;
|
|
1969
|
+
const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
|
|
1970
|
+
await client.query(
|
|
1971
|
+
`INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
|
|
1972
|
+
[identifierValue, timestampValue]
|
|
1973
|
+
);
|
|
1974
|
+
await client.query("COMMIT");
|
|
1975
|
+
} catch (error) {
|
|
1976
|
+
await client.query("ROLLBACK");
|
|
1977
|
+
throw error;
|
|
1978
|
+
} finally {
|
|
1979
|
+
client.release();
|
|
1980
|
+
}
|
|
1981
|
+
}
|
|
1982
|
+
};
|
|
1983
|
+
function createMigrationExecutor(config, dependencies) {
|
|
1984
|
+
return new MigrationExecutor(config, dependencies);
|
|
1985
|
+
}
|
|
1986
|
+
|
|
1987
|
+
// src/migrator/executor/batch-executor.ts
|
|
1988
|
+
var BatchExecutor = class {
|
|
1989
|
+
constructor(config, executor, loadMigrations) {
|
|
1990
|
+
this.config = config;
|
|
1991
|
+
this.executor = executor;
|
|
1992
|
+
this.loadMigrations = loadMigrations;
|
|
1993
|
+
}
|
|
1994
|
+
/**
|
|
1995
|
+
* Migrate all tenants in parallel
|
|
1996
|
+
*
|
|
1997
|
+
* Processes tenants in batches with configurable concurrency.
|
|
1998
|
+
* Supports progress callbacks, error handling, and abort behavior.
|
|
1999
|
+
*
|
|
2000
|
+
* @param options - Migration options (concurrency, dryRun, callbacks)
|
|
2001
|
+
* @returns Aggregate results for all tenants
|
|
2002
|
+
*
|
|
2003
|
+
* @example
|
|
2004
|
+
* ```typescript
|
|
2005
|
+
* const results = await batchExecutor.migrateAll({
|
|
2006
|
+
* concurrency: 10,
|
|
2007
|
+
* dryRun: false,
|
|
2008
|
+
* onProgress: (id, status) => console.log(`${id}: ${status}`),
|
|
2009
|
+
* onError: (id, error) => {
|
|
2010
|
+
* console.error(`${id} failed: ${error.message}`);
|
|
2011
|
+
* return 'continue'; // or 'abort' to stop all
|
|
2012
|
+
* },
|
|
2013
|
+
* });
|
|
2014
|
+
*
|
|
2015
|
+
* console.log(`Succeeded: ${results.succeeded}/${results.total}`);
|
|
2016
|
+
* ```
|
|
2017
|
+
*/
|
|
2018
|
+
async migrateAll(options = {}) {
|
|
2019
|
+
const {
|
|
2020
|
+
concurrency = 10,
|
|
2021
|
+
onProgress,
|
|
2022
|
+
onError,
|
|
2023
|
+
dryRun = false
|
|
2024
|
+
} = options;
|
|
2025
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
2026
|
+
const migrations = await this.loadMigrations();
|
|
2027
|
+
const results = [];
|
|
2028
|
+
let aborted = false;
|
|
2029
|
+
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
2030
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
2031
|
+
const batchResults = await Promise.all(
|
|
2032
|
+
batch.map(async (tenantId) => {
|
|
2033
|
+
if (aborted) {
|
|
2034
|
+
return this.createSkippedResult(tenantId);
|
|
2035
|
+
}
|
|
2036
|
+
try {
|
|
2037
|
+
onProgress?.(tenantId, "starting");
|
|
2038
|
+
const result = await this.executor.migrateTenant(tenantId, migrations, { dryRun, onProgress });
|
|
2039
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
2040
|
+
return result;
|
|
2041
|
+
} catch (error) {
|
|
2042
|
+
onProgress?.(tenantId, "failed");
|
|
2043
|
+
const action = onError?.(tenantId, error);
|
|
2044
|
+
if (action === "abort") {
|
|
2045
|
+
aborted = true;
|
|
2046
|
+
}
|
|
2047
|
+
return this.createErrorResult(tenantId, error);
|
|
2048
|
+
}
|
|
2049
|
+
})
|
|
2050
|
+
);
|
|
2051
|
+
results.push(...batchResults);
|
|
2052
|
+
}
|
|
2053
|
+
if (aborted) {
|
|
2054
|
+
const remaining = tenantIds.slice(results.length);
|
|
2055
|
+
for (const tenantId of remaining) {
|
|
2056
|
+
results.push(this.createSkippedResult(tenantId));
|
|
2057
|
+
}
|
|
2058
|
+
}
|
|
2059
|
+
return this.aggregateResults(results);
|
|
2060
|
+
}
|
|
2061
|
+
/**
|
|
2062
|
+
* Migrate specific tenants in parallel
|
|
2063
|
+
*
|
|
2064
|
+
* Same as migrateAll but for a subset of tenants.
|
|
2065
|
+
*
|
|
2066
|
+
* @param tenantIds - List of tenant IDs to migrate
|
|
2067
|
+
* @param options - Migration options
|
|
2068
|
+
* @returns Aggregate results for specified tenants
|
|
2069
|
+
*
|
|
2070
|
+
* @example
|
|
2071
|
+
* ```typescript
|
|
2072
|
+
* const results = await batchExecutor.migrateTenants(
|
|
2073
|
+
* ['tenant-1', 'tenant-2', 'tenant-3'],
|
|
2074
|
+
* { concurrency: 5 }
|
|
2075
|
+
* );
|
|
2076
|
+
* ```
|
|
2077
|
+
*/
|
|
2078
|
+
async migrateTenants(tenantIds, options = {}) {
|
|
2079
|
+
const migrations = await this.loadMigrations();
|
|
2080
|
+
const results = [];
|
|
2081
|
+
const { concurrency = 10, onProgress, onError, dryRun = false } = options;
|
|
2082
|
+
for (let i = 0; i < tenantIds.length; i += concurrency) {
|
|
2083
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
2084
|
+
const batchResults = await Promise.all(
|
|
2085
|
+
batch.map(async (tenantId) => {
|
|
2086
|
+
try {
|
|
2087
|
+
onProgress?.(tenantId, "starting");
|
|
2088
|
+
const result = await this.executor.migrateTenant(tenantId, migrations, { dryRun, onProgress });
|
|
2089
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
2090
|
+
return result;
|
|
2091
|
+
} catch (error) {
|
|
2092
|
+
onProgress?.(tenantId, "failed");
|
|
2093
|
+
onError?.(tenantId, error);
|
|
2094
|
+
return this.createErrorResult(tenantId, error);
|
|
2095
|
+
}
|
|
2096
|
+
})
|
|
2097
|
+
);
|
|
2098
|
+
results.push(...batchResults);
|
|
2099
|
+
}
|
|
2100
|
+
return this.aggregateResults(results);
|
|
2101
|
+
}
|
|
2102
|
+
/**
|
|
2103
|
+
* Mark all tenants as applied without executing SQL
|
|
2104
|
+
*
|
|
2105
|
+
* Useful for syncing tracking state with already-applied migrations.
|
|
2106
|
+
* Processes tenants in parallel with configurable concurrency.
|
|
2107
|
+
*
|
|
2108
|
+
* @param options - Migration options
|
|
2109
|
+
* @returns Aggregate results for all tenants
|
|
2110
|
+
*
|
|
2111
|
+
* @example
|
|
2112
|
+
* ```typescript
|
|
2113
|
+
* const results = await batchExecutor.markAllAsApplied({
|
|
2114
|
+
* concurrency: 10,
|
|
2115
|
+
* onProgress: (id, status) => console.log(`${id}: ${status}`),
|
|
2116
|
+
* });
|
|
2117
|
+
* ```
|
|
2118
|
+
*/
|
|
2119
|
+
async markAllAsApplied(options = {}) {
|
|
2120
|
+
const {
|
|
2121
|
+
concurrency = 10,
|
|
2122
|
+
onProgress,
|
|
2123
|
+
onError
|
|
2124
|
+
} = options;
|
|
2125
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
2126
|
+
const results = [];
|
|
2127
|
+
let aborted = false;
|
|
2128
|
+
for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
|
|
2129
|
+
const batch = tenantIds.slice(i, i + concurrency);
|
|
2130
|
+
const batchResults = await Promise.all(
|
|
2131
|
+
batch.map(async (tenantId) => {
|
|
2132
|
+
if (aborted) {
|
|
2133
|
+
return this.createSkippedResult(tenantId);
|
|
2134
|
+
}
|
|
2135
|
+
try {
|
|
2136
|
+
onProgress?.(tenantId, "starting");
|
|
2137
|
+
const result = await this.executor.markAsApplied(tenantId, { onProgress });
|
|
2138
|
+
onProgress?.(tenantId, result.success ? "completed" : "failed");
|
|
2139
|
+
return result;
|
|
2140
|
+
} catch (error) {
|
|
2141
|
+
onProgress?.(tenantId, "failed");
|
|
2142
|
+
const action = onError?.(tenantId, error);
|
|
2143
|
+
if (action === "abort") {
|
|
2144
|
+
aborted = true;
|
|
2145
|
+
}
|
|
2146
|
+
return this.createErrorResult(tenantId, error);
|
|
2147
|
+
}
|
|
2148
|
+
})
|
|
2149
|
+
);
|
|
2150
|
+
results.push(...batchResults);
|
|
2151
|
+
}
|
|
2152
|
+
if (aborted) {
|
|
2153
|
+
const remaining = tenantIds.slice(results.length);
|
|
2154
|
+
for (const tenantId of remaining) {
|
|
2155
|
+
results.push(this.createSkippedResult(tenantId));
|
|
2156
|
+
}
|
|
2157
|
+
}
|
|
2158
|
+
return this.aggregateResults(results);
|
|
2159
|
+
}
|
|
2160
|
+
/**
|
|
2161
|
+
* Get migration status for all tenants
|
|
2162
|
+
*
|
|
2163
|
+
* Queries each tenant's migration status sequentially.
|
|
2164
|
+
*
|
|
2165
|
+
* @returns List of migration status for all tenants
|
|
2166
|
+
*
|
|
2167
|
+
* @example
|
|
2168
|
+
* ```typescript
|
|
2169
|
+
* const statuses = await batchExecutor.getStatus();
|
|
2170
|
+
* const behind = statuses.filter(s => s.status === 'behind');
|
|
2171
|
+
* console.log(`${behind.length} tenants need migrations`);
|
|
2172
|
+
* ```
|
|
2173
|
+
*/
|
|
2174
|
+
async getStatus() {
|
|
2175
|
+
const tenantIds = await this.config.tenantDiscovery();
|
|
2176
|
+
const migrations = await this.loadMigrations();
|
|
2177
|
+
const statuses = [];
|
|
2178
|
+
for (const tenantId of tenantIds) {
|
|
2179
|
+
statuses.push(await this.executor.getTenantStatus(tenantId, migrations));
|
|
2180
|
+
}
|
|
2181
|
+
return statuses;
|
|
2182
|
+
}
|
|
2183
|
+
// ============================================================================
|
|
2184
|
+
// Private Helper Methods
|
|
2185
|
+
// ============================================================================
|
|
2186
|
+
/**
|
|
2187
|
+
* Create a skipped result for aborted operations
|
|
2188
|
+
*/
|
|
2189
|
+
createSkippedResult(tenantId) {
|
|
2190
|
+
return {
|
|
2191
|
+
tenantId,
|
|
2192
|
+
schemaName: "",
|
|
2193
|
+
// Schema name not available in batch context
|
|
2194
|
+
success: false,
|
|
2195
|
+
appliedMigrations: [],
|
|
2196
|
+
error: "Skipped due to abort",
|
|
2197
|
+
durationMs: 0
|
|
2198
|
+
};
|
|
2199
|
+
}
|
|
2200
|
+
/**
|
|
2201
|
+
* Create an error result for failed operations
|
|
2202
|
+
*/
|
|
2203
|
+
createErrorResult(tenantId, error) {
|
|
2204
|
+
return {
|
|
2205
|
+
tenantId,
|
|
2206
|
+
schemaName: "",
|
|
2207
|
+
// Schema name not available in batch context
|
|
2208
|
+
success: false,
|
|
2209
|
+
appliedMigrations: [],
|
|
2210
|
+
error: error.message,
|
|
2211
|
+
durationMs: 0
|
|
2212
|
+
};
|
|
2213
|
+
}
|
|
2214
|
+
/**
|
|
2215
|
+
* Aggregate individual migration results into a summary
|
|
2216
|
+
*/
|
|
2217
|
+
aggregateResults(results) {
|
|
2218
|
+
return {
|
|
2219
|
+
total: results.length,
|
|
2220
|
+
succeeded: results.filter((r) => r.success).length,
|
|
2221
|
+
failed: results.filter((r) => !r.success && r.error !== "Skipped due to abort").length,
|
|
2222
|
+
skipped: results.filter((r) => r.error === "Skipped due to abort").length,
|
|
2223
|
+
details: results
|
|
2224
|
+
};
|
|
2225
|
+
}
|
|
2226
|
+
};
|
|
2227
|
+
/**
 * Factory for a BatchExecutor wired to the given config, single-tenant
 * executor and migration loader.
 */
function createBatchExecutor(config, executor, loadMigrations) {
  const batchExecutor = new BatchExecutor(config, executor, loadMigrations);
  return batchExecutor;
}
|
|
2230
|
+
|
|
2231
|
+
// src/migrator/clone/ddl-generator.ts
|
|
2232
|
+
/**
 * List base tables in a schema, optionally excluding some by name.
 * Exclusions are bound as parameters ($2..$n); when there are none, a
 * harmless ''::text placeholder keeps the NOT IN clause valid.
 *
 * @returns Table names sorted alphabetically
 */
async function listTables(pool, schemaName, excludeTables = []) {
  let excludePlaceholders = "''::text";
  if (excludeTables.length > 0) {
    excludePlaceholders = excludeTables.map((_, idx) => `$${idx + 2}`).join(", ");
  }
  const { rows } = await pool.query(
    `SELECT table_name
     FROM information_schema.tables
     WHERE table_schema = $1
       AND table_type = 'BASE TABLE'
       AND table_name NOT IN (${excludePlaceholders})
     ORDER BY table_name`,
    [schemaName, ...excludeTables]
  );
  return rows.map((row) => row.table_name);
}
|
|
2245
|
+
/**
 * Introspect column metadata for a table via information_schema.
 *
 * @returns Columns in ordinal position order, with snake_case catalog
 *   fields mapped onto camelCased properties
 */
async function getColumns(pool, schemaName, tableName) {
  const { rows } = await pool.query(
    `SELECT
      column_name,
      data_type,
      udt_name,
      is_nullable,
      column_default,
      character_maximum_length,
      numeric_precision,
      numeric_scale
    FROM information_schema.columns
    WHERE table_schema = $1 AND table_name = $2
    ORDER BY ordinal_position`,
    [schemaName, tableName]
  );
  const toColumn = (row) => ({
    columnName: row.column_name,
    dataType: row.data_type,
    udtName: row.udt_name,
    isNullable: row.is_nullable === "YES",
    columnDefault: row.column_default,
    characterMaximumLength: row.character_maximum_length,
    numericPrecision: row.numeric_precision,
    numericScale: row.numeric_scale
  });
  return rows.map(toColumn);
}
|
|
2272
|
+
/**
 * Build a CREATE TABLE IF NOT EXISTS statement mirroring an existing
 * table's columns. Primary keys, indexes and other constraints are
 * emitted separately by their own generators.
 */
async function generateTableDdl(pool, schemaName, tableName) {
  const columns = await getColumns(pool, schemaName, tableName);
  // Render the SQL type for one introspected column.
  const renderType = (col) => {
    if (col.dataType === "character varying" && col.characterMaximumLength) {
      return `varchar(${col.characterMaximumLength})`;
    }
    if (col.dataType === "character" && col.characterMaximumLength) {
      return `char(${col.characterMaximumLength})`;
    }
    if (col.dataType === "numeric" && col.numericPrecision) {
      const scale = col.numericScale ? `, ${col.numericScale}` : "";
      return `numeric(${col.numericPrecision}${scale})`;
    }
    if (col.dataType === "ARRAY") {
      // udt_name for arrays is the element type prefixed with "_".
      return col.udtName.replace(/^_/, "") + "[]";
    }
    return col.udtName;
  };
  const columnDefs = columns.map((col) => {
    let definition = `"${col.columnName}" ${renderType(col)}`;
    if (!col.isNullable) {
      definition += " NOT NULL";
    }
    if (col.columnDefault) {
      // Strip source-schema qualification so the default resolves against
      // the target schema's search_path instead.
      const defaultValue = col.columnDefault.replace(
        new RegExp(`"?${schemaName}"?\\.`, "g"),
        ""
      );
      definition += ` DEFAULT ${defaultValue}`;
    }
    return definition;
  });
  return `CREATE TABLE IF NOT EXISTS "${tableName}" (\n  ${columnDefs.join(",\n  ")}\n)`;
}
|
|
2302
|
+
/**
 * Fetch non-primary-key index definitions for a table and rewrite them to
 * reference the target schema instead of the source schema.
 *
 * The source schema name is regex-escaped before being interpolated into
 * the rewrite patterns: previously a name containing regex metacharacters
 * (e.g. "$" or ".") built a pattern that never matched, leaving the index
 * DDL pointed at the source schema.
 */
async function generateIndexDdls(pool, sourceSchema, targetSchema, tableName) {
  const result = await pool.query(
    `SELECT indexname, indexdef
     FROM pg_indexes
     WHERE schemaname = $1 AND tablename = $2
       AND indexname NOT LIKE '%_pkey'`,
    [sourceSchema, tableName]
  );
  // Escape regex metacharacters so the schema name matches literally.
  const escaped = sourceSchema.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  return result.rows.map(
    (row) =>
      // Replace source schema with target schema
      row.indexdef
        .replace(new RegExp(`ON "${escaped}"\\."`, "g"), `ON "${targetSchema}"."`)
        .replace(new RegExp(`"${escaped}"\\."`, "g"), `"${targetSchema}"."`)
  );
}
|
|
2317
|
+
/**
 * Build an ALTER TABLE ... ADD CONSTRAINT ... PRIMARY KEY statement for a
 * table, preserving the original constraint name and column order.
 *
 * @returns The DDL string, or null when the table has no primary key
 */
async function generatePrimaryKeyDdl(pool, schemaName, tableName) {
  const { rows } = await pool.query(
    `SELECT
      tc.constraint_name,
      kcu.column_name
    FROM information_schema.table_constraints tc
    JOIN information_schema.key_column_usage kcu
      ON tc.constraint_name = kcu.constraint_name
      AND tc.table_schema = kcu.table_schema
    WHERE tc.table_schema = $1
      AND tc.table_name = $2
      AND tc.constraint_type = 'PRIMARY KEY'
    ORDER BY kcu.ordinal_position`,
    [schemaName, tableName]
  );
  if (rows.length === 0) {
    return null;
  }
  const [{ constraint_name: constraintName }] = rows;
  const columnList = rows.map((row) => `"${row.column_name}"`).join(", ");
  return `ALTER TABLE "${tableName}" ADD CONSTRAINT "${constraintName}" PRIMARY KEY (${columnList})`;
}
|
|
2337
|
+
/**
 * Build ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY statements for a
 * table, remapped onto the target schema. Rows are grouped by constraint
 * name so composite keys become a single statement; non-default
 * ON UPDATE / ON DELETE rules are appended.
 */
async function generateForeignKeyDdls(pool, sourceSchema, targetSchema, tableName) {
  const result = await pool.query(
    `SELECT
      tc.constraint_name,
      kcu.column_name,
      ccu.table_name as foreign_table_name,
      ccu.column_name as foreign_column_name,
      rc.update_rule,
      rc.delete_rule
    FROM information_schema.table_constraints tc
    JOIN information_schema.key_column_usage kcu
      ON tc.constraint_name = kcu.constraint_name
      AND tc.table_schema = kcu.table_schema
    JOIN information_schema.constraint_column_usage ccu
      ON tc.constraint_name = ccu.constraint_name
      AND tc.table_schema = ccu.table_schema
    JOIN information_schema.referential_constraints rc
      ON tc.constraint_name = rc.constraint_name
      AND tc.table_schema = rc.constraint_schema
    WHERE tc.table_schema = $1
      AND tc.table_name = $2
      AND tc.constraint_type = 'FOREIGN KEY'
    ORDER BY tc.constraint_name, kcu.ordinal_position`,
    [sourceSchema, tableName]
  );
  // Group ordered key-column rows by constraint name.
  const grouped = new Map();
  for (const row of result.rows) {
    let fk = grouped.get(row.constraint_name);
    if (!fk) {
      fk = {
        columns: [],
        foreignTable: row.foreign_table_name,
        foreignColumns: [],
        updateRule: row.update_rule,
        deleteRule: row.delete_rule
      };
      grouped.set(row.constraint_name, fk);
    }
    fk.columns.push(row.column_name);
    fk.foreignColumns.push(row.foreign_column_name);
  }
  const ddls = [];
  for (const [name, fk] of grouped) {
    const cols = fk.columns.map((c) => `"${c}"`).join(", ");
    const refCols = fk.foreignColumns.map((c) => `"${c}"`).join(", ");
    const parts = [
      `ALTER TABLE "${targetSchema}"."${tableName}" `,
      `ADD CONSTRAINT "${name}" FOREIGN KEY (${cols}) `,
      `REFERENCES "${targetSchema}"."${fk.foreignTable}" (${refCols})`
    ];
    if (fk.updateRule !== "NO ACTION") {
      parts.push(` ON UPDATE ${fk.updateRule}`);
    }
    if (fk.deleteRule !== "NO ACTION") {
      parts.push(` ON DELETE ${fk.deleteRule}`);
    }
    ddls.push(parts.join(""));
  }
  return ddls;
}
|
|
2393
|
+
/**
 * Build ALTER TABLE ... ADD CONSTRAINT ... UNIQUE statements for a table,
 * grouping ordered key-column rows by constraint name so multi-column
 * uniques become a single statement.
 */
async function generateUniqueDdls(pool, schemaName, tableName) {
  const { rows } = await pool.query(
    `SELECT
      tc.constraint_name,
      kcu.column_name
    FROM information_schema.table_constraints tc
    JOIN information_schema.key_column_usage kcu
      ON tc.constraint_name = kcu.constraint_name
      AND tc.table_schema = kcu.table_schema
    WHERE tc.table_schema = $1
      AND tc.table_name = $2
      AND tc.constraint_type = 'UNIQUE'
    ORDER BY tc.constraint_name, kcu.ordinal_position`,
    [schemaName, tableName]
  );
  const byConstraint = new Map();
  for (const row of rows) {
    const columns = byConstraint.get(row.constraint_name) ?? [];
    columns.push(row.column_name);
    byConstraint.set(row.constraint_name, columns);
  }
  const ddls = [];
  for (const [name, columns] of byConstraint) {
    const cols = columns.map((c) => `"${c}"`).join(", ");
    ddls.push(`ALTER TABLE "${tableName}" ADD CONSTRAINT "${name}" UNIQUE (${cols})`);
  }
  return ddls;
}
|
|
2422
|
+
/**
 * Build ALTER TABLE ... ADD CONSTRAINT ... CHECK statements for a table.
 * Implicit NOT NULL check constraints are filtered out in SQL.
 */
async function generateCheckDdls(pool, schemaName, tableName) {
  const { rows } = await pool.query(
    `SELECT
      tc.constraint_name,
      cc.check_clause
    FROM information_schema.table_constraints tc
    JOIN information_schema.check_constraints cc
      ON tc.constraint_name = cc.constraint_name
      AND tc.constraint_schema = cc.constraint_schema
    WHERE tc.table_schema = $1
      AND tc.table_name = $2
      AND tc.constraint_type = 'CHECK'
      AND tc.constraint_name NOT LIKE '%_not_null'`,
    [schemaName, tableName]
  );
  const toDdl = (row) =>
    `ALTER TABLE "${tableName}" ADD CONSTRAINT "${row.constraint_name}" CHECK (${row.check_clause})`;
  return rows.map(toDdl);
}
|
|
2441
|
+
/**
 * Count rows in a table. Schema and table names are interpolated as
 * quoted identifiers (identifiers cannot be bound parameters).
 */
async function getRowCount(pool, schemaName, tableName) {
  const { rows } = await pool.query(`SELECT count(*) FROM "${schemaName}"."${tableName}"`);
  return Number.parseInt(rows[0].count, 10);
}
|
|
2447
|
+
/**
 * Gather everything needed to recreate one table in another schema:
 * CREATE TABLE DDL, index DDLs, constraint DDLs (PK first, then UNIQUE,
 * CHECK and FK so foreign keys can be applied last) and the source row
 * count. All introspection queries run in parallel.
 */
async function generateTableCloneInfo(pool, sourceSchema, targetSchema, tableName) {
  const [createDdl, indexDdls, pkDdl, uniqueDdls, checkDdls, fkDdls, rowCount] = await Promise.all([
    generateTableDdl(pool, sourceSchema, tableName),
    generateIndexDdls(pool, sourceSchema, targetSchema, tableName),
    generatePrimaryKeyDdl(pool, sourceSchema, tableName),
    generateUniqueDdls(pool, sourceSchema, tableName),
    generateCheckDdls(pool, sourceSchema, tableName),
    generateForeignKeyDdls(pool, sourceSchema, targetSchema, tableName),
    getRowCount(pool, sourceSchema, tableName)
  ]);
  const constraintDdls = [...uniqueDdls, ...checkDdls, ...fkDdls];
  if (pkDdl) {
    constraintDdls.unshift(pkDdl);
  }
  return {
    name: tableName,
    createDdl,
    indexDdls,
    constraintDdls,
    rowCount
  };
}
|
|
2470
|
+
|
|
2471
|
+
// src/migrator/clone/data-copier.ts
|
|
2472
|
+
/**
 * List a table's column names in ordinal position order.
 */
async function getTableColumns(pool, schemaName, tableName) {
  const { rows } = await pool.query(
    `SELECT column_name
     FROM information_schema.columns
     WHERE table_schema = $1 AND table_name = $2
     ORDER BY ordinal_position`,
    [schemaName, tableName]
  );
  return rows.map(({ column_name }) => column_name);
}
|
|
2482
|
+
/**
 * Render an anonymization rule value as a SQL literal.
 *
 * - null / undefined -> NULL (previously `undefined` fell through to
 *   String(value), emitting the bare token `undefined` — invalid SQL)
 * - strings -> single-quoted, with embedded quotes doubled
 * - booleans -> TRUE / FALSE
 * - anything else (numbers, bigints, ...) -> String(value)
 */
function formatAnonymizeValue(value) {
  if (value === null || value === undefined) {
    return "NULL";
  }
  if (typeof value === "string") {
    return `'${value.replace(/'/g, "''")}'`;
  }
  if (typeof value === "boolean") {
    return value ? "TRUE" : "FALSE";
  }
  return String(value);
}
|
|
2494
|
+
/**
 * Copy one table's rows from the source schema into the target schema,
 * substituting constant literals for any columns covered by the table's
 * anonymization rules.
 *
 * @returns Number of rows inserted (0 when the table has no columns)
 */
async function copyTableData(pool, sourceSchema, targetSchema, tableName, anonymizeRules) {
  const columns = await getTableColumns(pool, sourceSchema, tableName);
  if (columns.length === 0) {
    return 0;
  }
  const rules = anonymizeRules?.[tableName] ?? {};
  // Columns with a rule are replaced by a constant aliased to the column name.
  const selectExpr = columns
    .map((name) => (name in rules ? `${formatAnonymizeValue(rules[name])} as "${name}"` : `"${name}"`))
    .join(", ");
  const insertColumns = columns.map((name) => `"${name}"`).join(", ");
  const result = await pool.query(
    `INSERT INTO "${targetSchema}"."${tableName}" (${insertColumns})
     SELECT ${selectExpr}
     FROM "${sourceSchema}"."${tableName}"`
  );
  return result.rowCount ?? 0;
}
|
|
2516
|
+
/**
 * Order tables so that referenced (parent) tables come before the tables
 * that reference them — the safe order for FK-respecting inserts.
 *
 * Implements Kahn's topological sort over the foreign-key graph
 * restricted to `tables`. The previous implementation incremented the
 * in-degree of the *referenced* table instead of the dependent, which
 * produced the reverse order (children before parents).
 *
 * Tables involved in FK cycles never reach in-degree 0; they are appended
 * at the end in their original relative order so no table is dropped.
 */
async function getTablesInDependencyOrder(pool, schemaName, tables) {
  const result = await pool.query(
    `SELECT DISTINCT
      tc.table_name,
      ccu.table_name as foreign_table_name
    FROM information_schema.table_constraints tc
    JOIN information_schema.constraint_column_usage ccu
      ON tc.constraint_name = ccu.constraint_name
      AND tc.table_schema = ccu.table_schema
    WHERE tc.table_schema = $1
      AND tc.constraint_type = 'FOREIGN KEY'
      AND tc.table_name != ccu.table_name`,
    [schemaName]
  );
  const tableSet = new Set(tables);
  // table -> set of tables it references (its prerequisites).
  const prerequisites = new Map();
  for (const table of tables) {
    prerequisites.set(table, new Set());
  }
  for (const row of result.rows) {
    if (tableSet.has(row.table_name) && tableSet.has(row.foreign_table_name)) {
      prerequisites.get(row.table_name).add(row.foreign_table_name);
    }
  }
  // Kahn's algorithm: a table is ready once all tables it references
  // have already been emitted.
  const sorted = [];
  const queue = [];
  const inDegree = new Map();
  for (const [table, deps] of prerequisites) {
    inDegree.set(table, deps.size);
    if (deps.size === 0) {
      queue.push(table);
    }
  }
  while (queue.length > 0) {
    const table = queue.shift();
    sorted.push(table);
    for (const [dependent, deps] of prerequisites) {
      if (deps.delete(table)) {
        const degree = (inDegree.get(dependent) ?? 1) - 1;
        inDegree.set(dependent, degree);
        if (degree === 0) {
          queue.push(dependent);
        }
      }
    }
  }
  // Append any unsortable (cyclic) tables in their original order.
  const sortedSet = new Set(sorted);
  const remaining = tables.filter((t) => !sortedSet.has(t));
  return [...sorted, ...remaining];
}
|
|
2573
|
+
/**
 * Copy data for every table from the source schema into the target
 * schema, parents before children.
 *
 * A single client is checked out of the pool and trigger/FK enforcement
 * is disabled on it via `session_replication_role = replica` for the
 * duration of the copy. Pinning one client matters: the setting is
 * per-session, so issuing it through pool.query() would only configure
 * whichever pooled connection served that one statement while the
 * subsequent INSERTs could run on others (and would leave a stray
 * replica-mode connection in the pool).
 *
 * @returns Total number of rows copied across all tables
 */
async function copyAllData(pool, sourceSchema, targetSchema, tables, anonymizeRules, onProgress) {
  const orderedTables = await getTablesInDependencyOrder(pool, sourceSchema, tables);
  const client = await pool.connect();
  let totalRows = 0;
  try {
    await client.query("SET session_replication_role = replica");
    try {
      for (let i = 0; i < orderedTables.length; i++) {
        const table = orderedTables[i];
        onProgress?.("copying_data", {
          table,
          progress: i + 1,
          total: orderedTables.length
        });
        totalRows += await copyTableData(client, sourceSchema, targetSchema, table, anonymizeRules);
      }
    } finally {
      // Restore normal trigger/FK behavior before the client returns to the pool.
      await client.query("SET session_replication_role = DEFAULT");
    }
  } finally {
    client.release();
  }
  return totalRows;
}
|
|
2593
|
+
|
|
2594
|
+
// src/migrator/clone/cloner.ts
|
|
2595
|
+
// Default name for the per-tenant migrations tracking table; the tracking
// table is always excluded from clones.
var DEFAULT_MIGRATIONS_TABLE3 = "__drizzle_migrations";
// Clones a tenant schema (structure, constraints, indexes and optionally
// anonymized data) into a new tenant schema using the injected
// schema-manager dependencies.
var Cloner = class {
  constructor(config, deps) {
    this.deps = deps;
    this.migrationsTable = config.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE3;
  }
  migrationsTable;
  /**
   * Clone a tenant to another
   *
   * @param sourceTenantId - Source tenant ID
   * @param targetTenantId - Target tenant ID
   * @param options - Clone options
   * @returns Clone result
   */
  async cloneTenant(sourceTenantId, targetTenantId, options = {}) {
    const startTime = Date.now();
    const {
      includeData = false,
      anonymize,
      excludeTables = [],
      onProgress
    } = options;
    const sourceSchema = this.deps.schemaNameTemplate(sourceTenantId);
    const targetSchema = this.deps.schemaNameTemplate(targetTenantId);
    // The migrations tracking table is never cloned.
    const allExcludes = [this.migrationsTable, ...excludeTables];
    let sourcePool = null;
    let rootPool = null;
    try {
      onProgress?.("starting");
      // Preconditions: source must exist, target must not.
      const sourceExists = await this.deps.schemaExists(sourceTenantId);
      if (!sourceExists) {
        return this.createErrorResult(
          sourceTenantId,
          targetTenantId,
          targetSchema,
          `Source tenant "${sourceTenantId}" does not exist`,
          startTime
        );
      }
      const targetExists = await this.deps.schemaExists(targetTenantId);
      if (targetExists) {
        return this.createErrorResult(
          sourceTenantId,
          targetTenantId,
          targetSchema,
          `Target tenant "${targetTenantId}" already exists`,
          startTime
        );
      }
      onProgress?.("introspecting");
      sourcePool = await this.deps.createPool(sourceSchema);
      const tables = await listTables(sourcePool, sourceSchema, allExcludes);
      if (tables.length === 0) {
        // Nothing to copy: just create the empty target schema.
        onProgress?.("creating_schema");
        await this.deps.createSchema(targetTenantId);
        onProgress?.("completed");
        return {
          sourceTenant: sourceTenantId,
          targetTenant: targetTenantId,
          targetSchema,
          success: true,
          tables: [],
          durationMs: Date.now() - startTime
        };
      }
      // Introspect all tables (DDL, constraints, indexes, row counts) up front.
      const tableInfos = await Promise.all(
        tables.map((t) => generateTableCloneInfo(sourcePool, sourceSchema, targetSchema, t))
      );
      // Source pool no longer needed; remaining work runs on the root pool.
      await sourcePool.end();
      sourcePool = null;
      onProgress?.("creating_schema");
      await this.deps.createSchema(targetTenantId);
      rootPool = await this.deps.createRootPool();
      onProgress?.("creating_tables");
      for (const info of tableInfos) {
        // search_path is set in the same statement batch so the unqualified
        // CREATE TABLE lands in the target schema.
        await rootPool.query(`SET search_path TO "${targetSchema}"; ${info.createDdl}`);
      }
      onProgress?.("creating_constraints");
      // Non-FK constraints first; FKs are deferred until after data copy.
      for (const info of tableInfos) {
        for (const constraint of info.constraintDdls.filter((c) => !c.includes("FOREIGN KEY"))) {
          try {
            await rootPool.query(`SET search_path TO "${targetSchema}"; ${constraint}`);
          } catch (error) {
            // Best-effort: failures here are intentionally swallowed
            // (presumably duplicate/unsupported constraints — the clone proceeds).
          }
        }
      }
      onProgress?.("creating_indexes");
      for (const info of tableInfos) {
        for (const index of info.indexDdls) {
          try {
            await rootPool.query(index);
          } catch (error) {
            // Best-effort: index creation failures are intentionally ignored.
          }
        }
      }
      let rowsCopied = 0;
      if (includeData) {
        onProgress?.("copying_data");
        rowsCopied = await copyAllData(
          rootPool,
          sourceSchema,
          targetSchema,
          tables,
          anonymize?.enabled ? anonymize.rules : void 0,
          onProgress
        );
      }
      // Foreign keys are added last so inserts above never violated them.
      for (const info of tableInfos) {
        for (const fk of info.constraintDdls.filter((c) => c.includes("FOREIGN KEY"))) {
          try {
            await rootPool.query(fk);
          } catch (error) {
            // Best-effort: FK creation failures are intentionally ignored.
          }
        }
      }
      onProgress?.("completed");
      const result = {
        sourceTenant: sourceTenantId,
        targetTenant: targetTenantId,
        targetSchema,
        success: true,
        tables,
        durationMs: Date.now() - startTime
      };
      if (includeData) {
        result.rowsCopied = rowsCopied;
      }
      return result;
    } catch (error) {
      options.onError?.(error);
      onProgress?.("failed");
      return this.createErrorResult(
        sourceTenantId,
        targetTenantId,
        targetSchema,
        error.message,
        startTime
      );
    } finally {
      // Pools are closed on every path; close errors are deliberately ignored.
      if (sourcePool) {
        await sourcePool.end().catch(() => {
        });
      }
      if (rootPool) {
        await rootPool.end().catch(() => {
        });
      }
    }
  }
  // Build a failed CloneResult with timing relative to startTime.
  createErrorResult(source, target, schema, error, startTime) {
    return {
      sourceTenant: source,
      targetTenant: target,
      targetSchema: schema,
      success: false,
      error,
      tables: [],
      durationMs: Date.now() - startTime
    };
  }
};
|
|
2757
|
+
/**
 * Factory for a Cloner wired to the given config and dependency bundle.
 */
function createCloner(config, dependencies) {
  const cloner = new Cloner(config, dependencies);
  return cloner;
}
|
|
2760
|
+
|
|
2761
|
+
// src/migrator/migrator.ts
|
|
2762
|
+
var DEFAULT_MIGRATIONS_TABLE4 = "__drizzle_migrations";
|
|
2763
|
+
var Migrator = class {
|
|
2764
|
+
constructor(tenantConfig, migratorConfig) {
|
|
2765
|
+
this.migratorConfig = migratorConfig;
|
|
2766
|
+
this.migrationsTable = migratorConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE4;
|
|
2767
|
+
this.schemaManager = new SchemaManager(tenantConfig, this.migrationsTable);
|
|
2768
|
+
this.driftDetector = new DriftDetector(tenantConfig, this.schemaManager, {
|
|
2769
|
+
migrationsTable: this.migrationsTable,
|
|
2770
|
+
tenantDiscovery: migratorConfig.tenantDiscovery
|
|
2771
|
+
});
|
|
2772
|
+
this.seeder = new Seeder(
|
|
2773
|
+
{ tenantDiscovery: migratorConfig.tenantDiscovery },
|
|
2774
|
+
{
|
|
2775
|
+
createPool: this.schemaManager.createPool.bind(this.schemaManager),
|
|
2776
|
+
schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
|
|
2777
|
+
tenantSchema: tenantConfig.schemas.tenant
|
|
2778
|
+
}
|
|
2779
|
+
);
|
|
2780
|
+
this.syncManager = new SyncManager(
|
|
2781
|
+
{
|
|
2782
|
+
tenantDiscovery: migratorConfig.tenantDiscovery,
|
|
2783
|
+
migrationsFolder: migratorConfig.migrationsFolder,
|
|
2784
|
+
migrationsTable: this.migrationsTable
|
|
2785
|
+
},
|
|
2786
|
+
{
|
|
2787
|
+
createPool: this.schemaManager.createPool.bind(this.schemaManager),
|
|
2788
|
+
schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
|
|
2789
|
+
migrationsTableExists: this.schemaManager.migrationsTableExists.bind(this.schemaManager),
|
|
2790
|
+
ensureMigrationsTable: this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),
|
|
2791
|
+
getOrDetectFormat: this.getOrDetectFormat.bind(this),
|
|
2792
|
+
loadMigrations: this.loadMigrations.bind(this)
|
|
2793
|
+
}
|
|
2794
|
+
);
|
|
2795
|
+
this.migrationExecutor = new MigrationExecutor(
|
|
2796
|
+
{ hooks: migratorConfig.hooks },
|
|
2797
|
+
{
|
|
2798
|
+
createPool: this.schemaManager.createPool.bind(this.schemaManager),
|
|
2799
|
+
schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
|
|
2800
|
+
migrationsTableExists: this.schemaManager.migrationsTableExists.bind(this.schemaManager),
|
|
2801
|
+
ensureMigrationsTable: this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),
|
|
2802
|
+
getOrDetectFormat: this.getOrDetectFormat.bind(this),
|
|
2803
|
+
loadMigrations: this.loadMigrations.bind(this)
|
|
2804
|
+
}
|
|
2805
|
+
);
|
|
2806
|
+
this.batchExecutor = new BatchExecutor(
|
|
2807
|
+
{ tenantDiscovery: migratorConfig.tenantDiscovery },
|
|
2808
|
+
this.migrationExecutor,
|
|
2809
|
+
this.loadMigrations.bind(this)
|
|
2810
|
+
);
|
|
2811
|
+
this.cloner = new Cloner(
|
|
2812
|
+
{ migrationsTable: this.migrationsTable },
|
|
2813
|
+
{
|
|
2814
|
+
createPool: this.schemaManager.createPool.bind(this.schemaManager),
|
|
2815
|
+
createRootPool: this.schemaManager.createRootPool.bind(this.schemaManager),
|
|
2816
|
+
schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
|
|
2817
|
+
schemaExists: this.schemaManager.schemaExists.bind(this.schemaManager),
|
|
2818
|
+
createSchema: this.schemaManager.createSchema.bind(this.schemaManager)
|
|
2819
|
+
}
|
|
2820
|
+
);
|
|
2821
|
+
}
|
|
2822
|
+
migrationsTable;
|
|
2823
|
+
schemaManager;
|
|
2824
|
+
driftDetector;
|
|
2825
|
+
seeder;
|
|
2826
|
+
syncManager;
|
|
2827
|
+
migrationExecutor;
|
|
2828
|
+
batchExecutor;
|
|
2829
|
+
cloner;
|
|
2830
|
+
/**
|
|
2831
|
+
* Migrate all tenants in parallel
|
|
2832
|
+
*
|
|
2833
|
+
* Delegates to BatchExecutor for parallel migration operations.
|
|
2834
|
+
*/
|
|
2835
|
+
async migrateAll(options = {}) {
|
|
2836
|
+
return this.batchExecutor.migrateAll(options);
|
|
2837
|
+
}
|
|
2838
|
+
/**
|
|
2839
|
+
* Migrate a single tenant
|
|
2840
|
+
*
|
|
2841
|
+
* Delegates to MigrationExecutor for single tenant operations.
|
|
2842
|
+
*/
|
|
2843
|
+
async migrateTenant(tenantId, migrations, options = {}) {
|
|
2844
|
+
return this.migrationExecutor.migrateTenant(tenantId, migrations, options);
|
|
2845
|
+
}
|
|
2846
|
+
/**
|
|
2847
|
+
* Migrate specific tenants
|
|
2848
|
+
*
|
|
2849
|
+
* Delegates to BatchExecutor for parallel migration operations.
|
|
2850
|
+
*/
|
|
2851
|
+
async migrateTenants(tenantIds, options = {}) {
|
|
2852
|
+
return this.batchExecutor.migrateTenants(tenantIds, options);
|
|
2853
|
+
}
|
|
2854
|
+
/**
|
|
2855
|
+
* Get migration status for all tenants
|
|
2856
|
+
*
|
|
2857
|
+
* Delegates to BatchExecutor for status operations.
|
|
2858
|
+
*/
|
|
2859
|
+
async getStatus() {
|
|
2860
|
+
return this.batchExecutor.getStatus();
|
|
2861
|
+
}
|
|
2862
|
+
/**
|
|
2863
|
+
* Get migration status for a specific tenant
|
|
2864
|
+
*
|
|
2865
|
+
* Delegates to MigrationExecutor for single tenant operations.
|
|
2866
|
+
*/
|
|
2867
|
+
async getTenantStatus(tenantId, migrations) {
|
|
2868
|
+
return this.migrationExecutor.getTenantStatus(tenantId, migrations);
|
|
2869
|
+
}
|
|
2870
|
+
/**
|
|
2871
|
+
* Create a new tenant schema and optionally apply migrations
|
|
2872
|
+
*/
|
|
2873
|
+
async createTenant(tenantId, options = {}) {
|
|
2874
|
+
const { migrate = true } = options;
|
|
2875
|
+
await this.schemaManager.createSchema(tenantId);
|
|
2876
|
+
if (migrate) {
|
|
2877
|
+
await this.migrateTenant(tenantId);
|
|
2878
|
+
}
|
|
2879
|
+
}
|
|
2880
|
+
/**
|
|
2881
|
+
* Drop a tenant schema
|
|
2882
|
+
*/
|
|
2883
|
+
async dropTenant(tenantId, options = {}) {
|
|
2884
|
+
await this.schemaManager.dropSchema(tenantId, options);
|
|
2885
|
+
}
|
|
2886
|
+
/**
|
|
2887
|
+
* Check if a tenant schema exists
|
|
2888
|
+
*/
|
|
2889
|
+
async tenantExists(tenantId) {
|
|
2890
|
+
return this.schemaManager.schemaExists(tenantId);
|
|
2891
|
+
}
|
|
2892
|
+
/**
|
|
2893
|
+
* Clone a tenant to a new tenant
|
|
2894
|
+
*
|
|
2895
|
+
* By default, clones only schema structure. Use includeData to copy data.
|
|
2896
|
+
*
|
|
2897
|
+
* @example
|
|
2898
|
+
* ```typescript
|
|
2899
|
+
* // Schema-only clone
|
|
2900
|
+
* await migrator.cloneTenant('production', 'dev');
|
|
2901
|
+
*
|
|
2902
|
+
* // Clone with data
|
|
2903
|
+
* await migrator.cloneTenant('production', 'dev', { includeData: true });
|
|
2904
|
+
*
|
|
2905
|
+
* // Clone with anonymization
|
|
2906
|
+
* await migrator.cloneTenant('production', 'dev', {
|
|
2907
|
+
* includeData: true,
|
|
2908
|
+
* anonymize: {
|
|
2909
|
+
* enabled: true,
|
|
2910
|
+
* rules: {
|
|
2911
|
+
* users: { email: null, phone: null },
|
|
2912
|
+
* },
|
|
2913
|
+
* },
|
|
2914
|
+
* });
|
|
2915
|
+
* ```
|
|
2916
|
+
*/
|
|
2917
|
+
async cloneTenant(sourceTenantId, targetTenantId, options = {}) {
|
|
2918
|
+
return this.cloner.cloneTenant(sourceTenantId, targetTenantId, options);
|
|
2919
|
+
}
|
|
2920
|
+
/**
|
|
2921
|
+
* Mark migrations as applied without executing SQL
|
|
2922
|
+
* Useful for syncing tracking state with already-applied migrations
|
|
2923
|
+
*
|
|
2924
|
+
* Delegates to MigrationExecutor for single tenant operations.
|
|
2925
|
+
*/
|
|
2926
|
+
async markAsApplied(tenantId, options = {}) {
|
|
2927
|
+
return this.migrationExecutor.markAsApplied(tenantId, options);
|
|
2928
|
+
}
|
|
2929
|
+
/**
|
|
2930
|
+
* Mark migrations as applied for all tenants without executing SQL
|
|
2931
|
+
* Useful for syncing tracking state with already-applied migrations
|
|
2932
|
+
*
|
|
2933
|
+
* Delegates to BatchExecutor for parallel operations.
|
|
2934
|
+
*/
|
|
2935
|
+
async markAllAsApplied(options = {}) {
|
|
2936
|
+
return this.batchExecutor.markAllAsApplied(options);
|
|
2937
|
+
}
|
|
2938
|
+
// ============================================================================
|
|
2939
|
+
// Sync Methods (delegated to SyncManager)
|
|
2940
|
+
// ============================================================================
|
|
2941
|
+
/**
|
|
2942
|
+
* Get sync status for all tenants
|
|
2943
|
+
* Detects divergences between migrations on disk and tracking in database
|
|
2944
|
+
*/
|
|
2945
|
+
async getSyncStatus() {
|
|
2946
|
+
return this.syncManager.getSyncStatus();
|
|
2947
|
+
}
|
|
2948
|
+
/**
|
|
2949
|
+
* Get sync status for a specific tenant
|
|
2950
|
+
*/
|
|
2951
|
+
async getTenantSyncStatus(tenantId, migrations) {
|
|
2952
|
+
return this.syncManager.getTenantSyncStatus(tenantId, migrations);
|
|
2953
|
+
}
|
|
2954
|
+
/**
|
|
2955
|
+
* Mark missing migrations as applied for a tenant
|
|
2956
|
+
*/
|
|
2957
|
+
async markMissing(tenantId) {
|
|
2958
|
+
return this.syncManager.markMissing(tenantId);
|
|
2959
|
+
}
|
|
2960
|
+
/**
|
|
2961
|
+
* Mark missing migrations as applied for all tenants
|
|
2962
|
+
*/
|
|
2963
|
+
async markAllMissing(options = {}) {
|
|
2964
|
+
return this.syncManager.markAllMissing(options);
|
|
2965
|
+
}
|
|
653
2966
|
/**
|
|
654
2967
|
* Remove orphan migration records for a tenant
|
|
655
2968
|
*/
|
|
656
2969
|
async cleanOrphans(tenantId) {
|
|
657
|
-
|
|
658
|
-
const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
|
|
659
|
-
const removedOrphans = [];
|
|
660
|
-
const pool = await this.createPool(schemaName);
|
|
661
|
-
try {
|
|
662
|
-
const syncStatus = await this.getTenantSyncStatus(tenantId);
|
|
663
|
-
if (syncStatus.error) {
|
|
664
|
-
return {
|
|
665
|
-
tenantId,
|
|
666
|
-
schemaName,
|
|
667
|
-
success: false,
|
|
668
|
-
markedMigrations: [],
|
|
669
|
-
removedOrphans: [],
|
|
670
|
-
error: syncStatus.error,
|
|
671
|
-
durationMs: Date.now() - startTime
|
|
672
|
-
};
|
|
673
|
-
}
|
|
674
|
-
if (syncStatus.orphans.length === 0) {
|
|
675
|
-
return {
|
|
676
|
-
tenantId,
|
|
677
|
-
schemaName,
|
|
678
|
-
success: true,
|
|
679
|
-
markedMigrations: [],
|
|
680
|
-
removedOrphans: [],
|
|
681
|
-
durationMs: Date.now() - startTime
|
|
682
|
-
};
|
|
683
|
-
}
|
|
684
|
-
const format = await this.getOrDetectFormat(pool, schemaName);
|
|
685
|
-
const identifierColumn = format.columns.identifier;
|
|
686
|
-
for (const orphan of syncStatus.orphans) {
|
|
687
|
-
await pool.query(
|
|
688
|
-
`DELETE FROM "${schemaName}"."${format.tableName}" WHERE "${identifierColumn}" = $1`,
|
|
689
|
-
[orphan]
|
|
690
|
-
);
|
|
691
|
-
removedOrphans.push(orphan);
|
|
692
|
-
}
|
|
693
|
-
return {
|
|
694
|
-
tenantId,
|
|
695
|
-
schemaName,
|
|
696
|
-
success: true,
|
|
697
|
-
markedMigrations: [],
|
|
698
|
-
removedOrphans,
|
|
699
|
-
durationMs: Date.now() - startTime
|
|
700
|
-
};
|
|
701
|
-
} catch (error) {
|
|
702
|
-
return {
|
|
703
|
-
tenantId,
|
|
704
|
-
schemaName,
|
|
705
|
-
success: false,
|
|
706
|
-
markedMigrations: [],
|
|
707
|
-
removedOrphans,
|
|
708
|
-
error: error.message,
|
|
709
|
-
durationMs: Date.now() - startTime
|
|
710
|
-
};
|
|
711
|
-
} finally {
|
|
712
|
-
await pool.end();
|
|
713
|
-
}
|
|
2970
|
+
return this.syncManager.cleanOrphans(tenantId);
|
|
714
2971
|
}
|
|
715
2972
|
/**
|
|
716
2973
|
* Remove orphan migration records for all tenants
|
|
717
2974
|
*/
|
|
718
2975
|
async cleanAllOrphans(options = {}) {
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
2976
|
+
return this.syncManager.cleanAllOrphans(options);
|
|
2977
|
+
}
|
|
2978
|
+
// ============================================================================
|
|
2979
|
+
// Seeding Methods (delegated to Seeder)
|
|
2980
|
+
// ============================================================================
|
|
2981
|
+
/**
|
|
2982
|
+
* Seed a single tenant with initial data
|
|
2983
|
+
*
|
|
2984
|
+
* @example
|
|
2985
|
+
* ```typescript
|
|
2986
|
+
* const seed: SeedFunction = async (db, tenantId) => {
|
|
2987
|
+
* await db.insert(roles).values([
|
|
2988
|
+
* { name: 'admin', permissions: ['*'] },
|
|
2989
|
+
* { name: 'user', permissions: ['read'] },
|
|
2990
|
+
* ]);
|
|
2991
|
+
* };
|
|
2992
|
+
*
|
|
2993
|
+
* await migrator.seedTenant('tenant-123', seed);
|
|
2994
|
+
* ```
|
|
2995
|
+
*/
|
|
2996
|
+
async seedTenant(tenantId, seedFn) {
|
|
2997
|
+
return this.seeder.seedTenant(tenantId, seedFn);
|
|
2998
|
+
}
|
|
2999
|
+
/**
|
|
3000
|
+
* Seed all tenants with initial data in parallel
|
|
3001
|
+
*
|
|
3002
|
+
* @example
|
|
3003
|
+
* ```typescript
|
|
3004
|
+
* const seed: SeedFunction = async (db, tenantId) => {
|
|
3005
|
+
* await db.insert(roles).values([
|
|
3006
|
+
* { name: 'admin', permissions: ['*'] },
|
|
3007
|
+
* ]);
|
|
3008
|
+
* };
|
|
3009
|
+
*
|
|
3010
|
+
* await migrator.seedAll(seed, { concurrency: 10 });
|
|
3011
|
+
* ```
|
|
3012
|
+
*/
|
|
3013
|
+
async seedAll(seedFn, options = {}) {
|
|
3014
|
+
return this.seeder.seedAll(seedFn, options);
|
|
3015
|
+
}
|
|
3016
|
+
/**
|
|
3017
|
+
* Seed specific tenants with initial data
|
|
3018
|
+
*/
|
|
3019
|
+
async seedTenants(tenantIds, seedFn, options = {}) {
|
|
3020
|
+
return this.seeder.seedTenants(tenantIds, seedFn, options);
|
|
748
3021
|
}
|
|
749
3022
|
/**
|
|
750
3023
|
* Load migration files from the migrations folder
|
|
@@ -769,76 +3042,11 @@ var Migrator = class {
|
|
|
769
3042
|
}
|
|
770
3043
|
return migrations.sort((a, b) => a.timestamp - b.timestamp);
|
|
771
3044
|
}
|
|
772
|
-
/**
|
|
773
|
-
* Create a pool for a specific schema
|
|
774
|
-
*/
|
|
775
|
-
async createPool(schemaName) {
|
|
776
|
-
return new Pool({
|
|
777
|
-
connectionString: this.tenantConfig.connection.url,
|
|
778
|
-
...this.tenantConfig.connection.poolConfig,
|
|
779
|
-
options: `-c search_path="${schemaName}",public`
|
|
780
|
-
});
|
|
781
|
-
}
|
|
782
|
-
/**
|
|
783
|
-
* Ensure migrations table exists with the correct format
|
|
784
|
-
*/
|
|
785
|
-
async ensureMigrationsTable(pool, schemaName, format) {
|
|
786
|
-
const { identifier, timestamp, timestampType } = format.columns;
|
|
787
|
-
const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
|
|
788
|
-
const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
|
|
789
|
-
await pool.query(`
|
|
790
|
-
CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
|
|
791
|
-
id SERIAL PRIMARY KEY,
|
|
792
|
-
${identifierCol},
|
|
793
|
-
${timestampCol}
|
|
794
|
-
)
|
|
795
|
-
`);
|
|
796
|
-
}
|
|
797
|
-
/**
|
|
798
|
-
* Check if migrations table exists
|
|
799
|
-
*/
|
|
800
|
-
async migrationsTableExists(pool, schemaName) {
|
|
801
|
-
const result = await pool.query(
|
|
802
|
-
`SELECT 1 FROM information_schema.tables
|
|
803
|
-
WHERE table_schema = $1 AND table_name = $2`,
|
|
804
|
-
[schemaName, this.migrationsTable]
|
|
805
|
-
);
|
|
806
|
-
return result.rowCount !== null && result.rowCount > 0;
|
|
807
|
-
}
|
|
808
|
-
/**
|
|
809
|
-
* Get applied migrations for a schema
|
|
810
|
-
*/
|
|
811
|
-
async getAppliedMigrations(pool, schemaName, format) {
|
|
812
|
-
const identifierColumn = format.columns.identifier;
|
|
813
|
-
const timestampColumn = format.columns.timestamp;
|
|
814
|
-
const result = await pool.query(
|
|
815
|
-
`SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
|
|
816
|
-
FROM "${schemaName}"."${format.tableName}"
|
|
817
|
-
ORDER BY id`
|
|
818
|
-
);
|
|
819
|
-
return result.rows.map((row) => {
|
|
820
|
-
const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
|
|
821
|
-
return {
|
|
822
|
-
id: row.id,
|
|
823
|
-
identifier: row.identifier,
|
|
824
|
-
// Set name or hash based on format
|
|
825
|
-
...format.columns.identifier === "name" ? { name: row.identifier } : { hash: row.identifier },
|
|
826
|
-
appliedAt
|
|
827
|
-
};
|
|
828
|
-
});
|
|
829
|
-
}
|
|
830
|
-
/**
|
|
831
|
-
* Check if a migration has been applied
|
|
832
|
-
*/
|
|
833
|
-
isMigrationApplied(migration, appliedIdentifiers, format) {
|
|
834
|
-
if (format.columns.identifier === "name") {
|
|
835
|
-
return appliedIdentifiers.has(migration.name);
|
|
836
|
-
}
|
|
837
|
-
return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
|
|
838
|
-
}
|
|
839
3045
|
/**
|
|
840
3046
|
* Get or detect the format for a schema
|
|
841
3047
|
* Returns the configured format or auto-detects from existing table
|
|
3048
|
+
*
|
|
3049
|
+
* Note: This method is shared with SyncManager and MigrationExecutor via dependency injection.
|
|
842
3050
|
*/
|
|
843
3051
|
async getOrDetectFormat(pool, schemaName) {
|
|
844
3052
|
const configuredFormat = this.migratorConfig.tableFormat ?? "auto";
|
|
@@ -852,124 +3060,51 @@ var Migrator = class {
|
|
|
852
3060
|
const defaultFormat = this.migratorConfig.defaultFormat ?? "name";
|
|
853
3061
|
return getFormatConfig(defaultFormat, this.migrationsTable);
|
|
854
3062
|
}
|
|
3063
|
+
// ============================================================================
|
|
3064
|
+
// Schema Drift Detection Methods (delegated to DriftDetector)
|
|
3065
|
+
// ============================================================================
|
|
855
3066
|
/**
|
|
856
|
-
*
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
}
|
|
877
|
-
}
|
|
878
|
-
/**
|
|
879
|
-
* Record a migration as applied without executing SQL
|
|
880
|
-
* Used by markAsApplied to sync tracking state
|
|
881
|
-
*/
|
|
882
|
-
async recordMigration(pool, schemaName, migration, format) {
|
|
883
|
-
const { identifier, timestamp, timestampType } = format.columns;
|
|
884
|
-
const identifierValue = identifier === "name" ? migration.name : migration.hash;
|
|
885
|
-
const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
|
|
886
|
-
await pool.query(
|
|
887
|
-
`INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
|
|
888
|
-
[identifierValue, timestampValue]
|
|
889
|
-
);
|
|
890
|
-
}
|
|
891
|
-
/**
|
|
892
|
-
* Create a skipped result
|
|
893
|
-
*/
|
|
894
|
-
createSkippedResult(tenantId) {
|
|
895
|
-
return {
|
|
896
|
-
tenantId,
|
|
897
|
-
schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
|
|
898
|
-
success: false,
|
|
899
|
-
appliedMigrations: [],
|
|
900
|
-
error: "Skipped due to abort",
|
|
901
|
-
durationMs: 0
|
|
902
|
-
};
|
|
903
|
-
}
|
|
904
|
-
/**
|
|
905
|
-
* Create an error result
|
|
906
|
-
*/
|
|
907
|
-
createErrorResult(tenantId, error) {
|
|
908
|
-
return {
|
|
909
|
-
tenantId,
|
|
910
|
-
schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
|
|
911
|
-
success: false,
|
|
912
|
-
appliedMigrations: [],
|
|
913
|
-
error: error.message,
|
|
914
|
-
durationMs: 0
|
|
915
|
-
};
|
|
916
|
-
}
|
|
917
|
-
/**
|
|
918
|
-
* Aggregate migration results
|
|
919
|
-
*/
|
|
920
|
-
aggregateResults(results) {
|
|
921
|
-
return {
|
|
922
|
-
total: results.length,
|
|
923
|
-
succeeded: results.filter((r) => r.success).length,
|
|
924
|
-
failed: results.filter((r) => !r.success && r.error !== "Skipped due to abort").length,
|
|
925
|
-
skipped: results.filter((r) => r.error === "Skipped due to abort").length,
|
|
926
|
-
details: results
|
|
927
|
-
};
|
|
928
|
-
}
|
|
929
|
-
/**
|
|
930
|
-
* Create a skipped sync result
|
|
3067
|
+
* Detect schema drift across all tenants
|
|
3068
|
+
* Compares each tenant's schema against a reference tenant (first tenant by default)
|
|
3069
|
+
*
|
|
3070
|
+
* @example
|
|
3071
|
+
* ```typescript
|
|
3072
|
+
* const drift = await migrator.getSchemaDrift();
|
|
3073
|
+
* if (drift.withDrift > 0) {
|
|
3074
|
+
* console.log('Schema drift detected!');
|
|
3075
|
+
* for (const tenant of drift.details) {
|
|
3076
|
+
* if (tenant.hasDrift) {
|
|
3077
|
+
* console.log(`Tenant ${tenant.tenantId} has drift:`);
|
|
3078
|
+
* for (const table of tenant.tables) {
|
|
3079
|
+
* for (const col of table.columns) {
|
|
3080
|
+
* console.log(` - ${table.table}.${col.column}: ${col.description}`);
|
|
3081
|
+
* }
|
|
3082
|
+
* }
|
|
3083
|
+
* }
|
|
3084
|
+
* }
|
|
3085
|
+
* }
|
|
3086
|
+
* ```
|
|
931
3087
|
*/
|
|
932
|
-
|
|
933
|
-
return
|
|
934
|
-
tenantId,
|
|
935
|
-
schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
|
|
936
|
-
success: false,
|
|
937
|
-
markedMigrations: [],
|
|
938
|
-
removedOrphans: [],
|
|
939
|
-
error: "Skipped due to abort",
|
|
940
|
-
durationMs: 0
|
|
941
|
-
};
|
|
3088
|
+
async getSchemaDrift(options = {}) {
|
|
3089
|
+
return this.driftDetector.detectDrift(options);
|
|
942
3090
|
}
|
|
943
3091
|
/**
|
|
944
|
-
*
|
|
3092
|
+
* Get schema drift for a specific tenant compared to a reference
|
|
945
3093
|
*/
|
|
946
|
-
|
|
947
|
-
return
|
|
948
|
-
tenantId,
|
|
949
|
-
schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
|
|
950
|
-
success: false,
|
|
951
|
-
markedMigrations: [],
|
|
952
|
-
removedOrphans: [],
|
|
953
|
-
error: error.message,
|
|
954
|
-
durationMs: 0
|
|
955
|
-
};
|
|
3094
|
+
async getTenantSchemaDrift(tenantId, referenceTenantId, options = {}) {
|
|
3095
|
+
return this.driftDetector.compareTenant(tenantId, referenceTenantId, options);
|
|
956
3096
|
}
|
|
957
3097
|
/**
|
|
958
|
-
*
|
|
3098
|
+
* Introspect the schema of a tenant
|
|
959
3099
|
*/
|
|
960
|
-
|
|
961
|
-
return
|
|
962
|
-
total: results.length,
|
|
963
|
-
succeeded: results.filter((r) => r.success).length,
|
|
964
|
-
failed: results.filter((r) => !r.success).length,
|
|
965
|
-
details: results
|
|
966
|
-
};
|
|
3100
|
+
async introspectTenantSchema(tenantId, options = {}) {
|
|
3101
|
+
return this.driftDetector.introspectSchema(tenantId, options);
|
|
967
3102
|
}
|
|
968
3103
|
};
|
|
969
3104
|
function createMigrator(tenantConfig, migratorConfig) {
|
|
970
3105
|
return new Migrator(tenantConfig, migratorConfig);
|
|
971
3106
|
}
|
|
972
3107
|
|
|
973
|
-
export { DEFAULT_FORMAT, DRIZZLE_KIT_FORMAT, Migrator, createMigrator, detectTableFormat, getFormatConfig };
|
|
3108
|
+
export { BatchExecutor, Cloner, DEFAULT_FORMAT, DRIZZLE_KIT_FORMAT, MigrationExecutor, Migrator, SchemaManager, Seeder, SyncManager, createBatchExecutor, createCloner, createMigrationExecutor, createMigrator, createSchemaManager, createSeeder, createSyncManager, detectTableFormat, getFormatConfig };
|
|
974
3109
|
//# sourceMappingURL=index.js.map
|
|
975
3110
|
//# sourceMappingURL=index.js.map
|