drizzle-multitenant 1.0.10 → 1.2.0

This diff compares two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
package/dist/index.js CHANGED
@@ -254,8 +254,161 @@ var DebugLogger = class {
254
254
  function createDebugLogger(config) {
255
255
  return new DebugLogger(config);
256
256
  }
257
+ var PoolCache = class {
258
+ cache;
259
+ poolTtlMs;
260
+ onDispose;
261
+ constructor(options) {
262
+ this.poolTtlMs = options.poolTtlMs;
263
+ this.onDispose = options.onDispose;
264
+ this.cache = new LRUCache({
265
+ max: options.maxPools,
266
+ dispose: (entry, key) => {
267
+ void this.handleDispose(key, entry);
268
+ },
269
+ noDisposeOnSet: true
270
+ });
271
+ }
272
+ /**
273
+ * Get a pool entry from cache
274
+ *
275
+ * This does NOT update the last access time automatically.
276
+ * Use `touch()` to update access time when needed.
277
+ */
278
+ get(schemaName) {
279
+ return this.cache.get(schemaName);
280
+ }
281
+ /**
282
+ * Set a pool entry in cache
283
+ *
284
+ * If the cache is full, the least recently used entry will be evicted.
285
+ */
286
+ set(schemaName, entry) {
287
+ this.cache.set(schemaName, entry);
288
+ }
289
+ /**
290
+ * Check if a pool exists in cache
291
+ */
292
+ has(schemaName) {
293
+ return this.cache.has(schemaName);
294
+ }
295
+ /**
296
+ * Delete a pool from cache
297
+ *
298
+ * Note: This triggers the dispose callback if configured.
299
+ */
300
+ delete(schemaName) {
301
+ return this.cache.delete(schemaName);
302
+ }
303
+ /**
304
+ * Get the number of pools in cache
305
+ */
306
+ size() {
307
+ return this.cache.size;
308
+ }
309
+ /**
310
+ * Get all schema names in cache
311
+ */
312
+ keys() {
313
+ return Array.from(this.cache.keys());
314
+ }
315
+ /**
316
+ * Iterate over all entries in cache
317
+ *
318
+ * @yields [schemaName, entry] pairs
319
+ */
320
+ *entries() {
321
+ for (const [key, value] of this.cache.entries()) {
322
+ yield [key, value];
323
+ }
324
+ }
325
+ /**
326
+ * Clear all pools from cache
327
+ *
328
+ * Each pool's dispose callback will be triggered by the LRU cache.
329
+ */
330
+ async clear() {
331
+ this.cache.clear();
332
+ await Promise.resolve();
333
+ }
334
+ /**
335
+ * Evict the least recently used pool
336
+ *
337
+ * @returns The schema name of the evicted pool, or undefined if cache is empty
338
+ */
339
+ evictLRU() {
340
+ const keys = Array.from(this.cache.keys());
341
+ if (keys.length === 0) {
342
+ return void 0;
343
+ }
344
+ const lruKey = keys[keys.length - 1];
345
+ this.cache.delete(lruKey);
346
+ return lruKey;
347
+ }
348
+ /**
349
+ * Evict pools that have exceeded TTL
350
+ *
351
+ * @returns Array of schema names that were evicted
352
+ */
353
+ async evictExpired() {
354
+ if (!this.poolTtlMs) {
355
+ return [];
356
+ }
357
+ const now = Date.now();
358
+ const toEvict = [];
359
+ for (const [schemaName, entry] of this.cache.entries()) {
360
+ if (now - entry.lastAccess > this.poolTtlMs) {
361
+ toEvict.push(schemaName);
362
+ }
363
+ }
364
+ for (const schemaName of toEvict) {
365
+ this.cache.delete(schemaName);
366
+ }
367
+ return toEvict;
368
+ }
369
+ /**
370
+ * Update last access time for a pool
371
+ *
372
+ * This moves the pool to the front of the LRU list.
373
+ */
374
+ touch(schemaName) {
375
+ const entry = this.cache.get(schemaName);
376
+ if (entry) {
377
+ entry.lastAccess = Date.now();
378
+ }
379
+ }
380
+ /**
381
+ * Get the maximum number of pools allowed in cache
382
+ */
383
+ getMaxPools() {
384
+ return this.cache.max;
385
+ }
386
+ /**
387
+ * Get the configured TTL in milliseconds
388
+ */
389
+ getTtlMs() {
390
+ return this.poolTtlMs;
391
+ }
392
+ /**
393
+ * Check if an entry has expired based on TTL
394
+ */
395
+ isExpired(entry) {
396
+ if (!this.poolTtlMs) {
397
+ return false;
398
+ }
399
+ return Date.now() - entry.lastAccess > this.poolTtlMs;
400
+ }
401
+ /**
402
+ * Handle disposal of a cache entry
403
+ */
404
+ async handleDispose(schemaName, entry) {
405
+ if (this.onDispose) {
406
+ await this.onDispose(schemaName, entry);
407
+ }
408
+ }
409
+ };
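
The new `PoolCache` wraps `LRUCache` to bound the number of tenant pools and to expire idle ones: `PoolManager` calls `touch()` on every `getDb()`/`getDbAsync()` hit and runs `evictExpired()` from its cleanup interval. Below is a minimal standalone sketch of the TTL rule only; the 30-minute TTL is an illustrative value, not the package default, and `PoolCache` itself is an internal class bundled into `dist/index.js`, not necessarily part of the public API.

```typescript
// Standalone illustration of the PoolCache TTL rule above.
// The 30-minute TTL is an illustrative value, not the package default.
interface CacheEntry {
  lastAccess: number; // epoch ms, refreshed by touch() on every getDb()/getDbAsync() hit
}

const poolTtlMs = 30 * 60 * 1000; // assumption: 30-minute idle TTL

function isExpired(entry: CacheEntry, now: number = Date.now()): boolean {
  // Mirrors PoolCache.isExpired(): an entry expires once it has been idle longer than poolTtlMs.
  return now - entry.lastAccess > poolTtlMs;
}

const idleEntry: CacheEntry = { lastAccess: Date.now() - 31 * 60 * 1000 };
console.log(isExpired(idleEntry)); // true: 31 minutes idle > 30-minute TTL
```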
257
410
 
258
- // src/retry.ts
411
+ // src/pool/retry/retry-handler.ts
259
412
  function isRetryableError(error) {
260
413
  const message = error.message.toLowerCase();
261
414
  if (message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("enotfound") || message.includes("connection refused") || message.includes("connection reset") || message.includes("connection terminated") || message.includes("connection timed out") || message.includes("timeout expired") || message.includes("socket hang up")) {
@@ -269,82 +422,315 @@ function isRetryableError(error) {
269
422
  }
270
423
  return false;
271
424
  }
272
- function calculateDelay(attempt, config) {
273
- const exponentialDelay = config.initialDelayMs * Math.pow(config.backoffMultiplier, attempt);
274
- const cappedDelay = Math.min(exponentialDelay, config.maxDelayMs);
275
- if (config.jitter) {
276
- const jitterFactor = 1 + Math.random() * 0.25;
277
- return Math.floor(cappedDelay * jitterFactor);
278
- }
279
- return Math.floor(cappedDelay);
280
- }
281
425
  function sleep(ms) {
282
426
  return new Promise((resolve) => setTimeout(resolve, ms));
283
427
  }
284
- async function withRetry(operation, config) {
285
- const retryConfig = {
286
- maxAttempts: config?.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
287
- initialDelayMs: config?.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
288
- maxDelayMs: config?.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
289
- backoffMultiplier: config?.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
290
- jitter: config?.jitter ?? DEFAULT_CONFIG.retry.jitter,
291
- isRetryable: config?.isRetryable ?? isRetryableError,
292
- onRetry: config?.onRetry
293
- };
294
- const startTime = Date.now();
295
- let lastError = null;
296
- for (let attempt = 0; attempt < retryConfig.maxAttempts; attempt++) {
428
+ var RetryHandler = class {
429
+ config;
430
+ constructor(config) {
431
+ this.config = {
432
+ maxAttempts: config?.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
433
+ initialDelayMs: config?.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
434
+ maxDelayMs: config?.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
435
+ backoffMultiplier: config?.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
436
+ jitter: config?.jitter ?? DEFAULT_CONFIG.retry.jitter,
437
+ isRetryable: config?.isRetryable ?? isRetryableError,
438
+ onRetry: config?.onRetry
439
+ };
440
+ }
441
+ /**
442
+ * Execute an operation with retry logic
443
+ *
444
+ * @param operation - The async operation to execute
445
+ * @param overrideConfig - Optional config to override defaults for this call
446
+ * @returns Result with metadata about attempts and timing
447
+ */
448
+ async withRetry(operation, overrideConfig) {
449
+ const config = overrideConfig ? { ...this.config, ...overrideConfig } : this.config;
450
+ const startTime = Date.now();
451
+ let lastError = null;
452
+ for (let attempt = 0; attempt < config.maxAttempts; attempt++) {
453
+ try {
454
+ const result = await operation();
455
+ return {
456
+ result,
457
+ attempts: attempt + 1,
458
+ totalTimeMs: Date.now() - startTime
459
+ };
460
+ } catch (error) {
461
+ lastError = error;
462
+ const isLastAttempt = attempt >= config.maxAttempts - 1;
463
+ const checkRetryable = config.isRetryable ?? this.isRetryable;
464
+ if (isLastAttempt || !checkRetryable(lastError)) {
465
+ throw lastError;
466
+ }
467
+ const delay = this.calculateDelay(attempt, config);
468
+ config.onRetry?.(attempt + 1, lastError, delay);
469
+ await sleep(delay);
470
+ }
471
+ }
472
+ throw lastError ?? new Error("Retry failed with no error");
473
+ }
474
+ /**
475
+ * Calculate delay with exponential backoff and optional jitter
476
+ *
477
+ * @param attempt - Current attempt number (0-indexed)
478
+ * @param config - Retry configuration
479
+ * @returns Delay in milliseconds
480
+ */
481
+ calculateDelay(attempt, config) {
482
+ const cfg = config ? { ...this.config, ...config } : this.config;
483
+ const exponentialDelay = cfg.initialDelayMs * Math.pow(cfg.backoffMultiplier, attempt);
484
+ const cappedDelay = Math.min(exponentialDelay, cfg.maxDelayMs);
485
+ if (cfg.jitter) {
486
+ const jitterFactor = 1 + Math.random() * 0.25;
487
+ return Math.floor(cappedDelay * jitterFactor);
488
+ }
489
+ return Math.floor(cappedDelay);
490
+ }
491
+ /**
492
+ * Check if an error is retryable
493
+ *
494
+ * Uses the configured isRetryable function or the default implementation.
495
+ */
496
+ isRetryable(error) {
497
+ return (this.config.isRetryable ?? isRetryableError)(error);
498
+ }
499
+ /**
500
+ * Get the current configuration
501
+ */
502
+ getConfig() {
503
+ return { ...this.config };
504
+ }
505
+ /**
506
+ * Get the maximum number of attempts
507
+ */
508
+ getMaxAttempts() {
509
+ return this.config.maxAttempts;
510
+ }
511
+ };
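
`RetryHandler` keeps the same backoff math as the removed `withRetry()`/`calculateDelay()` helpers: delay = `initialDelayMs * backoffMultiplier^attempt`, capped at `maxDelayMs`, optionally stretched by a random jitter factor of up to 25%. A standalone sketch with illustrative numbers follows; the real defaults come from `DEFAULT_CONFIG.retry`, which is not shown in this diff.

```typescript
// Same math as RetryHandler.calculateDelay(); the numbers below are illustrative,
// not the package defaults (those come from DEFAULT_CONFIG.retry, not shown in this diff).
interface BackoffConfig {
  initialDelayMs: number;
  maxDelayMs: number;
  backoffMultiplier: number;
  jitter: boolean;
}

function calculateDelay(attempt: number, cfg: BackoffConfig): number {
  const exponential = cfg.initialDelayMs * Math.pow(cfg.backoffMultiplier, attempt);
  const capped = Math.min(exponential, cfg.maxDelayMs);
  // Jitter stretches the delay by up to 25% to spread out concurrent retries.
  return Math.floor(cfg.jitter ? capped * (1 + Math.random() * 0.25) : capped);
}

const cfg: BackoffConfig = { initialDelayMs: 100, maxDelayMs: 5000, backoffMultiplier: 2, jitter: false };
console.log([0, 1, 2, 3, 4].map((attempt) => calculateDelay(attempt, cfg)));
// -> [100, 200, 400, 800, 1600]; the 5000 ms cap kicks in from attempt 6 onward
```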
512
+
513
+ // src/pool/health/health-checker.ts
514
+ var HealthChecker = class {
515
+ constructor(deps) {
516
+ this.deps = deps;
517
+ }
518
+ /**
519
+ * Check health of all pools and connections
520
+ *
521
+ * Verifies the health of tenant pools and optionally the shared database.
522
+ * Returns detailed status information for monitoring and load balancer integration.
523
+ *
524
+ * @example
525
+ * ```typescript
526
+ * // Basic health check
527
+ * const health = await healthChecker.checkHealth();
528
+ * console.log(health.healthy); // true/false
529
+ *
530
+ * // Check specific tenants only
531
+ * const health = await healthChecker.checkHealth({
532
+ * tenantIds: ['tenant-1', 'tenant-2'],
533
+ * ping: true,
534
+ * pingTimeoutMs: 3000,
535
+ * });
536
+ * ```
537
+ */
538
+ async checkHealth(options = {}) {
539
+ const startTime = Date.now();
540
+ const {
541
+ ping = true,
542
+ pingTimeoutMs = 5e3,
543
+ includeShared = true,
544
+ tenantIds
545
+ } = options;
546
+ const poolHealthResults = [];
547
+ let sharedDbStatus = "ok";
548
+ let sharedDbResponseTimeMs;
549
+ let sharedDbError;
550
+ const poolsToCheck = this.getPoolsToCheck(tenantIds);
551
+ const poolChecks = poolsToCheck.map(async ({ schemaName, tenantId, entry }) => {
552
+ return this.checkPoolHealth(tenantId, schemaName, entry, ping, pingTimeoutMs);
553
+ });
554
+ poolHealthResults.push(...await Promise.all(poolChecks));
555
+ const sharedPool = this.deps.getSharedPool();
556
+ if (includeShared && sharedPool) {
557
+ const sharedResult = await this.checkSharedDbHealth(sharedPool, ping, pingTimeoutMs);
558
+ sharedDbStatus = sharedResult.status;
559
+ sharedDbResponseTimeMs = sharedResult.responseTimeMs;
560
+ sharedDbError = sharedResult.error;
561
+ }
562
+ const degradedPools = poolHealthResults.filter((p) => p.status === "degraded").length;
563
+ const unhealthyPools = poolHealthResults.filter((p) => p.status === "unhealthy").length;
564
+ const healthy = unhealthyPools === 0 && sharedDbStatus !== "unhealthy";
565
+ const result = {
566
+ healthy,
567
+ pools: poolHealthResults,
568
+ sharedDb: sharedDbStatus,
569
+ totalPools: poolHealthResults.length,
570
+ degradedPools,
571
+ unhealthyPools,
572
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
573
+ durationMs: Date.now() - startTime
574
+ };
575
+ if (sharedDbResponseTimeMs !== void 0) {
576
+ result.sharedDbResponseTimeMs = sharedDbResponseTimeMs;
577
+ }
578
+ if (sharedDbError !== void 0) {
579
+ result.sharedDbError = sharedDbError;
580
+ }
581
+ return result;
582
+ }
583
+ /**
584
+ * Get pools to check based on options
585
+ */
586
+ getPoolsToCheck(tenantIds) {
587
+ const poolsToCheck = [];
588
+ if (tenantIds && tenantIds.length > 0) {
589
+ for (const tenantId of tenantIds) {
590
+ const schemaName = this.deps.getSchemaName(tenantId);
591
+ const entry = this.deps.getPoolEntry(schemaName);
592
+ if (entry) {
593
+ poolsToCheck.push({ schemaName, tenantId, entry });
594
+ }
595
+ }
596
+ } else {
597
+ for (const [schemaName, entry] of this.deps.getPoolEntries()) {
598
+ const tenantId = this.deps.getTenantIdBySchema(schemaName) ?? schemaName;
599
+ poolsToCheck.push({ schemaName, tenantId, entry });
600
+ }
601
+ }
602
+ return poolsToCheck;
603
+ }
604
+ /**
605
+ * Check health of a single tenant pool
606
+ */
607
+ async checkPoolHealth(tenantId, schemaName, entry, ping, pingTimeoutMs) {
608
+ const pool = entry.pool;
609
+ const totalConnections = pool.totalCount;
610
+ const idleConnections = pool.idleCount;
611
+ const waitingRequests = pool.waitingCount;
612
+ let status = "ok";
613
+ let responseTimeMs;
614
+ let error;
615
+ if (waitingRequests > 0) {
616
+ status = "degraded";
617
+ }
618
+ if (ping) {
619
+ const pingResult = await this.executePingQuery(pool, pingTimeoutMs);
620
+ responseTimeMs = pingResult.responseTimeMs;
621
+ if (!pingResult.success) {
622
+ status = "unhealthy";
623
+ error = pingResult.error;
624
+ } else if (pingResult.responseTimeMs && pingResult.responseTimeMs > pingTimeoutMs / 2) {
625
+ if (status === "ok") {
626
+ status = "degraded";
627
+ }
628
+ }
629
+ }
630
+ const result = {
631
+ tenantId,
632
+ schemaName,
633
+ status,
634
+ totalConnections,
635
+ idleConnections,
636
+ waitingRequests
637
+ };
638
+ if (responseTimeMs !== void 0) {
639
+ result.responseTimeMs = responseTimeMs;
640
+ }
641
+ if (error !== void 0) {
642
+ result.error = error;
643
+ }
644
+ return result;
645
+ }
646
+ /**
647
+ * Check health of shared database
648
+ */
649
+ async checkSharedDbHealth(sharedPool, ping, pingTimeoutMs) {
650
+ let status = "ok";
651
+ let responseTimeMs;
652
+ let error;
653
+ const waitingRequests = sharedPool.waitingCount;
654
+ if (waitingRequests > 0) {
655
+ status = "degraded";
656
+ }
657
+ if (ping) {
658
+ const pingResult = await this.executePingQuery(sharedPool, pingTimeoutMs);
659
+ responseTimeMs = pingResult.responseTimeMs;
660
+ if (!pingResult.success) {
661
+ status = "unhealthy";
662
+ error = pingResult.error;
663
+ } else if (pingResult.responseTimeMs && pingResult.responseTimeMs > pingTimeoutMs / 2) {
664
+ if (status === "ok") {
665
+ status = "degraded";
666
+ }
667
+ }
668
+ }
669
+ const result = { status };
670
+ if (responseTimeMs !== void 0) {
671
+ result.responseTimeMs = responseTimeMs;
672
+ }
673
+ if (error !== void 0) {
674
+ result.error = error;
675
+ }
676
+ return result;
677
+ }
678
+ /**
679
+ * Execute a ping query with timeout
680
+ */
681
+ async executePingQuery(pool, timeoutMs) {
682
+ const startTime = Date.now();
297
683
  try {
298
- const result = await operation();
684
+ const timeoutPromise = new Promise((_, reject) => {
685
+ setTimeout(() => reject(new Error("Health check ping timeout")), timeoutMs);
686
+ });
687
+ const queryPromise = pool.query("SELECT 1");
688
+ await Promise.race([queryPromise, timeoutPromise]);
299
689
  return {
300
- result,
301
- attempts: attempt + 1,
302
- totalTimeMs: Date.now() - startTime
690
+ success: true,
691
+ responseTimeMs: Date.now() - startTime
692
+ };
693
+ } catch (err) {
694
+ return {
695
+ success: false,
696
+ responseTimeMs: Date.now() - startTime,
697
+ error: err.message
303
698
  };
304
- } catch (error) {
305
- lastError = error;
306
- const isLastAttempt = attempt >= retryConfig.maxAttempts - 1;
307
- if (isLastAttempt || !retryConfig.isRetryable(lastError)) {
308
- throw lastError;
309
- }
310
- const delay = calculateDelay(attempt, retryConfig);
311
- retryConfig.onRetry?.(attempt + 1, lastError, delay);
312
- await sleep(delay);
313
699
  }
314
700
  }
315
- throw lastError ?? new Error("Retry failed with no error");
316
- }
317
- function createRetrier(config) {
318
- return (operation) => {
319
- return withRetry(operation, config);
320
- };
321
- }
701
+ /**
702
+ * Determine overall health status from pool health results
703
+ */
704
+ determineOverallHealth(pools, sharedDbStatus = "ok") {
705
+ const unhealthyPools = pools.filter((p) => p.status === "unhealthy").length;
706
+ return unhealthyPools === 0 && sharedDbStatus !== "unhealthy";
707
+ }
708
+ };
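
As the code above shows, a pool is reported `degraded` when requests are waiting for a connection or when the ping takes longer than half of `pingTimeoutMs`, and `unhealthy` when the ping fails or times out; the overall `healthy` flag only turns false when some pool (or the shared database) is unhealthy, so degraded pools alone do not fail the check. A small sketch of consuming that result follows; the types are inferred from the object literals above and are not exported by the package.

```typescript
// Result shape inferred from the object literals in checkHealth() above;
// this is not an exported type of the package.
type HealthStatus = 'ok' | 'degraded' | 'unhealthy';

interface PoolHealth {
  tenantId: string;
  schemaName: string;
  status: HealthStatus;
  totalConnections: number;
  idleConnections: number;
  waitingRequests: number;
  responseTimeMs?: number;
  error?: string;
}

interface HealthCheckResult {
  healthy: boolean;
  pools: PoolHealth[];
  sharedDb: HealthStatus;
  totalPools: number;
  degradedPools: number;
  unhealthyPools: number;
  timestamp: string;
  durationMs: number;
  sharedDbResponseTimeMs?: number;
  sharedDbError?: string;
}

export function summarize(health: HealthCheckResult): string {
  const notOk = health.pools
    .filter((p) => p.status !== 'ok')
    .map((p) => `${p.tenantId}=${p.status}`);
  return `${health.healthy ? 'UP' : 'DOWN'}: ${health.totalPools} pools, ` +
    `${health.degradedPools} degraded, ${health.unhealthyPools} unhealthy` +
    (notOk.length ? ` (${notOk.join(', ')})` : '');
}
```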
322
709
 
323
710
  // src/pool.ts
324
711
  var PoolManager = class {
325
712
  constructor(config) {
326
713
  this.config = config;
327
714
  const maxPools = config.isolation.maxPools ?? DEFAULT_CONFIG.maxPools;
715
+ const poolTtlMs = config.isolation.poolTtlMs ?? DEFAULT_CONFIG.poolTtlMs;
328
716
  this.debugLogger = createDebugLogger(config.debug);
329
- const userRetry = config.connection.retry ?? {};
330
- this.retryConfig = {
331
- maxAttempts: userRetry.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
332
- initialDelayMs: userRetry.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
333
- maxDelayMs: userRetry.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
334
- backoffMultiplier: userRetry.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
335
- jitter: userRetry.jitter ?? DEFAULT_CONFIG.retry.jitter,
336
- isRetryable: userRetry.isRetryable ?? isRetryableError,
337
- onRetry: userRetry.onRetry
338
- };
339
- this.pools = new LRUCache({
340
- max: maxPools,
341
- dispose: (entry, key) => {
342
- this.disposePoolEntry(entry, key);
343
- },
344
- noDisposeOnSet: true
717
+ this.retryHandler = new RetryHandler(config.connection.retry);
718
+ this.poolCache = new PoolCache({
719
+ maxPools,
720
+ poolTtlMs,
721
+ onDispose: (schemaName, entry) => {
722
+ this.disposePoolEntry(entry, schemaName);
723
+ }
724
+ });
725
+ this.healthChecker = new HealthChecker({
726
+ getPoolEntries: () => this.poolCache.entries(),
727
+ getTenantIdBySchema: (schemaName) => this.tenantIdBySchema.get(schemaName),
728
+ getPoolEntry: (schemaName) => this.poolCache.get(schemaName),
729
+ getSchemaName: (tenantId) => this.config.isolation.schemaNameTemplate(tenantId),
730
+ getSharedPool: () => this.sharedPool
345
731
  });
346
732
  }
347
- pools;
733
+ poolCache;
348
734
  tenantIdBySchema = /* @__PURE__ */ new Map();
349
735
  pendingConnections = /* @__PURE__ */ new Map();
350
736
  sharedPool = null;
@@ -353,22 +739,23 @@ var PoolManager = class {
353
739
  cleanupInterval = null;
354
740
  disposed = false;
355
741
  debugLogger;
356
- retryConfig;
742
+ retryHandler;
743
+ healthChecker;
357
744
  /**
358
745
  * Get or create a database connection for a tenant
359
746
  */
360
747
  getDb(tenantId) {
361
748
  this.ensureNotDisposed();
362
749
  const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
363
- let entry = this.pools.get(schemaName);
750
+ let entry = this.poolCache.get(schemaName);
364
751
  if (!entry) {
365
752
  entry = this.createPoolEntry(tenantId, schemaName);
366
- this.pools.set(schemaName, entry);
753
+ this.poolCache.set(schemaName, entry);
367
754
  this.tenantIdBySchema.set(schemaName, tenantId);
368
755
  this.debugLogger.logPoolCreated(tenantId, schemaName);
369
756
  void this.config.hooks?.onPoolCreated?.(tenantId);
370
757
  }
371
- entry.lastAccess = Date.now();
758
+ this.poolCache.touch(schemaName);
372
759
  return entry.db;
373
760
  }
374
761
  /**
@@ -389,26 +776,26 @@ var PoolManager = class {
389
776
  async getDbAsync(tenantId) {
390
777
  this.ensureNotDisposed();
391
778
  const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
392
- let entry = this.pools.get(schemaName);
779
+ let entry = this.poolCache.get(schemaName);
393
780
  if (entry) {
394
- entry.lastAccess = Date.now();
781
+ this.poolCache.touch(schemaName);
395
782
  return entry.db;
396
783
  }
397
784
  const pending = this.pendingConnections.get(schemaName);
398
785
  if (pending) {
399
786
  entry = await pending;
400
- entry.lastAccess = Date.now();
787
+ this.poolCache.touch(schemaName);
401
788
  return entry.db;
402
789
  }
403
790
  const connectionPromise = this.connectWithRetry(tenantId, schemaName);
404
791
  this.pendingConnections.set(schemaName, connectionPromise);
405
792
  try {
406
793
  entry = await connectionPromise;
407
- this.pools.set(schemaName, entry);
794
+ this.poolCache.set(schemaName, entry);
408
795
  this.tenantIdBySchema.set(schemaName, tenantId);
409
796
  this.debugLogger.logPoolCreated(tenantId, schemaName);
410
797
  void this.config.hooks?.onPoolCreated?.(tenantId);
411
- entry.lastAccess = Date.now();
798
+ this.poolCache.touch(schemaName);
412
799
  return entry.db;
413
800
  } finally {
414
801
  this.pendingConnections.delete(schemaName);
@@ -418,8 +805,9 @@ var PoolManager = class {
418
805
  * Connect to a tenant database with retry logic
419
806
  */
420
807
  async connectWithRetry(tenantId, schemaName) {
421
- const maxAttempts = this.retryConfig.maxAttempts;
422
- const result = await withRetry(
808
+ const retryConfig = this.retryHandler.getConfig();
809
+ const maxAttempts = retryConfig.maxAttempts;
810
+ const result = await this.retryHandler.withRetry(
423
811
  async () => {
424
812
  const entry = this.createPoolEntry(tenantId, schemaName);
425
813
  try {
@@ -434,10 +822,9 @@ var PoolManager = class {
434
822
  }
435
823
  },
436
824
  {
437
- ...this.retryConfig,
438
825
  onRetry: (attempt, error, delayMs) => {
439
826
  this.debugLogger.logConnectionRetry(tenantId, attempt, maxAttempts, error, delayMs);
440
- this.retryConfig.onRetry?.(attempt, error, delayMs);
827
+ retryConfig.onRetry?.(attempt, error, delayMs);
441
828
  }
442
829
  }
443
830
  );
@@ -499,8 +886,9 @@ var PoolManager = class {
499
886
  * Connect to shared database with retry logic
500
887
  */
501
888
  async connectSharedWithRetry() {
502
- const maxAttempts = this.retryConfig.maxAttempts;
503
- const result = await withRetry(
889
+ const retryConfig = this.retryHandler.getConfig();
890
+ const maxAttempts = retryConfig.maxAttempts;
891
+ const result = await this.retryHandler.withRetry(
504
892
  async () => {
505
893
  const pool = new Pool({
506
894
  connectionString: this.config.connection.url,
@@ -526,10 +914,9 @@ var PoolManager = class {
526
914
  }
527
915
  },
528
916
  {
529
- ...this.retryConfig,
530
917
  onRetry: (attempt, error, delayMs) => {
531
918
  this.debugLogger.logConnectionRetry("shared", attempt, maxAttempts, error, delayMs);
532
- this.retryConfig.onRetry?.(attempt, error, delayMs);
919
+ retryConfig.onRetry?.(attempt, error, delayMs);
533
920
  }
534
921
  }
535
922
  );
@@ -547,13 +934,13 @@ var PoolManager = class {
547
934
  */
548
935
  hasPool(tenantId) {
549
936
  const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
550
- return this.pools.has(schemaName);
937
+ return this.poolCache.has(schemaName);
551
938
  }
552
939
  /**
553
940
  * Get count of active pools
554
941
  */
555
942
  getPoolCount() {
556
- return this.pools.size;
943
+ return this.poolCache.size();
557
944
  }
558
945
  /**
559
946
  * Get all active tenant IDs
@@ -565,7 +952,7 @@ var PoolManager = class {
565
952
  * Get the retry configuration
566
953
  */
567
954
  getRetryConfig() {
568
- return { ...this.retryConfig };
955
+ return this.retryHandler.getConfig();
569
956
  }
570
957
  /**
571
958
  * Pre-warm pools for specified tenants to reduce cold start latency
@@ -624,15 +1011,97 @@ var PoolManager = class {
624
1011
  details: results
625
1012
  };
626
1013
  }
1014
+ /**
1015
+ * Get current metrics for all pools
1016
+ *
1017
+ * Collects metrics on demand with zero overhead when not called.
1018
+ * Returns raw data that can be formatted for any monitoring system.
1019
+ *
1020
+ * @example
1021
+ * ```typescript
1022
+ * const metrics = manager.getMetrics();
1023
+ * console.log(metrics.pools.total); // 15
1024
+ *
1025
+ * // Format for Prometheus
1026
+ * for (const pool of metrics.pools.tenants) {
1027
+ * gauge.labels(pool.tenantId).set(pool.connections.idle);
1028
+ * }
1029
+ * ```
1030
+ */
1031
+ getMetrics() {
1032
+ this.ensureNotDisposed();
1033
+ const maxPools = this.config.isolation.maxPools ?? DEFAULT_CONFIG.maxPools;
1034
+ const tenantMetrics = [];
1035
+ for (const [schemaName, entry] of this.poolCache.entries()) {
1036
+ const tenantId = this.tenantIdBySchema.get(schemaName) ?? schemaName;
1037
+ const pool = entry.pool;
1038
+ tenantMetrics.push({
1039
+ tenantId,
1040
+ schemaName,
1041
+ connections: {
1042
+ total: pool.totalCount,
1043
+ idle: pool.idleCount,
1044
+ waiting: pool.waitingCount
1045
+ },
1046
+ lastAccessedAt: new Date(entry.lastAccess).toISOString()
1047
+ });
1048
+ }
1049
+ return {
1050
+ pools: {
1051
+ total: tenantMetrics.length,
1052
+ maxPools,
1053
+ tenants: tenantMetrics
1054
+ },
1055
+ shared: {
1056
+ initialized: this.sharedPool !== null,
1057
+ connections: this.sharedPool ? {
1058
+ total: this.sharedPool.totalCount,
1059
+ idle: this.sharedPool.idleCount,
1060
+ waiting: this.sharedPool.waitingCount
1061
+ } : null
1062
+ },
1063
+ timestamp: (/* @__PURE__ */ new Date()).toISOString()
1064
+ };
1065
+ }
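
The JSDoc above sketches Prometheus export with a single gauge; here is a slightly fuller hedged sketch using prom-client. prom-client and the metric names are choices of this example, not package dependencies.

```typescript
// Hedged sketch: pushing getMetrics() output into Prometheus gauges via prom-client.
// prom-client and the metric names are choices of this example, not package dependencies.
import { Gauge } from 'prom-client';

const idleGauge = new Gauge({
  name: 'tenant_pool_idle_connections',
  help: 'Idle connections per tenant pool',
  labelNames: ['tenant_id', 'schema_name'],
});

const waitingGauge = new Gauge({
  name: 'tenant_pool_waiting_requests',
  help: 'Requests waiting for a connection per tenant pool',
  labelNames: ['tenant_id', 'schema_name'],
});

export function exportPoolMetrics(manager: { getMetrics(): any }): void {
  const metrics = manager.getMetrics();
  for (const pool of metrics.pools.tenants) {
    idleGauge.labels(pool.tenantId, pool.schemaName).set(pool.connections.idle);
    waitingGauge.labels(pool.tenantId, pool.schemaName).set(pool.connections.waiting);
  }
}
```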
1066
+ /**
1067
+ * Check health of all pools and connections
1068
+ *
1069
+ * Verifies the health of tenant pools and optionally the shared database.
1070
+ * Returns detailed status information for monitoring and load balancer integration.
1071
+ *
1072
+ * @example
1073
+ * ```typescript
1074
+ * // Basic health check
1075
+ * const health = await manager.healthCheck();
1076
+ * console.log(health.healthy); // true/false
1077
+ *
1078
+ * // Use with Express endpoint
1079
+ * app.get('/health', async (req, res) => {
1080
+ * const health = await manager.healthCheck();
1081
+ * res.status(health.healthy ? 200 : 503).json(health);
1082
+ * });
1083
+ *
1084
+ * // Check specific tenants only
1085
+ * const health = await manager.healthCheck({
1086
+ * tenantIds: ['tenant-1', 'tenant-2'],
1087
+ * ping: true,
1088
+ * pingTimeoutMs: 3000,
1089
+ * });
1090
+ * ```
1091
+ */
1092
+ async healthCheck(options = {}) {
1093
+ this.ensureNotDisposed();
1094
+ return this.healthChecker.checkHealth(options);
1095
+ }
627
1096
  /**
628
1097
  * Manually evict a tenant pool
629
1098
  */
630
1099
  async evictPool(tenantId, reason = "manual") {
631
1100
  const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
632
- const entry = this.pools.get(schemaName);
1101
+ const entry = this.poolCache.get(schemaName);
633
1102
  if (entry) {
634
1103
  this.debugLogger.logPoolEvicted(tenantId, schemaName, reason);
635
- this.pools.delete(schemaName);
1104
+ this.poolCache.delete(schemaName);
636
1105
  this.tenantIdBySchema.delete(schemaName);
637
1106
  await this.closePool(entry.pool, tenantId);
638
1107
  }
@@ -642,10 +1111,9 @@ var PoolManager = class {
642
1111
  */
643
1112
  startCleanup() {
644
1113
  if (this.cleanupInterval) return;
645
- const poolTtlMs = this.config.isolation.poolTtlMs ?? DEFAULT_CONFIG.poolTtlMs;
646
1114
  const cleanupIntervalMs = DEFAULT_CONFIG.cleanupIntervalMs;
647
1115
  this.cleanupInterval = setInterval(() => {
648
- void this.cleanupIdlePools(poolTtlMs);
1116
+ void this.cleanupIdlePools();
649
1117
  }, cleanupIntervalMs);
650
1118
  this.cleanupInterval.unref();
651
1119
  }
@@ -666,11 +1134,11 @@ var PoolManager = class {
666
1134
  this.disposed = true;
667
1135
  this.stopCleanup();
668
1136
  const closePromises = [];
669
- for (const [schemaName, entry] of this.pools.entries()) {
1137
+ for (const [schemaName, entry] of this.poolCache.entries()) {
670
1138
  const tenantId = this.tenantIdBySchema.get(schemaName);
671
1139
  closePromises.push(this.closePool(entry.pool, tenantId ?? schemaName));
672
1140
  }
673
- this.pools.clear();
1141
+ await this.poolCache.clear();
674
1142
  this.tenantIdBySchema.clear();
675
1143
  if (this.sharedPool) {
676
1144
  closePromises.push(this.closePool(this.sharedPool, "shared"));
@@ -732,18 +1200,13 @@ var PoolManager = class {
732
1200
  /**
733
1201
  * Cleanup pools that have been idle for too long
734
1202
  */
735
- async cleanupIdlePools(poolTtlMs) {
736
- const now = Date.now();
737
- const toEvict = [];
738
- for (const [schemaName, entry] of this.pools.entries()) {
739
- if (now - entry.lastAccess > poolTtlMs) {
740
- toEvict.push(schemaName);
741
- }
742
- }
743
- for (const schemaName of toEvict) {
1203
+ async cleanupIdlePools() {
1204
+ const evictedSchemas = await this.poolCache.evictExpired();
1205
+ for (const schemaName of evictedSchemas) {
744
1206
  const tenantId = this.tenantIdBySchema.get(schemaName);
745
1207
  if (tenantId) {
746
- await this.evictPool(tenantId, "ttl_expired");
1208
+ this.debugLogger.logPoolEvicted(tenantId, schemaName, "ttl_expired");
1209
+ this.tenantIdBySchema.delete(schemaName);
747
1210
  }
748
1211
  }
749
1212
  }
@@ -795,6 +1258,12 @@ function createTenantManager(config) {
795
1258
  async warmup(tenantIds, options) {
796
1259
  return poolManager.warmup(tenantIds, options);
797
1260
  },
1261
+ async healthCheck(options) {
1262
+ return poolManager.healthCheck(options);
1263
+ },
1264
+ getMetrics() {
1265
+ return poolManager.getMetrics();
1266
+ },
798
1267
  async dispose() {
799
1268
  await poolManager.dispose();
800
1269
  }
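
The manager returned by `createTenantManager()` now forwards `healthCheck()` and `getMetrics()` to the `PoolManager`. A hedged usage sketch follows; the import path is assumed from the package name, and the config shape is whatever `createTenantManager` already accepts (see the package's own typings).

```typescript
// Hedged usage sketch of the manager-level APIs added in this release.
// The import path is assumed from the package name, not verified against the package docs.
import { createTenantManager } from 'drizzle-multitenant';

declare const config: Parameters<typeof createTenantManager>[0];

const manager = createTenantManager(config);

// New in this diff: health check with optional ping and per-tenant filtering
const health = await manager.healthCheck({ ping: true, pingTimeoutMs: 3000 });
if (!health.healthy) {
  console.error(`Unhealthy pools: ${health.unhealthyPools}/${health.totalPools}`);
}

// New in this diff: on-demand pool metrics
const metrics = manager.getMetrics();
console.log(`${metrics.pools.total}/${metrics.pools.maxPools} pools active`);

await manager.dispose();
```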
@@ -935,246 +1404,108 @@ function getFormatConfig(format, tableName = "__drizzle_migrations") {
935
1404
  };
936
1405
  }
937
1406
  }
938
-
939
- // src/migrator/migrator.ts
940
1407
  var DEFAULT_MIGRATIONS_TABLE = "__drizzle_migrations";
941
- var Migrator = class {
942
- constructor(tenantConfig, migratorConfig) {
943
- this.tenantConfig = tenantConfig;
944
- this.migratorConfig = migratorConfig;
945
- this.migrationsTable = migratorConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE;
1408
+ var SchemaManager = class {
1409
+ constructor(config, migrationsTable) {
1410
+ this.config = config;
1411
+ this.migrationsTable = migrationsTable ?? DEFAULT_MIGRATIONS_TABLE;
946
1412
  }
947
1413
  migrationsTable;
948
1414
  /**
949
- * Migrate all tenants in parallel
950
- */
951
- async migrateAll(options = {}) {
952
- const {
953
- concurrency = 10,
954
- onProgress,
955
- onError,
956
- dryRun = false
957
- } = options;
958
- const tenantIds = await this.migratorConfig.tenantDiscovery();
959
- const migrations = await this.loadMigrations();
960
- const results = [];
961
- let aborted = false;
962
- for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
963
- const batch = tenantIds.slice(i, i + concurrency);
964
- const batchResults = await Promise.all(
965
- batch.map(async (tenantId) => {
966
- if (aborted) {
967
- return this.createSkippedResult(tenantId);
968
- }
969
- try {
970
- onProgress?.(tenantId, "starting");
971
- const result = await this.migrateTenant(tenantId, migrations, { dryRun, onProgress });
972
- onProgress?.(tenantId, result.success ? "completed" : "failed");
973
- return result;
974
- } catch (error) {
975
- onProgress?.(tenantId, "failed");
976
- const action = onError?.(tenantId, error);
977
- if (action === "abort") {
978
- aborted = true;
979
- }
980
- return this.createErrorResult(tenantId, error);
981
- }
982
- })
983
- );
984
- results.push(...batchResults);
985
- }
986
- if (aborted) {
987
- const remaining = tenantIds.slice(results.length);
988
- for (const tenantId of remaining) {
989
- results.push(this.createSkippedResult(tenantId));
990
- }
991
- }
992
- return this.aggregateResults(results);
993
- }
994
- /**
995
- * Migrate a single tenant
996
- */
997
- async migrateTenant(tenantId, migrations, options = {}) {
998
- const startTime = Date.now();
999
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1000
- const appliedMigrations = [];
1001
- const pool = await this.createPool(schemaName);
1002
- try {
1003
- await this.migratorConfig.hooks?.beforeTenant?.(tenantId);
1004
- const format = await this.getOrDetectFormat(pool, schemaName);
1005
- await this.ensureMigrationsTable(pool, schemaName, format);
1006
- const allMigrations = migrations ?? await this.loadMigrations();
1007
- const applied = await this.getAppliedMigrations(pool, schemaName, format);
1008
- const appliedSet = new Set(applied.map((m) => m.identifier));
1009
- const pending = allMigrations.filter(
1010
- (m) => !this.isMigrationApplied(m, appliedSet, format)
1011
- );
1012
- if (options.dryRun) {
1013
- return {
1014
- tenantId,
1015
- schemaName,
1016
- success: true,
1017
- appliedMigrations: pending.map((m) => m.name),
1018
- durationMs: Date.now() - startTime,
1019
- format: format.format
1020
- };
1021
- }
1022
- for (const migration of pending) {
1023
- const migrationStart = Date.now();
1024
- options.onProgress?.(tenantId, "migrating", migration.name);
1025
- await this.migratorConfig.hooks?.beforeMigration?.(tenantId, migration.name);
1026
- await this.applyMigration(pool, schemaName, migration, format);
1027
- await this.migratorConfig.hooks?.afterMigration?.(
1028
- tenantId,
1029
- migration.name,
1030
- Date.now() - migrationStart
1031
- );
1032
- appliedMigrations.push(migration.name);
1033
- }
1034
- const result = {
1035
- tenantId,
1036
- schemaName,
1037
- success: true,
1038
- appliedMigrations,
1039
- durationMs: Date.now() - startTime,
1040
- format: format.format
1041
- };
1042
- await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
1043
- return result;
1044
- } catch (error) {
1045
- const result = {
1046
- tenantId,
1047
- schemaName,
1048
- success: false,
1049
- appliedMigrations,
1050
- error: error.message,
1051
- durationMs: Date.now() - startTime
1052
- };
1053
- await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
1054
- return result;
1055
- } finally {
1056
- await pool.end();
1057
- }
1058
- }
1059
- /**
1060
- * Migrate specific tenants
1415
+ * Get the schema name for a tenant ID using the configured template
1416
+ *
1417
+ * @param tenantId - The tenant identifier
1418
+ * @returns The PostgreSQL schema name
1419
+ *
1420
+ * @example
1421
+ * ```typescript
1422
+ * const schemaName = schemaManager.getSchemaName('tenant-123');
1423
+ * // Returns: 'tenant_tenant-123' (depends on schemaNameTemplate)
1424
+ * ```
1061
1425
  */
1062
- async migrateTenants(tenantIds, options = {}) {
1063
- const migrations = await this.loadMigrations();
1064
- const results = [];
1065
- const { concurrency = 10, onProgress, onError } = options;
1066
- for (let i = 0; i < tenantIds.length; i += concurrency) {
1067
- const batch = tenantIds.slice(i, i + concurrency);
1068
- const batchResults = await Promise.all(
1069
- batch.map(async (tenantId) => {
1070
- try {
1071
- onProgress?.(tenantId, "starting");
1072
- const result = await this.migrateTenant(tenantId, migrations, { dryRun: options.dryRun ?? false, onProgress });
1073
- onProgress?.(tenantId, result.success ? "completed" : "failed");
1074
- return result;
1075
- } catch (error) {
1076
- onProgress?.(tenantId, "failed");
1077
- onError?.(tenantId, error);
1078
- return this.createErrorResult(tenantId, error);
1079
- }
1080
- })
1081
- );
1082
- results.push(...batchResults);
1083
- }
1084
- return this.aggregateResults(results);
1426
+ getSchemaName(tenantId) {
1427
+ return this.config.isolation.schemaNameTemplate(tenantId);
1085
1428
  }
1086
1429
  /**
1087
- * Get migration status for all tenants
1430
+ * Create a PostgreSQL pool for a specific schema
1431
+ *
1432
+ * The pool is configured with `search_path` set to the schema,
1433
+ * allowing queries to run in tenant isolation.
1434
+ *
1435
+ * @param schemaName - The PostgreSQL schema name
1436
+ * @returns A configured Pool instance
1437
+ *
1438
+ * @example
1439
+ * ```typescript
1440
+ * const pool = await schemaManager.createPool('tenant_123');
1441
+ * try {
1442
+ * await pool.query('SELECT * FROM users'); // Queries tenant_123.users
1443
+ * } finally {
1444
+ * await pool.end();
1445
+ * }
1446
+ * ```
1088
1447
  */
1089
- async getStatus() {
1090
- const tenantIds = await this.migratorConfig.tenantDiscovery();
1091
- const migrations = await this.loadMigrations();
1092
- const statuses = [];
1093
- for (const tenantId of tenantIds) {
1094
- statuses.push(await this.getTenantStatus(tenantId, migrations));
1095
- }
1096
- return statuses;
1448
+ async createPool(schemaName) {
1449
+ return new Pool({
1450
+ connectionString: this.config.connection.url,
1451
+ ...this.config.connection.poolConfig,
1452
+ options: `-c search_path="${schemaName}",public`
1453
+ });
1097
1454
  }
1098
1455
  /**
1099
- * Get migration status for a specific tenant
1456
+ * Create a PostgreSQL pool without schema-specific search_path
1457
+ *
1458
+ * Used for operations that need to work across schemas or
1459
+ * before a schema exists (like creating the schema itself).
1460
+ *
1461
+ * @returns A Pool instance connected to the database
1100
1462
  */
1101
- async getTenantStatus(tenantId, migrations) {
1102
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1103
- const pool = await this.createPool(schemaName);
1104
- try {
1105
- const allMigrations = migrations ?? await this.loadMigrations();
1106
- const tableExists = await this.migrationsTableExists(pool, schemaName);
1107
- if (!tableExists) {
1108
- return {
1109
- tenantId,
1110
- schemaName,
1111
- appliedCount: 0,
1112
- pendingCount: allMigrations.length,
1113
- pendingMigrations: allMigrations.map((m) => m.name),
1114
- status: allMigrations.length > 0 ? "behind" : "ok",
1115
- format: null
1116
- // New tenant, no table yet
1117
- };
1118
- }
1119
- const format = await this.getOrDetectFormat(pool, schemaName);
1120
- const applied = await this.getAppliedMigrations(pool, schemaName, format);
1121
- const appliedSet = new Set(applied.map((m) => m.identifier));
1122
- const pending = allMigrations.filter(
1123
- (m) => !this.isMigrationApplied(m, appliedSet, format)
1124
- );
1125
- return {
1126
- tenantId,
1127
- schemaName,
1128
- appliedCount: applied.length,
1129
- pendingCount: pending.length,
1130
- pendingMigrations: pending.map((m) => m.name),
1131
- status: pending.length > 0 ? "behind" : "ok",
1132
- format: format.format
1133
- };
1134
- } catch (error) {
1135
- return {
1136
- tenantId,
1137
- schemaName,
1138
- appliedCount: 0,
1139
- pendingCount: 0,
1140
- pendingMigrations: [],
1141
- status: "error",
1142
- error: error.message,
1143
- format: null
1144
- };
1145
- } finally {
1146
- await pool.end();
1147
- }
1463
+ async createRootPool() {
1464
+ return new Pool({
1465
+ connectionString: this.config.connection.url,
1466
+ ...this.config.connection.poolConfig
1467
+ });
1148
1468
  }
1149
1469
  /**
1150
- * Create a new tenant schema and optionally apply migrations
1470
+ * Create a new tenant schema in the database
1471
+ *
1472
+ * @param tenantId - The tenant identifier
1473
+ * @returns Promise that resolves when schema is created
1474
+ *
1475
+ * @example
1476
+ * ```typescript
1477
+ * await schemaManager.createSchema('new-tenant');
1478
+ * ```
1151
1479
  */
1152
- async createTenant(tenantId, options = {}) {
1153
- const { migrate = true } = options;
1154
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1155
- const pool = new Pool({
1156
- connectionString: this.tenantConfig.connection.url,
1157
- ...this.tenantConfig.connection.poolConfig
1158
- });
1480
+ async createSchema(tenantId) {
1481
+ const schemaName = this.getSchemaName(tenantId);
1482
+ const pool = await this.createRootPool();
1159
1483
  try {
1160
1484
  await pool.query(`CREATE SCHEMA IF NOT EXISTS "${schemaName}"`);
1161
- if (migrate) {
1162
- await this.migrateTenant(tenantId);
1163
- }
1164
1485
  } finally {
1165
1486
  await pool.end();
1166
1487
  }
1167
1488
  }
1168
1489
  /**
1169
- * Drop a tenant schema
1490
+ * Drop a tenant schema from the database
1491
+ *
1492
+ * @param tenantId - The tenant identifier
1493
+ * @param options - Drop options (cascade, force)
1494
+ * @returns Promise that resolves when schema is dropped
1495
+ *
1496
+ * @example
1497
+ * ```typescript
1498
+ * // Drop with CASCADE (removes all objects)
1499
+ * await schemaManager.dropSchema('old-tenant', { cascade: true });
1500
+ *
1501
+ * // Drop with RESTRICT (fails if objects exist)
1502
+ * await schemaManager.dropSchema('old-tenant', { cascade: false });
1503
+ * ```
1170
1504
  */
1171
- async dropTenant(tenantId, options = {}) {
1505
+ async dropSchema(tenantId, options = {}) {
1172
1506
  const { cascade = true } = options;
1173
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1174
- const pool = new Pool({
1175
- connectionString: this.tenantConfig.connection.url,
1176
- ...this.tenantConfig.connection.poolConfig
1177
- });
1507
+ const schemaName = this.getSchemaName(tenantId);
1508
+ const pool = await this.createRootPool();
1178
1509
  try {
1179
1510
  const cascadeSql = cascade ? "CASCADE" : "RESTRICT";
1180
1511
  await pool.query(`DROP SCHEMA IF EXISTS "${schemaName}" ${cascadeSql}`);
@@ -1183,14 +1514,21 @@ var Migrator = class {
1183
1514
  }
1184
1515
  }
1185
1516
  /**
1186
- * Check if a tenant schema exists
1517
+ * Check if a tenant schema exists in the database
1518
+ *
1519
+ * @param tenantId - The tenant identifier
1520
+ * @returns True if schema exists, false otherwise
1521
+ *
1522
+ * @example
1523
+ * ```typescript
1524
+ * if (await schemaManager.schemaExists('tenant-123')) {
1525
+ * console.log('Tenant schema exists');
1526
+ * }
1527
+ * ```
1187
1528
  */
1188
- async tenantExists(tenantId) {
1189
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1190
- const pool = new Pool({
1191
- connectionString: this.tenantConfig.connection.url,
1192
- ...this.tenantConfig.connection.poolConfig
1193
- });
1529
+ async schemaExists(tenantId) {
1530
+ const schemaName = this.getSchemaName(tenantId);
1531
+ const pool = await this.createRootPool();
1194
1532
  try {
1195
1533
  const result = await pool.query(
1196
1534
  `SELECT 1 FROM information_schema.schemata WHERE schema_name = $1`,
@@ -1202,225 +1540,787 @@ var Migrator = class {
1202
1540
  }
1203
1541
  }
1204
1542
  /**
1205
- * Mark migrations as applied without executing SQL
1206
- * Useful for syncing tracking state with already-applied migrations
1543
+ * List all schemas matching a pattern
1544
+ *
1545
+ * @param pattern - SQL LIKE pattern to filter schemas (optional)
1546
+ * @returns Array of schema names
1547
+ *
1548
+ * @example
1549
+ * ```typescript
1550
+ * // List all tenant schemas
1551
+ * const schemas = await schemaManager.listSchemas('tenant_%');
1552
+ * ```
1207
1553
  */
1208
- async markAsApplied(tenantId, options = {}) {
1209
- const startTime = Date.now();
1210
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1211
- const markedMigrations = [];
1212
- const pool = await this.createPool(schemaName);
1554
+ async listSchemas(pattern) {
1555
+ const pool = await this.createRootPool();
1213
1556
  try {
1214
- await this.migratorConfig.hooks?.beforeTenant?.(tenantId);
1215
- const format = await this.getOrDetectFormat(pool, schemaName);
1216
- await this.ensureMigrationsTable(pool, schemaName, format);
1217
- const allMigrations = await this.loadMigrations();
1218
- const applied = await this.getAppliedMigrations(pool, schemaName, format);
1219
- const appliedSet = new Set(applied.map((m) => m.identifier));
1220
- const pending = allMigrations.filter(
1221
- (m) => !this.isMigrationApplied(m, appliedSet, format)
1557
+ const query = pattern ? `SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE $1 ORDER BY schema_name` : `SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast') ORDER BY schema_name`;
1558
+ const result = await pool.query(
1559
+ query,
1560
+ pattern ? [pattern] : []
1222
1561
  );
1223
- for (const migration of pending) {
1224
- const migrationStart = Date.now();
1225
- options.onProgress?.(tenantId, "migrating", migration.name);
1226
- await this.migratorConfig.hooks?.beforeMigration?.(tenantId, migration.name);
1227
- await this.recordMigration(pool, schemaName, migration, format);
1228
- await this.migratorConfig.hooks?.afterMigration?.(
1229
- tenantId,
1230
- migration.name,
1231
- Date.now() - migrationStart
1232
- );
1233
- markedMigrations.push(migration.name);
1234
- }
1235
- const result = {
1236
- tenantId,
1237
- schemaName,
1238
- success: true,
1239
- appliedMigrations: markedMigrations,
1240
- durationMs: Date.now() - startTime,
1241
- format: format.format
1242
- };
1243
- await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
1244
- return result;
1245
- } catch (error) {
1246
- const result = {
1247
- tenantId,
1248
- schemaName,
1249
- success: false,
1250
- appliedMigrations: markedMigrations,
1251
- error: error.message,
1252
- durationMs: Date.now() - startTime
1253
- };
1254
- await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
1255
- return result;
1562
+ return result.rows.map((row) => row.schema_name);
1256
1563
  } finally {
1257
1564
  await pool.end();
1258
1565
  }
1259
1566
  }
1260
1567
  /**
1261
- * Mark migrations as applied for all tenants without executing SQL
1262
- * Useful for syncing tracking state with already-applied migrations
1568
+ * Ensure the migrations table exists with the correct format
1569
+ *
1570
+ * Creates the migrations tracking table if it doesn't exist,
1571
+ * using the appropriate column types based on the format.
1572
+ *
1573
+ * @param pool - Database pool to use
1574
+ * @param schemaName - The schema to create the table in
1575
+ * @param format - The detected/configured table format
1576
+ *
1577
+ * @example
1578
+ * ```typescript
1579
+ * const pool = await schemaManager.createPool('tenant_123');
1580
+ * await schemaManager.ensureMigrationsTable(pool, 'tenant_123', format);
1581
+ * ```
1263
1582
  */
1264
- async markAllAsApplied(options = {}) {
1583
+ async ensureMigrationsTable(pool, schemaName, format) {
1584
+ const { identifier, timestamp, timestampType } = format.columns;
1585
+ const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
1586
+ const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
1587
+ await pool.query(`
1588
+ CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
1589
+ id SERIAL PRIMARY KEY,
1590
+ ${identifierCol},
1591
+ ${timestampCol}
1592
+ )
1593
+ `);
1594
+ }
1595
+ /**
1596
+ * Check if the migrations table exists in a schema
1597
+ *
1598
+ * @param pool - Database pool to use
1599
+ * @param schemaName - The schema to check
1600
+ * @returns True if migrations table exists
1601
+ *
1602
+ * @example
1603
+ * ```typescript
1604
+ * const pool = await schemaManager.createPool('tenant_123');
1605
+ * if (await schemaManager.migrationsTableExists(pool, 'tenant_123')) {
1606
+ * console.log('Migrations table exists');
1607
+ * }
1608
+ * ```
1609
+ */
1610
+ async migrationsTableExists(pool, schemaName) {
1611
+ const result = await pool.query(
1612
+ `SELECT 1 FROM information_schema.tables
1613
+ WHERE table_schema = $1 AND table_name = $2`,
1614
+ [schemaName, this.migrationsTable]
1615
+ );
1616
+ return result.rowCount !== null && result.rowCount > 0;
1617
+ }
1618
+ /**
1619
+ * Get the configured migrations table name
1620
+ *
1621
+ * @returns The migrations table name
1622
+ */
1623
+ getMigrationsTableName() {
1624
+ return this.migrationsTable;
1625
+ }
1626
+ };
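
`SchemaManager` now owns the schema-level plumbing the old `Migrator` did inline: naming via `schemaNameTemplate`, pool creation with `search_path`, `CREATE`/`DROP SCHEMA`, existence checks, listing, and the migrations table. A hedged sketch of the call pattern follows; whether `SchemaManager` is exported from the package is not visible in this diff, so the instance is only declared, and the schema names assume a template like `tenant_${id}`.

```typescript
// Hedged sketch of the SchemaManager call pattern shown above.
// The instance is only declared; schema-name comments assume a template like `tenant_${id}`.
declare const schemaManager: {
  createSchema(tenantId: string): Promise<void>;
  schemaExists(tenantId: string): Promise<boolean>;
  listSchemas(pattern?: string): Promise<string[]>;
  dropSchema(tenantId: string, options?: { cascade?: boolean }): Promise<void>;
};

if (!(await schemaManager.schemaExists('acme'))) {
  await schemaManager.createSchema('acme'); // CREATE SCHEMA IF NOT EXISTS "tenant_acme"
}

console.log(await schemaManager.listSchemas('tenant_%')); // all schemas matching the LIKE pattern

await schemaManager.dropSchema('acme', { cascade: true }); // DROP SCHEMA IF EXISTS ... CASCADE
```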
1627
+
1628
+ // src/migrator/drift/column-analyzer.ts
1629
+ async function introspectColumns(pool, schemaName, tableName) {
1630
+ const result = await pool.query(
1631
+ `SELECT
1632
+ column_name,
1633
+ data_type,
1634
+ udt_name,
1635
+ is_nullable,
1636
+ column_default,
1637
+ character_maximum_length,
1638
+ numeric_precision,
1639
+ numeric_scale,
1640
+ ordinal_position
1641
+ FROM information_schema.columns
1642
+ WHERE table_schema = $1 AND table_name = $2
1643
+ ORDER BY ordinal_position`,
1644
+ [schemaName, tableName]
1645
+ );
1646
+ return result.rows.map((row) => ({
1647
+ name: row.column_name,
1648
+ dataType: row.data_type,
1649
+ udtName: row.udt_name,
1650
+ isNullable: row.is_nullable === "YES",
1651
+ columnDefault: row.column_default,
1652
+ characterMaximumLength: row.character_maximum_length,
1653
+ numericPrecision: row.numeric_precision,
1654
+ numericScale: row.numeric_scale,
1655
+ ordinalPosition: row.ordinal_position
1656
+ }));
1657
+ }
1658
+ function normalizeDefault(value) {
1659
+ if (value === null) return null;
1660
+ return value.replace(/^'(.+)'::.+$/, "$1").replace(/^(.+)::.+$/, "$1").trim();
1661
+ }
1662
+ function compareColumns(reference, target) {
1663
+ const drifts = [];
1664
+ const refColMap = new Map(reference.map((c) => [c.name, c]));
1665
+ const targetColMap = new Map(target.map((c) => [c.name, c]));
1666
+ for (const refCol of reference) {
1667
+ const targetCol = targetColMap.get(refCol.name);
1668
+ if (!targetCol) {
1669
+ drifts.push({
1670
+ column: refCol.name,
1671
+ type: "missing",
1672
+ expected: refCol.dataType,
1673
+ description: `Column "${refCol.name}" (${refCol.dataType}) is missing`
1674
+ });
1675
+ continue;
1676
+ }
1677
+ if (refCol.udtName !== targetCol.udtName) {
1678
+ drifts.push({
1679
+ column: refCol.name,
1680
+ type: "type_mismatch",
1681
+ expected: refCol.udtName,
1682
+ actual: targetCol.udtName,
1683
+ description: `Column "${refCol.name}" type mismatch: expected "${refCol.udtName}", got "${targetCol.udtName}"`
1684
+ });
1685
+ }
1686
+ if (refCol.isNullable !== targetCol.isNullable) {
1687
+ drifts.push({
1688
+ column: refCol.name,
1689
+ type: "nullable_mismatch",
1690
+ expected: refCol.isNullable,
1691
+ actual: targetCol.isNullable,
1692
+ description: `Column "${refCol.name}" nullable mismatch: expected ${refCol.isNullable ? "NULL" : "NOT NULL"}, got ${targetCol.isNullable ? "NULL" : "NOT NULL"}`
1693
+ });
1694
+ }
1695
+ const normalizedRefDefault = normalizeDefault(refCol.columnDefault);
1696
+ const normalizedTargetDefault = normalizeDefault(targetCol.columnDefault);
1697
+ if (normalizedRefDefault !== normalizedTargetDefault) {
1698
+ drifts.push({
1699
+ column: refCol.name,
1700
+ type: "default_mismatch",
1701
+ expected: refCol.columnDefault,
1702
+ actual: targetCol.columnDefault,
1703
+ description: `Column "${refCol.name}" default mismatch: expected "${refCol.columnDefault ?? "none"}", got "${targetCol.columnDefault ?? "none"}"`
1704
+ });
1705
+ }
1706
+ }
1707
+ for (const targetCol of target) {
1708
+ if (!refColMap.has(targetCol.name)) {
1709
+ drifts.push({
1710
+ column: targetCol.name,
1711
+ type: "extra",
1712
+ actual: targetCol.dataType,
1713
+ description: `Extra column "${targetCol.name}" (${targetCol.dataType}) not in reference`
1714
+ });
1715
+ }
1716
+ }
1717
+ return drifts;
1718
+ }
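
`normalizeDefault()` strips PostgreSQL type casts from column defaults before comparing them, so the same logical default reported with different casts is not flagged as drift. A worked example using the same two regexes:

```typescript
// Worked example of the default-value normalization above (same regexes as normalizeDefault).
function normalizeDefault(value: string | null): string | null {
  if (value === null) return null;
  return value.replace(/^'(.+)'::.+$/, '$1').replace(/^(.+)::.+$/, '$1').trim();
}

console.log(normalizeDefault(`'active'::character varying`)); // 'active'
console.log(normalizeDefault('0'));                           // '0' (no cast, unchanged)
// A default reported as 'active'::character varying in one schema and 'active'::text
// in another therefore normalizes to the same value and is not reported as drift.
```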
1719
+
1720
+ // src/migrator/drift/index-analyzer.ts
1721
+ async function introspectIndexes(pool, schemaName, tableName) {
1722
+ const indexResult = await pool.query(
1723
+ `SELECT indexname, indexdef
1724
+ FROM pg_indexes
1725
+ WHERE schemaname = $1 AND tablename = $2
1726
+ ORDER BY indexname`,
1727
+ [schemaName, tableName]
1728
+ );
1729
+ const indexDetails = await pool.query(
1730
+ `SELECT
1731
+ i.relname as indexname,
1732
+ a.attname as column_name,
1733
+ ix.indisunique as is_unique,
1734
+ ix.indisprimary as is_primary
1735
+ FROM pg_class t
1736
+ JOIN pg_index ix ON t.oid = ix.indrelid
1737
+ JOIN pg_class i ON i.oid = ix.indexrelid
1738
+ JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey)
1739
+ JOIN pg_namespace n ON n.oid = t.relnamespace
1740
+ WHERE n.nspname = $1 AND t.relname = $2
1741
+ ORDER BY i.relname, a.attnum`,
1742
+ [schemaName, tableName]
1743
+ );
1744
+ const indexColumnsMap = /* @__PURE__ */ new Map();
1745
+ for (const row of indexDetails.rows) {
1746
+ const existing = indexColumnsMap.get(row.indexname);
1747
+ if (existing) {
1748
+ existing.columns.push(row.column_name);
1749
+ } else {
1750
+ indexColumnsMap.set(row.indexname, {
1751
+ columns: [row.column_name],
1752
+ isUnique: row.is_unique,
1753
+ isPrimary: row.is_primary
1754
+ });
1755
+ }
1756
+ }
1757
+ return indexResult.rows.map((row) => {
1758
+ const details = indexColumnsMap.get(row.indexname);
1759
+ return {
1760
+ name: row.indexname,
1761
+ columns: details?.columns ?? [],
1762
+ isUnique: details?.isUnique ?? false,
1763
+ isPrimary: details?.isPrimary ?? false,
1764
+ definition: row.indexdef
1765
+ };
1766
+ });
1767
+ }
1768
+ function compareIndexes(reference, target) {
1769
+ const drifts = [];
1770
+ const refIndexMap = new Map(reference.map((i) => [i.name, i]));
1771
+ const targetIndexMap = new Map(target.map((i) => [i.name, i]));
1772
+ for (const refIndex of reference) {
1773
+ const targetIndex = targetIndexMap.get(refIndex.name);
1774
+ if (!targetIndex) {
1775
+ drifts.push({
1776
+ index: refIndex.name,
1777
+ type: "missing",
1778
+ expected: refIndex.definition,
1779
+ description: `Index "${refIndex.name}" is missing`
1780
+ });
1781
+ continue;
1782
+ }
1783
+ const refCols = refIndex.columns.sort().join(",");
1784
+ const targetCols = targetIndex.columns.sort().join(",");
1785
+ if (refCols !== targetCols || refIndex.isUnique !== targetIndex.isUnique) {
1786
+ drifts.push({
1787
+ index: refIndex.name,
1788
+ type: "definition_mismatch",
1789
+ expected: refIndex.definition,
1790
+ actual: targetIndex.definition,
1791
+ description: `Index "${refIndex.name}" definition differs`
1792
+ });
1793
+ }
1794
+ }
1795
+ for (const targetIndex of target) {
1796
+ if (!refIndexMap.has(targetIndex.name)) {
1797
+ drifts.push({
1798
+ index: targetIndex.name,
1799
+ type: "extra",
1800
+ actual: targetIndex.definition,
1801
+ description: `Extra index "${targetIndex.name}" not in reference`
1802
+ });
1803
+ }
1804
+ }
1805
+ return drifts;
1806
+ }
1807
+
1808
+ // src/migrator/drift/constraint-analyzer.ts
1809
+ async function introspectConstraints(pool, schemaName, tableName) {
1810
+ const result = await pool.query(
1811
+ `SELECT
1812
+ tc.constraint_name,
1813
+ tc.constraint_type,
1814
+ kcu.column_name,
1815
+ ccu.table_schema as foreign_table_schema,
1816
+ ccu.table_name as foreign_table_name,
1817
+ ccu.column_name as foreign_column_name,
1818
+ cc.check_clause
1819
+ FROM information_schema.table_constraints tc
1820
+ LEFT JOIN information_schema.key_column_usage kcu
1821
+ ON tc.constraint_name = kcu.constraint_name
1822
+ AND tc.table_schema = kcu.table_schema
1823
+ LEFT JOIN information_schema.constraint_column_usage ccu
1824
+ ON tc.constraint_name = ccu.constraint_name
1825
+ AND tc.constraint_type = 'FOREIGN KEY'
1826
+ LEFT JOIN information_schema.check_constraints cc
1827
+ ON tc.constraint_name = cc.constraint_name
1828
+ AND tc.constraint_type = 'CHECK'
1829
+ WHERE tc.table_schema = $1 AND tc.table_name = $2
1830
+ ORDER BY tc.constraint_name, kcu.ordinal_position`,
1831
+ [schemaName, tableName]
1832
+ );
1833
+ const constraintMap = /* @__PURE__ */ new Map();
1834
+ for (const row of result.rows) {
1835
+ const existing = constraintMap.get(row.constraint_name);
1836
+ if (existing) {
1837
+ if (row.column_name && !existing.columns.includes(row.column_name)) {
1838
+ existing.columns.push(row.column_name);
1839
+ }
1840
+ if (row.foreign_column_name && existing.foreignColumns && !existing.foreignColumns.includes(row.foreign_column_name)) {
1841
+ existing.foreignColumns.push(row.foreign_column_name);
1842
+ }
1843
+ } else {
1844
+ const constraint = {
1845
+ name: row.constraint_name,
1846
+ type: row.constraint_type,
1847
+ columns: row.column_name ? [row.column_name] : []
1848
+ };
1849
+ if (row.foreign_table_name) {
1850
+ constraint.foreignTable = row.foreign_table_name;
1851
+ }
1852
+ if (row.foreign_column_name) {
1853
+ constraint.foreignColumns = [row.foreign_column_name];
1854
+ }
1855
+ if (row.check_clause) {
1856
+ constraint.checkExpression = row.check_clause;
1857
+ }
1858
+ constraintMap.set(row.constraint_name, constraint);
1859
+ }
1860
+ }
1861
+ return Array.from(constraintMap.values());
1862
+ }
1863
+ function compareConstraints(reference, target) {
1864
+ const drifts = [];
1865
+ const refConstraintMap = new Map(reference.map((c) => [c.name, c]));
1866
+ const targetConstraintMap = new Map(target.map((c) => [c.name, c]));
1867
+ for (const refConstraint of reference) {
1868
+ const targetConstraint = targetConstraintMap.get(refConstraint.name);
1869
+ if (!targetConstraint) {
1870
+ drifts.push({
1871
+ constraint: refConstraint.name,
1872
+ type: "missing",
1873
+ expected: `${refConstraint.type} on (${refConstraint.columns.join(", ")})`,
1874
+ description: `Constraint "${refConstraint.name}" (${refConstraint.type}) is missing`
1875
+ });
1876
+ continue;
1877
+ }
1878
+ const refCols = refConstraint.columns.sort().join(",");
1879
+ const targetCols = targetConstraint.columns.sort().join(",");
1880
+ if (refConstraint.type !== targetConstraint.type || refCols !== targetCols) {
1881
+ drifts.push({
1882
+ constraint: refConstraint.name,
1883
+ type: "definition_mismatch",
1884
+ expected: `${refConstraint.type} on (${refConstraint.columns.join(", ")})`,
1885
+ actual: `${targetConstraint.type} on (${targetConstraint.columns.join(", ")})`,
1886
+ description: `Constraint "${refConstraint.name}" definition differs`
1887
+ });
1888
+ }
1889
+ }
1890
+ for (const targetConstraint of target) {
1891
+ if (!refConstraintMap.has(targetConstraint.name)) {
1892
+ drifts.push({
1893
+ constraint: targetConstraint.name,
1894
+ type: "extra",
1895
+ actual: `${targetConstraint.type} on (${targetConstraint.columns.join(", ")})`,
1896
+ description: `Extra constraint "${targetConstraint.name}" (${targetConstraint.type}) not in reference`
1897
+ });
1898
+ }
1899
+ }
1900
+ return drifts;
1901
+ }
1902
+
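Taken together, `introspectConstraints` and `compareConstraints` can diff one table's constraints between two schemas. A hedged sketch of that flow using `pg` directly; the two functions are declared rather than imported because the bundle does not show whether they are re-exported:

```typescript
import { Pool } from "pg";

// Assumption: these two helpers (defined above in the bundle) are reachable
// from application code; if not, the same flow applies through whatever
// wrapper the package exposes.
declare function introspectConstraints(pool: Pool, schemaName: string, tableName: string): Promise<unknown[]>;
declare function compareConstraints(reference: unknown[], target: unknown[]): unknown[];

async function diffUsersTableConstraints(): Promise<unknown[]> {
  const pool = new Pool({ connectionString: process.env.DATABASE_URL });
  try {
    // Same table, two schemas: the first acts as the reference definition.
    const reference = await introspectConstraints(pool, "tenant_golden", "users");
    const target = await introspectConstraints(pool, "tenant_acme", "users");
    return compareConstraints(reference, target);
  } finally {
    await pool.end();
  }
}
```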
1903
+ // src/migrator/drift/drift-detector.ts
1904
+ var DEFAULT_MIGRATIONS_TABLE2 = "__drizzle_migrations";
1905
+ var DriftDetector = class {
1906
+ constructor(tenantConfig, schemaManager, driftConfig) {
1907
+ this.tenantConfig = tenantConfig;
1908
+ this.schemaManager = schemaManager;
1909
+ this.driftConfig = driftConfig;
1910
+ this.migrationsTable = driftConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE2;
1911
+ }
1912
+ migrationsTable;
1913
+ /**
1914
+ * Get the schema name for a tenant ID
1915
+ */
1916
+ getSchemaName(tenantId) {
1917
+ return this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1918
+ }
1919
+ /**
1920
+ * Create a pool for a schema
1921
+ */
1922
+ async createPool(schemaName) {
1923
+ return this.schemaManager.createPool(schemaName);
1924
+ }
1925
+ /**
1926
+ * Detect schema drift across all tenants.
1927
+ *
1928
+ * Compares each tenant's schema against a reference tenant (first tenant by default).
1929
+ * Returns a comprehensive report of all differences found.
1930
+ *
1931
+ * @param options - Detection options
1932
+ * @returns Schema drift status with details for each tenant
1933
+ *
1934
+ * @example
1935
+ * ```typescript
1936
+ * // Basic usage - compare all tenants against the first one
1937
+ * const status = await detector.detectDrift();
1938
+ *
1939
+ * // Use a specific tenant as reference
1940
+ * const status = await detector.detectDrift({
1941
+ * referenceTenant: 'golden-tenant',
1942
+ * });
1943
+ *
1944
+ * // Check specific tenants only
1945
+ * const status = await detector.detectDrift({
1946
+ * tenantIds: ['tenant-1', 'tenant-2'],
1947
+ * });
1948
+ *
1949
+ * // Skip index and constraint comparison for faster checks
1950
+ * const status = await detector.detectDrift({
1951
+ * includeIndexes: false,
1952
+ * includeConstraints: false,
1953
+ * });
1954
+ * ```
1955
+ */
1956
+ async detectDrift(options = {}) {
1957
+ const startTime = Date.now();
1265
1958
  const {
1266
1959
  concurrency = 10,
1267
- onProgress,
1268
- onError
1960
+ includeIndexes = true,
1961
+ includeConstraints = true,
1962
+ excludeTables = [this.migrationsTable],
1963
+ onProgress
1269
1964
  } = options;
1270
- const tenantIds = await this.migratorConfig.tenantDiscovery();
1965
+ const tenantIds = options.tenantIds ?? await this.driftConfig.tenantDiscovery();
1966
+ if (tenantIds.length === 0) {
1967
+ return {
1968
+ referenceTenant: "",
1969
+ total: 0,
1970
+ noDrift: 0,
1971
+ withDrift: 0,
1972
+ error: 0,
1973
+ details: [],
1974
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
1975
+ durationMs: Date.now() - startTime
1976
+ };
1977
+ }
1978
+ const referenceTenant = options.referenceTenant ?? tenantIds[0];
1979
+ onProgress?.(referenceTenant, "starting");
1980
+ onProgress?.(referenceTenant, "introspecting");
1981
+ const referenceSchema = await this.introspectSchema(referenceTenant, {
1982
+ includeIndexes,
1983
+ includeConstraints,
1984
+ excludeTables
1985
+ });
1986
+ if (!referenceSchema) {
1987
+ return {
1988
+ referenceTenant,
1989
+ total: tenantIds.length,
1990
+ noDrift: 0,
1991
+ withDrift: 0,
1992
+ error: tenantIds.length,
1993
+ details: tenantIds.map((id) => ({
1994
+ tenantId: id,
1995
+ schemaName: this.getSchemaName(id),
1996
+ hasDrift: false,
1997
+ tables: [],
1998
+ issueCount: 0,
1999
+ error: id === referenceTenant ? "Failed to introspect reference tenant" : "Reference tenant introspection failed"
2000
+ })),
2001
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2002
+ durationMs: Date.now() - startTime
2003
+ };
2004
+ }
2005
+ onProgress?.(referenceTenant, "completed");
2006
+ const tenantsToCheck = tenantIds.filter((id) => id !== referenceTenant);
1271
2007
  const results = [];
1272
- let aborted = false;
1273
- for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
1274
- const batch = tenantIds.slice(i, i + concurrency);
2008
+ results.push({
2009
+ tenantId: referenceTenant,
2010
+ schemaName: referenceSchema.schemaName,
2011
+ hasDrift: false,
2012
+ tables: [],
2013
+ issueCount: 0
2014
+ });
2015
+ for (let i = 0; i < tenantsToCheck.length; i += concurrency) {
2016
+ const batch = tenantsToCheck.slice(i, i + concurrency);
1275
2017
  const batchResults = await Promise.all(
1276
2018
  batch.map(async (tenantId) => {
1277
- if (aborted) {
1278
- return this.createSkippedResult(tenantId);
1279
- }
1280
2019
  try {
1281
2020
  onProgress?.(tenantId, "starting");
1282
- const result = await this.markAsApplied(tenantId, { onProgress });
1283
- onProgress?.(tenantId, result.success ? "completed" : "failed");
1284
- return result;
2021
+ onProgress?.(tenantId, "introspecting");
2022
+ const tenantSchema = await this.introspectSchema(tenantId, {
2023
+ includeIndexes,
2024
+ includeConstraints,
2025
+ excludeTables
2026
+ });
2027
+ if (!tenantSchema) {
2028
+ onProgress?.(tenantId, "failed");
2029
+ return {
2030
+ tenantId,
2031
+ schemaName: this.getSchemaName(tenantId),
2032
+ hasDrift: false,
2033
+ tables: [],
2034
+ issueCount: 0,
2035
+ error: "Failed to introspect schema"
2036
+ };
2037
+ }
2038
+ onProgress?.(tenantId, "comparing");
2039
+ const drift = this.compareSchemas(referenceSchema, tenantSchema, {
2040
+ includeIndexes,
2041
+ includeConstraints
2042
+ });
2043
+ onProgress?.(tenantId, "completed");
2044
+ return drift;
1285
2045
  } catch (error) {
1286
2046
  onProgress?.(tenantId, "failed");
1287
- const action = onError?.(tenantId, error);
1288
- if (action === "abort") {
1289
- aborted = true;
1290
- }
1291
- return this.createErrorResult(tenantId, error);
2047
+ return {
2048
+ tenantId,
2049
+ schemaName: this.getSchemaName(tenantId),
2050
+ hasDrift: false,
2051
+ tables: [],
2052
+ issueCount: 0,
2053
+ error: error.message
2054
+ };
1292
2055
  }
1293
2056
  })
1294
2057
  );
1295
2058
  results.push(...batchResults);
1296
2059
  }
1297
- if (aborted) {
1298
- const remaining = tenantIds.slice(results.length);
1299
- for (const tenantId of remaining) {
1300
- results.push(this.createSkippedResult(tenantId));
1301
- }
1302
- }
1303
- return this.aggregateResults(results);
1304
- }
1305
- /**
1306
- * Get sync status for all tenants
1307
- * Detects divergences between migrations on disk and tracking in database
1308
- */
1309
- async getSyncStatus() {
1310
- const tenantIds = await this.migratorConfig.tenantDiscovery();
1311
- const migrations = await this.loadMigrations();
1312
- const statuses = [];
1313
- for (const tenantId of tenantIds) {
1314
- statuses.push(await this.getTenantSyncStatus(tenantId, migrations));
1315
- }
1316
2060
  return {
1317
- total: statuses.length,
1318
- inSync: statuses.filter((s) => s.inSync && !s.error).length,
1319
- outOfSync: statuses.filter((s) => !s.inSync && !s.error).length,
1320
- error: statuses.filter((s) => !!s.error).length,
1321
- details: statuses
2061
+ referenceTenant,
2062
+ total: results.length,
2063
+ noDrift: results.filter((r) => !r.hasDrift && !r.error).length,
2064
+ withDrift: results.filter((r) => r.hasDrift && !r.error).length,
2065
+ error: results.filter((r) => !!r.error).length,
2066
+ details: results,
2067
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
2068
+ durationMs: Date.now() - startTime
1322
2069
  };
1323
2070
  }
1324
2071
  /**
1325
- * Get sync status for a specific tenant
2072
+ * Compare a specific tenant against a reference tenant.
2073
+ *
2074
+ * @param tenantId - Tenant to check
2075
+ * @param referenceTenantId - Tenant to use as reference
2076
+ * @param options - Introspection options
2077
+ * @returns Drift details for the tenant
2078
+ *
2079
+ * @example
2080
+ * ```typescript
2081
+ * const drift = await detector.compareTenant('tenant-123', 'golden-tenant');
2082
+ * if (drift.hasDrift) {
2083
+ * console.log(`Found ${drift.issueCount} issues`);
2084
+ * }
2085
+ * ```
1326
2086
  */
1327
- async getTenantSyncStatus(tenantId, migrations) {
1328
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1329
- const pool = await this.createPool(schemaName);
1330
- try {
1331
- const allMigrations = migrations ?? await this.loadMigrations();
1332
- const migrationNames = new Set(allMigrations.map((m) => m.name));
1333
- const migrationHashes = new Set(allMigrations.map((m) => m.hash));
1334
- const tableExists = await this.migrationsTableExists(pool, schemaName);
1335
- if (!tableExists) {
1336
- return {
1337
- tenantId,
1338
- schemaName,
1339
- missing: allMigrations.map((m) => m.name),
1340
- orphans: [],
1341
- inSync: allMigrations.length === 0,
1342
- format: null
1343
- };
1344
- }
1345
- const format = await this.getOrDetectFormat(pool, schemaName);
1346
- const applied = await this.getAppliedMigrations(pool, schemaName, format);
1347
- const appliedIdentifiers = new Set(applied.map((m) => m.identifier));
1348
- const missing = allMigrations.filter((m) => !this.isMigrationApplied(m, appliedIdentifiers, format)).map((m) => m.name);
1349
- const orphans = applied.filter((m) => {
1350
- if (format.columns.identifier === "name") {
1351
- return !migrationNames.has(m.identifier);
1352
- }
1353
- return !migrationHashes.has(m.identifier) && !migrationNames.has(m.identifier);
1354
- }).map((m) => m.identifier);
2087
+ async compareTenant(tenantId, referenceTenantId, options = {}) {
2088
+ const {
2089
+ includeIndexes = true,
2090
+ includeConstraints = true,
2091
+ excludeTables = [this.migrationsTable]
2092
+ } = options;
2093
+ const referenceSchema = await this.introspectSchema(referenceTenantId, {
2094
+ includeIndexes,
2095
+ includeConstraints,
2096
+ excludeTables
2097
+ });
2098
+ if (!referenceSchema) {
1355
2099
  return {
1356
2100
  tenantId,
1357
- schemaName,
1358
- missing,
1359
- orphans,
1360
- inSync: missing.length === 0 && orphans.length === 0,
1361
- format: format.format
2101
+ schemaName: this.getSchemaName(tenantId),
2102
+ hasDrift: false,
2103
+ tables: [],
2104
+ issueCount: 0,
2105
+ error: "Failed to introspect reference tenant"
1362
2106
  };
1363
- } catch (error) {
2107
+ }
2108
+ const tenantSchema = await this.introspectSchema(tenantId, {
2109
+ includeIndexes,
2110
+ includeConstraints,
2111
+ excludeTables
2112
+ });
2113
+ if (!tenantSchema) {
1364
2114
  return {
1365
2115
  tenantId,
1366
- schemaName,
1367
- missing: [],
1368
- orphans: [],
1369
- inSync: false,
1370
- format: null,
1371
- error: error.message
2116
+ schemaName: this.getSchemaName(tenantId),
2117
+ hasDrift: false,
2118
+ tables: [],
2119
+ issueCount: 0,
2120
+ error: "Failed to introspect tenant schema"
1372
2121
  };
1373
- } finally {
1374
- await pool.end();
1375
2122
  }
2123
+ return this.compareSchemas(referenceSchema, tenantSchema, {
2124
+ includeIndexes,
2125
+ includeConstraints
2126
+ });
1376
2127
  }
1377
2128
  /**
1378
- * Mark missing migrations as applied for a tenant
1379
- */
1380
- async markMissing(tenantId) {
1381
- const startTime = Date.now();
1382
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1383
- const markedMigrations = [];
1384
- const pool = await this.createPool(schemaName);
2129
+ * Introspect a tenant's schema structure.
2130
+ *
2131
+ * Retrieves all tables, columns, indexes, and constraints
2132
+ * for a tenant's schema.
2133
+ *
2134
+ * @param tenantId - Tenant to introspect
2135
+ * @param options - Introspection options
2136
+ * @returns Schema structure or null if introspection fails
2137
+ *
2138
+ * @example
2139
+ * ```typescript
2140
+ * const schema = await detector.introspectSchema('tenant-123');
2141
+ * if (schema) {
2142
+ * console.log(`Found ${schema.tables.length} tables`);
2143
+ * for (const table of schema.tables) {
2144
+ * console.log(` ${table.name}: ${table.columns.length} columns`);
2145
+ * }
2146
+ * }
2147
+ * ```
2148
+ */
2149
+ async introspectSchema(tenantId, options = {}) {
2150
+ const schemaName = this.getSchemaName(tenantId);
2151
+ const pool = await this.createPool(schemaName);
1385
2152
  try {
1386
- const syncStatus = await this.getTenantSyncStatus(tenantId);
1387
- if (syncStatus.error) {
1388
- return {
1389
- tenantId,
1390
- schemaName,
1391
- success: false,
1392
- markedMigrations: [],
1393
- removedOrphans: [],
1394
- error: syncStatus.error,
1395
- durationMs: Date.now() - startTime
1396
- };
2153
+ const tables = await this.introspectTables(pool, schemaName, options);
2154
+ return {
2155
+ tenantId,
2156
+ schemaName,
2157
+ tables,
2158
+ introspectedAt: /* @__PURE__ */ new Date()
2159
+ };
2160
+ } catch {
2161
+ return null;
2162
+ } finally {
2163
+ await pool.end();
2164
+ }
2165
+ }
2166
+ /**
2167
+ * Compare two schema snapshots.
2168
+ *
2169
+ * This method compares pre-introspected schema snapshots,
2170
+ * useful when you already have the schema data available.
2171
+ *
2172
+ * @param reference - Reference (expected) schema
2173
+ * @param target - Target (actual) schema
2174
+ * @param options - Comparison options
2175
+ * @returns Drift details
2176
+ *
2177
+ * @example
2178
+ * ```typescript
2179
+ * const refSchema = await detector.introspectSchema('golden-tenant');
2180
+ * const targetSchema = await detector.introspectSchema('tenant-123');
2181
+ *
2182
+ * if (refSchema && targetSchema) {
2183
+ * const drift = detector.compareSchemas(refSchema, targetSchema);
2184
+ * console.log(`Drift detected: ${drift.hasDrift}`);
2185
+ * }
2186
+ * ```
2187
+ */
2188
+ compareSchemas(reference, target, options = {}) {
2189
+ const { includeIndexes = true, includeConstraints = true } = options;
2190
+ const tableDrifts = [];
2191
+ let totalIssues = 0;
2192
+ const refTableMap = new Map(reference.tables.map((t) => [t.name, t]));
2193
+ const targetTableMap = new Map(target.tables.map((t) => [t.name, t]));
2194
+ for (const refTable of reference.tables) {
2195
+ const targetTable = targetTableMap.get(refTable.name);
2196
+ if (!targetTable) {
2197
+ tableDrifts.push({
2198
+ table: refTable.name,
2199
+ status: "missing",
2200
+ columns: refTable.columns.map((c) => ({
2201
+ column: c.name,
2202
+ type: "missing",
2203
+ expected: c.dataType,
2204
+ description: `Column "${c.name}" (${c.dataType}) is missing`
2205
+ })),
2206
+ indexes: [],
2207
+ constraints: []
2208
+ });
2209
+ totalIssues += refTable.columns.length;
2210
+ continue;
1397
2211
  }
1398
- if (syncStatus.missing.length === 0) {
1399
- return {
1400
- tenantId,
1401
- schemaName,
1402
- success: true,
1403
- markedMigrations: [],
1404
- removedOrphans: [],
1405
- durationMs: Date.now() - startTime
1406
- };
2212
+ const columnDrifts = compareColumns(refTable.columns, targetTable.columns);
2213
+ const indexDrifts = includeIndexes ? compareIndexes(refTable.indexes, targetTable.indexes) : [];
2214
+ const constraintDrifts = includeConstraints ? compareConstraints(refTable.constraints, targetTable.constraints) : [];
2215
+ const issues = columnDrifts.length + indexDrifts.length + constraintDrifts.length;
2216
+ totalIssues += issues;
2217
+ if (issues > 0) {
2218
+ tableDrifts.push({
2219
+ table: refTable.name,
2220
+ status: "drifted",
2221
+ columns: columnDrifts,
2222
+ indexes: indexDrifts,
2223
+ constraints: constraintDrifts
2224
+ });
1407
2225
  }
1408
- const format = await this.getOrDetectFormat(pool, schemaName);
1409
- await this.ensureMigrationsTable(pool, schemaName, format);
1410
- const allMigrations = await this.loadMigrations();
1411
- const missingSet = new Set(syncStatus.missing);
1412
- for (const migration of allMigrations) {
1413
- if (missingSet.has(migration.name)) {
1414
- await this.recordMigration(pool, schemaName, migration, format);
1415
- markedMigrations.push(migration.name);
1416
- }
2226
+ }
2227
+ for (const targetTable of target.tables) {
2228
+ if (!refTableMap.has(targetTable.name)) {
2229
+ tableDrifts.push({
2230
+ table: targetTable.name,
2231
+ status: "extra",
2232
+ columns: targetTable.columns.map((c) => ({
2233
+ column: c.name,
2234
+ type: "extra",
2235
+ actual: c.dataType,
2236
+ description: `Extra column "${c.name}" (${c.dataType}) not in reference`
2237
+ })),
2238
+ indexes: [],
2239
+ constraints: []
2240
+ });
2241
+ totalIssues += targetTable.columns.length;
2242
+ }
2243
+ }
2244
+ return {
2245
+ tenantId: target.tenantId,
2246
+ schemaName: target.schemaName,
2247
+ hasDrift: totalIssues > 0,
2248
+ tables: tableDrifts,
2249
+ issueCount: totalIssues
2250
+ };
2251
+ }
2252
+ /**
2253
+ * Introspect all tables in a schema
2254
+ */
2255
+ async introspectTables(pool, schemaName, options) {
2256
+ const { includeIndexes = true, includeConstraints = true, excludeTables = [] } = options;
2257
+ const tablesResult = await pool.query(
2258
+ `SELECT table_name
2259
+ FROM information_schema.tables
2260
+ WHERE table_schema = $1
2261
+ AND table_type = 'BASE TABLE'
2262
+ ORDER BY table_name`,
2263
+ [schemaName]
2264
+ );
2265
+ const tables = [];
2266
+ for (const row of tablesResult.rows) {
2267
+ if (excludeTables.includes(row.table_name)) {
2268
+ continue;
1417
2269
  }
2270
+ const columns = await introspectColumns(pool, schemaName, row.table_name);
2271
+ const indexes = includeIndexes ? await introspectIndexes(pool, schemaName, row.table_name) : [];
2272
+ const constraints = includeConstraints ? await introspectConstraints(pool, schemaName, row.table_name) : [];
2273
+ tables.push({
2274
+ name: row.table_name,
2275
+ columns,
2276
+ indexes,
2277
+ constraints
2278
+ });
2279
+ }
2280
+ return tables;
2281
+ }
2282
+ };
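The `DriftDetector` constructor above reads three collaborators: a tenant config exposing `isolation.schemaNameTemplate`, a schema manager exposing `createPool`, and a drift config providing `tenantDiscovery` plus an optional `migrationsTable`. A minimal wiring sketch under those inferred shapes; the class is declared rather than imported because the bundle does not show the package's export surface, and in practice it is presumably constructed for you by the package's migrator:

```typescript
import { Pool } from "pg";

// Dependency shapes inferred from the constructor and method bodies above.
declare class DriftDetector {
  constructor(
    tenantConfig: { isolation: { schemaNameTemplate: (tenantId: string) => string } },
    schemaManager: { createPool: (schemaName: string) => Promise<Pool> },
    driftConfig: { tenantDiscovery: () => Promise<string[]>; migrationsTable?: string }
  );
  detectDrift(options?: { referenceTenant?: string }): Promise<{ withDrift: number }>;
}

const detector = new DriftDetector(
  { isolation: { schemaNameTemplate: (id) => `tenant_${id}` } },
  {
    // One pool per schema, scoped via search_path (illustrative wiring).
    createPool: async (schema) =>
      new Pool({ connectionString: process.env.DATABASE_URL, options: `-c search_path=${schema}` }),
  },
  { tenantDiscovery: async () => ["golden-tenant", "acme", "globex"] }
);

const status = await detector.detectDrift({ referenceTenant: "golden-tenant" });
console.log(`${status.withDrift} tenant(s) drifted`);
```

Since `detectDrift` falls back to the first discovered tenant as its reference, pinning a known-good tenant via `referenceTenant` (as above) keeps reports stable.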
2283
+ var Seeder = class {
2284
+ constructor(config, deps) {
2285
+ this.config = config;
2286
+ this.deps = deps;
2287
+ }
2288
+ /**
2289
+ * Seed a single tenant with initial data
2290
+ *
2291
+ * Creates a database connection for the tenant, executes the seed function,
2292
+ * and properly cleans up the connection afterward.
2293
+ *
2294
+ * @param tenantId - The tenant identifier
2295
+ * @param seedFn - Function that seeds the database
2296
+ * @returns Result of the seeding operation
2297
+ *
2298
+ * @example
2299
+ * ```typescript
2300
+ * const result = await seeder.seedTenant('tenant-123', async (db, tenantId) => {
2301
+ * await db.insert(users).values([
2302
+ * { name: 'Admin', email: `admin@${tenantId}.com` },
2303
+ * ]);
2304
+ * });
2305
+ *
2306
+ * if (result.success) {
2307
+ * console.log(`Seeded ${result.tenantId} in ${result.durationMs}ms`);
2308
+ * }
2309
+ * ```
2310
+ */
2311
+ async seedTenant(tenantId, seedFn) {
2312
+ const startTime = Date.now();
2313
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
2314
+ const pool = await this.deps.createPool(schemaName);
2315
+ try {
2316
+ const db = drizzle(pool, {
2317
+ schema: this.deps.tenantSchema
2318
+ });
2319
+ await seedFn(db, tenantId);
1418
2320
  return {
1419
2321
  tenantId,
1420
2322
  schemaName,
1421
2323
  success: true,
1422
- markedMigrations,
1423
- removedOrphans: [],
1424
2324
  durationMs: Date.now() - startTime
1425
2325
  };
1426
2326
  } catch (error) {
@@ -1428,8 +2328,6 @@ var Migrator = class {
1428
2328
  tenantId,
1429
2329
  schemaName,
1430
2330
  success: false,
1431
- markedMigrations,
1432
- removedOrphans: [],
1433
2331
  error: error.message,
1434
2332
  durationMs: Date.now() - startTime
1435
2333
  };
@@ -1438,11 +2336,33 @@ var Migrator = class {
1438
2336
  }
1439
2337
  }
1440
2338
  /**
1441
- * Mark missing migrations as applied for all tenants
2339
+ * Seed all tenants with initial data in parallel
2340
+ *
2341
+ * Discovers all tenants and seeds them in batches with configurable concurrency.
2342
+ * Supports progress callbacks and abort-on-error behavior.
2343
+ *
2344
+ * @param seedFn - Function that seeds each database
2345
+ * @param options - Seeding options
2346
+ * @returns Aggregate results of all seeding operations
2347
+ *
2348
+ * @example
2349
+ * ```typescript
2350
+ * const results = await seeder.seedAll(
2351
+ * async (db, tenantId) => {
2352
+ * await db.insert(settings).values({ key: 'initialized', value: 'true' });
2353
+ * },
2354
+ * {
2355
+ * concurrency: 5,
2356
+ * onProgress: (id, status) => console.log(`${id}: ${status}`),
2357
+ * }
2358
+ * );
2359
+ *
2360
+ * console.log(`Succeeded: ${results.succeeded}/${results.total}`);
2361
+ * ```
1442
2362
  */
1443
- async markAllMissing(options = {}) {
2363
+ async seedAll(seedFn, options = {}) {
1444
2364
  const { concurrency = 10, onProgress, onError } = options;
1445
- const tenantIds = await this.migratorConfig.tenantDiscovery();
2365
+ const tenantIds = await this.config.tenantDiscovery();
1446
2366
  const results = [];
1447
2367
  let aborted = false;
1448
2368
  for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
@@ -1450,11 +2370,12 @@ var Migrator = class {
1450
2370
  const batchResults = await Promise.all(
1451
2371
  batch.map(async (tenantId) => {
1452
2372
  if (aborted) {
1453
- return this.createSkippedSyncResult(tenantId);
2373
+ return this.createSkippedResult(tenantId);
1454
2374
  }
1455
2375
  try {
1456
2376
  onProgress?.(tenantId, "starting");
1457
- const result = await this.markMissing(tenantId);
2377
+ onProgress?.(tenantId, "seeding");
2378
+ const result = await this.seedTenant(tenantId, seedFn);
1458
2379
  onProgress?.(tenantId, result.success ? "completed" : "failed");
1459
2380
  return result;
1460
2381
  } catch (error) {
@@ -1463,67 +2384,409 @@ var Migrator = class {
1463
2384
  if (action === "abort") {
1464
2385
  aborted = true;
1465
2386
  }
1466
- return this.createErrorSyncResult(tenantId, error);
2387
+ return this.createErrorResult(tenantId, error);
1467
2388
  }
1468
2389
  })
1469
2390
  );
1470
2391
  results.push(...batchResults);
1471
2392
  }
1472
- return this.aggregateSyncResults(results);
2393
+ if (aborted) {
2394
+ const remaining = tenantIds.slice(results.length);
2395
+ for (const tenantId of remaining) {
2396
+ results.push(this.createSkippedResult(tenantId));
2397
+ }
2398
+ }
2399
+ return this.aggregateResults(results);
1473
2400
  }
1474
2401
  /**
1475
- * Remove orphan migration records for a tenant
2402
+ * Seed specific tenants with initial data
2403
+ *
2404
+ * Seeds only the specified tenants in batches with configurable concurrency.
2405
+ *
2406
+ * @param tenantIds - List of tenant IDs to seed
2407
+ * @param seedFn - Function that seeds each database
2408
+ * @param options - Seeding options
2409
+ * @returns Aggregate results of seeding operations
2410
+ *
2411
+ * @example
2412
+ * ```typescript
2413
+ * const results = await seeder.seedTenants(
2414
+ * ['tenant-1', 'tenant-2', 'tenant-3'],
2415
+ * async (db) => {
2416
+ * await db.insert(config).values({ setup: true });
2417
+ * },
2418
+ * { concurrency: 2 }
2419
+ * );
2420
+ * ```
1476
2421
  */
1477
- async cleanOrphans(tenantId) {
1478
- const startTime = Date.now();
1479
- const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
1480
- const removedOrphans = [];
1481
- const pool = await this.createPool(schemaName);
2422
+ async seedTenants(tenantIds, seedFn, options = {}) {
2423
+ const { concurrency = 10, onProgress, onError } = options;
2424
+ const results = [];
2425
+ for (let i = 0; i < tenantIds.length; i += concurrency) {
2426
+ const batch = tenantIds.slice(i, i + concurrency);
2427
+ const batchResults = await Promise.all(
2428
+ batch.map(async (tenantId) => {
2429
+ try {
2430
+ onProgress?.(tenantId, "starting");
2431
+ onProgress?.(tenantId, "seeding");
2432
+ const result = await this.seedTenant(tenantId, seedFn);
2433
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
2434
+ return result;
2435
+ } catch (error) {
2436
+ onProgress?.(tenantId, "failed");
2437
+ onError?.(tenantId, error);
2438
+ return this.createErrorResult(tenantId, error);
2439
+ }
2440
+ })
2441
+ );
2442
+ results.push(...batchResults);
2443
+ }
2444
+ return this.aggregateResults(results);
2445
+ }
2446
+ /**
2447
+ * Create a skipped result for aborted seeding
2448
+ */
2449
+ createSkippedResult(tenantId) {
2450
+ return {
2451
+ tenantId,
2452
+ schemaName: this.deps.schemaNameTemplate(tenantId),
2453
+ success: false,
2454
+ error: "Skipped due to abort",
2455
+ durationMs: 0
2456
+ };
2457
+ }
2458
+ /**
2459
+ * Create an error result for failed seeding
2460
+ */
2461
+ createErrorResult(tenantId, error) {
2462
+ return {
2463
+ tenantId,
2464
+ schemaName: this.deps.schemaNameTemplate(tenantId),
2465
+ success: false,
2466
+ error: error.message,
2467
+ durationMs: 0
2468
+ };
2469
+ }
2470
+ /**
2471
+ * Aggregate individual results into a summary
2472
+ */
2473
+ aggregateResults(results) {
2474
+ return {
2475
+ total: results.length,
2476
+ succeeded: results.filter((r) => r.success).length,
2477
+ failed: results.filter(
2478
+ (r) => !r.success && r.error !== "Skipped due to abort"
2479
+ ).length,
2480
+ skipped: results.filter((r) => r.error === "Skipped due to abort").length,
2481
+ details: results
2482
+ };
2483
+ }
2484
+ };
2485
+
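The `Seeder` above takes a config with `tenantDiscovery` plus a `deps` object providing `schemaNameTemplate`, `createPool`, and the drizzle `tenantSchema` it hands to `drizzle()`. A hedged wiring sketch under those inferred shapes (declared rather than imported, since the export surface is not visible in the bundle):

```typescript
import { Pool } from "pg";
import { pgSchema, text } from "drizzle-orm/pg-core";

// Any drizzle schema works here; this tiny table exists only for the sketch.
const appSchema = pgSchema("tenant_template");
const settings = appSchema.table("settings", { key: text("key"), value: text("value") });

declare class Seeder {
  constructor(
    config: { tenantDiscovery: () => Promise<string[]> },
    deps: {
      schemaNameTemplate: (tenantId: string) => string;
      createPool: (schemaName: string) => Promise<Pool>;
      tenantSchema: Record<string, unknown>;
    }
  );
  seedAll(
    seedFn: (db: unknown, tenantId: string) => Promise<void>,
    options?: { concurrency?: number }
  ): Promise<{ succeeded: number; total: number }>;
}

const seeder = new Seeder(
  { tenantDiscovery: async () => ["acme", "globex"] },
  {
    schemaNameTemplate: (id) => `tenant_${id}`,
    createPool: async (schema) =>
      new Pool({ connectionString: process.env.DATABASE_URL, options: `-c search_path=${schema}` }),
    tenantSchema: { settings },
  }
);

const report = await seeder.seedAll(async (db: any, tenantId) => {
  await db.insert(settings).values({ key: "initialized", value: tenantId });
});
console.log(`Seeded ${report.succeeded}/${report.total} tenants`);
```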
2486
+ // src/migrator/sync/sync-manager.ts
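The `SyncManager` that follows extracts the sync-status, mark-missing, and clean-orphans logic that previously lived on the `Migrator`. Its methods compose into a simple check-then-repair pass; a sketch of that flow, assuming a `syncManager` handle (how the package actually exposes one is not visible in this bundle):

```typescript
// Result shapes trimmed to the fields used below; see the methods that follow.
declare const syncManager: {
  getSyncStatus(): Promise<{
    outOfSync: number;
    details: Array<{ tenantId: string; inSync: boolean; missing: string[]; orphans: string[]; error?: string }>;
  }>;
  markMissing(tenantId: string): Promise<{ success: boolean; markedMigrations: string[] }>;
  cleanOrphans(tenantId: string): Promise<{ success: boolean; removedOrphans: string[] }>;
};

const status = await syncManager.getSyncStatus();
for (const tenant of status.details) {
  if (tenant.error || tenant.inSync) continue;
  // Record on-disk migrations that are not tracked yet...
  if (tenant.missing.length > 0) await syncManager.markMissing(tenant.tenantId);
  // ...and drop tracking rows whose files no longer exist on disk.
  if (tenant.orphans.length > 0) await syncManager.cleanOrphans(tenant.tenantId);
}
```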
2487
+ var SyncManager = class {
2488
+ constructor(config, deps) {
2489
+ this.config = config;
2490
+ this.deps = deps;
2491
+ }
2492
+ /**
2493
+ * Get sync status for all tenants
2494
+ *
2495
+ * Detects divergences between migrations on disk and tracking in database.
2496
+ * A tenant is "in sync" when all disk migrations are tracked and no orphan records exist.
2497
+ *
2498
+ * @returns Aggregate sync status for all tenants
2499
+ *
2500
+ * @example
2501
+ * ```typescript
2502
+ * const status = await syncManager.getSyncStatus();
2503
+ * console.log(`Total: ${status.total}, In sync: ${status.inSync}, Out of sync: ${status.outOfSync}`);
2504
+ *
2505
+ * for (const tenant of status.details.filter(d => !d.inSync)) {
2506
+ * console.log(`${tenant.tenantId}: missing=${tenant.missing.length}, orphans=${tenant.orphans.length}`);
2507
+ * }
2508
+ * ```
2509
+ */
2510
+ async getSyncStatus() {
2511
+ const tenantIds = await this.config.tenantDiscovery();
2512
+ const migrations = await this.deps.loadMigrations();
2513
+ const statuses = [];
2514
+ for (const tenantId of tenantIds) {
2515
+ statuses.push(await this.getTenantSyncStatus(tenantId, migrations));
2516
+ }
2517
+ return {
2518
+ total: statuses.length,
2519
+ inSync: statuses.filter((s) => s.inSync && !s.error).length,
2520
+ outOfSync: statuses.filter((s) => !s.inSync && !s.error).length,
2521
+ error: statuses.filter((s) => !!s.error).length,
2522
+ details: statuses
2523
+ };
2524
+ }
2525
+ /**
2526
+ * Get sync status for a specific tenant
2527
+ *
2528
+ * Compares migrations on disk with records in the database.
2529
+ * Identifies missing migrations (on disk but not tracked) and
2530
+ * orphan records (tracked but not on disk).
2531
+ *
2532
+ * @param tenantId - The tenant identifier
2533
+ * @param migrations - Optional pre-loaded migrations (avoids reloading from disk)
2534
+ * @returns Sync status for the tenant
2535
+ *
2536
+ * @example
2537
+ * ```typescript
2538
+ * const status = await syncManager.getTenantSyncStatus('tenant-123');
2539
+ * if (status.missing.length > 0) {
2540
+ * console.log(`Missing: ${status.missing.join(', ')}`);
2541
+ * }
2542
+ * if (status.orphans.length > 0) {
2543
+ * console.log(`Orphans: ${status.orphans.join(', ')}`);
2544
+ * }
2545
+ * ```
2546
+ */
2547
+ async getTenantSyncStatus(tenantId, migrations) {
2548
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
2549
+ const pool = await this.deps.createPool(schemaName);
1482
2550
  try {
1483
- const syncStatus = await this.getTenantSyncStatus(tenantId);
1484
- if (syncStatus.error) {
1485
- return {
1486
- tenantId,
1487
- schemaName,
1488
- success: false,
1489
- markedMigrations: [],
1490
- removedOrphans: [],
1491
- error: syncStatus.error,
1492
- durationMs: Date.now() - startTime
1493
- };
1494
- }
1495
- if (syncStatus.orphans.length === 0) {
2551
+ const allMigrations = migrations ?? await this.deps.loadMigrations();
2552
+ const migrationNames = new Set(allMigrations.map((m) => m.name));
2553
+ const migrationHashes = new Set(allMigrations.map((m) => m.hash));
2554
+ const tableExists = await this.deps.migrationsTableExists(pool, schemaName);
2555
+ if (!tableExists) {
1496
2556
  return {
1497
2557
  tenantId,
1498
2558
  schemaName,
1499
- success: true,
1500
- markedMigrations: [],
1501
- removedOrphans: [],
1502
- durationMs: Date.now() - startTime
2559
+ missing: allMigrations.map((m) => m.name),
2560
+ orphans: [],
2561
+ inSync: allMigrations.length === 0,
2562
+ format: null
1503
2563
  };
1504
2564
  }
1505
- const format = await this.getOrDetectFormat(pool, schemaName);
1506
- const identifierColumn = format.columns.identifier;
1507
- for (const orphan of syncStatus.orphans) {
1508
- await pool.query(
1509
- `DELETE FROM "${schemaName}"."${format.tableName}" WHERE "${identifierColumn}" = $1`,
1510
- [orphan]
1511
- );
1512
- removedOrphans.push(orphan);
1513
- }
2565
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
2566
+ const applied = await this.getAppliedMigrations(pool, schemaName, format);
2567
+ const appliedIdentifiers = new Set(applied.map((m) => m.identifier));
2568
+ const missing = allMigrations.filter((m) => !this.isMigrationApplied(m, appliedIdentifiers, format)).map((m) => m.name);
2569
+ const orphans = applied.filter((m) => {
2570
+ if (format.columns.identifier === "name") {
2571
+ return !migrationNames.has(m.identifier);
2572
+ }
2573
+ return !migrationHashes.has(m.identifier) && !migrationNames.has(m.identifier);
2574
+ }).map((m) => m.identifier);
1514
2575
  return {
1515
2576
  tenantId,
1516
2577
  schemaName,
1517
- success: true,
1518
- markedMigrations: [],
1519
- removedOrphans,
1520
- durationMs: Date.now() - startTime
2578
+ missing,
2579
+ orphans,
2580
+ inSync: missing.length === 0 && orphans.length === 0,
2581
+ format: format.format
1521
2582
  };
1522
2583
  } catch (error) {
1523
2584
  return {
1524
2585
  tenantId,
1525
2586
  schemaName,
1526
- success: false,
2587
+ missing: [],
2588
+ orphans: [],
2589
+ inSync: false,
2590
+ format: null,
2591
+ error: error.message
2592
+ };
2593
+ } finally {
2594
+ await pool.end();
2595
+ }
2596
+ }
2597
+ /**
2598
+ * Mark missing migrations as applied for a tenant
2599
+ *
2600
+ * Records migrations that exist on disk but are not tracked in the database.
2601
+ * Useful for syncing tracking state with already-applied migrations.
2602
+ *
2603
+ * @param tenantId - The tenant identifier
2604
+ * @returns Result of the mark operation
2605
+ *
2606
+ * @example
2607
+ * ```typescript
2608
+ * const result = await syncManager.markMissing('tenant-123');
2609
+ * if (result.success) {
2610
+ * console.log(`Marked ${result.markedMigrations.length} migrations as applied`);
2611
+ * }
2612
+ * ```
2613
+ */
2614
+ async markMissing(tenantId) {
2615
+ const startTime = Date.now();
2616
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
2617
+ const markedMigrations = [];
2618
+ const pool = await this.deps.createPool(schemaName);
2619
+ try {
2620
+ const syncStatus = await this.getTenantSyncStatus(tenantId);
2621
+ if (syncStatus.error) {
2622
+ return {
2623
+ tenantId,
2624
+ schemaName,
2625
+ success: false,
2626
+ markedMigrations: [],
2627
+ removedOrphans: [],
2628
+ error: syncStatus.error,
2629
+ durationMs: Date.now() - startTime
2630
+ };
2631
+ }
2632
+ if (syncStatus.missing.length === 0) {
2633
+ return {
2634
+ tenantId,
2635
+ schemaName,
2636
+ success: true,
2637
+ markedMigrations: [],
2638
+ removedOrphans: [],
2639
+ durationMs: Date.now() - startTime
2640
+ };
2641
+ }
2642
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
2643
+ await this.deps.ensureMigrationsTable(pool, schemaName, format);
2644
+ const allMigrations = await this.deps.loadMigrations();
2645
+ const missingSet = new Set(syncStatus.missing);
2646
+ for (const migration of allMigrations) {
2647
+ if (missingSet.has(migration.name)) {
2648
+ await this.recordMigration(pool, schemaName, migration, format);
2649
+ markedMigrations.push(migration.name);
2650
+ }
2651
+ }
2652
+ return {
2653
+ tenantId,
2654
+ schemaName,
2655
+ success: true,
2656
+ markedMigrations,
2657
+ removedOrphans: [],
2658
+ durationMs: Date.now() - startTime
2659
+ };
2660
+ } catch (error) {
2661
+ return {
2662
+ tenantId,
2663
+ schemaName,
2664
+ success: false,
2665
+ markedMigrations,
2666
+ removedOrphans: [],
2667
+ error: error.message,
2668
+ durationMs: Date.now() - startTime
2669
+ };
2670
+ } finally {
2671
+ await pool.end();
2672
+ }
2673
+ }
2674
+ /**
2675
+ * Mark missing migrations as applied for all tenants
2676
+ *
2677
+ * Processes all tenants in parallel with configurable concurrency.
2678
+ * Supports progress callbacks and abort-on-error behavior.
2679
+ *
2680
+ * @param options - Sync options
2681
+ * @returns Aggregate results of all mark operations
2682
+ *
2683
+ * @example
2684
+ * ```typescript
2685
+ * const results = await syncManager.markAllMissing({
2686
+ * concurrency: 10,
2687
+ * onProgress: (id, status) => console.log(`${id}: ${status}`),
2688
+ * });
2689
+ * console.log(`Succeeded: ${results.succeeded}/${results.total}`);
2690
+ * ```
2691
+ */
2692
+ async markAllMissing(options = {}) {
2693
+ const { concurrency = 10, onProgress, onError } = options;
2694
+ const tenantIds = await this.config.tenantDiscovery();
2695
+ const results = [];
2696
+ let aborted = false;
2697
+ for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
2698
+ const batch = tenantIds.slice(i, i + concurrency);
2699
+ const batchResults = await Promise.all(
2700
+ batch.map(async (tenantId) => {
2701
+ if (aborted) {
2702
+ return this.createSkippedSyncResult(tenantId);
2703
+ }
2704
+ try {
2705
+ onProgress?.(tenantId, "starting");
2706
+ const result = await this.markMissing(tenantId);
2707
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
2708
+ return result;
2709
+ } catch (error) {
2710
+ onProgress?.(tenantId, "failed");
2711
+ const action = onError?.(tenantId, error);
2712
+ if (action === "abort") {
2713
+ aborted = true;
2714
+ }
2715
+ return this.createErrorSyncResult(tenantId, error);
2716
+ }
2717
+ })
2718
+ );
2719
+ results.push(...batchResults);
2720
+ }
2721
+ return this.aggregateSyncResults(results);
2722
+ }
2723
+ /**
2724
+ * Remove orphan migration records for a tenant
2725
+ *
2726
+ * Deletes records from the migrations table that don't have
2727
+ * corresponding files on disk.
2728
+ *
2729
+ * @param tenantId - The tenant identifier
2730
+ * @returns Result of the clean operation
2731
+ *
2732
+ * @example
2733
+ * ```typescript
2734
+ * const result = await syncManager.cleanOrphans('tenant-123');
2735
+ * if (result.success) {
2736
+ * console.log(`Removed ${result.removedOrphans.length} orphan records`);
2737
+ * }
2738
+ * ```
2739
+ */
2740
+ async cleanOrphans(tenantId) {
2741
+ const startTime = Date.now();
2742
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
2743
+ const removedOrphans = [];
2744
+ const pool = await this.deps.createPool(schemaName);
2745
+ try {
2746
+ const syncStatus = await this.getTenantSyncStatus(tenantId);
2747
+ if (syncStatus.error) {
2748
+ return {
2749
+ tenantId,
2750
+ schemaName,
2751
+ success: false,
2752
+ markedMigrations: [],
2753
+ removedOrphans: [],
2754
+ error: syncStatus.error,
2755
+ durationMs: Date.now() - startTime
2756
+ };
2757
+ }
2758
+ if (syncStatus.orphans.length === 0) {
2759
+ return {
2760
+ tenantId,
2761
+ schemaName,
2762
+ success: true,
2763
+ markedMigrations: [],
2764
+ removedOrphans: [],
2765
+ durationMs: Date.now() - startTime
2766
+ };
2767
+ }
2768
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
2769
+ const identifierColumn = format.columns.identifier;
2770
+ for (const orphan of syncStatus.orphans) {
2771
+ await pool.query(
2772
+ `DELETE FROM "${schemaName}"."${format.tableName}" WHERE "${identifierColumn}" = $1`,
2773
+ [orphan]
2774
+ );
2775
+ removedOrphans.push(orphan);
2776
+ }
2777
+ return {
2778
+ tenantId,
2779
+ schemaName,
2780
+ success: true,
2781
+ markedMigrations: [],
2782
+ removedOrphans,
2783
+ durationMs: Date.now() - startTime
2784
+ };
2785
+ } catch (error) {
2786
+ return {
2787
+ tenantId,
2788
+ schemaName,
2789
+ success: false,
1527
2790
  markedMigrations: [],
1528
2791
  removedOrphans,
1529
2792
  error: error.message,
@@ -1534,38 +2797,1498 @@ var Migrator = class {
1534
2797
  }
1535
2798
  }
1536
2799
  /**
1537
- * Remove orphan migration records for all tenants
2800
+ * Remove orphan migration records for all tenants
2801
+ *
2802
+ * Processes all tenants in parallel with configurable concurrency.
2803
+ * Supports progress callbacks and abort-on-error behavior.
2804
+ *
2805
+ * @param options - Sync options
2806
+ * @returns Aggregate results of all clean operations
2807
+ *
2808
+ * @example
2809
+ * ```typescript
2810
+ * const results = await syncManager.cleanAllOrphans({
2811
+ * concurrency: 10,
2812
+ * onProgress: (id, status) => console.log(`${id}: ${status}`),
2813
+ * });
2814
+ * console.log(`Succeeded: ${results.succeeded}/${results.total}`);
2815
+ * ```
2816
+ */
2817
+ async cleanAllOrphans(options = {}) {
2818
+ const { concurrency = 10, onProgress, onError } = options;
2819
+ const tenantIds = await this.config.tenantDiscovery();
2820
+ const results = [];
2821
+ let aborted = false;
2822
+ for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
2823
+ const batch = tenantIds.slice(i, i + concurrency);
2824
+ const batchResults = await Promise.all(
2825
+ batch.map(async (tenantId) => {
2826
+ if (aborted) {
2827
+ return this.createSkippedSyncResult(tenantId);
2828
+ }
2829
+ try {
2830
+ onProgress?.(tenantId, "starting");
2831
+ const result = await this.cleanOrphans(tenantId);
2832
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
2833
+ return result;
2834
+ } catch (error) {
2835
+ onProgress?.(tenantId, "failed");
2836
+ const action = onError?.(tenantId, error);
2837
+ if (action === "abort") {
2838
+ aborted = true;
2839
+ }
2840
+ return this.createErrorSyncResult(tenantId, error);
2841
+ }
2842
+ })
2843
+ );
2844
+ results.push(...batchResults);
2845
+ }
2846
+ return this.aggregateSyncResults(results);
2847
+ }
2848
+ // ============================================================================
2849
+ // Private Helper Methods
2850
+ // ============================================================================
2851
+ /**
2852
+ * Get applied migrations for a schema
2853
+ */
2854
+ async getAppliedMigrations(pool, schemaName, format) {
2855
+ const identifierColumn = format.columns.identifier;
2856
+ const timestampColumn = format.columns.timestamp;
2857
+ const result = await pool.query(
2858
+ `SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
2859
+ FROM "${schemaName}"."${format.tableName}"
2860
+ ORDER BY id`
2861
+ );
2862
+ return result.rows.map((row) => {
2863
+ const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
2864
+ return {
2865
+ identifier: row.identifier,
2866
+ appliedAt
2867
+ };
2868
+ });
2869
+ }
2870
+ /**
2871
+ * Check if a migration has been applied
2872
+ */
2873
+ isMigrationApplied(migration, appliedIdentifiers, format) {
2874
+ if (format.columns.identifier === "name") {
2875
+ return appliedIdentifiers.has(migration.name);
2876
+ }
2877
+ return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
2878
+ }
2879
+ /**
2880
+ * Record a migration as applied (without executing SQL)
2881
+ */
2882
+ async recordMigration(pool, schemaName, migration, format) {
2883
+ const { identifier, timestamp, timestampType } = format.columns;
2884
+ const identifierValue = identifier === "name" ? migration.name : migration.hash;
2885
+ const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
2886
+ await pool.query(
2887
+ `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
2888
+ [identifierValue, timestampValue]
2889
+ );
2890
+ }
2891
+ /**
2892
+ * Create a skipped sync result for aborted operations
2893
+ */
2894
+ createSkippedSyncResult(tenantId) {
2895
+ return {
2896
+ tenantId,
2897
+ schemaName: this.deps.schemaNameTemplate(tenantId),
2898
+ success: false,
2899
+ markedMigrations: [],
2900
+ removedOrphans: [],
2901
+ error: "Skipped due to abort",
2902
+ durationMs: 0
2903
+ };
2904
+ }
2905
+ /**
2906
+ * Create an error sync result for failed operations
2907
+ */
2908
+ createErrorSyncResult(tenantId, error) {
2909
+ return {
2910
+ tenantId,
2911
+ schemaName: this.deps.schemaNameTemplate(tenantId),
2912
+ success: false,
2913
+ markedMigrations: [],
2914
+ removedOrphans: [],
2915
+ error: error.message,
2916
+ durationMs: 0
2917
+ };
2918
+ }
2919
+ /**
2920
+ * Aggregate individual sync results into a summary
2921
+ */
2922
+ aggregateSyncResults(results) {
2923
+ return {
2924
+ total: results.length,
2925
+ succeeded: results.filter((r) => r.success).length,
2926
+ failed: results.filter((r) => !r.success).length,
2927
+ details: results
2928
+ };
2929
+ }
2930
+ };
2931
+
2932
+ // src/migrator/executor/migration-executor.ts
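One detail worth noting in the executor that follows: `applyMigration` runs a migration's SQL and its tracking INSERT inside a single transaction, so a failed migration leaves no tracking row behind. A standalone sketch of that same pattern with `pg`; the tracking table and column names here are placeholders, whereas the real executor adapts to the detected migrations-table format:

```typescript
import { Pool } from "pg";

// Apply one migration atomically: either the SQL runs AND the tracking row is
// written, or neither happens.
async function applyAndRecord(
  pool: Pool,
  schemaName: string,
  migration: { name: string; sql: string }
): Promise<void> {
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    await client.query(migration.sql);
    // Placeholder tracking table/columns for the sketch.
    await client.query(
      `INSERT INTO "${schemaName}"."__drizzle_migrations" ("name", "applied_at") VALUES ($1, $2)`,
      [migration.name, new Date()]
    );
    await client.query("COMMIT");
  } catch (error) {
    await client.query("ROLLBACK");
    throw error;
  } finally {
    client.release();
  }
}
```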
2933
+ var MigrationExecutor = class {
2934
+ constructor(config, deps) {
2935
+ this.config = config;
2936
+ this.deps = deps;
2937
+ }
2938
+ /**
2939
+ * Migrate a single tenant
2940
+ *
2941
+ * Applies all pending migrations to the tenant's schema.
2942
+ * Creates the migrations table if it doesn't exist.
2943
+ *
2944
+ * @param tenantId - The tenant identifier
2945
+ * @param migrations - Optional pre-loaded migrations (avoids reloading from disk)
2946
+ * @param options - Migration options (dryRun, onProgress)
2947
+ * @returns Migration result with applied migrations and duration
2948
+ *
2949
+ * @example
2950
+ * ```typescript
2951
+ * const result = await executor.migrateTenant('tenant-123', undefined, {
2952
+ * dryRun: false,
2953
+ * onProgress: (id, status, name) => console.log(`${id}: ${status} ${name}`),
2954
+ * });
2955
+ *
2956
+ * if (result.success) {
2957
+ * console.log(`Applied ${result.appliedMigrations.length} migrations`);
2958
+ * }
2959
+ * ```
2960
+ */
2961
+ async migrateTenant(tenantId, migrations, options = {}) {
2962
+ const startTime = Date.now();
2963
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
2964
+ const appliedMigrations = [];
2965
+ const pool = await this.deps.createPool(schemaName);
2966
+ try {
2967
+ await this.config.hooks?.beforeTenant?.(tenantId);
2968
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
2969
+ await this.deps.ensureMigrationsTable(pool, schemaName, format);
2970
+ const allMigrations = migrations ?? await this.deps.loadMigrations();
2971
+ const applied = await this.getAppliedMigrations(pool, schemaName, format);
2972
+ const appliedSet = new Set(applied.map((m) => m.identifier));
2973
+ const pending = allMigrations.filter(
2974
+ (m) => !this.isMigrationApplied(m, appliedSet, format)
2975
+ );
2976
+ if (options.dryRun) {
2977
+ return {
2978
+ tenantId,
2979
+ schemaName,
2980
+ success: true,
2981
+ appliedMigrations: pending.map((m) => m.name),
2982
+ durationMs: Date.now() - startTime,
2983
+ format: format.format
2984
+ };
2985
+ }
2986
+ for (const migration of pending) {
2987
+ const migrationStart = Date.now();
2988
+ options.onProgress?.(tenantId, "migrating", migration.name);
2989
+ await this.config.hooks?.beforeMigration?.(tenantId, migration.name);
2990
+ await this.applyMigration(pool, schemaName, migration, format);
2991
+ await this.config.hooks?.afterMigration?.(
2992
+ tenantId,
2993
+ migration.name,
2994
+ Date.now() - migrationStart
2995
+ );
2996
+ appliedMigrations.push(migration.name);
2997
+ }
2998
+ const result = {
2999
+ tenantId,
3000
+ schemaName,
3001
+ success: true,
3002
+ appliedMigrations,
3003
+ durationMs: Date.now() - startTime,
3004
+ format: format.format
3005
+ };
3006
+ await this.config.hooks?.afterTenant?.(tenantId, result);
3007
+ return result;
3008
+ } catch (error) {
3009
+ const result = {
3010
+ tenantId,
3011
+ schemaName,
3012
+ success: false,
3013
+ appliedMigrations,
3014
+ error: error.message,
3015
+ durationMs: Date.now() - startTime
3016
+ };
3017
+ await this.config.hooks?.afterTenant?.(tenantId, result);
3018
+ return result;
3019
+ } finally {
3020
+ await pool.end();
3021
+ }
3022
+ }
3023
+ /**
3024
+ * Mark migrations as applied without executing SQL
3025
+ *
3026
+ * Useful for syncing tracking state with already-applied migrations
3027
+ * or when migrations were applied manually.
3028
+ *
3029
+ * @param tenantId - The tenant identifier
3030
+ * @param options - Options with progress callback
3031
+ * @returns Result with list of marked migrations
3032
+ *
3033
+ * @example
3034
+ * ```typescript
3035
+ * const result = await executor.markAsApplied('tenant-123', {
3036
+ * onProgress: (id, status, name) => console.log(`${id}: marking ${name}`),
3037
+ * });
3038
+ *
3039
+ * console.log(`Marked ${result.appliedMigrations.length} migrations`);
3040
+ * ```
3041
+ */
3042
+ async markAsApplied(tenantId, options = {}) {
3043
+ const startTime = Date.now();
3044
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
3045
+ const markedMigrations = [];
3046
+ const pool = await this.deps.createPool(schemaName);
3047
+ try {
3048
+ await this.config.hooks?.beforeTenant?.(tenantId);
3049
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
3050
+ await this.deps.ensureMigrationsTable(pool, schemaName, format);
3051
+ const allMigrations = await this.deps.loadMigrations();
3052
+ const applied = await this.getAppliedMigrations(pool, schemaName, format);
3053
+ const appliedSet = new Set(applied.map((m) => m.identifier));
3054
+ const pending = allMigrations.filter(
3055
+ (m) => !this.isMigrationApplied(m, appliedSet, format)
3056
+ );
3057
+ for (const migration of pending) {
3058
+ const migrationStart = Date.now();
3059
+ options.onProgress?.(tenantId, "migrating", migration.name);
3060
+ await this.config.hooks?.beforeMigration?.(tenantId, migration.name);
3061
+ await this.recordMigration(pool, schemaName, migration, format);
3062
+ await this.config.hooks?.afterMigration?.(
3063
+ tenantId,
3064
+ migration.name,
3065
+ Date.now() - migrationStart
3066
+ );
3067
+ markedMigrations.push(migration.name);
3068
+ }
3069
+ const result = {
3070
+ tenantId,
3071
+ schemaName,
3072
+ success: true,
3073
+ appliedMigrations: markedMigrations,
3074
+ durationMs: Date.now() - startTime,
3075
+ format: format.format
3076
+ };
3077
+ await this.config.hooks?.afterTenant?.(tenantId, result);
3078
+ return result;
3079
+ } catch (error) {
3080
+ const result = {
3081
+ tenantId,
3082
+ schemaName,
3083
+ success: false,
3084
+ appliedMigrations: markedMigrations,
3085
+ error: error.message,
3086
+ durationMs: Date.now() - startTime
3087
+ };
3088
+ await this.config.hooks?.afterTenant?.(tenantId, result);
3089
+ return result;
3090
+ } finally {
3091
+ await pool.end();
3092
+ }
3093
+ }
3094
+ /**
3095
+ * Get migration status for a specific tenant
3096
+ *
3097
+ * Returns information about applied and pending migrations.
3098
+ *
3099
+ * @param tenantId - The tenant identifier
3100
+ * @param migrations - Optional pre-loaded migrations
3101
+ * @returns Migration status with counts and pending list
3102
+ *
3103
+ * @example
3104
+ * ```typescript
3105
+ * const status = await executor.getTenantStatus('tenant-123');
3106
+ * if (status.status === 'behind') {
3107
+ * console.log(`Pending: ${status.pendingMigrations.join(', ')}`);
3108
+ * }
3109
+ * ```
3110
+ */
3111
+ async getTenantStatus(tenantId, migrations) {
3112
+ const schemaName = this.deps.schemaNameTemplate(tenantId);
3113
+ const pool = await this.deps.createPool(schemaName);
3114
+ try {
3115
+ const allMigrations = migrations ?? await this.deps.loadMigrations();
3116
+ const tableExists = await this.deps.migrationsTableExists(pool, schemaName);
3117
+ if (!tableExists) {
3118
+ return {
3119
+ tenantId,
3120
+ schemaName,
3121
+ appliedCount: 0,
3122
+ pendingCount: allMigrations.length,
3123
+ pendingMigrations: allMigrations.map((m) => m.name),
3124
+ status: allMigrations.length > 0 ? "behind" : "ok",
3125
+ format: null
3126
+ };
3127
+ }
3128
+ const format = await this.deps.getOrDetectFormat(pool, schemaName);
3129
+ const applied = await this.getAppliedMigrations(pool, schemaName, format);
3130
+ const appliedSet = new Set(applied.map((m) => m.identifier));
3131
+ const pending = allMigrations.filter(
3132
+ (m) => !this.isMigrationApplied(m, appliedSet, format)
3133
+ );
3134
+ return {
3135
+ tenantId,
3136
+ schemaName,
3137
+ appliedCount: applied.length,
3138
+ pendingCount: pending.length,
3139
+ pendingMigrations: pending.map((m) => m.name),
3140
+ status: pending.length > 0 ? "behind" : "ok",
3141
+ format: format.format
3142
+ };
3143
+ } catch (error) {
3144
+ return {
3145
+ tenantId,
3146
+ schemaName,
3147
+ appliedCount: 0,
3148
+ pendingCount: 0,
3149
+ pendingMigrations: [],
3150
+ status: "error",
3151
+ error: error.message,
3152
+ format: null
3153
+ };
3154
+ } finally {
3155
+ await pool.end();
3156
+ }
3157
+ }
3158
+ // ============================================================================
3159
+ // IMigrationExecutor Interface Methods
3160
+ // ============================================================================
3161
+ /**
3162
+ * Execute a single migration on a schema
3163
+ */
3164
+ async executeMigration(pool, schemaName, migration, format, options) {
3165
+ if (options?.markOnly) {
3166
+ options.onProgress?.("recording");
3167
+ await this.recordMigration(pool, schemaName, migration, format);
3168
+ } else {
3169
+ options?.onProgress?.("applying");
3170
+ await this.applyMigration(pool, schemaName, migration, format);
3171
+ }
3172
+ }
3173
+ /**
3174
+ * Execute multiple migrations on a schema
3175
+ */
3176
+ async executeMigrations(pool, schemaName, migrations, format, options) {
3177
+ const appliedNames = [];
3178
+ for (const migration of migrations) {
3179
+ await this.executeMigration(pool, schemaName, migration, format, options);
3180
+ appliedNames.push(migration.name);
3181
+ }
3182
+ return appliedNames;
3183
+ }
3184
+ /**
3185
+ * Record a migration as applied without executing SQL
3186
+ */
3187
+ async recordMigration(pool, schemaName, migration, format) {
3188
+ const { identifier, timestamp, timestampType } = format.columns;
3189
+ const identifierValue = identifier === "name" ? migration.name : migration.hash;
3190
+ const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
3191
+ await pool.query(
3192
+ `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
3193
+ [identifierValue, timestampValue]
3194
+ );
3195
+ }
3196
+ /**
3197
+ * Get list of applied migrations for a tenant
3198
+ */
3199
+ async getAppliedMigrations(pool, schemaName, format) {
3200
+ const identifierColumn = format.columns.identifier;
3201
+ const timestampColumn = format.columns.timestamp;
3202
+ const result = await pool.query(
3203
+ `SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
3204
+ FROM "${schemaName}"."${format.tableName}"
3205
+ ORDER BY id`
3206
+ );
3207
+ return result.rows.map((row) => {
3208
+ const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
3209
+ return {
3210
+ identifier: row.identifier,
3211
+ // Set name or hash based on format
3212
+ ...format.columns.identifier === "name" ? { name: row.identifier } : { hash: row.identifier },
3213
+ appliedAt
3214
+ };
3215
+ });
3216
+ }
3217
+ /**
3218
+ * Get pending migrations (not yet applied)
3219
+ */
3220
+ async getPendingMigrations(pool, schemaName, allMigrations, format) {
3221
+ const applied = await this.getAppliedMigrations(pool, schemaName, format);
3222
+ const appliedSet = new Set(applied.map((m) => m.identifier));
3223
+ return allMigrations.filter(
3224
+ (m) => !this.isMigrationApplied(m, appliedSet, format)
3225
+ );
3226
+ }
3227
+ // ============================================================================
3228
+ // Private Helper Methods
3229
+ // ============================================================================
3230
+ /**
3231
+ * Check if a migration has been applied
3232
+ */
3233
+ isMigrationApplied(migration, appliedIdentifiers, format) {
3234
+ if (format.columns.identifier === "name") {
3235
+ return appliedIdentifiers.has(migration.name);
3236
+ }
3237
+ return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
3238
+ }
3239
+ /**
3240
+ * Apply a migration to a schema (execute SQL + record)
3241
+ */
3242
+ async applyMigration(pool, schemaName, migration, format) {
3243
+ const client = await pool.connect();
3244
+ try {
3245
+ await client.query("BEGIN");
3246
+ await client.query(migration.sql);
3247
+ const { identifier, timestamp, timestampType } = format.columns;
3248
+ const identifierValue = identifier === "name" ? migration.name : migration.hash;
3249
+ const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
3250
+ await client.query(
3251
+ `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
3252
+ [identifierValue, timestampValue]
3253
+ );
3254
+ await client.query("COMMIT");
3255
+ } catch (error) {
3256
+ await client.query("ROLLBACK");
3257
+ throw error;
3258
+ } finally {
3259
+ client.release();
3260
+ }
3261
+ }
3262
+ };
3263
+
3264
+ // src/migrator/executor/batch-executor.ts
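The `BatchExecutor` below, like `seedAll` and `markAllMissing` above, fans work out in fixed-size batches: slice the tenant list into chunks of `concurrency`, await `Promise.all` for each chunk, then move on. A generic sketch of that pattern on its own (all names here are illustrative):

```typescript
// Generic batched runner: at most `concurrency` tasks in flight per batch.
async function runInBatches<T, R>(
  items: T[],
  concurrency: number,
  task: (item: T) => Promise<R>
): Promise<R[]> {
  const results: R[] = [];
  for (let i = 0; i < items.length; i += concurrency) {
    const batch = items.slice(i, i + concurrency);
    // Each batch finishes completely before the next one starts.
    results.push(...(await Promise.all(batch.map(task))));
  }
  return results;
}

// Usage sketch: migrate a few tenants, at most five at a time.
declare function migrateOne(tenantId: string): Promise<{ success: boolean }>;
const outcomes = await runInBatches(["acme", "globex", "initech"], 5, migrateOne);
console.log(`${outcomes.filter((o) => o.success).length} succeeded`);
```

The trade-off captured by this shape is that each batch waits on its slowest tenant before the next batch begins, which keeps the loop simple at the cost of some idle concurrency near the end of each batch.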
3265
+ var BatchExecutor = class {
3266
+ constructor(config, executor, loadMigrations) {
3267
+ this.config = config;
3268
+ this.executor = executor;
3269
+ this.loadMigrations = loadMigrations;
3270
+ }
3271
+ /**
3272
+ * Migrate all tenants in parallel
3273
+ *
3274
+ * Processes tenants in batches with configurable concurrency.
3275
+ * Supports progress callbacks, error handling, and abort behavior.
3276
+ *
3277
+ * @param options - Migration options (concurrency, dryRun, callbacks)
3278
+ * @returns Aggregate results for all tenants
3279
+ *
3280
+ * @example
3281
+ * ```typescript
3282
+ * const results = await batchExecutor.migrateAll({
3283
+ * concurrency: 10,
3284
+ * dryRun: false,
3285
+ * onProgress: (id, status) => console.log(`${id}: ${status}`),
3286
+ * onError: (id, error) => {
3287
+ * console.error(`${id} failed: ${error.message}`);
3288
+ * return 'continue'; // or 'abort' to stop all
3289
+ * },
3290
+ * });
3291
+ *
3292
+ * console.log(`Succeeded: ${results.succeeded}/${results.total}`);
3293
+ * ```
3294
+ */
3295
+ async migrateAll(options = {}) {
3296
+ const {
3297
+ concurrency = 10,
3298
+ onProgress,
3299
+ onError,
3300
+ dryRun = false
3301
+ } = options;
3302
+ const tenantIds = await this.config.tenantDiscovery();
3303
+ const migrations = await this.loadMigrations();
3304
+ const results = [];
3305
+ let aborted = false;
3306
+ for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
3307
+ const batch = tenantIds.slice(i, i + concurrency);
3308
+ const batchResults = await Promise.all(
3309
+ batch.map(async (tenantId) => {
3310
+ if (aborted) {
3311
+ return this.createSkippedResult(tenantId);
3312
+ }
3313
+ try {
3314
+ onProgress?.(tenantId, "starting");
3315
+ const result = await this.executor.migrateTenant(tenantId, migrations, { dryRun, onProgress });
3316
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
3317
+ return result;
3318
+ } catch (error) {
3319
+ onProgress?.(tenantId, "failed");
3320
+ const action = onError?.(tenantId, error);
3321
+ if (action === "abort") {
3322
+ aborted = true;
3323
+ }
3324
+ return this.createErrorResult(tenantId, error);
3325
+ }
3326
+ })
3327
+ );
3328
+ results.push(...batchResults);
3329
+ }
3330
+ if (aborted) {
3331
+ const remaining = tenantIds.slice(results.length);
3332
+ for (const tenantId of remaining) {
3333
+ results.push(this.createSkippedResult(tenantId));
3334
+ }
3335
+ }
3336
+ return this.aggregateResults(results);
3337
+ }
3338
+ /**
3339
+ * Migrate specific tenants in parallel
3340
+ *
3341
+ * Same as migrateAll but for a subset of tenants.
3342
+ *
3343
+ * @param tenantIds - List of tenant IDs to migrate
3344
+ * @param options - Migration options
3345
+ * @returns Aggregate results for specified tenants
3346
+ *
3347
+ * @example
3348
+ * ```typescript
3349
+ * const results = await batchExecutor.migrateTenants(
3350
+ * ['tenant-1', 'tenant-2', 'tenant-3'],
3351
+ * { concurrency: 5 }
3352
+ * );
3353
+ * ```
3354
+ */
3355
+ async migrateTenants(tenantIds, options = {}) {
3356
+ const migrations = await this.loadMigrations();
3357
+ const results = [];
3358
+ const { concurrency = 10, onProgress, onError, dryRun = false } = options;
3359
+ for (let i = 0; i < tenantIds.length; i += concurrency) {
3360
+ const batch = tenantIds.slice(i, i + concurrency);
3361
+ const batchResults = await Promise.all(
3362
+ batch.map(async (tenantId) => {
3363
+ try {
3364
+ onProgress?.(tenantId, "starting");
3365
+ const result = await this.executor.migrateTenant(tenantId, migrations, { dryRun, onProgress });
3366
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
3367
+ return result;
3368
+ } catch (error) {
3369
+ onProgress?.(tenantId, "failed");
3370
+ onError?.(tenantId, error);
3371
+ return this.createErrorResult(tenantId, error);
3372
+ }
3373
+ })
3374
+ );
3375
+ results.push(...batchResults);
3376
+ }
3377
+ return this.aggregateResults(results);
3378
+ }
3379
+ /**
3380
+ * Mark all migrations as applied for every tenant without executing SQL
3381
+ *
3382
+ * Useful for syncing tracking state with already-applied migrations.
3383
+ * Processes tenants in parallel with configurable concurrency.
3384
+ *
3385
+ * @param options - Migration options
3386
+ * @returns Aggregate results for all tenants
3387
+ *
3388
+ * @example
3389
+ * ```typescript
3390
+ * const results = await batchExecutor.markAllAsApplied({
3391
+ * concurrency: 10,
3392
+ * onProgress: (id, status) => console.log(`${id}: ${status}`),
3393
+ * });
3394
+ * ```
3395
+ */
3396
+ async markAllAsApplied(options = {}) {
3397
+ const {
3398
+ concurrency = 10,
3399
+ onProgress,
3400
+ onError
3401
+ } = options;
3402
+ const tenantIds = await this.config.tenantDiscovery();
3403
+ const results = [];
3404
+ let aborted = false;
3405
+ for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
3406
+ const batch = tenantIds.slice(i, i + concurrency);
3407
+ const batchResults = await Promise.all(
3408
+ batch.map(async (tenantId) => {
3409
+ if (aborted) {
3410
+ return this.createSkippedResult(tenantId);
3411
+ }
3412
+ try {
3413
+ onProgress?.(tenantId, "starting");
3414
+ const result = await this.executor.markAsApplied(tenantId, { onProgress });
3415
+ onProgress?.(tenantId, result.success ? "completed" : "failed");
3416
+ return result;
3417
+ } catch (error) {
3418
+ onProgress?.(tenantId, "failed");
3419
+ const action = onError?.(tenantId, error);
3420
+ if (action === "abort") {
3421
+ aborted = true;
3422
+ }
3423
+ return this.createErrorResult(tenantId, error);
3424
+ }
3425
+ })
3426
+ );
3427
+ results.push(...batchResults);
3428
+ }
3429
+ if (aborted) {
3430
+ const remaining = tenantIds.slice(results.length);
3431
+ for (const tenantId of remaining) {
3432
+ results.push(this.createSkippedResult(tenantId));
3433
+ }
3434
+ }
3435
+ return this.aggregateResults(results);
3436
+ }
3437
+ /**
3438
+ * Get migration status for all tenants
3439
+ *
3440
+ * Queries each tenant's migration status sequentially.
3441
+ *
3442
+ * @returns List of migration statuses, one per tenant
3443
+ *
3444
+ * @example
3445
+ * ```typescript
3446
+ * const statuses = await batchExecutor.getStatus();
3447
+ * const behind = statuses.filter(s => s.status === 'behind');
3448
+ * console.log(`${behind.length} tenants need migrations`);
3449
+ * ```
3450
+ */
3451
+ async getStatus() {
3452
+ const tenantIds = await this.config.tenantDiscovery();
3453
+ const migrations = await this.loadMigrations();
3454
+ const statuses = [];
3455
+ for (const tenantId of tenantIds) {
3456
+ statuses.push(await this.executor.getTenantStatus(tenantId, migrations));
3457
+ }
3458
+ return statuses;
3459
+ }
3460
+ // ============================================================================
3461
+ // Private Helper Methods
3462
+ // ============================================================================
3463
+ /**
3464
+ * Create a skipped result for aborted operations
3465
+ */
3466
+ createSkippedResult(tenantId) {
3467
+ return {
3468
+ tenantId,
3469
+ schemaName: "",
3470
+ // Schema name not available in batch context
3471
+ success: false,
3472
+ appliedMigrations: [],
3473
+ error: "Skipped due to abort",
3474
+ durationMs: 0
3475
+ };
3476
+ }
3477
+ /**
3478
+ * Create an error result for failed operations
3479
+ */
3480
+ createErrorResult(tenantId, error) {
3481
+ return {
3482
+ tenantId,
3483
+ schemaName: "",
3484
+ // Schema name not available in batch context
3485
+ success: false,
3486
+ appliedMigrations: [],
3487
+ error: error.message,
3488
+ durationMs: 0
3489
+ };
3490
+ }
3491
+ /**
3492
+ * Aggregate individual migration results into a summary
3493
+ */
3494
+ aggregateResults(results) {
3495
+ return {
3496
+ total: results.length,
3497
+ succeeded: results.filter((r) => r.success).length,
3498
+ failed: results.filter((r) => !r.success && r.error !== "Skipped due to abort").length,
3499
+ skipped: results.filter((r) => r.error === "Skipped due to abort").length,
3500
+ details: results
3501
+ };
3502
+ }
3503
+ };
3504
+
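The abort semantics above surface in the aggregate result: tenants that were never started after an `'abort'` are reported with the error "Skipped due to abort" and counted under `skipped`. A hedged usage sketch through the public Migrator facade defined further down in this diff; `tenantConfig` and `migratorConfig` are assumed to be defined elsewhere.

```typescript
import { createMigrator } from "drizzle-multitenant";

// `tenantConfig` and `migratorConfig` are assumed to be defined elsewhere.
const migrator = createMigrator(tenantConfig, migratorConfig);

const results = await migrator.migrateAll({
  concurrency: 5,
  onProgress: (tenantId, status) => console.log(`${tenantId}: ${status}`),
  // Returning 'abort' stops scheduling further batches; tenants never started are
  // reported with error "Skipped due to abort" and counted as `skipped`.
  onError: (tenantId, error) => (error.message.includes("FATAL") ? "abort" : "continue"),
});

console.log(`${results.succeeded}/${results.total} ok, ${results.failed} failed, ${results.skipped} skipped`);
```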
3505
+ // src/migrator/clone/ddl-generator.ts
3506
+ async function listTables(pool, schemaName, excludeTables = []) {
3507
+ const excludePlaceholders = excludeTables.length > 0 ? excludeTables.map((_, i) => `$${i + 2}`).join(", ") : "''::text";
3508
+ const result = await pool.query(
3509
+ `SELECT table_name
3510
+ FROM information_schema.tables
3511
+ WHERE table_schema = $1
3512
+ AND table_type = 'BASE TABLE'
3513
+ AND table_name NOT IN (${excludePlaceholders})
3514
+ ORDER BY table_name`,
3515
+ [schemaName, ...excludeTables]
3516
+ );
3517
+ return result.rows.map((r) => r.table_name);
3518
+ }
3519
+ async function getColumns(pool, schemaName, tableName) {
3520
+ const result = await pool.query(
3521
+ `SELECT
3522
+ column_name,
3523
+ data_type,
3524
+ udt_name,
3525
+ is_nullable,
3526
+ column_default,
3527
+ character_maximum_length,
3528
+ numeric_precision,
3529
+ numeric_scale
3530
+ FROM information_schema.columns
3531
+ WHERE table_schema = $1 AND table_name = $2
3532
+ ORDER BY ordinal_position`,
3533
+ [schemaName, tableName]
3534
+ );
3535
+ return result.rows.map((row) => ({
3536
+ columnName: row.column_name,
3537
+ dataType: row.data_type,
3538
+ udtName: row.udt_name,
3539
+ isNullable: row.is_nullable === "YES",
3540
+ columnDefault: row.column_default,
3541
+ characterMaximumLength: row.character_maximum_length,
3542
+ numericPrecision: row.numeric_precision,
3543
+ numericScale: row.numeric_scale
3544
+ }));
3545
+ }
3546
+ async function generateTableDdl(pool, schemaName, tableName) {
3547
+ const columns = await getColumns(pool, schemaName, tableName);
3548
+ const columnDefs = columns.map((col) => {
3549
+ let type = col.udtName;
3550
+ if (col.dataType === "character varying" && col.characterMaximumLength) {
3551
+ type = `varchar(${col.characterMaximumLength})`;
3552
+ } else if (col.dataType === "character" && col.characterMaximumLength) {
3553
+ type = `char(${col.characterMaximumLength})`;
3554
+ } else if (col.dataType === "numeric" && col.numericPrecision) {
3555
+ type = `numeric(${col.numericPrecision}${col.numericScale ? `, ${col.numericScale}` : ""})`;
3556
+ } else if (col.dataType === "ARRAY") {
3557
+ type = col.udtName.replace(/^_/, "") + "[]";
3558
+ }
3559
+ let definition = `"${col.columnName}" ${type}`;
3560
+ if (!col.isNullable) {
3561
+ definition += " NOT NULL";
3562
+ }
3563
+ if (col.columnDefault) {
3564
+ const defaultValue = col.columnDefault.replace(
3565
+ new RegExp(`"?${schemaName}"?\\.`, "g"),
3566
+ ""
3567
+ );
3568
+ definition += ` DEFAULT ${defaultValue}`;
3569
+ }
3570
+ return definition;
3571
+ });
3572
+ return `CREATE TABLE IF NOT EXISTS "${tableName}" (
3573
+ ${columnDefs.join(",\n ")}
3574
+ )`;
3575
+ }
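To make the type mapping above concrete, here is the rough shape of the statement generateTableDdl would emit for a hypothetical table; the column set is invented for illustration.

```typescript
// Illustrative output only (hypothetical "users" table), based on the mapping above:
// varchar/char keep their length, numeric keeps precision and scale, "_text" becomes text[],
// and any `"<source_schema>".` prefix is stripped from column defaults.
const exampleDdl = `CREATE TABLE IF NOT EXISTS "users" (
  "id" uuid NOT NULL DEFAULT gen_random_uuid(),
  "email" varchar(255) NOT NULL,
  "balance" numeric(12, 2),
  "tags" text[]
)`;
```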
3576
+ async function generateIndexDdls(pool, sourceSchema, targetSchema, tableName) {
3577
+ const result = await pool.query(
3578
+ `SELECT indexname, indexdef
3579
+ FROM pg_indexes
3580
+ WHERE schemaname = $1 AND tablename = $2
3581
+ AND indexname NOT LIKE '%_pkey'`,
3582
+ [sourceSchema, tableName]
3583
+ );
3584
+ return result.rows.map(
3585
+ (row) => (
3586
+ // Replace source schema with target schema
3587
+ row.indexdef.replace(new RegExp(`ON "${sourceSchema}"\\."`, "g"), `ON "${targetSchema}"."`).replace(new RegExp(`"${sourceSchema}"\\."`, "g"), `"${targetSchema}"."`)
3588
+ )
3589
+ );
3590
+ }
3591
+ async function generatePrimaryKeyDdl(pool, schemaName, tableName) {
3592
+ const result = await pool.query(
3593
+ `SELECT
3594
+ tc.constraint_name,
3595
+ kcu.column_name
3596
+ FROM information_schema.table_constraints tc
3597
+ JOIN information_schema.key_column_usage kcu
3598
+ ON tc.constraint_name = kcu.constraint_name
3599
+ AND tc.table_schema = kcu.table_schema
3600
+ WHERE tc.table_schema = $1
3601
+ AND tc.table_name = $2
3602
+ AND tc.constraint_type = 'PRIMARY KEY'
3603
+ ORDER BY kcu.ordinal_position`,
3604
+ [schemaName, tableName]
3605
+ );
3606
+ if (result.rows.length === 0) return null;
3607
+ const columns = result.rows.map((r) => `"${r.column_name}"`).join(", ");
3608
+ const constraintName = result.rows[0].constraint_name;
3609
+ return `ALTER TABLE "${tableName}" ADD CONSTRAINT "${constraintName}" PRIMARY KEY (${columns})`;
3610
+ }
3611
+ async function generateForeignKeyDdls(pool, sourceSchema, targetSchema, tableName) {
3612
+ const result = await pool.query(
3613
+ `SELECT
3614
+ tc.constraint_name,
3615
+ kcu.column_name,
3616
+ ccu.table_name as foreign_table_name,
3617
+ ccu.column_name as foreign_column_name,
3618
+ rc.update_rule,
3619
+ rc.delete_rule
3620
+ FROM information_schema.table_constraints tc
3621
+ JOIN information_schema.key_column_usage kcu
3622
+ ON tc.constraint_name = kcu.constraint_name
3623
+ AND tc.table_schema = kcu.table_schema
3624
+ JOIN information_schema.constraint_column_usage ccu
3625
+ ON tc.constraint_name = ccu.constraint_name
3626
+ AND tc.table_schema = ccu.table_schema
3627
+ JOIN information_schema.referential_constraints rc
3628
+ ON tc.constraint_name = rc.constraint_name
3629
+ AND tc.table_schema = rc.constraint_schema
3630
+ WHERE tc.table_schema = $1
3631
+ AND tc.table_name = $2
3632
+ AND tc.constraint_type = 'FOREIGN KEY'
3633
+ ORDER BY tc.constraint_name, kcu.ordinal_position`,
3634
+ [sourceSchema, tableName]
3635
+ );
3636
+ const fkMap = /* @__PURE__ */ new Map();
3637
+ for (const row of result.rows) {
3638
+ const existing = fkMap.get(row.constraint_name);
3639
+ if (existing) {
3640
+ existing.columns.push(row.column_name);
3641
+ existing.foreignColumns.push(row.foreign_column_name);
3642
+ } else {
3643
+ fkMap.set(row.constraint_name, {
3644
+ columns: [row.column_name],
3645
+ foreignTable: row.foreign_table_name,
3646
+ foreignColumns: [row.foreign_column_name],
3647
+ updateRule: row.update_rule,
3648
+ deleteRule: row.delete_rule
3649
+ });
3650
+ }
3651
+ }
3652
+ return Array.from(fkMap.entries()).map(([name, fk]) => {
3653
+ const columns = fk.columns.map((c) => `"${c}"`).join(", ");
3654
+ const foreignColumns = fk.foreignColumns.map((c) => `"${c}"`).join(", ");
3655
+ let ddl = `ALTER TABLE "${targetSchema}"."${tableName}" `;
3656
+ ddl += `ADD CONSTRAINT "${name}" FOREIGN KEY (${columns}) `;
3657
+ ddl += `REFERENCES "${targetSchema}"."${fk.foreignTable}" (${foreignColumns})`;
3658
+ if (fk.updateRule !== "NO ACTION") {
3659
+ ddl += ` ON UPDATE ${fk.updateRule}`;
3660
+ }
3661
+ if (fk.deleteRule !== "NO ACTION") {
3662
+ ddl += ` ON DELETE ${fk.deleteRule}`;
3663
+ }
3664
+ return ddl;
3665
+ });
3666
+ }
3667
+ async function generateUniqueDdls(pool, schemaName, tableName) {
3668
+ const result = await pool.query(
3669
+ `SELECT
3670
+ tc.constraint_name,
3671
+ kcu.column_name
3672
+ FROM information_schema.table_constraints tc
3673
+ JOIN information_schema.key_column_usage kcu
3674
+ ON tc.constraint_name = kcu.constraint_name
3675
+ AND tc.table_schema = kcu.table_schema
3676
+ WHERE tc.table_schema = $1
3677
+ AND tc.table_name = $2
3678
+ AND tc.constraint_type = 'UNIQUE'
3679
+ ORDER BY tc.constraint_name, kcu.ordinal_position`,
3680
+ [schemaName, tableName]
3681
+ );
3682
+ const uniqueMap = /* @__PURE__ */ new Map();
3683
+ for (const row of result.rows) {
3684
+ const existing = uniqueMap.get(row.constraint_name);
3685
+ if (existing) {
3686
+ existing.push(row.column_name);
3687
+ } else {
3688
+ uniqueMap.set(row.constraint_name, [row.column_name]);
3689
+ }
3690
+ }
3691
+ return Array.from(uniqueMap.entries()).map(([name, columns]) => {
3692
+ const cols = columns.map((c) => `"${c}"`).join(", ");
3693
+ return `ALTER TABLE "${tableName}" ADD CONSTRAINT "${name}" UNIQUE (${cols})`;
3694
+ });
3695
+ }
3696
+ async function generateCheckDdls(pool, schemaName, tableName) {
3697
+ const result = await pool.query(
3698
+ `SELECT
3699
+ tc.constraint_name,
3700
+ cc.check_clause
3701
+ FROM information_schema.table_constraints tc
3702
+ JOIN information_schema.check_constraints cc
3703
+ ON tc.constraint_name = cc.constraint_name
3704
+ AND tc.constraint_schema = cc.constraint_schema
3705
+ WHERE tc.table_schema = $1
3706
+ AND tc.table_name = $2
3707
+ AND tc.constraint_type = 'CHECK'
3708
+ AND tc.constraint_name NOT LIKE '%_not_null'`,
3709
+ [schemaName, tableName]
3710
+ );
3711
+ return result.rows.map(
3712
+ (row) => `ALTER TABLE "${tableName}" ADD CONSTRAINT "${row.constraint_name}" CHECK (${row.check_clause})`
3713
+ );
3714
+ }
3715
+ async function getRowCount(pool, schemaName, tableName) {
3716
+ const result = await pool.query(
3717
+ `SELECT count(*) FROM "${schemaName}"."${tableName}"`
3718
+ );
3719
+ return parseInt(result.rows[0].count, 10);
3720
+ }
3721
+ async function generateTableCloneInfo(pool, sourceSchema, targetSchema, tableName) {
3722
+ const [createDdl, indexDdls, pkDdl, uniqueDdls, checkDdls, fkDdls, rowCount] = await Promise.all([
3723
+ generateTableDdl(pool, sourceSchema, tableName),
3724
+ generateIndexDdls(pool, sourceSchema, targetSchema, tableName),
3725
+ generatePrimaryKeyDdl(pool, sourceSchema, tableName),
3726
+ generateUniqueDdls(pool, sourceSchema, tableName),
3727
+ generateCheckDdls(pool, sourceSchema, tableName),
3728
+ generateForeignKeyDdls(pool, sourceSchema, targetSchema, tableName),
3729
+ getRowCount(pool, sourceSchema, tableName)
3730
+ ]);
3731
+ return {
3732
+ name: tableName,
3733
+ createDdl,
3734
+ indexDdls,
3735
+ constraintDdls: [
3736
+ ...pkDdl ? [pkDdl] : [],
3737
+ ...uniqueDdls,
3738
+ ...checkDdls,
3739
+ ...fkDdls
3740
+ ],
3741
+ rowCount
3742
+ };
3743
+ }
3744
+
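These clone helpers are internal to the bundle (they are not in the export list at the bottom of this file). The per-table object assembled by generateTableCloneInfo has roughly the following shape, inferred from the code above rather than from a published type.

```typescript
// Inferred shape of the value returned by generateTableCloneInfo above (not a published type).
interface TableCloneInfo {
  name: string;             // table name in the source schema
  createDdl: string;        // CREATE TABLE IF NOT EXISTS ... (unqualified; applied under search_path)
  indexDdls: string[];      // index definitions rewritten from the source to the target schema
  constraintDdls: string[]; // primary key, unique and check constraints, then foreign keys
  rowCount: number;         // SELECT count(*) on the source table
}
```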
3745
+ // src/migrator/clone/data-copier.ts
3746
+ async function getTableColumns(pool, schemaName, tableName) {
3747
+ const result = await pool.query(
3748
+ `SELECT column_name
3749
+ FROM information_schema.columns
3750
+ WHERE table_schema = $1 AND table_name = $2
3751
+ ORDER BY ordinal_position`,
3752
+ [schemaName, tableName]
3753
+ );
3754
+ return result.rows.map((r) => r.column_name);
3755
+ }
3756
+ function formatAnonymizeValue(value) {
3757
+ if (value === null) {
3758
+ return "NULL";
3759
+ }
3760
+ if (typeof value === "string") {
3761
+ return `'${value.replace(/'/g, "''")}'`;
3762
+ }
3763
+ if (typeof value === "boolean") {
3764
+ return value ? "TRUE" : "FALSE";
3765
+ }
3766
+ return String(value);
3767
+ }
3768
+ async function copyTableData(pool, sourceSchema, targetSchema, tableName, anonymizeRules) {
3769
+ const columns = await getTableColumns(pool, sourceSchema, tableName);
3770
+ if (columns.length === 0) {
3771
+ return 0;
3772
+ }
3773
+ const tableRules = anonymizeRules?.[tableName] ?? {};
3774
+ const selectColumns = columns.map((col) => {
3775
+ if (col in tableRules) {
3776
+ const value = tableRules[col];
3777
+ return `${formatAnonymizeValue(value)} as "${col}"`;
3778
+ }
3779
+ return `"${col}"`;
3780
+ });
3781
+ const insertColumns = columns.map((c) => `"${c}"`).join(", ");
3782
+ const selectExpr = selectColumns.join(", ");
3783
+ const result = await pool.query(
3784
+ `INSERT INTO "${targetSchema}"."${tableName}" (${insertColumns})
3785
+ SELECT ${selectExpr}
3786
+ FROM "${sourceSchema}"."${tableName}"`
3787
+ );
3788
+ return result.rowCount ?? 0;
3789
+ }
3790
+ async function getTablesInDependencyOrder(pool, schemaName, tables) {
3791
+ const result = await pool.query(
3792
+ `SELECT DISTINCT
3793
+ tc.table_name,
3794
+ ccu.table_name as foreign_table_name
3795
+ FROM information_schema.table_constraints tc
3796
+ JOIN information_schema.constraint_column_usage ccu
3797
+ ON tc.constraint_name = ccu.constraint_name
3798
+ AND tc.table_schema = ccu.table_schema
3799
+ WHERE tc.table_schema = $1
3800
+ AND tc.constraint_type = 'FOREIGN KEY'
3801
+ AND tc.table_name != ccu.table_name`,
3802
+ [schemaName]
3803
+ );
3804
+ const dependencies = /* @__PURE__ */ new Map();
3805
+ const tableSet = new Set(tables);
3806
+ for (const table of tables) {
3807
+ dependencies.set(table, /* @__PURE__ */ new Set());
3808
+ }
3809
+ for (const row of result.rows) {
3810
+ if (tableSet.has(row.table_name) && tableSet.has(row.foreign_table_name)) {
3811
+ dependencies.get(row.table_name).add(row.foreign_table_name);
3812
+ }
3813
+ }
3814
+ const sorted = [];
3815
+ const inDegree = /* @__PURE__ */ new Map();
3816
+ const queue = [];
3817
+ for (const table of tables) {
3818
+ inDegree.set(table, 0);
3819
+ }
3820
+ for (const [table, deps] of dependencies) {
3821
+ for (const dep of deps) {
3822
+ inDegree.set(dep, (inDegree.get(dep) ?? 0) + 1);
3823
+ }
3824
+ }
3825
+ for (const [table, degree] of inDegree) {
3826
+ if (degree === 0) {
3827
+ queue.push(table);
3828
+ }
3829
+ }
3830
+ while (queue.length > 0) {
3831
+ const table = queue.shift();
3832
+ sorted.push(table);
3833
+ for (const [otherTable, deps] of dependencies) {
3834
+ if (deps.has(table)) {
3835
+ deps.delete(table);
3836
+ const newDegree = (inDegree.get(otherTable) ?? 0) - 1;
3837
+ inDegree.set(otherTable, newDegree);
3838
+ if (newDegree === 0) {
3839
+ queue.push(otherTable);
3840
+ }
3841
+ }
3842
+ }
3843
+ }
3844
+ const remaining = tables.filter((t) => !sorted.includes(t));
3845
+ return [...sorted, ...remaining];
3846
+ }
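A small worked example of this ordering pass (table names are hypothetical). Under this in-degree counting only tables that no other table references start at zero, and anything the queue pass does not reach is appended in input order; the copy itself does not depend on the ordering, as noted in the comment below.

```typescript
// Hypothetical schema: orders.customer_id -> customers.id, order_items.order_id -> orders.id
//
// dependencies: { customers: {}, orders: { customers }, order_items: { orders } }
// inDegree:     { customers: 1,  orders: 1,             order_items: 0 }
//
// Only order_items starts at in-degree 0, so the queue pass emits it and the fallback
// appends the remaining tables in their input order:
const expectedOrder = ["order_items", "customers", "orders"];
// copyAllData (below) issues SET session_replication_role = replica before copying and
// resets it afterwards, which relaxes FK enforcement while rows are inserted.
```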
3847
+ async function copyAllData(pool, sourceSchema, targetSchema, tables, anonymizeRules, onProgress) {
3848
+ let totalRows = 0;
3849
+ const orderedTables = await getTablesInDependencyOrder(pool, sourceSchema, tables);
3850
+ await pool.query("SET session_replication_role = replica");
3851
+ try {
3852
+ for (let i = 0; i < orderedTables.length; i++) {
3853
+ const table = orderedTables[i];
3854
+ onProgress?.("copying_data", {
3855
+ table,
3856
+ progress: i + 1,
3857
+ total: orderedTables.length
3858
+ });
3859
+ const rows = await copyTableData(pool, sourceSchema, targetSchema, table, anonymizeRules);
3860
+ totalRows += rows;
3861
+ }
3862
+ } finally {
3863
+ await pool.query("SET session_replication_role = DEFAULT");
3864
+ }
3865
+ return totalRows;
3866
+ }
3867
+
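A hedged illustration of the anonymization hook used by copyTableData above: rules map table names to column overrides, and each overridden column is emitted as a literal in the INSERT ... SELECT (strings are single-quoted with embedded quotes doubled, null becomes NULL). The schema names below are placeholders.

```typescript
// Hypothetical rules: overridden columns are replaced by literals in the SELECT list.
const anonymizeRules = {
  users: { email: "redacted@example.com", phone: null },
};

// For the "users" table, copyTableData would then run a statement shaped like:
//
//   INSERT INTO "tenant_staging"."users" ("id", "email", "phone")
//   SELECT "id", 'redacted@example.com' as "email", NULL as "phone"
//   FROM "tenant_prod"."users"
```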
3868
+ // src/migrator/clone/cloner.ts
3869
+ var DEFAULT_MIGRATIONS_TABLE3 = "__drizzle_migrations";
3870
+ var Cloner = class {
3871
+ constructor(config, deps) {
3872
+ this.deps = deps;
3873
+ this.migrationsTable = config.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE3;
3874
+ }
3875
+ migrationsTable;
3876
+ /**
3877
+ * Clone one tenant's schema (and optionally its data) to a new tenant
3878
+ *
3879
+ * @param sourceTenantId - Source tenant ID
3880
+ * @param targetTenantId - Target tenant ID
3881
+ * @param options - Clone options
3882
+ * @returns Clone result
3883
+ */
3884
+ async cloneTenant(sourceTenantId, targetTenantId, options = {}) {
3885
+ const startTime = Date.now();
3886
+ const {
3887
+ includeData = false,
3888
+ anonymize,
3889
+ excludeTables = [],
3890
+ onProgress
3891
+ } = options;
3892
+ const sourceSchema = this.deps.schemaNameTemplate(sourceTenantId);
3893
+ const targetSchema = this.deps.schemaNameTemplate(targetTenantId);
3894
+ const allExcludes = [this.migrationsTable, ...excludeTables];
3895
+ let sourcePool = null;
3896
+ let rootPool = null;
3897
+ try {
3898
+ onProgress?.("starting");
3899
+ const sourceExists = await this.deps.schemaExists(sourceTenantId);
3900
+ if (!sourceExists) {
3901
+ return this.createErrorResult(
3902
+ sourceTenantId,
3903
+ targetTenantId,
3904
+ targetSchema,
3905
+ `Source tenant "${sourceTenantId}" does not exist`,
3906
+ startTime
3907
+ );
3908
+ }
3909
+ const targetExists = await this.deps.schemaExists(targetTenantId);
3910
+ if (targetExists) {
3911
+ return this.createErrorResult(
3912
+ sourceTenantId,
3913
+ targetTenantId,
3914
+ targetSchema,
3915
+ `Target tenant "${targetTenantId}" already exists`,
3916
+ startTime
3917
+ );
3918
+ }
3919
+ onProgress?.("introspecting");
3920
+ sourcePool = await this.deps.createPool(sourceSchema);
3921
+ const tables = await listTables(sourcePool, sourceSchema, allExcludes);
3922
+ if (tables.length === 0) {
3923
+ onProgress?.("creating_schema");
3924
+ await this.deps.createSchema(targetTenantId);
3925
+ onProgress?.("completed");
3926
+ return {
3927
+ sourceTenant: sourceTenantId,
3928
+ targetTenant: targetTenantId,
3929
+ targetSchema,
3930
+ success: true,
3931
+ tables: [],
3932
+ durationMs: Date.now() - startTime
3933
+ };
3934
+ }
3935
+ const tableInfos = await Promise.all(
3936
+ tables.map((t) => generateTableCloneInfo(sourcePool, sourceSchema, targetSchema, t))
3937
+ );
3938
+ await sourcePool.end();
3939
+ sourcePool = null;
3940
+ onProgress?.("creating_schema");
3941
+ await this.deps.createSchema(targetTenantId);
3942
+ rootPool = await this.deps.createRootPool();
3943
+ onProgress?.("creating_tables");
3944
+ for (const info of tableInfos) {
3945
+ await rootPool.query(`SET search_path TO "${targetSchema}"; ${info.createDdl}`);
3946
+ }
3947
+ onProgress?.("creating_constraints");
3948
+ for (const info of tableInfos) {
3949
+ for (const constraint of info.constraintDdls.filter((c) => !c.includes("FOREIGN KEY"))) {
3950
+ try {
3951
+ await rootPool.query(`SET search_path TO "${targetSchema}"; ${constraint}`);
3952
+ } catch (error) {
3953
+ }
3954
+ }
3955
+ }
3956
+ onProgress?.("creating_indexes");
3957
+ for (const info of tableInfos) {
3958
+ for (const index of info.indexDdls) {
3959
+ try {
3960
+ await rootPool.query(index);
3961
+ } catch (error) {
3962
+ }
3963
+ }
3964
+ }
3965
+ let rowsCopied = 0;
3966
+ if (includeData) {
3967
+ onProgress?.("copying_data");
3968
+ rowsCopied = await copyAllData(
3969
+ rootPool,
3970
+ sourceSchema,
3971
+ targetSchema,
3972
+ tables,
3973
+ anonymize?.enabled ? anonymize.rules : void 0,
3974
+ onProgress
3975
+ );
3976
+ }
3977
+ for (const info of tableInfos) {
3978
+ for (const fk of info.constraintDdls.filter((c) => c.includes("FOREIGN KEY"))) {
3979
+ try {
3980
+ await rootPool.query(fk);
3981
+ } catch (error) {
3982
+ }
3983
+ }
3984
+ }
3985
+ onProgress?.("completed");
3986
+ const result = {
3987
+ sourceTenant: sourceTenantId,
3988
+ targetTenant: targetTenantId,
3989
+ targetSchema,
3990
+ success: true,
3991
+ tables,
3992
+ durationMs: Date.now() - startTime
3993
+ };
3994
+ if (includeData) {
3995
+ result.rowsCopied = rowsCopied;
3996
+ }
3997
+ return result;
3998
+ } catch (error) {
3999
+ options.onError?.(error);
4000
+ onProgress?.("failed");
4001
+ return this.createErrorResult(
4002
+ sourceTenantId,
4003
+ targetTenantId,
4004
+ targetSchema,
4005
+ error.message,
4006
+ startTime
4007
+ );
4008
+ } finally {
4009
+ if (sourcePool) {
4010
+ await sourcePool.end().catch(() => {
4011
+ });
4012
+ }
4013
+ if (rootPool) {
4014
+ await rootPool.end().catch(() => {
4015
+ });
4016
+ }
4017
+ }
4018
+ }
4019
+ createErrorResult(source, target, schema, error, startTime) {
4020
+ return {
4021
+ sourceTenant: source,
4022
+ targetTenant: target,
4023
+ targetSchema: schema,
4024
+ success: false,
4025
+ error,
4026
+ tables: [],
4027
+ durationMs: Date.now() - startTime
4028
+ };
4029
+ }
4030
+ };
4031
+
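Callers reach the Cloner through migrator.cloneTenant, wired up in the Migrator constructor below. A usage sketch; the tenant IDs and excluded table are assumptions, and the progress stages listed in the comment are the ones emitted by the code above.

```typescript
// Assumes `migrator` was built with createMigrator(...) as shown further down.
const clone = await migrator.cloneTenant("prod-tenant", "staging-tenant", {
  includeData: true,
  excludeTables: ["audit_log"], // copied neither as structure nor as data
  // Stages emitted above: starting, introspecting, creating_schema, creating_tables,
  // creating_constraints, creating_indexes, copying_data (with per-table detail), completed/failed.
  onProgress: (stage, detail) => console.log(stage, detail ?? ""),
});

if (clone.success) {
  console.log(`Cloned ${clone.tables.length} tables in ${clone.durationMs}ms`, clone.rowsCopied);
} else {
  console.error(`Clone failed: ${clone.error}`);
}
```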
4032
+ // src/migrator/migrator.ts
4033
+ var DEFAULT_MIGRATIONS_TABLE4 = "__drizzle_migrations";
4034
+ var Migrator = class {
4035
+ constructor(tenantConfig, migratorConfig) {
4036
+ this.migratorConfig = migratorConfig;
4037
+ this.migrationsTable = migratorConfig.migrationsTable ?? DEFAULT_MIGRATIONS_TABLE4;
4038
+ this.schemaManager = new SchemaManager(tenantConfig, this.migrationsTable);
4039
+ this.driftDetector = new DriftDetector(tenantConfig, this.schemaManager, {
4040
+ migrationsTable: this.migrationsTable,
4041
+ tenantDiscovery: migratorConfig.tenantDiscovery
4042
+ });
4043
+ this.seeder = new Seeder(
4044
+ { tenantDiscovery: migratorConfig.tenantDiscovery },
4045
+ {
4046
+ createPool: this.schemaManager.createPool.bind(this.schemaManager),
4047
+ schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
4048
+ tenantSchema: tenantConfig.schemas.tenant
4049
+ }
4050
+ );
4051
+ this.syncManager = new SyncManager(
4052
+ {
4053
+ tenantDiscovery: migratorConfig.tenantDiscovery,
4054
+ migrationsFolder: migratorConfig.migrationsFolder,
4055
+ migrationsTable: this.migrationsTable
4056
+ },
4057
+ {
4058
+ createPool: this.schemaManager.createPool.bind(this.schemaManager),
4059
+ schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
4060
+ migrationsTableExists: this.schemaManager.migrationsTableExists.bind(this.schemaManager),
4061
+ ensureMigrationsTable: this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),
4062
+ getOrDetectFormat: this.getOrDetectFormat.bind(this),
4063
+ loadMigrations: this.loadMigrations.bind(this)
4064
+ }
4065
+ );
4066
+ this.migrationExecutor = new MigrationExecutor(
4067
+ { hooks: migratorConfig.hooks },
4068
+ {
4069
+ createPool: this.schemaManager.createPool.bind(this.schemaManager),
4070
+ schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
4071
+ migrationsTableExists: this.schemaManager.migrationsTableExists.bind(this.schemaManager),
4072
+ ensureMigrationsTable: this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),
4073
+ getOrDetectFormat: this.getOrDetectFormat.bind(this),
4074
+ loadMigrations: this.loadMigrations.bind(this)
4075
+ }
4076
+ );
4077
+ this.batchExecutor = new BatchExecutor(
4078
+ { tenantDiscovery: migratorConfig.tenantDiscovery },
4079
+ this.migrationExecutor,
4080
+ this.loadMigrations.bind(this)
4081
+ );
4082
+ this.cloner = new Cloner(
4083
+ { migrationsTable: this.migrationsTable },
4084
+ {
4085
+ createPool: this.schemaManager.createPool.bind(this.schemaManager),
4086
+ createRootPool: this.schemaManager.createRootPool.bind(this.schemaManager),
4087
+ schemaNameTemplate: tenantConfig.isolation.schemaNameTemplate,
4088
+ schemaExists: this.schemaManager.schemaExists.bind(this.schemaManager),
4089
+ createSchema: this.schemaManager.createSchema.bind(this.schemaManager)
4090
+ }
4091
+ );
4092
+ }
4093
+ migrationsTable;
4094
+ schemaManager;
4095
+ driftDetector;
4096
+ seeder;
4097
+ syncManager;
4098
+ migrationExecutor;
4099
+ batchExecutor;
4100
+ cloner;
4101
+ /**
4102
+ * Migrate all tenants in parallel
4103
+ *
4104
+ * Delegates to BatchExecutor for parallel migration operations.
4105
+ */
4106
+ async migrateAll(options = {}) {
4107
+ return this.batchExecutor.migrateAll(options);
4108
+ }
4109
+ /**
4110
+ * Migrate a single tenant
4111
+ *
4112
+ * Delegates to MigrationExecutor for single tenant operations.
4113
+ */
4114
+ async migrateTenant(tenantId, migrations, options = {}) {
4115
+ return this.migrationExecutor.migrateTenant(tenantId, migrations, options);
4116
+ }
4117
+ /**
4118
+ * Migrate specific tenants
4119
+ *
4120
+ * Delegates to BatchExecutor for parallel migration operations.
4121
+ */
4122
+ async migrateTenants(tenantIds, options = {}) {
4123
+ return this.batchExecutor.migrateTenants(tenantIds, options);
4124
+ }
4125
+ /**
4126
+ * Get migration status for all tenants
4127
+ *
4128
+ * Delegates to BatchExecutor for status operations.
4129
+ */
4130
+ async getStatus() {
4131
+ return this.batchExecutor.getStatus();
4132
+ }
4133
+ /**
4134
+ * Get migration status for a specific tenant
4135
+ *
4136
+ * Delegates to MigrationExecutor for single tenant operations.
4137
+ */
4138
+ async getTenantStatus(tenantId, migrations) {
4139
+ return this.migrationExecutor.getTenantStatus(tenantId, migrations);
4140
+ }
4141
+ /**
4142
+ * Create a new tenant schema and optionally apply migrations
4143
+ */
4144
+ async createTenant(tenantId, options = {}) {
4145
+ const { migrate = true } = options;
4146
+ await this.schemaManager.createSchema(tenantId);
4147
+ if (migrate) {
4148
+ await this.migrateTenant(tenantId);
4149
+ }
4150
+ }
4151
+ /**
4152
+ * Drop a tenant schema
4153
+ */
4154
+ async dropTenant(tenantId, options = {}) {
4155
+ await this.schemaManager.dropSchema(tenantId, options);
4156
+ }
4157
+ /**
4158
+ * Check if a tenant schema exists
4159
+ */
4160
+ async tenantExists(tenantId) {
4161
+ return this.schemaManager.schemaExists(tenantId);
4162
+ }
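A short lifecycle sketch for the management methods above; the tenant ID is arbitrary, and dropTenant's options are forwarded to SchemaManager.dropSchema, whose exact shape is not shown in this diff.

```typescript
// Hypothetical tenant lifecycle using the methods above.
await migrator.createTenant("acme"); // creates the schema and applies pending migrations

if (await migrator.tenantExists("acme")) {
  console.log("tenant 'acme' is ready");
}

// Options are passed through to SchemaManager.dropSchema; their shape is not visible here.
await migrator.dropTenant("acme");
```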
4163
+ /**
4164
+ * Clone a tenant to a new tenant
4165
+ *
4166
+ * By default, clones only schema structure. Use includeData to copy data.
4167
+ *
4168
+ * @example
4169
+ * ```typescript
4170
+ * // Schema-only clone
4171
+ * await migrator.cloneTenant('production', 'dev');
4172
+ *
4173
+ * // Clone with data
4174
+ * await migrator.cloneTenant('production', 'dev', { includeData: true });
4175
+ *
4176
+ * // Clone with anonymization
4177
+ * await migrator.cloneTenant('production', 'dev', {
4178
+ * includeData: true,
4179
+ * anonymize: {
4180
+ * enabled: true,
4181
+ * rules: {
4182
+ * users: { email: null, phone: null },
4183
+ * },
4184
+ * },
4185
+ * });
4186
+ * ```
4187
+ */
4188
+ async cloneTenant(sourceTenantId, targetTenantId, options = {}) {
4189
+ return this.cloner.cloneTenant(sourceTenantId, targetTenantId, options);
4190
+ }
4191
+ /**
4192
+ * Mark migrations as applied without executing SQL
4193
+ * Useful for syncing tracking state with already-applied migrations
4194
+ *
4195
+ * Delegates to MigrationExecutor for single tenant operations.
4196
+ */
4197
+ async markAsApplied(tenantId, options = {}) {
4198
+ return this.migrationExecutor.markAsApplied(tenantId, options);
4199
+ }
4200
+ /**
4201
+ * Mark migrations as applied for all tenants without executing SQL
4202
+ * Useful for syncing tracking state with already-applied migrations
4203
+ *
4204
+ * Delegates to BatchExecutor for parallel operations.
4205
+ */
4206
+ async markAllAsApplied(options = {}) {
4207
+ return this.batchExecutor.markAllAsApplied(options);
4208
+ }
4209
+ // ============================================================================
4210
+ // Sync Methods (delegated to SyncManager)
4211
+ // ============================================================================
4212
+ /**
4213
+ * Get sync status for all tenants
4214
+ * Detects divergences between the migrations on disk and the tracking records in the database
4215
+ */
4216
+ async getSyncStatus() {
4217
+ return this.syncManager.getSyncStatus();
4218
+ }
4219
+ /**
4220
+ * Get sync status for a specific tenant
4221
+ */
4222
+ async getTenantSyncStatus(tenantId, migrations) {
4223
+ return this.syncManager.getTenantSyncStatus(tenantId, migrations);
4224
+ }
4225
+ /**
4226
+ * Mark missing migrations as applied for a tenant
4227
+ */
4228
+ async markMissing(tenantId) {
4229
+ return this.syncManager.markMissing(tenantId);
4230
+ }
4231
+ /**
4232
+ * Mark missing migrations as applied for all tenants
4233
+ */
4234
+ async markAllMissing(options = {}) {
4235
+ return this.syncManager.markAllMissing(options);
4236
+ }
4237
+ /**
4238
+ * Remove orphan migration records for a tenant
4239
+ */
4240
+ async cleanOrphans(tenantId) {
4241
+ return this.syncManager.cleanOrphans(tenantId);
4242
+ }
4243
+ /**
4244
+ * Remove orphan migration records for all tenants
4245
+ */
4246
+ async cleanAllOrphans(options = {}) {
4247
+ return this.syncManager.cleanAllOrphans(options);
4248
+ }
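The sync delegates above pair naturally into a reconciliation pass. A hedged sketch: the option bag (concurrency) mirrors the batch methods earlier in this file and is an assumption for these two calls, and the shapes of the returned summaries are not shown in this diff.

```typescript
// Hypothetical reconciliation pass between the migrations folder and the tracking tables.
await migrator.markAllMissing({ concurrency: 5 });  // record migrations applied out-of-band
await migrator.cleanAllOrphans({ concurrency: 5 }); // drop tracking rows without a matching file

const syncStatus = await migrator.getSyncStatus();
console.log(syncStatus); // summary shape not shown in this diff; inspect before relying on it
```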
4249
+ // ============================================================================
4250
+ // Seeding Methods (delegated to Seeder)
4251
+ // ============================================================================
4252
+ /**
4253
+ * Seed a single tenant with initial data
4254
+ *
4255
+ * @example
4256
+ * ```typescript
4257
+ * const seed: SeedFunction = async (db, tenantId) => {
4258
+ * await db.insert(roles).values([
4259
+ * { name: 'admin', permissions: ['*'] },
4260
+ * { name: 'user', permissions: ['read'] },
4261
+ * ]);
4262
+ * };
4263
+ *
4264
+ * await migrator.seedTenant('tenant-123', seed);
4265
+ * ```
4266
+ */
4267
+ async seedTenant(tenantId, seedFn) {
4268
+ return this.seeder.seedTenant(tenantId, seedFn);
4269
+ }
4270
+ /**
4271
+ * Seed all tenants with initial data in parallel
4272
+ *
4273
+ * @example
4274
+ * ```typescript
4275
+ * const seed: SeedFunction = async (db, tenantId) => {
4276
+ * await db.insert(roles).values([
4277
+ * { name: 'admin', permissions: ['*'] },
4278
+ * ]);
4279
+ * };
4280
+ *
4281
+ * await migrator.seedAll(seed, { concurrency: 10 });
4282
+ * ```
4283
+ */
4284
+ async seedAll(seedFn, options = {}) {
4285
+ return this.seeder.seedAll(seedFn, options);
4286
+ }
4287
+ /**
4288
+ * Seed specific tenants with initial data
1538
4289
  */
1539
- async cleanAllOrphans(options = {}) {
1540
- const { concurrency = 10, onProgress, onError } = options;
1541
- const tenantIds = await this.migratorConfig.tenantDiscovery();
1542
- const results = [];
1543
- let aborted = false;
1544
- for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
1545
- const batch = tenantIds.slice(i, i + concurrency);
1546
- const batchResults = await Promise.all(
1547
- batch.map(async (tenantId) => {
1548
- if (aborted) {
1549
- return this.createSkippedSyncResult(tenantId);
1550
- }
1551
- try {
1552
- onProgress?.(tenantId, "starting");
1553
- const result = await this.cleanOrphans(tenantId);
1554
- onProgress?.(tenantId, result.success ? "completed" : "failed");
1555
- return result;
1556
- } catch (error) {
1557
- onProgress?.(tenantId, "failed");
1558
- const action = onError?.(tenantId, error);
1559
- if (action === "abort") {
1560
- aborted = true;
1561
- }
1562
- return this.createErrorSyncResult(tenantId, error);
1563
- }
1564
- })
1565
- );
1566
- results.push(...batchResults);
1567
- }
1568
- return this.aggregateSyncResults(results);
4290
+ async seedTenants(tenantIds, seedFn, options = {}) {
4291
+ return this.seeder.seedTenants(tenantIds, seedFn, options);
1569
4292
  }
1570
4293
  /**
1571
4294
  * Load migration files from the migrations folder
@@ -1590,76 +4313,11 @@ var Migrator = class {
1590
4313
  }
1591
4314
  return migrations.sort((a, b) => a.timestamp - b.timestamp);
1592
4315
  }
1593
- /**
1594
- * Create a pool for a specific schema
1595
- */
1596
- async createPool(schemaName) {
1597
- return new Pool({
1598
- connectionString: this.tenantConfig.connection.url,
1599
- ...this.tenantConfig.connection.poolConfig,
1600
- options: `-c search_path="${schemaName}",public`
1601
- });
1602
- }
1603
- /**
1604
- * Ensure migrations table exists with the correct format
1605
- */
1606
- async ensureMigrationsTable(pool, schemaName, format) {
1607
- const { identifier, timestamp, timestampType } = format.columns;
1608
- const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
1609
- const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
1610
- await pool.query(`
1611
- CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
1612
- id SERIAL PRIMARY KEY,
1613
- ${identifierCol},
1614
- ${timestampCol}
1615
- )
1616
- `);
1617
- }
1618
- /**
1619
- * Check if migrations table exists
1620
- */
1621
- async migrationsTableExists(pool, schemaName) {
1622
- const result = await pool.query(
1623
- `SELECT 1 FROM information_schema.tables
1624
- WHERE table_schema = $1 AND table_name = $2`,
1625
- [schemaName, this.migrationsTable]
1626
- );
1627
- return result.rowCount !== null && result.rowCount > 0;
1628
- }
1629
- /**
1630
- * Get applied migrations for a schema
1631
- */
1632
- async getAppliedMigrations(pool, schemaName, format) {
1633
- const identifierColumn = format.columns.identifier;
1634
- const timestampColumn = format.columns.timestamp;
1635
- const result = await pool.query(
1636
- `SELECT id, "${identifierColumn}" as identifier, "${timestampColumn}" as applied_at
1637
- FROM "${schemaName}"."${format.tableName}"
1638
- ORDER BY id`
1639
- );
1640
- return result.rows.map((row) => {
1641
- const appliedAt = format.columns.timestampType === "bigint" ? new Date(Number(row.applied_at)) : new Date(row.applied_at);
1642
- return {
1643
- id: row.id,
1644
- identifier: row.identifier,
1645
- // Set name or hash based on format
1646
- ...format.columns.identifier === "name" ? { name: row.identifier } : { hash: row.identifier },
1647
- appliedAt
1648
- };
1649
- });
1650
- }
1651
- /**
1652
- * Check if a migration has been applied
1653
- */
1654
- isMigrationApplied(migration, appliedIdentifiers, format) {
1655
- if (format.columns.identifier === "name") {
1656
- return appliedIdentifiers.has(migration.name);
1657
- }
1658
- return appliedIdentifiers.has(migration.hash) || appliedIdentifiers.has(migration.name);
1659
- }
1660
4316
  /**
1661
4317
  * Get or detect the format for a schema
1662
4318
  * Returns the configured format or auto-detects from existing table
4319
+ *
4320
+ * Note: This method is shared with SyncManager and MigrationExecutor via dependency injection.
1663
4321
  */
1664
4322
  async getOrDetectFormat(pool, schemaName) {
1665
4323
  const configuredFormat = this.migratorConfig.tableFormat ?? "auto";
@@ -1673,118 +4331,45 @@ var Migrator = class {
1673
4331
  const defaultFormat = this.migratorConfig.defaultFormat ?? "name";
1674
4332
  return getFormatConfig(defaultFormat, this.migrationsTable);
1675
4333
  }
4334
+ // ============================================================================
4335
+ // Schema Drift Detection Methods (delegated to DriftDetector)
4336
+ // ============================================================================
1676
4337
  /**
1677
- * Apply a migration to a schema
1678
- */
1679
- async applyMigration(pool, schemaName, migration, format) {
1680
- const client = await pool.connect();
1681
- try {
1682
- await client.query("BEGIN");
1683
- await client.query(migration.sql);
1684
- const { identifier, timestamp, timestampType } = format.columns;
1685
- const identifierValue = identifier === "name" ? migration.name : migration.hash;
1686
- const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
1687
- await client.query(
1688
- `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
1689
- [identifierValue, timestampValue]
1690
- );
1691
- await client.query("COMMIT");
1692
- } catch (error) {
1693
- await client.query("ROLLBACK");
1694
- throw error;
1695
- } finally {
1696
- client.release();
1697
- }
1698
- }
1699
- /**
1700
- * Record a migration as applied without executing SQL
1701
- * Used by markAsApplied to sync tracking state
1702
- */
1703
- async recordMigration(pool, schemaName, migration, format) {
1704
- const { identifier, timestamp, timestampType } = format.columns;
1705
- const identifierValue = identifier === "name" ? migration.name : migration.hash;
1706
- const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
1707
- await pool.query(
1708
- `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
1709
- [identifierValue, timestampValue]
1710
- );
1711
- }
1712
- /**
1713
- * Create a skipped result
1714
- */
1715
- createSkippedResult(tenantId) {
1716
- return {
1717
- tenantId,
1718
- schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
1719
- success: false,
1720
- appliedMigrations: [],
1721
- error: "Skipped due to abort",
1722
- durationMs: 0
1723
- };
1724
- }
1725
- /**
1726
- * Create an error result
1727
- */
1728
- createErrorResult(tenantId, error) {
1729
- return {
1730
- tenantId,
1731
- schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
1732
- success: false,
1733
- appliedMigrations: [],
1734
- error: error.message,
1735
- durationMs: 0
1736
- };
1737
- }
1738
- /**
1739
- * Aggregate migration results
1740
- */
1741
- aggregateResults(results) {
1742
- return {
1743
- total: results.length,
1744
- succeeded: results.filter((r) => r.success).length,
1745
- failed: results.filter((r) => !r.success && r.error !== "Skipped due to abort").length,
1746
- skipped: results.filter((r) => r.error === "Skipped due to abort").length,
1747
- details: results
1748
- };
1749
- }
1750
- /**
1751
- * Create a skipped sync result
4338
+ * Detect schema drift across all tenants
4339
+ * Compares each tenant's schema against a reference tenant (first tenant by default)
4340
+ *
4341
+ * @example
4342
+ * ```typescript
4343
+ * const drift = await migrator.getSchemaDrift();
4344
+ * if (drift.withDrift > 0) {
4345
+ * console.log('Schema drift detected!');
4346
+ * for (const tenant of drift.details) {
4347
+ * if (tenant.hasDrift) {
4348
+ * console.log(`Tenant ${tenant.tenantId} has drift:`);
4349
+ * for (const table of tenant.tables) {
4350
+ * for (const col of table.columns) {
4351
+ * console.log(` - ${table.table}.${col.column}: ${col.description}`);
4352
+ * }
4353
+ * }
4354
+ * }
4355
+ * }
4356
+ * }
4357
+ * ```
1752
4358
  */
1753
- createSkippedSyncResult(tenantId) {
1754
- return {
1755
- tenantId,
1756
- schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
1757
- success: false,
1758
- markedMigrations: [],
1759
- removedOrphans: [],
1760
- error: "Skipped due to abort",
1761
- durationMs: 0
1762
- };
4359
+ async getSchemaDrift(options = {}) {
4360
+ return this.driftDetector.detectDrift(options);
1763
4361
  }
1764
4362
  /**
1765
- * Create an error sync result
4363
+ * Get schema drift for a specific tenant compared to a reference
1766
4364
  */
1767
- createErrorSyncResult(tenantId, error) {
1768
- return {
1769
- tenantId,
1770
- schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
1771
- success: false,
1772
- markedMigrations: [],
1773
- removedOrphans: [],
1774
- error: error.message,
1775
- durationMs: 0
1776
- };
4365
+ async getTenantSchemaDrift(tenantId, referenceTenantId, options = {}) {
4366
+ return this.driftDetector.compareTenant(tenantId, referenceTenantId, options);
1777
4367
  }
1778
4368
  /**
1779
- * Aggregate sync results
4369
+ * Introspect the schema of a tenant
1780
4370
  */
1781
- aggregateSyncResults(results) {
1782
- return {
1783
- total: results.length,
1784
- succeeded: results.filter((r) => r.success).length,
1785
- failed: results.filter((r) => !r.success).length,
1786
- details: results
1787
- };
4371
+ async introspectTenantSchema(tenantId, options = {}) {
4372
+ return this.driftDetector.introspectSchema(tenantId, options);
1788
4373
  }
1789
4374
  };
1790
4375
  function createMigrator(tenantConfig, migratorConfig) {
@@ -2210,6 +4795,71 @@ function withShared(tenantDb, _sharedDb, schemas, options) {
2210
4795
  );
2211
4796
  }
2212
4797
 
2213
- export { CrossSchemaQueryBuilder, DEFAULT_CONFIG, Migrator, WithSharedQueryBuilder, buildCrossSchemaSelect, calculateDelay, createCrossSchemaQuery, createMigrator, createRetrier, createTenantContext, createTenantManager, crossSchemaRaw, defineConfig, isRetryableError, withRetry, withShared, withSharedLookup };
4798
+ // src/retry.ts
4799
+ function isRetryableError2(error) {
4800
+ const message = error.message.toLowerCase();
4801
+ if (message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("enotfound") || message.includes("connection refused") || message.includes("connection reset") || message.includes("connection terminated") || message.includes("connection timed out") || message.includes("timeout expired") || message.includes("socket hang up")) {
4802
+ return true;
4803
+ }
4804
+ if (message.includes("too many connections") || message.includes("sorry, too many clients") || message.includes("the database system is starting up") || message.includes("the database system is shutting down") || message.includes("server closed the connection unexpectedly") || message.includes("could not connect to server")) {
4805
+ return true;
4806
+ }
4807
+ if (message.includes("ssl connection") || message.includes("ssl handshake")) {
4808
+ return true;
4809
+ }
4810
+ return false;
4811
+ }
4812
+ function calculateDelay(attempt, config) {
4813
+ const exponentialDelay = config.initialDelayMs * Math.pow(config.backoffMultiplier, attempt);
4814
+ const cappedDelay = Math.min(exponentialDelay, config.maxDelayMs);
4815
+ if (config.jitter) {
4816
+ const jitterFactor = 1 + Math.random() * 0.25;
4817
+ return Math.floor(cappedDelay * jitterFactor);
4818
+ }
4819
+ return Math.floor(cappedDelay);
4820
+ }
4821
+ function sleep2(ms) {
4822
+ return new Promise((resolve) => setTimeout(resolve, ms));
4823
+ }
4824
+ async function withRetry(operation, config) {
4825
+ const retryConfig = {
4826
+ maxAttempts: config?.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
4827
+ initialDelayMs: config?.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
4828
+ maxDelayMs: config?.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
4829
+ backoffMultiplier: config?.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
4830
+ jitter: config?.jitter ?? DEFAULT_CONFIG.retry.jitter,
4831
+ isRetryable: config?.isRetryable ?? isRetryableError2,
4832
+ onRetry: config?.onRetry
4833
+ };
4834
+ const startTime = Date.now();
4835
+ let lastError = null;
4836
+ for (let attempt = 0; attempt < retryConfig.maxAttempts; attempt++) {
4837
+ try {
4838
+ const result = await operation();
4839
+ return {
4840
+ result,
4841
+ attempts: attempt + 1,
4842
+ totalTimeMs: Date.now() - startTime
4843
+ };
4844
+ } catch (error) {
4845
+ lastError = error;
4846
+ const isLastAttempt = attempt >= retryConfig.maxAttempts - 1;
4847
+ if (isLastAttempt || !retryConfig.isRetryable(lastError)) {
4848
+ throw lastError;
4849
+ }
4850
+ const delay = calculateDelay(attempt, retryConfig);
4851
+ retryConfig.onRetry?.(attempt + 1, lastError, delay);
4852
+ await sleep2(delay);
4853
+ }
4854
+ }
4855
+ throw lastError ?? new Error("Retry failed with no error");
4856
+ }
4857
+ function createRetrier(config) {
4858
+ return (operation) => {
4859
+ return withRetry(operation, config);
4860
+ };
4861
+ }
4862
+
4863
+ export { CrossSchemaQueryBuilder, DEFAULT_CONFIG, Migrator, WithSharedQueryBuilder, buildCrossSchemaSelect, calculateDelay, createCrossSchemaQuery, createMigrator, createRetrier, createTenantContext, createTenantManager, crossSchemaRaw, defineConfig, isRetryableError2 as isRetryableError, withRetry, withShared, withSharedLookup };
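This release also re-exports the retry helpers defined just above (note that isRetryableError2 is published under its original isRetryableError name). A usage sketch; the pool and queries are assumptions, and defaults otherwise come from DEFAULT_CONFIG.retry.

```typescript
import { Pool } from "pg";
import { withRetry, createRetrier, calculateDelay } from "drizzle-multitenant";

const pool = new Pool({ connectionString: process.env.DATABASE_URL }); // assumed connection

// Retry a flaky query with explicit settings.
const { result, attempts, totalTimeMs } = await withRetry(() => pool.query("SELECT 1"), {
  maxAttempts: 4,
  initialDelayMs: 100,
  maxDelayMs: 2000,
  backoffMultiplier: 2,
  jitter: false,
  onRetry: (attempt, error, delay) => console.warn(`retry #${attempt} in ${delay}ms: ${error.message}`),
});
console.log(`ok after ${attempts} attempt(s) in ${totalTimeMs}ms`, result.rowCount);

// With jitter off, delays grow 100ms, 200ms, 400ms, ... capped at maxDelayMs; with jitter on,
// each delay is multiplied by a random factor between 1 and 1.25.
calculateDelay(2, { initialDelayMs: 100, maxDelayMs: 2000, backoffMultiplier: 2, jitter: false }); // 400

// Or bake the settings into a reusable retrier.
const retry = createRetrier({ maxAttempts: 3 });
await retry(() => pool.query("SELECT count(*) FROM tenants"));
```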
2214
4864
  //# sourceMappingURL=index.js.map
2215
4865
  //# sourceMappingURL=index.js.map