drizzle-multitenant 1.0.8 → 1.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +94 -339
- package/dist/cli/index.js +686 -6
- package/dist/cli/index.js.map +1 -1
- package/dist/{context-DBerWr50.d.ts → context-DoHx79MS.d.ts} +1 -1
- package/dist/cross-schema/index.d.ts +152 -1
- package/dist/cross-schema/index.js +208 -1
- package/dist/cross-schema/index.js.map +1 -1
- package/dist/index.d.ts +62 -5
- package/dist/index.js +1181 -50
- package/dist/index.js.map +1 -1
- package/dist/integrations/express.d.ts +3 -3
- package/dist/integrations/fastify.d.ts +3 -3
- package/dist/integrations/nestjs/index.d.ts +1 -1
- package/dist/integrations/nestjs/index.js +484 -3
- package/dist/integrations/nestjs/index.js.map +1 -1
- package/dist/migrator/index.d.ts +116 -1
- package/dist/migrator/index.js +418 -0
- package/dist/migrator/index.js.map +1 -1
- package/dist/types-B5eSRLFW.d.ts +235 -0
- package/package.json +9 -3
- package/dist/types-DKVaTaIb.d.ts +0 -130
package/dist/index.js
CHANGED
@@ -39,6 +39,26 @@ function validateConfig(config) {
   if (config.isolation.poolTtlMs !== void 0 && config.isolation.poolTtlMs < 0) {
     throw new Error("[drizzle-multitenant] isolation.poolTtlMs must be non-negative");
   }
+  if (config.connection.retry) {
+    const retry = config.connection.retry;
+    if (retry.maxAttempts !== void 0 && retry.maxAttempts < 1) {
+      throw new Error("[drizzle-multitenant] connection.retry.maxAttempts must be at least 1");
+    }
+    if (retry.initialDelayMs !== void 0 && retry.initialDelayMs < 0) {
+      throw new Error("[drizzle-multitenant] connection.retry.initialDelayMs must be non-negative");
+    }
+    if (retry.maxDelayMs !== void 0 && retry.maxDelayMs < 0) {
+      throw new Error("[drizzle-multitenant] connection.retry.maxDelayMs must be non-negative");
+    }
+    if (retry.backoffMultiplier !== void 0 && retry.backoffMultiplier < 1) {
+      throw new Error("[drizzle-multitenant] connection.retry.backoffMultiplier must be at least 1");
+    }
+    if (retry.initialDelayMs !== void 0 && retry.maxDelayMs !== void 0 && retry.initialDelayMs > retry.maxDelayMs) {
+      throw new Error(
+        "[drizzle-multitenant] connection.retry.initialDelayMs cannot be greater than maxDelayMs"
+      );
+    }
+  }
 }
 
 // src/types.ts
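The validation above guards a new `connection.retry` block in the tenant manager configuration. A minimal sketch of how that block might be supplied through the exported `defineConfig` helper (the option names mirror the validation messages and defaults in this diff; the schema imports and connection string are placeholders):

```typescript
import { defineConfig } from "drizzle-multitenant";
// Hypothetical schema modules, for illustration only.
import * as tenantSchema from "./schema/tenant";
import * as sharedSchema from "./schema/shared";

export default defineConfig({
  schemas: { tenant: tenantSchema, shared: sharedSchema },
  isolation: {
    schemaNameTemplate: (tenantId) => `tenant_${tenantId}`,
  },
  connection: {
    url: process.env.DATABASE_URL!,
    retry: {
      maxAttempts: 5,       // must be >= 1
      initialDelayMs: 200,  // must be >= 0
      maxDelayMs: 2_000,    // must be >= initialDelayMs
      backoffMultiplier: 2, // must be >= 1
      jitter: true,
    },
  },
});
```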
@@ -52,14 +72,270 @@ var DEFAULT_CONFIG = {
     max: 10,
     idleTimeoutMillis: 3e4,
     connectionTimeoutMillis: 5e3
+  },
+  retry: {
+    maxAttempts: 3,
+    initialDelayMs: 100,
+    maxDelayMs: 5e3,
+    backoffMultiplier: 2,
+    jitter: true
+  }
+};
+
+// src/debug.ts
+var PREFIX = "[drizzle-multitenant]";
+var DEFAULT_SLOW_QUERY_THRESHOLD = 1e3;
+var DebugLogger = class {
+  enabled;
+  logQueries;
+  logPoolEvents;
+  slowQueryThreshold;
+  logger;
+  constructor(config) {
+    this.enabled = config?.enabled ?? false;
+    this.logQueries = config?.logQueries ?? true;
+    this.logPoolEvents = config?.logPoolEvents ?? true;
+    this.slowQueryThreshold = config?.slowQueryThreshold ?? DEFAULT_SLOW_QUERY_THRESHOLD;
+    this.logger = config?.logger ?? this.defaultLogger;
+  }
+  /**
+   * Check if debug mode is enabled
+   */
+  isEnabled() {
+    return this.enabled;
+  }
+  /**
+   * Log a query execution
+   */
+  logQuery(tenantId, query, durationMs) {
+    if (!this.enabled || !this.logQueries) return;
+    const isSlowQuery = durationMs >= this.slowQueryThreshold;
+    const type = isSlowQuery ? "slow_query" : "query";
+    const context = {
+      type,
+      tenantId,
+      query: this.truncateQuery(query),
+      durationMs
+    };
+    if (isSlowQuery) {
+      this.logger(
+        `${PREFIX} tenant=${tenantId} SLOW_QUERY duration=${durationMs}ms query="${this.truncateQuery(query)}"`,
+        context
+      );
+    } else {
+      this.logger(
+        `${PREFIX} tenant=${tenantId} query="${this.truncateQuery(query)}" duration=${durationMs}ms`,
+        context
+      );
+    }
+  }
+  /**
+   * Log pool creation
+   */
+  logPoolCreated(tenantId, schemaName) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "pool_created",
+      tenantId,
+      schemaName
+    };
+    this.logger(
+      `${PREFIX} tenant=${tenantId} POOL_CREATED schema=${schemaName}`,
+      context
+    );
+  }
+  /**
+   * Log pool eviction
+   */
+  logPoolEvicted(tenantId, schemaName, reason) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "pool_evicted",
+      tenantId,
+      schemaName,
+      metadata: reason ? { reason } : void 0
+    };
+    const reasonStr = reason ? ` reason=${reason}` : "";
+    this.logger(
+      `${PREFIX} tenant=${tenantId} POOL_EVICTED schema=${schemaName}${reasonStr}`,
+      context
+    );
+  }
+  /**
+   * Log pool error
+   */
+  logPoolError(tenantId, error) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "pool_error",
+      tenantId,
+      error: error.message
+    };
+    this.logger(
+      `${PREFIX} tenant=${tenantId} POOL_ERROR error="${error.message}"`,
+      context
+    );
+  }
+  /**
+   * Log warmup event
+   */
+  logWarmup(tenantId, success, durationMs, alreadyWarm) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "warmup",
+      tenantId,
+      durationMs,
+      metadata: { success, alreadyWarm }
+    };
+    const status = alreadyWarm ? "already_warm" : success ? "success" : "failed";
+    this.logger(
+      `${PREFIX} tenant=${tenantId} WARMUP status=${status} duration=${durationMs}ms`,
+      context
+    );
+  }
+  /**
+   * Log connection retry event
+   */
+  logConnectionRetry(identifier, attempt, maxAttempts, error, delayMs) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "connection_retry",
+      tenantId: identifier,
+      error: error.message,
+      metadata: { attempt, maxAttempts, delayMs }
+    };
+    this.logger(
+      `${PREFIX} tenant=${identifier} CONNECTION_RETRY attempt=${attempt}/${maxAttempts} delay=${delayMs}ms error="${error.message}"`,
+      context
+    );
+  }
+  /**
+   * Log connection success after retries
+   */
+  logConnectionSuccess(identifier, attempts, totalTimeMs) {
+    if (!this.enabled || !this.logPoolEvents) return;
+    const context = {
+      type: "pool_created",
+      tenantId: identifier,
+      durationMs: totalTimeMs,
+      metadata: { attempts }
+    };
+    if (attempts > 1) {
+      this.logger(
+        `${PREFIX} tenant=${identifier} CONNECTION_SUCCESS attempts=${attempts} totalTime=${totalTimeMs}ms`,
+        context
+      );
+    }
+  }
+  /**
+   * Log a custom debug message
+   */
+  log(message, context) {
+    if (!this.enabled) return;
+    this.logger(`${PREFIX} ${message}`, context);
+  }
+  /**
+   * Default logger implementation using console
+   */
+  defaultLogger(message, _context) {
+    console.log(message);
+  }
+  /**
+   * Truncate long queries for readability
+   */
+  truncateQuery(query, maxLength = 100) {
+    const normalized = query.replace(/\s+/g, " ").trim();
+    if (normalized.length <= maxLength) {
+      return normalized;
+    }
+    return normalized.substring(0, maxLength - 3) + "...";
   }
 };
+function createDebugLogger(config) {
+  return new DebugLogger(config);
+}
+
+// src/retry.ts
+function isRetryableError(error) {
+  const message = error.message.toLowerCase();
+  if (message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("enotfound") || message.includes("connection refused") || message.includes("connection reset") || message.includes("connection terminated") || message.includes("connection timed out") || message.includes("timeout expired") || message.includes("socket hang up")) {
+    return true;
+  }
+  if (message.includes("too many connections") || message.includes("sorry, too many clients") || message.includes("the database system is starting up") || message.includes("the database system is shutting down") || message.includes("server closed the connection unexpectedly") || message.includes("could not connect to server")) {
+    return true;
+  }
+  if (message.includes("ssl connection") || message.includes("ssl handshake")) {
+    return true;
+  }
+  return false;
+}
+function calculateDelay(attempt, config) {
+  const exponentialDelay = config.initialDelayMs * Math.pow(config.backoffMultiplier, attempt);
+  const cappedDelay = Math.min(exponentialDelay, config.maxDelayMs);
+  if (config.jitter) {
+    const jitterFactor = 1 + Math.random() * 0.25;
+    return Math.floor(cappedDelay * jitterFactor);
+  }
+  return Math.floor(cappedDelay);
+}
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+}
+async function withRetry(operation, config) {
+  const retryConfig = {
+    maxAttempts: config?.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
+    initialDelayMs: config?.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
+    maxDelayMs: config?.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
+    backoffMultiplier: config?.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
+    jitter: config?.jitter ?? DEFAULT_CONFIG.retry.jitter,
+    isRetryable: config?.isRetryable ?? isRetryableError,
+    onRetry: config?.onRetry
+  };
+  const startTime = Date.now();
+  let lastError = null;
+  for (let attempt = 0; attempt < retryConfig.maxAttempts; attempt++) {
+    try {
+      const result = await operation();
+      return {
+        result,
+        attempts: attempt + 1,
+        totalTimeMs: Date.now() - startTime
+      };
+    } catch (error) {
+      lastError = error;
+      const isLastAttempt = attempt >= retryConfig.maxAttempts - 1;
+      if (isLastAttempt || !retryConfig.isRetryable(lastError)) {
+        throw lastError;
+      }
+      const delay = calculateDelay(attempt, retryConfig);
+      retryConfig.onRetry?.(attempt + 1, lastError, delay);
+      await sleep(delay);
+    }
+  }
+  throw lastError ?? new Error("Retry failed with no error");
+}
+function createRetrier(config) {
+  return (operation) => {
+    return withRetry(operation, config);
+  };
+}
 
 // src/pool.ts
 var PoolManager = class {
   constructor(config) {
     this.config = config;
     const maxPools = config.isolation.maxPools ?? DEFAULT_CONFIG.maxPools;
+    this.debugLogger = createDebugLogger(config.debug);
+    const userRetry = config.connection.retry ?? {};
+    this.retryConfig = {
+      maxAttempts: userRetry.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
+      initialDelayMs: userRetry.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
+      maxDelayMs: userRetry.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
+      backoffMultiplier: userRetry.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
+      jitter: userRetry.jitter ?? DEFAULT_CONFIG.retry.jitter,
+      isRetryable: userRetry.isRetryable ?? isRetryableError,
+      onRetry: userRetry.onRetry
+    };
     this.pools = new LRUCache({
       max: maxPools,
       dispose: (entry, key) => {
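`withRetry`, `createRetrier`, `calculateDelay`, and `isRetryableError` are all re-exported from the package entry point (see the updated export list at the end of this diff). With the defaults added above (`initialDelayMs: 100`, `backoffMultiplier: 2`, `maxDelayMs: 5000`), the back-off sequence is 100 ms, 200 ms, 400 ms, and so on, capped at 5000 ms, plus up to 25% jitter. A short usage sketch; the wrapped operation is a placeholder:

```typescript
import { withRetry, isRetryableError } from "drizzle-multitenant";

// Wrap any async operation; errors that isRetryable rejects are rethrown immediately.
const { result, attempts, totalTimeMs } = await withRetry(
  async () => fetchSomethingFlaky(), // hypothetical operation
  {
    maxAttempts: 4,
    initialDelayMs: 100,
    backoffMultiplier: 2,
    maxDelayMs: 5_000,
    jitter: true,
    isRetryable: isRetryableError,
    onRetry: (attempt, error, delayMs) =>
      console.warn(`retry #${attempt} in ${delayMs}ms: ${error.message}`),
  }
);
```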
@@ -70,10 +346,14 @@ var PoolManager = class {
   }
   pools;
   tenantIdBySchema = /* @__PURE__ */ new Map();
+  pendingConnections = /* @__PURE__ */ new Map();
   sharedPool = null;
   sharedDb = null;
+  sharedDbPending = null;
   cleanupInterval = null;
   disposed = false;
+  debugLogger;
+  retryConfig;
   /**
    * Get or create a database connection for a tenant
    */
@@ -85,11 +365,85 @@ var PoolManager = class {
       entry = this.createPoolEntry(tenantId, schemaName);
       this.pools.set(schemaName, entry);
       this.tenantIdBySchema.set(schemaName, tenantId);
+      this.debugLogger.logPoolCreated(tenantId, schemaName);
       void this.config.hooks?.onPoolCreated?.(tenantId);
     }
     entry.lastAccess = Date.now();
     return entry.db;
   }
+  /**
+   * Get or create a database connection for a tenant with retry and validation
+   *
+   * This async version validates the connection by executing a ping query
+   * and retries on transient failures with exponential backoff.
+   *
+   * @example
+   * ```typescript
+   * // Get tenant database with automatic retry
+   * const db = await manager.getDbAsync('tenant-123');
+   *
+   * // Queries will use the validated connection
+   * const users = await db.select().from(users);
+   * ```
+   */
+  async getDbAsync(tenantId) {
+    this.ensureNotDisposed();
+    const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
+    let entry = this.pools.get(schemaName);
+    if (entry) {
+      entry.lastAccess = Date.now();
+      return entry.db;
+    }
+    const pending = this.pendingConnections.get(schemaName);
+    if (pending) {
+      entry = await pending;
+      entry.lastAccess = Date.now();
+      return entry.db;
+    }
+    const connectionPromise = this.connectWithRetry(tenantId, schemaName);
+    this.pendingConnections.set(schemaName, connectionPromise);
+    try {
+      entry = await connectionPromise;
+      this.pools.set(schemaName, entry);
+      this.tenantIdBySchema.set(schemaName, tenantId);
+      this.debugLogger.logPoolCreated(tenantId, schemaName);
+      void this.config.hooks?.onPoolCreated?.(tenantId);
+      entry.lastAccess = Date.now();
+      return entry.db;
+    } finally {
+      this.pendingConnections.delete(schemaName);
+    }
+  }
+  /**
+   * Connect to a tenant database with retry logic
+   */
+  async connectWithRetry(tenantId, schemaName) {
+    const maxAttempts = this.retryConfig.maxAttempts;
+    const result = await withRetry(
+      async () => {
+        const entry = this.createPoolEntry(tenantId, schemaName);
+        try {
+          await entry.pool.query("SELECT 1");
+          return entry;
+        } catch (error) {
+          try {
+            await entry.pool.end();
+          } catch {
+          }
+          throw error;
+        }
+      },
+      {
+        ...this.retryConfig,
+        onRetry: (attempt, error, delayMs) => {
+          this.debugLogger.logConnectionRetry(tenantId, attempt, maxAttempts, error, delayMs);
+          this.retryConfig.onRetry?.(attempt, error, delayMs);
+        }
+      }
+    );
+    this.debugLogger.logConnectionSuccess(tenantId, result.attempts, result.totalTimeMs);
+    return result.result;
+  }
   /**
    * Get or create the shared database connection
    */
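One detail worth noting in `getDbAsync` above: concurrent calls for the same tenant share a single entry in `pendingConnections`, so only one validated pool is created per schema even under a burst of requests. A hedged sketch of what that looks like from the caller's side (the tenant id is a placeholder):

```typescript
// Both calls await the same pending connection; the "SELECT 1" ping and
// retry loop run only once for the schema.
const [db1, db2] = await Promise.all([
  manager.getDbAsync("tenant-123"),
  manager.getDbAsync("tenant-123"),
]);
console.log(db1 === db2); // expected: true (same pooled drizzle instance)
```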
@@ -110,6 +464,78 @@ var PoolManager = class {
     }
     return this.sharedDb;
   }
+  /**
+   * Get or create the shared database connection with retry and validation
+   *
+   * This async version validates the connection by executing a ping query
+   * and retries on transient failures with exponential backoff.
+   *
+   * @example
+   * ```typescript
+   * // Get shared database with automatic retry
+   * const sharedDb = await manager.getSharedDbAsync();
+   *
+   * // Queries will use the validated connection
+   * const plans = await sharedDb.select().from(plans);
+   * ```
+   */
+  async getSharedDbAsync() {
+    this.ensureNotDisposed();
+    if (this.sharedDb) {
+      return this.sharedDb;
+    }
+    if (this.sharedDbPending) {
+      return this.sharedDbPending;
+    }
+    this.sharedDbPending = this.connectSharedWithRetry();
+    try {
+      const db = await this.sharedDbPending;
+      return db;
+    } finally {
+      this.sharedDbPending = null;
+    }
+  }
+  /**
+   * Connect to shared database with retry logic
+   */
+  async connectSharedWithRetry() {
+    const maxAttempts = this.retryConfig.maxAttempts;
+    const result = await withRetry(
+      async () => {
+        const pool = new Pool({
+          connectionString: this.config.connection.url,
+          ...DEFAULT_CONFIG.poolConfig,
+          ...this.config.connection.poolConfig
+        });
+        try {
+          await pool.query("SELECT 1");
+          pool.on("error", (err) => {
+            void this.config.hooks?.onError?.("shared", err);
+          });
+          this.sharedPool = pool;
+          this.sharedDb = drizzle(pool, {
+            schema: this.config.schemas.shared
+          });
+          return this.sharedDb;
+        } catch (error) {
+          try {
+            await pool.end();
+          } catch {
+          }
+          throw error;
+        }
+      },
+      {
+        ...this.retryConfig,
+        onRetry: (attempt, error, delayMs) => {
+          this.debugLogger.logConnectionRetry("shared", attempt, maxAttempts, error, delayMs);
+          this.retryConfig.onRetry?.(attempt, error, delayMs);
+        }
+      }
+    );
+    this.debugLogger.logConnectionSuccess("shared", result.attempts, result.totalTimeMs);
+    return result.result;
+  }
   /**
    * Get schema name for a tenant
    */
@@ -135,13 +561,77 @@ var PoolManager = class {
   getActiveTenantIds() {
     return Array.from(this.tenantIdBySchema.values());
   }
+  /**
+   * Get the retry configuration
+   */
+  getRetryConfig() {
+    return { ...this.retryConfig };
+  }
+  /**
+   * Pre-warm pools for specified tenants to reduce cold start latency
+   *
+   * Uses automatic retry with exponential backoff for connection failures.
+   */
+  async warmup(tenantIds, options = {}) {
+    this.ensureNotDisposed();
+    const startTime = Date.now();
+    const { concurrency = 10, ping = true, onProgress } = options;
+    const results = [];
+    for (let i = 0; i < tenantIds.length; i += concurrency) {
+      const batch = tenantIds.slice(i, i + concurrency);
+      const batchResults = await Promise.all(
+        batch.map(async (tenantId) => {
+          const tenantStart = Date.now();
+          onProgress?.(tenantId, "starting");
+          try {
+            const alreadyWarm = this.hasPool(tenantId);
+            if (ping) {
+              await this.getDbAsync(tenantId);
+            } else {
+              this.getDb(tenantId);
+            }
+            const durationMs = Date.now() - tenantStart;
+            onProgress?.(tenantId, "completed");
+            this.debugLogger.logWarmup(tenantId, true, durationMs, alreadyWarm);
+            return {
+              tenantId,
+              success: true,
+              alreadyWarm,
+              durationMs
+            };
+          } catch (error) {
+            const durationMs = Date.now() - tenantStart;
+            onProgress?.(tenantId, "failed");
+            this.debugLogger.logWarmup(tenantId, false, durationMs, false);
+            return {
+              tenantId,
+              success: false,
+              alreadyWarm: false,
+              durationMs,
+              error: error.message
+            };
+          }
+        })
+      );
+      results.push(...batchResults);
+    }
+    return {
+      total: results.length,
+      succeeded: results.filter((r) => r.success).length,
+      failed: results.filter((r) => !r.success).length,
+      alreadyWarm: results.filter((r) => r.alreadyWarm).length,
+      durationMs: Date.now() - startTime,
+      details: results
+    };
+  }
   /**
    * Manually evict a tenant pool
    */
-  async evictPool(tenantId) {
+  async evictPool(tenantId, reason = "manual") {
     const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
     const entry = this.pools.get(schemaName);
     if (entry) {
+      this.debugLogger.logPoolEvicted(tenantId, schemaName, reason);
       this.pools.delete(schemaName);
       this.tenantIdBySchema.delete(schemaName);
       await this.closePool(entry.pool, tenantId);
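The `warmup` method above processes tenants in batches of `concurrency` and reuses `getDbAsync` when `ping` is enabled, so every pre-warmed pool is validated with the retry logic. A usage sketch (the tenant ids are placeholders):

```typescript
const report = await manager.warmup(["tenant-1", "tenant-2", "tenant-3"], {
  concurrency: 5,
  ping: true, // validate each pool with a "SELECT 1" round-trip
  onProgress: (tenantId, status) => console.log(tenantId, status),
});
console.log(report.succeeded, report.failed, report.alreadyWarm, report.durationMs);
```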
@@ -200,8 +690,9 @@ var PoolManager = class {
       options: `-c search_path=${schemaName},public`
     });
     pool.on("error", async (err) => {
+      this.debugLogger.logPoolError(tenantId, err);
       void this.config.hooks?.onError?.(tenantId, err);
-      await this.evictPool(tenantId);
+      await this.evictPool(tenantId, "error");
     });
     const db = drizzle(pool, {
       schema: this.config.schemas.tenant
@@ -219,6 +710,9 @@ var PoolManager = class {
   disposePoolEntry(entry, schemaName) {
     const tenantId = this.tenantIdBySchema.get(schemaName);
     this.tenantIdBySchema.delete(schemaName);
+    if (tenantId) {
+      this.debugLogger.logPoolEvicted(tenantId, schemaName, "lru_eviction");
+    }
     void this.closePool(entry.pool, tenantId ?? schemaName).then(() => {
       if (tenantId) {
         void this.config.hooks?.onPoolEvicted?.(tenantId);
@@ -249,7 +743,7 @@ var PoolManager = class {
     for (const schemaName of toEvict) {
       const tenantId = this.tenantIdBySchema.get(schemaName);
       if (tenantId) {
-        await this.evictPool(tenantId);
+        await this.evictPool(tenantId, "ttl_expired");
       }
     }
   }
@@ -271,9 +765,15 @@ function createTenantManager(config) {
     getDb(tenantId) {
       return poolManager.getDb(tenantId);
     },
+    async getDbAsync(tenantId) {
+      return poolManager.getDbAsync(tenantId);
+    },
     getSharedDb() {
       return poolManager.getSharedDb();
     },
+    async getSharedDbAsync() {
+      return poolManager.getSharedDbAsync();
+    },
     getSchemaName(tenantId) {
       return poolManager.getSchemaName(tenantId);
     },
@@ -286,9 +786,15 @@ function createTenantManager(config) {
     getActiveTenantIds() {
       return poolManager.getActiveTenantIds();
     },
+    getRetryConfig() {
+      return poolManager.getRetryConfig();
+    },
     async evictPool(tenantId) {
       await poolManager.evictPool(tenantId);
     },
+    async warmup(tenantIds, options) {
+      return poolManager.warmup(tenantIds, options);
+    },
     async dispose() {
       await poolManager.dispose();
     }
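`createTenantManager` now forwards the async, retry-aware variants alongside the existing synchronous accessors. A sketch of the expanded surface (configuration construction omitted; the tenant-id source is a placeholder):

```typescript
import { createTenantManager } from "drizzle-multitenant";

const manager = createTenantManager(config); // config as defined earlier

const tenantDb = await manager.getDbAsync("tenant-123"); // validated + retried
const sharedDb = await manager.getSharedDbAsync();
console.log(manager.getRetryConfig()); // effective retry settings after defaults
await manager.warmup(await listTenantIds()); // hypothetical discovery helper
await manager.dispose();
```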
@@ -696,57 +1202,423 @@ var Migrator = class {
     }
   }
   /**
-   *
+   * Mark migrations as applied without executing SQL
+   * Useful for syncing tracking state with already-applied migrations
    */
-  async
-  const
-  const
-
-
-
-
-  const
-
-  const
-
-
-
-
-
-
-
+  async markAsApplied(tenantId, options = {}) {
+    const startTime = Date.now();
+    const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
+    const markedMigrations = [];
+    const pool = await this.createPool(schemaName);
+    try {
+      await this.migratorConfig.hooks?.beforeTenant?.(tenantId);
+      const format = await this.getOrDetectFormat(pool, schemaName);
+      await this.ensureMigrationsTable(pool, schemaName, format);
+      const allMigrations = await this.loadMigrations();
+      const applied = await this.getAppliedMigrations(pool, schemaName, format);
+      const appliedSet = new Set(applied.map((m) => m.identifier));
+      const pending = allMigrations.filter(
+        (m) => !this.isMigrationApplied(m, appliedSet, format)
+      );
+      for (const migration of pending) {
+        const migrationStart = Date.now();
+        options.onProgress?.(tenantId, "migrating", migration.name);
+        await this.migratorConfig.hooks?.beforeMigration?.(tenantId, migration.name);
+        await this.recordMigration(pool, schemaName, migration, format);
+        await this.migratorConfig.hooks?.afterMigration?.(
+          tenantId,
+          migration.name,
+          Date.now() - migrationStart
+        );
+        markedMigrations.push(migration.name);
+      }
+      const result = {
+        tenantId,
+        schemaName,
+        success: true,
+        appliedMigrations: markedMigrations,
+        durationMs: Date.now() - startTime,
+        format: format.format
+      };
+      await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
+      return result;
+    } catch (error) {
+      const result = {
+        tenantId,
+        schemaName,
+        success: false,
+        appliedMigrations: markedMigrations,
+        error: error.message,
+        durationMs: Date.now() - startTime
+      };
+      await this.migratorConfig.hooks?.afterTenant?.(tenantId, result);
+      return result;
+    } finally {
+      await pool.end();
     }
-    return migrations.sort((a, b) => a.timestamp - b.timestamp);
-  }
-  /**
-   * Create a pool for a specific schema
-   */
-  async createPool(schemaName) {
-    return new Pool({
-      connectionString: this.tenantConfig.connection.url,
-      ...this.tenantConfig.connection.poolConfig,
-      options: `-c search_path="${schemaName}",public`
-    });
-  }
-  /**
-   * Ensure migrations table exists with the correct format
-   */
-  async ensureMigrationsTable(pool, schemaName, format) {
-    const { identifier, timestamp, timestampType } = format.columns;
-    const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
-    const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
-    await pool.query(`
-      CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
-        id SERIAL PRIMARY KEY,
-        ${identifierCol},
-        ${timestampCol}
-      )
-    `);
   }
   /**
-   *
+   * Mark migrations as applied for all tenants without executing SQL
+   * Useful for syncing tracking state with already-applied migrations
    */
-  async
+  async markAllAsApplied(options = {}) {
+    const {
+      concurrency = 10,
+      onProgress,
+      onError
+    } = options;
+    const tenantIds = await this.migratorConfig.tenantDiscovery();
+    const results = [];
+    let aborted = false;
+    for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
+      const batch = tenantIds.slice(i, i + concurrency);
+      const batchResults = await Promise.all(
+        batch.map(async (tenantId) => {
+          if (aborted) {
+            return this.createSkippedResult(tenantId);
+          }
+          try {
+            onProgress?.(tenantId, "starting");
+            const result = await this.markAsApplied(tenantId, { onProgress });
+            onProgress?.(tenantId, result.success ? "completed" : "failed");
+            return result;
+          } catch (error) {
+            onProgress?.(tenantId, "failed");
+            const action = onError?.(tenantId, error);
+            if (action === "abort") {
+              aborted = true;
+            }
+            return this.createErrorResult(tenantId, error);
+          }
+        })
+      );
+      results.push(...batchResults);
+    }
+    if (aborted) {
+      const remaining = tenantIds.slice(results.length);
+      for (const tenantId of remaining) {
+        results.push(this.createSkippedResult(tenantId));
+      }
+    }
+    return this.aggregateResults(results);
+  }
+  /**
+   * Get sync status for all tenants
+   * Detects divergences between migrations on disk and tracking in database
+   */
+  async getSyncStatus() {
+    const tenantIds = await this.migratorConfig.tenantDiscovery();
+    const migrations = await this.loadMigrations();
+    const statuses = [];
+    for (const tenantId of tenantIds) {
+      statuses.push(await this.getTenantSyncStatus(tenantId, migrations));
+    }
+    return {
+      total: statuses.length,
+      inSync: statuses.filter((s) => s.inSync && !s.error).length,
+      outOfSync: statuses.filter((s) => !s.inSync && !s.error).length,
+      error: statuses.filter((s) => !!s.error).length,
+      details: statuses
+    };
+  }
+  /**
+   * Get sync status for a specific tenant
+   */
+  async getTenantSyncStatus(tenantId, migrations) {
+    const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
+    const pool = await this.createPool(schemaName);
+    try {
+      const allMigrations = migrations ?? await this.loadMigrations();
+      const migrationNames = new Set(allMigrations.map((m) => m.name));
+      const migrationHashes = new Set(allMigrations.map((m) => m.hash));
+      const tableExists = await this.migrationsTableExists(pool, schemaName);
+      if (!tableExists) {
+        return {
+          tenantId,
+          schemaName,
+          missing: allMigrations.map((m) => m.name),
+          orphans: [],
+          inSync: allMigrations.length === 0,
+          format: null
+        };
+      }
+      const format = await this.getOrDetectFormat(pool, schemaName);
+      const applied = await this.getAppliedMigrations(pool, schemaName, format);
+      const appliedIdentifiers = new Set(applied.map((m) => m.identifier));
+      const missing = allMigrations.filter((m) => !this.isMigrationApplied(m, appliedIdentifiers, format)).map((m) => m.name);
+      const orphans = applied.filter((m) => {
+        if (format.columns.identifier === "name") {
+          return !migrationNames.has(m.identifier);
+        }
+        return !migrationHashes.has(m.identifier) && !migrationNames.has(m.identifier);
+      }).map((m) => m.identifier);
+      return {
+        tenantId,
+        schemaName,
+        missing,
+        orphans,
+        inSync: missing.length === 0 && orphans.length === 0,
+        format: format.format
+      };
+    } catch (error) {
+      return {
+        tenantId,
+        schemaName,
+        missing: [],
+        orphans: [],
+        inSync: false,
+        format: null,
+        error: error.message
+      };
+    } finally {
+      await pool.end();
+    }
+  }
+  /**
+   * Mark missing migrations as applied for a tenant
+   */
+  async markMissing(tenantId) {
+    const startTime = Date.now();
+    const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
+    const markedMigrations = [];
+    const pool = await this.createPool(schemaName);
+    try {
+      const syncStatus = await this.getTenantSyncStatus(tenantId);
+      if (syncStatus.error) {
+        return {
+          tenantId,
+          schemaName,
+          success: false,
+          markedMigrations: [],
+          removedOrphans: [],
+          error: syncStatus.error,
+          durationMs: Date.now() - startTime
+        };
+      }
+      if (syncStatus.missing.length === 0) {
+        return {
+          tenantId,
+          schemaName,
+          success: true,
+          markedMigrations: [],
+          removedOrphans: [],
+          durationMs: Date.now() - startTime
+        };
+      }
+      const format = await this.getOrDetectFormat(pool, schemaName);
+      await this.ensureMigrationsTable(pool, schemaName, format);
+      const allMigrations = await this.loadMigrations();
+      const missingSet = new Set(syncStatus.missing);
+      for (const migration of allMigrations) {
+        if (missingSet.has(migration.name)) {
+          await this.recordMigration(pool, schemaName, migration, format);
+          markedMigrations.push(migration.name);
+        }
+      }
+      return {
+        tenantId,
+        schemaName,
+        success: true,
+        markedMigrations,
+        removedOrphans: [],
+        durationMs: Date.now() - startTime
+      };
+    } catch (error) {
+      return {
+        tenantId,
+        schemaName,
+        success: false,
+        markedMigrations,
+        removedOrphans: [],
+        error: error.message,
+        durationMs: Date.now() - startTime
+      };
+    } finally {
+      await pool.end();
+    }
+  }
+  /**
+   * Mark missing migrations as applied for all tenants
+   */
+  async markAllMissing(options = {}) {
+    const { concurrency = 10, onProgress, onError } = options;
+    const tenantIds = await this.migratorConfig.tenantDiscovery();
+    const results = [];
+    let aborted = false;
+    for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
+      const batch = tenantIds.slice(i, i + concurrency);
+      const batchResults = await Promise.all(
+        batch.map(async (tenantId) => {
+          if (aborted) {
+            return this.createSkippedSyncResult(tenantId);
+          }
+          try {
+            onProgress?.(tenantId, "starting");
+            const result = await this.markMissing(tenantId);
+            onProgress?.(tenantId, result.success ? "completed" : "failed");
+            return result;
+          } catch (error) {
+            onProgress?.(tenantId, "failed");
+            const action = onError?.(tenantId, error);
+            if (action === "abort") {
+              aborted = true;
+            }
+            return this.createErrorSyncResult(tenantId, error);
+          }
+        })
+      );
+      results.push(...batchResults);
+    }
+    return this.aggregateSyncResults(results);
+  }
+  /**
+   * Remove orphan migration records for a tenant
+   */
+  async cleanOrphans(tenantId) {
+    const startTime = Date.now();
+    const schemaName = this.tenantConfig.isolation.schemaNameTemplate(tenantId);
+    const removedOrphans = [];
+    const pool = await this.createPool(schemaName);
+    try {
+      const syncStatus = await this.getTenantSyncStatus(tenantId);
+      if (syncStatus.error) {
+        return {
+          tenantId,
+          schemaName,
+          success: false,
+          markedMigrations: [],
+          removedOrphans: [],
+          error: syncStatus.error,
+          durationMs: Date.now() - startTime
+        };
+      }
+      if (syncStatus.orphans.length === 0) {
+        return {
+          tenantId,
+          schemaName,
+          success: true,
+          markedMigrations: [],
+          removedOrphans: [],
+          durationMs: Date.now() - startTime
+        };
+      }
+      const format = await this.getOrDetectFormat(pool, schemaName);
+      const identifierColumn = format.columns.identifier;
+      for (const orphan of syncStatus.orphans) {
+        await pool.query(
+          `DELETE FROM "${schemaName}"."${format.tableName}" WHERE "${identifierColumn}" = $1`,
+          [orphan]
+        );
+        removedOrphans.push(orphan);
+      }
+      return {
+        tenantId,
+        schemaName,
+        success: true,
+        markedMigrations: [],
+        removedOrphans,
+        durationMs: Date.now() - startTime
+      };
+    } catch (error) {
+      return {
+        tenantId,
+        schemaName,
+        success: false,
+        markedMigrations: [],
+        removedOrphans,
+        error: error.message,
+        durationMs: Date.now() - startTime
+      };
+    } finally {
+      await pool.end();
+    }
+  }
+  /**
+   * Remove orphan migration records for all tenants
+   */
+  async cleanAllOrphans(options = {}) {
+    const { concurrency = 10, onProgress, onError } = options;
+    const tenantIds = await this.migratorConfig.tenantDiscovery();
+    const results = [];
+    let aborted = false;
+    for (let i = 0; i < tenantIds.length && !aborted; i += concurrency) {
+      const batch = tenantIds.slice(i, i + concurrency);
+      const batchResults = await Promise.all(
+        batch.map(async (tenantId) => {
+          if (aborted) {
+            return this.createSkippedSyncResult(tenantId);
+          }
+          try {
+            onProgress?.(tenantId, "starting");
+            const result = await this.cleanOrphans(tenantId);
+            onProgress?.(tenantId, result.success ? "completed" : "failed");
+            return result;
+          } catch (error) {
+            onProgress?.(tenantId, "failed");
+            const action = onError?.(tenantId, error);
+            if (action === "abort") {
+              aborted = true;
+            }
+            return this.createErrorSyncResult(tenantId, error);
+          }
+        })
+      );
+      results.push(...batchResults);
+    }
+    return this.aggregateSyncResults(results);
+  }
+  /**
+   * Load migration files from the migrations folder
+   */
+  async loadMigrations() {
+    const files = await readdir(this.migratorConfig.migrationsFolder);
+    const migrations = [];
+    for (const file of files) {
+      if (!file.endsWith(".sql")) continue;
+      const filePath = join(this.migratorConfig.migrationsFolder, file);
+      const content = await readFile(filePath, "utf-8");
+      const match = file.match(/^(\d+)_/);
+      const timestamp = match?.[1] ? parseInt(match[1], 10) : 0;
+      const hash = createHash("sha256").update(content).digest("hex");
+      migrations.push({
+        name: basename(file, ".sql"),
+        path: filePath,
+        sql: content,
+        timestamp,
+        hash
+      });
+    }
+    return migrations.sort((a, b) => a.timestamp - b.timestamp);
+  }
+  /**
+   * Create a pool for a specific schema
+   */
+  async createPool(schemaName) {
+    return new Pool({
+      connectionString: this.tenantConfig.connection.url,
+      ...this.tenantConfig.connection.poolConfig,
+      options: `-c search_path="${schemaName}",public`
+    });
+  }
+  /**
+   * Ensure migrations table exists with the correct format
+   */
+  async ensureMigrationsTable(pool, schemaName, format) {
+    const { identifier, timestamp, timestampType } = format.columns;
+    const identifierCol = identifier === "name" ? "name VARCHAR(255) NOT NULL UNIQUE" : "hash TEXT NOT NULL";
+    const timestampCol = timestampType === "bigint" ? `${timestamp} BIGINT NOT NULL` : `${timestamp} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;
+    await pool.query(`
+      CREATE TABLE IF NOT EXISTS "${schemaName}"."${format.tableName}" (
+        id SERIAL PRIMARY KEY,
+        ${identifierCol},
+        ${timestampCol}
+      )
+    `);
+  }
+  /**
+   * Check if migrations table exists
+   */
+  async migrationsTableExists(pool, schemaName) {
     const result = await pool.query(
       `SELECT 1 FROM information_schema.tables
        WHERE table_schema = $1 AND table_name = $2`,
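Taken together, the methods above give the migrator a reconciliation workflow: inspect divergences with `getSyncStatus`, then either record missing migrations (`markAsApplied`, `markAllAsApplied`, `markAllMissing`) or delete orphan tracking rows (`cleanOrphans`, `cleanAllOrphans`), all without executing migration SQL. A hedged sketch using `createMigrator`; the discovery callback and folder path are placeholders:

```typescript
import { createMigrator } from "drizzle-multitenant";

const migrator = createMigrator(config, {
  migrationsFolder: "./drizzle/tenant",
  tenantDiscovery: async () => ["tenant-1", "tenant-2"], // placeholder
});

const status = await migrator.getSyncStatus();
if (status.outOfSync > 0) {
  // Record migrations that already exist in the schemas but are untracked.
  await migrator.markAllAsApplied({ concurrency: 5 });
  // Drop tracking rows that no longer correspond to a file on disk.
  await migrator.cleanAllOrphans({ onError: () => "abort" });
}
```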
@@ -824,6 +1696,19 @@ var Migrator = class {
       client.release();
     }
   }
+  /**
+   * Record a migration as applied without executing SQL
+   * Used by markAsApplied to sync tracking state
+   */
+  async recordMigration(pool, schemaName, migration, format) {
+    const { identifier, timestamp, timestampType } = format.columns;
+    const identifierValue = identifier === "name" ? migration.name : migration.hash;
+    const timestampValue = timestampType === "bigint" ? Date.now() : /* @__PURE__ */ new Date();
+    await pool.query(
+      `INSERT INTO "${schemaName}"."${format.tableName}" ("${identifier}", "${timestamp}") VALUES ($1, $2)`,
+      [identifierValue, timestampValue]
+    );
+  }
   /**
    * Create a skipped result
    */
@@ -862,6 +1747,45 @@ var Migrator = class {
       details: results
     };
   }
+  /**
+   * Create a skipped sync result
+   */
+  createSkippedSyncResult(tenantId) {
+    return {
+      tenantId,
+      schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
+      success: false,
+      markedMigrations: [],
+      removedOrphans: [],
+      error: "Skipped due to abort",
+      durationMs: 0
+    };
+  }
+  /**
+   * Create an error sync result
+   */
+  createErrorSyncResult(tenantId, error) {
+    return {
+      tenantId,
+      schemaName: this.tenantConfig.isolation.schemaNameTemplate(tenantId),
+      success: false,
+      markedMigrations: [],
+      removedOrphans: [],
+      error: error.message,
+      durationMs: 0
+    };
+  }
+  /**
+   * Aggregate sync results
+   */
+  aggregateSyncResults(results) {
+    return {
+      total: results.length,
+      succeeded: results.filter((r) => r.success).length,
+      failed: results.filter((r) => !r.success).length,
+      details: results
+    };
+  }
 };
 function createMigrator(tenantConfig, migratorConfig) {
   return new Migrator(tenantConfig, migratorConfig);
@@ -1078,7 +2002,214 @@ function buildCrossSchemaSelect(fields, tenantSchema, _sharedSchema) {
   };
   return { columns, getSchema };
 }
+function extractTablesFromSchema(schema) {
+  const tables = /* @__PURE__ */ new Set();
+  for (const value of Object.values(schema)) {
+    if (value && typeof value === "object" && "_" in value) {
+      const branded = value;
+      if (branded._?.brand === "Table") {
+        tables.add(value);
+      }
+    }
+  }
+  return tables;
+}
+function isSharedTable(table, sharedTables) {
+  return sharedTables.has(table);
+}
+var WithSharedQueryBuilder = class {
+  constructor(tenantDb, sharedTables, tenantSchemaName, sharedSchemaName = "public") {
+    this.tenantDb = tenantDb;
+    this.sharedTables = sharedTables;
+    this.tenantSchemaName = tenantSchemaName;
+    this.sharedSchemaName = sharedSchemaName;
+  }
+  fromTable = null;
+  joins = [];
+  selectFields = {};
+  whereCondition = null;
+  orderByFields = [];
+  limitValue = null;
+  offsetValue = null;
+  /**
+   * Set the main table to query from
+   * Automatically detects if it's a tenant or shared table
+   */
+  from(table) {
+    const isShared = isSharedTable(table, this.sharedTables);
+    this.fromTable = {
+      table,
+      isShared,
+      schemaName: isShared ? this.sharedSchemaName : this.tenantSchemaName
+    };
+    return this;
+  }
+  /**
+   * Add a left join with automatic schema detection
+   */
+  leftJoin(table, condition) {
+    return this.addJoin(table, condition, "left");
+  }
+  /**
+   * Add an inner join with automatic schema detection
+   */
+  innerJoin(table, condition) {
+    return this.addJoin(table, condition, "inner");
+  }
+  /**
+   * Add a right join with automatic schema detection
+   */
+  rightJoin(table, condition) {
+    return this.addJoin(table, condition, "right");
+  }
+  /**
+   * Add a full outer join with automatic schema detection
+   */
+  fullJoin(table, condition) {
+    return this.addJoin(table, condition, "full");
+  }
+  /**
+   * Select specific fields
+   */
+  select(fields) {
+    this.selectFields = fields;
+    return this;
+  }
+  /**
+   * Add a WHERE condition
+   */
+  where(condition) {
+    this.whereCondition = condition;
+    return this;
+  }
+  /**
+   * Add ORDER BY
+   */
+  orderBy(...fields) {
+    this.orderByFields = fields;
+    return this;
+  }
+  /**
+   * Set LIMIT
+   */
+  limit(value) {
+    this.limitValue = value;
+    return this;
+  }
+  /**
+   * Set OFFSET
+   */
+  offset(value) {
+    this.offsetValue = value;
+    return this;
+  }
+  /**
+   * Execute the query and return typed results
+   */
+  async execute() {
+    if (!this.fromTable) {
+      throw new Error("[drizzle-multitenant] No table specified. Use .from() first.");
+    }
+    const sqlQuery = this.buildSql();
+    const result = await this.tenantDb.execute(sqlQuery);
+    return result.rows;
+  }
+  /**
+   * Add a join to the query
+   */
+  addJoin(table, condition, type) {
+    const isShared = isSharedTable(table, this.sharedTables);
+    this.joins.push({
+      table,
+      isShared,
+      schemaName: isShared ? this.sharedSchemaName : this.tenantSchemaName,
+      condition,
+      type
+    });
+    return this;
+  }
+  /**
+   * Build the SQL query
+   */
+  buildSql() {
+    if (!this.fromTable) {
+      throw new Error("[drizzle-multitenant] No table specified");
+    }
+    const parts = [];
+    const selectParts = Object.entries(this.selectFields).map(([alias, column]) => {
+      const columnName = column.name;
+      const tableName = this.getTableAliasForColumn(column);
+      if (tableName) {
+        return sql`${sql.raw(`"${tableName}"."${columnName}"`)} as ${sql.raw(`"${alias}"`)}`;
+      }
+      return sql`${sql.raw(`"${columnName}"`)} as ${sql.raw(`"${alias}"`)}`;
+    });
+    if (selectParts.length === 0) {
+      parts.push(sql`SELECT *`);
+    } else {
+      parts.push(sql`SELECT ${sql.join(selectParts, sql`, `)}`);
+    }
+    const fromTableName = getTableName(this.fromTable.table);
+    const fromTableRef = `"${this.fromTable.schemaName}"."${fromTableName}"`;
+    parts.push(sql` FROM ${sql.raw(fromTableRef)} "${sql.raw(fromTableName)}"`);
+    for (const join2 of this.joins) {
+      const joinTableName = getTableName(join2.table);
+      const joinTableRef = `"${join2.schemaName}"."${joinTableName}"`;
+      const joinKeyword = this.getJoinKeyword(join2.type);
+      parts.push(
+        sql` ${sql.raw(joinKeyword)} ${sql.raw(joinTableRef)} "${sql.raw(joinTableName)}" ON ${join2.condition}`
+      );
+    }
+    if (this.whereCondition) {
+      parts.push(sql` WHERE ${this.whereCondition}`);
+    }
+    if (this.orderByFields.length > 0) {
+      parts.push(sql` ORDER BY ${sql.join(this.orderByFields, sql`, `)}`);
+    }
+    if (this.limitValue !== null) {
+      parts.push(sql` LIMIT ${sql.raw(this.limitValue.toString())}`);
+    }
+    if (this.offsetValue !== null) {
+      parts.push(sql` OFFSET ${sql.raw(this.offsetValue.toString())}`);
+    }
+    return sql.join(parts, sql``);
+  }
+  /**
+   * Get table alias for a column (used in SELECT)
+   */
+  getTableAliasForColumn(column) {
+    const columnTable = column.table;
+    if (columnTable) {
+      return getTableName(columnTable);
+    }
+    return null;
+  }
+  /**
+   * Get SQL keyword for join type
+   */
+  getJoinKeyword(type) {
+    switch (type) {
+      case "inner":
+        return "INNER JOIN";
+      case "left":
+        return "LEFT JOIN";
+      case "right":
+        return "RIGHT JOIN";
+      case "full":
+        return "FULL OUTER JOIN";
+    }
+  }
+};
+function withShared(tenantDb, _sharedDb, schemas, options) {
+  const sharedTables = extractTablesFromSchema(schemas.shared);
+  return new WithSharedQueryBuilder(
+    tenantDb,
+    sharedTables,
+    options?.tenantSchema ?? "tenant",
+    options?.sharedSchema ?? "public"
+  );
+}
 
-export { CrossSchemaQueryBuilder, DEFAULT_CONFIG, Migrator, buildCrossSchemaSelect, createCrossSchemaQuery, createMigrator, createTenantContext, createTenantManager, crossSchemaRaw, defineConfig, withSharedLookup };
+export { CrossSchemaQueryBuilder, DEFAULT_CONFIG, Migrator, WithSharedQueryBuilder, buildCrossSchemaSelect, calculateDelay, createCrossSchemaQuery, createMigrator, createRetrier, createTenantContext, createTenantManager, crossSchemaRaw, defineConfig, isRetryableError, withRetry, withShared, withSharedLookup };
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map