drizzle-multitenant 1.1.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -8
- package/dist/cli/index.js +2001 -2442
- package/dist/{context-Vki959ri.d.ts → context-BBLPNjmk.d.ts} +1 -1
- package/dist/cross-schema/index.js +1 -426
- package/dist/export/index.d.ts +395 -0
- package/dist/export/index.js +9 -0
- package/dist/index.d.ts +5 -4
- package/dist/index.js +149 -2437
- package/dist/integrations/express.d.ts +3 -3
- package/dist/integrations/express.js +1 -110
- package/dist/integrations/fastify.d.ts +3 -3
- package/dist/integrations/fastify.js +1 -236
- package/dist/integrations/hono.js +0 -3
- package/dist/integrations/nestjs/index.d.ts +1 -1
- package/dist/integrations/nestjs/index.js +3 -10759
- package/dist/lint/index.d.ts +475 -0
- package/dist/lint/index.js +5 -0
- package/dist/metrics/index.d.ts +530 -0
- package/dist/metrics/index.js +3 -0
- package/dist/migrator/index.d.ts +1087 -270
- package/dist/migrator/index.js +149 -970
- package/dist/migrator-B7oPKe73.d.ts +1067 -0
- package/dist/scaffold/index.d.ts +330 -0
- package/dist/scaffold/index.js +277 -0
- package/dist/{types-BhK96FPC.d.ts → types-CGqsPe2Q.d.ts} +49 -1
- package/package.json +18 -1
- package/dist/cli/index.js.map +0 -1
- package/dist/cross-schema/index.js.map +0 -1
- package/dist/index.js.map +0 -1
- package/dist/integrations/express.js.map +0 -1
- package/dist/integrations/fastify.js.map +0 -1
- package/dist/integrations/hono.js.map +0 -1
- package/dist/integrations/nestjs/index.js.map +0 -1
- package/dist/migrator/index.js.map +0 -1
package/dist/index.js
CHANGED
@@ -1,2442 +1,154 @@
-import { Pool } from 'pg';
-import { drizzle } from 'drizzle-orm/node-postgres';
-import { LRUCache } from 'lru-cache';
-import { AsyncLocalStorage } from 'async_hooks';
-import { readdir, readFile } from 'fs/promises';
-import { join, basename } from 'path';
-import { createHash } from 'crypto';
-import { sql, getTableName } from 'drizzle-orm';
-
-// src/config.ts
-function defineConfig(config) {
-  validateConfig(config);
-  return config;
-}
-function validateConfig(config) {
-  if (!config.connection.url) {
-    throw new Error("[drizzle-multitenant] connection.url is required");
-  }
-  if (!config.isolation.strategy) {
-    throw new Error("[drizzle-multitenant] isolation.strategy is required");
-  }
-  if (config.isolation.strategy !== "schema") {
-    throw new Error(
-      `[drizzle-multitenant] isolation.strategy "${config.isolation.strategy}" is not yet supported. Only "schema" is currently available.`
-    );
-  }
-  if (!config.isolation.schemaNameTemplate) {
-    throw new Error("[drizzle-multitenant] isolation.schemaNameTemplate is required");
-  }
-  if (typeof config.isolation.schemaNameTemplate !== "function") {
-    throw new Error("[drizzle-multitenant] isolation.schemaNameTemplate must be a function");
-  }
-  if (!config.schemas.tenant) {
-    throw new Error("[drizzle-multitenant] schemas.tenant is required");
-  }
-  if (config.isolation.maxPools !== void 0 && config.isolation.maxPools < 1) {
-    throw new Error("[drizzle-multitenant] isolation.maxPools must be at least 1");
-  }
-  if (config.isolation.poolTtlMs !== void 0 && config.isolation.poolTtlMs < 0) {
-    throw new Error("[drizzle-multitenant] isolation.poolTtlMs must be non-negative");
-  }
-  if (config.connection.retry) {
-    const retry = config.connection.retry;
-    if (retry.maxAttempts !== void 0 && retry.maxAttempts < 1) {
-      throw new Error("[drizzle-multitenant] connection.retry.maxAttempts must be at least 1");
-    }
-    if (retry.initialDelayMs !== void 0 && retry.initialDelayMs < 0) {
-      throw new Error("[drizzle-multitenant] connection.retry.initialDelayMs must be non-negative");
-    }
-    if (retry.maxDelayMs !== void 0 && retry.maxDelayMs < 0) {
-      throw new Error("[drizzle-multitenant] connection.retry.maxDelayMs must be non-negative");
-    }
-    if (retry.backoffMultiplier !== void 0 && retry.backoffMultiplier < 1) {
-      throw new Error("[drizzle-multitenant] connection.retry.backoffMultiplier must be at least 1");
-    }
-    if (retry.initialDelayMs !== void 0 && retry.maxDelayMs !== void 0 && retry.initialDelayMs > retry.maxDelayMs) {
-      throw new Error(
-        "[drizzle-multitenant] connection.retry.initialDelayMs cannot be greater than maxDelayMs"
-      );
-    }
-  }
-}
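
The validation above pins down the minimal config shape: a `connection.url`, a `"schema"` isolation strategy with a `schemaNameTemplate` function, and a `schemas.tenant` module; `maxPools`, `poolTtlMs`, and the `connection.retry` bounds are optional. A minimal sketch, assuming `defineConfig` is exported from the package root and that the schema module paths are your own (hypothetical):

```ts
// Hypothetical usage sketch based on the validation rules shown above.
import { defineConfig } from 'drizzle-multitenant';
// Your own Drizzle schema modules (hypothetical paths).
import * as tenantSchema from './db/tenant-schema';
import * as sharedSchema from './db/shared-schema';

export default defineConfig({
  connection: {
    url: process.env.DATABASE_URL!,                          // required
    retry: { maxAttempts: 5, initialDelayMs: 200, maxDelayMs: 2000 },
  },
  isolation: {
    strategy: 'schema',                                      // only "schema" is accepted
    schemaNameTemplate: (tenantId: string) => `tenant_${tenantId}`, // must be a function
    maxPools: 100,                                           // must be >= 1 if set
    poolTtlMs: 30 * 60 * 1000,                               // must be >= 0 if set
  },
  schemas: {
    tenant: tenantSchema,                                    // required
    shared: sharedSchema,
  },
});
```

Violating any of these constraints throws synchronously from `defineConfig`, before any pool is created.
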
-
-// src/types.ts
-var DEFAULT_CONFIG = {
-  maxPools: 50,
-  poolTtlMs: 60 * 60 * 1e3,
-  // 1 hour
-  cleanupIntervalMs: 6e4,
-  // 1 minute
-  poolConfig: {
-    max: 10,
-    idleTimeoutMillis: 3e4,
-    connectionTimeoutMillis: 5e3
-  },
-  retry: {
-    maxAttempts: 3,
-    initialDelayMs: 100,
-    maxDelayMs: 5e3,
-    backoffMultiplier: 2,
-    jitter: true
-  }
-};
-
-// src/debug.ts
-var PREFIX = "[drizzle-multitenant]";
-var DEFAULT_SLOW_QUERY_THRESHOLD = 1e3;
-var DebugLogger = class {
-  enabled;
-  logQueries;
-  logPoolEvents;
-  slowQueryThreshold;
-  logger;
-  constructor(config) {
-    this.enabled = config?.enabled ?? false;
-    this.logQueries = config?.logQueries ?? true;
-    this.logPoolEvents = config?.logPoolEvents ?? true;
-    this.slowQueryThreshold = config?.slowQueryThreshold ?? DEFAULT_SLOW_QUERY_THRESHOLD;
-    this.logger = config?.logger ?? this.defaultLogger;
-  }
-  /**
-   * Check if debug mode is enabled
-   */
-  isEnabled() {
-    return this.enabled;
-  }
-  /**
-   * Log a query execution
-   */
-  logQuery(tenantId, query, durationMs) {
-    if (!this.enabled || !this.logQueries) return;
-    const isSlowQuery = durationMs >= this.slowQueryThreshold;
-    const type = isSlowQuery ? "slow_query" : "query";
-    const context = {
-      type,
-      tenantId,
-      query: this.truncateQuery(query),
-      durationMs
-    };
-    if (isSlowQuery) {
-      this.logger(
-        `${PREFIX} tenant=${tenantId} SLOW_QUERY duration=${durationMs}ms query="${this.truncateQuery(query)}"`,
-        context
-      );
-    } else {
-      this.logger(
-        `${PREFIX} tenant=${tenantId} query="${this.truncateQuery(query)}" duration=${durationMs}ms`,
-        context
-      );
-    }
-  }
-  /**
-   * Log pool creation
-   */
-  logPoolCreated(tenantId, schemaName) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "pool_created",
-      tenantId,
-      schemaName
-    };
-    this.logger(
-      `${PREFIX} tenant=${tenantId} POOL_CREATED schema=${schemaName}`,
-      context
-    );
-  }
-  /**
-   * Log pool eviction
-   */
-  logPoolEvicted(tenantId, schemaName, reason) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "pool_evicted",
-      tenantId,
-      schemaName,
-      metadata: reason ? { reason } : void 0
-    };
-    const reasonStr = reason ? ` reason=${reason}` : "";
-    this.logger(
-      `${PREFIX} tenant=${tenantId} POOL_EVICTED schema=${schemaName}${reasonStr}`,
-      context
-    );
-  }
-  /**
-   * Log pool error
-   */
-  logPoolError(tenantId, error) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "pool_error",
-      tenantId,
-      error: error.message
-    };
-    this.logger(
-      `${PREFIX} tenant=${tenantId} POOL_ERROR error="${error.message}"`,
-      context
-    );
-  }
-  /**
-   * Log warmup event
-   */
-  logWarmup(tenantId, success, durationMs, alreadyWarm) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "warmup",
-      tenantId,
-      durationMs,
-      metadata: { success, alreadyWarm }
-    };
-    const status = alreadyWarm ? "already_warm" : success ? "success" : "failed";
-    this.logger(
-      `${PREFIX} tenant=${tenantId} WARMUP status=${status} duration=${durationMs}ms`,
-      context
-    );
-  }
-  /**
-   * Log connection retry event
-   */
-  logConnectionRetry(identifier, attempt, maxAttempts, error, delayMs) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "connection_retry",
-      tenantId: identifier,
-      error: error.message,
-      metadata: { attempt, maxAttempts, delayMs }
-    };
-    this.logger(
-      `${PREFIX} tenant=${identifier} CONNECTION_RETRY attempt=${attempt}/${maxAttempts} delay=${delayMs}ms error="${error.message}"`,
-      context
-    );
-  }
-  /**
-   * Log connection success after retries
-   */
-  logConnectionSuccess(identifier, attempts, totalTimeMs) {
-    if (!this.enabled || !this.logPoolEvents) return;
-    const context = {
-      type: "pool_created",
-      tenantId: identifier,
-      durationMs: totalTimeMs,
-      metadata: { attempts }
-    };
-    if (attempts > 1) {
-      this.logger(
-        `${PREFIX} tenant=${identifier} CONNECTION_SUCCESS attempts=${attempts} totalTime=${totalTimeMs}ms`,
-        context
-      );
-    }
-  }
-  /**
-   * Log a custom debug message
-   */
-  log(message, context) {
-    if (!this.enabled) return;
-    this.logger(`${PREFIX} ${message}`, context);
-  }
-  /**
-   * Default logger implementation using console
-   */
-  defaultLogger(message, _context) {
-    console.log(message);
-  }
-  /**
-   * Truncate long queries for readability
-   */
-  truncateQuery(query, maxLength = 100) {
-    const normalized = query.replace(/\s+/g, " ").trim();
-    if (normalized.length <= maxLength) {
-      return normalized;
-    }
-    return normalized.substring(0, maxLength - 3) + "...";
-  }
-};
-function createDebugLogger(config) {
-  return new DebugLogger(config);
-}
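
The `DebugLogger` above is driven entirely by the `debug` block of the config; every field is optional and nothing is logged unless `enabled` is true. A sketch of a matching configuration, with field names taken from the constructor and the custom `logger` signature following the `(message, context)` calls above:

```ts
// Sketch of a debug configuration matching the DebugLogger constructor above.
const debugConfig = {
  enabled: process.env.NODE_ENV !== 'production', // default: false
  logQueries: true,                               // default: true
  logPoolEvents: true,                            // default: true
  slowQueryThreshold: 500,                        // ms; default: 1000
  // Optional sink: receives the formatted message plus a structured context object.
  logger: (message: string, context?: Record<string, unknown>) => {
    console.debug(message, context);
  },
};
```
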
-
-// src/retry.ts
-function isRetryableError(error) {
-  const message = error.message.toLowerCase();
-  if (message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("enotfound") || message.includes("connection refused") || message.includes("connection reset") || message.includes("connection terminated") || message.includes("connection timed out") || message.includes("timeout expired") || message.includes("socket hang up")) {
-    return true;
-  }
-  if (message.includes("too many connections") || message.includes("sorry, too many clients") || message.includes("the database system is starting up") || message.includes("the database system is shutting down") || message.includes("server closed the connection unexpectedly") || message.includes("could not connect to server")) {
-    return true;
-  }
-  if (message.includes("ssl connection") || message.includes("ssl handshake")) {
-    return true;
-  }
-  return false;
-}
-function calculateDelay(attempt, config) {
-  const exponentialDelay = config.initialDelayMs * Math.pow(config.backoffMultiplier, attempt);
-  const cappedDelay = Math.min(exponentialDelay, config.maxDelayMs);
-  if (config.jitter) {
-    const jitterFactor = 1 + Math.random() * 0.25;
-    return Math.floor(cappedDelay * jitterFactor);
-  }
-  return Math.floor(cappedDelay);
-}
-function sleep(ms) {
-  return new Promise((resolve) => setTimeout(resolve, ms));
-}
-async function withRetry(operation, config) {
-  const retryConfig = {
-    maxAttempts: config?.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
-    initialDelayMs: config?.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
-    maxDelayMs: config?.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
-    backoffMultiplier: config?.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
-    jitter: config?.jitter ?? DEFAULT_CONFIG.retry.jitter,
-    isRetryable: config?.isRetryable ?? isRetryableError,
-    onRetry: config?.onRetry
-  };
-  const startTime = Date.now();
-  let lastError = null;
-  for (let attempt = 0; attempt < retryConfig.maxAttempts; attempt++) {
-    try {
-      const result = await operation();
-      return {
-        result,
-        attempts: attempt + 1,
-        totalTimeMs: Date.now() - startTime
-      };
-    } catch (error) {
-      lastError = error;
-      const isLastAttempt = attempt >= retryConfig.maxAttempts - 1;
-      if (isLastAttempt || !retryConfig.isRetryable(lastError)) {
-        throw lastError;
-      }
-      const delay = calculateDelay(attempt, retryConfig);
-      retryConfig.onRetry?.(attempt + 1, lastError, delay);
-      await sleep(delay);
-    }
-  }
-  throw lastError ?? new Error("Retry failed with no error");
-}
-function createRetrier(config) {
-  return (operation) => {
-    return withRetry(operation, config);
-  };
-}
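
`withRetry` wraps any async operation, retries only errors that `isRetryableError` (or a custom `isRetryable`) classifies as transient, and resolves with the result plus attempt metadata. A usage sketch; note that `withRetry` is the helper defined above inside the bundle, and whether it is re-exported publicly is not shown here:

```ts
// Sketch only: `withRetry` refers to the helper shown above and may not be a public export.
import { Pool } from 'pg';

const pool = new Pool({ connectionString: process.env.DATABASE_URL });

const { result, attempts, totalTimeMs } = await withRetry(
  async () => pool.query('SELECT now()'),
  {
    maxAttempts: 4,
    onRetry: (attempt: number, error: Error, delayMs: number) =>
      console.warn(`retry #${attempt} in ${delayMs}ms: ${error.message}`),
  }
);
console.log(`succeeded after ${attempts} attempt(s) in ${totalTimeMs}ms`, result.rows);
```
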
-
-// src/pool.ts
-var PoolManager = class {
-  constructor(config) {
-    this.config = config;
-    const maxPools = config.isolation.maxPools ?? DEFAULT_CONFIG.maxPools;
-    this.debugLogger = createDebugLogger(config.debug);
-    const userRetry = config.connection.retry ?? {};
-    this.retryConfig = {
-      maxAttempts: userRetry.maxAttempts ?? DEFAULT_CONFIG.retry.maxAttempts,
-      initialDelayMs: userRetry.initialDelayMs ?? DEFAULT_CONFIG.retry.initialDelayMs,
-      maxDelayMs: userRetry.maxDelayMs ?? DEFAULT_CONFIG.retry.maxDelayMs,
-      backoffMultiplier: userRetry.backoffMultiplier ?? DEFAULT_CONFIG.retry.backoffMultiplier,
-      jitter: userRetry.jitter ?? DEFAULT_CONFIG.retry.jitter,
-      isRetryable: userRetry.isRetryable ?? isRetryableError,
-      onRetry: userRetry.onRetry
-    };
-    this.pools = new LRUCache({
-      max: maxPools,
-      dispose: (entry, key) => {
-        this.disposePoolEntry(entry, key);
-      },
-      noDisposeOnSet: true
-    });
-  }
-  pools;
-  tenantIdBySchema = /* @__PURE__ */ new Map();
-  pendingConnections = /* @__PURE__ */ new Map();
-  sharedPool = null;
-  sharedDb = null;
-  sharedDbPending = null;
-  cleanupInterval = null;
-  disposed = false;
-  debugLogger;
-  retryConfig;
-  /**
-   * Get or create a database connection for a tenant
-   */
-  getDb(tenantId) {
-    this.ensureNotDisposed();
-    const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
-    let entry = this.pools.get(schemaName);
-    if (!entry) {
-      entry = this.createPoolEntry(tenantId, schemaName);
-      this.pools.set(schemaName, entry);
-      this.tenantIdBySchema.set(schemaName, tenantId);
-      this.debugLogger.logPoolCreated(tenantId, schemaName);
-      void this.config.hooks?.onPoolCreated?.(tenantId);
-    }
-    entry.lastAccess = Date.now();
-    return entry.db;
-  }
-  /**
-   * Get or create a database connection for a tenant with retry and validation
-   *
-   * This async version validates the connection by executing a ping query
-   * and retries on transient failures with exponential backoff.
-   *
-   * @example
-   * ```typescript
-   * // Get tenant database with automatic retry
-   * const db = await manager.getDbAsync('tenant-123');
-   *
-   * // Queries will use the validated connection
-   * const users = await db.select().from(users);
-   * ```
-   */
-  async getDbAsync(tenantId) {
-    this.ensureNotDisposed();
-    const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
-    let entry = this.pools.get(schemaName);
-    if (entry) {
-      entry.lastAccess = Date.now();
-      return entry.db;
-    }
-    const pending = this.pendingConnections.get(schemaName);
-    if (pending) {
-      entry = await pending;
-      entry.lastAccess = Date.now();
-      return entry.db;
-    }
-    const connectionPromise = this.connectWithRetry(tenantId, schemaName);
-    this.pendingConnections.set(schemaName, connectionPromise);
-    try {
-      entry = await connectionPromise;
-      this.pools.set(schemaName, entry);
-      this.tenantIdBySchema.set(schemaName, tenantId);
-      this.debugLogger.logPoolCreated(tenantId, schemaName);
-      void this.config.hooks?.onPoolCreated?.(tenantId);
-      entry.lastAccess = Date.now();
-      return entry.db;
-    } finally {
-      this.pendingConnections.delete(schemaName);
-    }
-  }
-  /**
-   * Connect to a tenant database with retry logic
-   */
-  async connectWithRetry(tenantId, schemaName) {
-    const maxAttempts = this.retryConfig.maxAttempts;
-    const result = await withRetry(
-      async () => {
-        const entry = this.createPoolEntry(tenantId, schemaName);
-        try {
-          await entry.pool.query("SELECT 1");
-          return entry;
-        } catch (error) {
-          try {
-            await entry.pool.end();
-          } catch {
-          }
-          throw error;
-        }
-      },
-      {
-        ...this.retryConfig,
-        onRetry: (attempt, error, delayMs) => {
-          this.debugLogger.logConnectionRetry(tenantId, attempt, maxAttempts, error, delayMs);
-          this.retryConfig.onRetry?.(attempt, error, delayMs);
-        }
-      }
-    );
-    this.debugLogger.logConnectionSuccess(tenantId, result.attempts, result.totalTimeMs);
-    return result.result;
-  }
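
Note that `getDbAsync` deduplicates concurrent cold starts through `pendingConnections`: parallel callers for the same tenant share one validated connection attempt. A sketch, assuming `manager` was created with `createTenantManager` as defined later in this file:

```ts
// Two concurrent requests for the same tenant trigger a single
// validated pool creation; both resolve to the same Drizzle instance.
const [dbA, dbB] = await Promise.all([
  manager.getDbAsync('tenant-123'),
  manager.getDbAsync('tenant-123'),
]);
console.log(dbA === dbB); // true — one pool per tenant schema
```
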
-  /**
-   * Get or create the shared database connection
-   */
-  getSharedDb() {
-    this.ensureNotDisposed();
-    if (!this.sharedDb) {
-      this.sharedPool = new Pool({
-        connectionString: this.config.connection.url,
-        ...DEFAULT_CONFIG.poolConfig,
-        ...this.config.connection.poolConfig
-      });
-      this.sharedPool.on("error", (err) => {
-        void this.config.hooks?.onError?.("shared", err);
-      });
-      this.sharedDb = drizzle(this.sharedPool, {
-        schema: this.config.schemas.shared
-      });
-    }
-    return this.sharedDb;
-  }
-  /**
-   * Get or create the shared database connection with retry and validation
-   *
-   * This async version validates the connection by executing a ping query
-   * and retries on transient failures with exponential backoff.
-   *
-   * @example
-   * ```typescript
-   * // Get shared database with automatic retry
-   * const sharedDb = await manager.getSharedDbAsync();
-   *
-   * // Queries will use the validated connection
-   * const plans = await sharedDb.select().from(plans);
-   * ```
-   */
-  async getSharedDbAsync() {
-    this.ensureNotDisposed();
-    if (this.sharedDb) {
-      return this.sharedDb;
-    }
-    if (this.sharedDbPending) {
-      return this.sharedDbPending;
-    }
-    this.sharedDbPending = this.connectSharedWithRetry();
-    try {
-      const db = await this.sharedDbPending;
-      return db;
-    } finally {
-      this.sharedDbPending = null;
-    }
-  }
-  /**
-   * Connect to shared database with retry logic
-   */
-  async connectSharedWithRetry() {
-    const maxAttempts = this.retryConfig.maxAttempts;
-    const result = await withRetry(
-      async () => {
-        const pool = new Pool({
-          connectionString: this.config.connection.url,
-          ...DEFAULT_CONFIG.poolConfig,
-          ...this.config.connection.poolConfig
-        });
-        try {
-          await pool.query("SELECT 1");
-          pool.on("error", (err) => {
-            void this.config.hooks?.onError?.("shared", err);
-          });
-          this.sharedPool = pool;
-          this.sharedDb = drizzle(pool, {
-            schema: this.config.schemas.shared
-          });
-          return this.sharedDb;
-        } catch (error) {
-          try {
-            await pool.end();
-          } catch {
-          }
-          throw error;
-        }
-      },
-      {
-        ...this.retryConfig,
-        onRetry: (attempt, error, delayMs) => {
-          this.debugLogger.logConnectionRetry("shared", attempt, maxAttempts, error, delayMs);
-          this.retryConfig.onRetry?.(attempt, error, delayMs);
-        }
-      }
-    );
-    this.debugLogger.logConnectionSuccess("shared", result.attempts, result.totalTimeMs);
-    return result.result;
-  }
-  /**
-   * Get schema name for a tenant
-   */
-  getSchemaName(tenantId) {
-    return this.config.isolation.schemaNameTemplate(tenantId);
-  }
-  /**
-   * Check if a pool exists for a tenant
-   */
-  hasPool(tenantId) {
-    const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
-    return this.pools.has(schemaName);
-  }
-  /**
-   * Get count of active pools
-   */
-  getPoolCount() {
-    return this.pools.size;
-  }
-  /**
-   * Get all active tenant IDs
-   */
-  getActiveTenantIds() {
-    return Array.from(this.tenantIdBySchema.values());
-  }
-  /**
-   * Get the retry configuration
-   */
-  getRetryConfig() {
-    return { ...this.retryConfig };
-  }
-  /**
-   * Pre-warm pools for specified tenants to reduce cold start latency
-   *
-   * Uses automatic retry with exponential backoff for connection failures.
-   */
-  async warmup(tenantIds, options = {}) {
-    this.ensureNotDisposed();
-    const startTime = Date.now();
-    const { concurrency = 10, ping = true, onProgress } = options;
-    const results = [];
-    for (let i = 0; i < tenantIds.length; i += concurrency) {
-      const batch = tenantIds.slice(i, i + concurrency);
-      const batchResults = await Promise.all(
-        batch.map(async (tenantId) => {
-          const tenantStart = Date.now();
-          onProgress?.(tenantId, "starting");
-          try {
-            const alreadyWarm = this.hasPool(tenantId);
-            if (ping) {
-              await this.getDbAsync(tenantId);
-            } else {
-              this.getDb(tenantId);
-            }
-            const durationMs = Date.now() - tenantStart;
-            onProgress?.(tenantId, "completed");
-            this.debugLogger.logWarmup(tenantId, true, durationMs, alreadyWarm);
-            return {
-              tenantId,
-              success: true,
-              alreadyWarm,
-              durationMs
-            };
-          } catch (error) {
-            const durationMs = Date.now() - tenantStart;
-            onProgress?.(tenantId, "failed");
-            this.debugLogger.logWarmup(tenantId, false, durationMs, false);
-            return {
-              tenantId,
-              success: false,
-              alreadyWarm: false,
-              durationMs,
-              error: error.message
-            };
-          }
-        })
-      );
-      results.push(...batchResults);
-    }
-    return {
-      total: results.length,
-      succeeded: results.filter((r) => r.success).length,
-      failed: results.filter((r) => !r.success).length,
-      alreadyWarm: results.filter((r) => r.alreadyWarm).length,
-      durationMs: Date.now() - startTime,
-      details: results
-    };
-  }
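
`warmup` batches tenants by `concurrency`, optionally validating each pool with a ping, and reports per-tenant outcomes. A sketch of pre-warming at application start, assuming `manager` is the tenant manager created from this bundle's factory:

```ts
// Pre-warm pools for the busiest tenants before accepting traffic.
const report = await manager.warmup(['tenant-1', 'tenant-2', 'tenant-3'], {
  concurrency: 5,          // default 10
  ping: true,              // validate each pool via getDbAsync (SELECT 1)
  onProgress: (tenantId: string, phase: string) => console.log(`${tenantId}: ${phase}`),
});
console.log(`${report.succeeded}/${report.total} warm in ${report.durationMs}ms`);
```
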
-  /**
-   * Get current metrics for all pools
-   *
-   * Collects metrics on demand with zero overhead when not called.
-   * Returns raw data that can be formatted for any monitoring system.
-   *
-   * @example
-   * ```typescript
-   * const metrics = manager.getMetrics();
-   * console.log(metrics.pools.total); // 15
-   *
-   * // Format for Prometheus
-   * for (const pool of metrics.pools.tenants) {
-   *   gauge.labels(pool.tenantId).set(pool.connections.idle);
-   * }
-   * ```
-   */
-  getMetrics() {
-    this.ensureNotDisposed();
-    const maxPools = this.config.isolation.maxPools ?? DEFAULT_CONFIG.maxPools;
-    const tenantMetrics = [];
-    for (const [schemaName, entry] of this.pools.entries()) {
-      const tenantId = this.tenantIdBySchema.get(schemaName) ?? schemaName;
-      const pool = entry.pool;
-      tenantMetrics.push({
-        tenantId,
-        schemaName,
-        connections: {
-          total: pool.totalCount,
-          idle: pool.idleCount,
-          waiting: pool.waitingCount
-        },
-        lastAccessedAt: new Date(entry.lastAccess).toISOString()
-      });
-    }
-    return {
-      pools: {
-        total: tenantMetrics.length,
-        maxPools,
-        tenants: tenantMetrics
-      },
-      shared: {
-        initialized: this.sharedPool !== null,
-        connections: this.sharedPool ? {
-          total: this.sharedPool.totalCount,
-          idle: this.sharedPool.idleCount,
-          waiting: this.sharedPool.waitingCount
-        } : null
-      },
-      timestamp: (/* @__PURE__ */ new Date()).toISOString()
-    };
-  }
-  /**
-   * Check health of all pools and connections
-   *
-   * Verifies the health of tenant pools and optionally the shared database.
-   * Returns detailed status information for monitoring and load balancer integration.
-   *
-   * @example
-   * ```typescript
-   * // Basic health check
-   * const health = await manager.healthCheck();
-   * console.log(health.healthy); // true/false
-   *
-   * // Use with Express endpoint
-   * app.get('/health', async (req, res) => {
-   *   const health = await manager.healthCheck();
-   *   res.status(health.healthy ? 200 : 503).json(health);
-   * });
-   *
-   * // Check specific tenants only
-   * const health = await manager.healthCheck({
-   *   tenantIds: ['tenant-1', 'tenant-2'],
-   *   ping: true,
-   *   pingTimeoutMs: 3000,
-   * });
-   * ```
-   */
-  async healthCheck(options = {}) {
-    this.ensureNotDisposed();
-    const startTime = Date.now();
-    const {
-      ping = true,
-      pingTimeoutMs = 5e3,
-      includeShared = true,
-      tenantIds
-    } = options;
-    const poolHealthResults = [];
-    let sharedDbStatus = "ok";
-    let sharedDbResponseTimeMs;
-    let sharedDbError;
-    const poolsToCheck = [];
-    if (tenantIds && tenantIds.length > 0) {
-      for (const tenantId of tenantIds) {
-        const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
-        const entry = this.pools.get(schemaName);
-        if (entry) {
-          poolsToCheck.push({ schemaName, tenantId, entry });
-        }
-      }
-    } else {
-      for (const [schemaName, entry] of this.pools.entries()) {
-        const tenantId = this.tenantIdBySchema.get(schemaName) ?? schemaName;
-        poolsToCheck.push({ schemaName, tenantId, entry });
-      }
-    }
-    const poolChecks = poolsToCheck.map(async ({ schemaName, tenantId, entry }) => {
-      const poolHealth = await this.checkPoolHealth(tenantId, schemaName, entry, ping, pingTimeoutMs);
-      return poolHealth;
-    });
-    poolHealthResults.push(...await Promise.all(poolChecks));
-    if (includeShared && this.sharedPool) {
-      const sharedResult = await this.checkSharedDbHealth(ping, pingTimeoutMs);
-      sharedDbStatus = sharedResult.status;
-      sharedDbResponseTimeMs = sharedResult.responseTimeMs;
-      sharedDbError = sharedResult.error;
-    }
-    const degradedPools = poolHealthResults.filter((p) => p.status === "degraded").length;
-    const unhealthyPools = poolHealthResults.filter((p) => p.status === "unhealthy").length;
-    const healthy = unhealthyPools === 0 && sharedDbStatus !== "unhealthy";
-    return {
-      healthy,
-      pools: poolHealthResults,
-      sharedDb: sharedDbStatus,
-      sharedDbResponseTimeMs,
-      sharedDbError,
-      totalPools: poolHealthResults.length,
-      degradedPools,
-      unhealthyPools,
-      timestamp: (/* @__PURE__ */ new Date()).toISOString(),
-      durationMs: Date.now() - startTime
-    };
-  }
-  /**
-   * Check health of a single tenant pool
-   */
-  async checkPoolHealth(tenantId, schemaName, entry, ping, pingTimeoutMs) {
-    const pool = entry.pool;
-    const totalConnections = pool.totalCount;
-    const idleConnections = pool.idleCount;
-    const waitingRequests = pool.waitingCount;
-    let status = "ok";
-    let responseTimeMs;
-    let error;
-    if (waitingRequests > 0) {
-      status = "degraded";
-    }
-    if (ping) {
-      const pingResult = await this.executePingQuery(pool, pingTimeoutMs);
-      responseTimeMs = pingResult.responseTimeMs;
-      if (!pingResult.success) {
-        status = "unhealthy";
-        error = pingResult.error;
-      } else if (pingResult.responseTimeMs && pingResult.responseTimeMs > pingTimeoutMs / 2) {
-        if (status === "ok") {
-          status = "degraded";
-        }
-      }
-    }
-    return {
-      tenantId,
-      schemaName,
-      status,
-      totalConnections,
-      idleConnections,
-      waitingRequests,
-      responseTimeMs,
-      error
-    };
-  }
-  /**
-   * Check health of shared database
-   */
-  async checkSharedDbHealth(ping, pingTimeoutMs) {
-    if (!this.sharedPool) {
-      return { status: "ok" };
-    }
-    let status = "ok";
-    let responseTimeMs;
-    let error;
-    const waitingRequests = this.sharedPool.waitingCount;
-    if (waitingRequests > 0) {
-      status = "degraded";
-    }
-    if (ping) {
-      const pingResult = await this.executePingQuery(this.sharedPool, pingTimeoutMs);
-      responseTimeMs = pingResult.responseTimeMs;
-      if (!pingResult.success) {
-        status = "unhealthy";
-        error = pingResult.error;
-      } else if (pingResult.responseTimeMs && pingResult.responseTimeMs > pingTimeoutMs / 2) {
-        if (status === "ok") {
-          status = "degraded";
-        }
-      }
-    }
-    return { status, responseTimeMs, error };
-  }
-  /**
-   * Execute a ping query with timeout
-   */
-  async executePingQuery(pool, timeoutMs) {
-    const startTime = Date.now();
-    try {
-      const timeoutPromise = new Promise((_, reject) => {
-        setTimeout(() => reject(new Error("Health check ping timeout")), timeoutMs);
-      });
-      const queryPromise = pool.query("SELECT 1");
-      await Promise.race([queryPromise, timeoutPromise]);
-      return {
-        success: true,
-        responseTimeMs: Date.now() - startTime
-      };
-    } catch (err) {
-      return {
-        success: false,
-        responseTimeMs: Date.now() - startTime,
-        error: err.message
-      };
-    }
-  }
-  /**
-   * Manually evict a tenant pool
-   */
-  async evictPool(tenantId, reason = "manual") {
-    const schemaName = this.config.isolation.schemaNameTemplate(tenantId);
-    const entry = this.pools.get(schemaName);
-    if (entry) {
-      this.debugLogger.logPoolEvicted(tenantId, schemaName, reason);
-      this.pools.delete(schemaName);
-      this.tenantIdBySchema.delete(schemaName);
-      await this.closePool(entry.pool, tenantId);
-    }
-  }
-  /**
-   * Start automatic cleanup of idle pools
-   */
-  startCleanup() {
-    if (this.cleanupInterval) return;
-    const poolTtlMs = this.config.isolation.poolTtlMs ?? DEFAULT_CONFIG.poolTtlMs;
-    const cleanupIntervalMs = DEFAULT_CONFIG.cleanupIntervalMs;
-    this.cleanupInterval = setInterval(() => {
-      void this.cleanupIdlePools(poolTtlMs);
-    }, cleanupIntervalMs);
-    this.cleanupInterval.unref();
-  }
-  /**
-   * Stop automatic cleanup
-   */
-  stopCleanup() {
-    if (this.cleanupInterval) {
-      clearInterval(this.cleanupInterval);
-      this.cleanupInterval = null;
-    }
-  }
-  /**
-   * Dispose all pools and cleanup resources
-   */
-  async dispose() {
-    if (this.disposed) return;
-    this.disposed = true;
-    this.stopCleanup();
-    const closePromises = [];
-    for (const [schemaName, entry] of this.pools.entries()) {
-      const tenantId = this.tenantIdBySchema.get(schemaName);
-      closePromises.push(this.closePool(entry.pool, tenantId ?? schemaName));
-    }
-    this.pools.clear();
-    this.tenantIdBySchema.clear();
-    if (this.sharedPool) {
-      closePromises.push(this.closePool(this.sharedPool, "shared"));
-      this.sharedPool = null;
-      this.sharedDb = null;
-    }
-    await Promise.all(closePromises);
-  }
-  /**
-   * Create a new pool entry for a tenant
-   */
-  createPoolEntry(tenantId, schemaName) {
-    const pool = new Pool({
-      connectionString: this.config.connection.url,
-      ...DEFAULT_CONFIG.poolConfig,
-      ...this.config.connection.poolConfig,
-      options: `-c search_path=${schemaName},public`
-    });
-    pool.on("error", async (err) => {
-      this.debugLogger.logPoolError(tenantId, err);
-      void this.config.hooks?.onError?.(tenantId, err);
-      await this.evictPool(tenantId, "error");
-    });
-    const db = drizzle(pool, {
-      schema: this.config.schemas.tenant
-    });
-    return {
-      db,
-      pool,
-      lastAccess: Date.now(),
-      schemaName
-    };
-  }
-  /**
-   * Dispose a pool entry (called by LRU cache)
-   */
-  disposePoolEntry(entry, schemaName) {
-    const tenantId = this.tenantIdBySchema.get(schemaName);
-    this.tenantIdBySchema.delete(schemaName);
-    if (tenantId) {
-      this.debugLogger.logPoolEvicted(tenantId, schemaName, "lru_eviction");
-    }
-    void this.closePool(entry.pool, tenantId ?? schemaName).then(() => {
-      if (tenantId) {
-        void this.config.hooks?.onPoolEvicted?.(tenantId);
-      }
-    });
-  }
-  /**
-   * Close a pool gracefully
-   */
-  async closePool(pool, identifier) {
-    try {
-      await pool.end();
-    } catch (error) {
-      void this.config.hooks?.onError?.(identifier, error);
-    }
-  }
-  /**
-   * Cleanup pools that have been idle for too long
-   */
-  async cleanupIdlePools(poolTtlMs) {
-    const now = Date.now();
-    const toEvict = [];
-    for (const [schemaName, entry] of this.pools.entries()) {
-      if (now - entry.lastAccess > poolTtlMs) {
-        toEvict.push(schemaName);
-      }
-    }
-    for (const schemaName of toEvict) {
-      const tenantId = this.tenantIdBySchema.get(schemaName);
-      if (tenantId) {
-        await this.evictPool(tenantId, "ttl_expired");
-      }
-    }
-  }
-  /**
-   * Ensure the manager hasn't been disposed
-   */
-  ensureNotDisposed() {
-    if (this.disposed) {
-      throw new Error("[drizzle-multitenant] TenantManager has been disposed");
-    }
-  }
-};
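
Because each tenant pool sets `search_path` at connection time and pools are closed on eviction, the manager owns real connection resources; `dispose()` is the hook to wire into process shutdown. A sketch, assuming `manager` is the tenant manager instance:

```ts
// Close every tenant pool and the shared pool on shutdown.
process.on('SIGTERM', async () => {
  await manager.dispose();
  process.exit(0);
});
```
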
-
-// src/manager.ts
-function createTenantManager(config) {
-  const poolManager = new PoolManager(config);
-  poolManager.startCleanup();
-  return {
-    getDb(tenantId) {
-      return poolManager.getDb(tenantId);
-    },
-    async getDbAsync(tenantId) {
-      return poolManager.getDbAsync(tenantId);
-    },
-    getSharedDb() {
-      return poolManager.getSharedDb();
-    },
-    async getSharedDbAsync() {
-      return poolManager.getSharedDbAsync();
-    },
-    getSchemaName(tenantId) {
-      return poolManager.getSchemaName(tenantId);
-    },
-    hasPool(tenantId) {
-      return poolManager.hasPool(tenantId);
-    },
-    getPoolCount() {
-      return poolManager.getPoolCount();
-    },
-    getActiveTenantIds() {
-      return poolManager.getActiveTenantIds();
-    },
-    getRetryConfig() {
-      return poolManager.getRetryConfig();
-    },
-    async evictPool(tenantId) {
-      await poolManager.evictPool(tenantId);
-    },
-    async warmup(tenantIds, options) {
-      return poolManager.warmup(tenantIds, options);
-    },
-    async healthCheck(options) {
-      return poolManager.healthCheck(options);
-    },
-    getMetrics() {
-      return poolManager.getMetrics();
-    },
-    async dispose() {
-      await poolManager.dispose();
-    }
-  };
-}
-function createTenantContext(manager) {
-  const storage = new AsyncLocalStorage();
-  function getTenantOrNull() {
-    return storage.getStore();
-  }
-  function getTenant() {
-    const context = getTenantOrNull();
-    if (!context) {
-      throw new Error(
-        "[drizzle-multitenant] No tenant context found. Make sure you are calling this within runWithTenant()."
-      );
-    }
-    return context;
-  }
-  function getTenantId() {
-    return getTenant().tenantId;
-  }
-  function getTenantDb() {
-    const tenantId = getTenantId();
-    return manager.getDb(tenantId);
-  }
-  function getSharedDb() {
-    return manager.getSharedDb();
-  }
-  function isInTenantContext() {
-    return getTenantOrNull() !== void 0;
-  }
-  function runWithTenant(context, callback) {
-    if (!context.tenantId) {
-      throw new Error("[drizzle-multitenant] tenantId is required in context");
-    }
-    return storage.run(context, callback);
-  }
-  return {
-    runWithTenant,
-    getTenant,
-    getTenantOrNull,
-    getTenantId,
-    getTenantDb,
-    getSharedDb,
-    isInTenantContext
-  };
-}
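
Together, `createTenantManager` and `createTenantContext` give request-scoped tenancy via `AsyncLocalStorage`: wrap each request in `runWithTenant`, and any code on that async path can call `getTenantDb()` without passing the tenant around. A sketch, assuming both factories are exported from the package root and that the config module and `users` table are your own (hypothetical paths):

```ts
import { createTenantManager, createTenantContext } from 'drizzle-multitenant';
import config from './drizzle-multitenant.config';   // hypothetical config module
import { users } from './db/tenant-schema';          // hypothetical tenant table

const manager = createTenantManager(config);
const tenants = createTenantContext(manager);

// e.g. inside an HTTP handler, after resolving the tenant from the request:
await tenants.runWithTenant({ tenantId: 'tenant-123' }, async () => {
  const db = tenants.getTenantDb(); // pool whose search_path points at this tenant's schema
  const rows = await db.select().from(users);
  console.log(rows.length);
});
```
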
-
-// src/migrator/table-format.ts
-async function detectTableFormat(pool, schemaName, tableName) {
-  const tableExists = await pool.query(
-    `SELECT EXISTS (
+
import {Pool}from'pg';import {drizzle}from'drizzle-orm/node-postgres';import {LRUCache}from'lru-cache';import {AsyncLocalStorage}from'async_hooks';import {readdir,readFile}from'fs/promises';import {join,basename}from'path';import {createHash}from'crypto';import {existsSync}from'fs';import {sql,getTableName}from'drizzle-orm';function Se(c){return be(c),c}function be(c){if(!c.connection.url)throw new Error("[drizzle-multitenant] connection.url is required");if(!c.isolation.strategy)throw new Error("[drizzle-multitenant] isolation.strategy is required");if(c.isolation.strategy!=="schema")throw new Error(`[drizzle-multitenant] isolation.strategy "${c.isolation.strategy}" is not yet supported. Only "schema" is currently available.`);if(!c.isolation.schemaNameTemplate)throw new Error("[drizzle-multitenant] isolation.schemaNameTemplate is required");if(typeof c.isolation.schemaNameTemplate!="function")throw new Error("[drizzle-multitenant] isolation.schemaNameTemplate must be a function");if(!c.schemas.tenant)throw new Error("[drizzle-multitenant] schemas.tenant is required");if(c.isolation.maxPools!==void 0&&c.isolation.maxPools<1)throw new Error("[drizzle-multitenant] isolation.maxPools must be at least 1");if(c.isolation.poolTtlMs!==void 0&&c.isolation.poolTtlMs<0)throw new Error("[drizzle-multitenant] isolation.poolTtlMs must be non-negative");if(c.connection.retry){let e=c.connection.retry;if(e.maxAttempts!==void 0&&e.maxAttempts<1)throw new Error("[drizzle-multitenant] connection.retry.maxAttempts must be at least 1");if(e.initialDelayMs!==void 0&&e.initialDelayMs<0)throw new Error("[drizzle-multitenant] connection.retry.initialDelayMs must be non-negative");if(e.maxDelayMs!==void 0&&e.maxDelayMs<0)throw new Error("[drizzle-multitenant] connection.retry.maxDelayMs must be non-negative");if(e.backoffMultiplier!==void 0&&e.backoffMultiplier<1)throw new Error("[drizzle-multitenant] connection.retry.backoffMultiplier must be at least 1");if(e.initialDelayMs!==void 0&&e.maxDelayMs!==void 0&&e.initialDelayMs>e.maxDelayMs)throw new Error("[drizzle-multitenant] connection.retry.initialDelayMs cannot be greater than maxDelayMs")}}var T={maxPools:50,poolTtlMs:36e5,cleanupIntervalMs:6e4,poolConfig:{max:10,idleTimeoutMillis:3e4,connectionTimeoutMillis:5e3},retry:{maxAttempts:3,initialDelayMs:100,maxDelayMs:5e3,backoffMultiplier:2,jitter:true}};var R="[drizzle-multitenant]";var W=class{enabled;logQueries;logPoolEvents;slowQueryThreshold;logger;constructor(e){this.enabled=e?.enabled??false,this.logQueries=e?.logQueries??true,this.logPoolEvents=e?.logPoolEvents??true,this.slowQueryThreshold=e?.slowQueryThreshold??1e3,this.logger=e?.logger??this.defaultLogger;}isEnabled(){return this.enabled}logQuery(e,t,n){if(!this.enabled||!this.logQueries)return;let a=n>=this.slowQueryThreshold,s={type:a?"slow_query":"query",tenantId:e,query:this.truncateQuery(t),durationMs:n};a?this.logger(`${R} tenant=${e} SLOW_QUERY duration=${n}ms query="${this.truncateQuery(t)}"`,s):this.logger(`${R} tenant=${e} query="${this.truncateQuery(t)}" duration=${n}ms`,s);}logPoolCreated(e,t){if(!this.enabled||!this.logPoolEvents)return;let n={type:"pool_created",tenantId:e,schemaName:t};this.logger(`${R} tenant=${e} POOL_CREATED schema=${t}`,n);}logPoolEvicted(e,t,n){if(!this.enabled||!this.logPoolEvents)return;let a={type:"pool_evicted",tenantId:e,schemaName:t,metadata:n?{reason:n}:void 0},r=n?` reason=${n}`:"";this.logger(`${R} tenant=${e} POOL_EVICTED 
schema=${t}${r}`,a);}logPoolError(e,t){if(!this.enabled||!this.logPoolEvents)return;let n={type:"pool_error",tenantId:e,error:t.message};this.logger(`${R} tenant=${e} POOL_ERROR error="${t.message}"`,n);}logWarmup(e,t,n,a){if(!this.enabled||!this.logPoolEvents)return;let r={type:"warmup",tenantId:e,durationMs:n,metadata:{success:t,alreadyWarm:a}},s=a?"already_warm":t?"success":"failed";this.logger(`${R} tenant=${e} WARMUP status=${s} duration=${n}ms`,r);}logConnectionRetry(e,t,n,a,r){if(!this.enabled||!this.logPoolEvents)return;let s={type:"connection_retry",tenantId:e,error:a.message,metadata:{attempt:t,maxAttempts:n,delayMs:r}};this.logger(`${R} tenant=${e} CONNECTION_RETRY attempt=${t}/${n} delay=${r}ms error="${a.message}"`,s);}logConnectionSuccess(e,t,n){if(!this.enabled||!this.logPoolEvents)return;let a={type:"pool_created",tenantId:e,durationMs:n,metadata:{attempts:t}};t>1&&this.logger(`${R} tenant=${e} CONNECTION_SUCCESS attempts=${t} totalTime=${n}ms`,a);}log(e,t){this.enabled&&this.logger(`${R} ${e}`,t);}defaultLogger(e,t){console.log(e);}truncateQuery(e,t=100){let n=e.replace(/\s+/g," ").trim();return n.length<=t?n:n.substring(0,t-3)+"..."}};function G(c){return new W(c)}var x=class{cache;poolTtlMs;onDispose;constructor(e){this.poolTtlMs=e.poolTtlMs,this.onDispose=e.onDispose,this.cache=new LRUCache({max:e.maxPools,dispose:(t,n)=>{this.handleDispose(n,t);},noDisposeOnSet:true});}get(e){return this.cache.get(e)}set(e,t){this.cache.set(e,t);}has(e){return this.cache.has(e)}delete(e){return this.cache.delete(e)}size(){return this.cache.size}keys(){return Array.from(this.cache.keys())}*entries(){for(let[e,t]of this.cache.entries())yield [e,t];}async clear(){this.cache.clear(),await Promise.resolve();}evictLRU(){let e=Array.from(this.cache.keys());if(e.length===0)return;let t=e[e.length-1];return this.cache.delete(t),t}async evictExpired(){if(!this.poolTtlMs)return [];let e=Date.now(),t=[];for(let[n,a]of this.cache.entries())e-a.lastAccess>this.poolTtlMs&&t.push(n);for(let n of t)this.cache.delete(n);return t}touch(e){let t=this.cache.get(e);t&&(t.lastAccess=Date.now());}getMaxPools(){return this.cache.max}getTtlMs(){return this.poolTtlMs}isExpired(e){return this.poolTtlMs?Date.now()-e.lastAccess>this.poolTtlMs:false}async handleDispose(e,t){this.onDispose&&await this.onDispose(e,t);}};function J(c){let e=c.message.toLowerCase();return !!(e.includes("econnrefused")||e.includes("econnreset")||e.includes("etimedout")||e.includes("enotfound")||e.includes("connection refused")||e.includes("connection reset")||e.includes("connection terminated")||e.includes("connection timed out")||e.includes("timeout expired")||e.includes("socket hang up")||e.includes("too many connections")||e.includes("sorry, too many clients")||e.includes("the database system is starting up")||e.includes("the database system is shutting down")||e.includes("server closed the connection unexpectedly")||e.includes("could not connect to server")||e.includes("ssl connection")||e.includes("ssl handshake"))}function Me(c){return new Promise(e=>setTimeout(e,c))}var E=class{config;constructor(e){this.config={maxAttempts:e?.maxAttempts??T.retry.maxAttempts,initialDelayMs:e?.initialDelayMs??T.retry.initialDelayMs,maxDelayMs:e?.maxDelayMs??T.retry.maxDelayMs,backoffMultiplier:e?.backoffMultiplier??T.retry.backoffMultiplier,jitter:e?.jitter??T.retry.jitter,isRetryable:e?.isRetryable??J,onRetry:e?.onRetry};}async withRetry(e,t){let n=t?{...this.config,...t}:this.config,a=Date.now(),r=null;for(let s=0;s<n.maxAttempts;s++)try{return 
{result:await e(),attempts:s+1,totalTimeMs:Date.now()-a}}catch(i){r=i;let o=s>=n.maxAttempts-1,m=n.isRetryable??this.isRetryable;if(o||!m(r))throw r;let l=this.calculateDelay(s,n);n.onRetry?.(s+1,r,l),await Me(l);}throw r??new Error("Retry failed with no error")}calculateDelay(e,t){let n=t?{...this.config,...t}:this.config,a=n.initialDelayMs*Math.pow(n.backoffMultiplier,e),r=Math.min(a,n.maxDelayMs);if(n.jitter){let s=1+Math.random()*.25;return Math.floor(r*s)}return Math.floor(r)}isRetryable(e){return (this.config.isRetryable??J)(e)}getConfig(){return {...this.config}}getMaxAttempts(){return this.config.maxAttempts}};var P=class{constructor(e){this.deps=e;}async checkHealth(e={}){let t=Date.now(),{ping:n=true,pingTimeoutMs:a=5e3,includeShared:r=true,tenantIds:s}=e,i=[],o="ok",m,l,h=this.getPoolsToCheck(s).map(async({schemaName:S,tenantId:N,entry:M})=>this.checkPoolHealth(N,S,M,n,a));i.push(...await Promise.all(h));let d=this.deps.getSharedPool();if(r&&d){let S=await this.checkSharedDbHealth(d,n,a);o=S.status,m=S.responseTimeMs,l=S.error;}let g=i.filter(S=>S.status==="degraded").length,w=i.filter(S=>S.status==="unhealthy").length,b={healthy:w===0&&o!=="unhealthy",pools:i,sharedDb:o,totalPools:i.length,degradedPools:g,unhealthyPools:w,timestamp:new Date().toISOString(),durationMs:Date.now()-t};return m!==void 0&&(b.sharedDbResponseTimeMs=m),l!==void 0&&(b.sharedDbError=l),b}getPoolsToCheck(e){let t=[];if(e&&e.length>0)for(let n of e){let a=this.deps.getSchemaName(n),r=this.deps.getPoolEntry(a);r&&t.push({schemaName:a,tenantId:n,entry:r});}else for(let[n,a]of this.deps.getPoolEntries()){let r=this.deps.getTenantIdBySchema(n)??n;t.push({schemaName:n,tenantId:r,entry:a});}return t}async checkPoolHealth(e,t,n,a,r){let s=n.pool,i=s.totalCount,o=s.idleCount,m=s.waitingCount,l="ok",u,h;if(m>0&&(l="degraded"),a){let g=await this.executePingQuery(s,r);u=g.responseTimeMs,g.success?g.responseTimeMs&&g.responseTimeMs>r/2&&l==="ok"&&(l="degraded"):(l="unhealthy",h=g.error);}let d={tenantId:e,schemaName:t,status:l,totalConnections:i,idleConnections:o,waitingRequests:m};return u!==void 0&&(d.responseTimeMs=u),h!==void 0&&(d.error=h),d}async checkSharedDbHealth(e,t,n){let a="ok",r,s;if(e.waitingCount>0&&(a="degraded"),t){let m=await this.executePingQuery(e,n);r=m.responseTimeMs,m.success?m.responseTimeMs&&m.responseTimeMs>n/2&&a==="ok"&&(a="degraded"):(a="unhealthy",s=m.error);}let o={status:a};return r!==void 0&&(o.responseTimeMs=r),s!==void 0&&(o.error=s),o}async executePingQuery(e,t){let n=Date.now();try{let a=new Promise((s,i)=>{setTimeout(()=>i(new Error("Health check ping timeout")),t);}),r=e.query("SELECT 1");return await Promise.race([r,a]),{success:!0,responseTimeMs:Date.now()-n}}catch(a){return {success:false,responseTimeMs:Date.now()-n,error:a.message}}}determineOverallHealth(e,t="ok"){return e.filter(a=>a.status==="unhealthy").length===0&&t!=="unhealthy"}};var k=class{constructor(e){this.config=e;let t=e.isolation.maxPools??T.maxPools,n=e.isolation.poolTtlMs??T.poolTtlMs;this.debugLogger=G(e.debug),this.retryHandler=new E(e.connection.retry),this.poolCache=new x({maxPools:t,poolTtlMs:n,onDispose:(a,r)=>{this.disposePoolEntry(r,a);}}),this.healthChecker=new P({getPoolEntries:()=>this.poolCache.entries(),getTenantIdBySchema:a=>this.tenantIdBySchema.get(a),getPoolEntry:a=>this.poolCache.get(a),getSchemaName:a=>this.config.isolation.schemaNameTemplate(a),getSharedPool:()=>this.sharedPool});}poolCache;tenantIdBySchema=new Map;pendingConnections=new 
Map;sharedPool=null;sharedDb=null;sharedDbPending=null;cleanupInterval=null;disposed=false;debugLogger;retryHandler;healthChecker;getDb(e){this.ensureNotDisposed();let t=this.config.isolation.schemaNameTemplate(e),n=this.poolCache.get(t);return n||(n=this.createPoolEntry(e,t),this.poolCache.set(t,n),this.tenantIdBySchema.set(t,e),this.debugLogger.logPoolCreated(e,t),this.config.hooks?.onPoolCreated?.(e)),this.poolCache.touch(t),n.db}async getDbAsync(e){this.ensureNotDisposed();let t=this.config.isolation.schemaNameTemplate(e),n=this.poolCache.get(t);if(n)return this.poolCache.touch(t),n.db;let a=this.pendingConnections.get(t);if(a)return n=await a,this.poolCache.touch(t),n.db;let r=this.connectWithRetry(e,t);this.pendingConnections.set(t,r);try{return n=await r,this.poolCache.set(t,n),this.tenantIdBySchema.set(t,e),this.debugLogger.logPoolCreated(e,t),this.config.hooks?.onPoolCreated?.(e),this.poolCache.touch(t),n.db}finally{this.pendingConnections.delete(t);}}async connectWithRetry(e,t){let n=this.retryHandler.getConfig(),a=n.maxAttempts,r=await this.retryHandler.withRetry(async()=>{let s=this.createPoolEntry(e,t);try{return await s.pool.query("SELECT 1"),s}catch(i){try{await s.pool.end();}catch{}throw i}},{onRetry:(s,i,o)=>{this.debugLogger.logConnectionRetry(e,s,a,i,o),n.onRetry?.(s,i,o);}});return this.debugLogger.logConnectionSuccess(e,r.attempts,r.totalTimeMs),r.result}getSharedDb(){return this.ensureNotDisposed(),this.sharedDb||(this.sharedPool=new Pool({connectionString:this.config.connection.url,...T.poolConfig,...this.config.connection.poolConfig}),this.sharedPool.on("error",e=>{this.config.hooks?.onError?.("shared",e);}),this.sharedDb=drizzle(this.sharedPool,{schema:this.config.schemas.shared})),this.sharedDb}async getSharedDbAsync(){if(this.ensureNotDisposed(),this.sharedDb)return this.sharedDb;if(this.sharedDbPending)return this.sharedDbPending;this.sharedDbPending=this.connectSharedWithRetry();try{return await this.sharedDbPending}finally{this.sharedDbPending=null;}}async connectSharedWithRetry(){let e=this.retryHandler.getConfig(),t=e.maxAttempts,n=await this.retryHandler.withRetry(async()=>{let a=new Pool({connectionString:this.config.connection.url,...T.poolConfig,...this.config.connection.poolConfig});try{return await a.query("SELECT 1"),a.on("error",r=>{this.config.hooks?.onError?.("shared",r);}),this.sharedPool=a,this.sharedDb=drizzle(a,{schema:this.config.schemas.shared}),this.sharedDb}catch(r){try{await a.end();}catch{}throw r}},{onRetry:(a,r,s)=>{this.debugLogger.logConnectionRetry("shared",a,t,r,s),e.onRetry?.(a,r,s);}});return this.debugLogger.logConnectionSuccess("shared",n.attempts,n.totalTimeMs),n.result}getSchemaName(e){return this.config.isolation.schemaNameTemplate(e)}hasPool(e){let t=this.config.isolation.schemaNameTemplate(e);return this.poolCache.has(t)}getPoolCount(){return this.poolCache.size()}getActiveTenantIds(){return Array.from(this.tenantIdBySchema.values())}getRetryConfig(){return this.retryHandler.getConfig()}async warmup(e,t={}){this.ensureNotDisposed();let n=Date.now(),{concurrency:a=10,ping:r=true,onProgress:s}=t,i=[];for(let o=0;o<e.length;o+=a){let m=e.slice(o,o+a),l=await Promise.all(m.map(async u=>{let h=Date.now();s?.(u,"starting");try{let d=this.hasPool(u);r?await this.getDbAsync(u):this.getDb(u);let g=Date.now()-h;return s?.(u,"completed"),this.debugLogger.logWarmup(u,!0,g,d),{tenantId:u,success:!0,alreadyWarm:d,durationMs:g}}catch(d){let g=Date.now()-h;return 
s?.(u,"failed"),this.debugLogger.logWarmup(u,false,g,false),{tenantId:u,success:false,alreadyWarm:false,durationMs:g,error:d.message}}}));i.push(...l);}return {total:i.length,succeeded:i.filter(o=>o.success).length,failed:i.filter(o=>!o.success).length,alreadyWarm:i.filter(o=>o.alreadyWarm).length,durationMs:Date.now()-n,details:i}}getMetrics(){this.ensureNotDisposed();let e=this.config.isolation.maxPools??T.maxPools,t=[];for(let[n,a]of this.poolCache.entries()){let r=this.tenantIdBySchema.get(n)??n,s=a.pool;t.push({tenantId:r,schemaName:n,connections:{total:s.totalCount,idle:s.idleCount,waiting:s.waitingCount},lastAccessedAt:new Date(a.lastAccess).toISOString()});}return {pools:{total:t.length,maxPools:e,tenants:t},shared:{initialized:this.sharedPool!==null,connections:this.sharedPool?{total:this.sharedPool.totalCount,idle:this.sharedPool.idleCount,waiting:this.sharedPool.waitingCount}:null},timestamp:new Date().toISOString()}}async healthCheck(e={}){return this.ensureNotDisposed(),this.healthChecker.checkHealth(e)}async evictPool(e,t="manual"){let n=this.config.isolation.schemaNameTemplate(e),a=this.poolCache.get(n);a&&(this.debugLogger.logPoolEvicted(e,n,t),this.poolCache.delete(n),this.tenantIdBySchema.delete(n),await this.closePool(a.pool,e));}startCleanup(){if(this.cleanupInterval)return;let e=T.cleanupIntervalMs;this.cleanupInterval=setInterval(()=>{this.cleanupIdlePools();},e),this.cleanupInterval.unref();}stopCleanup(){this.cleanupInterval&&(clearInterval(this.cleanupInterval),this.cleanupInterval=null);}async dispose(){if(this.disposed)return;this.disposed=true,this.stopCleanup();let e=[];for(let[t,n]of this.poolCache.entries()){let a=this.tenantIdBySchema.get(t);e.push(this.closePool(n.pool,a??t));}await this.poolCache.clear(),this.tenantIdBySchema.clear(),this.sharedPool&&(e.push(this.closePool(this.sharedPool,"shared")),this.sharedPool=null,this.sharedDb=null),await Promise.all(e);}createPoolEntry(e,t){let n=new Pool({connectionString:this.config.connection.url,...T.poolConfig,...this.config.connection.poolConfig,options:`-c search_path=${t},public`});return n.on("error",async r=>{this.debugLogger.logPoolError(e,r),this.config.hooks?.onError?.(e,r),await this.evictPool(e,"error");}),{db:drizzle(n,{schema:this.config.schemas.tenant}),pool:n,lastAccess:Date.now(),schemaName:t}}disposePoolEntry(e,t){let n=this.tenantIdBySchema.get(t);this.tenantIdBySchema.delete(t),n&&this.debugLogger.logPoolEvicted(n,t,"lru_eviction"),this.closePool(e.pool,n??t).then(()=>{n&&this.config.hooks?.onPoolEvicted?.(n);});}async closePool(e,t){try{await e.end();}catch(n){this.config.hooks?.onError?.(t,n);}}async cleanupIdlePools(){let e=await this.poolCache.evictExpired();for(let t of e){let n=this.tenantIdBySchema.get(t);n&&(this.debugLogger.logPoolEvicted(n,t,"ttl_expired"),this.tenantIdBySchema.delete(t));}}ensureNotDisposed(){if(this.disposed)throw new Error("[drizzle-multitenant] TenantManager has been disposed")}};function Re(c){let e=new k(c);return e.startCleanup(),{getDb(t){return e.getDb(t)},async getDbAsync(t){return e.getDbAsync(t)},getSharedDb(){return e.getSharedDb()},async getSharedDbAsync(){return e.getSharedDbAsync()},getSchemaName(t){return e.getSchemaName(t)},hasPool(t){return e.hasPool(t)},getPoolCount(){return e.getPoolCount()},getActiveTenantIds(){return e.getActiveTenantIds()},getRetryConfig(){return e.getRetryConfig()},async evictPool(t){await e.evictPool(t);},async warmup(t,n){return e.warmup(t,n)},async healthCheck(t){return e.healthCheck(t)},getMetrics(){return 
e.getMetrics()},async dispose(){await e.dispose();}}}function De(c){let e=new AsyncLocalStorage;function t(){return e.getStore()}function n(){let m=t();if(!m)throw new Error("[drizzle-multitenant] No tenant context found. Make sure you are calling this within runWithTenant().");return m}function a(){return n().tenantId}function r(){let m=a();return c.getDb(m)}function s(){return c.getSharedDb()}function i(){return t()!==void 0}function o(m,l){if(!m.tenantId)throw new Error("[drizzle-multitenant] tenantId is required in context");return e.run(m,l)}return {runWithTenant:o,getTenant:n,getTenantOrNull:t,getTenantId:a,getTenantDb:r,getSharedDb:s,isInTenantContext:i}}async function Y(c,e,t){if(!(await c.query(`SELECT EXISTS (
SELECT 1 FROM information_schema.tables
WHERE table_schema = $1 AND table_name = $2
) as exists`,[e,t])).rows[0]?.exists)return null;let a=await c.query(`SELECT column_name, data_type
FROM information_schema.columns
WHERE table_schema = $1 AND table_name = $2`,[e,t]),r=new Map(a.rows.map(s=>[s.column_name,s.data_type]));return r.has("name")?{format:"name",tableName:t,columns:{identifier:"name",timestamp:r.has("applied_at")?"applied_at":"created_at",timestampType:"timestamp"}}:r.has("hash")?r.get("created_at")==="bigint"?{format:"drizzle-kit",tableName:t,columns:{identifier:"hash",timestamp:"created_at",timestampType:"bigint"}}:{format:"hash",tableName:t,columns:{identifier:"hash",timestamp:"created_at",timestampType:"timestamp"}}:null}function _(c,e="__drizzle_migrations"){switch(c){case "name":return {format:"name",tableName:e,columns:{identifier:"name",timestamp:"applied_at",timestampType:"timestamp"}};case "hash":return {format:"hash",tableName:e,columns:{identifier:"hash",timestamp:"created_at",timestampType:"timestamp"}};case "drizzle-kit":return {format:"drizzle-kit",tableName:e,columns:{identifier:"hash",timestamp:"created_at",timestampType:"bigint"}}}}var xe="__drizzle_migrations",O=class{constructor(e,t){this.config=e;this.migrationsTable=t??xe;}migrationsTable;getSchemaName(e){return this.config.isolation.schemaNameTemplate(e)}async createPool(e){return new Pool({connectionString:this.config.connection.url,...this.config.connection.poolConfig,options:`-c search_path="${e}",public`})}async createRootPool(){return new Pool({connectionString:this.config.connection.url,...this.config.connection.poolConfig})}async createSchema(e){let t=this.getSchemaName(e),n=await this.createRootPool();try{await n.query(`CREATE SCHEMA IF NOT EXISTS "${t}"`);}finally{await n.end();}}async dropSchema(e,t={}){let{cascade:n=true}=t,a=this.getSchemaName(e),r=await this.createRootPool();try{let s=n?"CASCADE":"RESTRICT";await r.query(`DROP SCHEMA IF EXISTS "${a}" ${s}`);}finally{await r.end();}}async schemaExists(e){let t=this.getSchemaName(e),n=await this.createRootPool();try{let a=await n.query("SELECT 1 FROM information_schema.schemata WHERE schema_name = $1",[t]);return a.rowCount!==null&&a.rowCount>0}finally{await n.end();}}async listSchemas(e){let t=await this.createRootPool();try{let n=e?"SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE $1 ORDER BY schema_name":"SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema', 'pg_toast') ORDER BY schema_name";return (await t.query(n,e?[e]:[])).rows.map(r=>r.schema_name)}finally{await t.end();}}async ensureMigrationsTable(e,t,n){let{identifier:a,timestamp:r,timestampType:s}=n.columns,i=a==="name"?"name VARCHAR(255) NOT NULL UNIQUE":"hash TEXT NOT NULL",o=s==="bigint"?`${r} BIGINT NOT NULL`:`${r} TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP`;await e.query(`
CREATE TABLE IF NOT EXISTS "${t}"."${n.tableName}" (
id SERIAL PRIMARY KEY,
${i},
${o}
)
`);}async migrationsTableExists(e,t){let n=await e.query(`SELECT 1 FROM information_schema.tables
WHERE table_schema = $1 AND table_name = $2`,[t,this.migrationsTable]);return n.rowCount!==null&&n.rowCount>0}getMigrationsTableName(){return this.migrationsTable}};async function ee(c,e,t){return (await c.query(`SELECT
column_name,
data_type,
udt_name,
is_nullable,
column_default,
character_maximum_length,
numeric_precision,
numeric_scale,
ordinal_position
FROM information_schema.columns
WHERE table_schema = $1 AND table_name = $2
ORDER BY ordinal_position`,[e,t])).rows.map(a=>({name:a.column_name,dataType:a.data_type,udtName:a.udt_name,isNullable:a.is_nullable==="YES",columnDefault:a.column_default,characterMaximumLength:a.character_maximum_length,numericPrecision:a.numeric_precision,numericScale:a.numeric_scale,ordinalPosition:a.ordinal_position}))}function Z(c){return c===null?null:c.replace(/^'(.+)'::.+$/,"$1").replace(/^(.+)::.+$/,"$1").trim()}function te(c,e){let t=[],n=new Map(c.map(r=>[r.name,r])),a=new Map(e.map(r=>[r.name,r]));for(let r of c){let s=a.get(r.name);if(!s){t.push({column:r.name,type:"missing",expected:r.dataType,description:`Column "${r.name}" (${r.dataType}) is missing`});continue}r.udtName!==s.udtName&&t.push({column:r.name,type:"type_mismatch",expected:r.udtName,actual:s.udtName,description:`Column "${r.name}" type mismatch: expected "${r.udtName}", got "${s.udtName}"`}),r.isNullable!==s.isNullable&&t.push({column:r.name,type:"nullable_mismatch",expected:r.isNullable,actual:s.isNullable,description:`Column "${r.name}" nullable mismatch: expected ${r.isNullable?"NULL":"NOT NULL"}, got ${s.isNullable?"NULL":"NOT NULL"}`});let i=Z(r.columnDefault),o=Z(s.columnDefault);i!==o&&t.push({column:r.name,type:"default_mismatch",expected:r.columnDefault,actual:s.columnDefault,description:`Column "${r.name}" default mismatch: expected "${r.columnDefault??"none"}", got "${s.columnDefault??"none"}"`});}for(let r of e)n.has(r.name)||t.push({column:r.name,type:"extra",actual:r.dataType,description:`Extra column "${r.name}" (${r.dataType}) not in reference`});return t}async function ne(c,e,t){let n=await c.query(`SELECT indexname, indexdef
|
|
26
|
+
FROM pg_indexes
|
|
27
|
+
WHERE schemaname = $1 AND tablename = $2
|
|
28
|
+
ORDER BY indexname`,[e,t]),a=await c.query(`SELECT
|
|
29
|
+
i.relname as indexname,
|
|
30
|
+
a.attname as column_name,
|
|
31
|
+
ix.indisunique as is_unique,
|
|
32
|
+
ix.indisprimary as is_primary
|
|
33
|
+
FROM pg_class t
|
|
34
|
+
JOIN pg_index ix ON t.oid = ix.indrelid
|
|
35
|
+
JOIN pg_class i ON i.oid = ix.indexrelid
|
|
36
|
+
JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(ix.indkey)
|
|
37
|
+
JOIN pg_namespace n ON n.oid = t.relnamespace
|
|
38
|
+
WHERE n.nspname = $1 AND t.relname = $2
|
|
39
|
+
ORDER BY i.relname, a.attnum`,[e,t]),r=new Map;for(let s of a.rows){let i=r.get(s.indexname);i?i.columns.push(s.column_name):r.set(s.indexname,{columns:[s.column_name],isUnique:s.is_unique,isPrimary:s.is_primary});}return n.rows.map(s=>{let i=r.get(s.indexname);return {name:s.indexname,columns:i?.columns??[],isUnique:i?.isUnique??false,isPrimary:i?.isPrimary??false,definition:s.indexdef}})}function re(c,e){let t=[],n=new Map(c.map(r=>[r.name,r])),a=new Map(e.map(r=>[r.name,r]));for(let r of c){let s=a.get(r.name);if(!s){t.push({index:r.name,type:"missing",expected:r.definition,description:`Index "${r.name}" is missing`});continue}let i=r.columns.sort().join(","),o=s.columns.sort().join(",");(i!==o||r.isUnique!==s.isUnique)&&t.push({index:r.name,type:"definition_mismatch",expected:r.definition,actual:s.definition,description:`Index "${r.name}" definition differs`});}for(let r of e)n.has(r.name)||t.push({index:r.name,type:"extra",actual:r.definition,description:`Extra index "${r.name}" not in reference`});return t}async function ae(c,e,t){let n=await c.query(`SELECT
|
|
40
|
+
tc.constraint_name,
|
|
41
|
+
tc.constraint_type,
|
|
42
|
+
kcu.column_name,
|
|
43
|
+
ccu.table_schema as foreign_table_schema,
|
|
44
|
+
ccu.table_name as foreign_table_name,
|
|
45
|
+
ccu.column_name as foreign_column_name,
|
|
46
|
+
cc.check_clause
|
|
47
|
+
FROM information_schema.table_constraints tc
|
|
48
|
+
LEFT JOIN information_schema.key_column_usage kcu
|
|
49
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
50
|
+
AND tc.table_schema = kcu.table_schema
|
|
51
|
+
LEFT JOIN information_schema.constraint_column_usage ccu
|
|
52
|
+
ON tc.constraint_name = ccu.constraint_name
|
|
53
|
+
AND tc.constraint_type = 'FOREIGN KEY'
|
|
54
|
+
LEFT JOIN information_schema.check_constraints cc
|
|
55
|
+
ON tc.constraint_name = cc.constraint_name
|
|
56
|
+
AND tc.constraint_type = 'CHECK'
|
|
57
|
+
WHERE tc.table_schema = $1 AND tc.table_name = $2
|
|
58
|
+
ORDER BY tc.constraint_name, kcu.ordinal_position`,[e,t]),a=new Map;for(let r of n.rows){let s=a.get(r.constraint_name);if(s)r.column_name&&!s.columns.includes(r.column_name)&&s.columns.push(r.column_name),r.foreign_column_name&&s.foreignColumns&&!s.foreignColumns.includes(r.foreign_column_name)&&s.foreignColumns.push(r.foreign_column_name);else {let i={name:r.constraint_name,type:r.constraint_type,columns:r.column_name?[r.column_name]:[]};r.foreign_table_name&&(i.foreignTable=r.foreign_table_name),r.foreign_column_name&&(i.foreignColumns=[r.foreign_column_name]),r.check_clause&&(i.checkExpression=r.check_clause),a.set(r.constraint_name,i);}}return Array.from(a.values())}function se(c,e){let t=[],n=new Map(c.map(r=>[r.name,r])),a=new Map(e.map(r=>[r.name,r]));for(let r of c){let s=a.get(r.name);if(!s){t.push({constraint:r.name,type:"missing",expected:`${r.type} on (${r.columns.join(", ")})`,description:`Constraint "${r.name}" (${r.type}) is missing`});continue}let i=r.columns.sort().join(","),o=s.columns.sort().join(",");(r.type!==s.type||i!==o)&&t.push({constraint:r.name,type:"definition_mismatch",expected:`${r.type} on (${r.columns.join(", ")})`,actual:`${s.type} on (${s.columns.join(", ")})`,description:`Constraint "${r.name}" definition differs`});}for(let r of e)n.has(r.name)||t.push({constraint:r.name,type:"extra",actual:`${r.type} on (${r.columns.join(", ")})`,description:`Extra constraint "${r.name}" (${r.type}) not in reference`});return t}var Ee="__drizzle_migrations",$=class{constructor(e,t,n){this.tenantConfig=e;this.schemaManager=t;this.driftConfig=n;this.migrationsTable=n.migrationsTable??Ee;}migrationsTable;getSchemaName(e){return this.tenantConfig.isolation.schemaNameTemplate(e)}async createPool(e){return this.schemaManager.createPool(e)}async detectDrift(e={}){let t=Date.now(),{concurrency:n=10,includeIndexes:a=true,includeConstraints:r=true,excludeTables:s=[this.migrationsTable],onProgress:i}=e,o=e.tenantIds??await this.driftConfig.tenantDiscovery();if(o.length===0)return {referenceTenant:"",total:0,noDrift:0,withDrift:0,error:0,details:[],timestamp:new Date().toISOString(),durationMs:Date.now()-t};let m=e.referenceTenant??o[0];i?.(m,"starting"),i?.(m,"introspecting");let l=await this.introspectSchema(m,{includeIndexes:a,includeConstraints:r,excludeTables:s});if(!l)return {referenceTenant:m,total:o.length,noDrift:0,withDrift:0,error:o.length,details:o.map(d=>({tenantId:d,schemaName:this.getSchemaName(d),hasDrift:false,tables:[],issueCount:0,error:d===m?"Failed to introspect reference tenant":"Reference tenant introspection failed"})),timestamp:new Date().toISOString(),durationMs:Date.now()-t};i?.(m,"completed");let u=o.filter(d=>d!==m),h=[];h.push({tenantId:m,schemaName:l.schemaName,hasDrift:false,tables:[],issueCount:0});for(let d=0;d<u.length;d+=n){let g=u.slice(d,d+n),w=await Promise.all(g.map(async y=>{try{i?.(y,"starting"),i?.(y,"introspecting");let b=await this.introspectSchema(y,{includeIndexes:a,includeConstraints:r,excludeTables:s});if(!b)return i?.(y,"failed"),{tenantId:y,schemaName:this.getSchemaName(y),hasDrift:!1,tables:[],issueCount:0,error:"Failed to introspect schema"};i?.(y,"comparing");let S=this.compareSchemas(l,b,{includeIndexes:a,includeConstraints:r});return i?.(y,"completed"),S}catch(b){return i?.(y,"failed"),{tenantId:y,schemaName:this.getSchemaName(y),hasDrift:false,tables:[],issueCount:0,error:b.message}}}));h.push(...w);}return 
{referenceTenant:m,total:h.length,noDrift:h.filter(d=>!d.hasDrift&&!d.error).length,withDrift:h.filter(d=>d.hasDrift&&!d.error).length,error:h.filter(d=>!!d.error).length,details:h,timestamp:new Date().toISOString(),durationMs:Date.now()-t}}async compareTenant(e,t,n={}){let{includeIndexes:a=true,includeConstraints:r=true,excludeTables:s=[this.migrationsTable]}=n,i=await this.introspectSchema(t,{includeIndexes:a,includeConstraints:r,excludeTables:s});if(!i)return {tenantId:e,schemaName:this.getSchemaName(e),hasDrift:false,tables:[],issueCount:0,error:"Failed to introspect reference tenant"};let o=await this.introspectSchema(e,{includeIndexes:a,includeConstraints:r,excludeTables:s});return o?this.compareSchemas(i,o,{includeIndexes:a,includeConstraints:r}):{tenantId:e,schemaName:this.getSchemaName(e),hasDrift:false,tables:[],issueCount:0,error:"Failed to introspect tenant schema"}}async introspectSchema(e,t={}){let n=this.getSchemaName(e),a=await this.createPool(n);try{let r=await this.introspectTables(a,n,t);return {tenantId:e,schemaName:n,tables:r,introspectedAt:new Date}}catch{return null}finally{await a.end();}}compareSchemas(e,t,n={}){let{includeIndexes:a=true,includeConstraints:r=true}=n,s=[],i=0,o=new Map(e.tables.map(l=>[l.name,l])),m=new Map(t.tables.map(l=>[l.name,l]));for(let l of e.tables){let u=m.get(l.name);if(!u){s.push({table:l.name,status:"missing",columns:l.columns.map(y=>({column:y.name,type:"missing",expected:y.dataType,description:`Column "${y.name}" (${y.dataType}) is missing`})),indexes:[],constraints:[]}),i+=l.columns.length;continue}let h=te(l.columns,u.columns),d=a?re(l.indexes,u.indexes):[],g=r?se(l.constraints,u.constraints):[],w=h.length+d.length+g.length;i+=w,w>0&&s.push({table:l.name,status:"drifted",columns:h,indexes:d,constraints:g});}for(let l of t.tables)o.has(l.name)||(s.push({table:l.name,status:"extra",columns:l.columns.map(u=>({column:u.name,type:"extra",actual:u.dataType,description:`Extra column "${u.name}" (${u.dataType}) not in reference`})),indexes:[],constraints:[]}),i+=l.columns.length);return {tenantId:t.tenantId,schemaName:t.schemaName,hasDrift:i>0,tables:s,issueCount:i}}async introspectTables(e,t,n){let{includeIndexes:a=true,includeConstraints:r=true,excludeTables:s=[]}=n,i=await e.query(`SELECT table_name
|
|
59
|
+
FROM information_schema.tables
|
|
60
|
+
WHERE table_schema = $1
|
|
61
|
+
AND table_type = 'BASE TABLE'
|
|
62
|
+
ORDER BY table_name`,[t]),o=[];for(let m of i.rows){if(s.includes(m.table_name))continue;let l=await ee(e,t,m.table_name),u=a?await ne(e,t,m.table_name):[],h=r?await ae(e,t,m.table_name):[];o.push({name:m.table_name,columns:l,indexes:u,constraints:h});}return o}};var A=class{constructor(e,t){this.config=e;this.deps=t;}async seedTenant(e,t){let n=Date.now(),a=this.deps.schemaNameTemplate(e),r=await this.deps.createPool(a);try{let s=drizzle(r,{schema:this.deps.tenantSchema});return await t(s,e),{tenantId:e,schemaName:a,success:!0,durationMs:Date.now()-n}}catch(s){return {tenantId:e,schemaName:a,success:false,error:s.message,durationMs:Date.now()-n}}finally{await r.end();}}async seedAll(e,t={}){let{concurrency:n=10,onProgress:a,onError:r}=t,s=await this.config.tenantDiscovery(),i=[],o=false;for(let m=0;m<s.length&&!o;m+=n){let l=s.slice(m,m+n),u=await Promise.all(l.map(async h=>{if(o)return this.createSkippedResult(h);try{a?.(h,"starting"),a?.(h,"seeding");let d=await this.seedTenant(h,e);return a?.(h,d.success?"completed":"failed"),d}catch(d){return a?.(h,"failed"),r?.(h,d)==="abort"&&(o=true),this.createErrorResult(h,d)}}));i.push(...u);}if(o){let m=s.slice(i.length);for(let l of m)i.push(this.createSkippedResult(l));}return this.aggregateResults(i)}async seedTenants(e,t,n={}){let{concurrency:a=10,onProgress:r,onError:s}=n,i=[];for(let o=0;o<e.length;o+=a){let m=e.slice(o,o+a),l=await Promise.all(m.map(async u=>{try{r?.(u,"starting"),r?.(u,"seeding");let h=await this.seedTenant(u,t);return r?.(u,h.success?"completed":"failed"),h}catch(h){return r?.(u,"failed"),s?.(u,h),this.createErrorResult(u,h)}}));i.push(...l);}return this.aggregateResults(i)}createSkippedResult(e){return {tenantId:e,schemaName:this.deps.schemaNameTemplate(e),success:false,error:"Skipped due to abort",durationMs:0}}createErrorResult(e,t){return {tenantId:e,schemaName:this.deps.schemaNameTemplate(e),success:false,error:t.message,durationMs:0}}aggregateResults(e){return {total:e.length,succeeded:e.filter(t=>t.success).length,failed:e.filter(t=>!t.success&&t.error!=="Skipped due to abort").length,skipped:e.filter(t=>t.error==="Skipped due to abort").length,details:e}}};var Ne="public",v=class{constructor(e,t){this.config=e;this.deps=t;this.schemaName=e.schemaName??Ne;}schemaName;async seed(e){let t=Date.now(),n=await this.deps.createPool();try{this.config.hooks?.onStart?.();let a=drizzle(n,{schema:this.deps.sharedSchema});return await e(a),this.config.hooks?.onComplete?.(),{schemaName:this.schemaName,success:!0,durationMs:Date.now()-t}}catch(a){return this.config.hooks?.onError?.(a),{schemaName:this.schemaName,success:false,error:a.message,durationMs:Date.now()-t}}finally{await n.end();}}};var F=class{constructor(e,t){this.config=e;this.deps=t;}async getSyncStatus(){let e=await this.config.tenantDiscovery(),t=await this.deps.loadMigrations(),n=[];for(let a of e)n.push(await this.getTenantSyncStatus(a,t));return {total:n.length,inSync:n.filter(a=>a.inSync&&!a.error).length,outOfSync:n.filter(a=>!a.inSync&&!a.error).length,error:n.filter(a=>!!a.error).length,details:n}}async getTenantSyncStatus(e,t){let n=this.deps.schemaNameTemplate(e),a=await this.deps.createPool(n);try{let r=t??await this.deps.loadMigrations(),s=new Set(r.map(g=>g.name)),i=new Set(r.map(g=>g.hash));if(!await this.deps.migrationsTableExists(a,n))return {tenantId:e,schemaName:n,missing:r.map(g=>g.name),orphans:[],inSync:r.length===0,format:null};let m=await this.deps.getOrDetectFormat(a,n),l=await this.getAppliedMigrations(a,n,m),u=new 
Set(l.map(g=>g.identifier)),h=r.filter(g=>!this.isMigrationApplied(g,u,m)).map(g=>g.name),d=l.filter(g=>(m.columns.identifier==="name"||!i.has(g.identifier))&&!s.has(g.identifier)).map(g=>g.identifier);return {tenantId:e,schemaName:n,missing:h,orphans:d,inSync:h.length===0&&d.length===0,format:m.format}}catch(r){return {tenantId:e,schemaName:n,missing:[],orphans:[],inSync:false,format:null,error:r.message}}finally{await a.end();}}async markMissing(e){let t=Date.now(),n=this.deps.schemaNameTemplate(e),a=[],r=await this.deps.createPool(n);try{let s=await this.getTenantSyncStatus(e);if(s.error)return {tenantId:e,schemaName:n,success:!1,markedMigrations:[],removedOrphans:[],error:s.error,durationMs:Date.now()-t};if(s.missing.length===0)return {tenantId:e,schemaName:n,success:!0,markedMigrations:[],removedOrphans:[],durationMs:Date.now()-t};let i=await this.deps.getOrDetectFormat(r,n);await this.deps.ensureMigrationsTable(r,n,i);let o=await this.deps.loadMigrations(),m=new Set(s.missing);for(let l of o)m.has(l.name)&&(await this.recordMigration(r,n,l,i),a.push(l.name));return {tenantId:e,schemaName:n,success:!0,markedMigrations:a,removedOrphans:[],durationMs:Date.now()-t}}catch(s){return {tenantId:e,schemaName:n,success:false,markedMigrations:a,removedOrphans:[],error:s.message,durationMs:Date.now()-t}}finally{await r.end();}}async markAllMissing(e={}){let{concurrency:t=10,onProgress:n,onError:a}=e,r=await this.config.tenantDiscovery(),s=[],i=false;for(let o=0;o<r.length&&!i;o+=t){let m=r.slice(o,o+t),l=await Promise.all(m.map(async u=>{if(i)return this.createSkippedSyncResult(u);try{n?.(u,"starting");let h=await this.markMissing(u);return n?.(u,h.success?"completed":"failed"),h}catch(h){return n?.(u,"failed"),a?.(u,h)==="abort"&&(i=true),this.createErrorSyncResult(u,h)}}));s.push(...l);}return this.aggregateSyncResults(s)}async cleanOrphans(e){let t=Date.now(),n=this.deps.schemaNameTemplate(e),a=[],r=await this.deps.createPool(n);try{let s=await this.getTenantSyncStatus(e);if(s.error)return {tenantId:e,schemaName:n,success:!1,markedMigrations:[],removedOrphans:[],error:s.error,durationMs:Date.now()-t};if(s.orphans.length===0)return {tenantId:e,schemaName:n,success:!0,markedMigrations:[],removedOrphans:[],durationMs:Date.now()-t};let i=await this.deps.getOrDetectFormat(r,n),o=i.columns.identifier;for(let m of s.orphans)await r.query(`DELETE FROM "${n}"."${i.tableName}" WHERE "${o}" = $1`,[m]),a.push(m);return {tenantId:e,schemaName:n,success:!0,markedMigrations:[],removedOrphans:a,durationMs:Date.now()-t}}catch(s){return {tenantId:e,schemaName:n,success:false,markedMigrations:[],removedOrphans:a,error:s.message,durationMs:Date.now()-t}}finally{await r.end();}}async cleanAllOrphans(e={}){let{concurrency:t=10,onProgress:n,onError:a}=e,r=await this.config.tenantDiscovery(),s=[],i=false;for(let o=0;o<r.length&&!i;o+=t){let m=r.slice(o,o+t),l=await Promise.all(m.map(async u=>{if(i)return this.createSkippedSyncResult(u);try{n?.(u,"starting");let h=await this.cleanOrphans(u);return n?.(u,h.success?"completed":"failed"),h}catch(h){return n?.(u,"failed"),a?.(u,h)==="abort"&&(i=true),this.createErrorSyncResult(u,h)}}));s.push(...l);}return this.aggregateSyncResults(s)}async getAppliedMigrations(e,t,n){let a=n.columns.identifier,r=n.columns.timestamp;return (await e.query(`SELECT id, "${a}" as identifier, "${r}" as applied_at
|
|
63
|
+
FROM "${t}"."${n.tableName}"
|
|
64
|
+
ORDER BY id`)).rows.map(i=>{let o=n.columns.timestampType==="bigint"?new Date(Number(i.applied_at)):new Date(i.applied_at);return {identifier:i.identifier,appliedAt:o}})}isMigrationApplied(e,t,n){return n.columns.identifier==="name"?t.has(e.name):t.has(e.hash)||t.has(e.name)}async recordMigration(e,t,n,a){let{identifier:r,timestamp:s,timestampType:i}=a.columns,o=r==="name"?n.name:n.hash,m=i==="bigint"?Date.now():new Date;await e.query(`INSERT INTO "${t}"."${a.tableName}" ("${r}", "${s}") VALUES ($1, $2)`,[o,m]);}createSkippedSyncResult(e){return {tenantId:e,schemaName:this.deps.schemaNameTemplate(e),success:false,markedMigrations:[],removedOrphans:[],error:"Skipped due to abort",durationMs:0}}createErrorSyncResult(e,t){return {tenantId:e,schemaName:this.deps.schemaNameTemplate(e),success:false,markedMigrations:[],removedOrphans:[],error:t.message,durationMs:0}}aggregateSyncResults(e){return {total:e.length,succeeded:e.filter(t=>t.success).length,failed:e.filter(t=>!t.success).length,details:e}}};var L=class{constructor(e,t){this.config=e;this.deps=t;}async migrateTenant(e,t,n={}){let a=Date.now(),r=this.deps.schemaNameTemplate(e),s=[],i=await this.deps.createPool(r);try{await this.config.hooks?.beforeTenant?.(e);let o=await this.deps.getOrDetectFormat(i,r);await this.deps.ensureMigrationsTable(i,r,o);let m=t??await this.deps.loadMigrations(),l=await this.getAppliedMigrations(i,r,o),u=new Set(l.map(g=>g.identifier)),h=m.filter(g=>!this.isMigrationApplied(g,u,o));if(n.dryRun)return {tenantId:e,schemaName:r,success:!0,appliedMigrations:h.map(g=>g.name),durationMs:Date.now()-a,format:o.format};for(let g of h){let w=Date.now();n.onProgress?.(e,"migrating",g.name),await this.config.hooks?.beforeMigration?.(e,g.name),await this.applyMigration(i,r,g,o),await this.config.hooks?.afterMigration?.(e,g.name,Date.now()-w),s.push(g.name);}let d={tenantId:e,schemaName:r,success:!0,appliedMigrations:s,durationMs:Date.now()-a,format:o.format};return await this.config.hooks?.afterTenant?.(e,d),d}catch(o){let m={tenantId:e,schemaName:r,success:false,appliedMigrations:s,error:o.message,durationMs:Date.now()-a};return await this.config.hooks?.afterTenant?.(e,m),m}finally{await i.end();}}async markAsApplied(e,t={}){let n=Date.now(),a=this.deps.schemaNameTemplate(e),r=[],s=await this.deps.createPool(a);try{await this.config.hooks?.beforeTenant?.(e);let i=await this.deps.getOrDetectFormat(s,a);await this.deps.ensureMigrationsTable(s,a,i);let o=await this.deps.loadMigrations(),m=await this.getAppliedMigrations(s,a,i),l=new Set(m.map(d=>d.identifier)),u=o.filter(d=>!this.isMigrationApplied(d,l,i));for(let d of u){let g=Date.now();t.onProgress?.(e,"migrating",d.name),await this.config.hooks?.beforeMigration?.(e,d.name),await this.recordMigration(s,a,d,i),await this.config.hooks?.afterMigration?.(e,d.name,Date.now()-g),r.push(d.name);}let h={tenantId:e,schemaName:a,success:!0,appliedMigrations:r,durationMs:Date.now()-n,format:i.format};return await this.config.hooks?.afterTenant?.(e,h),h}catch(i){let o={tenantId:e,schemaName:a,success:false,appliedMigrations:r,error:i.message,durationMs:Date.now()-n};return await this.config.hooks?.afterTenant?.(e,o),o}finally{await s.end();}}async getTenantStatus(e,t){let n=this.deps.schemaNameTemplate(e),a=await this.deps.createPool(n);try{let r=t??await this.deps.loadMigrations();if(!await this.deps.migrationsTableExists(a,n))return {tenantId:e,schemaName:n,appliedCount:0,pendingCount:r.length,pendingMigrations:r.map(u=>u.name),status:r.length>0?"behind":"ok",format:null};let 
i=await this.deps.getOrDetectFormat(a,n),o=await this.getAppliedMigrations(a,n,i),m=new Set(o.map(u=>u.identifier)),l=r.filter(u=>!this.isMigrationApplied(u,m,i));return {tenantId:e,schemaName:n,appliedCount:o.length,pendingCount:l.length,pendingMigrations:l.map(u=>u.name),status:l.length>0?"behind":"ok",format:i.format}}catch(r){return {tenantId:e,schemaName:n,appliedCount:0,pendingCount:0,pendingMigrations:[],status:"error",error:r.message,format:null}}finally{await a.end();}}async executeMigration(e,t,n,a,r){r?.markOnly?(r.onProgress?.("recording"),await this.recordMigration(e,t,n,a)):(r?.onProgress?.("applying"),await this.applyMigration(e,t,n,a));}async executeMigrations(e,t,n,a,r){let s=[];for(let i of n)await this.executeMigration(e,t,i,a,r),s.push(i.name);return s}async recordMigration(e,t,n,a){let{identifier:r,timestamp:s,timestampType:i}=a.columns,o=r==="name"?n.name:n.hash,m=i==="bigint"?Date.now():new Date;await e.query(`INSERT INTO "${t}"."${a.tableName}" ("${r}", "${s}") VALUES ($1, $2)`,[o,m]);}async getAppliedMigrations(e,t,n){let a=n.columns.identifier,r=n.columns.timestamp;return (await e.query(`SELECT id, "${a}" as identifier, "${r}" as applied_at
|
|
65
|
+
FROM "${t}"."${n.tableName}"
|
|
66
|
+
ORDER BY id`)).rows.map(i=>{let o=n.columns.timestampType==="bigint"?new Date(Number(i.applied_at)):new Date(i.applied_at);return {identifier:i.identifier,...n.columns.identifier==="name"?{name:i.identifier}:{hash:i.identifier},appliedAt:o}})}async getPendingMigrations(e,t,n,a){let r=await this.getAppliedMigrations(e,t,a),s=new Set(r.map(i=>i.identifier));return n.filter(i=>!this.isMigrationApplied(i,s,a))}isMigrationApplied(e,t,n){return n.columns.identifier==="name"?t.has(e.name):t.has(e.hash)||t.has(e.name)}async applyMigration(e,t,n,a){let r=await e.connect();try{await r.query("BEGIN"),await r.query(n.sql);let{identifier:s,timestamp:i,timestampType:o}=a.columns,m=s==="name"?n.name:n.hash,l=o==="bigint"?Date.now():new Date;await r.query(`INSERT INTO "${t}"."${a.tableName}" ("${s}", "${i}") VALUES ($1, $2)`,[m,l]),await r.query("COMMIT");}catch(s){throw await r.query("ROLLBACK"),s}finally{r.release();}}};var I=class{constructor(e,t,n){this.config=e;this.executor=t;this.loadMigrations=n;}async migrateAll(e={}){let{concurrency:t=10,onProgress:n,onError:a,dryRun:r=false}=e,s=await this.config.tenantDiscovery(),i=await this.loadMigrations(),o=[],m=false;for(let l=0;l<s.length&&!m;l+=t){let u=s.slice(l,l+t),h=await Promise.all(u.map(async d=>{if(m)return this.createSkippedResult(d);try{n?.(d,"starting");let g=await this.executor.migrateTenant(d,i,{dryRun:r,onProgress:n});return n?.(d,g.success?"completed":"failed"),g}catch(g){return n?.(d,"failed"),a?.(d,g)==="abort"&&(m=true),this.createErrorResult(d,g)}}));o.push(...h);}if(m){let l=s.slice(o.length);for(let u of l)o.push(this.createSkippedResult(u));}return this.aggregateResults(o)}async migrateTenants(e,t={}){let n=await this.loadMigrations(),a=[],{concurrency:r=10,onProgress:s,onError:i,dryRun:o=false}=t;for(let m=0;m<e.length;m+=r){let l=e.slice(m,m+r),u=await Promise.all(l.map(async h=>{try{s?.(h,"starting");let d=await this.executor.migrateTenant(h,n,{dryRun:o,onProgress:s});return s?.(h,d.success?"completed":"failed"),d}catch(d){return s?.(h,"failed"),i?.(h,d),this.createErrorResult(h,d)}}));a.push(...u);}return this.aggregateResults(a)}async markAllAsApplied(e={}){let{concurrency:t=10,onProgress:n,onError:a}=e,r=await this.config.tenantDiscovery(),s=[],i=false;for(let o=0;o<r.length&&!i;o+=t){let m=r.slice(o,o+t),l=await Promise.all(m.map(async u=>{if(i)return this.createSkippedResult(u);try{n?.(u,"starting");let h=await this.executor.markAsApplied(u,{onProgress:n});return n?.(u,h.success?"completed":"failed"),h}catch(h){return n?.(u,"failed"),a?.(u,h)==="abort"&&(i=true),this.createErrorResult(u,h)}}));s.push(...l);}if(i){let o=r.slice(s.length);for(let m of o)s.push(this.createSkippedResult(m));}return this.aggregateResults(s)}async getStatus(){let e=await this.config.tenantDiscovery(),t=await this.loadMigrations(),n=[];for(let a of e)n.push(await this.executor.getTenantStatus(a,t));return n}createSkippedResult(e){return {tenantId:e,schemaName:"",success:false,appliedMigrations:[],error:"Skipped due to abort",durationMs:0}}createErrorResult(e,t){return {tenantId:e,schemaName:"",success:false,appliedMigrations:[],error:t.message,durationMs:0}}aggregateResults(e){return {total:e.length,succeeded:e.filter(t=>t.success).length,failed:e.filter(t=>!t.success&&t.error!=="Skipped due to abort").length,skipped:e.filter(t=>t.error==="Skipped due to abort").length,details:e}}};async function ie(c,e,t=[]){let n=t.length>0?t.map((r,s)=>`$${s+2}`).join(", "):"''::text";return (await c.query(`SELECT table_name
|
|
67
|
+
FROM information_schema.tables
|
|
68
|
+
WHERE table_schema = $1
|
|
69
|
+
AND table_type = 'BASE TABLE'
|
|
70
|
+
AND table_name NOT IN (${n})
|
|
71
|
+
ORDER BY table_name`,[e,...t])).rows.map(r=>r.table_name)}async function ke(c,e,t){return (await c.query(`SELECT
|
|
72
|
+
column_name,
|
|
73
|
+
data_type,
|
|
74
|
+
udt_name,
|
|
75
|
+
is_nullable,
|
|
76
|
+
column_default,
|
|
77
|
+
character_maximum_length,
|
|
78
|
+
numeric_precision,
|
|
79
|
+
numeric_scale
|
|
80
|
+
FROM information_schema.columns
|
|
81
|
+
WHERE table_schema = $1 AND table_name = $2
|
|
82
|
+
ORDER BY ordinal_position`,[e,t])).rows.map(a=>({columnName:a.column_name,dataType:a.data_type,udtName:a.udt_name,isNullable:a.is_nullable==="YES",columnDefault:a.column_default,characterMaximumLength:a.character_maximum_length,numericPrecision:a.numeric_precision,numericScale:a.numeric_scale}))}async function Oe(c,e,t){let a=(await ke(c,e,t)).map(r=>{let s=r.udtName;r.dataType==="character varying"&&r.characterMaximumLength?s=`varchar(${r.characterMaximumLength})`:r.dataType==="character"&&r.characterMaximumLength?s=`char(${r.characterMaximumLength})`:r.dataType==="numeric"&&r.numericPrecision?s=`numeric(${r.numericPrecision}${r.numericScale?`, ${r.numericScale}`:""})`:r.dataType==="ARRAY"&&(s=r.udtName.replace(/^_/,"")+"[]");let i=`"${r.columnName}" ${s}`;if(r.isNullable||(i+=" NOT NULL"),r.columnDefault){let o=r.columnDefault.replace(new RegExp(`"?${e}"?\\.`,"g"),"");i+=` DEFAULT ${o}`;}return i});return `CREATE TABLE IF NOT EXISTS "${t}" (
|
|
83
|
+
${a.join(`,
|
|
84
|
+
`)}
|
|
85
|
+
)`}async function $e(c,e,t,n){return (await c.query(`SELECT indexname, indexdef
|
|
86
|
+
FROM pg_indexes
|
|
87
|
+
WHERE schemaname = $1 AND tablename = $2
|
|
88
|
+
AND indexname NOT LIKE '%_pkey'`,[e,n])).rows.map(r=>r.indexdef.replace(new RegExp(`ON "${e}"\\."`,"g"),`ON "${t}"."`).replace(new RegExp(`"${e}"\\."`,"g"),`"${t}"."`))}async function Ae(c,e,t){let n=await c.query(`SELECT
|
|
89
|
+
tc.constraint_name,
|
|
90
|
+
kcu.column_name
|
|
91
|
+
FROM information_schema.table_constraints tc
|
|
92
|
+
JOIN information_schema.key_column_usage kcu
|
|
93
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
94
|
+
AND tc.table_schema = kcu.table_schema
|
|
95
|
+
WHERE tc.table_schema = $1
|
|
96
|
+
AND tc.table_name = $2
|
|
97
|
+
AND tc.constraint_type = 'PRIMARY KEY'
|
|
98
|
+
ORDER BY kcu.ordinal_position`,[e,t]);if(n.rows.length===0)return null;let a=n.rows.map(s=>`"${s.column_name}"`).join(", "),r=n.rows[0].constraint_name;return `ALTER TABLE "${t}" ADD CONSTRAINT "${r}" PRIMARY KEY (${a})`}async function ve(c,e,t,n){let a=await c.query(`SELECT
|
|
99
|
+
tc.constraint_name,
|
|
100
|
+
kcu.column_name,
|
|
101
|
+
ccu.table_name as foreign_table_name,
|
|
102
|
+
ccu.column_name as foreign_column_name,
|
|
103
|
+
rc.update_rule,
|
|
104
|
+
rc.delete_rule
|
|
105
|
+
FROM information_schema.table_constraints tc
|
|
106
|
+
JOIN information_schema.key_column_usage kcu
|
|
107
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
108
|
+
AND tc.table_schema = kcu.table_schema
|
|
109
|
+
JOIN information_schema.constraint_column_usage ccu
|
|
110
|
+
ON tc.constraint_name = ccu.constraint_name
|
|
111
|
+
AND tc.table_schema = ccu.table_schema
|
|
112
|
+
JOIN information_schema.referential_constraints rc
|
|
113
|
+
ON tc.constraint_name = rc.constraint_name
|
|
114
|
+
AND tc.table_schema = rc.constraint_schema
|
|
115
|
+
WHERE tc.table_schema = $1
|
|
116
|
+
AND tc.table_name = $2
|
|
117
|
+
AND tc.constraint_type = 'FOREIGN KEY'
|
|
118
|
+
ORDER BY tc.constraint_name, kcu.ordinal_position`,[e,n]),r=new Map;for(let s of a.rows){let i=r.get(s.constraint_name);i?(i.columns.push(s.column_name),i.foreignColumns.push(s.foreign_column_name)):r.set(s.constraint_name,{columns:[s.column_name],foreignTable:s.foreign_table_name,foreignColumns:[s.foreign_column_name],updateRule:s.update_rule,deleteRule:s.delete_rule});}return Array.from(r.entries()).map(([s,i])=>{let o=i.columns.map(u=>`"${u}"`).join(", "),m=i.foreignColumns.map(u=>`"${u}"`).join(", "),l=`ALTER TABLE "${t}"."${n}" `;return l+=`ADD CONSTRAINT "${s}" FOREIGN KEY (${o}) `,l+=`REFERENCES "${t}"."${i.foreignTable}" (${m})`,i.updateRule!=="NO ACTION"&&(l+=` ON UPDATE ${i.updateRule}`),i.deleteRule!=="NO ACTION"&&(l+=` ON DELETE ${i.deleteRule}`),l})}async function Fe(c,e,t){let n=await c.query(`SELECT
|
|
119
|
+
tc.constraint_name,
|
|
120
|
+
kcu.column_name
|
|
121
|
+
FROM information_schema.table_constraints tc
|
|
122
|
+
JOIN information_schema.key_column_usage kcu
|
|
123
|
+
ON tc.constraint_name = kcu.constraint_name
|
|
124
|
+
AND tc.table_schema = kcu.table_schema
|
|
125
|
+
WHERE tc.table_schema = $1
|
|
126
|
+
AND tc.table_name = $2
|
|
127
|
+
AND tc.constraint_type = 'UNIQUE'
|
|
128
|
+
ORDER BY tc.constraint_name, kcu.ordinal_position`,[e,t]),a=new Map;for(let r of n.rows){let s=a.get(r.constraint_name);s?s.push(r.column_name):a.set(r.constraint_name,[r.column_name]);}return Array.from(a.entries()).map(([r,s])=>{let i=s.map(o=>`"${o}"`).join(", ");return `ALTER TABLE "${t}" ADD CONSTRAINT "${r}" UNIQUE (${i})`})}async function Le(c,e,t){return (await c.query(`SELECT
|
|
129
|
+
tc.constraint_name,
|
|
130
|
+
cc.check_clause
|
|
131
|
+
FROM information_schema.table_constraints tc
|
|
132
|
+
JOIN information_schema.check_constraints cc
|
|
133
|
+
ON tc.constraint_name = cc.constraint_name
|
|
134
|
+
AND tc.constraint_schema = cc.constraint_schema
|
|
135
|
+
WHERE tc.table_schema = $1
|
|
136
|
+
AND tc.table_name = $2
|
|
137
|
+
AND tc.constraint_type = 'CHECK'
|
|
138
|
+
AND tc.constraint_name NOT LIKE '%_not_null'`,[e,t])).rows.map(a=>`ALTER TABLE "${t}" ADD CONSTRAINT "${a.constraint_name}" CHECK (${a.check_clause})`)}async function Ie(c,e,t){let n=await c.query(`SELECT count(*) FROM "${e}"."${t}"`);return parseInt(n.rows[0].count,10)}async function oe(c,e,t,n){let[a,r,s,i,o,m,l]=await Promise.all([Oe(c,e,n),$e(c,e,t,n),Ae(c,e,n),Fe(c,e,n),Le(c,e,n),ve(c,e,t,n),Ie(c,e,n)]);return {name:n,createDdl:a,indexDdls:r,constraintDdls:[...s?[s]:[],...i,...o,...m],rowCount:l}}async function He(c,e,t){return (await c.query(`SELECT column_name
|
|
139
|
+
FROM information_schema.columns
|
|
140
|
+
WHERE table_schema = $1 AND table_name = $2
|
|
141
|
+
ORDER BY ordinal_position`,[e,t])).rows.map(a=>a.column_name)}function qe(c){return c===null?"NULL":typeof c=="string"?`'${c.replace(/'/g,"''")}'`:typeof c=="boolean"?c?"TRUE":"FALSE":String(c)}async function je(c,e,t,n,a){let r=await He(c,e,n);if(r.length===0)return 0;let s=a?.[n]??{},i=r.map(u=>{if(u in s){let h=s[u];return `${qe(h)} as "${u}"`}return `"${u}"`}),o=r.map(u=>`"${u}"`).join(", "),m=i.join(", ");return (await c.query(`INSERT INTO "${t}"."${n}" (${o})
|
|
142
|
+
SELECT ${m}
|
|
143
|
+
FROM "${e}"."${n}"`)).rowCount??0}async function ze(c,e,t){let n=await c.query(`SELECT DISTINCT
|
|
144
|
+
tc.table_name,
|
|
145
|
+
ccu.table_name as foreign_table_name
|
|
146
|
+
FROM information_schema.table_constraints tc
|
|
147
|
+
JOIN information_schema.constraint_column_usage ccu
|
|
148
|
+
ON tc.constraint_name = ccu.constraint_name
|
|
149
|
+
AND tc.table_schema = ccu.table_schema
|
|
150
|
+
WHERE tc.table_schema = $1
|
|
151
|
+
AND tc.constraint_type = 'FOREIGN KEY'
|
|
152
|
+
AND tc.table_name != ccu.table_name`,[e]),a=new Map,r=new Set(t);for(let l of t)a.set(l,new Set);for(let l of n.rows)r.has(l.table_name)&&r.has(l.foreign_table_name)&&a.get(l.table_name).add(l.foreign_table_name);let s=[],i=new Map,o=[];for(let l of t)i.set(l,0);for(let[l,u]of a)for(let h of u)i.set(h,(i.get(h)??0)+1);for(let[l,u]of i)u===0&&o.push(l);for(;o.length>0;){let l=o.shift();s.push(l);for(let[u,h]of a)if(h.has(l)){h.delete(l);let d=(i.get(u)??0)-1;i.set(u,d),d===0&&o.push(u);}}let m=t.filter(l=>!s.includes(l));return [...s,...m]}async function ce(c,e,t,n,a,r){let s=0,i=await ze(c,e,n);await c.query("SET session_replication_role = replica");try{for(let o=0;o<i.length;o++){let m=i[o];r?.("copying_data",{table:m,progress:o+1,total:i.length});let l=await je(c,e,t,m,a);s+=l;}}finally{await c.query("SET session_replication_role = DEFAULT");}return s}var Be="__drizzle_migrations",H=class{constructor(e,t){this.deps=t;this.migrationsTable=e.migrationsTable??Be;}migrationsTable;async cloneTenant(e,t,n={}){let a=Date.now(),{includeData:r=false,anonymize:s,excludeTables:i=[],onProgress:o}=n,m=this.deps.schemaNameTemplate(e),l=this.deps.schemaNameTemplate(t),u=[this.migrationsTable,...i],h=null,d=null;try{if(o?.("starting"),!await this.deps.schemaExists(e))return this.createErrorResult(e,t,l,`Source tenant "${e}" does not exist`,a);if(await this.deps.schemaExists(t))return this.createErrorResult(e,t,l,`Target tenant "${t}" already exists`,a);o?.("introspecting"),h=await this.deps.createPool(m);let y=await ie(h,m,u);if(y.length===0)return o?.("creating_schema"),await this.deps.createSchema(t),o?.("completed"),{sourceTenant:e,targetTenant:t,targetSchema:l,success:!0,tables:[],durationMs:Date.now()-a};let b=await Promise.all(y.map(M=>oe(h,m,l,M)));await h.end(),h=null,o?.("creating_schema"),await this.deps.createSchema(t),d=await this.deps.createRootPool(),o?.("creating_tables");for(let M of b)await d.query(`SET search_path TO "${l}"; ${M.createDdl}`);o?.("creating_constraints");for(let M of b)for(let D of M.constraintDdls.filter(C=>!C.includes("FOREIGN KEY")))try{await d.query(`SET search_path TO "${l}"; ${D}`);}catch{}o?.("creating_indexes");for(let M of b)for(let D of M.indexDdls)try{await d.query(D);}catch{}let S=0;r&&(o?.("copying_data"),S=await ce(d,m,l,y,s?.enabled?s.rules:void 0,o));for(let M of b)for(let D of M.constraintDdls.filter(C=>C.includes("FOREIGN KEY")))try{await d.query(D);}catch{}o?.("completed");let N={sourceTenant:e,targetTenant:t,targetSchema:l,success:!0,tables:y,durationMs:Date.now()-a};return r&&(N.rowsCopied=S),N}catch(g){return n.onError?.(g),o?.("failed"),this.createErrorResult(e,t,l,g.message,a)}finally{h&&await h.end().catch(()=>{}),d&&await d.end().catch(()=>{});}}createErrorResult(e,t,n,a,r){return {sourceTenant:e,targetTenant:t,targetSchema:n,success:false,error:a,tables:[],durationMs:Date.now()-r}}};var We="public",q=class{constructor(e,t){this.config=e;this.deps=t;this.schemaName=e.schemaName??We;}schemaName;async migrate(e={}){let t=Date.now(),n=[],a=await this.deps.createPool();try{e.onProgress?.("starting"),await this.config.hooks?.beforeMigration?.();let r=await this.deps.getOrDetectFormat(a,this.schemaName);await this.deps.ensureMigrationsTable(a,this.schemaName,r);let s=await this.deps.loadMigrations(),i=await this.getAppliedMigrations(a,r),o=new Set(i.map(l=>l.identifier)),m=s.filter(l=>!this.isMigrationApplied(l,o,r));if(e.dryRun)return 
{schemaName:this.schemaName,success:!0,appliedMigrations:m.map(l=>l.name),durationMs:Date.now()-t,format:r.format};for(let l of m){let u=Date.now();e.onProgress?.("migrating",l.name),await this.applyMigration(a,l,r),await this.config.hooks?.afterMigration?.(l.name,Date.now()-u),n.push(l.name);}return e.onProgress?.("completed"),{schemaName:this.schemaName,success:!0,appliedMigrations:n,durationMs:Date.now()-t,format:r.format}}catch(r){return e.onProgress?.("failed"),{schemaName:this.schemaName,success:false,appliedMigrations:n,error:r.message,durationMs:Date.now()-t}}finally{await a.end();}}async markAsApplied(e={}){let t=Date.now(),n=[],a=await this.deps.createPool();try{e.onProgress?.("starting");let r=await this.deps.getOrDetectFormat(a,this.schemaName);await this.deps.ensureMigrationsTable(a,this.schemaName,r);let s=await this.deps.loadMigrations(),i=await this.getAppliedMigrations(a,r),o=new Set(i.map(l=>l.identifier)),m=s.filter(l=>!this.isMigrationApplied(l,o,r));for(let l of m)e.onProgress?.("migrating",l.name),await this.recordMigration(a,l,r),n.push(l.name);return e.onProgress?.("completed"),{schemaName:this.schemaName,success:!0,appliedMigrations:n,durationMs:Date.now()-t,format:r.format}}catch(r){return e.onProgress?.("failed"),{schemaName:this.schemaName,success:false,appliedMigrations:n,error:r.message,durationMs:Date.now()-t}}finally{await a.end();}}async getStatus(){let e=await this.deps.createPool();try{let t=await this.deps.loadMigrations();if(!await this.deps.migrationsTableExists(e,this.schemaName))return {schemaName:this.schemaName,appliedCount:0,pendingCount:t.length,pendingMigrations:t.map(o=>o.name),status:t.length>0?"behind":"ok",format:null};let a=await this.deps.getOrDetectFormat(e,this.schemaName),r=await this.getAppliedMigrations(e,a),s=new Set(r.map(o=>o.identifier)),i=t.filter(o=>!this.isMigrationApplied(o,s,a));return {schemaName:this.schemaName,appliedCount:r.length,pendingCount:i.length,pendingMigrations:i.map(o=>o.name),status:i.length>0?"behind":"ok",format:a.format}}catch(t){return {schemaName:this.schemaName,appliedCount:0,pendingCount:0,pendingMigrations:[],status:"error",error:t.message,format:null}}finally{await e.end();}}async getAppliedMigrations(e,t){let n=t.columns.identifier,a=t.columns.timestamp;return (await e.query(`SELECT id, "${n}" as identifier, "${a}" as applied_at
|
|
153
|
+
FROM "${this.schemaName}"."${t.tableName}"
|
|
154
|
+
ORDER BY id`)).rows.map(s=>{let i=t.columns.timestampType==="bigint"?new Date(Number(s.applied_at)):new Date(s.applied_at);return {identifier:s.identifier,...t.columns.identifier==="name"?{name:s.identifier}:{hash:s.identifier},appliedAt:i}})}isMigrationApplied(e,t,n){return n.columns.identifier==="name"?t.has(e.name):t.has(e.hash)||t.has(e.name)}async applyMigration(e,t,n){let a=await e.connect();try{await a.query("BEGIN"),await a.query(t.sql);let{identifier:r,timestamp:s,timestampType:i}=n.columns,o=r==="name"?t.name:t.hash,m=i==="bigint"?Date.now():new Date;await a.query(`INSERT INTO "${this.schemaName}"."${n.tableName}" ("${r}", "${s}") VALUES ($1, $2)`,[o,m]),await a.query("COMMIT");}catch(r){throw await a.query("ROLLBACK"),r}finally{a.release();}}async recordMigration(e,t,n){let{identifier:a,timestamp:r,timestampType:s}=n.columns,i=a==="name"?t.name:t.hash,o=s==="bigint"?Date.now():new Date;await e.query(`INSERT INTO "${this.schemaName}"."${n.tableName}" ("${a}", "${r}") VALUES ($1, $2)`,[i,o]);}};var Ue="__drizzle_migrations",ge="__drizzle_shared_migrations",j=class{constructor(e,t){this.migratorConfig=t;if(this.migrationsTable=t.migrationsTable??Ue,this.schemaManager=new O(e,this.migrationsTable),this.driftDetector=new $(e,this.schemaManager,{migrationsTable:this.migrationsTable,tenantDiscovery:t.tenantDiscovery}),this.seeder=new A({tenantDiscovery:t.tenantDiscovery},{createPool:this.schemaManager.createPool.bind(this.schemaManager),schemaNameTemplate:e.isolation.schemaNameTemplate,tenantSchema:e.schemas.tenant}),this.syncManager=new F({tenantDiscovery:t.tenantDiscovery,migrationsFolder:t.migrationsFolder,migrationsTable:this.migrationsTable},{createPool:this.schemaManager.createPool.bind(this.schemaManager),schemaNameTemplate:e.isolation.schemaNameTemplate,migrationsTableExists:this.schemaManager.migrationsTableExists.bind(this.schemaManager),ensureMigrationsTable:this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),getOrDetectFormat:this.getOrDetectFormat.bind(this),loadMigrations:this.loadMigrations.bind(this)}),this.migrationExecutor=new L({hooks:t.hooks},{createPool:this.schemaManager.createPool.bind(this.schemaManager),schemaNameTemplate:e.isolation.schemaNameTemplate,migrationsTableExists:this.schemaManager.migrationsTableExists.bind(this.schemaManager),ensureMigrationsTable:this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),getOrDetectFormat:this.getOrDetectFormat.bind(this),loadMigrations:this.loadMigrations.bind(this)}),this.batchExecutor=new I({tenantDiscovery:t.tenantDiscovery},this.migrationExecutor,this.loadMigrations.bind(this)),this.cloner=new H({migrationsTable:this.migrationsTable},{createPool:this.schemaManager.createPool.bind(this.schemaManager),createRootPool:this.schemaManager.createRootPool.bind(this.schemaManager),schemaNameTemplate:e.isolation.schemaNameTemplate,schemaExists:this.schemaManager.schemaExists.bind(this.schemaManager),createSchema:this.schemaManager.createSchema.bind(this.schemaManager)}),t.sharedMigrationsFolder&&existsSync(t.sharedMigrationsFolder)){let n=t.sharedMigrationsTable??ge,a=t.sharedHooks,r={schemaName:"public",migrationsTable:n};(a?.beforeMigration||a?.afterApply)&&(r.hooks={},a.beforeMigration&&(r.hooks.beforeMigration=a.beforeMigration),a.afterApply&&(r.hooks.afterMigration=a.afterApply)),this.sharedMigrationExecutor=new 
q(r,{createPool:this.schemaManager.createRootPool.bind(this.schemaManager),migrationsTableExists:this.schemaManager.migrationsTableExists.bind(this.schemaManager),ensureMigrationsTable:this.schemaManager.ensureMigrationsTable.bind(this.schemaManager),getOrDetectFormat:this.getOrDetectSharedFormat.bind(this),loadMigrations:this.loadSharedMigrations.bind(this)});}else this.sharedMigrationExecutor=null;e.schemas.shared?this.sharedSeeder=new v({schemaName:"public"},{createPool:this.schemaManager.createRootPool.bind(this.schemaManager),sharedSchema:e.schemas.shared}):this.sharedSeeder=null;}migrationsTable;schemaManager;driftDetector;seeder;syncManager;migrationExecutor;batchExecutor;cloner;sharedMigrationExecutor;sharedSeeder;async migrateAll(e={}){return this.batchExecutor.migrateAll(e)}async migrateTenant(e,t,n={}){return this.migrationExecutor.migrateTenant(e,t,n)}async migrateTenants(e,t={}){return this.batchExecutor.migrateTenants(e,t)}async getStatus(){return this.batchExecutor.getStatus()}async getTenantStatus(e,t){return this.migrationExecutor.getTenantStatus(e,t)}async createTenant(e,t={}){let{migrate:n=true}=t;await this.schemaManager.createSchema(e),n&&await this.migrateTenant(e);}async dropTenant(e,t={}){await this.schemaManager.dropSchema(e,t);}async tenantExists(e){return this.schemaManager.schemaExists(e)}async cloneTenant(e,t,n={}){return this.cloner.cloneTenant(e,t,n)}async markAsApplied(e,t={}){return this.migrationExecutor.markAsApplied(e,t)}async markAllAsApplied(e={}){return this.batchExecutor.markAllAsApplied(e)}async getSyncStatus(){return this.syncManager.getSyncStatus()}async getTenantSyncStatus(e,t){return this.syncManager.getTenantSyncStatus(e,t)}async markMissing(e){return this.syncManager.markMissing(e)}async markAllMissing(e={}){return this.syncManager.markAllMissing(e)}async cleanOrphans(e){return this.syncManager.cleanOrphans(e)}async cleanAllOrphans(e={}){return this.syncManager.cleanAllOrphans(e)}async seedTenant(e,t){return this.seeder.seedTenant(e,t)}async seedAll(e,t={}){return this.seeder.seedAll(e,t)}async seedTenants(e,t,n={}){return this.seeder.seedTenants(e,t,n)}hasSharedSeeding(){return this.sharedSeeder!==null}async seedShared(e){return this.sharedSeeder?this.sharedSeeder.seed(e):{schemaName:"public",success:false,error:"Shared schema not configured. 
Set schemas.shared in tenant config.",durationMs:0}}async seedAllWithShared(e,t,n={}){let a=await this.seedShared(e),r=await this.seedAll(t,n);return {shared:a,tenants:r}}async loadMigrations(){let e=await readdir(this.migratorConfig.migrationsFolder),t=[];for(let n of e){if(!n.endsWith(".sql"))continue;let a=join(this.migratorConfig.migrationsFolder,n),r=await readFile(a,"utf-8"),s=n.match(/^(\d+)_/),i=s?.[1]?parseInt(s[1],10):0,o=createHash("sha256").update(r).digest("hex");t.push({name:basename(n,".sql"),path:a,sql:r,timestamp:i,hash:o});}return t.sort((n,a)=>n.timestamp-a.timestamp)}async getOrDetectFormat(e,t){let n=this.migratorConfig.tableFormat??"auto";if(n!=="auto")return _(n,this.migrationsTable);let a=await Y(e,t,this.migrationsTable);if(a)return a;let r=this.migratorConfig.defaultFormat??"name";return _(r,this.migrationsTable)}async loadSharedMigrations(){if(!this.migratorConfig.sharedMigrationsFolder)return [];let e=await readdir(this.migratorConfig.sharedMigrationsFolder),t=[];for(let n of e){if(!n.endsWith(".sql"))continue;let a=join(this.migratorConfig.sharedMigrationsFolder,n),r=await readFile(a,"utf-8"),s=n.match(/^(\d+)_/),i=s?.[1]?parseInt(s[1],10):0,o=createHash("sha256").update(r).digest("hex");t.push({name:basename(n,".sql"),path:a,sql:r,timestamp:i,hash:o});}return t.sort((n,a)=>n.timestamp-a.timestamp)}async getOrDetectSharedFormat(e,t){let n=this.migratorConfig.sharedMigrationsTable??ge,a=this.migratorConfig.tableFormat??"auto";if(a!=="auto")return _(a,n);let r=await Y(e,t,n);if(r)return r;let s=this.migratorConfig.defaultFormat??"name";return _(s,n)}hasSharedMigrations(){return this.sharedMigrationExecutor!==null}async migrateShared(e={}){return this.sharedMigrationExecutor?this.sharedMigrationExecutor.migrate(e):{schemaName:"public",success:false,appliedMigrations:[],error:"Shared migrations not configured. Set sharedMigrationsFolder in migrator config.",durationMs:0}}async getSharedStatus(){return this.sharedMigrationExecutor?this.sharedMigrationExecutor.getStatus():{schemaName:"public",appliedCount:0,pendingCount:0,pendingMigrations:[],status:"error",error:"Shared migrations not configured. Set sharedMigrationsFolder in migrator config.",format:null}}async markSharedAsApplied(e={}){return this.sharedMigrationExecutor?this.sharedMigrationExecutor.markAsApplied(e):{schemaName:"public",success:false,appliedMigrations:[],error:"Shared migrations not configured. 
Set sharedMigrationsFolder in migrator config.",durationMs:0}}async migrateAllWithShared(e={}){let{sharedOptions:t,...n}=e,a=await this.migrateShared(t??{}),r=await this.migrateAll(n);return {shared:a,tenants:r}}async getSchemaDrift(e={}){return this.driftDetector.detectDrift(e)}async getTenantSchemaDrift(e,t,n={}){return this.driftDetector.compareTenant(e,t,n)}async introspectTenantSchema(e,t={}){return this.driftDetector.introspectSchema(e,t)}};function Qe(c,e){return new j(c,e)}var z=class{constructor(e){this.context=e;}fromTable=null;joins=[];selectFields={};whereCondition=null;orderByFields=[];limitValue=null;offsetValue=null;from(e,t){let n=this.getSchemaName(e);return this.fromTable={table:t,source:e,schemaName:n},this}innerJoin(e,t,n){return this.addJoin(e,t,n,"inner")}leftJoin(e,t,n){return this.addJoin(e,t,n,"left")}rightJoin(e,t,n){return this.addJoin(e,t,n,"right")}fullJoin(e,t,n){return this.addJoin(e,t,n,"full")}select(e){return this.selectFields=e,this}where(e){return this.whereCondition=e,this}orderBy(...e){return this.orderByFields=e,this}limit(e){return this.limitValue=e,this}offset(e){return this.offsetValue=e,this}async execute(){if(!this.fromTable)throw new Error("[drizzle-multitenant] No table specified. Use .from() first.");let e=this.buildSql();return (await this.context.tenantDb.execute(e)).rows}buildSql(){if(!this.fromTable)throw new Error("[drizzle-multitenant] No table specified");let e=[],t=Object.entries(this.selectFields).map(([a,r])=>{let s=r.name;return sql`${sql.raw(`"${s}"`)} as ${sql.raw(`"${a}"`)}`});t.length===0?e.push(sql`SELECT *`):e.push(sql`SELECT ${sql.join(t,sql`, `)}`);let n=this.getFullTableName(this.fromTable.schemaName,this.fromTable.table);e.push(sql` FROM ${sql.raw(n)}`);for(let a of this.joins){let r=this.getFullTableName(a.schemaName,a.table),s=this.getJoinKeyword(a.type);e.push(sql` ${sql.raw(s)} ${sql.raw(r)} ON ${a.condition}`);}return this.whereCondition&&e.push(sql` WHERE ${this.whereCondition}`),this.orderByFields.length>0&&e.push(sql` ORDER BY ${sql.join(this.orderByFields,sql`, `)}`),this.limitValue!==null&&e.push(sql` LIMIT ${sql.raw(this.limitValue.toString())}`),this.offsetValue!==null&&e.push(sql` OFFSET ${sql.raw(this.offsetValue.toString())}`),sql.join(e,sql``)}addJoin(e,t,n,a){let r=this.getSchemaName(e);return this.joins.push({table:t,source:e,schemaName:r,condition:n,type:a}),this}getSchemaName(e){return e==="tenant"?this.context.tenantSchema??"tenant":this.context.sharedSchema??"public"}getFullTableName(e,t){let n=getTableName(t);return `"${e}"."${n}"`}getJoinKeyword(e){switch(e){case "inner":return "INNER JOIN";case "left":return "LEFT JOIN";case "right":return "RIGHT JOIN";case "full":return "FULL OUTER JOIN"}}};function Ye(c){return new z(c)}async function Ve(c){let{tenantDb:e,tenantTable:t,sharedTable:n,foreignKey:a,sharedKey:r="id",sharedFields:s,where:i}=c,o=getTableName(t),m=getTableName(n),u=[`SELECT t.*, ${s.map(g=>`s."${String(g)}"`).join(", ")}`,`FROM "${o}" t`,`LEFT JOIN "public"."${m}" s ON t."${String(a)}" = s."${String(r)}"`];i&&u.push("WHERE");let h=sql.raw(u.join(" "));return (await e.execute(h)).rows}async function Ke(c,e){let{tenantSchema:t,sharedSchema:n,sql:a}=e,r=a.replace(/\$tenant\./g,`"${t}".`).replace(/\$shared\./g,`"${n}".`),s=sql.raw(r);return (await c.execute(s)).rows}function Ge(c,e,t){return {columns:Object.entries(c).map(([r,s])=>`"${s.name}" as "${r}"`),getSchema:()=>e}}function Xe(c){let e=new Set;for(let t of Object.values(c))t&&typeof t=="object"&&"_"in 
t&&t._?.brand==="Table"&&e.add(t);return e}function pe(c,e){return e.has(c)}var B=class{constructor(e,t,n,a="public"){this.tenantDb=e;this.sharedTables=t;this.tenantSchemaName=n;this.sharedSchemaName=a;}fromTable=null;joins=[];selectFields={};whereCondition=null;orderByFields=[];limitValue=null;offsetValue=null;from(e){let t=pe(e,this.sharedTables);return this.fromTable={table:e,isShared:t,schemaName:t?this.sharedSchemaName:this.tenantSchemaName},this}leftJoin(e,t){return this.addJoin(e,t,"left")}innerJoin(e,t){return this.addJoin(e,t,"inner")}rightJoin(e,t){return this.addJoin(e,t,"right")}fullJoin(e,t){return this.addJoin(e,t,"full")}select(e){return this.selectFields=e,this}where(e){return this.whereCondition=e,this}orderBy(...e){return this.orderByFields=e,this}limit(e){return this.limitValue=e,this}offset(e){return this.offsetValue=e,this}async execute(){if(!this.fromTable)throw new Error("[drizzle-multitenant] No table specified. Use .from() first.");let e=this.buildSql();return (await this.tenantDb.execute(e)).rows}addJoin(e,t,n){let a=pe(e,this.sharedTables);return this.joins.push({table:e,isShared:a,schemaName:a?this.sharedSchemaName:this.tenantSchemaName,condition:t,type:n}),this}buildSql(){if(!this.fromTable)throw new Error("[drizzle-multitenant] No table specified");let e=[],t=Object.entries(this.selectFields).map(([r,s])=>{let i=s.name,o=this.getTableAliasForColumn(s);return o?sql`${sql.raw(`"${o}"."${i}"`)} as ${sql.raw(`"${r}"`)}`:sql`${sql.raw(`"${i}"`)} as ${sql.raw(`"${r}"`)}`});t.length===0?e.push(sql`SELECT *`):e.push(sql`SELECT ${sql.join(t,sql`, `)}`);let n=getTableName(this.fromTable.table),a=`"${this.fromTable.schemaName}"."${n}"`;e.push(sql` FROM ${sql.raw(a)} "${sql.raw(n)}"`);for(let r of this.joins){let s=getTableName(r.table),i=`"${r.schemaName}"."${s}"`,o=this.getJoinKeyword(r.type);e.push(sql` ${sql.raw(o)} ${sql.raw(i)} "${sql.raw(s)}" ON ${r.condition}`);}return this.whereCondition&&e.push(sql` WHERE ${this.whereCondition}`),this.orderByFields.length>0&&e.push(sql` ORDER BY ${sql.join(this.orderByFields,sql`, `)}`),this.limitValue!==null&&e.push(sql` LIMIT ${sql.raw(this.limitValue.toString())}`),this.offsetValue!==null&&e.push(sql` OFFSET ${sql.raw(this.offsetValue.toString())}`),sql.join(e,sql``)}getTableAliasForColumn(e){let t=e.table;return t?getTableName(t):null}getJoinKeyword(e){switch(e){case "inner":return "INNER JOIN";case "left":return "LEFT JOIN";case "right":return "RIGHT JOIN";case "full":return "FULL OUTER JOIN"}}};function Ze(c,e,t,n){let a=Xe(t.shared);return new B(c,a,n?.tenantSchema??"tenant",n?.sharedSchema??"public")}function fe(c){let e=c.message.toLowerCase();return !!(e.includes("econnrefused")||e.includes("econnreset")||e.includes("etimedout")||e.includes("enotfound")||e.includes("connection refused")||e.includes("connection reset")||e.includes("connection terminated")||e.includes("connection timed out")||e.includes("timeout expired")||e.includes("socket hang up")||e.includes("too many connections")||e.includes("sorry, too many clients")||e.includes("the database system is starting up")||e.includes("the database system is shutting down")||e.includes("server closed the connection unexpectedly")||e.includes("could not connect to server")||e.includes("ssl connection")||e.includes("ssl handshake"))}function ye(c,e){let t=e.initialDelayMs*Math.pow(e.backoffMultiplier,c),n=Math.min(t,e.maxDelayMs);if(e.jitter){let a=1+Math.random()*.25;return Math.floor(n*a)}return Math.floor(n)}function et(c){return new Promise(e=>setTimeout(e,c))}async 
function Te(c,e){let t={maxAttempts:e?.maxAttempts??T.retry.maxAttempts,initialDelayMs:e?.initialDelayMs??T.retry.initialDelayMs,maxDelayMs:e?.maxDelayMs??T.retry.maxDelayMs,backoffMultiplier:e?.backoffMultiplier??T.retry.backoffMultiplier,jitter:e?.jitter??T.retry.jitter,isRetryable:e?.isRetryable??fe,onRetry:e?.onRetry},n=Date.now(),a=null;for(let r=0;r<t.maxAttempts;r++)try{return {result:await c(),attempts:r+1,totalTimeMs:Date.now()-n}}catch(s){if(a=s,r>=t.maxAttempts-1||!t.isRetryable(a))throw a;let o=ye(r,t);t.onRetry?.(r+1,a,o),await et(o);}throw a??new Error("Retry failed with no error")}function tt(c){return e=>Te(e,c)}export{z as CrossSchemaQueryBuilder,T as DEFAULT_CONFIG,j as Migrator,B as WithSharedQueryBuilder,Ge as buildCrossSchemaSelect,ye as calculateDelay,Ye as createCrossSchemaQuery,Qe as createMigrator,tt as createRetrier,De as createTenantContext,Re as createTenantManager,Ke as crossSchemaRaw,Se as defineConfig,fe as isRetryableError,Te as withRetry,Ze as withShared,Ve as withSharedLookup};
|
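The tail of the new bundle also carries the connection-retry helpers (`isRetryableError`, `calculateDelay`, `withRetry`, `createRetrier`), which are hard to read in minified form. A hedged usage sketch, inferred only from the option names, defaults, and return shape visible in the code above; the `pg` pool and the queries are illustrative.

```ts
// Sketch only: `pool` and the queries are placeholders; option names and the
// { result, attempts, totalTimeMs } return shape mirror the minified withRetry code.
import { Pool } from "pg";
import { withRetry, createRetrier, isRetryableError } from "drizzle-multitenant";

const pool = new Pool({ connectionString: process.env.DATABASE_URL });

// withRetry retries the callback with exponential backoff and resolves with
// the result plus attempt/timing metadata.
const { result, attempts, totalTimeMs } = await withRetry(
  () => pool.query("SELECT 1"),
  {
    maxAttempts: 5,
    initialDelayMs: 100,
    maxDelayMs: 5000,
    backoffMultiplier: 2,
    jitter: true,                     // adds up to +25% random delay per attempt
    isRetryable: isRetryableError,    // retry only connection-style failures
    onRetry: (attempt, error, delayMs) =>
      console.warn(`retry #${attempt} in ${delayMs}ms: ${error.message}`),
  }
);

// createRetrier pre-binds a retry policy so it can be reused across calls.
const retry = createRetrier({ maxAttempts: 3 });
await retry(() => pool.query("SELECT count(*) FROM information_schema.tables"));
```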