prisma-sql 1.75.5 → 1.75.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,694 @@
1
+ #!/usr/bin/env node
2
+ import { writeFile } from 'fs/promises';
3
+ import { isAbsolute, resolve } from 'path';
4
+
5
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
6
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
7
+ }) : x)(function(x) {
8
+ if (typeof require !== "undefined") return require.apply(this, arguments);
9
+ throw Error('Dynamic require of "' + x + '" is not supported');
10
+ });
11
// Transpiler helper: drives a generator function as if it were async/await.
// Each yielded value is resolved as a promise; the fulfilled value (or the
// rejection, via iterator.throw) is fed back into the generator until done.
// Returns a Promise for the generator's final return value.
var __async = (__this, __arguments, generator) => {
  return new Promise((resolve2, reject) => {
    const iterator = generator.apply(__this, __arguments);
    const advance = (method, value) => {
      let result;
      try {
        result = iterator[method](value);
      } catch (err) {
        reject(err);
        return;
      }
      if (result.done) {
        // Final return value; resolve (adopts thenables automatically).
        resolve2(result.value);
      } else {
        // Yielded value: wait for it, then resume (or throw into) the generator.
        Promise.resolve(result.value).then(
          (v) => advance("next", v),
          (e) => advance("throw", e)
        );
      }
    };
    advance("next", undefined);
  });
};
31
+
32
+ // src/utils/pure-utils.ts
33
// Best-effort numeric coercion for values coming back from DB drivers
// (numbers, bigints, numeric strings). Anything non-finite or unparseable
// (null, undefined, "", whitespace, NaN, Infinity) maps to 0.
function toNumberOrZero(v) {
  switch (typeof v) {
    case "number":
      return Number.isFinite(v) ? v : 0;
    case "bigint":
      // May lose precision beyond 2^53, acceptable for row-count stats.
      return Number(v);
    case "string": {
      if (v.trim() === "") return 0;
      const parsed = Number(v);
      return Number.isFinite(parsed) ? parsed : 0;
    }
    default:
      return 0;
  }
}
42
// Normalize fan-out statistics so they are internally consistent:
// 1 <= avg <= p95 <= p99 <= max, and coverage clamped into [0, 1].
function clampStatsMonotonic(avg, p95, p99, max, coverage) {
  let floor = Math.max(1, avg);
  const result = { avg: floor };
  floor = Math.max(floor, p95);
  result.p95 = floor;
  floor = Math.max(floor, p99);
  result.p99 = floor;
  result.max = Math.max(floor, max);
  result.coverage = Math.min(1, Math.max(0, coverage));
  return result;
}
56
// Convert one raw SQL result row (values may be strings/bigints/nulls,
// driver-dependent) into a well-formed, monotonic stats object.
function normalizeStats(row) {
  const avg = toNumberOrZero(row.avg);
  const p95 = toNumberOrZero(row.p95);
  const p99 = toNumberOrZero(row.p99);
  const max = toNumberOrZero(row.max);
  const coverage = toNumberOrZero(row.coverage);
  return clampStatsMonotonic(avg, p95, p99, max, coverage);
}
65
// Deterministic JSON serialization (2-space indent): every plain object has
// its keys emitted in sorted order so repeated runs produce identical output.
// Arrays and primitives pass through unchanged.
function stableJson(value) {
  const sortKeys = (_key, val) => {
    if (!val || typeof val !== "object" || Array.isArray(val)) return val;
    const sorted = {};
    for (const key of Object.keys(val).sort()) {
      sorted[key] = val[key];
    }
    return sorted;
  };
  return JSON.stringify(value, sortKeys, 2);
}
78
// Produce a log-safe version of a connection URL: drops the query string
// (may contain credentials/options), masks the password entirely, and masks
// all but the first 3 characters of the username (short usernames are fully
// masked). Unparseable input yields the sentinel "[invalid-url]".
function cleanDatabaseUrl(url) {
  try {
    const parsed = new URL(url);
    parsed.search = "";
    if (parsed.password) {
      parsed.password = "***";
    }
    const user = parsed.username;
    if (user.length > 0) {
      parsed.username = user.length <= 3 ? "***" : `${user.slice(0, 3)}***`;
    }
    return parsed.toString();
  } catch {
    return "[invalid-url]";
  }
}
97
+
98
+ // src/cardinality-planner.ts
99
// Double-quote a SQL identifier, escaping embedded quotes by doubling.
// `dialect` is accepted for signature parity with callers, but both supported
// dialects (postgres, sqlite) use identical double-quote identifier quoting.
function quoteIdent(dialect, ident) {
  const escaped = ident.split('"').join('""');
  return '"' + escaped + '"';
}
102
// Open a single-connection postgres.js client and wrap it in the generic
// { executor, cleanup } shape used by the stats collectors.
//
// Fixes: the original computed `cleanDatabaseUrl(databaseUrl)` and discarded
// the result (dead call, likely a leftover from removed logging) — removed.
// Also converted from the transpiled `__async` generator shim to native
// async/await (the file already relies on ES2020 dynamic import()).
async function createDatabaseExecutor(params) {
  const { databaseUrl } = params;
  const postgres = await import('postgres');
  // max: 1 — all measurement queries must share one connection so timing
  // samples are comparable.
  const sql = postgres.default(databaseUrl, { max: 1 });
  return {
    executor: {
      // `unsafe` executes raw SQL text; optional positional params are bound.
      query: async (sqlStr, queryParams) => sql.unsafe(sqlStr, queryParams || [])
    },
    cleanup: async () => {
      await sql.end();
    }
  };
}
122
// From a Prisma DMMF datamodel, collect every one-to-many relation whose
// fan-out can be measured by grouping the child table on its FK columns.
// Returns edges with both Prisma-level names and physical table/column names
// (dbName wins over the Prisma field/model name when present).
//
// Fixes: the original computed `pkFields.map((f) => f.dbName || f.name)` and
// discarded the result (dead expression) — removed.
function extractMeasurableOneToManyEdges(datamodel) {
  const modelByName = new Map(datamodel.models.map((m) => [m.name, m]));
  const edges = [];
  for (const parent of datamodel.models) {
    // Parents without a primary key cannot anchor a fan-out measurement.
    if (!parent.fields.some((f) => f.isId)) continue;
    const parentTable = parent.dbName || parent.name;
    for (const relField of parent.fields) {
      // Only list-valued relation fields describe the "many" side.
      if (!relField.relationName || !relField.isList) continue;
      const child = modelByName.get(relField.type);
      if (!child) continue;
      // The back-relation on the child carries the FK metadata
      // (relationFromFields / relationToFields).
      const backRel = child.fields.find(
        (cf) => cf.relationName === relField.relationName && cf.type === parent.name
      );
      if (!backRel) continue;
      const fkFieldNames = backRel.relationFromFields || [];
      if (fkFieldNames.length === 0) continue;
      const refFieldNames = backRel.relationToFields || [];
      if (refFieldNames.length === 0) continue;
      // Map a Prisma field name to its physical column name.
      const toColumn = (model, name) => {
        const fld = model.fields.find((x) => x.name === name);
        return fld ? fld.dbName || fld.name : name;
      };
      const childFkColumns = fkFieldNames.map((name) => toColumn(child, name));
      const parentPkColumns = refFieldNames.map((name) => toColumn(parent, name));
      // Composite keys must pair up column-for-column.
      if (childFkColumns.length !== parentPkColumns.length) continue;
      edges.push({
        parentModel: parent.name,
        relName: relField.name,
        childModel: child.name,
        parentTable,
        childTable: child.dbName || child.name,
        parentPkColumns,
        childFkColumns,
        isMany: true
      });
    }
  }
  return edges;
}
167
// Build the Postgres query that measures the fan-out distribution of a
// one-to-many edge: group the child table by its FK columns, then report
// avg / p95 / p99 / max children per parent, plus coverage (the fraction of
// parent rows that have at least one child).
function buildPostgresStatsSql(edge) {
  const childTable = quoteIdent("postgres", edge.childTable);
  const parentTable = quoteIdent("postgres", edge.parentTable);
  const groupCols = edge.childFkColumns
    .map((col) => quoteIdent("postgres", col))
    .join(", ");
  const sql = `
WITH counts AS (
  SELECT ${groupCols}, COUNT(*) AS cnt
  FROM ${childTable}
  GROUP BY ${groupCols}
),
total_parents AS (
  SELECT COUNT(*) AS total FROM ${parentTable}
)
SELECT
  AVG(cnt)::float AS avg,
  MAX(cnt)::int AS max,
  PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY cnt)::float AS p95,
  PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY cnt)::float AS p99,
  (SELECT COUNT(*) FROM counts)::float / GREATEST(1, (SELECT total FROM total_parents)) AS coverage
FROM counts
`;
  return sql.trim();
}
189
// SQLite flavor of the fan-out statistics query. SQLite has no
// PERCENTILE_CONT, so percentiles are read from an ordered CTE with
// LIMIT 1 OFFSET floor(q * (total - 1)) (nearest-rank, rounded down).
function buildSqliteStatsSql(edge) {
  const childTable = quoteIdent("sqlite", edge.childTable);
  const parentTable = quoteIdent("sqlite", edge.parentTable);
  const groupCols = edge.childFkColumns
    .map((col) => quoteIdent("sqlite", col))
    .join(", ");
  // Shared shape for the p95/p99 subqueries; `q` is the quantile literal.
  const percentileExpr = (q) => `(
    SELECT cnt
    FROM ordered
    LIMIT 1
    OFFSET (
      SELECT
        CASE
          WHEN total <= 1 THEN 0
          ELSE CAST((${q} * (total - 1)) AS INT)
        END
      FROM n
    )
  )`;
  const sql = `
WITH counts AS (
  SELECT ${groupCols}, COUNT(*) AS cnt
  FROM ${childTable}
  GROUP BY ${groupCols}
),
n AS (
  SELECT COUNT(*) AS total FROM counts
),
parent_n AS (
  SELECT COUNT(*) AS total FROM ${parentTable}
),
ordered AS (
  SELECT cnt
  FROM counts
  ORDER BY cnt
)
SELECT
  (SELECT AVG(cnt) FROM counts) AS avg,
  (SELECT MAX(cnt) FROM counts) AS max,
  ${percentileExpr("0.95")} AS p95,
  ${percentileExpr("0.99")} AS p99,
  CAST((SELECT total FROM n) AS FLOAT) / MAX(1, (SELECT total FROM parent_n)) AS coverage
`;
  return sql.trim();
}
242
// Dispatch to the dialect-specific fan-out statistics query builder.
function buildFanoutStatsSql(dialect, edge) {
  if (dialect === "postgres") {
    return buildPostgresStatsSql(edge);
  }
  return buildSqliteStatsSql(edge);
}
245
// Find the model table with the most rows via COUNT(*) over every model.
// Returns { tableName, rowCount } — note tableName is the ALREADY-QUOTED
// identifier, ready for interpolation into SQL — or null when no table could
// be counted. Converted from the transpiled `__async` shim to native
// async/await; behavior otherwise unchanged.
async function findLargestTable(params) {
  const { executor, dialect, datamodel } = params;
  let best = null;
  for (const model of datamodel.models) {
    const table = quoteIdent(dialect, model.dbName || model.name);
    try {
      const rows = await executor.query(`SELECT COUNT(*) AS cnt FROM ${table}`);
      const count = toNumberOrZero(rows[0]?.cnt);
      if (!best || count > best.rowCount) {
        best = { tableName: table, rowCount: count };
      }
    } catch (_) {
      // Best-effort by design: skip tables that cannot be counted
      // (dropped, view-backed, or permission-denied).
    }
  }
  return best;
}
264
// Estimate how many "row equivalents" one extra query round-trip costs.
// Warms up, samples SELECT 1 latency, then — if a sufficiently large table
// exists (>= 50 rows) — delegates to estimateFromQueryPairRatio; otherwise
// falls back to the default of 50. Converted from the `__async` shim to
// native async/await; logic and log output unchanged.
async function measureRoundtripCost(params) {
  const { executor, dialect, datamodel } = params;
  const WARMUP = 5;
  const SAMPLES = 15;
  // Warm the connection and caches so samples reflect steady-state latency.
  for (let i = 0; i < WARMUP; i++) {
    await executor.query("SELECT 1");
  }
  const roundtripTimes = [];
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    await executor.query("SELECT 1");
    roundtripTimes.push(performance.now() - start);
  }
  roundtripTimes.sort((a, b) => a - b);
  const medianRoundtrip = roundtripTimes[Math.floor(SAMPLES / 2)];
  console.log(
    `  [roundtrip] SELECT 1 times (ms): min=${roundtripTimes[0].toFixed(3)} median=${medianRoundtrip.toFixed(3)} max=${roundtripTimes[SAMPLES - 1].toFixed(3)}`
  );
  const largest = await findLargestTable({ executor, dialect, datamodel });
  if (!largest || largest.rowCount < 50) {
    // Too little data to get a meaningful per-row cost signal.
    console.log(
      `  [roundtrip] Largest table: ${largest?.tableName ?? "none"} (${largest?.rowCount ?? 0} rows) \u2014 too small, using default 50`
    );
    return 50;
  }
  console.log(
    `  [roundtrip] Using table ${largest.tableName} (${largest.rowCount} rows)`
  );
  return estimateFromQueryPairRatio({
    executor,
    tableName: largest.tableName,
    tableRowCount: largest.rowCount
  });
}
301
// Estimate the round-trip cost of one extra query, expressed in "rows worth
// of transfer time":
//   1. per-row cost   = (median LIMIT-large time - median LIMIT-1 time) / rows
//   2. marginal query = (median of 3 sequential LIMIT-1 - single LIMIT-1) / 2
//   3. equivalent     = marginal query cost / per-row cost, clamped to [10, 500]
// Returns the default 50 when the measured signal is too weak to trust
// (< 50 rows of difference, or <= 0.05ms of time difference).
// Converted from the `__async` shim to native async/await; math and log
// output unchanged.
async function estimateFromQueryPairRatio(params) {
  const { executor, tableName, tableRowCount } = params;
  const WARMUP = 5;
  const SAMPLES = 10;
  const smallLimit = 1;
  const largeLimit = Math.min(1e3, tableRowCount);
  for (let i = 0; i < WARMUP; i++) {
    await executor.query(`SELECT * FROM ${tableName} LIMIT ${largeLimit}`);
  }
  // Median latency of a minimal (LIMIT 1) query.
  const smallTimes = [];
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    await executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
    smallTimes.push(performance.now() - start);
  }
  smallTimes.sort((a, b) => a - b);
  const medianSmall = smallTimes[Math.floor(SAMPLES / 2)];
  // Median latency of the large fetch; also record how many rows came back,
  // since LIMIT may exceed the actual table size.
  const largeTimes = [];
  let actualLargeRows = 0;
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    const rows = await executor.query(
      `SELECT * FROM ${tableName} LIMIT ${largeLimit}`
    );
    largeTimes.push(performance.now() - start);
    actualLargeRows = rows.length;
  }
  largeTimes.sort((a, b) => a - b);
  const medianLarge = largeTimes[Math.floor(SAMPLES / 2)];
  const rowDiff = actualLargeRows - smallLimit;
  const timeDiff = medianLarge - medianSmall;
  console.log(
    `  [roundtrip] LIMIT ${smallLimit}: median=${medianSmall.toFixed(3)}ms`
  );
  console.log(
    `  [roundtrip] LIMIT ${largeLimit} (got ${actualLargeRows}): median=${medianLarge.toFixed(3)}ms`
  );
  console.log(
    `  [roundtrip] Time diff: ${timeDiff.toFixed(3)}ms for ${rowDiff} rows`
  );
  if (rowDiff < 50 || timeDiff <= 0.05) {
    console.log(
      `  [roundtrip] Insufficient signal (need \u226550 row diff and >0.05ms time diff), defaulting to 50`
    );
    return 50;
  }
  const perRow = timeDiff / rowDiff;
  // Marginal cost of an extra query: time for 3 sequential LIMIT-1 queries
  // minus one query, divided by the 2 extra queries.
  const sequentialTimes = [];
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    await executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
    await executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
    await executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
    sequentialTimes.push(performance.now() - start);
  }
  sequentialTimes.sort((a, b) => a - b);
  const median3Sequential = sequentialTimes[Math.floor(SAMPLES / 2)];
  const marginalQueryCost = (median3Sequential - medianSmall) / 2;
  console.log(
    `  [roundtrip] 3x sequential LIMIT 1: median=${median3Sequential.toFixed(3)}ms`
  );
  console.log(`  [roundtrip] Single query: ${medianSmall.toFixed(3)}ms`);
  console.log(
    `  [roundtrip] Marginal query cost: ${marginalQueryCost.toFixed(3)}ms`
  );
  console.log(`  [roundtrip] Per-row cost: ${perRow.toFixed(4)}ms`);
  const equivalent = Math.round(marginalQueryCost / perRow);
  console.log(`  [roundtrip] Raw equivalent: ${equivalent} rows`);
  const clamped = Math.max(10, Math.min(500, equivalent));
  console.log(`  [roundtrip] Final (clamped): ${clamped} rows`);
  return clamped;
}
375
// Measure how much slower a server-side aggregated result (array_agg /
// json_agg) is versus fetching the same rows flat, as a multiplicative
// factor clamped into [1.5, 8]. Postgres-only (information_schema /
// array_agg / json_agg).
//
// Fixes: the original located columns with
//   WHERE table_name = ${tableName.replace(/"/g, "'")}
// i.e. it rewrote the quoted identifier `"users"` into the string literal
// 'users'. That corrupts names containing quote characters (escaped `""`
// becomes `''`) and splices values into SQL text. The name is now unquoted
// properly and passed as a bound parameter. Also converted from the
// `__async` shim to native async/await.
async function measureJsonOverhead(params) {
  const { executor, tableName, tableRowCount } = params;
  const WARMUP = 3;
  const SAMPLES = 10;
  const limit = Math.min(500, tableRowCount);
  const rawSql = `SELECT * FROM ${tableName} LIMIT ${limit}`;
  // tableName arrives pre-quoted (e.g. `"users"`); recover the bare name by
  // stripping the outer quotes and un-doubling embedded ones.
  const bareName = tableName.replace(/^"|"$/g, "").replace(/""/g, '"');
  const colsResult = await executor.query(
    `SELECT column_name FROM information_schema.columns WHERE table_name = $1 LIMIT 10`,
    [bareName]
  );
  let aggSql;
  if (colsResult.length >= 3) {
    // Enough columns: aggregate up to 6 of them grouped by the first.
    const cols = colsResult.slice(0, 6).map((r) => `"${r.column_name}"`);
    const aggExprs = cols.map((c) => `array_agg(${c})`).join(", ");
    const groupCol = cols[0];
    aggSql = `SELECT ${groupCol}, ${aggExprs} FROM ${tableName} GROUP BY ${groupCol} LIMIT ${limit}`;
  } else {
    // Fallback: wrap the whole row set in json_agg.
    aggSql = `SELECT json_agg(t) FROM (SELECT * FROM ${tableName} LIMIT ${limit}) t`;
  }
  for (let i = 0; i < WARMUP; i++) {
    await executor.query(rawSql);
    await executor.query(aggSql);
  }
  const rawTimes = [];
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    await executor.query(rawSql);
    rawTimes.push(performance.now() - start);
  }
  rawTimes.sort((a, b) => a - b);
  const medianRaw = rawTimes[Math.floor(SAMPLES / 2)];
  const aggTimes = [];
  for (let i = 0; i < SAMPLES; i++) {
    const start = performance.now();
    await executor.query(aggSql);
    aggTimes.push(performance.now() - start);
  }
  aggTimes.sort((a, b) => a - b);
  const medianAgg = aggTimes[Math.floor(SAMPLES / 2)];
  // Guard against a near-zero denominator; 3x is the assumed default.
  const factor = medianRaw > 0.01 ? medianAgg / medianRaw : 3;
  console.log(`  [json] Raw ${limit} rows: ${medianRaw.toFixed(3)}ms`);
  console.log(`  [json] array_agg grouped: ${medianAgg.toFixed(3)}ms`);
  console.log(`  [json] Overhead factor: ${factor.toFixed(2)}x`);
  return Math.max(1.5, Math.min(8, factor));
}
421
// "Fast mode" cardinality collection: instead of scanning child tables,
// derive fan-out estimates from Postgres' own planner statistics
// (pg_class.reltuples, pg_stats.n_distinct / most_common_freqs). Runs
// ANALYZE first (best-effort) so the stats are fresh. Postgres-only —
// relies on pg_class / pg_stats. Returns
// { [parentModel]: { [relName]: {avg,p95,p99,max,coverage} } }.
// Converted from the `__async` shim to native async/await; estimation math
// unchanged.
async function collectPostgresStatsFromCatalog(params) {
  const { executor, datamodel } = params;
  const edges = extractMeasurableOneToManyEdges(datamodel);
  const out = {};
  // Refresh planner statistics for every table involved in a measured edge.
  const tablesToAnalyze = new Set();
  for (const edge of edges) {
    tablesToAnalyze.add(edge.parentTable);
    tablesToAnalyze.add(edge.childTable);
  }
  for (const table of tablesToAnalyze) {
    try {
      await executor.query(`ANALYZE ${quoteIdent("postgres", table)}`);
    } catch (_) {
      // Best-effort: ANALYZE can fail on permissions; slightly stale
      // statistics still produce usable estimates.
    }
  }
  // Approximate row counts for every user table in one catalog query.
  const tableStatsQuery = `
SELECT
  c.relname as table_name,
  c.reltuples::bigint as row_count
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
  AND n.nspname NOT IN ('pg_catalog', 'information_schema')
`;
  const tableStats = await executor.query(tableStatsQuery, []);
  const rowCounts = new Map();
  for (const row of tableStats) {
    rowCounts.set(String(row.table_name), toNumberOrZero(row.row_count));
  }
  for (const edge of edges) {
    const parentRows = rowCounts.get(edge.parentTable) || 0;
    const childRows = rowCounts.get(edge.childTable) || 0;
    if (parentRows === 0 || childRows === 0) {
      // Empty (or unknown) table: trivial stats, zero coverage.
      if (!out[edge.parentModel]) out[edge.parentModel] = {};
      out[edge.parentModel][edge.relName] = {
        avg: 1,
        p95: 1,
        p99: 1,
        max: 1,
        coverage: 0
      };
      continue;
    }
    // Only the first FK column is consulted; for composite FKs this is an
    // approximation.
    const fkColumn = edge.childFkColumns[0];
    const statsQuery = `
SELECT
  s.n_distinct,
  s.correlation,
  (s.most_common_freqs)[1] as max_freq
FROM pg_stats s
WHERE s.tablename = $1
  AND s.attname = $2
  AND s.schemaname NOT IN ('pg_catalog', 'information_schema')
`;
    const statsRows = await executor.query(statsQuery, [
      edge.childTable,
      fkColumn
    ]);
    let avg;
    let p95;
    let p99;
    let max;
    let coverage;
    if (statsRows.length > 0) {
      const stats = statsRows[0];
      const nDistinct = toNumberOrZero(stats.n_distinct);
      const correlation = stats.correlation !== null ? Number(stats.correlation) : 0;
      const maxFreq = stats.max_freq !== null ? Number(stats.max_freq) : null;
      // pg_stats encodes n_distinct < 0 as a fraction of the row count.
      const distinctCount = nDistinct < 0 ? Math.abs(nDistinct) * childRows : nDistinct > 0 ? nDistinct : parentRows;
      avg = distinctCount > 0 ? childRows / distinctCount : childRows / parentRows;
      coverage = Math.min(1, distinctCount / parentRows);
      // Higher physical correlation suggests clustered/skewed FK values.
      const skewFactor = Math.abs(correlation) > 0.5 ? 2.5 : 1.5;
      p95 = avg * skewFactor;
      p99 = avg * (skewFactor * 1.3);
      // most_common_freqs[1] is the frequency of the single most common value.
      max = maxFreq ? Math.ceil(childRows * maxFreq) : Math.ceil(p99 * 1.5);
    } else {
      // No pg_stats row (e.g. never analyzed): assume uniform distribution.
      avg = childRows / parentRows;
      coverage = 1;
      p95 = avg * 2;
      p99 = avg * 3;
      max = avg * 5;
    }
    if (!out[edge.parentModel]) out[edge.parentModel] = {};
    out[edge.parentModel][edge.relName] = clampStatsMonotonic(
      Math.ceil(avg),
      Math.ceil(p95),
      Math.ceil(p99),
      Math.ceil(max),
      coverage
    );
  }
  return out;
}
518
// "Precise mode" cardinality collection: run the full GROUP BY fan-out query
// for every measurable one-to-many edge. Accurate but scans each child table
// once per edge. Returns the same nested shape as the fast path.
// Converted from the `__async` shim to native async/await; behavior unchanged.
async function collectPreciseCardinalities(params) {
  const { executor, datamodel, dialect } = params;
  const edges = extractMeasurableOneToManyEdges(datamodel);
  const out = {};
  for (const edge of edges) {
    const sql = buildFanoutStatsSql(dialect, edge);
    const rows = await executor.query(sql, []);
    // An edge over an empty child table yields no row; normalize {} to
    // all-zero inputs, which clamp to the trivial stats.
    const stats = normalizeStats(rows[0] || {});
    if (!out[edge.parentModel]) out[edge.parentModel] = {};
    out[edge.parentModel][edge.relName] = stats;
  }
  return out;
}
534
// Collect relation fan-out stats in the requested mode (default "precise").
// Fast mode reads the Postgres catalog; if every edge comes back trivial
// (avg <= 1 and coverage <= 0.5 — the signature of stale/empty planner
// stats), it falls back to the precise scan.
// NOTE(review): fast mode always queries pg_class/pg_stats regardless of
// `dialect` — it is only meaningful on postgres; confirm callers never pass
// mode="fast" with sqlite.
// Converted from the `__async` shim to native async/await; the nested
// loop-with-flag is expressed with a labeled break.
async function collectRelationCardinalities(params) {
  const { executor, datamodel, dialect, mode = "precise" } = params;
  if (mode !== "fast") {
    return collectPreciseCardinalities({ executor, datamodel, dialect });
  }
  const stats = await collectPostgresStatsFromCatalog({ executor, datamodel });
  let allTrivial = true;
  outer: for (const model of Object.values(stats)) {
    for (const rel of Object.values(model)) {
      if (rel.avg > 1 || rel.coverage > 0.5) {
        allTrivial = false;
        break outer;
      }
    }
  }
  if (allTrivial && Object.keys(stats).length > 0) {
    console.warn("\u26A0 Catalog stats look stale, falling back to precise mode");
    return collectPreciseCardinalities({ executor, datamodel, dialect });
  }
  return stats;
}
558
// Gather everything the query planner needs: per-relation fan-out stats, the
// round-trip cost in row equivalents, and the JSON-aggregation overhead
// factor (postgres only; 1.5 default otherwise or for tiny tables).
// NOTE(review): findLargestTable is also invoked inside measureRoundtripCost,
// so every table gets COUNT(*)-scanned twice per run — tolerable for a
// build-time tool, but worth threading `largest` through if it gets slow.
// NOTE(review): the three tasks run through Promise.all over a single
// connection (max: 1), so their queries interleave; timing samples may
// include queue wait. Preserved as-is to keep behavior identical.
// Converted from the `__async` shim to native async/await.
async function collectPlannerArtifacts(params) {
  const { executor, datamodel, dialect, mode } = params;
  const largest = await findLargestTable({ executor, dialect, datamodel });
  const jsonOverheadTask =
    largest && largest.rowCount >= 50 && dialect === "postgres"
      ? measureJsonOverhead({
          executor,
          tableName: largest.tableName,
          tableRowCount: largest.rowCount
        })
      : Promise.resolve(1.5);
  const [relationStats, roundtripRowEquivalent, jsonRowFactor] = await Promise.all([
    collectRelationCardinalities({ executor, datamodel, dialect, mode }),
    measureRoundtripCost({ executor, dialect, datamodel }),
    jsonOverheadTask
  ]);
  console.log(`  Roundtrip cost: ~${roundtripRowEquivalent} row equivalents`);
  console.log(`  JSON overhead factor: ${jsonRowFactor.toFixed(2)}x`);
  return { relationStats, roundtripRowEquivalent, jsonRowFactor };
}
576
// Render the collected artifacts as TypeScript module source. Keys are
// deterministic via stableJson so the generated file is diff-stable.
function emitPlannerGeneratedModule(artifacts) {
  const lines = [];
  lines.push(`export const RELATION_STATS = ${stableJson(artifacts.relationStats)} as const`);
  lines.push(``);
  lines.push(`export type RelationStats = typeof RELATION_STATS`);
  lines.push(``);
  lines.push(`export const ROUNDTRIP_ROW_EQUIVALENT = ${artifacts.roundtripRowEquivalent}`);
  lines.push(``);
  lines.push(`export const JSON_ROW_FACTOR = ${artifacts.jsonRowFactor.toFixed(2)}`);
  lines.push(``);
  return lines.join("\n");
}
588
+
589
+ // src/collect-planner-stats.ts
590
// Budget for establishing the initial database connection: 10 seconds.
var CONNECT_TIMEOUT_MS = 10000;
591
// Parse CLI flags: --output <path> (where to write the generated module) and
// --prisma-client <specifier> (module to require the Prisma client from).
// A flag that is absent, or present with a missing/empty value, falls back
// to its default.
function parseArgs(argv) {
  const readFlag = (flag, fallback) => {
    const idx = argv.indexOf(flag);
    const next = idx === -1 ? undefined : argv[idx + 1];
    return next ? next : fallback;
  };
  return {
    output: readFlag("--output", "./dist/prisma/generated/sql/planner.generated.js"),
    clientPath: readFlag("--prisma-client", "@prisma/client")
  };
}
598
// Resolve the output path: absolute paths pass through untouched, relative
// paths are anchored at the current working directory.
function resolveOutput(output) {
  if (isAbsolute(output)) {
    return output;
  }
  return resolve(process.cwd(), output);
}
601
// Down-convert the generated TypeScript module source to plain CommonJS
// JavaScript for the .js output file.
//
// Fixes: the original only rewrote `export const` -> `exports.` and dropped
// import lines, but left TypeScript-only syntax in place — the trailing
// ` as const` assertion and the `export type RelationStats = ...` line —
// so the emitted .js file was not valid JavaScript and would fail to parse
// when required. Both are stripped now.
function emitCJS(artifacts) {
  const ts = emitPlannerGeneratedModule(artifacts);
  return ts
    .replace(/^export const (\w+)/gm, "exports.$1")
    .replace(/ as const$/gm, "")
    .replace(/^export type .*$/gm, "")
    .replace(/^import .*$/gm, "")
    .trimStart();
}
605
// Connect to the database, racing against CONNECT_TIMEOUT_MS. If the timeout
// fires first, a connection that completes later is closed (best-effort) and
// discarded. `dialect` is currently unused but kept for interface stability.
//
// Improvements: the timer is now cleared once the race settles (previously it
// was only unref'd and left running); converted from the `__async` shim to
// native async/await.
async function connectWithTimeout(databaseUrl, dialect) {
  let settled = false;
  let timerId;
  const connectPromise = createDatabaseExecutor({ databaseUrl }).then((conn) => {
    if (settled) {
      // Too late — the timeout already rejected the race. Release the
      // connection we no longer need and surface a terminal error.
      conn.cleanup().catch(() => {});
      throw new Error("Timed out");
    }
    return conn;
  });
  const timeoutPromise = new Promise((_, reject) => {
    timerId = setTimeout(() => {
      settled = true;
      reject(new Error(`Connection timed out after ${CONNECT_TIMEOUT_MS}ms`));
    }, CONNECT_TIMEOUT_MS);
    // Don't let the pending timer keep the process alive (Node-only API).
    if (typeof timerId.unref === "function") timerId.unref();
  });
  try {
    return await Promise.race([connectPromise, timeoutPromise]);
  } finally {
    settled = true;
    clearTimeout(timerId);
  }
}
636
// CLI entry point: connect to DATABASE_URL, load the Prisma client's DMMF,
// collect planner artifacts, and write them as a CommonJS module to the
// resolved --output path. Every failure mode (missing DATABASE_URL, connect
// failure, collection failure) is logged and the process exits 0 — this tool
// must never break a build. Converted from the `__async` shim to native
// async/await; behavior, messages, and exit codes unchanged.
async function main() {
  const { output, clientPath } = parseArgs(process.argv.slice(2));
  const outputPath = resolveOutput(output);
  const url = process.env.DATABASE_URL;
  if (!url) {
    console.warn(
      "[prisma-sql] DATABASE_URL not set, skipping planner stats collection"
    );
    process.exit(0);
  }
  let executor;
  let cleanup;
  try {
    const conn = await connectWithTimeout(url, "postgres");
    executor = conn.executor;
    cleanup = conn.cleanup;
  } catch (err) {
    console.warn(
      "[prisma-sql] Failed to connect:",
      err instanceof Error ? err.message : err
    );
    process.exit(0);
  }
  try {
    let dmmf;
    try {
      // The Prisma client is CommonJS; __require works in both CJS and ESM.
      const client = __require(clientPath);
      dmmf = client.Prisma?.dmmf ?? client.dmmf;
      if (!dmmf?.datamodel) {
        throw new Error(`Could not read dmmf.datamodel from ${clientPath}`);
      }
    } catch (err) {
      throw new Error(
        `Failed to load Prisma client from "${clientPath}": ${err instanceof Error ? err.message : err}`
      );
    }
    const artifacts = await collectPlannerArtifacts({
      executor,
      datamodel: dmmf.datamodel,
      dialect: "postgres"
    });
    await writeFile(outputPath, emitCJS(artifacts), "utf8");
    console.log("[prisma-sql] \u2713 Planner stats written to", outputPath);
  } catch (err) {
    // Non-fatal by design: a build without fresh stats is still a valid build.
    console.warn(
      "[prisma-sql] Failed to collect stats:",
      err instanceof Error ? err.message : err
    );
  } finally {
    if (cleanup) await cleanup();
  }
  process.exit(0);
}
692
+ main();
693
+ //# sourceMappingURL=collect-planner-stats.js.map
694
+ //# sourceMappingURL=collect-planner-stats.js.map