prisma-sql 1.75.5 → 1.75.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,696 @@
1
+ #!/usr/bin/env node
2
+ 'use strict';
3
+
4
+ var promises = require('fs/promises');
5
+ var path = require('path');
6
+
7
// Bundler (esbuild) shim for dynamic require: prefers the real CommonJS
// `require` when present; otherwise proxies property access through it,
// and as a last resort throws for dynamic require in pure-ESM contexts.
var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
  get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
}) : x)(function(x) {
  if (typeof require !== "undefined") return require.apply(this, arguments);
  throw Error('Dynamic require of "' + x + '" is not supported');
});
13
// Bundler (esbuild) helper that drives a generator as an async function:
// every yielded value is awaited and resumed into the generator; the final
// return value resolves the promise, and any throw rejects it.
var __async = (__this, __arguments, generator) => {
  return new Promise((resolve2, reject) => {
    // Resume the generator with the awaited value of the previous yield.
    var fulfilled = (value) => {
      try {
        step(generator.next(value));
      } catch (e) {
        reject(e);
      }
    };
    // Re-throw an awaited rejection inside the generator so it can catch it.
    var rejected = (value) => {
      try {
        step(generator.throw(value));
      } catch (e) {
        reject(e);
      }
    };
    // Await each yielded value; resolve once the generator completes.
    var step = (x) => x.done ? resolve2(x.value) : Promise.resolve(x.value).then(fulfilled, rejected);
    step((generator = generator.apply(__this, __arguments)).next());
  });
};
33
+
34
// src/utils/pure-utils.ts
// Coerce an arbitrary driver value (number, bigint, or numeric string as
// returned by different DB drivers) to a finite number; anything else,
// including NaN/Infinity and blank strings, becomes 0.
function toNumberOrZero(v) {
  switch (typeof v) {
    case "number":
      return Number.isFinite(v) ? v : 0;
    case "bigint":
      return Number(v);
    case "string": {
      if (v.trim() === "") return 0;
      const parsed = Number(v);
      return Number.isFinite(parsed) ? parsed : 0;
    }
    default:
      return 0;
  }
}
44
// Sanitize fan-out statistics so they are internally consistent:
// avg >= 1, avg <= p95 <= p99 <= max, and coverage clamped into [0, 1].
function clampStatsMonotonic(avg, p95, p99, max, coverage) {
  const floor = Math.max(1, avg);
  const tail95 = Math.max(floor, p95);
  const tail99 = Math.max(tail95, p99);
  const ceiling = Math.max(tail99, max);
  return {
    avg: floor,
    p95: tail95,
    p99: tail99,
    max: ceiling,
    coverage: Math.min(1, Math.max(0, coverage))
  };
}
58
// Convert one raw stats row from the database (values may be strings,
// bigints, or null depending on the driver) into a clamped, monotonic
// fan-out stats object.
function normalizeStats(row) {
  const [avg, p95, p99, max, coverage] = ["avg", "p95", "p99", "max", "coverage"].map(
    (key) => toNumberOrZero(row[key])
  );
  return clampStatsMonotonic(avg, p95, p99, max, coverage);
}
67
// JSON.stringify (2-space indent) with object keys emitted in sorted order
// at every depth, so output is byte-stable across runs. Array order is
// preserved; non-object values pass through unchanged.
function stableJson(value) {
  const sortKeys = (_key, val) => {
    if (!val || typeof val !== "object" || Array.isArray(val)) {
      return val;
    }
    const sorted = {};
    for (const key of Object.keys(val).sort()) {
      sorted[key] = val[key];
    }
    return sorted;
  };
  return JSON.stringify(value, sortKeys, 2);
}
80
// Redact a database URL for safe logging: drop the query string (which may
// carry credentials or tokens), mask the password entirely, and keep at
// most the first 3 characters of the username. Unparseable input yields
// the "[invalid-url]" placeholder instead of throwing.
function cleanDatabaseUrl(url) {
  try {
    const parsed = new URL(url);
    parsed.search = "";
    if (parsed.password) {
      parsed.password = "***";
    }
    const user = parsed.username;
    if (user.length > 0) {
      parsed.username = user.length <= 3 ? "***" : user.slice(0, 3) + "***";
    }
    return parsed.toString();
  } catch (error) {
    return "[invalid-url]";
  }
}
99
+
100
// src/cardinality-planner.ts
// Double-quote a SQL identifier, escaping embedded double quotes by
// doubling them. The quoting rule is identical for postgres and sqlite,
// so `dialect` is accepted for interface parity but not consulted.
function quoteIdent(dialect, ident) {
  const escaped = ident.split('"').join('""');
  return '"' + escaped + '"';
}
104
// Open a single-connection Postgres executor for the given database URL
// using the `postgres` driver (loaded lazily so the CLI starts fast).
// Returns { executor, cleanup }: executor.query(sql, params?) resolves to
// the result rows, cleanup() closes the connection.
// Fix: removed a dead `cleanDatabaseUrl(databaseUrl)` call whose result
// was discarded — it had no observable effect.
function createDatabaseExecutor(params) {
  return __async(this, null, function* () {
    const { databaseUrl } = params;
    const postgres = yield import('postgres');
    // One connection is enough: all measurement queries run sequentially.
    const sql = postgres.default(databaseUrl, { max: 1 });
    return {
      executor: {
        // unsafe() is needed because measurement SQL is assembled
        // dynamically; values are still passed as bind parameters.
        query: (sqlStr, params2) => __async(null, null, function* () {
          return yield sql.unsafe(sqlStr, params2 || []);
        })
      },
      cleanup: () => __async(null, null, function* () {
        yield sql.end();
      })
    };
  });
}
124
// From a Prisma DMMF datamodel, extract every one-to-many relation edge
// that can be measured directly against the database: the parent model has
// a primary key, the child back-relation declares FK columns, and the
// FK / referenced column lists have matching lengths. Column and table
// names prefer the @map-ped dbName over the model/field name.
// Fix: removed a dead `pkFields.map((f) => f.dbName || f.name)` expression
// whose result was discarded.
function extractMeasurableOneToManyEdges(datamodel) {
  const modelByName = new Map(datamodel.models.map((m) => [m.name, m]));
  const edges = [];
  for (const parent of datamodel.models) {
    // Without a primary key, per-parent fan-out is not well defined.
    if (!parent.fields.some((f) => f.isId)) continue;
    const parentTable = parent.dbName || parent.name;
    for (const f of parent.fields) {
      // Only list-typed relation fields are the "many" side seen from the parent.
      if (!f.relationName) continue;
      if (!f.isList) continue;
      const child = modelByName.get(f.type);
      if (!child) continue;
      // Locate the back-relation field on the child: same relation name,
      // pointing back at this parent model.
      const childRelField = child.fields.find(
        (cf) => cf.relationName === f.relationName && cf.type === parent.name
      );
      if (!childRelField) continue;
      const fkFieldNames = childRelField.relationFromFields || [];
      if (fkFieldNames.length === 0) continue;
      const fkFields = fkFieldNames.map((name) => {
        const fld = child.fields.find((x) => x.name === name);
        return fld ? fld.dbName || fld.name : name;
      });
      const refFieldNames = childRelField.relationToFields || [];
      if (refFieldNames.length === 0) continue;
      const references = refFieldNames.map((name) => {
        const fld = parent.fields.find((x) => x.name === name);
        return fld ? fld.dbName || fld.name : name;
      });
      // Composite keys must line up column-for-column.
      if (fkFields.length !== references.length) continue;
      edges.push({
        parentModel: parent.name,
        relName: f.name,
        childModel: child.name,
        parentTable,
        childTable: child.dbName || child.name,
        parentPkColumns: references,
        childFkColumns: fkFields,
        isMany: true
      });
    }
  }
  return edges;
}
169
// Build the exact fan-out statistics query for one 1:N edge on Postgres.
// `counts` holds the number of child rows per distinct FK value; the outer
// SELECT aggregates avg/max plus interpolated p95/p99 (PERCENTILE_CONT),
// and `coverage` is the fraction of parent rows that have at least one
// child, with GREATEST(1, ...) guarding against division by zero.
function buildPostgresStatsSql(edge) {
  const childTable = quoteIdent("postgres", edge.childTable);
  const parentTable = quoteIdent("postgres", edge.parentTable);
  const groupCols = edge.childFkColumns.map((c) => quoteIdent("postgres", c)).join(", ");
  return `
WITH counts AS (
  SELECT ${groupCols}, COUNT(*) AS cnt
  FROM ${childTable}
  GROUP BY ${groupCols}
),
total_parents AS (
  SELECT COUNT(*) AS total FROM ${parentTable}
)
SELECT
  AVG(cnt)::float AS avg,
  MAX(cnt)::int AS max,
  PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY cnt)::float AS p95,
  PERCENTILE_CONT(0.99) WITHIN GROUP (ORDER BY cnt)::float AS p99,
  (SELECT COUNT(*) FROM counts)::float / GREATEST(1, (SELECT total FROM total_parents)) AS coverage
FROM counts
`.trim();
}
191
// Build the exact fan-out statistics query for one 1:N edge on SQLite.
// SQLite has no PERCENTILE_CONT, so p95/p99 use nearest-rank selection:
// LIMIT 1 OFFSET floor(q * (n - 1)) into the ordered per-parent counts.
// Note: MAX(1, ...) in the coverage expression is SQLite's scalar
// two-argument max, used to avoid division by zero.
function buildSqliteStatsSql(edge) {
  const childTable = quoteIdent("sqlite", edge.childTable);
  const parentTable = quoteIdent("sqlite", edge.parentTable);
  const groupCols = edge.childFkColumns.map((c) => quoteIdent("sqlite", c)).join(", ");
  return `
WITH counts AS (
  SELECT ${groupCols}, COUNT(*) AS cnt
  FROM ${childTable}
  GROUP BY ${groupCols}
),
n AS (
  SELECT COUNT(*) AS total FROM counts
),
parent_n AS (
  SELECT COUNT(*) AS total FROM ${parentTable}
),
ordered AS (
  SELECT cnt
  FROM counts
  ORDER BY cnt
)
SELECT
  (SELECT AVG(cnt) FROM counts) AS avg,
  (SELECT MAX(cnt) FROM counts) AS max,
  (
    SELECT cnt
    FROM ordered
    LIMIT 1
    OFFSET (
      SELECT
        CASE
          WHEN total <= 1 THEN 0
          ELSE CAST((0.95 * (total - 1)) AS INT)
        END
      FROM n
    )
  ) AS p95,
  (
    SELECT cnt
    FROM ordered
    LIMIT 1
    OFFSET (
      SELECT
        CASE
          WHEN total <= 1 THEN 0
          ELSE CAST((0.99 * (total - 1)) AS INT)
        END
      FROM n
    )
  ) AS p99,
  CAST((SELECT total FROM n) AS FLOAT) / MAX(1, (SELECT total FROM parent_n)) AS coverage
`.trim();
}
244
// Dispatch to the dialect-specific fan-out statistics SQL builder.
function buildFanoutStatsSql(dialect, edge) {
  if (dialect === "postgres") {
    return buildPostgresStatsSql(edge);
  }
  return buildSqliteStatsSql(edge);
}
247
// Run COUNT(*) against every model's table and return the one with the
// most rows as { tableName, rowCount }, or null when nothing could be
// counted. `tableName` is returned already quoted (ready for raw SQL).
// Per-table failures (e.g. tables that do not exist yet) are intentionally
// skipped — this is a best-effort survey.
function findLargestTable(params) {
  return __async(this, null, function* () {
    const { executor, dialect, datamodel } = params;
    let best = null;
    for (const model of datamodel.models) {
      const table = quoteIdent(dialect, model.dbName || model.name);
      let rows;
      try {
        rows = yield executor.query(`SELECT COUNT(*) AS cnt FROM ${table}`);
      } catch (_) {
        continue; // best-effort: skip tables that cannot be counted
      }
      const first = rows[0];
      const count = toNumberOrZero(first == null ? void 0 : first.cnt);
      if (best === null || count > best.rowCount) {
        best = { tableName: table, rowCount: count };
      }
    }
    return best;
  });
}
266
// Estimate the fixed cost of one DB round trip expressed in "row
// equivalents" (how many extra result rows cost roughly the same as one
// extra query). Warms up the connection, samples `SELECT 1` latencies,
// then delegates the real estimate to estimateFromQueryPairRatio against
// the largest table. Returns the default 50 when no table holds at least
// 50 rows.
function measureRoundtripCost(params) {
  return __async(this, null, function* () {
    var _a, _b;
    const { executor, dialect, datamodel } = params;
    const WARMUP = 5;
    const SAMPLES = 15;
    // Warm-up round trips: stabilize connection and caches before timing.
    for (let i = 0; i < WARMUP; i++) {
      yield executor.query("SELECT 1");
    }
    const roundtripTimes = [];
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      yield executor.query("SELECT 1");
      roundtripTimes.push(performance.now() - start);
    }
    // Median is robust against scheduler noise in a small sample.
    roundtripTimes.sort((a, b) => a - b);
    const medianRoundtrip = roundtripTimes[Math.floor(SAMPLES / 2)];
    console.log(
      ` [roundtrip] SELECT 1 times (ms): min=${roundtripTimes[0].toFixed(3)} median=${medianRoundtrip.toFixed(3)} max=${roundtripTimes[SAMPLES - 1].toFixed(3)}`
    );
    const largest = yield findLargestTable({ executor, dialect, datamodel });
    if (!largest || largest.rowCount < 50) {
      // Not enough data to measure a per-row slope; use the default.
      console.log(
        ` [roundtrip] Largest table: ${(_a = largest == null ? void 0 : largest.tableName) != null ? _a : "none"} (${(_b = largest == null ? void 0 : largest.rowCount) != null ? _b : 0} rows) \u2014 too small, using default 50`
      );
      return 50;
    }
    console.log(
      ` [roundtrip] Using table ${largest.tableName} (${largest.rowCount} rows)`
    );
    return estimateFromQueryPairRatio({
      executor,
      tableName: largest.tableName,
      tableRowCount: largest.rowCount
    });
  });
}
303
// Derive the round-trip cost in row equivalents from two measurements:
//  1) per-row transfer cost — the latency slope between a LIMIT 1 and a
//     LIMIT ~1000 query on the same table (medians of SAMPLES runs);
//  2) marginal query cost — the extra time of running two additional
//     LIMIT 1 queries back-to-back versus a single one, halved.
// The result is marginalQueryCost / perRowCost, clamped into [10, 500].
// Falls back to the default 50 when the measured signal is too weak
// (< 50 extra rows or <= 0.05 ms latency difference).
function estimateFromQueryPairRatio(params) {
  return __async(this, null, function* () {
    const { executor, tableName, tableRowCount } = params;
    const WARMUP = 5;
    const SAMPLES = 10;
    const smallLimit = 1;
    const largeLimit = Math.min(1e3, tableRowCount);
    for (let i = 0; i < WARMUP; i++) {
      yield executor.query(`SELECT * FROM ${tableName} LIMIT ${largeLimit}`);
    }
    // Median latency of the 1-row query.
    const smallTimes = [];
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      yield executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
      smallTimes.push(performance.now() - start);
    }
    smallTimes.sort((a, b) => a - b);
    const medianSmall = smallTimes[Math.floor(SAMPLES / 2)];
    // Median latency of the ~1000-row query; also record the actual row
    // count, which may be below largeLimit on smaller tables.
    const largeTimes = [];
    let actualLargeRows = 0;
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      const rows = yield executor.query(
        `SELECT * FROM ${tableName} LIMIT ${largeLimit}`
      );
      largeTimes.push(performance.now() - start);
      actualLargeRows = rows.length;
    }
    largeTimes.sort((a, b) => a - b);
    const medianLarge = largeTimes[Math.floor(SAMPLES / 2)];
    const rowDiff = actualLargeRows - smallLimit;
    const timeDiff = medianLarge - medianSmall;
    console.log(
      ` [roundtrip] LIMIT ${smallLimit}: median=${medianSmall.toFixed(3)}ms`
    );
    console.log(
      ` [roundtrip] LIMIT ${largeLimit} (got ${actualLargeRows}): median=${medianLarge.toFixed(3)}ms`
    );
    console.log(
      ` [roundtrip] Time diff: ${timeDiff.toFixed(3)}ms for ${rowDiff} rows`
    );
    // Too few extra rows or too little extra time: the slope is noise.
    if (rowDiff < 50 || timeDiff <= 0.05) {
      console.log(
        ` [roundtrip] Insufficient signal (need \u226550 row diff and >0.05ms time diff), defaulting to 50`
      );
      return 50;
    }
    const perRow = timeDiff / rowDiff;
    // Time three sequential 1-row queries to isolate the marginal cost of
    // one additional round trip (subtract the single-query median, halve).
    const sequentialTimes = [];
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      yield executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
      yield executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
      yield executor.query(`SELECT * FROM ${tableName} LIMIT ${smallLimit}`);
      sequentialTimes.push(performance.now() - start);
    }
    sequentialTimes.sort((a, b) => a - b);
    const median3Sequential = sequentialTimes[Math.floor(SAMPLES / 2)];
    const marginalQueryCost = (median3Sequential - medianSmall) / 2;
    console.log(
      ` [roundtrip] 3x sequential LIMIT 1: median=${median3Sequential.toFixed(3)}ms`
    );
    console.log(` [roundtrip] Single query: ${medianSmall.toFixed(3)}ms`);
    console.log(
      ` [roundtrip] Marginal query cost: ${marginalQueryCost.toFixed(3)}ms`
    );
    console.log(` [roundtrip] Per-row cost: ${perRow.toFixed(4)}ms`);
    const equivalent = Math.round(marginalQueryCost / perRow);
    console.log(` [roundtrip] Raw equivalent: ${equivalent} rows`);
    // Clamp into a sane planner range.
    const clamped = Math.max(10, Math.min(500, equivalent));
    console.log(` [roundtrip] Final (clamped): ${clamped} rows`);
    return clamped;
  });
}
377
// Measure how much slower an aggregated/grouped result (array_agg or
// json_agg) is than a flat SELECT of the same rows — the planner's
// "JSON overhead factor". Postgres-only (information_schema, array_agg,
// json_agg). The factor is clamped into [1.5, 8]; a near-zero raw median
// falls back to a default factor of 3.
function measureJsonOverhead(params) {
  return __async(this, null, function* () {
    const { executor, tableName, tableRowCount } = params;
    const WARMUP = 3;
    const SAMPLES = 10;
    const limit = Math.min(500, tableRowCount);
    const rawSql = `SELECT * FROM ${tableName} LIMIT ${limit}`;
    // NOTE(review): tableName arrives double-quoted (from findLargestTable
    // via quoteIdent); the replace() turns `"name"` into `'name'` so it can
    // be used as a string literal here. This breaks for identifiers that
    // themselves contain quote characters — confirm that is acceptable.
    const colsResult = yield executor.query(
      `SELECT column_name FROM information_schema.columns WHERE table_name = ${tableName.replace(/"/g, "'")} LIMIT 10`
    );
    let aggSql;
    if (colsResult.length >= 3) {
      // Enough columns: group by the first and array_agg up to six of them.
      const cols = colsResult.slice(0, 6).map((r) => `"${r.column_name}"`);
      const aggExprs = cols.map((c) => `array_agg(${c})`).join(", ");
      const groupCol = cols[0];
      aggSql = `SELECT ${groupCol}, ${aggExprs} FROM ${tableName} GROUP BY ${groupCol} LIMIT ${limit}`;
    } else {
      // Fallback: wrap the whole row set in a single json_agg.
      aggSql = `SELECT json_agg(t) FROM (SELECT * FROM ${tableName} LIMIT ${limit}) t`;
    }
    for (let i = 0; i < WARMUP; i++) {
      yield executor.query(rawSql);
      yield executor.query(aggSql);
    }
    // Median latency of the flat SELECT.
    const rawTimes = [];
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      yield executor.query(rawSql);
      rawTimes.push(performance.now() - start);
    }
    rawTimes.sort((a, b) => a - b);
    const medianRaw = rawTimes[Math.floor(SAMPLES / 2)];
    // Median latency of the aggregated SELECT.
    const aggTimes = [];
    for (let i = 0; i < SAMPLES; i++) {
      const start = performance.now();
      yield executor.query(aggSql);
      aggTimes.push(performance.now() - start);
    }
    aggTimes.sort((a, b) => a - b);
    const medianAgg = aggTimes[Math.floor(SAMPLES / 2)];
    // Guard against dividing by a near-zero raw median.
    const factor = medianRaw > 0.01 ? medianAgg / medianRaw : 3;
    console.log(` [json] Raw ${limit} rows: ${medianRaw.toFixed(3)}ms`);
    console.log(` [json] array_agg grouped: ${medianAgg.toFixed(3)}ms`);
    console.log(` [json] Overhead factor: ${factor.toFixed(2)}x`);
    return Math.max(1.5, Math.min(8, factor));
  });
}
423
// Estimate fan-out statistics from the Postgres catalog instead of
// scanning table data: ANALYZE the involved tables, read approximate row
// counts from pg_class.reltuples, then derive avg/p95/p99/max/coverage
// per edge from pg_stats (n_distinct, correlation, most_common_freqs).
// Edges whose tables appear empty get the trivial {1,1,1,1,0} stats.
// Returns { parentModel: { relName: stats } }.
function collectPostgresStatsFromCatalog(params) {
  return __async(this, null, function* () {
    const { executor, datamodel } = params;
    const edges = extractMeasurableOneToManyEdges(datamodel);
    const out = {};
    // Refresh planner statistics for every table we are about to inspect.
    const tablesToAnalyze = /* @__PURE__ */ new Set();
    for (const edge of edges) {
      tablesToAnalyze.add(edge.parentTable);
      tablesToAnalyze.add(edge.childTable);
    }
    for (const table of tablesToAnalyze) {
      try {
        yield executor.query(`ANALYZE ${quoteIdent("postgres", table)}`);
      } catch (_) {
        // Best-effort: ANALYZE may fail (e.g. permissions); the estimates
        // below will simply be staler.
      }
    }
    // Approximate row counts for all user tables from the catalog.
    const tableStatsQuery = `
SELECT
  c.relname as table_name,
  c.reltuples::bigint as row_count
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'r'
  AND n.nspname NOT IN ('pg_catalog', 'information_schema')
`;
    const tableStats = yield executor.query(tableStatsQuery, []);
    const rowCounts = /* @__PURE__ */ new Map();
    for (const row of tableStats) {
      const tableName = String(row.table_name);
      const count = toNumberOrZero(row.row_count);
      rowCounts.set(tableName, count);
    }
    for (const edge of edges) {
      const parentRows = rowCounts.get(edge.parentTable) || 0;
      const childRows = rowCounts.get(edge.childTable) || 0;
      if (parentRows === 0 || childRows === 0) {
        // No data (or no stats yet): record a trivial edge.
        if (!out[edge.parentModel]) out[edge.parentModel] = {};
        out[edge.parentModel][edge.relName] = {
          avg: 1,
          p95: 1,
          p99: 1,
          max: 1,
          coverage: 0
        };
        continue;
      }
      // Column-level stats are read for the first FK column only —
      // composite FKs are approximated by their leading column.
      const fkColumn = edge.childFkColumns[0];
      const statsQuery = `
SELECT
  s.n_distinct,
  s.correlation,
  (s.most_common_freqs)[1] as max_freq
FROM pg_stats s
WHERE s.tablename = $1
  AND s.attname = $2
  AND s.schemaname NOT IN ('pg_catalog', 'information_schema')
`;
      const statsRows = yield executor.query(statsQuery, [
        edge.childTable,
        fkColumn
      ]);
      let avg;
      let p95;
      let p99;
      let max;
      let coverage;
      if (statsRows.length > 0) {
        const stats = statsRows[0];
        const nDistinct = toNumberOrZero(stats.n_distinct);
        const correlation = stats.correlation !== null ? Number(stats.correlation) : 0;
        const maxFreq = stats.max_freq !== null ? Number(stats.max_freq) : null;
        // pg_stats encodes n_distinct < 0 as a negative fraction of the
        // table's row count; 0 means unknown, so assume one per parent.
        const distinctCount = nDistinct < 0 ? Math.abs(nDistinct) * childRows : nDistinct > 0 ? nDistinct : parentRows;
        avg = distinctCount > 0 ? childRows / distinctCount : childRows / parentRows;
        coverage = Math.min(1, distinctCount / parentRows);
        // Heuristic tail estimate: stronger physical correlation is taken
        // as a sign of more skewed fan-out.
        const skewFactor = Math.abs(correlation) > 0.5 ? 2.5 : 1.5;
        p95 = avg * skewFactor;
        p99 = avg * (skewFactor * 1.3);
        // most_common_freqs[1] is the frequency of the most common value.
        max = maxFreq ? Math.ceil(childRows * maxFreq) : Math.ceil(p99 * 1.5);
      } else {
        // No pg_stats row for this column: assume a uniform distribution.
        avg = childRows / parentRows;
        coverage = 1;
        p95 = avg * 2;
        p99 = avg * 3;
        max = avg * 5;
      }
      if (!out[edge.parentModel]) out[edge.parentModel] = {};
      out[edge.parentModel][edge.relName] = clampStatsMonotonic(
        Math.ceil(avg),
        Math.ceil(p95),
        Math.ceil(p99),
        Math.ceil(max),
        coverage
      );
    }
    return out;
  });
}
520
// Measure exact fan-out statistics for every measurable 1:N edge by
// running the dialect-specific GROUP BY query and normalizing its single
// result row. Returns { parentModel: { relName: stats } }.
function collectPreciseCardinalities(params) {
  return __async(this, null, function* () {
    const { executor, datamodel, dialect } = params;
    const out = {};
    for (const edge of extractMeasurableOneToManyEdges(datamodel)) {
      const statsSql = buildFanoutStatsSql(dialect, edge);
      const resultRows = yield executor.query(statsSql, []);
      const byRelation = out[edge.parentModel] || (out[edge.parentModel] = {});
      byRelation[edge.relName] = normalizeStats(resultRows[0] || {});
    }
    return out;
  });
}
536
// Collect per-relation fan-out statistics for every measurable 1:N edge.
// mode="precise" (default): exact GROUP BY measurement per edge.
// mode="fast": cheap Postgres catalog estimates, with a safety net — when
// every estimate looks trivial (avg <= 1 and coverage <= 0.5) the catalog
// is assumed stale and precise mode runs instead.
// NOTE(review): fast mode always reads the Postgres catalog regardless of
// `dialect` — confirm fast mode is never requested for sqlite.
function collectRelationCardinalities(params) {
  return __async(this, null, function* () {
    const { executor, datamodel, dialect, mode = "precise" } = params;
    if (mode === "fast") {
      const stats = yield collectPostgresStatsFromCatalog({ executor, datamodel });
      // Scan for any non-trivial estimate; bail out of both loops early.
      let allTrivial = true;
      for (const model of Object.values(stats)) {
        for (const rel of Object.values(model)) {
          if (rel.avg > 1 || rel.coverage > 0.5) {
            allTrivial = false;
            break;
          }
        }
        if (!allTrivial) break;
      }
      if (allTrivial && Object.keys(stats).length > 0) {
        console.warn("\u26A0 Catalog stats look stale, falling back to precise mode");
        return collectPreciseCardinalities({ executor, datamodel, dialect });
      }
      return stats;
    }
    return collectPreciseCardinalities({ executor, datamodel, dialect });
  });
}
560
// Gather everything the query planner needs: per-relation fan-out stats,
// the round-trip cost in row equivalents, and the JSON aggregation
// overhead factor. JSON overhead is only measured on postgres against a
// table with at least 50 rows; otherwise the default factor 1.5 is used.
function collectPlannerArtifacts(params) {
  return __async(this, null, function* () {
    const { executor, datamodel, dialect, mode } = params;
    const largest = yield findLargestTable({ executor, dialect, datamodel });
    const canMeasureJson = largest && largest.rowCount >= 50 && dialect === "postgres";
    const jsonTask = canMeasureJson
      ? measureJsonOverhead({
          executor,
          tableName: largest.tableName,
          tableRowCount: largest.rowCount
        })
      : Promise.resolve(1.5);
    const [relationStats, roundtripRowEquivalent, jsonRowFactor] = yield Promise.all([
      collectRelationCardinalities({ executor, datamodel, dialect, mode }),
      measureRoundtripCost({ executor, dialect, datamodel }),
      jsonTask
    ]);
    console.log(` Roundtrip cost: ~${roundtripRowEquivalent} row equivalents`);
    console.log(` JSON overhead factor: ${jsonRowFactor.toFixed(2)}x`);
    return { relationStats, roundtripRowEquivalent, jsonRowFactor };
  });
}
578
// Render the collected planner artifacts as TypeScript module source
// (stable key order via stableJson so regeneration is diff-friendly).
// The CJS build path strips the TS-only syntax afterwards (see emitCJS).
function emitPlannerGeneratedModule(artifacts) {
  const lines = [];
  lines.push(`export const RELATION_STATS = ${stableJson(artifacts.relationStats)} as const`);
  lines.push(``);
  lines.push(`export type RelationStats = typeof RELATION_STATS`);
  lines.push(``);
  lines.push(`export const ROUNDTRIP_ROW_EQUIVALENT = ${artifacts.roundtripRowEquivalent}`);
  lines.push(``);
  lines.push(`export const JSON_ROW_FACTOR = ${artifacts.jsonRowFactor.toFixed(2)}`);
  lines.push(``);
  return lines.join("\n");
}
590

// src/collect-planner-stats.ts
// Maximum time (ms) to wait for the initial database connection (10 s).
var CONNECT_TIMEOUT_MS = 1e4;
593
// Parse CLI flags: --output <path> (destination of the generated module)
// and --prisma-client <module id> (where to load the Prisma client from).
// Each falls back to its default when the flag or its value is absent.
function parseArgs(argv) {
  const flagValue = (flag, fallback) => {
    const idx = argv.indexOf(flag);
    return idx !== -1 && argv[idx + 1] ? argv[idx + 1] : fallback;
  };
  return {
    output: flagValue("--output", "./dist/prisma/generated/sql/planner.generated.js"),
    clientPath: flagValue("--prisma-client", "@prisma/client")
  };
}
600
// Resolve the output path against the current working directory unless it
// is already absolute.
function resolveOutput(output) {
  if (path.isAbsolute(output)) {
    return output;
  }
  return path.resolve(process.cwd(), output);
}
603
// Convert the emitted TypeScript module into plain CommonJS source:
// rewrite `export const X` to `exports.X`, drop import lines, and strip
// TS-only syntax that the old version left behind — the
// `export type ...` line and the trailing ` as const` — which would be a
// syntax error in the generated .js file.
function emitCJS(artifacts) {
  const ts = emitPlannerGeneratedModule(artifacts);
  return ts
    .replace(/^export type .*$/gm, "")
    .replace(/ as const$/gm, "")
    .replace(/^export const (\w+)/gm, "exports.$1")
    .replace(/^import .*$/gm, "")
    .trimStart();
}
607
// Establish a database connection, failing if it takes longer than
// CONNECT_TIMEOUT_MS. If the connection eventually succeeds after the
// timeout already fired, it is closed immediately so nothing leaks.
// `dialect` is currently unused (only postgres is supported here) but is
// kept for interface stability.
// Fix: the timeout timer is now cancelled once the race settles, instead
// of being left pending until it fires (it was only unref'd before).
function connectWithTimeout(databaseUrl, dialect) {
  return __async(this, null, function* () {
    let settled = false;
    let timer;
    const connectPromise = createDatabaseExecutor({ databaseUrl }).then(
      (conn) => {
        if (settled) {
          // The race was already lost to the timeout: dispose of the late
          // connection quietly so it does not dangle.
          conn.cleanup().catch(() => {
          });
          throw new Error("Timed out");
        }
        return conn;
      }
    );
    const timeoutPromise = new Promise((_, reject) => {
      timer = setTimeout(() => {
        settled = true;
        reject(new Error(`Connection timed out after ${CONNECT_TIMEOUT_MS}ms`));
      }, CONNECT_TIMEOUT_MS);
      // Do not keep the process alive just for this timer.
      if (typeof timer.unref === "function") timer.unref();
    });
    try {
      const result = yield Promise.race([connectPromise, timeoutPromise]);
      settled = true;
      return result;
    } catch (err) {
      settled = true;
      throw err;
    } finally {
      // Cancel the pending timer once the race has settled either way.
      clearTimeout(timer);
    }
  });
}
638
// CLI entry point. Deliberately best-effort: every failure path warns and
// exits 0 so that a missing DATABASE_URL, an unreachable database, or a
// stats-collection error never breaks the host project's build.
function main() {
  return __async(this, null, function* () {
    var _a, _b;
    const { output, clientPath } = parseArgs(process.argv.slice(2));
    const outputPath = resolveOutput(output);
    const url = process.env.DATABASE_URL;
    if (!url) {
      console.warn(
        "[prisma-sql] DATABASE_URL not set, skipping planner stats collection"
      );
      process.exit(0);
    }
    let executor;
    let cleanup;
    try {
      const conn = yield connectWithTimeout(url, "postgres");
      executor = conn.executor;
      cleanup = conn.cleanup;
    } catch (err) {
      console.warn(
        "[prisma-sql] Failed to connect:",
        err instanceof Error ? err.message : err
      );
      process.exit(0);
    }
    try {
      // Load the generated Prisma client and pull the DMMF datamodel from
      // it (newer clients expose it at Prisma.dmmf, older ones at dmmf).
      let dmmf;
      try {
        const client = __require(clientPath);
        dmmf = (_b = (_a = client.Prisma) == null ? void 0 : _a.dmmf) != null ? _b : client.dmmf;
        if (!(dmmf == null ? void 0 : dmmf.datamodel)) {
          throw new Error(`Could not read dmmf.datamodel from ${clientPath}`);
        }
      } catch (err) {
        throw new Error(
          `Failed to load Prisma client from "${clientPath}": ${err instanceof Error ? err.message : err}`
        );
      }
      const artifacts = yield collectPlannerArtifacts({
        executor,
        datamodel: dmmf.datamodel,
        dialect: "postgres"
      });
      yield promises.writeFile(outputPath, emitCJS(artifacts), "utf8");
      console.log("[prisma-sql] \u2713 Planner stats written to", outputPath);
    } catch (err) {
      console.warn(
        "[prisma-sql] Failed to collect stats:",
        err instanceof Error ? err.message : err
      );
    } finally {
      // Always close the database connection, even on failure.
      yield cleanup == null ? void 0 : cleanup();
    }
    process.exit(0);
  });
}
main();
695
+ //# sourceMappingURL=collect-planner-stats.cjs.map