forge-sql-orm 2.0.18 → 2.0.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/README.md +95 -4
  2. package/dist/ForgeSQLORM.js +315 -49
  3. package/dist/ForgeSQLORM.js.map +1 -1
  4. package/dist/ForgeSQLORM.mjs +315 -49
  5. package/dist/ForgeSQLORM.mjs.map +1 -1
  6. package/dist/core/ForgeSQLAnalyseOperations.d.ts +250 -0
  7. package/dist/core/ForgeSQLAnalyseOperations.d.ts.map +1 -0
  8. package/dist/core/ForgeSQLORM.d.ts +12 -2
  9. package/dist/core/ForgeSQLORM.d.ts.map +1 -1
  10. package/dist/core/ForgeSQLQueryBuilder.d.ts +105 -9
  11. package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
  12. package/dist/core/ForgeSQLSelectOperations.d.ts.map +1 -1
  13. package/dist/core/SystemTables.d.ts +167 -0
  14. package/dist/core/SystemTables.d.ts.map +1 -1
  15. package/dist/index.d.ts +1 -0
  16. package/dist/index.d.ts.map +1 -1
  17. package/dist/utils/sqlUtils.d.ts +2 -2
  18. package/dist/utils/sqlUtils.d.ts.map +1 -1
  19. package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
  20. package/dist/webtriggers/dropMigrationWebTrigger.d.ts +2 -4
  21. package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
  22. package/package.json +11 -19
  23. package/src/core/ForgeSQLAnalyseOperations.ts +462 -0
  24. package/src/core/ForgeSQLORM.ts +43 -7
  25. package/src/core/ForgeSQLQueryBuilder.ts +121 -18
  26. package/src/core/ForgeSQLSelectOperations.ts +4 -6
  27. package/src/core/SystemTables.ts +175 -0
  28. package/src/index.ts +1 -0
  29. package/src/utils/forgeDriverProxy.ts +1 -1
  30. package/src/utils/sqlUtils.ts +10 -16
  31. package/src/webtriggers/applyMigrationsWebTrigger.ts +32 -16
  32. package/src/webtriggers/dropMigrationWebTrigger.ts +5 -6
  33. package/src/webtriggers/fetchSchemaWebTrigger.ts +2 -10
package/src/core/ForgeSQLAnalyseOperations.ts
@@ -0,0 +1,462 @@
+import { ForgeSqlOperation, SchemaAnalyzeForgeSql } from "./ForgeSQLQueryBuilder";
+import { Query } from "drizzle-orm";
+import {
+  ClusterStatementRowCamelCase,
+  ExplainAnalyzeRow,
+  SlowQueryNormalized,
+} from "./SystemTables";
+import { SqlParameters } from "@forge/sql/out/sql-statement";
+import { AnyMySqlTable } from "drizzle-orm/mysql-core/index";
+import { getTableName } from "drizzle-orm/table";
+import moment from "moment";
+
+/**
+ * Interface representing a row from the EXPLAIN ANALYZE output
+ */
+interface DecodedPlanRow {
+  id: string;
+  estRows?: string;
+  estCost?: string;
+  actRows?: string;
+  task?: string;
+  "access object"?: string;
+  "execution info"?: string;
+  "operator info"?: string;
+  memory?: string;
+  disk?: string;
+}
+
+/**
+ * Interface representing a raw slow query row from the database
+ */
+interface SlowQueryRaw {
+  Time: string;
+  Txn_start_ts: number;
+  User: string;
+  Host: string;
+  Conn_ID: number;
+  DB: string;
+  Query: string;
+  Digest: string;
+  Query_time: number;
+  Compile_time: number;
+  Optimize_time: number;
+  Process_time: number;
+  Wait_time: number;
+  Parse_time: number;
+  Rewrite_time: number;
+  Cop_time: number;
+  Cop_proc_avg: number;
+  Cop_proc_max: number;
+  Cop_proc_p90: number;
+  Cop_proc_addr: string;
+  Cop_wait_avg: number;
+  Cop_wait_max: number;
+  Cop_wait_p90: number;
+  Cop_wait_addr: string;
+  Mem_max: number;
+  Disk_max: number;
+  Total_keys: number;
+  Process_keys: number;
+  Request_count: number;
+  KV_total: number;
+  PD_total: number;
+  Result_rows: number;
+  Rocksdb_block_cache_hit_count: number;
+  Rocksdb_block_read_count: number;
+  Rocksdb_block_read_byte: number;
+  Plan: string;
+  Binary_plan: string;
+  Plan_digest: string;
+}
+
+/**
+ * Interface representing a row from the cluster statements table
+ */
+export interface ClusterStatementRow {
+  INSTANCE: string;
+  SUMMARY_BEGIN_TIME: string;
+  SUMMARY_END_TIME: string;
+  STMT_TYPE: string;
+  SCHEMA_NAME: string;
+  DIGEST: string;
+  DIGEST_TEXT: string;
+  TABLE_NAMES: string;
+  INDEX_NAMES: string | null;
+  SAMPLE_USER: string;
+  EXEC_COUNT: number;
+  SUM_ERRORS: number;
+  SUM_WARNINGS: number;
+  SUM_LATENCY: number;
+  MAX_LATENCY: number;
+  MIN_LATENCY: number;
+  AVG_LATENCY: number;
+  AVG_PARSE_LATENCY: number;
+  MAX_PARSE_LATENCY: number;
+  AVG_COMPILE_LATENCY: number;
+  MAX_COMPILE_LATENCY: number;
+  SUM_COP_TASK_NUM: number;
+  MAX_COP_PROCESS_TIME: number;
+  MAX_COP_PROCESS_ADDRESS: string;
+  MAX_COP_WAIT_TIME: number;
+  MAX_COP_WAIT_ADDRESS: string;
+  AVG_PROCESS_TIME: number;
+  MAX_PROCESS_TIME: number;
+  AVG_WAIT_TIME: number;
+  MAX_WAIT_TIME: number;
+  AVG_BACKOFF_TIME: number;
+  MAX_BACKOFF_TIME: number;
+  AVG_TOTAL_KEYS: number;
+  MAX_TOTAL_KEYS: number;
+  AVG_PROCESSED_KEYS: number;
+  MAX_PROCESSED_KEYS: number;
+  AVG_ROCKSDB_DELETE_SKIPPED_COUNT: number;
+  MAX_ROCKSDB_DELETE_SKIPPED_COUNT: number;
+  AVG_ROCKSDB_KEY_SKIPPED_COUNT: number;
+  MAX_ROCKSDB_KEY_SKIPPED_COUNT: number;
+  AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
+  MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
+  AVG_ROCKSDB_BLOCK_READ_COUNT: number;
+  MAX_ROCKSDB_BLOCK_READ_COUNT: number;
+  AVG_ROCKSDB_BLOCK_READ_BYTE: number;
+  MAX_ROCKSDB_BLOCK_READ_BYTE: number;
+  AVG_PREWRITE_TIME: number;
+  MAX_PREWRITE_TIME: number;
+  AVG_COMMIT_TIME: number;
+  MAX_COMMIT_TIME: number;
+  AVG_GET_COMMIT_TS_TIME: number;
+  MAX_GET_COMMIT_TS_TIME: number;
+  AVG_COMMIT_BACKOFF_TIME: number;
+  MAX_COMMIT_BACKOFF_TIME: number;
+  AVG_RESOLVE_LOCK_TIME: number;
+  MAX_RESOLVE_LOCK_TIME: number;
+  AVG_LOCAL_LATCH_WAIT_TIME: number;
+  MAX_LOCAL_LATCH_WAIT_TIME: number;
+  AVG_WRITE_KEYS: number;
+  MAX_WRITE_KEYS: number;
+  AVG_WRITE_SIZE: number;
+  MAX_WRITE_SIZE: number;
+  AVG_PREWRITE_REGIONS: number;
+  MAX_PREWRITE_REGIONS: number;
+  AVG_TXN_RETRY: number;
+  MAX_TXN_RETRY: number;
+  SUM_EXEC_RETRY: number;
+  SUM_EXEC_RETRY_TIME: number;
+  SUM_BACKOFF_TIMES: number;
+  BACKOFF_TYPES: string | null;
+  AVG_MEM: number;
+  MAX_MEM: number;
+  AVG_DISK: number;
+  MAX_DISK: number;
+  AVG_KV_TIME: number;
+  AVG_PD_TIME: number;
+  AVG_BACKOFF_TOTAL_TIME: number;
+  AVG_WRITE_SQL_RESP_TIME: number;
+  AVG_TIDB_CPU_TIME: number;
+  AVG_TIKV_CPU_TIME: number;
+  MAX_RESULT_ROWS: number;
+  MIN_RESULT_ROWS: number;
+  AVG_RESULT_ROWS: number;
+  PREPARED: number;
+  AVG_AFFECTED_ROWS: number;
+  FIRST_SEEN: string;
+  LAST_SEEN: string;
+  PLAN_IN_CACHE: number;
+  PLAN_CACHE_HITS: number;
+  PLAN_IN_BINDING: number;
+  QUERY_SAMPLE_TEXT: string;
+  PREV_SAMPLE_TEXT: string;
+  PLAN_DIGEST: string;
+  PLAN: string;
+  BINARY_PLAN: string;
+  CHARSET: string;
+  COLLATION: string;
+  PLAN_HINT: string;
+  MAX_REQUEST_UNIT_READ: number;
+  AVG_REQUEST_UNIT_READ: number;
+  MAX_REQUEST_UNIT_WRITE: number;
+  AVG_REQUEST_UNIT_WRITE: number;
+  MAX_QUEUED_RC_TIME: number;
+  AVG_QUEUED_RC_TIME: number;
+  RESOURCE_GROUP: string;
+  PLAN_CACHE_UNQUALIFIED: number;
+  PLAN_CACHE_UNQUALIFIED_LAST_REASON: string;
+}
+
+/**
+ * Class implementing SQL analysis operations for ForgeSQL ORM.
+ * Provides methods for analyzing query performance, execution plans, and slow queries.
+ */
+export class ForgeSQLAnalyseOperation implements SchemaAnalyzeForgeSql {
+  private readonly forgeOperations: ForgeSqlOperation;
+
+  /**
+   * Creates a new instance of ForgeSQLAnalyseOperation.
+   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
+   */
+  constructor(forgeOperations: ForgeSqlOperation) {
+    this.forgeOperations = forgeOperations;
+    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
+  }
+
+  /**
+   * Executes EXPLAIN on a raw SQL query.
+   * @param {string} query - The SQL query to analyze
+   * @param {unknown[]} bindParams - The query parameters
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ${query}`, bindParams as SqlParameters);
+    return results.map((row) => ({
+      id: row.id,
+      estRows: row.estRows,
+      actRows: row.actRows,
+      task: row.task,
+      accessObject: row["access object"],
+      executionInfo: row["execution info"],
+      operatorInfo: row["operator info"],
+      memory: row.memory,
+      disk: row.disk,
+    }));
+  }
+
+  /**
+   * Executes EXPLAIN on a Drizzle query.
+   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explain(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
+    const { sql, params } = query.toSQL();
+    return this.explainRaw(sql, params);
+  }
+
+  /**
+   * Executes EXPLAIN ANALYZE on a raw SQL query.
+   * @param {string} query - The SQL query to analyze
+   * @param {unknown[]} bindParams - The query parameters
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainAnalyzeRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ANALYZE ${query}`, bindParams as SqlParameters);
+    return results.map((row) => ({
+      id: row.id,
+      estRows: row.estRows,
+      actRows: row.actRows,
+      task: row.task,
+      accessObject: row["access object"],
+      executionInfo: row["execution info"],
+      operatorInfo: row["operator info"],
+      memory: row.memory,
+      disk: row.disk,
+    }));
+  }
+
+  /**
+   * Executes EXPLAIN ANALYZE on a Drizzle query.
+   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainAnalyze(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
+    const { sql, params } = query.toSQL();
+    return this.explainAnalyzeRaw(sql, params);
+  }
+
+  /**
+   * Decodes a query execution plan from its string representation.
+   * @param {string} input - The raw execution plan string
+   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
+   */
+  decodedPlan(input: string): ExplainAnalyzeRow[] {
+    if (!input) {
+      return [];
+    }
+    const lines = input.trim().split("\n");
+    if (lines.length < 2) return [];
+
+    const headersRaw = lines[0]
+      .split("\t")
+      .map((h) => h.trim())
+      .filter(Boolean);
+    const headers = headersRaw.map((h) => {
+      return h
+        .replace(/\s+/g, " ")
+        .replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
+        .replace(/^./, (s) => s.toLowerCase());
+    });
+
+    return lines.slice(1).map((line) => {
+      const values = line
+        .split("\t")
+        .map((s) => s.trim())
+        .filter(Boolean);
+      const row: Record<string, string> = {};
+      headers.forEach((key, i) => {
+        row[key] = values[i] ?? "";
+      });
+      return row as unknown as ExplainAnalyzeRow;
+    });
+  }
+
+  /**
+   * Normalizes a raw slow query row into a more structured format.
+   * @param {SlowQueryRaw} row - The raw slow query data
+   * @returns {SlowQueryNormalized} The normalized slow query data
+   */
+  normalizeSlowQuery(row: SlowQueryRaw): SlowQueryNormalized {
+    return {
+      time: row.Time,
+      txnStartTs: row.Txn_start_ts,
+      user: row.User,
+      host: row.Host,
+      connId: row.Conn_ID,
+      db: row.DB,
+      query: row.Query,
+      digest: row.Digest,
+      queryTime: row.Query_time,
+      compileTime: row.Compile_time,
+      optimizeTime: row.Optimize_time,
+      processTime: row.Process_time,
+      waitTime: row.Wait_time,
+      parseTime: row.Parse_time,
+      rewriteTime: row.Rewrite_time,
+      copTime: row.Cop_time,
+      copProcAvg: row.Cop_proc_avg,
+      copProcMax: row.Cop_proc_max,
+      copProcP90: row.Cop_proc_p90,
+      copProcAddr: row.Cop_proc_addr,
+      copWaitAvg: row.Cop_wait_avg,
+      copWaitMax: row.Cop_wait_max,
+      copWaitP90: row.Cop_wait_p90,
+      copWaitAddr: row.Cop_wait_addr,
+      memMax: row.Mem_max,
+      diskMax: row.Disk_max,
+      totalKeys: row.Total_keys,
+      processKeys: row.Process_keys,
+      requestCount: row.Request_count,
+      kvTotal: row.KV_total,
+      pdTotal: row.PD_total,
+      resultRows: row.Result_rows,
+      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
+      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
+      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
+      plan: row.Plan,
+      binaryPlan: row.Binary_plan,
+      planDigest: row.Plan_digest,
+      parsedPlan: this.decodedPlan(row.Plan),
+    };
+  }
+
+  /**
+   * Builds a SQL query for retrieving cluster statement history.
+   * @param {string[]} tables - The tables to analyze
+   * @param {Date} [from] - The start date for the analysis
+   * @param {Date} [to] - The end date for the analysis
+   * @returns {string} The SQL query for cluster statement history
+   */
+  buildClusterStatementQuery(tables: string[], from?: Date, to?: Date): string {
+    const formatDateTime = (date: Date): string => moment(date).format("YYYY-MM-DDTHH:mm:ss.SSS");
+
+    const tableConditions = tables
+      .map((table) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table}', '%')`)
+      .join(" OR ");
+
+    const timeConditions: string[] = [];
+    if (from) {
+      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime(from)}'`);
+    }
+    if (to) {
+      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime(to)}'`);
+    }
+
+    let whereClauses;
+    if (tableConditions?.length) {
+      whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
+    } else {
+      whereClauses = timeConditions;
+    }
+
+    return `
+      SELECT *
+      FROM (
+        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
+        UNION ALL
+        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
+      ) AS combined
+      ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
+    `;
+  }
+
+  /**
+   * Retrieves and analyzes slow queries from the database.
+   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
+   */
+  // CLUSTER_SLOW_QUERY STATISTICS
+  async analyzeSlowQueries(): Promise<SlowQueryNormalized[]> {
+    const results = await this.forgeOperations.fetch().executeRawSQL<SlowQueryRaw>(`
+      SELECT *
+      FROM information_schema.slow_query
+      ORDER BY time DESC
+    `);
+    return results.map((row) => this.normalizeSlowQuery(row));
+  }
+
+  /**
+   * Converts a cluster statement row to camelCase format.
+   * @param {Record<string, any>} input - The input row data
+   * @returns {ClusterStatementRowCamelCase} The converted row data
+   */
+  mapToCamelCaseClusterStatement(input: Record<string, any>): ClusterStatementRowCamelCase {
+    if (!input) {
+      return {} as ClusterStatementRowCamelCase;
+    }
+
+    const result: any = {};
+    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
+    for (const key in input) {
+      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
+      result[camelKey] = input[key];
+    }
+
+    return result as ClusterStatementRowCamelCase;
+  }
+
+  /**
+   * Analyzes query history for specific tables using raw table names.
+   * @param {string[]} tables - The table names to analyze
+   * @param {Date} [fromDate] - The start date for the analysis
+   * @param {Date} [toDate] - The end date for the analysis
+   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
+   */
+  async analyzeQueriesHistoryRaw(
+    tables: string[],
+    fromDate?: Date,
+    toDate?: Date,
+  ): Promise<ClusterStatementRowCamelCase[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<ClusterStatementRow>(
+        this.buildClusterStatementQuery(tables ?? [], fromDate, toDate),
+      );
+    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
+  }
+
+  /**
+   * Analyzes query history for specific tables using Drizzle table objects.
+   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
+   * @param {Date} [fromDate] - The start date for the analysis
+   * @param {Date} [toDate] - The end date for the analysis
+   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
+   */
+  async analyzeQueriesHistory(
+    tables: AnyMySqlTable[],
+    fromDate?: Date,
+    toDate?: Date,
+  ): Promise<ClusterStatementRowCamelCase[]> {
+    const tableNames = tables?.map((table) => getTableName(table)) ?? [];
+    return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
+  }
+}
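The new `ForgeSQLAnalyseOperation` class is exposed through the ORM's `analyze()` accessor (see the `ForgeSQLORM.ts` hunks below). The following is a usage sketch, not code from the package: it assumes `ForgeSqlOperation` is re-exported from the package root, that the methods shown are declared on the `SchemaAnalyzeForgeSql` interface returned by `analyze()`, and it uses a hypothetical `app_user` table and `usersQuery` Drizzle query. Only the method names and signatures come from the diff above.

```typescript
// Usage sketch (not from the package) for the analysis methods added above.
import { Query } from "drizzle-orm";
import { ForgeSqlOperation } from "forge-sql-orm"; // assumed re-export from the package root

declare const forgeSQL: ForgeSqlOperation; // an already-constructed ORM instance
declare const usersQuery: { toSQL: () => Query }; // e.g. a Drizzle select; hypothetical

export async function inspectQueries() {
  // EXPLAIN on a raw statement; explainRaw prepends "EXPLAIN " itself.
  const rawPlan = await forgeSQL
    .analyze()
    .explainRaw("SELECT * FROM app_user WHERE id = ?", [1]); // hypothetical table

  // EXPLAIN / EXPLAIN ANALYZE on a Drizzle query via its toSQL() output.
  const plan = await forgeSQL.analyze().explain(usersQuery);
  const measured = await forgeSQL.analyze().explainAnalyze(usersQuery);

  // Aggregated statement history for chosen tables over an optional time window,
  // read from INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY[_HISTORY].
  const hourAgo = new Date(Date.now() - 60 * 60 * 1000);
  const history = await forgeSQL
    .analyze()
    .analyzeQueriesHistoryRaw(["app_user"], hourAgo, new Date());

  // Normalized rows from information_schema.slow_query, newest first,
  // each carrying a parsedPlan decoded from the tab-separated Plan column.
  const slowQueries = await forgeSQL.analyze().analyzeSlowQueries();

  return { rawPlan, plan, measured, history, slowQueries };
}
```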
package/src/core/ForgeSQLORM.ts
@@ -3,6 +3,7 @@ import {
   CRUDForgeSQL,
   ForgeSqlOperation,
   ForgeSqlOrmOptions,
+  SchemaAnalyzeForgeSql,
   SchemaSqlForgeSql,
 } from "./ForgeSQLQueryBuilder";
 import { ForgeSQLSelectOperations } from "./ForgeSQLSelectOperations";
@@ -11,6 +12,7 @@ import { createForgeDriverProxy } from "../utils/forgeDriverProxy";
 import type { SelectedFields } from "drizzle-orm/mysql-core/query-builders/select.types";
 import { MySqlSelectBuilder } from "drizzle-orm/mysql-core";
 import { patchDbWithSelectAliased } from "../lib/drizzle/extensions/selectAliased";
+import { ForgeSQLAnalyseOperation } from "./ForgeSQLAnalyseOperations";
 
 /**
  * Implementation of ForgeSQLORM that uses Drizzle ORM for query building.
@@ -19,9 +21,17 @@ import { patchDbWithSelectAliased } from "../lib/drizzle/extensions/selectAliase
  */
 class ForgeSQLORMImpl implements ForgeSqlOperation {
   private static instance: ForgeSQLORMImpl | null = null;
-  private readonly drizzle;
+  private readonly drizzle: MySqlRemoteDatabase<any> & {
+    selectAliased: <TSelection extends SelectedFields>(
+      fields: TSelection,
+    ) => MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>;
+    selectAliasedDistinct: <TSelection extends SelectedFields>(
+      fields: TSelection,
+    ) => MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>;
+  };
   private readonly crudOperations: CRUDForgeSQL;
   private readonly fetchOperations: SchemaSqlForgeSql;
+  private readonly analyzeOperations: SchemaAnalyzeForgeSql;
 
   /**
    * Private constructor to enforce singleton behavior.
@@ -43,21 +53,28 @@ class ForgeSQLORMImpl implements ForgeSqlOperation {
       );
       this.crudOperations = new ForgeSQLCrudOperations(this, newOptions);
       this.fetchOperations = new ForgeSQLSelectOperations(newOptions);
+      this.analyzeOperations = new ForgeSQLAnalyseOperation(this);
     } catch (error) {
       console.error("ForgeSQLORM initialization failed:", error);
       throw error;
     }
   }
 
+  /**
+   * Create the modify operations instance.
+   * @returns modify operations.
+   */
+  modify(): CRUDForgeSQL {
+    return this.crudOperations;
+  }
+
   /**
    * Returns the singleton instance of ForgeSQLORMImpl.
    * @param options - Options for configuring ForgeSQL ORM behavior.
    * @returns The singleton instance of ForgeSQLORMImpl.
    */
   static getInstance(options?: ForgeSqlOrmOptions): ForgeSqlOperation {
-    if (!ForgeSQLORMImpl.instance) {
-      ForgeSQLORMImpl.instance = new ForgeSQLORMImpl(options);
-    }
+    ForgeSQLORMImpl.instance ??= new ForgeSQLORMImpl(options);
     return ForgeSQLORMImpl.instance;
   }
 
@@ -66,7 +83,7 @@ class ForgeSQLORMImpl implements ForgeSqlOperation {
    * @returns CRUD operations.
    */
   crud(): CRUDForgeSQL {
-    return this.crudOperations;
+    return this.modify();
   }
 
   /**
@@ -76,6 +93,9 @@ class ForgeSQLORMImpl implements ForgeSqlOperation {
   fetch(): SchemaSqlForgeSql {
     return this.fetchOperations;
   }
+  analyze(): SchemaAnalyzeForgeSql {
+    return this.analyzeOperations;
+  }
 
   /**
    * Returns a Drizzle query builder instance.
@@ -180,7 +200,7 @@ class ForgeSQLORM implements ForgeSqlOperation {
    *
    * @template TSelection - The type of the selected fields
    * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
-   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
+   * @returns {MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>} A distinct select query builder with unique field aliases
    * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
@@ -201,7 +221,15 @@ class ForgeSQLORM implements ForgeSqlOperation {
    * @returns CRUD operations.
    */
   crud(): CRUDForgeSQL {
-    return this.ormInstance.crud();
+    return this.ormInstance.modify();
+  }
+
+  /**
+   * Proxies the `modify` method from `ForgeSQLORMImpl`.
+   * @returns Modify operations.
+   */
+  modify(): CRUDForgeSQL {
+    return this.ormInstance.modify();
   }
 
   /**
@@ -212,6 +240,14 @@ class ForgeSQLORM implements ForgeSqlOperation {
     return this.ormInstance.fetch();
   }
 
+  /**
+   * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
+   * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
+   */
+  analyze(): SchemaAnalyzeForgeSql {
+    return this.ormInstance.analyze();
+  }
+
   /**
    * Returns a Drizzle query builder instance.
    *
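Taken together, the `ForgeSQLORM.ts` hunks rename the CRUD accessor to `modify()` (keeping `crud()` as a delegating alias) and wire a new `analyze()` accessor to `ForgeSQLAnalyseOperation`. A minimal sketch of the resulting accessor surface follows; it assumes the same re-exported `ForgeSqlOperation` type as above and does not show how the instance is constructed.

```typescript
// Sketch of the accessor surface after 2.0.20; `orm` stands for a constructed
// ForgeSQLORM instance (construction is not shown in this diff).
import { ForgeSqlOperation } from "forge-sql-orm"; // assumed re-export

declare const orm: ForgeSqlOperation;

const viaModify = orm.modify();   // new name for the CRUD operations accessor
const viaCrud = orm.crud();       // still available; delegates to modify()
const fetchOps = orm.fetch();     // unchanged select / raw-SQL operations
const analyzeOps = orm.analyze(); // new: EXPLAIN, slow-query, and statement history analysis
```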