forge-sql-orm 2.0.17 → 2.0.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +95 -4
- package/dist/ForgeSQLORM.js +382 -60
- package/dist/ForgeSQLORM.js.map +1 -1
- package/dist/ForgeSQLORM.mjs +382 -60
- package/dist/ForgeSQLORM.mjs.map +1 -1
- package/dist/core/ForgeSQLAnalyseOperations.d.ts +250 -0
- package/dist/core/ForgeSQLAnalyseOperations.d.ts.map +1 -0
- package/dist/core/ForgeSQLCrudOperations.d.ts +1 -1
- package/dist/core/ForgeSQLCrudOperations.d.ts.map +1 -1
- package/dist/core/ForgeSQLORM.d.ts +12 -2
- package/dist/core/ForgeSQLORM.d.ts.map +1 -1
- package/dist/core/ForgeSQLQueryBuilder.d.ts +112 -21
- package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
- package/dist/core/ForgeSQLSelectOperations.d.ts.map +1 -1
- package/dist/core/SystemTables.d.ts +167 -0
- package/dist/core/SystemTables.d.ts.map +1 -1
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/utils/forgeDriverProxy.d.ts +11 -0
- package/dist/utils/forgeDriverProxy.d.ts.map +1 -0
- package/dist/utils/sqlHints.d.ts +21 -0
- package/dist/utils/sqlHints.d.ts.map +1 -0
- package/dist/utils/sqlUtils.d.ts +2 -8
- package/dist/utils/sqlUtils.d.ts.map +1 -1
- package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/dropMigrationWebTrigger.d.ts +2 -4
- package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
- package/package.json +4 -12
- package/src/core/ForgeSQLAnalyseOperations.ts +461 -0
- package/src/core/ForgeSQLCrudOperations.ts +15 -8
- package/src/core/ForgeSQLORM.ts +46 -9
- package/src/core/ForgeSQLQueryBuilder.ts +129 -32
- package/src/core/ForgeSQLSelectOperations.ts +4 -6
- package/src/core/SystemTables.ts +175 -0
- package/src/index.ts +1 -0
- package/src/utils/forgeDriverProxy.ts +27 -0
- package/src/utils/sqlHints.ts +63 -0
- package/src/utils/sqlUtils.ts +36 -32
- package/src/webtriggers/applyMigrationsWebTrigger.ts +32 -16
- package/src/webtriggers/dropMigrationWebTrigger.ts +5 -6
- package/src/webtriggers/fetchSchemaWebTrigger.ts +2 -10
package/src/core/ForgeSQLAnalyseOperations.ts
ADDED
@@ -0,0 +1,461 @@
+import { ForgeSqlOperation, SchemaAnalyzeForgeSql } from "./ForgeSQLQueryBuilder";
+import { Query } from "drizzle-orm";
+import {
+  ClusterStatementRowCamelCase,
+  ExplainAnalyzeRow,
+  SlowQueryNormalized,
+} from "./SystemTables";
+import { SqlParameters } from "@forge/sql/out/sql-statement";
+import { AnyMySqlTable } from "drizzle-orm/mysql-core/index";
+import { getTableName } from "drizzle-orm/table";
+import moment from "moment";
+
+/**
+ * Interface representing a row from the EXPLAIN ANALYZE output
+ */
+interface DecodedPlanRow {
+  id: string;
+  estRows?: string;
+  estCost?: string;
+  actRows?: string;
+  task?: string;
+  "access object"?: string;
+  "execution info"?: string;
+  "operator info"?: string;
+  memory?: string;
+  disk?: string;
+}
+
+/**
+ * Interface representing a raw slow query row from the database
+ */
+interface SlowQueryRaw {
+  Time: string;
+  Txn_start_ts: number;
+  User: string;
+  Host: string;
+  Conn_ID: number;
+  DB: string;
+  Query: string;
+  Digest: string;
+  Query_time: number;
+  Compile_time: number;
+  Optimize_time: number;
+  Process_time: number;
+  Wait_time: number;
+  Parse_time: number;
+  Rewrite_time: number;
+  Cop_time: number;
+  Cop_proc_avg: number;
+  Cop_proc_max: number;
+  Cop_proc_p90: number;
+  Cop_proc_addr: string;
+  Cop_wait_avg: number;
+  Cop_wait_max: number;
+  Cop_wait_p90: number;
+  Cop_wait_addr: string;
+  Mem_max: number;
+  Disk_max: number;
+  Total_keys: number;
+  Process_keys: number;
+  Request_count: number;
+  KV_total: number;
+  PD_total: number;
+  Result_rows: number;
+  Rocksdb_block_cache_hit_count: number;
+  Rocksdb_block_read_count: number;
+  Rocksdb_block_read_byte: number;
+  Plan: string;
+  Binary_plan: string;
+  Plan_digest: string;
+}
+
+/**
+ * Interface representing a row from the cluster statements table
+ */
+export interface ClusterStatementRow {
+  INSTANCE: string;
+  SUMMARY_BEGIN_TIME: string;
+  SUMMARY_END_TIME: string;
+  STMT_TYPE: string;
+  SCHEMA_NAME: string;
+  DIGEST: string;
+  DIGEST_TEXT: string;
+  TABLE_NAMES: string;
+  INDEX_NAMES: string | null;
+  SAMPLE_USER: string;
+  EXEC_COUNT: number;
+  SUM_ERRORS: number;
+  SUM_WARNINGS: number;
+  SUM_LATENCY: number;
+  MAX_LATENCY: number;
+  MIN_LATENCY: number;
+  AVG_LATENCY: number;
+  AVG_PARSE_LATENCY: number;
+  MAX_PARSE_LATENCY: number;
+  AVG_COMPILE_LATENCY: number;
+  MAX_COMPILE_LATENCY: number;
+  SUM_COP_TASK_NUM: number;
+  MAX_COP_PROCESS_TIME: number;
+  MAX_COP_PROCESS_ADDRESS: string;
+  MAX_COP_WAIT_TIME: number;
+  MAX_COP_WAIT_ADDRESS: string;
+  AVG_PROCESS_TIME: number;
+  MAX_PROCESS_TIME: number;
+  AVG_WAIT_TIME: number;
+  MAX_WAIT_TIME: number;
+  AVG_BACKOFF_TIME: number;
+  MAX_BACKOFF_TIME: number;
+  AVG_TOTAL_KEYS: number;
+  MAX_TOTAL_KEYS: number;
+  AVG_PROCESSED_KEYS: number;
+  MAX_PROCESSED_KEYS: number;
+  AVG_ROCKSDB_DELETE_SKIPPED_COUNT: number;
+  MAX_ROCKSDB_DELETE_SKIPPED_COUNT: number;
+  AVG_ROCKSDB_KEY_SKIPPED_COUNT: number;
+  MAX_ROCKSDB_KEY_SKIPPED_COUNT: number;
+  AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
+  MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT: number;
+  AVG_ROCKSDB_BLOCK_READ_COUNT: number;
+  MAX_ROCKSDB_BLOCK_READ_COUNT: number;
+  AVG_ROCKSDB_BLOCK_READ_BYTE: number;
+  MAX_ROCKSDB_BLOCK_READ_BYTE: number;
+  AVG_PREWRITE_TIME: number;
+  MAX_PREWRITE_TIME: number;
+  AVG_COMMIT_TIME: number;
+  MAX_COMMIT_TIME: number;
+  AVG_GET_COMMIT_TS_TIME: number;
+  MAX_GET_COMMIT_TS_TIME: number;
+  AVG_COMMIT_BACKOFF_TIME: number;
+  MAX_COMMIT_BACKOFF_TIME: number;
+  AVG_RESOLVE_LOCK_TIME: number;
+  MAX_RESOLVE_LOCK_TIME: number;
+  AVG_LOCAL_LATCH_WAIT_TIME: number;
+  MAX_LOCAL_LATCH_WAIT_TIME: number;
+  AVG_WRITE_KEYS: number;
+  MAX_WRITE_KEYS: number;
+  AVG_WRITE_SIZE: number;
+  MAX_WRITE_SIZE: number;
+  AVG_PREWRITE_REGIONS: number;
+  MAX_PREWRITE_REGIONS: number;
+  AVG_TXN_RETRY: number;
+  MAX_TXN_RETRY: number;
+  SUM_EXEC_RETRY: number;
+  SUM_EXEC_RETRY_TIME: number;
+  SUM_BACKOFF_TIMES: number;
+  BACKOFF_TYPES: string | null;
+  AVG_MEM: number;
+  MAX_MEM: number;
+  AVG_DISK: number;
+  MAX_DISK: number;
+  AVG_KV_TIME: number;
+  AVG_PD_TIME: number;
+  AVG_BACKOFF_TOTAL_TIME: number;
+  AVG_WRITE_SQL_RESP_TIME: number;
+  AVG_TIDB_CPU_TIME: number;
+  AVG_TIKV_CPU_TIME: number;
+  MAX_RESULT_ROWS: number;
+  MIN_RESULT_ROWS: number;
+  AVG_RESULT_ROWS: number;
+  PREPARED: number;
+  AVG_AFFECTED_ROWS: number;
+  FIRST_SEEN: string;
+  LAST_SEEN: string;
+  PLAN_IN_CACHE: number;
+  PLAN_CACHE_HITS: number;
+  PLAN_IN_BINDING: number;
+  QUERY_SAMPLE_TEXT: string;
+  PREV_SAMPLE_TEXT: string;
+  PLAN_DIGEST: string;
+  PLAN: string;
+  BINARY_PLAN: string;
+  CHARSET: string;
+  COLLATION: string;
+  PLAN_HINT: string;
+  MAX_REQUEST_UNIT_READ: number;
+  AVG_REQUEST_UNIT_READ: number;
+  MAX_REQUEST_UNIT_WRITE: number;
+  AVG_REQUEST_UNIT_WRITE: number;
+  MAX_QUEUED_RC_TIME: number;
+  AVG_QUEUED_RC_TIME: number;
+  RESOURCE_GROUP: string;
+  PLAN_CACHE_UNQUALIFIED: number;
+  PLAN_CACHE_UNQUALIFIED_LAST_REASON: string;
+}
+
+/**
+ * Class implementing SQL analysis operations for ForgeSQL ORM.
+ * Provides methods for analyzing query performance, execution plans, and slow queries.
+ */
+export class ForgeSQLAnalyseOperation implements SchemaAnalyzeForgeSql {
+  private readonly forgeOperations: ForgeSqlOperation;
+
+  /**
+   * Creates a new instance of ForgeSQLAnalizeOperation.
+   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
+   */
+  constructor(forgeOperations: ForgeSqlOperation) {
+    this.forgeOperations = forgeOperations;
+    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
+  }
+
+  /**
+   * Executes EXPLAIN on a raw SQL query.
+   * @param {string} query - The SQL query to analyze
+   * @param {unknown[]} bindParams - The query parameters
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ${query}`, bindParams as SqlParameters);
+    return results.map((row) => ({
+      id: row.id,
+      estRows: row.estRows,
+      actRows: row.actRows,
+      task: row.task,
+      accessObject: row["access object"],
+      executionInfo: row["execution info"],
+      operatorInfo: row["operator info"],
+      memory: row.memory,
+      disk: row.disk,
+    }));
+  }
+
+  /**
+   * Executes EXPLAIN on a Drizzle query.
+   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explain(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
+    const { sql, params } = query.toSQL();
+    return this.explainRaw(sql, params);
+  }
+
+  /**
+   * Executes EXPLAIN ANALYZE on a raw SQL query.
+   * @param {string} query - The SQL query to analyze
+   * @param {unknown[]} bindParams - The query parameters
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainAnalyzeRaw(query: string, bindParams: unknown[]): Promise<ExplainAnalyzeRow[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<DecodedPlanRow>(`EXPLAIN ANALYZE ${query}`, bindParams as SqlParameters);
+    return results.map((row) => ({
+      id: row.id,
+      estRows: row.estRows,
+      actRows: row.actRows,
+      task: row.task,
+      accessObject: row["access object"],
+      executionInfo: row["execution info"],
+      operatorInfo: row["operator info"],
+      memory: row.memory,
+      disk: row.disk,
+    }));
+  }
+
+  /**
+   * Executes EXPLAIN ANALYZE on a Drizzle query.
+   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
+   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
+   */
+  async explainAnalyze(query: { toSQL: () => Query }): Promise<ExplainAnalyzeRow[]> {
+    const { sql, params } = query.toSQL();
+    return this.explainAnalyzeRaw(sql, params);
+  }
+
+  /**
+   * Decodes a query execution plan from its string representation.
+   * @param {string} input - The raw execution plan string
+   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
+   */
+  decodedPlan(input: string): ExplainAnalyzeRow[] {
+    if (!input) {
+      return [];
+    }
+    const lines = input.trim().split("\n");
+    if (lines.length < 2) return [];
+
+    const headersRaw = lines[0]
+      .split("\t")
+      .map((h) => h.trim())
+      .filter(Boolean);
+    const headers = headersRaw.map((h) => {
+      return h
+        .replace(/\s+/g, " ")
+        .replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : ""))
+        .replace(/^./, (s) => s.toLowerCase());
+    });
+
+    return lines.slice(1).map((line) => {
+      const values = line
+        .split("\t")
+        .map((s) => s.trim())
+        .filter(Boolean);
+      const row: Record<string, string> = {};
+      headers.forEach((key, i) => {
+        row[key] = values[i] ?? "";
+      });
+      return row as unknown as ExplainAnalyzeRow;
+    });
+  }
+
+  /**
+   * Normalizes a raw slow query row into a more structured format.
+   * @param {SlowQueryRaw} row - The raw slow query data
+   * @returns {SlowQueryNormalized} The normalized slow query data
+   */
+  normalizeSlowQuery(row: SlowQueryRaw): SlowQueryNormalized {
+    return {
+      time: row.Time,
+      txnStartTs: row.Txn_start_ts,
+      user: row.User,
+      host: row.Host,
+      connId: row.Conn_ID,
+      db: row.DB,
+      query: row.Query,
+      digest: row.Digest,
+      queryTime: row.Query_time,
+      compileTime: row.Compile_time,
+      optimizeTime: row.Optimize_time,
+      processTime: row.Process_time,
+      waitTime: row.Wait_time,
+      parseTime: row.Parse_time,
+      rewriteTime: row.Rewrite_time,
+      copTime: row.Cop_time,
+      copProcAvg: row.Cop_proc_avg,
+      copProcMax: row.Cop_proc_max,
+      copProcP90: row.Cop_proc_p90,
+      copProcAddr: row.Cop_proc_addr,
+      copWaitAvg: row.Cop_wait_avg,
+      copWaitMax: row.Cop_wait_max,
+      copWaitP90: row.Cop_wait_p90,
+      copWaitAddr: row.Cop_wait_addr,
+      memMax: row.Mem_max,
+      diskMax: row.Disk_max,
+      totalKeys: row.Total_keys,
+      processKeys: row.Process_keys,
+      requestCount: row.Request_count,
+      kvTotal: row.KV_total,
+      pdTotal: row.PD_total,
+      resultRows: row.Result_rows,
+      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
+      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
+      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
+      plan: row.Plan,
+      binaryPlan: row.Binary_plan,
+      planDigest: row.Plan_digest,
+      parsedPlan: this.decodedPlan(row.Plan),
+    };
+  }
+
+  /**
+   * Builds a SQL query for retrieving cluster statement history.
+   * @param {string[]} tables - The tables to analyze
+   * @param {Date} [from] - The start date for the analysis
+   * @param {Date} [to] - The end date for the analysis
+   * @returns {string} The SQL query for cluster statement history
+   */
+  buildClusterStatementQuery(tables: string[], from?: Date, to?: Date): string {
+    const formatDateTime = (date: Date): string => moment(date).format("YYYY-MM-DDTHH:mm:ss.SSS");
+
+    const tableConditions = tables
+      .map((table) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table}', '%')`)
+      .join(" OR ");
+
+    const timeConditions: string[] = [];
+    if (from) {
+      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime(from)}'`);
+    }
+    if (to) {
+      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime(to)}'`);
+    }
+
+    let whereClauses;
+    if (tableConditions?.length) {
+      whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
+    } else {
+      whereClauses = timeConditions;
+    }
+
+    return `
+      SELECT *
+      FROM (
+        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
+        UNION ALL
+        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
+      ) AS combined
+      ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
+    `;
+  }
+
+  /**
+   * Retrieves and analyzes slow queries from the database.
+   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
+   */
+  async analyzeSlowQueries(): Promise<SlowQueryNormalized[]> {
+    const results = await this.forgeOperations.fetch().executeRawSQL<SlowQueryRaw>(`
+      SELECT *
+      FROM information_schema.slow_query
+      ORDER BY time DESC
+    `);
+    return results.map((row) => this.normalizeSlowQuery(row));
+  }
+
+  /**
+   * Converts a cluster statement row to camelCase format.
+   * @param {Record<string, any>} input - The input row data
+   * @returns {ClusterStatementRowCamelCase} The converted row data
+   */
+  mapToCamelCaseClusterStatement(input: Record<string, any>): ClusterStatementRowCamelCase {
+    if (!input) {
+      return {} as ClusterStatementRowCamelCase;
+    }
+
+    const result: any = {};
+    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
+    for (const key in input) {
+      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
+      result[camelKey] = input[key];
+    }
+
+    return result as ClusterStatementRowCamelCase;
+  }
+
+  /**
+   * Analyzes query history for specific tables using raw table names.
+   * @param {string[]} tables - The table names to analyze
+   * @param {Date} [fromDate] - The start date for the analysis
+   * @param {Date} [toDate] - The end date for the analysis
+   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
+   */
+  async analyzeQueriesHistoryRaw(
+    tables: string[],
+    fromDate?: Date,
+    toDate?: Date,
+  ): Promise<ClusterStatementRowCamelCase[]> {
+    const results = await this.forgeOperations
+      .fetch()
+      .executeRawSQL<ClusterStatementRow>(
+        this.buildClusterStatementQuery(tables ?? [], fromDate, toDate),
+      );
+    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
+  }
+
+  /**
+   * Analyzes query history for specific tables using Drizzle table objects.
+   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
+   * @param {Date} [fromDate] - The start date for the analysis
+   * @param {Date} [toDate] - The end date for the analysis
+   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
+   */
+  async analyzeQueriesHistory(
+    tables: AnyMySqlTable[],
+    fromDate?: Date,
+    toDate?: Date,
+  ): Promise<ClusterStatementRowCamelCase[]> {
+    const tableNames = tables?.map((table) => getTableName(table)) ?? [];
+    return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
+  }
+}
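These analysis operations are reached through the ORM's new `analyze()` accessor, added in the ForgeSQLORM.ts hunks further down. A minimal usage sketch follows; the default-export import shape and the `users` table name are assumptions, while the accessor and method names come from the code above.

```typescript
// Usage sketch only: import shape and "users" table are assumed; analyze(),
// explainRaw(), analyzeSlowQueries() and analyzeQueriesHistoryRaw() are from the diff.
import ForgeSQLORM from "forge-sql-orm";

const forgeSQL = new ForgeSQLORM();

export async function inspectQueryPerformance(): Promise<void> {
  // EXPLAIN a raw statement; bind parameters are passed separately.
  const plan = await forgeSQL
    .analyze()
    .explainRaw("SELECT * FROM users WHERE id = ?", [1]);
  console.log(plan.map((row) => `${row.id}: ${row.operatorInfo ?? ""}`));

  // Recent slow queries, normalized to camelCase with a parsed plan attached.
  const slow = await forgeSQL.analyze().analyzeSlowQueries();
  console.log(slow[0]?.queryTime, slow[0]?.parsedPlan);

  // Statement-summary history for chosen tables over the last hour.
  const oneHourAgo = new Date(Date.now() - 60 * 60 * 1000);
  const history = await forgeSQL.analyze().analyzeQueriesHistoryRaw(["users"], oneHourAgo);
  console.log(`${history.length} summary rows`);
}
```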
package/src/core/ForgeSQLCrudOperations.ts
CHANGED
@@ -37,7 +37,7 @@ export class ForgeSQLCrudOperations implements CRUDForgeSQL
    */
   async insert<T extends AnyMySqlTable>(
     schema: T,
-    models:
+    models: InferInsertModel<T>[],
     updateIfExists: boolean = false,
   ): Promise<number> {
     if (!models?.length) return 0;
@@ -257,12 +257,12 @@ export class ForgeSQLCrudOperations implements CRUDForgeSQL
     }
     const versionMetadata = this.options.additionalMetadata?.[tableName]?.versionField;
     if (!versionMetadata) return undefined;
-let fieldName = versionMetadata.fieldName;
+    let fieldName = versionMetadata.fieldName;

     let versionField = columns[versionMetadata.fieldName];
-    if (!versionField){
-      const find = Object.entries(columns).find(([
-      if (find){
+    if (!versionField) {
+      const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
+      if (find) {
         fieldName = find[0];
         versionField = find[1];
       }
@@ -349,15 +349,22 @@ let fieldName = versionMetadata.fieldName;
     columns: Record<string, AnyColumn>,
   ): InferInsertModel<T> {
     if (!versionMetadata || !columns) return model as InferInsertModel<T>;
+    let fieldName = versionMetadata.fieldName;
+    let versionField = columns[versionMetadata.fieldName];
+    if (!versionField) {
+      const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
+      if (find) {
+        fieldName = find[0];
+        versionField = find[1];
+      }
+    }

-    const versionField = columns[versionMetadata.fieldName];
     if (!versionField) return model as InferInsertModel<T>;

     const modelWithVersion = { ...model };
     const fieldType = versionField.getSQLType();
     const versionValue = fieldType === "datetime" || fieldType === "timestamp" ? new Date() : 1;
-    modelWithVersion[
-    versionValue as any;
+    modelWithVersion[fieldName as keyof typeof modelWithVersion] = versionValue as any;

     return modelWithVersion as InferInsertModel<T>;
   }
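The version-field lookup above now falls back to matching a column's database name when it differs from the Drizzle property key. A hedged configuration sketch of how that metadata appears to be shaped, inferred only from the fields these hunks read (`additionalMetadata[tableName].versionField.fieldName`); the table, columns, and values are illustrative, not taken from the package docs.

```typescript
// Illustrative sketch: option shape inferred from the hunks above, not from package docs.
import { mysqlTable, int, varchar, datetime } from "drizzle-orm/mysql-core";

export const users = mysqlTable("users", {
  id: int("id").primaryKey(),
  name: varchar("name", { length: 255 }),
  // Database column is "updated_at"; the TypeScript key is "updatedAt".
  updatedAt: datetime("updated_at"),
});

const ormOptions = {
  additionalMetadata: {
    users: {
      // After this change the version field can be referenced by the TS key
      // ("updatedAt") or by the column name ("updated_at"); previously only
      // the TS key was matched.
      versionField: { fieldName: "updated_at" },
    },
  },
};
// On insert, a datetime/timestamp version column receives `new Date()`;
// any other column type starts at 1 (see the hunk above).
export default ormOptions;
```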
package/src/core/ForgeSQLORM.ts
CHANGED
@@ -3,14 +3,16 @@ import {
   CRUDForgeSQL,
   ForgeSqlOperation,
   ForgeSqlOrmOptions,
+  SchemaAnalyzeForgeSql,
   SchemaSqlForgeSql,
 } from "./ForgeSQLQueryBuilder";
 import { ForgeSQLSelectOperations } from "./ForgeSQLSelectOperations";
 import { drizzle, MySqlRemoteDatabase, MySqlRemotePreparedQueryHKT } from "drizzle-orm/mysql-proxy";
-import {
+import { createForgeDriverProxy } from "../utils/forgeDriverProxy";
 import type { SelectedFields } from "drizzle-orm/mysql-core/query-builders/select.types";
 import { MySqlSelectBuilder } from "drizzle-orm/mysql-core";
 import { patchDbWithSelectAliased } from "../lib/drizzle/extensions/selectAliased";
+import { ForgeSQLAnalyseOperation } from "./ForgeSQLAnalyseOperations";

 /**
  * Implementation of ForgeSQLORM that uses Drizzle ORM for query building.
@@ -19,9 +21,17 @@ import { patchDbWithSelectAliased } from "../lib/drizzle/extensions/selectAliase
  */
 class ForgeSQLORMImpl implements ForgeSqlOperation {
   private static instance: ForgeSQLORMImpl | null = null;
-  private readonly drizzle
+  private readonly drizzle: MySqlRemoteDatabase<any> & {
+    selectAliased: <TSelection extends SelectedFields>(
+      fields: TSelection,
+    ) => MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>;
+    selectAliasedDistinct: <TSelection extends SelectedFields>(
+      fields: TSelection,
+    ) => MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>;
+  };
   private readonly crudOperations: CRUDForgeSQL;
   private readonly fetchOperations: SchemaSqlForgeSql;
+  private readonly analyzeOperations: SchemaAnalyzeForgeSql;

   /**
    * Private constructor to enforce singleton behavior.
@@ -37,26 +47,34 @@ class ForgeSQLORMImpl implements ForgeSqlOperation
         console.debug("Initializing ForgeSQLORM...");
       }
       // Initialize Drizzle instance with our custom driver
+      const proxiedDriver = createForgeDriverProxy(newOptions.hints, newOptions.logRawSqlQuery);
       this.drizzle = patchDbWithSelectAliased(
-        drizzle(
+        drizzle(proxiedDriver, { logger: newOptions.logRawSqlQuery }),
       );
       this.crudOperations = new ForgeSQLCrudOperations(this, newOptions);
       this.fetchOperations = new ForgeSQLSelectOperations(newOptions);
+      this.analyzeOperations = new ForgeSQLAnalyseOperation(this);
     } catch (error) {
       console.error("ForgeSQLORM initialization failed:", error);
       throw error;
     }
   }

+  /**
+   * Create the modify operations instance.
+   * @returns modify operations.
+   */
+  modify(): CRUDForgeSQL {
+    return this.crudOperations;
+  }
+
   /**
    * Returns the singleton instance of ForgeSQLORMImpl.
    * @param options - Options for configuring ForgeSQL ORM behavior.
    * @returns The singleton instance of ForgeSQLORMImpl.
    */
   static getInstance(options?: ForgeSqlOrmOptions): ForgeSqlOperation {
-
-    ForgeSQLORMImpl.instance = new ForgeSQLORMImpl(options);
-    }
+    ForgeSQLORMImpl.instance ??= new ForgeSQLORMImpl(options);
     return ForgeSQLORMImpl.instance;
   }

@@ -65,7 +83,7 @@ class ForgeSQLORMImpl implements ForgeSqlOperation
    * @returns CRUD operations.
    */
   crud(): CRUDForgeSQL {
-    return this.
+    return this.modify();
   }

   /**
@@ -75,6 +93,9 @@ class ForgeSQLORMImpl implements ForgeSqlOperation
   fetch(): SchemaSqlForgeSql {
     return this.fetchOperations;
   }
+  analyze(): SchemaAnalyzeForgeSql {
+    return this.analyzeOperations;
+  }

   /**
    * Returns a Drizzle query builder instance.
@@ -179,7 +200,7 @@ class ForgeSQLORM implements ForgeSqlOperation
    *
    * @template TSelection - The type of the selected fields
    * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
-   * @returns {MySqlSelectBuilder<TSelection,
+   * @returns {MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>} A distinct select query builder with unique field aliases
    * @throws {Error} If fields parameter is empty
    * @example
    * ```typescript
@@ -200,7 +221,15 @@ class ForgeSQLORM implements ForgeSqlOperation
    * @returns CRUD operations.
    */
   crud(): CRUDForgeSQL {
-    return this.ormInstance.
+    return this.ormInstance.modify();
+  }
+
+  /**
+   * Proxies the `modify` method from `ForgeSQLORMImpl`.
+   * @returns Modify operations.
+   */
+  modify(): CRUDForgeSQL {
+    return this.ormInstance.modify();
   }

   /**
@@ -211,6 +240,14 @@ class ForgeSQLORM implements ForgeSqlOperation
     return this.ormInstance.fetch();
   }

+  /**
+   * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
+   * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
+   */
+  analyze(): SchemaAnalyzeForgeSql {
+    return this.ormInstance.analyze();
+  }
+
   /**
    * Returns a Drizzle query builder instance.
    *
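Taken together, the ForgeSQLORM.ts changes route Drizzle through a proxied Forge driver built from the new `hints` and `logRawSqlQuery` options and expose the new `modify()` and `analyze()` accessors alongside the existing `crud()` and `fetch()`. A rough caller-side sketch; the default-export import and the wrapper constructor accepting options are assumptions, and the shape of `hints` (defined in sqlHints.ts, not shown here) is left out.

```typescript
// Sketch only: import shape and options-forwarding are assumed; logRawSqlQuery
// and the modify()/analyze() accessors appear in the hunks above.
import ForgeSQLORM from "forge-sql-orm";

const orm = new ForgeSQLORM({ logRawSqlQuery: true });

// crud() is now a thin alias for the new modify() accessor.
const crudOps = orm.crud();
const modifyOps = orm.modify(); // same underlying CRUD operations instance

// analyze() exposes EXPLAIN / EXPLAIN ANALYZE and slow-query helpers.
const analyzer = orm.analyze();

export { crudOps, modifyOps, analyzer };
```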