forge-sql-orm 2.1.4 → 2.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +195 -27
- package/dist/ForgeSQLORM.js +632 -192
- package/dist/ForgeSQLORM.js.map +1 -1
- package/dist/ForgeSQLORM.mjs +632 -192
- package/dist/ForgeSQLORM.mjs.map +1 -1
- package/dist/core/ForgeSQLCrudOperations.d.ts.map +1 -1
- package/dist/core/ForgeSQLORM.d.ts +114 -3
- package/dist/core/ForgeSQLORM.d.ts.map +1 -1
- package/dist/core/ForgeSQLQueryBuilder.d.ts +125 -7
- package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
- package/dist/core/ForgeSQLSelectOperations.d.ts.map +1 -1
- package/dist/core/SystemTables.d.ts +3654 -0
- package/dist/core/SystemTables.d.ts.map +1 -1
- package/dist/lib/drizzle/extensions/additionalActions.d.ts +2 -2
- package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
- package/dist/utils/cacheContextUtils.d.ts.map +1 -1
- package/dist/utils/cacheUtils.d.ts.map +1 -1
- package/dist/utils/forgeDriver.d.ts +71 -3
- package/dist/utils/forgeDriver.d.ts.map +1 -1
- package/dist/utils/forgeDriverProxy.d.ts.map +1 -1
- package/dist/utils/metadataContextUtils.d.ts +11 -0
- package/dist/utils/metadataContextUtils.d.ts.map +1 -0
- package/dist/utils/requestTypeContextUtils.d.ts +8 -0
- package/dist/utils/requestTypeContextUtils.d.ts.map +1 -0
- package/dist/utils/sqlUtils.d.ts.map +1 -1
- package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/clearCacheSchedulerTrigger.d.ts.map +1 -1
- package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/dropTablesMigrationWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/fetchSchemaWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts +85 -43
- package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts.map +1 -1
- package/package.json +9 -9
- package/src/core/ForgeSQLCrudOperations.ts +3 -0
- package/src/core/ForgeSQLORM.ts +287 -9
- package/src/core/ForgeSQLQueryBuilder.ts +138 -8
- package/src/core/ForgeSQLSelectOperations.ts +2 -0
- package/src/core/SystemTables.ts +16 -0
- package/src/lib/drizzle/extensions/additionalActions.ts +10 -12
- package/src/utils/cacheContextUtils.ts +4 -2
- package/src/utils/cacheUtils.ts +20 -8
- package/src/utils/forgeDriver.ts +223 -23
- package/src/utils/forgeDriverProxy.ts +2 -0
- package/src/utils/metadataContextUtils.ts +22 -0
- package/src/utils/requestTypeContextUtils.ts +11 -0
- package/src/utils/sqlUtils.ts +1 -0
- package/src/webtriggers/applyMigrationsWebTrigger.ts +9 -6
- package/src/webtriggers/clearCacheSchedulerTrigger.ts +1 -0
- package/src/webtriggers/dropMigrationWebTrigger.ts +2 -0
- package/src/webtriggers/dropTablesMigrationWebTrigger.ts +2 -0
- package/src/webtriggers/fetchSchemaWebTrigger.ts +1 -0
- package/src/webtriggers/topSlowestStatementLastHourTrigger.ts +515 -257
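The headline changes visible in the dist diff below are new public methods on ForgeSQLORM (executeWithMetadata for capturing Forge SQL execution metadata, executeDDL/executeDDLActions for running statements in a DDL operation context), a new logCache option, new STATEMENTS_SUMMARY system-table definitions, and a timeout/retry rework of the slow-statement trigger. As a quick orientation, here is a minimal usage sketch based on the JSDoc examples that appear further down in this diff; the import binding, the `users` table, and the option values are assumptions, not part of the package:

```typescript
// Sketch only: import binding and the `users` drizzle table are assumptions;
// the method names and option keys mirror the JSDoc examples in the diff below.
import { ForgeSQLORM } from "forge-sql-orm";
import { eq } from "drizzle-orm";
import { users } from "./schema"; // hypothetical drizzle-orm table definition

const forgeSQL = new ForgeSQLORM({ logRawSqlQuery: false, logCache: true, cacheTTL: 120 });

export async function demo() {
  // Capture Forge SQL execution metadata (total DB time, response size) for one query
  const rows = await forgeSQL.executeWithMetadata(
    async () => forgeSQL.select().from(users).where(eq(users.id, 1)),
    (dbTime, responseSize, metadata) => {
      console.log(`DB execution time: ${dbTime}ms, response size: ${responseSize} bytes`, metadata);
    },
  );

  // Schema changes go through the new DDL context (never cached)
  await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");

  return rows;
}
```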
package/dist/ForgeSQLORM.js
CHANGED
@@ -410,7 +410,7 @@ async function clearCursorCache(tables, cursor, options) {
     entityQueryBuilder = entityQueryBuilder.cursor(cursor);
   }
   const listResult = await entityQueryBuilder.limit(100).getMany();
-  if (options.
+  if (options.logCache) {
     console.warn(`clear cache Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
   }
   await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
@@ -431,7 +431,7 @@ async function clearExpirationCursorCache(cursor, options) {
     entityQueryBuilder = entityQueryBuilder.cursor(cursor);
   }
   const listResult = await entityQueryBuilder.limit(100).getMany();
-  if (options.
+  if (options.logCache) {
     console.warn(`clear expired Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
   }
   await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
@@ -480,7 +480,7 @@ async function clearTablesCache(tables, options) {
       "clearing cache"
     );
   } finally {
-    if (options.
+    if (options.logCache) {
      const duration = luxon.DateTime.now().toSeconds() - startTime.toSeconds();
      console.info(`Cleared ${totalRecords} cache records in ${duration} seconds`);
    }
@@ -499,7 +499,7 @@ async function clearExpiredCache(options) {
     );
   } finally {
     const duration = luxon.DateTime.now().toSeconds() - startTime.toSeconds();
-    if (options?.
+    if (options?.logCache) {
      console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
    }
  }
@@ -514,7 +514,7 @@ async function getFromCache(query, options) {
   const sqlQuery = query.toSQL();
   const key = hashKey(sqlQuery);
   if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
-    if (options.
+    if (options.logCache) {
      console.warn(`Context contains value to clear. Skip getting from cache`);
    }
    return void 0;
@@ -522,7 +522,7 @@ async function getFromCache(query, options) {
   try {
     const cacheResult = await kvs.kvs.entity(options.cacheEntityName).get(key);
     if (cacheResult && cacheResult[expirationName] >= getCurrentTime() && sqlQuery.sql.toLowerCase() === cacheResult[entityQueryName]) {
-      if (options.
+      if (options.logCache) {
        console.warn(`Get value from cache, cacheKey: ${key}`);
      }
      const results = cacheResult[dataName];
@@ -543,7 +543,7 @@ async function setCacheResult(query, options, results, cacheTtl) {
   const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
   const sqlQuery = query.toSQL();
   if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
-    if (options.
+    if (options.logCache) {
      console.warn(`Context contains value to clear. Skip setting from cache`);
    }
    return;
@@ -558,7 +558,7 @@ async function setCacheResult(query, options, results, cacheTtl) {
       },
       { entityName: options.cacheEntityName }
     ).execute();
-    if (options.
+    if (options.logCache) {
      console.warn(`Store value to cache, cacheKey: ${key}`);
    }
  } catch (error) {
@@ -586,7 +586,7 @@ async function saveQueryLocalCacheQuery(query, rows, options) {
     sql: sql2.toSQL().sql.toLowerCase(),
     data: rows
   };
-  if (options.
+  if (options.logCache) {
     const q = sql2.toSQL();
     console.debug(
       `[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${q.sql}", params=${JSON.stringify(q.params)}`
@@ -603,7 +603,7 @@ async function getQueryLocalCacheQuery(query, options) {
   const sql2 = query;
   const key = hashKey(sql2.toSQL());
   if (context.cache[key] && context.cache[key].sql === sql2.toSQL().sql.toLowerCase()) {
-    if (options.
+    if (options.logCache) {
      const q = sql2.toSQL();
      console.debug(
        `[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${q.sql}", params=${JSON.stringify(q.params)}`
@@ -1034,25 +1034,113 @@ class ForgeSQLSelectOperations {
     return updateQueryResponseResults.rows;
   }
 }
-const 
-
-
-
-
+const metadataQueryContext = new node_async_hooks.AsyncLocalStorage();
+async function saveMetaDataToContext(metadata) {
+  const context = metadataQueryContext.getStore();
+  if (context && metadata) {
+    context.totalResponseSize += metadata.responseSize;
+    context.totalDbExecutionTime += metadata.dbExecutionTime;
+    context.lastMetadata = metadata;
+  }
+}
+async function getLastestMetadata() {
+  return metadataQueryContext.getStore();
+}
+const operationTypeQueryContext = new node_async_hooks.AsyncLocalStorage();
+async function getOperationType() {
+  return operationTypeQueryContext.getStore()?.operationType ?? "DML";
+}
+function isUpdateQueryResponse(obj) {
+  return obj !== null && typeof obj === "object" && typeof obj.affectedRows === "number" && typeof obj.insertId === "number";
+}
+async function withTimeout$1(promise, timeoutMs = 1e4) {
+  let timeoutId;
+  const timeoutPromise = new Promise((_, reject) => {
+    timeoutId = setTimeout(() => {
+      reject(
+        new Error(
+          `Atlassian @forge/sql did not return a response within ${timeoutMs}ms (${timeoutMs / 1e3} seconds), so the request is blocked. Possible causes: slow query, network issues, or exceeding Forge SQL limits.`
+        )
+      );
+    }, timeoutMs);
+  });
+  try {
+    return await Promise.race([promise, timeoutPromise]);
+  } finally {
+    if (timeoutId) {
+      clearTimeout(timeoutId);
     }
-
-
-
-
-
-
-
+  }
+}
+function inlineParams(sql2, params) {
+  let i = 0;
+  return sql2.replace(/\?/g, () => {
+    const val = params[i++];
+    if (val === null) return "NULL";
+    if (typeof val === "number") return val.toString();
+    return `'${String(val).replace(/'/g, "''")}'`;
+  });
+}
+async function processDDLResult(method, result) {
+  if (result.metadata) {
+    await saveMetaDataToContext(result.metadata);
+  }
+  if (!result.rows) {
+    return { rows: [] };
+  }
+  if (isUpdateQueryResponse(result.rows)) {
+    const oneRow = result.rows;
+    return { ...oneRow, rows: [oneRow] };
+  }
+  if (Array.isArray(result.rows)) {
+    if (method === "execute") {
+      return { rows: result.rows };
+    } else {
+      const rows = result.rows.map((r) => Object.values(r));
+      return { rows };
     }
-  const result = await sqlStatement.execute();
-  let rows;
-  rows = result.rows.map((r) => Object.values(r));
-  return { rows };
   }
+  return { rows: [] };
+}
+async function processExecuteMethod(query, params) {
+  const sqlStatement = sql$1.sql.prepare(query);
+  if (params) {
+    sqlStatement.bindParams(...params);
+  }
+  const result = await withTimeout$1(sqlStatement.execute());
+  await saveMetaDataToContext(result.metadata);
+  if (!result.rows) {
+    return { rows: [] };
+  }
+  if (isUpdateQueryResponse(result.rows)) {
+    const oneRow = result.rows;
+    return { ...oneRow, rows: [oneRow] };
+  }
+  return { rows: result.rows };
+}
+async function processAllMethod(query, params) {
+  const sqlStatement = await sql$1.sql.prepare(query);
+  if (params) {
+    await sqlStatement.bindParams(...params);
+  }
+  const result = await withTimeout$1(sqlStatement.execute());
+  await saveMetaDataToContext(result.metadata);
+  if (!result.rows) {
+    return { rows: [] };
+  }
+  const rows = result.rows.map((r) => Object.values(r));
+  return { rows };
+}
+const forgeDriver = async (query, params, method) => {
+  const operationType = await getOperationType();
+  if (operationType === "DDL") {
+    const result = await withTimeout$1(sql$1.sql.executeDDL(inlineParams(query, params)));
+    return await processDDLResult(method, result);
+  }
+  if (method === "execute") {
+    return await processExecuteMethod(query, params ?? []);
+  }
+  return await processAllMethod(query, params ?? []);
 };
 function injectSqlHints(query, hints) {
   if (!hints) {
@@ -1294,10 +1382,8 @@ function createRawQueryExecutor(db, options, useGlobalCache = false) {
   return async function(query, cacheTtl) {
     let sql$12;
     if (sql.isSQLWrapper(query)) {
-      const 
-      sql$12 = 
-      db.dialect
-      );
+      const dialect = db.dialect;
+      sql$12 = dialect.sqlToQuery(query);
     } else {
       sql$12 = {
         sql: query,
@@ -1798,6 +1884,7 @@ class ForgeSQLORMImpl {
     try {
       const newOptions = options ?? {
         logRawSqlQuery: false,
+        logCache: false,
         disableOptimisticLocking: false,
         cacheWrapTable: true,
         cacheTTL: 120,
@@ -1823,6 +1910,49 @@ class ForgeSQLORMImpl {
       throw error;
     }
   }
+  /**
+   * Executes a query and provides access to execution metadata.
+   * This method allows you to capture detailed information about query execution
+   * including database execution time, response size, and Forge SQL metadata.
+   *
+   * @template T - The return type of the query
+   * @param query - A function that returns a Promise with the query result
+   * @param onMetadata - Callback function that receives execution metadata
+   * @returns Promise with the query result
+   * @example
+   * ```typescript
+   * const result = await forgeSQL.executeWithMetadata(
+   *   async () => await forgeSQL.select().from(users).where(eq(users.id, 1)),
+   *   (dbTime, responseSize, metadata) => {
+   *     console.log(`DB execution time: ${dbTime}ms`);
+   *     console.log(`Response size: ${responseSize} bytes`);
+   *     console.log('Forge metadata:', metadata);
+   *   }
+   * );
+   * ```
+   */
+  async executeWithMetadata(query, onMetadata) {
+    return metadataQueryContext.run(
+      {
+        totalDbExecutionTime: 0,
+        totalResponseSize: 0
+      },
+      async () => {
+        try {
+          return await query();
+        } finally {
+          const metadata = await getLastestMetadata();
+          if (metadata && metadata.lastMetadata) {
+            await onMetadata(
+              metadata.totalDbExecutionTime,
+              metadata.totalResponseSize,
+              metadata.lastMetadata
+            );
+          }
+        }
+      }
+    );
+  }
   /**
    * Executes operations within a cache context that collects cache eviction events.
    * All clearCache calls within the context are collected and executed in batch at the end.
@@ -2204,6 +2334,97 @@ class ForgeSQLORMImpl {
   execute(query) {
     return this.drizzle.executeQuery(query);
   }
+  /**
+   * Executes a Data Definition Language (DDL) SQL query.
+   * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
+   *
+   * This method is specifically designed for DDL operations and provides:
+   * - Proper operation type context for DDL queries
+   * - No caching (DDL operations should not be cached)
+   * - Direct execution without query optimization
+   *
+   * @template T - The expected return type of the query result
+   * @param query - The DDL SQL query to execute (SQLWrapper or string)
+   * @returns Promise with query results
+   * @throws {Error} If the DDL operation fails
+   *
+   * @example
+   * ```typescript
+   * // Create a new table
+   * await forgeSQL.executeDDL(`
+   *   CREATE TABLE users (
+   *     id INT PRIMARY KEY AUTO_INCREMENT,
+   *     name VARCHAR(255) NOT NULL,
+   *     email VARCHAR(255) UNIQUE
+   *   )
+   * `);
+   *
+   * // Alter table structure
+   * await forgeSQL.executeDDL(sql`
+   *   ALTER TABLE users
+   *   ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+   * `);
+   *
+   * // Drop a table
+   * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
+   * ```
+   */
+  async executeDDL(query) {
+    return this.executeDDLActions(async () => this.drizzle.executeQuery(query));
+  }
+  /**
+   * Executes a series of actions within a DDL operation context.
+   * This method provides a way to execute regular SQL queries that should be treated
+   * as DDL operations, ensuring proper operation type context for performance monitoring.
+   *
+   * This method is useful for:
+   * - Executing regular SQL queries in DDL context for monitoring purposes
+   * - Wrapping non-DDL operations that should be treated as DDL for analysis
+   * - Ensuring proper operation type context for complex workflows
+   * - Maintaining DDL operation context across multiple function calls
+   *
+   * @template T - The return type of the actions function
+   * @param actions - Function containing SQL operations to execute in DDL context
+   * @returns Promise that resolves to the return value of the actions function
+   *
+   * @example
+   * ```typescript
+   * // Execute regular SQL queries in DDL context for monitoring
+   * await forgeSQL.executeDDLActions(async () => {
+   *   const slowQueries = await forgeSQL.execute(`
+   *     SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
+   *     WHERE AVG_LATENCY > 1000000
+   *   `);
+   *   return slowQueries;
+   * });
+   *
+   * // Execute complex analysis queries in DDL context
+   * const result = await forgeSQL.executeDDLActions(async () => {
+   *   const tableInfo = await forgeSQL.execute("SHOW TABLES");
+   *   const performanceData = await forgeSQL.execute(`
+   *     SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
+   *     WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
+   *   `);
+   *   return { tableInfo, performanceData };
+   * });
+   *
+   * // Execute monitoring queries with error handling
+   * try {
+   *   await forgeSQL.executeDDLActions(async () => {
+   *     const metrics = await forgeSQL.execute(`
+   *       SELECT COUNT(*) as query_count
+   *       FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
+   *     `);
+   *     console.log(`Total queries: ${metrics[0].query_count}`);
+   *   });
+   * } catch (error) {
+   *   console.error("Monitoring query failed:", error);
+   * }
+   * ```
+   */
+  async executeDDLActions(actions) {
+    return operationTypeQueryContext.run({ operationType: "DDL" }, async () => actions());
+  }
   /**
    * Executes a raw SQL query with both local and global cache support.
    * This method provides comprehensive caching for raw SQL queries:
@@ -2270,6 +2491,30 @@ class ForgeSQLORM {
   constructor(options) {
     this.ormInstance = ForgeSQLORMImpl.getInstance(options);
   }
+  /**
+   * Executes a query and provides access to execution metadata.
+   * This method allows you to capture detailed information about query execution
+   * including database execution time, response size, and Forge SQL metadata.
+   *
+   * @template T - The return type of the query
+   * @param query - A function that returns a Promise with the query result
+   * @param onMetadata - Callback function that receives execution metadata
+   * @returns Promise with the query result
+   * @example
+   * ```typescript
+   * const result = await forgeSQL.executeWithMetadata(
+   *   async () => await forgeSQL.select().from(users).where(eq(users.id, 1)),
+   *   (dbTime, responseSize, metadata) => {
+   *     console.log(`DB execution time: ${dbTime}ms`);
+   *     console.log(`Response size: ${responseSize} bytes`);
+   *     console.log('Forge metadata:', metadata);
+   *   }
+   * );
+   * ```
+   */
+  async executeWithMetadata(query, onMetadata) {
+    return this.ormInstance.executeWithMetadata(query, onMetadata);
+  }
   selectCacheable(fields, cacheTTL) {
     return this.ormInstance.selectCacheable(fields, cacheTTL);
   }
@@ -2527,7 +2772,98 @@ class ForgeSQLORM {
    * ```
    */
   execute(query) {
-    return this.ormInstance.
+    return this.ormInstance.execute(query);
+  }
+  /**
+   * Executes a Data Definition Language (DDL) SQL query.
+   * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
+   *
+   * This method is specifically designed for DDL operations and provides:
+   * - Proper operation type context for DDL queries
+   * - No caching (DDL operations should not be cached)
+   * - Direct execution without query optimization
+   *
+   * @template T - The expected return type of the query result
+   * @param query - The DDL SQL query to execute (SQLWrapper or string)
+   * @returns Promise with query results
+   * @throws {Error} If the DDL operation fails
+   *
+   * @example
+   * ```typescript
+   * // Create a new table
+   * await forgeSQL.executeDDL(`
+   *   CREATE TABLE users (
+   *     id INT PRIMARY KEY AUTO_INCREMENT,
+   *     name VARCHAR(255) NOT NULL,
+   *     email VARCHAR(255) UNIQUE
+   *   )
+   * `);
+   *
+   * // Alter table structure
+   * await forgeSQL.executeDDL(sql`
+   *   ALTER TABLE users
+   *   ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+   * `);
+   *
+   * // Drop a table
+   * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
+   * ```
+   */
+  executeDDL(query) {
+    return this.ormInstance.executeDDL(query);
+  }
+  /**
+   * Executes a series of actions within a DDL operation context.
+   * This method provides a way to execute regular SQL queries that should be treated
+   * as DDL operations, ensuring proper operation type context for performance monitoring.
+   *
+   * This method is useful for:
+   * - Executing regular SQL queries in DDL context for monitoring purposes
+   * - Wrapping non-DDL operations that should be treated as DDL for analysis
+   * - Ensuring proper operation type context for complex workflows
+   * - Maintaining DDL operation context across multiple function calls
+   *
+   * @template T - The return type of the actions function
+   * @param actions - Function containing SQL operations to execute in DDL context
+   * @returns Promise that resolves to the return value of the actions function
+   *
+   * @example
+   * ```typescript
+   * // Execute regular SQL queries in DDL context for monitoring
+   * await forgeSQL.executeDDLActions(async () => {
+   *   const slowQueries = await forgeSQL.execute(`
+   *     SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
+   *     WHERE AVG_LATENCY > 1000000
+   *   `);
+   *   return slowQueries;
+   * });
+   *
+   * // Execute complex analysis queries in DDL context
+   * const result = await forgeSQL.executeDDLActions(async () => {
+   *   const tableInfo = await forgeSQL.execute("SHOW TABLES");
+   *   const performanceData = await forgeSQL.execute(`
+   *     SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
+   *     WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
+   *   `);
+   *   return { tableInfo, performanceData };
+   * });
+   *
+   * // Execute monitoring queries with error handling
+   * try {
+   *   await forgeSQL.executeDDLActions(async () => {
+   *     const metrics = await forgeSQL.execute(`
+   *       SELECT COUNT(*) as query_count
+   *       FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
+   *     `);
+   *     console.log(`Total queries: ${metrics[0].query_count}`);
+   *   });
+   * } catch (error) {
+   *   console.error("Monitoring query failed:", error);
+   * }
+   * ```
+   */
+  executeDDLActions(actions) {
+    return this.ormInstance.executeDDLActions(actions);
   }
   /**
    * Executes a raw SQL query with both local and global cache support.
@@ -2548,7 +2884,7 @@ class ForgeSQLORM {
    * ```
    */
   executeCacheable(query, cacheTtl) {
-    return this.ormInstance.
+    return this.ormInstance.executeCacheable(query, cacheTtl);
   }
   /**
    * Creates a Common Table Expression (CTE) builder for complex queries.
@@ -2558,7 +2894,7 @@ class ForgeSQLORM {
    * @example
    * ```typescript
    * const withQuery = forgeSQL.$with('userStats').as(
-   *   forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
+   *   forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
    *     .from(users)
    *     .groupBy(users.id)
    * );
@@ -2576,7 +2912,7 @@ class ForgeSQLORM {
    * @example
    * ```typescript
    * const withQuery = forgeSQL.$with('userStats').as(
-   *   forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
+   *   forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
    *     .from(users)
    *     .groupBy(users.id)
    * );
@@ -3067,6 +3403,14 @@ const clusterStatementsSummaryHistory = informationSchema.table(
   "CLUSTER_STATEMENTS_SUMMARY_HISTORY",
   createClusterStatementsSummarySchema()
 );
+const statementsSummaryHistory = informationSchema.table(
+  "STATEMENTS_SUMMARY_HISTORY",
+  createClusterStatementsSummarySchema()
+);
+const statementsSummary = informationSchema.table(
+  "STATEMENTS_SUMMARY",
+  createClusterStatementsSummarySchema()
+);
 const clusterStatementsSummary = informationSchema.table(
   "CLUSTER_STATEMENTS_SUMMARY",
   createClusterStatementsSummarySchema()
@@ -3099,12 +3443,12 @@ const applySchemaMigrations = async (migration) => {
   if (typeof migration !== "function") {
     throw new Error("migration is not a function");
   }
-  console.
+  console.debug("Provisioning the database");
   await sql$1.sql._provision();
-  console.
+  console.debug("Running schema migrations");
   const migrations2 = await migration(sql$1.migrationRunner);
   const successfulMigrations = await migrations2.run();
-  console.
+  console.debug("Migrations applied:", successfulMigrations);
   const migrationList = await sql$1.migrationRunner.list();
   let migrationHistory = "No migrations found";
   if (Array.isArray(migrationList) && migrationList.length > 0) {
@@ -3113,7 +3457,7 @@ const applySchemaMigrations = async (migration) => {
     );
     migrationHistory = sortedMigrations.map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`).join("\n");
   }
-  console.
+  console.debug("Migrations history:\nid, name, migrated_at\n", migrationHistory);
   return {
     headers: { "Content-Type": ["application/json"] },
     statusCode: 200,
@@ -3218,167 +3562,260 @@ const clearCacheSchedulerTrigger = async (options) => {
     };
   }
 };
-const 
-
-
-
-
-
-
-
-
-
-const 
+const DEFAULT_MEMORY_THRESHOLD = 8 * 1024 * 1024;
+const DEFAULT_TIMEOUT = 300;
+const DEFAULT_TOP_N = 1;
+const DEFAULT_HOURS = 1;
+const DEFAULT_TABLES = "CLUSTER_SUMMARY_AND_HISTORY";
+const MAX_QUERY_TIMEOUT_MS = 3e3;
+const MAX_SQL_LENGTH = 1e3;
+const RETRY_ATTEMPTS = 2;
+const RETRY_BASE_DELAY_MS = 1e3;
+const nsToMs = (value) => {
+  const n = Number(value);
+  return Number.isFinite(n) ? n / 1e6 : NaN;
+};
+const bytesToMB = (value) => {
+  const n = Number(value);
+  return Number.isFinite(n) ? n / (1024 * 1024) : NaN;
+};
+const jsonSafeStringify = (value) => JSON.stringify(value, (_k, v) => typeof v === "bigint" ? v.toString() : v);
+const sanitizeSQL = (sql2, maxLen = MAX_SQL_LENGTH) => {
+  let s = sql2;
+  s = s.replace(/--[^\n\r]*/g, "").replace(/\/\*[\s\S]*?\*\//g, "");
+  s = s.replace(/'(?:\\'|[^'])*'/g, "?");
+  s = s.replace(/\b-?\d+(?:\.\d+)?\b/g, "?");
+  s = s.replace(/\s+/g, " ").trim();
+  if (s.length > maxLen) {
+    s = s.slice(0, maxLen) + " …[truncated]";
+  }
+  return s;
+};
+const withTimeout = async (promise, ms) => {
+  let timer;
   try {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    });
-    const lastHourFilterHistory = drizzleOrm.gte(
-      summaryHistory.summaryEndTime,
-      drizzleOrm.sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`
-    );
-    const lastHourFilterSummary = drizzleOrm.gte(
-      summary.summaryEndTime,
-      drizzleOrm.sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`
-    );
-    const qHistory = orm.getDrizzleQueryBuilder().select(selectShape(summaryHistory)).from(summaryHistory).where(lastHourFilterHistory);
-    const qSummary = orm.getDrizzleQueryBuilder().select(selectShape(summary)).from(summary).where(lastHourFilterSummary);
-    const combined = mysqlCore.unionAll(qHistory, qSummary).as("combined");
-    const thresholdNs = Math.floor(warnThresholdMs * 1e6);
-    const grouped = orm.getDrizzleQueryBuilder().select({
-      digest: combined.digest,
-      stmtType: combined.stmtType,
-      schemaName: combined.schemaName,
-      execCount: drizzleOrm.sql`SUM(${combined.execCount})`.as("execCount"),
-      avgLatencyNs: drizzleOrm.sql`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
-      maxLatencyNs: drizzleOrm.sql`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
-      minLatencyNs: drizzleOrm.sql`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
-      avgProcessTimeNs: drizzleOrm.sql`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
-      avgWaitTimeNs: drizzleOrm.sql`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
-      avgBackoffTimeNs: drizzleOrm.sql`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
-      avgMemBytes: drizzleOrm.sql`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
-      maxMemBytes: drizzleOrm.sql`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
-      avgTotalKeys: drizzleOrm.sql`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
-      firstSeen: drizzleOrm.sql`MIN(${combined.firstSeen})`.as("firstSeen"),
-      lastSeen: drizzleOrm.sql`MAX(${combined.lastSeen})`.as("lastSeen"),
-      planInCache: drizzleOrm.sql`MAX(${combined.planInCache})`.as("planInCache"),
-      planCacheHits: drizzleOrm.sql`SUM(${combined.planCacheHits})`.as("planCacheHits"),
-      // Prefer a non-empty sample text/plan via MAX; acceptable for de-dup
-      digestText: drizzleOrm.sql`MAX(${combined.digestText})`.as("digestText"),
-      plan: drizzleOrm.sql`MAX(${combined.plan})`.as("plan")
-    }).from(combined).where(
-      drizzleOrm.sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`
-    ).groupBy(combined.digest, combined.stmtType, combined.schemaName).as("grouped");
-    const rows = await orm.getDrizzleQueryBuilder().select({
-      digest: grouped.digest,
-      stmtType: grouped.stmtType,
-      schemaName: grouped.schemaName,
-      execCount: grouped.execCount,
-      avgLatencyNs: grouped.avgLatencyNs,
-      maxLatencyNs: grouped.maxLatencyNs,
-      minLatencyNs: grouped.minLatencyNs,
-      avgProcessTimeNs: grouped.avgProcessTimeNs,
-      avgWaitTimeNs: grouped.avgWaitTimeNs,
-      avgBackoffTimeNs: grouped.avgBackoffTimeNs,
-      avgMemBytes: grouped.avgMemBytes,
-      maxMemBytes: grouped.maxMemBytes,
-      avgTotalKeys: grouped.avgTotalKeys,
-      firstSeen: grouped.firstSeen,
-      lastSeen: grouped.lastSeen,
-      planInCache: grouped.planInCache,
-      planCacheHits: grouped.planCacheHits,
-      digestText: grouped.digestText,
-      plan: grouped.plan
-    }).from(grouped).where(
-      drizzleOrm.sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`
-    ).orderBy(drizzleOrm.desc(grouped.avgLatencyNs)).limit(formatLimitOffset(TOP_N));
-    const formatted = rows.map((r, i) => ({
-      rank: i + 1,
-      // 1-based rank in the top N
-      digest: r.digest,
-      stmtType: r.stmtType,
-      schemaName: r.schemaName,
-      execCount: r.execCount,
-      avgLatencyMs: nsToMs(r.avgLatencyNs),
-      // Convert ns to ms for readability
-      maxLatencyMs: nsToMs(r.maxLatencyNs),
-      minLatencyMs: nsToMs(r.minLatencyNs),
-      avgProcessTimeMs: nsToMs(r.avgProcessTimeNs),
-      avgWaitTimeMs: nsToMs(r.avgWaitTimeNs),
-      avgBackoffTimeMs: nsToMs(r.avgBackoffTimeNs),
-      avgMemMB: bytesToMB(r.avgMemBytes),
-      maxMemMB: bytesToMB(r.maxMemBytes),
-      avgMemBytes: r.avgMemBytes,
-      maxMemBytes: r.maxMemBytes,
-      avgTotalKeys: r.avgTotalKeys,
-      firstSeen: r.firstSeen,
-      lastSeen: r.lastSeen,
-      planInCache: r.planInCache,
-      planCacheHits: r.planCacheHits,
-      digestText: r.digestText,
-      plan: r.plan
-    }));
-    for (const f of formatted) {
+    return await Promise.race([
+      promise,
+      new Promise((_resolve, reject) => {
+        timer = setTimeout(() => reject(new Error(`TIMEOUT:${ms}`)), ms);
+      })
+    ]);
+  } finally {
+    if (timer) clearTimeout(timer);
+  }
+};
+const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
+const executeWithRetries = async (task, label) => {
+  let attempt = 0;
+  let delay = RETRY_BASE_DELAY_MS;
+  while (true) {
+    try {
+      attempt++;
+      return await task();
+    } catch (error) {
+      const msg = String(error?.message ?? error);
+      const isTimeout = msg.startsWith("TIMEOUT:");
+      if (attempt > RETRY_ATTEMPTS) throw error;
       console.warn(
-      `${
-
-      sql=${(f.digestText || "").slice(0, 300)}${f.digestText && f.digestText.length > 300 ? "…" : ""}`
+        `${label}: attempt ${attempt} failed${isTimeout ? " (timeout)" : ""}; retrying in ${delay}ms...`,
+        error
       );
-
-
-      ${f.plan}`);
-      }
+      await sleep(delay);
+      delay *= 2;
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  }
+};
+const createErrorResponse = (message, error) => ({
+  headers: { "Content-Type": ["application/json"] },
+  statusCode: 500,
+  statusText: "Internal Server Error",
+  body: jsonSafeStringify({
+    success: false,
+    message,
+    error: error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.message ?? error?.message,
+    timestamp: (/* @__PURE__ */ new Date()).toISOString()
+  })
+});
+const createSuccessResponse = (formatted, options) => ({
+  headers: { "Content-Type": ["application/json"] },
+  statusCode: 200,
+  statusText: "OK",
+  body: jsonSafeStringify({
+    success: true,
+    window: `last_${options.hours}h`,
+    top: options.topN,
+    warnThresholdMs: options.warnThresholdMs,
+    memoryThresholdBytes: options.memoryThresholdBytes,
+    showPlan: options.showPlan,
+    rows: formatted,
+    generatedAt: (/* @__PURE__ */ new Date()).toISOString()
+  })
+});
+const createSelectShape = (table2) => ({
+  digest: table2.digest,
+  stmtType: table2.stmtType,
+  schemaName: table2.schemaName,
+  execCount: table2.execCount,
+  avgLatencyNs: table2.avgLatency,
+  maxLatencyNs: table2.maxLatency,
+  minLatencyNs: table2.minLatency,
+  avgProcessTimeNs: table2.avgProcessTime,
+  avgWaitTimeNs: table2.avgWaitTime,
+  avgBackoffTimeNs: table2.avgBackoffTime,
+  avgTotalKeys: table2.avgTotalKeys,
+  firstSeen: table2.firstSeen,
+  lastSeen: table2.lastSeen,
+  planInCache: table2.planInCache,
+  planCacheHits: table2.planCacheHits,
+  digestText: table2.digestText,
+  plan: table2.plan,
+  avgMemBytes: table2.avgMem,
+  maxMemBytes: table2.maxMem
+});
+const buildCombinedQuery = (orm, options) => {
+  const summaryHistory = statementsSummary;
+  const summary = statementsSummary;
+  const summaryHistoryCluster = clusterStatementsSummaryHistory;
+  const summaryCluster = clusterStatementsSummary;
+  const lastHoursFilter = (table2) => drizzleOrm.gte(table2.summaryEndTime, drizzleOrm.sql`DATE_SUB(NOW(), INTERVAL ${options.hours} HOUR)`);
+  const qHistory = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryHistory)).from(summaryHistory).where(lastHoursFilter(summaryHistory));
+  const qSummary = orm.getDrizzleQueryBuilder().select(createSelectShape(summary)).from(summary).where(lastHoursFilter(summary));
+  const qHistoryCluster = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryHistoryCluster)).from(summaryHistoryCluster).where(lastHoursFilter(summaryHistoryCluster));
+  const qSummaryCluster = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryCluster)).from(summaryCluster).where(lastHoursFilter(summaryCluster));
+  switch (options.tables) {
+    case "SUMMARY_AND_HISTORY":
+      return mysqlCore.unionAll(qHistory, qSummary).as("combined");
+    case "CLUSTER_SUMMARY_AND_HISTORY":
+      return mysqlCore.unionAll(qHistoryCluster, qSummaryCluster).as("combined");
+    default:
+      throw new Error(`Unsupported table configuration: ${options.tables}`);
+  }
+};
+const buildGroupedQuery = (orm, combined) => {
+  return orm.getDrizzleQueryBuilder().select({
+    digest: combined.digest,
+    stmtType: combined.stmtType,
+    schemaName: combined.schemaName,
+    execCount: drizzleOrm.sql`SUM(${combined.execCount})`.as("execCount"),
+    avgLatencyNs: drizzleOrm.sql`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
+    maxLatencyNs: drizzleOrm.sql`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
+    minLatencyNs: drizzleOrm.sql`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
+    avgProcessTimeNs: drizzleOrm.sql`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
+    avgWaitTimeNs: drizzleOrm.sql`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
+    avgBackoffTimeNs: drizzleOrm.sql`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
+    avgMemBytes: drizzleOrm.sql`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
+    maxMemBytes: drizzleOrm.sql`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
+    avgTotalKeys: drizzleOrm.sql`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
+    firstSeen: drizzleOrm.sql`MIN(${combined.firstSeen})`.as("firstSeen"),
+    lastSeen: drizzleOrm.sql`MAX(${combined.lastSeen})`.as("lastSeen"),
+    planInCache: drizzleOrm.sql`MAX(${combined.planInCache})`.as("planInCache"),
+    planCacheHits: drizzleOrm.sql`SUM(${combined.planCacheHits})`.as("planCacheHits"),
+    digestText: drizzleOrm.sql`MAX(${combined.digestText})`.as("digestText"),
+    plan: drizzleOrm.sql`MAX(${combined.plan})`.as("plan")
+  }).from(combined).where(
+    drizzleOrm.sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`
+  ).groupBy(combined.digest, combined.stmtType, combined.schemaName).as("grouped");
+};
+const buildFinalQuery = (orm, grouped, options) => {
+  const thresholdNs = Math.floor(options.warnThresholdMs * 1e6);
+  const memoryThresholdBytes = options.memoryThresholdBytes;
+  const query = orm.getDrizzleQueryBuilder().select({
+    digest: grouped.digest,
+    stmtType: grouped.stmtType,
+    schemaName: grouped.schemaName,
+    execCount: grouped.execCount,
+    avgLatencyNs: grouped.avgLatencyNs,
+    maxLatencyNs: grouped.maxLatencyNs,
+    minLatencyNs: grouped.minLatencyNs,
+    avgProcessTimeNs: grouped.avgProcessTimeNs,
+    avgWaitTimeNs: grouped.avgWaitTimeNs,
+    avgBackoffTimeNs: grouped.avgBackoffTimeNs,
+    avgMemBytes: grouped.avgMemBytes,
+    maxMemBytes: grouped.maxMemBytes,
+    avgTotalKeys: grouped.avgTotalKeys,
+    firstSeen: grouped.firstSeen,
+    lastSeen: grouped.lastSeen,
+    planInCache: grouped.planInCache,
+    planCacheHits: grouped.planCacheHits,
+    digestText: grouped.digestText,
+    plan: grouped.plan
+  }).from(grouped).where(
+    drizzleOrm.sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`
+  ).orderBy(drizzleOrm.desc(grouped.avgLatencyNs)).limit(formatLimitOffset(options.topN));
+  if (options.operationType === "DDL") {
+    return orm.executeDDLActions(async () => await query);
+  }
+  return query;
+};
+const formatQueryResults = (rows, options) => {
+  return rows.map((row, index) => ({
+    rank: index + 1,
+    digest: row.digest,
+    stmtType: row.stmtType,
+    schemaName: row.schemaName,
+    execCount: row.execCount,
+    avgLatencyMs: nsToMs(row.avgLatencyNs),
+    maxLatencyMs: nsToMs(row.maxLatencyNs),
+    minLatencyMs: nsToMs(row.minLatencyNs),
+    avgProcessTimeMs: nsToMs(row.avgProcessTimeNs),
+    avgWaitTimeMs: nsToMs(row.avgWaitTimeNs),
+    avgBackoffTimeMs: nsToMs(row.avgBackoffTimeNs),
+    avgMemMB: bytesToMB(row.avgMemBytes),
+    maxMemMB: bytesToMB(row.maxMemBytes),
+    avgMemBytes: row.avgMemBytes,
+    maxMemBytes: row.maxMemBytes,
+    avgTotalKeys: row.avgTotalKeys,
+    firstSeen: row.firstSeen,
+    lastSeen: row.lastSeen,
+    planInCache: row.planInCache,
+    planCacheHits: row.planCacheHits,
+    digestText: options.operationType === "DDL" ? row.digestText : sanitizeSQL(row.digestText),
+    plan: options.showPlan ? row.plan : void 0
+  }));
+};
+const logQueryResults = (formatted, options) => {
+  for (const result of formatted) {
+    console.warn(
+      `${result.rank}. ${result.stmtType} avg=${result.avgLatencyMs?.toFixed?.(2)}ms max=${result.maxLatencyMs?.toFixed?.(2)}ms mem≈${result.avgMemMB?.toFixed?.(2)}MB(max ${result.maxMemMB?.toFixed?.(2)}MB) exec=${result.execCount}
+  digest=${result.digest}
+  sql=${(result.digestText || "").slice(0, 300)}${result.digestText && result.digestText.length > 300 ? "…" : ""}`
+    );
+    if (options.showPlan && result.plan) {
+      console.warn(`  full plan:
+${result.plan}`);
+    }
+  }
+};
+const topSlowestStatementLastHourTrigger = async (orm, options) => {
+  if (!orm) {
+    return createErrorResponse("ORM instance is required");
+  }
+  const mergedOptions = {
+    warnThresholdMs: options?.warnThresholdMs ?? DEFAULT_TIMEOUT,
+    memoryThresholdBytes: options?.memoryThresholdBytes ?? DEFAULT_MEMORY_THRESHOLD,
+    showPlan: options?.showPlan ?? false,
+    operationType: options?.operationType ?? "DML",
+    topN: options?.topN ?? DEFAULT_TOP_N,
+    hours: options?.hours ?? DEFAULT_HOURS,
+    tables: options?.tables ?? DEFAULT_TABLES
+  };
+  try {
+    const combined = buildCombinedQuery(orm, mergedOptions);
+    const grouped = buildGroupedQuery(orm, combined);
+    const finalQuery = buildFinalQuery(orm, grouped, mergedOptions);
+    const rows = await executeWithRetries(
+      () => withTimeout(finalQuery, MAX_QUERY_TIMEOUT_MS),
+      "topSlowestStatementLastHourTrigger"
+    );
+    const formatted = formatQueryResults(rows, mergedOptions);
+    logQueryResults(formatted, mergedOptions);
+    return createSuccessResponse(formatted, mergedOptions);
   } catch (error) {
-    console.
-      "Error in topSlowestStatementLastHourTrigger:",
+    console.warn(
+      "Error in topSlowestStatementLastHourTrigger (one-off errors can be ignored; if it recurs, investigate):",
       error?.cause?.context?.debug?.sqlMessage ?? error?.cause ?? error
     );
-    return 
-      headers: { "Content-Type": ["application/json"] },
-      statusCode: 500,
-      statusText: "Internal Server Error",
-      body: jsonSafeStringify({
-        success: false,
-        message: "Failed to fetch or log slow queries",
-        error: error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.message,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
-      })
-    };
+    return createErrorResponse("Failed to fetch or log slow queries", error);
   }
 };
 const getHttpResponse = (statusCode, body) => {
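The rewritten trigger now takes an options object (warnThresholdMs, memoryThresholdBytes, topN, hours, tables, showPlan, operationType) with the defaults shown above, retries failed runs twice with exponential backoff, and caps each query at MAX_QUERY_TIMEOUT_MS. A sketch of calling it from a Forge trigger handler; the import bindings, handler name, and option values are illustrative assumptions, while the trigger function and ForgeSQLORM class are the ones shown in this diff:

```typescript
// Sketch: handler name, manifest wiring, and option values are placeholders.
import { ForgeSQLORM, topSlowestStatementLastHourTrigger } from "forge-sql-orm";

export const slowStatementsReport = async () => {
  const orm = new ForgeSQLORM({ logRawSqlQuery: false, logCache: false });
  return topSlowestStatementLastHourTrigger(orm, {
    warnThresholdMs: 300,                  // matches DEFAULT_TIMEOUT
    memoryThresholdBytes: 8 * 1024 * 1024, // matches DEFAULT_MEMORY_THRESHOLD
    topN: 5,
    hours: 1,
    tables: "CLUSTER_SUMMARY_AND_HISTORY",
    showPlan: false,
    operationType: "DML",
  });
};
```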
@@ -3419,6 +3856,7 @@ exports.getHttpResponse = getHttpResponse;
 exports.getPrimaryKeys = getPrimaryKeys;
 exports.getTableMetadata = getTableMetadata;
 exports.getTables = getTables;
+exports.isUpdateQueryResponse = isUpdateQueryResponse;
 exports.mapSelectAllFieldsToAlias = mapSelectAllFieldsToAlias;
 exports.mapSelectFieldsWithAlias = mapSelectFieldsWithAlias;
 exports.migrations = migrations;
@@ -3426,5 +3864,7 @@ exports.nextVal = nextVal;
 exports.parseDateTime = parseDateTime;
 exports.patchDbWithSelectAliased = patchDbWithSelectAliased;
 exports.slowQuery = slowQuery;
+exports.statementsSummary = statementsSummary;
+exports.statementsSummaryHistory = statementsSummaryHistory;
 exports.topSlowestStatementLastHourTrigger = topSlowestStatementLastHourTrigger;
 //# sourceMappingURL=ForgeSQLORM.js.map