forge-sql-orm 2.1.4 → 2.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +195 -27
- package/dist/ForgeSQLORM.js +632 -192
- package/dist/ForgeSQLORM.js.map +1 -1
- package/dist/ForgeSQLORM.mjs +632 -192
- package/dist/ForgeSQLORM.mjs.map +1 -1
- package/dist/core/ForgeSQLCrudOperations.d.ts.map +1 -1
- package/dist/core/ForgeSQLORM.d.ts +114 -3
- package/dist/core/ForgeSQLORM.d.ts.map +1 -1
- package/dist/core/ForgeSQLQueryBuilder.d.ts +125 -7
- package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
- package/dist/core/ForgeSQLSelectOperations.d.ts.map +1 -1
- package/dist/core/SystemTables.d.ts +3654 -0
- package/dist/core/SystemTables.d.ts.map +1 -1
- package/dist/lib/drizzle/extensions/additionalActions.d.ts +2 -2
- package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
- package/dist/utils/cacheContextUtils.d.ts.map +1 -1
- package/dist/utils/cacheUtils.d.ts.map +1 -1
- package/dist/utils/forgeDriver.d.ts +71 -3
- package/dist/utils/forgeDriver.d.ts.map +1 -1
- package/dist/utils/forgeDriverProxy.d.ts.map +1 -1
- package/dist/utils/metadataContextUtils.d.ts +11 -0
- package/dist/utils/metadataContextUtils.d.ts.map +1 -0
- package/dist/utils/requestTypeContextUtils.d.ts +8 -0
- package/dist/utils/requestTypeContextUtils.d.ts.map +1 -0
- package/dist/utils/sqlUtils.d.ts.map +1 -1
- package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/clearCacheSchedulerTrigger.d.ts.map +1 -1
- package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/dropTablesMigrationWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/fetchSchemaWebTrigger.d.ts.map +1 -1
- package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts +85 -43
- package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts.map +1 -1
- package/package.json +9 -9
- package/src/core/ForgeSQLCrudOperations.ts +3 -0
- package/src/core/ForgeSQLORM.ts +287 -9
- package/src/core/ForgeSQLQueryBuilder.ts +138 -8
- package/src/core/ForgeSQLSelectOperations.ts +2 -0
- package/src/core/SystemTables.ts +16 -0
- package/src/lib/drizzle/extensions/additionalActions.ts +10 -12
- package/src/utils/cacheContextUtils.ts +4 -2
- package/src/utils/cacheUtils.ts +20 -8
- package/src/utils/forgeDriver.ts +223 -23
- package/src/utils/forgeDriverProxy.ts +2 -0
- package/src/utils/metadataContextUtils.ts +22 -0
- package/src/utils/requestTypeContextUtils.ts +11 -0
- package/src/utils/sqlUtils.ts +1 -0
- package/src/webtriggers/applyMigrationsWebTrigger.ts +9 -6
- package/src/webtriggers/clearCacheSchedulerTrigger.ts +1 -0
- package/src/webtriggers/dropMigrationWebTrigger.ts +2 -0
- package/src/webtriggers/dropTablesMigrationWebTrigger.ts +2 -0
- package/src/webtriggers/fetchSchemaWebTrigger.ts +1 -0
- package/src/webtriggers/topSlowestStatementLastHourTrigger.ts +515 -257
package/dist/ForgeSQLORM.mjs
CHANGED
|
@@ -391,7 +391,7 @@ async function clearCursorCache(tables, cursor, options) {
|
|
|
391
391
|
entityQueryBuilder = entityQueryBuilder.cursor(cursor);
|
|
392
392
|
}
|
|
393
393
|
const listResult = await entityQueryBuilder.limit(100).getMany();
|
|
394
|
-
if (options.
|
|
394
|
+
if (options.logCache) {
|
|
395
395
|
console.warn(`clear cache Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
|
|
396
396
|
}
|
|
397
397
|
await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
|
|
@@ -412,7 +412,7 @@ async function clearExpirationCursorCache(cursor, options) {
|
|
|
412
412
|
entityQueryBuilder = entityQueryBuilder.cursor(cursor);
|
|
413
413
|
}
|
|
414
414
|
const listResult = await entityQueryBuilder.limit(100).getMany();
|
|
415
|
-
if (options.
|
|
415
|
+
if (options.logCache) {
|
|
416
416
|
console.warn(`clear expired Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
|
|
417
417
|
}
|
|
418
418
|
await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
|
|
@@ -461,7 +461,7 @@ async function clearTablesCache(tables, options) {
|
|
|
461
461
|
"clearing cache"
|
|
462
462
|
);
|
|
463
463
|
} finally {
|
|
464
|
-
if (options.
|
|
464
|
+
if (options.logCache) {
|
|
465
465
|
const duration = DateTime.now().toSeconds() - startTime.toSeconds();
|
|
466
466
|
console.info(`Cleared ${totalRecords} cache records in ${duration} seconds`);
|
|
467
467
|
}
|
|
@@ -480,7 +480,7 @@ async function clearExpiredCache(options) {
|
|
|
480
480
|
);
|
|
481
481
|
} finally {
|
|
482
482
|
const duration = DateTime.now().toSeconds() - startTime.toSeconds();
|
|
483
|
-
if (options?.
|
|
483
|
+
if (options?.logCache) {
|
|
484
484
|
console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
|
|
485
485
|
}
|
|
486
486
|
}
|
|
@@ -495,7 +495,7 @@ async function getFromCache(query, options) {
|
|
|
495
495
|
const sqlQuery = query.toSQL();
|
|
496
496
|
const key = hashKey(sqlQuery);
|
|
497
497
|
if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
|
|
498
|
-
if (options.
|
|
498
|
+
if (options.logCache) {
|
|
499
499
|
console.warn(`Context contains value to clear. Skip getting from cache`);
|
|
500
500
|
}
|
|
501
501
|
return void 0;
|
|
@@ -503,7 +503,7 @@ async function getFromCache(query, options) {
|
|
|
503
503
|
try {
|
|
504
504
|
const cacheResult = await kvs.entity(options.cacheEntityName).get(key);
|
|
505
505
|
if (cacheResult && cacheResult[expirationName] >= getCurrentTime() && sqlQuery.sql.toLowerCase() === cacheResult[entityQueryName]) {
|
|
506
|
-
if (options.
|
|
506
|
+
if (options.logCache) {
|
|
507
507
|
console.warn(`Get value from cache, cacheKey: ${key}`);
|
|
508
508
|
}
|
|
509
509
|
const results = cacheResult[dataName];
|
|
@@ -524,7 +524,7 @@ async function setCacheResult(query, options, results, cacheTtl) {
|
|
|
524
524
|
const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
|
|
525
525
|
const sqlQuery = query.toSQL();
|
|
526
526
|
if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
|
|
527
|
-
if (options.
|
|
527
|
+
if (options.logCache) {
|
|
528
528
|
console.warn(`Context contains value to clear. Skip setting from cache`);
|
|
529
529
|
}
|
|
530
530
|
return;
|
|
@@ -539,7 +539,7 @@ async function setCacheResult(query, options, results, cacheTtl) {
|
|
|
539
539
|
},
|
|
540
540
|
{ entityName: options.cacheEntityName }
|
|
541
541
|
).execute();
|
|
542
|
-
if (options.
|
|
542
|
+
if (options.logCache) {
|
|
543
543
|
console.warn(`Store value to cache, cacheKey: ${key}`);
|
|
544
544
|
}
|
|
545
545
|
} catch (error) {
|
|
@@ -567,7 +567,7 @@ async function saveQueryLocalCacheQuery(query, rows, options) {
|
|
|
567
567
|
sql: sql2.toSQL().sql.toLowerCase(),
|
|
568
568
|
data: rows
|
|
569
569
|
};
|
|
570
|
-
if (options.
|
|
570
|
+
if (options.logCache) {
|
|
571
571
|
const q = sql2.toSQL();
|
|
572
572
|
console.debug(
|
|
573
573
|
`[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${q.sql}", params=${JSON.stringify(q.params)}`
|
|
@@ -584,7 +584,7 @@ async function getQueryLocalCacheQuery(query, options) {
|
|
|
584
584
|
const sql2 = query;
|
|
585
585
|
const key = hashKey(sql2.toSQL());
|
|
586
586
|
if (context.cache[key] && context.cache[key].sql === sql2.toSQL().sql.toLowerCase()) {
|
|
587
|
-
if (options.
|
|
587
|
+
if (options.logCache) {
|
|
588
588
|
const q = sql2.toSQL();
|
|
589
589
|
console.debug(
|
|
590
590
|
`[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${q.sql}", params=${JSON.stringify(q.params)}`
|
|
@@ -1015,25 +1015,113 @@ class ForgeSQLSelectOperations {
|
|
|
1015
1015
|
return updateQueryResponseResults.rows;
|
|
1016
1016
|
}
|
|
1017
1017
|
}
|
|
1018
|
-
const
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1018
|
+
const metadataQueryContext = new AsyncLocalStorage();
|
|
1019
|
+
async function saveMetaDataToContext(metadata) {
|
|
1020
|
+
const context = metadataQueryContext.getStore();
|
|
1021
|
+
if (context && metadata) {
|
|
1022
|
+
context.totalResponseSize += metadata.responseSize;
|
|
1023
|
+
context.totalDbExecutionTime += metadata.dbExecutionTime;
|
|
1024
|
+
context.lastMetadata = metadata;
|
|
1025
|
+
}
|
|
1026
|
+
}
|
|
1027
|
+
async function getLastestMetadata() {
|
|
1028
|
+
return metadataQueryContext.getStore();
|
|
1029
|
+
}
|
|
1030
|
+
const operationTypeQueryContext = new AsyncLocalStorage();
|
|
1031
|
+
async function getOperationType() {
|
|
1032
|
+
return operationTypeQueryContext.getStore()?.operationType ?? "DML";
|
|
1033
|
+
}
|
|
1034
|
+
function isUpdateQueryResponse(obj) {
|
|
1035
|
+
return obj !== null && typeof obj === "object" && typeof obj.affectedRows === "number" && typeof obj.insertId === "number";
|
|
1036
|
+
}
|
|
1037
|
+
async function withTimeout$1(promise, timeoutMs = 1e4) {
|
|
1038
|
+
let timeoutId;
|
|
1039
|
+
const timeoutPromise = new Promise((_, reject) => {
|
|
1040
|
+
timeoutId = setTimeout(() => {
|
|
1041
|
+
reject(
|
|
1042
|
+
new Error(
|
|
1043
|
+
`Atlassian @forge/sql did not return a response within ${timeoutMs}ms (${timeoutMs / 1e3} seconds), so the request is blocked. Possible causes: slow query, network issues, or exceeding Forge SQL limits.`
|
|
1044
|
+
)
|
|
1045
|
+
);
|
|
1046
|
+
}, timeoutMs);
|
|
1047
|
+
});
|
|
1048
|
+
try {
|
|
1049
|
+
return await Promise.race([promise, timeoutPromise]);
|
|
1050
|
+
} finally {
|
|
1051
|
+
if (timeoutId) {
|
|
1052
|
+
clearTimeout(timeoutId);
|
|
1023
1053
|
}
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1054
|
+
}
|
|
1055
|
+
}
|
|
1056
|
+
function inlineParams(sql2, params) {
|
|
1057
|
+
let i = 0;
|
|
1058
|
+
return sql2.replace(/\?/g, () => {
|
|
1059
|
+
const val = params[i++];
|
|
1060
|
+
if (val === null) return "NULL";
|
|
1061
|
+
if (typeof val === "number") return val.toString();
|
|
1062
|
+
return `'${String(val).replace(/'/g, "''")}'`;
|
|
1063
|
+
});
|
|
1064
|
+
}
|
|
1065
|
+
async function processDDLResult(method, result) {
|
|
1066
|
+
if (result.metadata) {
|
|
1067
|
+
await saveMetaDataToContext(result.metadata);
|
|
1068
|
+
}
|
|
1069
|
+
if (!result.rows) {
|
|
1070
|
+
return { rows: [] };
|
|
1071
|
+
}
|
|
1072
|
+
if (isUpdateQueryResponse(result.rows)) {
|
|
1073
|
+
const oneRow = result.rows;
|
|
1074
|
+
return { ...oneRow, rows: [oneRow] };
|
|
1075
|
+
}
|
|
1076
|
+
if (Array.isArray(result.rows)) {
|
|
1077
|
+
if (method === "execute") {
|
|
1078
|
+
return { rows: result.rows };
|
|
1079
|
+
} else {
|
|
1080
|
+
const rows = result.rows.map((r) => Object.values(r));
|
|
1081
|
+
return { rows };
|
|
1031
1082
|
}
|
|
1032
|
-
const result = await sqlStatement.execute();
|
|
1033
|
-
let rows;
|
|
1034
|
-
rows = result.rows.map((r) => Object.values(r));
|
|
1035
|
-
return { rows };
|
|
1036
1083
|
}
|
|
1084
|
+
return { rows: [] };
|
|
1085
|
+
}
|
|
1086
|
+
async function processExecuteMethod(query, params) {
|
|
1087
|
+
const sqlStatement = sql$1.prepare(query);
|
|
1088
|
+
if (params) {
|
|
1089
|
+
sqlStatement.bindParams(...params);
|
|
1090
|
+
}
|
|
1091
|
+
const result = await withTimeout$1(sqlStatement.execute());
|
|
1092
|
+
await saveMetaDataToContext(result.metadata);
|
|
1093
|
+
if (!result.rows) {
|
|
1094
|
+
return { rows: [] };
|
|
1095
|
+
}
|
|
1096
|
+
if (isUpdateQueryResponse(result.rows)) {
|
|
1097
|
+
const oneRow = result.rows;
|
|
1098
|
+
return { ...oneRow, rows: [oneRow] };
|
|
1099
|
+
}
|
|
1100
|
+
return { rows: result.rows };
|
|
1101
|
+
}
|
|
1102
|
+
async function processAllMethod(query, params) {
|
|
1103
|
+
const sqlStatement = await sql$1.prepare(query);
|
|
1104
|
+
if (params) {
|
|
1105
|
+
await sqlStatement.bindParams(...params);
|
|
1106
|
+
}
|
|
1107
|
+
const result = await withTimeout$1(sqlStatement.execute());
|
|
1108
|
+
await saveMetaDataToContext(result.metadata);
|
|
1109
|
+
if (!result.rows) {
|
|
1110
|
+
return { rows: [] };
|
|
1111
|
+
}
|
|
1112
|
+
const rows = result.rows.map((r) => Object.values(r));
|
|
1113
|
+
return { rows };
|
|
1114
|
+
}
|
|
1115
|
+
const forgeDriver = async (query, params, method) => {
|
|
1116
|
+
const operationType = await getOperationType();
|
|
1117
|
+
if (operationType === "DDL") {
|
|
1118
|
+
const result = await withTimeout$1(sql$1.executeDDL(inlineParams(query, params)));
|
|
1119
|
+
return await processDDLResult(method, result);
|
|
1120
|
+
}
|
|
1121
|
+
if (method === "execute") {
|
|
1122
|
+
return await processExecuteMethod(query, params ?? []);
|
|
1123
|
+
}
|
|
1124
|
+
return await processAllMethod(query, params ?? []);
|
|
1037
1125
|
};
|
|
1038
1126
|
function injectSqlHints(query, hints) {
|
|
1039
1127
|
if (!hints) {
|
|
@@ -1275,10 +1363,8 @@ function createRawQueryExecutor(db, options, useGlobalCache = false) {
|
|
|
1275
1363
|
return async function(query, cacheTtl) {
|
|
1276
1364
|
let sql2;
|
|
1277
1365
|
if (isSQLWrapper(query)) {
|
|
1278
|
-
const
|
|
1279
|
-
sql2 =
|
|
1280
|
-
db.dialect
|
|
1281
|
-
);
|
|
1366
|
+
const dialect = db.dialect;
|
|
1367
|
+
sql2 = dialect.sqlToQuery(query);
|
|
1282
1368
|
} else {
|
|
1283
1369
|
sql2 = {
|
|
1284
1370
|
sql: query,
|
|
@@ -1779,6 +1865,7 @@ class ForgeSQLORMImpl {
|
|
|
1779
1865
|
try {
|
|
1780
1866
|
const newOptions = options ?? {
|
|
1781
1867
|
logRawSqlQuery: false,
|
|
1868
|
+
logCache: false,
|
|
1782
1869
|
disableOptimisticLocking: false,
|
|
1783
1870
|
cacheWrapTable: true,
|
|
1784
1871
|
cacheTTL: 120,
|
|
@@ -1804,6 +1891,49 @@ class ForgeSQLORMImpl {
|
|
|
1804
1891
|
throw error;
|
|
1805
1892
|
}
|
|
1806
1893
|
}
|
|
1894
|
+
/**
|
|
1895
|
+
* Executes a query and provides access to execution metadata.
|
|
1896
|
+
* This method allows you to capture detailed information about query execution
|
|
1897
|
+
* including database execution time, response size, and Forge SQL metadata.
|
|
1898
|
+
*
|
|
1899
|
+
* @template T - The return type of the query
|
|
1900
|
+
* @param query - A function that returns a Promise with the query result
|
|
1901
|
+
* @param onMetadata - Callback function that receives execution metadata
|
|
1902
|
+
* @returns Promise with the query result
|
|
1903
|
+
* @example
|
|
1904
|
+
* ```typescript
|
|
1905
|
+
* const result = await forgeSQL.executeWithMetadata(
|
|
1906
|
+
* async () => await forgeSQL.select().from(users).where(eq(users.id, 1)),
|
|
1907
|
+
* (dbTime, responseSize, metadata) => {
|
|
1908
|
+
* console.log(`DB execution time: ${dbTime}ms`);
|
|
1909
|
+
* console.log(`Response size: ${responseSize} bytes`);
|
|
1910
|
+
* console.log('Forge metadata:', metadata);
|
|
1911
|
+
* }
|
|
1912
|
+
* );
|
|
1913
|
+
* ```
|
|
1914
|
+
*/
|
|
1915
|
+
async executeWithMetadata(query, onMetadata) {
|
|
1916
|
+
return metadataQueryContext.run(
|
|
1917
|
+
{
|
|
1918
|
+
totalDbExecutionTime: 0,
|
|
1919
|
+
totalResponseSize: 0
|
|
1920
|
+
},
|
|
1921
|
+
async () => {
|
|
1922
|
+
try {
|
|
1923
|
+
return await query();
|
|
1924
|
+
} finally {
|
|
1925
|
+
const metadata = await getLastestMetadata();
|
|
1926
|
+
if (metadata && metadata.lastMetadata) {
|
|
1927
|
+
await onMetadata(
|
|
1928
|
+
metadata.totalDbExecutionTime,
|
|
1929
|
+
metadata.totalResponseSize,
|
|
1930
|
+
metadata.lastMetadata
|
|
1931
|
+
);
|
|
1932
|
+
}
|
|
1933
|
+
}
|
|
1934
|
+
}
|
|
1935
|
+
);
|
|
1936
|
+
}
|
|
1807
1937
|
/**
|
|
1808
1938
|
* Executes operations within a cache context that collects cache eviction events.
|
|
1809
1939
|
* All clearCache calls within the context are collected and executed in batch at the end.
|
|
@@ -2185,6 +2315,97 @@ class ForgeSQLORMImpl {
|
|
|
2185
2315
|
execute(query) {
|
|
2186
2316
|
return this.drizzle.executeQuery(query);
|
|
2187
2317
|
}
|
|
2318
|
+
/**
|
|
2319
|
+
* Executes a Data Definition Language (DDL) SQL query.
|
|
2320
|
+
* DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
|
|
2321
|
+
*
|
|
2322
|
+
* This method is specifically designed for DDL operations and provides:
|
|
2323
|
+
* - Proper operation type context for DDL queries
|
|
2324
|
+
* - No caching (DDL operations should not be cached)
|
|
2325
|
+
* - Direct execution without query optimization
|
|
2326
|
+
*
|
|
2327
|
+
* @template T - The expected return type of the query result
|
|
2328
|
+
* @param query - The DDL SQL query to execute (SQLWrapper or string)
|
|
2329
|
+
* @returns Promise with query results
|
|
2330
|
+
* @throws {Error} If the DDL operation fails
|
|
2331
|
+
*
|
|
2332
|
+
* @example
|
|
2333
|
+
* ```typescript
|
|
2334
|
+
* // Create a new table
|
|
2335
|
+
* await forgeSQL.executeDDL(`
|
|
2336
|
+
* CREATE TABLE users (
|
|
2337
|
+
* id INT PRIMARY KEY AUTO_INCREMENT,
|
|
2338
|
+
* name VARCHAR(255) NOT NULL,
|
|
2339
|
+
* email VARCHAR(255) UNIQUE
|
|
2340
|
+
* )
|
|
2341
|
+
* `);
|
|
2342
|
+
*
|
|
2343
|
+
* // Alter table structure
|
|
2344
|
+
* await forgeSQL.executeDDL(sql`
|
|
2345
|
+
* ALTER TABLE users
|
|
2346
|
+
* ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
|
2347
|
+
* `);
|
|
2348
|
+
*
|
|
2349
|
+
* // Drop a table
|
|
2350
|
+
* await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
|
|
2351
|
+
* ```
|
|
2352
|
+
*/
|
|
2353
|
+
async executeDDL(query) {
|
|
2354
|
+
return this.executeDDLActions(async () => this.drizzle.executeQuery(query));
|
|
2355
|
+
}
|
|
2356
|
+
/**
|
|
2357
|
+
* Executes a series of actions within a DDL operation context.
|
|
2358
|
+
* This method provides a way to execute regular SQL queries that should be treated
|
|
2359
|
+
* as DDL operations, ensuring proper operation type context for performance monitoring.
|
|
2360
|
+
*
|
|
2361
|
+
* This method is useful for:
|
|
2362
|
+
* - Executing regular SQL queries in DDL context for monitoring purposes
|
|
2363
|
+
* - Wrapping non-DDL operations that should be treated as DDL for analysis
|
|
2364
|
+
* - Ensuring proper operation type context for complex workflows
|
|
2365
|
+
* - Maintaining DDL operation context across multiple function calls
|
|
2366
|
+
*
|
|
2367
|
+
* @template T - The return type of the actions function
|
|
2368
|
+
* @param actions - Function containing SQL operations to execute in DDL context
|
|
2369
|
+
* @returns Promise that resolves to the return value of the actions function
|
|
2370
|
+
*
|
|
2371
|
+
* @example
|
|
2372
|
+
* ```typescript
|
|
2373
|
+
* // Execute regular SQL queries in DDL context for monitoring
|
|
2374
|
+
* await forgeSQL.executeDDLActions(async () => {
|
|
2375
|
+
* const slowQueries = await forgeSQL.execute(`
|
|
2376
|
+
* SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
2377
|
+
* WHERE AVG_LATENCY > 1000000
|
|
2378
|
+
* `);
|
|
2379
|
+
* return slowQueries;
|
|
2380
|
+
* });
|
|
2381
|
+
*
|
|
2382
|
+
* // Execute complex analysis queries in DDL context
|
|
2383
|
+
* const result = await forgeSQL.executeDDLActions(async () => {
|
|
2384
|
+
* const tableInfo = await forgeSQL.execute("SHOW TABLES");
|
|
2385
|
+
* const performanceData = await forgeSQL.execute(`
|
|
2386
|
+
* SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
|
|
2387
|
+
* WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
|
|
2388
|
+
* `);
|
|
2389
|
+
* return { tableInfo, performanceData };
|
|
2390
|
+
* });
|
|
2391
|
+
*
|
|
2392
|
+
* // Execute monitoring queries with error handling
|
|
2393
|
+
* try {
|
|
2394
|
+
* await forgeSQL.executeDDLActions(async () => {
|
|
2395
|
+
* const metrics = await forgeSQL.execute(`
|
|
2396
|
+
* SELECT COUNT(*) as query_count
|
|
2397
|
+
* FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
2398
|
+
* `);
|
|
2399
|
+
* console.log(`Total queries: ${metrics[0].query_count}`);
|
|
2400
|
+
* });
|
|
2401
|
+
* } catch (error) {
|
|
2402
|
+
* console.error("Monitoring query failed:", error);
|
|
2403
|
+
* }
|
|
2404
|
+
* ```
|
|
2405
|
+
*/
|
|
2406
|
+
async executeDDLActions(actions) {
|
|
2407
|
+
return operationTypeQueryContext.run({ operationType: "DDL" }, async () => actions());
|
|
2408
|
+
}
|
|
2188
2409
|
/**
|
|
2189
2410
|
* Executes a raw SQL query with both local and global cache support.
|
|
2190
2411
|
* This method provides comprehensive caching for raw SQL queries:
|
|
@@ -2251,6 +2472,30 @@ class ForgeSQLORM {
|
|
|
2251
2472
|
constructor(options) {
|
|
2252
2473
|
this.ormInstance = ForgeSQLORMImpl.getInstance(options);
|
|
2253
2474
|
}
|
|
2475
|
+
/**
|
|
2476
|
+
* Executes a query and provides access to execution metadata.
|
|
2477
|
+
* This method allows you to capture detailed information about query execution
|
|
2478
|
+
* including database execution time, response size, and Forge SQL metadata.
|
|
2479
|
+
*
|
|
2480
|
+
* @template T - The return type of the query
|
|
2481
|
+
* @param query - A function that returns a Promise with the query result
|
|
2482
|
+
* @param onMetadata - Callback function that receives execution metadata
|
|
2483
|
+
* @returns Promise with the query result
|
|
2484
|
+
* @example
|
|
2485
|
+
* ```typescript
|
|
2486
|
+
* const result = await forgeSQL.executeWithMetadata(
|
|
2487
|
+
* async () => await forgeSQL.select().from(users).where(eq(users.id, 1)),
|
|
2488
|
+
* (dbTime, responseSize, metadata) => {
|
|
2489
|
+
* console.log(`DB execution time: ${dbTime}ms`);
|
|
2490
|
+
* console.log(`Response size: ${responseSize} bytes`);
|
|
2491
|
+
* console.log('Forge metadata:', metadata);
|
|
2492
|
+
* }
|
|
2493
|
+
* );
|
|
2494
|
+
* ```
|
|
2495
|
+
*/
|
|
2496
|
+
async executeWithMetadata(query, onMetadata) {
|
|
2497
|
+
return this.ormInstance.executeWithMetadata(query, onMetadata);
|
|
2498
|
+
}
|
|
2254
2499
|
selectCacheable(fields, cacheTTL) {
|
|
2255
2500
|
return this.ormInstance.selectCacheable(fields, cacheTTL);
|
|
2256
2501
|
}
|
|
@@ -2508,7 +2753,98 @@ class ForgeSQLORM {
|
|
|
2508
2753
|
* ```
|
|
2509
2754
|
*/
|
|
2510
2755
|
execute(query) {
|
|
2511
|
-
return this.ormInstance.
|
|
2756
|
+
return this.ormInstance.execute(query);
|
|
2757
|
+
}
|
|
2758
|
+
/**
|
|
2759
|
+
* Executes a Data Definition Language (DDL) SQL query.
|
|
2760
|
+
* DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
|
|
2761
|
+
*
|
|
2762
|
+
* This method is specifically designed for DDL operations and provides:
|
|
2763
|
+
* - Proper operation type context for DDL queries
|
|
2764
|
+
* - No caching (DDL operations should not be cached)
|
|
2765
|
+
* - Direct execution without query optimization
|
|
2766
|
+
*
|
|
2767
|
+
* @template T - The expected return type of the query result
|
|
2768
|
+
* @param query - The DDL SQL query to execute (SQLWrapper or string)
|
|
2769
|
+
* @returns Promise with query results
|
|
2770
|
+
* @throws {Error} If the DDL operation fails
|
|
2771
|
+
*
|
|
2772
|
+
* @example
|
|
2773
|
+
* ```typescript
|
|
2774
|
+
* // Create a new table
|
|
2775
|
+
* await forgeSQL.executeDDL(`
|
|
2776
|
+
* CREATE TABLE users (
|
|
2777
|
+
* id INT PRIMARY KEY AUTO_INCREMENT,
|
|
2778
|
+
* name VARCHAR(255) NOT NULL,
|
|
2779
|
+
* email VARCHAR(255) UNIQUE
|
|
2780
|
+
* )
|
|
2781
|
+
* `);
|
|
2782
|
+
*
|
|
2783
|
+
* // Alter table structure
|
|
2784
|
+
* await forgeSQL.executeDDL(sql`
|
|
2785
|
+
* ALTER TABLE users
|
|
2786
|
+
* ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
|
2787
|
+
* `);
|
|
2788
|
+
*
|
|
2789
|
+
* // Drop a table
|
|
2790
|
+
* await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
|
|
2791
|
+
* ```
|
|
2792
|
+
*/
|
|
2793
|
+
executeDDL(query) {
|
|
2794
|
+
return this.ormInstance.executeDDL(query);
|
|
2795
|
+
}
|
|
2796
|
+
/**
|
|
2797
|
+
* Executes a series of actions within a DDL operation context.
|
|
2798
|
+
* This method provides a way to execute regular SQL queries that should be treated
|
|
2799
|
+
* as DDL operations, ensuring proper operation type context for performance monitoring.
|
|
2800
|
+
*
|
|
2801
|
+
* This method is useful for:
|
|
2802
|
+
* - Executing regular SQL queries in DDL context for monitoring purposes
|
|
2803
|
+
* - Wrapping non-DDL operations that should be treated as DDL for analysis
|
|
2804
|
+
* - Ensuring proper operation type context for complex workflows
|
|
2805
|
+
* - Maintaining DDL operation context across multiple function calls
|
|
2806
|
+
*
|
|
2807
|
+
* @template T - The return type of the actions function
|
|
2808
|
+
* @param actions - Function containing SQL operations to execute in DDL context
|
|
2809
|
+
* @returns Promise that resolves to the return value of the actions function
|
|
2810
|
+
*
|
|
2811
|
+
* @example
|
|
2812
|
+
* ```typescript
|
|
2813
|
+
* // Execute regular SQL queries in DDL context for monitoring
|
|
2814
|
+
* await forgeSQL.executeDDLActions(async () => {
|
|
2815
|
+
* const slowQueries = await forgeSQL.execute(`
|
|
2816
|
+
* SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
2817
|
+
* WHERE AVG_LATENCY > 1000000
|
|
2818
|
+
* `);
|
|
2819
|
+
* return slowQueries;
|
|
2820
|
+
* });
|
|
2821
|
+
*
|
|
2822
|
+
* // Execute complex analysis queries in DDL context
|
|
2823
|
+
* const result = await forgeSQL.executeDDLActions(async () => {
|
|
2824
|
+
* const tableInfo = await forgeSQL.execute("SHOW TABLES");
|
|
2825
|
+
* const performanceData = await forgeSQL.execute(`
|
|
2826
|
+
* SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
|
|
2827
|
+
* WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
|
|
2828
|
+
* `);
|
|
2829
|
+
* return { tableInfo, performanceData };
|
|
2830
|
+
* });
|
|
2831
|
+
*
|
|
2832
|
+
* // Execute monitoring queries with error handling
|
|
2833
|
+
* try {
|
|
2834
|
+
* await forgeSQL.executeDDLActions(async () => {
|
|
2835
|
+
* const metrics = await forgeSQL.execute(`
|
|
2836
|
+
* SELECT COUNT(*) as query_count
|
|
2837
|
+
* FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
2838
|
+
* `);
|
|
2839
|
+
* console.log(`Total queries: ${metrics[0].query_count}`);
|
|
2840
|
+
* });
|
|
2841
|
+
* } catch (error) {
|
|
2842
|
+
* console.error("Monitoring query failed:", error);
|
|
2843
|
+
* }
|
|
2844
|
+
* ```
|
|
2845
|
+
*/
|
|
2846
|
+
executeDDLActions(actions) {
|
|
2847
|
+
return this.ormInstance.executeDDLActions(actions);
|
|
2512
2848
|
}
|
|
2513
2849
|
/**
|
|
2514
2850
|
* Executes a raw SQL query with both local and global cache support.
|
|
@@ -2529,7 +2865,7 @@ class ForgeSQLORM {
|
|
|
2529
2865
|
* ```
|
|
2530
2866
|
*/
|
|
2531
2867
|
executeCacheable(query, cacheTtl) {
|
|
2532
|
-
return this.ormInstance.
|
|
2868
|
+
return this.ormInstance.executeCacheable(query, cacheTtl);
|
|
2533
2869
|
}
|
|
2534
2870
|
/**
|
|
2535
2871
|
* Creates a Common Table Expression (CTE) builder for complex queries.
|
|
@@ -2539,7 +2875,7 @@ class ForgeSQLORM {
|
|
|
2539
2875
|
* @example
|
|
2540
2876
|
* ```typescript
|
|
2541
2877
|
* const withQuery = forgeSQL.$with('userStats').as(
|
|
2542
|
-
* forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
|
|
2878
|
+
* forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
|
|
2543
2879
|
* .from(users)
|
|
2544
2880
|
* .groupBy(users.id)
|
|
2545
2881
|
* );
|
|
@@ -2557,7 +2893,7 @@ class ForgeSQLORM {
|
|
|
2557
2893
|
* @example
|
|
2558
2894
|
* ```typescript
|
|
2559
2895
|
* const withQuery = forgeSQL.$with('userStats').as(
|
|
2560
|
-
* forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
|
|
2896
|
+
* forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
|
|
2561
2897
|
* .from(users)
|
|
2562
2898
|
* .groupBy(users.id)
|
|
2563
2899
|
* );
|
|
@@ -3048,6 +3384,14 @@ const clusterStatementsSummaryHistory = informationSchema.table(
|
|
|
3048
3384
|
"CLUSTER_STATEMENTS_SUMMARY_HISTORY",
|
|
3049
3385
|
createClusterStatementsSummarySchema()
|
|
3050
3386
|
);
|
|
3387
|
+
const statementsSummaryHistory = informationSchema.table(
|
|
3388
|
+
"STATEMENTS_SUMMARY_HISTORY",
|
|
3389
|
+
createClusterStatementsSummarySchema()
|
|
3390
|
+
);
|
|
3391
|
+
const statementsSummary = informationSchema.table(
|
|
3392
|
+
"STATEMENTS_SUMMARY",
|
|
3393
|
+
createClusterStatementsSummarySchema()
|
|
3394
|
+
);
|
|
3051
3395
|
const clusterStatementsSummary = informationSchema.table(
|
|
3052
3396
|
"CLUSTER_STATEMENTS_SUMMARY",
|
|
3053
3397
|
createClusterStatementsSummarySchema()
|
|
@@ -3080,12 +3424,12 @@ const applySchemaMigrations = async (migration) => {
|
|
|
3080
3424
|
if (typeof migration !== "function") {
|
|
3081
3425
|
throw new Error("migration is not a function");
|
|
3082
3426
|
}
|
|
3083
|
-
console.
|
|
3427
|
+
console.debug("Provisioning the database");
|
|
3084
3428
|
await sql$1._provision();
|
|
3085
|
-
console.
|
|
3429
|
+
console.debug("Running schema migrations");
|
|
3086
3430
|
const migrations2 = await migration(migrationRunner);
|
|
3087
3431
|
const successfulMigrations = await migrations2.run();
|
|
3088
|
-
console.
|
|
3432
|
+
console.debug("Migrations applied:", successfulMigrations);
|
|
3089
3433
|
const migrationList = await migrationRunner.list();
|
|
3090
3434
|
let migrationHistory = "No migrations found";
|
|
3091
3435
|
if (Array.isArray(migrationList) && migrationList.length > 0) {
|
|
@@ -3094,7 +3438,7 @@ const applySchemaMigrations = async (migration) => {
|
|
|
3094
3438
|
);
|
|
3095
3439
|
migrationHistory = sortedMigrations.map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`).join("\n");
|
|
3096
3440
|
}
|
|
3097
|
-
console.
|
|
3441
|
+
console.debug("Migrations history:\nid, name, migrated_at\n", migrationHistory);
|
|
3098
3442
|
return {
|
|
3099
3443
|
headers: { "Content-Type": ["application/json"] },
|
|
3100
3444
|
statusCode: 200,
|
|
@@ -3199,167 +3543,260 @@ const clearCacheSchedulerTrigger = async (options) => {
|
|
|
3199
3543
|
};
|
|
3200
3544
|
}
|
|
3201
3545
|
};
|
|
3202
|
-
const
|
|
3203
|
-
|
|
3204
|
-
|
|
3205
|
-
|
|
3206
|
-
|
|
3207
|
-
|
|
3208
|
-
|
|
3209
|
-
|
|
3210
|
-
|
|
3211
|
-
|
|
3212
|
-
const
|
|
3546
|
+
const DEFAULT_MEMORY_THRESHOLD = 8 * 1024 * 1024;
|
|
3547
|
+
const DEFAULT_TIMEOUT = 300;
|
|
3548
|
+
const DEFAULT_TOP_N = 1;
|
|
3549
|
+
const DEFAULT_HOURS = 1;
|
|
3550
|
+
const DEFAULT_TABLES = "CLUSTER_SUMMARY_AND_HISTORY";
|
|
3551
|
+
const MAX_QUERY_TIMEOUT_MS = 3e3;
|
|
3552
|
+
const MAX_SQL_LENGTH = 1e3;
|
|
3553
|
+
const RETRY_ATTEMPTS = 2;
|
|
3554
|
+
const RETRY_BASE_DELAY_MS = 1e3;
|
|
3555
|
+
const nsToMs = (value) => {
|
|
3556
|
+
const n = Number(value);
|
|
3557
|
+
return Number.isFinite(n) ? n / 1e6 : NaN;
|
|
3558
|
+
};
|
|
3559
|
+
const bytesToMB = (value) => {
|
|
3560
|
+
const n = Number(value);
|
|
3561
|
+
return Number.isFinite(n) ? n / (1024 * 1024) : NaN;
|
|
3562
|
+
};
|
|
3563
|
+
const jsonSafeStringify = (value) => JSON.stringify(value, (_k, v) => typeof v === "bigint" ? v.toString() : v);
|
|
3564
|
+
const sanitizeSQL = (sql2, maxLen = MAX_SQL_LENGTH) => {
|
|
3565
|
+
let s = sql2;
|
|
3566
|
+
s = s.replace(/--[^\n\r]*/g, "").replace(/\/\*[\s\S]*?\*\//g, "");
|
|
3567
|
+
s = s.replace(/'(?:\\'|[^'])*'/g, "?");
|
|
3568
|
+
s = s.replace(/\b-?\d+(?:\.\d+)?\b/g, "?");
|
|
3569
|
+
s = s.replace(/\s+/g, " ").trim();
|
|
3570
|
+
if (s.length > maxLen) {
|
|
3571
|
+
s = s.slice(0, maxLen) + " …[truncated]";
|
|
3572
|
+
}
|
|
3573
|
+
return s;
|
|
3574
|
+
};
|
|
/**
 * Awaits `promise` but rejects with Error("TIMEOUT:<ms>") if it does not
 * settle within `ms` milliseconds. The pending timer is always cleared.
 *
 * Fix: when the timeout wins the race the abandoned promise keeps running;
 * if it later rejects, that rejection would surface as an unhandled promise
 * rejection (a process-level warning/crash in Node). A no-op handler is now
 * attached in the timeout path to absorb such late rejections.
 *
 * @param {Promise<*>} promise - Work to bound in time.
 * @param {number} ms - Timeout in milliseconds.
 * @returns {Promise<*>} Settles with the outcome of `promise`.
 * @throws {Error} With message `TIMEOUT:<ms>` when the deadline elapses first.
 */
const withTimeout = async (promise, ms) => {
  let timer;
  try {
    return await Promise.race([
      promise,
      new Promise((_resolve, reject) => {
        timer = setTimeout(() => reject(new Error(`TIMEOUT:${ms}`)), ms);
      })
    ]);
  } catch (error) {
    if (String(error?.message ?? "").startsWith("TIMEOUT:")) {
      // Swallow any late rejection from the abandoned promise.
      Promise.resolve(promise).catch(() => {});
    }
    throw error;
  } finally {
    if (timer) clearTimeout(timer);
  }
};
/**
 * Resolves after `ms` milliseconds; used for retry backoff pauses.
 * @param {number} ms - Delay in milliseconds.
 * @returns {Promise<void>}
 */
const sleep = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
/**
 * Runs `task`, retrying up to RETRY_ATTEMPTS additional times with
 * exponential backoff (RETRY_BASE_DELAY_MS, doubling after every retry).
 * @param {() => Promise<*>} task - Async operation to execute.
 * @param {string} label - Prefix used in retry warning logs.
 * @returns {Promise<*>} Result of the first successful attempt.
 * @throws The last error once all attempts are exhausted.
 */
const executeWithRetries = async (task, label) => {
  let backoff = RETRY_BASE_DELAY_MS;
  for (let attempt = 1; ; attempt++) {
    try {
      return await task();
    } catch (error) {
      // Give up after RETRY_ATTEMPTS retries (first attempt + retries).
      if (attempt > RETRY_ATTEMPTS) throw error;
      const timedOut = String(error?.message ?? error).startsWith("TIMEOUT:");
      console.warn(
        `${label}: attempt ${attempt} failed${timedOut ? " (timeout)" : ""}; retrying in ${backoff}ms...`,
        error
      );
      await sleep(backoff);
      backoff *= 2;
    }
  }
};
/**
 * Builds a 500 web-trigger response whose JSON body carries the failure
 * message plus the most specific error detail available (SQL message from
 * the error cause is preferred, then cause message, then the error message).
 * @param {string} message - Human-readable failure summary.
 * @param {Error} [error] - Optional underlying error.
 * @returns {{headers: object, statusCode: number, statusText: string, body: string}}
 */
const createErrorResponse = (message, error) => {
  const detail =
    error?.cause?.context?.debug?.sqlMessage ??
    error?.cause?.message ??
    error?.message;
  const payload = {
    success: false,
    message,
    error: detail,
    timestamp: new Date().toISOString()
  };
  return {
    headers: { "Content-Type": ["application/json"] },
    statusCode: 500,
    statusText: "Internal Server Error",
    body: jsonSafeStringify(payload)
  };
};
/**
 * Builds a 200 web-trigger response whose JSON body echoes the effective
 * trigger options alongside the formatted slow-statement rows.
 * @param {Array<object>} formatted - Rows from formatQueryResults.
 * @param {object} options - Merged trigger options (hours, topN, thresholds…).
 * @returns {{headers: object, statusCode: number, statusText: string, body: string}}
 */
const createSuccessResponse = (formatted, options) => {
  const payload = {
    success: true,
    window: `last_${options.hours}h`,
    top: options.topN,
    warnThresholdMs: options.warnThresholdMs,
    memoryThresholdBytes: options.memoryThresholdBytes,
    showPlan: options.showPlan,
    rows: formatted,
    generatedAt: new Date().toISOString()
  };
  return {
    headers: { "Content-Type": ["application/json"] },
    statusCode: 200,
    statusText: "OK",
    body: jsonSafeStringify(payload)
  };
};
/**
 * Maps a statements-summary table (node-local or cluster variant) onto the
 * uniform column shape used by the combined UNION query. Time columns get an
 * explicit `Ns` suffix and memory columns a `Bytes` suffix so units are
 * unambiguous downstream.
 * NOTE(review): assumes every table passed in exposes avgLatency/avgMem/etc.;
 * verify this holds for both the statementsSummary* and
 * clusterStatementsSummary* schemas.
 */
const createSelectShape = (table) => ({
  digest: table.digest,
  stmtType: table.stmtType,
  schemaName: table.schemaName,
  execCount: table.execCount,
  avgLatencyNs: table.avgLatency,
  maxLatencyNs: table.maxLatency,
  minLatencyNs: table.minLatency,
  avgProcessTimeNs: table.avgProcessTime,
  avgWaitTimeNs: table.avgWaitTime,
  avgBackoffTimeNs: table.avgBackoffTime,
  avgTotalKeys: table.avgTotalKeys,
  firstSeen: table.firstSeen,
  lastSeen: table.lastSeen,
  planInCache: table.planInCache,
  planCacheHits: table.planCacheHits,
  digestText: table.digestText,
  plan: table.plan,
  avgMemBytes: table.avgMem,
  maxMemBytes: table.maxMem
});
/**
 * Builds the UNION ALL sub-query that merges current and historical
 * statements-summary rows restricted to the configured look-back window.
 *
 * Fix: the non-cluster branch previously bound BOTH legs to
 * `statementsSummary`, unioning the live summary table with itself; the
 * history leg now reads from `statementsSummaryHistory` (exported by this
 * module) so the "SUMMARY_AND_HISTORY" mode actually includes history rows,
 * mirroring the cluster branch which already uses two distinct tables.
 *
 * @param {*} orm - ForgeSQL ORM instance exposing getDrizzleQueryBuilder().
 * @param {{hours: number, tables: string}} options - Merged trigger options.
 * @returns {*} Drizzle sub-query aliased as "combined".
 * @throws {Error} When options.tables is not a supported configuration.
 */
const buildCombinedQuery = (orm, options) => {
  const summaryHistory = statementsSummaryHistory;
  const summary = statementsSummary;
  const summaryHistoryCluster = clusterStatementsSummaryHistory;
  const summaryCluster = clusterStatementsSummary;
  // Keep only rows whose summary window ended within the last `hours` hours.
  const lastHoursFilter = (table) => gte(table.summaryEndTime, sql`DATE_SUB(NOW(), INTERVAL ${options.hours} HOUR)`);
  const qHistory = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryHistory)).from(summaryHistory).where(lastHoursFilter(summaryHistory));
  const qSummary = orm.getDrizzleQueryBuilder().select(createSelectShape(summary)).from(summary).where(lastHoursFilter(summary));
  const qHistoryCluster = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryHistoryCluster)).from(summaryHistoryCluster).where(lastHoursFilter(summaryHistoryCluster));
  const qSummaryCluster = orm.getDrizzleQueryBuilder().select(createSelectShape(summaryCluster)).from(summaryCluster).where(lastHoursFilter(summaryCluster));
  switch (options.tables) {
    case "SUMMARY_AND_HISTORY":
      return unionAll(qHistory, qSummary).as("combined");
    case "CLUSTER_SUMMARY_AND_HISTORY":
      return unionAll(qHistoryCluster, qSummaryCluster).as("combined");
    default:
      throw new Error(`Unsupported table configuration: ${options.tables}`);
  }
};
/**
 * Aggregates the combined UNION sub-query per statement digest. Rows are
 * grouped by (digest, stmtType, schemaName); counters are SUMmed, latencies
 * and memory take worst-case MAX (MIN for minLatency/firstSeen), and a
 * non-empty sample digestText/plan is picked via MAX — acceptable for de-dup.
 * Empty digests and housekeeping statement types (Use/Set/Show) are excluded.
 * @param {*} orm - ForgeSQL ORM instance exposing getDrizzleQueryBuilder().
 * @param {*} combined - Sub-query produced by buildCombinedQuery.
 * @returns {*} Drizzle sub-query aliased as "grouped".
 */
const buildGroupedQuery = (orm, combined) => {
  return orm.getDrizzleQueryBuilder().select({
    digest: combined.digest,
    stmtType: combined.stmtType,
    schemaName: combined.schemaName,
    execCount: sql`SUM(${combined.execCount})`.as("execCount"),
    avgLatencyNs: sql`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
    maxLatencyNs: sql`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
    minLatencyNs: sql`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
    avgProcessTimeNs: sql`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
    avgWaitTimeNs: sql`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
    avgBackoffTimeNs: sql`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
    avgMemBytes: sql`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
    maxMemBytes: sql`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
    avgTotalKeys: sql`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
    firstSeen: sql`MIN(${combined.firstSeen})`.as("firstSeen"),
    lastSeen: sql`MAX(${combined.lastSeen})`.as("lastSeen"),
    planInCache: sql`MAX(${combined.planInCache})`.as("planInCache"),
    planCacheHits: sql`SUM(${combined.planCacheHits})`.as("planCacheHits"),
    // Prefer a non-empty sample text/plan via MAX; acceptable for de-dup.
    digestText: sql`MAX(${combined.digestText})`.as("digestText"),
    plan: sql`MAX(${combined.plan})`.as("plan")
  }).from(combined).where(
    sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`
  ).groupBy(combined.digest, combined.stmtType, combined.schemaName).as("grouped");
};
/**
 * Builds the final ranking query over the grouped sub-query: keeps digests
 * whose max average latency exceeds warnThresholdMs (converted to ns) OR
 * whose average memory exceeds memoryThresholdBytes, ordered by average
 * latency descending and limited to the top N.
 * For operationType "DDL" the query is executed inside
 * orm.executeDDLActions (returning the awaited rows); otherwise the
 * still-awaitable Drizzle query is returned as-is.
 * @param {*} orm - ForgeSQL ORM instance.
 * @param {*} grouped - Sub-query produced by buildGroupedQuery.
 * @param {{warnThresholdMs: number, memoryThresholdBytes: number, topN: number, operationType: string}} options
 * @returns {*} Awaitable query or a promise of rows (DDL path).
 */
const buildFinalQuery = (orm, grouped, options) => {
  // warnThresholdMs (ms) → integer nanoseconds to match avgLatencyNs units.
  const thresholdNs = Math.floor(options.warnThresholdMs * 1e6);
  const memoryThresholdBytes = options.memoryThresholdBytes;
  const query = orm.getDrizzleQueryBuilder().select({
    digest: grouped.digest,
    stmtType: grouped.stmtType,
    schemaName: grouped.schemaName,
    execCount: grouped.execCount,
    avgLatencyNs: grouped.avgLatencyNs,
    maxLatencyNs: grouped.maxLatencyNs,
    minLatencyNs: grouped.minLatencyNs,
    avgProcessTimeNs: grouped.avgProcessTimeNs,
    avgWaitTimeNs: grouped.avgWaitTimeNs,
    avgBackoffTimeNs: grouped.avgBackoffTimeNs,
    avgMemBytes: grouped.avgMemBytes,
    maxMemBytes: grouped.maxMemBytes,
    avgTotalKeys: grouped.avgTotalKeys,
    firstSeen: grouped.firstSeen,
    lastSeen: grouped.lastSeen,
    planInCache: grouped.planInCache,
    planCacheHits: grouped.planCacheHits,
    digestText: grouped.digestText,
    plan: grouped.plan
  }).from(grouped).where(
    sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`
  ).orderBy(desc(grouped.avgLatencyNs)).limit(formatLimitOffset(options.topN));
  if (options.operationType === "DDL") {
    return orm.executeDDLActions(async () => await query);
  }
  return query;
};
/**
 * Converts raw ranked rows into the report shape: adds a 1-based rank,
 * derives ms/MB readings from the ns/bytes columns (raw byte columns are
 * kept alongside), sanitizes the SQL text for non-DDL runs, and omits the
 * execution plan unless showPlan is set.
 * @param {Array<object>} rows - Rows returned by the final query.
 * @param {{operationType: string, showPlan: boolean}} options - Merged trigger options.
 * @returns {Array<object>} Formatted, rank-ordered report rows.
 */
const formatQueryResults = (rows, options) => {
  return rows.map((row, index) => ({
    rank: index + 1, // 1-based rank within the top N
    digest: row.digest,
    stmtType: row.stmtType,
    schemaName: row.schemaName,
    execCount: row.execCount,
    avgLatencyMs: nsToMs(row.avgLatencyNs), // ns → ms for readability
    maxLatencyMs: nsToMs(row.maxLatencyNs),
    minLatencyMs: nsToMs(row.minLatencyNs),
    avgProcessTimeMs: nsToMs(row.avgProcessTimeNs),
    avgWaitTimeMs: nsToMs(row.avgWaitTimeNs),
    avgBackoffTimeMs: nsToMs(row.avgBackoffTimeNs),
    avgMemMB: bytesToMB(row.avgMemBytes),
    maxMemMB: bytesToMB(row.maxMemBytes),
    avgMemBytes: row.avgMemBytes,
    maxMemBytes: row.maxMemBytes,
    avgTotalKeys: row.avgTotalKeys,
    firstSeen: row.firstSeen,
    lastSeen: row.lastSeen,
    planInCache: row.planInCache,
    planCacheHits: row.planCacheHits,
    // DDL runs keep the raw text; DML text is literal-masked before logging.
    digestText: options.operationType === "DDL" ? row.digestText : sanitizeSQL(row.digestText),
    plan: options.showPlan ? row.plan : void 0
  }));
};
/**
 * Emits one console.warn per formatted slow-statement row (rank, latency,
 * memory, execution count, digest and a 300-char SQL preview), plus the full
 * execution plan when showPlan is enabled.
 * @param {Array<object>} formatted - Rows from formatQueryResults.
 * @param {{showPlan: boolean}} options - Merged trigger options.
 */
const logQueryResults = (formatted, options) => {
  formatted.forEach((entry) => {
    const preview = (entry.digestText || "").slice(0, 300);
    const ellipsis = entry.digestText && entry.digestText.length > 300 ? "…" : "";
    console.warn(
      `${entry.rank}. ${entry.stmtType} avg=${entry.avgLatencyMs?.toFixed?.(2)}ms max=${entry.maxLatencyMs?.toFixed?.(2)}ms mem≈${entry.avgMemMB?.toFixed?.(2)}MB(max ${entry.maxMemMB?.toFixed?.(2)}MB) exec=${entry.execCount}
digest=${entry.digest}
sql=${preview}${ellipsis}`
    );
    if (options.showPlan && entry.plan) {
      console.warn(` full plan:
${entry.plan}`);
    }
  });
};
/**
 * Web-trigger entry point: finds the slowest / most memory-hungry statement
 * digests over the configured look-back window, logs them, and returns a
 * JSON web-trigger response. Query execution is bounded by
 * MAX_QUERY_TIMEOUT_MS and retried with backoff; failures yield a 500
 * response instead of throwing.
 * @param {*} orm - ForgeSQL ORM instance (required).
 * @param {object} [options] - Optional overrides for thresholds/window/topN.
 * @returns {Promise<object>} Web-trigger response (200 on success, 500 on error).
 */
const topSlowestStatementLastHourTrigger = async (orm, options) => {
  if (!orm) {
    return createErrorResponse("ORM instance is required");
  }
  // `??` (not destructuring defaults) so an explicit null also falls back.
  const settings = {
    warnThresholdMs: options?.warnThresholdMs ?? DEFAULT_TIMEOUT,
    memoryThresholdBytes: options?.memoryThresholdBytes ?? DEFAULT_MEMORY_THRESHOLD,
    showPlan: options?.showPlan ?? false,
    operationType: options?.operationType ?? "DML",
    topN: options?.topN ?? DEFAULT_TOP_N,
    hours: options?.hours ?? DEFAULT_HOURS,
    tables: options?.tables ?? DEFAULT_TABLES
  };
  try {
    const unionSubquery = buildCombinedQuery(orm, settings);
    const groupedSubquery = buildGroupedQuery(orm, unionSubquery);
    const rankedQuery = buildFinalQuery(orm, groupedSubquery, settings);
    const rows = await executeWithRetries(
      () => withTimeout(rankedQuery, MAX_QUERY_TIMEOUT_MS),
      "topSlowestStatementLastHourTrigger"
    );
    const report = formatQueryResults(rows, settings);
    logQueryResults(report, settings);
    return createSuccessResponse(report, settings);
  } catch (error) {
    console.warn(
      "Error in topSlowestStatementLastHourTrigger (one-off errors can be ignored; if it recurs, investigate):",
      error?.cause?.context?.debug?.sqlMessage ?? error?.cause ?? error
    );
    return createErrorResponse("Failed to fetch or log slow queries", error);
  }
};
3365
3802
|
const getHttpResponse = (statusCode, body) => {
|
|
@@ -3401,6 +3838,7 @@ export {
|
|
|
3401
3838
|
getPrimaryKeys,
|
|
3402
3839
|
getTableMetadata,
|
|
3403
3840
|
getTables,
|
|
3841
|
+
isUpdateQueryResponse,
|
|
3404
3842
|
mapSelectAllFieldsToAlias,
|
|
3405
3843
|
mapSelectFieldsWithAlias,
|
|
3406
3844
|
migrations,
|
|
@@ -3408,6 +3846,8 @@ export {
|
|
|
3408
3846
|
parseDateTime,
|
|
3409
3847
|
patchDbWithSelectAliased,
|
|
3410
3848
|
slowQuery,
|
|
3849
|
+
statementsSummary,
|
|
3850
|
+
statementsSummaryHistory,
|
|
3411
3851
|
topSlowestStatementLastHourTrigger
|
|
3412
3852
|
};
|
|
3413
3853
|
//# sourceMappingURL=ForgeSQLORM.mjs.map
|