forge-sql-orm 2.1.3 → 2.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +291 -0
  2. package/dist/ForgeSQLORM.js +713 -12
  3. package/dist/ForgeSQLORM.js.map +1 -1
  4. package/dist/ForgeSQLORM.mjs +716 -15
  5. package/dist/ForgeSQLORM.mjs.map +1 -1
  6. package/dist/core/ForgeSQLCrudOperations.d.ts.map +1 -1
  7. package/dist/core/ForgeSQLORM.d.ts +23 -0
  8. package/dist/core/ForgeSQLORM.d.ts.map +1 -1
  9. package/dist/core/ForgeSQLQueryBuilder.d.ts +36 -5
  10. package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
  11. package/dist/core/ForgeSQLSelectOperations.d.ts.map +1 -1
  12. package/dist/core/SystemTables.d.ts +5039 -0
  13. package/dist/core/SystemTables.d.ts.map +1 -1
  14. package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
  15. package/dist/utils/cacheContextUtils.d.ts.map +1 -1
  16. package/dist/utils/cacheUtils.d.ts.map +1 -1
  17. package/dist/utils/forgeDriver.d.ts +21 -0
  18. package/dist/utils/forgeDriver.d.ts.map +1 -1
  19. package/dist/utils/forgeDriverProxy.d.ts.map +1 -1
  20. package/dist/utils/metadataContextUtils.d.ts +11 -0
  21. package/dist/utils/metadataContextUtils.d.ts.map +1 -0
  22. package/dist/utils/sqlUtils.d.ts.map +1 -1
  23. package/dist/webtriggers/applyMigrationsWebTrigger.d.ts +1 -1
  24. package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
  25. package/dist/webtriggers/clearCacheSchedulerTrigger.d.ts.map +1 -1
  26. package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
  27. package/dist/webtriggers/dropTablesMigrationWebTrigger.d.ts.map +1 -1
  28. package/dist/webtriggers/fetchSchemaWebTrigger.d.ts.map +1 -1
  29. package/dist/webtriggers/index.d.ts +1 -0
  30. package/dist/webtriggers/index.d.ts.map +1 -1
  31. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts +89 -0
  32. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts.map +1 -0
  33. package/package.json +4 -3
  34. package/src/core/ForgeSQLCrudOperations.ts +3 -0
  35. package/src/core/ForgeSQLORM.ts +119 -51
  36. package/src/core/ForgeSQLQueryBuilder.ts +51 -17
  37. package/src/core/ForgeSQLSelectOperations.ts +2 -0
  38. package/src/core/SystemTables.ts +313 -1
  39. package/src/lib/drizzle/extensions/additionalActions.ts +2 -0
  40. package/src/utils/cacheContextUtils.ts +4 -2
  41. package/src/utils/cacheUtils.ts +20 -8
  42. package/src/utils/forgeDriver.ts +22 -1
  43. package/src/utils/forgeDriverProxy.ts +2 -0
  44. package/src/utils/metadataContextUtils.ts +24 -0
  45. package/src/utils/sqlUtils.ts +1 -0
  46. package/src/webtriggers/applyMigrationsWebTrigger.ts +10 -5
  47. package/src/webtriggers/clearCacheSchedulerTrigger.ts +1 -0
  48. package/src/webtriggers/dropMigrationWebTrigger.ts +2 -0
  49. package/src/webtriggers/dropTablesMigrationWebTrigger.ts +2 -0
  50. package/src/webtriggers/fetchSchemaWebTrigger.ts +1 -0
  51. package/src/webtriggers/index.ts +1 -0
  52. package/src/webtriggers/topSlowestStatementLastHourTrigger.ts +360 -0

package/src/utils/metadataContextUtils.ts
@@ -0,0 +1,24 @@
+ import { AsyncLocalStorage } from "node:async_hooks";
+ import {ForgeSQLMetadata} from "./forgeDriver";
+
+ export type MetadataQueryContext = {
+   totalDbExecutionTime: number,
+   totalResponseSize: number,
+   lastMetadata?: ForgeSQLMetadata;
+ }
+ export const metadataQueryContext = new AsyncLocalStorage<MetadataQueryContext>();
+
+ export async function saveMetaDataInContextContext(
+   metadata: ForgeSQLMetadata,
+ ): Promise<void> {
+   const context = metadataQueryContext.getStore();
+   if (context && metadata) {
+     context.totalResponseSize += metadata.responseSize
+     context.totalDbExecutionTime += metadata.dbExecutionTime
+     context.lastMetadata = metadata;
+   }
+ }
+
+ export async function getLastestMetadata():Promise<MetadataQueryContext|undefined> {
+   return metadataQueryContext.getStore();
+ }
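
For orientation, here is a minimal usage sketch of the new metadata context. It assumes the Forge SQL driver records per-statement metadata via saveMetaDataInContextContext; the wrapper function below and its import path are illustrative, not a documented package API.

```ts
// Sketch: accumulate query metadata for one logical unit of work.
// Assumes the driver calls saveMetaDataInContextContext(metadata) after each statement.
import { metadataQueryContext, getLastestMetadata } from "./utils/metadataContextUtils";

export async function withQueryMetadata<T>(work: () => Promise<T>): Promise<T> {
  // Seed an empty accumulator for this async execution context.
  return metadataQueryContext.run(
    { totalDbExecutionTime: 0, totalResponseSize: 0 },
    async () => {
      const result = await work();
      const stats = await getLastestMetadata();
      // eslint-disable-next-line no-console
      console.log(
        "accumulated db execution time:", stats?.totalDbExecutionTime,
        "total response size:", stats?.totalResponseSize,
      );
      return result;
    },
  );
}
```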

package/src/utils/sqlUtils.ts
@@ -329,6 +329,7 @@ export function generateDropTableStatements(
    const dropStatements: string[] = [];
    const validOptions = options ?? { sequence: true, table: true };
    if (!validOptions.sequence && !validOptions.table) {
+     // eslint-disable-next-line no-console
      console.warn('No drop operations requested: both "table" and "sequence" options are false');
      return [];
    }

package/src/webtriggers/applyMigrationsWebTrigger.ts
@@ -31,12 +31,14 @@ export const applySchemaMigrations = async (
    if (typeof migration !== "function") {
      throw new Error("migration is not a function");
    }
-
+   // eslint-disable-next-line no-console
    console.log("Provisioning the database");
    await sql._provision();
+   // eslint-disable-next-line no-console
    console.info("Running schema migrations");
    const migrations = await migration(migrationRunner);
    const successfulMigrations = await migrations.run();
+   // eslint-disable-next-line no-console
    console.info("Migrations applied:", successfulMigrations);

    const migrationList = await migrationRunner.list();
@@ -51,7 +53,7 @@ export const applySchemaMigrations = async (
        .map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`)
        .join("\n");
    }
-
+   // eslint-disable-next-line no-console
    console.info("Migrations history:\nid, name, migrated_at\n", migrationHistory);

    return {
@@ -62,16 +64,19 @@
    };
  } catch (error: any) {
    const errorMessage =
-     error?.debug?.sqlMessage ??
-     error?.debug?.message ??
+     error?.cause?.context?.debug?.sqlMessage ??
+     error?.cause?.context?.debug?.message ??
+     error?.debug?.context?.sqlMessage ??
+     error?.debug?.context?.message ??
      error.message ??
      "Unknown error occurred";
+   // eslint-disable-next-line no-console
    console.error("Error during migration:", errorMessage);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 500,
      statusText: "Internal Server Error",
-     body: error instanceof Error ? error.message : "Unknown error during migration",
+     body: error instanceof Error ? errorMessage : "Unknown error during migration",
    };
  }
};
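
Consolidated for readability, the new error-message fallback in applyMigrationsWebTrigger.ts reads as the helper below. The helper name is illustrative; the package inlines this logic. The added paths suggest that Forge SQL debug details may now arrive nested under error.cause.context.debug.

```ts
// Sketch of the fallback order now used when reporting migration failures.
function extractMigrationErrorMessage(error: any): string {
  return (
    error?.cause?.context?.debug?.sqlMessage ??  // nested debug info on error.cause
    error?.cause?.context?.debug?.message ??
    error?.debug?.context?.sqlMessage ??         // alternative nesting under error.debug
    error?.debug?.context?.message ??
    error?.message ??
    "Unknown error occurred"
  );
}
```

Note that the 500 response body now reuses this extracted message instead of the raw error.message.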

package/src/webtriggers/clearCacheSchedulerTrigger.ts
@@ -64,6 +64,7 @@ export const clearCacheSchedulerTrigger = async (options?: ForgeSqlOrmOptions) =
      }),
    };
  } catch (error) {
+   // eslint-disable-next-line no-console
    console.error("Error during cache cleanup: ", JSON.stringify(error));
    return {
      headers: { "Content-Type": ["application/json"] },

package/src/webtriggers/dropMigrationWebTrigger.ts
@@ -34,6 +34,7 @@ export async function dropSchemaMigrations(): Promise<TriggerResponse<string>> {

    // Execute each statement
    for (const statement of dropStatements) {
+     // eslint-disable-next-line no-console
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
@@ -48,6 +49,7 @@ export async function dropSchemaMigrations(): Promise<TriggerResponse<string>> {
      error?.debug?.message ??
      error.message ??
      "Unknown error occurred";
+   // eslint-disable-next-line no-console
    console.error(errorMessage);
    return getHttpResponse<string>(500, errorMessage);
  }

package/src/webtriggers/dropTablesMigrationWebTrigger.ts
@@ -34,6 +34,7 @@ export async function dropTableSchemaMigrations(): Promise<TriggerResponse<strin

    // Execute each statement
    for (const statement of dropStatements) {
+     // eslint-disable-next-line no-console
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
@@ -48,6 +49,7 @@ export async function dropTableSchemaMigrations(): Promise<TriggerResponse<strin
      error?.debug?.message ??
      error.message ??
      "Unknown error occurred";
+   // eslint-disable-next-line no-console
    console.error(errorMessage);
    return getHttpResponse<string>(500, errorMessage);
  }

package/src/webtriggers/fetchSchemaWebTrigger.ts
@@ -46,6 +46,7 @@ export async function fetchSchemaWebTrigger(): Promise<TriggerResponse<string>>
      error?.debug?.message ??
      error.message ??
      "Unknown error occurred";
+   // eslint-disable-next-line no-console
    console.error(errorMessage);
    return getHttpResponse<string>(500, errorMessage);
  }

package/src/webtriggers/index.ts
@@ -3,6 +3,7 @@ export * from "./applyMigrationsWebTrigger";
  export * from "./fetchSchemaWebTrigger";
  export * from "./dropTablesMigrationWebTrigger";
  export * from "./clearCacheSchedulerTrigger";
+ export * from "./topSlowestStatementLastHourTrigger";

  export interface TriggerResponse<BODY> {
    body?: BODY;

package/src/webtriggers/topSlowestStatementLastHourTrigger.ts
@@ -0,0 +1,360 @@
+ import { ForgeSqlOperation } from "../core/ForgeSQLQueryBuilder";
+ import { clusterStatementsSummary, clusterStatementsSummaryHistory } from "../core/SystemTables";
+ import { desc, gte, sql } from "drizzle-orm";
+ import { unionAll } from "drizzle-orm/mysql-core";
+ import { formatLimitOffset } from "../utils/sqlUtils";
+
+ const DEFAULT_MEMORY_THRESHOLD = 8 * 1024 * 1024;
+ const DEFAULT_TIMEOUT = 300;
+ /**
+  * Scheduler trigger: log and return the single slowest statement from the last hour, filtered by latency OR memory usage.
+  *
+  * When scheduled (e.g. hourly), this trigger queries
+  * INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY for the last hour
+  * and prints the TOP 1 entry (by AVG_LATENCY) if it exceeds either threshold.
+  *
+  * **OR Logic**: Statements are included if they exceed EITHER threshold:
+  * - avgLatencyMs > warnThresholdMs OR
+  * - avgMemBytes > memoryThresholdBytes
+  *
+  * **Pro Tips:**
+  * - Memory-only monitoring: Set warnThresholdMs to 10000ms (effectively disabled)
+  * - Latency-only monitoring: Set memoryThresholdBytes to 16MB (16 * 1024 * 1024) (effectively disabled)
+  * - Combined monitoring: Use both thresholds for comprehensive monitoring
+  *
+  * Excludes statements with empty `digestText`, empty `digest`, or service statements (`Use`, `Set`, `Show`).
+  *
+  * Logging rule:
+  * - Query exceeds warnThresholdMs OR memoryThresholdBytes → console.warn (logged)
+  * - otherwise → not logged
+  *
+  * @param orm ForgeSQL ORM instance (required)
+  * @param options Configuration options object
+  * @param options.warnThresholdMs Milliseconds threshold for logging and filtering (default: 300ms)
+  * @param options.memoryThresholdBytes Bytes threshold for average memory usage (default: 8MB)
+  * @param options.showPlan Whether to include execution plan in logs (default: false)
+  * @returns HTTP response with a JSON payload containing the filtered rows
+  *
+  * @example
+  * ```ts
+  * import ForgeSQL, { topSlowestStatementLastHourTrigger } from "forge-sql-orm";
+  *
+  * const FORGE_SQL_ORM = new ForgeSQL();
+  *
+  * // Default thresholds: 300ms latency OR 8MB memory
+  * export const topSlowQueryTrigger = () =>
+  *   topSlowestStatementLastHourTrigger(FORGE_SQL_ORM);
+  *
+  * // Only latency monitoring: 500ms threshold (memory effectively disabled)
+  * export const latencyOnlyTrigger = () =>
+  *   topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 500, memoryThresholdBytes: 16 * 1024 * 1024 });
+  *
+  * // Only memory monitoring: 4MB threshold (latency effectively disabled)
+  * export const memoryOnlyTrigger = () =>
+  *   topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 10000, memoryThresholdBytes: 4 * 1024 * 1024 });
+  *
+  * // Both thresholds: 500ms latency OR 8MB memory
+  * export const bothThresholdsTrigger = () =>
+  *   topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 500, memoryThresholdBytes: 8 * 1024 * 1024 });
+  *
+  * // With execution plan in logs
+  * export const withPlanTrigger = () =>
+  *   topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { showPlan: true });
+  *
+  *
+  * @example
+  * ```yaml
+  * scheduledTrigger:
+  *   - key: top-slow-query-trigger
+  *     function: topSlowQueryTrigger
+  *     interval: hour
+  *
+  * function:
+  *   - key: topSlowQueryTrigger
+  *     handler: index.topSlowQueryTrigger
+  * ```
+  */
+ // Main scheduler trigger function to log the single slowest SQL statement from the last hour.
+ export const topSlowestStatementLastHourTrigger = async (
+   orm: ForgeSqlOperation, options?: {
+     warnThresholdMs?:number,
+     memoryThresholdBytes?: number,
+     showPlan?: boolean
+   }
+ ) => {
+   // Validate required parameters
+   if (!orm) {
+     return {
+       statusCode: 500,
+       headers: { "Content-Type": ["application/json"] },
+       body: JSON.stringify({
+         success: false,
+         message: "ORM instance is required",
+         timestamp: new Date().toISOString(),
+       }),
+     };
+   }
+   let newOptions= options ?? {
+     warnThresholdMs: DEFAULT_TIMEOUT,
+     memoryThresholdBytes: DEFAULT_MEMORY_THRESHOLD,
+     showPlan: false
+   };
+
+   // Helper: Convert nanoseconds to milliseconds (for latency fields)
+   const nsToMs = (v: unknown) => {
+     const n = Number(v);
+     return Number.isFinite(n) ? n / 1e6 : NaN;
+   };
+
+   // Helper: Convert bytes to megabytes (for memory fields)
+   const bytesToMB = (v: unknown) => {
+     const n = Number(v);
+     return Number.isFinite(n) ? n / (1024 * 1024) : NaN;
+   };
+
+   // Helper: JSON.stringify replacer to handle BigInt values (so BigInt serializes as string)
+   const jsonSafeStringify = (value: unknown) =>
+     JSON.stringify(value, (_k, v) => (typeof v === "bigint" ? v.toString() : v));
+
+   /**
+    * Simple SQL sanitizer for safe logging.
+    * - removes comments
+    * - replaces string and numeric literals with '?'
+    * - normalizes whitespace
+    * - truncates long queries
+    */
+   function sanitizeSQL(sql: string, maxLen = 1000): string {
+     let s = sql;
+
+     // 1. Remove comments (-- ... and /* ... */)
+     s = s.replace(/--[^\n\r]*/g, "")
+       .replace(/\/\*[\s\S]*?\*\//g, "");
+
+     // 2. Replace string literals with '?'
+     s = s.replace(/'(?:\\'|[^'])*'/g, "?");
+     // 3. Replace numbers with '?'
+     s = s.replace(/\b-?\d+(?:\.\d+)?\b/g, "?");
+     // 4. Normalize whitespace
+     s = s.replace(/\s+/g, " ").trim();
+     // 5. Truncate long queries
+     if (s.length > maxLen) {
+       s = s.slice(0, maxLen) + " …[truncated]";
+     }
+     return s;
+   }
+
+   // Number of top slow queries to fetch
+   const TOP_N = 1;
+
+   try {
+     // Get references to system summary tables
+     const summaryHistory = clusterStatementsSummaryHistory;
+     const summary = clusterStatementsSummary;
+     // Helper to define the selected fields (selection shape) for both tables
+     const selectShape = (t: typeof summaryHistory | typeof summary) => ({
+       digest: t.digest,
+       stmtType: t.stmtType,
+       schemaName: t.schemaName,
+       execCount: t.execCount,
+
+       avgLatencyNs: t.avgLatency,
+       maxLatencyNs: t.maxLatency,
+       minLatencyNs: t.minLatency,
+
+       avgProcessTimeNs: t.avgProcessTime,
+       avgWaitTimeNs: t.avgWaitTime,
+       avgBackoffTimeNs: t.avgBackoffTime,
+
+       avgTotalKeys: t.avgTotalKeys,
+       firstSeen: t.firstSeen,
+       lastSeen: t.lastSeen,
+       planInCache: t.planInCache,
+       planCacheHits: t.planCacheHits,
+       digestText: t.digestText,
+       plan: t.plan,
+       avgMemBytes: (t as any).avgMem,
+       maxMemBytes: (t as any).maxMem,
+     });
+
+     // Filters: Only include rows from the last hour for each table
+     const lastHourFilterHistory = gte(
+       summaryHistory.summaryEndTime,
+       sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`,
+     );
+     const lastHourFilterSummary = gte(
+       summary.summaryEndTime,
+       sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`,
+     );
+
+     // Query for summary history table (last hour)
+     const qHistory = orm
+       .getDrizzleQueryBuilder()
+       .select(selectShape(summaryHistory))
+       .from(summaryHistory)
+       .where(lastHourFilterHistory);
+
+     // Query for summary table (last hour)
+     const qSummary = orm
+       .getDrizzleQueryBuilder()
+       .select(selectShape(summary))
+       .from(summary)
+       .where(lastHourFilterSummary);
+
+     // Use UNION ALL to combine results from both tables (avoids duplicates, keeps all rows)
+     // This is necessary because some statements may only be present in one of the tables.
+     const combined = unionAll(qHistory, qSummary).as("combined");
+
+     // Threshold in nanoseconds (warnThresholdMs → ns)
+     const thresholdNs = Math.floor((newOptions.warnThresholdMs ?? DEFAULT_TIMEOUT) * 1e6);
+     // memoryThresholdBytes is already provided in bytes (default 8MB)
+     const memoryThresholdBytes = newOptions.memoryThresholdBytes ?? DEFAULT_MEMORY_THRESHOLD;
+
+     // Group duplicates by digest+stmtType+schemaName and aggregate metrics
+     const grouped = orm
+       .getDrizzleQueryBuilder()
+       .select({
+         digest: combined.digest,
+         stmtType: combined.stmtType,
+         schemaName: combined.schemaName,
+         execCount: sql<number>`SUM(${combined.execCount})`.as("execCount"),
+
+         avgLatencyNs: sql<number>`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
+         maxLatencyNs: sql<number>`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
+         minLatencyNs: sql<number>`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
+
+         avgProcessTimeNs: sql<number>`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
+         avgWaitTimeNs: sql<number>`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
+         avgBackoffTimeNs: sql<number>`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
+
+         avgMemBytes: sql<number>`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
+         maxMemBytes: sql<number>`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
+
+         avgTotalKeys: sql<number>`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
+         firstSeen: sql<string>`MIN(${combined.firstSeen})`.as("firstSeen"),
+         lastSeen: sql<string>`MAX(${combined.lastSeen})`.as("lastSeen"),
+         planInCache: sql<boolean>`MAX(${combined.planInCache})`.as("planInCache"),
+         planCacheHits: sql<number>`SUM(${combined.planCacheHits})`.as("planCacheHits"),
+         // Prefer a non-empty sample text/plan via MAX; acceptable for de-dup
+         digestText: sql<string>`MAX(${combined.digestText})`.as("digestText"),
+         plan: sql<string>`MAX(${combined.plan})`.as("plan"),
+       })
+       .from(combined)
+       .where(
+         sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`,
+       )
+       .groupBy(combined.digest, combined.stmtType, combined.schemaName)
+       .as("grouped");
+
+     // Final selection: filter by threshold, sort by avg latency desc, limit TOP_N
+     const rows = await orm
+       .getDrizzleQueryBuilder()
+       .select({
+         digest: grouped.digest,
+         stmtType: grouped.stmtType,
+         schemaName: grouped.schemaName,
+         execCount: grouped.execCount,
+
+         avgLatencyNs: grouped.avgLatencyNs,
+         maxLatencyNs: grouped.maxLatencyNs,
+         minLatencyNs: grouped.minLatencyNs,
+
+         avgProcessTimeNs: grouped.avgProcessTimeNs,
+         avgWaitTimeNs: grouped.avgWaitTimeNs,
+         avgBackoffTimeNs: grouped.avgBackoffTimeNs,
+
+         avgMemBytes: grouped.avgMemBytes,
+         maxMemBytes: grouped.maxMemBytes,
+
+         avgTotalKeys: grouped.avgTotalKeys,
+         firstSeen: grouped.firstSeen,
+         lastSeen: grouped.lastSeen,
+         planInCache: grouped.planInCache,
+         planCacheHits: grouped.planCacheHits,
+         digestText: grouped.digestText,
+         plan: grouped.plan,
+       })
+       .from(grouped)
+       .where(
+         sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`,
+       )
+       .orderBy(desc(grouped.avgLatencyNs))
+       .limit(formatLimitOffset(TOP_N));
+
+     // Map each row into a formatted object with ms and rank, for easier consumption/logging
+     const formatted = rows.map((r, i) => ({
+       rank: i + 1, // 1-based rank in the top N
+       digest: r.digest,
+       stmtType: r.stmtType,
+       schemaName: r.schemaName,
+       execCount: r.execCount,
+       avgLatencyMs: nsToMs(r.avgLatencyNs), // Convert ns to ms for readability
+       maxLatencyMs: nsToMs(r.maxLatencyNs),
+       minLatencyMs: nsToMs(r.minLatencyNs),
+       avgProcessTimeMs: nsToMs(r.avgProcessTimeNs),
+       avgWaitTimeMs: nsToMs(r.avgWaitTimeNs),
+       avgBackoffTimeMs: nsToMs(r.avgBackoffTimeNs),
+       avgMemMB: bytesToMB(r.avgMemBytes),
+       maxMemMB: bytesToMB(r.maxMemBytes),
+       avgMemBytes: r.avgMemBytes,
+       maxMemBytes: r.maxMemBytes,
+       avgTotalKeys: r.avgTotalKeys,
+       firstSeen: r.firstSeen,
+       lastSeen: r.lastSeen,
+       planInCache: r.planInCache,
+       planCacheHits: r.planCacheHits,
+       digestText: sanitizeSQL(r.digestText),
+       plan: newOptions.showPlan? r.plan: undefined,
+     }));
+
+     // Log each entry (SQL already filtered by threshold)
+     for (const f of formatted) {
+       // eslint-disable-next-line no-console
+       console.warn(
+         `${f.rank}. ${f.stmtType} avg=${f.avgLatencyMs?.toFixed?.(2)}ms max=${f.maxLatencyMs?.toFixed?.(2)}ms mem≈${f.avgMemMB?.toFixed?.(2)}MB(max ${f.maxMemMB?.toFixed?.(2)}MB) exec=${f.execCount} \n` +
+           ` digest=${f.digest}\n` +
+           ` sql=${(f.digestText || "").slice(0, 300)}${f.digestText && f.digestText.length > 300 ? "…" : ""}`,
+       );
+       if (newOptions.showPlan && f.plan ) {
+         // print full plan separately (not truncated)
+         // eslint-disable-next-line no-console
+         console.warn(` full plan:\n${f.plan}`);
+       }
+     }
+
+     // Return HTTP response with JSON payload of the results
+     return {
+       headers: { "Content-Type": ["application/json"] },
+       statusCode: 200,
+       statusText: "OK",
+       body: jsonSafeStringify({
+         success: true,
+         window: "last_1h",
+         top: TOP_N,
+         warnThresholdMs: newOptions.warnThresholdMs,
+         memoryThresholdBytes: newOptions.memoryThresholdBytes,
+         showPlan: newOptions.showPlan,
+         rows: formatted,
+         generatedAt: new Date().toISOString(),
+       }),
+     };
+   } catch (error: any) {
+     // Catch any error (DB, logic, etc) and log with details for debugging
+     // This ensures the scheduler never crashes and always returns a response.
+     // eslint-disable-next-line no-console
+     console.error(
+       "Error in topSlowestStatementLastHourTrigger:",
+       error?.cause?.context?.debug?.sqlMessage ?? error?.cause ?? error,
+     );
+     return {
+       headers: { "Content-Type": ["application/json"] },
+       statusCode: 500,
+       statusText: "Internal Server Error",
+       body: jsonSafeStringify({
+         success: false,
+         message: "Failed to fetch or log slow queries",
+         error: error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.message,
+         timestamp: new Date().toISOString(),
+       }),
+     };
+   }
+ };
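
For consumers of the scheduled trigger, the JSON body assembled above has roughly the shape below. The interface is reconstructed from the code as a sketch; it is not a type exported by the package.

```ts
// Sketch of the success payload returned by topSlowestStatementLastHourTrigger.
interface SlowStatementReport {
  success: boolean;
  window: "last_1h";
  top: number;
  warnThresholdMs?: number;
  memoryThresholdBytes?: number;
  showPlan?: boolean;
  generatedAt: string; // ISO timestamp
  rows: Array<{
    rank: number;
    digest: string;
    stmtType: string;
    schemaName: string;
    execCount: number | string; // BigInt aggregates serialize as strings via the replacer
    avgLatencyMs: number;
    maxLatencyMs: number;
    minLatencyMs: number;
    avgMemMB: number;
    maxMemMB: number;
    digestText: string; // sanitized SQL digest text
    plan?: string; // only present when showPlan is true
  }>;
}

// Example: flag a run whose slowest statement averages above one second.
function hasCriticalStatement(body: string): boolean {
  const report: SlowStatementReport = JSON.parse(body);
  return report.success && report.rows.some((r) => Number(r.avgLatencyMs) > 1000);
}
```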