forge-sql-orm 2.1.5 → 2.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +135 -53
  2. package/dist/ForgeSQLORM.js +572 -231
  3. package/dist/ForgeSQLORM.js.map +1 -1
  4. package/dist/ForgeSQLORM.mjs +572 -231
  5. package/dist/ForgeSQLORM.mjs.map +1 -1
  6. package/dist/core/ForgeSQLORM.d.ts +91 -3
  7. package/dist/core/ForgeSQLORM.d.ts.map +1 -1
  8. package/dist/core/ForgeSQLQueryBuilder.d.ts +89 -2
  9. package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
  10. package/dist/core/SystemTables.d.ts +3654 -0
  11. package/dist/core/SystemTables.d.ts.map +1 -1
  12. package/dist/lib/drizzle/extensions/additionalActions.d.ts +2 -2
  13. package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
  14. package/dist/utils/forgeDriver.d.ts +61 -14
  15. package/dist/utils/forgeDriver.d.ts.map +1 -1
  16. package/dist/utils/metadataContextUtils.d.ts +1 -1
  17. package/dist/utils/metadataContextUtils.d.ts.map +1 -1
  18. package/dist/utils/requestTypeContextUtils.d.ts +8 -0
  19. package/dist/utils/requestTypeContextUtils.d.ts.map +1 -0
  20. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts +90 -65
  21. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts.map +1 -1
  22. package/package.json +9 -9
  23. package/src/core/ForgeSQLCrudOperations.ts +3 -3
  24. package/src/core/ForgeSQLORM.ts +334 -124
  25. package/src/core/ForgeSQLQueryBuilder.ts +116 -20
  26. package/src/core/ForgeSQLSelectOperations.ts +2 -2
  27. package/src/core/SystemTables.ts +16 -0
  28. package/src/lib/drizzle/extensions/additionalActions.ts +24 -22
  29. package/src/utils/cacheContextUtils.ts +2 -2
  30. package/src/utils/cacheUtils.ts +12 -12
  31. package/src/utils/forgeDriver.ts +219 -40
  32. package/src/utils/forgeDriverProxy.ts +2 -2
  33. package/src/utils/metadataContextUtils.ts +11 -13
  34. package/src/utils/requestTypeContextUtils.ts +11 -0
  35. package/src/utils/sqlUtils.ts +1 -1
  36. package/src/webtriggers/applyMigrationsWebTrigger.ts +9 -9
  37. package/src/webtriggers/clearCacheSchedulerTrigger.ts +1 -1
  38. package/src/webtriggers/dropMigrationWebTrigger.ts +2 -2
  39. package/src/webtriggers/dropTablesMigrationWebTrigger.ts +2 -2
  40. package/src/webtriggers/fetchSchemaWebTrigger.ts +1 -1
  41. package/src/webtriggers/topSlowestStatementLastHourTrigger.ts +511 -308
package/src/webtriggers/topSlowestStatementLastHourTrigger.ts
@@ -1,360 +1,563 @@
1
1
  import { ForgeSqlOperation } from "../core/ForgeSQLQueryBuilder";
2
- import { clusterStatementsSummary, clusterStatementsSummaryHistory } from "../core/SystemTables";
2
+ import {
3
+ clusterStatementsSummary,
4
+ clusterStatementsSummaryHistory,
5
+ statementsSummary,
6
+ } from "../core/SystemTables";
3
7
  import { desc, gte, sql } from "drizzle-orm";
4
8
  import { unionAll } from "drizzle-orm/mysql-core";
5
9
  import { formatLimitOffset } from "../utils/sqlUtils";
10
+ import { OperationType } from "../utils/requestTypeContextUtils";
11
+
12
+ // Constants
13
+ const DEFAULT_MEMORY_THRESHOLD = 8 * 1024 * 1024; // 8MB
14
+ const DEFAULT_TIMEOUT = 300; // 300ms
15
+ const DEFAULT_TOP_N = 1;
16
+ const DEFAULT_HOURS = 1;
17
+ const DEFAULT_TABLES = "CLUSTER_SUMMARY_AND_HISTORY" as const;
18
+ const MAX_QUERY_TIMEOUT_MS = 3_000;
19
+ const MAX_SQL_LENGTH = 1000;
20
+ const RETRY_ATTEMPTS = 2;
21
+ const RETRY_BASE_DELAY_MS = 1_000;
22
+
23
+ // Types
24
+ interface TriggerOptions {
25
+ warnThresholdMs?: number;
26
+ memoryThresholdBytes?: number;
27
+ showPlan?: boolean;
28
+ operationType?: OperationType;
29
+ topN?: number;
30
+ hours?: number;
31
+ tables?: "SUMMARY_AND_HISTORY" | "CLUSTER_SUMMARY_AND_HISTORY";
32
+ }
33
+
34
+ interface FormattedQueryResult {
35
+ rank: number;
36
+ digest: string;
37
+ stmtType: string;
38
+ schemaName: string;
39
+ execCount: number;
40
+ avgLatencyMs: number;
41
+ maxLatencyMs: number;
42
+ minLatencyMs: number;
43
+ avgProcessTimeMs: number;
44
+ avgWaitTimeMs: number;
45
+ avgBackoffTimeMs: number;
46
+ avgMemMB: number;
47
+ maxMemMB: number;
48
+ avgMemBytes: number;
49
+ maxMemBytes: number;
50
+ avgTotalKeys: number;
51
+ firstSeen: string;
52
+ lastSeen: string;
53
+ planInCache: boolean;
54
+ planCacheHits: number;
55
+ digestText: string;
56
+ plan?: string;
57
+ }
58
+
59
+ interface TriggerResponse {
60
+ headers: { "Content-Type": string[] };
61
+ statusCode: number;
62
+ statusText?: string;
63
+ body: string;
64
+ }
65
+
66
+ // Utility Functions
67
+ /**
68
+ * Converts nanoseconds to milliseconds for better readability
69
+ */
70
+ const nsToMs = (value: unknown): number => {
71
+ const n = Number(value);
72
+ return Number.isFinite(n) ? n / 1e6 : NaN;
73
+ };
74
+
75
+ /**
76
+ * Converts bytes to megabytes for better readability
77
+ */
78
+ const bytesToMB = (value: unknown): number => {
79
+ const n = Number(value);
80
+ return Number.isFinite(n) ? n / (1024 * 1024) : NaN;
81
+ };
82
+
83
+ /**
84
+ * JSON stringify replacer to handle BigInt values safely
85
+ */
86
+ const jsonSafeStringify = (value: unknown): string =>
87
+ JSON.stringify(value, (_k, v) => (typeof v === "bigint" ? v.toString() : v));
88
+
89
+ /**
90
+ * Sanitizes SQL for safe logging by removing comments, replacing literals, and truncating
91
+ */
92
+ const sanitizeSQL = (sql: string, maxLen = MAX_SQL_LENGTH): string => {
93
+ let s = sql;
94
+
95
+ // Remove comments (-- ... and /* ... */)
96
+ s = s.replace(/--[^\n\r]*/g, "").replace(/\/\*[\s\S]*?\*\//g, "");
97
+
98
+ // Replace string literals with '?'
99
+ s = s.replace(/'(?:\\'|[^'])*'/g, "?");
100
+
101
+ // Replace numbers with '?'
102
+ s = s.replace(/\b-?\d+(?:\.\d+)?\b/g, "?");
103
+
104
+ // Normalize whitespace
105
+ s = s.replace(/\s+/g, " ").trim();
106
+
107
+ // Truncate long queries
108
+ if (s.length > maxLen) {
109
+ s = s.slice(0, maxLen) + " …[truncated]";
110
+ }
111
+
112
+ return s;
113
+ };
114
+
115
+ /**
116
+ * Promise timeout helper that rejects if the promise doesn't settle within the specified time
117
+ */
118
+ const withTimeout = async <T>(promise: Promise<T>, ms: number): Promise<T> => {
119
+ let timer: ReturnType<typeof setTimeout> | undefined;
120
+ try {
121
+ return await Promise.race<T>([
122
+ promise,
123
+ new Promise<T>((_resolve, reject) => {
124
+ timer = setTimeout(() => reject(new Error(`TIMEOUT:${ms}`)), ms);
125
+ }),
126
+ ]);
127
+ } finally {
128
+ if (timer) clearTimeout(timer);
129
+ }
130
+ };
131
+
132
+ /**
133
+ * Sleep utility function
134
+ */
135
+ const sleep = (ms: number): Promise<void> => new Promise((resolve) => setTimeout(resolve, ms));
136
+
137
+ /**
138
+ * Executes a task with retries and exponential backoff
139
+ */
140
+ const executeWithRetries = async <T>(task: () => Promise<T>, label: string): Promise<T> => {
141
+ let attempt = 0;
142
+ let delay = RETRY_BASE_DELAY_MS;
143
+
144
+ while (true) {
145
+ try {
146
+ attempt++;
147
+ return await task();
148
+ } catch (error: any) {
149
+ const msg = String(error?.message ?? error);
150
+ const isTimeout = msg.startsWith("TIMEOUT:");
151
+
152
+ if (attempt > RETRY_ATTEMPTS) throw error;
153
+ // eslint-disable-next-line no-console
154
+ console.warn(
155
+ `${label}: attempt ${attempt} failed${isTimeout ? " (timeout)" : ""}; retrying in ${delay}ms...`,
156
+ error,
157
+ );
158
+
159
+ await sleep(delay);
160
+ delay *= 2; // Exponential backoff
161
+ }
162
+ }
163
+ };
164
+
165
+ /**
166
+ * Creates error response for failed operations
167
+ */
168
+ const createErrorResponse = (message: string, error?: any): TriggerResponse => ({
169
+ headers: { "Content-Type": ["application/json"] },
170
+ statusCode: 500,
171
+ statusText: "Internal Server Error",
172
+ body: jsonSafeStringify({
173
+ success: false,
174
+ message,
175
+ error: error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.message ?? error?.message,
176
+ timestamp: new Date().toISOString(),
177
+ }),
178
+ });
179
+
180
+ /**
181
+ * Creates success response with query results
182
+ */
183
+ const createSuccessResponse = (
184
+ formatted: FormattedQueryResult[],
185
+ options: Required<TriggerOptions>,
186
+ ): TriggerResponse => ({
187
+ headers: { "Content-Type": ["application/json"] },
188
+ statusCode: 200,
189
+ statusText: "OK",
190
+ body: jsonSafeStringify({
191
+ success: true,
192
+ window: `last_${options.hours}h`,
193
+ top: options.topN,
194
+ warnThresholdMs: options.warnThresholdMs,
195
+ memoryThresholdBytes: options.memoryThresholdBytes,
196
+ showPlan: options.showPlan,
197
+ rows: formatted,
198
+ generatedAt: new Date().toISOString(),
199
+ }),
200
+ });
201
+
202
+ // Query Building Functions
203
+ /**
204
+ * Creates the selection shape for query results
205
+ */
206
+ const createSelectShape = (table: any) => ({
207
+ digest: table.digest,
208
+ stmtType: table.stmtType,
209
+ schemaName: table.schemaName,
210
+ execCount: table.execCount,
211
+ avgLatencyNs: table.avgLatency,
212
+ maxLatencyNs: table.maxLatency,
213
+ minLatencyNs: table.minLatency,
214
+ avgProcessTimeNs: table.avgProcessTime,
215
+ avgWaitTimeNs: table.avgWaitTime,
216
+ avgBackoffTimeNs: table.avgBackoffTime,
217
+ avgTotalKeys: table.avgTotalKeys,
218
+ firstSeen: table.firstSeen,
219
+ lastSeen: table.lastSeen,
220
+ planInCache: table.planInCache,
221
+ planCacheHits: table.planCacheHits,
222
+ digestText: table.digestText,
223
+ plan: table.plan,
224
+ avgMemBytes: (table as any).avgMem,
225
+ maxMemBytes: (table as any).maxMem,
226
+ });
227
+
228
+ /**
229
+ * Builds the combined query from multiple tables
230
+ */
231
+ const buildCombinedQuery = (orm: ForgeSqlOperation, options: Required<TriggerOptions>) => {
232
+ const summaryHistory = statementsSummary;
233
+ const summary = statementsSummary;
234
+ const summaryHistoryCluster = clusterStatementsSummaryHistory;
235
+ const summaryCluster = clusterStatementsSummary;
236
+
237
+ // Time filters for last N hours
238
+ const lastHoursFilter = (table: any) =>
239
+ gte(table.summaryEndTime, sql`DATE_SUB(NOW(), INTERVAL ${options.hours} HOUR)`);
240
+
241
+ // Build queries for each table
242
+ const qHistory = orm
243
+ .getDrizzleQueryBuilder()
244
+ .select(createSelectShape(summaryHistory))
245
+ .from(summaryHistory)
246
+ .where(lastHoursFilter(summaryHistory));
247
+
248
+ const qSummary = orm
249
+ .getDrizzleQueryBuilder()
250
+ .select(createSelectShape(summary))
251
+ .from(summary)
252
+ .where(lastHoursFilter(summary));
253
+
254
+ const qHistoryCluster = orm
255
+ .getDrizzleQueryBuilder()
256
+ .select(createSelectShape(summaryHistoryCluster))
257
+ .from(summaryHistoryCluster)
258
+ .where(lastHoursFilter(summaryHistoryCluster));
259
+
260
+ const qSummaryCluster = orm
261
+ .getDrizzleQueryBuilder()
262
+ .select(createSelectShape(summaryCluster))
263
+ .from(summaryCluster)
264
+ .where(lastHoursFilter(summaryCluster));
265
+
266
+ // Combine tables based on configuration
267
+ switch (options.tables) {
268
+ case "SUMMARY_AND_HISTORY":
269
+ return unionAll(qHistory, qSummary).as("combined");
270
+ case "CLUSTER_SUMMARY_AND_HISTORY":
271
+ return unionAll(qHistoryCluster, qSummaryCluster).as("combined");
272
+ default:
273
+ throw new Error(`Unsupported table configuration: ${options.tables}`);
274
+ }
275
+ };
276
+
277
+ /**
278
+ * Builds the final grouped query with filtering and sorting
279
+ */
280
+ const buildGroupedQuery = (orm: ForgeSqlOperation, combined: any) => {
281
+ return orm
282
+ .getDrizzleQueryBuilder()
283
+ .select({
284
+ digest: combined.digest,
285
+ stmtType: combined.stmtType,
286
+ schemaName: combined.schemaName,
287
+ execCount: sql<number>`SUM(${combined.execCount})`.as("execCount"),
288
+ avgLatencyNs: sql<number>`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
289
+ maxLatencyNs: sql<number>`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
290
+ minLatencyNs: sql<number>`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
291
+ avgProcessTimeNs: sql<number>`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
292
+ avgWaitTimeNs: sql<number>`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
293
+ avgBackoffTimeNs: sql<number>`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
294
+ avgMemBytes: sql<number>`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
295
+ maxMemBytes: sql<number>`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
296
+ avgTotalKeys: sql<number>`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
297
+ firstSeen: sql<string>`MIN(${combined.firstSeen})`.as("firstSeen"),
298
+ lastSeen: sql<string>`MAX(${combined.lastSeen})`.as("lastSeen"),
299
+ planInCache: sql<boolean>`MAX(${combined.planInCache})`.as("planInCache"),
300
+ planCacheHits: sql<number>`SUM(${combined.planCacheHits})`.as("planCacheHits"),
301
+ digestText: sql<string>`MAX(${combined.digestText})`.as("digestText"),
302
+ plan: sql<string>`MAX(${combined.plan})`.as("plan"),
303
+ })
304
+ .from(combined)
305
+ .where(
306
+ sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`,
307
+ )
308
+ .groupBy(combined.digest, combined.stmtType, combined.schemaName)
309
+ .as("grouped");
310
+ };
311
+
312
+ /**
313
+ * Builds the final query with filtering, sorting, and limiting
314
+ */
315
+ const buildFinalQuery = (
316
+ orm: ForgeSqlOperation,
317
+ grouped: any,
318
+ options: Required<TriggerOptions>,
319
+ ) => {
320
+ const thresholdNs = Math.floor(options.warnThresholdMs * 1e6);
321
+ const memoryThresholdBytes = options.memoryThresholdBytes;
322
+
323
+ const query = orm
324
+ .getDrizzleQueryBuilder()
325
+ .select({
326
+ digest: grouped.digest,
327
+ stmtType: grouped.stmtType,
328
+ schemaName: grouped.schemaName,
329
+ execCount: grouped.execCount,
330
+ avgLatencyNs: grouped.avgLatencyNs,
331
+ maxLatencyNs: grouped.maxLatencyNs,
332
+ minLatencyNs: grouped.minLatencyNs,
333
+ avgProcessTimeNs: grouped.avgProcessTimeNs,
334
+ avgWaitTimeNs: grouped.avgWaitTimeNs,
335
+ avgBackoffTimeNs: grouped.avgBackoffTimeNs,
336
+ avgMemBytes: grouped.avgMemBytes,
337
+ maxMemBytes: grouped.maxMemBytes,
338
+ avgTotalKeys: grouped.avgTotalKeys,
339
+ firstSeen: grouped.firstSeen,
340
+ lastSeen: grouped.lastSeen,
341
+ planInCache: grouped.planInCache,
342
+ planCacheHits: grouped.planCacheHits,
343
+ digestText: grouped.digestText,
344
+ plan: grouped.plan,
345
+ })
346
+ .from(grouped)
347
+ .where(
348
+ sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`,
349
+ )
350
+ .orderBy(desc(grouped.avgLatencyNs))
351
+ .limit(formatLimitOffset(options.topN));
352
+
353
+ // Execute with DDL context if specified
354
+ if (options.operationType === "DDL") {
355
+ return orm.executeDDLActions(async () => await query);
356
+ }
357
+
358
+ return query;
359
+ };
6
360
 
7
- const DEFAULT_MEMORY_THRESHOLD = 8 * 1024 * 1024;
8
- const DEFAULT_TIMEOUT = 300;
9
361
  /**
10
- * Scheduler trigger: log and return the single slowest statement from the last hour, filtered by latency OR memory usage.
362
+ * Formats query results for output
363
+ */
364
+ const formatQueryResults = (
365
+ rows: any[],
366
+ options: Required<TriggerOptions>,
367
+ ): FormattedQueryResult[] => {
368
+ return rows.map((row, index) => ({
369
+ rank: index + 1,
370
+ digest: row.digest,
371
+ stmtType: row.stmtType,
372
+ schemaName: row.schemaName,
373
+ execCount: row.execCount,
374
+ avgLatencyMs: nsToMs(row.avgLatencyNs),
375
+ maxLatencyMs: nsToMs(row.maxLatencyNs),
376
+ minLatencyMs: nsToMs(row.minLatencyNs),
377
+ avgProcessTimeMs: nsToMs(row.avgProcessTimeNs),
378
+ avgWaitTimeMs: nsToMs(row.avgWaitTimeNs),
379
+ avgBackoffTimeMs: nsToMs(row.avgBackoffTimeNs),
380
+ avgMemMB: bytesToMB(row.avgMemBytes),
381
+ maxMemMB: bytesToMB(row.maxMemBytes),
382
+ avgMemBytes: row.avgMemBytes,
383
+ maxMemBytes: row.maxMemBytes,
384
+ avgTotalKeys: row.avgTotalKeys,
385
+ firstSeen: row.firstSeen,
386
+ lastSeen: row.lastSeen,
387
+ planInCache: row.planInCache,
388
+ planCacheHits: row.planCacheHits,
389
+ digestText: options.operationType === "DDL" ? row.digestText : sanitizeSQL(row.digestText),
390
+ plan: options.showPlan ? row.plan : undefined,
391
+ }));
392
+ };
393
+
394
+ /**
395
+ * Logs formatted query results to console
396
+ */
397
+ const logQueryResults = (
398
+ formatted: FormattedQueryResult[],
399
+ options: Required<TriggerOptions>,
400
+ ): void => {
401
+ for (const result of formatted) {
402
+ // eslint-disable-next-line no-console
403
+ console.warn(
404
+ `${result.rank}. ${result.stmtType} avg=${result.avgLatencyMs?.toFixed?.(2)}ms max=${result.maxLatencyMs?.toFixed?.(2)}ms mem≈${result.avgMemMB?.toFixed?.(2)}MB(max ${result.maxMemMB?.toFixed?.(2)}MB) exec=${result.execCount} \n` +
405
+ ` digest=${result.digest}\n` +
406
+ ` sql=${(result.digestText || "").slice(0, 300)}${result.digestText && result.digestText.length > 300 ? "…" : ""}`,
407
+ );
408
+
409
+ if (options.showPlan && result.plan) {
410
+ // eslint-disable-next-line no-console
411
+ console.warn(` full plan:\n${result.plan}`);
412
+ }
413
+ }
414
+ };
415
+
416
+ /**
417
+ * Performance monitoring scheduler trigger for Atlassian Forge SQL.
11
418
  *
12
- * When scheduled (e.g. hourly), this trigger queries
13
- * INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY for the last hour
14
- * and prints the TOP 1 entry (by AVG_LATENCY) if it exceeds either threshold.
419
+ * This trigger analyzes query performance from the last hour and identifies slow or memory-intensive queries
420
+ * that exceed configurable thresholds. It's designed specifically for Atlassian Forge's 16 MiB memory limit
421
+ * and provides detailed insights for query optimization.
15
422
  *
16
- * **OR Logic**: Statements are included if they exceed EITHER threshold:
17
- * - avgLatencyMs > warnThresholdMs OR
18
- * - avgMemBytes > memoryThresholdBytes
423
+ * ## Key Features
424
+ * - **Memory-focused monitoring**: Primary focus on memory usage with configurable thresholds
425
+ * - **Atlassian 16 MiB limit awareness**: Designed specifically for Forge SQL's memory constraints
426
+ * - **Execution plan analysis**: Shows detailed query plans to help optimize memory consumption
427
+ * - **Configurable thresholds**: Set custom memory usage and latency thresholds
428
+ * - **Automatic filtering**: Excludes system queries (`Use`, `Set`, `Show`) and empty queries
429
+ * - **Scheduled monitoring**: Run automatically on configurable intervals
19
430
  *
20
- * **Pro Tips:**
21
- * - Memory-only monitoring: Set warnThresholdMs to 10000ms (effectively disabled)
22
- * - Latency-only monitoring: Set memoryThresholdBytes to 16MB (16 * 1024 * 1024) (effectively disabled)
23
- * - Combined monitoring: Use both thresholds for comprehensive monitoring
431
+ * ## OR Logic Thresholds
432
+ * Statements are included if they exceed **EITHER** threshold:
433
+ * - `avgLatencyMs > warnThresholdMs` **OR**
434
+ * - `avgMemBytes > memoryThresholdBytes`
24
435
  *
25
- * Excludes statements with empty `digestText`, empty `digest`, or service statements (`Use`, `Set`, `Show`).
436
+ * ## Configuration Tips
437
+ * - **Memory-only monitoring**: Set `warnThresholdMs` to 10000ms (effectively disabled)
438
+ * - **Latency-only monitoring**: Set `memoryThresholdBytes` to 16MB (16 * 1024 * 1024) (effectively disabled)
439
+ * - **Combined monitoring**: Use both thresholds for comprehensive monitoring
440
+ * - **Conservative monitoring**: 4MB warning (25% of 16MB limit)
441
+ * - **Default monitoring**: 8MB warning (50% of 16MB limit)
442
+ * - **Aggressive monitoring**: 12MB warning (75% of 16MB limit)
26
443
  *
27
- * Logging rule:
28
- * - Query exceeds warnThresholdMs OR memoryThresholdBytes → console.warn (logged)
29
- * - otherwise not logged
444
+ * ## Exclusions
445
+ * - Statements with empty `digestText` or `digest`
446
+ * - Service statements (`Use`, `Set`, `Show`)
447
+ * - Queries that don't exceed either threshold
30
448
  *
31
- * @param orm ForgeSQL ORM instance (required)
32
- * @param options Configuration options object
33
- * @param options.warnThresholdMs Milliseconds threshold for logging and filtering (default: 300ms)
34
- * @param options.memoryThresholdBytes Bytes threshold for average memory usage (default: 8MB)
35
- * @param options.showPlan Whether to include execution plan in logs (default: false)
36
- * @returns HTTP response with a JSON payload containing the filtered rows
449
+ * @param orm - ForgeSQL ORM instance (required)
450
+ * @param options - Configuration options
451
+ * @param options.warnThresholdMs - Milliseconds threshold for latency monitoring (default: 300ms)
452
+ * @param options.memoryThresholdBytes - Bytes threshold for memory usage monitoring (default: 8MB)
453
+ * @param options.showPlan - Whether to include execution plan in logs (default: false)
454
+ * @param options.operationType - Operation type context for query execution (default: "DML")
455
+ * @param options.topN - Number of top slow queries to return (default: 1)
456
+ * @param options.hours - Number of hours to look back (default: 1)
457
+ * @param options.tables - Table configuration to use (default: "CLUSTER_SUMMARY_AND_HISTORY")
458
+ * @returns Promise<TriggerResponse> - HTTP response with query results or error
37
459
  *
38
460
  * @example
39
- * ```ts
461
+ * ```typescript
40
462
  * import ForgeSQL, { topSlowestStatementLastHourTrigger } from "forge-sql-orm";
41
463
  *
42
- * const FORGE_SQL_ORM = new ForgeSQL();
464
+ * const forgeSQL = new ForgeSQL();
43
465
  *
44
466
  * // Default thresholds: 300ms latency OR 8MB memory
45
- * export const topSlowQueryTrigger = () =>
46
- * topSlowestStatementLastHourTrigger(FORGE_SQL_ORM);
467
+ * export const performanceTrigger = () =>
468
+ * topSlowestStatementLastHourTrigger(forgeSQL);
47
469
  *
48
- * // Only latency monitoring: 500ms threshold (memory effectively disabled)
49
- * export const latencyOnlyTrigger = () =>
50
- * topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 500, memoryThresholdBytes: 16 * 1024 * 1024 });
470
+ * // Conservative memory monitoring: 4MB threshold
471
+ * export const conservativeTrigger = () =>
472
+ * topSlowestStatementLastHourTrigger(forgeSQL, {
473
+ * memoryThresholdBytes: 4 * 1024 * 1024
474
+ * });
51
475
  *
52
- * // Only memory monitoring: 4MB threshold (latency effectively disabled)
476
+ * // Memory-only monitoring: 4MB threshold (latency effectively disabled)
53
477
  * export const memoryOnlyTrigger = () =>
54
- * topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 10000, memoryThresholdBytes: 4 * 1024 * 1024 });
55
- *
56
- * // Both thresholds: 500ms latency OR 8MB memory
57
- * export const bothThresholdsTrigger = () =>
58
- * topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { warnThresholdMs: 500, memoryThresholdBytes: 8 * 1024 * 1024 });
478
+ * topSlowestStatementLastHourTrigger(forgeSQL, {
479
+ * warnThresholdMs: 10000,
480
+ * memoryThresholdBytes: 4 * 1024 * 1024
481
+ * });
59
482
  *
60
483
  * // With execution plan in logs
61
484
  * export const withPlanTrigger = () =>
62
- * topSlowestStatementLastHourTrigger(FORGE_SQL_ORM, { showPlan: true });
63
- *
485
+ * topSlowestStatementLastHourTrigger(forgeSQL, { showPlan: true });
486
+ * ```
64
487
  *
65
488
  * @example
66
489
  * ```yaml
490
+ * # manifest.yml configuration
67
491
  * scheduledTrigger:
68
- * - key: top-slow-query-trigger
69
- * function: topSlowQueryTrigger
492
+ * - key: performance-trigger
493
+ * function: performanceTrigger
70
494
  * interval: hour
71
495
  *
72
496
  * function:
73
- * - key: topSlowQueryTrigger
74
- * handler: index.topSlowQueryTrigger
497
+ * - key: performanceTrigger
498
+ * handler: index.performanceTrigger
75
499
  * ```
76
500
  */
77
- // Main scheduler trigger function to log the single slowest SQL statement from the last hour.
501
+ /**
502
+ * Main scheduler trigger function to log the single slowest SQL statement from the last hour.
503
+ *
504
+ * @param orm - ForgeSQL ORM instance (required)
505
+ * @param options - Configuration options
506
+ * @returns Promise<TriggerResponse> - HTTP response with query results or error
507
+ */
78
508
  export const topSlowestStatementLastHourTrigger = async (
79
- orm: ForgeSqlOperation, options?: {
80
- warnThresholdMs?:number,
81
- memoryThresholdBytes?: number,
82
- showPlan?: boolean
83
- }
84
- ) => {
509
+ orm: ForgeSqlOperation,
510
+ options?: TriggerOptions,
511
+ ): Promise<TriggerResponse> => {
85
512
  // Validate required parameters
86
513
  if (!orm) {
87
- return {
88
- statusCode: 500,
89
- headers: { "Content-Type": ["application/json"] },
90
- body: JSON.stringify({
91
- success: false,
92
- message: "ORM instance is required",
93
- timestamp: new Date().toISOString(),
94
- }),
95
- };
514
+ return createErrorResponse("ORM instance is required");
96
515
  }
97
- let newOptions= options ?? {
98
- warnThresholdMs: DEFAULT_TIMEOUT,
99
- memoryThresholdBytes: DEFAULT_MEMORY_THRESHOLD,
100
- showPlan: false
101
- };
102
516
 
103
- // Helper: Convert nanoseconds to milliseconds (for latency fields)
104
- const nsToMs = (v: unknown) => {
105
- const n = Number(v);
106
- return Number.isFinite(n) ? n / 1e6 : NaN;
517
+ // Merge options with defaults
518
+ const mergedOptions: Required<TriggerOptions> = {
519
+ warnThresholdMs: options?.warnThresholdMs ?? DEFAULT_TIMEOUT,
520
+ memoryThresholdBytes: options?.memoryThresholdBytes ?? DEFAULT_MEMORY_THRESHOLD,
521
+ showPlan: options?.showPlan ?? false,
522
+ operationType: options?.operationType ?? "DML",
523
+ topN: options?.topN ?? DEFAULT_TOP_N,
524
+ hours: options?.hours ?? DEFAULT_HOURS,
525
+ tables: options?.tables ?? DEFAULT_TABLES,
107
526
  };
108
527
 
109
- // Helper: Convert bytes to megabytes (for memory fields)
110
- const bytesToMB = (v: unknown) => {
111
- const n = Number(v);
112
- return Number.isFinite(n) ? n / (1024 * 1024) : NaN;
113
- };
528
+ try {
529
+ // Build the combined query from multiple tables
530
+ const combined = buildCombinedQuery(orm, mergedOptions);
114
531
 
115
- // Helper: JSON.stringify replacer to handle BigInt values (so BigInt serializes as string)
116
- const jsonSafeStringify = (value: unknown) =>
117
- JSON.stringify(value, (_k, v) => (typeof v === "bigint" ? v.toString() : v));
118
-
119
- /**
120
- * Simple SQL sanitizer for safe logging.
121
- * - removes comments
122
- * - replaces string and numeric literals with '?'
123
- * - normalizes whitespace
124
- * - truncates long queries
125
- */
126
- function sanitizeSQL(sql: string, maxLen = 1000): string {
127
- let s = sql;
128
-
129
- // 1. Remove comments (-- ... and /* ... */)
130
- s = s.replace(/--[^\n\r]*/g, "")
131
- .replace(/\/\*[\s\S]*?\*\//g, "");
132
-
133
- // 2. Replace string literals with '?'
134
- s = s.replace(/'(?:\\'|[^'])*'/g, "?");
135
- // 3. Replace numbers with '?'
136
- s = s.replace(/\b-?\d+(?:\.\d+)?\b/g, "?");
137
- // 4. Normalize whitespace
138
- s = s.replace(/\s+/g, " ").trim();
139
- // 5. Truncate long queries
140
- if (s.length > maxLen) {
141
- s = s.slice(0, maxLen) + " …[truncated]";
142
- }
143
- return s;
144
- }
532
+ // Build the grouped query with filtering and aggregation
533
+ const grouped = buildGroupedQuery(orm, combined);
145
534
 
146
- // Number of top slow queries to fetch
147
- const TOP_N = 1;
535
+ // Build the final query with filtering, sorting, and limiting
536
+ const finalQuery = buildFinalQuery(orm, grouped, mergedOptions);
148
537
 
149
- try {
150
- // Get references to system summary tables
151
- const summaryHistory = clusterStatementsSummaryHistory;
152
- const summary = clusterStatementsSummary;
153
- // Helper to define the selected fields (selection shape) for both tables
154
- const selectShape = (t: typeof summaryHistory | typeof summary) => ({
155
- digest: t.digest,
156
- stmtType: t.stmtType,
157
- schemaName: t.schemaName,
158
- execCount: t.execCount,
159
-
160
- avgLatencyNs: t.avgLatency,
161
- maxLatencyNs: t.maxLatency,
162
- minLatencyNs: t.minLatency,
163
-
164
- avgProcessTimeNs: t.avgProcessTime,
165
- avgWaitTimeNs: t.avgWaitTime,
166
- avgBackoffTimeNs: t.avgBackoffTime,
167
-
168
- avgTotalKeys: t.avgTotalKeys,
169
- firstSeen: t.firstSeen,
170
- lastSeen: t.lastSeen,
171
- planInCache: t.planInCache,
172
- planCacheHits: t.planCacheHits,
173
- digestText: t.digestText,
174
- plan: t.plan,
175
- avgMemBytes: (t as any).avgMem,
176
- maxMemBytes: (t as any).maxMem,
177
- });
178
-
179
- // Filters: Only include rows from the last hour for each table
180
- const lastHourFilterHistory = gte(
181
- summaryHistory.summaryEndTime,
182
- sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`,
183
- );
184
- const lastHourFilterSummary = gte(
185
- summary.summaryEndTime,
186
- sql`DATE_SUB(NOW(), INTERVAL 1 HOUR)`,
538
+ // Execute the query with retries and timeout
539
+ const rows = await executeWithRetries(
540
+ () => withTimeout(finalQuery, MAX_QUERY_TIMEOUT_MS),
541
+ "topSlowestStatementLastHourTrigger",
187
542
  );
188
543
 
189
- // Query for summary history table (last hour)
190
- const qHistory = orm
191
- .getDrizzleQueryBuilder()
192
- .select(selectShape(summaryHistory))
193
- .from(summaryHistory)
194
- .where(lastHourFilterHistory);
195
-
196
- // Query for summary table (last hour)
197
- const qSummary = orm
198
- .getDrizzleQueryBuilder()
199
- .select(selectShape(summary))
200
- .from(summary)
201
- .where(lastHourFilterSummary);
202
-
203
- // Use UNION ALL to combine results from both tables (avoids duplicates, keeps all rows)
204
- // This is necessary because some statements may only be present in one of the tables.
205
- const combined = unionAll(qHistory, qSummary).as("combined");
206
-
207
- // Threshold in nanoseconds (warnThresholdMs → ns)
208
- const thresholdNs = Math.floor((newOptions.warnThresholdMs ?? DEFAULT_TIMEOUT) * 1e6);
209
- // memoryThresholdBytes is already provided in bytes (default 8MB)
210
- const memoryThresholdBytes = newOptions.memoryThresholdBytes ?? DEFAULT_MEMORY_THRESHOLD;
211
-
212
- // Group duplicates by digest+stmtType+schemaName and aggregate metrics
213
- const grouped = orm
214
- .getDrizzleQueryBuilder()
215
- .select({
216
- digest: combined.digest,
217
- stmtType: combined.stmtType,
218
- schemaName: combined.schemaName,
219
- execCount: sql<number>`SUM(${combined.execCount})`.as("execCount"),
220
-
221
- avgLatencyNs: sql<number>`MAX(${combined.avgLatencyNs})`.as("avgLatencyNs"),
222
- maxLatencyNs: sql<number>`MAX(${combined.maxLatencyNs})`.as("maxLatencyNs"),
223
- minLatencyNs: sql<number>`MIN(${combined.minLatencyNs})`.as("minLatencyNs"),
224
-
225
- avgProcessTimeNs: sql<number>`MAX(${combined.avgProcessTimeNs})`.as("avgProcessTimeNs"),
226
- avgWaitTimeNs: sql<number>`MAX(${combined.avgWaitTimeNs})`.as("avgWaitTimeNs"),
227
- avgBackoffTimeNs: sql<number>`MAX(${combined.avgBackoffTimeNs})`.as("avgBackoffTimeNs"),
228
-
229
- avgMemBytes: sql<number>`MAX(${combined.avgMemBytes})`.as("avgMemBytes"),
230
- maxMemBytes: sql<number>`MAX(${combined.maxMemBytes})`.as("maxMemBytes"),
231
-
232
- avgTotalKeys: sql<number>`MAX(${combined.avgTotalKeys})`.as("avgTotalKeys"),
233
- firstSeen: sql<string>`MIN(${combined.firstSeen})`.as("firstSeen"),
234
- lastSeen: sql<string>`MAX(${combined.lastSeen})`.as("lastSeen"),
235
- planInCache: sql<boolean>`MAX(${combined.planInCache})`.as("planInCache"),
236
- planCacheHits: sql<number>`SUM(${combined.planCacheHits})`.as("planCacheHits"),
237
- // Prefer a non-empty sample text/plan via MAX; acceptable for de-dup
238
- digestText: sql<string>`MAX(${combined.digestText})`.as("digestText"),
239
- plan: sql<string>`MAX(${combined.plan})`.as("plan"),
240
- })
241
- .from(combined)
242
- .where(
243
- sql`COALESCE(${combined.digest}, '') <> '' AND COALESCE(${combined.digestText}, '') <> '' AND COALESCE(${combined.stmtType}, '') NOT IN ('Use','Set','Show')`,
244
- )
245
- .groupBy(combined.digest, combined.stmtType, combined.schemaName)
246
- .as("grouped");
247
-
248
- // Final selection: filter by threshold, sort by avg latency desc, limit TOP_N
249
- const rows = await orm
250
- .getDrizzleQueryBuilder()
251
- .select({
252
- digest: grouped.digest,
253
- stmtType: grouped.stmtType,
254
- schemaName: grouped.schemaName,
255
- execCount: grouped.execCount,
256
-
257
- avgLatencyNs: grouped.avgLatencyNs,
258
- maxLatencyNs: grouped.maxLatencyNs,
259
- minLatencyNs: grouped.minLatencyNs,
260
-
261
- avgProcessTimeNs: grouped.avgProcessTimeNs,
262
- avgWaitTimeNs: grouped.avgWaitTimeNs,
263
- avgBackoffTimeNs: grouped.avgBackoffTimeNs,
264
-
265
- avgMemBytes: grouped.avgMemBytes,
266
- maxMemBytes: grouped.maxMemBytes,
267
-
268
- avgTotalKeys: grouped.avgTotalKeys,
269
- firstSeen: grouped.firstSeen,
270
- lastSeen: grouped.lastSeen,
271
- planInCache: grouped.planInCache,
272
- planCacheHits: grouped.planCacheHits,
273
- digestText: grouped.digestText,
274
- plan: grouped.plan,
275
- })
276
- .from(grouped)
277
- .where(
278
- sql`${grouped.avgLatencyNs} > ${thresholdNs} OR ${grouped.avgMemBytes} > ${memoryThresholdBytes}`,
279
- )
280
- .orderBy(desc(grouped.avgLatencyNs))
281
- .limit(formatLimitOffset(TOP_N));
282
-
283
- // Map each row into a formatted object with ms and rank, for easier consumption/logging
284
- const formatted = rows.map((r, i) => ({
285
- rank: i + 1, // 1-based rank in the top N
286
- digest: r.digest,
287
- stmtType: r.stmtType,
288
- schemaName: r.schemaName,
289
- execCount: r.execCount,
290
- avgLatencyMs: nsToMs(r.avgLatencyNs), // Convert ns to ms for readability
291
- maxLatencyMs: nsToMs(r.maxLatencyNs),
292
- minLatencyMs: nsToMs(r.minLatencyNs),
293
- avgProcessTimeMs: nsToMs(r.avgProcessTimeNs),
294
- avgWaitTimeMs: nsToMs(r.avgWaitTimeNs),
295
- avgBackoffTimeMs: nsToMs(r.avgBackoffTimeNs),
296
- avgMemMB: bytesToMB(r.avgMemBytes),
297
- maxMemMB: bytesToMB(r.maxMemBytes),
298
- avgMemBytes: r.avgMemBytes,
299
- maxMemBytes: r.maxMemBytes,
300
- avgTotalKeys: r.avgTotalKeys,
301
- firstSeen: r.firstSeen,
302
- lastSeen: r.lastSeen,
303
- planInCache: r.planInCache,
304
- planCacheHits: r.planCacheHits,
305
- digestText: sanitizeSQL(r.digestText),
306
- plan: newOptions.showPlan? r.plan: undefined,
307
- }));
308
-
309
- // Log each entry (SQL already filtered by threshold)
310
- for (const f of formatted) {
311
- // eslint-disable-next-line no-console
312
- console.warn(
313
- `${f.rank}. ${f.stmtType} avg=${f.avgLatencyMs?.toFixed?.(2)}ms max=${f.maxLatencyMs?.toFixed?.(2)}ms mem≈${f.avgMemMB?.toFixed?.(2)}MB(max ${f.maxMemMB?.toFixed?.(2)}MB) exec=${f.execCount} \n` +
314
- ` digest=${f.digest}\n` +
315
- ` sql=${(f.digestText || "").slice(0, 300)}${f.digestText && f.digestText.length > 300 ? "…" : ""}`,
316
- );
317
- if (newOptions.showPlan && f.plan ) {
318
- // print full plan separately (not truncated)
319
- // eslint-disable-next-line no-console
320
- console.warn(` full plan:\n${f.plan}`);
321
- }
322
- }
544
+ // Format the results for output
545
+ const formatted = formatQueryResults(rows, mergedOptions);
323
546
 
324
- // Return HTTP response with JSON payload of the results
325
- return {
326
- headers: { "Content-Type": ["application/json"] },
327
- statusCode: 200,
328
- statusText: "OK",
329
- body: jsonSafeStringify({
330
- success: true,
331
- window: "last_1h",
332
- top: TOP_N,
333
- warnThresholdMs: newOptions.warnThresholdMs,
334
- memoryThresholdBytes: newOptions.memoryThresholdBytes,
335
- showPlan: newOptions.showPlan,
336
- rows: formatted,
337
- generatedAt: new Date().toISOString(),
338
- }),
339
- };
547
+ // Log the results to console
548
+ logQueryResults(formatted, mergedOptions);
549
+
550
+ // Return success response
551
+ return createSuccessResponse(formatted, mergedOptions);
340
552
  } catch (error: any) {
341
- // Catch any error (DB, logic, etc) and log with details for debugging
342
- // This ensures the scheduler never crashes and always returns a response.
553
+ // Log error details for debugging
343
554
  // eslint-disable-next-line no-console
344
- console.error(
345
- "Error in topSlowestStatementLastHourTrigger:",
555
+ console.warn(
556
+ "Error in topSlowestStatementLastHourTrigger (one-off errors can be ignored; if it recurs, investigate):",
346
557
  error?.cause?.context?.debug?.sqlMessage ?? error?.cause ?? error,
347
558
  );
348
- return {
349
- headers: { "Content-Type": ["application/json"] },
350
- statusCode: 500,
351
- statusText: "Internal Server Error",
352
- body: jsonSafeStringify({
353
- success: false,
354
- message: "Failed to fetch or log slow queries",
355
- error: error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.message,
356
- timestamp: new Date().toISOString(),
357
- }),
358
- };
559
+
560
+ // Return error response
561
+ return createErrorResponse("Failed to fetch or log slow queries", error);
359
562
  }
360
563
  };
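
For quick reference outside the diff, here is a minimal, hypothetical sketch of consuming the updated trigger from a Forge app. It exercises the new `topN`, `hours`, `tables`, and threshold options added to the `TriggerOptions` interface in this release; the export name `slowQueryReport` and the concrete values are illustrative only, not part of the package.

```typescript
// Hypothetical consumer of forge-sql-orm 2.1.7.
// Option names follow the TriggerOptions interface shown in the diff above;
// the chosen values and the export name are examples, not package defaults.
import ForgeSQL, { topSlowestStatementLastHourTrigger } from "forge-sql-orm";

const forgeSQL = new ForgeSQL();

// Report the 5 slowest statements from the last 2 hours, reading the
// non-cluster summary tables and including execution plans in the logs.
export const slowQueryReport = () =>
  topSlowestStatementLastHourTrigger(forgeSQL, {
    topN: 5,
    hours: 2,
    tables: "SUMMARY_AND_HISTORY",
    warnThresholdMs: 500,                  // latency threshold (default: 300ms)
    memoryThresholdBytes: 8 * 1024 * 1024, // memory threshold (default: 8MB)
    showPlan: true,
  });
```

Any option left out (here, `operationType`) falls back to the defaults visible in the diff's merged-options block: 300ms, 8MB, topN 1, hours 1, "CLUSTER_SUMMARY_AND_HISTORY", and "DML".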