forge-sql-orm 2.1.2 → 2.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +205 -0
  2. package/dist/ForgeSQLORM.js +677 -56
  3. package/dist/ForgeSQLORM.js.map +1 -1
  4. package/dist/ForgeSQLORM.mjs +680 -59
  5. package/dist/ForgeSQLORM.mjs.map +1 -1
  6. package/dist/core/ForgeSQLQueryBuilder.d.ts +1 -2
  7. package/dist/core/ForgeSQLQueryBuilder.d.ts.map +1 -1
  8. package/dist/core/SystemTables.d.ts +5039 -0
  9. package/dist/core/SystemTables.d.ts.map +1 -1
  10. package/dist/utils/cacheUtils.d.ts.map +1 -1
  11. package/dist/utils/forgeDriver.d.ts.map +1 -1
  12. package/dist/utils/forgeDriverProxy.d.ts.map +1 -1
  13. package/dist/utils/sqlUtils.d.ts +1 -1
  14. package/dist/utils/sqlUtils.d.ts.map +1 -1
  15. package/dist/webtriggers/applyMigrationsWebTrigger.d.ts +1 -1
  16. package/dist/webtriggers/applyMigrationsWebTrigger.d.ts.map +1 -1
  17. package/dist/webtriggers/dropMigrationWebTrigger.d.ts.map +1 -1
  18. package/dist/webtriggers/dropTablesMigrationWebTrigger.d.ts.map +1 -1
  19. package/dist/webtriggers/fetchSchemaWebTrigger.d.ts.map +1 -1
  20. package/dist/webtriggers/index.d.ts +1 -0
  21. package/dist/webtriggers/index.d.ts.map +1 -1
  22. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts +72 -0
  23. package/dist/webtriggers/topSlowestStatementLastHourTrigger.d.ts.map +1 -0
  24. package/package.json +8 -7
  25. package/src/core/ForgeSQLQueryBuilder.ts +13 -9
  26. package/src/core/SystemTables.ts +313 -1
  27. package/src/lib/drizzle/extensions/additionalActions.ts +2 -2
  28. package/src/utils/cacheContextUtils.ts +2 -2
  29. package/src/utils/cacheUtils.ts +3 -1
  30. package/src/utils/forgeDriver.ts +16 -21
  31. package/src/utils/forgeDriverProxy.ts +10 -3
  32. package/src/utils/sqlUtils.ts +32 -7
  33. package/src/webtriggers/applyMigrationsWebTrigger.ts +21 -15
  34. package/src/webtriggers/dropMigrationWebTrigger.ts +8 -4
  35. package/src/webtriggers/dropTablesMigrationWebTrigger.ts +8 -4
  36. package/src/webtriggers/fetchSchemaWebTrigger.ts +7 -3
  37. package/src/webtriggers/index.ts +1 -0
  38. package/src/webtriggers/topSlowestStatementLastHourTrigger.ts +305 -0
@@ -1,4 +1,15 @@
1
- import { bigint, mysqlTable, timestamp, varchar } from "drizzle-orm/mysql-core";
1
+ import {
2
+ bigint,
3
+ mysqlTable,
4
+ timestamp,
5
+ varchar,
6
+ double,
7
+ mysqlSchema,
8
+ longtext,
9
+ int,
10
+ text,
11
+ boolean,
12
+ } from "drizzle-orm/mysql-core";
2
13
  import { Table } from "drizzle-orm";
3
14
  import { sql } from "@forge/sql";
4
15
 
@@ -8,6 +19,307 @@ export const migrations = mysqlTable("__migrations", {
8
19
  migratedAt: timestamp("migratedAt").defaultNow().notNull(),
9
20
  });
10
21
 
22
+ const informationSchema = mysqlSchema("information_schema");
23
+
24
+ export const slowQuery = informationSchema.table("SLOW_QUERY", {
25
+ time: timestamp("Time", { fsp: 6, mode: "string" }).notNull(), // Timestamp when the slow query was recorded
26
+
27
+ txnStartTs: bigint("Txn_start_ts", { mode: "bigint", unsigned: true }), // Transaction start timestamp (TSO)
28
+ user: varchar("User", { length: 64 }), // User executing the query
29
+ host: varchar("Host", { length: 64 }), // Host from which the query originated
30
+ connId: bigint("Conn_ID", { mode: "bigint", unsigned: true }), // Connection ID
31
+ sessionAlias: varchar("Session_alias", { length: 64 }), // Session alias
32
+
33
+ execRetryCount: bigint("Exec_retry_count", { mode: "bigint", unsigned: true }), // Number of retries during execution
34
+ execRetryTime: double("Exec_retry_time"), // Time spent in retries
35
+ queryTime: double("Query_time"), // Total execution time
36
+ parseTime: double("Parse_time"), // Time spent parsing SQL
37
+ compileTime: double("Compile_time"), // Time spent compiling query plan
38
+ rewriteTime: double("Rewrite_time"), // Time spent rewriting query
39
+ preprocSubqueries: bigint("Preproc_subqueries", { mode: "bigint", unsigned: true }), // Number of subqueries preprocessed
40
+ preprocSubqueriesTime: double("Preproc_subqueries_time"), // Time spent preprocessing subqueries
41
+ optimizeTime: double("Optimize_time"), // Time spent in optimizer
42
+ waitTs: double("Wait_TS"), // Wait time for getting TSO
43
+ prewriteTime: double("Prewrite_time"), // Time spent in prewrite phase
44
+ waitPrewriteBinlogTime: double("Wait_prewrite_binlog_time"), // Time waiting for binlog prewrite
45
+ commitTime: double("Commit_time"), // Commit duration
46
+ getCommitTsTime: double("Get_commit_ts_time"), // Time waiting for commit TSO
47
+ commitBackoffTime: double("Commit_backoff_time"), // Backoff time during commit
48
+ backoffTypes: varchar("Backoff_types", { length: 64 }), // Types of backoff occurred
49
+ resolveLockTime: double("Resolve_lock_time"), // Time resolving locks
50
+ localLatchWaitTime: double("Local_latch_wait_time"), // Time waiting on local latch
51
+
52
+ writeKeys: bigint("Write_keys", { mode: "bigint" }), // Number of keys written
53
+ writeSize: bigint("Write_size", { mode: "bigint" }), // Amount of data written
54
+ prewriteRegion: bigint("Prewrite_region", { mode: "bigint" }), // Regions involved in prewrite
55
+ txnRetry: bigint("Txn_retry", { mode: "bigint" }), // Transaction retry count
56
+
57
+ copTime: double("Cop_time"), // Time spent in coprocessor
58
+ processTime: double("Process_time"), // Processing time
59
+ waitTime: double("Wait_time"), // Wait time in TiKV
60
+ backoffTime: double("Backoff_time"), // Backoff wait time
61
+ lockKeysTime: double("LockKeys_time"), // Time spent waiting for locks
62
+
63
+ requestCount: bigint("Request_count", { mode: "bigint", unsigned: true }), // Total number of requests
64
+ totalKeys: bigint("Total_keys", { mode: "bigint", unsigned: true }), // Total keys scanned
65
+ processKeys: bigint("Process_keys", { mode: "bigint", unsigned: true }), // Keys processed
66
+
67
+ rocksdbDeleteSkippedCount: bigint("Rocksdb_delete_skipped_count", {
68
+ mode: "bigint",
69
+ unsigned: true,
70
+ }), // RocksDB delete skips
71
+ rocksdbKeySkippedCount: bigint("Rocksdb_key_skipped_count", { mode: "bigint", unsigned: true }), // RocksDB key skips
72
+ rocksdbBlockCacheHitCount: bigint("Rocksdb_block_cache_hit_count", {
73
+ mode: "bigint",
74
+ unsigned: true,
75
+ }), // RocksDB block cache hits
76
+ rocksdbBlockReadCount: bigint("Rocksdb_block_read_count", { mode: "bigint", unsigned: true }), // RocksDB block reads
77
+ rocksdbBlockReadByte: bigint("Rocksdb_block_read_byte", { mode: "bigint", unsigned: true }), // RocksDB block read bytes
78
+
79
+ db: varchar("DB", { length: 64 }), // Database name
80
+ indexNames: varchar("Index_names", { length: 100 }), // Indexes used
81
+
82
+ isInternal: boolean("Is_internal"), // Whether the query is internal
83
+ digest: varchar("Digest", { length: 64 }), // SQL digest hash
84
+ stats: varchar("Stats", { length: 512 }), // Stats used during planning
85
+
86
+ copProcAvg: double("Cop_proc_avg"), // Coprocessor average processing time
87
+ copProcP90: double("Cop_proc_p90"), // Coprocessor 90th percentile processing time
88
+ copProcMax: double("Cop_proc_max"), // Coprocessor max processing time
89
+ copProcAddr: varchar("Cop_proc_addr", { length: 64 }), // Coprocessor address for processing
90
+
91
+ copWaitAvg: double("Cop_wait_avg"), // Coprocessor average wait time
92
+ copWaitP90: double("Cop_wait_p90"), // Coprocessor 90th percentile wait time
93
+ copWaitMax: double("Cop_wait_max"), // Coprocessor max wait time
94
+ copWaitAddr: varchar("Cop_wait_addr", { length: 64 }), // Coprocessor address for wait
95
+
96
+ memMax: bigint("Mem_max", { mode: "bigint" }), // Max memory usage
97
+ diskMax: bigint("Disk_max", { mode: "bigint" }), // Max disk usage
98
+
99
+ kvTotal: double("KV_total"), // Total KV request time
100
+ pdTotal: double("PD_total"), // Total PD request time
101
+ backoffTotal: double("Backoff_total"), // Total backoff time
102
+ writeSqlResponseTotal: double("Write_sql_response_total"), // SQL response write time
103
+
104
+ resultRows: bigint("Result_rows", { mode: "bigint" }), // Rows returned
105
+ warnings: longtext("Warnings"), // Warnings during execution
106
+ backoffDetail: varchar("Backoff_Detail", { length: 4096 }), // Detailed backoff info
107
+
108
+ prepared: boolean("Prepared"), // Whether query was prepared
109
+ succ: boolean("Succ"), // Success flag
110
+ isExplicitTxn: boolean("IsExplicitTxn"), // Whether explicit transaction
111
+ isWriteCacheTable: boolean("IsWriteCacheTable"), // Whether wrote to cache table
112
+ planFromCache: boolean("Plan_from_cache"), // Plan was from cache
113
+ planFromBinding: boolean("Plan_from_binding"), // Plan was from binding
114
+ hasMoreResults: boolean("Has_more_results"), // Query returned multiple results
115
+
116
+ resourceGroup: varchar("Resource_group", { length: 64 }), // Resource group name
117
+ requestUnitRead: double("Request_unit_read"), // RU consumed for read
118
+ requestUnitWrite: double("Request_unit_write"), // RU consumed for write
119
+ timeQueuedByRc: double("Time_queued_by_rc"), // Time queued by resource control
120
+
121
+ tidbCpuTime: double("Tidb_cpu_time"), // TiDB CPU time
122
+ tikvCpuTime: double("Tikv_cpu_time"), // TiKV CPU time
123
+
124
+ plan: longtext("Plan"), // Query execution plan
125
+ planDigest: varchar("Plan_digest", { length: 128 }), // Plan digest hash
126
+ binaryPlan: longtext("Binary_plan"), // Binary execution plan
127
+ prevStmt: longtext("Prev_stmt"), // Previous statement in session
128
+ query: longtext("Query"), // Original SQL query
129
+ });
130
+
131
+ export type SlowQuery = typeof slowQuery.$inferSelect;
132
+
133
+ // Common schema for cluster statements summary tables
134
+ const createClusterStatementsSummarySchema = () => ({
135
+ instance: varchar("INSTANCE", { length: 64 }), // TiDB/TiKV instance address
136
+
137
+ summaryBeginTime: timestamp("SUMMARY_BEGIN_TIME", { mode: "string" }).notNull(), // Begin time of this summary window
138
+ summaryEndTime: timestamp("SUMMARY_END_TIME", { mode: "string" }).notNull(), // End time of this summary window
139
+
140
+ stmtType: varchar("STMT_TYPE", { length: 64 }).notNull(), // Statement type (e.g., Select/Insert/Update)
141
+ schemaName: varchar("SCHEMA_NAME", { length: 64 }), // Current schema name
142
+ digest: varchar("DIGEST", { length: 64 }), // SQL digest (normalized hash)
143
+ digestText: text("DIGEST_TEXT").notNull(), // Normalized SQL text
144
+
145
+ tableNames: text("TABLE_NAMES"), // Involved table names
146
+ indexNames: text("INDEX_NAMES"), // Used index names
147
+
148
+ sampleUser: varchar("SAMPLE_USER", { length: 64 }), // Sampled user who executed the statements
149
+
150
+ execCount: bigint("EXEC_COUNT", { mode: "bigint", unsigned: true }).notNull(), // Total executions
151
+ sumErrors: int("SUM_ERRORS", { unsigned: true }).notNull(), // Sum of errors
152
+ sumWarnings: int("SUM_WARNINGS", { unsigned: true }).notNull(), // Sum of warnings
153
+
154
+ sumLatency: bigint("SUM_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Sum of latency (ns)
155
+ maxLatency: bigint("MAX_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Max latency (ns)
156
+ minLatency: bigint("MIN_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Min latency (ns)
157
+ avgLatency: bigint("AVG_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Avg latency (ns)
158
+
159
+ avgParseLatency: bigint("AVG_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Avg parse time (ns)
160
+ maxParseLatency: bigint("MAX_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Max parse time (ns)
161
+ avgCompileLatency: bigint("AVG_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Avg compile time (ns)
162
+ maxCompileLatency: bigint("MAX_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(), // Max compile time (ns)
163
+
164
+ sumCopTaskNum: bigint("SUM_COP_TASK_NUM", { mode: "bigint", unsigned: true }).notNull(), // Total number of cop tasks
165
+ maxCopProcessTime: bigint("MAX_COP_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max TiKV coprocessor processing time (ns)
166
+ maxCopProcessAddress: varchar("MAX_COP_PROCESS_ADDRESS", { length: 256 }), // Address of cop task with max processing time
167
+ maxCopWaitTime: bigint("MAX_COP_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max TiKV coprocessor wait time (ns)
168
+ maxCopWaitAddress: varchar("MAX_COP_WAIT_ADDRESS", { length: 256 }), // Address of cop task with max wait time
169
+
170
+ avgProcessTime: bigint("AVG_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg TiKV processing time (ns)
171
+ maxProcessTime: bigint("MAX_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max TiKV processing time (ns)
172
+ avgWaitTime: bigint("AVG_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg TiKV wait time (ns)
173
+ maxWaitTime: bigint("MAX_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max TiKV wait time (ns)
174
+
175
+ avgBackoffTime: bigint("AVG_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg backoff time before retry (ns)
176
+ maxBackoffTime: bigint("MAX_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max backoff time before retry (ns)
177
+
178
+ avgTotalKeys: bigint("AVG_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(), // Avg scanned keys
179
+ maxTotalKeys: bigint("MAX_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(), // Max scanned keys
180
+ avgProcessedKeys: bigint("AVG_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(), // Avg processed keys
181
+ maxProcessedKeys: bigint("MAX_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(), // Max processed keys
182
+
183
+ avgRocksdbDeleteSkippedCount: double("AVG_ROCKSDB_DELETE_SKIPPED_COUNT").notNull(), // Avg RocksDB deletes skipped
184
+ maxRocksdbDeleteSkippedCount: int("MAX_ROCKSDB_DELETE_SKIPPED_COUNT", {
185
+ unsigned: true,
186
+ }).notNull(), // Max RocksDB deletes skipped
187
+ avgRocksdbKeySkippedCount: double("AVG_ROCKSDB_KEY_SKIPPED_COUNT").notNull(), // Avg RocksDB keys skipped
188
+ maxRocksdbKeySkippedCount: int("MAX_ROCKSDB_KEY_SKIPPED_COUNT", { unsigned: true }).notNull(), // Max RocksDB keys skipped
189
+ avgRocksdbBlockCacheHitCount: double("AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT").notNull(), // Avg RocksDB block cache hits
190
+ maxRocksdbBlockCacheHitCount: int("MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT", {
191
+ unsigned: true,
192
+ }).notNull(), // Max RocksDB block cache hits
193
+ avgRocksdbBlockReadCount: double("AVG_ROCKSDB_BLOCK_READ_COUNT").notNull(), // Avg RocksDB block reads
194
+ maxRocksdbBlockReadCount: int("MAX_ROCKSDB_BLOCK_READ_COUNT", { unsigned: true }).notNull(), // Max RocksDB block reads
195
+ avgRocksdbBlockReadByte: double("AVG_ROCKSDB_BLOCK_READ_BYTE").notNull(), // Avg RocksDB block read bytes
196
+ maxRocksdbBlockReadByte: int("MAX_ROCKSDB_BLOCK_READ_BYTE", { unsigned: true }).notNull(), // Max RocksDB block read bytes
197
+
198
+ avgPrewriteTime: bigint("AVG_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg prewrite phase time (ns)
199
+ maxPrewriteTime: bigint("MAX_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max prewrite phase time (ns)
200
+ avgCommitTime: bigint("AVG_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg commit phase time (ns)
201
+ maxCommitTime: bigint("MAX_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max commit phase time (ns)
202
+ avgGetCommitTsTime: bigint("AVG_GET_COMMIT_TS_TIME", {
203
+ mode: "bigint",
204
+ unsigned: true,
205
+ }).notNull(), // Avg get commit_ts time (ns)
206
+ maxGetCommitTsTime: bigint("MAX_GET_COMMIT_TS_TIME", {
207
+ mode: "bigint",
208
+ unsigned: true,
209
+ }).notNull(), // Max get commit_ts time (ns)
210
+ avgCommitBackoffTime: bigint("AVG_COMMIT_BACKOFF_TIME", {
211
+ mode: "bigint",
212
+ unsigned: true,
213
+ }).notNull(), // Avg backoff during commit (ns)
214
+ maxCommitBackoffTime: bigint("MAX_COMMIT_BACKOFF_TIME", {
215
+ mode: "bigint",
216
+ unsigned: true,
217
+ }).notNull(), // Max backoff during commit (ns)
218
+ avgResolveLockTime: bigint("AVG_RESOLVE_LOCK_TIME", {
219
+ mode: "bigint",
220
+ unsigned: true,
221
+ }).notNull(), // Avg resolve lock time (ns)
222
+ maxResolveLockTime: bigint("MAX_RESOLVE_LOCK_TIME", {
223
+ mode: "bigint",
224
+ unsigned: true,
225
+ }).notNull(), // Max resolve lock time (ns)
226
+ avgLocalLatchWaitTime: bigint("AVG_LOCAL_LATCH_WAIT_TIME", {
227
+ mode: "bigint",
228
+ unsigned: true,
229
+ }).notNull(), // Avg local latch wait (ns)
230
+ maxLocalLatchWaitTime: bigint("MAX_LOCAL_LATCH_WAIT_TIME", {
231
+ mode: "bigint",
232
+ unsigned: true,
233
+ }).notNull(), // Max local latch wait (ns)
234
+
235
+ avgWriteKeys: double("AVG_WRITE_KEYS").notNull(), // Avg number of written keys
236
+ maxWriteKeys: bigint("MAX_WRITE_KEYS", { mode: "bigint", unsigned: true }).notNull(), // Max written keys
237
+ avgWriteSize: double("AVG_WRITE_SIZE").notNull(), // Avg written bytes
238
+ maxWriteSize: bigint("MAX_WRITE_SIZE", { mode: "bigint", unsigned: true }).notNull(), // Max written bytes
239
+ avgPrewriteRegions: double("AVG_PREWRITE_REGIONS").notNull(), // Avg regions in prewrite
240
+ maxPrewriteRegions: int("MAX_PREWRITE_REGIONS", { unsigned: true }).notNull(), // Max regions in prewrite
241
+ avgTxnRetry: double("AVG_TXN_RETRY").notNull(), // Avg transaction retry count
242
+ maxTxnRetry: int("MAX_TXN_RETRY", { unsigned: true }).notNull(), // Max transaction retry count
243
+
244
+ sumExecRetry: bigint("SUM_EXEC_RETRY", { mode: "bigint", unsigned: true }).notNull(), // Sum of execution retries (pessimistic)
245
+ sumExecRetryTime: bigint("SUM_EXEC_RETRY_TIME", { mode: "bigint", unsigned: true }).notNull(), // Sum time of execution retries (ns)
246
+ sumBackoffTimes: bigint("SUM_BACKOFF_TIMES", { mode: "bigint", unsigned: true }).notNull(), // Sum of backoff retries
247
+ backoffTypes: varchar("BACKOFF_TYPES", { length: 1024 }), // Backoff types with counts
248
+
249
+ avgMem: bigint("AVG_MEM", { mode: "bigint", unsigned: true }).notNull(), // Avg memory used (bytes)
250
+ maxMem: bigint("MAX_MEM", { mode: "bigint", unsigned: true }).notNull(), // Max memory used (bytes)
251
+ avgDisk: bigint("AVG_DISK", { mode: "bigint", unsigned: true }).notNull(), // Avg disk used (bytes)
252
+ maxDisk: bigint("MAX_DISK", { mode: "bigint", unsigned: true }).notNull(), // Max disk used (bytes)
253
+
254
+ avgKvTime: bigint("AVG_KV_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg time spent in TiKV (ns)
255
+ avgPdTime: bigint("AVG_PD_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg time spent in PD (ns)
256
+ avgBackoffTotalTime: bigint("AVG_BACKOFF_TOTAL_TIME", {
257
+ mode: "bigint",
258
+ unsigned: true,
259
+ }).notNull(), // Avg total backoff time (ns)
260
+ avgWriteSqlRespTime: bigint("AVG_WRITE_SQL_RESP_TIME", {
261
+ mode: "bigint",
262
+ unsigned: true,
263
+ }).notNull(), // Avg write SQL response time (ns)
264
+
265
+ avgTidbCpuTime: bigint("AVG_TIDB_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg TiDB CPU time (ns)
266
+ avgTikvCpuTime: bigint("AVG_TIKV_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg TiKV CPU time (ns)
267
+
268
+ maxResultRows: bigint("MAX_RESULT_ROWS", { mode: "bigint" }).notNull(), // Max number of result rows
269
+ minResultRows: bigint("MIN_RESULT_ROWS", { mode: "bigint" }).notNull(), // Min number of result rows
270
+ avgResultRows: bigint("AVG_RESULT_ROWS", { mode: "bigint" }).notNull(), // Avg number of result rows
271
+
272
+ prepared: boolean("PREPARED").notNull(), // Whether statements are prepared
273
+ avgAffectedRows: double("AVG_AFFECTED_ROWS").notNull(), // Avg affected rows
274
+
275
+ firstSeen: timestamp("FIRST_SEEN", { mode: "string" }).notNull(), // First time statements observed
276
+ lastSeen: timestamp("LAST_SEEN", { mode: "string" }).notNull(), // Last time statements observed
277
+
278
+ planInCache: boolean("PLAN_IN_CACHE").notNull(), // Whether last stmt hit plan cache
279
+ planCacheHits: bigint("PLAN_CACHE_HITS", { mode: "bigint" }).notNull(), // Number of plan cache hits
280
+ planInBinding: boolean("PLAN_IN_BINDING").notNull(), // Whether matched bindings
281
+
282
+ querySampleText: text("QUERY_SAMPLE_TEXT"), // Sampled original SQL
283
+ prevSampleText: text("PREV_SAMPLE_TEXT"), // Sampled previous SQL before commit
284
+
285
+ planDigest: varchar("PLAN_DIGEST", { length: 64 }), // Plan digest hash
286
+ plan: text("PLAN"), // Sampled textual plan
287
+ binaryPlan: text("BINARY_PLAN"), // Sampled binary plan
288
+
289
+ charset: varchar("CHARSET", { length: 64 }), // Sampled charset
290
+ collation: varchar("COLLATION", { length: 64 }), // Sampled collation
291
+ planHint: varchar("PLAN_HINT", { length: 64 }), // Sampled plan hint
292
+
293
+ maxRequestUnitRead: double("MAX_REQUEST_UNIT_READ").notNull(), // Max RU cost (read)
294
+ avgRequestUnitRead: double("AVG_REQUEST_UNIT_READ").notNull(), // Avg RU cost (read)
295
+ maxRequestUnitWrite: double("MAX_REQUEST_UNIT_WRITE").notNull(), // Max RU cost (write)
296
+ avgRequestUnitWrite: double("AVG_REQUEST_UNIT_WRITE").notNull(), // Avg RU cost (write)
297
+
298
+ maxQueuedRcTime: bigint("MAX_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(), // Max queued time waiting for RU (ns)
299
+ avgQueuedRcTime: bigint("AVG_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(), // Avg queued time waiting for RU (ns)
300
+
301
+ resourceGroup: varchar("RESOURCE_GROUP", { length: 64 }), // Bound resource group name
302
+
303
+ planCacheUnqualified: bigint("PLAN_CACHE_UNQUALIFIED", { mode: "bigint" }).notNull(), // Times not eligible for plan cache
304
+ planCacheUnqualifiedLastReason: text("PLAN_CACHE_UNQUALIFIED_LAST_REASON"), // Last reason of plan cache ineligibility
305
+ });
306
+
307
+ export const clusterStatementsSummaryHistory = informationSchema.table(
308
+ "CLUSTER_STATEMENTS_SUMMARY_HISTORY",
309
+ createClusterStatementsSummarySchema(),
310
+ );
311
+
312
+ // Types
313
+ export type ClusterStatementsSummaryHistory = typeof clusterStatementsSummaryHistory.$inferSelect;
314
+
315
+ export const clusterStatementsSummary = informationSchema.table(
316
+ "CLUSTER_STATEMENTS_SUMMARY",
317
+ createClusterStatementsSummarySchema(),
318
+ );
319
+
320
+ // Types
321
+ export type ClusterStatementsSummary = typeof clusterStatementsSummary.$inferSelect;
322
+
11
323
  export interface ExplainAnalyzeRow {
12
324
  id: string;
13
325
  estRows?: string;
@@ -267,8 +267,8 @@ async function handleSuccessfulExecution(
267
267
  if (shouldClearCacheOnError(error)) {
268
268
  await evictLocalCacheQuery(table, options);
269
269
  if (isCached) {
270
- await clearCache(table, options).catch(() => {
271
- console.warn("Ignore cache clear errors");
270
+ await clearCache(table, options).catch((e) => {
271
+ console.warn("Ignore cache clear errors", e);
272
272
  });
273
273
  } else {
274
274
  await saveTableIfInsideCacheContext(table);
@@ -104,7 +104,7 @@ export async function saveQueryLocalCacheQuery<
104
104
  };
105
105
  if (options.logRawSqlQuery) {
106
106
  const q = sql.toSQL();
107
- console.log(
107
+ console.debug(
108
108
  `[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${q.sql}", params=${JSON.stringify(q.params)}`,
109
109
  );
110
110
  }
@@ -142,7 +142,7 @@ export async function getQueryLocalCacheQuery<
142
142
  if (context.cache[key] && context.cache[key].sql === sql.toSQL().sql.toLowerCase()) {
143
143
  if (options.logRawSqlQuery) {
144
144
  const q = sql.toSQL();
145
- console.log(
145
+ console.debug(
146
146
  `[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${q.sql}", params=${JSON.stringify(q.params)}`,
147
147
  );
148
148
  }
@@ -287,7 +287,9 @@ export async function clearExpiredCache(options: ForgeSqlOrmOptions): Promise<vo
287
287
  );
288
288
  } finally {
289
289
  const duration = DateTime.now().toSeconds() - startTime.toSeconds();
290
- console.info(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
290
+ if (options?.logRawSqlQuery) {
291
+ console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
292
+ }
291
293
  }
292
294
  }
293
295
 
@@ -13,27 +13,22 @@ export const forgeDriver = async (
13
13
  insertId?: number;
14
14
  affectedRows?: number;
15
15
  }> => {
16
- try {
17
- if (method == "execute") {
18
- const sqlStatement = sql.prepare<UpdateQueryResponse>(query);
19
- if (params) {
20
- sqlStatement.bindParams(...params);
21
- }
22
- const updateQueryResponseResults = await sqlStatement.execute();
23
- let result = updateQueryResponseResults.rows as any;
24
- return { ...result, rows: [result] };
25
- } else {
26
- const sqlStatement = await sql.prepare<unknown>(query);
27
- if (params) {
28
- await sqlStatement.bindParams(...params);
29
- }
30
- const result = (await sqlStatement.execute()) as ForgeSQLResult;
31
- let rows;
32
- rows = (result.rows as any[]).map((r) => Object.values(r as Record<string, unknown>));
33
- return { rows: rows };
16
+ if (method == "execute") {
17
+ const sqlStatement = sql.prepare<UpdateQueryResponse>(query);
18
+ if (params) {
19
+ sqlStatement.bindParams(...params);
34
20
  }
35
- } catch (error) {
36
- console.error("SQL Error:", JSON.stringify(error));
37
- throw error;
21
+ const updateQueryResponseResults = await sqlStatement.execute();
22
+ let result = updateQueryResponseResults.rows as any;
23
+ return { ...result, rows: [result] };
24
+ } else {
25
+ const sqlStatement = await sql.prepare<unknown>(query);
26
+ if (params) {
27
+ await sqlStatement.bindParams(...params);
28
+ }
29
+ const result = (await sqlStatement.execute()) as ForgeSQLResult;
30
+ let rows;
31
+ rows = (result.rows as any[]).map((r) => Object.values(r as Record<string, unknown>));
32
+ return { rows: rows };
38
33
  }
39
34
  };
@@ -19,9 +19,16 @@ export function createForgeDriverProxy(options?: SqlHints, logRawSqlQuery?: bool
19
19
  const modifiedQuery = injectSqlHints(query, options);
20
20
 
21
21
  if (options && logRawSqlQuery && modifiedQuery !== query) {
22
- console.warn("modified query: " + modifiedQuery);
22
+ console.debug("injected Hints: " + modifiedQuery);
23
+ }
24
+ try {
25
+ // Call the original forgeDriver with the modified query
26
+ return await forgeDriver(modifiedQuery, params, method);
27
+ } catch (error) {
28
+ if (logRawSqlQuery) {
29
+ console.debug("SQL Error:", JSON.stringify(error));
30
+ }
31
+ throw error;
23
32
  }
24
- // Call the original forgeDriver with the modified query
25
- return forgeDriver(modifiedQuery, params, method);
26
33
  };
27
34
  }
@@ -57,13 +57,19 @@ export const parseDateTime = (value: string | Date, format: string): Date => {
57
57
  if (dt.isValid) {
58
58
  result = dt.toJSDate();
59
59
  } else {
60
- // 2. Try to parse as ISO string
61
- const isoDt = DateTime.fromISO(value);
62
- if (isoDt.isValid) {
63
- result = isoDt.toJSDate();
60
+ // 2. Try to parse as SQL string
61
+ const sqlDt = DateTime.fromSQL(value);
62
+ if (sqlDt.isValid) {
63
+ result = sqlDt.toJSDate();
64
64
  } else {
65
- // 3. Fallback: use native Date constructor
66
- result = new Date(value);
65
+ // 3. Try to parse as RFC2822 string
66
+ const isoDt = DateTime.fromRFC2822(value);
67
+ if (isoDt.isValid) {
68
+ result = isoDt.toJSDate();
69
+ } else {
70
+ // 4. Fallback: use native Date constructor
71
+ result = new Date(value);
72
+ }
67
73
  }
68
74
  }
69
75
  }
@@ -81,7 +87,11 @@ export const parseDateTime = (value: string | Date, format: string): Date => {
81
87
  * @returns Formatted date string.
82
88
  * @throws Error if value cannot be parsed as a valid date.
83
89
  */
84
- export function formatDateTime(value: Date | string | number, format: string): string {
90
+ export function formatDateTime(
91
+ value: Date | string | number,
92
+ format: string,
93
+ isTimeStamp: boolean,
94
+ ): string {
85
95
  let dt: DateTime | null = null;
86
96
 
87
97
  if (value instanceof Date) {
@@ -111,6 +121,21 @@ export function formatDateTime(value: Date | string | number, format: string): s
111
121
  if (!dt?.isValid) {
112
122
  throw new Error("Invalid Date");
113
123
  }
124
+ const minDate = DateTime.fromSeconds(1);
125
+ const maxDate = DateTime.fromMillis(2147483647 * 1000); // 2038-01-19 03:14:07.999 UTC
126
+
127
+ if (isTimeStamp) {
128
+ if (dt < minDate) {
129
+ throw new Error(
130
+ "Atlassian Forge does not support zero or negative timestamps. Allowed range: from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'.",
131
+ );
132
+ }
133
+ if (dt > maxDate) {
134
+ throw new Error(
135
+ "Atlassian Forge does not support timestamps beyond 2038-01-19 03:14:07.999999. Please use a smaller date within the supported range.",
136
+ );
137
+ }
138
+ }
114
139
 
115
140
  return dt.toFormat(format);
116
141
  }
@@ -40,13 +40,17 @@ export const applySchemaMigrations = async (
40
40
  console.info("Migrations applied:", successfulMigrations);
41
41
 
42
42
  const migrationList = await migrationRunner.list();
43
- const migrationHistory =
44
- Array.isArray(migrationList) && migrationList.length > 0
45
- ? migrationList
46
- .sort((a, b) => a.migratedAt.getTime() - b.migratedAt.getTime())
47
- .map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`)
48
- .join("\n")
49
- : "No migrations found";
43
+ let migrationHistory = "No migrations found";
44
+
45
+ if (Array.isArray(migrationList) && migrationList.length > 0) {
46
+ const sortedMigrations = migrationList.toSorted(
47
+ (a, b) => a.migratedAt.getTime() - b.migratedAt.getTime(),
48
+ );
49
+
50
+ migrationHistory = sortedMigrations
51
+ .map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`)
52
+ .join("\n");
53
+ }
50
54
 
51
55
  console.info("Migrations history:\nid, name, migrated_at\n", migrationHistory);
52
56
 
@@ -56,18 +60,20 @@ export const applySchemaMigrations = async (
56
60
  statusText: "OK",
57
61
  body: "Migrations successfully executed",
58
62
  };
59
- } catch (error) {
60
- try {
61
- console.error("Error during migration:", JSON.stringify(error));
62
- } catch (e) {
63
- console.trace("Error stringify:", e);
64
- console.error("Error during migration:", error);
65
- }
63
+ } catch (error: any) {
64
+ const errorMessage =
65
+ error?.cause?.context?.debug?.sqlMessage ??
66
+ error?.cause?.context?.debug?.message ??
67
+ error?.debug?.context?.sqlMessage ??
68
+ error?.debug?.context?.message ??
69
+ error.message ??
70
+ "Unknown error occurred";
71
+ console.error("Error during migration:", errorMessage);
66
72
  return {
67
73
  headers: { "Content-Type": ["application/json"] },
68
74
  statusCode: 500,
69
75
  statusText: "Internal Server Error",
70
- body: error instanceof Error ? error.message : "Unknown error during migration",
76
+ body: error instanceof Error ? errorMessage : "Unknown error during migration",
71
77
  };
72
78
  }
73
79
  };
@@ -34,7 +34,7 @@ export async function dropSchemaMigrations(): Promise<TriggerResponse<string>> {
34
34
 
35
35
  // Execute each statement
36
36
  for (const statement of dropStatements) {
37
- console.warn(statement);
37
+ console.debug(`execute DDL: ${statement}`);
38
38
  await sql.executeDDL(statement);
39
39
  }
40
40
 
@@ -42,9 +42,13 @@ export async function dropSchemaMigrations(): Promise<TriggerResponse<string>> {
42
42
  200,
43
43
  "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone.",
44
44
  );
45
- } catch (error: unknown) {
46
- console.error(error);
47
- const errorMessage = error instanceof Error ? error.message : "Unknown error occurred";
45
+ } catch (error: any) {
46
+ const errorMessage =
47
+ error?.debug?.sqlMessage ??
48
+ error?.debug?.message ??
49
+ error.message ??
50
+ "Unknown error occurred";
51
+ console.error(errorMessage);
48
52
  return getHttpResponse<string>(500, errorMessage);
49
53
  }
50
54
  }
@@ -34,7 +34,7 @@ export async function dropTableSchemaMigrations(): Promise<TriggerResponse<strin
34
34
 
35
35
  // Execute each statement
36
36
  for (const statement of dropStatements) {
37
- console.warn(statement);
37
+ console.debug(`execute DDL: ${statement}`);
38
38
  await sql.executeDDL(statement);
39
39
  }
40
40
 
@@ -42,9 +42,13 @@ export async function dropTableSchemaMigrations(): Promise<TriggerResponse<strin
42
42
  200,
43
43
  "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone.",
44
44
  );
45
- } catch (error: unknown) {
46
- console.error(error);
47
- const errorMessage = error instanceof Error ? error.message : "Unknown error occurred";
45
+ } catch (error: any) {
46
+ const errorMessage =
47
+ error?.debug?.sqlMessage ??
48
+ error?.debug?.message ??
49
+ error.message ??
50
+ "Unknown error occurred";
51
+ console.error(errorMessage);
48
52
  return getHttpResponse<string>(500, errorMessage);
49
53
  }
50
54
  }
@@ -40,9 +40,13 @@ export async function fetchSchemaWebTrigger(): Promise<TriggerResponse<string>>
40
40
  const sqlStatements = wrapWithForeignKeyChecks(createTableStatements);
41
41
 
42
42
  return getHttpResponse<string>(200, sqlStatements.join(";\n"));
43
- } catch (error: unknown) {
44
- console.error(JSON.stringify(error));
45
- const errorMessage = error instanceof Error ? error.message : "Unknown error occurred";
43
+ } catch (error: any) {
44
+ const errorMessage =
45
+ error?.debug?.sqlMessage ??
46
+ error?.debug?.message ??
47
+ error.message ??
48
+ "Unknown error occurred";
49
+ console.error(errorMessage);
46
50
  return getHttpResponse<string>(500, errorMessage);
47
51
  }
48
52
  }
@@ -3,6 +3,7 @@ export * from "./applyMigrationsWebTrigger";
3
3
  export * from "./fetchSchemaWebTrigger";
4
4
  export * from "./dropTablesMigrationWebTrigger";
5
5
  export * from "./clearCacheSchedulerTrigger";
6
+ export * from "./topSlowestStatementLastHourTrigger";
6
7
 
7
8
  export interface TriggerResponse<BODY> {
8
9
  body?: BODY;