forge-sql-orm 2.1.11 → 2.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. package/README.md +800 -541
  2. package/dist/core/ForgeSQLAnalyseOperations.d.ts.map +1 -1
  3. package/dist/core/ForgeSQLAnalyseOperations.js +257 -0
  4. package/dist/core/ForgeSQLAnalyseOperations.js.map +1 -0
  5. package/dist/core/ForgeSQLCacheOperations.js +172 -0
  6. package/dist/core/ForgeSQLCacheOperations.js.map +1 -0
  7. package/dist/core/ForgeSQLCrudOperations.js +349 -0
  8. package/dist/core/ForgeSQLCrudOperations.js.map +1 -0
  9. package/dist/core/ForgeSQLORM.d.ts +1 -1
  10. package/dist/core/ForgeSQLORM.d.ts.map +1 -1
  11. package/dist/core/ForgeSQLORM.js +1191 -0
  12. package/dist/core/ForgeSQLORM.js.map +1 -0
  13. package/dist/core/ForgeSQLQueryBuilder.js +77 -0
  14. package/dist/core/ForgeSQLQueryBuilder.js.map +1 -0
  15. package/dist/core/ForgeSQLSelectOperations.js +81 -0
  16. package/dist/core/ForgeSQLSelectOperations.js.map +1 -0
  17. package/dist/core/SystemTables.js +258 -0
  18. package/dist/core/SystemTables.js.map +1 -0
  19. package/dist/index.js +30 -0
  20. package/dist/index.js.map +1 -0
  21. package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
  22. package/dist/lib/drizzle/extensions/additionalActions.js +527 -0
  23. package/dist/lib/drizzle/extensions/additionalActions.js.map +1 -0
  24. package/dist/utils/cacheContextUtils.d.ts.map +1 -1
  25. package/dist/utils/cacheContextUtils.js +198 -0
  26. package/dist/utils/cacheContextUtils.js.map +1 -0
  27. package/dist/utils/cacheUtils.d.ts.map +1 -1
  28. package/dist/utils/cacheUtils.js +383 -0
  29. package/dist/utils/cacheUtils.js.map +1 -0
  30. package/dist/utils/forgeDriver.d.ts.map +1 -1
  31. package/dist/utils/forgeDriver.js +139 -0
  32. package/dist/utils/forgeDriver.js.map +1 -0
  33. package/dist/utils/forgeDriverProxy.js +68 -0
  34. package/dist/utils/forgeDriverProxy.js.map +1 -0
  35. package/dist/utils/metadataContextUtils.d.ts.map +1 -1
  36. package/dist/utils/metadataContextUtils.js +28 -0
  37. package/dist/utils/metadataContextUtils.js.map +1 -0
  38. package/dist/utils/requestTypeContextUtils.js +10 -0
  39. package/dist/utils/requestTypeContextUtils.js.map +1 -0
  40. package/dist/utils/sqlHints.js +52 -0
  41. package/dist/utils/sqlHints.js.map +1 -0
  42. package/dist/utils/sqlUtils.d.ts.map +1 -1
  43. package/dist/utils/sqlUtils.js +590 -0
  44. package/dist/utils/sqlUtils.js.map +1 -0
  45. package/dist/webtriggers/applyMigrationsWebTrigger.js +77 -0
  46. package/dist/webtriggers/applyMigrationsWebTrigger.js.map +1 -0
  47. package/dist/webtriggers/clearCacheSchedulerTrigger.js +83 -0
  48. package/dist/webtriggers/clearCacheSchedulerTrigger.js.map +1 -0
  49. package/dist/webtriggers/dropMigrationWebTrigger.js +54 -0
  50. package/dist/webtriggers/dropMigrationWebTrigger.js.map +1 -0
  51. package/dist/webtriggers/dropTablesMigrationWebTrigger.js +54 -0
  52. package/dist/webtriggers/dropTablesMigrationWebTrigger.js.map +1 -0
  53. package/dist/webtriggers/fetchSchemaWebTrigger.js +82 -0
  54. package/dist/webtriggers/fetchSchemaWebTrigger.js.map +1 -0
  55. package/dist/webtriggers/index.js +40 -0
  56. package/dist/webtriggers/index.js.map +1 -0
  57. package/dist/webtriggers/slowQuerySchedulerTrigger.d.ts.map +1 -1
  58. package/dist/webtriggers/slowQuerySchedulerTrigger.js +80 -0
  59. package/dist/webtriggers/slowQuerySchedulerTrigger.js.map +1 -0
  60. package/package.json +28 -23
  61. package/src/core/ForgeSQLAnalyseOperations.ts +3 -2
  62. package/src/core/ForgeSQLORM.ts +33 -27
  63. package/src/lib/drizzle/extensions/additionalActions.ts +11 -0
  64. package/src/utils/cacheContextUtils.ts +9 -6
  65. package/src/utils/cacheUtils.ts +28 -5
  66. package/src/utils/forgeDriver.ts +10 -6
  67. package/src/utils/metadataContextUtils.ts +1 -4
  68. package/src/utils/sqlUtils.ts +136 -125
  69. package/src/webtriggers/slowQuerySchedulerTrigger.ts +40 -33
  70. package/dist/ForgeSQLORM.js +0 -3896
  71. package/dist/ForgeSQLORM.js.map +0 -1
  72. package/dist/ForgeSQLORM.mjs +0 -3879
  73. package/dist/ForgeSQLORM.mjs.map +0 -1
@@ -1,3879 +0,0 @@
1
- import { isTable, sql as sql$1, and, isNotNull, not, ilike, notInArray, gte, ne, eq, getTableColumns } from "drizzle-orm";
2
- import { DateTime } from "luxon";
3
- import { isSQLWrapper } from "drizzle-orm/sql/sql";
4
- import { mysqlTable, timestamp, varchar, bigint, mysqlSchema, longtext, double, boolean, text, int, customType } from "drizzle-orm/mysql-core";
5
- import { sql, migrationRunner } from "@forge/sql";
6
- import { AsyncLocalStorage } from "node:async_hooks";
7
- import { getTableName } from "drizzle-orm/table";
8
- import * as crypto from "crypto";
9
- import { kvs, WhereConditions, Filter, FilterConditions } from "@forge/kvs";
10
- import { drizzle } from "drizzle-orm/mysql-proxy";
11
- const migrations = mysqlTable("__migrations", {
12
- id: bigint("id", { mode: "number" }).primaryKey().autoincrement(),
13
- name: varchar("name", { length: 255 }).notNull(),
14
- migratedAt: timestamp("migratedAt").defaultNow().notNull()
15
- });
16
- const informationSchema = mysqlSchema("information_schema");
17
- const slowQuery = informationSchema.table("CLUSTER_SLOW_QUERY", {
18
- time: timestamp("Time", { fsp: 6, mode: "string" }).notNull(),
19
- // Timestamp when the slow query was recorded
20
- txnStartTs: bigint("Txn_start_ts", { mode: "bigint", unsigned: true }),
21
- // Transaction start timestamp (TSO)
22
- user: varchar("User", { length: 64 }),
23
- // User executing the query
24
- host: varchar("Host", { length: 64 }),
25
- // Host from which the query originated
26
- connId: bigint("Conn_ID", { mode: "bigint", unsigned: true }),
27
- // Connection ID
28
- sessionAlias: varchar("Session_alias", { length: 64 }),
29
- // Session alias
30
- execRetryCount: bigint("Exec_retry_count", { mode: "bigint", unsigned: true }),
31
- // Number of retries during execution
32
- execRetryTime: double("Exec_retry_time"),
33
- // Time spent in retries
34
- queryTime: double("Query_time"),
35
- // Total execution time
36
- parseTime: double("Parse_time"),
37
- // Time spent parsing SQL
38
- compileTime: double("Compile_time"),
39
- // Time spent compiling query plan
40
- rewriteTime: double("Rewrite_time"),
41
- // Time spent rewriting query
42
- preprocSubqueries: bigint("Preproc_subqueries", { mode: "bigint", unsigned: true }),
43
- // Number of subqueries preprocessed
44
- preprocSubqueriesTime: double("Preproc_subqueries_time"),
45
- // Time spent preprocessing subqueries
46
- optimizeTime: double("Optimize_time"),
47
- // Time spent in optimizer
48
- waitTs: double("Wait_TS"),
49
- // Wait time for getting TSO
50
- prewriteTime: double("Prewrite_time"),
51
- // Time spent in prewrite phase
52
- waitPrewriteBinlogTime: double("Wait_prewrite_binlog_time"),
53
- // Time waiting for binlog prewrite
54
- commitTime: double("Commit_time"),
55
- // Commit duration
56
- getCommitTsTime: double("Get_commit_ts_time"),
57
- // Time waiting for commit TSO
58
- commitBackoffTime: double("Commit_backoff_time"),
59
- // Backoff time during commit
60
- backoffTypes: varchar("Backoff_types", { length: 64 }),
61
- // Types of backoff occurred
62
- resolveLockTime: double("Resolve_lock_time"),
63
- // Time resolving locks
64
- localLatchWaitTime: double("Local_latch_wait_time"),
65
- // Time waiting on local latch
66
- writeKeys: bigint("Write_keys", { mode: "bigint" }),
67
- // Number of keys written
68
- writeSize: bigint("Write_size", { mode: "bigint" }),
69
- // Amount of data written
70
- prewriteRegion: bigint("Prewrite_region", { mode: "bigint" }),
71
- // Regions involved in prewrite
72
- txnRetry: bigint("Txn_retry", { mode: "bigint" }),
73
- // Transaction retry count
74
- copTime: double("Cop_time"),
75
- // Time spent in coprocessor
76
- processTime: double("Process_time"),
77
- // Processing time
78
- waitTime: double("Wait_time"),
79
- // Wait time in TiKV
80
- backoffTime: double("Backoff_time"),
81
- // Backoff wait time
82
- lockKeysTime: double("LockKeys_time"),
83
- // Time spent waiting for locks
84
- requestCount: bigint("Request_count", { mode: "bigint", unsigned: true }),
85
- // Total number of requests
86
- totalKeys: bigint("Total_keys", { mode: "bigint", unsigned: true }),
87
- // Total keys scanned
88
- processKeys: bigint("Process_keys", { mode: "bigint", unsigned: true }),
89
- // Keys processed
90
- rocksdbDeleteSkippedCount: bigint("Rocksdb_delete_skipped_count", {
91
- mode: "bigint",
92
- unsigned: true
93
- }),
94
- // RocksDB delete skips
95
- rocksdbKeySkippedCount: bigint("Rocksdb_key_skipped_count", { mode: "bigint", unsigned: true }),
96
- // RocksDB key skips
97
- rocksdbBlockCacheHitCount: bigint("Rocksdb_block_cache_hit_count", {
98
- mode: "bigint",
99
- unsigned: true
100
- }),
101
- // RocksDB block cache hits
102
- rocksdbBlockReadCount: bigint("Rocksdb_block_read_count", { mode: "bigint", unsigned: true }),
103
- // RocksDB block reads
104
- rocksdbBlockReadByte: bigint("Rocksdb_block_read_byte", { mode: "bigint", unsigned: true }),
105
- // RocksDB block read bytes
106
- db: varchar("DB", { length: 64 }),
107
- // Database name
108
- indexNames: varchar("Index_names", { length: 100 }),
109
- // Indexes used
110
- isInternal: boolean("Is_internal"),
111
- // Whether the query is internal
112
- digest: varchar("Digest", { length: 64 }),
113
- // SQL digest hash
114
- stats: varchar("Stats", { length: 512 }),
115
- // Stats used during planning
116
- copProcAvg: double("Cop_proc_avg"),
117
- // Coprocessor average processing time
118
- copProcP90: double("Cop_proc_p90"),
119
- // Coprocessor 90th percentile processing time
120
- copProcMax: double("Cop_proc_max"),
121
- // Coprocessor max processing time
122
- copProcAddr: varchar("Cop_proc_addr", { length: 64 }),
123
- // Coprocessor address for processing
124
- copWaitAvg: double("Cop_wait_avg"),
125
- // Coprocessor average wait time
126
- copWaitP90: double("Cop_wait_p90"),
127
- // Coprocessor 90th percentile wait time
128
- copWaitMax: double("Cop_wait_max"),
129
- // Coprocessor max wait time
130
- copWaitAddr: varchar("Cop_wait_addr", { length: 64 }),
131
- // Coprocessor address for wait
132
- memMax: bigint("Mem_max", { mode: "bigint" }),
133
- // Max memory usage
134
- diskMax: bigint("Disk_max", { mode: "bigint" }),
135
- // Max disk usage
136
- kvTotal: double("KV_total"),
137
- // Total KV request time
138
- pdTotal: double("PD_total"),
139
- // Total PD request time
140
- backoffTotal: double("Backoff_total"),
141
- // Total backoff time
142
- writeSqlResponseTotal: double("Write_sql_response_total"),
143
- // SQL response write time
144
- resultRows: bigint("Result_rows", { mode: "bigint" }),
145
- // Rows returned
146
- warnings: longtext("Warnings"),
147
- // Warnings during execution
148
- backoffDetail: varchar("Backoff_Detail", { length: 4096 }),
149
- // Detailed backoff info
150
- prepared: boolean("Prepared"),
151
- // Whether query was prepared
152
- succ: boolean("Succ"),
153
- // Success flag
154
- isExplicitTxn: boolean("IsExplicitTxn"),
155
- // Whether explicit transaction
156
- isWriteCacheTable: boolean("IsWriteCacheTable"),
157
- // Whether wrote to cache table
158
- planFromCache: boolean("Plan_from_cache"),
159
- // Plan was from cache
160
- planFromBinding: boolean("Plan_from_binding"),
161
- // Plan was from binding
162
- hasMoreResults: boolean("Has_more_results"),
163
- // Query returned multiple results
164
- resourceGroup: varchar("Resource_group", { length: 64 }),
165
- // Resource group name
166
- requestUnitRead: double("Request_unit_read"),
167
- // RU consumed for read
168
- requestUnitWrite: double("Request_unit_write"),
169
- // RU consumed for write
170
- timeQueuedByRc: double("Time_queued_by_rc"),
171
- // Time queued by resource control
172
- tidbCpuTime: double("Tidb_cpu_time"),
173
- // TiDB CPU time
174
- tikvCpuTime: double("Tikv_cpu_time"),
175
- // TiKV CPU time
176
- plan: longtext("Plan"),
177
- // Query execution plan
178
- planDigest: varchar("Plan_digest", { length: 128 }),
179
- // Plan digest hash
180
- binaryPlan: longtext("Binary_plan"),
181
- // Binary execution plan
182
- prevStmt: longtext("Prev_stmt"),
183
- // Previous statement in session
184
- query: longtext("Query")
185
- // Original SQL query
186
- });
187
- const createClusterStatementsSummarySchema = () => ({
188
- instance: varchar("INSTANCE", { length: 64 }),
189
- // TiDB/TiKV instance address
190
- summaryBeginTime: timestamp("SUMMARY_BEGIN_TIME", { mode: "string" }).notNull(),
191
- // Begin time of this summary window
192
- summaryEndTime: timestamp("SUMMARY_END_TIME", { mode: "string" }).notNull(),
193
- // End time of this summary window
194
- stmtType: varchar("STMT_TYPE", { length: 64 }).notNull(),
195
- // Statement type (e.g., Select/Insert/Update)
196
- schemaName: varchar("SCHEMA_NAME", { length: 64 }),
197
- // Current schema name
198
- digest: varchar("DIGEST", { length: 64 }),
199
- // SQL digest (normalized hash)
200
- digestText: text("DIGEST_TEXT").notNull(),
201
- // Normalized SQL text
202
- tableNames: text("TABLE_NAMES"),
203
- // Involved table names
204
- indexNames: text("INDEX_NAMES"),
205
- // Used index names
206
- sampleUser: varchar("SAMPLE_USER", { length: 64 }),
207
- // Sampled user who executed the statements
208
- execCount: bigint("EXEC_COUNT", { mode: "bigint", unsigned: true }).notNull(),
209
- // Total executions
210
- sumErrors: int("SUM_ERRORS", { unsigned: true }).notNull(),
211
- // Sum of errors
212
- sumWarnings: int("SUM_WARNINGS", { unsigned: true }).notNull(),
213
- // Sum of warnings
214
- sumLatency: bigint("SUM_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
215
- // Sum of latency (ns)
216
- maxLatency: bigint("MAX_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
217
- // Max latency (ns)
218
- minLatency: bigint("MIN_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
219
- // Min latency (ns)
220
- avgLatency: bigint("AVG_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
221
- // Avg latency (ns)
222
- avgParseLatency: bigint("AVG_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
223
- // Avg parse time (ns)
224
- maxParseLatency: bigint("MAX_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
225
- // Max parse time (ns)
226
- avgCompileLatency: bigint("AVG_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
227
- // Avg compile time (ns)
228
- maxCompileLatency: bigint("MAX_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
229
- // Max compile time (ns)
230
- sumCopTaskNum: bigint("SUM_COP_TASK_NUM", { mode: "bigint", unsigned: true }).notNull(),
231
- // Total number of cop tasks
232
- maxCopProcessTime: bigint("MAX_COP_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
233
- // Max TiKV coprocessor processing time (ns)
234
- maxCopProcessAddress: varchar("MAX_COP_PROCESS_ADDRESS", { length: 256 }),
235
- // Address of cop task with max processing time
236
- maxCopWaitTime: bigint("MAX_COP_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
237
- // Max TiKV coprocessor wait time (ns)
238
- maxCopWaitAddress: varchar("MAX_COP_WAIT_ADDRESS", { length: 256 }),
239
- // Address of cop task with max wait time
240
- avgProcessTime: bigint("AVG_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
241
- // Avg TiKV processing time (ns)
242
- maxProcessTime: bigint("MAX_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
243
- // Max TiKV processing time (ns)
244
- avgWaitTime: bigint("AVG_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
245
- // Avg TiKV wait time (ns)
246
- maxWaitTime: bigint("MAX_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
247
- // Max TiKV wait time (ns)
248
- avgBackoffTime: bigint("AVG_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
249
- // Avg backoff time before retry (ns)
250
- maxBackoffTime: bigint("MAX_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
251
- // Max backoff time before retry (ns)
252
- avgTotalKeys: bigint("AVG_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
253
- // Avg scanned keys
254
- maxTotalKeys: bigint("MAX_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
255
- // Max scanned keys
256
- avgProcessedKeys: bigint("AVG_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
257
- // Avg processed keys
258
- maxProcessedKeys: bigint("MAX_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
259
- // Max processed keys
260
- avgRocksdbDeleteSkippedCount: double("AVG_ROCKSDB_DELETE_SKIPPED_COUNT").notNull(),
261
- // Avg RocksDB deletes skipped
262
- maxRocksdbDeleteSkippedCount: int("MAX_ROCKSDB_DELETE_SKIPPED_COUNT", {
263
- unsigned: true
264
- }).notNull(),
265
- // Max RocksDB deletes skipped
266
- avgRocksdbKeySkippedCount: double("AVG_ROCKSDB_KEY_SKIPPED_COUNT").notNull(),
267
- // Avg RocksDB keys skipped
268
- maxRocksdbKeySkippedCount: int("MAX_ROCKSDB_KEY_SKIPPED_COUNT", { unsigned: true }).notNull(),
269
- // Max RocksDB keys skipped
270
- avgRocksdbBlockCacheHitCount: double("AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT").notNull(),
271
- // Avg RocksDB block cache hits
272
- maxRocksdbBlockCacheHitCount: int("MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT", {
273
- unsigned: true
274
- }).notNull(),
275
- // Max RocksDB block cache hits
276
- avgRocksdbBlockReadCount: double("AVG_ROCKSDB_BLOCK_READ_COUNT").notNull(),
277
- // Avg RocksDB block reads
278
- maxRocksdbBlockReadCount: int("MAX_ROCKSDB_BLOCK_READ_COUNT", { unsigned: true }).notNull(),
279
- // Max RocksDB block reads
280
- avgRocksdbBlockReadByte: double("AVG_ROCKSDB_BLOCK_READ_BYTE").notNull(),
281
- // Avg RocksDB block read bytes
282
- maxRocksdbBlockReadByte: int("MAX_ROCKSDB_BLOCK_READ_BYTE", { unsigned: true }).notNull(),
283
- // Max RocksDB block read bytes
284
- avgPrewriteTime: bigint("AVG_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
285
- // Avg prewrite phase time (ns)
286
- maxPrewriteTime: bigint("MAX_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
287
- // Max prewrite phase time (ns)
288
- avgCommitTime: bigint("AVG_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
289
- // Avg commit phase time (ns)
290
- maxCommitTime: bigint("MAX_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
291
- // Max commit phase time (ns)
292
- avgGetCommitTsTime: bigint("AVG_GET_COMMIT_TS_TIME", {
293
- mode: "bigint",
294
- unsigned: true
295
- }).notNull(),
296
- // Avg get commit_ts time (ns)
297
- maxGetCommitTsTime: bigint("MAX_GET_COMMIT_TS_TIME", {
298
- mode: "bigint",
299
- unsigned: true
300
- }).notNull(),
301
- // Max get commit_ts time (ns)
302
- avgCommitBackoffTime: bigint("AVG_COMMIT_BACKOFF_TIME", {
303
- mode: "bigint",
304
- unsigned: true
305
- }).notNull(),
306
- // Avg backoff during commit (ns)
307
- maxCommitBackoffTime: bigint("MAX_COMMIT_BACKOFF_TIME", {
308
- mode: "bigint",
309
- unsigned: true
310
- }).notNull(),
311
- // Max backoff during commit (ns)
312
- avgResolveLockTime: bigint("AVG_RESOLVE_LOCK_TIME", {
313
- mode: "bigint",
314
- unsigned: true
315
- }).notNull(),
316
- // Avg resolve lock time (ns)
317
- maxResolveLockTime: bigint("MAX_RESOLVE_LOCK_TIME", {
318
- mode: "bigint",
319
- unsigned: true
320
- }).notNull(),
321
- // Max resolve lock time (ns)
322
- avgLocalLatchWaitTime: bigint("AVG_LOCAL_LATCH_WAIT_TIME", {
323
- mode: "bigint",
324
- unsigned: true
325
- }).notNull(),
326
- // Avg local latch wait (ns)
327
- maxLocalLatchWaitTime: bigint("MAX_LOCAL_LATCH_WAIT_TIME", {
328
- mode: "bigint",
329
- unsigned: true
330
- }).notNull(),
331
- // Max local latch wait (ns)
332
- avgWriteKeys: double("AVG_WRITE_KEYS").notNull(),
333
- // Avg number of written keys
334
- maxWriteKeys: bigint("MAX_WRITE_KEYS", { mode: "bigint", unsigned: true }).notNull(),
335
- // Max written keys
336
- avgWriteSize: double("AVG_WRITE_SIZE").notNull(),
337
- // Avg written bytes
338
- maxWriteSize: bigint("MAX_WRITE_SIZE", { mode: "bigint", unsigned: true }).notNull(),
339
- // Max written bytes
340
- avgPrewriteRegions: double("AVG_PREWRITE_REGIONS").notNull(),
341
- // Avg regions in prewrite
342
- maxPrewriteRegions: int("MAX_PREWRITE_REGIONS", { unsigned: true }).notNull(),
343
- // Max regions in prewrite
344
- avgTxnRetry: double("AVG_TXN_RETRY").notNull(),
345
- // Avg transaction retry count
346
- maxTxnRetry: int("MAX_TXN_RETRY", { unsigned: true }).notNull(),
347
- // Max transaction retry count
348
- sumExecRetry: bigint("SUM_EXEC_RETRY", { mode: "bigint", unsigned: true }).notNull(),
349
- // Sum of execution retries (pessimistic)
350
- sumExecRetryTime: bigint("SUM_EXEC_RETRY_TIME", { mode: "bigint", unsigned: true }).notNull(),
351
- // Sum time of execution retries (ns)
352
- sumBackoffTimes: bigint("SUM_BACKOFF_TIMES", { mode: "bigint", unsigned: true }).notNull(),
353
- // Sum of backoff retries
354
- backoffTypes: varchar("BACKOFF_TYPES", { length: 1024 }),
355
- // Backoff types with counts
356
- avgMem: bigint("AVG_MEM", { mode: "bigint", unsigned: true }).notNull(),
357
- // Avg memory used (bytes)
358
- maxMem: bigint("MAX_MEM", { mode: "bigint", unsigned: true }).notNull(),
359
- // Max memory used (bytes)
360
- avgDisk: bigint("AVG_DISK", { mode: "bigint", unsigned: true }).notNull(),
361
- // Avg disk used (bytes)
362
- maxDisk: bigint("MAX_DISK", { mode: "bigint", unsigned: true }).notNull(),
363
- // Max disk used (bytes)
364
- avgKvTime: bigint("AVG_KV_TIME", { mode: "bigint", unsigned: true }).notNull(),
365
- // Avg time spent in TiKV (ns)
366
- avgPdTime: bigint("AVG_PD_TIME", { mode: "bigint", unsigned: true }).notNull(),
367
- // Avg time spent in PD (ns)
368
- avgBackoffTotalTime: bigint("AVG_BACKOFF_TOTAL_TIME", {
369
- mode: "bigint",
370
- unsigned: true
371
- }).notNull(),
372
- // Avg total backoff time (ns)
373
- avgWriteSqlRespTime: bigint("AVG_WRITE_SQL_RESP_TIME", {
374
- mode: "bigint",
375
- unsigned: true
376
- }).notNull(),
377
- // Avg write SQL response time (ns)
378
- avgTidbCpuTime: bigint("AVG_TIDB_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
379
- // Avg TiDB CPU time (ns)
380
- avgTikvCpuTime: bigint("AVG_TIKV_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
381
- // Avg TiKV CPU time (ns)
382
- maxResultRows: bigint("MAX_RESULT_ROWS", { mode: "bigint" }).notNull(),
383
- // Max number of result rows
384
- minResultRows: bigint("MIN_RESULT_ROWS", { mode: "bigint" }).notNull(),
385
- // Min number of result rows
386
- avgResultRows: bigint("AVG_RESULT_ROWS", { mode: "bigint" }).notNull(),
387
- // Avg number of result rows
388
- prepared: boolean("PREPARED").notNull(),
389
- // Whether statements are prepared
390
- avgAffectedRows: double("AVG_AFFECTED_ROWS").notNull(),
391
- // Avg affected rows
392
- firstSeen: timestamp("FIRST_SEEN", { mode: "string" }).notNull(),
393
- // First time statements observed
394
- lastSeen: timestamp("LAST_SEEN", { mode: "string" }).notNull(),
395
- // Last time statements observed
396
- planInCache: boolean("PLAN_IN_CACHE").notNull(),
397
- // Whether last stmt hit plan cache
398
- planCacheHits: bigint("PLAN_CACHE_HITS", { mode: "bigint" }).notNull(),
399
- // Number of plan cache hits
400
- planInBinding: boolean("PLAN_IN_BINDING").notNull(),
401
- // Whether matched bindings
402
- querySampleText: text("QUERY_SAMPLE_TEXT"),
403
- // Sampled original SQL
404
- prevSampleText: text("PREV_SAMPLE_TEXT"),
405
- // Sampled previous SQL before commit
406
- planDigest: varchar("PLAN_DIGEST", { length: 64 }),
407
- // Plan digest hash
408
- plan: text("PLAN"),
409
- // Sampled textual plan
410
- binaryPlan: text("BINARY_PLAN"),
411
- // Sampled binary plan
412
- charset: varchar("CHARSET", { length: 64 }),
413
- // Sampled charset
414
- collation: varchar("COLLATION", { length: 64 }),
415
- // Sampled collation
416
- planHint: varchar("PLAN_HINT", { length: 64 }),
417
- // Sampled plan hint
418
- maxRequestUnitRead: double("MAX_REQUEST_UNIT_READ").notNull(),
419
- // Max RU cost (read)
420
- avgRequestUnitRead: double("AVG_REQUEST_UNIT_READ").notNull(),
421
- // Avg RU cost (read)
422
- maxRequestUnitWrite: double("MAX_REQUEST_UNIT_WRITE").notNull(),
423
- // Max RU cost (write)
424
- avgRequestUnitWrite: double("AVG_REQUEST_UNIT_WRITE").notNull(),
425
- // Avg RU cost (write)
426
- maxQueuedRcTime: bigint("MAX_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
427
- // Max queued time waiting for RU (ns)
428
- avgQueuedRcTime: bigint("AVG_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
429
- // Avg queued time waiting for RU (ns)
430
- resourceGroup: varchar("RESOURCE_GROUP", { length: 64 }),
431
- // Bound resource group name
432
- planCacheUnqualified: bigint("PLAN_CACHE_UNQUALIFIED", { mode: "bigint" }).notNull(),
433
- // Times not eligible for plan cache
434
- planCacheUnqualifiedLastReason: text("PLAN_CACHE_UNQUALIFIED_LAST_REASON")
435
- // Last reason of plan cache ineligibility
436
- });
437
- const clusterStatementsSummaryHistory = informationSchema.table(
438
- "CLUSTER_STATEMENTS_SUMMARY_HISTORY",
439
- createClusterStatementsSummarySchema()
440
- );
441
- const statementsSummaryHistory = informationSchema.table(
442
- "STATEMENTS_SUMMARY_HISTORY",
443
- createClusterStatementsSummarySchema()
444
- );
445
- const statementsSummary = informationSchema.table(
446
- "STATEMENTS_SUMMARY",
447
- createClusterStatementsSummarySchema()
448
- );
449
- const clusterStatementsSummary = informationSchema.table(
450
- "CLUSTER_STATEMENTS_SUMMARY",
451
- createClusterStatementsSummarySchema()
452
- );
453
- async function getTables() {
454
- const tables = await sql.executeDDL("SHOW TABLES");
455
- return tables.rows.flatMap((tableInfo) => Object.values(tableInfo));
456
- }
457
- const forgeSystemTables = [migrations];
458
- const parseDateTime = (value, format) => {
459
- let result;
460
- if (value instanceof Date) {
461
- result = value;
462
- } else {
463
- const dt = DateTime.fromFormat(value, format);
464
- if (dt.isValid) {
465
- result = dt.toJSDate();
466
- } else {
467
- const sqlDt = DateTime.fromSQL(value);
468
- if (sqlDt.isValid) {
469
- result = sqlDt.toJSDate();
470
- } else {
471
- const isoDt = DateTime.fromRFC2822(value);
472
- if (isoDt.isValid) {
473
- result = isoDt.toJSDate();
474
- } else {
475
- result = new Date(value);
476
- }
477
- }
478
- }
479
- }
480
- if (isNaN(result.getTime())) {
481
- result = new Date(value);
482
- }
483
- return result;
484
- };
485
- function formatDateTime(value, format, isTimeStamp) {
486
- let dt = null;
487
- if (value instanceof Date) {
488
- dt = DateTime.fromJSDate(value);
489
- } else if (typeof value === "string") {
490
- for (const parser of [
491
- DateTime.fromISO,
492
- DateTime.fromRFC2822,
493
- DateTime.fromSQL,
494
- DateTime.fromHTTP
495
- ]) {
496
- dt = parser(value);
497
- if (dt.isValid) break;
498
- }
499
- if (!dt?.isValid) {
500
- const parsed = Number(value);
501
- if (!isNaN(parsed)) {
502
- dt = DateTime.fromMillis(parsed);
503
- }
504
- }
505
- } else if (typeof value === "number") {
506
- dt = DateTime.fromMillis(value);
507
- } else {
508
- throw new Error("Unsupported type");
509
- }
510
- if (!dt?.isValid) {
511
- throw new Error("Invalid Date");
512
- }
513
- const minDate = DateTime.fromSeconds(1);
514
- const maxDate = DateTime.fromMillis(2147483647 * 1e3);
515
- if (isTimeStamp) {
516
- if (dt < minDate) {
517
- throw new Error(
518
- "Atlassian Forge does not support zero or negative timestamps. Allowed range: from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'."
519
- );
520
- }
521
- if (dt > maxDate) {
522
- throw new Error(
523
- "Atlassian Forge does not support timestamps beyond 2038-01-19 03:14:07.999999. Please use a smaller date within the supported range."
524
- );
525
- }
526
- }
527
- return dt.toFormat(format);
528
- }
529
- function getPrimaryKeys(table) {
530
- const { columns, primaryKeys } = getTableMetadata(table);
531
- const columnPrimaryKeys = Object.entries(columns).filter(([, column]) => column.primary);
532
- if (columnPrimaryKeys.length > 0) {
533
- return columnPrimaryKeys;
534
- }
535
- if (Array.isArray(primaryKeys) && primaryKeys.length > 0) {
536
- const primaryKeyColumns = /* @__PURE__ */ new Set();
537
- primaryKeys.forEach((primaryKeyBuilder) => {
538
- Object.entries(columns).filter(([, column]) => {
539
- return primaryKeyBuilder.columns.includes(column);
540
- }).forEach(([name, column]) => {
541
- primaryKeyColumns.add([name, column]);
542
- });
543
- });
544
- return Array.from(primaryKeyColumns);
545
- }
546
- return [];
547
- }
548
- function processForeignKeys(table, foreignKeysSymbol, extraSymbol) {
549
- const foreignKeys = [];
550
- if (foreignKeysSymbol) {
551
- const fkArray = table[foreignKeysSymbol];
552
- if (fkArray) {
553
- fkArray.forEach((fk) => {
554
- if (fk.reference) {
555
- const item = fk.reference(fk);
556
- foreignKeys.push(item);
557
- }
558
- });
559
- }
560
- }
561
- if (extraSymbol) {
562
- const extraConfigBuilder = table[extraSymbol];
563
- if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
564
- const configBuilderData = extraConfigBuilder(table);
565
- if (configBuilderData) {
566
- const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
567
- (item) => item.value ?? item
568
- );
569
- configBuilders.forEach((builder) => {
570
- if (!builder?.constructor) return;
571
- const builderName = builder.constructor.name.toLowerCase();
572
- if (builderName.includes("foreignkeybuilder")) {
573
- foreignKeys.push(builder);
574
- }
575
- });
576
- }
577
- }
578
- }
579
- return foreignKeys;
580
- }
581
/**
 * Extracts table metadata (name, columns, and constraint builders) from a
 * drizzle table object by introspecting its well-known internal symbols.
 *
 * NOTE(review): symbol lookup matches on the symbol's string description
 * (e.g. "Name", "Columns", "ForeignKeys)") — this relies on drizzle's
 * internal symbol naming and may break across drizzle versions; confirm
 * against the pinned drizzle-orm release.
 *
 * @param table - A drizzle table definition.
 * @returns {{tableName, columns, indexes, checks, foreignKeys, primaryKeys, uniqueConstraints, extras}}
 */
function getTableMetadata(table) {
  const symbols = Object.getOwnPropertySymbols(table);
  const nameSymbol = symbols.find((s) => s.toString().includes("Name"));
  const columnsSymbol = symbols.find((s) => s.toString().includes("Columns"));
  // The trailing ")" anchors the match to the end of "Symbol(...ForeignKeys)".
  const foreignKeysSymbol = symbols.find((s) => s.toString().includes("ForeignKeys)"));
  const extraSymbol = symbols.find((s) => s.toString().includes("ExtraConfigBuilder"));
  const builders = {
    indexes: [],
    checks: [],
    foreignKeys: [],
    primaryKeys: [],
    uniqueConstraints: [],
    extras: []
  };
  builders.foreignKeys = processForeignKeys(table, foreignKeysSymbol, extraSymbol);
  if (extraSymbol) {
    const extraConfigBuilder = table[extraSymbol];
    if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
      const configBuilderData = extraConfigBuilder(table);
      if (configBuilderData) {
        // The extra-config callback may return an array of builders or an
        // object map; object entries may wrap the builder in `.value`.
        const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
          (item) => item.value ?? item
        );
        configBuilders.forEach((builder) => {
          if (!builder?.constructor) return;
          // Classify each builder by its constructor's class name.
          const builderName = builder.constructor.name.toLowerCase();
          const builderMap = {
            indexbuilder: builders.indexes,
            checkbuilder: builders.checks,
            primarykeybuilder: builders.primaryKeys,
            uniqueconstraintbuilder: builders.uniqueConstraints
          };
          for (const [type, array] of Object.entries(builderMap)) {
            if (builderName.includes(type)) {
              array.push(builder);
              break;
            }
          }
          // Every builder is also recorded in `extras`, even when classified above.
          builders.extras.push(builder);
        });
      }
    }
  }
  return {
    tableName: nameSymbol ? table[nameSymbol] : "",
    columns: columnsSymbol ? table[columnsSymbol] : {},
    ...builders
  };
}
630
/**
 * Builds DROP TABLE / DROP SEQUENCE statements for the given table names.
 *
 * @param tables - Table (and matching sequence) names to drop.
 * @param options - Optional flags; when omitted both `table` and `sequence`
 *   drops are generated. When both flags are false a warning is logged and
 *   an empty list is returned.
 * @returns Array of SQL statements, table-drop before sequence-drop per name.
 */
function generateDropTableStatements(tables, options) {
  const { table: dropTable, sequence: dropSequence } = options ?? { sequence: true, table: true };
  if (!dropSequence && !dropTable) {
    console.warn('No drop operations requested: both "table" and "sequence" options are false');
    return [];
  }
  return tables.flatMap((tableName) => {
    const statements = [];
    if (dropTable) {
      statements.push(`DROP TABLE IF EXISTS \`${tableName}\`;`);
    }
    if (dropSequence) {
      statements.push(`DROP SEQUENCE IF EXISTS \`${tableName}\`;`);
    }
    return statements;
  });
}
647
/**
 * Maps every column of a table to a uniquely-aliased SQL selection
 * (`col as a_<prefix>_<table>_<col>`) and records each alias in `aliasMap`
 * so result rows can later be mapped back to their source columns.
 *
 * @param table - Drizzle table to expand.
 * @param uniqPrefix - Prefix ensuring alias uniqueness across joined tables.
 * @param aliasMap - Mutated in place: alias name -> column object.
 * @returns Map of logical field name -> aliased SQL expression.
 */
function mapSelectTableToAlias(table, uniqPrefix, aliasMap) {
  const { columns, tableName } = getTableMetadata(table);
  const selectionsTableFields = {};
  for (const [name, column] of Object.entries(columns)) {
    const uniqName = `a_${uniqPrefix}_${tableName}_${column.name}`.toLowerCase();
    const fieldAlias = sql$1.raw(uniqName);
    selectionsTableFields[name] = sql$1`${column} as \`${fieldAlias}\``;
    aliasMap[uniqName] = column;
  }
  return selectionsTableFields;
}
659
/**
 * Type guard: does `column` look like a drizzle column (an object carrying a
 * `table` property)?
 *
 * Fix: the previous `column && ...` form returned the falsy input itself
 * (null, 0, "") instead of `false`; a predicate should always yield a boolean.
 *
 * @param column - Candidate value.
 * @returns true when the value is a non-null object with a `table` property.
 */
function isDrizzleColumn(column) {
  return Boolean(column) && typeof column === "object" && "table" in column;
}
662
/**
 * Recursively walks a selection tree and rewrites each leaf into an aliased
 * SQL expression, populating `aliasMap` (alias -> column) along the way.
 *
 * Dispatch order matters: table -> single column -> raw SQL fragment
 * (kept as-is, no alias) -> plain object (recursed with an extended prefix).
 *
 * @param selections - Output object, mutated and also returned.
 * @param name - Key under which to store the mapped selection.
 * @param uniqName - Accumulated alias prefix for this subtree.
 * @param fields - Table, column, SQL fragment, or nested selection object.
 * @param aliasMap - Mutated in place: alias name -> column object.
 * @returns The `selections` object.
 */
function mapSelectAllFieldsToAlias(selections, name, uniqName, fields, aliasMap) {
  if (isTable(fields)) {
    selections[name] = mapSelectTableToAlias(fields, uniqName, aliasMap);
  } else if (isDrizzleColumn(fields)) {
    const column = fields;
    const uniqAliasName = `a_${uniqName}_${column.name}`.toLowerCase();
    let aliasName = sql$1.raw(uniqAliasName);
    selections[name] = sql$1`${column} as \`${aliasName}\``;
    aliasMap[uniqAliasName] = column;
  } else if (isSQLWrapper(fields)) {
    // Raw SQL expressions are passed through untouched — no alias tracking.
    selections[name] = fields;
  } else {
    // Plain object: recurse, extending the alias prefix with each key.
    const innerSelections = {};
    Object.entries(fields).forEach(([iname, ifields]) => {
      mapSelectAllFieldsToAlias(innerSelections, iname, `${uniqName}_${iname}`, ifields, aliasMap);
    });
    selections[name] = innerSelections;
  }
  return selections;
}
682
/**
 * Entry point for alias mapping: converts a user-provided selection object
 * into aliased SQL selections plus the alias -> column lookup table.
 *
 * @param fields - Top-level selection object (must be non-empty/truthy).
 * @returns {{selections, aliasMap}}
 * @throws Error when `fields` is falsy.
 */
function mapSelectFieldsWithAlias(fields) {
  if (!fields) {
    throw new Error("fields is empty");
  }
  const aliasMap = {};
  const selections = {};
  for (const [name, nested] of Object.entries(fields)) {
    mapSelectAllFieldsToAlias(selections, name, name, nested, aliasMap);
  }
  return { selections, aliasMap };
}
693
/**
 * Recovers the alias string from a SQL fragment built like
 * sql`${column} as \`${alias}\`` by inspecting its query chunks.
 *
 * The alias is expected as the second-to-last chunk, itself a one-chunk SQL
 * wrapper whose single StringChunk holds exactly one string value.
 *
 * @param value - Candidate SQL fragment.
 * @returns The alias string, or undefined when the shape does not match.
 */
function getAliasFromDrizzleAlias(value) {
  if (value === null || typeof value !== "object") return void 0;
  if (!isSQLWrapper(value) || !("queryChunks" in value)) return void 0;
  const queryChunks = value.queryChunks;
  if (queryChunks.length <= 3) return void 0;
  const aliasNameChunk = queryChunks[queryChunks.length - 2];
  if (!isSQLWrapper(aliasNameChunk) || !("queryChunks" in aliasNameChunk)) return void 0;
  const innerChunks = aliasNameChunk.queryChunks;
  if (innerChunks?.length !== 1 || !innerChunks[0]) return void 0;
  const stringChunk = innerChunks[0];
  if (!("value" in stringChunk)) return void 0;
  const values = stringChunk.value;
  if (values && values.length === 1) {
    return values[0];
  }
  return void 0;
}
716
/**
 * Applies a column's custom `mapFrom` driver transform to a raw value.
 * Returns the value unchanged when the alias is unknown, the column has no
 * transform, or the value is null/undefined.
 *
 * @param value - Raw value from the driver.
 * @param alias - Alias identifying the source column.
 * @param aliasMap - alias name -> column object.
 * @returns Transformed (or original) value.
 */
function transformValue(value, alias, aliasMap) {
  const column = aliasMap[alias];
  const fromDriver = column?.mapFrom;
  if (!fromDriver || value === null || value === undefined) {
    return value;
  }
  return fromDriver(value);
}
726
/**
 * Recursively maps a raw result row back through the selection tree:
 * aliased leaves get their column's driver transform applied, nested
 * selection objects are recursed, everything else is copied as-is.
 *
 * @param obj - Raw row (or nested sub-object) keyed like `selections`.
 * @param selections - Selection tree produced by mapSelectFieldsWithAlias.
 * @param aliasMap - alias name -> column object.
 * @returns New object with transformed values; input is not mutated.
 */
function transformObject(obj, selections, aliasMap) {
  const result = {};
  for (const [key, value] of Object.entries(obj)) {
    const selection = selections[key];
    const alias = getAliasFromDrizzleAlias(selection);
    if (alias && aliasMap[alias]) {
      // Aliased column leaf: run the per-column driver transform.
      result[key] = transformValue(value, alias, aliasMap);
    } else if (selection && typeof selection === "object" && !isSQLWrapper(selection)) {
      // Nested selection object: recurse into the matching sub-row.
      result[key] = transformObject(
        value,
        selection,
        aliasMap
      );
    } else {
      // Raw SQL fragments and unknown keys pass through unchanged.
      result[key] = value;
    }
  }
  return result;
}
745
/**
 * Post-processes a result set: applies per-column driver transforms to each
 * row, then collapses all-null nested branches (artifacts of LEFT JOINs)
 * to null.
 *
 * @param rows - Raw rows from the driver.
 * @param selections - Selection tree produced by mapSelectFieldsWithAlias.
 * @param aliasMap - alias name -> column object.
 * @returns New array of transformed rows.
 */
function applyFromDriverTransform(rows, selections, aliasMap) {
  return rows.map((row) => processNullBranches(transformObject(row, selections, aliasMap)));
}
755
/**
 * Recursively collapses plain objects whose values are all null (or
 * themselves collapse to null) into a single null — e.g. the empty side of
 * a LEFT JOIN. Primitives and non-plain objects (Date, arrays, class
 * instances) are returned untouched. An empty plain object collapses to null.
 *
 * @param obj - Value to normalize.
 * @returns The normalized value (a fresh object for plain-object input).
 */
function processNullBranches(obj) {
  if (obj === null || typeof obj !== "object") {
    return obj;
  }
  // Only plain objects participate; anything with a custom constructor
  // (Date, Array, Buffer, ...) is passed through unchanged.
  if (obj.constructor && obj.constructor.name !== "Object") {
    return obj;
  }
  const result = {};
  let sawValue = false;
  for (const [key, value] of Object.entries(obj)) {
    if (value === null || value === undefined) {
      result[key] = null; // normalize undefined to null
    } else if (typeof value === "object") {
      const collapsed = processNullBranches(value);
      result[key] = collapsed;
      if (collapsed !== null) {
        sawValue = true;
      }
    } else {
      result[key] = value;
      sawValue = true;
    }
  }
  return sawValue ? result : null;
}
782
/**
 * Validates a LIMIT/OFFSET value and renders it as a raw SQL fragment.
 *
 * Fix: the value is interpolated verbatim via sql.raw, so only a finite
 * number is safe. The old check used the coercing global `isNaN` and let
 * ±Infinity through (producing invalid SQL); `Number.isFinite` rejects
 * NaN and both infinities.
 *
 * @param limitOrOffset - The numeric LIMIT or OFFSET value.
 * @returns Raw SQL fragment containing the number.
 * @throws Error when the value is not a finite number.
 */
function formatLimitOffset(limitOrOffset) {
  if (typeof limitOrOffset !== "number" || !Number.isFinite(limitOrOffset)) {
    throw new Error("limitOrOffset must be a valid number");
  }
  return sql$1.raw(`${limitOrOffset}`);
}
788
/**
 * Renders a NEXTVAL(sequence) expression as raw SQL.
 *
 * NOTE(review): `sequenceName` is interpolated into raw SQL unescaped —
 * callers must pass trusted identifiers only, never user-controlled input.
 *
 * @param sequenceName - Name of the sequence (trusted identifier).
 * @returns Raw SQL fragment `NEXTVAL(<sequenceName>)`.
 */
function nextVal(sequenceName) {
  return sql$1.raw(`NEXTVAL(${sequenceName})`);
}
791
/**
 * Diagnostic helper: queries TiDB's cluster statements summary for recent
 * statements (within `timeDiffMs` milliseconds) and logs each digest with
 * its average latency, memory, execution count and plan via console.warn.
 *
 * Housekeeping statements (Use/Set/Show/Commit/Rollback/Begin) and
 * information_schema queries are excluded. Failures (including timeouts)
 * are swallowed deliberately and logged at debug level — this is a
 * best-effort diagnostic, not a critical path.
 *
 * @param forgeSQLORM - ORM instance providing the drizzle query builder.
 * @param timeDiffMs - Look-back window in milliseconds.
 * @param timeout - Optional timeout in ms (default 3000).
 */
async function printQueriesWithPlan(forgeSQLORM, timeDiffMs, timeout) {
  try {
    const statementsTable = clusterStatementsSummary;
    const timeoutMs2 = timeout ?? 3e3;
    const results = await withTimeout(
      forgeSQLORM.getDrizzleQueryBuilder().select({
        // The hint tags this diagnostic query with the ORM session alias so
        // it can be filtered out of its own reports.
        digestText: withTidbHint(statementsTable.digestText),
        avgLatency: statementsTable.avgLatency,
        avgMem: statementsTable.avgMem,
        execCount: statementsTable.execCount,
        plan: statementsTable.plan,
        stmtType: statementsTable.stmtType
      }).from(statementsTable).where(
        and(
          isNotNull(statementsTable.digest),
          not(ilike(statementsTable.digestText, "%information_schema%")),
          notInArray(statementsTable.stmtType, ["Use", "Set", "Show", "Commit", "Rollback", "Begin"]),
          gte(
            statementsTable.lastSeen,
            // Window expressed in MICROSECOND because timeDiffMs is ms * 1000.
            sql$1`DATE_SUB
            (NOW(), INTERVAL
            ${timeDiffMs * 1e3}
            MICROSECOND
            )`
          )
        )
      ),
      `Timeout ${timeoutMs2}ms in printQueriesWithPlan - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
      timeoutMs2 + 200
    );
    results.forEach((result) => {
      // avgLatency/avgMem are reported in ns / bytes respectively — presumably;
      // both are scaled by 1e6 for display. TODO confirm units against TiDB docs.
      const avgTimeMs = Number(result.avgLatency) / 1e6;
      const avgMemMB = Number(result.avgMem) / 1e6;
      console.warn(
        `SQL: ${result.digestText} | Memory: ${avgMemMB.toFixed(2)} MB | Time: ${avgTimeMs.toFixed(2)} ms | stmtType: ${result.stmtType} | Executions: ${result.execCount}
Plan:${result.plan}`
      );
    });
  } catch (error) {
    console.debug(
      `Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
      error
    );
  }
}
836
// Session alias attached to the ORM's own diagnostic queries (via
// withTidbHint) so slowQueryPerHours can exclude the ORM's own traffic.
const SESSION_ALIAS_NAME_ORM = "orm";
837
/**
 * Diagnostic helper: scans TiDB's slow-query log for entries within the
 * last `hours` hours, excluding the ORM's own tagged queries, and logs each
 * one via console.warn.
 *
 * Unlike printQueriesWithPlan this returns the formatted messages, so
 * callers (e.g. scheduled triggers) can surface them. On failure a
 * single-element array with the error description is returned.
 *
 * @param forgeSQLORM - ORM instance providing the drizzle query builder.
 * @param hours - Look-back window in hours.
 * @param timeout - Optional timeout in ms (default 1500).
 * @returns Array of human-readable slow-query report lines.
 */
async function slowQueryPerHours(forgeSQLORM, hours, timeout) {
  try {
    const timeoutMs2 = timeout ?? 1500;
    const results = await withTimeout(
      forgeSQLORM.getDrizzleQueryBuilder().select({
        query: withTidbHint(slowQuery.query),
        queryTime: slowQuery.queryTime,
        memMax: slowQuery.memMax,
        plan: slowQuery.plan
      }).from(slowQuery).where(
        and(
          isNotNull(slowQuery.digest),
          // Skip queries issued by the ORM itself (tagged via withTidbHint).
          ne(slowQuery.sessionAlias, SESSION_ALIAS_NAME_ORM),
          gte(
            slowQuery.time,
            sql$1`DATE_SUB
            (NOW(), INTERVAL
            ${hours}
            HOUR
            )`
          )
        )
      ),
      `Timeout ${timeoutMs2}ms in slowQueryPerHours - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
      timeoutMs2
    );
    const response = [];
    results.forEach((result) => {
      // memMax is scaled by 1e6 for MB display — presumably bytes; TODO confirm.
      const memMaxMB = result.memMax ? Number(result.memMax) / 1e6 : 0;
      const message = `Found SlowQuery SQL: ${result.query} | Memory: ${memMaxMB.toFixed(2)} MB | Time: ${result.queryTime} ms
Plan:${result.plan}`;
      response.push(message);
      console.warn(
        message
      );
    });
    return response;
  } catch (error) {
    // Best-effort diagnostic: report the failure rather than throwing.
    console.debug(
      `Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
      error
    );
    return [`Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}`];
  }
}
882
/**
 * Races `promise` against a timer; rejects with `new Error(message)` if the
 * timer fires first. The timer is always cleared afterwards so it cannot
 * keep the event loop alive.
 *
 * @param promise - The promise to bound.
 * @param message - Error message used on timeout.
 * @param timeoutMs2 - Timeout in milliseconds.
 * @returns The promise's resolution value.
 * @throws The promise's rejection, or Error(message) on timeout.
 */
async function withTimeout(promise, message, timeoutMs2) {
  let timer;
  const expired = new Promise((_, reject) => {
    timer = setTimeout(() => reject(new Error(message)), timeoutMs2);
  });
  try {
    return await Promise.race([promise, expired]);
  } finally {
    if (timer) {
      clearTimeout(timer);
    }
  }
}
899
/**
 * Wraps a selected column with a TiDB optimizer-hint comment that sets
 * tidb_session_alias to the ORM's alias, so diagnostic queries issued by
 * the ORM can be recognized (and excluded) in the slow-query log.
 *
 * @param column - Column expression to prefix with the hint.
 * @returns SQL fragment: hint comment followed by the column.
 */
function withTidbHint(column) {
  return sql$1`/*+ SET_VAR(tidb_session_alias=${sql$1.raw(`${SESSION_ALIAS_NAME_ORM}`)}) */ ${column}`;
}
902
// Tunables for the KVS-backed query cache.
const CACHE_CONSTANTS = {
  BATCH_SIZE: 25, // cache entries deleted per KVS transaction
  MAX_RETRY_ATTEMPTS: 3, // attempts before a cache operation is abandoned
  INITIAL_RETRY_DELAY: 1e3, // ms before the first retry
  RETRY_DELAY_MULTIPLIER: 2, // exponential backoff factor between retries
  DEFAULT_ENTITY_QUERY_NAME: "sql", // KVS field holding the cached SQL text
  DEFAULT_EXPIRATION_NAME: "expiration", // KVS field holding expiry (unix seconds)
  DEFAULT_DATA_NAME: "data", // KVS field holding the JSON-serialized rows
  HASH_LENGTH: 32 // hex chars of the sha256 digest kept in cache keys
};
912
/**
 * Current time as a whole-second Unix timestamp.
 *
 * Fix: computed directly from Date.now(); round-tripping through luxon's
 * DateTime for this produced the identical value at the cost of an extra
 * library call.
 *
 * @returns Unix epoch time in seconds, floored to an integer.
 */
function getCurrentTime() {
  return Math.floor(Date.now() / 1000);
}
916
/**
 * Unix timestamp (whole seconds) `secondsToAdd` seconds from now.
 *
 * Fix: computed directly from Date.now(); equivalent to the previous
 * luxon DateTime.now().plus({seconds}).toSeconds() floored, without the
 * library round-trip.
 *
 * @param secondsToAdd - Seconds to add to the current time.
 * @returns Unix epoch time in seconds, floored to an integer.
 */
function nowPlusSeconds(secondsToAdd) {
  return Math.floor(Date.now() / 1000 + secondsToAdd);
}
920
/**
 * Derives a deterministic cache key for a compiled query: sha256 over the
 * lowercased SQL text plus the JSON-serialized parameters, truncated to
 * HASH_LENGTH hex chars and prefixed with "CachedQuery_".
 *
 * @param query - {sql, params} compiled query.
 * @returns Cache key string.
 */
function hashKey(query) {
  const digest = crypto
    .createHash("sha256")
    .update(query.sql.toLowerCase())
    .update(JSON.stringify(query.params))
    .digest("hex");
  return `CachedQuery_${digest.slice(0, CACHE_CONSTANTS.HASH_LENGTH)}`;
}
926
/**
 * Deletes the given cache entries from KVS in transactional batches of
 * BATCH_SIZE, one transaction per batch, awaited sequentially.
 *
 * @param results - Entries to delete; each must expose a `key`.
 * @param cacheEntityName - KVS entity name the entries belong to.
 */
async function deleteCacheEntriesInBatches(results, cacheEntityName) {
  const size = CACHE_CONSTANTS.BATCH_SIZE;
  for (let start = 0; start < results.length; start += size) {
    const tx = results
      .slice(start, start + size)
      .reduce(
        (builder, entry) => builder.delete(entry.key, { entityName: cacheEntityName }),
        kvs.transact()
      );
    await tx.execute();
  }
}
936
/**
 * Deletes cached query results referencing any of the given tables,
 * paging through KVS 100 entries at a time and recursing on nextCursor.
 *
 * Matching is a substring `contains` filter on the stored (lowercased) SQL
 * text; when options.cacheWrapTable is set the table name is wrapped in
 * backticks before matching to reduce false positives.
 *
 * @param tables - Table names whose cached queries should be evicted.
 * @param cursor - KVS pagination cursor ("" for the first page).
 * @param options - Cache configuration (entity/field names, logging).
 * @returns Total number of cache entries deleted across all pages.
 * @throws Error when cacheEntityName is not configured.
 */
async function clearCursorCache(tables, cursor, options) {
  const cacheEntityName = options.cacheEntityName;
  if (!cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
  // OR together one contains-filter per table.
  let filters = new Filter();
  for (const table of tables) {
    const wrapIfNeeded = options.cacheWrapTable ? `\`${table}\`` : table;
    filters.or(entityQueryName, FilterConditions.contains(wrapIfNeeded?.toLowerCase()));
  }
  let entityQueryBuilder = kvs.entity(cacheEntityName).query().index(entityQueryName).filters(filters);
  if (cursor) {
    entityQueryBuilder = entityQueryBuilder.cursor(cursor);
  }
  const listResult = await entityQueryBuilder.limit(100).getMany();
  if (options.logCache) {
    console.warn(`clear cache Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
  }
  await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
  if (listResult.nextCursor) {
    // Recurse for the next page; total is accumulated up the call chain.
    return listResult.results.length + await clearCursorCache(tables, listResult.nextCursor, options);
  } else {
    return listResult.results.length;
  }
}
962
/**
 * Deletes cache entries whose expiration field is in the past, paging
 * through KVS 100 entries at a time and recursing on nextCursor.
 *
 * @param cursor - KVS pagination cursor ("" for the first page).
 * @param options - Cache configuration (entity/field names, logging).
 * @returns Total number of expired entries deleted across all pages.
 * @throws Error when cacheEntityName is not configured.
 */
async function clearExpirationCursorCache(cursor, options) {
  const cacheEntityName = options.cacheEntityName;
  if (!cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const entityExpirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
  // expiration < now (unix seconds) selects the already-expired entries.
  let entityQueryBuilder = kvs.entity(cacheEntityName).query().index(entityExpirationName).where(WhereConditions.lessThan(Math.floor(DateTime.now().toSeconds())));
  if (cursor) {
    entityQueryBuilder = entityQueryBuilder.cursor(cursor);
  }
  const listResult = await entityQueryBuilder.limit(100).getMany();
  if (options.logCache) {
    console.warn(`clear expired Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
  }
  await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
  if (listResult.nextCursor) {
    return listResult.results.length + await clearExpirationCursorCache(listResult.nextCursor, options);
  } else {
    return listResult.results.length;
  }
}
983
/**
 * Runs `operation`, retrying up to MAX_RETRY_ATTEMPTS times with
 * exponential backoff (INITIAL_RETRY_DELAY * RETRY_DELAY_MULTIPLIER^n).
 * The last failure is logged at error level and rethrown.
 *
 * @param operation - Async thunk to execute.
 * @param operationName - Label used in log messages.
 * @returns The operation's result.
 * @throws The last error after all attempts are exhausted.
 */
async function executeWithRetry(operation, operationName) {
  let delay = CACHE_CONSTANTS.INITIAL_RETRY_DELAY;
  for (let attempt = 0; attempt < CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS; ) {
    try {
      return await operation();
    } catch (err) {
      // Note: the logged retry counter is zero-based.
      console.warn(`Error during ${operationName}: ${err.message}, retry ${attempt}`, err);
      attempt += 1;
      if (attempt >= CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS) {
        console.error(`Error during ${operationName}: ${err.message}`, err);
        throw err;
      }
      await new Promise((resolve) => setTimeout(resolve, delay));
      delay *= CACHE_CONSTANTS.RETRY_DELAY_MULTIPLIER;
    }
  }
  // Unreachable in practice (the loop either returns or rethrows).
  throw new Error(`Maximum retry attempts exceeded for ${operationName}`);
}
1002
/**
 * Invalidates cached queries touching the given table. Inside a cache
 * context the table is only recorded (invalidation is deferred to the
 * context's completion); otherwise the cache is cleared immediately.
 *
 * Fixes: (1) the table name is now lowercased, matching
 * saveTableIfInsideCacheContext and the lowercased-SQL comparison in
 * isTableContainsTableInCacheContext — previously a mixed-case table name
 * recorded here could never match; (2) getStore() is called once.
 *
 * @param schema - Drizzle table whose cached queries should be invalidated.
 * @param options - Cache configuration.
 */
async function clearCache(schema, options) {
  const tableName = getTableName(schema).toLowerCase();
  const context = cacheApplicationContext.getStore();
  if (context) {
    context.tables.add(tableName);
  } else {
    await clearTablesCache([tableName], options);
  }
}
1010
/**
 * Immediately clears all cached queries referencing any of the given
 * tables, with retry/backoff, optionally logging the count and duration.
 *
 * @param tables - Table names whose cached queries should be evicted.
 * @param options - Cache configuration (must include cacheEntityName).
 * @throws Error when cacheEntityName is not configured, or when clearing
 *   still fails after all retry attempts.
 */
async function clearTablesCache(tables, options) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const startTime = DateTime.now();
  let totalRecords = 0;
  try {
    totalRecords = await executeWithRetry(
      () => clearCursorCache(tables, "", options),
      "clearing cache"
    );
  } finally {
    // Duration is logged even when clearing ultimately failed.
    if (options.logCache) {
      const duration = DateTime.now().toSeconds() - startTime.toSeconds();
      console.info(`Cleared ${totalRecords} cache records in ${duration} seconds`);
    }
  }
}
1028
/**
 * Removes all cache entries whose expiration timestamp has passed, with
 * retry/backoff. Intended to be run periodically (e.g. a scheduled trigger).
 *
 * @param options - Cache configuration (must include cacheEntityName).
 * @throws Error when cacheEntityName is not configured, or when clearing
 *   still fails after all retry attempts.
 */
async function clearExpiredCache(options) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const startTime = DateTime.now();
  let totalRecords = 0;
  try {
    totalRecords = await executeWithRetry(
      () => clearExpirationCursorCache("", options),
      "clearing expired cache"
    );
  } finally {
    const duration = DateTime.now().toSeconds() - startTime.toSeconds();
    if (options?.logCache) {
      console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
    }
  }
}
1046
/**
 * Looks up a cached result for the given query. Returns undefined (a cache
 * miss) when: the enclosing cache context has marked one of the query's
 * tables for invalidation, the entry is missing or expired, the stored SQL
 * text does not match (hash-collision guard), or the KVS read fails.
 *
 * @param query - Query object exposing toSQL().
 * @param options - Cache configuration (must include cacheEntityName).
 * @returns The cached (JSON-parsed) rows, or undefined on a miss.
 * @throws Error when cacheEntityName is not configured.
 */
async function getFromCache(query, options) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
  const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
  const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
  const sqlQuery = query.toSQL();
  const key = hashKey(sqlQuery);
  // A table touched inside the current cache context invalidates this query.
  if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
    if (options.logCache) {
      console.warn(`Context contains value to clear. Skip getting from cache`);
    }
    return void 0;
  }
  try {
    const cacheResult = await kvs.entity(options.cacheEntityName).get(key);
    // Hit requires: entry exists, not expired, and the stored SQL text
    // matches exactly (protects against truncated-hash collisions).
    if (cacheResult && cacheResult[expirationName] >= getCurrentTime() && sqlQuery.sql.toLowerCase() === cacheResult[entityQueryName]) {
      if (options.logCache) {
        console.warn(`Get value from cache, cacheKey: ${key}`);
      }
      const results = cacheResult[dataName];
      return JSON.parse(results);
    }
  } catch (error) {
    // Read failures degrade to a cache miss rather than failing the query.
    console.error(`Error getting from cache: ${error.message}`, error);
  }
  return void 0;
}
1075
/**
 * Stores a query result in the KVS cache under the query's hash key, with
 * the lowercased SQL text (for collision verification), an absolute expiry
 * timestamp, and the JSON-serialized rows.
 *
 * Skipped when the enclosing cache context has marked one of the query's
 * tables for invalidation. Write failures are logged and swallowed —
 * caching is best-effort and must not fail the caller's query.
 *
 * @param query - Query object exposing toSQL().
 * @param options - Cache configuration (must include cacheEntityName).
 * @param results - Rows to cache (must be JSON-serializable).
 * @param cacheTtl - Time-to-live in seconds.
 * @throws Error when cacheEntityName is not configured.
 */
async function setCacheResult(query, options, results, cacheTtl) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  try {
    const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
    const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
    const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
    const sqlQuery = query.toSQL();
    if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
      if (options.logCache) {
        console.warn(`Context contains value to clear. Skip setting from cache`);
      }
      return;
    }
    const key = hashKey(sqlQuery);
    await kvs.transact().set(
      key,
      {
        [entityQueryName]: sqlQuery.sql.toLowerCase(),
        [expirationName]: nowPlusSeconds(cacheTtl),
        [dataName]: JSON.stringify(results)
      },
      { entityName: options.cacheEntityName }
    ).execute();
    if (options.logCache) {
      console.warn(`Store value to cache, cacheKey: ${key}`);
    }
  } catch (error) {
    console.error(`Error setting cache: ${error.message}`, error);
  }
}
1107
/**
 * Type guard: is `obj` a pre-compiled query of shape
 * {sql: string, params: array}?
 *
 * @param obj - Candidate value.
 * @returns true for a non-null object with string `sql` and array `params`.
 */
function isQuery(obj) {
  return (
    obj !== null &&
    typeof obj === "object" &&
    typeof obj.sql === "string" &&
    Array.isArray(obj.params)
  );
}
1110
// AsyncLocalStorage holding the set of table names mutated inside an
// explicit cache-invalidation context; their cached queries are cleared
// when the context completes.
const cacheApplicationContext = new AsyncLocalStorage();
// AsyncLocalStorage holding a per-execution, in-memory query-result cache
// (keyed by query hash) used to deduplicate identical reads in one request.
const localCacheApplicationContext = new AsyncLocalStorage();
1112
/**
 * Records a mutated table (lowercased) in the enclosing cache context, if
 * one is active; a no-op otherwise.
 *
 * @param table - Drizzle table that was just written to.
 */
async function saveTableIfInsideCacheContext(table) {
  const context = cacheApplicationContext.getStore();
  if (!context) {
    return;
  }
  context.tables.add(getTableName(table).toLowerCase());
}
1119
/**
 * Stores a query result in the per-execution local cache, if a local cache
 * context is active. The entry records the lowercased SQL (to verify hash
 * matches on read) and the raw rows.
 *
 * @param query - Either a compiled {sql, params} query or an object with toSQL().
 * @param rows - Result rows to cache.
 * @param options - Cache configuration (logCache flag).
 */
async function saveQueryLocalCacheQuery(query, rows, options) {
  const context = localCacheApplicationContext.getStore();
  if (!context) {
    return;
  }
  if (!context.cache) {
    context.cache = {};
  }
  // Normalize: accept either a pre-compiled query or a builder with toSQL().
  const wrapped = isQuery(query) ? { toSQL: () => query } : query;
  const compiled = wrapped.toSQL();
  const key = hashKey(compiled);
  context.cache[key] = {
    sql: compiled.sql.toLowerCase(),
    data: rows
  };
  if (options.logCache) {
    console.debug(
      `[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${compiled.sql}", params=${JSON.stringify(compiled.params)}`
    );
  }
}
1144
/**
 * Looks up a query result in the per-execution local cache. A hit requires
 * both the hash key and the stored lowercased SQL text to match
 * (hash-collision guard). Returns undefined outside a context or on a miss.
 *
 * @param query - Either a compiled {sql, params} query or an object with toSQL().
 * @param options - Cache configuration (logCache flag).
 * @returns The cached rows, or undefined.
 */
async function getQueryLocalCacheQuery(query, options) {
  const context = localCacheApplicationContext.getStore();
  if (!context) {
    return void 0;
  }
  if (!context.cache) {
    context.cache = {};
  }
  // Normalize: accept either a pre-compiled query or a builder with toSQL().
  const wrapped = isQuery(query) ? { toSQL: () => query } : query;
  const compiled = wrapped.toSQL();
  const key = hashKey(compiled);
  const entry = context.cache[key];
  if (entry && entry.sql === compiled.sql.toLowerCase()) {
    if (options.logCache) {
      console.debug(
        `[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${compiled.sql}", params=${JSON.stringify(compiled.params)}`
      );
    }
    return entry.data;
  }
  return void 0;
}
1169
/**
 * Evicts every local-cache entry whose SQL references the given table.
 *
 * Fix: cached SQL is stored lowercased (see saveQueryLocalCacheQuery), so
 * the search string is now lowercased before matching; previously entries
 * for mixed-case table names could never be evicted, returning stale rows
 * after a write.
 *
 * @param table - Drizzle table that was just written to.
 * @param options - Cache configuration (cacheWrapTable flag).
 */
async function evictLocalCacheQuery(table, options) {
  const context = localCacheApplicationContext.getStore();
  if (!context) {
    return;
  }
  if (!context.cache) {
    context.cache = {};
  }
  const tableName = getTableName(table);
  const needle = (options.cacheWrapTable ? `\`${tableName}\`` : tableName).toLowerCase();
  const staleKeys = Object.keys(context.cache).filter((key) =>
    context.cache[key].sql.includes(needle)
  );
  for (const key of staleKeys) {
    delete context.cache[key];
  }
}
1186
/**
 * Checks whether the given SQL references any table already marked for
 * invalidation in the enclosing cache context. Used to bypass the cache
 * for queries whose tables were mutated earlier in the same context.
 *
 * @param sql2 - SQL text of the query being considered.
 * @param options - Cache configuration (cacheWrapTable flag).
 * @returns true when the SQL (lowercased) contains a marked table name.
 */
async function isTableContainsTableInCacheContext(sql2, options) {
  const context = cacheApplicationContext.getStore();
  if (!context) {
    return false;
  }
  const lowerSql = sql2.toLowerCase();
  return Array.from(context.tables).some((table) => {
    const needle = options.cacheWrapTable ? `\`${table}\`` : table;
    return lowerSql.includes(needle);
  });
}
1198
- class ForgeSQLCrudOperations {
1199
  // Underlying ForgeSQL operations facade used to build insert/update/delete queries.
  forgeOperations;
  // ORM configuration: optimistic-locking switch and per-table version-field metadata.
  options;
  /**
   * Creates a new instance of ForgeSQLCrudOperations.
   * @param forgeSqlOperations - The ForgeSQL operations instance
   * @param options - Configuration options for the ORM
   */
  constructor(forgeSqlOperations, options) {
    this.forgeOperations = forgeSqlOperations;
    this.options = options;
  }
1210
  /**
   * Inserts records into the database with optional versioning support.
   * If a version field exists in the schema, versioning is applied.
   *
   * This method automatically handles:
   * - Version field initialization for optimistic locking
   * - Batch insertion for multiple records
   * - Duplicate key handling with optional updates
   *
   * @template T - The type of the table schema
   * @param schema - The entity schema
   * @param models - Array of entities to insert
   * @param updateIfExists - Whether to update existing records (default: false)
   * @returns Promise resolving to the driver-reported insertId of the result
   *   (0 when `models` is empty) — note: not the count of inserted rows.
   * @throws Error if the insert operation fails
   */
  async insert(schema, models, updateIfExists = false) {
    if (!models?.length) return 0;
    const { tableName, columns } = getTableMetadata(schema);
    const versionMetadata = this.validateVersionField(tableName, columns);
    // Stamp each model with an initial version value when versioning applies.
    const preparedModels = models.map(
      (model) => this.prepareModelWithVersion(model, versionMetadata, columns)
    );
    const queryBuilder = this.forgeOperations.insert(schema).values(preparedModels);
    // On conflict, update every column present in the first model; the set
    // targets are taken from the schema's column objects.
    const finalQuery = updateIfExists ? queryBuilder.onDuplicateKeyUpdate({
      set: Object.fromEntries(
        Object.keys(preparedModels[0]).map((key) => [key, schema[key]])
      )
    }) : queryBuilder;
    const result = await finalQuery;
    // Record the touched table so an enclosing cache context invalidates it.
    await saveTableIfInsideCacheContext(schema);
    return result[0].insertId;
  }
1243
  /**
   * Deletes a record by its primary key with optional version check.
   * If versioning is enabled, ensures the record hasn't been modified since last read.
   *
   * This method automatically handles:
   * - Single primary key validation
   * - Optimistic locking checks if versioning is enabled
   * - Version field validation before deletion
   *
   * @template T - The type of the table schema
   * @param id - The ID of the record to delete
   * @param schema - The entity schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if the delete operation fails
   * @throws Error if multiple primary keys are found
   * @throws Error if optimistic locking check fails
   */
  async deleteById(id, schema) {
    const { tableName, columns } = getTableMetadata(schema);
    const primaryKeys = this.getPrimaryKeys(schema);
    if (primaryKeys.length !== 1) {
      throw new Error("Only single primary key is supported");
    }
    const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
    const versionMetadata = this.validateVersionField(tableName, columns);
    const conditions = [eq(primaryKeyColumn, id)];
    if (versionMetadata && columns) {
      const versionField = columns[versionMetadata.fieldName];
      if (versionField) {
        // Fetch the stored version so the delete only succeeds when the row
        // has not been modified since it was read.
        const oldModel = await this.getOldModel({ [primaryKeyName]: id }, schema, [
          versionMetadata.fieldName,
          versionField
        ]);
        conditions.push(eq(versionField, oldModel[versionMetadata.fieldName]));
      }
    }
    const queryBuilder = this.forgeOperations.delete(schema).where(and(...conditions));
    const result = await queryBuilder;
    // Zero affected rows with versioning on means a concurrent modification.
    if (versionMetadata && result[0].affectedRows === 0) {
      throw new Error(`Optimistic locking failed: record with primary key ${id} has been modified`);
    }
    await saveTableIfInsideCacheContext(schema);
    return result[0].affectedRows;
  }
1287
  /**
   * Updates a record by its primary key with optimistic locking support.
   * If versioning is enabled:
   * - Retrieves the current version
   * - Checks for concurrent modifications
   * - Increments the version on successful update
   *
   * This method automatically handles:
   * - Primary key validation
   * - Version field retrieval and validation
   * - Optimistic locking conflict detection
   * - Version field incrementation
   *
   * @template T - The type of the table schema
   * @param entity - The entity with updated values (must include primary key)
   * @param schema - The entity schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if the primary key is not provided
   * @throws Error if optimistic locking check fails
   * @throws Error if multiple primary keys are found
   */
  async updateById(entity, schema) {
    const { tableName, columns } = getTableMetadata(schema);
    const primaryKeys = this.getPrimaryKeys(schema);
    if (primaryKeys.length !== 1) {
      throw new Error("Only single primary key is supported");
    }
    const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
    const versionMetadata = this.validateVersionField(tableName, columns);
    if (!(primaryKeyName in entity)) {
      throw new Error(`Primary key ${primaryKeyName} must be provided in the entity`);
    }
    // Version taken from the entity when present, otherwise read from the DB.
    const currentVersion = await this.getCurrentVersion(
      entity,
      primaryKeyName,
      versionMetadata,
      columns,
      schema
    );
    // Update payload with the version field advanced past currentVersion.
    const updateData = this.prepareUpdateData(entity, versionMetadata, columns, currentVersion);
    const conditions = [
      eq(primaryKeyColumn, entity[primaryKeyName])
    ];
    if (versionMetadata && columns) {
      const versionField = columns[versionMetadata.fieldName];
      if (versionField) {
        // Guard: the row must still carry the version we read.
        conditions.push(eq(versionField, currentVersion));
      }
    }
    const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(and(...conditions));
    const result = await queryBuilder;
    // Zero affected rows with versioning on means a concurrent modification.
    if (versionMetadata && result[0].affectedRows === 0) {
      throw new Error(
        `Optimistic locking failed: record with primary key ${entity[primaryKeyName]} has been modified`
      );
    }
    await saveTableIfInsideCacheContext(schema);
    return result[0].affectedRows;
  }
1346
- /**
1347
- * Updates specified fields of records based on provided conditions.
1348
- * This method does not support versioning and should be used with caution.
1349
- *
1350
- * @template T - The type of the table schema
1351
- * @param {Partial<InferInsertModel<T>>} updateData - The data to update
1352
- * @param {T} schema - The entity schema
1353
- * @param {SQL<unknown>} where - The WHERE conditions
1354
- * @returns {Promise<number>} Number of affected rows
1355
- * @throws {Error} If WHERE conditions are not provided
1356
- * @throws {Error} If the update operation fails
1357
- */
1358
- async updateFields(updateData, schema, where) {
1359
- if (!where) {
1360
- throw new Error("WHERE conditions must be provided");
1361
- }
1362
- const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(where);
1363
- const result = await queryBuilder;
1364
- await saveTableIfInsideCacheContext(schema);
1365
- return result[0].affectedRows;
1366
- }
1367
  // Helper methods
  /**
   * Gets primary keys from the schema.
   * @template T - The type of the table schema
   * @param {T} schema - The table schema
   * @returns {[string, AnyColumn][]} Array of primary key name and column pairs
   * @throws {Error} If no primary keys are found
   */
  getPrimaryKeys(schema) {
    // Bare `getPrimaryKeys` resolves to the module-level helper, not this
    // method — class methods do not shadow outer scope inside their body.
    const primaryKeys = getPrimaryKeys(schema);
    if (!primaryKeys) {
      throw new Error(`No primary keys found for schema: ${schema}`);
    }
    return primaryKeys;
  }
1382
  /**
   * Validates and retrieves version field metadata.
   *
   * Returns undefined (versioning skipped) when optimistic locking is
   * disabled, no version field is configured for this table, the field
   * cannot be found in the columns, it is nullable, or its SQL type is not
   * one of: datetime, timestamp, int, number, decimal.
   *
   * @param {string} tableName - The name of the table
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @returns {Object | undefined} {fieldName, type} if valid, undefined otherwise
   */
  validateVersionField(tableName, columns) {
    if (this.options.disableOptimisticLocking) {
      return void 0;
    }
    const versionMetadata = this.options.additionalMetadata?.[tableName]?.versionField;
    if (!versionMetadata) return void 0;
    let fieldName = versionMetadata.fieldName;
    let versionField = columns[versionMetadata.fieldName];
    if (!versionField) {
      // The configured name may be the DB column name rather than the
      // TypeScript property key — fall back to matching on column.name.
      const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
      if (find) {
        fieldName = find[0];
        versionField = find[1];
      }
    }
    if (!versionField) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" not found in table ${tableName}. Versioning will be skipped.`
      );
      return void 0;
    }
    if (!versionField.notNull) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" in table ${tableName} is nullable. Versioning may not work correctly.`
      );
      return void 0;
    }
    const fieldType = versionField.getSQLType();
    const isSupportedType = fieldType === "datetime" || fieldType === "timestamp" || fieldType === "int" || fieldType === "number" || fieldType === "decimal";
    if (!isSupportedType) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" in table ${tableName} has unsupported type "${fieldType}". Only datetime, timestamp, int, and decimal types are supported for versioning. Versioning will be skipped.`
      );
      return void 0;
    }
    return { fieldName, type: fieldType };
  }
1425
  /**
   * Gets the current version of an entity: taken from the entity itself
   * when the version field is present, otherwise read from the database by
   * primary key. Returns undefined when versioning is not applicable.
   *
   * @template T - The type of the table schema
   * @param {Partial<InferInsertModel<T>>} entity - The entity
   * @param {string} primaryKeyName - The name of the primary key
   * @param {Object | undefined} versionMetadata - Version field metadata
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @param {T} schema - The table schema
   * @returns {Promise<unknown>} The current version value
   */
  async getCurrentVersion(entity, primaryKeyName, versionMetadata, columns, schema) {
    if (!versionMetadata || !columns) return void 0;
    const versionField = columns[versionMetadata.fieldName];
    if (!versionField) return void 0;
    // Caller-supplied version wins — avoids an extra round-trip.
    if (versionMetadata.fieldName in entity) {
      return entity[versionMetadata.fieldName];
    }
    const oldModel = await this.getOldModel(
      { [primaryKeyName]: entity[primaryKeyName] },
      schema,
      [versionMetadata.fieldName, versionField]
    );
    return oldModel[versionMetadata.fieldName];
  }
1449
- /**
1450
- * Prepares a model for insertion with version field.
1451
- * @template T - The type of the table schema
1452
- * @param {Partial<InferInsertModel<T>>} model - The model to prepare
1453
- * @param {Object | undefined} versionMetadata - Version field metadata
1454
- * @param {Record<string, AnyColumn>} columns - The table columns
1455
- * @returns {InferInsertModel<T>} The prepared model
1456
- */
1457
- prepareModelWithVersion(model, versionMetadata, columns) {
1458
- if (!versionMetadata || !columns) return model;
1459
- let fieldName = versionMetadata.fieldName;
1460
- let versionField = columns[versionMetadata.fieldName];
1461
- if (!versionField) {
1462
- const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
1463
- if (find) {
1464
- fieldName = find[0];
1465
- versionField = find[1];
1466
- }
1467
- }
1468
- if (!versionField) return model;
1469
- const modelWithVersion = { ...model };
1470
- const fieldType = versionField.getSQLType();
1471
- const versionValue = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : 1;
1472
- modelWithVersion[fieldName] = versionValue;
1473
- return modelWithVersion;
1474
- }
1475
- /**
1476
- * Prepares update data with version field.
1477
- * @template T - The type of the table schema
1478
- * @param {Partial<InferInsertModel<T>>} entity - The entity to update
1479
- * @param {Object | undefined} versionMetadata - Version field metadata
1480
- * @param {Record<string, AnyColumn>} columns - The table columns
1481
- * @param {unknown} currentVersion - The current version value
1482
- * @returns {Partial<InferInsertModel<T>>} The prepared update data
1483
- */
1484
- prepareUpdateData(entity, versionMetadata, columns, currentVersion) {
1485
- const updateData = { ...entity };
1486
- if (versionMetadata && columns) {
1487
- const versionField = columns[versionMetadata.fieldName];
1488
- if (versionField) {
1489
- const fieldType = versionField.getSQLType();
1490
- updateData[versionMetadata.fieldName] = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : currentVersion + 1;
1491
- }
1492
- }
1493
- return updateData;
1494
- }
1495
- /**
1496
- * Retrieves an existing model by primary key.
1497
- * @template T - The type of the table schema
1498
- * @param {Record<string, unknown>} primaryKeyValues - The primary key values
1499
- * @param {T} schema - The table schema
1500
- * @param {[string, AnyColumn]} versionField - The version field name and column
1501
- * @returns {Promise<Awaited<T> extends Array<any> ? Awaited<T>[number] | undefined : Awaited<T> | undefined>} The existing model
1502
- * @throws {Error} If the record is not found
1503
- */
1504
- async getOldModel(primaryKeyValues, schema, versionField) {
1505
- const [versionFieldName, versionFieldColumn] = versionField;
1506
- const primaryKeys = this.getPrimaryKeys(schema);
1507
- const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
1508
- const resultQuery = this.forgeOperations.select({
1509
- [primaryKeyName]: primaryKeyColumn,
1510
- [versionFieldName]: versionFieldColumn
1511
- }).from(schema).where(eq(primaryKeyColumn, primaryKeyValues[primaryKeyName]));
1512
- const model = await this.forgeOperations.fetch().executeQueryOnlyOne(resultQuery);
1513
- if (!model) {
1514
- throw new Error(`Record not found in table ${schema}`);
1515
- }
1516
- return model;
1517
- }
1518
- }
1519
class ForgeSQLSelectOperations {
  options;
  /**
   * Creates a new instance of ForgeSQLSelectOperations.
   * @param {ForgeSqlOrmOptions} options - ORM configuration options
   */
  constructor(options) {
    this.options = options;
  }
  /**
   * Awaits a Drizzle query and returns at most one row.
   * @param query - The Drizzle query to execute
   * @returns The single row, or undefined when the result set is empty
   * @throws {Error} When the query yields more than one row
   */
  async executeQueryOnlyOne(query) {
    const rows = await query;
    if (!rows.length) {
      return void 0;
    }
    if (rows.length > 1) {
      throw new Error(`Expected 1 record but returned ${rows.length}`);
    }
    return rows[0];
  }
  /**
   * Runs a raw SQL query and returns its rows.
   * Logs the statement first when logRawSqlQuery is enabled.
   * @param query - Raw SQL text
   * @param params - Optional bind parameters
   * @returns The result rows as objects
   */
  async executeRawSQL(query, params) {
    if (this.options.logRawSqlQuery) {
      const paramsStr = params ? `, with params: ${JSON.stringify(params)}` : "";
      console.debug(`Executing with SQL ${query}${paramsStr}`);
    }
    const statement = sql.prepare(query);
    if (params) {
      statement.bindParams(...params);
    }
    const { rows } = await statement.execute();
    return rows;
  }
  /**
   * Runs a raw SQL update/DML statement.
   * @param query - Raw SQL update text
   * @param params - Optional bind parameters
   * @returns The update response containing affected rows
   */
  async executeRawUpdateSQL(query, params) {
    const statement = sql.prepare(query);
    if (params) {
      statement.bindParams(...params);
    }
    if (this.options.logRawSqlQuery) {
      console.debug(
        `Executing Update with SQL ${query}` + (params ? `, with params: ${JSON.stringify(params)}` : "")
      );
    }
    const response = await statement.execute();
    return response.rows;
  }
}
1589
/** AsyncLocalStorage holding per-request query metadata accumulation. */
const metadataQueryContext = new AsyncLocalStorage();
/**
 * Folds driver metadata (response size, DB execution time) into the active
 * metadata context, if any, and attaches a lazy plan-printing helper to it.
 * Outside a context this is a no-op.
 * @param metadata - Optional metadata returned by the SQL driver
 */
async function saveMetaDataToContext(metadata) {
  const store = metadataQueryContext.getStore();
  if (!store) {
    return;
  }
  store.printQueriesWithPlan = async () => {
    // Give the statements-summary tables a moment to populate (skipped in tests).
    if (process.env.NODE_ENV !== "test") {
      await new Promise((resolve) => setTimeout(resolve, 200));
    }
    await printQueriesWithPlan(store.forgeSQLORM, Date.now() - store.beginTime.getTime());
  };
  if (metadata) {
    store.totalResponseSize += metadata.responseSize;
    store.totalDbExecutionTime += metadata.dbExecutionTime;
  }
}
/**
 * Returns the metadata store of the current async context, if any.
 */
async function getLastestMetadata() {
  return metadataQueryContext.getStore();
}
/** AsyncLocalStorage holding the operation type (DDL vs DML) for the current call. */
const operationTypeQueryContext = new AsyncLocalStorage();
/**
 * Returns the operation type of the current async context, defaulting to "DML".
 */
async function getOperationType() {
  return operationTypeQueryContext.getStore()?.operationType ?? "DML";
}
1615
/** Hard cap on how long a single @forge/sql call may take. */
const timeoutMs = 1e4;
const timeoutMessage = `Atlassian @forge/sql did not return a response within ${timeoutMs}ms (${timeoutMs / 1e3} seconds), so the request is blocked. Possible causes: slow query, network issues, or exceeding Forge SQL limits.`;
/**
 * Type guard for @forge/sql update responses.
 * @param obj - Candidate value
 * @returns True when obj has numeric affectedRows and insertId fields
 */
function isUpdateQueryResponse(obj) {
  return obj !== null && typeof obj === "object" && typeof obj.affectedRows === "number" && typeof obj.insertId === "number";
}
/**
 * Inlines bind parameters into a SQL string (used for DDL, which cannot be
 * parameterized). Strings are single-quoted with embedded quotes doubled.
 * Fix: null AND undefined both render as NULL (previously an undefined
 * parameter produced the literal string 'undefined' in the SQL).
 * @param sql2 - SQL text containing `?` placeholders
 * @param params - Values to substitute, in placeholder order
 * @returns SQL with all placeholders replaced
 */
function inlineParams(sql2, params) {
  let i = 0;
  return sql2.replace(/\?/g, () => {
    const val = params[i++];
    if (val == null) return "NULL";
    if (typeof val === "number") return val.toString();
    return `'${String(val).replace(/'/g, "''")}'`;
  });
}
1629
/**
 * Normalizes a DDL execution result into the driver's row envelope and
 * records metadata into the async context when present.
 * @param method - Drizzle driver method ("execute" or "all")
 * @param result - Raw result returned by sql.executeDDL
 * @returns An object with a rows array (shape depends on method)
 */
async function processDDLResult(method, result) {
  if (result.metadata) {
    await saveMetaDataToContext(result.metadata);
  }
  const rows = result?.rows;
  if (!rows) {
    return { rows: [] };
  }
  if (isUpdateQueryResponse(rows)) {
    // Single update-style response: expose it both flattened and as a row.
    return { ...rows, rows: [rows] };
  }
  if (Array.isArray(rows)) {
    return method === "execute"
      ? { rows: [rows] }
      : { rows: rows.map((row) => Object.values(row)) };
  }
  return { rows: [] };
}
1650
/**
 * Executes a parameterized statement for the Drizzle "execute" method,
 * returning the rows wrapped in a single-element array as the driver expects.
 * @param query - SQL text
 * @param params - Bind parameters
 */
async function processExecuteMethod(query, params) {
  const statement = sql.prepare(query);
  if (params) {
    statement.bindParams(...params);
  }
  const result = await withTimeout(statement.execute(), timeoutMessage, timeoutMs);
  await saveMetaDataToContext(result.metadata);
  return result.rows ? { rows: [result.rows] } : { rows: [[]] };
}
/**
 * Executes a parameterized statement for the Drizzle "all" method,
 * converting each row object into an array of column values.
 * @param query - SQL text
 * @param params - Bind parameters
 */
async function processAllMethod(query, params) {
  const statement = await sql.prepare(query);
  if (params) {
    await statement.bindParams(...params);
  }
  const result = await withTimeout(statement.execute(), timeoutMessage, timeoutMs);
  await saveMetaDataToContext(result.metadata);
  if (!result.rows) {
    return { rows: [] };
  }
  return { rows: result.rows.map((row) => Object.values(row)) };
}
1675
/**
 * Drizzle driver callback bridging to @forge/sql.
 * DDL statements are executed with parameters inlined (Forge DDL does not
 * support binding); DML is dispatched by the Drizzle method name.
 * @param query - SQL text with `?` placeholders
 * @param params - Bind parameters
 * @param method - Drizzle driver method ("execute" or "all")
 */
const forgeDriver = async (query, params, method) => {
  const operationType = await getOperationType();
  if (operationType === "DDL") {
    const ddlResult = await withTimeout(sql.executeDDL(inlineParams(query, params)), timeoutMessage, timeoutMs);
    return await processDDLResult(method, ddlResult);
  }
  const boundParams = params ?? [];
  return method === "execute"
    ? await processExecuteMethod(query, boundParams)
    : await processAllMethod(query, boundParams);
};
1686
/**
 * Injects optimizer hint comments into a SQL statement, right after the
 * leading SELECT/INSERT/UPDATE/DELETE keyword.
 * Fix: the statement is sliced on its trimmed form; previously the untrimmed
 * query was sliced at offset 6, corrupting statements with leading whitespace.
 * @param query - The SQL statement
 * @param hints - Per-statement-type hint lists ({select, insert, update, delete})
 * @returns The statement with hints injected, or the original query unchanged
 */
function injectSqlHints(query, hints) {
  if (!hints) {
    return query;
  }
  const trimmedQuery = query.trim();
  const normalizedQuery = trimmedQuery.toUpperCase();
  const hintsByKeyword = {
    SELECT: hints.select,
    INSERT: hints.insert,
    UPDATE: hints.update,
    DELETE: hints.delete
  };
  const keyword = Object.keys(hintsByKeyword).find((k) => normalizedQuery.startsWith(k));
  if (!keyword) {
    return query;
  }
  const queryHints = hintsByKeyword[keyword];
  if (!queryHints || queryHints.length === 0) {
    return query;
  }
  const hintsString = queryHints.join(" ");
  return `${keyword} /*+ ${hintsString} */ ${trimmedQuery.substring(keyword.length)}`;
}
1716
/** Error identifiers that trigger plan diagnostics before rethrowing. */
const QUERY_ERROR_CODES = {
  TIMEOUT: "SQL_QUERY_TIMEOUT",
  OUT_OF_MEMORY_ERRNO: 8175
};
/** Delay giving the statements-summary tables time to populate after a failure. */
const STATEMENTS_SUMMARY_DELAY_MS = 200;
/**
 * Wraps the forge driver with SQL-hint injection and failure diagnostics.
 * On timeout / out-of-memory errors the recent query plans are printed
 * before the error is rethrown unchanged.
 * @param forgeSqlOperation - ORM operations used for plan printing
 * @param options - Per-statement hint configuration
 * @param logRawSqlQuery - Whether to log injected SQL and error details
 */
function createForgeDriverProxy(forgeSqlOperation, options, logRawSqlQuery) {
  return async (query, params, method) => {
    const modifiedQuery = injectSqlHints(query, options);
    if (options && logRawSqlQuery && modifiedQuery !== query) {
      console.debug(`SQL Hints injected: ${modifiedQuery}`);
    }
    const queryStartTime = Date.now();
    try {
      return await forgeDriver(modifiedQuery, params, method);
    } catch (error) {
      const isTimeoutError = error.code === QUERY_ERROR_CODES.TIMEOUT;
      const isOutOfMemoryError = error?.context?.debug?.errno === QUERY_ERROR_CODES.OUT_OF_MEMORY_ERRNO;
      if (isTimeoutError || isOutOfMemoryError) {
        console.error(
          isTimeoutError
            ? ` TIMEOUT detected - Query exceeded time limit`
            : `OUT OF MEMORY detected - Query exceeded memory limit`
        );
        // Let INFORMATION_SCHEMA catch up, then dump plans for the failed window.
        await new Promise((resolve) => setTimeout(resolve, STATEMENTS_SUMMARY_DELAY_MS));
        await printQueriesWithPlan(forgeSqlOperation, Date.now() - queryStartTime);
      }
      if (logRawSqlQuery) {
        console.debug(`SQL Error Details:`, JSON.stringify(error, null, 2));
      }
      throw error;
    }
  };
}
1751
/** Error codes after which the cache must NOT be cleared (data unchanged). */
const NON_CACHE_CLEARING_ERROR_CODES = ["VALIDATION_ERROR", "CONSTRAINT_ERROR"];
/** Error codes after which the cache MUST be cleared (write state uncertain). */
const CACHE_CLEARING_ERROR_CODES = ["DEADLOCK", "LOCK_WAIT_TIMEOUT", "CONNECTION_ERROR"];
const NON_CACHE_CLEARING_PATTERNS = [/validation/i, /constraint/i];
const CACHE_CLEARING_PATTERNS = [/timeout/i, /connection/i];
/**
 * Decides whether a write failure should still evict cached entries.
 * Validation/constraint failures leave data untouched, so the cache is kept;
 * anything else clears it (fail-safe default: true).
 * @param error - The error thrown during the write
 * @returns True when the cache should be cleared
 */
function shouldClearCacheOnError(error) {
  const code = error?.code;
  const message = error?.message;
  if (code && NON_CACHE_CLEARING_ERROR_CODES.includes(code)) {
    return false;
  }
  if (message && NON_CACHE_CLEARING_PATTERNS.some((rx) => rx.test(message))) {
    return false;
  }
  if (code && CACHE_CLEARING_ERROR_CODES.includes(code)) {
    return true;
  }
  if (message && CACHE_CLEARING_PATTERNS.some((rx) => rx.test(message))) {
    return true;
  }
  // Unknown failure: clear the cache rather than risk serving stale data.
  return true;
}
1770
/**
 * Post-processing around a cache-aware write: evicts the request-local
 * cache, records the table for batched eviction, and clears the global
 * cache when running outside a cache context.
 * @param rows - Raw execution result
 * @param onfulfilled - Optional fulfillment callback from the thenable chain
 * @param table - The table that was written to
 * @param options - ORM options
 * @param isCached - Whether this write targets globally cached tables
 */
async function handleSuccessfulExecution(rows, onfulfilled, table, options, isCached) {
  try {
    await evictLocalCacheQuery(table, options);
    await saveTableIfInsideCacheContext(table);
    if (isCached && !cacheApplicationContext.getStore()) {
      await clearCache(table, options);
    }
    return onfulfilled ? onfulfilled(rows) : rows;
  } catch (error) {
    if (shouldClearCacheOnError(error)) {
      await evictLocalCacheQuery(table, options);
      if (isCached) {
        // Best effort: cache clearing must never mask the original error.
        await clearCache(table, options).catch((e) => {
          console.warn("Ignore cache clear errors", e);
        });
      } else {
        await saveTableIfInsideCacheContext(table);
      }
    }
    throw error;
  }
}
/**
 * Forwards a method call on a wrapped builder, re-wrapping any returned
 * builder so the cache-eviction proxy survives method chaining.
 */
function handleFunctionCall(value, target, args, table, options, isCached) {
  const result = value.apply(target, args);
  const isChainableBuilder = typeof result === "object" && result !== null && "execute" in result;
  return isChainableBuilder ? wrapCacheEvictBuilder(result, table, options, isCached) : result;
}
1800
/**
 * Proxies a Drizzle insert/update/delete builder so that awaiting it runs
 * cache bookkeeping (local eviction, cache-context recording, optional
 * global cache clearing) around execution.
 */
const wrapCacheEvictBuilder = (rawBuilder, table, options, isCached) => {
  return new Proxy(rawBuilder, {
    get(target, prop, receiver) {
      if (prop === "then") {
        // Make the proxy awaitable: execute() and pipe the rows through
        // the cache-aware success handler.
        return (onfulfilled, onrejected) =>
          target.execute().then(
            (rows) => handleSuccessfulExecution(rows, onfulfilled, table, options, isCached),
            onrejected
          );
      }
      const member = Reflect.get(target, prop, receiver);
      if (typeof member !== "function") {
        return member;
      }
      return (...args) => handleFunctionCall(member, target, args, table, options, isCached);
    }
  });
};
/** Cache-aware wrapper around db.insert(table). */
function insertAndEvictCacheBuilder(db, table, options, isCached) {
  return wrapCacheEvictBuilder(db.insert(table), table, options, isCached);
}
/** Cache-aware wrapper around db.update(table). */
function updateAndEvictCacheBuilder(db, table, options, isCached) {
  return wrapCacheEvictBuilder(db.update(table), table, options, isCached);
}
/** Cache-aware wrapper around db.delete(table). */
function deleteAndEvictCacheBuilder(db, table, options, isCached) {
  return wrapCacheEvictBuilder(db.delete(table), table, options, isCached);
}
1844
/**
 * Resolves a cacheable select: request-local cache first, then the global
 * cache, then the database (persisting transformed rows to both caches).
 * @param target - The underlying Drizzle builder
 * @param options - ORM options
 * @param cacheTtl - TTL for the global cache entry
 * @param selections - Aliased selection map
 * @param aliasMap - Alias-to-field mapping used to restore driver values
 * @param onfulfilled - Optional fulfillment callback
 * @param onrejected - Optional rejection callback
 */
async function handleCachedQuery(target, options, cacheTtl, selections, aliasMap, onfulfilled, onrejected) {
  const settle = (value) => (onfulfilled ? onfulfilled(value) : value);
  try {
    const localCached = await getQueryLocalCacheQuery(target, options);
    if (localCached) {
      return settle(localCached);
    }
    const cacheResult = await getFromCache(target, options);
    if (cacheResult) {
      return settle(cacheResult);
    }
    const rows = await target.execute();
    const transformed = applyFromDriverTransform(rows, selections, aliasMap);
    await saveQueryLocalCacheQuery(target, transformed, options);
    // Global cache write failures are logged but never fail the query.
    await setCacheResult(target, options, transformed, cacheTtl).catch((cacheError) => {
      console.warn("Cache set error:", cacheError);
    });
    return settle(transformed);
  } catch (error) {
    if (onrejected) {
      return onrejected(error);
    }
    throw error;
  }
}
/**
 * Resolves a non-cacheable select: request-local cache first, then the
 * database (persisting transformed rows to the local cache only).
 */
async function handleNonCachedQuery(target, options, selections, aliasMap, onfulfilled, onrejected) {
  const settle = (value) => (onfulfilled ? onfulfilled(value) : value);
  try {
    const localCached = await getQueryLocalCacheQuery(target, options);
    if (localCached) {
      return settle(localCached);
    }
    const rows = await target.execute();
    const transformed = applyFromDriverTransform(rows, selections, aliasMap);
    await saveQueryLocalCacheQuery(target, transformed, options);
    return settle(transformed);
  } catch (error) {
    if (onrejected) {
      return onrejected(error);
    }
    throw error;
  }
}
1885
/**
 * Builds an aliased select whose results are transformed back to the
 * caller's field names, with optional request-local and global caching.
 * @param db - The Drizzle database instance
 * @param fields - Field map to select (possibly spanning joined tables)
 * @param selectFn - Factory producing the underlying select builder
 * @param useCache - Whether the global cache participates
 * @param options - ORM options
 * @param cacheTtl - Optional TTL override for the global cache
 */
function createAliasedSelectBuilder(db, fields, selectFn, useCache, options, cacheTtl) {
  const { selections, aliasMap } = mapSelectFieldsWithAlias(fields);
  const builder = selectFn(selections);
  const wrapBuilder = (rawBuilder) => {
    return new Proxy(rawBuilder, {
      get(target, prop, receiver) {
        if (prop === "execute") {
          // Explicit execute(): run and un-alias the rows.
          return async (...args) => {
            const rows = await target.execute(...args);
            return applyFromDriverTransform(rows, selections, aliasMap);
          };
        }
        if (prop === "then") {
          // Awaiting the builder routes through the cache-aware handlers.
          return (onfulfilled, onrejected) => {
            if (!useCache) {
              return handleNonCachedQuery(target, options, selections, aliasMap, onfulfilled, onrejected);
            }
            const ttl = cacheTtl ?? options.cacheTTL ?? 120;
            return handleCachedQuery(target, options, ttl, selections, aliasMap, onfulfilled, onrejected);
          };
        }
        const member = Reflect.get(target, prop, receiver);
        if (typeof member !== "function") {
          return member;
        }
        // Re-wrap chained builder methods (where/orderBy/...) so the proxy sticks.
        return (...args) => {
          const result = member.apply(target, args);
          if (typeof result === "object" && result !== null && "execute" in result) {
            return wrapBuilder(result);
          }
          return result;
        };
      }
    });
  };
  return wrapBuilder(builder);
}
1938
/** Defaults applied when the caller does not configure the ORM explicitly. */
const DEFAULT_OPTIONS = {
  logRawSqlQuery: false,
  disableOptimisticLocking: false,
  cacheTTL: 120,
  cacheWrapTable: true,
  cacheEntityQueryName: "sql",
  cacheEntityExpirationName: "expiration",
  cacheEntityDataName: "data"
};
/**
 * Creates an executor for raw string / SQL-wrapper queries with
 * request-local caching and, optionally, global caching.
 * @param db - The Drizzle database instance
 * @param options - Resolved ORM options
 * @param useGlobalCache - Whether results go through the global cache
 */
function createRawQueryExecutor(db, options, useGlobalCache = false) {
  return async function (query, cacheTtl) {
    // Normalize both SQL-wrapper objects and plain strings to { sql, params }.
    let sql2;
    if (isSQLWrapper(query)) {
      sql2 = db.dialect.sqlToQuery(query);
    } else {
      sql2 = { sql: query, params: [] };
    }
    const localCacheResult = await getQueryLocalCacheQuery(sql2, options);
    if (localCacheResult) {
      return localCacheResult;
    }
    if (useGlobalCache) {
      const cacheResult = await getFromCache({ toSQL: () => sql2 }, options);
      if (cacheResult) {
        return cacheResult;
      }
    }
    const results = await db.execute(query);
    await saveQueryLocalCacheQuery(sql2, results, options);
    if (useGlobalCache) {
      await setCacheResult(
        { toSQL: () => sql2 },
        options,
        results,
        cacheTtl ?? options.cacheTTL ?? 120
      );
    }
    return results;
  };
}
1982
/**
 * Augments a Drizzle database instance with aliased-select helpers,
 * cache-aware CRUD builders, and raw query executors.
 * @param db - The Drizzle database instance to patch (mutated and returned)
 * @param options - User-supplied ORM options (merged over DEFAULT_OPTIONS)
 * @returns The same db instance, extended
 */
function patchDbWithSelectAliased(db, options) {
  const newOptions = { ...DEFAULT_OPTIONS, ...options };
  // Shared factory for the four aliased-select variants.
  const aliased = (selectFn, useCache) => (fields, cacheTtl) =>
    createAliasedSelectBuilder(db, fields, selectFn, useCache, newOptions, cacheTtl);
  db.selectAliased = aliased((selections) => db.select(selections), false);
  db.selectAliasedCacheable = aliased((selections) => db.select(selections), true);
  db.selectAliasedDistinct = aliased((selections) => db.selectDistinct(selections), false);
  db.selectAliasedDistinctCacheable = aliased((selections) => db.selectDistinct(selections), true);
  db.selectFrom = (table) => db.selectAliased(getTableColumns(table)).from(table);
  db.selectFromCacheable = (table, cacheTtl) =>
    db.selectAliasedCacheable(getTableColumns(table), cacheTtl).from(table);
  db.selectDistinctFrom = (table) => db.selectAliasedDistinct(getTableColumns(table)).from(table);
  db.selectDistinctFromCacheable = (table, cacheTtl) =>
    db.selectAliasedDistinctCacheable(getTableColumns(table), cacheTtl).from(table);
  db.insertWithCacheContext = (table) => insertAndEvictCacheBuilder(db, table, newOptions, false);
  db.insertAndEvictCache = (table) => insertAndEvictCacheBuilder(db, table, newOptions, true);
  db.updateWithCacheContext = (table) => updateAndEvictCacheBuilder(db, table, newOptions, false);
  db.updateAndEvictCache = (table) => updateAndEvictCacheBuilder(db, table, newOptions, true);
  db.deleteWithCacheContext = (table) => deleteAndEvictCacheBuilder(db, table, newOptions, false);
  db.deleteAndEvictCache = (table) => deleteAndEvictCacheBuilder(db, table, newOptions, true);
  db.executeQuery = createRawQueryExecutor(db, newOptions, false);
  db.executeQueryCacheable = createRawQueryExecutor(db, newOptions, true);
  return db;
}
2056
class ForgeSQLAnalyseOperation {
  forgeOperations;
  /**
   * Creates a new instance of ForgeSQLAnalyseOperation.
   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
   */
  constructor(forgeOperations) {
    this.forgeOperations = forgeOperations;
    // Bound because callers pass this method around as a bare callback.
    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
  }
  /**
   * Maps a raw EXPLAIN result row to the camelCase ExplainAnalyzeRow shape.
   * Shared by explainRaw and explainAnalyzeRaw (previously duplicated verbatim).
   */
  #mapExplainRow(row) {
    return {
      id: row.id,
      estRows: row.estRows,
      actRows: row.actRows,
      task: row.task,
      accessObject: row["access object"],
      executionInfo: row["execution info"],
      operatorInfo: row["operator info"],
      memory: row.memory,
      disk: row.disk
    };
  }
  /**
   * Executes EXPLAIN on a raw SQL query.
   * @param {string} query - The SQL query to analyze
   * @param {unknown[]} bindParams - The query parameters
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan rows
   */
  async explainRaw(query, bindParams) {
    const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ${query}`, bindParams);
    return results.map((row) => this.#mapExplainRow(row));
  }
  /**
   * Executes EXPLAIN on a Drizzle query.
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan rows
   */
  async explain(query) {
    const { sql: sql2, params } = query.toSQL();
    return this.explainRaw(sql2, params);
  }
  /**
   * Executes EXPLAIN ANALYZE on a raw SQL query.
   * @param {string} query - The SQL query to analyze
   * @param {unknown[]} bindParams - The query parameters
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan rows
   */
  async explainAnalyzeRaw(query, bindParams) {
    const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ANALYZE ${query}`, bindParams);
    return results.map((row) => this.#mapExplainRow(row));
  }
  /**
   * Executes EXPLAIN ANALYZE on a Drizzle query.
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan rows
   */
  async explainAnalyze(query) {
    const { sql: sql2, params } = query.toSQL();
    return this.explainAnalyzeRaw(sql2, params);
  }
  /**
   * Decodes a query execution plan from its textual representation into rows
   * keyed by camelCased header names.
   * @param {string} input - The raw execution plan string
   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
   */
  decodedPlan(input) {
    if (!input) {
      return [];
    }
    const lines = input.trim().split("\n");
    if (lines.length < 2) return [];
    // NOTE(review): columns are split on a single space here; TiDB plan text
    // is commonly tab-separated - confirm the delimiter against real output.
    const headersRaw = lines[0].split(" ").map((h) => h.trim()).filter(Boolean);
    const headers = headersRaw.map((h) =>
      h.replace(/\s+/g, " ").replace(/[-\s]+(.)?/g, (_, c) => (c ? c.toUpperCase() : "")).replace(/^./, (s) => s.toLowerCase())
    );
    return lines.slice(1).map((line) => {
      const values = line.split(" ").map((s) => s.trim()).filter(Boolean);
      const row = {};
      headers.forEach((key, i) => {
        row[key] = values[i] ?? "";
      });
      return row;
    });
  }
  /**
   * Normalizes a raw slow query row into a camelCase structured format,
   * including a parsed execution plan.
   * @param {SlowQueryRaw} row - The raw slow query data
   * @returns {SlowQueryNormalized} The normalized slow query data
   */
  normalizeSlowQuery(row) {
    return {
      time: row.Time,
      txnStartTs: row.Txn_start_ts,
      user: row.User,
      host: row.Host,
      connId: row.Conn_ID,
      db: row.DB,
      query: row.Query,
      digest: row.Digest,
      queryTime: row.Query_time,
      compileTime: row.Compile_time,
      optimizeTime: row.Optimize_time,
      processTime: row.Process_time,
      waitTime: row.Wait_time,
      parseTime: row.Parse_time,
      rewriteTime: row.Rewrite_time,
      copTime: row.Cop_time,
      copProcAvg: row.Cop_proc_avg,
      copProcMax: row.Cop_proc_max,
      copProcP90: row.Cop_proc_p90,
      copProcAddr: row.Cop_proc_addr,
      copWaitAvg: row.Cop_wait_avg,
      copWaitMax: row.Cop_wait_max,
      copWaitP90: row.Cop_wait_p90,
      copWaitAddr: row.Cop_wait_addr,
      memMax: row.Mem_max,
      diskMax: row.Disk_max,
      totalKeys: row.Total_keys,
      processKeys: row.Process_keys,
      requestCount: row.Request_count,
      kvTotal: row.KV_total,
      pdTotal: row.PD_total,
      resultRows: row.Result_rows,
      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
      plan: row.Plan,
      binaryPlan: row.Binary_plan,
      planDigest: row.Plan_digest,
      parsedPlan: this.decodedPlan(row.Plan)
    };
  }
  /**
   * Builds a SQL query over the cluster statement summary (current + history).
   * SECURITY NOTE: table names and formatted dates are interpolated directly
   * into the SQL string; callers must only pass trusted identifiers/dates.
   * @param {string[]} tables - The tables to analyze
   * @param {Date} [from] - The start date for the analysis
   * @param {Date} [to] - The end date for the analysis
   * @returns {string} The SQL query for cluster statement history
   */
  buildClusterStatementQuery(tables, from, to) {
    const formatDateTime2 = (date) => DateTime.fromJSDate(date).toFormat("yyyy-LL-dd'T'HH:mm:ss.SSS");
    const tableConditions = tables.map((table) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table}', '%')`).join(" OR ");
    const timeConditions = [];
    if (from) {
      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime2(from)}'`);
    }
    if (to) {
      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime2(to)}'`);
    }
    let whereClauses;
    if (tableConditions?.length) {
      whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
    } else {
      whereClauses = timeConditions;
    }
    return `
        SELECT *
        FROM (
          SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
          UNION ALL
          SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
        ) AS combined
        ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
      `;
  }
  /**
   * Retrieves and normalizes slow queries from information_schema.slow_query.
   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
   */
  // CLUSTER_SLOW_QUERY STATISTICS
  async analyzeSlowQueries() {
    const results = await this.forgeOperations.fetch().executeRawSQL(`
        SELECT *
        FROM information_schema.slow_query
        ORDER BY time DESC
      `);
    return results.map((row) => this.normalizeSlowQuery(row));
  }
  /**
   * Converts a cluster statement row to camelCase and attaches the parsed plan.
   * @param {Record<string, any>} input - The input row data
   * @returns {ClusterStatementRowCamelCase} The converted row data
   */
  mapToCamelCaseClusterStatement(input) {
    if (!input) {
      return {};
    }
    const result = {};
    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
    for (const key in input) {
      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
      result[camelKey] = input[key];
    }
    return result;
  }
  /**
   * Analyzes query history for specific tables using raw table names.
   * @param {string[]} tables - The table names to analyze
   * @param {Date} [fromDate] - The start date for the analysis
   * @param {Date} [toDate] - The end date for the analysis
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed history
   */
  async analyzeQueriesHistoryRaw(tables, fromDate, toDate) {
    const results = await this.forgeOperations.fetch().executeRawSQL(
      this.buildClusterStatementQuery(tables ?? [], fromDate, toDate)
    );
    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
  }
  /**
   * Analyzes query history for specific tables using Drizzle table objects.
   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
   * @param {Date} [fromDate] - The start date for the analysis
   * @param {Date} [toDate] - The end date for the analysis
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed history
   */
  async analyzeQueriesHistory(tables, fromDate, toDate) {
    const tableNames = tables?.map((table) => getTableName(table)) ?? [];
    return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
  }
}
2284
/**
 * Cache-aware wrapper around the versioned CRUD operations.
 *
 * Every mutation delegates to `forgeOperations.modifyWithVersioning()` —
 * keeping optimistic locking / version-field handling — and evicts the
 * affected table's cache only after the operation succeeds.
 */
class ForgeSQLCacheOperations {
  options;
  forgeOperations;
  /**
   * Creates a new instance of ForgeSQLCacheOperations.
   *
   * @param options - Configuration options for the ORM
   * @param forgeOperations - The ForgeSQL operations instance
   */
  constructor(options, forgeOperations) {
    this.options = options;
    this.forgeOperations = forgeOperations;
  }
  /**
   * Evicts cache for multiple tables using Drizzle table objects.
   *
   * @param tables - Array of Drizzle table objects to clear cache for
   * @returns Promise that resolves when cache eviction is complete
   * @throws Error if cacheEntityName is not configured
   */
  async evictCacheEntities(tables) {
    this.validateCacheConfiguration();
    const tableNames = tables.map((table) => getTableName(table));
    await this.evictCache(tableNames);
  }
  /**
   * Evicts cache for multiple tables by their names.
   *
   * @param tables - Array of table names to clear cache for
   * @returns Promise that resolves when cache eviction is complete
   * @throws Error if cacheEntityName is not configured
   */
  async evictCache(tables) {
    this.validateCacheConfiguration();
    await clearTablesCache(tables, this.options);
  }
  /**
   * Runs a versioned mutation, then clears the schema's cache.
   * Shared implementation for insert/delete/update variants below.
   *
   * @param schema - The table schema whose cache is evicted afterwards
   * @param mutation - Callback receiving the versioned CRUD operations
   * @returns Promise resolving to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   */
  async #mutateAndEvict(schema, mutation) {
    this.validateCacheConfiguration();
    const affectedRows = await mutation(this.forgeOperations.modifyWithVersioning());
    await clearCache(schema, this.options);
    return affectedRows;
  }
  /**
   * Inserts records with optimistic locking/versioning and automatically
   * evicts the table cache after a successful insert.
   *
   * @param schema - The table schema
   * @param models - Array of entities to insert
   * @param updateIfExists - Whether to update existing records
   * @returns Promise that resolves to the number of inserted rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async insert(schema, models, updateIfExists) {
    return this.#mutateAndEvict(schema, (crud) => crud.insert(schema, models, updateIfExists));
  }
  /**
   * Deletes a record by ID with optimistic locking/versioning and
   * automatically evicts the table cache after a successful delete.
   *
   * @param id - The ID of the record to delete
   * @param schema - The table schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async deleteById(id, schema) {
    return this.#mutateAndEvict(schema, (crud) => crud.deleteById(id, schema));
  }
  /**
   * Updates a record by ID with optimistic locking/versioning and
   * automatically evicts the table cache after a successful update.
   *
   * @param entity - The entity with updated values (must include primary key)
   * @param schema - The table schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async updateById(entity, schema) {
    return this.#mutateAndEvict(schema, (crud) => crud.updateById(entity, schema));
  }
  /**
   * Updates fields based on conditions with optimistic locking/versioning
   * and automatically evicts the table cache after a successful update.
   *
   * @param updateData - The data to update
   * @param schema - The table schema
   * @param where - Optional WHERE conditions
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async updateFields(updateData, schema, where) {
    return this.#mutateAndEvict(schema, (crud) => crud.updateFields(updateData, schema, where));
  }
  /**
   * Executes a query with caching support: returns the cached result when
   * present, otherwise runs the query and stores the result.
   *
   * @param query - The Drizzle query to execute
   * @param cacheTtl - Optional cache TTL override
   * @returns Promise that resolves to the query results
   * @throws Error if cacheEntityName is not configured
   */
  async executeQuery(query, cacheTtl) {
    this.validateCacheConfiguration();
    // NOTE: the query object itself is used as the cache key source.
    const cached = await getFromCache(query, this.options);
    if (cached) {
      return cached;
    }
    const results = await query;
    const effectiveTtl = cacheTtl ?? this.options.cacheTTL ?? 60;
    await setCacheResult(query, this.options, results, effectiveTtl);
    return results;
  }
  /**
   * Validates that cache configuration is properly set up.
   *
   * @throws Error if cacheEntityName is not configured
   * @private
   */
  validateCacheConfiguration() {
    if (!this.options.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
  }
}
2437
- class ForgeSQLORMImpl {
2438
- static instance = null;
2439
- drizzle;
2440
- crudOperations;
2441
- fetchOperations;
2442
- analyzeOperations;
2443
- cacheOperations;
2444
- options;
2445
- /**
2446
- * Private constructor to enforce singleton behavior.
2447
- * @param options - Options for configuring ForgeSQL ORM behavior.
2448
- */
2449
- constructor(options) {
2450
- try {
2451
- const newOptions = options ?? {
2452
- logRawSqlQuery: false,
2453
- logCache: false,
2454
- disableOptimisticLocking: false,
2455
- cacheWrapTable: true,
2456
- cacheTTL: 120,
2457
- cacheEntityQueryName: "sql",
2458
- cacheEntityExpirationName: "expiration",
2459
- cacheEntityDataName: "data"
2460
- };
2461
- this.options = newOptions;
2462
- if (newOptions.logRawSqlQuery) {
2463
- console.debug("Initializing ForgeSQLORM...");
2464
- }
2465
- const proxiedDriver = createForgeDriverProxy(
2466
- this,
2467
- newOptions.hints,
2468
- newOptions.logRawSqlQuery
2469
- );
2470
- this.drizzle = patchDbWithSelectAliased(
2471
- drizzle(proxiedDriver, { logger: newOptions.logRawSqlQuery }),
2472
- newOptions
2473
- );
2474
- this.crudOperations = new ForgeSQLCrudOperations(this, newOptions);
2475
- this.fetchOperations = new ForgeSQLSelectOperations(newOptions);
2476
- this.analyzeOperations = new ForgeSQLAnalyseOperation(this);
2477
- this.cacheOperations = new ForgeSQLCacheOperations(newOptions, this);
2478
- } catch (error) {
2479
- console.error("ForgeSQLORM initialization failed:", error);
2480
- throw error;
2481
- }
2482
- }
2483
- /**
2484
- * Executes a query and provides access to execution metadata with performance monitoring.
2485
- * This method allows you to capture detailed information about query execution
2486
- * including database execution time, response size, and query analysis capabilities.
2487
- *
2488
- * The method aggregates metrics across all database operations within the query function,
2489
- * making it ideal for monitoring resolver performance and detecting performance issues.
2490
- *
2491
- * @template T - The return type of the query
2492
- * @param query - A function that returns a Promise with the query result. Can contain multiple database operations.
2493
- * @param onMetadata - Callback function that receives aggregated execution metadata
2494
- * @param onMetadata.totalDbExecutionTime - Total database execution time across all operations in the query function (in milliseconds)
2495
- * @param onMetadata.totalResponseSize - Total response size across all operations (in bytes)
2496
- * @param onMetadata.printQueries - Function to analyze and print query execution plans from CLUSTER_STATEMENTS_SUMMARY
2497
- * @returns Promise with the query result
2498
- *
2499
- * @example
2500
- * ```typescript
2501
- * // Basic usage with performance monitoring
2502
- * const result = await forgeSQL.executeWithMetadata(
2503
- * async () => {
2504
- * const users = await forgeSQL.selectFrom(usersTable);
2505
- * const orders = await forgeSQL.selectFrom(ordersTable).where(eq(ordersTable.userId, usersTable.id));
2506
- * return { users, orders };
2507
- * },
2508
- * (totalDbExecutionTime, totalResponseSize, printQueries) => {
2509
- * const threshold = 500; // ms baseline for this resolver
2510
- *
2511
- * if (totalDbExecutionTime > threshold * 1.5) {
2512
- * console.warn(`[Performance Warning] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
2513
- * await printQueries(); // Analyze and print query execution plans
2514
- * } else if (totalDbExecutionTime > threshold) {
2515
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
2516
- * }
2517
- *
2518
- * console.log(`DB response size: ${totalResponseSize} bytes`);
2519
- * }
2520
- * );
2521
- * ```
2522
- *
2523
- * @example
2524
- * ```typescript
2525
- * // Resolver with performance monitoring
2526
- * resolver.define("fetch", async (req: Request) => {
2527
- * try {
2528
- * return await forgeSQL.executeWithMetadata(
2529
- * async () => {
2530
- * // Resolver logic with multiple queries
2531
- * const users = await forgeSQL.selectFrom(demoUsers);
2532
- * const orders = await forgeSQL.selectFrom(demoOrders)
2533
- * .where(eq(demoOrders.userId, demoUsers.id));
2534
- * return { users, orders };
2535
- * },
2536
- * async (totalDbExecutionTime, totalResponseSize, printQueries) => {
2537
- * const threshold = 500; // ms baseline for this resolver
2538
- *
2539
- * if (totalDbExecutionTime > threshold * 1.5) {
2540
- * console.warn(`[Performance Warning fetch] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
2541
- * await printQueries(); // Optionally log or capture diagnostics for further analysis
2542
- * } else if (totalDbExecutionTime > threshold) {
2543
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
2544
- * }
2545
- *
2546
- * console.log(`DB response size: ${totalResponseSize} bytes`);
2547
- * }
2548
- * );
2549
- * } catch (e) {
2550
- * const error = e?.cause?.debug?.sqlMessage ?? e?.cause;
2551
- * console.error(error, e);
2552
- * throw error;
2553
- * }
2554
- * });
2555
- * ```
2556
- *
2557
- * @note **Important**: When multiple resolvers are running concurrently, their query data may also appear in `printQueries()` analysis, as it queries the global `CLUSTER_STATEMENTS_SUMMARY` table.
2558
- */
2559
- async executeWithMetadata(query, onMetadata) {
2560
- return metadataQueryContext.run(
2561
- {
2562
- totalDbExecutionTime: 0,
2563
- totalResponseSize: 0,
2564
- beginTime: /* @__PURE__ */ new Date(),
2565
- forgeSQLORM: this,
2566
- printQueriesWithPlan: async () => {
2567
- return;
2568
- }
2569
- },
2570
- async () => {
2571
- const result = await query();
2572
- const metadata = await getLastestMetadata();
2573
- try {
2574
- if (metadata) {
2575
- await onMetadata(
2576
- metadata.totalDbExecutionTime,
2577
- metadata.totalResponseSize,
2578
- metadata.printQueriesWithPlan
2579
- );
2580
- }
2581
- } catch (e) {
2582
- console.error(
2583
- "[ForgeSQLORM][executeWithMetadata] Failed to run onMetadata callback",
2584
- {
2585
- errorMessage: e?.message,
2586
- errorStack: e?.stack,
2587
- totalDbExecutionTime: metadata?.totalDbExecutionTime,
2588
- totalResponseSize: metadata?.totalResponseSize,
2589
- beginTime: metadata?.beginTime
2590
- },
2591
- e
2592
- );
2593
- }
2594
- return result;
2595
- }
2596
- );
2597
- }
2598
- /**
2599
- * Executes operations within a cache context that collects cache eviction events.
2600
- * All clearCache calls within the context are collected and executed in batch at the end.
2601
- * Queries executed within this context will bypass cache for tables that were marked for clearing.
2602
- *
2603
- * This is useful for:
2604
- * - Batch operations that affect multiple tables
2605
- * - Transaction-like operations where you want to clear cache only at the end
2606
- * - Performance optimization by reducing cache clear operations
2607
- *
2608
- * @param cacheContext - Function containing operations that may trigger cache evictions
2609
- * @returns Promise that resolves when all operations and cache clearing are complete
2610
- *
2611
- * @example
2612
- * ```typescript
2613
- * await forgeSQL.executeWithCacheContext(async () => {
2614
- * await forgeSQL.modifyWithVersioning().insert(users, userData);
2615
- * await forgeSQL.modifyWithVersioning().insert(orders, orderData);
2616
- * // Cache for both users and orders tables will be cleared at the end
2617
- * });
2618
- * ```
2619
- */
2620
- executeWithCacheContext(cacheContext) {
2621
- return this.executeWithCacheContextAndReturnValue(cacheContext);
2622
- }
2623
- /**
2624
- * Executes operations within a cache context and returns a value.
2625
- * All clearCache calls within the context are collected and executed in batch at the end.
2626
- * Queries executed within this context will bypass cache for tables that were marked for clearing.
2627
- *
2628
- * @param cacheContext - Function containing operations that may trigger cache evictions
2629
- * @returns Promise that resolves to the return value of the cacheContext function
2630
- *
2631
- * @example
2632
- * ```typescript
2633
- * const result = await forgeSQL.executeWithCacheContextAndReturnValue(async () => {
2634
- * await forgeSQL.modifyWithVersioning().insert(users, userData);
2635
- * return await forgeSQL.fetch().executeQueryOnlyOne(selectUserQuery);
2636
- * });
2637
- * ```
2638
- */
2639
- async executeWithCacheContextAndReturnValue(cacheContext) {
2640
- return await this.executeWithLocalCacheContextAndReturnValue(
2641
- async () => await cacheApplicationContext.run(
2642
- cacheApplicationContext.getStore() ?? { tables: /* @__PURE__ */ new Set() },
2643
- async () => {
2644
- try {
2645
- return await cacheContext();
2646
- } finally {
2647
- await clearTablesCache(
2648
- Array.from(cacheApplicationContext.getStore()?.tables ?? []),
2649
- this.options
2650
- );
2651
- }
2652
- }
2653
- )
2654
- );
2655
- }
2656
- /**
2657
- * Executes operations within a local cache context and returns a value.
2658
- * This provides in-memory caching for select queries within a single request scope.
2659
- *
2660
- * @param cacheContext - Function containing operations that will benefit from local caching
2661
- * @returns Promise that resolves to the return value of the cacheContext function
2662
- */
2663
- async executeWithLocalCacheContextAndReturnValue(cacheContext) {
2664
- return await localCacheApplicationContext.run(
2665
- localCacheApplicationContext.getStore() ?? { cache: {} },
2666
- async () => {
2667
- return await cacheContext();
2668
- }
2669
- );
2670
- }
2671
- /**
2672
- * Executes operations within a local cache context.
2673
- * This provides in-memory caching for select queries within a single request scope.
2674
- *
2675
- * @param cacheContext - Function containing operations that will benefit from local caching
2676
- * @returns Promise that resolves when all operations are complete
2677
- */
2678
- executeWithLocalContext(cacheContext) {
2679
- return this.executeWithLocalCacheContextAndReturnValue(cacheContext);
2680
- }
2681
- /**
2682
- * Creates an insert query builder.
2683
- *
2684
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2685
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
2686
- *
2687
- * @param table - The table to insert into
2688
- * @returns Insert query builder (no versioning, no cache management)
2689
- */
2690
- insert(table) {
2691
- return this.drizzle.insertWithCacheContext(table);
2692
- }
2693
- /**
2694
- * Creates an insert query builder that automatically evicts cache after execution.
2695
- *
2696
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2697
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
2698
- *
2699
- * @param table - The table to insert into
2700
- * @returns Insert query builder with automatic cache eviction (no versioning)
2701
- */
2702
- insertAndEvictCache(table) {
2703
- return this.drizzle.insertAndEvictCache(table);
2704
- }
2705
- /**
2706
- * Creates an update query builder that automatically evicts cache after execution.
2707
- *
2708
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2709
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
2710
- *
2711
- * @param table - The table to update
2712
- * @returns Update query builder with automatic cache eviction (no versioning)
2713
- */
2714
- updateAndEvictCache(table) {
2715
- return this.drizzle.updateAndEvictCache(table);
2716
- }
2717
- /**
2718
- * Creates an update query builder.
2719
- *
2720
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2721
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
2722
- *
2723
- * @param table - The table to update
2724
- * @returns Update query builder (no versioning, no cache management)
2725
- */
2726
- update(table) {
2727
- return this.drizzle.updateWithCacheContext(table);
2728
- }
2729
- /**
2730
- * Creates a delete query builder.
2731
- *
2732
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2733
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
2734
- *
2735
- * @param table - The table to delete from
2736
- * @returns Delete query builder (no versioning, no cache management)
2737
- */
2738
- delete(table) {
2739
- return this.drizzle.deleteWithCacheContext(table);
2740
- }
2741
- /**
2742
- * Creates a delete query builder that automatically evicts cache after execution.
2743
- *
2744
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2745
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
2746
- *
2747
- * @param table - The table to delete from
2748
- * @returns Delete query builder with automatic cache eviction (no versioning)
2749
- */
2750
- deleteAndEvictCache(table) {
2751
- return this.drizzle.deleteAndEvictCache(table);
2752
- }
2753
- /**
2754
- * Create the modify operations instance.
2755
- * @returns modify operations.
2756
- */
2757
- modifyWithVersioning() {
2758
- return this.crudOperations;
2759
- }
2760
- /**
2761
- * Returns the singleton instance of ForgeSQLORMImpl.
2762
- * @param options - Options for configuring ForgeSQL ORM behavior.
2763
- * @returns The singleton instance of ForgeSQLORMImpl.
2764
- */
2765
- static getInstance(options) {
2766
- ForgeSQLORMImpl.instance ??= new ForgeSQLORMImpl(options);
2767
- return ForgeSQLORMImpl.instance;
2768
- }
2769
- /**
2770
- * Retrieves the fetch operations instance.
2771
- * @returns Fetch operations.
2772
- */
2773
- fetch() {
2774
- return this.fetchOperations;
2775
- }
2776
- /**
2777
- * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
2778
- * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
2779
- */
2780
- analyze() {
2781
- return this.analyzeOperations;
2782
- }
2783
- /**
2784
- * Provides schema-level SQL operations with optimistic locking/versioning and automatic cache eviction.
2785
- *
2786
- * This method returns operations that use `modifyWithVersioning()` internally, providing:
2787
- * - Optimistic locking support
2788
- * - Automatic version field management
2789
- * - Cache eviction after successful operations
2790
- *
2791
- * @returns {ForgeSQLCacheOperations} Interface for executing versioned SQL operations with cache management
2792
- */
2793
- modifyWithVersioningAndEvictCache() {
2794
- return this.cacheOperations;
2795
- }
2796
- /**
2797
- * Returns a Drizzle query builder instance.
2798
- *
2799
- * ⚠️ IMPORTANT: This method should be used ONLY for query building purposes.
2800
- * The returned instance should NOT be used for direct database connections or query execution.
2801
- * All database operations should be performed through Forge SQL's executeRawSQL or executeRawUpdateSQL methods.
2802
- *
2803
- * @returns A Drizzle query builder instance for query construction only.
2804
- */
2805
- getDrizzleQueryBuilder() {
2806
- return this.drizzle;
2807
- }
2808
- /**
2809
- * Creates a select query with unique field aliases to prevent field name collisions in joins.
2810
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2811
- *
2812
- * @template TSelection - The type of the selected fields
2813
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2814
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
2815
- * @throws {Error} If fields parameter is empty
2816
- * @example
2817
- * ```typescript
2818
- * await forgeSQL
2819
- * .select({user: users, order: orders})
2820
- * .from(orders)
2821
- * .innerJoin(users, eq(orders.userId, users.id));
2822
- * ```
2823
- */
2824
- select(fields) {
2825
- if (!fields) {
2826
- throw new Error("fields is empty");
2827
- }
2828
- return this.drizzle.selectAliased(fields);
2829
- }
2830
- /**
2831
- * Creates a distinct select query with unique field aliases to prevent field name collisions in joins.
2832
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2833
- *
2834
- * @template TSelection - The type of the selected fields
2835
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2836
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
2837
- * @throws {Error} If fields parameter is empty
2838
- * @example
2839
- * ```typescript
2840
- * await forgeSQL
2841
- * .selectDistinct({user: users, order: orders})
2842
- * .from(orders)
2843
- * .innerJoin(users, eq(orders.userId, users.id));
2844
- * ```
2845
- */
2846
- selectDistinct(fields) {
2847
- if (!fields) {
2848
- throw new Error("fields is empty");
2849
- }
2850
- return this.drizzle.selectAliasedDistinct(fields);
2851
- }
2852
- /**
2853
- * Creates a cacheable select query with unique field aliases to prevent field name collisions in joins.
2854
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2855
- *
2856
- * @template TSelection - The type of the selected fields
2857
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2858
- * @param {number} cacheTTL - cache ttl optional default is 60 sec.
2859
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
2860
- * @throws {Error} If fields parameter is empty
2861
- * @example
2862
- * ```typescript
2863
- * await forgeSQL
2864
- * .selectCacheable({user: users, order: orders},60)
2865
- * .from(orders)
2866
- * .innerJoin(users, eq(orders.userId, users.id));
2867
- * ```
2868
- */
2869
- selectCacheable(fields, cacheTTL) {
2870
- if (!fields) {
2871
- throw new Error("fields is empty");
2872
- }
2873
- return this.drizzle.selectAliasedCacheable(fields, cacheTTL);
2874
- }
2875
- /**
2876
- * Creates a cacheable distinct select query with unique field aliases to prevent field name collisions in joins.
2877
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2878
- *
2879
- * @template TSelection - The type of the selected fields
2880
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2881
- * @param {number} cacheTTL - cache ttl optional default is 60 sec.
2882
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
2883
- * @throws {Error} If fields parameter is empty
2884
- * @example
2885
- * ```typescript
2886
- * await forgeSQL
2887
- * .selectDistinctCacheable({user: users, order: orders}, 60)
2888
- * .from(orders)
2889
- * .innerJoin(users, eq(orders.userId, users.id));
2890
- * ```
2891
- */
2892
- selectDistinctCacheable(fields, cacheTTL) {
2893
- if (!fields) {
2894
- throw new Error("fields is empty");
2895
- }
2896
- return this.drizzle.selectAliasedDistinctCacheable(fields, cacheTTL);
2897
- }
2898
- /**
2899
- * Creates a select query builder for all columns from a table with field aliasing support.
2900
- * This is a convenience method that automatically selects all columns from the specified table.
2901
- *
2902
- * @template T - The type of the table
2903
- * @param table - The table to select from
2904
- * @returns Select query builder with all table columns and field aliasing support
2905
- * @example
2906
- * ```typescript
2907
- * const users = await forgeSQL.selectFrom(userTable).where(eq(userTable.id, 1));
2908
- * ```
2909
- */
2910
- selectFrom(table) {
2911
- return this.drizzle.selectFrom(table);
2912
- }
2913
- /**
2914
- * Creates a select distinct query builder for all columns from a table with field aliasing support.
2915
- * This is a convenience method that automatically selects all distinct columns from the specified table.
2916
- *
2917
- * @template T - The type of the table
2918
- * @param table - The table to select from
2919
- * @returns Select distinct query builder with all table columns and field aliasing support
2920
- * @example
2921
- * ```typescript
2922
- * const uniqueUsers = await forgeSQL.selectDistinctFrom(userTable).where(eq(userTable.status, 'active'));
2923
- * ```
2924
- */
2925
- selectDistinctFrom(table) {
2926
- return this.drizzle.selectDistinctFrom(table);
2927
- }
2928
- /**
2929
- * Creates a cacheable select query builder for all columns from a table with field aliasing and caching support.
2930
- * This is a convenience method that automatically selects all columns from the specified table with caching enabled.
2931
- *
2932
- * @template T - The type of the table
2933
- * @param table - The table to select from
2934
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
2935
- * @returns Select query builder with all table columns, field aliasing, and caching support
2936
- * @example
2937
- * ```typescript
2938
- * const users = await forgeSQL.selectCacheableFrom(userTable, 300).where(eq(userTable.id, 1));
2939
- * ```
2940
- */
2941
- selectCacheableFrom(table, cacheTTL) {
2942
- return this.drizzle.selectFromCacheable(table, cacheTTL);
2943
- }
2944
- /**
2945
- * Creates a cacheable select distinct query builder for all columns from a table with field aliasing and caching support.
2946
- * This is a convenience method that automatically selects all distinct columns from the specified table with caching enabled.
2947
- *
2948
- * @template T - The type of the table
2949
- * @param table - The table to select from
2950
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
2951
- * @returns Select distinct query builder with all table columns, field aliasing, and caching support
2952
- * @example
2953
- * ```typescript
2954
- * const uniqueUsers = await forgeSQL.selectDistinctCacheableFrom(userTable, 300).where(eq(userTable.status, 'active'));
2955
- * ```
2956
- */
2957
- selectDistinctCacheableFrom(table, cacheTTL) {
2958
- return this.drizzle.selectDistinctFromCacheable(table, cacheTTL);
2959
- }
2960
- /**
2961
- * Executes a raw SQL query with local cache support.
2962
- * This method provides local caching for raw SQL queries within the current invocation context.
2963
- * Results are cached locally and will be returned from cache on subsequent identical queries.
2964
- *
2965
- * @param query - The SQL query to execute (SQLWrapper or string)
2966
- * @returns Promise with query results
2967
- * @example
2968
- * ```typescript
2969
- * // Using SQLWrapper
2970
- * const result = await forgeSQL.execute(sql`SELECT * FROM users WHERE id = ${userId}`);
2971
- *
2972
- * // Using string
2973
- * const result = await forgeSQL.execute("SELECT * FROM users WHERE status = 'active'");
2974
- * ```
2975
- */
2976
- execute(query) {
2977
- return this.drizzle.executeQuery(query);
2978
- }
2979
- /**
2980
- * Executes a Data Definition Language (DDL) SQL query.
2981
- * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
2982
- *
2983
- * This method is specifically designed for DDL operations and provides:
2984
- * - Proper operation type context for DDL queries
2985
- * - No caching (DDL operations should not be cached)
2986
- * - Direct execution without query optimization
2987
- *
2988
- * @template T - The expected return type of the query result
2989
- * @param query - The DDL SQL query to execute (SQLWrapper or string)
2990
- * @returns Promise with query results
2991
- * @throws {Error} If the DDL operation fails
2992
- *
2993
- * @example
2994
- * ```typescript
2995
- * // Create a new table
2996
- * await forgeSQL.executeDDL(`
2997
- * CREATE TABLE users (
2998
- * id INT PRIMARY KEY AUTO_INCREMENT,
2999
- * name VARCHAR(255) NOT NULL,
3000
- * email VARCHAR(255) UNIQUE
3001
- * )
3002
- * `);
3003
- *
3004
- * // Alter table structure
3005
- * await forgeSQL.executeDDL(sql`
3006
- * ALTER TABLE users
3007
- * ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
3008
- * `);
3009
- *
3010
- * // Drop a table
3011
- * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
3012
- * ```
3013
- */
3014
- async executeDDL(query) {
3015
- return this.executeDDLActions(async () => this.drizzle.executeQuery(query));
3016
- }
3017
- /**
3018
- * Executes a series of actions within a DDL operation context.
3019
- * This method provides a way to execute regular SQL queries that should be treated
3020
- * as DDL operations, ensuring proper operation type context for performance monitoring.
3021
- *
3022
- * This method is useful for:
3023
- * - Executing regular SQL queries in DDL context for monitoring purposes
3024
- * - Wrapping non-DDL operations that should be treated as DDL for analysis
3025
- * - Ensuring proper operation type context for complex workflows
3026
- * - Maintaining DDL operation context across multiple function calls
3027
- *
3028
- * @template T - The return type of the actions function
3029
- * @param actions - Function containing SQL operations to execute in DDL context
3030
- * @returns Promise that resolves to the return value of the actions function
3031
- *
3032
- * @example
3033
- * ```typescript
3034
- * // Execute regular SQL queries in DDL context for monitoring
3035
- * await forgeSQL.executeDDLActions(async () => {
3036
- * const slowQueries = await forgeSQL.execute(`
3037
- * SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3038
- * WHERE AVG_LATENCY > 1000000
3039
- * `);
3040
- * return slowQueries;
3041
- * });
3042
- *
3043
- * // Execute complex analysis queries in DDL context
3044
- * const result = await forgeSQL.executeDDLActions(async () => {
3045
- * const tableInfo = await forgeSQL.execute("SHOW TABLES");
3046
- * const performanceData = await forgeSQL.execute(`
3047
- * SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
3048
- * WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
3049
- * `);
3050
- * return { tableInfo, performanceData };
3051
- * });
3052
- *
3053
- * // Execute monitoring queries with error handling
3054
- * try {
3055
- * await forgeSQL.executeDDLActions(async () => {
3056
- * const metrics = await forgeSQL.execute(`
3057
- * SELECT COUNT(*) as query_count
3058
- * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3059
- * `);
3060
- * console.log(`Total queries: ${metrics[0].query_count}`);
3061
- * });
3062
- * } catch (error) {
3063
- * console.error("Monitoring query failed:", error);
3064
- * }
3065
- * ```
3066
- */
3067
- async executeDDLActions(actions) {
3068
- return operationTypeQueryContext.run({ operationType: "DDL" }, async () => actions());
3069
- }
3070
- /**
3071
- * Executes a raw SQL query with both local and global cache support.
3072
- * This method provides comprehensive caching for raw SQL queries:
3073
- * - Local cache: Within the current invocation context
3074
- * - Global cache: Cross-invocation caching using @forge/kvs
3075
- *
3076
- * @param query - The SQL query to execute (SQLWrapper or string)
3077
- * @param cacheTtl - Optional cache TTL override (defaults to global cache TTL)
3078
- * @returns Promise with query results
3079
- * @example
3080
- * ```typescript
3081
- * // Using SQLWrapper with custom TTL
3082
- * const result = await forgeSQL.executeCacheable(sql`SELECT * FROM users WHERE id = ${userId}`, 300);
3083
- *
3084
- * // Using string with default TTL
3085
- * const result = await forgeSQL.executeCacheable("SELECT * FROM users WHERE status = 'active'");
3086
- * ```
3087
- */
3088
- executeCacheable(query, cacheTtl) {
3089
- return this.drizzle.executeQueryCacheable(query, cacheTtl);
3090
- }
3091
- /**
3092
- * Creates a Common Table Expression (CTE) builder for complex queries.
3093
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3094
- *
3095
- * @returns WithBuilder for creating CTEs
3096
- * @example
3097
- * ```typescript
3098
- * const withQuery = forgeSQL.$with('userStats').as(
3099
- * forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
3100
- * .from(users)
3101
- * .groupBy(users.id)
3102
- * );
3103
- * ```
3104
- */
3105
- get $with() {
3106
- return this.drizzle.$with;
3107
- }
3108
- /**
3109
- * Creates a query builder that uses Common Table Expressions (CTEs).
3110
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3111
- *
3112
- * @param queries - Array of CTE queries created with $with()
3113
- * @returns Query builder with CTE support
3114
- * @example
3115
- * ```typescript
3116
- * const withQuery = forgeSQL.$with('userStats').as(
3117
- * forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
3118
- * .from(users)
3119
- * .groupBy(users.id)
3120
- * );
3121
- *
3122
- * const result = await forgeSQL.with(withQuery)
3123
- * .select({ userId: withQuery.userId, count: withQuery.count })
3124
- * .from(withQuery);
3125
- * ```
3126
- */
3127
- with(...queries) {
3128
- return this.drizzle.with(...queries);
3129
- }
3130
- }
3131
/**
 * Public ORM facade for Forge SQL. Every member is a thin proxy that delegates
 * to the shared ForgeSQLORMImpl singleton (or to its drizzle query builder);
 * no logic lives in this class itself.
 */
class ForgeSQLORM {
  // Shared singleton implementation that all calls are forwarded to.
  ormInstance;
  /** @param options - Configuration forwarded to ForgeSQLORMImpl.getInstance. */
  constructor(options) {
    this.ormInstance = ForgeSQLORMImpl.getInstance(options);
  }
  /**
   * Executes a query function and reports aggregated execution metadata
   * through the onMetadata callback: total DB execution time (ms), total
   * response size (bytes), and a printQueries analyzer for execution plans.
   *
   * NOTE: printQueries reads the global CLUSTER_STATEMENTS_SUMMARY table, so
   * queries from concurrently running resolvers may appear in its output.
   *
   * @param query - Async function containing one or more database operations.
   * @param onMetadata - Receives (totalDbExecutionTime, totalResponseSize, printQueries).
   * @returns Promise with the query function's result.
   */
  async executeWithMetadata(query, onMetadata) {
    return this.ormInstance.executeWithMetadata(query, onMetadata);
  }
  /** Cacheable variant of select(); cacheTTL optionally overrides the global TTL. */
  selectCacheable(fields, cacheTTL) {
    return this.ormInstance.selectCacheable(fields, cacheTTL);
  }
  /** Cacheable variant of selectDistinct(); cacheTTL optionally overrides the global TTL. */
  selectDistinctCacheable(fields, cacheTTL) {
    return this.ormInstance.selectDistinctCacheable(fields, cacheTTL);
  }
  /**
   * Select query builder over all columns of a table, with field aliasing.
   * @param table - The table to select from.
   */
  selectFrom(table) {
    return this.ormInstance.getDrizzleQueryBuilder().selectFrom(table);
  }
  /**
   * Select-distinct query builder over all columns of a table, with field aliasing.
   * @param table - The table to select from.
   */
  selectDistinctFrom(table) {
    return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFrom(table);
  }
  /**
   * Cacheable select over all columns of a table.
   * @param table - The table to select from.
   * @param cacheTTL - Optional cache TTL override (defaults to the global TTL).
   */
  selectCacheableFrom(table, cacheTTL) {
    return this.ormInstance.getDrizzleQueryBuilder().selectFromCacheable(table, cacheTTL);
  }
  /**
   * Cacheable select-distinct over all columns of a table.
   * @param table - The table to select from.
   * @param cacheTTL - Optional cache TTL override (defaults to the global TTL).
   */
  selectDistinctCacheableFrom(table, cacheTTL) {
    return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFromCacheable(table, cacheTTL);
  }
  /** Runs operations inside a cache context. */
  executeWithCacheContext(cacheContext) {
    return this.ormInstance.executeWithCacheContext(cacheContext);
  }
  /** Runs operations inside a cache context and returns the callback's value. */
  executeWithCacheContextAndReturnValue(cacheContext) {
    return this.ormInstance.executeWithCacheContextAndReturnValue(cacheContext);
  }
  /**
   * Runs operations inside a local (in-memory, single-request) cache context.
   * @param cacheContext - Callback whose select queries benefit from local caching.
   */
  executeWithLocalContext(cacheContext) {
    return this.ormInstance.executeWithLocalContext(cacheContext);
  }
  /**
   * Runs operations inside a local cache context and returns the callback's value.
   * @param cacheContext - Callback whose select queries benefit from local caching.
   */
  executeWithLocalCacheContextAndReturnValue(cacheContext) {
    return this.ormInstance.executeWithLocalCacheContextAndReturnValue(cacheContext);
  }
  /**
   * Insert builder WITHOUT optimistic locking/versioning and without cache
   * eviction. For versioned inserts use modifyWithVersioning().insert() or
   * modifyWithVersioningAndEvictCache().insert().
   * @param table - The table to insert into.
   */
  insert(table) {
    return this.ormInstance.insert(table);
  }
  /** Insert builder with automatic cache eviction (still no versioning). */
  insertAndEvictCache(table) {
    return this.ormInstance.insertAndEvictCache(table);
  }
  /**
   * Update builder WITHOUT optimistic locking/versioning and without cache
   * eviction. For versioned updates use modifyWithVersioning().updateById() or
   * modifyWithVersioningAndEvictCache().updateById().
   * @param table - The table to update.
   */
  update(table) {
    return this.ormInstance.update(table);
  }
  /** Update builder with automatic cache eviction (still no versioning). */
  updateAndEvictCache(table) {
    return this.ormInstance.updateAndEvictCache(table);
  }
  /**
   * Delete builder WITHOUT optimistic locking/versioning and without cache
   * eviction. For versioned deletes use modifyWithVersioning().deleteById() or
   * modifyWithVersioningAndEvictCache().deleteById().
   * @param table - The table to delete from.
   */
  delete(table) {
    return this.ormInstance.delete(table);
  }
  /** Delete builder with automatic cache eviction (still no versioning). */
  deleteAndEvictCache(table) {
    return this.ormInstance.deleteAndEvictCache(table);
  }
  /**
   * Select with unique per-table field aliases to avoid column-name collisions
   * in joins (Forge SQL collapses same-named fields of joined tables).
   * @param fields - Map of alias name to table schema.
   * @throws {Error} If fields is empty.
   */
  select(fields) {
    return this.ormInstance.select(fields);
  }
  /**
   * Distinct select with unique per-table field aliases (see select()).
   * @param fields - Map of alias name to table schema.
   * @throws {Error} If fields is empty.
   */
  selectDistinct(fields) {
    return this.ormInstance.selectDistinct(fields);
  }
  /** Modify (insert/update/delete) operations WITH optimistic locking/versioning. */
  modifyWithVersioning() {
    return this.ormInstance.modifyWithVersioning();
  }
  /** Fetch operations of the underlying implementation. */
  fetch() {
    return this.ormInstance.fetch();
  }
  /** Query analysis capabilities (EXPLAIN ANALYZE, slow-query analysis). */
  analyze() {
    return this.ormInstance.analyze();
  }
  /**
   * Versioned modify operations that additionally evict related cache entries
   * after each write. (Earlier docs incorrectly described this method as
   * schema-level cacheable query operations; it proxies
   * ormInstance.modifyWithVersioningAndEvictCache.)
   */
  modifyWithVersioningAndEvictCache() {
    return this.ormInstance.modifyWithVersioningAndEvictCache();
  }
  /** Drizzle query builder instance, for query construction only. */
  getDrizzleQueryBuilder() {
    return this.ormInstance.getDrizzleQueryBuilder();
  }
  /**
   * Executes a raw SQL query (SQLWrapper or string) with local,
   * per-invocation caching.
   * @param query - The SQL query to execute.
   */
  execute(query) {
    return this.ormInstance.execute(query);
  }
  /**
   * Executes a DDL statement (CREATE/ALTER/DROP/...). Runs in DDL operation
   * context and is never cached.
   * @param query - The DDL statement to execute.
   * @throws {Error} If the DDL operation fails.
   */
  executeDDL(query) {
    return this.ormInstance.executeDDL(query);
  }
  /**
   * Runs a callback inside a DDL operation context so its queries are tracked
   * with the DDL operation type (useful for monitoring/analysis workflows).
   * @param actions - Callback containing SQL operations.
   */
  executeDDLActions(actions) {
    return this.ormInstance.executeDDLActions(actions);
  }
  /**
   * Executes a raw SQL query with local AND global (@forge/kvs) caching.
   * @param query - The SQL query to execute.
   * @param cacheTtl - Optional cache TTL override (defaults to the global TTL).
   */
  executeCacheable(query, cacheTtl) {
    return this.ormInstance.executeCacheable(query, cacheTtl);
  }
  /** CTE builder, e.g. forgeSQL.$with('name').as(subquery). */
  get $with() {
    return this.ormInstance.getDrizzleQueryBuilder().$with;
  }
  /** Query builder over CTEs previously created with $with(). */
  with(...queries) {
    return this.ormInstance.getDrizzleQueryBuilder().with(...queries);
  }
}
3626
// Custom column type mapping MySQL DATETIME to/from a millisecond-precision
// formatted string via formatDateTime/parseDateTime.
const forgeDateTimeString = customType({
  dataType() {
    return "datetime";
  },
  toDriver(value) {
    const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
    return formatDateTime(value, format, false);
  },
  fromDriver(value) {
    return parseDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS");
  }
});
3638
// Custom column type mapping MySQL TIMESTAMP to/from a millisecond-precision
// formatted string (toDriver's third argument is true for this type).
const forgeTimestampString = customType({
  dataType() {
    return "timestamp";
  },
  toDriver(value) {
    const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
    return formatDateTime(value, format, true);
  },
  fromDriver(value) {
    return parseDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS");
  }
});
3650
// Custom column type mapping MySQL DATE to/from a "yyyy-MM-dd" string.
const forgeDateString = customType({
  dataType() {
    return "date";
  },
  toDriver(value) {
    const format = "yyyy-MM-dd";
    return formatDateTime(value, format, false);
  },
  fromDriver(value) {
    return parseDateTime(value, "yyyy-MM-dd");
  }
});
3662
// Custom column type mapping MySQL TIME to/from an "HH:mm:ss.SSS" string.
const forgeTimeString = customType({
  dataType() {
    return "time";
  },
  toDriver(value) {
    const format = "HH:mm:ss.SSS";
    return formatDateTime(value, format, false);
  },
  fromDriver(value) {
    const format = "HH:mm:ss.SSS";
    return parseDateTime(value, format);
  }
});
3673
/**
 * Web trigger handler that drops ALL user tables AND sequences created by
 * migrations. Destructive and irreversible.
 *
 * @returns HTTP-style response: 200 with a destruction warning on success,
 *   500 with the extracted error message on failure.
 */
async function dropSchemaMigrations() {
  try {
    const tables = await getTables();
    // Drop both sequences and tables.
    const dropStatements = generateDropTableStatements(tables, { sequence: true, table: true });
    for (const statement of dropStatements) {
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
    return getHttpResponse(
      200,
      "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
    );
  } catch (error) {
    // error?.message (not error.message): a thrown nullish value must not
    // raise a TypeError inside the error handler itself.
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error?.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
3691
/**
 * Applies schema migrations: provisions the database, runs the migrations
 * produced by the given factory, then logs the full migration history.
 *
 * @param migration - Factory that receives the migration runner and returns
 *   the configured migrations object (must expose run()).
 * @returns HTTP-style response: 200 on success, 500 with an extracted error
 *   message on failure.
 */
const applySchemaMigrations = async (migration) => {
  try {
    if (typeof migration !== "function") {
      throw new Error("migration is not a function");
    }
    console.debug("Provisioning the database");
    await sql._provision();
    console.debug("Running schema migrations");
    const migrations2 = await migration(migrationRunner);
    const successfulMigrations = await migrations2.run();
    console.debug("Migrations applied:", successfulMigrations);
    const migrationList = await migrationRunner.list();
    let migrationHistory = "No migrations found";
    if (Array.isArray(migrationList) && migrationList.length > 0) {
      // toSorted: non-mutating sort by migration timestamp, oldest first.
      const sortedMigrations = migrationList.toSorted(
        (a, b) => a.migratedAt.getTime() - b.migratedAt.getTime()
      );
      migrationHistory = sortedMigrations.map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`).join("\n");
    }
    console.debug("Migrations history:\nid, name, migrated_at\n", migrationHistory);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 200,
      statusText: "OK",
      body: "Migrations successfully executed"
    };
  } catch (error) {
    // error?.message (not error.message): a thrown nullish value must not
    // raise a TypeError inside this catch block.
    const errorMessage = error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.context?.debug?.message ?? error?.debug?.context?.sqlMessage ?? error?.debug?.context?.message ?? error?.message ?? "Unknown error occurred";
    console.error("Error during migration:", errorMessage);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 500,
      statusText: "Internal Server Error",
      body: error instanceof Error ? errorMessage : "Unknown error during migration"
    };
  }
};
3728
/**
 * Web trigger that returns the current schema as CREATE TABLE IF NOT EXISTS
 * statements wrapped in foreign-key-check toggles, joined with ";\n".
 *
 * @returns HTTP-style response: 200 with the DDL script, 500 with the
 *   extracted error message on failure.
 */
async function fetchSchemaWebTrigger() {
  try {
    const tables = await getTables();
    const createTableStatements = await generateCreateTableStatements(tables);
    const sqlStatements = wrapWithForeignKeyChecks(createTableStatements);
    return getHttpResponse(200, sqlStatements.join(";\n"));
  } catch (error) {
    // error?.message (not error.message): a thrown nullish value must not
    // raise a TypeError inside the error handler itself.
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error?.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
3740
/**
 * Builds CREATE TABLE statements for the given tables by running
 * SHOW CREATE TABLE for each one, skipping Forge system tables and
 * normalizing each statement via formatCreateTableStatement.
 *
 * @param tables - Table names to describe
 * @returns Array of normalized CREATE TABLE statements
 */
async function generateCreateTableStatements(tables) {
  const statements = [];
  for (const table of tables) {
    const createTableResult = await sql.executeDDL(`SHOW CREATE TABLE "${table}"`);
    for (const row of createTableResult.rows) {
      if (isSystemTable(row.Table)) {
        continue;
      }
      statements.push(formatCreateTableStatement(row["Create Table"]));
    }
  }
  return statements;
}
3749
/**
 * Checks whether a table name belongs to the ORM's reserved system tables.
 *
 * @param {string} tableName - Table name to test.
 * @returns {boolean} True when the name matches any forge system table.
 */
function isSystemTable(tableName) {
  for (const systemTable of forgeSystemTables) {
    if (getTableName(systemTable) === tableName) {
      return true;
    }
  }
  return false;
}
3752
/**
 * Normalizes a SHOW CREATE TABLE statement for re-execution.
 *
 * Strips ANSI double quotes from identifiers and makes the DDL idempotent
 * by switching to CREATE TABLE IF NOT EXISTS (first occurrence only).
 *
 * @param {string} statement - Raw CREATE TABLE statement.
 * @returns {string} Unquoted, idempotent CREATE TABLE statement.
 */
function formatCreateTableStatement(statement) {
  const unquoted = statement.replace(/"/g, "");
  return unquoted.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS");
}
3755
/**
 * Surrounds DDL statements with foreign-key-check toggles.
 *
 * Disables FK checks before the statements and re-enables them after, so
 * the statements can run regardless of table creation order.
 *
 * @param {string[]} statements - DDL statements to wrap.
 * @returns {string[]} New array: disable toggle, statements, enable toggle.
 */
function wrapWithForeignKeyChecks(statements) {
  const disableChecks = "SET foreign_key_checks = 0";
  const enableChecks = "SET foreign_key_checks = 1";
  return [disableChecks, ...statements, enableChecks];
}
3758
/**
 * Drops every user table in the schema (destructive, irreversible).
 *
 * Generates DROP TABLE statements (tables only, no sequences) and executes
 * them one by one, logging each DDL before it runs. Returns a 200 response
 * with a warning message on success, or a 500 response carrying the most
 * specific error message available.
 *
 * @returns {Promise<object>} HTTP-style response object from getHttpResponse.
 */
async function dropTableSchemaMigrations() {
  try {
    const tables = await getTables();
    const dropStatements = generateDropTableStatements(tables, { sequence: false, table: true });
    // Sequential on purpose: DDL statements must not race each other.
    for (const statement of dropStatements) {
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
    return getHttpResponse(
      200,
      "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
    );
  } catch (error) {
    const message =
      error?.debug?.sqlMessage ??
      error?.debug?.message ??
      error.message ??
      "Unknown error occurred";
    console.error(message);
    return getHttpResponse(500, message);
  }
}
3776
/**
 * Scheduler trigger that purges expired cache entries.
 *
 * When no options are supplied, a default cache configuration is used.
 * A missing/empty cacheEntityName is treated as a configuration error.
 * Returns a JSON 200 response on success and a JSON 500 response (with the
 * error message) on failure.
 *
 * @param {object|undefined} options - Cache configuration; optional.
 * @returns {Promise<object>} HTTP-style response with a JSON body.
 */
const clearCacheSchedulerTrigger = async (options) => {
  try {
    // Fall back to the stock cache configuration when none is provided.
    const effectiveOptions = options ?? {
      logRawSqlQuery: false,
      disableOptimisticLocking: false,
      cacheTTL: 120,
      cacheEntityName: "cache",
      cacheEntityQueryName: "sql",
      cacheEntityExpirationName: "expiration",
      cacheEntityDataName: "data"
    };
    if (!effectiveOptions.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
    await clearExpiredCache(effectiveOptions);
    const successBody = JSON.stringify({
      success: true,
      message: "Cache cleanup completed successfully",
      timestamp: new Date().toISOString()
    });
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 200,
      statusText: "OK",
      body: successBody
    };
  } catch (error) {
    console.error("Error during cache cleanup: ", JSON.stringify(error));
    const failureBody = JSON.stringify({
      success: false,
      error: error instanceof Error ? error.message : "Unknown error during cache cleanup",
      timestamp: new Date().toISOString()
    });
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 500,
      statusText: "Internal Server Error",
      body: failureBody
    };
  }
};
3815
/**
 * Scheduler trigger that reports slow queries for a recent time window.
 *
 * Defaults to the last 1 hour with a 3000 ms timeout when options omit
 * those values. The slow-query report is JSON-serialized into a 200
 * response; failures produce a 500 response with the best error message.
 *
 * @param {object} forgeSQLORM - ORM instance used to query slow-query stats.
 * @param {{hours?: number, timeout?: number}|undefined} options - Window config.
 * @returns {Promise<object>} HTTP-style response object from getHttpResponse.
 */
async function slowQuerySchedulerTrigger(forgeSQLORM, options) {
  try {
    const hours = options?.hours ?? 1;
    const timeout = options?.timeout ?? 3e3;
    const report = await slowQueryPerHours(forgeSQLORM, hours, timeout);
    return getHttpResponse(200, JSON.stringify(report));
  } catch (error) {
    const message =
      error?.debug?.sqlMessage ??
      error?.debug?.message ??
      error.message ??
      "Unknown error occurred";
    console.error(message);
    return getHttpResponse(500, message);
  }
}
3824
/**
 * Builds an HTTP-style response object with a JSON content type.
 *
 * Fix: status texts are now consistent with the inline responses built
 * elsewhere in this module — 200 maps to "OK" (previously "Ok") and 500
 * maps to "Internal Server Error" (previously the misleading "Bad
 * Request"); any other code keeps the historical "Bad Request" fallback.
 *
 * @param {number} statusCode - HTTP status code (callers use 200 or 500).
 * @param {string} body - Response body.
 * @returns {{headers: object, statusCode: number, statusText: string, body: string}}
 */
const getHttpResponse = (statusCode, body) => {
  let statusText;
  if (statusCode === 200) {
    statusText = "OK";
  } else if (statusCode === 500) {
    statusText = "Internal Server Error";
  } else {
    statusText = "Bad Request";
  }
  return {
    headers: { "Content-Type": ["application/json"] },
    statusCode,
    statusText,
    body
  };
};
3838
- export {
3839
- ForgeSQLCrudOperations,
3840
- ForgeSQLSelectOperations,
3841
- applyFromDriverTransform,
3842
- applySchemaMigrations,
3843
- clearCacheSchedulerTrigger,
3844
- clusterStatementsSummary,
3845
- clusterStatementsSummaryHistory,
3846
- ForgeSQLORM as default,
3847
- dropSchemaMigrations,
3848
- dropTableSchemaMigrations,
3849
- fetchSchemaWebTrigger,
3850
- forgeDateString,
3851
- forgeDateTimeString,
3852
- forgeDriver,
3853
- forgeSystemTables,
3854
- forgeTimeString,
3855
- forgeTimestampString,
3856
- formatDateTime,
3857
- formatLimitOffset,
3858
- generateDropTableStatements,
3859
- getHttpResponse,
3860
- getPrimaryKeys,
3861
- getTableMetadata,
3862
- getTables,
3863
- isUpdateQueryResponse,
3864
- mapSelectAllFieldsToAlias,
3865
- mapSelectFieldsWithAlias,
3866
- migrations,
3867
- nextVal,
3868
- parseDateTime,
3869
- patchDbWithSelectAliased,
3870
- printQueriesWithPlan,
3871
- slowQuery,
3872
- slowQueryPerHours,
3873
- slowQuerySchedulerTrigger,
3874
- statementsSummary,
3875
- statementsSummaryHistory,
3876
- withTidbHint,
3877
- withTimeout
3878
- };
3879
- //# sourceMappingURL=ForgeSQLORM.mjs.map