forge-sql-orm 2.1.12 → 2.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/README.md +662 -548
  2. package/dist/core/ForgeSQLAnalyseOperations.d.ts.map +1 -1
  3. package/dist/core/ForgeSQLAnalyseOperations.js +257 -0
  4. package/dist/core/ForgeSQLAnalyseOperations.js.map +1 -0
  5. package/dist/core/ForgeSQLCacheOperations.js +172 -0
  6. package/dist/core/ForgeSQLCacheOperations.js.map +1 -0
  7. package/dist/core/ForgeSQLCrudOperations.js +349 -0
  8. package/dist/core/ForgeSQLCrudOperations.js.map +1 -0
  9. package/dist/core/ForgeSQLORM.js +1191 -0
  10. package/dist/core/ForgeSQLORM.js.map +1 -0
  11. package/dist/core/ForgeSQLQueryBuilder.js +77 -0
  12. package/dist/core/ForgeSQLQueryBuilder.js.map +1 -0
  13. package/dist/core/ForgeSQLSelectOperations.js +81 -0
  14. package/dist/core/ForgeSQLSelectOperations.js.map +1 -0
  15. package/dist/core/SystemTables.js +258 -0
  16. package/dist/core/SystemTables.js.map +1 -0
  17. package/dist/index.js +30 -0
  18. package/dist/index.js.map +1 -0
  19. package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
  20. package/dist/lib/drizzle/extensions/additionalActions.js +527 -0
  21. package/dist/lib/drizzle/extensions/additionalActions.js.map +1 -0
  22. package/dist/utils/cacheContextUtils.d.ts.map +1 -1
  23. package/dist/utils/cacheContextUtils.js +198 -0
  24. package/dist/utils/cacheContextUtils.js.map +1 -0
  25. package/dist/utils/cacheUtils.d.ts.map +1 -1
  26. package/dist/utils/cacheUtils.js +383 -0
  27. package/dist/utils/cacheUtils.js.map +1 -0
  28. package/dist/utils/forgeDriver.d.ts.map +1 -1
  29. package/dist/utils/forgeDriver.js +139 -0
  30. package/dist/utils/forgeDriver.js.map +1 -0
  31. package/dist/utils/forgeDriverProxy.js +68 -0
  32. package/dist/utils/forgeDriverProxy.js.map +1 -0
  33. package/dist/utils/metadataContextUtils.js +28 -0
  34. package/dist/utils/metadataContextUtils.js.map +1 -0
  35. package/dist/utils/requestTypeContextUtils.js +10 -0
  36. package/dist/utils/requestTypeContextUtils.js.map +1 -0
  37. package/dist/utils/sqlHints.js +52 -0
  38. package/dist/utils/sqlHints.js.map +1 -0
  39. package/dist/utils/sqlUtils.d.ts.map +1 -1
  40. package/dist/utils/sqlUtils.js +590 -0
  41. package/dist/utils/sqlUtils.js.map +1 -0
  42. package/dist/webtriggers/applyMigrationsWebTrigger.js +77 -0
  43. package/dist/webtriggers/applyMigrationsWebTrigger.js.map +1 -0
  44. package/dist/webtriggers/clearCacheSchedulerTrigger.js +83 -0
  45. package/dist/webtriggers/clearCacheSchedulerTrigger.js.map +1 -0
  46. package/dist/webtriggers/dropMigrationWebTrigger.js +54 -0
  47. package/dist/webtriggers/dropMigrationWebTrigger.js.map +1 -0
  48. package/dist/webtriggers/dropTablesMigrationWebTrigger.js +54 -0
  49. package/dist/webtriggers/dropTablesMigrationWebTrigger.js.map +1 -0
  50. package/dist/webtriggers/fetchSchemaWebTrigger.js +82 -0
  51. package/dist/webtriggers/fetchSchemaWebTrigger.js.map +1 -0
  52. package/dist/webtriggers/index.js +40 -0
  53. package/dist/webtriggers/index.js.map +1 -0
  54. package/dist/webtriggers/slowQuerySchedulerTrigger.js +80 -0
  55. package/dist/webtriggers/slowQuerySchedulerTrigger.js.map +1 -0
  56. package/package.json +28 -23
  57. package/src/core/ForgeSQLAnalyseOperations.ts +3 -2
  58. package/src/lib/drizzle/extensions/additionalActions.ts +11 -0
  59. package/src/utils/cacheContextUtils.ts +9 -6
  60. package/src/utils/cacheUtils.ts +6 -4
  61. package/src/utils/forgeDriver.ts +3 -7
  62. package/src/utils/sqlUtils.ts +33 -34
  63. package/dist/ForgeSQLORM.js +0 -3922
  64. package/dist/ForgeSQLORM.js.map +0 -1
  65. package/dist/ForgeSQLORM.mjs +0 -3905
  66. package/dist/ForgeSQLORM.mjs.map +0 -1
package/dist/ForgeSQLORM.js
@@ -1,3922 +0,0 @@
1
- "use strict";
2
- Object.defineProperties(exports, { __esModule: { value: true }, [Symbol.toStringTag]: { value: "Module" } });
3
- const drizzleOrm = require("drizzle-orm");
4
- const luxon = require("luxon");
5
- const sql$1 = require("drizzle-orm/sql/sql");
6
- const mysqlCore = require("drizzle-orm/mysql-core");
7
- const sql = require("@forge/sql");
8
- const node_async_hooks = require("node:async_hooks");
9
- const table = require("drizzle-orm/table");
10
- const crypto = require("crypto");
11
- const kvs = require("@forge/kvs");
12
- const mysqlProxy = require("drizzle-orm/mysql-proxy");
13
- function _interopNamespaceDefault(e) {
14
- const n = Object.create(null, { [Symbol.toStringTag]: { value: "Module" } });
15
- if (e) {
16
- for (const k in e) {
17
- if (k !== "default") {
18
- const d = Object.getOwnPropertyDescriptor(e, k);
19
- Object.defineProperty(n, k, d.get ? d : {
20
- enumerable: true,
21
- get: () => e[k]
22
- });
23
- }
24
- }
25
- }
26
- n.default = e;
27
- return Object.freeze(n);
28
- }
29
- const crypto__namespace = /* @__PURE__ */ _interopNamespaceDefault(crypto);
30
- const migrations = mysqlCore.mysqlTable("__migrations", {
31
- id: mysqlCore.bigint("id", { mode: "number" }).primaryKey().autoincrement(),
32
- name: mysqlCore.varchar("name", { length: 255 }).notNull(),
33
- migratedAt: mysqlCore.timestamp("migratedAt").defaultNow().notNull()
34
- });
35
- const informationSchema = mysqlCore.mysqlSchema("information_schema");
36
- const slowQuery = informationSchema.table("CLUSTER_SLOW_QUERY", {
37
- time: mysqlCore.timestamp("Time", { fsp: 6, mode: "string" }).notNull(),
38
- // Timestamp when the slow query was recorded
39
- txnStartTs: mysqlCore.bigint("Txn_start_ts", { mode: "bigint", unsigned: true }),
40
- // Transaction start timestamp (TSO)
41
- user: mysqlCore.varchar("User", { length: 64 }),
42
- // User executing the query
43
- host: mysqlCore.varchar("Host", { length: 64 }),
44
- // Host from which the query originated
45
- connId: mysqlCore.bigint("Conn_ID", { mode: "bigint", unsigned: true }),
46
- // Connection ID
47
- sessionAlias: mysqlCore.varchar("Session_alias", { length: 64 }),
48
- // Session alias
49
- execRetryCount: mysqlCore.bigint("Exec_retry_count", { mode: "bigint", unsigned: true }),
50
- // Number of retries during execution
51
- execRetryTime: mysqlCore.double("Exec_retry_time"),
52
- // Time spent in retries
53
- queryTime: mysqlCore.double("Query_time"),
54
- // Total execution time
55
- parseTime: mysqlCore.double("Parse_time"),
56
- // Time spent parsing SQL
57
- compileTime: mysqlCore.double("Compile_time"),
58
- // Time spent compiling query plan
59
- rewriteTime: mysqlCore.double("Rewrite_time"),
60
- // Time spent rewriting query
61
- preprocSubqueries: mysqlCore.bigint("Preproc_subqueries", { mode: "bigint", unsigned: true }),
62
- // Number of subqueries preprocessed
63
- preprocSubqueriesTime: mysqlCore.double("Preproc_subqueries_time"),
64
- // Time spent preprocessing subqueries
65
- optimizeTime: mysqlCore.double("Optimize_time"),
66
- // Time spent in optimizer
67
- waitTs: mysqlCore.double("Wait_TS"),
68
- // Wait time for getting TSO
69
- prewriteTime: mysqlCore.double("Prewrite_time"),
70
- // Time spent in prewrite phase
71
- waitPrewriteBinlogTime: mysqlCore.double("Wait_prewrite_binlog_time"),
72
- // Time waiting for binlog prewrite
73
- commitTime: mysqlCore.double("Commit_time"),
74
- // Commit duration
75
- getCommitTsTime: mysqlCore.double("Get_commit_ts_time"),
76
- // Time waiting for commit TSO
77
- commitBackoffTime: mysqlCore.double("Commit_backoff_time"),
78
- // Backoff time during commit
79
- backoffTypes: mysqlCore.varchar("Backoff_types", { length: 64 }),
80
- // Types of backoff occurred
81
- resolveLockTime: mysqlCore.double("Resolve_lock_time"),
82
- // Time resolving locks
83
- localLatchWaitTime: mysqlCore.double("Local_latch_wait_time"),
84
- // Time waiting on local latch
85
- writeKeys: mysqlCore.bigint("Write_keys", { mode: "bigint" }),
86
- // Number of keys written
87
- writeSize: mysqlCore.bigint("Write_size", { mode: "bigint" }),
88
- // Amount of data written
89
- prewriteRegion: mysqlCore.bigint("Prewrite_region", { mode: "bigint" }),
90
- // Regions involved in prewrite
91
- txnRetry: mysqlCore.bigint("Txn_retry", { mode: "bigint" }),
92
- // Transaction retry count
93
- copTime: mysqlCore.double("Cop_time"),
94
- // Time spent in coprocessor
95
- processTime: mysqlCore.double("Process_time"),
96
- // Processing time
97
- waitTime: mysqlCore.double("Wait_time"),
98
- // Wait time in TiKV
99
- backoffTime: mysqlCore.double("Backoff_time"),
100
- // Backoff wait time
101
- lockKeysTime: mysqlCore.double("LockKeys_time"),
102
- // Time spent waiting for locks
103
- requestCount: mysqlCore.bigint("Request_count", { mode: "bigint", unsigned: true }),
104
- // Total number of requests
105
- totalKeys: mysqlCore.bigint("Total_keys", { mode: "bigint", unsigned: true }),
106
- // Total keys scanned
107
- processKeys: mysqlCore.bigint("Process_keys", { mode: "bigint", unsigned: true }),
108
- // Keys processed
109
- rocksdbDeleteSkippedCount: mysqlCore.bigint("Rocksdb_delete_skipped_count", {
110
- mode: "bigint",
111
- unsigned: true
112
- }),
113
- // RocksDB delete skips
114
- rocksdbKeySkippedCount: mysqlCore.bigint("Rocksdb_key_skipped_count", { mode: "bigint", unsigned: true }),
115
- // RocksDB key skips
116
- rocksdbBlockCacheHitCount: mysqlCore.bigint("Rocksdb_block_cache_hit_count", {
117
- mode: "bigint",
118
- unsigned: true
119
- }),
120
- // RocksDB block cache hits
121
- rocksdbBlockReadCount: mysqlCore.bigint("Rocksdb_block_read_count", { mode: "bigint", unsigned: true }),
122
- // RocksDB block reads
123
- rocksdbBlockReadByte: mysqlCore.bigint("Rocksdb_block_read_byte", { mode: "bigint", unsigned: true }),
124
- // RocksDB block read bytes
125
- db: mysqlCore.varchar("DB", { length: 64 }),
126
- // Database name
127
- indexNames: mysqlCore.varchar("Index_names", { length: 100 }),
128
- // Indexes used
129
- isInternal: mysqlCore.boolean("Is_internal"),
130
- // Whether the query is internal
131
- digest: mysqlCore.varchar("Digest", { length: 64 }),
132
- // SQL digest hash
133
- stats: mysqlCore.varchar("Stats", { length: 512 }),
134
- // Stats used during planning
135
- copProcAvg: mysqlCore.double("Cop_proc_avg"),
136
- // Coprocessor average processing time
137
- copProcP90: mysqlCore.double("Cop_proc_p90"),
138
- // Coprocessor 90th percentile processing time
139
- copProcMax: mysqlCore.double("Cop_proc_max"),
140
- // Coprocessor max processing time
141
- copProcAddr: mysqlCore.varchar("Cop_proc_addr", { length: 64 }),
142
- // Coprocessor address for processing
143
- copWaitAvg: mysqlCore.double("Cop_wait_avg"),
144
- // Coprocessor average wait time
145
- copWaitP90: mysqlCore.double("Cop_wait_p90"),
146
- // Coprocessor 90th percentile wait time
147
- copWaitMax: mysqlCore.double("Cop_wait_max"),
148
- // Coprocessor max wait time
149
- copWaitAddr: mysqlCore.varchar("Cop_wait_addr", { length: 64 }),
150
- // Coprocessor address for wait
151
- memMax: mysqlCore.bigint("Mem_max", { mode: "bigint" }),
152
- // Max memory usage
153
- diskMax: mysqlCore.bigint("Disk_max", { mode: "bigint" }),
154
- // Max disk usage
155
- kvTotal: mysqlCore.double("KV_total"),
156
- // Total KV request time
157
- pdTotal: mysqlCore.double("PD_total"),
158
- // Total PD request time
159
- backoffTotal: mysqlCore.double("Backoff_total"),
160
- // Total backoff time
161
- writeSqlResponseTotal: mysqlCore.double("Write_sql_response_total"),
162
- // SQL response write time
163
- resultRows: mysqlCore.bigint("Result_rows", { mode: "bigint" }),
164
- // Rows returned
165
- warnings: mysqlCore.longtext("Warnings"),
166
- // Warnings during execution
167
- backoffDetail: mysqlCore.varchar("Backoff_Detail", { length: 4096 }),
168
- // Detailed backoff info
169
- prepared: mysqlCore.boolean("Prepared"),
170
- // Whether query was prepared
171
- succ: mysqlCore.boolean("Succ"),
172
- // Success flag
173
- isExplicitTxn: mysqlCore.boolean("IsExplicitTxn"),
174
- // Whether explicit transaction
175
- isWriteCacheTable: mysqlCore.boolean("IsWriteCacheTable"),
176
- // Whether wrote to cache table
177
- planFromCache: mysqlCore.boolean("Plan_from_cache"),
178
- // Plan was from cache
179
- planFromBinding: mysqlCore.boolean("Plan_from_binding"),
180
- // Plan was from binding
181
- hasMoreResults: mysqlCore.boolean("Has_more_results"),
182
- // Query returned multiple results
183
- resourceGroup: mysqlCore.varchar("Resource_group", { length: 64 }),
184
- // Resource group name
185
- requestUnitRead: mysqlCore.double("Request_unit_read"),
186
- // RU consumed for read
187
- requestUnitWrite: mysqlCore.double("Request_unit_write"),
188
- // RU consumed for write
189
- timeQueuedByRc: mysqlCore.double("Time_queued_by_rc"),
190
- // Time queued by resource control
191
- tidbCpuTime: mysqlCore.double("Tidb_cpu_time"),
192
- // TiDB CPU time
193
- tikvCpuTime: mysqlCore.double("Tikv_cpu_time"),
194
- // TiKV CPU time
195
- plan: mysqlCore.longtext("Plan"),
196
- // Query execution plan
197
- planDigest: mysqlCore.varchar("Plan_digest", { length: 128 }),
198
- // Plan digest hash
199
- binaryPlan: mysqlCore.longtext("Binary_plan"),
200
- // Binary execution plan
201
- prevStmt: mysqlCore.longtext("Prev_stmt"),
202
- // Previous statement in session
203
- query: mysqlCore.longtext("Query")
204
- // Original SQL query
205
- });
206
- const createClusterStatementsSummarySchema = () => ({
207
- instance: mysqlCore.varchar("INSTANCE", { length: 64 }),
208
- // TiDB/TiKV instance address
209
- summaryBeginTime: mysqlCore.timestamp("SUMMARY_BEGIN_TIME", { mode: "string" }).notNull(),
210
- // Begin time of this summary window
211
- summaryEndTime: mysqlCore.timestamp("SUMMARY_END_TIME", { mode: "string" }).notNull(),
212
- // End time of this summary window
213
- stmtType: mysqlCore.varchar("STMT_TYPE", { length: 64 }).notNull(),
214
- // Statement type (e.g., Select/Insert/Update)
215
- schemaName: mysqlCore.varchar("SCHEMA_NAME", { length: 64 }),
216
- // Current schema name
217
- digest: mysqlCore.varchar("DIGEST", { length: 64 }),
218
- // SQL digest (normalized hash)
219
- digestText: mysqlCore.text("DIGEST_TEXT").notNull(),
220
- // Normalized SQL text
221
- tableNames: mysqlCore.text("TABLE_NAMES"),
222
- // Involved table names
223
- indexNames: mysqlCore.text("INDEX_NAMES"),
224
- // Used index names
225
- sampleUser: mysqlCore.varchar("SAMPLE_USER", { length: 64 }),
226
- // Sampled user who executed the statements
227
- execCount: mysqlCore.bigint("EXEC_COUNT", { mode: "bigint", unsigned: true }).notNull(),
228
- // Total executions
229
- sumErrors: mysqlCore.int("SUM_ERRORS", { unsigned: true }).notNull(),
230
- // Sum of errors
231
- sumWarnings: mysqlCore.int("SUM_WARNINGS", { unsigned: true }).notNull(),
232
- // Sum of warnings
233
- sumLatency: mysqlCore.bigint("SUM_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
234
- // Sum of latency (ns)
235
- maxLatency: mysqlCore.bigint("MAX_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
236
- // Max latency (ns)
237
- minLatency: mysqlCore.bigint("MIN_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
238
- // Min latency (ns)
239
- avgLatency: mysqlCore.bigint("AVG_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
240
- // Avg latency (ns)
241
- avgParseLatency: mysqlCore.bigint("AVG_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
242
- // Avg parse time (ns)
243
- maxParseLatency: mysqlCore.bigint("MAX_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
244
- // Max parse time (ns)
245
- avgCompileLatency: mysqlCore.bigint("AVG_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
246
- // Avg compile time (ns)
247
- maxCompileLatency: mysqlCore.bigint("MAX_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
248
- // Max compile time (ns)
249
- sumCopTaskNum: mysqlCore.bigint("SUM_COP_TASK_NUM", { mode: "bigint", unsigned: true }).notNull(),
250
- // Total number of cop tasks
251
- maxCopProcessTime: mysqlCore.bigint("MAX_COP_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
252
- // Max TiKV coprocessor processing time (ns)
253
- maxCopProcessAddress: mysqlCore.varchar("MAX_COP_PROCESS_ADDRESS", { length: 256 }),
254
- // Address of cop task with max processing time
255
- maxCopWaitTime: mysqlCore.bigint("MAX_COP_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
256
- // Max TiKV coprocessor wait time (ns)
257
- maxCopWaitAddress: mysqlCore.varchar("MAX_COP_WAIT_ADDRESS", { length: 256 }),
258
- // Address of cop task with max wait time
259
- avgProcessTime: mysqlCore.bigint("AVG_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
260
- // Avg TiKV processing time (ns)
261
- maxProcessTime: mysqlCore.bigint("MAX_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
262
- // Max TiKV processing time (ns)
263
- avgWaitTime: mysqlCore.bigint("AVG_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
264
- // Avg TiKV wait time (ns)
265
- maxWaitTime: mysqlCore.bigint("MAX_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
266
- // Max TiKV wait time (ns)
267
- avgBackoffTime: mysqlCore.bigint("AVG_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
268
- // Avg backoff time before retry (ns)
269
- maxBackoffTime: mysqlCore.bigint("MAX_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
270
- // Max backoff time before retry (ns)
271
- avgTotalKeys: mysqlCore.bigint("AVG_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
272
- // Avg scanned keys
273
- maxTotalKeys: mysqlCore.bigint("MAX_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
274
- // Max scanned keys
275
- avgProcessedKeys: mysqlCore.bigint("AVG_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
276
- // Avg processed keys
277
- maxProcessedKeys: mysqlCore.bigint("MAX_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
278
- // Max processed keys
279
- avgRocksdbDeleteSkippedCount: mysqlCore.double("AVG_ROCKSDB_DELETE_SKIPPED_COUNT").notNull(),
280
- // Avg RocksDB deletes skipped
281
- maxRocksdbDeleteSkippedCount: mysqlCore.int("MAX_ROCKSDB_DELETE_SKIPPED_COUNT", {
282
- unsigned: true
283
- }).notNull(),
284
- // Max RocksDB deletes skipped
285
- avgRocksdbKeySkippedCount: mysqlCore.double("AVG_ROCKSDB_KEY_SKIPPED_COUNT").notNull(),
286
- // Avg RocksDB keys skipped
287
- maxRocksdbKeySkippedCount: mysqlCore.int("MAX_ROCKSDB_KEY_SKIPPED_COUNT", { unsigned: true }).notNull(),
288
- // Max RocksDB keys skipped
289
- avgRocksdbBlockCacheHitCount: mysqlCore.double("AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT").notNull(),
290
- // Avg RocksDB block cache hits
291
- maxRocksdbBlockCacheHitCount: mysqlCore.int("MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT", {
292
- unsigned: true
293
- }).notNull(),
294
- // Max RocksDB block cache hits
295
- avgRocksdbBlockReadCount: mysqlCore.double("AVG_ROCKSDB_BLOCK_READ_COUNT").notNull(),
296
- // Avg RocksDB block reads
297
- maxRocksdbBlockReadCount: mysqlCore.int("MAX_ROCKSDB_BLOCK_READ_COUNT", { unsigned: true }).notNull(),
298
- // Max RocksDB block reads
299
- avgRocksdbBlockReadByte: mysqlCore.double("AVG_ROCKSDB_BLOCK_READ_BYTE").notNull(),
300
- // Avg RocksDB block read bytes
301
- maxRocksdbBlockReadByte: mysqlCore.int("MAX_ROCKSDB_BLOCK_READ_BYTE", { unsigned: true }).notNull(),
302
- // Max RocksDB block read bytes
303
- avgPrewriteTime: mysqlCore.bigint("AVG_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
304
- // Avg prewrite phase time (ns)
305
- maxPrewriteTime: mysqlCore.bigint("MAX_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
306
- // Max prewrite phase time (ns)
307
- avgCommitTime: mysqlCore.bigint("AVG_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
308
- // Avg commit phase time (ns)
309
- maxCommitTime: mysqlCore.bigint("MAX_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
310
- // Max commit phase time (ns)
311
- avgGetCommitTsTime: mysqlCore.bigint("AVG_GET_COMMIT_TS_TIME", {
312
- mode: "bigint",
313
- unsigned: true
314
- }).notNull(),
315
- // Avg get commit_ts time (ns)
316
- maxGetCommitTsTime: mysqlCore.bigint("MAX_GET_COMMIT_TS_TIME", {
317
- mode: "bigint",
318
- unsigned: true
319
- }).notNull(),
320
- // Max get commit_ts time (ns)
321
- avgCommitBackoffTime: mysqlCore.bigint("AVG_COMMIT_BACKOFF_TIME", {
322
- mode: "bigint",
323
- unsigned: true
324
- }).notNull(),
325
- // Avg backoff during commit (ns)
326
- maxCommitBackoffTime: mysqlCore.bigint("MAX_COMMIT_BACKOFF_TIME", {
327
- mode: "bigint",
328
- unsigned: true
329
- }).notNull(),
330
- // Max backoff during commit (ns)
331
- avgResolveLockTime: mysqlCore.bigint("AVG_RESOLVE_LOCK_TIME", {
332
- mode: "bigint",
333
- unsigned: true
334
- }).notNull(),
335
- // Avg resolve lock time (ns)
336
- maxResolveLockTime: mysqlCore.bigint("MAX_RESOLVE_LOCK_TIME", {
337
- mode: "bigint",
338
- unsigned: true
339
- }).notNull(),
340
- // Max resolve lock time (ns)
341
- avgLocalLatchWaitTime: mysqlCore.bigint("AVG_LOCAL_LATCH_WAIT_TIME", {
342
- mode: "bigint",
343
- unsigned: true
344
- }).notNull(),
345
- // Avg local latch wait (ns)
346
- maxLocalLatchWaitTime: mysqlCore.bigint("MAX_LOCAL_LATCH_WAIT_TIME", {
347
- mode: "bigint",
348
- unsigned: true
349
- }).notNull(),
350
- // Max local latch wait (ns)
351
- avgWriteKeys: mysqlCore.double("AVG_WRITE_KEYS").notNull(),
352
- // Avg number of written keys
353
- maxWriteKeys: mysqlCore.bigint("MAX_WRITE_KEYS", { mode: "bigint", unsigned: true }).notNull(),
354
- // Max written keys
355
- avgWriteSize: mysqlCore.double("AVG_WRITE_SIZE").notNull(),
356
- // Avg written bytes
357
- maxWriteSize: mysqlCore.bigint("MAX_WRITE_SIZE", { mode: "bigint", unsigned: true }).notNull(),
358
- // Max written bytes
359
- avgPrewriteRegions: mysqlCore.double("AVG_PREWRITE_REGIONS").notNull(),
360
- // Avg regions in prewrite
361
- maxPrewriteRegions: mysqlCore.int("MAX_PREWRITE_REGIONS", { unsigned: true }).notNull(),
362
- // Max regions in prewrite
363
- avgTxnRetry: mysqlCore.double("AVG_TXN_RETRY").notNull(),
364
- // Avg transaction retry count
365
- maxTxnRetry: mysqlCore.int("MAX_TXN_RETRY", { unsigned: true }).notNull(),
366
- // Max transaction retry count
367
- sumExecRetry: mysqlCore.bigint("SUM_EXEC_RETRY", { mode: "bigint", unsigned: true }).notNull(),
368
- // Sum of execution retries (pessimistic)
369
- sumExecRetryTime: mysqlCore.bigint("SUM_EXEC_RETRY_TIME", { mode: "bigint", unsigned: true }).notNull(),
370
- // Sum time of execution retries (ns)
371
- sumBackoffTimes: mysqlCore.bigint("SUM_BACKOFF_TIMES", { mode: "bigint", unsigned: true }).notNull(),
372
- // Sum of backoff retries
373
- backoffTypes: mysqlCore.varchar("BACKOFF_TYPES", { length: 1024 }),
374
- // Backoff types with counts
375
- avgMem: mysqlCore.bigint("AVG_MEM", { mode: "bigint", unsigned: true }).notNull(),
376
- // Avg memory used (bytes)
377
- maxMem: mysqlCore.bigint("MAX_MEM", { mode: "bigint", unsigned: true }).notNull(),
378
- // Max memory used (bytes)
379
- avgDisk: mysqlCore.bigint("AVG_DISK", { mode: "bigint", unsigned: true }).notNull(),
380
- // Avg disk used (bytes)
381
- maxDisk: mysqlCore.bigint("MAX_DISK", { mode: "bigint", unsigned: true }).notNull(),
382
- // Max disk used (bytes)
383
- avgKvTime: mysqlCore.bigint("AVG_KV_TIME", { mode: "bigint", unsigned: true }).notNull(),
384
- // Avg time spent in TiKV (ns)
385
- avgPdTime: mysqlCore.bigint("AVG_PD_TIME", { mode: "bigint", unsigned: true }).notNull(),
386
- // Avg time spent in PD (ns)
387
- avgBackoffTotalTime: mysqlCore.bigint("AVG_BACKOFF_TOTAL_TIME", {
388
- mode: "bigint",
389
- unsigned: true
390
- }).notNull(),
391
- // Avg total backoff time (ns)
392
- avgWriteSqlRespTime: mysqlCore.bigint("AVG_WRITE_SQL_RESP_TIME", {
393
- mode: "bigint",
394
- unsigned: true
395
- }).notNull(),
396
- // Avg write SQL response time (ns)
397
- avgTidbCpuTime: mysqlCore.bigint("AVG_TIDB_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
398
- // Avg TiDB CPU time (ns)
399
- avgTikvCpuTime: mysqlCore.bigint("AVG_TIKV_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
400
- // Avg TiKV CPU time (ns)
401
- maxResultRows: mysqlCore.bigint("MAX_RESULT_ROWS", { mode: "bigint" }).notNull(),
402
- // Max number of result rows
403
- minResultRows: mysqlCore.bigint("MIN_RESULT_ROWS", { mode: "bigint" }).notNull(),
404
- // Min number of result rows
405
- avgResultRows: mysqlCore.bigint("AVG_RESULT_ROWS", { mode: "bigint" }).notNull(),
406
- // Avg number of result rows
407
- prepared: mysqlCore.boolean("PREPARED").notNull(),
408
- // Whether statements are prepared
409
- avgAffectedRows: mysqlCore.double("AVG_AFFECTED_ROWS").notNull(),
410
- // Avg affected rows
411
- firstSeen: mysqlCore.timestamp("FIRST_SEEN", { mode: "string" }).notNull(),
412
- // First time statements observed
413
- lastSeen: mysqlCore.timestamp("LAST_SEEN", { mode: "string" }).notNull(),
414
- // Last time statements observed
415
- planInCache: mysqlCore.boolean("PLAN_IN_CACHE").notNull(),
416
- // Whether last stmt hit plan cache
417
- planCacheHits: mysqlCore.bigint("PLAN_CACHE_HITS", { mode: "bigint" }).notNull(),
418
- // Number of plan cache hits
419
- planInBinding: mysqlCore.boolean("PLAN_IN_BINDING").notNull(),
420
- // Whether matched bindings
421
- querySampleText: mysqlCore.text("QUERY_SAMPLE_TEXT"),
422
- // Sampled original SQL
423
- prevSampleText: mysqlCore.text("PREV_SAMPLE_TEXT"),
424
- // Sampled previous SQL before commit
425
- planDigest: mysqlCore.varchar("PLAN_DIGEST", { length: 64 }),
426
- // Plan digest hash
427
- plan: mysqlCore.text("PLAN"),
428
- // Sampled textual plan
429
- binaryPlan: mysqlCore.text("BINARY_PLAN"),
430
- // Sampled binary plan
431
- charset: mysqlCore.varchar("CHARSET", { length: 64 }),
432
- // Sampled charset
433
- collation: mysqlCore.varchar("COLLATION", { length: 64 }),
434
- // Sampled collation
435
- planHint: mysqlCore.varchar("PLAN_HINT", { length: 64 }),
436
- // Sampled plan hint
437
- maxRequestUnitRead: mysqlCore.double("MAX_REQUEST_UNIT_READ").notNull(),
438
- // Max RU cost (read)
439
- avgRequestUnitRead: mysqlCore.double("AVG_REQUEST_UNIT_READ").notNull(),
440
- // Avg RU cost (read)
441
- maxRequestUnitWrite: mysqlCore.double("MAX_REQUEST_UNIT_WRITE").notNull(),
442
- // Max RU cost (write)
443
- avgRequestUnitWrite: mysqlCore.double("AVG_REQUEST_UNIT_WRITE").notNull(),
444
- // Avg RU cost (write)
445
- maxQueuedRcTime: mysqlCore.bigint("MAX_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
446
- // Max queued time waiting for RU (ns)
447
- avgQueuedRcTime: mysqlCore.bigint("AVG_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
448
- // Avg queued time waiting for RU (ns)
449
- resourceGroup: mysqlCore.varchar("RESOURCE_GROUP", { length: 64 }),
450
- // Bound resource group name
451
- planCacheUnqualified: mysqlCore.bigint("PLAN_CACHE_UNQUALIFIED", { mode: "bigint" }).notNull(),
452
- // Times not eligible for plan cache
453
- planCacheUnqualifiedLastReason: mysqlCore.text("PLAN_CACHE_UNQUALIFIED_LAST_REASON")
454
- // Last reason of plan cache ineligibility
455
- });
456
- const clusterStatementsSummaryHistory = informationSchema.table(
457
- "CLUSTER_STATEMENTS_SUMMARY_HISTORY",
458
- createClusterStatementsSummarySchema()
459
- );
460
- const statementsSummaryHistory = informationSchema.table(
461
- "STATEMENTS_SUMMARY_HISTORY",
462
- createClusterStatementsSummarySchema()
463
- );
464
- const statementsSummary = informationSchema.table(
465
- "STATEMENTS_SUMMARY",
466
- createClusterStatementsSummarySchema()
467
- );
468
- const clusterStatementsSummary = informationSchema.table(
469
- "CLUSTER_STATEMENTS_SUMMARY",
470
- createClusterStatementsSummarySchema()
471
- );
472
- async function getTables() {
473
- const tables = await sql.sql.executeDDL("SHOW TABLES");
474
- return tables.rows.flatMap((tableInfo) => Object.values(tableInfo));
475
- }
476
- const forgeSystemTables = [migrations];
477
- const parseDateTime = (value, format) => {
478
- let result;
479
- if (value instanceof Date) {
480
- result = value;
481
- } else {
482
- const dt = luxon.DateTime.fromFormat(value, format);
483
- if (dt.isValid) {
484
- result = dt.toJSDate();
485
- } else {
486
- const sqlDt = luxon.DateTime.fromSQL(value);
487
- if (sqlDt.isValid) {
488
- result = sqlDt.toJSDate();
489
- } else {
490
- const isoDt = luxon.DateTime.fromRFC2822(value);
491
- if (isoDt.isValid) {
492
- result = isoDt.toJSDate();
493
- } else {
494
- result = new Date(value);
495
- }
496
- }
497
- }
498
- }
499
- if (isNaN(result.getTime())) {
500
- result = new Date(value);
501
- }
502
- return result;
503
- };
504
- function formatDateTime(value, format, isTimeStamp) {
505
- let dt = null;
506
- if (value instanceof Date) {
507
- dt = luxon.DateTime.fromJSDate(value);
508
- } else if (typeof value === "string") {
509
- for (const parser of [
510
- luxon.DateTime.fromISO,
511
- luxon.DateTime.fromRFC2822,
512
- luxon.DateTime.fromSQL,
513
- luxon.DateTime.fromHTTP
514
- ]) {
515
- dt = parser(value);
516
- if (dt.isValid) break;
517
- }
518
- if (!dt?.isValid) {
519
- const parsed = Number(value);
520
- if (!isNaN(parsed)) {
521
- dt = luxon.DateTime.fromMillis(parsed);
522
- }
523
- }
524
- } else if (typeof value === "number") {
525
- dt = luxon.DateTime.fromMillis(value);
526
- } else {
527
- throw new Error("Unsupported type");
528
- }
529
- if (!dt?.isValid) {
530
- throw new Error("Invalid Date");
531
- }
532
- const minDate = luxon.DateTime.fromSeconds(1);
533
- const maxDate = luxon.DateTime.fromMillis(2147483647 * 1e3);
534
- if (isTimeStamp) {
535
- if (dt < minDate) {
536
- throw new Error(
537
- "Atlassian Forge does not support zero or negative timestamps. Allowed range: from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'."
538
- );
539
- }
540
- if (dt > maxDate) {
541
- throw new Error(
542
- "Atlassian Forge does not support timestamps beyond 2038-01-19 03:14:07.999999. Please use a smaller date within the supported range."
543
- );
544
- }
545
- }
546
- return dt.toFormat(format);
547
- }
548
- function getPrimaryKeys(table2) {
549
- const { columns, primaryKeys } = getTableMetadata(table2);
550
- const columnPrimaryKeys = Object.entries(columns).filter(([, column]) => column.primary);
551
- if (columnPrimaryKeys.length > 0) {
552
- return columnPrimaryKeys;
553
- }
554
- if (Array.isArray(primaryKeys) && primaryKeys.length > 0) {
555
- const primaryKeyColumns = /* @__PURE__ */ new Set();
556
- primaryKeys.forEach((primaryKeyBuilder) => {
557
- Object.entries(columns).filter(([, column]) => {
558
- return primaryKeyBuilder.columns.includes(column);
559
- }).forEach(([name, column]) => {
560
- primaryKeyColumns.add([name, column]);
561
- });
562
- });
563
- return Array.from(primaryKeyColumns);
564
- }
565
- return [];
566
- }
567
- function processForeignKeys(table2, foreignKeysSymbol, extraSymbol) {
568
- const foreignKeys = [];
569
- if (foreignKeysSymbol) {
570
- const fkArray = table2[foreignKeysSymbol];
571
- if (fkArray) {
572
- fkArray.forEach((fk) => {
573
- if (fk.reference) {
574
- const item = fk.reference(fk);
575
- foreignKeys.push(item);
576
- }
577
- });
578
- }
579
- }
580
- if (extraSymbol) {
581
- const extraConfigBuilder = table2[extraSymbol];
582
- if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
583
- const configBuilderData = extraConfigBuilder(table2);
584
- if (configBuilderData) {
585
- const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
586
- (item) => item.value ?? item
587
- );
588
- configBuilders.forEach((builder) => {
589
- if (!builder?.constructor) return;
590
- const builderName = builder.constructor.name.toLowerCase();
591
- if (builderName.includes("foreignkeybuilder")) {
592
- foreignKeys.push(builder);
593
- }
594
- });
595
- }
596
- }
597
- }
598
- return foreignKeys;
599
- }
600
- function getTableMetadata(table2) {
601
- const symbols = Object.getOwnPropertySymbols(table2);
602
- const nameSymbol = symbols.find((s) => s.toString().includes("Name"));
603
- const columnsSymbol = symbols.find((s) => s.toString().includes("Columns"));
604
- const foreignKeysSymbol = symbols.find((s) => s.toString().includes("ForeignKeys)"));
605
- const extraSymbol = symbols.find((s) => s.toString().includes("ExtraConfigBuilder"));
606
- const builders = {
607
- indexes: [],
608
- checks: [],
609
- foreignKeys: [],
610
- primaryKeys: [],
611
- uniqueConstraints: [],
612
- extras: []
613
- };
614
- builders.foreignKeys = processForeignKeys(table2, foreignKeysSymbol, extraSymbol);
615
- if (extraSymbol) {
616
- const extraConfigBuilder = table2[extraSymbol];
617
- if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
618
- const configBuilderData = extraConfigBuilder(table2);
619
- if (configBuilderData) {
620
- const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
621
- (item) => item.value ?? item
622
- );
623
- configBuilders.forEach((builder) => {
624
- if (!builder?.constructor) return;
625
- const builderName = builder.constructor.name.toLowerCase();
626
- const builderMap = {
627
- indexbuilder: builders.indexes,
628
- checkbuilder: builders.checks,
629
- primarykeybuilder: builders.primaryKeys,
630
- uniqueconstraintbuilder: builders.uniqueConstraints
631
- };
632
- for (const [type, array] of Object.entries(builderMap)) {
633
- if (builderName.includes(type)) {
634
- array.push(builder);
635
- break;
636
- }
637
- }
638
- builders.extras.push(builder);
639
- });
640
- }
641
- }
642
- }
643
- return {
644
- tableName: nameSymbol ? table2[nameSymbol] : "",
645
- columns: columnsSymbol ? table2[columnsSymbol] : {},
646
- ...builders
647
- };
648
- }
649
- function generateDropTableStatements(tables, options) {
650
- const dropStatements = [];
651
- const validOptions = options ?? { sequence: true, table: true };
652
- if (!validOptions.sequence && !validOptions.table) {
653
- console.warn('No drop operations requested: both "table" and "sequence" options are false');
654
- return [];
655
- }
656
- tables.forEach((tableName) => {
657
- if (validOptions.table) {
658
- dropStatements.push(`DROP TABLE IF EXISTS \`${tableName}\`;`);
659
- }
660
- if (validOptions.sequence) {
661
- dropStatements.push(`DROP SEQUENCE IF EXISTS \`${tableName}\`;`);
662
- }
663
- });
664
- return dropStatements;
665
- }
666
- function mapSelectTableToAlias(table2, uniqPrefix, aliasMap) {
667
- const { columns, tableName } = getTableMetadata(table2);
668
- const selectionsTableFields = {};
669
- Object.keys(columns).forEach((name) => {
670
- const column = columns[name];
671
- const uniqName = `a_${uniqPrefix}_${tableName}_${column.name}`.toLowerCase();
672
- const fieldAlias = drizzleOrm.sql.raw(uniqName);
673
- selectionsTableFields[name] = drizzleOrm.sql`${column} as \`${fieldAlias}\``;
674
- aliasMap[uniqName] = column;
675
- });
676
- return selectionsTableFields;
677
- }
678
- function isDrizzleColumn(column) {
679
- return column && typeof column === "object" && "table" in column;
680
- }
681
- function mapSelectAllFieldsToAlias(selections, name, uniqName, fields, aliasMap) {
682
- if (drizzleOrm.isTable(fields)) {
683
- selections[name] = mapSelectTableToAlias(fields, uniqName, aliasMap);
684
- } else if (isDrizzleColumn(fields)) {
685
- const column = fields;
686
- const uniqAliasName = `a_${uniqName}_${column.name}`.toLowerCase();
687
- let aliasName = drizzleOrm.sql.raw(uniqAliasName);
688
- selections[name] = drizzleOrm.sql`${column} as \`${aliasName}\``;
689
- aliasMap[uniqAliasName] = column;
690
- } else if (sql$1.isSQLWrapper(fields)) {
691
- selections[name] = fields;
692
- } else {
693
- const innerSelections = {};
694
- Object.entries(fields).forEach(([iname, ifields]) => {
695
- mapSelectAllFieldsToAlias(innerSelections, iname, `${uniqName}_${iname}`, ifields, aliasMap);
696
- });
697
- selections[name] = innerSelections;
698
- }
699
- return selections;
700
- }
701
- function mapSelectFieldsWithAlias(fields) {
702
- if (!fields) {
703
- throw new Error("fields is empty");
704
- }
705
- const aliasMap = {};
706
- const selections = {};
707
- Object.entries(fields).forEach(([name, fields2]) => {
708
- mapSelectAllFieldsToAlias(selections, name, name, fields2, aliasMap);
709
- });
710
- return { selections, aliasMap };
711
- }
712
- function getAliasFromDrizzleAlias(value) {
713
- const isSQL = value !== null && typeof value === "object" && sql$1.isSQLWrapper(value) && "queryChunks" in value;
714
- if (isSQL) {
715
- const sql2 = value;
716
- const queryChunks = sql2.queryChunks;
717
- if (queryChunks.length > 3) {
718
- const aliasNameChunk = queryChunks[queryChunks.length - 2];
719
- if (sql$1.isSQLWrapper(aliasNameChunk) && "queryChunks" in aliasNameChunk) {
720
- const aliasNameChunkSql = aliasNameChunk;
721
- if (aliasNameChunkSql.queryChunks?.length === 1 && aliasNameChunkSql.queryChunks[0]) {
722
- const queryChunksStringChunc = aliasNameChunkSql.queryChunks[0];
723
- if ("value" in queryChunksStringChunc) {
724
- const values = queryChunksStringChunc.value;
725
- if (values && values.length === 1) {
726
- return values[0];
727
- }
728
- }
729
- }
730
- }
731
- }
732
- }
733
- return void 0;
734
- }
735
- function transformValue(value, alias, aliasMap) {
736
- const column = aliasMap[alias];
737
- if (!column) return value;
738
- let customColumn = column;
739
- const fromDriver = customColumn?.mapFrom;
740
- if (fromDriver && value !== null && value !== void 0) {
741
- return fromDriver(value);
742
- }
743
- return value;
744
- }
745
- function transformObject(obj, selections, aliasMap) {
746
- const result = {};
747
- for (const [key, value] of Object.entries(obj)) {
748
- const selection = selections[key];
749
- const alias = getAliasFromDrizzleAlias(selection);
750
- if (alias && aliasMap[alias]) {
751
- result[key] = transformValue(value, alias, aliasMap);
752
- } else if (selection && typeof selection === "object" && !sql$1.isSQLWrapper(selection)) {
753
- result[key] = transformObject(
754
- value,
755
- selection,
756
- aliasMap
757
- );
758
- } else {
759
- result[key] = value;
760
- }
761
- }
762
- return result;
763
- }
764
- function applyFromDriverTransform(rows, selections, aliasMap) {
765
- return rows.map((row) => {
766
- const transformed = transformObject(
767
- row,
768
- selections,
769
- aliasMap
770
- );
771
- return processNullBranches(transformed);
772
- });
773
- }
774
- function processNullBranches(obj) {
775
- if (obj === null || typeof obj !== "object") {
776
- return obj;
777
- }
778
- if (obj.constructor && obj.constructor.name !== "Object") {
779
- return obj;
780
- }
781
- const result = {};
782
- let allNull = true;
783
- for (const [key, value] of Object.entries(obj)) {
784
- if (value === null || value === void 0) {
785
- result[key] = null;
786
- continue;
787
- }
788
- if (typeof value === "object") {
789
- const processed = processNullBranches(value);
790
- result[key] = processed;
791
- if (processed !== null) {
792
- allNull = false;
793
- }
794
- } else {
795
- result[key] = value;
796
- allNull = false;
797
- }
798
- }
799
- return allNull ? null : result;
800
- }
801
- function formatLimitOffset(limitOrOffset) {
802
- if (typeof limitOrOffset !== "number" || isNaN(limitOrOffset)) {
803
- throw new Error("limitOrOffset must be a valid number");
804
- }
805
- return drizzleOrm.sql.raw(`${limitOrOffset}`);
806
- }
807
- function nextVal(sequenceName) {
808
- return drizzleOrm.sql.raw(`NEXTVAL(${sequenceName})`);
809
- }
810
- async function printQueriesWithPlan(forgeSQLORM, timeDiffMs, timeout) {
811
- try {
812
- const statementsTable = clusterStatementsSummary;
813
- const timeoutMs2 = timeout ?? 3e3;
814
- const results = await withTimeout(
815
- forgeSQLORM.getDrizzleQueryBuilder().select({
816
- digestText: withTidbHint(statementsTable.digestText),
817
- avgLatency: statementsTable.avgLatency,
818
- avgMem: statementsTable.avgMem,
819
- execCount: statementsTable.execCount,
820
- plan: statementsTable.plan,
821
- stmtType: statementsTable.stmtType
822
- }).from(statementsTable).where(
823
- drizzleOrm.and(
824
- drizzleOrm.isNotNull(statementsTable.digest),
825
- drizzleOrm.not(drizzleOrm.ilike(statementsTable.digestText, "%information_schema%")),
826
- drizzleOrm.notInArray(statementsTable.stmtType, [
827
- "Use",
828
- "Set",
829
- "Show",
830
- "Commit",
831
- "Rollback",
832
- "Begin"
833
- ]),
834
- drizzleOrm.gte(
835
- statementsTable.lastSeen,
836
- drizzleOrm.sql`DATE_SUB
837
- (NOW(), INTERVAL
838
- ${timeDiffMs * 1e3}
839
- MICROSECOND
840
- )`
841
- )
842
- )
843
- ),
844
- `Timeout ${timeoutMs2}ms in printQueriesWithPlan - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
845
- timeoutMs2 + 200
846
- );
847
- results.forEach((result) => {
848
- const avgTimeMs = Number(result.avgLatency) / 1e6;
849
- const avgMemMB = Number(result.avgMem) / 1e6;
850
- console.warn(
851
- `SQL: ${result.digestText} | Memory: ${avgMemMB.toFixed(2)} MB | Time: ${avgTimeMs.toFixed(2)} ms | stmtType: ${result.stmtType} | Executions: ${result.execCount}
852
- Plan:${result.plan}`
853
- );
854
- });
855
- } catch (error) {
856
- console.debug(
857
- `Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
858
- error
859
- );
860
- }
861
- }
862
- const SESSION_ALIAS_NAME_ORM = "orm";
863
- async function slowQueryPerHours(forgeSQLORM, hours, timeout) {
864
- try {
865
- const timeoutMs2 = timeout ?? 1500;
866
- const results = await withTimeout(
867
- forgeSQLORM.getDrizzleQueryBuilder().select({
868
- query: withTidbHint(slowQuery.query),
869
- queryTime: slowQuery.queryTime,
870
- memMax: slowQuery.memMax,
871
- plan: slowQuery.plan
872
- }).from(slowQuery).where(
873
- drizzleOrm.and(
874
- drizzleOrm.isNotNull(slowQuery.digest),
875
- drizzleOrm.ne(slowQuery.sessionAlias, SESSION_ALIAS_NAME_ORM),
876
- drizzleOrm.gte(
877
- slowQuery.time,
878
- drizzleOrm.sql`DATE_SUB
879
- (NOW(), INTERVAL
880
- ${hours}
881
- HOUR
882
- )`
883
- )
884
- )
885
- ),
886
- `Timeout ${timeoutMs2}ms in slowQueryPerHours - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
887
- timeoutMs2
888
- );
889
- const response = [];
890
- results.forEach((result) => {
891
- const memMaxMB = result.memMax ? Number(result.memMax) / 1e6 : 0;
892
- const message = `Found SlowQuery SQL: ${result.query} | Memory: ${memMaxMB.toFixed(2)} MB | Time: ${result.queryTime} ms
893
- Plan:${result.plan}`;
894
- response.push(message);
895
- console.warn(message);
896
- });
897
- return response;
898
- } catch (error) {
899
- console.debug(
900
- `Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
901
- error
902
- );
903
- return [
904
- `Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}`
905
- ];
906
- }
907
- }
908
- async function withTimeout(promise, message, timeoutMs2) {
909
- let timeoutId;
910
- const timeoutPromise = new Promise((_, reject) => {
911
- timeoutId = setTimeout(() => {
912
- reject(new Error(message));
913
- }, timeoutMs2);
914
- });
915
- try {
916
- return await Promise.race([promise, timeoutPromise]);
917
- } finally {
918
- if (timeoutId) {
919
- clearTimeout(timeoutId);
920
- }
921
- }
922
- }
923
- function withTidbHint(column) {
924
- return drizzleOrm.sql`/*+ SET_VAR(tidb_session_alias=${drizzleOrm.sql.raw(`${SESSION_ALIAS_NAME_ORM}`)}) */ ${column}`;
925
- }
926
- const CACHE_CONSTANTS = {
927
- BATCH_SIZE: 25,
928
- MAX_RETRY_ATTEMPTS: 3,
929
- INITIAL_RETRY_DELAY: 1e3,
930
- RETRY_DELAY_MULTIPLIER: 2,
931
- DEFAULT_ENTITY_QUERY_NAME: "sql",
932
- DEFAULT_EXPIRATION_NAME: "expiration",
933
- DEFAULT_DATA_NAME: "data",
934
- HASH_LENGTH: 32
935
- };
936
- function getCurrentTime() {
937
- const dt = luxon.DateTime.now();
938
- return Math.floor(dt.toSeconds());
939
- }
940
- function nowPlusSeconds(secondsToAdd) {
941
- const dt = luxon.DateTime.now().plus({ seconds: secondsToAdd });
942
- return Math.floor(dt.toSeconds());
943
- }
944
- function extractBacktickedValues(sql2) {
945
- const regex = /`([^`]+)`/g;
946
- const matches = /* @__PURE__ */ new Set();
947
- let match;
948
- while ((match = regex.exec(sql2.toLowerCase())) !== null) {
949
- if (!match[1].startsWith("a_")) {
950
- matches.add(`\`${match[1]}\``);
951
- }
952
- }
953
- return Array.from(matches).sort().join(",");
954
- }
955
- function hashKey(query) {
956
- const h = crypto__namespace.createHash("sha256");
957
- h.update(query.sql.toLowerCase());
958
- h.update(JSON.stringify(query.params));
959
- return "CachedQuery_" + h.digest("hex").slice(0, CACHE_CONSTANTS.HASH_LENGTH);
960
- }
961
- async function deleteCacheEntriesInBatches(results, cacheEntityName) {
962
- for (let i = 0; i < results.length; i += CACHE_CONSTANTS.BATCH_SIZE) {
963
- const batch = results.slice(i, i + CACHE_CONSTANTS.BATCH_SIZE);
964
- let transactionBuilder = kvs.kvs.transact();
965
- batch.forEach((result) => {
966
- transactionBuilder = transactionBuilder.delete(result.key, { entityName: cacheEntityName });
967
- });
968
- await transactionBuilder.execute();
969
- }
970
- }
971
- async function clearCursorCache(tables, cursor, options) {
972
- const cacheEntityName = options.cacheEntityName;
973
- if (!cacheEntityName) {
974
- throw new Error("cacheEntityName is not configured");
975
- }
976
- const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
977
- let filters = new kvs.Filter();
978
- for (const table2 of tables) {
979
- const wrapIfNeeded = options.cacheWrapTable ? `\`${table2}\`` : table2;
980
- filters.or(entityQueryName, kvs.FilterConditions.contains(wrapIfNeeded?.toLowerCase()));
981
- }
982
- let entityQueryBuilder = kvs.kvs.entity(cacheEntityName).query().index(entityQueryName).filters(filters);
983
- if (cursor) {
984
- entityQueryBuilder = entityQueryBuilder.cursor(cursor);
985
- }
986
- const listResult = await entityQueryBuilder.limit(100).getMany();
987
- if (options.logCache) {
988
- console.warn(`clear cache Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
989
- }
990
- await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
991
- if (listResult.nextCursor) {
992
- return listResult.results.length + await clearCursorCache(tables, listResult.nextCursor, options);
993
- } else {
994
- return listResult.results.length;
995
- }
996
- }
997
- async function clearExpirationCursorCache(cursor, options) {
998
- const cacheEntityName = options.cacheEntityName;
999
- if (!cacheEntityName) {
1000
- throw new Error("cacheEntityName is not configured");
1001
- }
1002
- const entityExpirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
1003
- let entityQueryBuilder = kvs.kvs.entity(cacheEntityName).query().index(entityExpirationName).where(kvs.WhereConditions.lessThan(Math.floor(luxon.DateTime.now().toSeconds())));
1004
- if (cursor) {
1005
- entityQueryBuilder = entityQueryBuilder.cursor(cursor);
1006
- }
1007
- const listResult = await entityQueryBuilder.limit(100).getMany();
1008
- if (options.logCache) {
1009
- console.warn(`clear expired Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
1010
- }
1011
- await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
1012
- if (listResult.nextCursor) {
1013
- return listResult.results.length + await clearExpirationCursorCache(listResult.nextCursor, options);
1014
- } else {
1015
- return listResult.results.length;
1016
- }
1017
- }
1018
- async function executeWithRetry(operation, operationName) {
1019
- let attempt = 0;
1020
- let delay = CACHE_CONSTANTS.INITIAL_RETRY_DELAY;
1021
- while (attempt < CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS) {
1022
- try {
1023
- return await operation();
1024
- } catch (err) {
1025
- console.warn(`Error during ${operationName}: ${err.message}, retry ${attempt}`, err);
1026
- attempt++;
1027
- if (attempt >= CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS) {
1028
- console.error(`Error during ${operationName}: ${err.message}`, err);
1029
- throw err;
1030
- }
1031
- await new Promise((resolve) => setTimeout(resolve, delay));
1032
- delay *= CACHE_CONSTANTS.RETRY_DELAY_MULTIPLIER;
1033
- }
1034
- }
1035
- throw new Error(`Maximum retry attempts exceeded for ${operationName}`);
1036
- }
1037
- async function clearCache(schema, options) {
1038
- const tableName = table.getTableName(schema);
1039
- if (cacheApplicationContext.getStore()) {
1040
- cacheApplicationContext.getStore()?.tables.add(tableName);
1041
- } else {
1042
- await clearTablesCache([tableName], options);
1043
- }
1044
- }
1045
- async function clearTablesCache(tables, options) {
1046
- if (!options.cacheEntityName) {
1047
- throw new Error("cacheEntityName is not configured");
1048
- }
1049
- const startTime = luxon.DateTime.now();
1050
- let totalRecords = 0;
1051
- try {
1052
- totalRecords = await executeWithRetry(
1053
- () => clearCursorCache(tables, "", options),
1054
- "clearing cache"
1055
- );
1056
- } finally {
1057
- if (options.logCache) {
1058
- const duration = luxon.DateTime.now().toSeconds() - startTime.toSeconds();
1059
- console.info(`Cleared ${totalRecords} cache records in ${duration} seconds`);
1060
- }
1061
- }
1062
- }
1063
- async function clearExpiredCache(options) {
1064
- if (!options.cacheEntityName) {
1065
- throw new Error("cacheEntityName is not configured");
1066
- }
1067
- const startTime = luxon.DateTime.now();
1068
- let totalRecords = 0;
1069
- try {
1070
- totalRecords = await executeWithRetry(
1071
- () => clearExpirationCursorCache("", options),
1072
- "clearing expired cache"
1073
- );
1074
- } finally {
1075
- const duration = luxon.DateTime.now().toSeconds() - startTime.toSeconds();
1076
- if (options?.logCache) {
1077
- console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
1078
- }
1079
- }
1080
- }
1081
- async function getFromCache(query, options) {
1082
- if (!options.cacheEntityName) {
1083
- throw new Error("cacheEntityName is not configured");
1084
- }
1085
- const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
1086
- const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
1087
- const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
1088
- const sqlQuery = query.toSQL();
1089
- const key = hashKey(sqlQuery);
1090
- if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
1091
- if (options.logCache) {
1092
- console.warn(`Context contains value to clear. Skip getting from cache`);
1093
- }
1094
- return void 0;
1095
- }
1096
- try {
1097
- const cacheResult = await kvs.kvs.entity(options.cacheEntityName).get(key);
1098
- if (cacheResult && cacheResult[expirationName] >= getCurrentTime() && extractBacktickedValues(sqlQuery.sql) === cacheResult[entityQueryName]) {
1099
- if (options.logCache) {
1100
- console.warn(`Get value from cache, cacheKey: ${key}`);
1101
- }
1102
- const results = cacheResult[dataName];
1103
- return JSON.parse(results);
1104
- }
1105
- } catch (error) {
1106
- console.error(`Error getting from cache: ${error.message}`, error);
1107
- }
1108
- return void 0;
1109
- }
1110
- async function setCacheResult(query, options, results, cacheTtl) {
1111
- if (!options.cacheEntityName) {
1112
- throw new Error("cacheEntityName is not configured");
1113
- }
1114
- try {
1115
- const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
1116
- const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
1117
- const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
1118
- const sqlQuery = query.toSQL();
1119
- if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
1120
- if (options.logCache) {
1121
- console.warn(`Context contains value to clear. Skip setting from cache`);
1122
- }
1123
- return;
1124
- }
1125
- const key = hashKey(sqlQuery);
1126
- await kvs.kvs.transact().set(
1127
- key,
1128
- {
1129
- [entityQueryName]: extractBacktickedValues(sqlQuery.sql),
1130
- [expirationName]: nowPlusSeconds(cacheTtl),
1131
- [dataName]: JSON.stringify(results)
1132
- },
1133
- { entityName: options.cacheEntityName }
1134
- ).execute();
1135
- if (options.logCache) {
1136
- console.warn(`Store value to cache, cacheKey: ${key}`);
1137
- }
1138
- } catch (error) {
1139
- console.error(`Error setting cache: ${error.message}`, error);
1140
- }
1141
- }
1142
- function isQuery(obj) {
1143
- return typeof obj === "object" && obj !== null && typeof obj.sql === "string" && Array.isArray(obj.params);
1144
- }
1145
- const cacheApplicationContext = new node_async_hooks.AsyncLocalStorage();
1146
- const localCacheApplicationContext = new node_async_hooks.AsyncLocalStorage();
1147
- async function saveTableIfInsideCacheContext(table$1) {
1148
- const context = cacheApplicationContext.getStore();
1149
- if (context) {
1150
- const tableName = table.getTableName(table$1).toLowerCase();
1151
- context.tables.add(tableName);
1152
- }
1153
- }
1154
- async function saveQueryLocalCacheQuery(query, rows, options) {
1155
- const context = localCacheApplicationContext.getStore();
1156
- if (context) {
1157
- if (!context.cache) {
1158
- context.cache = {};
1159
- }
1160
- let sql2;
1161
- if (isQuery(query)) {
1162
- sql2 = { toSQL: () => query };
1163
- } else {
1164
- sql2 = query;
1165
- }
1166
- const key = hashKey(sql2.toSQL());
1167
- context.cache[key] = {
1168
- sql: sql2.toSQL().sql.toLowerCase(),
1169
- data: rows
1170
- };
1171
- if (options.logCache) {
1172
- const q = sql2.toSQL();
1173
- console.debug(
1174
- `[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${q.sql}", params=${JSON.stringify(q.params)}`
1175
- );
1176
- }
1177
- }
1178
- }
1179
- async function getQueryLocalCacheQuery(query, options) {
1180
- const context = localCacheApplicationContext.getStore();
1181
- if (context) {
1182
- if (!context.cache) {
1183
- context.cache = {};
1184
- }
1185
- let sql2;
1186
- if (isQuery(query)) {
1187
- sql2 = { toSQL: () => query };
1188
- } else {
1189
- sql2 = query;
1190
- }
1191
- const key = hashKey(sql2.toSQL());
1192
- if (context.cache[key] && context.cache[key].sql === sql2.toSQL().sql.toLowerCase()) {
1193
- if (options.logCache) {
1194
- const q = sql2.toSQL();
1195
- console.debug(
1196
- `[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${q.sql}", params=${JSON.stringify(q.params)}`
1197
- );
1198
- }
1199
- return context.cache[key].data;
1200
- }
1201
- }
1202
- return void 0;
1203
- }
1204
- async function evictLocalCacheQuery(table$1, options) {
1205
- const context = localCacheApplicationContext.getStore();
1206
- if (context) {
1207
- if (!context.cache) {
1208
- context.cache = {};
1209
- }
1210
- const tableName = table.getTableName(table$1);
1211
- const searchString = options.cacheWrapTable ? `\`${tableName}\`` : tableName;
1212
- const keyToEvicts = [];
1213
- Object.keys(context.cache).forEach((key) => {
1214
- if (context.cache[key].sql.includes(searchString)) {
1215
- keyToEvicts.push(key);
1216
- }
1217
- });
1218
- keyToEvicts.forEach((key) => delete context.cache[key]);
1219
- }
1220
- }
1221
- async function isTableContainsTableInCacheContext(sql2, options) {
1222
- const context = cacheApplicationContext.getStore();
1223
- if (!context) {
1224
- return false;
1225
- }
1226
- const tables = Array.from(context.tables);
1227
- const lowerSql = sql2.toLowerCase();
1228
- return tables.some((table2) => {
1229
- const tablePattern = options.cacheWrapTable ? `\`${table2}\`` : table2;
1230
- return lowerSql.includes(tablePattern);
1231
- });
1232
- }
1233
- class ForgeSQLCrudOperations {
1234
- forgeOperations;
1235
- options;
1236
- /**
1237
- * Creates a new instance of ForgeSQLCrudOperations.
1238
- * @param forgeSqlOperations - The ForgeSQL operations instance
1239
- * @param options - Configuration options for the ORM
1240
- */
1241
- constructor(forgeSqlOperations, options) {
1242
- this.forgeOperations = forgeSqlOperations;
1243
- this.options = options;
1244
- }
1245
- /**
1246
- * Inserts records into the database with optional versioning support.
1247
- * If a version field exists in the schema, versioning is applied.
1248
- *
1249
- * This method automatically handles:
1250
- * - Version field initialization for optimistic locking
1251
- * - Batch insertion for multiple records
1252
- * - Duplicate key handling with optional updates
1253
- *
1254
- * @template T - The type of the table schema
1255
- * @param schema - The entity schema
1256
- * @param models - Array of entities to insert
1257
- * @param updateIfExists - Whether to update existing records (default: false)
1258
- * @returns Promise that resolves to the number of inserted rows
1259
- * @throws Error if the insert operation fails
1260
- */
1261
- async insert(schema, models, updateIfExists = false) {
1262
- if (!models?.length) return 0;
1263
- const { tableName, columns } = getTableMetadata(schema);
1264
- const versionMetadata = this.validateVersionField(tableName, columns);
1265
- const preparedModels = models.map(
1266
- (model) => this.prepareModelWithVersion(model, versionMetadata, columns)
1267
- );
1268
- const queryBuilder = this.forgeOperations.insert(schema).values(preparedModels);
1269
- const finalQuery = updateIfExists ? queryBuilder.onDuplicateKeyUpdate({
1270
- set: Object.fromEntries(
1271
- Object.keys(preparedModels[0]).map((key) => [key, schema[key]])
1272
- )
1273
- }) : queryBuilder;
1274
- const result = await finalQuery;
1275
- await saveTableIfInsideCacheContext(schema);
1276
- return result[0].insertId;
1277
- }
1278
- /**
1279
- * Deletes a record by its primary key with optional version check.
1280
- * If versioning is enabled, ensures the record hasn't been modified since last read.
1281
- *
1282
- * This method automatically handles:
1283
- * - Single primary key validation
1284
- * - Optimistic locking checks if versioning is enabled
1285
- * - Version field validation before deletion
1286
- *
1287
- * @template T - The type of the table schema
1288
- * @param id - The ID of the record to delete
1289
- * @param schema - The entity schema
1290
- * @returns Promise that resolves to the number of affected rows
1291
- * @throws Error if the delete operation fails
1292
- * @throws Error if multiple primary keys are found
1293
- * @throws Error if optimistic locking check fails
1294
- */
1295
- async deleteById(id, schema) {
1296
- const { tableName, columns } = getTableMetadata(schema);
1297
- const primaryKeys = this.getPrimaryKeys(schema);
1298
- if (primaryKeys.length !== 1) {
1299
- throw new Error("Only single primary key is supported");
1300
- }
1301
- const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
1302
- const versionMetadata = this.validateVersionField(tableName, columns);
1303
- const conditions = [drizzleOrm.eq(primaryKeyColumn, id)];
1304
- if (versionMetadata && columns) {
1305
- const versionField = columns[versionMetadata.fieldName];
1306
- if (versionField) {
1307
- const oldModel = await this.getOldModel({ [primaryKeyName]: id }, schema, [
1308
- versionMetadata.fieldName,
1309
- versionField
1310
- ]);
1311
- conditions.push(drizzleOrm.eq(versionField, oldModel[versionMetadata.fieldName]));
1312
- }
1313
- }
1314
- const queryBuilder = this.forgeOperations.delete(schema).where(drizzleOrm.and(...conditions));
1315
- const result = await queryBuilder;
1316
- if (versionMetadata && result[0].affectedRows === 0) {
1317
- throw new Error(`Optimistic locking failed: record with primary key ${id} has been modified`);
1318
- }
1319
- await saveTableIfInsideCacheContext(schema);
1320
- return result[0].affectedRows;
1321
- }
1322
- /**
1323
- * Updates a record by its primary key with optimistic locking support.
1324
- * If versioning is enabled:
1325
- * - Retrieves the current version
1326
- * - Checks for concurrent modifications
1327
- * - Increments the version on successful update
1328
- *
1329
- * This method automatically handles:
1330
- * - Primary key validation
1331
- * - Version field retrieval and validation
1332
- * - Optimistic locking conflict detection
1333
- * - Version field incrementation
1334
- *
1335
- * @template T - The type of the table schema
1336
- * @param entity - The entity with updated values (must include primary key)
1337
- * @param schema - The entity schema
1338
- * @returns Promise that resolves to the number of affected rows
1339
- * @throws Error if the primary key is not provided
1340
- * @throws Error if optimistic locking check fails
1341
- * @throws Error if multiple primary keys are found
1342
- */
1343
- async updateById(entity, schema) {
1344
- const { tableName, columns } = getTableMetadata(schema);
1345
- const primaryKeys = this.getPrimaryKeys(schema);
1346
- if (primaryKeys.length !== 1) {
1347
- throw new Error("Only single primary key is supported");
1348
- }
1349
- const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
1350
- const versionMetadata = this.validateVersionField(tableName, columns);
1351
- if (!(primaryKeyName in entity)) {
1352
- throw new Error(`Primary key ${primaryKeyName} must be provided in the entity`);
1353
- }
1354
- const currentVersion = await this.getCurrentVersion(
1355
- entity,
1356
- primaryKeyName,
1357
- versionMetadata,
1358
- columns,
1359
- schema
1360
- );
1361
- const updateData = this.prepareUpdateData(entity, versionMetadata, columns, currentVersion);
1362
- const conditions = [
1363
- drizzleOrm.eq(primaryKeyColumn, entity[primaryKeyName])
1364
- ];
1365
- if (versionMetadata && columns) {
1366
- const versionField = columns[versionMetadata.fieldName];
1367
- if (versionField) {
1368
- conditions.push(drizzleOrm.eq(versionField, currentVersion));
1369
- }
1370
- }
1371
- const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(drizzleOrm.and(...conditions));
1372
- const result = await queryBuilder;
1373
- if (versionMetadata && result[0].affectedRows === 0) {
1374
- throw new Error(
1375
- `Optimistic locking failed: record with primary key ${entity[primaryKeyName]} has been modified`
1376
- );
1377
- }
1378
- await saveTableIfInsideCacheContext(schema);
1379
- return result[0].affectedRows;
1380
- }
1381
- /**
1382
- * Updates specified fields of records based on provided conditions.
1383
- * This method does not support versioning and should be used with caution.
1384
- *
1385
- * @template T - The type of the table schema
1386
- * @param {Partial<InferInsertModel<T>>} updateData - The data to update
1387
- * @param {T} schema - The entity schema
1388
- * @param {SQL<unknown>} where - The WHERE conditions
1389
- * @returns {Promise<number>} Number of affected rows
1390
- * @throws {Error} If WHERE conditions are not provided
1391
- * @throws {Error} If the update operation fails
1392
- */
1393
- async updateFields(updateData, schema, where) {
1394
- if (!where) {
1395
- throw new Error("WHERE conditions must be provided");
1396
- }
1397
- const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(where);
1398
- const result = await queryBuilder;
1399
- await saveTableIfInsideCacheContext(schema);
1400
- return result[0].affectedRows;
1401
- }
1402
- // Helper methods
1403
- /**
1404
- * Gets primary keys from the schema.
1405
- * @template T - The type of the table schema
1406
- * @param {T} schema - The table schema
1407
- * @returns {[string, AnyColumn][]} Array of primary key name and column pairs
1408
- * @throws {Error} If no primary keys are found
1409
- */
1410
- getPrimaryKeys(schema) {
1411
- const primaryKeys = getPrimaryKeys(schema);
1412
- if (!primaryKeys) {
1413
- throw new Error(`No primary keys found for schema: ${schema}`);
1414
- }
1415
- return primaryKeys;
1416
- }
1417
- /**
1418
- * Validates and retrieves version field metadata.
1419
- * @param {string} tableName - The name of the table
1420
- * @param {Record<string, AnyColumn>} columns - The table columns
1421
- * @returns {Object | undefined} Version field metadata if valid, undefined otherwise
1422
- */
1423
- validateVersionField(tableName, columns) {
1424
- if (this.options.disableOptimisticLocking) {
1425
- return void 0;
1426
- }
1427
- const versionMetadata = this.options.additionalMetadata?.[tableName]?.versionField;
1428
- if (!versionMetadata) return void 0;
1429
- let fieldName = versionMetadata.fieldName;
1430
- let versionField = columns[versionMetadata.fieldName];
1431
- if (!versionField) {
1432
- const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
1433
- if (find) {
1434
- fieldName = find[0];
1435
- versionField = find[1];
1436
- }
1437
- }
1438
- if (!versionField) {
1439
- console.warn(
1440
- `Version field "${versionMetadata.fieldName}" not found in table ${tableName}. Versioning will be skipped.`
1441
- );
1442
- return void 0;
1443
- }
1444
- if (!versionField.notNull) {
1445
- console.warn(
1446
- `Version field "${versionMetadata.fieldName}" in table ${tableName} is nullable. Versioning may not work correctly.`
1447
- );
1448
- return void 0;
1449
- }
1450
- const fieldType = versionField.getSQLType();
1451
- const isSupportedType = fieldType === "datetime" || fieldType === "timestamp" || fieldType === "int" || fieldType === "number" || fieldType === "decimal";
1452
- if (!isSupportedType) {
1453
- console.warn(
1454
- `Version field "${versionMetadata.fieldName}" in table ${tableName} has unsupported type "${fieldType}". Only datetime, timestamp, int, and decimal types are supported for versioning. Versioning will be skipped.`
1455
- );
1456
- return void 0;
1457
- }
1458
- return { fieldName, type: fieldType };
1459
- }
1460
- /**
1461
- * Gets the current version of an entity.
1462
- * @template T - The type of the table schema
1463
- * @param {Partial<InferInsertModel<T>>} entity - The entity
1464
- * @param {string} primaryKeyName - The name of the primary key
1465
- * @param {Object | undefined} versionMetadata - Version field metadata
1466
- * @param {Record<string, AnyColumn>} columns - The table columns
1467
- * @param {T} schema - The table schema
1468
- * @returns {Promise<unknown>} The current version value
1469
- */
1470
- async getCurrentVersion(entity, primaryKeyName, versionMetadata, columns, schema) {
1471
- if (!versionMetadata || !columns) return void 0;
1472
- const versionField = columns[versionMetadata.fieldName];
1473
- if (!versionField) return void 0;
1474
- if (versionMetadata.fieldName in entity) {
1475
- return entity[versionMetadata.fieldName];
1476
- }
1477
- const oldModel = await this.getOldModel(
1478
- { [primaryKeyName]: entity[primaryKeyName] },
1479
- schema,
1480
- [versionMetadata.fieldName, versionField]
1481
- );
1482
- return oldModel[versionMetadata.fieldName];
1483
- }
1484
- /**
1485
- * Prepares a model for insertion with version field.
1486
- * @template T - The type of the table schema
1487
- * @param {Partial<InferInsertModel<T>>} model - The model to prepare
1488
- * @param {Object | undefined} versionMetadata - Version field metadata
1489
- * @param {Record<string, AnyColumn>} columns - The table columns
1490
- * @returns {InferInsertModel<T>} The prepared model
1491
- */
1492
- prepareModelWithVersion(model, versionMetadata, columns) {
1493
- if (!versionMetadata || !columns) return model;
1494
- let fieldName = versionMetadata.fieldName;
1495
- let versionField = columns[versionMetadata.fieldName];
1496
- if (!versionField) {
1497
- const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
1498
- if (find) {
1499
- fieldName = find[0];
1500
- versionField = find[1];
1501
- }
1502
- }
1503
- if (!versionField) return model;
1504
- const modelWithVersion = { ...model };
1505
- const fieldType = versionField.getSQLType();
1506
- const versionValue = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : 1;
1507
- modelWithVersion[fieldName] = versionValue;
1508
- return modelWithVersion;
1509
- }
1510
- /**
1511
- * Prepares update data with version field.
1512
- * @template T - The type of the table schema
1513
- * @param {Partial<InferInsertModel<T>>} entity - The entity to update
1514
- * @param {Object | undefined} versionMetadata - Version field metadata
1515
- * @param {Record<string, AnyColumn>} columns - The table columns
1516
- * @param {unknown} currentVersion - The current version value
1517
- * @returns {Partial<InferInsertModel<T>>} The prepared update data
1518
- */
1519
- prepareUpdateData(entity, versionMetadata, columns, currentVersion) {
1520
- const updateData = { ...entity };
1521
- if (versionMetadata && columns) {
1522
- const versionField = columns[versionMetadata.fieldName];
1523
- if (versionField) {
1524
- const fieldType = versionField.getSQLType();
1525
- updateData[versionMetadata.fieldName] = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : currentVersion + 1;
1526
- }
1527
- }
1528
- return updateData;
1529
- }
1530
- /**
1531
- * Retrieves an existing model by primary key.
1532
- * @template T - The type of the table schema
1533
- * @param {Record<string, unknown>} primaryKeyValues - The primary key values
1534
- * @param {T} schema - The table schema
1535
- * @param {[string, AnyColumn]} versionField - The version field name and column
1536
- * @returns {Promise<Awaited<T> extends Array<any> ? Awaited<T>[number] | undefined : Awaited<T> | undefined>} The existing model
1537
- * @throws {Error} If the record is not found
1538
- */
1539
- async getOldModel(primaryKeyValues, schema, versionField) {
1540
- const [versionFieldName, versionFieldColumn] = versionField;
1541
- const primaryKeys = this.getPrimaryKeys(schema);
1542
- const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
1543
- const resultQuery = this.forgeOperations.select({
1544
- [primaryKeyName]: primaryKeyColumn,
1545
- [versionFieldName]: versionFieldColumn
1546
- }).from(schema).where(drizzleOrm.eq(primaryKeyColumn, primaryKeyValues[primaryKeyName]));
1547
- const model = await this.forgeOperations.fetch().executeQueryOnlyOne(resultQuery);
1548
- if (!model) {
1549
- throw new Error(`Record not found in table ${schema}`);
1550
- }
1551
- return model;
1552
- }
1553
- }
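These CRUD operations are normally reached through the ORM facade rather than constructed directly. A minimal sketch of the versioned write flow, assuming a configured `forgeSQL` instance and a hypothetical `users` table whose `version` column is registered in `additionalMetadata` (the shape the code above reads: `{ users: { versionField: { fieldName: "version" } } }`):

```typescript
import { mysqlTable, int, varchar } from "drizzle-orm/mysql-core";

// Hypothetical schema; the version column must be NOT NULL and of a supported type
// (datetime, timestamp, int, number, or decimal) for versioning to apply.
const users = mysqlTable("users", {
  id: int("id").primaryKey(),
  name: varchar("name", { length: 255 }).notNull(),
  version: int("version").notNull(),
});

// insert() initializes the version field (1 for numeric types, the current date for datetime/timestamp).
await forgeSQL.modifyWithVersioning().insert(users, [{ id: 1, name: "Alice" }]);

// updateById() reads the current version, adds it to the WHERE clause, and increments it;
// zero affected rows with versioning enabled surfaces as an optimistic locking error.
await forgeSQL.modifyWithVersioning().updateById({ id: 1, name: "Alice Smith" }, users);

// deleteById() applies the same version check before removing the row.
await forgeSQL.modifyWithVersioning().deleteById(1, users);
```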
1554
- class ForgeSQLSelectOperations {
1555
- options;
1556
- /**
1557
- * Creates a new instance of ForgeSQLSelectOperations.
1558
- * @param {ForgeSqlOrmOptions} options - Configuration options for the ORM
1559
- */
1560
- constructor(options) {
1561
- this.options = options;
1562
- }
1563
- /**
1564
- * Executes a Drizzle query and returns a single result.
1565
- * Throws an error if more than one record is returned.
1566
- *
1567
- * @template T - The type of the query builder
1568
- * @param {T} query - The Drizzle query to execute
1569
- * @returns {Promise<Awaited<T> extends Array<any> ? Awaited<T>[number] | undefined : Awaited<T> | undefined>} A single result object or undefined
1570
- * @throws {Error} If more than one record is returned
1571
- */
1572
- async executeQueryOnlyOne(query) {
1573
- const results = await query;
1574
- const datas = results;
1575
- if (!datas.length) {
1576
- return void 0;
1577
- }
1578
- if (datas.length > 1) {
1579
- throw new Error(`Expected 1 record but returned ${datas.length}`);
1580
- }
1581
- return datas[0];
1582
- }
1583
- /**
1584
- * Executes a raw SQL query and returns the results.
1585
- * Logs the query if logging is enabled.
1586
- *
1587
- * @template T - The type of the result objects
1588
- * @param {string} query - The raw SQL query to execute
1589
- * @param {SqlParameters[]} [params] - Optional SQL parameters
1590
- * @returns {Promise<T[]>} A list of results as objects
1591
- */
1592
- async executeRawSQL(query, params) {
1593
- if (this.options.logRawSqlQuery) {
1594
- const paramsStr = params ? `, with params: ${JSON.stringify(params)}` : "";
1595
- console.debug(`Executing with SQL ${query}${paramsStr}`);
1596
- }
1597
- const sqlStatement = sql.sql.prepare(query);
1598
- if (params) {
1599
- sqlStatement.bindParams(...params);
1600
- }
1601
- const result = await sqlStatement.execute();
1602
- return result.rows;
1603
- }
1604
- /**
1605
- * Executes a raw SQL update query.
1606
- * @param {string} query - The raw SQL update query
1607
- * @param {SqlParameters[]} [params] - Optional SQL parameters
1608
- * @returns {Promise<UpdateQueryResponse>} The update response containing affected rows
1609
- */
1610
- async executeRawUpdateSQL(query, params) {
1611
- const sqlStatement = sql.sql.prepare(query);
1612
- if (params) {
1613
- sqlStatement.bindParams(...params);
1614
- }
1615
- if (this.options.logRawSqlQuery) {
1616
- console.debug(
1617
- `Executing Update with SQL ${query}` + (params ? `, with params: ${JSON.stringify(params)}` : "")
1618
- );
1619
- }
1620
- const updateQueryResponseResults = await sqlStatement.execute();
1621
- return updateQueryResponseResults.rows;
1622
- }
1623
- }
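A short sketch of the fetch helpers, reusing the hypothetical `users` table and `forgeSQL` instance from the previous example:

```typescript
import { eq } from "drizzle-orm";

// executeQueryOnlyOne() returns undefined when no rows match and throws if more than one row comes back.
const user = await forgeSQL.fetch().executeQueryOnlyOne(
  forgeSQL.select({ id: users.id, name: users.name }).from(users).where(eq(users.id, 1)),
);

// executeRawSQL() prepares the statement, binds the parameters, and returns plain row objects.
const rows = await forgeSQL
  .fetch()
  .executeRawSQL<{ id: number; name: string }>("SELECT id, name FROM users WHERE id = ?", [1]);
```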
1624
- const metadataQueryContext = new node_async_hooks.AsyncLocalStorage();
1625
- async function saveMetaDataToContext(metadata) {
1626
- const context = metadataQueryContext.getStore();
1627
- if (context) {
1628
- context.printQueriesWithPlan = async () => {
1629
- if (process.env.NODE_ENV !== "test") {
1630
- await new Promise((r) => setTimeout(r, 200));
1631
- }
1632
- await printQueriesWithPlan(context.forgeSQLORM, Date.now() - context.beginTime.getTime());
1633
- };
1634
- if (metadata) {
1635
- context.totalResponseSize += metadata.responseSize;
1636
- context.totalDbExecutionTime += metadata.dbExecutionTime;
1637
- }
1638
- }
1639
- }
1640
- async function getLastestMetadata() {
1641
- return metadataQueryContext.getStore();
1642
- }
1643
- const operationTypeQueryContext = new node_async_hooks.AsyncLocalStorage();
1644
- async function getOperationType() {
1645
- return operationTypeQueryContext.getStore()?.operationType ?? "DML";
1646
- }
1647
- const timeoutMs = 1e4;
1648
- const timeoutMessage = `Atlassian @forge/sql did not return a response within ${timeoutMs}ms (${timeoutMs / 1e3} seconds), so the request is blocked. Possible causes: slow query, network issues, or exceeding Forge SQL limits.`;
1649
- function isUpdateQueryResponse(obj) {
1650
- return obj !== null && typeof obj === "object" && typeof obj.affectedRows === "number" && typeof obj.insertId === "number";
1651
- }
1652
- function inlineParams(sql2, params) {
1653
- let i = 0;
1654
- return sql2.replace(/\?/g, () => {
1655
- const val = params[i++];
1656
- if (val === null) return "NULL";
1657
- if (typeof val === "number") return val.toString();
1658
- return `'${String(val).replace(/'/g, "''")}'`;
1659
- });
1660
- }
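For DDL statements the driver inlines parameters before calling `executeDDL`; a small illustration of the substitution rules implemented by the (module-internal) helper above:

```typescript
// '?' placeholders are replaced left to right: numbers verbatim, NULL for null,
// everything else single-quoted with embedded quotes doubled.
inlineParams("UPDATE t SET name = ?, score = ? WHERE id = ?", ["O'Brien", 42, null]);
// => "UPDATE t SET name = 'O''Brien', score = 42 WHERE id = NULL"
```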
1661
- async function processDDLResult(method, result) {
1662
- if (result.metadata) {
1663
- await saveMetaDataToContext(result.metadata);
1664
- }
1665
- if (!result?.rows) {
1666
- return { rows: [] };
1667
- }
1668
- if (isUpdateQueryResponse(result.rows)) {
1669
- const oneRow = result.rows;
1670
- return { ...oneRow, rows: [oneRow] };
1671
- }
1672
- if (Array.isArray(result.rows)) {
1673
- if (method === "execute") {
1674
- return { rows: [result.rows] };
1675
- } else {
1676
- const rows = result.rows.map((r) => Object.values(r));
1677
- return { rows };
1678
- }
1679
- }
1680
- return { rows: [] };
1681
- }
1682
- async function processExecuteMethod(query, params) {
1683
- const sqlStatement = sql.sql.prepare(query);
1684
- if (params) {
1685
- sqlStatement.bindParams(...params);
1686
- }
1687
- const result = await withTimeout(sqlStatement.execute(), timeoutMessage, timeoutMs);
1688
- await saveMetaDataToContext(result.metadata);
1689
- if (!result.rows) {
1690
- return { rows: [[]] };
1691
- }
1692
- return { rows: [result.rows] };
1693
- }
1694
- async function processAllMethod(query, params) {
1695
- const sqlStatement = await sql.sql.prepare(query);
1696
- if (params) {
1697
- await sqlStatement.bindParams(...params);
1698
- }
1699
- const result = await withTimeout(
1700
- sqlStatement.execute(),
1701
- timeoutMessage,
1702
- timeoutMs
1703
- );
1704
- await saveMetaDataToContext(result.metadata);
1705
- if (!result.rows) {
1706
- return { rows: [] };
1707
- }
1708
- const rows = result.rows.map((r) => Object.values(r));
1709
- return { rows };
1710
- }
1711
- const forgeDriver = async (query, params, method) => {
1712
- const operationType = await getOperationType();
1713
- if (operationType === "DDL") {
1714
- const result = await withTimeout(
1715
- sql.sql.executeDDL(inlineParams(query, params)),
1716
- timeoutMessage,
1717
- timeoutMs
1718
- );
1719
- return await processDDLResult(method, result);
1720
- }
1721
- if (method === "execute") {
1722
- return await processExecuteMethod(query, params ?? []);
1723
- }
1724
- return await processAllMethod(query, params ?? []);
1725
- };
1726
- function injectSqlHints(query, hints) {
1727
- if (!hints) {
1728
- return query;
1729
- }
1730
- const normalizedQuery = query.trim().toUpperCase();
1731
- let queryHints;
1732
- if (normalizedQuery.startsWith("SELECT")) {
1733
- queryHints = hints.select;
1734
- } else if (normalizedQuery.startsWith("INSERT")) {
1735
- queryHints = hints.insert;
1736
- } else if (normalizedQuery.startsWith("UPDATE")) {
1737
- queryHints = hints.update;
1738
- } else if (normalizedQuery.startsWith("DELETE")) {
1739
- queryHints = hints.delete;
1740
- }
1741
- if (!queryHints || queryHints.length === 0) {
1742
- return query;
1743
- }
1744
- const hintsString = queryHints.join(" ");
1745
- if (normalizedQuery.startsWith("SELECT")) {
1746
- return `SELECT /*+ ${hintsString} */ ${query.substring(6)}`;
1747
- } else if (normalizedQuery.startsWith("INSERT")) {
1748
- return `INSERT /*+ ${hintsString} */ ${query.substring(6)}`;
1749
- } else if (normalizedQuery.startsWith("UPDATE")) {
1750
- return `UPDATE /*+ ${hintsString} */ ${query.substring(6)}`;
1751
- } else if (normalizedQuery.startsWith("DELETE")) {
1752
- return `DELETE /*+ ${hintsString} */ ${query.substring(6)}`;
1753
- }
1754
- return query;
1755
- }
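Hint injection only rewrites statements whose leading keyword has hints configured; a brief illustration with hypothetical hint values:

```typescript
const hints = { select: ["MAX_EXECUTION_TIME(5000)"], update: ["USE_INDEX(users, PRIMARY)"] };

injectSqlHints("SELECT id FROM users WHERE id = ?", hints);
// => "SELECT /*+ MAX_EXECUTION_TIME(5000) */  id FROM users WHERE id = ?"
//    (the space after SELECT is preserved from the original statement)

injectSqlHints("DELETE FROM users WHERE id = ?", hints);
// => unchanged: no hints are configured for DELETE
```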
1756
- const QUERY_ERROR_CODES = {
1757
- TIMEOUT: "SQL_QUERY_TIMEOUT",
1758
- OUT_OF_MEMORY_ERRNO: 8175
1759
- };
1760
- const STATEMENTS_SUMMARY_DELAY_MS = 200;
1761
- function createForgeDriverProxy(forgeSqlOperation, options, logRawSqlQuery) {
1762
- return async (query, params, method) => {
1763
- const modifiedQuery = injectSqlHints(query, options);
1764
- if (options && logRawSqlQuery && modifiedQuery !== query) {
1765
- console.debug(`SQL Hints injected: ${modifiedQuery}`);
1766
- }
1767
- const queryStartTime = Date.now();
1768
- try {
1769
- return await forgeDriver(modifiedQuery, params, method);
1770
- } catch (error) {
1771
- const isTimeoutError = error.code === QUERY_ERROR_CODES.TIMEOUT;
1772
- const isOutOfMemoryError = error?.context?.debug?.errno === QUERY_ERROR_CODES.OUT_OF_MEMORY_ERRNO;
1773
- if (isTimeoutError || isOutOfMemoryError) {
1774
- if (isTimeoutError) {
1775
-          console.error(`TIMEOUT detected - Query exceeded time limit`);
1776
- } else {
1777
- console.error(`OUT OF MEMORY detected - Query exceeded memory limit`);
1778
- }
1779
- await new Promise((resolve) => setTimeout(resolve, STATEMENTS_SUMMARY_DELAY_MS));
1780
- const queryEndTime = Date.now();
1781
- const queryDuration = queryEndTime - queryStartTime;
1782
- await printQueriesWithPlan(forgeSqlOperation, queryDuration);
1783
- }
1784
- if (logRawSqlQuery) {
1785
- console.debug(`SQL Error Details:`, JSON.stringify(error, null, 2));
1786
- }
1787
- throw error;
1788
- }
1789
- };
1790
- }
1791
- const NON_CACHE_CLEARING_ERROR_CODES = ["VALIDATION_ERROR", "CONSTRAINT_ERROR"];
1792
- const CACHE_CLEARING_ERROR_CODES = ["DEADLOCK", "LOCK_WAIT_TIMEOUT", "CONNECTION_ERROR"];
1793
- const NON_CACHE_CLEARING_PATTERNS = [/validation/i, /constraint/i];
1794
- const CACHE_CLEARING_PATTERNS = [/timeout/i, /connection/i];
1795
- function shouldClearCacheOnError(error) {
1796
- if (error?.code && NON_CACHE_CLEARING_ERROR_CODES.includes(error.code)) {
1797
- return false;
1798
- }
1799
- if (error?.message && NON_CACHE_CLEARING_PATTERNS.some((pattern) => pattern.test(error.message))) {
1800
- return false;
1801
- }
1802
- if (error?.code && CACHE_CLEARING_ERROR_CODES.includes(error.code)) {
1803
- return true;
1804
- }
1805
- if (error?.message && CACHE_CLEARING_PATTERNS.some((pattern) => pattern.test(error.message))) {
1806
- return true;
1807
- }
1808
- return true;
1809
- }
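The classifier deliberately errs on the side of clearing: only errors explicitly recognized as validation or constraint failures keep the cache, everything else (including unknown errors) clears it. For instance:

```typescript
shouldClearCacheOnError({ code: "VALIDATION_ERROR" });        // false – nothing was written
shouldClearCacheOnError({ message: "Lock wait timeout" });    // true  – matches /timeout/i
shouldClearCacheOnError({ message: "something unexpected" }); // true  – unknown errors default to clearing
```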
1810
- async function handleSuccessfulExecution(rows, onfulfilled, table2, options, isCached) {
1811
- try {
1812
- await evictLocalCacheQuery(table2, options);
1813
- await saveTableIfInsideCacheContext(table2);
1814
- if (isCached && !cacheApplicationContext.getStore()) {
1815
- await clearCache(table2, options);
1816
- }
1817
- const result = onfulfilled ? onfulfilled(rows) : rows;
1818
- return result;
1819
- } catch (error) {
1820
- if (shouldClearCacheOnError(error)) {
1821
- await evictLocalCacheQuery(table2, options);
1822
- if (isCached) {
1823
- await clearCache(table2, options).catch((e) => {
1824
- console.warn("Ignore cache clear errors", e);
1825
- });
1826
- } else {
1827
- await saveTableIfInsideCacheContext(table2);
1828
- }
1829
- }
1830
- throw error;
1831
- }
1832
- }
1833
- function handleFunctionCall(value, target, args, table2, options, isCached) {
1834
- const result = value.apply(target, args);
1835
- if (typeof result === "object" && result !== null && "execute" in result) {
1836
- return wrapCacheEvictBuilder(result, table2, options, isCached);
1837
- }
1838
- return result;
1839
- }
1840
- const wrapCacheEvictBuilder = (rawBuilder, table2, options, isCached) => {
1841
- return new Proxy(rawBuilder, {
1842
- get(target, prop, receiver) {
1843
- if (prop === "then") {
1844
- return (onfulfilled, onrejected) => target.execute().then(
1845
- (rows) => handleSuccessfulExecution(rows, onfulfilled, table2, options, isCached),
1846
- onrejected
1847
- );
1848
- }
1849
- const value = Reflect.get(target, prop, receiver);
1850
- if (typeof value === "function") {
1851
- return (...args) => handleFunctionCall(value, target, args, table2, options, isCached);
1852
- }
1853
- return value;
1854
- }
1855
- });
1856
- };
1857
- function insertAndEvictCacheBuilder(db, table2, options, isCached) {
1858
- const builder = db.insert(table2);
1859
- return wrapCacheEvictBuilder(
1860
- builder,
1861
- table2,
1862
- options,
1863
- isCached
1864
- );
1865
- }
1866
- function updateAndEvictCacheBuilder(db, table2, options, isCached) {
1867
- const builder = db.update(table2);
1868
- return wrapCacheEvictBuilder(
1869
- builder,
1870
- table2,
1871
- options,
1872
- isCached
1873
- );
1874
- }
1875
- function deleteAndEvictCacheBuilder(db, table2, options, isCached) {
1876
- const builder = db.delete(table2);
1877
- return wrapCacheEvictBuilder(
1878
- builder,
1879
- table2,
1880
- options,
1881
- isCached
1882
- );
1883
- }
1884
- async function handleCachedQuery(target, options, cacheTtl, selections, aliasMap, onfulfilled, onrejected) {
1885
- try {
1886
- const localCached = await getQueryLocalCacheQuery(target, options);
1887
- if (localCached) {
1888
- return onfulfilled ? onfulfilled(localCached) : localCached;
1889
- }
1890
- const cacheResult = await getFromCache(target, options);
1891
- if (cacheResult) {
1892
- return onfulfilled ? onfulfilled(cacheResult) : cacheResult;
1893
- }
1894
- const rows = await target.execute();
1895
- const transformed = applyFromDriverTransform(rows, selections, aliasMap);
1896
- await saveQueryLocalCacheQuery(target, transformed, options);
1897
- await setCacheResult(target, options, transformed, cacheTtl).catch((cacheError) => {
1898
- console.warn("Cache set error:", cacheError);
1899
- });
1900
- return onfulfilled ? onfulfilled(transformed) : transformed;
1901
- } catch (error) {
1902
- if (onrejected) {
1903
- return onrejected(error);
1904
- }
1905
- throw error;
1906
- }
1907
- }
1908
- async function handleNonCachedQuery(target, options, selections, aliasMap, onfulfilled, onrejected) {
1909
- try {
1910
- const localCached = await getQueryLocalCacheQuery(target, options);
1911
- if (localCached) {
1912
- return onfulfilled ? onfulfilled(localCached) : localCached;
1913
- }
1914
- const rows = await target.execute();
1915
- const transformed = applyFromDriverTransform(rows, selections, aliasMap);
1916
- await saveQueryLocalCacheQuery(target, transformed, options);
1917
- return onfulfilled ? onfulfilled(transformed) : transformed;
1918
- } catch (error) {
1919
- if (onrejected) {
1920
- return onrejected(error);
1921
- }
1922
- throw error;
1923
- }
1924
- }
1925
- function createAliasedSelectBuilder(db, fields, selectFn, useCache, options, cacheTtl) {
1926
- const { selections, aliasMap } = mapSelectFieldsWithAlias(fields);
1927
- const builder = selectFn(selections);
1928
- const wrapBuilder = (rawBuilder) => {
1929
- return new Proxy(rawBuilder, {
1930
- get(target, prop, receiver) {
1931
- if (prop === "execute") {
1932
- return async (...args) => {
1933
- const rows = await target.execute(...args);
1934
- return applyFromDriverTransform(rows, selections, aliasMap);
1935
- };
1936
- }
1937
- if (prop === "then") {
1938
- return (onfulfilled, onrejected) => {
1939
- if (useCache) {
1940
- const ttl = cacheTtl ?? options.cacheTTL ?? 120;
1941
- return handleCachedQuery(
1942
- target,
1943
- options,
1944
- ttl,
1945
- selections,
1946
- aliasMap,
1947
- onfulfilled,
1948
- onrejected
1949
- );
1950
- } else {
1951
- return handleNonCachedQuery(
1952
- target,
1953
- options,
1954
- selections,
1955
- aliasMap,
1956
- onfulfilled,
1957
- onrejected
1958
- );
1959
- }
1960
- };
1961
- }
1962
- const value = Reflect.get(target, prop, receiver);
1963
- if (typeof value === "function") {
1964
- return (...args) => {
1965
- const result = value.apply(target, args);
1966
- if (typeof result === "object" && result !== null && "execute" in result) {
1967
- return wrapBuilder(result);
1968
- }
1969
- return result;
1970
- };
1971
- }
1972
- return value;
1973
- }
1974
- });
1975
- };
1976
- return wrapBuilder(builder);
1977
- }
1978
- const DEFAULT_OPTIONS = {
1979
- logRawSqlQuery: false,
1980
- disableOptimisticLocking: false,
1981
- cacheTTL: 120,
1982
- cacheWrapTable: true,
1983
- cacheEntityQueryName: "sql",
1984
- cacheEntityExpirationName: "expiration",
1985
- cacheEntityDataName: "data"
1986
- };
1987
- function createRawQueryExecutor(db, options, useGlobalCache = false) {
1988
- return async function(query, cacheTtl) {
1989
- let sql2;
1990
- if (sql$1.isSQLWrapper(query)) {
1991
- const dialect = db.dialect;
1992
- sql2 = dialect.sqlToQuery(query);
1993
- } else {
1994
- sql2 = {
1995
- sql: query,
1996
- params: []
1997
- };
1998
- }
1999
- const localCacheResult = await getQueryLocalCacheQuery(sql2, options);
2000
- if (localCacheResult) {
2001
- return localCacheResult;
2002
- }
2003
- if (useGlobalCache) {
2004
- const cacheResult = await getFromCache({ toSQL: () => sql2 }, options);
2005
- if (cacheResult) {
2006
- return cacheResult;
2007
- }
2008
- }
2009
- const results = await db.execute(query);
2010
- await saveQueryLocalCacheQuery(sql2, results, options);
2011
- if (useGlobalCache) {
2012
- await setCacheResult(
2013
- { toSQL: () => sql2 },
2014
- options,
2015
- results,
2016
- cacheTtl ?? options.cacheTTL ?? 120
2017
- );
2018
- }
2019
- return results;
2020
- };
2021
- }
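The raw executor accepts either a Drizzle SQL fragment or a plain string and layers the request-scoped cache (and, for the cacheable variant, the global cache entity) in front of `db.execute()`. A short sketch, assuming `db` is the patched instance produced by `patchDbWithSelectAliased` below:

```typescript
import { sql } from "drizzle-orm";

// Local cache only: repeated calls inside one local cache context are answered from memory.
const totals = await db.executeQuery(sql`SELECT COUNT(*) AS total FROM users`);

// Local cache plus the global cache entity, with a 60-second TTL override.
const cachedTotals = await db.executeQueryCacheable(sql`SELECT COUNT(*) AS total FROM users`, 60);
```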
2022
- function patchDbWithSelectAliased(db, options) {
2023
- const newOptions = { ...DEFAULT_OPTIONS, ...options };
2024
- db.selectAliased = function(fields) {
2025
- return createAliasedSelectBuilder(
2026
- db,
2027
- fields,
2028
- (selections) => db.select(selections),
2029
- false,
2030
- newOptions
2031
- );
2032
- };
2033
- db.selectAliasedCacheable = function(fields, cacheTtl) {
2034
- return createAliasedSelectBuilder(
2035
- db,
2036
- fields,
2037
- (selections) => db.select(selections),
2038
- true,
2039
- newOptions,
2040
- cacheTtl
2041
- );
2042
- };
2043
- db.selectAliasedDistinct = function(fields) {
2044
- return createAliasedSelectBuilder(
2045
- db,
2046
- fields,
2047
- (selections) => db.selectDistinct(selections),
2048
- false,
2049
- newOptions
2050
- );
2051
- };
2052
- db.selectAliasedDistinctCacheable = function(fields, cacheTtl) {
2053
- return createAliasedSelectBuilder(
2054
- db,
2055
- fields,
2056
- (selections) => db.selectDistinct(selections),
2057
- true,
2058
- newOptions,
2059
- cacheTtl
2060
- );
2061
- };
2062
- db.selectFrom = function(table2) {
2063
- return db.selectAliased(drizzleOrm.getTableColumns(table2)).from(table2);
2064
- };
2065
- db.selectFromCacheable = function(table2, cacheTtl) {
2066
- return db.selectAliasedCacheable(drizzleOrm.getTableColumns(table2), cacheTtl).from(table2);
2067
- };
2068
- db.selectDistinctFrom = function(table2) {
2069
- return db.selectAliasedDistinct(drizzleOrm.getTableColumns(table2)).from(table2);
2070
- };
2071
- db.selectDistinctFromCacheable = function(table2, cacheTtl) {
2072
- return db.selectAliasedDistinctCacheable(drizzleOrm.getTableColumns(table2), cacheTtl).from(table2);
2073
- };
2074
- db.insertWithCacheContext = function(table2) {
2075
- return insertAndEvictCacheBuilder(db, table2, newOptions, false);
2076
- };
2077
- db.insertAndEvictCache = function(table2) {
2078
- return insertAndEvictCacheBuilder(db, table2, newOptions, true);
2079
- };
2080
- db.updateWithCacheContext = function(table2) {
2081
- return updateAndEvictCacheBuilder(db, table2, newOptions, false);
2082
- };
2083
- db.updateAndEvictCache = function(table2) {
2084
- return updateAndEvictCacheBuilder(db, table2, newOptions, true);
2085
- };
2086
- db.deleteWithCacheContext = function(table2) {
2087
- return deleteAndEvictCacheBuilder(db, table2, newOptions, false);
2088
- };
2089
- db.deleteAndEvictCache = function(table2) {
2090
- return deleteAndEvictCacheBuilder(db, table2, newOptions, true);
2091
- };
2092
- db.executeQuery = createRawQueryExecutor(db, newOptions, false);
2093
- db.executeQueryCacheable = createRawQueryExecutor(db, newOptions, true);
2094
- return db;
2095
- }
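The patched instance is what backs the facade's query helpers (the examples embedded later in this bundle use `forgeSQL.selectFrom(...)`), so the aliased and cache-aware builders read like ordinary Drizzle calls. A sketch, reusing the hypothetical `users` table and assuming the facade forwards these helpers from the patched instance:

```typescript
// Plain select with collision-safe column aliases, no caching.
const all = await forgeSQL.selectFrom(users);

// The same query served through the global cache entity, with a 300-second TTL override.
const cached = await forgeSQL.selectFromCacheable(users, 300);

// Write builders that evict cached entries for `users` after a successful execute.
await forgeSQL.insertAndEvictCache(users).values({ id: 3, name: "Carol", version: 1 });
await forgeSQL.deleteAndEvictCache(users).where(eq(users.id, 3));
```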
2096
- class ForgeSQLAnalyseOperation {
2097
- forgeOperations;
2098
- /**
2099
-   * Creates a new instance of ForgeSQLAnalyseOperation.
2100
- * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
2101
- */
2102
- constructor(forgeOperations) {
2103
- this.forgeOperations = forgeOperations;
2104
- this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
2105
- }
2106
- /**
2107
- * Executes EXPLAIN on a raw SQL query.
2108
- * @param {string} query - The SQL query to analyze
2109
- * @param {unknown[]} bindParams - The query parameters
2110
- * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
2111
- */
2112
- async explainRaw(query, bindParams) {
2113
- const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ${query}`, bindParams);
2114
- return results.map((row) => ({
2115
- id: row.id,
2116
- estRows: row.estRows,
2117
- actRows: row.actRows,
2118
- task: row.task,
2119
- accessObject: row["access object"],
2120
- executionInfo: row["execution info"],
2121
- operatorInfo: row["operator info"],
2122
- memory: row.memory,
2123
- disk: row.disk
2124
- }));
2125
- }
2126
- /**
2127
- * Executes EXPLAIN on a Drizzle query.
2128
- * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
2129
- * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
2130
- */
2131
- async explain(query) {
2132
- const { sql: sql2, params } = query.toSQL();
2133
- return this.explainRaw(sql2, params);
2134
- }
2135
- /**
2136
- * Executes EXPLAIN ANALYZE on a raw SQL query.
2137
- * @param {string} query - The SQL query to analyze
2138
- * @param {unknown[]} bindParams - The query parameters
2139
- * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
2140
- */
2141
- async explainAnalyzeRaw(query, bindParams) {
2142
- const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ANALYZE ${query}`, bindParams);
2143
- return results.map((row) => ({
2144
- id: row.id,
2145
- estRows: row.estRows,
2146
- actRows: row.actRows,
2147
- task: row.task,
2148
- accessObject: row["access object"],
2149
- executionInfo: row["execution info"],
2150
- operatorInfo: row["operator info"],
2151
- memory: row.memory,
2152
- disk: row.disk
2153
- }));
2154
- }
2155
- /**
2156
- * Executes EXPLAIN ANALYZE on a Drizzle query.
2157
- * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
2158
- * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
2159
- */
2160
- async explainAnalyze(query) {
2161
- const { sql: sql2, params } = query.toSQL();
2162
- return this.explainAnalyzeRaw(sql2, params);
2163
- }
2164
- /**
2165
- * Decodes a query execution plan from its string representation.
2166
- * @param {string} input - The raw execution plan string
2167
- * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
2168
- */
2169
- decodedPlan(input) {
2170
- if (!input) {
2171
- return [];
2172
- }
2173
- const lines = input.trim().split("\n");
2174
- if (lines.length < 2) return [];
2175
- const headersRaw = lines[0].split(" ").map((h) => h.trim()).filter(Boolean);
2176
- const headers = headersRaw.map((h) => {
2177
- return h.replace(/\s+/g, " ").replace(/[-\s]+(.)?/g, (_, c) => c ? c.toUpperCase() : "").replace(/^./, (s) => s.toLowerCase());
2178
- });
2179
- return lines.slice(1).map((line) => {
2180
- const values = line.split(" ").map((s) => s.trim()).filter(Boolean);
2181
- const row = {};
2182
- headers.forEach((key, i) => {
2183
- row[key] = values[i] ?? "";
2184
- });
2185
- return row;
2186
- });
2187
- }
2188
- /**
2189
- * Normalizes a raw slow query row into a more structured format.
2190
- * @param {SlowQueryRaw} row - The raw slow query data
2191
- * @returns {SlowQueryNormalized} The normalized slow query data
2192
- */
2193
- normalizeSlowQuery(row) {
2194
- return {
2195
- time: row.Time,
2196
- txnStartTs: row.Txn_start_ts,
2197
- user: row.User,
2198
- host: row.Host,
2199
- connId: row.Conn_ID,
2200
- db: row.DB,
2201
- query: row.Query,
2202
- digest: row.Digest,
2203
- queryTime: row.Query_time,
2204
- compileTime: row.Compile_time,
2205
- optimizeTime: row.Optimize_time,
2206
- processTime: row.Process_time,
2207
- waitTime: row.Wait_time,
2208
- parseTime: row.Parse_time,
2209
- rewriteTime: row.Rewrite_time,
2210
- copTime: row.Cop_time,
2211
- copProcAvg: row.Cop_proc_avg,
2212
- copProcMax: row.Cop_proc_max,
2213
- copProcP90: row.Cop_proc_p90,
2214
- copProcAddr: row.Cop_proc_addr,
2215
- copWaitAvg: row.Cop_wait_avg,
2216
- copWaitMax: row.Cop_wait_max,
2217
- copWaitP90: row.Cop_wait_p90,
2218
- copWaitAddr: row.Cop_wait_addr,
2219
- memMax: row.Mem_max,
2220
- diskMax: row.Disk_max,
2221
- totalKeys: row.Total_keys,
2222
- processKeys: row.Process_keys,
2223
- requestCount: row.Request_count,
2224
- kvTotal: row.KV_total,
2225
- pdTotal: row.PD_total,
2226
- resultRows: row.Result_rows,
2227
- rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
2228
- rocksdbBlockReadCount: row.Rocksdb_block_read_count,
2229
- rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
2230
- plan: row.Plan,
2231
- binaryPlan: row.Binary_plan,
2232
- planDigest: row.Plan_digest,
2233
- parsedPlan: this.decodedPlan(row.Plan)
2234
- };
2235
- }
2236
- /**
2237
- * Builds a SQL query for retrieving cluster statement history.
2238
- * @param {string[]} tables - The tables to analyze
2239
- * @param {Date} [from] - The start date for the analysis
2240
- * @param {Date} [to] - The end date for the analysis
2241
- * @returns {string} The SQL query for cluster statement history
2242
- */
2243
- buildClusterStatementQuery(tables, from, to) {
2244
- const formatDateTime2 = (date) => luxon.DateTime.fromJSDate(date).toFormat("yyyy-LL-dd'T'HH:mm:ss.SSS");
2245
- const tableConditions = tables.map((table2) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table2}', '%')`).join(" OR ");
2246
- const timeConditions = [];
2247
- if (from) {
2248
- timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime2(from)}'`);
2249
- }
2250
- if (to) {
2251
- timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime2(to)}'`);
2252
- }
2253
- let whereClauses;
2254
- if (tableConditions?.length) {
2255
- whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
2256
- } else {
2257
- whereClauses = timeConditions;
2258
- }
2259
- return `
2260
- SELECT *
2261
- FROM (
2262
- SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
2263
- UNION ALL
2264
- SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
2265
- ) AS combined
2266
- ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
2267
- `;
2268
- }
2269
- /**
2270
- * Retrieves and analyzes slow queries from the database.
2271
- * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
2272
- */
2273
- // CLUSTER_SLOW_QUERY STATISTICS
2274
- async analyzeSlowQueries() {
2275
- const results = await this.forgeOperations.fetch().executeRawSQL(`
2276
- SELECT *
2277
- FROM information_schema.slow_query
2278
- ORDER BY time DESC
2279
- `);
2280
- return results.map((row) => this.normalizeSlowQuery(row));
2281
- }
2282
- /**
2283
- * Converts a cluster statement row to camelCase format.
2284
- * @param {Record<string, any>} input - The input row data
2285
- * @returns {ClusterStatementRowCamelCase} The converted row data
2286
- */
2287
- mapToCamelCaseClusterStatement(input) {
2288
- if (!input) {
2289
- return {};
2290
- }
2291
- const result = {};
2292
- result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
2293
- for (const key in input) {
2294
- const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
2295
- result[camelKey] = input[key];
2296
- }
2297
- return result;
2298
- }
2299
- /**
2300
- * Analyzes query history for specific tables using raw table names.
2301
- * @param {string[]} tables - The table names to analyze
2302
- * @param {Date} [fromDate] - The start date for the analysis
2303
- * @param {Date} [toDate] - The end date for the analysis
2304
- * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
2305
- */
2306
- async analyzeQueriesHistoryRaw(tables, fromDate, toDate) {
2307
- const results = await this.forgeOperations.fetch().executeRawSQL(
2308
- this.buildClusterStatementQuery(tables ?? [], fromDate, toDate)
2309
- );
2310
- return results.map((r) => this.mapToCamelCaseClusterStatement(r));
2311
- }
2312
- /**
2313
- * Analyzes query history for specific tables using Drizzle table objects.
2314
- * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
2315
- * @param {Date} [fromDate] - The start date for the analysis
2316
- * @param {Date} [toDate] - The end date for the analysis
2317
- * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
2318
- */
2319
- async analyzeQueriesHistory(tables, fromDate, toDate) {
2320
- const tableNames = tables?.map((table$1) => table.getTableName(table$1)) ?? [];
2321
- return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
2322
- }
2323
- }
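A sketch of the analysis helpers, assuming the facade exposes this instance through an accessor (called `analyze()` here purely for illustration; the implementation stores it in `analyzeOperations`):

```typescript
// EXPLAIN / EXPLAIN ANALYZE for a Drizzle query.
const plan = await forgeSQL.analyze().explain(forgeSQL.selectFrom(users).where(eq(users.id, 1)));
plan.forEach((row) => console.log(row.id, row.estRows, row.operatorInfo));

// Recent slow queries, normalized to camelCase with the plan parsed into rows.
const slow = await forgeSQL.analyze().analyzeSlowQueries();

// Statement history touching specific tables over the last hour.
const history = await forgeSQL
  .analyze()
  .analyzeQueriesHistory([users], new Date(Date.now() - 3_600_000), new Date());
```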
2324
- class ForgeSQLCacheOperations {
2325
- options;
2326
- forgeOperations;
2327
- /**
2328
- * Creates a new instance of ForgeSQLCacheOperations.
2329
- *
2330
- * @param options - Configuration options for the ORM
2331
- * @param forgeOperations - The ForgeSQL operations instance
2332
- */
2333
- constructor(options, forgeOperations) {
2334
- this.options = options;
2335
- this.forgeOperations = forgeOperations;
2336
- }
2337
- /**
2338
- * Evicts cache for multiple tables using Drizzle table objects.
2339
- *
2340
- * @param tables - Array of Drizzle table objects to clear cache for
2341
- * @returns Promise that resolves when cache eviction is complete
2342
- * @throws Error if cacheEntityName is not configured
2343
- */
2344
- async evictCacheEntities(tables) {
2345
- if (!this.options.cacheEntityName) {
2346
- throw new Error("cacheEntityName is not configured");
2347
- }
2348
- await this.evictCache(tables.map((t) => table.getTableName(t)));
2349
- }
2350
- /**
2351
- * Evicts cache for multiple tables by their names.
2352
- *
2353
- * @param tables - Array of table names to clear cache for
2354
- * @returns Promise that resolves when cache eviction is complete
2355
- * @throws Error if cacheEntityName is not configured
2356
- */
2357
- async evictCache(tables) {
2358
- if (!this.options.cacheEntityName) {
2359
- throw new Error("cacheEntityName is not configured");
2360
- }
2361
- await clearTablesCache(tables, this.options);
2362
- }
2363
- /**
2364
- * Inserts records with optimistic locking/versioning and automatically evicts cache.
2365
- *
2366
- * This method uses `modifyWithVersioning().insert()` internally, providing:
2367
- * - Automatic version field initialization
2368
- * - Optimistic locking support
2369
- * - Cache eviction after successful operation
2370
- *
2371
- * @param schema - The table schema
2372
- * @param models - Array of entities to insert
2373
- * @param updateIfExists - Whether to update existing records
2374
- * @returns Promise that resolves to the number of inserted rows
2375
- * @throws Error if cacheEntityName is not configured
2376
- * @throws Error if optimistic locking check fails
2377
- */
2378
- async insert(schema, models, updateIfExists) {
2379
- this.validateCacheConfiguration();
2380
- const number = await this.forgeOperations.modifyWithVersioning().insert(schema, models, updateIfExists);
2381
- await clearCache(schema, this.options);
2382
- return number;
2383
- }
2384
- /**
2385
- * Deletes a record by ID with optimistic locking/versioning and automatically evicts cache.
2386
- *
2387
- * This method uses `modifyWithVersioning().deleteById()` internally, providing:
2388
- * - Optimistic locking checks before deletion
2389
- * - Version field validation
2390
- * - Cache eviction after successful operation
2391
- *
2392
- * @param id - The ID of the record to delete
2393
- * @param schema - The table schema
2394
- * @returns Promise that resolves to the number of affected rows
2395
- * @throws Error if cacheEntityName is not configured
2396
- * @throws Error if optimistic locking check fails
2397
- */
2398
- async deleteById(id, schema) {
2399
- this.validateCacheConfiguration();
2400
- const number = await this.forgeOperations.modifyWithVersioning().deleteById(id, schema);
2401
- await clearCache(schema, this.options);
2402
- return number;
2403
- }
2404
- /**
2405
- * Updates a record by ID with optimistic locking/versioning and automatically evicts cache.
2406
- *
2407
- * This method uses `modifyWithVersioning().updateById()` internally, providing:
2408
- * - Optimistic locking checks before update
2409
- * - Version field incrementation
2410
- * - Cache eviction after successful operation
2411
- *
2412
- * @param entity - The entity with updated values (must include primary key)
2413
- * @param schema - The table schema
2414
- * @returns Promise that resolves to the number of affected rows
2415
- * @throws Error if cacheEntityName is not configured
2416
- * @throws Error if optimistic locking check fails
2417
- */
2418
- async updateById(entity, schema) {
2419
- this.validateCacheConfiguration();
2420
- const number = await this.forgeOperations.modifyWithVersioning().updateById(entity, schema);
2421
- await clearCache(schema, this.options);
2422
- return number;
2423
- }
2424
- /**
2425
- * Updates fields based on conditions with optimistic locking/versioning and automatically evicts cache.
2426
- *
2427
- * This method uses `modifyWithVersioning().updateFields()` internally, providing:
2428
- * - Optimistic locking support (if version field is configured)
2429
- * - Version field validation and incrementation
2430
- * - Cache eviction after successful operation
2431
- *
2432
- * @param updateData - The data to update
2433
- * @param schema - The table schema
2434
- * @param where - Optional WHERE conditions
2435
- * @returns Promise that resolves to the number of affected rows
2436
- * @throws Error if cacheEntityName is not configured
2437
- * @throws Error if optimistic locking check fails
2438
- */
2439
- async updateFields(updateData, schema, where) {
2440
- this.validateCacheConfiguration();
2441
- const number = await this.forgeOperations.modifyWithVersioning().updateFields(updateData, schema, where);
2442
- await clearCache(schema, this.options);
2443
- return number;
2444
- }
2445
- /**
2446
- * Executes a query with caching support.
2447
- * First checks cache, if not found executes query and stores result in cache.
2448
- *
2449
- * @param query - The Drizzle query to execute
2450
- * @param cacheTtl - Optional cache TTL override
2451
- * @returns Promise that resolves to the query results
2452
- * @throws Error if cacheEntityName is not configured
2453
- */
2454
- async executeQuery(query, cacheTtl) {
2455
- this.validateCacheConfiguration();
2456
- const sqlQuery = query;
2457
- const cacheResult = await getFromCache(sqlQuery, this.options);
2458
- if (cacheResult) {
2459
- return cacheResult;
2460
- }
2461
- const results = await query;
2462
- await setCacheResult(sqlQuery, this.options, results, cacheTtl ?? this.options.cacheTTL ?? 60);
2463
- return results;
2464
- }
2465
- /**
2466
- * Validates that cache configuration is properly set up.
2467
- *
2468
- * @throws Error if cacheEntityName is not configured
2469
- * @private
2470
- */
2471
- validateCacheConfiguration() {
2472
- if (!this.options.cacheEntityName) {
2473
- throw new Error("cacheEntityName is not configured");
2474
- }
2475
- }
2476
- }
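The cache-aware CRUD wrapper mirrors `modifyWithVersioning()` but also clears the cache entity after each successful write; the facade refers to it as `modifyWithVersioningAndEvictCache()` in the notes below. A sketch, assuming `cacheEntityName` is configured and reusing the hypothetical `users` table:

```typescript
// Versioned insert/update that also evict cached queries touching `users`.
await forgeSQL.modifyWithVersioningAndEvictCache().insert(users, [{ id: 4, name: "Dave" }]);
await forgeSQL.modifyWithVersioningAndEvictCache().updateById({ id: 4, name: "Dave Jones" }, users);

// Explicit eviction for one or more Drizzle tables.
await forgeSQL.modifyWithVersioningAndEvictCache().evictCacheEntities([users]);

// Cached read with a 30-second TTL override.
const rows = await forgeSQL
  .modifyWithVersioningAndEvictCache()
  .executeQuery(forgeSQL.selectFrom(users), 30);
```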
2477
- class ForgeSQLORMImpl {
2478
- static instance = null;
2479
- drizzle;
2480
- crudOperations;
2481
- fetchOperations;
2482
- analyzeOperations;
2483
- cacheOperations;
2484
- options;
2485
- /**
2486
- * Private constructor to enforce singleton behavior.
2487
- * @param options - Options for configuring ForgeSQL ORM behavior.
2488
- */
2489
- constructor(options) {
2490
- try {
2491
- const newOptions = options ?? {
2492
- logRawSqlQuery: false,
2493
- logCache: false,
2494
- disableOptimisticLocking: false,
2495
- cacheWrapTable: true,
2496
- cacheTTL: 120,
2497
- cacheEntityQueryName: "sql",
2498
- cacheEntityExpirationName: "expiration",
2499
- cacheEntityDataName: "data"
2500
- };
2501
- this.options = newOptions;
2502
- if (newOptions.logRawSqlQuery) {
2503
- console.debug("Initializing ForgeSQLORM...");
2504
- }
2505
- const proxiedDriver = createForgeDriverProxy(
2506
- this,
2507
- newOptions.hints,
2508
- newOptions.logRawSqlQuery
2509
- );
2510
- this.drizzle = patchDbWithSelectAliased(
2511
- mysqlProxy.drizzle(proxiedDriver, { logger: newOptions.logRawSqlQuery }),
2512
- newOptions
2513
- );
2514
- this.crudOperations = new ForgeSQLCrudOperations(this, newOptions);
2515
- this.fetchOperations = new ForgeSQLSelectOperations(newOptions);
2516
- this.analyzeOperations = new ForgeSQLAnalyseOperation(this);
2517
- this.cacheOperations = new ForgeSQLCacheOperations(newOptions, this);
2518
- } catch (error) {
2519
- console.error("ForgeSQLORM initialization failed:", error);
2520
- throw error;
2521
- }
2522
- }
2523
- /**
2524
- * Executes a query and provides access to execution metadata with performance monitoring.
2525
- * This method allows you to capture detailed information about query execution
2526
- * including database execution time, response size, and query analysis capabilities.
2527
- *
2528
- * The method aggregates metrics across all database operations within the query function,
2529
- * making it ideal for monitoring resolver performance and detecting performance issues.
2530
- *
2531
- * @template T - The return type of the query
2532
- * @param query - A function that returns a Promise with the query result. Can contain multiple database operations.
2533
- * @param onMetadata - Callback function that receives aggregated execution metadata
2534
- * @param onMetadata.totalDbExecutionTime - Total database execution time across all operations in the query function (in milliseconds)
2535
- * @param onMetadata.totalResponseSize - Total response size across all operations (in bytes)
2536
- * @param onMetadata.printQueries - Function to analyze and print query execution plans from CLUSTER_STATEMENTS_SUMMARY
2537
- * @returns Promise with the query result
2538
- *
2539
- * @example
2540
- * ```typescript
2541
- * // Basic usage with performance monitoring
2542
- * const result = await forgeSQL.executeWithMetadata(
2543
- * async () => {
2544
- * const users = await forgeSQL.selectFrom(usersTable);
2545
- * const orders = await forgeSQL.selectFrom(ordersTable).where(eq(ordersTable.userId, usersTable.id));
2546
- * return { users, orders };
2547
- * },
2548
-    *   async (totalDbExecutionTime, totalResponseSize, printQueries) => {
2549
- * const threshold = 500; // ms baseline for this resolver
2550
- *
2551
- * if (totalDbExecutionTime > threshold * 1.5) {
2552
- * console.warn(`[Performance Warning] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
2553
- * await printQueries(); // Analyze and print query execution plans
2554
- * } else if (totalDbExecutionTime > threshold) {
2555
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
2556
- * }
2557
- *
2558
- * console.log(`DB response size: ${totalResponseSize} bytes`);
2559
- * }
2560
- * );
2561
- * ```
2562
- *
2563
- * @example
2564
- * ```typescript
2565
- * // Resolver with performance monitoring
2566
- * resolver.define("fetch", async (req: Request) => {
2567
- * try {
2568
- * return await forgeSQL.executeWithMetadata(
2569
- * async () => {
2570
- * // Resolver logic with multiple queries
2571
- * const users = await forgeSQL.selectFrom(demoUsers);
2572
- * const orders = await forgeSQL.selectFrom(demoOrders)
2573
- * .where(eq(demoOrders.userId, demoUsers.id));
2574
- * return { users, orders };
2575
- * },
2576
- * async (totalDbExecutionTime, totalResponseSize, printQueries) => {
2577
- * const threshold = 500; // ms baseline for this resolver
2578
- *
2579
- * if (totalDbExecutionTime > threshold * 1.5) {
2580
- * console.warn(`[Performance Warning fetch] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
2581
- * await printQueries(); // Optionally log or capture diagnostics for further analysis
2582
- * } else if (totalDbExecutionTime > threshold) {
2583
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
2584
- * }
2585
- *
2586
- * console.log(`DB response size: ${totalResponseSize} bytes`);
2587
- * }
2588
- * );
2589
- * } catch (e) {
2590
- * const error = e?.cause?.debug?.sqlMessage ?? e?.cause;
2591
- * console.error(error, e);
2592
- * throw error;
2593
- * }
2594
- * });
2595
- * ```
2596
- *
2597
- * @note **Important**: When multiple resolvers are running concurrently, their query data may also appear in `printQueries()` analysis, as it queries the global `CLUSTER_STATEMENTS_SUMMARY` table.
2598
- */
2599
- async executeWithMetadata(query, onMetadata) {
2600
- return metadataQueryContext.run(
2601
- {
2602
- totalDbExecutionTime: 0,
2603
- totalResponseSize: 0,
2604
- beginTime: /* @__PURE__ */ new Date(),
2605
- forgeSQLORM: this,
2606
- printQueriesWithPlan: async () => {
2607
- return;
2608
- }
2609
- },
2610
- async () => {
2611
- const result = await query();
2612
- const metadata = await getLastestMetadata();
2613
- try {
2614
- if (metadata) {
2615
- await onMetadata(
2616
- metadata.totalDbExecutionTime,
2617
- metadata.totalResponseSize,
2618
- metadata.printQueriesWithPlan
2619
- );
2620
- }
2621
- } catch (e) {
2622
- console.error(
2623
- "[ForgeSQLORM][executeWithMetadata] Failed to run onMetadata callback",
2624
- {
2625
- errorMessage: e?.message,
2626
- errorStack: e?.stack,
2627
- totalDbExecutionTime: metadata?.totalDbExecutionTime,
2628
- totalResponseSize: metadata?.totalResponseSize,
2629
- beginTime: metadata?.beginTime
2630
- },
2631
- e
2632
- );
2633
- }
2634
- return result;
2635
- }
2636
- );
2637
- }
2638
- /**
2639
- * Executes operations within a cache context that collects cache eviction events.
2640
- * All clearCache calls within the context are collected and executed in batch at the end.
2641
- * Queries executed within this context will bypass cache for tables that were marked for clearing.
2642
- *
2643
- * This is useful for:
2644
- * - Batch operations that affect multiple tables
2645
- * - Transaction-like operations where you want to clear cache only at the end
2646
- * - Performance optimization by reducing cache clear operations
2647
- *
2648
- * @param cacheContext - Function containing operations that may trigger cache evictions
2649
- * @returns Promise that resolves when all operations and cache clearing are complete
2650
- *
2651
- * @example
2652
- * ```typescript
2653
- * await forgeSQL.executeWithCacheContext(async () => {
2654
- * await forgeSQL.modifyWithVersioning().insert(users, userData);
2655
- * await forgeSQL.modifyWithVersioning().insert(orders, orderData);
2656
- * // Cache for both users and orders tables will be cleared at the end
2657
- * });
2658
- * ```
2659
- */
2660
- executeWithCacheContext(cacheContext) {
2661
- return this.executeWithCacheContextAndReturnValue(cacheContext);
2662
- }
2663
- /**
2664
- * Executes operations within a cache context and returns a value.
2665
- * All clearCache calls within the context are collected and executed in batch at the end.
2666
- * Queries executed within this context will bypass cache for tables that were marked for clearing.
2667
- *
2668
- * @param cacheContext - Function containing operations that may trigger cache evictions
2669
- * @returns Promise that resolves to the return value of the cacheContext function
2670
- *
2671
- * @example
2672
- * ```typescript
2673
- * const result = await forgeSQL.executeWithCacheContextAndReturnValue(async () => {
2674
- * await forgeSQL.modifyWithVersioning().insert(users, userData);
2675
- * return await forgeSQL.fetch().executeQueryOnlyOne(selectUserQuery);
2676
- * });
2677
- * ```
2678
- */
2679
- async executeWithCacheContextAndReturnValue(cacheContext) {
2680
- return await this.executeWithLocalCacheContextAndReturnValue(
2681
- async () => await cacheApplicationContext.run(
2682
- cacheApplicationContext.getStore() ?? { tables: /* @__PURE__ */ new Set() },
2683
- async () => {
2684
- try {
2685
- return await cacheContext();
2686
- } finally {
2687
- await clearTablesCache(
2688
- Array.from(cacheApplicationContext.getStore()?.tables ?? []),
2689
- this.options
2690
- );
2691
- }
2692
- }
2693
- )
2694
- );
2695
- }
2696
- /**
2697
- * Executes operations within a local cache context and returns a value.
2698
- * This provides in-memory caching for select queries within a single request scope.
2699
- *
2700
- * @param cacheContext - Function containing operations that will benefit from local caching
2701
- * @returns Promise that resolves to the return value of the cacheContext function
2702
- */
2703
- async executeWithLocalCacheContextAndReturnValue(cacheContext) {
2704
- return await localCacheApplicationContext.run(
2705
- localCacheApplicationContext.getStore() ?? { cache: {} },
2706
- async () => {
2707
- return await cacheContext();
2708
- }
2709
- );
2710
- }
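For orientation, a usage sketch (not taken from the package docs): it assumes a `forgeSQL` instance created from the default export and a `users` Drizzle table defined elsewhere, and shows how repeated identical selects inside one invocation can be answered from the per-request local cache.

```typescript
// Sketch only: `users` is an assumed Drizzle table; constructing ForgeSQLORM
// without options is also an assumption here.
import ForgeSQLORM from "forge-sql-orm";
import { eq } from "drizzle-orm";
import { users } from "./schema";

const forgeSQL = new ForgeSQLORM();

export async function loadUserTwice(userId: number) {
  return forgeSQL.executeWithLocalCacheContextAndReturnValue(async () => {
    // Both calls build the same SQL; the second is expected to be served from the
    // in-memory cache that lives only for this invocation.
    const first = await forgeSQL.selectFrom(users).where(eq(users.id, userId));
    const second = await forgeSQL.selectFrom(users).where(eq(users.id, userId));
    return { first, second };
  });
}
```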
2711
- /**
2712
- * Executes operations within a local cache context.
2713
- * This provides in-memory caching for select queries within a single request scope.
2714
- *
2715
- * @param cacheContext - Function containing operations that will benefit from local caching
2716
- * @returns Promise that resolves when all operations are complete
2717
- */
2718
- executeWithLocalContext(cacheContext) {
2719
- return this.executeWithLocalCacheContextAndReturnValue(cacheContext);
2720
- }
2721
- /**
2722
- * Creates an insert query builder.
2723
- *
2724
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2725
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
2726
- *
2727
- * @param table - The table to insert into
2728
- * @returns Insert query builder (no versioning, no cache management)
2729
- */
2730
- insert(table2) {
2731
- return this.drizzle.insertWithCacheContext(table2);
2732
- }
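A short contrast sketch, assuming a `users` table with a `name` column; the argument shape accepted by `modifyWithVersioning().insert()` is an assumption.

```typescript
// Sketch: two alternatives for inserting a row (`users` is an assumed Drizzle table).
import ForgeSQLORM from "forge-sql-orm";
import { users } from "./schema";

const forgeSQL = new ForgeSQLORM();

// Plain Drizzle-style insert: no optimistic locking, no cache management.
export const createUserPlain = (name: string) => forgeSQL.insert(users).values({ name });

// Versioned insert: the ORM manages the version field used for optimistic locking.
export const createUserVersioned = (name: string) =>
  forgeSQL.modifyWithVersioning().insert(users, { name });
```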
2733
- /**
2734
- * Creates an insert query builder that automatically evicts cache after execution.
2735
- *
2736
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2737
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
2738
- *
2739
- * @param table - The table to insert into
2740
- * @returns Insert query builder with automatic cache eviction (no versioning)
2741
- */
2742
- insertAndEvictCache(table2) {
2743
- return this.drizzle.insertAndEvictCache(table2);
2744
- }
2745
- /**
2746
- * Creates an update query builder that automatically evicts cache after execution.
2747
- *
2748
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2749
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
2750
- *
2751
- * @param table - The table to update
2752
- * @returns Update query builder with automatic cache eviction (no versioning)
2753
- */
2754
- updateAndEvictCache(table2) {
2755
- return this.drizzle.updateAndEvictCache(table2);
2756
- }
2757
- /**
2758
- * Creates an update query builder.
2759
- *
2760
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2761
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
2762
- *
2763
- * @param table - The table to update
2764
- * @returns Update query builder (no versioning, no cache management)
2765
- */
2766
- update(table2) {
2767
- return this.drizzle.updateWithCacheContext(table2);
2768
- }
2769
- /**
2770
- * Creates a delete query builder.
2771
- *
2772
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2773
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
2774
- *
2775
- * @param table - The table to delete from
2776
- * @returns Delete query builder (no versioning, no cache management)
2777
- */
2778
- delete(table2) {
2779
- return this.drizzle.deleteWithCacheContext(table2);
2780
- }
2781
- /**
2782
- * Creates a delete query builder that automatically evicts cache after execution.
2783
- *
2784
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
2785
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
2786
- *
2787
- * @param table - The table to delete from
2788
- * @returns Delete query builder with automatic cache eviction (no versioning)
2789
- */
2790
- deleteAndEvictCache(table2) {
2791
- return this.drizzle.deleteAndEvictCache(table2);
2792
- }
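A usage sketch for the cache-evicting builders, assuming a `users` table with `id` and `status` columns; the chained `set()`/`where()` calls mirror standard Drizzle builders.

```typescript
// Sketch: update and delete without versioning, with cache eviction after execution.
import ForgeSQLORM from "forge-sql-orm";
import { eq } from "drizzle-orm";
import { users } from "./schema";

const forgeSQL = new ForgeSQLORM();

export async function deactivateUser(id: number) {
  // Runs the update, then evicts cached selects involving the users table.
  await forgeSQL.updateAndEvictCache(users).set({ status: "inactive" }).where(eq(users.id, id));
}

export async function removeUser(id: number) {
  // Same idea for deletes.
  await forgeSQL.deleteAndEvictCache(users).where(eq(users.id, id));
}
```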
2793
- /**
2794
- * Returns the modify operations instance that applies optimistic locking (versioning) on writes.
- * @returns Modify operations with versioning support.
2796
- */
2797
- modifyWithVersioning() {
2798
- return this.crudOperations;
2799
- }
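The method names below (`updateById`, `deleteById`) come from the warnings in the JSDoc of the plain builders; their exact argument shapes are not shown in this file, so the calls are only a hedged sketch that mirrors the table-first ordering of `insert(users, userData)`.

```typescript
// Hedged sketch of the versioned write API (argument shapes are assumptions).
import ForgeSQLORM from "forge-sql-orm";
import { users } from "./schema";

const forgeSQL = new ForgeSQLORM();

export async function renameUser(id: number, name: string) {
  // Expected to reject the write if the row was modified concurrently (optimistic locking).
  await forgeSQL.modifyWithVersioning().updateById(users, { id, name });
}

export async function deleteUser(id: number) {
  await forgeSQL.modifyWithVersioning().deleteById(users, id);
}
```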
2800
- /**
2801
- * Returns the singleton instance of ForgeSQLORMImpl.
2802
- * @param options - Options for configuring ForgeSQL ORM behavior.
2803
- * @returns The singleton instance of ForgeSQLORMImpl.
2804
- */
2805
- static getInstance(options) {
2806
- ForgeSQLORMImpl.instance ??= new ForgeSQLORMImpl(options);
2807
- return ForgeSQLORMImpl.instance;
2808
- }
2809
- /**
2810
- * Retrieves the fetch operations instance.
2811
- * @returns Fetch operations.
2812
- */
2813
- fetch() {
2814
- return this.fetchOperations;
2815
- }
2816
- /**
2817
- * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
2818
- * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
2819
- */
2820
- analyze() {
2821
- return this.analyzeOperations;
2822
- }
2823
- /**
2824
- * Provides schema-level SQL operations with optimistic locking/versioning and automatic cache eviction.
2825
- *
2826
- * This method returns operations that use `modifyWithVersioning()` internally, providing:
2827
- * - Optimistic locking support
2828
- * - Automatic version field management
2829
- * - Cache eviction after successful operations
2830
- *
2831
- * @returns {ForgeSQLCacheOperations} Interface for executing versioned SQL operations with cache management
2832
- */
2833
- modifyWithVersioningAndEvictCache() {
2834
- return this.cacheOperations;
2835
- }
2836
- /**
2837
- * Returns a Drizzle query builder instance.
2838
- *
2839
- * ⚠️ IMPORTANT: This method should be used ONLY for query building purposes.
2840
- * The returned instance should NOT be used for direct database connections or query execution.
2841
- * All database operations should be performed through Forge SQL's executeRawSQL or executeRawUpdateSQL methods.
2842
- *
2843
- * @returns A Drizzle query builder instance for query construction only.
2844
- */
2845
- getDrizzleQueryBuilder() {
2846
- return this.drizzle;
2847
- }
2848
- /**
2849
- * Creates a select query with unique field aliases to prevent field name collisions in joins.
2850
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2851
- *
2852
- * @template TSelection - The type of the selected fields
2853
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2854
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
2855
- * @throws {Error} If fields parameter is empty
2856
- * @example
2857
- * ```typescript
2858
- * await forgeSQL
2859
- * .select({user: users, order: orders})
2860
- * .from(orders)
2861
- * .innerJoin(users, eq(orders.userId, users.id));
2862
- * ```
2863
- */
2864
- select(fields) {
2865
- if (!fields) {
2866
- throw new Error("fields is empty");
2867
- }
2868
- return this.drizzle.selectAliased(fields);
2869
- }
2870
- /**
2871
- * Creates a distinct select query with unique field aliases to prevent field name collisions in joins.
2872
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2873
- *
2874
- * @template TSelection - The type of the selected fields
2875
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2876
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
2877
- * @throws {Error} If fields parameter is empty
2878
- * @example
2879
- * ```typescript
2880
- * await forgeSQL
2881
- * .selectDistinct({user: users, order: orders})
2882
- * .from(orders)
2883
- * .innerJoin(users, eq(orders.userId, users.id));
2884
- * ```
2885
- */
2886
- selectDistinct(fields) {
2887
- if (!fields) {
2888
- throw new Error("fields is empty");
2889
- }
2890
- return this.drizzle.selectAliasedDistinct(fields);
2891
- }
2892
- /**
2893
- * Creates a cacheable select query with unique field aliases to prevent field name collisions in joins.
2894
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2895
- *
2896
- * @template TSelection - The type of the selected fields
2897
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2898
- * @param {number} cacheTTL - Optional cache TTL in seconds (default: 60).
2899
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
2900
- * @throws {Error} If fields parameter is empty
2901
- * @example
2902
- * ```typescript
2903
- * await forgeSQL
2904
- * .selectCacheable({user: users, order: orders},60)
2905
- * .from(orders)
2906
- * .innerJoin(users, eq(orders.userId, users.id));
2907
- * ```
2908
- */
2909
- selectCacheable(fields, cacheTTL) {
2910
- if (!fields) {
2911
- throw new Error("fields is empty");
2912
- }
2913
- return this.drizzle.selectAliasedCacheable(fields, cacheTTL);
2914
- }
2915
- /**
2916
- * Creates a cacheable distinct select query with unique field aliases to prevent field name collisions in joins.
2917
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
2918
- *
2919
- * @template TSelection - The type of the selected fields
2920
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
2921
- * @param {number} cacheTTL - Optional cache TTL in seconds (default: 60).
2922
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
2923
- * @throws {Error} If fields parameter is empty
2924
- * @example
2925
- * ```typescript
2926
- * await forgeSQL
2927
- * .selectDistinctCacheable({user: users, order: orders}, 60)
2928
- * .from(orders)
2929
- * .innerJoin(users, eq(orders.userId, users.id));
2930
- * ```
2931
- */
2932
- selectDistinctCacheable(fields, cacheTTL) {
2933
- if (!fields) {
2934
- throw new Error("fields is empty");
2935
- }
2936
- return this.drizzle.selectAliasedDistinctCacheable(fields, cacheTTL);
2937
- }
2938
- /**
2939
- * Creates a select query builder for all columns from a table with field aliasing support.
2940
- * This is a convenience method that automatically selects all columns from the specified table.
2941
- *
2942
- * @template T - The type of the table
2943
- * @param table - The table to select from
2944
- * @returns Select query builder with all table columns and field aliasing support
2945
- * @example
2946
- * ```typescript
2947
- * const users = await forgeSQL.selectFrom(userTable).where(eq(userTable.id, 1));
2948
- * ```
2949
- */
2950
- selectFrom(table2) {
2951
- return this.drizzle.selectFrom(table2);
2952
- }
2953
- /**
2954
- * Creates a select distinct query builder for all columns from a table with field aliasing support.
2955
- * This is a convenience method that automatically selects all distinct columns from the specified table.
2956
- *
2957
- * @template T - The type of the table
2958
- * @param table - The table to select from
2959
- * @returns Select distinct query builder with all table columns and field aliasing support
2960
- * @example
2961
- * ```typescript
2962
- * const uniqueUsers = await forgeSQL.selectDistinctFrom(userTable).where(eq(userTable.status, 'active'));
2963
- * ```
2964
- */
2965
- selectDistinctFrom(table2) {
2966
- return this.drizzle.selectDistinctFrom(table2);
2967
- }
2968
- /**
2969
- * Creates a cacheable select query builder for all columns from a table with field aliasing and caching support.
2970
- * This is a convenience method that automatically selects all columns from the specified table with caching enabled.
2971
- *
2972
- * @template T - The type of the table
2973
- * @param table - The table to select from
2974
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
2975
- * @returns Select query builder with all table columns, field aliasing, and caching support
2976
- * @example
2977
- * ```typescript
2978
- * const users = await forgeSQL.selectCacheableFrom(userTable, 300).where(eq(userTable.id, 1));
2979
- * ```
2980
- */
2981
- selectCacheableFrom(table2, cacheTTL) {
2982
- return this.drizzle.selectFromCacheable(table2, cacheTTL);
2983
- }
2984
- /**
2985
- * Creates a cacheable select distinct query builder for all columns from a table with field aliasing and caching support.
2986
- * This is a convenience method that automatically selects all distinct columns from the specified table with caching enabled.
2987
- *
2988
- * @template T - The type of the table
2989
- * @param table - The table to select from
2990
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
2991
- * @returns Select distinct query builder with all table columns, field aliasing, and caching support
2992
- * @example
2993
- * ```typescript
2994
- * const uniqueUsers = await forgeSQL.selectDistinctCacheableFrom(userTable, 300).where(eq(userTable.status, 'active'));
2995
- * ```
2996
- */
2997
- selectDistinctCacheableFrom(table2, cacheTTL) {
2998
- return this.drizzle.selectDistinctFromCacheable(table2, cacheTTL);
2999
- }
3000
- /**
3001
- * Executes a raw SQL query with local cache support.
3002
- * This method provides local caching for raw SQL queries within the current invocation context.
3003
- * Results are cached locally and will be returned from cache on subsequent identical queries.
3004
- *
3005
- * @param query - The SQL query to execute (SQLWrapper or string)
3006
- * @returns Promise with query results
3007
- * @example
3008
- * ```typescript
3009
- * // Using SQLWrapper
3010
- * const result = await forgeSQL.execute(sql`SELECT * FROM users WHERE id = ${userId}`);
3011
- *
3012
- * // Using string
3013
- * const result = await forgeSQL.execute("SELECT * FROM users WHERE status = 'active'");
3014
- * ```
3015
- */
3016
- execute(query) {
3017
- return this.drizzle.executeQuery(query);
3018
- }
3019
- /**
3020
- * Executes a Data Definition Language (DDL) SQL query.
3021
- * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
3022
- *
3023
- * This method is specifically designed for DDL operations and provides:
3024
- * - Proper operation type context for DDL queries
3025
- * - No caching (DDL operations should not be cached)
3026
- * - Direct execution without query optimization
3027
- *
3028
- * @template T - The expected return type of the query result
3029
- * @param query - The DDL SQL query to execute (SQLWrapper or string)
3030
- * @returns Promise with query results
3031
- * @throws {Error} If the DDL operation fails
3032
- *
3033
- * @example
3034
- * ```typescript
3035
- * // Create a new table
3036
- * await forgeSQL.executeDDL(`
3037
- * CREATE TABLE users (
3038
- * id INT PRIMARY KEY AUTO_INCREMENT,
3039
- * name VARCHAR(255) NOT NULL,
3040
- * email VARCHAR(255) UNIQUE
3041
- * )
3042
- * `);
3043
- *
3044
- * // Alter table structure
3045
- * await forgeSQL.executeDDL(sql`
3046
- * ALTER TABLE users
3047
- * ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
3048
- * `);
3049
- *
3050
- * // Drop a table
3051
- * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
3052
- * ```
3053
- */
3054
- async executeDDL(query) {
3055
- return this.executeDDLActions(async () => this.drizzle.executeQuery(query));
3056
- }
3057
- /**
3058
- * Executes a series of actions within a DDL operation context.
3059
- * This method provides a way to execute regular SQL queries that should be treated
3060
- * as DDL operations, ensuring proper operation type context for performance monitoring.
3061
- *
3062
- * This method is useful for:
3063
- * - Executing regular SQL queries in DDL context for monitoring purposes
3064
- * - Wrapping non-DDL operations that should be treated as DDL for analysis
3065
- * - Ensuring proper operation type context for complex workflows
3066
- * - Maintaining DDL operation context across multiple function calls
3067
- *
3068
- * @template T - The return type of the actions function
3069
- * @param actions - Function containing SQL operations to execute in DDL context
3070
- * @returns Promise that resolves to the return value of the actions function
3071
- *
3072
- * @example
3073
- * ```typescript
3074
- * // Execute regular SQL queries in DDL context for monitoring
3075
- * await forgeSQL.executeDDLActions(async () => {
3076
- * const slowQueries = await forgeSQL.execute(`
3077
- * SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3078
- * WHERE AVG_LATENCY > 1000000
3079
- * `);
3080
- * return slowQueries;
3081
- * });
3082
- *
3083
- * // Execute complex analysis queries in DDL context
3084
- * const result = await forgeSQL.executeDDLActions(async () => {
3085
- * const tableInfo = await forgeSQL.execute("SHOW TABLES");
3086
- * const performanceData = await forgeSQL.execute(`
3087
- * SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
3088
- * WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
3089
- * `);
3090
- * return { tableInfo, performanceData };
3091
- * });
3092
- *
3093
- * // Execute monitoring queries with error handling
3094
- * try {
3095
- * await forgeSQL.executeDDLActions(async () => {
3096
- * const metrics = await forgeSQL.execute(`
3097
- * SELECT COUNT(*) as query_count
3098
- * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3099
- * `);
3100
- * console.log(`Total queries: ${metrics[0].query_count}`);
3101
- * });
3102
- * } catch (error) {
3103
- * console.error("Monitoring query failed:", error);
3104
- * }
3105
- * ```
3106
- */
3107
- async executeDDLActions(actions) {
3108
- return operationTypeQueryContext.run({ operationType: "DDL" }, async () => actions());
3109
- }
3110
- /**
3111
- * Executes a raw SQL query with both local and global cache support.
3112
- * This method provides comprehensive caching for raw SQL queries:
3113
- * - Local cache: Within the current invocation context
3114
- * - Global cache: Cross-invocation caching using @forge/kvs
3115
- *
3116
- * @param query - The SQL query to execute (SQLWrapper or string)
3117
- * @param cacheTtl - Optional cache TTL override (defaults to global cache TTL)
3118
- * @returns Promise with query results
3119
- * @example
3120
- * ```typescript
3121
- * // Using SQLWrapper with custom TTL
3122
- * const result = await forgeSQL.executeCacheable(sql`SELECT * FROM users WHERE id = ${userId}`, 300);
3123
- *
3124
- * // Using string with default TTL
3125
- * const result = await forgeSQL.executeCacheable("SELECT * FROM users WHERE status = 'active'");
3126
- * ```
3127
- */
3128
- executeCacheable(query, cacheTtl) {
3129
- return this.drizzle.executeQueryCacheable(query, cacheTtl);
3130
- }
3131
- /**
3132
- * Creates a Common Table Expression (CTE) builder for complex queries.
3133
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3134
- *
3135
- * @returns WithBuilder for creating CTEs
3136
- * @example
3137
- * ```typescript
3138
- * const withQuery = forgeSQL.$with('userStats').as(
3139
- * forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
3140
- * .from(users)
3141
- * .groupBy(users.id)
3142
- * );
3143
- * ```
3144
- */
3145
- get $with() {
3146
- return this.drizzle.$with;
3147
- }
3148
- /**
3149
- * Creates a query builder that uses Common Table Expressions (CTEs).
3150
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3151
- *
3152
- * @param queries - Array of CTE queries created with $with()
3153
- * @returns Query builder with CTE support
3154
- * @example
3155
- * ```typescript
3156
- * const withQuery = forgeSQL.$with('userStats').as(
3157
- * forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
3158
- * .from(users)
3159
- * .groupBy(users.id)
3160
- * );
3161
- *
3162
- * const result = await forgeSQL.with(withQuery)
3163
- * .select({ userId: withQuery.userId, count: withQuery.count })
3164
- * .from(withQuery);
3165
- * ```
3166
- */
3167
- with(...queries) {
3168
- return this.drizzle.with(...queries);
3169
- }
3170
- }
3171
- class ForgeSQLORM {
3172
- ormInstance;
3173
- constructor(options) {
3174
- this.ormInstance = ForgeSQLORMImpl.getInstance(options);
3175
- }
3176
- /**
3177
- * Executes a query and provides access to execution metadata with performance monitoring.
3178
- * This method allows you to capture detailed information about query execution
3179
- * including database execution time, response size, and query analysis capabilities.
3180
- *
3181
- * The method aggregates metrics across all database operations within the query function,
3182
- * making it ideal for monitoring resolver performance and detecting performance issues.
3183
- *
3184
- * @template T - The return type of the query
3185
- * @param query - A function that returns a Promise with the query result. Can contain multiple database operations.
3186
- * @param onMetadata - Callback function that receives aggregated execution metadata
3187
- * @param onMetadata.totalDbExecutionTime - Total database execution time across all operations in the query function (in milliseconds)
3188
- * @param onMetadata.totalResponseSize - Total response size across all operations (in bytes)
3189
- * @param onMetadata.printQueries - Function to analyze and print query execution plans from CLUSTER_STATEMENTS_SUMMARY
3190
- * @returns Promise with the query result
3191
- *
3192
- * @example
3193
- * ```typescript
3194
- * // Basic usage with performance monitoring
3195
- * const result = await forgeSQL.executeWithMetadata(
3196
- * async () => {
3197
- * const users = await forgeSQL.selectFrom(usersTable);
3198
- * const orders = await forgeSQL.selectFrom(ordersTable).where(eq(ordersTable.userId, usersTable.id));
3199
- * return { users, orders };
3200
- * },
3201
- * async (totalDbExecutionTime, totalResponseSize, printQueries) => {
3202
- * const threshold = 500; // ms baseline for this resolver
3203
- *
3204
- * if (totalDbExecutionTime > threshold * 1.5) {
3205
- * console.warn(`[Performance Warning] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
3206
- * await printQueries(); // Analyze and print query execution plans
3207
- * } else if (totalDbExecutionTime > threshold) {
3208
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
3209
- * }
3210
- *
3211
- * console.log(`DB response size: ${totalResponseSize} bytes`);
3212
- * }
3213
- * );
3214
- * ```
3215
- *
3216
- * @example
3217
- * ```typescript
3218
- * // Resolver with performance monitoring
3219
- * resolver.define("fetch", async (req: Request) => {
3220
- * try {
3221
- * return await forgeSQL.executeWithMetadata(
3222
- * async () => {
3223
- * // Resolver logic with multiple queries
3224
- * const users = await forgeSQL.selectFrom(demoUsers);
3225
- * const orders = await forgeSQL.selectFrom(demoOrders)
3226
- * .where(eq(demoOrders.userId, demoUsers.id));
3227
- * return { users, orders };
3228
- * },
3229
- * async (totalDbExecutionTime, totalResponseSize, printQueries) => {
3230
- * const threshold = 500; // ms baseline for this resolver
3231
- *
3232
- * if (totalDbExecutionTime > threshold * 1.5) {
3233
- * console.warn(`[Performance Warning fetch] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
3234
- * await printQueries(); // Optionally log or capture diagnostics for further analysis
3235
- * } else if (totalDbExecutionTime > threshold) {
3236
- * console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
3237
- * }
3238
- *
3239
- * console.log(`DB response size: ${totalResponseSize} bytes`);
3240
- * }
3241
- * );
3242
- * } catch (e) {
3243
- * const error = e?.cause?.debug?.sqlMessage ?? e?.cause;
3244
- * console.error(error, e);
3245
- * throw error;
3246
- * }
3247
- * });
3248
- * ```
3249
- *
3250
- * @note **Important**: When multiple resolvers are running concurrently, their query data may also appear in `printQueries()` analysis, as it queries the global `CLUSTER_STATEMENTS_SUMMARY` table.
3251
- */
3252
- async executeWithMetadata(query, onMetadata) {
3253
- return this.ormInstance.executeWithMetadata(query, onMetadata);
3254
- }
3255
- selectCacheable(fields, cacheTTL) {
3256
- return this.ormInstance.selectCacheable(fields, cacheTTL);
3257
- }
3258
- selectDistinctCacheable(fields, cacheTTL) {
3259
- return this.ormInstance.selectDistinctCacheable(fields, cacheTTL);
3260
- }
3261
- /**
3262
- * Creates a select query builder for all columns from a table with field aliasing support.
3263
- * This is a convenience method that automatically selects all columns from the specified table.
3264
- *
3265
- * @template T - The type of the table
3266
- * @param table - The table to select from
3267
- * @returns Select query builder with all table columns and field aliasing support
3268
- * @example
3269
- * ```typescript
3270
- * const users = await forgeSQL.selectFrom(userTable).where(eq(userTable.id, 1));
3271
- * ```
3272
- */
3273
- selectFrom(table2) {
3274
- return this.ormInstance.getDrizzleQueryBuilder().selectFrom(table2);
3275
- }
3276
- /**
3277
- * Creates a select distinct query builder for all columns from a table with field aliasing support.
3278
- * This is a convenience method that automatically selects all distinct columns from the specified table.
3279
- *
3280
- * @template T - The type of the table
3281
- * @param table - The table to select from
3282
- * @returns Select distinct query builder with all table columns and field aliasing support
3283
- * @example
3284
- * ```typescript
3285
- * const uniqueUsers = await forgeSQL.selectDistinctFrom(userTable).where(eq(userTable.status, 'active'));
3286
- * ```
3287
- */
3288
- selectDistinctFrom(table2) {
3289
- return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFrom(table2);
3290
- }
3291
- /**
3292
- * Creates a cacheable select query builder for all columns from a table with field aliasing and caching support.
3293
- * This is a convenience method that automatically selects all columns from the specified table with caching enabled.
3294
- *
3295
- * @template T - The type of the table
3296
- * @param table - The table to select from
3297
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
3298
- * @returns Select query builder with all table columns, field aliasing, and caching support
3299
- * @example
3300
- * ```typescript
3301
- * const users = await forgeSQL.selectCacheableFrom(userTable, 300).where(eq(userTable.id, 1));
3302
- * ```
3303
- */
3304
- selectCacheableFrom(table2, cacheTTL) {
3305
- return this.ormInstance.getDrizzleQueryBuilder().selectFromCacheable(table2, cacheTTL);
3306
- }
3307
- /**
3308
- * Creates a cacheable select distinct query builder for all columns from a table with field aliasing and caching support.
3309
- * This is a convenience method that automatically selects all distinct columns from the specified table with caching enabled.
3310
- *
3311
- * @template T - The type of the table
3312
- * @param table - The table to select from
3313
- * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
3314
- * @returns Select distinct query builder with all table columns, field aliasing, and caching support
3315
- * @example
3316
- * ```typescript
3317
- * const uniqueUsers = await forgeSQL.selectDistinctCacheableFrom(userTable, 300).where(eq(userTable.status, 'active'));
3318
- * ```
3319
- */
3320
- selectDistinctCacheableFrom(table2, cacheTTL) {
3321
- return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFromCacheable(table2, cacheTTL);
3322
- }
3323
- executeWithCacheContext(cacheContext) {
3324
- return this.ormInstance.executeWithCacheContext(cacheContext);
3325
- }
3326
- executeWithCacheContextAndReturnValue(cacheContext) {
3327
- return this.ormInstance.executeWithCacheContextAndReturnValue(cacheContext);
3328
- }
3329
- /**
3330
- * Executes operations within a local cache context.
3331
- * This provides in-memory caching for select queries within a single request scope.
3332
- *
3333
- * @param cacheContext - Function containing operations that will benefit from local caching
3334
- * @returns Promise that resolves when all operations are complete
3335
- */
3336
- executeWithLocalContext(cacheContext) {
3337
- return this.ormInstance.executeWithLocalContext(cacheContext);
3338
- }
3339
- /**
3340
- * Executes operations within a local cache context and returns a value.
3341
- * This provides in-memory caching for select queries within a single request scope.
3342
- *
3343
- * @param cacheContext - Function containing operations that will benefit from local caching
3344
- * @returns Promise that resolves to the return value of the cacheContext function
3345
- */
3346
- executeWithLocalCacheContextAndReturnValue(cacheContext) {
3347
- return this.ormInstance.executeWithLocalCacheContextAndReturnValue(cacheContext);
3348
- }
3349
- /**
3350
- * Creates an insert query builder.
3351
- *
3352
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3353
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
3354
- *
3355
- * @param table - The table to insert into
3356
- * @returns Insert query builder (no versioning, no cache management)
3357
- */
3358
- insert(table2) {
3359
- return this.ormInstance.insert(table2);
3360
- }
3361
- /**
3362
- * Creates an insert query builder that automatically evicts cache after execution.
3363
- *
3364
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3365
- * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
3366
- *
3367
- * @param table - The table to insert into
3368
- * @returns Insert query builder with automatic cache eviction (no versioning)
3369
- */
3370
- insertAndEvictCache(table2) {
3371
- return this.ormInstance.insertAndEvictCache(table2);
3372
- }
3373
- /**
3374
- * Creates an update query builder.
3375
- *
3376
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3377
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
3378
- *
3379
- * @param table - The table to update
3380
- * @returns Update query builder (no versioning, no cache management)
3381
- */
3382
- update(table2) {
3383
- return this.ormInstance.update(table2);
3384
- }
3385
- /**
3386
- * Creates an update query builder that automatically evicts cache after execution.
3387
- *
3388
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3389
- * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
3390
- *
3391
- * @param table - The table to update
3392
- * @returns Update query builder with automatic cache eviction (no versioning)
3393
- */
3394
- updateAndEvictCache(table2) {
3395
- return this.ormInstance.updateAndEvictCache(table2);
3396
- }
3397
- /**
3398
- * Creates a delete query builder.
3399
- *
3400
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3401
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
3402
- *
3403
- * @param table - The table to delete from
3404
- * @returns Delete query builder (no versioning, no cache management)
3405
- */
3406
- delete(table2) {
3407
- return this.ormInstance.delete(table2);
3408
- }
3409
- /**
3410
- * Creates a delete query builder that automatically evicts cache after execution.
3411
- *
3412
- * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
3413
- * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
3414
- *
3415
- * @param table - The table to delete from
3416
- * @returns Delete query builder with automatic cache eviction (no versioning)
3417
- */
3418
- deleteAndEvictCache(table2) {
3419
- return this.ormInstance.deleteAndEvictCache(table2);
3420
- }
3421
- /**
3422
- * Creates a select query with unique field aliases to prevent field name collisions in joins.
3423
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
3424
- *
3425
- * @template TSelection - The type of the selected fields
3426
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
3427
- * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
3428
- * @throws {Error} If fields parameter is empty
3429
- * @example
3430
- * ```typescript
3431
- * await forgeSQL
3432
- * .select({user: users, order: orders})
3433
- * .from(orders)
3434
- * .innerJoin(users, eq(orders.userId, users.id));
3435
- * ```
3436
- */
3437
- select(fields) {
3438
- return this.ormInstance.select(fields);
3439
- }
3440
- /**
3441
- * Creates a distinct select query with unique field aliases to prevent field name collisions in joins.
3442
- * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
3443
- *
3444
- * @template TSelection - The type of the selected fields
3445
- * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
3446
- * @returns {MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>} A distinct select query builder with unique field aliases
3447
- * @throws {Error} If fields parameter is empty
3448
- * @example
3449
- * ```typescript
3450
- * await forgeSQL
3451
- * .selectDistinct({user: users, order: orders})
3452
- * .from(orders)
3453
- * .innerJoin(users, eq(orders.userId, users.id));
3454
- * ```
3455
- */
3456
- selectDistinct(fields) {
3457
- return this.ormInstance.selectDistinct(fields);
3458
- }
3459
- /**
3460
- * Proxies the `modifyWithVersioning` method from `ForgeSQLORMImpl`.
3461
- * @returns Modify operations.
3462
- */
3463
- modifyWithVersioning() {
3464
- return this.ormInstance.modifyWithVersioning();
3465
- }
3466
- /**
3467
- * Proxies the `fetch` method from `ForgeSQLORMImpl`.
3468
- * @returns Fetch operations.
3469
- */
3470
- fetch() {
3471
- return this.ormInstance.fetch();
3472
- }
3473
- /**
3474
- * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
3475
- * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
3476
- */
3477
- analyze() {
3478
- return this.ormInstance.analyze();
3479
- }
3480
- /**
3481
- * Provides schema-level SQL operations with optimistic locking/versioning and automatic cache eviction.
- * @returns {ForgeSQLCacheOperations} Interface for executing versioned SQL operations with cache management
3483
- */
3484
- modifyWithVersioningAndEvictCache() {
3485
- return this.ormInstance.modifyWithVersioningAndEvictCache();
3486
- }
3487
- /**
3488
- * Returns a Drizzle query builder instance.
3489
- *
3490
- * @returns A Drizzle query builder instance for query construction only.
3491
- */
3492
- getDrizzleQueryBuilder() {
3493
- return this.ormInstance.getDrizzleQueryBuilder();
3494
- }
3495
- /**
3496
- * Executes a raw SQL query with local cache support.
3497
- * This method provides local caching for raw SQL queries within the current invocation context.
3498
- * Results are cached locally and will be returned from cache on subsequent identical queries.
3499
- *
3500
- * @param query - The SQL query to execute (SQLWrapper or string)
3501
- * @returns Promise with query results
3502
- * @example
3503
- * ```typescript
3504
- * // Using SQLWrapper
3505
- * const result = await forgeSQL.execute(sql`SELECT * FROM users WHERE id = ${userId}`);
3506
- *
3507
- * // Using string
3508
- * const result = await forgeSQL.execute("SELECT * FROM users WHERE status = 'active'");
3509
- * ```
3510
- */
3511
- execute(query) {
3512
- return this.ormInstance.execute(query);
3513
- }
3514
- /**
3515
- * Executes a Data Definition Language (DDL) SQL query.
3516
- * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
3517
- *
3518
- * This method is specifically designed for DDL operations and provides:
3519
- * - Proper operation type context for DDL queries
3520
- * - No caching (DDL operations should not be cached)
3521
- * - Direct execution without query optimization
3522
- *
3523
- * @template T - The expected return type of the query result
3524
- * @param query - The DDL SQL query to execute (SQLWrapper or string)
3525
- * @returns Promise with query results
3526
- * @throws {Error} If the DDL operation fails
3527
- *
3528
- * @example
3529
- * ```typescript
3530
- * // Create a new table
3531
- * await forgeSQL.executeDDL(`
3532
- * CREATE TABLE users (
3533
- * id INT PRIMARY KEY AUTO_INCREMENT,
3534
- * name VARCHAR(255) NOT NULL,
3535
- * email VARCHAR(255) UNIQUE
3536
- * )
3537
- * `);
3538
- *
3539
- * // Alter table structure
3540
- * await forgeSQL.executeDDL(sql`
3541
- * ALTER TABLE users
3542
- * ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
3543
- * `);
3544
- *
3545
- * // Drop a table
3546
- * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
3547
- * ```
3548
- */
3549
- executeDDL(query) {
3550
- return this.ormInstance.executeDDL(query);
3551
- }
3552
- /**
3553
- * Executes a series of actions within a DDL operation context.
3554
- * This method provides a way to execute regular SQL queries that should be treated
3555
- * as DDL operations, ensuring proper operation type context for performance monitoring.
3556
- *
3557
- * This method is useful for:
3558
- * - Executing regular SQL queries in DDL context for monitoring purposes
3559
- * - Wrapping non-DDL operations that should be treated as DDL for analysis
3560
- * - Ensuring proper operation type context for complex workflows
3561
- * - Maintaining DDL operation context across multiple function calls
3562
- *
3563
- * @template T - The return type of the actions function
3564
- * @param actions - Function containing SQL operations to execute in DDL context
3565
- * @returns Promise that resolves to the return value of the actions function
3566
- *
3567
- * @example
3568
- * ```typescript
3569
- * // Execute regular SQL queries in DDL context for monitoring
3570
- * await forgeSQL.executeDDLActions(async () => {
3571
- * const slowQueries = await forgeSQL.execute(`
3572
- * SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3573
- * WHERE AVG_LATENCY > 1000000
3574
- * `);
3575
- * return slowQueries;
3576
- * });
3577
- *
3578
- * // Execute complex analysis queries in DDL context
3579
- * const result = await forgeSQL.executeDDLActions(async () => {
3580
- * const tableInfo = await forgeSQL.execute("SHOW TABLES");
3581
- * const performanceData = await forgeSQL.execute(`
3582
- * SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
3583
- * WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
3584
- * `);
3585
- * return { tableInfo, performanceData };
3586
- * });
3587
- *
3588
- * // Execute monitoring queries with error handling
3589
- * try {
3590
- * await forgeSQL.executeDDLActions(async () => {
3591
- * const metrics = await forgeSQL.execute(`
3592
- * SELECT COUNT(*) as query_count
3593
- * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
3594
- * `);
3595
- * console.log(`Total queries: ${metrics[0].query_count}`);
3596
- * });
3597
- * } catch (error) {
3598
- * console.error("Monitoring query failed:", error);
3599
- * }
3600
- * ```
3601
- */
3602
- executeDDLActions(actions) {
3603
- return this.ormInstance.executeDDLActions(actions);
3604
- }
3605
- /**
3606
- * Executes a raw SQL query with both local and global cache support.
3607
- * This method provides comprehensive caching for raw SQL queries:
3608
- * - Local cache: Within the current invocation context
3609
- * - Global cache: Cross-invocation caching using @forge/kvs
3610
- *
3611
- * @param query - The SQL query to execute (SQLWrapper or string)
3612
- * @param cacheTtl - Optional cache TTL override (defaults to global cache TTL)
3613
- * @returns Promise with query results
3614
- * @example
3615
- * ```typescript
3616
- * // Using SQLWrapper with custom TTL
3617
- * const result = await forgeSQL.executeCacheable(sql`SELECT * FROM users WHERE id = ${userId}`, 300);
3618
- *
3619
- * // Using string with default TTL
3620
- * const result = await forgeSQL.executeCacheable("SELECT * FROM users WHERE status = 'active'");
3621
- * ```
3622
- */
3623
- executeCacheable(query, cacheTtl) {
3624
- return this.ormInstance.executeCacheable(query, cacheTtl);
3625
- }
3626
- /**
3627
- * Creates a Common Table Expression (CTE) builder for complex queries.
3628
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3629
- *
3630
- * @returns WithBuilder for creating CTEs
3631
- * @example
3632
- * ```typescript
3633
- * const withQuery = forgeSQL.$with('userStats').as(
3634
- * forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
3635
- * .from(users)
3636
- * .groupBy(users.id)
3637
- * );
3638
- * ```
3639
- */
3640
- get $with() {
3641
- return this.ormInstance.getDrizzleQueryBuilder().$with;
3642
- }
3643
- /**
3644
- * Creates a query builder that uses Common Table Expressions (CTEs).
3645
- * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
3646
- *
3647
- * @param queries - Array of CTE queries created with $with()
3648
- * @returns Query builder with CTE support
3649
- * @example
3650
- * ```typescript
3651
- * const withQuery = forgeSQL.$with('userStats').as(
3652
- * forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
3653
- * .from(users)
3654
- * .groupBy(users.id)
3655
- * );
3656
- *
3657
- * const result = await forgeSQL.with(withQuery)
3658
- * .select({ userId: withQuery.userId, count: withQuery.count })
3659
- * .from(withQuery);
3660
- * ```
3661
- */
3662
- with(...queries) {
3663
- return this.ormInstance.getDrizzleQueryBuilder().with(...queries);
3664
- }
3665
- }
3666
- const forgeDateTimeString = mysqlCore.customType({
3667
- dataType() {
3668
- return "datetime";
3669
- },
3670
- toDriver(value) {
3671
- return formatDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS", false);
3672
- },
3673
- fromDriver(value) {
3674
- const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
3675
- return parseDateTime(value, format);
3676
- }
3677
- });
3678
- const forgeTimestampString = mysqlCore.customType({
3679
- dataType() {
3680
- return "timestamp";
3681
- },
3682
- toDriver(value) {
3683
- return formatDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS", true);
3684
- },
3685
- fromDriver(value) {
3686
- const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
3687
- return parseDateTime(value, format);
3688
- }
3689
- });
3690
- const forgeDateString = mysqlCore.customType({
3691
- dataType() {
3692
- return "date";
3693
- },
3694
- toDriver(value) {
3695
- return formatDateTime(value, "yyyy-MM-dd", false);
3696
- },
3697
- fromDriver(value) {
3698
- const format = "yyyy-MM-dd";
3699
- return parseDateTime(value, format);
3700
- }
3701
- });
3702
- const forgeTimeString = mysqlCore.customType({
3703
- dataType() {
3704
- return "time";
3705
- },
3706
- toDriver(value) {
3707
- return formatDateTime(value, "HH:mm:ss.SSS", false);
3708
- },
3709
- fromDriver(value) {
3710
- return parseDateTime(value, "HH:mm:ss.SSS");
3711
- }
3712
- });
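A schema sketch using these column types; the table and column names are illustrative, and the format comments restate the `toDriver`/`fromDriver` mappings above.

```typescript
// Illustrative schema built on the string-based date/time column types defined above.
import { mysqlTable, int } from "drizzle-orm/mysql-core";
import {
  forgeDateString,
  forgeDateTimeString,
  forgeTimestampString,
  forgeTimeString,
} from "forge-sql-orm";

export const events = mysqlTable("events", {
  id: int("id").primaryKey().autoincrement(),
  eventDate: forgeDateString("event_date"),       // DATE, "yyyy-MM-dd"
  startsAt: forgeTimeString("starts_at"),         // TIME, "HH:mm:ss.SSS"
  createdAt: forgeDateTimeString("created_at"),   // DATETIME, "yyyy-MM-dd HH:mm:ss.SSS"
  updatedAt: forgeTimestampString("updated_at"),  // TIMESTAMP, same millisecond format
});
```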
3713
- async function dropSchemaMigrations() {
3714
- try {
3715
- const tables = await getTables();
3716
- const dropStatements = generateDropTableStatements(tables, { sequence: true, table: true });
3717
- for (const statement of dropStatements) {
3718
- console.debug(`execute DDL: ${statement}`);
3719
- await sql.sql.executeDDL(statement);
3720
- }
3721
- return getHttpResponse(
3722
- 200,
3723
- "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
3724
- );
3725
- } catch (error) {
3726
- const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
3727
- console.error(errorMessage);
3728
- return getHttpResponse(500, errorMessage);
3729
- }
3730
- }
3731
- const applySchemaMigrations = async (migration) => {
3732
- try {
3733
- if (typeof migration !== "function") {
3734
- throw new Error("migration is not a function");
3735
- }
3736
- console.debug("Provisioning the database");
3737
- await sql.sql._provision();
3738
- console.debug("Running schema migrations");
3739
- const migrations2 = await migration(sql.migrationRunner);
3740
- const successfulMigrations = await migrations2.run();
3741
- console.debug("Migrations applied:", successfulMigrations);
3742
- const migrationList = await sql.migrationRunner.list();
3743
- let migrationHistory = "No migrations found";
3744
- if (Array.isArray(migrationList) && migrationList.length > 0) {
3745
- const sortedMigrations = migrationList.toSorted(
3746
- (a, b) => a.migratedAt.getTime() - b.migratedAt.getTime()
3747
- );
3748
- migrationHistory = sortedMigrations.map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`).join("\n");
3749
- }
3750
- console.debug("Migrations history:\nid, name, migrated_at\n", migrationHistory);
3751
- return {
3752
- headers: { "Content-Type": ["application/json"] },
3753
- statusCode: 200,
3754
- statusText: "OK",
3755
- body: "Migrations successfully executed"
3756
- };
3757
- } catch (error) {
3758
- const errorMessage = error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.context?.debug?.message ?? error?.debug?.context?.sqlMessage ?? error?.debug?.context?.message ?? error.message ?? "Unknown error occurred";
3759
- console.error("Error during migration:", errorMessage);
3760
- return {
3761
- headers: { "Content-Type": ["application/json"] },
3762
- statusCode: 500,
3763
- statusText: "Internal Server Error",
3764
- body: error instanceof Error ? errorMessage : "Unknown error during migration"
3765
- };
3766
- }
3767
- };
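A possible web trigger wiring, assuming the `enqueue()` chaining of `@forge/sql`'s migration runner; the handler name and the DDL statement are illustrative.

```typescript
// Sketch of a Forge web trigger handler that applies schema migrations.
import { applySchemaMigrations } from "forge-sql-orm";

export const runMigrations = () =>
  applySchemaMigrations((migrationRunner) =>
    migrationRunner.enqueue(
      "v001_create_users",
      "CREATE TABLE IF NOT EXISTS users (id INT PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255))",
    ),
  );
```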
3768
- async function fetchSchemaWebTrigger() {
3769
- try {
3770
- const tables = await getTables();
3771
- const createTableStatements = await generateCreateTableStatements(tables);
3772
- const sqlStatements = wrapWithForeignKeyChecks(createTableStatements);
3773
- return getHttpResponse(200, sqlStatements.join(";\n"));
3774
- } catch (error) {
3775
- const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
3776
- console.error(errorMessage);
3777
- return getHttpResponse(500, errorMessage);
3778
- }
3779
- }
3780
- async function generateCreateTableStatements(tables) {
3781
- const statements = [];
3782
- for (const table2 of tables) {
3783
- const createTableResult = await sql.sql.executeDDL(`SHOW CREATE TABLE "${table2}"`);
3784
- const createTableStatements = createTableResult.rows.filter((row) => !isSystemTable(row.Table)).map((row) => formatCreateTableStatement(row["Create Table"]));
3785
- statements.push(...createTableStatements);
3786
- }
3787
- return statements;
3788
- }
3789
- function isSystemTable(tableName) {
3790
- return forgeSystemTables.some((st) => table.getTableName(st) === tableName);
3791
- }
3792
- function formatCreateTableStatement(statement) {
3793
- return statement.replace(/"/g, "").replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS");
3794
- }
3795
- function wrapWithForeignKeyChecks(statements) {
3796
- return ["SET foreign_key_checks = 0", ...statements, "SET foreign_key_checks = 1"];
3797
- }
3798
- async function dropTableSchemaMigrations() {
3799
- try {
3800
- const tables = await getTables();
3801
- const dropStatements = generateDropTableStatements(tables, { sequence: false, table: true });
3802
- for (const statement of dropStatements) {
3803
- console.debug(`execute DDL: ${statement}`);
3804
- await sql.sql.executeDDL(statement);
3805
- }
3806
- return getHttpResponse(
3807
- 200,
3808
- "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
3809
- );
3810
- } catch (error) {
3811
- const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
3812
- console.error(errorMessage);
3813
- return getHttpResponse(500, errorMessage);
3814
- }
3815
- }
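A wiring sketch for the schema web triggers (handler names are illustrative); note that the two drop handlers permanently delete data, as their responses warn.

```typescript
// Sketch: exposing the schema web triggers from an app entry point.
import {
  fetchSchemaWebTrigger,
  dropSchemaMigrations,
  dropTableSchemaMigrations,
} from "forge-sql-orm";

// Returns CREATE TABLE statements for the app's tables, wrapped in foreign_key_checks guards.
export const fetchSchema = () => fetchSchemaWebTrigger();

// Destructive: drops tables and sequences.
export const dropSchema = () => dropSchemaMigrations();

// Destructive: drops tables only.
export const dropTables = () => dropTableSchemaMigrations();
```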
3816
- const clearCacheSchedulerTrigger = async (options) => {
3817
- try {
3818
- const newOptions = options ?? {
3819
- logRawSqlQuery: false,
3820
- disableOptimisticLocking: false,
3821
- cacheTTL: 120,
3822
- cacheEntityName: "cache",
3823
- cacheEntityQueryName: "sql",
3824
- cacheEntityExpirationName: "expiration",
3825
- cacheEntityDataName: "data"
3826
- };
3827
- if (!newOptions.cacheEntityName) {
3828
- throw new Error("cacheEntityName is not configured");
3829
- }
3830
- await clearExpiredCache(newOptions);
3831
- return {
3832
- headers: { "Content-Type": ["application/json"] },
3833
- statusCode: 200,
3834
- statusText: "OK",
3835
- body: JSON.stringify({
3836
- success: true,
3837
- message: "Cache cleanup completed successfully",
3838
- timestamp: (/* @__PURE__ */ new Date()).toISOString()
3839
- })
3840
- };
3841
- } catch (error) {
3842
- console.error("Error during cache cleanup: ", JSON.stringify(error));
3843
- return {
3844
- headers: { "Content-Type": ["application/json"] },
3845
- statusCode: 500,
3846
- statusText: "Internal Server Error",
3847
- body: JSON.stringify({
3848
- success: false,
3849
- error: error instanceof Error ? error.message : "Unknown error during cache cleanup",
3850
- timestamp: (/* @__PURE__ */ new Date()).toISOString()
3851
- })
3852
- };
3853
- }
3854
- };
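A scheduled trigger sketch; the handler name is illustrative and the explicit options simply restate the defaults above, so they only need to change if the cache table uses different entity or field names.

```typescript
// Sketch of a scheduled trigger handler that clears expired cache entries.
import { clearCacheSchedulerTrigger } from "forge-sql-orm";

export const cleanupCache = () =>
  clearCacheSchedulerTrigger({
    logRawSqlQuery: false,
    disableOptimisticLocking: false,
    cacheTTL: 120,
    cacheEntityName: "cache",
    cacheEntityQueryName: "sql",
    cacheEntityExpirationName: "expiration",
    cacheEntityDataName: "data",
  });
```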
3855
- async function slowQuerySchedulerTrigger(forgeSQLORM, options) {
3856
- try {
3857
- return getHttpResponse(
3858
- 200,
3859
- JSON.stringify(
3860
- await slowQueryPerHours(forgeSQLORM, options?.hours ?? 1, options?.timeout ?? 3e3)
3861
- )
3862
- );
3863
- } catch (error) {
3864
- const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
3865
- console.error(errorMessage);
3866
- return getHttpResponse(500, errorMessage);
3867
- }
3868
- }
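A scheduled trigger sketch for slow query reporting; the handler name is illustrative, constructing `ForgeSQLORM` without options is an assumption, and the `hours`/`timeout` values restate the defaults used above.

```typescript
// Sketch: report slow queries from the last hour with a 3000 ms timeout.
import ForgeSQLORM, { slowQuerySchedulerTrigger } from "forge-sql-orm";

const forgeSQL = new ForgeSQLORM();

export const reportSlowQueries = () =>
  slowQuerySchedulerTrigger(forgeSQL, { hours: 1, timeout: 3000 });
```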
3869
- const getHttpResponse = (statusCode, body) => {
3870
- let statusText = "";
- if (statusCode === 200) {
- statusText = "OK";
- } else {
- statusText = "Internal Server Error";
- }
3876
- return {
3877
- headers: { "Content-Type": ["application/json"] },
3878
- statusCode,
3879
- statusText,
3880
- body
3881
- };
3882
- };
3883
- exports.ForgeSQLCrudOperations = ForgeSQLCrudOperations;
3884
- exports.ForgeSQLSelectOperations = ForgeSQLSelectOperations;
3885
- exports.applyFromDriverTransform = applyFromDriverTransform;
3886
- exports.applySchemaMigrations = applySchemaMigrations;
3887
- exports.clearCacheSchedulerTrigger = clearCacheSchedulerTrigger;
3888
- exports.clusterStatementsSummary = clusterStatementsSummary;
3889
- exports.clusterStatementsSummaryHistory = clusterStatementsSummaryHistory;
3890
- exports.default = ForgeSQLORM;
3891
- exports.dropSchemaMigrations = dropSchemaMigrations;
3892
- exports.dropTableSchemaMigrations = dropTableSchemaMigrations;
3893
- exports.fetchSchemaWebTrigger = fetchSchemaWebTrigger;
3894
- exports.forgeDateString = forgeDateString;
3895
- exports.forgeDateTimeString = forgeDateTimeString;
3896
- exports.forgeDriver = forgeDriver;
3897
- exports.forgeSystemTables = forgeSystemTables;
3898
- exports.forgeTimeString = forgeTimeString;
3899
- exports.forgeTimestampString = forgeTimestampString;
3900
- exports.formatDateTime = formatDateTime;
3901
- exports.formatLimitOffset = formatLimitOffset;
3902
- exports.generateDropTableStatements = generateDropTableStatements;
3903
- exports.getHttpResponse = getHttpResponse;
3904
- exports.getPrimaryKeys = getPrimaryKeys;
3905
- exports.getTableMetadata = getTableMetadata;
3906
- exports.getTables = getTables;
3907
- exports.isUpdateQueryResponse = isUpdateQueryResponse;
3908
- exports.mapSelectAllFieldsToAlias = mapSelectAllFieldsToAlias;
3909
- exports.mapSelectFieldsWithAlias = mapSelectFieldsWithAlias;
3910
- exports.migrations = migrations;
3911
- exports.nextVal = nextVal;
3912
- exports.parseDateTime = parseDateTime;
3913
- exports.patchDbWithSelectAliased = patchDbWithSelectAliased;
3914
- exports.printQueriesWithPlan = printQueriesWithPlan;
3915
- exports.slowQuery = slowQuery;
3916
- exports.slowQueryPerHours = slowQueryPerHours;
3917
- exports.slowQuerySchedulerTrigger = slowQuerySchedulerTrigger;
3918
- exports.statementsSummary = statementsSummary;
3919
- exports.statementsSummaryHistory = statementsSummaryHistory;
3920
- exports.withTidbHint = withTidbHint;
3921
- exports.withTimeout = withTimeout;
3922
- //# sourceMappingURL=ForgeSQLORM.js.map