forge-sql-orm 2.1.12 → 2.1.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +662 -548
- package/dist/core/ForgeSQLAnalyseOperations.d.ts.map +1 -1
- package/dist/core/ForgeSQLAnalyseOperations.js +257 -0
- package/dist/core/ForgeSQLAnalyseOperations.js.map +1 -0
- package/dist/core/ForgeSQLCacheOperations.js +172 -0
- package/dist/core/ForgeSQLCacheOperations.js.map +1 -0
- package/dist/core/ForgeSQLCrudOperations.js +349 -0
- package/dist/core/ForgeSQLCrudOperations.js.map +1 -0
- package/dist/core/ForgeSQLORM.js +1191 -0
- package/dist/core/ForgeSQLORM.js.map +1 -0
- package/dist/core/ForgeSQLQueryBuilder.js +77 -0
- package/dist/core/ForgeSQLQueryBuilder.js.map +1 -0
- package/dist/core/ForgeSQLSelectOperations.js +81 -0
- package/dist/core/ForgeSQLSelectOperations.js.map +1 -0
- package/dist/core/SystemTables.js +258 -0
- package/dist/core/SystemTables.js.map +1 -0
- package/dist/index.js +30 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/drizzle/extensions/additionalActions.d.ts.map +1 -1
- package/dist/lib/drizzle/extensions/additionalActions.js +527 -0
- package/dist/lib/drizzle/extensions/additionalActions.js.map +1 -0
- package/dist/utils/cacheContextUtils.d.ts.map +1 -1
- package/dist/utils/cacheContextUtils.js +198 -0
- package/dist/utils/cacheContextUtils.js.map +1 -0
- package/dist/utils/cacheUtils.d.ts.map +1 -1
- package/dist/utils/cacheUtils.js +383 -0
- package/dist/utils/cacheUtils.js.map +1 -0
- package/dist/utils/forgeDriver.d.ts.map +1 -1
- package/dist/utils/forgeDriver.js +139 -0
- package/dist/utils/forgeDriver.js.map +1 -0
- package/dist/utils/forgeDriverProxy.js +68 -0
- package/dist/utils/forgeDriverProxy.js.map +1 -0
- package/dist/utils/metadataContextUtils.js +28 -0
- package/dist/utils/metadataContextUtils.js.map +1 -0
- package/dist/utils/requestTypeContextUtils.js +10 -0
- package/dist/utils/requestTypeContextUtils.js.map +1 -0
- package/dist/utils/sqlHints.js +52 -0
- package/dist/utils/sqlHints.js.map +1 -0
- package/dist/utils/sqlUtils.d.ts.map +1 -1
- package/dist/utils/sqlUtils.js +590 -0
- package/dist/utils/sqlUtils.js.map +1 -0
- package/dist/webtriggers/applyMigrationsWebTrigger.js +77 -0
- package/dist/webtriggers/applyMigrationsWebTrigger.js.map +1 -0
- package/dist/webtriggers/clearCacheSchedulerTrigger.js +83 -0
- package/dist/webtriggers/clearCacheSchedulerTrigger.js.map +1 -0
- package/dist/webtriggers/dropMigrationWebTrigger.js +54 -0
- package/dist/webtriggers/dropMigrationWebTrigger.js.map +1 -0
- package/dist/webtriggers/dropTablesMigrationWebTrigger.js +54 -0
- package/dist/webtriggers/dropTablesMigrationWebTrigger.js.map +1 -0
- package/dist/webtriggers/fetchSchemaWebTrigger.js +82 -0
- package/dist/webtriggers/fetchSchemaWebTrigger.js.map +1 -0
- package/dist/webtriggers/index.js +40 -0
- package/dist/webtriggers/index.js.map +1 -0
- package/dist/webtriggers/slowQuerySchedulerTrigger.js +80 -0
- package/dist/webtriggers/slowQuerySchedulerTrigger.js.map +1 -0
- package/package.json +28 -23
- package/src/core/ForgeSQLAnalyseOperations.ts +3 -2
- package/src/lib/drizzle/extensions/additionalActions.ts +11 -0
- package/src/utils/cacheContextUtils.ts +9 -6
- package/src/utils/cacheUtils.ts +6 -4
- package/src/utils/forgeDriver.ts +3 -7
- package/src/utils/sqlUtils.ts +33 -34
- package/dist/ForgeSQLORM.js +0 -3922
- package/dist/ForgeSQLORM.js.map +0 -1
- package/dist/ForgeSQLORM.mjs +0 -3905
- package/dist/ForgeSQLORM.mjs.map +0 -1
package/dist/ForgeSQLORM.mjs
DELETED
|
@@ -1,3905 +0,0 @@
|
|
|
1
|
-
import { isTable, sql as sql$1, and, isNotNull, not, ilike, notInArray, gte, ne, eq, getTableColumns } from "drizzle-orm";
|
|
2
|
-
import { DateTime } from "luxon";
|
|
3
|
-
import { isSQLWrapper } from "drizzle-orm/sql/sql";
|
|
4
|
-
import { mysqlTable, timestamp, varchar, bigint, mysqlSchema, longtext, double, boolean, text, int, customType } from "drizzle-orm/mysql-core";
|
|
5
|
-
import { sql, migrationRunner } from "@forge/sql";
|
|
6
|
-
import { AsyncLocalStorage } from "node:async_hooks";
|
|
7
|
-
import { getTableName } from "drizzle-orm/table";
|
|
8
|
-
import * as crypto from "crypto";
|
|
9
|
-
import { kvs, WhereConditions, Filter, FilterConditions } from "@forge/kvs";
|
|
10
|
-
import { drizzle } from "drizzle-orm/mysql-proxy";
|
|
11
|
-
const migrations = mysqlTable("__migrations", {
|
|
12
|
-
id: bigint("id", { mode: "number" }).primaryKey().autoincrement(),
|
|
13
|
-
name: varchar("name", { length: 255 }).notNull(),
|
|
14
|
-
migratedAt: timestamp("migratedAt").defaultNow().notNull()
|
|
15
|
-
});
|
|
16
|
-
const informationSchema = mysqlSchema("information_schema");
|
|
17
|
-
const slowQuery = informationSchema.table("CLUSTER_SLOW_QUERY", {
|
|
18
|
-
time: timestamp("Time", { fsp: 6, mode: "string" }).notNull(),
|
|
19
|
-
// Timestamp when the slow query was recorded
|
|
20
|
-
txnStartTs: bigint("Txn_start_ts", { mode: "bigint", unsigned: true }),
|
|
21
|
-
// Transaction start timestamp (TSO)
|
|
22
|
-
user: varchar("User", { length: 64 }),
|
|
23
|
-
// User executing the query
|
|
24
|
-
host: varchar("Host", { length: 64 }),
|
|
25
|
-
// Host from which the query originated
|
|
26
|
-
connId: bigint("Conn_ID", { mode: "bigint", unsigned: true }),
|
|
27
|
-
// Connection ID
|
|
28
|
-
sessionAlias: varchar("Session_alias", { length: 64 }),
|
|
29
|
-
// Session alias
|
|
30
|
-
execRetryCount: bigint("Exec_retry_count", { mode: "bigint", unsigned: true }),
|
|
31
|
-
// Number of retries during execution
|
|
32
|
-
execRetryTime: double("Exec_retry_time"),
|
|
33
|
-
// Time spent in retries
|
|
34
|
-
queryTime: double("Query_time"),
|
|
35
|
-
// Total execution time
|
|
36
|
-
parseTime: double("Parse_time"),
|
|
37
|
-
// Time spent parsing SQL
|
|
38
|
-
compileTime: double("Compile_time"),
|
|
39
|
-
// Time spent compiling query plan
|
|
40
|
-
rewriteTime: double("Rewrite_time"),
|
|
41
|
-
// Time spent rewriting query
|
|
42
|
-
preprocSubqueries: bigint("Preproc_subqueries", { mode: "bigint", unsigned: true }),
|
|
43
|
-
// Number of subqueries preprocessed
|
|
44
|
-
preprocSubqueriesTime: double("Preproc_subqueries_time"),
|
|
45
|
-
// Time spent preprocessing subqueries
|
|
46
|
-
optimizeTime: double("Optimize_time"),
|
|
47
|
-
// Time spent in optimizer
|
|
48
|
-
waitTs: double("Wait_TS"),
|
|
49
|
-
// Wait time for getting TSO
|
|
50
|
-
prewriteTime: double("Prewrite_time"),
|
|
51
|
-
// Time spent in prewrite phase
|
|
52
|
-
waitPrewriteBinlogTime: double("Wait_prewrite_binlog_time"),
|
|
53
|
-
// Time waiting for binlog prewrite
|
|
54
|
-
commitTime: double("Commit_time"),
|
|
55
|
-
// Commit duration
|
|
56
|
-
getCommitTsTime: double("Get_commit_ts_time"),
|
|
57
|
-
// Time waiting for commit TSO
|
|
58
|
-
commitBackoffTime: double("Commit_backoff_time"),
|
|
59
|
-
// Backoff time during commit
|
|
60
|
-
backoffTypes: varchar("Backoff_types", { length: 64 }),
|
|
61
|
-
// Types of backoff occurred
|
|
62
|
-
resolveLockTime: double("Resolve_lock_time"),
|
|
63
|
-
// Time resolving locks
|
|
64
|
-
localLatchWaitTime: double("Local_latch_wait_time"),
|
|
65
|
-
// Time waiting on local latch
|
|
66
|
-
writeKeys: bigint("Write_keys", { mode: "bigint" }),
|
|
67
|
-
// Number of keys written
|
|
68
|
-
writeSize: bigint("Write_size", { mode: "bigint" }),
|
|
69
|
-
// Amount of data written
|
|
70
|
-
prewriteRegion: bigint("Prewrite_region", { mode: "bigint" }),
|
|
71
|
-
// Regions involved in prewrite
|
|
72
|
-
txnRetry: bigint("Txn_retry", { mode: "bigint" }),
|
|
73
|
-
// Transaction retry count
|
|
74
|
-
copTime: double("Cop_time"),
|
|
75
|
-
// Time spent in coprocessor
|
|
76
|
-
processTime: double("Process_time"),
|
|
77
|
-
// Processing time
|
|
78
|
-
waitTime: double("Wait_time"),
|
|
79
|
-
// Wait time in TiKV
|
|
80
|
-
backoffTime: double("Backoff_time"),
|
|
81
|
-
// Backoff wait time
|
|
82
|
-
lockKeysTime: double("LockKeys_time"),
|
|
83
|
-
// Time spent waiting for locks
|
|
84
|
-
requestCount: bigint("Request_count", { mode: "bigint", unsigned: true }),
|
|
85
|
-
// Total number of requests
|
|
86
|
-
totalKeys: bigint("Total_keys", { mode: "bigint", unsigned: true }),
|
|
87
|
-
// Total keys scanned
|
|
88
|
-
processKeys: bigint("Process_keys", { mode: "bigint", unsigned: true }),
|
|
89
|
-
// Keys processed
|
|
90
|
-
rocksdbDeleteSkippedCount: bigint("Rocksdb_delete_skipped_count", {
|
|
91
|
-
mode: "bigint",
|
|
92
|
-
unsigned: true
|
|
93
|
-
}),
|
|
94
|
-
// RocksDB delete skips
|
|
95
|
-
rocksdbKeySkippedCount: bigint("Rocksdb_key_skipped_count", { mode: "bigint", unsigned: true }),
|
|
96
|
-
// RocksDB key skips
|
|
97
|
-
rocksdbBlockCacheHitCount: bigint("Rocksdb_block_cache_hit_count", {
|
|
98
|
-
mode: "bigint",
|
|
99
|
-
unsigned: true
|
|
100
|
-
}),
|
|
101
|
-
// RocksDB block cache hits
|
|
102
|
-
rocksdbBlockReadCount: bigint("Rocksdb_block_read_count", { mode: "bigint", unsigned: true }),
|
|
103
|
-
// RocksDB block reads
|
|
104
|
-
rocksdbBlockReadByte: bigint("Rocksdb_block_read_byte", { mode: "bigint", unsigned: true }),
|
|
105
|
-
// RocksDB block read bytes
|
|
106
|
-
db: varchar("DB", { length: 64 }),
|
|
107
|
-
// Database name
|
|
108
|
-
indexNames: varchar("Index_names", { length: 100 }),
|
|
109
|
-
// Indexes used
|
|
110
|
-
isInternal: boolean("Is_internal"),
|
|
111
|
-
// Whether the query is internal
|
|
112
|
-
digest: varchar("Digest", { length: 64 }),
|
|
113
|
-
// SQL digest hash
|
|
114
|
-
stats: varchar("Stats", { length: 512 }),
|
|
115
|
-
// Stats used during planning
|
|
116
|
-
copProcAvg: double("Cop_proc_avg"),
|
|
117
|
-
// Coprocessor average processing time
|
|
118
|
-
copProcP90: double("Cop_proc_p90"),
|
|
119
|
-
// Coprocessor 90th percentile processing time
|
|
120
|
-
copProcMax: double("Cop_proc_max"),
|
|
121
|
-
// Coprocessor max processing time
|
|
122
|
-
copProcAddr: varchar("Cop_proc_addr", { length: 64 }),
|
|
123
|
-
// Coprocessor address for processing
|
|
124
|
-
copWaitAvg: double("Cop_wait_avg"),
|
|
125
|
-
// Coprocessor average wait time
|
|
126
|
-
copWaitP90: double("Cop_wait_p90"),
|
|
127
|
-
// Coprocessor 90th percentile wait time
|
|
128
|
-
copWaitMax: double("Cop_wait_max"),
|
|
129
|
-
// Coprocessor max wait time
|
|
130
|
-
copWaitAddr: varchar("Cop_wait_addr", { length: 64 }),
|
|
131
|
-
// Coprocessor address for wait
|
|
132
|
-
memMax: bigint("Mem_max", { mode: "bigint" }),
|
|
133
|
-
// Max memory usage
|
|
134
|
-
diskMax: bigint("Disk_max", { mode: "bigint" }),
|
|
135
|
-
// Max disk usage
|
|
136
|
-
kvTotal: double("KV_total"),
|
|
137
|
-
// Total KV request time
|
|
138
|
-
pdTotal: double("PD_total"),
|
|
139
|
-
// Total PD request time
|
|
140
|
-
backoffTotal: double("Backoff_total"),
|
|
141
|
-
// Total backoff time
|
|
142
|
-
writeSqlResponseTotal: double("Write_sql_response_total"),
|
|
143
|
-
// SQL response write time
|
|
144
|
-
resultRows: bigint("Result_rows", { mode: "bigint" }),
|
|
145
|
-
// Rows returned
|
|
146
|
-
warnings: longtext("Warnings"),
|
|
147
|
-
// Warnings during execution
|
|
148
|
-
backoffDetail: varchar("Backoff_Detail", { length: 4096 }),
|
|
149
|
-
// Detailed backoff info
|
|
150
|
-
prepared: boolean("Prepared"),
|
|
151
|
-
// Whether query was prepared
|
|
152
|
-
succ: boolean("Succ"),
|
|
153
|
-
// Success flag
|
|
154
|
-
isExplicitTxn: boolean("IsExplicitTxn"),
|
|
155
|
-
// Whether explicit transaction
|
|
156
|
-
isWriteCacheTable: boolean("IsWriteCacheTable"),
|
|
157
|
-
// Whether wrote to cache table
|
|
158
|
-
planFromCache: boolean("Plan_from_cache"),
|
|
159
|
-
// Plan was from cache
|
|
160
|
-
planFromBinding: boolean("Plan_from_binding"),
|
|
161
|
-
// Plan was from binding
|
|
162
|
-
hasMoreResults: boolean("Has_more_results"),
|
|
163
|
-
// Query returned multiple results
|
|
164
|
-
resourceGroup: varchar("Resource_group", { length: 64 }),
|
|
165
|
-
// Resource group name
|
|
166
|
-
requestUnitRead: double("Request_unit_read"),
|
|
167
|
-
// RU consumed for read
|
|
168
|
-
requestUnitWrite: double("Request_unit_write"),
|
|
169
|
-
// RU consumed for write
|
|
170
|
-
timeQueuedByRc: double("Time_queued_by_rc"),
|
|
171
|
-
// Time queued by resource control
|
|
172
|
-
tidbCpuTime: double("Tidb_cpu_time"),
|
|
173
|
-
// TiDB CPU time
|
|
174
|
-
tikvCpuTime: double("Tikv_cpu_time"),
|
|
175
|
-
// TiKV CPU time
|
|
176
|
-
plan: longtext("Plan"),
|
|
177
|
-
// Query execution plan
|
|
178
|
-
planDigest: varchar("Plan_digest", { length: 128 }),
|
|
179
|
-
// Plan digest hash
|
|
180
|
-
binaryPlan: longtext("Binary_plan"),
|
|
181
|
-
// Binary execution plan
|
|
182
|
-
prevStmt: longtext("Prev_stmt"),
|
|
183
|
-
// Previous statement in session
|
|
184
|
-
query: longtext("Query")
|
|
185
|
-
// Original SQL query
|
|
186
|
-
});
|
|
187
|
-
const createClusterStatementsSummarySchema = () => ({
|
|
188
|
-
instance: varchar("INSTANCE", { length: 64 }),
|
|
189
|
-
// TiDB/TiKV instance address
|
|
190
|
-
summaryBeginTime: timestamp("SUMMARY_BEGIN_TIME", { mode: "string" }).notNull(),
|
|
191
|
-
// Begin time of this summary window
|
|
192
|
-
summaryEndTime: timestamp("SUMMARY_END_TIME", { mode: "string" }).notNull(),
|
|
193
|
-
// End time of this summary window
|
|
194
|
-
stmtType: varchar("STMT_TYPE", { length: 64 }).notNull(),
|
|
195
|
-
// Statement type (e.g., Select/Insert/Update)
|
|
196
|
-
schemaName: varchar("SCHEMA_NAME", { length: 64 }),
|
|
197
|
-
// Current schema name
|
|
198
|
-
digest: varchar("DIGEST", { length: 64 }),
|
|
199
|
-
// SQL digest (normalized hash)
|
|
200
|
-
digestText: text("DIGEST_TEXT").notNull(),
|
|
201
|
-
// Normalized SQL text
|
|
202
|
-
tableNames: text("TABLE_NAMES"),
|
|
203
|
-
// Involved table names
|
|
204
|
-
indexNames: text("INDEX_NAMES"),
|
|
205
|
-
// Used index names
|
|
206
|
-
sampleUser: varchar("SAMPLE_USER", { length: 64 }),
|
|
207
|
-
// Sampled user who executed the statements
|
|
208
|
-
execCount: bigint("EXEC_COUNT", { mode: "bigint", unsigned: true }).notNull(),
|
|
209
|
-
// Total executions
|
|
210
|
-
sumErrors: int("SUM_ERRORS", { unsigned: true }).notNull(),
|
|
211
|
-
// Sum of errors
|
|
212
|
-
sumWarnings: int("SUM_WARNINGS", { unsigned: true }).notNull(),
|
|
213
|
-
// Sum of warnings
|
|
214
|
-
sumLatency: bigint("SUM_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
215
|
-
// Sum of latency (ns)
|
|
216
|
-
maxLatency: bigint("MAX_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
217
|
-
// Max latency (ns)
|
|
218
|
-
minLatency: bigint("MIN_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
219
|
-
// Min latency (ns)
|
|
220
|
-
avgLatency: bigint("AVG_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
221
|
-
// Avg latency (ns)
|
|
222
|
-
avgParseLatency: bigint("AVG_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
223
|
-
// Avg parse time (ns)
|
|
224
|
-
maxParseLatency: bigint("MAX_PARSE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
225
|
-
// Max parse time (ns)
|
|
226
|
-
avgCompileLatency: bigint("AVG_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
227
|
-
// Avg compile time (ns)
|
|
228
|
-
maxCompileLatency: bigint("MAX_COMPILE_LATENCY", { mode: "bigint", unsigned: true }).notNull(),
|
|
229
|
-
// Max compile time (ns)
|
|
230
|
-
sumCopTaskNum: bigint("SUM_COP_TASK_NUM", { mode: "bigint", unsigned: true }).notNull(),
|
|
231
|
-
// Total number of cop tasks
|
|
232
|
-
maxCopProcessTime: bigint("MAX_COP_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
233
|
-
// Max TiKV coprocessor processing time (ns)
|
|
234
|
-
maxCopProcessAddress: varchar("MAX_COP_PROCESS_ADDRESS", { length: 256 }),
|
|
235
|
-
// Address of cop task with max processing time
|
|
236
|
-
maxCopWaitTime: bigint("MAX_COP_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
237
|
-
// Max TiKV coprocessor wait time (ns)
|
|
238
|
-
maxCopWaitAddress: varchar("MAX_COP_WAIT_ADDRESS", { length: 256 }),
|
|
239
|
-
// Address of cop task with max wait time
|
|
240
|
-
avgProcessTime: bigint("AVG_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
241
|
-
// Avg TiKV processing time (ns)
|
|
242
|
-
maxProcessTime: bigint("MAX_PROCESS_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
243
|
-
// Max TiKV processing time (ns)
|
|
244
|
-
avgWaitTime: bigint("AVG_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
245
|
-
// Avg TiKV wait time (ns)
|
|
246
|
-
maxWaitTime: bigint("MAX_WAIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
247
|
-
// Max TiKV wait time (ns)
|
|
248
|
-
avgBackoffTime: bigint("AVG_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
249
|
-
// Avg backoff time before retry (ns)
|
|
250
|
-
maxBackoffTime: bigint("MAX_BACKOFF_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
251
|
-
// Max backoff time before retry (ns)
|
|
252
|
-
avgTotalKeys: bigint("AVG_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
|
|
253
|
-
// Avg scanned keys
|
|
254
|
-
maxTotalKeys: bigint("MAX_TOTAL_KEYS", { mode: "bigint", unsigned: true }).notNull(),
|
|
255
|
-
// Max scanned keys
|
|
256
|
-
avgProcessedKeys: bigint("AVG_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
|
|
257
|
-
// Avg processed keys
|
|
258
|
-
maxProcessedKeys: bigint("MAX_PROCESSED_KEYS", { mode: "bigint", unsigned: true }).notNull(),
|
|
259
|
-
// Max processed keys
|
|
260
|
-
avgRocksdbDeleteSkippedCount: double("AVG_ROCKSDB_DELETE_SKIPPED_COUNT").notNull(),
|
|
261
|
-
// Avg RocksDB deletes skipped
|
|
262
|
-
maxRocksdbDeleteSkippedCount: int("MAX_ROCKSDB_DELETE_SKIPPED_COUNT", {
|
|
263
|
-
unsigned: true
|
|
264
|
-
}).notNull(),
|
|
265
|
-
// Max RocksDB deletes skipped
|
|
266
|
-
avgRocksdbKeySkippedCount: double("AVG_ROCKSDB_KEY_SKIPPED_COUNT").notNull(),
|
|
267
|
-
// Avg RocksDB keys skipped
|
|
268
|
-
maxRocksdbKeySkippedCount: int("MAX_ROCKSDB_KEY_SKIPPED_COUNT", { unsigned: true }).notNull(),
|
|
269
|
-
// Max RocksDB keys skipped
|
|
270
|
-
avgRocksdbBlockCacheHitCount: double("AVG_ROCKSDB_BLOCK_CACHE_HIT_COUNT").notNull(),
|
|
271
|
-
// Avg RocksDB block cache hits
|
|
272
|
-
maxRocksdbBlockCacheHitCount: int("MAX_ROCKSDB_BLOCK_CACHE_HIT_COUNT", {
|
|
273
|
-
unsigned: true
|
|
274
|
-
}).notNull(),
|
|
275
|
-
// Max RocksDB block cache hits
|
|
276
|
-
avgRocksdbBlockReadCount: double("AVG_ROCKSDB_BLOCK_READ_COUNT").notNull(),
|
|
277
|
-
// Avg RocksDB block reads
|
|
278
|
-
maxRocksdbBlockReadCount: int("MAX_ROCKSDB_BLOCK_READ_COUNT", { unsigned: true }).notNull(),
|
|
279
|
-
// Max RocksDB block reads
|
|
280
|
-
avgRocksdbBlockReadByte: double("AVG_ROCKSDB_BLOCK_READ_BYTE").notNull(),
|
|
281
|
-
// Avg RocksDB block read bytes
|
|
282
|
-
maxRocksdbBlockReadByte: int("MAX_ROCKSDB_BLOCK_READ_BYTE", { unsigned: true }).notNull(),
|
|
283
|
-
// Max RocksDB block read bytes
|
|
284
|
-
avgPrewriteTime: bigint("AVG_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
285
|
-
// Avg prewrite phase time (ns)
|
|
286
|
-
maxPrewriteTime: bigint("MAX_PREWRITE_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
287
|
-
// Max prewrite phase time (ns)
|
|
288
|
-
avgCommitTime: bigint("AVG_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
289
|
-
// Avg commit phase time (ns)
|
|
290
|
-
maxCommitTime: bigint("MAX_COMMIT_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
291
|
-
// Max commit phase time (ns)
|
|
292
|
-
avgGetCommitTsTime: bigint("AVG_GET_COMMIT_TS_TIME", {
|
|
293
|
-
mode: "bigint",
|
|
294
|
-
unsigned: true
|
|
295
|
-
}).notNull(),
|
|
296
|
-
// Avg get commit_ts time (ns)
|
|
297
|
-
maxGetCommitTsTime: bigint("MAX_GET_COMMIT_TS_TIME", {
|
|
298
|
-
mode: "bigint",
|
|
299
|
-
unsigned: true
|
|
300
|
-
}).notNull(),
|
|
301
|
-
// Max get commit_ts time (ns)
|
|
302
|
-
avgCommitBackoffTime: bigint("AVG_COMMIT_BACKOFF_TIME", {
|
|
303
|
-
mode: "bigint",
|
|
304
|
-
unsigned: true
|
|
305
|
-
}).notNull(),
|
|
306
|
-
// Avg backoff during commit (ns)
|
|
307
|
-
maxCommitBackoffTime: bigint("MAX_COMMIT_BACKOFF_TIME", {
|
|
308
|
-
mode: "bigint",
|
|
309
|
-
unsigned: true
|
|
310
|
-
}).notNull(),
|
|
311
|
-
// Max backoff during commit (ns)
|
|
312
|
-
avgResolveLockTime: bigint("AVG_RESOLVE_LOCK_TIME", {
|
|
313
|
-
mode: "bigint",
|
|
314
|
-
unsigned: true
|
|
315
|
-
}).notNull(),
|
|
316
|
-
// Avg resolve lock time (ns)
|
|
317
|
-
maxResolveLockTime: bigint("MAX_RESOLVE_LOCK_TIME", {
|
|
318
|
-
mode: "bigint",
|
|
319
|
-
unsigned: true
|
|
320
|
-
}).notNull(),
|
|
321
|
-
// Max resolve lock time (ns)
|
|
322
|
-
avgLocalLatchWaitTime: bigint("AVG_LOCAL_LATCH_WAIT_TIME", {
|
|
323
|
-
mode: "bigint",
|
|
324
|
-
unsigned: true
|
|
325
|
-
}).notNull(),
|
|
326
|
-
// Avg local latch wait (ns)
|
|
327
|
-
maxLocalLatchWaitTime: bigint("MAX_LOCAL_LATCH_WAIT_TIME", {
|
|
328
|
-
mode: "bigint",
|
|
329
|
-
unsigned: true
|
|
330
|
-
}).notNull(),
|
|
331
|
-
// Max local latch wait (ns)
|
|
332
|
-
avgWriteKeys: double("AVG_WRITE_KEYS").notNull(),
|
|
333
|
-
// Avg number of written keys
|
|
334
|
-
maxWriteKeys: bigint("MAX_WRITE_KEYS", { mode: "bigint", unsigned: true }).notNull(),
|
|
335
|
-
// Max written keys
|
|
336
|
-
avgWriteSize: double("AVG_WRITE_SIZE").notNull(),
|
|
337
|
-
// Avg written bytes
|
|
338
|
-
maxWriteSize: bigint("MAX_WRITE_SIZE", { mode: "bigint", unsigned: true }).notNull(),
|
|
339
|
-
// Max written bytes
|
|
340
|
-
avgPrewriteRegions: double("AVG_PREWRITE_REGIONS").notNull(),
|
|
341
|
-
// Avg regions in prewrite
|
|
342
|
-
maxPrewriteRegions: int("MAX_PREWRITE_REGIONS", { unsigned: true }).notNull(),
|
|
343
|
-
// Max regions in prewrite
|
|
344
|
-
avgTxnRetry: double("AVG_TXN_RETRY").notNull(),
|
|
345
|
-
// Avg transaction retry count
|
|
346
|
-
maxTxnRetry: int("MAX_TXN_RETRY", { unsigned: true }).notNull(),
|
|
347
|
-
// Max transaction retry count
|
|
348
|
-
sumExecRetry: bigint("SUM_EXEC_RETRY", { mode: "bigint", unsigned: true }).notNull(),
|
|
349
|
-
// Sum of execution retries (pessimistic)
|
|
350
|
-
sumExecRetryTime: bigint("SUM_EXEC_RETRY_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
351
|
-
// Sum time of execution retries (ns)
|
|
352
|
-
sumBackoffTimes: bigint("SUM_BACKOFF_TIMES", { mode: "bigint", unsigned: true }).notNull(),
|
|
353
|
-
// Sum of backoff retries
|
|
354
|
-
backoffTypes: varchar("BACKOFF_TYPES", { length: 1024 }),
|
|
355
|
-
// Backoff types with counts
|
|
356
|
-
avgMem: bigint("AVG_MEM", { mode: "bigint", unsigned: true }).notNull(),
|
|
357
|
-
// Avg memory used (bytes)
|
|
358
|
-
maxMem: bigint("MAX_MEM", { mode: "bigint", unsigned: true }).notNull(),
|
|
359
|
-
// Max memory used (bytes)
|
|
360
|
-
avgDisk: bigint("AVG_DISK", { mode: "bigint", unsigned: true }).notNull(),
|
|
361
|
-
// Avg disk used (bytes)
|
|
362
|
-
maxDisk: bigint("MAX_DISK", { mode: "bigint", unsigned: true }).notNull(),
|
|
363
|
-
// Max disk used (bytes)
|
|
364
|
-
avgKvTime: bigint("AVG_KV_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
365
|
-
// Avg time spent in TiKV (ns)
|
|
366
|
-
avgPdTime: bigint("AVG_PD_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
367
|
-
// Avg time spent in PD (ns)
|
|
368
|
-
avgBackoffTotalTime: bigint("AVG_BACKOFF_TOTAL_TIME", {
|
|
369
|
-
mode: "bigint",
|
|
370
|
-
unsigned: true
|
|
371
|
-
}).notNull(),
|
|
372
|
-
// Avg total backoff time (ns)
|
|
373
|
-
avgWriteSqlRespTime: bigint("AVG_WRITE_SQL_RESP_TIME", {
|
|
374
|
-
mode: "bigint",
|
|
375
|
-
unsigned: true
|
|
376
|
-
}).notNull(),
|
|
377
|
-
// Avg write SQL response time (ns)
|
|
378
|
-
avgTidbCpuTime: bigint("AVG_TIDB_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
379
|
-
// Avg TiDB CPU time (ns)
|
|
380
|
-
avgTikvCpuTime: bigint("AVG_TIKV_CPU_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
381
|
-
// Avg TiKV CPU time (ns)
|
|
382
|
-
maxResultRows: bigint("MAX_RESULT_ROWS", { mode: "bigint" }).notNull(),
|
|
383
|
-
// Max number of result rows
|
|
384
|
-
minResultRows: bigint("MIN_RESULT_ROWS", { mode: "bigint" }).notNull(),
|
|
385
|
-
// Min number of result rows
|
|
386
|
-
avgResultRows: bigint("AVG_RESULT_ROWS", { mode: "bigint" }).notNull(),
|
|
387
|
-
// Avg number of result rows
|
|
388
|
-
prepared: boolean("PREPARED").notNull(),
|
|
389
|
-
// Whether statements are prepared
|
|
390
|
-
avgAffectedRows: double("AVG_AFFECTED_ROWS").notNull(),
|
|
391
|
-
// Avg affected rows
|
|
392
|
-
firstSeen: timestamp("FIRST_SEEN", { mode: "string" }).notNull(),
|
|
393
|
-
// First time statements observed
|
|
394
|
-
lastSeen: timestamp("LAST_SEEN", { mode: "string" }).notNull(),
|
|
395
|
-
// Last time statements observed
|
|
396
|
-
planInCache: boolean("PLAN_IN_CACHE").notNull(),
|
|
397
|
-
// Whether last stmt hit plan cache
|
|
398
|
-
planCacheHits: bigint("PLAN_CACHE_HITS", { mode: "bigint" }).notNull(),
|
|
399
|
-
// Number of plan cache hits
|
|
400
|
-
planInBinding: boolean("PLAN_IN_BINDING").notNull(),
|
|
401
|
-
// Whether matched bindings
|
|
402
|
-
querySampleText: text("QUERY_SAMPLE_TEXT"),
|
|
403
|
-
// Sampled original SQL
|
|
404
|
-
prevSampleText: text("PREV_SAMPLE_TEXT"),
|
|
405
|
-
// Sampled previous SQL before commit
|
|
406
|
-
planDigest: varchar("PLAN_DIGEST", { length: 64 }),
|
|
407
|
-
// Plan digest hash
|
|
408
|
-
plan: text("PLAN"),
|
|
409
|
-
// Sampled textual plan
|
|
410
|
-
binaryPlan: text("BINARY_PLAN"),
|
|
411
|
-
// Sampled binary plan
|
|
412
|
-
charset: varchar("CHARSET", { length: 64 }),
|
|
413
|
-
// Sampled charset
|
|
414
|
-
collation: varchar("COLLATION", { length: 64 }),
|
|
415
|
-
// Sampled collation
|
|
416
|
-
planHint: varchar("PLAN_HINT", { length: 64 }),
|
|
417
|
-
// Sampled plan hint
|
|
418
|
-
maxRequestUnitRead: double("MAX_REQUEST_UNIT_READ").notNull(),
|
|
419
|
-
// Max RU cost (read)
|
|
420
|
-
avgRequestUnitRead: double("AVG_REQUEST_UNIT_READ").notNull(),
|
|
421
|
-
// Avg RU cost (read)
|
|
422
|
-
maxRequestUnitWrite: double("MAX_REQUEST_UNIT_WRITE").notNull(),
|
|
423
|
-
// Max RU cost (write)
|
|
424
|
-
avgRequestUnitWrite: double("AVG_REQUEST_UNIT_WRITE").notNull(),
|
|
425
|
-
// Avg RU cost (write)
|
|
426
|
-
maxQueuedRcTime: bigint("MAX_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
427
|
-
// Max queued time waiting for RU (ns)
|
|
428
|
-
avgQueuedRcTime: bigint("AVG_QUEUED_RC_TIME", { mode: "bigint", unsigned: true }).notNull(),
|
|
429
|
-
// Avg queued time waiting for RU (ns)
|
|
430
|
-
resourceGroup: varchar("RESOURCE_GROUP", { length: 64 }),
|
|
431
|
-
// Bound resource group name
|
|
432
|
-
planCacheUnqualified: bigint("PLAN_CACHE_UNQUALIFIED", { mode: "bigint" }).notNull(),
|
|
433
|
-
// Times not eligible for plan cache
|
|
434
|
-
planCacheUnqualifiedLastReason: text("PLAN_CACHE_UNQUALIFIED_LAST_REASON")
|
|
435
|
-
// Last reason of plan cache ineligibility
|
|
436
|
-
});
|
|
437
|
-
const clusterStatementsSummaryHistory = informationSchema.table(
|
|
438
|
-
"CLUSTER_STATEMENTS_SUMMARY_HISTORY",
|
|
439
|
-
createClusterStatementsSummarySchema()
|
|
440
|
-
);
|
|
441
|
-
const statementsSummaryHistory = informationSchema.table(
|
|
442
|
-
"STATEMENTS_SUMMARY_HISTORY",
|
|
443
|
-
createClusterStatementsSummarySchema()
|
|
444
|
-
);
|
|
445
|
-
const statementsSummary = informationSchema.table(
|
|
446
|
-
"STATEMENTS_SUMMARY",
|
|
447
|
-
createClusterStatementsSummarySchema()
|
|
448
|
-
);
|
|
449
|
-
const clusterStatementsSummary = informationSchema.table(
|
|
450
|
-
"CLUSTER_STATEMENTS_SUMMARY",
|
|
451
|
-
createClusterStatementsSummarySchema()
|
|
452
|
-
);
|
|
453
|
-
async function getTables() {
|
|
454
|
-
const tables = await sql.executeDDL("SHOW TABLES");
|
|
455
|
-
return tables.rows.flatMap((tableInfo) => Object.values(tableInfo));
|
|
456
|
-
}
|
|
457
|
-
const forgeSystemTables = [migrations];
|
|
458
|
-
const parseDateTime = (value, format) => {
|
|
459
|
-
let result;
|
|
460
|
-
if (value instanceof Date) {
|
|
461
|
-
result = value;
|
|
462
|
-
} else {
|
|
463
|
-
const dt = DateTime.fromFormat(value, format);
|
|
464
|
-
if (dt.isValid) {
|
|
465
|
-
result = dt.toJSDate();
|
|
466
|
-
} else {
|
|
467
|
-
const sqlDt = DateTime.fromSQL(value);
|
|
468
|
-
if (sqlDt.isValid) {
|
|
469
|
-
result = sqlDt.toJSDate();
|
|
470
|
-
} else {
|
|
471
|
-
const isoDt = DateTime.fromRFC2822(value);
|
|
472
|
-
if (isoDt.isValid) {
|
|
473
|
-
result = isoDt.toJSDate();
|
|
474
|
-
} else {
|
|
475
|
-
result = new Date(value);
|
|
476
|
-
}
|
|
477
|
-
}
|
|
478
|
-
}
|
|
479
|
-
}
|
|
480
|
-
if (isNaN(result.getTime())) {
|
|
481
|
-
result = new Date(value);
|
|
482
|
-
}
|
|
483
|
-
return result;
|
|
484
|
-
};
|
|
485
|
-
function formatDateTime(value, format, isTimeStamp) {
|
|
486
|
-
let dt = null;
|
|
487
|
-
if (value instanceof Date) {
|
|
488
|
-
dt = DateTime.fromJSDate(value);
|
|
489
|
-
} else if (typeof value === "string") {
|
|
490
|
-
for (const parser of [
|
|
491
|
-
DateTime.fromISO,
|
|
492
|
-
DateTime.fromRFC2822,
|
|
493
|
-
DateTime.fromSQL,
|
|
494
|
-
DateTime.fromHTTP
|
|
495
|
-
]) {
|
|
496
|
-
dt = parser(value);
|
|
497
|
-
if (dt.isValid) break;
|
|
498
|
-
}
|
|
499
|
-
if (!dt?.isValid) {
|
|
500
|
-
const parsed = Number(value);
|
|
501
|
-
if (!isNaN(parsed)) {
|
|
502
|
-
dt = DateTime.fromMillis(parsed);
|
|
503
|
-
}
|
|
504
|
-
}
|
|
505
|
-
} else if (typeof value === "number") {
|
|
506
|
-
dt = DateTime.fromMillis(value);
|
|
507
|
-
} else {
|
|
508
|
-
throw new Error("Unsupported type");
|
|
509
|
-
}
|
|
510
|
-
if (!dt?.isValid) {
|
|
511
|
-
throw new Error("Invalid Date");
|
|
512
|
-
}
|
|
513
|
-
const minDate = DateTime.fromSeconds(1);
|
|
514
|
-
const maxDate = DateTime.fromMillis(2147483647 * 1e3);
|
|
515
|
-
if (isTimeStamp) {
|
|
516
|
-
if (dt < minDate) {
|
|
517
|
-
throw new Error(
|
|
518
|
-
"Atlassian Forge does not support zero or negative timestamps. Allowed range: from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'."
|
|
519
|
-
);
|
|
520
|
-
}
|
|
521
|
-
if (dt > maxDate) {
|
|
522
|
-
throw new Error(
|
|
523
|
-
"Atlassian Forge does not support timestamps beyond 2038-01-19 03:14:07.999999. Please use a smaller date within the supported range."
|
|
524
|
-
);
|
|
525
|
-
}
|
|
526
|
-
}
|
|
527
|
-
return dt.toFormat(format);
|
|
528
|
-
}
|
|
529
|
-
function getPrimaryKeys(table) {
|
|
530
|
-
const { columns, primaryKeys } = getTableMetadata(table);
|
|
531
|
-
const columnPrimaryKeys = Object.entries(columns).filter(([, column]) => column.primary);
|
|
532
|
-
if (columnPrimaryKeys.length > 0) {
|
|
533
|
-
return columnPrimaryKeys;
|
|
534
|
-
}
|
|
535
|
-
if (Array.isArray(primaryKeys) && primaryKeys.length > 0) {
|
|
536
|
-
const primaryKeyColumns = /* @__PURE__ */ new Set();
|
|
537
|
-
primaryKeys.forEach((primaryKeyBuilder) => {
|
|
538
|
-
Object.entries(columns).filter(([, column]) => {
|
|
539
|
-
return primaryKeyBuilder.columns.includes(column);
|
|
540
|
-
}).forEach(([name, column]) => {
|
|
541
|
-
primaryKeyColumns.add([name, column]);
|
|
542
|
-
});
|
|
543
|
-
});
|
|
544
|
-
return Array.from(primaryKeyColumns);
|
|
545
|
-
}
|
|
546
|
-
return [];
|
|
547
|
-
}
|
|
548
|
-
function processForeignKeys(table, foreignKeysSymbol, extraSymbol) {
|
|
549
|
-
const foreignKeys = [];
|
|
550
|
-
if (foreignKeysSymbol) {
|
|
551
|
-
const fkArray = table[foreignKeysSymbol];
|
|
552
|
-
if (fkArray) {
|
|
553
|
-
fkArray.forEach((fk) => {
|
|
554
|
-
if (fk.reference) {
|
|
555
|
-
const item = fk.reference(fk);
|
|
556
|
-
foreignKeys.push(item);
|
|
557
|
-
}
|
|
558
|
-
});
|
|
559
|
-
}
|
|
560
|
-
}
|
|
561
|
-
if (extraSymbol) {
|
|
562
|
-
const extraConfigBuilder = table[extraSymbol];
|
|
563
|
-
if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
|
|
564
|
-
const configBuilderData = extraConfigBuilder(table);
|
|
565
|
-
if (configBuilderData) {
|
|
566
|
-
const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
|
|
567
|
-
(item) => item.value ?? item
|
|
568
|
-
);
|
|
569
|
-
configBuilders.forEach((builder) => {
|
|
570
|
-
if (!builder?.constructor) return;
|
|
571
|
-
const builderName = builder.constructor.name.toLowerCase();
|
|
572
|
-
if (builderName.includes("foreignkeybuilder")) {
|
|
573
|
-
foreignKeys.push(builder);
|
|
574
|
-
}
|
|
575
|
-
});
|
|
576
|
-
}
|
|
577
|
-
}
|
|
578
|
-
}
|
|
579
|
-
return foreignKeys;
|
|
580
|
-
}
|
|
581
|
-
function getTableMetadata(table) {
|
|
582
|
-
const symbols = Object.getOwnPropertySymbols(table);
|
|
583
|
-
const nameSymbol = symbols.find((s) => s.toString().includes("Name"));
|
|
584
|
-
const columnsSymbol = symbols.find((s) => s.toString().includes("Columns"));
|
|
585
|
-
const foreignKeysSymbol = symbols.find((s) => s.toString().includes("ForeignKeys)"));
|
|
586
|
-
const extraSymbol = symbols.find((s) => s.toString().includes("ExtraConfigBuilder"));
|
|
587
|
-
const builders = {
|
|
588
|
-
indexes: [],
|
|
589
|
-
checks: [],
|
|
590
|
-
foreignKeys: [],
|
|
591
|
-
primaryKeys: [],
|
|
592
|
-
uniqueConstraints: [],
|
|
593
|
-
extras: []
|
|
594
|
-
};
|
|
595
|
-
builders.foreignKeys = processForeignKeys(table, foreignKeysSymbol, extraSymbol);
|
|
596
|
-
if (extraSymbol) {
|
|
597
|
-
const extraConfigBuilder = table[extraSymbol];
|
|
598
|
-
if (extraConfigBuilder && typeof extraConfigBuilder === "function") {
|
|
599
|
-
const configBuilderData = extraConfigBuilder(table);
|
|
600
|
-
if (configBuilderData) {
|
|
601
|
-
const configBuilders = Array.isArray(configBuilderData) ? configBuilderData : Object.values(configBuilderData).map(
|
|
602
|
-
(item) => item.value ?? item
|
|
603
|
-
);
|
|
604
|
-
configBuilders.forEach((builder) => {
|
|
605
|
-
if (!builder?.constructor) return;
|
|
606
|
-
const builderName = builder.constructor.name.toLowerCase();
|
|
607
|
-
const builderMap = {
|
|
608
|
-
indexbuilder: builders.indexes,
|
|
609
|
-
checkbuilder: builders.checks,
|
|
610
|
-
primarykeybuilder: builders.primaryKeys,
|
|
611
|
-
uniqueconstraintbuilder: builders.uniqueConstraints
|
|
612
|
-
};
|
|
613
|
-
for (const [type, array] of Object.entries(builderMap)) {
|
|
614
|
-
if (builderName.includes(type)) {
|
|
615
|
-
array.push(builder);
|
|
616
|
-
break;
|
|
617
|
-
}
|
|
618
|
-
}
|
|
619
|
-
builders.extras.push(builder);
|
|
620
|
-
});
|
|
621
|
-
}
|
|
622
|
-
}
|
|
623
|
-
}
|
|
624
|
-
return {
|
|
625
|
-
tableName: nameSymbol ? table[nameSymbol] : "",
|
|
626
|
-
columns: columnsSymbol ? table[columnsSymbol] : {},
|
|
627
|
-
...builders
|
|
628
|
-
};
|
|
629
|
-
}
|
|
630
|
-
function generateDropTableStatements(tables, options) {
|
|
631
|
-
const dropStatements = [];
|
|
632
|
-
const validOptions = options ?? { sequence: true, table: true };
|
|
633
|
-
if (!validOptions.sequence && !validOptions.table) {
|
|
634
|
-
console.warn('No drop operations requested: both "table" and "sequence" options are false');
|
|
635
|
-
return [];
|
|
636
|
-
}
|
|
637
|
-
tables.forEach((tableName) => {
|
|
638
|
-
if (validOptions.table) {
|
|
639
|
-
dropStatements.push(`DROP TABLE IF EXISTS \`${tableName}\`;`);
|
|
640
|
-
}
|
|
641
|
-
if (validOptions.sequence) {
|
|
642
|
-
dropStatements.push(`DROP SEQUENCE IF EXISTS \`${tableName}\`;`);
|
|
643
|
-
}
|
|
644
|
-
});
|
|
645
|
-
return dropStatements;
|
|
646
|
-
}
|
|
647
|
-
function mapSelectTableToAlias(table, uniqPrefix, aliasMap) {
|
|
648
|
-
const { columns, tableName } = getTableMetadata(table);
|
|
649
|
-
const selectionsTableFields = {};
|
|
650
|
-
Object.keys(columns).forEach((name) => {
|
|
651
|
-
const column = columns[name];
|
|
652
|
-
const uniqName = `a_${uniqPrefix}_${tableName}_${column.name}`.toLowerCase();
|
|
653
|
-
const fieldAlias = sql$1.raw(uniqName);
|
|
654
|
-
selectionsTableFields[name] = sql$1`${column} as \`${fieldAlias}\``;
|
|
655
|
-
aliasMap[uniqName] = column;
|
|
656
|
-
});
|
|
657
|
-
return selectionsTableFields;
|
|
658
|
-
}
|
|
659
|
-
function isDrizzleColumn(column) {
|
|
660
|
-
return column && typeof column === "object" && "table" in column;
|
|
661
|
-
}
|
|
662
|
-
function mapSelectAllFieldsToAlias(selections, name, uniqName, fields, aliasMap) {
|
|
663
|
-
if (isTable(fields)) {
|
|
664
|
-
selections[name] = mapSelectTableToAlias(fields, uniqName, aliasMap);
|
|
665
|
-
} else if (isDrizzleColumn(fields)) {
|
|
666
|
-
const column = fields;
|
|
667
|
-
const uniqAliasName = `a_${uniqName}_${column.name}`.toLowerCase();
|
|
668
|
-
let aliasName = sql$1.raw(uniqAliasName);
|
|
669
|
-
selections[name] = sql$1`${column} as \`${aliasName}\``;
|
|
670
|
-
aliasMap[uniqAliasName] = column;
|
|
671
|
-
} else if (isSQLWrapper(fields)) {
|
|
672
|
-
selections[name] = fields;
|
|
673
|
-
} else {
|
|
674
|
-
const innerSelections = {};
|
|
675
|
-
Object.entries(fields).forEach(([iname, ifields]) => {
|
|
676
|
-
mapSelectAllFieldsToAlias(innerSelections, iname, `${uniqName}_${iname}`, ifields, aliasMap);
|
|
677
|
-
});
|
|
678
|
-
selections[name] = innerSelections;
|
|
679
|
-
}
|
|
680
|
-
return selections;
|
|
681
|
-
}
|
|
682
|
-
function mapSelectFieldsWithAlias(fields) {
|
|
683
|
-
if (!fields) {
|
|
684
|
-
throw new Error("fields is empty");
|
|
685
|
-
}
|
|
686
|
-
const aliasMap = {};
|
|
687
|
-
const selections = {};
|
|
688
|
-
Object.entries(fields).forEach(([name, fields2]) => {
|
|
689
|
-
mapSelectAllFieldsToAlias(selections, name, name, fields2, aliasMap);
|
|
690
|
-
});
|
|
691
|
-
return { selections, aliasMap };
|
|
692
|
-
}
|
|
693
|
-
function getAliasFromDrizzleAlias(value) {
|
|
694
|
-
const isSQL = value !== null && typeof value === "object" && isSQLWrapper(value) && "queryChunks" in value;
|
|
695
|
-
if (isSQL) {
|
|
696
|
-
const sql2 = value;
|
|
697
|
-
const queryChunks = sql2.queryChunks;
|
|
698
|
-
if (queryChunks.length > 3) {
|
|
699
|
-
const aliasNameChunk = queryChunks[queryChunks.length - 2];
|
|
700
|
-
if (isSQLWrapper(aliasNameChunk) && "queryChunks" in aliasNameChunk) {
|
|
701
|
-
const aliasNameChunkSql = aliasNameChunk;
|
|
702
|
-
if (aliasNameChunkSql.queryChunks?.length === 1 && aliasNameChunkSql.queryChunks[0]) {
|
|
703
|
-
const queryChunksStringChunc = aliasNameChunkSql.queryChunks[0];
|
|
704
|
-
if ("value" in queryChunksStringChunc) {
|
|
705
|
-
const values = queryChunksStringChunc.value;
|
|
706
|
-
if (values && values.length === 1) {
|
|
707
|
-
return values[0];
|
|
708
|
-
}
|
|
709
|
-
}
|
|
710
|
-
}
|
|
711
|
-
}
|
|
712
|
-
}
|
|
713
|
-
}
|
|
714
|
-
return void 0;
|
|
715
|
-
}
|
|
716
|
-
function transformValue(value, alias, aliasMap) {
|
|
717
|
-
const column = aliasMap[alias];
|
|
718
|
-
if (!column) return value;
|
|
719
|
-
let customColumn = column;
|
|
720
|
-
const fromDriver = customColumn?.mapFrom;
|
|
721
|
-
if (fromDriver && value !== null && value !== void 0) {
|
|
722
|
-
return fromDriver(value);
|
|
723
|
-
}
|
|
724
|
-
return value;
|
|
725
|
-
}
|
|
726
|
-
function transformObject(obj, selections, aliasMap) {
|
|
727
|
-
const result = {};
|
|
728
|
-
for (const [key, value] of Object.entries(obj)) {
|
|
729
|
-
const selection = selections[key];
|
|
730
|
-
const alias = getAliasFromDrizzleAlias(selection);
|
|
731
|
-
if (alias && aliasMap[alias]) {
|
|
732
|
-
result[key] = transformValue(value, alias, aliasMap);
|
|
733
|
-
} else if (selection && typeof selection === "object" && !isSQLWrapper(selection)) {
|
|
734
|
-
result[key] = transformObject(
|
|
735
|
-
value,
|
|
736
|
-
selection,
|
|
737
|
-
aliasMap
|
|
738
|
-
);
|
|
739
|
-
} else {
|
|
740
|
-
result[key] = value;
|
|
741
|
-
}
|
|
742
|
-
}
|
|
743
|
-
return result;
|
|
744
|
-
}
|
|
745
|
-
function applyFromDriverTransform(rows, selections, aliasMap) {
|
|
746
|
-
return rows.map((row) => {
|
|
747
|
-
const transformed = transformObject(
|
|
748
|
-
row,
|
|
749
|
-
selections,
|
|
750
|
-
aliasMap
|
|
751
|
-
);
|
|
752
|
-
return processNullBranches(transformed);
|
|
753
|
-
});
|
|
754
|
-
}
|
|
755
|
-
function processNullBranches(obj) {
|
|
756
|
-
if (obj === null || typeof obj !== "object") {
|
|
757
|
-
return obj;
|
|
758
|
-
}
|
|
759
|
-
if (obj.constructor && obj.constructor.name !== "Object") {
|
|
760
|
-
return obj;
|
|
761
|
-
}
|
|
762
|
-
const result = {};
|
|
763
|
-
let allNull = true;
|
|
764
|
-
for (const [key, value] of Object.entries(obj)) {
|
|
765
|
-
if (value === null || value === void 0) {
|
|
766
|
-
result[key] = null;
|
|
767
|
-
continue;
|
|
768
|
-
}
|
|
769
|
-
if (typeof value === "object") {
|
|
770
|
-
const processed = processNullBranches(value);
|
|
771
|
-
result[key] = processed;
|
|
772
|
-
if (processed !== null) {
|
|
773
|
-
allNull = false;
|
|
774
|
-
}
|
|
775
|
-
} else {
|
|
776
|
-
result[key] = value;
|
|
777
|
-
allNull = false;
|
|
778
|
-
}
|
|
779
|
-
}
|
|
780
|
-
return allNull ? null : result;
|
|
781
|
-
}
|
|
782
|
-
function formatLimitOffset(limitOrOffset) {
|
|
783
|
-
if (typeof limitOrOffset !== "number" || isNaN(limitOrOffset)) {
|
|
784
|
-
throw new Error("limitOrOffset must be a valid number");
|
|
785
|
-
}
|
|
786
|
-
return sql$1.raw(`${limitOrOffset}`);
|
|
787
|
-
}
|
|
788
|
-
function nextVal(sequenceName) {
|
|
789
|
-
return sql$1.raw(`NEXTVAL(${sequenceName})`);
|
|
790
|
-
}
|
|
791
|
-
async function printQueriesWithPlan(forgeSQLORM, timeDiffMs, timeout) {
|
|
792
|
-
try {
|
|
793
|
-
const statementsTable = clusterStatementsSummary;
|
|
794
|
-
const timeoutMs2 = timeout ?? 3e3;
|
|
795
|
-
const results = await withTimeout(
|
|
796
|
-
forgeSQLORM.getDrizzleQueryBuilder().select({
|
|
797
|
-
digestText: withTidbHint(statementsTable.digestText),
|
|
798
|
-
avgLatency: statementsTable.avgLatency,
|
|
799
|
-
avgMem: statementsTable.avgMem,
|
|
800
|
-
execCount: statementsTable.execCount,
|
|
801
|
-
plan: statementsTable.plan,
|
|
802
|
-
stmtType: statementsTable.stmtType
|
|
803
|
-
}).from(statementsTable).where(
|
|
804
|
-
and(
|
|
805
|
-
isNotNull(statementsTable.digest),
|
|
806
|
-
not(ilike(statementsTable.digestText, "%information_schema%")),
|
|
807
|
-
notInArray(statementsTable.stmtType, [
|
|
808
|
-
"Use",
|
|
809
|
-
"Set",
|
|
810
|
-
"Show",
|
|
811
|
-
"Commit",
|
|
812
|
-
"Rollback",
|
|
813
|
-
"Begin"
|
|
814
|
-
]),
|
|
815
|
-
gte(
|
|
816
|
-
statementsTable.lastSeen,
|
|
817
|
-
sql$1`DATE_SUB
|
|
818
|
-
(NOW(), INTERVAL
|
|
819
|
-
${timeDiffMs * 1e3}
|
|
820
|
-
MICROSECOND
|
|
821
|
-
)`
|
|
822
|
-
)
|
|
823
|
-
)
|
|
824
|
-
),
|
|
825
|
-
`Timeout ${timeoutMs2}ms in printQueriesWithPlan - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
|
|
826
|
-
timeoutMs2 + 200
|
|
827
|
-
);
|
|
828
|
-
results.forEach((result) => {
|
|
829
|
-
const avgTimeMs = Number(result.avgLatency) / 1e6;
|
|
830
|
-
const avgMemMB = Number(result.avgMem) / 1e6;
|
|
831
|
-
console.warn(
|
|
832
|
-
`SQL: ${result.digestText} | Memory: ${avgMemMB.toFixed(2)} MB | Time: ${avgTimeMs.toFixed(2)} ms | stmtType: ${result.stmtType} | Executions: ${result.execCount}
|
|
833
|
-
Plan:${result.plan}`
|
|
834
|
-
);
|
|
835
|
-
});
|
|
836
|
-
} catch (error) {
|
|
837
|
-
console.debug(
|
|
838
|
-
`Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
|
|
839
|
-
error
|
|
840
|
-
);
|
|
841
|
-
}
|
|
842
|
-
}
|
|
843
|
-
const SESSION_ALIAS_NAME_ORM = "orm";
|
|
844
|
-
async function slowQueryPerHours(forgeSQLORM, hours, timeout) {
|
|
845
|
-
try {
|
|
846
|
-
const timeoutMs2 = timeout ?? 1500;
|
|
847
|
-
const results = await withTimeout(
|
|
848
|
-
forgeSQLORM.getDrizzleQueryBuilder().select({
|
|
849
|
-
query: withTidbHint(slowQuery.query),
|
|
850
|
-
queryTime: slowQuery.queryTime,
|
|
851
|
-
memMax: slowQuery.memMax,
|
|
852
|
-
plan: slowQuery.plan
|
|
853
|
-
}).from(slowQuery).where(
|
|
854
|
-
and(
|
|
855
|
-
isNotNull(slowQuery.digest),
|
|
856
|
-
ne(slowQuery.sessionAlias, SESSION_ALIAS_NAME_ORM),
|
|
857
|
-
gte(
|
|
858
|
-
slowQuery.time,
|
|
859
|
-
sql$1`DATE_SUB
|
|
860
|
-
(NOW(), INTERVAL
|
|
861
|
-
${hours}
|
|
862
|
-
HOUR
|
|
863
|
-
)`
|
|
864
|
-
)
|
|
865
|
-
)
|
|
866
|
-
),
|
|
867
|
-
`Timeout ${timeoutMs2}ms in slowQueryPerHours - transient timeouts are usually fine; repeated timeouts mean this diagnostic query is consistently slow and should be investigated`,
|
|
868
|
-
timeoutMs2
|
|
869
|
-
);
|
|
870
|
-
const response = [];
|
|
871
|
-
results.forEach((result) => {
|
|
872
|
-
const memMaxMB = result.memMax ? Number(result.memMax) / 1e6 : 0;
|
|
873
|
-
const message = `Found SlowQuery SQL: ${result.query} | Memory: ${memMaxMB.toFixed(2)} MB | Time: ${result.queryTime} ms
|
|
874
|
-
Plan:${result.plan}`;
|
|
875
|
-
response.push(message);
|
|
876
|
-
console.warn(message);
|
|
877
|
-
});
|
|
878
|
-
return response;
|
|
879
|
-
} catch (error) {
|
|
880
|
-
console.debug(
|
|
881
|
-
`Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}. Try again after some time`,
|
|
882
|
-
error
|
|
883
|
-
);
|
|
884
|
-
return [
|
|
885
|
-
`Error occurred while retrieving query execution plan: ${error instanceof Error ? error.message : "Unknown error"}`
|
|
886
|
-
];
|
|
887
|
-
}
|
|
888
|
-
}
|
|
889
|
-
async function withTimeout(promise, message, timeoutMs2) {
|
|
890
|
-
let timeoutId;
|
|
891
|
-
const timeoutPromise = new Promise((_, reject) => {
|
|
892
|
-
timeoutId = setTimeout(() => {
|
|
893
|
-
reject(new Error(message));
|
|
894
|
-
}, timeoutMs2);
|
|
895
|
-
});
|
|
896
|
-
try {
|
|
897
|
-
return await Promise.race([promise, timeoutPromise]);
|
|
898
|
-
} finally {
|
|
899
|
-
if (timeoutId) {
|
|
900
|
-
clearTimeout(timeoutId);
|
|
901
|
-
}
|
|
902
|
-
}
|
|
903
|
-
}
|
|
904
|
-
function withTidbHint(column) {
|
|
905
|
-
return sql$1`/*+ SET_VAR(tidb_session_alias=${sql$1.raw(`${SESSION_ALIAS_NAME_ORM}`)}) */ ${column}`;
|
|
906
|
-
}
|
|
907
|
-
const CACHE_CONSTANTS = {
|
|
908
|
-
BATCH_SIZE: 25,
|
|
909
|
-
MAX_RETRY_ATTEMPTS: 3,
|
|
910
|
-
INITIAL_RETRY_DELAY: 1e3,
|
|
911
|
-
RETRY_DELAY_MULTIPLIER: 2,
|
|
912
|
-
DEFAULT_ENTITY_QUERY_NAME: "sql",
|
|
913
|
-
DEFAULT_EXPIRATION_NAME: "expiration",
|
|
914
|
-
DEFAULT_DATA_NAME: "data",
|
|
915
|
-
HASH_LENGTH: 32
|
|
916
|
-
};
|
|
917
|
-
function getCurrentTime() {
|
|
918
|
-
const dt = DateTime.now();
|
|
919
|
-
return Math.floor(dt.toSeconds());
|
|
920
|
-
}
|
|
921
|
-
function nowPlusSeconds(secondsToAdd) {
|
|
922
|
-
const dt = DateTime.now().plus({ seconds: secondsToAdd });
|
|
923
|
-
return Math.floor(dt.toSeconds());
|
|
924
|
-
}
|
|
925
|
-
function extractBacktickedValues(sql2) {
|
|
926
|
-
const regex = /`([^`]+)`/g;
|
|
927
|
-
const matches = /* @__PURE__ */ new Set();
|
|
928
|
-
let match;
|
|
929
|
-
while ((match = regex.exec(sql2.toLowerCase())) !== null) {
|
|
930
|
-
if (!match[1].startsWith("a_")) {
|
|
931
|
-
matches.add(`\`${match[1]}\``);
|
|
932
|
-
}
|
|
933
|
-
}
|
|
934
|
-
return Array.from(matches).sort().join(",");
|
|
935
|
-
}
|
|
936
|
-
function hashKey(query) {
|
|
937
|
-
const h = crypto.createHash("sha256");
|
|
938
|
-
h.update(query.sql.toLowerCase());
|
|
939
|
-
h.update(JSON.stringify(query.params));
|
|
940
|
-
return "CachedQuery_" + h.digest("hex").slice(0, CACHE_CONSTANTS.HASH_LENGTH);
|
|
941
|
-
}
|
|
942
|
-
async function deleteCacheEntriesInBatches(results, cacheEntityName) {
|
|
943
|
-
for (let i = 0; i < results.length; i += CACHE_CONSTANTS.BATCH_SIZE) {
|
|
944
|
-
const batch = results.slice(i, i + CACHE_CONSTANTS.BATCH_SIZE);
|
|
945
|
-
let transactionBuilder = kvs.transact();
|
|
946
|
-
batch.forEach((result) => {
|
|
947
|
-
transactionBuilder = transactionBuilder.delete(result.key, { entityName: cacheEntityName });
|
|
948
|
-
});
|
|
949
|
-
await transactionBuilder.execute();
|
|
950
|
-
}
|
|
951
|
-
}
|
|
952
|
-
async function clearCursorCache(tables, cursor, options) {
|
|
953
|
-
const cacheEntityName = options.cacheEntityName;
|
|
954
|
-
if (!cacheEntityName) {
|
|
955
|
-
throw new Error("cacheEntityName is not configured");
|
|
956
|
-
}
|
|
957
|
-
const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
|
|
958
|
-
let filters = new Filter();
|
|
959
|
-
for (const table of tables) {
|
|
960
|
-
const wrapIfNeeded = options.cacheWrapTable ? `\`${table}\`` : table;
|
|
961
|
-
filters.or(entityQueryName, FilterConditions.contains(wrapIfNeeded?.toLowerCase()));
|
|
962
|
-
}
|
|
963
|
-
let entityQueryBuilder = kvs.entity(cacheEntityName).query().index(entityQueryName).filters(filters);
|
|
964
|
-
if (cursor) {
|
|
965
|
-
entityQueryBuilder = entityQueryBuilder.cursor(cursor);
|
|
966
|
-
}
|
|
967
|
-
const listResult = await entityQueryBuilder.limit(100).getMany();
|
|
968
|
-
if (options.logCache) {
|
|
969
|
-
console.warn(`clear cache Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
|
|
970
|
-
}
|
|
971
|
-
await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
|
|
972
|
-
if (listResult.nextCursor) {
|
|
973
|
-
return listResult.results.length + await clearCursorCache(tables, listResult.nextCursor, options);
|
|
974
|
-
} else {
|
|
975
|
-
return listResult.results.length;
|
|
976
|
-
}
|
|
977
|
-
}
|
|
978
|
-
async function clearExpirationCursorCache(cursor, options) {
|
|
979
|
-
const cacheEntityName = options.cacheEntityName;
|
|
980
|
-
if (!cacheEntityName) {
|
|
981
|
-
throw new Error("cacheEntityName is not configured");
|
|
982
|
-
}
|
|
983
|
-
const entityExpirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
|
|
984
|
-
let entityQueryBuilder = kvs.entity(cacheEntityName).query().index(entityExpirationName).where(WhereConditions.lessThan(Math.floor(DateTime.now().toSeconds())));
|
|
985
|
-
if (cursor) {
|
|
986
|
-
entityQueryBuilder = entityQueryBuilder.cursor(cursor);
|
|
987
|
-
}
|
|
988
|
-
const listResult = await entityQueryBuilder.limit(100).getMany();
|
|
989
|
-
if (options.logCache) {
|
|
990
|
-
console.warn(`clear expired Records: ${JSON.stringify(listResult.results.map((r) => r.key))}`);
|
|
991
|
-
}
|
|
992
|
-
await deleteCacheEntriesInBatches(listResult.results, cacheEntityName);
|
|
993
|
-
if (listResult.nextCursor) {
|
|
994
|
-
return listResult.results.length + await clearExpirationCursorCache(listResult.nextCursor, options);
|
|
995
|
-
} else {
|
|
996
|
-
return listResult.results.length;
|
|
997
|
-
}
|
|
998
|
-
}
|
|
999
|
-
async function executeWithRetry(operation, operationName) {
|
|
1000
|
-
let attempt = 0;
|
|
1001
|
-
let delay = CACHE_CONSTANTS.INITIAL_RETRY_DELAY;
|
|
1002
|
-
while (attempt < CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS) {
|
|
1003
|
-
try {
|
|
1004
|
-
return await operation();
|
|
1005
|
-
} catch (err) {
|
|
1006
|
-
console.warn(`Error during ${operationName}: ${err.message}, retry ${attempt}`, err);
|
|
1007
|
-
attempt++;
|
|
1008
|
-
if (attempt >= CACHE_CONSTANTS.MAX_RETRY_ATTEMPTS) {
|
|
1009
|
-
console.error(`Error during ${operationName}: ${err.message}`, err);
|
|
1010
|
-
throw err;
|
|
1011
|
-
}
|
|
1012
|
-
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
1013
|
-
delay *= CACHE_CONSTANTS.RETRY_DELAY_MULTIPLIER;
|
|
1014
|
-
}
|
|
1015
|
-
}
|
|
1016
|
-
throw new Error(`Maximum retry attempts exceeded for ${operationName}`);
|
|
1017
|
-
}
|
|
1018
|
-
async function clearCache(schema, options) {
|
|
1019
|
-
const tableName = getTableName(schema);
|
|
1020
|
-
if (cacheApplicationContext.getStore()) {
|
|
1021
|
-
cacheApplicationContext.getStore()?.tables.add(tableName);
|
|
1022
|
-
} else {
|
|
1023
|
-
await clearTablesCache([tableName], options);
|
|
1024
|
-
}
|
|
1025
|
-
}
|
|
1026
|
-
async function clearTablesCache(tables, options) {
|
|
1027
|
-
if (!options.cacheEntityName) {
|
|
1028
|
-
throw new Error("cacheEntityName is not configured");
|
|
1029
|
-
}
|
|
1030
|
-
const startTime = DateTime.now();
|
|
1031
|
-
let totalRecords = 0;
|
|
1032
|
-
try {
|
|
1033
|
-
totalRecords = await executeWithRetry(
|
|
1034
|
-
() => clearCursorCache(tables, "", options),
|
|
1035
|
-
"clearing cache"
|
|
1036
|
-
);
|
|
1037
|
-
} finally {
|
|
1038
|
-
if (options.logCache) {
|
|
1039
|
-
const duration = DateTime.now().toSeconds() - startTime.toSeconds();
|
|
1040
|
-
console.info(`Cleared ${totalRecords} cache records in ${duration} seconds`);
|
|
1041
|
-
}
|
|
1042
|
-
}
|
|
1043
|
-
}
|
|
1044
|
-
async function clearExpiredCache(options) {
|
|
1045
|
-
if (!options.cacheEntityName) {
|
|
1046
|
-
throw new Error("cacheEntityName is not configured");
|
|
1047
|
-
}
|
  const startTime = DateTime.now();
  let totalRecords = 0;
  try {
    totalRecords = await executeWithRetry(
      () => clearExpirationCursorCache("", options),
      "clearing expired cache"
    );
  } finally {
    const duration = DateTime.now().toSeconds() - startTime.toSeconds();
    if (options?.logCache) {
      console.debug(`Cleared ${totalRecords} expired cache records in ${duration} seconds`);
    }
  }
}
async function getFromCache(query, options) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
  const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
  const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
  const sqlQuery = query.toSQL();
  const key = hashKey(sqlQuery);
  if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
    if (options.logCache) {
      console.warn(`Context contains value to clear. Skip getting from cache`);
    }
    return void 0;
  }
  try {
    const cacheResult = await kvs.entity(options.cacheEntityName).get(key);
    if (cacheResult && cacheResult[expirationName] >= getCurrentTime() && extractBacktickedValues(sqlQuery.sql) === cacheResult[entityQueryName]) {
      if (options.logCache) {
        console.warn(`Get value from cache, cacheKey: ${key}`);
      }
      const results = cacheResult[dataName];
      return JSON.parse(results);
    }
  } catch (error) {
    console.error(`Error getting from cache: ${error.message}`, error);
  }
  return void 0;
}
async function setCacheResult(query, options, results, cacheTtl) {
  if (!options.cacheEntityName) {
    throw new Error("cacheEntityName is not configured");
  }
  try {
    const entityQueryName = options.cacheEntityQueryName ?? CACHE_CONSTANTS.DEFAULT_ENTITY_QUERY_NAME;
    const expirationName = options.cacheEntityExpirationName ?? CACHE_CONSTANTS.DEFAULT_EXPIRATION_NAME;
    const dataName = options.cacheEntityDataName ?? CACHE_CONSTANTS.DEFAULT_DATA_NAME;
    const sqlQuery = query.toSQL();
    if (await isTableContainsTableInCacheContext(sqlQuery.sql, options)) {
      if (options.logCache) {
        console.warn(`Context contains value to clear. Skip setting from cache`);
      }
      return;
    }
    const key = hashKey(sqlQuery);
    await kvs.transact().set(
      key,
      {
        [entityQueryName]: extractBacktickedValues(sqlQuery.sql),
        [expirationName]: nowPlusSeconds(cacheTtl),
        [dataName]: JSON.stringify(results)
      },
      { entityName: options.cacheEntityName }
    ).execute();
    if (options.logCache) {
      console.warn(`Store value to cache, cacheKey: ${key}`);
    }
  } catch (error) {
    console.error(`Error setting cache: ${error.message}`, error);
  }
}
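// Editor's note: an illustrative sketch, not part of the published bundle.
// With the default option names, a cache entry written by setCacheResult()
// for a query touching the `users` table would sit in the Forge KVS custom
// entity roughly as:
//
//   {
//     sql: "`users`",                     // backticked identifiers extracted from the statement
//     expiration: 1735689600,             // getCurrentTime() + cacheTtl, in seconds
//     data: "[{\"id\":1,\"name\":\"a\"}]" // JSON-serialized result rows
//   }
//
// getFromCache() only returns the entry while `expiration` is still in the
// future and the extracted identifiers still match, so a hash-key collision
// with a different statement cannot serve stale rows.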
function isQuery(obj) {
  return typeof obj === "object" && obj !== null && typeof obj.sql === "string" && Array.isArray(obj.params);
}
const cacheApplicationContext = new AsyncLocalStorage();
const localCacheApplicationContext = new AsyncLocalStorage();
async function saveTableIfInsideCacheContext(table) {
  const context = cacheApplicationContext.getStore();
  if (context) {
    const tableName = getTableName(table).toLowerCase();
    context.tables.add(tableName);
  }
}
async function saveQueryLocalCacheQuery(query, rows, options) {
  const context = localCacheApplicationContext.getStore();
  if (context) {
    if (!context.cache) {
      context.cache = {};
    }
    let sql2;
    if (isQuery(query)) {
      sql2 = { toSQL: () => query };
    } else {
      sql2 = query;
    }
    const key = hashKey(sql2.toSQL());
    context.cache[key] = {
      sql: sql2.toSQL().sql.toLowerCase(),
      data: rows
    };
    if (options.logCache) {
      const q = sql2.toSQL();
      console.debug(
        `[forge-sql-orm][local-cache][SAVE] Stored result in cache. sql="${q.sql}", params=${JSON.stringify(q.params)}`
      );
    }
  }
}
async function getQueryLocalCacheQuery(query, options) {
  const context = localCacheApplicationContext.getStore();
  if (context) {
    if (!context.cache) {
      context.cache = {};
    }
    let sql2;
    if (isQuery(query)) {
      sql2 = { toSQL: () => query };
    } else {
      sql2 = query;
    }
    const key = hashKey(sql2.toSQL());
    if (context.cache[key] && context.cache[key].sql === sql2.toSQL().sql.toLowerCase()) {
      if (options.logCache) {
        const q = sql2.toSQL();
        console.debug(
          `[forge-sql-orm][local-cache][HIT] Returned cached result. sql="${q.sql}", params=${JSON.stringify(q.params)}`
        );
      }
      return context.cache[key].data;
    }
  }
  return void 0;
}
async function evictLocalCacheQuery(table, options) {
  const context = localCacheApplicationContext.getStore();
  if (context) {
    if (!context.cache) {
      context.cache = {};
    }
    const tableName = getTableName(table);
    const searchString = options.cacheWrapTable ? `\`${tableName}\`` : tableName;
    const keyToEvicts = [];
    Object.keys(context.cache).forEach((key) => {
      if (context.cache[key].sql.includes(searchString)) {
        keyToEvicts.push(key);
      }
    });
    keyToEvicts.forEach((key) => delete context.cache[key]);
  }
}
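// Editor's note: an illustrative sketch, not part of the published bundle.
// The three helpers above form a per-invocation memo keyed by
// hashKey(query.toSQL()); it only takes effect inside
// localCacheApplicationContext.run(...). `db` is assumed to be an instance
// already patched by patchDbWithSelectAliased() (defined later in this file),
// and `users` a hypothetical drizzle mysqlTable:
async function exampleLocalCacheFlow(db, users) {
  const first = await db.selectFrom(users);  // executes; saveQueryLocalCacheQuery memoizes the rows
  const second = await db.selectFrom(users); // identical SQL + params => getQueryLocalCacheQuery hit
  await db.updateWithCacheContext(users).set({ name: "x" }); // evictLocalCacheQuery drops memo entries mentioning `users`
  const third = await db.selectFrom(users);  // memo miss => the query runs again
  return { first, second, third };
}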
async function isTableContainsTableInCacheContext(sql2, options) {
  const context = cacheApplicationContext.getStore();
  if (!context) {
    return false;
  }
  const tables = Array.from(context.tables);
  const lowerSql = sql2.toLowerCase();
  return tables.some((table) => {
    const tablePattern = options.cacheWrapTable ? `\`${table}\`` : table;
    return lowerSql.includes(tablePattern);
  });
}
class ForgeSQLCrudOperations {
  forgeOperations;
  options;
  /**
   * Creates a new instance of ForgeSQLCrudOperations.
   * @param forgeSqlOperations - The ForgeSQL operations instance
   * @param options - Configuration options for the ORM
   */
  constructor(forgeSqlOperations, options) {
    this.forgeOperations = forgeSqlOperations;
    this.options = options;
  }
  /**
   * Inserts records into the database with optional versioning support.
   * If a version field exists in the schema, versioning is applied.
   *
   * This method automatically handles:
   * - Version field initialization for optimistic locking
   * - Batch insertion for multiple records
   * - Duplicate key handling with optional updates
   *
   * @template T - The type of the table schema
   * @param schema - The entity schema
   * @param models - Array of entities to insert
   * @param updateIfExists - Whether to update existing records (default: false)
   * @returns Promise that resolves to the number of inserted rows
   * @throws Error if the insert operation fails
   */
  async insert(schema, models, updateIfExists = false) {
    if (!models?.length) return 0;
    const { tableName, columns } = getTableMetadata(schema);
    const versionMetadata = this.validateVersionField(tableName, columns);
    const preparedModels = models.map(
      (model) => this.prepareModelWithVersion(model, versionMetadata, columns)
    );
    const queryBuilder = this.forgeOperations.insert(schema).values(preparedModels);
    const finalQuery = updateIfExists ? queryBuilder.onDuplicateKeyUpdate({
      set: Object.fromEntries(
        Object.keys(preparedModels[0]).map((key) => [key, schema[key]])
      )
    }) : queryBuilder;
    const result = await finalQuery;
    await saveTableIfInsideCacheContext(schema);
    return result[0].insertId;
  }
  /**
   * Deletes a record by its primary key with optional version check.
   * If versioning is enabled, ensures the record hasn't been modified since last read.
   *
   * This method automatically handles:
   * - Single primary key validation
   * - Optimistic locking checks if versioning is enabled
   * - Version field validation before deletion
   *
   * @template T - The type of the table schema
   * @param id - The ID of the record to delete
   * @param schema - The entity schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if the delete operation fails
   * @throws Error if multiple primary keys are found
   * @throws Error if optimistic locking check fails
   */
  async deleteById(id, schema) {
    const { tableName, columns } = getTableMetadata(schema);
    const primaryKeys = this.getPrimaryKeys(schema);
    if (primaryKeys.length !== 1) {
      throw new Error("Only single primary key is supported");
    }
    const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
    const versionMetadata = this.validateVersionField(tableName, columns);
    const conditions = [eq(primaryKeyColumn, id)];
    if (versionMetadata && columns) {
      const versionField = columns[versionMetadata.fieldName];
      if (versionField) {
        const oldModel = await this.getOldModel({ [primaryKeyName]: id }, schema, [
          versionMetadata.fieldName,
          versionField
        ]);
        conditions.push(eq(versionField, oldModel[versionMetadata.fieldName]));
      }
    }
    const queryBuilder = this.forgeOperations.delete(schema).where(and(...conditions));
    const result = await queryBuilder;
    if (versionMetadata && result[0].affectedRows === 0) {
      throw new Error(`Optimistic locking failed: record with primary key ${id} has been modified`);
    }
    await saveTableIfInsideCacheContext(schema);
    return result[0].affectedRows;
  }
  /**
   * Updates a record by its primary key with optimistic locking support.
   * If versioning is enabled:
   * - Retrieves the current version
   * - Checks for concurrent modifications
   * - Increments the version on successful update
   *
   * This method automatically handles:
   * - Primary key validation
   * - Version field retrieval and validation
   * - Optimistic locking conflict detection
   * - Version field incrementation
   *
   * @template T - The type of the table schema
   * @param entity - The entity with updated values (must include primary key)
   * @param schema - The entity schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if the primary key is not provided
   * @throws Error if optimistic locking check fails
   * @throws Error if multiple primary keys are found
   */
  async updateById(entity, schema) {
    const { tableName, columns } = getTableMetadata(schema);
    const primaryKeys = this.getPrimaryKeys(schema);
    if (primaryKeys.length !== 1) {
      throw new Error("Only single primary key is supported");
    }
    const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
    const versionMetadata = this.validateVersionField(tableName, columns);
    if (!(primaryKeyName in entity)) {
      throw new Error(`Primary key ${primaryKeyName} must be provided in the entity`);
    }
    const currentVersion = await this.getCurrentVersion(
      entity,
      primaryKeyName,
      versionMetadata,
      columns,
      schema
    );
    const updateData = this.prepareUpdateData(entity, versionMetadata, columns, currentVersion);
    const conditions = [
      eq(primaryKeyColumn, entity[primaryKeyName])
    ];
    if (versionMetadata && columns) {
      const versionField = columns[versionMetadata.fieldName];
      if (versionField) {
        conditions.push(eq(versionField, currentVersion));
      }
    }
    const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(and(...conditions));
    const result = await queryBuilder;
    if (versionMetadata && result[0].affectedRows === 0) {
      throw new Error(
        `Optimistic locking failed: record with primary key ${entity[primaryKeyName]} has been modified`
      );
    }
    await saveTableIfInsideCacheContext(schema);
    return result[0].affectedRows;
  }
  /**
   * Updates specified fields of records based on provided conditions.
   * This method does not support versioning and should be used with caution.
   *
   * @template T - The type of the table schema
   * @param {Partial<InferInsertModel<T>>} updateData - The data to update
   * @param {T} schema - The entity schema
   * @param {SQL<unknown>} where - The WHERE conditions
   * @returns {Promise<number>} Number of affected rows
   * @throws {Error} If WHERE conditions are not provided
   * @throws {Error} If the update operation fails
   */
  async updateFields(updateData, schema, where) {
    if (!where) {
      throw new Error("WHERE conditions must be provided");
    }
    const queryBuilder = this.forgeOperations.update(schema).set(updateData).where(where);
    const result = await queryBuilder;
    await saveTableIfInsideCacheContext(schema);
    return result[0].affectedRows;
  }
  // Helper methods
  /**
   * Gets primary keys from the schema.
   * @template T - The type of the table schema
   * @param {T} schema - The table schema
   * @returns {[string, AnyColumn][]} Array of primary key name and column pairs
   * @throws {Error} If no primary keys are found
   */
  getPrimaryKeys(schema) {
    const primaryKeys = getPrimaryKeys(schema);
    if (!primaryKeys) {
      throw new Error(`No primary keys found for schema: ${schema}`);
    }
    return primaryKeys;
  }
  /**
   * Validates and retrieves version field metadata.
   * @param {string} tableName - The name of the table
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @returns {Object | undefined} Version field metadata if valid, undefined otherwise
   */
  validateVersionField(tableName, columns) {
    if (this.options.disableOptimisticLocking) {
      return void 0;
    }
    const versionMetadata = this.options.additionalMetadata?.[tableName]?.versionField;
    if (!versionMetadata) return void 0;
    let fieldName = versionMetadata.fieldName;
    let versionField = columns[versionMetadata.fieldName];
    if (!versionField) {
      const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
      if (find) {
        fieldName = find[0];
        versionField = find[1];
      }
    }
    if (!versionField) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" not found in table ${tableName}. Versioning will be skipped.`
      );
      return void 0;
    }
    if (!versionField.notNull) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" in table ${tableName} is nullable. Versioning may not work correctly.`
      );
      return void 0;
    }
    const fieldType = versionField.getSQLType();
    const isSupportedType = fieldType === "datetime" || fieldType === "timestamp" || fieldType === "int" || fieldType === "number" || fieldType === "decimal";
    if (!isSupportedType) {
      console.warn(
        `Version field "${versionMetadata.fieldName}" in table ${tableName} has unsupported type "${fieldType}". Only datetime, timestamp, int, and decimal types are supported for versioning. Versioning will be skipped.`
      );
      return void 0;
    }
    return { fieldName, type: fieldType };
  }
  /**
   * Gets the current version of an entity.
   * @template T - The type of the table schema
   * @param {Partial<InferInsertModel<T>>} entity - The entity
   * @param {string} primaryKeyName - The name of the primary key
   * @param {Object | undefined} versionMetadata - Version field metadata
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @param {T} schema - The table schema
   * @returns {Promise<unknown>} The current version value
   */
  async getCurrentVersion(entity, primaryKeyName, versionMetadata, columns, schema) {
    if (!versionMetadata || !columns) return void 0;
    const versionField = columns[versionMetadata.fieldName];
    if (!versionField) return void 0;
    if (versionMetadata.fieldName in entity) {
      return entity[versionMetadata.fieldName];
    }
    const oldModel = await this.getOldModel(
      { [primaryKeyName]: entity[primaryKeyName] },
      schema,
      [versionMetadata.fieldName, versionField]
    );
    return oldModel[versionMetadata.fieldName];
  }
  /**
   * Prepares a model for insertion with version field.
   * @template T - The type of the table schema
   * @param {Partial<InferInsertModel<T>>} model - The model to prepare
   * @param {Object | undefined} versionMetadata - Version field metadata
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @returns {InferInsertModel<T>} The prepared model
   */
  prepareModelWithVersion(model, versionMetadata, columns) {
    if (!versionMetadata || !columns) return model;
    let fieldName = versionMetadata.fieldName;
    let versionField = columns[versionMetadata.fieldName];
    if (!versionField) {
      const find = Object.entries(columns).find(([, c]) => c.name === versionMetadata.fieldName);
      if (find) {
        fieldName = find[0];
        versionField = find[1];
      }
    }
    if (!versionField) return model;
    const modelWithVersion = { ...model };
    const fieldType = versionField.getSQLType();
    const versionValue = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : 1;
    modelWithVersion[fieldName] = versionValue;
    return modelWithVersion;
  }
  /**
   * Prepares update data with version field.
   * @template T - The type of the table schema
   * @param {Partial<InferInsertModel<T>>} entity - The entity to update
   * @param {Object | undefined} versionMetadata - Version field metadata
   * @param {Record<string, AnyColumn>} columns - The table columns
   * @param {unknown} currentVersion - The current version value
   * @returns {Partial<InferInsertModel<T>>} The prepared update data
   */
  prepareUpdateData(entity, versionMetadata, columns, currentVersion) {
    const updateData = { ...entity };
    if (versionMetadata && columns) {
      const versionField = columns[versionMetadata.fieldName];
      if (versionField) {
        const fieldType = versionField.getSQLType();
        updateData[versionMetadata.fieldName] = fieldType === "datetime" || fieldType === "timestamp" ? /* @__PURE__ */ new Date() : currentVersion + 1;
      }
    }
    return updateData;
  }
  /**
   * Retrieves an existing model by primary key.
   * @template T - The type of the table schema
   * @param {Record<string, unknown>} primaryKeyValues - The primary key values
   * @param {T} schema - The table schema
   * @param {[string, AnyColumn]} versionField - The version field name and column
   * @returns {Promise<Awaited<T> extends Array<any> ? Awaited<T>[number] | undefined : Awaited<T> | undefined>} The existing model
   * @throws {Error} If the record is not found
   */
  async getOldModel(primaryKeyValues, schema, versionField) {
    const [versionFieldName, versionFieldColumn] = versionField;
    const primaryKeys = this.getPrimaryKeys(schema);
    const [primaryKeyName, primaryKeyColumn] = primaryKeys[0];
    const resultQuery = this.forgeOperations.select({
      [primaryKeyName]: primaryKeyColumn,
      [versionFieldName]: versionFieldColumn
    }).from(schema).where(eq(primaryKeyColumn, primaryKeyValues[primaryKeyName]));
    const model = await this.forgeOperations.fetch().executeQueryOnlyOne(resultQuery);
    if (!model) {
      throw new Error(`Record not found in table ${schema}`);
    }
    return model;
  }
}
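// Editor's note: an illustrative sketch, not part of the published bundle.
// Typical use of the versioned CRUD operations above through the ORM facade.
// `modifyWithVersioning()` is the accessor used elsewhere in this file (see
// ForgeSQLCacheOperations below); `users` is a hypothetical drizzle mysqlTable
// with an `id` primary key and a registered int `version` field.
async function exampleVersionedCrud(orm, users) {
  const insertedId = await orm.modifyWithVersioning().insert(users, [{ name: "alice" }]);
  // the current version is looked up and added to the WHERE clause; a
  // concurrent modification leaves affectedRows at 0 and makes this call
  // throw "Optimistic locking failed: ..."
  await orm.modifyWithVersioning().updateById({ id: insertedId, name: "bob" }, users);
  await orm.modifyWithVersioning().deleteById(insertedId, users);
}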
class ForgeSQLSelectOperations {
  options;
  /**
   * Creates a new instance of ForgeSQLSelectOperations.
   * @param {ForgeSqlOrmOptions} options - Configuration options for the ORM
   */
  constructor(options) {
    this.options = options;
  }
  /**
   * Executes a Drizzle query and returns a single result.
   * Throws an error if more than one record is returned.
   *
   * @template T - The type of the query builder
   * @param {T} query - The Drizzle query to execute
   * @returns {Promise<Awaited<T> extends Array<any> ? Awaited<T>[number] | undefined : Awaited<T> | undefined>} A single result object or undefined
   * @throws {Error} If more than one record is returned
   */
  async executeQueryOnlyOne(query) {
    const results = await query;
    const datas = results;
    if (!datas.length) {
      return void 0;
    }
    if (datas.length > 1) {
      throw new Error(`Expected 1 record but returned ${datas.length}`);
    }
    return datas[0];
  }
  /**
   * Executes a raw SQL query and returns the results.
   * Logs the query if logging is enabled.
   *
   * @template T - The type of the result objects
   * @param {string} query - The raw SQL query to execute
   * @param {SqlParameters[]} [params] - Optional SQL parameters
   * @returns {Promise<T[]>} A list of results as objects
   */
  async executeRawSQL(query, params) {
    if (this.options.logRawSqlQuery) {
      const paramsStr = params ? `, with params: ${JSON.stringify(params)}` : "";
      console.debug(`Executing with SQL ${query}${paramsStr}`);
    }
    const sqlStatement = sql.prepare(query);
    if (params) {
      sqlStatement.bindParams(...params);
    }
    const result = await sqlStatement.execute();
    return result.rows;
  }
  /**
   * Executes a raw SQL update query.
   * @param {string} query - The raw SQL update query
   * @param {SqlParameters[]} [params] - Optional SQL parameters
   * @returns {Promise<UpdateQueryResponse>} The update response containing affected rows
   */
  async executeRawUpdateSQL(query, params) {
    const sqlStatement = sql.prepare(query);
    if (params) {
      sqlStatement.bindParams(...params);
    }
    if (this.options.logRawSqlQuery) {
      console.debug(
        `Executing Update with SQL ${query}` + (params ? `, with params: ${JSON.stringify(params)}` : "")
      );
    }
    const updateQueryResponseResults = await sqlStatement.execute();
    return updateQueryResponseResults.rows;
  }
}
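// Editor's note: an illustrative sketch, not part of the published bundle.
// This class is reached through fetch() elsewhere in this file (see
// getOldModel above and ForgeSQLAnalyseOperation below). Raw statements use
// `?` placeholders bound via sql.prepare().bindParams():
async function exampleSelectOps(orm, singleRowQuery) {
  const rows = await orm.fetch().executeRawSQL("SELECT * FROM users WHERE id = ?", [1]);
  // executeQueryOnlyOne() returns undefined for 0 rows and throws for > 1
  const one = await orm.fetch().executeQueryOnlyOne(singleRowQuery);
  return { rows, one };
}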
const metadataQueryContext = new AsyncLocalStorage();
async function saveMetaDataToContext(metadata) {
  const context = metadataQueryContext.getStore();
  if (context) {
    context.printQueriesWithPlan = async () => {
      if (process.env.NODE_ENV !== "test") {
        await new Promise((r) => setTimeout(r, 200));
      }
      await printQueriesWithPlan(context.forgeSQLORM, Date.now() - context.beginTime.getTime());
    };
    if (metadata) {
      context.totalResponseSize += metadata.responseSize;
      context.totalDbExecutionTime += metadata.dbExecutionTime;
    }
  }
}
async function getLastestMetadata() {
  return metadataQueryContext.getStore();
}
const operationTypeQueryContext = new AsyncLocalStorage();
async function getOperationType() {
  return operationTypeQueryContext.getStore()?.operationType ?? "DML";
}
const timeoutMs = 1e4;
const timeoutMessage = `Atlassian @forge/sql did not return a response within ${timeoutMs}ms (${timeoutMs / 1e3} seconds), so the request is blocked. Possible causes: slow query, network issues, or exceeding Forge SQL limits.`;
function isUpdateQueryResponse(obj) {
  return obj !== null && typeof obj === "object" && typeof obj.affectedRows === "number" && typeof obj.insertId === "number";
}
function inlineParams(sql2, params) {
  let i = 0;
  return sql2.replace(/\?/g, () => {
    const val = params[i++];
    if (val === null) return "NULL";
    if (typeof val === "number") return val.toString();
    return `'${String(val).replace(/'/g, "''")}'`;
  });
}
async function processDDLResult(method, result) {
  if (result.metadata) {
    await saveMetaDataToContext(result.metadata);
  }
  if (!result?.rows) {
    return { rows: [] };
  }
  if (isUpdateQueryResponse(result.rows)) {
    const oneRow = result.rows;
    return { ...oneRow, rows: [oneRow] };
  }
  if (Array.isArray(result.rows)) {
    if (method === "execute") {
      return { rows: [result.rows] };
    } else {
      const rows = result.rows.map((r) => Object.values(r));
      return { rows };
    }
  }
  return { rows: [] };
}
async function processExecuteMethod(query, params) {
  const sqlStatement = sql.prepare(query);
  if (params) {
    sqlStatement.bindParams(...params);
  }
  const result = await withTimeout(sqlStatement.execute(), timeoutMessage, timeoutMs);
  await saveMetaDataToContext(result.metadata);
  if (!result.rows) {
    return { rows: [[]] };
  }
  return { rows: [result.rows] };
}
async function processAllMethod(query, params) {
  const sqlStatement = await sql.prepare(query);
  if (params) {
    await sqlStatement.bindParams(...params);
  }
  const result = await withTimeout(
    sqlStatement.execute(),
    timeoutMessage,
    timeoutMs
  );
  await saveMetaDataToContext(result.metadata);
  if (!result.rows) {
    return { rows: [] };
  }
  const rows = result.rows.map((r) => Object.values(r));
  return { rows };
}
const forgeDriver = async (query, params, method) => {
  const operationType = await getOperationType();
  if (operationType === "DDL") {
    const result = await withTimeout(
      sql.executeDDL(inlineParams(query, params)),
      timeoutMessage,
      timeoutMs
    );
    return await processDDLResult(method, result);
  }
  if (method === "execute") {
    return await processExecuteMethod(query, params ?? []);
  }
  return await processAllMethod(query, params ?? []);
};
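// Editor's note: an illustrative sketch, not part of the published bundle.
// How the driver routes statements: DML goes through sql.prepare() /
// bindParams() with `?` placeholders; DDL cannot be prepared, so parameters
// are inlined and escaped first:
//
//   inlineParams("ALTER TABLE t ADD COLUMN c VARCHAR(?)", [255])
//     => "ALTER TABLE t ADD COLUMN c VARCHAR(255)"
//   inlineParams("INSERT INTO t VALUES (?, ?)", [null, "o'brien"])
//     => "INSERT INTO t VALUES (NULL, 'o''brien')"
//
// Both paths are wrapped in withTimeout() with the 10-second limit defined above.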
function injectSqlHints(query, hints) {
  if (!hints) {
    return query;
  }
  const normalizedQuery = query.trim().toUpperCase();
  let queryHints;
  if (normalizedQuery.startsWith("SELECT")) {
    queryHints = hints.select;
  } else if (normalizedQuery.startsWith("INSERT")) {
    queryHints = hints.insert;
  } else if (normalizedQuery.startsWith("UPDATE")) {
    queryHints = hints.update;
  } else if (normalizedQuery.startsWith("DELETE")) {
    queryHints = hints.delete;
  }
  if (!queryHints || queryHints.length === 0) {
    return query;
  }
  const hintsString = queryHints.join(" ");
  if (normalizedQuery.startsWith("SELECT")) {
    return `SELECT /*+ ${hintsString} */ ${query.substring(6)}`;
  } else if (normalizedQuery.startsWith("INSERT")) {
    return `INSERT /*+ ${hintsString} */ ${query.substring(6)}`;
  } else if (normalizedQuery.startsWith("UPDATE")) {
    return `UPDATE /*+ ${hintsString} */ ${query.substring(6)}`;
  } else if (normalizedQuery.startsWith("DELETE")) {
    return `DELETE /*+ ${hintsString} */ ${query.substring(6)}`;
  }
  return query;
}
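// Editor's note: an illustrative sketch, not part of the published bundle.
// injectSqlHints splices optimizer hints right after the leading keyword.
// For example, with hints = { select: ["MAX_EXECUTION_TIME(5000)"] } (a TiDB
// hint used here purely as an illustration):
//
//   injectSqlHints("SELECT id FROM users", hints)
//     => "SELECT /*+ MAX_EXECUTION_TIME(5000) */  id FROM users"
//
// The keyword is re-emitted and query.substring(6) keeps everything after it,
// so the space that followed the keyword is preserved (hence the double space).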
const QUERY_ERROR_CODES = {
  TIMEOUT: "SQL_QUERY_TIMEOUT",
  OUT_OF_MEMORY_ERRNO: 8175
};
const STATEMENTS_SUMMARY_DELAY_MS = 200;
function createForgeDriverProxy(forgeSqlOperation, options, logRawSqlQuery) {
  return async (query, params, method) => {
    const modifiedQuery = injectSqlHints(query, options);
    if (options && logRawSqlQuery && modifiedQuery !== query) {
      console.debug(`SQL Hints injected: ${modifiedQuery}`);
    }
    const queryStartTime = Date.now();
    try {
      return await forgeDriver(modifiedQuery, params, method);
    } catch (error) {
      const isTimeoutError = error.code === QUERY_ERROR_CODES.TIMEOUT;
      const isOutOfMemoryError = error?.context?.debug?.errno === QUERY_ERROR_CODES.OUT_OF_MEMORY_ERRNO;
      if (isTimeoutError || isOutOfMemoryError) {
        if (isTimeoutError) {
          console.error(` TIMEOUT detected - Query exceeded time limit`);
        } else {
          console.error(`OUT OF MEMORY detected - Query exceeded memory limit`);
        }
        await new Promise((resolve) => setTimeout(resolve, STATEMENTS_SUMMARY_DELAY_MS));
        const queryEndTime = Date.now();
        const queryDuration = queryEndTime - queryStartTime;
        await printQueriesWithPlan(forgeSqlOperation, queryDuration);
      }
      if (logRawSqlQuery) {
        console.debug(`SQL Error Details:`, JSON.stringify(error, null, 2));
      }
      throw error;
    }
  };
}
const NON_CACHE_CLEARING_ERROR_CODES = ["VALIDATION_ERROR", "CONSTRAINT_ERROR"];
const CACHE_CLEARING_ERROR_CODES = ["DEADLOCK", "LOCK_WAIT_TIMEOUT", "CONNECTION_ERROR"];
const NON_CACHE_CLEARING_PATTERNS = [/validation/i, /constraint/i];
const CACHE_CLEARING_PATTERNS = [/timeout/i, /connection/i];
function shouldClearCacheOnError(error) {
  if (error?.code && NON_CACHE_CLEARING_ERROR_CODES.includes(error.code)) {
    return false;
  }
  if (error?.message && NON_CACHE_CLEARING_PATTERNS.some((pattern) => pattern.test(error.message))) {
    return false;
  }
  if (error?.code && CACHE_CLEARING_ERROR_CODES.includes(error.code)) {
    return true;
  }
  if (error?.message && CACHE_CLEARING_PATTERNS.some((pattern) => pattern.test(error.message))) {
    return true;
  }
  return true;
}
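// Editor's note: an illustrative sketch, not part of the published bundle.
// The heuristic above fails open -- an unrecognized error still clears the
// cache, since the write may have partially applied. For example:
//
//   shouldClearCacheOnError({ code: "VALIDATION_ERROR" })     // => false (data unchanged)
//   shouldClearCacheOnError({ message: "Lock wait timeout" }) // => true  (/timeout/i matches)
//   shouldClearCacheOnError(new Error("anything else"))       // => true  (default)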
async function handleSuccessfulExecution(rows, onfulfilled, table, options, isCached) {
  try {
    await evictLocalCacheQuery(table, options);
    await saveTableIfInsideCacheContext(table);
    if (isCached && !cacheApplicationContext.getStore()) {
      await clearCache(table, options);
    }
    const result = onfulfilled ? onfulfilled(rows) : rows;
    return result;
  } catch (error) {
    if (shouldClearCacheOnError(error)) {
      await evictLocalCacheQuery(table, options);
      if (isCached) {
        await clearCache(table, options).catch((e) => {
          console.warn("Ignore cache clear errors", e);
        });
      } else {
        await saveTableIfInsideCacheContext(table);
      }
    }
    throw error;
  }
}
function handleFunctionCall(value, target, args, table, options, isCached) {
  const result = value.apply(target, args);
  if (typeof result === "object" && result !== null && "execute" in result) {
    return wrapCacheEvictBuilder(result, table, options, isCached);
  }
  return result;
}
const wrapCacheEvictBuilder = (rawBuilder, table, options, isCached) => {
  return new Proxy(rawBuilder, {
    get(target, prop, receiver) {
      if (prop === "then") {
        return (onfulfilled, onrejected) => target.execute().then(
          (rows) => handleSuccessfulExecution(rows, onfulfilled, table, options, isCached),
          onrejected
        );
      }
      const value = Reflect.get(target, prop, receiver);
      if (typeof value === "function") {
        return (...args) => handleFunctionCall(value, target, args, table, options, isCached);
      }
      return value;
    }
  });
};
function insertAndEvictCacheBuilder(db, table, options, isCached) {
  const builder = db.insert(table);
  return wrapCacheEvictBuilder(
    builder,
    table,
    options,
    isCached
  );
}
function updateAndEvictCacheBuilder(db, table, options, isCached) {
  const builder = db.update(table);
  return wrapCacheEvictBuilder(
    builder,
    table,
    options,
    isCached
  );
}
function deleteAndEvictCacheBuilder(db, table, options, isCached) {
  const builder = db.delete(table);
  return wrapCacheEvictBuilder(
    builder,
    table,
    options,
    isCached
  );
}
async function handleCachedQuery(target, options, cacheTtl, selections, aliasMap, onfulfilled, onrejected) {
  try {
    const localCached = await getQueryLocalCacheQuery(target, options);
    if (localCached) {
      return onfulfilled ? onfulfilled(localCached) : localCached;
    }
    const cacheResult = await getFromCache(target, options);
    if (cacheResult) {
      return onfulfilled ? onfulfilled(cacheResult) : cacheResult;
    }
    const rows = await target.execute();
    const transformed = applyFromDriverTransform(rows, selections, aliasMap);
    await saveQueryLocalCacheQuery(target, transformed, options);
    await setCacheResult(target, options, transformed, cacheTtl).catch((cacheError) => {
      console.warn("Cache set error:", cacheError);
    });
    return onfulfilled ? onfulfilled(transformed) : transformed;
  } catch (error) {
    if (onrejected) {
      return onrejected(error);
    }
    throw error;
  }
}
async function handleNonCachedQuery(target, options, selections, aliasMap, onfulfilled, onrejected) {
  try {
    const localCached = await getQueryLocalCacheQuery(target, options);
    if (localCached) {
      return onfulfilled ? onfulfilled(localCached) : localCached;
    }
    const rows = await target.execute();
    const transformed = applyFromDriverTransform(rows, selections, aliasMap);
    await saveQueryLocalCacheQuery(target, transformed, options);
    return onfulfilled ? onfulfilled(transformed) : transformed;
  } catch (error) {
    if (onrejected) {
      return onrejected(error);
    }
    throw error;
  }
}
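// Editor's note: an illustrative sketch, not part of the published bundle.
// The two handlers above are wired in through a Proxy over drizzle's select
// builder (next function). Awaiting the builder triggers its `then`, which is
// intercepted so the per-invocation memo and, for the cacheable variants, the
// KVS cache are consulted before target.execute() runs; results then pass
// through applyFromDriverTransform() to restore the aliased field names.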
function createAliasedSelectBuilder(db, fields, selectFn, useCache, options, cacheTtl) {
  const { selections, aliasMap } = mapSelectFieldsWithAlias(fields);
  const builder = selectFn(selections);
  const wrapBuilder = (rawBuilder) => {
    return new Proxy(rawBuilder, {
      get(target, prop, receiver) {
        if (prop === "execute") {
          return async (...args) => {
            const rows = await target.execute(...args);
            return applyFromDriverTransform(rows, selections, aliasMap);
          };
        }
        if (prop === "then") {
          return (onfulfilled, onrejected) => {
            if (useCache) {
              const ttl = cacheTtl ?? options.cacheTTL ?? 120;
              return handleCachedQuery(
                target,
                options,
                ttl,
                selections,
                aliasMap,
                onfulfilled,
                onrejected
              );
            } else {
              return handleNonCachedQuery(
                target,
                options,
                selections,
                aliasMap,
                onfulfilled,
                onrejected
              );
            }
          };
        }
        const value = Reflect.get(target, prop, receiver);
        if (typeof value === "function") {
          return (...args) => {
            const result = value.apply(target, args);
            if (typeof result === "object" && result !== null && "execute" in result) {
              return wrapBuilder(result);
            }
            return result;
          };
        }
        return value;
      }
    });
  };
  return wrapBuilder(builder);
}
const DEFAULT_OPTIONS = {
  logRawSqlQuery: false,
  disableOptimisticLocking: false,
  cacheTTL: 120,
  cacheWrapTable: true,
  cacheEntityQueryName: "sql",
  cacheEntityExpirationName: "expiration",
  cacheEntityDataName: "data"
};
function createRawQueryExecutor(db, options, useGlobalCache = false) {
  return async function(query, cacheTtl) {
    let sql2;
    if (isSQLWrapper(query)) {
      const dialect = db.dialect;
      sql2 = dialect.sqlToQuery(query);
    } else {
      sql2 = {
        sql: query,
        params: []
      };
    }
    const localCacheResult = await getQueryLocalCacheQuery(sql2, options);
    if (localCacheResult) {
      return localCacheResult;
    }
    if (useGlobalCache) {
      const cacheResult = await getFromCache({ toSQL: () => sql2 }, options);
      if (cacheResult) {
        return cacheResult;
      }
    }
    const results = await db.execute(query);
    await saveQueryLocalCacheQuery(sql2, results, options);
    if (useGlobalCache) {
      await setCacheResult(
        { toSQL: () => sql2 },
        options,
        results,
        cacheTtl ?? options.cacheTTL ?? 120
      );
    }
    return results;
  };
}
function patchDbWithSelectAliased(db, options) {
  const newOptions = { ...DEFAULT_OPTIONS, ...options };
  db.selectAliased = function(fields) {
    return createAliasedSelectBuilder(
      db,
      fields,
      (selections) => db.select(selections),
      false,
      newOptions
    );
  };
  db.selectAliasedCacheable = function(fields, cacheTtl) {
    return createAliasedSelectBuilder(
      db,
      fields,
      (selections) => db.select(selections),
      true,
      newOptions,
      cacheTtl
    );
  };
  db.selectAliasedDistinct = function(fields) {
    return createAliasedSelectBuilder(
      db,
      fields,
      (selections) => db.selectDistinct(selections),
      false,
      newOptions
    );
  };
  db.selectAliasedDistinctCacheable = function(fields, cacheTtl) {
    return createAliasedSelectBuilder(
      db,
      fields,
      (selections) => db.selectDistinct(selections),
      true,
      newOptions,
      cacheTtl
    );
  };
  db.selectFrom = function(table) {
    return db.selectAliased(getTableColumns(table)).from(table);
  };
  db.selectFromCacheable = function(table, cacheTtl) {
    return db.selectAliasedCacheable(getTableColumns(table), cacheTtl).from(table);
  };
  db.selectDistinctFrom = function(table) {
    return db.selectAliasedDistinct(getTableColumns(table)).from(table);
  };
  db.selectDistinctFromCacheable = function(table, cacheTtl) {
    return db.selectAliasedDistinctCacheable(getTableColumns(table), cacheTtl).from(table);
  };
  db.insertWithCacheContext = function(table) {
    return insertAndEvictCacheBuilder(db, table, newOptions, false);
  };
  db.insertAndEvictCache = function(table) {
    return insertAndEvictCacheBuilder(db, table, newOptions, true);
  };
  db.updateWithCacheContext = function(table) {
    return updateAndEvictCacheBuilder(db, table, newOptions, false);
  };
  db.updateAndEvictCache = function(table) {
    return updateAndEvictCacheBuilder(db, table, newOptions, true);
  };
  db.deleteWithCacheContext = function(table) {
    return deleteAndEvictCacheBuilder(db, table, newOptions, false);
  };
  db.deleteAndEvictCache = function(table) {
    return deleteAndEvictCacheBuilder(db, table, newOptions, true);
  };
  db.executeQuery = createRawQueryExecutor(db, newOptions, false);
  db.executeQueryCacheable = createRawQueryExecutor(db, newOptions, true);
  return db;
}
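// Editor's note: an illustrative sketch, not part of the published bundle.
// What a drizzle instance patched by patchDbWithSelectAliased() looks like in
// use. `users` is a hypothetical drizzle mysqlTable; the cacheable variants
// additionally require cacheEntityName to be configured.
async function examplePatchedDb(db, users) {
  const all = await db.selectFrom(users);                        // alias-safe "select *"
  const cached = await db.selectFromCacheable(users, 60);        // same, served from / stored to the KVS cache for 60s
  await db.insertAndEvictCache(users).values({ name: "carol" }); // insert, then drop cache entries mentioning `users`
  const counted = await db.executeQuery("SELECT COUNT(*) AS cnt FROM users");
  return { all, cached, counted };
}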
class ForgeSQLAnalyseOperation {
  forgeOperations;
  /**
   * Creates a new instance of ForgeSQLAnalyseOperation.
   * @param {ForgeSqlOperation} forgeOperations - The ForgeSQL operations instance
   */
  constructor(forgeOperations) {
    this.forgeOperations = forgeOperations;
    this.mapToCamelCaseClusterStatement = this.mapToCamelCaseClusterStatement.bind(this);
  }
  /**
   * Executes EXPLAIN on a raw SQL query.
   * @param {string} query - The SQL query to analyze
   * @param {unknown[]} bindParams - The query parameters
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
   */
  async explainRaw(query, bindParams) {
    const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ${query}`, bindParams);
    return results.map((row) => ({
      id: row.id,
      estRows: row.estRows,
      actRows: row.actRows,
      task: row.task,
      accessObject: row["access object"],
      executionInfo: row["execution info"],
      operatorInfo: row["operator info"],
      memory: row.memory,
      disk: row.disk
    }));
  }
  /**
   * Executes EXPLAIN on a Drizzle query.
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
   */
  async explain(query) {
    const { sql: sql2, params } = query.toSQL();
    return this.explainRaw(sql2, params);
  }
  /**
   * Executes EXPLAIN ANALYZE on a raw SQL query.
   * @param {string} query - The SQL query to analyze
   * @param {unknown[]} bindParams - The query parameters
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
   */
  async explainAnalyzeRaw(query, bindParams) {
    const results = await this.forgeOperations.fetch().executeRawSQL(`EXPLAIN ANALYZE ${query}`, bindParams);
    return results.map((row) => ({
      id: row.id,
      estRows: row.estRows,
      actRows: row.actRows,
      task: row.task,
      accessObject: row["access object"],
      executionInfo: row["execution info"],
      operatorInfo: row["operator info"],
      memory: row.memory,
      disk: row.disk
    }));
  }
  /**
   * Executes EXPLAIN ANALYZE on a Drizzle query.
   * @param {{ toSQL: () => Query }} query - The Drizzle query to analyze
   * @returns {Promise<ExplainAnalyzeRow[]>} The execution plan analysis results
   */
  async explainAnalyze(query) {
    const { sql: sql2, params } = query.toSQL();
    return this.explainAnalyzeRaw(sql2, params);
  }
  /**
   * Decodes a query execution plan from its string representation.
   * @param {string} input - The raw execution plan string
   * @returns {ExplainAnalyzeRow[]} The decoded execution plan rows
   */
  decodedPlan(input) {
    if (!input) {
      return [];
    }
    const lines = input.trim().split("\n");
    if (lines.length < 2) return [];
    const headersRaw = lines[0].split(" ").map((h) => h.trim()).filter(Boolean);
    const headers = headersRaw.map((h) => {
      return h.replace(/\s+/g, " ").replace(/[-\s]+(.)?/g, (_, c) => c ? c.toUpperCase() : "").replace(/^./, (s) => s.toLowerCase());
    });
    return lines.slice(1).map((line) => {
      const values = line.split(" ").map((s) => s.trim()).filter(Boolean);
      const row = {};
      headers.forEach((key, i) => {
        row[key] = values[i] ?? "";
      });
      return row;
    });
  }
  /**
   * Normalizes a raw slow query row into a more structured format.
   * @param {SlowQueryRaw} row - The raw slow query data
   * @returns {SlowQueryNormalized} The normalized slow query data
   */
  normalizeSlowQuery(row) {
    return {
      time: row.Time,
      txnStartTs: row.Txn_start_ts,
      user: row.User,
      host: row.Host,
      connId: row.Conn_ID,
      db: row.DB,
      query: row.Query,
      digest: row.Digest,
      queryTime: row.Query_time,
      compileTime: row.Compile_time,
      optimizeTime: row.Optimize_time,
      processTime: row.Process_time,
      waitTime: row.Wait_time,
      parseTime: row.Parse_time,
      rewriteTime: row.Rewrite_time,
      copTime: row.Cop_time,
      copProcAvg: row.Cop_proc_avg,
      copProcMax: row.Cop_proc_max,
      copProcP90: row.Cop_proc_p90,
      copProcAddr: row.Cop_proc_addr,
      copWaitAvg: row.Cop_wait_avg,
      copWaitMax: row.Cop_wait_max,
      copWaitP90: row.Cop_wait_p90,
      copWaitAddr: row.Cop_wait_addr,
      memMax: row.Mem_max,
      diskMax: row.Disk_max,
      totalKeys: row.Total_keys,
      processKeys: row.Process_keys,
      requestCount: row.Request_count,
      kvTotal: row.KV_total,
      pdTotal: row.PD_total,
      resultRows: row.Result_rows,
      rocksdbBlockCacheHitCount: row.Rocksdb_block_cache_hit_count,
      rocksdbBlockReadCount: row.Rocksdb_block_read_count,
      rocksdbBlockReadByte: row.Rocksdb_block_read_byte,
      plan: row.Plan,
      binaryPlan: row.Binary_plan,
      planDigest: row.Plan_digest,
      parsedPlan: this.decodedPlan(row.Plan)
    };
  }
  /**
   * Builds a SQL query for retrieving cluster statement history.
   * @param {string[]} tables - The tables to analyze
   * @param {Date} [from] - The start date for the analysis
   * @param {Date} [to] - The end date for the analysis
   * @returns {string} The SQL query for cluster statement history
   */
  buildClusterStatementQuery(tables, from, to) {
    const formatDateTime2 = (date) => DateTime.fromJSDate(date).toFormat("yyyy-LL-dd'T'HH:mm:ss.SSS");
    const tableConditions = tables.map((table) => `TABLE_NAMES LIKE CONCAT(SCHEMA_NAME, '.', '%', '${table}', '%')`).join(" OR ");
    const timeConditions = [];
    if (from) {
      timeConditions.push(`SUMMARY_BEGIN_TIME >= '${formatDateTime2(from)}'`);
    }
    if (to) {
      timeConditions.push(`SUMMARY_END_TIME <= '${formatDateTime2(to)}'`);
    }
    let whereClauses;
    if (tableConditions?.length) {
      whereClauses = [tableConditions ? `(${tableConditions})` : "", ...timeConditions];
    } else {
      whereClauses = timeConditions;
    }
    return `
      SELECT *
      FROM (
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY
        UNION ALL
        SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
      ) AS combined
      ${whereClauses?.length > 0 ? `WHERE ${whereClauses.join(" AND ")}` : ""}
    `;
  }
  /**
   * Retrieves and analyzes slow queries from the database.
   * @returns {Promise<SlowQueryNormalized[]>} The normalized slow query data
   */
  // CLUSTER_SLOW_QUERY STATISTICS
  async analyzeSlowQueries() {
    const results = await this.forgeOperations.fetch().executeRawSQL(`
      SELECT *
      FROM information_schema.slow_query
      ORDER BY time DESC
    `);
    return results.map((row) => this.normalizeSlowQuery(row));
  }
  /**
   * Converts a cluster statement row to camelCase format.
   * @param {Record<string, any>} input - The input row data
   * @returns {ClusterStatementRowCamelCase} The converted row data
   */
  mapToCamelCaseClusterStatement(input) {
    if (!input) {
      return {};
    }
    const result = {};
    result.parsedPlan = this.decodedPlan(input["PLAN"] ?? "");
    for (const key in input) {
      const camelKey = key.toLowerCase().replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
      result[camelKey] = input[key];
    }
    return result;
  }
  /**
   * Analyzes query history for specific tables using raw table names.
   * @param {string[]} tables - The table names to analyze
   * @param {Date} [fromDate] - The start date for the analysis
   * @param {Date} [toDate] - The end date for the analysis
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
   */
  async analyzeQueriesHistoryRaw(tables, fromDate, toDate) {
    const results = await this.forgeOperations.fetch().executeRawSQL(
      this.buildClusterStatementQuery(tables ?? [], fromDate, toDate)
    );
    return results.map((r) => this.mapToCamelCaseClusterStatement(r));
  }
  /**
   * Analyzes query history for specific tables using Drizzle table objects.
   * @param {AnyMySqlTable[]} tables - The Drizzle table objects to analyze
   * @param {Date} [fromDate] - The start date for the analysis
   * @param {Date} [toDate] - The end date for the analysis
   * @returns {Promise<ClusterStatementRowCamelCase[]>} The analyzed query history
   */
  async analyzeQueriesHistory(tables, fromDate, toDate) {
    const tableNames = tables?.map((table) => getTableName(table)) ?? [];
    return this.analyzeQueriesHistoryRaw(tableNames, fromDate, toDate);
  }
}
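// Editor's note: an illustrative sketch, not part of the published bundle.
// The analysis API in use. The accessor name `analyze()` is an assumption
// about how ForgeSQLAnalyseOperation is exposed on the ORM facade; adjust it
// to whatever your version provides. `users` is a hypothetical drizzle table.
async function exampleAnalyze(orm, db, users) {
  const plan = await orm.analyze().explainAnalyze(db.select().from(users));
  console.log(plan.map((r) => `${r.id} est=${r.estRows} act=${r.actRows}`).join("\n"));
  // statement history for `users` over the last hour, camelCased
  const history = await orm.analyze().analyzeQueriesHistory([users], new Date(Date.now() - 36e5));
  return history.length;
}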
|
2305
|
-
class ForgeSQLCacheOperations {
  options;
  forgeOperations;
  /**
   * Creates a new instance of ForgeSQLCacheOperations.
   *
   * @param options - Configuration options for the ORM
   * @param forgeOperations - The ForgeSQL operations instance
   */
  constructor(options, forgeOperations) {
    this.options = options;
    this.forgeOperations = forgeOperations;
  }
  /**
   * Evicts cache for multiple tables using Drizzle table objects.
   *
   * @param tables - Array of Drizzle table objects to clear cache for
   * @returns Promise that resolves when cache eviction is complete
   * @throws Error if cacheEntityName is not configured
   */
  async evictCacheEntities(tables) {
    if (!this.options.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
    await this.evictCache(tables.map((t) => getTableName(t)));
  }
  /**
   * Evicts cache for multiple tables by their names.
   *
   * @param tables - Array of table names to clear cache for
   * @returns Promise that resolves when cache eviction is complete
   * @throws Error if cacheEntityName is not configured
   */
  async evictCache(tables) {
    if (!this.options.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
    await clearTablesCache(tables, this.options);
  }
  /**
   * Inserts records with optimistic locking/versioning and automatically evicts cache.
   *
   * This method uses `modifyWithVersioning().insert()` internally, providing:
   * - Automatic version field initialization
   * - Optimistic locking support
   * - Cache eviction after successful operation
   *
   * @param schema - The table schema
   * @param models - Array of entities to insert
   * @param updateIfExists - Whether to update existing records
   * @returns Promise that resolves to the number of inserted rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async insert(schema, models, updateIfExists) {
    this.validateCacheConfiguration();
    const number = await this.forgeOperations.modifyWithVersioning().insert(schema, models, updateIfExists);
    await clearCache(schema, this.options);
    return number;
  }
  /**
   * Deletes a record by ID with optimistic locking/versioning and automatically evicts cache.
   *
   * This method uses `modifyWithVersioning().deleteById()` internally, providing:
   * - Optimistic locking checks before deletion
   * - Version field validation
   * - Cache eviction after successful operation
   *
   * @param id - The ID of the record to delete
   * @param schema - The table schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async deleteById(id, schema) {
    this.validateCacheConfiguration();
    const number = await this.forgeOperations.modifyWithVersioning().deleteById(id, schema);
    await clearCache(schema, this.options);
    return number;
  }
  /**
   * Updates a record by ID with optimistic locking/versioning and automatically evicts cache.
   *
   * This method uses `modifyWithVersioning().updateById()` internally, providing:
   * - Optimistic locking checks before update
   * - Version field incrementation
   * - Cache eviction after successful operation
   *
   * @param entity - The entity with updated values (must include primary key)
   * @param schema - The table schema
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async updateById(entity, schema) {
    this.validateCacheConfiguration();
    const number = await this.forgeOperations.modifyWithVersioning().updateById(entity, schema);
    await clearCache(schema, this.options);
    return number;
  }
  /**
   * Updates fields based on conditions with optimistic locking/versioning and automatically evicts cache.
   *
   * This method uses `modifyWithVersioning().updateFields()` internally, providing:
   * - Optimistic locking support (if version field is configured)
   * - Version field validation and incrementation
   * - Cache eviction after successful operation
   *
   * @param updateData - The data to update
   * @param schema - The table schema
   * @param where - Optional WHERE conditions
   * @returns Promise that resolves to the number of affected rows
   * @throws Error if cacheEntityName is not configured
   * @throws Error if optimistic locking check fails
   */
  async updateFields(updateData, schema, where) {
    this.validateCacheConfiguration();
    const number = await this.forgeOperations.modifyWithVersioning().updateFields(updateData, schema, where);
    await clearCache(schema, this.options);
    return number;
  }
  /**
   * Executes a query with caching support.
   * Checks the cache first; if no cached result is found, executes the query and stores the result in the cache.
   *
   * @param query - The Drizzle query to execute
   * @param cacheTtl - Optional cache TTL override
   * @returns Promise that resolves to the query results
   * @throws Error if cacheEntityName is not configured
   */
  async executeQuery(query, cacheTtl) {
    this.validateCacheConfiguration();
    const sqlQuery = query;
    const cacheResult = await getFromCache(sqlQuery, this.options);
    if (cacheResult) {
      return cacheResult;
    }
    const results = await query;
    await setCacheResult(sqlQuery, this.options, results, cacheTtl ?? this.options.cacheTTL ?? 60);
    return results;
  }
  /**
   * Validates that cache configuration is properly set up.
   *
   * @throws Error if cacheEntityName is not configured
   * @private
   */
  validateCacheConfiguration() {
    if (!this.options.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
  }
}
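/*
 * Usage sketch for ForgeSQLCacheOperations, obtained via
 * forgeSQL.modifyWithVersioningAndEvictCache(). The `forgeSQL` instance and
 * the `users` table are hypothetical, and `cacheEntityName` must be set in
 * the options or every call below throws. Signatures match the methods above.
 *
 *   const cacheOps = forgeSQL.modifyWithVersioningAndEvictCache();
 *   await cacheOps.insert(users, [{ id: 1, name: "Alice" }]);      // versioned insert, then cache eviction
 *   await cacheOps.updateById({ id: 1, name: "Alice B." }, users); // optimistic-locking update, then eviction
 *   await cacheOps.evictCacheEntities([users]);                    // manual eviction by table object
 *   const rows = await cacheOps.executeQuery(forgeSQL.selectFrom(users), 30); // read-through cache, 30 s TTL
 */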
class ForgeSQLORMImpl {
  static instance = null;
  drizzle;
  crudOperations;
  fetchOperations;
  analyzeOperations;
  cacheOperations;
  options;
  /**
   * Private constructor to enforce singleton behavior.
   * @param options - Options for configuring ForgeSQL ORM behavior.
   */
  constructor(options) {
    try {
      const newOptions = options ?? {
        logRawSqlQuery: false,
        logCache: false,
        disableOptimisticLocking: false,
        cacheWrapTable: true,
        cacheTTL: 120,
        cacheEntityQueryName: "sql",
        cacheEntityExpirationName: "expiration",
        cacheEntityDataName: "data"
      };
      this.options = newOptions;
      if (newOptions.logRawSqlQuery) {
        console.debug("Initializing ForgeSQLORM...");
      }
      const proxiedDriver = createForgeDriverProxy(
        this,
        newOptions.hints,
        newOptions.logRawSqlQuery
      );
      this.drizzle = patchDbWithSelectAliased(
        drizzle(proxiedDriver, { logger: newOptions.logRawSqlQuery }),
        newOptions
      );
      this.crudOperations = new ForgeSQLCrudOperations(this, newOptions);
      this.fetchOperations = new ForgeSQLSelectOperations(newOptions);
      this.analyzeOperations = new ForgeSQLAnalyseOperation(this);
      this.cacheOperations = new ForgeSQLCacheOperations(newOptions, this);
    } catch (error) {
      console.error("ForgeSQLORM initialization failed:", error);
      throw error;
    }
  }
  /**
   * Executes a query and provides access to execution metadata with performance monitoring.
   * This method allows you to capture detailed information about query execution
   * including database execution time, response size, and query analysis capabilities.
   *
   * The method aggregates metrics across all database operations within the query function,
   * making it ideal for monitoring resolver performance and detecting performance issues.
   *
   * @template T - The return type of the query
   * @param query - A function that returns a Promise with the query result. Can contain multiple database operations.
   * @param onMetadata - Callback function that receives aggregated execution metadata
   * @param onMetadata.totalDbExecutionTime - Total database execution time across all operations in the query function (in milliseconds)
   * @param onMetadata.totalResponseSize - Total response size across all operations (in bytes)
   * @param onMetadata.printQueries - Function to analyze and print query execution plans from CLUSTER_STATEMENTS_SUMMARY
   * @returns Promise with the query result
   *
   * @example
   * ```typescript
   * // Basic usage with performance monitoring
   * const result = await forgeSQL.executeWithMetadata(
   *   async () => {
   *     const users = await forgeSQL.selectFrom(usersTable);
   *     const orders = await forgeSQL.selectFrom(ordersTable).where(eq(ordersTable.userId, usersTable.id));
   *     return { users, orders };
   *   },
   *   async (totalDbExecutionTime, totalResponseSize, printQueries) => {
   *     const threshold = 500; // ms baseline for this resolver
   *
   *     if (totalDbExecutionTime > threshold * 1.5) {
   *       console.warn(`[Performance Warning] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
   *       await printQueries(); // Analyze and print query execution plans
   *     } else if (totalDbExecutionTime > threshold) {
   *       console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
   *     }
   *
   *     console.log(`DB response size: ${totalResponseSize} bytes`);
   *   }
   * );
   * ```
   *
   * @example
   * ```typescript
   * // Resolver with performance monitoring
   * resolver.define("fetch", async (req: Request) => {
   *   try {
   *     return await forgeSQL.executeWithMetadata(
   *       async () => {
   *         // Resolver logic with multiple queries
   *         const users = await forgeSQL.selectFrom(demoUsers);
   *         const orders = await forgeSQL.selectFrom(demoOrders)
   *           .where(eq(demoOrders.userId, demoUsers.id));
   *         return { users, orders };
   *       },
   *       async (totalDbExecutionTime, totalResponseSize, printQueries) => {
   *         const threshold = 500; // ms baseline for this resolver
   *
   *         if (totalDbExecutionTime > threshold * 1.5) {
   *           console.warn(`[Performance Warning fetch] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
   *           await printQueries(); // Optionally log or capture diagnostics for further analysis
   *         } else if (totalDbExecutionTime > threshold) {
   *           console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
   *         }
   *
   *         console.log(`DB response size: ${totalResponseSize} bytes`);
   *       }
   *     );
   *   } catch (e) {
   *     const error = e?.cause?.debug?.sqlMessage ?? e?.cause;
   *     console.error(error, e);
   *     throw error;
   *   }
   * });
   * ```
   *
   * @note **Important**: When multiple resolvers are running concurrently, their query data may also appear in `printQueries()` analysis, as it queries the global `CLUSTER_STATEMENTS_SUMMARY` table.
   */
  async executeWithMetadata(query, onMetadata) {
    return metadataQueryContext.run(
      {
        totalDbExecutionTime: 0,
        totalResponseSize: 0,
        beginTime: /* @__PURE__ */ new Date(),
        forgeSQLORM: this,
        printQueriesWithPlan: async () => {
          return;
        }
      },
      async () => {
        const result = await query();
        const metadata = await getLastestMetadata();
        try {
          if (metadata) {
            await onMetadata(
              metadata.totalDbExecutionTime,
              metadata.totalResponseSize,
              metadata.printQueriesWithPlan
            );
          }
        } catch (e) {
          console.error(
            "[ForgeSQLORM][executeWithMetadata] Failed to run onMetadata callback",
            {
              errorMessage: e?.message,
              errorStack: e?.stack,
              totalDbExecutionTime: metadata?.totalDbExecutionTime,
              totalResponseSize: metadata?.totalResponseSize,
              beginTime: metadata?.beginTime
            },
            e
          );
        }
        return result;
      }
    );
  }
  /**
   * Executes operations within a cache context that collects cache eviction events.
   * All clearCache calls within the context are collected and executed in batch at the end.
   * Queries executed within this context will bypass cache for tables that were marked for clearing.
   *
   * This is useful for:
   * - Batch operations that affect multiple tables
   * - Transaction-like operations where you want to clear cache only at the end
   * - Performance optimization by reducing cache clear operations
   *
   * @param cacheContext - Function containing operations that may trigger cache evictions
   * @returns Promise that resolves when all operations and cache clearing are complete
   *
   * @example
   * ```typescript
   * await forgeSQL.executeWithCacheContext(async () => {
   *   await forgeSQL.modifyWithVersioning().insert(users, userData);
   *   await forgeSQL.modifyWithVersioning().insert(orders, orderData);
   *   // Cache for both users and orders tables will be cleared at the end
   * });
   * ```
   */
  executeWithCacheContext(cacheContext) {
    return this.executeWithCacheContextAndReturnValue(cacheContext);
  }
  /**
   * Executes operations within a cache context and returns a value.
   * All clearCache calls within the context are collected and executed in batch at the end.
   * Queries executed within this context will bypass cache for tables that were marked for clearing.
   *
   * @param cacheContext - Function containing operations that may trigger cache evictions
   * @returns Promise that resolves to the return value of the cacheContext function
   *
   * @example
   * ```typescript
   * const result = await forgeSQL.executeWithCacheContextAndReturnValue(async () => {
   *   await forgeSQL.modifyWithVersioning().insert(users, userData);
   *   return await forgeSQL.fetch().executeQueryOnlyOne(selectUserQuery);
   * });
   * ```
   */
  async executeWithCacheContextAndReturnValue(cacheContext) {
    return await this.executeWithLocalCacheContextAndReturnValue(
      async () => await cacheApplicationContext.run(
        cacheApplicationContext.getStore() ?? { tables: /* @__PURE__ */ new Set() },
        async () => {
          try {
            return await cacheContext();
          } finally {
            await clearTablesCache(
              Array.from(cacheApplicationContext.getStore()?.tables ?? []),
              this.options
            );
          }
        }
      )
    );
  }
  /**
   * Executes operations within a local cache context and returns a value.
   * This provides in-memory caching for select queries within a single request scope.
   *
   * @param cacheContext - Function containing operations that will benefit from local caching
   * @returns Promise that resolves to the return value of the cacheContext function
   */
  async executeWithLocalCacheContextAndReturnValue(cacheContext) {
    return await localCacheApplicationContext.run(
      localCacheApplicationContext.getStore() ?? { cache: {} },
      async () => {
        return await cacheContext();
      }
    );
  }
  /**
   * Executes operations within a local cache context.
   * This provides in-memory caching for select queries within a single request scope.
   *
   * @param cacheContext - Function containing operations that will benefit from local caching
   * @returns Promise that resolves when all operations are complete
   */
  executeWithLocalContext(cacheContext) {
    return this.executeWithLocalCacheContextAndReturnValue(cacheContext);
  }
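  /*
   * Sketch: wrapping a request in a local (in-memory, per-invocation) cache
   * context. Per the contract documented above, select queries inside the
   * context benefit from local caching, so the second identical select below
   * should be served without another database round trip. `users` is a
   * hypothetical Drizzle table.
   *
   *   await forgeSQL.executeWithLocalContext(async () => {
   *     const first = await forgeSQL.selectFrom(users);
   *     const second = await forgeSQL.selectFrom(users); // same invocation: may be served from the local cache
   *   });
   */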
  /**
   * Creates an insert query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
   *
   * @param table - The table to insert into
   * @returns Insert query builder (no versioning, no cache management)
   */
  insert(table) {
    return this.drizzle.insertWithCacheContext(table);
  }
  /**
   * Creates an insert query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
   *
   * @param table - The table to insert into
   * @returns Insert query builder with automatic cache eviction (no versioning)
   */
  insertAndEvictCache(table) {
    return this.drizzle.insertAndEvictCache(table);
  }
  /**
   * Creates an update query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
   *
   * @param table - The table to update
   * @returns Update query builder with automatic cache eviction (no versioning)
   */
  updateAndEvictCache(table) {
    return this.drizzle.updateAndEvictCache(table);
  }
  /**
   * Creates an update query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
   *
   * @param table - The table to update
   * @returns Update query builder (no versioning, no cache management)
   */
  update(table) {
    return this.drizzle.updateWithCacheContext(table);
  }
  /**
   * Creates a delete query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
   *
   * @param table - The table to delete from
   * @returns Delete query builder (no versioning, no cache management)
   */
  delete(table) {
    return this.drizzle.deleteWithCacheContext(table);
  }
  /**
   * Creates a delete query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
   *
   * @param table - The table to delete from
   * @returns Delete query builder with automatic cache eviction (no versioning)
   */
  deleteAndEvictCache(table) {
    return this.drizzle.deleteAndEvictCache(table);
  }
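  /*
   * Sketch contrasting the plain and cache-evicting builders above. Neither
   * variant applies versioning; the plain builders defer eviction to an active
   * cache context, if any, while the *AndEvictCache variants clear the table's
   * cache after execution. `users` is a hypothetical Drizzle table, and
   * .values()/.set()/.where() are standard Drizzle builder calls.
   *
   *   await forgeSQL.insert(users).values({ id: 1, name: "Alice" });            // no automatic eviction
   *   await forgeSQL.insertAndEvictCache(users).values({ id: 2, name: "Bob" }); // evicts the users cache afterwards
   *   await forgeSQL.updateAndEvictCache(users).set({ name: "Bo" }).where(eq(users.id, 2));
   *   await forgeSQL.deleteAndEvictCache(users).where(eq(users.id, 2));
   */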
  /**
   * Creates the modify operations instance.
   * @returns Modify operations.
   */
  modifyWithVersioning() {
    return this.crudOperations;
  }
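  /*
   * Sketch: versioned CRUD via modifyWithVersioning(). The insert/updateById/
   * deleteById signatures match the delegating calls in ForgeSQLCacheOperations
   * above; `users` is a hypothetical Drizzle table.
   *
   *   const crud = forgeSQL.modifyWithVersioning();
   *   await crud.insert(users, [{ id: 1, name: "Alice" }]);
   *   await crud.updateById({ id: 1, name: "Alice B." }, users); // throws if the optimistic-locking check fails
   *   await crud.deleteById(1, users);
   */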
  /**
   * Returns the singleton instance of ForgeSQLORMImpl.
   * @param options - Options for configuring ForgeSQL ORM behavior.
   * @returns The singleton instance of ForgeSQLORMImpl.
   */
  static getInstance(options) {
    ForgeSQLORMImpl.instance ??= new ForgeSQLORMImpl(options);
    return ForgeSQLORMImpl.instance;
  }
  /**
   * Retrieves the fetch operations instance.
   * @returns Fetch operations.
   */
  fetch() {
    return this.fetchOperations;
  }
  /**
   * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
   * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
   */
  analyze() {
    return this.analyzeOperations;
  }
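  /*
   * Sketch: pulling recent query history for specific tables through analyze().
   * analyzeQueriesHistory(tables, fromDate, toDate) is defined on the analysis
   * operations shown earlier in this file; the one-hour window and `users`
   * table are illustrative.
   *
   *   const history = await forgeSQL
   *     .analyze()
   *     .analyzeQueriesHistory([users], new Date(Date.now() - 3600000), new Date());
   */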
  /**
   * Provides schema-level SQL operations with optimistic locking/versioning and automatic cache eviction.
   *
   * This method returns operations that use `modifyWithVersioning()` internally, providing:
   * - Optimistic locking support
   * - Automatic version field management
   * - Cache eviction after successful operations
   *
   * @returns {ForgeSQLCacheOperations} Interface for executing versioned SQL operations with cache management
   */
  modifyWithVersioningAndEvictCache() {
    return this.cacheOperations;
  }
  /**
   * Returns a Drizzle query builder instance.
   *
   * ⚠️ IMPORTANT: This method should be used ONLY for query building purposes.
   * The returned instance should NOT be used for direct database connections or query execution.
   * All database operations should be performed through Forge SQL's executeRawSQL or executeRawUpdateSQL methods.
   *
   * @returns A Drizzle query builder instance for query construction only.
   */
  getDrizzleQueryBuilder() {
    return this.drizzle;
  }
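  /*
   * Sketch: using the returned builder for construction only, then executing
   * through the ORM as the warning above requires. toSQL() is standard Drizzle;
   * `users` is a hypothetical schema object.
   *
   *   const builder = forgeSQL.getDrizzleQueryBuilder();
   *   const query = builder.select().from(users).where(eq(users.id, 1));
   *   const { sql: text, params } = query.toSQL(); // inspect without executing
   *   const rows = await forgeSQL.execute(query);  // execute via Forge SQL instead
   */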
  /**
   * Creates a select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .select({user: users, order: orders})
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  select(fields) {
    if (!fields) {
      throw new Error("fields is empty");
    }
    return this.drizzle.selectAliased(fields);
  }
  /**
   * Creates a distinct select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .selectDistinct({user: users, order: orders})
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  selectDistinct(fields) {
    if (!fields) {
      throw new Error("fields is empty");
    }
    return this.drizzle.selectAliasedDistinct(fields);
  }
  /**
   * Creates a cacheable select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @param {number} [cacheTTL] - Optional cache TTL in seconds (defaults to 60)
   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .selectCacheable({user: users, order: orders}, 60)
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  selectCacheable(fields, cacheTTL) {
    if (!fields) {
      throw new Error("fields is empty");
    }
    return this.drizzle.selectAliasedCacheable(fields, cacheTTL);
  }
  /**
   * Creates a cacheable distinct select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @param {number} [cacheTTL] - Optional cache TTL in seconds (defaults to 60)
   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A distinct select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .selectDistinctCacheable({user: users, order: orders}, 60)
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  selectDistinctCacheable(fields, cacheTTL) {
    if (!fields) {
      throw new Error("fields is empty");
    }
    return this.drizzle.selectAliasedDistinctCacheable(fields, cacheTTL);
  }
  /**
   * Creates a select query builder for all columns from a table with field aliasing support.
   * This is a convenience method that automatically selects all columns from the specified table.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @returns Select query builder with all table columns and field aliasing support
   * @example
   * ```typescript
   * const users = await forgeSQL.selectFrom(userTable).where(eq(userTable.id, 1));
   * ```
   */
  selectFrom(table) {
    return this.drizzle.selectFrom(table);
  }
  /**
   * Creates a select distinct query builder for all columns from a table with field aliasing support.
   * This is a convenience method that automatically selects all distinct columns from the specified table.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @returns Select distinct query builder with all table columns and field aliasing support
   * @example
   * ```typescript
   * const uniqueUsers = await forgeSQL.selectDistinctFrom(userTable).where(eq(userTable.status, 'active'));
   * ```
   */
  selectDistinctFrom(table) {
    return this.drizzle.selectDistinctFrom(table);
  }
  /**
   * Creates a cacheable select query builder for all columns from a table with field aliasing and caching support.
   * This is a convenience method that automatically selects all columns from the specified table with caching enabled.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
   * @returns Select query builder with all table columns, field aliasing, and caching support
   * @example
   * ```typescript
   * const users = await forgeSQL.selectCacheableFrom(userTable, 300).where(eq(userTable.id, 1));
   * ```
   */
  selectCacheableFrom(table, cacheTTL) {
    return this.drizzle.selectFromCacheable(table, cacheTTL);
  }
  /**
   * Creates a cacheable select distinct query builder for all columns from a table with field aliasing and caching support.
   * This is a convenience method that automatically selects all distinct columns from the specified table with caching enabled.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
   * @returns Select distinct query builder with all table columns, field aliasing, and caching support
   * @example
   * ```typescript
   * const uniqueUsers = await forgeSQL.selectDistinctCacheableFrom(userTable, 300).where(eq(userTable.status, 'active'));
   * ```
   */
  selectDistinctCacheableFrom(table, cacheTTL) {
    return this.drizzle.selectDistinctFromCacheable(table, cacheTTL);
  }
  /**
   * Executes a raw SQL query with local cache support.
   * This method provides local caching for raw SQL queries within the current invocation context.
   * Results are cached locally and will be returned from cache on subsequent identical queries.
   *
   * @param query - The SQL query to execute (SQLWrapper or string)
   * @returns Promise with query results
   * @example
   * ```typescript
   * // Using SQLWrapper
   * const result = await forgeSQL.execute(sql`SELECT * FROM users WHERE id = ${userId}`);
   *
   * // Using string
   * const result = await forgeSQL.execute("SELECT * FROM users WHERE status = 'active'");
   * ```
   */
  execute(query) {
    return this.drizzle.executeQuery(query);
  }
  /**
   * Executes a Data Definition Language (DDL) SQL query.
   * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
   *
   * This method is specifically designed for DDL operations and provides:
   * - Proper operation type context for DDL queries
   * - No caching (DDL operations should not be cached)
   * - Direct execution without query optimization
   *
   * @template T - The expected return type of the query result
   * @param query - The DDL SQL query to execute (SQLWrapper or string)
   * @returns Promise with query results
   * @throws {Error} If the DDL operation fails
   *
   * @example
   * ```typescript
   * // Create a new table
   * await forgeSQL.executeDDL(`
   *   CREATE TABLE users (
   *     id INT PRIMARY KEY AUTO_INCREMENT,
   *     name VARCHAR(255) NOT NULL,
   *     email VARCHAR(255) UNIQUE
   *   )
   * `);
   *
   * // Alter table structure
   * await forgeSQL.executeDDL(sql`
   *   ALTER TABLE users
   *   ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
   * `);
   *
   * // Drop a table
   * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
   * ```
   */
  async executeDDL(query) {
    return this.executeDDLActions(async () => this.drizzle.executeQuery(query));
  }
  /**
   * Executes a series of actions within a DDL operation context.
   * This method provides a way to execute regular SQL queries that should be treated
   * as DDL operations, ensuring proper operation type context for performance monitoring.
   *
   * This method is useful for:
   * - Executing regular SQL queries in DDL context for monitoring purposes
   * - Wrapping non-DDL operations that should be treated as DDL for analysis
   * - Ensuring proper operation type context for complex workflows
   * - Maintaining DDL operation context across multiple function calls
   *
   * @template T - The return type of the actions function
   * @param actions - Function containing SQL operations to execute in DDL context
   * @returns Promise that resolves to the return value of the actions function
   *
   * @example
   * ```typescript
   * // Execute regular SQL queries in DDL context for monitoring
   * await forgeSQL.executeDDLActions(async () => {
   *   const slowQueries = await forgeSQL.execute(`
   *     SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
   *     WHERE AVG_LATENCY > 1000000
   *   `);
   *   return slowQueries;
   * });
   *
   * // Execute complex analysis queries in DDL context
   * const result = await forgeSQL.executeDDLActions(async () => {
   *   const tableInfo = await forgeSQL.execute("SHOW TABLES");
   *   const performanceData = await forgeSQL.execute(`
   *     SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
   *     WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
   *   `);
   *   return { tableInfo, performanceData };
   * });
   *
   * // Execute monitoring queries with error handling
   * try {
   *   await forgeSQL.executeDDLActions(async () => {
   *     const metrics = await forgeSQL.execute(`
   *       SELECT COUNT(*) as query_count
   *       FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
   *     `);
   *     console.log(`Total queries: ${metrics[0].query_count}`);
   *   });
   * } catch (error) {
   *   console.error("Monitoring query failed:", error);
   * }
   * ```
   */
  async executeDDLActions(actions) {
    return operationTypeQueryContext.run({ operationType: "DDL" }, async () => actions());
  }
  /**
   * Executes a raw SQL query with both local and global cache support.
   * This method provides comprehensive caching for raw SQL queries:
   * - Local cache: Within the current invocation context
   * - Global cache: Cross-invocation caching using @forge/kvs
   *
   * @param query - The SQL query to execute (SQLWrapper or string)
   * @param cacheTtl - Optional cache TTL override (defaults to global cache TTL)
   * @returns Promise with query results
   * @example
   * ```typescript
   * // Using SQLWrapper with custom TTL
   * const result = await forgeSQL.executeCacheable(sql`SELECT * FROM users WHERE id = ${userId}`, 300);
   *
   * // Using string with default TTL
   * const result = await forgeSQL.executeCacheable("SELECT * FROM users WHERE status = 'active'");
   * ```
   */
  executeCacheable(query, cacheTtl) {
    return this.drizzle.executeQueryCacheable(query, cacheTtl);
  }
  /**
   * Creates a Common Table Expression (CTE) builder for complex queries.
   * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
   *
   * @returns WithBuilder for creating CTEs
   * @example
   * ```typescript
   * const withQuery = forgeSQL.$with('userStats').as(
   *   forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
   *     .from(users)
   *     .groupBy(users.id)
   * );
   * ```
   */
  get $with() {
    return this.drizzle.$with;
  }
  /**
   * Creates a query builder that uses Common Table Expressions (CTEs).
   * CTEs allow you to define temporary named result sets that exist within the scope of a single query.
   *
   * @param queries - Array of CTE queries created with $with()
   * @returns Query builder with CTE support
   * @example
   * ```typescript
   * const withQuery = forgeSQL.$with('userStats').as(
   *   forgeSQL.select({ userId: users.id, count: sql<number>`count(*)` })
   *     .from(users)
   *     .groupBy(users.id)
   * );
   *
   * const result = await forgeSQL.with(withQuery)
   *   .select({ userId: withQuery.userId, count: withQuery.count })
   *   .from(withQuery);
   * ```
   */
  with(...queries) {
    return this.drizzle.with(...queries);
  }
}
class ForgeSQLORM {
  ormInstance;
  constructor(options) {
    this.ormInstance = ForgeSQLORMImpl.getInstance(options);
  }
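  /*
   * Entry-point sketch: ForgeSQLORM wraps the ForgeSQLORMImpl singleton, so
   * repeated construction reuses one instance. The option values below are
   * illustrative; the defaults are listed in the ForgeSQLORMImpl constructor
   * above.
   *
   *   const forgeSQL = new ForgeSQLORM({ logRawSqlQuery: false, cacheTTL: 120 });
   */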
  /**
   * Executes a query and provides access to execution metadata with performance monitoring.
   * This method allows you to capture detailed information about query execution
   * including database execution time, response size, and query analysis capabilities.
   *
   * The method aggregates metrics across all database operations within the query function,
   * making it ideal for monitoring resolver performance and detecting performance issues.
   *
   * @template T - The return type of the query
   * @param query - A function that returns a Promise with the query result. Can contain multiple database operations.
   * @param onMetadata - Callback function that receives aggregated execution metadata
   * @param onMetadata.totalDbExecutionTime - Total database execution time across all operations in the query function (in milliseconds)
   * @param onMetadata.totalResponseSize - Total response size across all operations (in bytes)
   * @param onMetadata.printQueries - Function to analyze and print query execution plans from CLUSTER_STATEMENTS_SUMMARY
   * @returns Promise with the query result
   *
   * @example
   * ```typescript
   * // Basic usage with performance monitoring
   * const result = await forgeSQL.executeWithMetadata(
   *   async () => {
   *     const users = await forgeSQL.selectFrom(usersTable);
   *     const orders = await forgeSQL.selectFrom(ordersTable).where(eq(ordersTable.userId, usersTable.id));
   *     return { users, orders };
   *   },
   *   async (totalDbExecutionTime, totalResponseSize, printQueries) => {
   *     const threshold = 500; // ms baseline for this resolver
   *
   *     if (totalDbExecutionTime > threshold * 1.5) {
   *       console.warn(`[Performance Warning] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
   *       await printQueries(); // Analyze and print query execution plans
   *     } else if (totalDbExecutionTime > threshold) {
   *       console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
   *     }
   *
   *     console.log(`DB response size: ${totalResponseSize} bytes`);
   *   }
   * );
   * ```
   *
   * @example
   * ```typescript
   * // Resolver with performance monitoring
   * resolver.define("fetch", async (req: Request) => {
   *   try {
   *     return await forgeSQL.executeWithMetadata(
   *       async () => {
   *         // Resolver logic with multiple queries
   *         const users = await forgeSQL.selectFrom(demoUsers);
   *         const orders = await forgeSQL.selectFrom(demoOrders)
   *           .where(eq(demoOrders.userId, demoUsers.id));
   *         return { users, orders };
   *       },
   *       async (totalDbExecutionTime, totalResponseSize, printQueries) => {
   *         const threshold = 500; // ms baseline for this resolver
   *
   *         if (totalDbExecutionTime > threshold * 1.5) {
   *           console.warn(`[Performance Warning fetch] Resolver exceeded DB time: ${totalDbExecutionTime} ms`);
   *           await printQueries(); // Optionally log or capture diagnostics for further analysis
   *         } else if (totalDbExecutionTime > threshold) {
   *           console.debug(`[Performance Debug] High DB time: ${totalDbExecutionTime} ms`);
   *         }
   *
   *         console.log(`DB response size: ${totalResponseSize} bytes`);
   *       }
   *     );
   *   } catch (e) {
   *     const error = e?.cause?.debug?.sqlMessage ?? e?.cause;
   *     console.error(error, e);
   *     throw error;
   *   }
   * });
   * ```
   *
   * @note **Important**: When multiple resolvers are running concurrently, their query data may also appear in `printQueries()` analysis, as it queries the global `CLUSTER_STATEMENTS_SUMMARY` table.
   */
  async executeWithMetadata(query, onMetadata) {
    return this.ormInstance.executeWithMetadata(query, onMetadata);
  }
  /**
   * Proxies the `selectCacheable` method from `ForgeSQLORMImpl`.
   */
  selectCacheable(fields, cacheTTL) {
    return this.ormInstance.selectCacheable(fields, cacheTTL);
  }
  /**
   * Proxies the `selectDistinctCacheable` method from `ForgeSQLORMImpl`.
   */
  selectDistinctCacheable(fields, cacheTTL) {
    return this.ormInstance.selectDistinctCacheable(fields, cacheTTL);
  }
  /**
   * Creates a select query builder for all columns from a table with field aliasing support.
   * This is a convenience method that automatically selects all columns from the specified table.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @returns Select query builder with all table columns and field aliasing support
   * @example
   * ```typescript
   * const users = await forgeSQL.selectFrom(userTable).where(eq(userTable.id, 1));
   * ```
   */
  selectFrom(table) {
    return this.ormInstance.getDrizzleQueryBuilder().selectFrom(table);
  }
  /**
   * Creates a select distinct query builder for all columns from a table with field aliasing support.
   * This is a convenience method that automatically selects all distinct columns from the specified table.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @returns Select distinct query builder with all table columns and field aliasing support
   * @example
   * ```typescript
   * const uniqueUsers = await forgeSQL.selectDistinctFrom(userTable).where(eq(userTable.status, 'active'));
   * ```
   */
  selectDistinctFrom(table) {
    return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFrom(table);
  }
  /**
   * Creates a cacheable select query builder for all columns from a table with field aliasing and caching support.
   * This is a convenience method that automatically selects all columns from the specified table with caching enabled.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
   * @returns Select query builder with all table columns, field aliasing, and caching support
   * @example
   * ```typescript
   * const users = await forgeSQL.selectCacheableFrom(userTable, 300).where(eq(userTable.id, 1));
   * ```
   */
  selectCacheableFrom(table, cacheTTL) {
    return this.ormInstance.getDrizzleQueryBuilder().selectFromCacheable(table, cacheTTL);
  }
  /**
   * Creates a cacheable select distinct query builder for all columns from a table with field aliasing and caching support.
   * This is a convenience method that automatically selects all distinct columns from the specified table with caching enabled.
   *
   * @template T - The type of the table
   * @param table - The table to select from
   * @param cacheTTL - Optional cache TTL override (defaults to global cache TTL)
   * @returns Select distinct query builder with all table columns, field aliasing, and caching support
   * @example
   * ```typescript
   * const uniqueUsers = await forgeSQL.selectDistinctCacheableFrom(userTable, 300).where(eq(userTable.status, 'active'));
   * ```
   */
  selectDistinctCacheableFrom(table, cacheTTL) {
    return this.ormInstance.getDrizzleQueryBuilder().selectDistinctFromCacheable(table, cacheTTL);
  }
  /**
   * Proxies the `executeWithCacheContext` method from `ForgeSQLORMImpl`.
   */
  executeWithCacheContext(cacheContext) {
    return this.ormInstance.executeWithCacheContext(cacheContext);
  }
  /**
   * Proxies the `executeWithCacheContextAndReturnValue` method from `ForgeSQLORMImpl`.
   */
  executeWithCacheContextAndReturnValue(cacheContext) {
    return this.ormInstance.executeWithCacheContextAndReturnValue(cacheContext);
  }
  /**
   * Executes operations within a local cache context.
   * This provides in-memory caching for select queries within a single request scope.
   *
   * @param cacheContext - Function containing operations that will benefit from local caching
   * @returns Promise that resolves when all operations are complete
   */
  executeWithLocalContext(cacheContext) {
    return this.ormInstance.executeWithLocalContext(cacheContext);
  }
  /**
   * Executes operations within a local cache context and returns a value.
   * This provides in-memory caching for select queries within a single request scope.
   *
   * @param cacheContext - Function containing operations that will benefit from local caching
   * @returns Promise that resolves to the return value of the cacheContext function
   */
  executeWithLocalCacheContextAndReturnValue(cacheContext) {
    return this.ormInstance.executeWithLocalCacheContextAndReturnValue(cacheContext);
  }
  /**
   * Creates an insert query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
   *
   * @param table - The table to insert into
   * @returns Insert query builder (no versioning, no cache management)
   */
  insert(table) {
    return this.ormInstance.insert(table);
  }
  /**
   * Creates an insert query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned inserts, use `modifyWithVersioning().insert()` or `modifyWithVersioningAndEvictCache().insert()` instead.
   *
   * @param table - The table to insert into
   * @returns Insert query builder with automatic cache eviction (no versioning)
   */
  insertAndEvictCache(table) {
    return this.ormInstance.insertAndEvictCache(table);
  }
  /**
   * Creates an update query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
   *
   * @param table - The table to update
   * @returns Update query builder (no versioning, no cache management)
   */
  update(table) {
    return this.ormInstance.update(table);
  }
  /**
   * Creates an update query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned updates, use `modifyWithVersioning().updateById()` or `modifyWithVersioningAndEvictCache().updateById()` instead.
   *
   * @param table - The table to update
   * @returns Update query builder with automatic cache eviction (no versioning)
   */
  updateAndEvictCache(table) {
    return this.ormInstance.updateAndEvictCache(table);
  }
  /**
   * Creates a delete query builder.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
   *
   * @param table - The table to delete from
   * @returns Delete query builder (no versioning, no cache management)
   */
  delete(table) {
    return this.ormInstance.delete(table);
  }
  /**
   * Creates a delete query builder that automatically evicts cache after execution.
   *
   * ⚠️ **IMPORTANT**: This method does NOT support optimistic locking/versioning.
   * For versioned deletes, use `modifyWithVersioning().deleteById()` or `modifyWithVersioningAndEvictCache().deleteById()` instead.
   *
   * @param table - The table to delete from
   * @returns Delete query builder with automatic cache eviction (no versioning)
   */
  deleteAndEvictCache(table) {
    return this.ormInstance.deleteAndEvictCache(table);
  }
  /**
   * Creates a select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @returns {MySqlSelectBuilder<TSelection, MySql2PreparedQueryHKT>} A select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .select({user: users, order: orders})
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  select(fields) {
    return this.ormInstance.select(fields);
  }
  /**
   * Creates a distinct select query with unique field aliases to prevent field name collisions in joins.
   * This is particularly useful when working with Atlassian Forge SQL, which collapses fields with the same name in joined tables.
   *
   * @template TSelection - The type of the selected fields
   * @param {TSelection} fields - Object containing the fields to select, with table schemas as values
   * @returns {MySqlSelectBuilder<TSelection, MySqlRemotePreparedQueryHKT>} A distinct select query builder with unique field aliases
   * @throws {Error} If fields parameter is empty
   * @example
   * ```typescript
   * await forgeSQL
   *   .selectDistinct({user: users, order: orders})
   *   .from(orders)
   *   .innerJoin(users, eq(orders.userId, users.id));
   * ```
   */
  selectDistinct(fields) {
    return this.ormInstance.selectDistinct(fields);
  }
  /**
   * Proxies the `modifyWithVersioning` method from `ForgeSQLORMImpl`.
   * @returns Modify operations.
   */
  modifyWithVersioning() {
    return this.ormInstance.modifyWithVersioning();
  }
  /**
   * Proxies the `fetch` method from `ForgeSQLORMImpl`.
   * @returns Fetch operations.
   */
  fetch() {
    return this.ormInstance.fetch();
  }
  /**
   * Provides query analysis capabilities including EXPLAIN ANALYZE and slow query analysis.
   * @returns {SchemaAnalyzeForgeSql} Interface for analyzing query performance
   */
  analyze() {
    return this.ormInstance.analyze();
  }
  /**
   * Provides schema-level cacheable SQL operations with type safety.
   * @returns {ForgeSQLCacheOperations} Interface for executing schema-bound SQL queries
   */
  modifyWithVersioningAndEvictCache() {
    return this.ormInstance.modifyWithVersioningAndEvictCache();
  }
  /**
   * Returns a Drizzle query builder instance.
   *
   * @returns A Drizzle query builder instance for query construction only.
   */
  getDrizzleQueryBuilder() {
    return this.ormInstance.getDrizzleQueryBuilder();
  }
  /**
   * Executes a raw SQL query with local cache support.
   * This method provides local caching for raw SQL queries within the current invocation context.
   * Results are cached locally and will be returned from cache on subsequent identical queries.
   *
   * @param query - The SQL query to execute (SQLWrapper or string)
   * @returns Promise with query results
   * @example
   * ```typescript
   * // Using SQLWrapper
   * const result = await forgeSQL.execute(sql`SELECT * FROM users WHERE id = ${userId}`);
   *
   * // Using string
   * const result = await forgeSQL.execute("SELECT * FROM users WHERE status = 'active'");
   * ```
   */
  execute(query) {
    return this.ormInstance.execute(query);
  }
  /**
   * Executes a Data Definition Language (DDL) SQL query.
   * DDL operations include CREATE, ALTER, DROP, TRUNCATE, and other schema modification statements.
   *
   * This method is specifically designed for DDL operations and provides:
   * - Proper operation type context for DDL queries
   * - No caching (DDL operations should not be cached)
   * - Direct execution without query optimization
   *
   * @template T - The expected return type of the query result
   * @param query - The DDL SQL query to execute (SQLWrapper or string)
   * @returns Promise with query results
   * @throws {Error} If the DDL operation fails
   *
   * @example
   * ```typescript
   * // Create a new table
   * await forgeSQL.executeDDL(`
   *   CREATE TABLE users (
   *     id INT PRIMARY KEY AUTO_INCREMENT,
   *     name VARCHAR(255) NOT NULL,
   *     email VARCHAR(255) UNIQUE
   *   )
   * `);
   *
   * // Alter table structure
   * await forgeSQL.executeDDL(sql`
   *   ALTER TABLE users
   *   ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
   * `);
   *
   * // Drop a table
   * await forgeSQL.executeDDL("DROP TABLE IF EXISTS old_users");
   * ```
   */
  executeDDL(query) {
    return this.ormInstance.executeDDL(query);
  }
|
|
3533
|
-
/**
|
|
3534
|
-
* Executes a series of actions within a DDL operation context.
|
|
3535
|
-
* This method provides a way to execute regular SQL queries that should be treated
|
|
3536
|
-
* as DDL operations, ensuring proper operation type context for performance monitoring.
|
|
3537
|
-
*
|
|
3538
|
-
* This method is useful for:
|
|
3539
|
-
* - Executing regular SQL queries in DDL context for monitoring purposes
|
|
3540
|
-
* - Wrapping non-DDL operations that should be treated as DDL for analysis
|
|
3541
|
-
* - Ensuring proper operation type context for complex workflows
|
|
3542
|
-
* - Maintaining DDL operation context across multiple function calls
|
|
3543
|
-
*
|
|
3544
|
-
* @template T - The return type of the actions function
|
|
3545
|
-
* @param actions - Function containing SQL operations to execute in DDL context
|
|
3546
|
-
* @returns Promise that resolves to the return value of the actions function
|
|
3547
|
-
*
|
|
3548
|
-
* @example
|
|
3549
|
-
* ```typescript
|
|
3550
|
-
* // Execute regular SQL queries in DDL context for monitoring
|
|
3551
|
-
* await forgeSQL.executeDDLActions(async () => {
|
|
3552
|
-
* const slowQueries = await forgeSQL.execute(`
|
|
3553
|
-
* SELECT * FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
3554
|
-
* WHERE AVG_LATENCY > 1000000
|
|
3555
|
-
* `);
|
|
3556
|
-
* return slowQueries;
|
|
3557
|
-
* });
|
|
3558
|
-
*
|
|
3559
|
-
* // Execute complex analysis queries in DDL context
|
|
3560
|
-
* const result = await forgeSQL.executeDDLActions(async () => {
|
|
3561
|
-
* const tableInfo = await forgeSQL.execute("SHOW TABLES");
|
|
3562
|
-
* const performanceData = await forgeSQL.execute(`
|
|
3563
|
-
* SELECT * FROM INFORMATION_SCHEMA.CLUSTER_STATEMENTS_SUMMARY_HISTORY
|
|
3564
|
-
* WHERE SUMMARY_END_TIME > DATE_SUB(NOW(), INTERVAL 1 HOUR)
|
|
3565
|
-
* `);
|
|
3566
|
-
* return { tableInfo, performanceData };
|
|
3567
|
-
* });
|
|
3568
|
-
*
|
|
3569
|
-
* // Execute monitoring queries with error handling
|
|
3570
|
-
* try {
|
|
3571
|
-
* await forgeSQL.executeDDLActions(async () => {
|
|
3572
|
-
* const metrics = await forgeSQL.execute(`
|
|
3573
|
-
* SELECT COUNT(*) as query_count
|
|
3574
|
-
* FROM INFORMATION_SCHEMA.STATEMENTS_SUMMARY
|
|
3575
|
-
* `);
|
|
3576
|
-
* console.log(`Total queries: ${metrics[0].query_count}`);
|
|
3577
|
-
* });
|
|
3578
|
-
* } catch (error) {
|
|
3579
|
-
* console.error("Monitoring query failed:", error);
|
|
3580
|
-
* }
|
|
3581
|
-
* ```
|
|
3582
|
-
*/
|
|
3583
|
-
executeDDLActions(actions) {
|
|
3584
|
-
return this.ormInstance.executeDDLActions(actions);
|
|
3585
|
-
}
|
|
3586
|
-
/**
|
|
3587
|
-
* Executes a raw SQL query with both local and global cache support.
|
|
3588
|
-
* This method provides comprehensive caching for raw SQL queries:
|
|
3589
|
-
* - Local cache: Within the current invocation context
|
|
3590
|
-
* - Global cache: Cross-invocation caching using @forge/kvs
|
|
3591
|
-
*
|
|
3592
|
-
* @param query - The SQL query to execute (SQLWrapper or string)
|
|
3593
|
-
* @param cacheTtl - Optional cache TTL override (defaults to global cache TTL)
|
|
3594
|
-
* @returns Promise with query results
|
|
3595
|
-
* @example
|
|
3596
|
-
* ```typescript
|
|
3597
|
-
* // Using SQLWrapper with custom TTL
|
|
3598
|
-
* const result = await forgeSQL.executeCacheable(sql`SELECT * FROM users WHERE id = ${userId}`, 300);
|
|
3599
|
-
*
|
|
3600
|
-
* // Using string with default TTL
|
|
3601
|
-
* const result = await forgeSQL.executeCacheable("SELECT * FROM users WHERE status = 'active'");
|
|
3602
|
-
* ```
|
|
3603
|
-
*/
|
|
3604
|
-
executeCacheable(query, cacheTtl) {
|
|
3605
|
-
return this.ormInstance.executeCacheable(query, cacheTtl);
|
|
3606
|
-
}
|
|
3607
|
-
/**
|
|
3608
|
-
* Creates a Common Table Expression (CTE) builder for complex queries.
|
|
3609
|
-
* CTEs allow you to define temporary named result sets that exist within the scope of a single query.
|
|
3610
|
-
*
|
|
3611
|
-
* @returns WithBuilder for creating CTEs
|
|
3612
|
-
* @example
|
|
3613
|
-
* ```typescript
|
|
3614
|
-
* const withQuery = forgeSQL.$with('userStats').as(
|
|
3615
|
-
* forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
|
|
3616
|
-
* .from(users)
|
|
3617
|
-
* .groupBy(users.id)
|
|
3618
|
-
* );
|
|
3619
|
-
* ```
|
|
3620
|
-
*/
|
|
3621
|
-
get $with() {
|
|
3622
|
-
return this.ormInstance.getDrizzleQueryBuilder().$with;
|
|
3623
|
-
}
|
|
3624
|
-
/**
|
|
3625
|
-
* Creates a query builder that uses Common Table Expressions (CTEs).
|
|
3626
|
-
* CTEs allow you to define temporary named result sets that exist within the scope of a single query.
|
|
3627
|
-
*
|
|
3628
|
-
* @param queries - Array of CTE queries created with $with()
|
|
3629
|
-
* @returns Query builder with CTE support
|
|
3630
|
-
* @example
|
|
3631
|
-
* ```typescript
|
|
3632
|
-
* const withQuery = forgeSQL.$with('userStats').as(
|
|
3633
|
-
* forgeSQL.getDrizzleQueryBuilder().select({ userId: users.id, count: sql<number>`count(*)` })
|
|
3634
|
-
* .from(users)
|
|
3635
|
-
* .groupBy(users.id)
|
|
3636
|
-
* );
|
|
3637
|
-
*
|
|
3638
|
-
* const result = await forgeSQL.with(withQuery)
|
|
3639
|
-
* .select({ userId: withQuery.userId, count: withQuery.count })
|
|
3640
|
-
* .from(withQuery);
|
|
3641
|
-
* ```
|
|
3642
|
-
*/
|
|
3643
|
-
with(...queries) {
|
|
3644
|
-
return this.ormInstance.getDrizzleQueryBuilder().with(...queries);
|
|
3645
|
-
}
|
|
3646
|
-
}
|
|
3647
|
-
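// The four custom column types below bridge Drizzle schemas and the Forge SQL
// driver, which exchanges DATETIME/TIMESTAMP/DATE/TIME values as formatted
// strings rather than native Date objects. Minimal usage sketch (the `events`
// table and its columns are assumptions, not part of this package):
//
//   const events = mysqlTable("events", {
//     id: int("id").primaryKey(),
//     createdAt: forgeTimestampString("created_at"),
//   });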
const forgeDateTimeString = customType({
  dataType() {
    return "datetime";
  },
  toDriver(value) {
    return formatDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS", false);
  },
  fromDriver(value) {
    const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
    return parseDateTime(value, format);
  }
});
const forgeTimestampString = customType({
  dataType() {
    return "timestamp";
  },
  toDriver(value) {
    return formatDateTime(value, "yyyy-MM-dd' 'HH:mm:ss.SSS", true);
  },
  fromDriver(value) {
    const format = "yyyy-MM-dd' 'HH:mm:ss.SSS";
    return parseDateTime(value, format);
  }
});
const forgeDateString = customType({
  dataType() {
    return "date";
  },
  toDriver(value) {
    return formatDateTime(value, "yyyy-MM-dd", false);
  },
  fromDriver(value) {
    const format = "yyyy-MM-dd";
    return parseDateTime(value, format);
  }
});
const forgeTimeString = customType({
  dataType() {
    return "time";
  },
  toDriver(value) {
    return formatDateTime(value, "HH:mm:ss.SSS", false);
  },
  fromDriver(value) {
    return parseDateTime(value, "HH:mm:ss.SSS");
  }
});
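// Drops every user table and, with { sequence: true }, any generated sequences
// as well; compare dropTableSchemaMigrations below, which passes
// { sequence: false } and drops tables only. Intended to back a destructive
// schema-reset web trigger.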
async function dropSchemaMigrations() {
  try {
    const tables = await getTables();
    const dropStatements = generateDropTableStatements(tables, { sequence: true, table: true });
    for (const statement of dropStatements) {
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
    return getHttpResponse(
      200,
      "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
    );
  } catch (error) {
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
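// Web-trigger handler that provisions the database and applies pending schema
// migrations. Typical wiring (illustrative sketch; the migration name and SQL
// are assumptions):
//
//   export const handler = () =>
//     applySchemaMigrations((runner) =>
//       runner.enqueue("v1_create_users", "CREATE TABLE users (id INT PRIMARY KEY)")
//     );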
const applySchemaMigrations = async (migration) => {
  try {
    if (typeof migration !== "function") {
      throw new Error("migration is not a function");
    }
    console.debug("Provisioning the database");
    await sql._provision();
    console.debug("Running schema migrations");
    const migrations2 = await migration(migrationRunner);
    const successfulMigrations = await migrations2.run();
    console.debug("Migrations applied:", successfulMigrations);
    const migrationList = await migrationRunner.list();
    let migrationHistory = "No migrations found";
    if (Array.isArray(migrationList) && migrationList.length > 0) {
      const sortedMigrations = migrationList.toSorted(
        (a, b) => a.migratedAt.getTime() - b.migratedAt.getTime()
      );
      migrationHistory = sortedMigrations.map((y) => `${y.id}, ${y.name}, ${y.migratedAt.toUTCString()}`).join("\n");
    }
    console.debug("Migrations history:\nid, name, migrated_at\n", migrationHistory);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 200,
      statusText: "OK",
      body: "Migrations successfully executed"
    };
  } catch (error) {
    const errorMessage = error?.cause?.context?.debug?.sqlMessage ?? error?.cause?.context?.debug?.message ?? error?.debug?.context?.sqlMessage ?? error?.debug?.context?.message ?? error.message ?? "Unknown error occurred";
    console.error("Error during migration:", errorMessage);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 500,
      statusText: "Internal Server Error",
      body: error instanceof Error ? errorMessage : "Unknown error during migration"
    };
  }
};
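// Web trigger that reconstructs the current schema as replayable DDL: it runs
// SHOW CREATE TABLE for each user table, rewrites every statement to
// CREATE TABLE IF NOT EXISTS, and brackets the batch with
// SET foreign_key_checks = 0 / 1 so the statements can run in any order.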
async function fetchSchemaWebTrigger() {
  try {
    const tables = await getTables();
    const createTableStatements = await generateCreateTableStatements(tables);
    const sqlStatements = wrapWithForeignKeyChecks(createTableStatements);
    return getHttpResponse(200, sqlStatements.join(";\n"));
  } catch (error) {
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
async function generateCreateTableStatements(tables) {
  const statements = [];
  for (const table of tables) {
    const createTableResult = await sql.executeDDL(`SHOW CREATE TABLE "${table}"`);
    const createTableStatements = createTableResult.rows.filter((row) => !isSystemTable(row.Table)).map((row) => formatCreateTableStatement(row["Create Table"]));
    statements.push(...createTableStatements);
  }
  return statements;
}
function isSystemTable(tableName) {
  return forgeSystemTables.some((st) => getTableName(st) === tableName);
}
function formatCreateTableStatement(statement) {
  return statement.replace(/"/g, "").replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS");
}
function wrapWithForeignKeyChecks(statements) {
  return ["SET foreign_key_checks = 0", ...statements, "SET foreign_key_checks = 1"];
}
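// Table-only variant of dropSchemaMigrations: passes { sequence: false },
// so sequences are left in place while all tables (and their data) are dropped.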
async function dropTableSchemaMigrations() {
  try {
    const tables = await getTables();
    const dropStatements = generateDropTableStatements(tables, { sequence: false, table: true });
    for (const statement of dropStatements) {
      console.debug(`execute DDL: ${statement}`);
      await sql.executeDDL(statement);
    }
    return getHttpResponse(
      200,
      "⚠️ All data in these tables has been permanently deleted. This operation cannot be undone."
    );
  } catch (error) {
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
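// Scheduled trigger that evicts expired entries from the global query cache,
// falling back to the default cache-entity layout below when no options are
// given. Typical wiring (illustrative sketch; the handler name is an assumption):
//
//   export const cacheCleanup = () => clearCacheSchedulerTrigger();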
const clearCacheSchedulerTrigger = async (options) => {
  try {
    const newOptions = options ?? {
      logRawSqlQuery: false,
      disableOptimisticLocking: false,
      cacheTTL: 120,
      cacheEntityName: "cache",
      cacheEntityQueryName: "sql",
      cacheEntityExpirationName: "expiration",
      cacheEntityDataName: "data"
    };
    if (!newOptions.cacheEntityName) {
      throw new Error("cacheEntityName is not configured");
    }
    await clearExpiredCache(newOptions);
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 200,
      statusText: "OK",
      body: JSON.stringify({
        success: true,
        message: "Cache cleanup completed successfully",
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      })
    };
  } catch (error) {
    // JSON.stringify(error) yields "{}" for Error instances, so log the message instead.
    console.error(
      "Error during cache cleanup:",
      error instanceof Error ? error.message : JSON.stringify(error)
    );
    return {
      headers: { "Content-Type": ["application/json"] },
      statusCode: 500,
      statusText: "Internal Server Error",
      body: JSON.stringify({
        success: false,
        error: error instanceof Error ? error.message : "Unknown error during cache cleanup",
        timestamp: (/* @__PURE__ */ new Date()).toISOString()
      })
    };
  }
};
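// Scheduled trigger that reports slow queries observed over the last
// options.hours hours (default 1) via slowQueryPerHours, using a timeout of
// options.timeout (default 3000, presumably milliseconds), and returns the
// findings as a JSON HTTP response.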
async function slowQuerySchedulerTrigger(forgeSQLORM, options) {
  try {
    return getHttpResponse(
      200,
      JSON.stringify(
        await slowQueryPerHours(forgeSQLORM, options?.hours ?? 1, options?.timeout ?? 3e3)
      )
    );
  } catch (error) {
    const errorMessage = error?.debug?.sqlMessage ?? error?.debug?.message ?? error.message ?? "Unknown error occurred";
    console.error(errorMessage);
    return getHttpResponse(500, errorMessage);
  }
}
const getHttpResponse = (statusCode, body) => {
  let statusText;
  if (statusCode === 200) {
    statusText = "OK";
  } else if (statusCode === 500) {
    // Callers in this module use 500 for failures, so report it accurately
    // instead of falling through to "Bad Request".
    statusText = "Internal Server Error";
  } else {
    statusText = "Bad Request";
  }
  return {
    headers: { "Content-Type": ["application/json"] },
    statusCode,
    statusText,
    body
  };
};
export {
  ForgeSQLCrudOperations,
  ForgeSQLSelectOperations,
  applyFromDriverTransform,
  applySchemaMigrations,
  clearCacheSchedulerTrigger,
  clusterStatementsSummary,
  clusterStatementsSummaryHistory,
  ForgeSQLORM as default,
  dropSchemaMigrations,
  dropTableSchemaMigrations,
  fetchSchemaWebTrigger,
  forgeDateString,
  forgeDateTimeString,
  forgeDriver,
  forgeSystemTables,
  forgeTimeString,
  forgeTimestampString,
  formatDateTime,
  formatLimitOffset,
  generateDropTableStatements,
  getHttpResponse,
  getPrimaryKeys,
  getTableMetadata,
  getTables,
  isUpdateQueryResponse,
  mapSelectAllFieldsToAlias,
  mapSelectFieldsWithAlias,
  migrations,
  nextVal,
  parseDateTime,
  patchDbWithSelectAliased,
  printQueriesWithPlan,
  slowQuery,
  slowQueryPerHours,
  slowQuerySchedulerTrigger,
  statementsSummary,
  statementsSummaryHistory,
  withTidbHint,
  withTimeout
};
//# sourceMappingURL=ForgeSQLORM.mjs.map