odac 1.4.6 → 1.4.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +37 -0
- package/client/odac.js +1 -1
- package/docs/ai/README.md +1 -1
- package/docs/ai/skills/SKILL.md +1 -1
- package/docs/ai/skills/backend/database.md +103 -12
- package/docs/ai/skills/backend/ipc.md +71 -12
- package/docs/ai/skills/backend/views.md +6 -1
- package/docs/backend/00-getting-started/01-quick-start.md +77 -0
- package/docs/backend/07-views/03-template-syntax.md +5 -0
- package/docs/backend/07-views/04-request-data.md +13 -0
- package/docs/backend/08-database/05-write-behind-cache.md +230 -0
- package/docs/backend/13-utilities/02-ipc.md +117 -0
- package/docs/index.json +10 -0
- package/package.json +1 -1
- package/src/Database/WriteBuffer.js +605 -0
- package/src/Database.js +32 -1
- package/src/Ipc.js +343 -81
- package/src/Odac.js +2 -1
- package/src/Storage.js +4 -2
- package/src/View.js +33 -18
- package/test/Database/WriteBuffer/_recoverFromCheckpoint.test.js +207 -0
- package/test/Database/WriteBuffer/buffer.test.js +143 -0
- package/test/Database/WriteBuffer/flush.test.js +192 -0
- package/test/Database/WriteBuffer/get.test.js +72 -0
- package/test/Database/WriteBuffer/increment.test.js +118 -0
- package/test/Database/WriteBuffer/update.test.js +178 -0
- package/test/Ipc/hset.test.js +59 -0
- package/test/Ipc/incrBy.test.js +65 -0
- package/test/Ipc/lock.test.js +62 -0
- package/test/Ipc/rpush.test.js +68 -0
- package/test/Ipc/sadd.test.js +68 -0
- package/test/View/addNavigateAttribute.test.js +53 -0
- package/test/View/print.test.js +45 -1
- package/test/View/tags.test.js +132 -0
|
@@ -0,0 +1,605 @@
|
|
|
1
|
+
'use strict'
|
|
2
|
+
const cluster = require('node:cluster')
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Write-Behind Cache with Write Coalescing for ODAC Database layer.
|
|
6
|
+
*
|
|
7
|
+
* Why: Individual UPDATE SET col = col + 1 per request is expensive at scale.
|
|
8
|
+
* This module buffers increments (counters), updates (last-write-wins), and inserts (queues)
|
|
9
|
+
* via the Ipc layer, then flushes them to the database in batches.
|
|
10
|
+
*
|
|
11
|
+
* Architecture: Fully delegated to Odac.Ipc for state management.
|
|
12
|
+
* - Memory driver: Primary process holds state in Maps via cluster IPC (single machine).
|
|
13
|
+
* - Redis driver: All state lives in Redis — works across horizontal load balancers.
|
|
14
|
+
* - On both drivers, the API is identical and WriteBuffer is driver-agnostic.
|
|
15
|
+
* - Distributed lock (Ipc.lock) ensures only one process flushes at a time.
|
|
16
|
+
* - LMDB checkpoints (memory driver only) protect against crash data loss.
|
|
17
|
+
*
|
|
18
|
+
* Key namespaces in Ipc:
|
|
19
|
+
* wb:c:{connection}:{table}:{where}:{column} — counter delta (number via incrBy)
|
|
20
|
+
* wb:b:{connection}:{table}:{where}:{column} — counter base from DB (number via set)
|
|
21
|
+
* wb:u:{connection}:{table}:{where} — update fields (hash via hset)
|
|
22
|
+
* wb:q:{connection}:{table} — insert queue (list via rpush)
|
|
23
|
+
* wb:idx:counters — set of active counter keys
|
|
24
|
+
* wb:idx:updates — set of active update keys
|
|
25
|
+
* wb:idx:queues — set of active queue keys
|
|
26
|
+
* wb:lock:flush — distributed flush lock
|
|
27
|
+
*
|
|
28
|
+
* API (exposed via Database.js proxy):
|
|
29
|
+
* Odac.DB.posts.buffer.where(id).increment('views', 5)
|
|
30
|
+
* Odac.DB.posts.buffer.where(id).get('views')
|
|
31
|
+
* Odac.DB.posts.buffer.where(id).update({active_date: new Date()})
|
|
32
|
+
* Odac.DB.posts.buffer.insert(row)
|
|
33
|
+
* Odac.DB.posts.buffer.flush()
|
|
34
|
+
* Odac.DB.buffer.flush() — global flush
|
|
35
|
+
*/
|
|
36
|
+
|
|
37
|
+
// Defaults for Odac.Config.buffer — every field may be overridden by user config
// (merged in init() via {...DEFAULT_CONFIG, ...Odac.Config.buffer}).
const DEFAULT_CONFIG = {
    flushInterval: 5000,       // ms between periodic background flushes (Primary only)
    checkpointInterval: 30000, // ms between LMDB crash-safety checkpoints (memory driver only)
    maxQueueSize: 10000,       // insert-queue length that triggers an immediate auto-flush
    primaryKey: 'id',          // column assumed when a scalar `where` value is given
    insertBatchSize: 1000      // max rows per INSERT statement during queue flush
}
|
|
44
|
+
|
|
45
|
+
class WriteBuffer {
    constructor() {
        this._flushTimer = null        // periodic DB flush interval (Primary only)
        this._checkpointTimer = null   // periodic LMDB checkpoint interval (Primary only)
        this._config = {}
        this._initialized = false
    }

    // ─── Lifecycle ──────────────────────────────────────────────

    /**
     * Why: Initializes the WriteBuffer. Called from Database.init() after Ipc is ready.
     * Primary: recovers LMDB checkpoint, starts flush/checkpoint timers.
     * All processes: stores connection references for flush DB writes.
     */
    async init(connections) {
        if (this._initialized) return
        this._initialized = true

        this._connections = connections
        this._config = {...DEFAULT_CONFIG, ...Odac.Config.buffer}

        if (cluster.isPrimary) {
            await this._recoverFromCheckpoint()
            this._startFlushTimer()
            this._startCheckpointTimer()
        }
    }

    // ─── Public API ─────────────────────────────────────────────

    /**
     * Atomically increments a counter by delta (default 1).
     * Returns the current total (base + accumulated delta).
     *
     * Why: Uses Ipc.incrBy for atomic delta accumulation — safe across workers AND servers.
     * Base is fetched from DB once and cached in Ipc for subsequent reads.
     */
    async increment(connection, table, where, column, delta = 1) {
        const key = this._counterKey(connection, table, where, column)

        // Atomic increment — returns new total delta
        const totalDelta = await Odac.Ipc.incrBy(`wb:c:${key}`, delta)

        // Track this key in the counter index for flush discovery
        await Odac.Ipc.sadd('wb:idx:counters', key)

        // Fetch or read cached base from DB
        const base = await this._fetchBase(connection, table, where, column)
        return base + totalDelta
    }

    /**
     * Returns the current value of a buffered counter (base + pending delta).
     *
     * Why: Reads from Ipc — returns accurate value even in horizontal scaling (Redis driver).
     * For memory driver, reads from Primary's store via cluster IPC.
     */
    async get(connection, table, where, column) {
        const key = this._counterKey(connection, table, where, column)
        const totalDelta = (await Odac.Ipc.get(`wb:c:${key}`)) || 0
        const base = await this._fetchBase(connection, table, where, column)
        return base + totalDelta
    }

    /**
     * Buffers column updates for a row using last-write-wins coalescing.
     *
     * Why: Ipc.hset merges fields atomically — multiple workers updating different columns
     * on the same row collapse into a single UPDATE query at flush time.
     */
    async update(connection, table, where, data) {
        const key = this._updateKey(connection, table, where)
        await Odac.Ipc.hset(`wb:u:${key}`, data)
        await Odac.Ipc.sadd('wb:idx:updates', key)
        return true
    }

    /**
     * Buffers a row for batch INSERT. Auto-flushes when maxQueueSize is reached.
     *
     * Why: Ipc.rpush appends to a shared list — multiple workers/servers contribute rows
     * that are drained to the database in a single INSERT batch.
     */
    async insert(connection, table, row) {
        const queueKey = `${connection}:${table}`
        const length = await Odac.Ipc.rpush(`wb:q:${queueKey}`, row)
        await Odac.Ipc.sadd('wb:idx:queues', queueKey)

        // Auto-flush when queue exceeds threshold. Fire-and-forget: the caller's
        // insert path must not block on the DB write. Fix: errors are now logged
        // instead of silently swallowed (`.catch(() => {})`).
        if (length >= this._config.maxQueueSize) {
            void this._flushQueues(connection, table).catch(err => {
                console.error('\x1b[31m[WriteBuffer]\x1b[0m Auto-flush error:', err.message)
            })
        }

        return true
    }

    /**
     * Force-flushes all pending data (or a specific table) to the database.
     *
     * Why: Distributed lock ensures exactly one process flushes at a time,
     * preventing duplicate writes in horizontal scaling scenarios.
     */
    async flush(connection, table) {
        const acquired = await Odac.Ipc.lock('wb:lock:flush', 10)
        if (!acquired) return

        try {
            // Fix: only clear the LMDB checkpoint when every phase committed.
            // Previously the checkpoint was cleared unconditionally; data whose flush
            // failed (per-group errors are caught and kept in Ipc) then survived only
            // in RAM and was lost if the process crashed before the next checkpoint.
            const countersOk = await this._flushCounters(connection, table)
            const updatesOk = await this._flushUpdates(connection, table)
            const queuesOk = await this._flushQueues(connection, table)

            if (countersOk && updatesOk && queuesOk) {
                this._clearCheckpoint(connection, table)
            }
        } finally {
            await Odac.Ipc.unlock('wb:lock:flush')
        }
    }

    // ─── Flush Logic ───────────────────────────────────────────

    /**
     * Why: Groups index keys by connection+table so each group can be flushed in a
     * single transaction. Shared by _flushCounters and _flushUpdates (was duplicated).
     * `parse` maps a raw index key to {connection, table, ...} or null to skip it.
     */
    _groupByTable(keys, parse, filterConnection, filterTable) {
        const grouped = new Map()

        for (const key of keys) {
            const parsed = parse(key)
            if (!parsed) continue
            if (filterConnection && parsed.connection !== filterConnection) continue
            if (filterTable && parsed.table !== filterTable) continue

            const groupKey = `${parsed.connection}:${parsed.table}`
            if (!grouped.has(groupKey)) {
                grouped.set(groupKey, {connection: parsed.connection, table: parsed.table, entries: []})
            }
            grouped.get(groupKey).entries.push({key, ...parsed})
        }

        return grouped
    }

    /**
     * Flushes buffered counter deltas as `col = col + delta` UPDATEs, one
     * transaction per connection+table group. Returns true when every group
     * committed (or there was nothing to do); false if any group failed.
     */
    async _flushCounters(filterConnection, filterTable) {
        const keys = await Odac.Ipc.smembers('wb:idx:counters')
        if (keys.length === 0) return true

        // Group by connection+table for transaction batching
        const grouped = this._groupByTable(keys, key => this._parseCounterKey(key), filterConnection, filterTable)
        let ok = true

        for (const [, group] of grouped) {
            const knex = this._connections[group.connection]
            if (!knex) continue

            try {
                // Read all deltas for this group BEFORE the transaction
                const deltas = new Map()
                for (const entry of group.entries) {
                    const delta = (await Odac.Ipc.get(`wb:c:${entry.key}`)) || 0
                    if (delta !== 0) deltas.set(entry.key, {delta, ...entry})
                }

                if (deltas.size === 0) continue

                await knex.transaction(async trx => {
                    for (const [, entry] of deltas) {
                        const whereClause = this._normalizeWhere(entry.where)
                        await trx(group.table)
                            .where(whereClause)
                            .update({
                                [entry.column]: trx.raw(`?? + ?`, [entry.column, entry.delta])
                            })
                    }
                })

                // After successful commit: subtract flushed deltas, update bases.
                // decrBy (not del) preserves increments that raced in since the read above.
                for (const [entryKey, entry] of deltas) {
                    await Odac.Ipc.decrBy(`wb:c:${entryKey}`, entry.delta)

                    // Update cached base: base += flushed delta
                    const baseKey = `wb:b:${entryKey}`
                    const currentBase = (await Odac.Ipc.get(baseKey)) || 0
                    await Odac.Ipc.set(baseKey, currentBase + entry.delta)

                    // If counter is now 0, clean up
                    const remaining = (await Odac.Ipc.get(`wb:c:${entryKey}`)) || 0
                    if (remaining === 0) {
                        await Odac.Ipc.del(`wb:c:${entryKey}`)
                        await Odac.Ipc.srem('wb:idx:counters', entryKey)
                    }
                }
            } catch (err) {
                // Keep deltas in Ipc on failure — will retry next cycle
                ok = false
                console.error(`\x1b[31m[WriteBuffer]\x1b[0m Counter flush failed for ${group.table}:`, err.message)
            }
        }

        return ok
    }

    /**
     * Flushes coalesced row updates (last-write-wins hashes), one transaction per
     * connection+table group. Returns true when all groups committed; false otherwise.
     */
    async _flushUpdates(filterConnection, filterTable) {
        const keys = await Odac.Ipc.smembers('wb:idx:updates')
        if (keys.length === 0) return true

        const grouped = this._groupByTable(keys, key => this._parseUpdateKey(key), filterConnection, filterTable)
        let ok = true

        for (const [, group] of grouped) {
            const knex = this._connections[group.connection]
            if (!knex) continue

            try {
                // Read all pending update hashes
                const updates = new Map()
                for (const entry of group.entries) {
                    const data = await Odac.Ipc.hgetall(`wb:u:${entry.key}`)
                    if (data && Object.keys(data).length > 0) {
                        updates.set(entry.key, {data, ...entry})
                    }
                }

                if (updates.size === 0) continue

                await knex.transaction(async trx => {
                    for (const [, entry] of updates) {
                        const whereClause = this._normalizeWhere(entry.where)
                        await trx(group.table).where(whereClause).update(entry.data)
                    }
                })

                // After successful commit: clear flushed update hashes
                for (const [entryKey] of updates) {
                    await Odac.Ipc.del(`wb:u:${entryKey}`)
                    await Odac.Ipc.srem('wb:idx:updates', entryKey)
                }
            } catch (err) {
                // Keep update hashes in Ipc on failure — will retry next cycle
                ok = false
                console.error(`\x1b[31m[WriteBuffer]\x1b[0m Update flush failed for ${group.table}:`, err.message)
            }
        }

        return ok
    }

    /**
     * Drains buffered insert queues into chunked batch INSERTs, one transaction per
     * queue. Returns true when every queue committed; false if any failed.
     */
    async _flushQueues(filterConnection, filterTable) {
        const queueKeys = await Odac.Ipc.smembers('wb:idx:queues')
        if (queueKeys.length === 0) return true

        let ok = true

        for (const queueKey of queueKeys) {
            const [connection, table] = queueKey.split(':')
            if (filterConnection && connection !== filterConnection) continue
            if (filterTable && table !== filterTable) continue

            const knex = this._connections[connection]
            if (!knex) continue

            // Atomic drain — reads all rows and clears the list in one step
            const rows = await Odac.Ipc.lrangeAndDel(`wb:q:${queueKey}`)
            if (rows.length === 0) continue

            try {
                // Wrap all chunks in a single transaction to prevent partial-insert duplicates
                await knex.transaction(async trx => {
                    for (let i = 0; i < rows.length; i += this._config.insertBatchSize) {
                        const chunk = rows.slice(i, i + this._config.insertBatchSize)
                        await trx(table).insert(chunk)
                    }
                })

                // Clean up index after successful insert
                await Odac.Ipc.srem('wb:idx:queues', queueKey)
            } catch (err) {
                // Re-queue failed rows by pushing them back
                ok = false
                for (const row of rows) {
                    await Odac.Ipc.rpush(`wb:q:${queueKey}`, row)
                }
                console.error(`\x1b[31m[WriteBuffer]\x1b[0m Queue flush failed for ${table}:`, err.message)
            }
        }

        return ok
    }

    // ─── DB Base Fetch ─────────────────────────────────────────

    /**
     * Why: Fetches the current DB value for a counter column and caches it in Ipc.
     * Subsequent reads use the cached base — no DB query per get()/increment().
     * Cache is invalidated after flush (base += flushed delta).
     */
    async _fetchBase(connection, table, where, column) {
        const key = this._counterKey(connection, table, where, column)
        const baseKey = `wb:b:${key}`

        const cached = await Odac.Ipc.get(baseKey)
        if (cached != null) return cached

        const knex = this._connections[connection]
        if (!knex) {
            // Unknown connection is a configuration state, not a transient fault:
            // cache 0 to avoid a lookup per call.
            await Odac.Ipc.set(baseKey, 0)
            return 0
        }

        try {
            const whereClause = this._normalizeWhere(where)
            const row = await knex(table).where(whereClause).select(column).first()
            const value = row ? Number(row[column]) || 0 : 0
            await Odac.Ipc.set(baseKey, value)
            return value
        } catch (err) {
            console.error(`\x1b[31m[WriteBuffer]\x1b[0m Base fetch failed for ${table}.${column}:`, err.message)
            // Fix: do NOT cache 0 on a transient DB error. Caching would poison the
            // base permanently (flush later adds deltas on top of the wrong base),
            // keeping totals wrong even after the DB recovers. Retry on next call.
            return 0
        }
    }

    // ─── LMDB Checkpoint (Memory Driver Only) ──────────────────

    /**
     * Why: When Ipc uses memory driver, all state lives in the Primary's RAM.
     * A crash loses everything. Periodic LMDB checkpoint provides crash safety.
     * Skipped when Ipc uses Redis — Redis itself is the persistent store.
     */
    async _writeCheckpoint() {
        if (Odac.Ipc?.config?.driver === 'redis') return
        if (!Odac.Storage?.isReady()) return

        // Counters
        const counterKeys = await Odac.Ipc.smembers('wb:idx:counters')
        for (const key of counterKeys) {
            const delta = (await Odac.Ipc.get(`wb:c:${key}`)) || 0
            if (delta === 0) continue
            const base = (await Odac.Ipc.get(`wb:b:${key}`)) || 0
            Odac.Storage.put(`wb:c:${key}`, {delta, base})
        }

        // Updates
        const updateKeys = await Odac.Ipc.smembers('wb:idx:updates')
        for (const key of updateKeys) {
            const data = await Odac.Ipc.hgetall(`wb:u:${key}`)
            if (data && Object.keys(data).length > 0) {
                Odac.Storage.put(`wb:u:${key}`, data)
            }
        }

        // Queues
        const queueKeys = await Odac.Ipc.smembers('wb:idx:queues')
        for (const key of queueKeys) {
            const rows = await Odac.Ipc.lrange(`wb:q:${key}`, 0, -1)
            if (rows.length > 0) {
                Odac.Storage.put(`wb:q:${key}`, rows)
            }
        }
    }

    /**
     * Removes checkpointed entries from LMDB after a fully successful flush,
     * optionally restricted to one connection/table. Memory driver only.
     * Note: `~` sorts after ASCII key characters, bounding each prefix range.
     */
    _clearCheckpoint(filterConnection, filterTable) {
        if (Odac.Ipc?.config?.driver === 'redis') return
        if (!Odac.Storage?.isReady()) return

        for (const {key} of Odac.Storage.getRange({start: 'wb:c:', end: 'wb:c:~'})) {
            if (filterConnection || filterTable) {
                const parsed = this._parseCounterKey(key.slice(5))
                if (parsed && filterConnection && parsed.connection !== filterConnection) continue
                if (parsed && filterTable && parsed.table !== filterTable) continue
            }
            Odac.Storage.remove(key)
        }

        for (const {key} of Odac.Storage.getRange({start: 'wb:u:', end: 'wb:u:~'})) {
            if (filterConnection || filterTable) {
                const parsed = this._parseUpdateKey(key.slice(5))
                if (parsed && filterConnection && parsed.connection !== filterConnection) continue
                if (parsed && filterTable && parsed.table !== filterTable) continue
            }
            Odac.Storage.remove(key)
        }

        for (const {key} of Odac.Storage.getRange({start: 'wb:q:', end: 'wb:q:~'})) {
            if (filterConnection || filterTable) {
                const queueMeta = key.slice(5)
                const [conn, tbl] = queueMeta.split(':')
                if (filterConnection && conn !== filterConnection) continue
                if (filterTable && tbl !== filterTable) continue
            }
            Odac.Storage.remove(key)
        }
    }

    /**
     * Why: On startup, recover any buffered data that was checkpointed before a crash.
     * Writes recovered data back into Ipc so it will be flushed in the next cycle.
     * Memory driver only — Redis state survives crashes natively.
     */
    async _recoverFromCheckpoint() {
        if (Odac.Ipc?.config?.driver === 'redis') return
        if (!Odac.Storage?.isReady()) return

        let counterCount = 0
        let updateCount = 0
        let queueCount = 0

        // Recover counters
        for (const {key, value} of Odac.Storage.getRange({start: 'wb:c:', end: 'wb:c:~'})) {
            if (!value || typeof value.delta !== 'number') continue
            const counterKey = key.slice(5) // Strip 'wb:c:' prefix
            await Odac.Ipc.incrBy(`wb:c:${counterKey}`, value.delta)
            if (value.base != null) await Odac.Ipc.set(`wb:b:${counterKey}`, value.base)
            await Odac.Ipc.sadd('wb:idx:counters', counterKey)
            counterCount++
        }

        // Recover updates
        for (const {key, value} of Odac.Storage.getRange({start: 'wb:u:', end: 'wb:u:~'})) {
            if (!value || typeof value !== 'object') continue
            const updateKey = key.slice(5)
            await Odac.Ipc.hset(`wb:u:${updateKey}`, value)
            await Odac.Ipc.sadd('wb:idx:updates', updateKey)
            updateCount++
        }

        // Recover queues
        for (const {key, value} of Odac.Storage.getRange({start: 'wb:q:', end: 'wb:q:~'})) {
            if (!Array.isArray(value)) continue
            const queueKey = key.slice(5)
            for (const row of value) {
                await Odac.Ipc.rpush(`wb:q:${queueKey}`, row)
            }
            await Odac.Ipc.sadd('wb:idx:queues', queueKey)
            queueCount++
        }

        if (counterCount > 0 || updateCount > 0 || queueCount > 0) {
            console.log(
                `\x1b[36m[WriteBuffer]\x1b[0m Recovered from checkpoint: ${counterCount} counters, ${updateCount} updates, ${queueCount} queues.`
            )
        }
    }

    // ─── Timers ────────────────────────────────────────────────

    /**
     * Starts the periodic flush timer. unref() keeps the interval from
     * holding the process open during shutdown.
     */
    _startFlushTimer() {
        if (this._flushTimer) clearInterval(this._flushTimer)

        this._flushTimer = setInterval(async () => {
            try {
                await this.flush()
            } catch (err) {
                console.error('\x1b[31m[WriteBuffer]\x1b[0m Periodic flush error:', err.message)
            }
        }, this._config.flushInterval)

        this._flushTimer.unref()
    }

    /**
     * Starts the periodic LMDB checkpoint timer (memory driver crash safety).
     */
    _startCheckpointTimer() {
        if (this._checkpointTimer) clearInterval(this._checkpointTimer)

        this._checkpointTimer = setInterval(async () => {
            try {
                await this._writeCheckpoint()
            } catch (err) {
                console.error('\x1b[31m[WriteBuffer]\x1b[0m Checkpoint error:', err.message)
            }
        }, this._config.checkpointInterval)

        this._checkpointTimer.unref()
    }

    // ─── Key Utilities ─────────────────────────────────────────

    /**
     * Builds the canonical counter key. Object `where` clauses are key-sorted
     * before stringifying so {a,b} and {b,a} map to the same counter.
     */
    _counterKey(connection, table, where, column) {
        const whereStr = typeof where === 'object' ? JSON.stringify(this._sortedObject(where)) : String(where)
        return `${connection}:${table}:${whereStr}:${column}`
    }

    /**
     * Builds the canonical update key (same normalization as _counterKey, no column).
     */
    _updateKey(connection, table, where) {
        const whereStr = typeof where === 'object' ? JSON.stringify(this._sortedObject(where)) : String(where)
        return `${connection}:${table}:${whereStr}`
    }

    /**
     * Parses `connection:table:where` back into parts. The where segment is
     * everything after the second colon (it may itself contain colons, e.g. JSON).
     * Returns null for malformed keys.
     */
    _parseUpdateKey(key) {
        const firstColon = key.indexOf(':')
        if (firstColon === -1) return null
        const connection = key.slice(0, firstColon)

        const secondColon = key.indexOf(':', firstColon + 1)
        if (secondColon === -1) return null
        const table = key.slice(firstColon + 1, secondColon)

        const where = key.slice(secondColon + 1)
        return {connection, table, where}
    }

    /**
     * Parses `connection:table:where:column`. The column is taken from the LAST
     * colon (column names contain no colons), leaving the where segment free to
     * contain colons from stringified JSON. Returns null for malformed keys.
     */
    _parseCounterKey(key) {
        const firstColon = key.indexOf(':')
        if (firstColon === -1) return null
        const connection = key.slice(0, firstColon)

        const secondColon = key.indexOf(':', firstColon + 1)
        if (secondColon === -1) return null
        const table = key.slice(firstColon + 1, secondColon)

        const lastColon = key.lastIndexOf(':')
        if (lastColon <= secondColon) return null
        const column = key.slice(lastColon + 1)

        const where = key.slice(secondColon + 1, lastColon)
        return {connection, table, where, column}
    }

    /**
     * Normalizes a `where` value for knex: objects pass through, JSON strings
     * are parsed back to objects, and scalars become {primaryKey: value}.
     */
    _normalizeWhere(where) {
        if (typeof where === 'object' && where !== null) return where
        if (typeof where === 'string' && where.startsWith('{')) {
            try {
                return JSON.parse(where)
            } catch {
                // Not valid JSON — treat as scalar
            }
        }
        return {[this._config.primaryKey]: where}
    }

    /**
     * Returns a shallow copy of obj with keys in sorted order, so JSON.stringify
     * produces a stable string regardless of insertion order.
     */
    _sortedObject(obj) {
        const sorted = {}
        for (const key of Object.keys(obj).sort()) {
            sorted[key] = obj[key]
        }
        return sorted
    }

    // ─── Teardown ──────────────────────────────────────────────

    /**
     * Why: Graceful shutdown — flush all pending data, clear checkpoints, stop timers.
     * Final flush writes everything to DB. If that fails, LMDB checkpoint preserves data.
     */
    async close() {
        if (this._flushTimer) {
            clearInterval(this._flushTimer)
            this._flushTimer = null
        }
        if (this._checkpointTimer) {
            clearInterval(this._checkpointTimer)
            this._checkpointTimer = null
        }

        if (cluster.isPrimary) {
            try {
                // Force-acquire lock for final flush (bypass distributed lock)
                await Odac.Ipc.unlock('wb:lock:flush')
                await this.flush()
                console.log('\x1b[32m[WriteBuffer]\x1b[0m Final flush completed.')
            } catch (err) {
                console.error('\x1b[31m[WriteBuffer]\x1b[0m Final flush failed:', err.message)
                try {
                    await this._writeCheckpoint()
                } catch {
                    // Last resort failed — data may be lost
                }
            }
        }

        this._initialized = false
    }
}
|
|
604
|
+
|
|
605
|
+
// Why: Export a singleton — every require() shares one WriteBuffer per process.
module.exports = new WriteBuffer()
|
package/src/Database.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
'use strict'
|
|
2
2
|
const {buildConnections} = require('./Database/ConnectionFactory')
|
|
3
3
|
const nanoid = require('./Database/nanoid')
|
|
4
|
+
const writeBuffer = require('./Database/WriteBuffer')
|
|
4
5
|
|
|
5
6
|
class DatabaseManager {
|
|
6
7
|
constructor() {
|
|
@@ -31,6 +32,9 @@ class DatabaseManager {
|
|
|
31
32
|
// Cache nanoid column metadata from schema files for insert-time auto-generation.
|
|
32
33
|
// Runs on ALL processes (primary + workers) since every process may insert data.
|
|
33
34
|
this._loadNanoidMeta()
|
|
35
|
+
|
|
36
|
+
// Initialize Write-Behind Cache (Primary holds state, Workers communicate via IPC)
|
|
37
|
+
await writeBuffer.init(this.connections)
|
|
34
38
|
}
|
|
35
39
|
|
|
36
40
|
/**
|
|
@@ -63,9 +67,17 @@ class DatabaseManager {
|
|
|
63
67
|
|
|
64
68
|
/**
|
|
65
69
|
* Gracefully destroys all active database connections.
|
|
70
|
+
* Flushes WriteBuffer before closing to prevent data loss.
|
|
66
71
|
* Called during shutdown to release connection pools and prevent resource leaks.
|
|
67
72
|
*/
|
|
68
73
|
async close() {
|
|
74
|
+
// Flush buffered writes before destroying connections
|
|
75
|
+
try {
|
|
76
|
+
await writeBuffer.close()
|
|
77
|
+
} catch (err) {
|
|
78
|
+
console.error('\x1b[31m[Database]\x1b[0m WriteBuffer close error:', err.message)
|
|
79
|
+
}
|
|
80
|
+
|
|
69
81
|
const entries = Object.entries(this.connections)
|
|
70
82
|
if (entries.length === 0) return
|
|
71
83
|
|
|
@@ -174,6 +186,19 @@ const tableProxyHandler = {
|
|
|
174
186
|
|
|
175
187
|
// Create the Query Builder
|
|
176
188
|
const qb = knexInstance(prop)
|
|
189
|
+
const connectionKey = knexInstance._odacConnectionKey || 'default'
|
|
190
|
+
|
|
191
|
+
// Write-Behind Cache: Odac.DB.posts.buffer.where(id).update({...}) / .increment('col') / .get('col')
|
|
192
|
+
// Odac.DB.posts.buffer.insert(row) / .flush()
|
|
193
|
+
qb.buffer = {
|
|
194
|
+
where: where => ({
|
|
195
|
+
update: data => writeBuffer.update(connectionKey, prop, where, data),
|
|
196
|
+
increment: (column, delta = 1) => writeBuffer.increment(connectionKey, prop, where, column, delta),
|
|
197
|
+
get: column => writeBuffer.get(connectionKey, prop, where, column)
|
|
198
|
+
}),
|
|
199
|
+
insert: row => writeBuffer.insert(connectionKey, prop, row),
|
|
200
|
+
flush: () => writeBuffer.flush(connectionKey, prop)
|
|
201
|
+
}
|
|
177
202
|
|
|
178
203
|
// Odac DX Improvement: Wrap count() to return a clean number
|
|
179
204
|
const originalCount = qb.count
|
|
@@ -184,7 +209,6 @@ const tableProxyHandler = {
|
|
|
184
209
|
|
|
185
210
|
// Odac DX Improvement: Auto-generate NanoID for columns defined as type 'nanoid' in schema.
|
|
186
211
|
// Why: Zero-config ID generation — no manual Odac.DB.nanoid() calls needed.
|
|
187
|
-
const connectionKey = knexInstance._odacConnectionKey || 'default'
|
|
188
212
|
const nanoidCols = manager._nanoidColumns[connectionKey]?.[prop]
|
|
189
213
|
if (nanoidCols) {
|
|
190
214
|
const originalInsert = qb.insert
|
|
@@ -257,6 +281,13 @@ const rootProxy = new Proxy(manager, {
|
|
|
257
281
|
if (prop === '_nanoidColumns') return target._nanoidColumns
|
|
258
282
|
if (prop === '_loadNanoidMeta') return target._loadNanoidMeta.bind(target)
|
|
259
283
|
|
|
284
|
+
// Global WriteBuffer: Odac.DB.buffer.flush()
|
|
285
|
+
if (prop === 'buffer') {
|
|
286
|
+
return {
|
|
287
|
+
flush: (connection, table) => writeBuffer.flush(connection, table)
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
|
|
260
291
|
// Access to specific database connection: Odac.DB.analytics
|
|
261
292
|
if (target.connections[prop]) {
|
|
262
293
|
return new Proxy(target.connections[prop], tableProxyHandler)
|