odac 1.4.7 → 1.4.8

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
package/src/Ipc.js CHANGED
@@ -64,6 +64,197 @@ class Ipc extends EventEmitter {
  }
  }

+ // --- Atomic Counter Operations ---
+
+ /**
+ * Why: Atomically increment a numeric value. Essential for Write-Behind Cache counters
+ * where concurrent workers must not cause lost updates (get→modify→set race).
+ * Redis: INCRBYFLOAT. Memory: single-threaded Primary guarantees atomicity.
+ *
+ * @param {string} key
+ * @param {number} delta - Amount to add (can be negative)
+ * @returns {Promise<number>} New value after increment
+ */
+ async incrBy(key, delta) {
+ if (this.config.driver === 'redis') {
+ return Number(await this.redis.incrByFloat(key, delta))
+ }
+ return this._sendMemory('incrBy', {key, delta})
+ }
+
+ /**
+ * Why: Convenience wrapper. Flush logic needs to subtract flushed deltas atomically.
+ */
+ async decrBy(key, delta) {
+ return this.incrBy(key, -delta)
+ }
+
+ // --- Hash Operations ---
+
+ /**
+ * Why: Write-Behind Cache update coalescing stores pending column updates as hash fields.
+ * Merge semantics: existing fields are overwritten, new fields are added (last-write-wins).
+ * Redis: HSET key f1 v1 f2 v2. Memory: Object.assign into stored object.
+ *
+ * @param {string} key
+ * @param {object} obj - Field-value pairs to merge
+ * @returns {Promise<boolean>}
+ */
+ async hset(key, obj) {
+ if (this.config.driver === 'redis') {
+ const args = {}
+ for (const [field, value] of Object.entries(obj)) {
+ args[field] = JSON.stringify(value)
+ }
+ await this.redis.hSet(key, args)
+ return true
+ }
+ return this._sendMemory('hset', {key, obj})
+ }
+
+ /**
+ * Why: Flush reads all pending update fields for a row in one call.
+ *
+ * @param {string} key
+ * @returns {Promise<object|null>} All field-value pairs, or null if key doesn't exist
+ */
+ async hgetall(key) {
+ if (this.config.driver === 'redis') {
+ const raw = await this.redis.hGetAll(key)
+ if (!raw || Object.keys(raw).length === 0) return null
+ const result = {}
+ for (const [field, value] of Object.entries(raw)) {
+ result[field] = JSON.parse(value)
+ }
+ return result
+ }
+ return this._sendMemory('hgetall', {key})
+ }
+
+ // --- List Operations ---
+
+ /**
+ * Why: Write-Behind Cache batch insert queue. Workers push rows to a shared list;
+ * flush drains it to the database in a single INSERT.
+ * Redis: RPUSH. Memory: Array.push on Primary.
+ *
+ * @param {string} key
+ * @param {...*} items - Items to append
+ * @returns {Promise<number>} New list length
+ */
+ async rpush(key, ...items) {
+ if (this.config.driver === 'redis') {
+ const serialized = items.map(i => JSON.stringify(i))
+ return this.redis.rPush(key, serialized)
+ }
+ return this._sendMemory('rpush', {key, items})
+ }
+
+ /**
+ * Why: Flush reads queued rows before writing them to the database.
+ *
+ * @param {string} key
+ * @param {number} start - Start index (0-based, inclusive)
+ * @param {number} stop - End index (inclusive, -1 for last element)
+ * @returns {Promise<Array>} Elements in range
+ */
+ async lrange(key, start, stop) {
+ if (this.config.driver === 'redis') {
+ const raw = await this.redis.lRange(key, start, stop)
+ return raw.map(i => JSON.parse(i))
+ }
+ return this._sendMemory('lrange', {key, start, stop})
+ }
+
+ /**
+ * Why: Atomic read-and-clear for queue flush. Prevents data loss caused by
+ * non-atomic lrange() + del() where new rpush() arrivals between the two
+ * calls would be silently deleted. Redis: MULTI/EXEC pipeline.
+ * Memory: single-threaded Primary guarantees atomicity.
+ *
+ * @param {string} key
+ * @returns {Promise<Array>} All elements that were in the list
+ */
+ async lrangeAndDel(key) {
+ if (this.config.driver === 'redis') {
+ const results = await this.redis.multi().lRange(key, 0, -1).del(key).exec()
+ const raw = results[0]
+ if (!raw || !Array.isArray(raw)) return []
+ return raw.map(i => JSON.parse(i))
+ }
+ return this._sendMemory('lrangeAndDel', {key})
+ }
+
+ // --- Set Operations ---
+
+ /**
+ * Why: WriteBuffer maintains index sets (e.g., 'wb:idx:counters') to track which keys
+ * have pending data. Avoids expensive SCAN/KEYS pattern matching on flush.
+ *
+ * @param {string} key
+ * @param {...string} members
+ * @returns {Promise<number>} Number of members added
+ */
+ async sadd(key, ...members) {
+ if (this.config.driver === 'redis') {
+ return this.redis.sAdd(key, members)
+ }
+ return this._sendMemory('sadd', {key, members})
+ }
+
+ /**
+ * Why: Flush iterates all tracked keys in an index set to drain pending data.
+ *
+ * @param {string} key
+ * @returns {Promise<Array<string>>} All members
+ */
+ async smembers(key) {
+ if (this.config.driver === 'redis') {
+ return this.redis.sMembers(key)
+ }
+ return this._sendMemory('smembers', {key})
+ }
+
+ /**
+ * Why: After flushing a counter/update/queue key, remove it from the tracking index.
+ *
+ * @param {string} key
+ * @param {...string} members
+ * @returns {Promise<number>} Number of members removed
+ */
+ async srem(key, ...members) {
+ if (this.config.driver === 'redis') {
+ return this.redis.sRem(key, members)
+ }
+ return this._sendMemory('srem', {key, members})
+ }
+
+ // --- Distributed Lock ---
+
+ /**
+ * Why: Horizontal scaling requires exactly ONE server to run flush at a time.
+ * Redis: SET NX EX (atomic test-and-set with TTL). Memory: Primary-local boolean.
+ * TTL prevents deadlocks if the lock holder crashes mid-flush.
+ *
+ * @param {string} key
+ * @param {number} [ttl=10] - Lock time-to-live in seconds
+ * @returns {Promise<boolean>} true if lock acquired
+ */
+ async lock(key, ttl = 10) {
+ if (this.config.driver === 'redis') {
+ const result = await this.redis.set(key, '1', {NX: true, EX: ttl})
+ return result === 'OK'
+ }
+ return this._sendMemory('lock', {key, ttl})
+ }
+
+ /**
+ * Why: Release flush lock after completion so the next cycle can proceed.
+ */
+ async unlock(key) {
+ return this.del(key)
+ }
+
  async publish(channel, message) {
  if (this.config.driver === 'redis') {
  return this.redis.publish(channel, JSON.stringify(message))
@@ -219,32 +410,152 @@ class Ipc extends EventEmitter {
  }

  _handleDirectPrimaryCall(action, payload) {
- // Basic implementation for Primary process using itself
- if (action === 'set') {
- const expireAt = payload.ttl > 0 ? Date.now() + payload.ttl * 1000 : Infinity
- this._memoryStore.set(payload.key, {value: payload.value, expireAt})
- return true
- }
- if (action === 'get') {
- const data = this._memoryStore.get(payload.key)
- if (!data) return null
- if (data.expireAt !== Infinity && Date.now() > data.expireAt) {
- this._memoryStore.delete(payload.key)
- return null
- }
- return data.value
- }
- if (action === 'del') return this._memoryStore.delete(payload.key)
- if (action === 'publish') {
- const workers = this._memorySubs.get(payload.channel)
- if (workers) {
- workers.forEach(wId => {
- const w = cluster.workers[wId]
- if (w) w.send({type: 'ipc:message', channel: payload.channel, message: payload.message})
- })
+ return this._executePrimaryAction(action, payload)
+ }
+
+ /**
+ * Why: Single source of truth for all memory-driver operations.
+ * Both _handlePrimaryMessage (worker→primary) and _handleDirectPrimaryCall (primary self-call)
+ * funnel through this method, eliminating logic duplication.
+ */
+ _executePrimaryAction(action, msg) {
+ switch (action) {
+ case 'set': {
+ const expireAt = msg.ttl > 0 ? Date.now() + msg.ttl * 1000 : Infinity
+ this._memoryStore.set(msg.key, {value: msg.value, expireAt})
+ return true
+ }
+ case 'get': {
+ const data = this._memoryStore.get(msg.key)
+ if (!data) return null
+ if (data.expireAt !== Infinity && Date.now() > data.expireAt) {
+ this._memoryStore.delete(msg.key)
+ return null
+ }
+ return data.value
+ }
+ case 'del':
+ return this._memoryStore.delete(msg.key)
+
+ // --- Atomic Counter ---
+ case 'incrBy': {
+ const data = this._memoryStore.get(msg.key) || {value: 0, expireAt: Infinity}
+ data.value = (typeof data.value === 'number' ? data.value : 0) + msg.delta
+ this._memoryStore.set(msg.key, data)
+ return data.value
+ }
+
+ // --- Hash ---
+ case 'hset': {
+ const data = this._memoryStore.get(msg.key) || {value: {}, expireAt: Infinity}
+ if (typeof data.value !== 'object' || data.value === null || Array.isArray(data.value)) {
+ data.value = {}
+ }
+ Object.assign(data.value, msg.obj)
+ this._memoryStore.set(msg.key, data)
+ return true
+ }
+ case 'hgetall': {
+ const data = this._memoryStore.get(msg.key)
+ if (!data || typeof data.value !== 'object' || Array.isArray(data.value)) return null
+ return {...data.value}
+ }
+
+ // --- List ---
+ case 'rpush': {
+ const data = this._memoryStore.get(msg.key) || {value: [], expireAt: Infinity}
+ if (!Array.isArray(data.value)) data.value = []
+ data.value.push(...msg.items)
+ this._memoryStore.set(msg.key, data)
+ return data.value.length
+ }
+ case 'lrange': {
+ const data = this._memoryStore.get(msg.key)
+ if (!data || !Array.isArray(data.value)) return []
+ return msg.stop === -1 ? data.value.slice(msg.start) : data.value.slice(msg.start, msg.stop + 1)
+ }
+ case 'lrangeAndDel': {
+ const data = this._memoryStore.get(msg.key)
+ if (!data || !Array.isArray(data.value)) return []
+ const items = data.value
+ this._memoryStore.delete(msg.key)
+ return items
+ }
+
+ // --- Set ---
+ case 'sadd': {
+ let data = this._memoryStore.get(msg.key)
+ if (!data) {
+ data = {value: [], expireAt: Infinity}
+ this._memoryStore.set(msg.key, data)
+ }
+ if (!Array.isArray(data.value)) data.value = []
+ let added = 0
+ for (const m of msg.members) {
+ if (!data.value.includes(m)) {
+ data.value.push(m)
+ added++
+ }
+ }
+ return added
+ }
+ case 'smembers': {
+ const data = this._memoryStore.get(msg.key)
+ return data && Array.isArray(data.value) ? data.value.slice() : []
+ }
+ case 'srem': {
+ const data = this._memoryStore.get(msg.key)
+ if (!data || !Array.isArray(data.value)) return 0
+ let removed = 0
+ for (const m of msg.members) {
+ const idx = data.value.indexOf(m)
+ if (idx !== -1) {
+ data.value.splice(idx, 1)
+ removed++
+ }
+ }
+ return removed
+ }
+
+ // --- Lock ---
+ case 'lock': {
+ const existing = this._memoryStore.get(msg.key)
+ if (existing && existing.expireAt > Date.now()) return false
+ const expireAt = Date.now() + (msg.ttl || 10) * 1000
+ this._memoryStore.set(msg.key, {value: '1', expireAt})
+ return true
+ }
+
+ // --- Pub/Sub ---
+ case 'publish': {
+ const workers = this._memorySubs.get(msg.channel)
+ if (workers) {
+ workers.forEach(wId => {
+ const w = require('node:cluster').workers[wId]
+ if (w) w.send({type: 'ipc:message', channel: msg.channel, message: msg.message})
+ })
+ }
+ return undefined
+ }
+ case 'subscribe': {
+ if (!this._memorySubs.has(msg.channel)) {
+ this._memorySubs.set(msg.channel, new Set())
+ }
+ // msg.workerId is set by _handlePrimaryMessage for worker context
+ if (msg.workerId) this._memorySubs.get(msg.channel).add(msg.workerId)
+ return undefined
+ }
+ case 'unsubscribe': {
+ if (this._memorySubs.has(msg.channel)) {
+ this._memorySubs.get(msg.channel).delete(msg.workerId)
+ if (this._memorySubs.get(msg.channel).size === 0) {
+ this._memorySubs.delete(msg.channel)
+ }
+ }
+ return undefined
  }
  }
- // subscribe on primary not deeply implemented to avoid complexity, usually workers listen.
+ return null
  }

  _startGarbageCollector() {
@@ -308,66 +619,17 @@ class Ipc extends EventEmitter {
  }

  _handlePrimaryMessage(worker, msg) {
- const {type, id, key, value, ttl, channel, message} = msg
- const action = type.replace('ipc:', '')
-
- let response = null
+ const action = msg.type.replace('ipc:', '')

- switch (action) {
- case 'set': {
- const expireAt = ttl > 0 ? Date.now() + ttl * 1000 : Infinity
- this._memoryStore.set(key, {value, expireAt})
- response = true
- break
- }
- case 'get': {
- const data = this._memoryStore.get(key)
- if (data) {
- if (data.expireAt !== Infinity && Date.now() > data.expireAt) {
- this._memoryStore.delete(key)
- response = null
- } else {
- response = data.value
- }
- } else {
- response = null
- }
- break
- }
- case 'del':
- response = this._memoryStore.delete(key)
- break
- case 'subscribe':
- if (!this._memorySubs.has(channel)) {
- this._memorySubs.set(channel, new Set())
- }
- this._memorySubs.get(channel).add(worker.id)
- break
- case 'unsubscribe':
- if (this._memorySubs.has(channel)) {
- this._memorySubs.get(channel).delete(worker.id)
- if (this._memorySubs.get(channel).size === 0) {
- this._memorySubs.delete(channel)
- }
- }
- break
- case 'publish': {
- // Relay to all subscribed workers
- const workers = this._memorySubs.get(channel)
- if (workers) {
- workers.forEach(wId => {
- // Don't echo back to sender if desired? Usually pub/sub receives own too if subbed.
- // Redis publishes to all subscribers.
- const w = cluster.workers[wId]
- if (w) w.send({type: 'ipc:message', channel, message})
- })
- }
- break
- }
+ // Inject worker context for subscribe/unsubscribe
+ if (action === 'subscribe' || action === 'unsubscribe') {
+ msg.workerId = worker.id
  }

- if (id) {
- worker.send({type: 'ipc:response', id, data: response})
+ const response = this._executePrimaryAction(action, msg)
+
+ if (msg.id) {
+ worker.send({type: 'ipc:response', id: msg.id, data: response})
  }
  }
  }
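
Taken together, the new primitives cover one flush cycle: acquire the lock, walk the index sets, drain counters and queues, then release. The sketch below shows how a caller might wire them up. The index-set name 'wb:idx:counters' comes from the JSDoc above; 'wb:idx:queues', 'wb:lock:flush', and the two database callbacks are illustrative assumptions, not part of the released API.

// Sketch only: method names on `ipc` are the new Ipc API above; key names other
// than 'wb:idx:counters' and both callbacks are hypothetical.
async function flushPending(ipc, applyCounterDelta, insertRows) {
  if (!(await ipc.lock('wb:lock:flush', 10))) return // another server is flushing

  try {
    // Counters: read the pending delta, apply it, then subtract what was flushed
    // (decrBy) so increments that arrived mid-flush are preserved.
    for (const key of await ipc.smembers('wb:idx:counters')) {
      const delta = await ipc.incrBy(key, 0) // atomic read of the current value
      if (delta !== 0) {
        await applyCounterDelta(key, delta)  // e.g. UPDATE ... SET col = col + delta
        await ipc.decrBy(key, delta)
      }
      await ipc.srem('wb:idx:counters', key)
    }

    // Queues: lrangeAndDel reads and clears atomically, so rows pushed between
    // the read and the delete cannot be lost.
    for (const key of await ipc.smembers('wb:idx:queues')) {
      const rows = await ipc.lrangeAndDel(key)
      if (rows.length > 0) await insertRows(key, rows) // single batched INSERT
      await ipc.srem('wb:idx:queues', key)
    }
  } finally {
    await ipc.unlock('wb:lock:flush')
  }
}
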
package/src/Odac.js CHANGED
@@ -6,11 +6,12 @@ module.exports = {

  await global.Odac.Env.init()
  await global.Odac.Config.init()
- await global.Odac.Database.init()

  global.Odac.Ipc = require('./Ipc.js')
  await global.Odac.Ipc.init()

+ await global.Odac.Database.init()
+
  await global.Odac.Route.init()
  await global.Odac.Server.init()
  global.Odac.instance = this.instance
package/src/Storage.js CHANGED
@@ -45,12 +45,14 @@ class OdacStorage {

  put(key, value) {
  if (!this.ready) return false
- return this.db.put(key, value)
+ this.db.putSync(key, value)
+ return true
  }

  remove(key) {
  if (!this.ready) return false
- return this.db.remove(key)
+ this.db.removeSync(key)
+ return true
  }

  // --- Range Operations ---
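
With the switch to putSync/removeSync, the wrapper performs the LMDB write synchronously and reports success with a plain boolean instead of returning the driver's promise, which is what a crash-safe checkpoint write wants. A minimal round trip through the wrapper follows, assuming the wb:c:* key shape used by the WriteBuffer tests below; the get call is part of the mock storage interface in those tests and is assumed to exist on the real wrapper.

// Sketch only: key layout mirrors the WriteBuffer tests further down in this diff.
const storage = global.Odac.Storage

// Persist a counter checkpoint; true is returned once the synchronous write call completes.
storage.put('wb:c:default:posts:1:views', {delta: 5, base: 100})

// On recovery, read the checkpoint back, replay it, then drop it.
const checkpoint = storage.get('wb:c:default:posts:1:views') // {delta: 5, base: 100}
storage.remove('wb:c:default:posts:1:views')
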
@@ -0,0 +1,207 @@
+ 'use strict'
+
+ const cluster = require('node:cluster')
+
+ /**
+ * Tests WriteBuffer LMDB checkpoint and crash recovery.
+ * Why: Validates zero data loss guarantee — buffered data survives process crashes
+ * via periodic LMDB checkpoints and is recovered on next startup.
+ */
+
+ let knexLib, db, storageData
+
+ function createMockStorage() {
+ storageData = new Map()
+ return {
+ isReady: () => true,
+ put: (key, value) => storageData.set(key, value),
+ remove: key => storageData.delete(key),
+ get: key => storageData.get(key) ?? null,
+ getRange: ({start, end}) => {
+ const results = []
+ for (const [key, value] of storageData) {
+ if (key >= start && key < end) {
+ results.push({key, value})
+ }
+ }
+ return results
+ }
+ }
+ }
+
+ beforeEach(async () => {
+ jest.resetModules()
+
+ knexLib = require('knex')
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
+
+ await db.schema.createTable('posts', table => {
+ table.integer('id').primary()
+ table.integer('views').defaultTo(0)
+ })
+ await db('posts').insert({id: 1, views: 100})
+
+ Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
+ })
+
+ afterEach(async () => {
+ await db.destroy()
+ delete global.Odac
+ })
+
+ describe('WriteBuffer - Checkpoint', () => {
+ it('should write counter deltas to LMDB on checkpoint', async () => {
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: createMockStorage(),
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
+ await WriteBuffer._writeCheckpoint()
+
+ const checkpoint = storageData.get('wb:c:default:posts:1:views')
+ expect(checkpoint).toBeDefined()
+ expect(checkpoint.delta).toBe(5)
+ expect(checkpoint.base).toBe(100)
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+
+ it('should write queue rows to LMDB on checkpoint', async () => {
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: createMockStorage(),
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+
+ await WriteBuffer.insert('default', 'activity_log', {user_id: 1, action: 'view'})
+ await WriteBuffer.insert('default', 'activity_log', {user_id: 2, action: 'click'})
+ await WriteBuffer._writeCheckpoint()
+
+ const checkpoint = storageData.get('wb:q:default:activity_log')
+ expect(checkpoint).toBeDefined()
+ expect(checkpoint).toHaveLength(2)
+ expect(checkpoint[0].action).toBe('view')
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+ })
+
+ describe('WriteBuffer - Recovery', () => {
+ it('should recover counter deltas from LMDB on startup', async () => {
+ // Simulate crash: write checkpoint data before init
+ const mockStorage = createMockStorage()
+ storageData.set('wb:c:default:posts:1:views', {delta: 7, base: 100})
+
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: mockStorage,
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+
+ // Should recover the delta from checkpoint
+ const result = await WriteBuffer.get('default', 'posts', 1, 'views')
+ expect(result).toBe(107) // base 100 + recovered delta 7
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+
+ it('should recover queue rows from LMDB on startup', async () => {
+ await db.schema.createTable('activity_log', table => {
+ table.increments('id')
+ table.integer('user_id')
+ table.string('action', 50)
+ })
+
+ const mockStorage = createMockStorage()
+ storageData.set('wb:q:default:activity_log', [{user_id: 1, action: 'recovered_view'}])
+
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: mockStorage,
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+
+ // Flush recovered data
+ await WriteBuffer.flush()
+
+ const rows = await db('activity_log').select()
+ expect(rows).toHaveLength(1)
+ expect(rows[0].action).toBe('recovered_view')
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+
+ it('should merge recovered data with new increments', async () => {
+ const mockStorage = createMockStorage()
+ storageData.set('wb:c:default:posts:1:views', {delta: 5, base: 100})
+
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: mockStorage,
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+
+ // Add more increments on top of recovered data
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
+
+ const result = await WriteBuffer.get('default', 'posts', 1, 'views')
+ expect(result).toBe(108) // base 100 + recovered 5 + new 3
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+
+ it('should clear LMDB checkpoint after successful flush', async () => {
+ const mockStorage = createMockStorage()
+ storageData.set('wb:c:default:posts:1:views', {delta: 5, base: 100})
+
+ const Ipc = require('../../../src/Ipc')
+ global.Odac = {
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+ Storage: mockStorage,
+ Ipc
+ }
+ await Ipc.init()
+
+ const WriteBuffer = require('../../../src/Database/WriteBuffer')
+ await WriteBuffer.init({default: db})
+ await WriteBuffer.flush()
+
+ // Checkpoint data should be cleared
+ expect(storageData.has('wb:c:default:posts:1:views')).toBe(false)
+
+ await WriteBuffer.close()
+ await Ipc.close()
+ })
+ })
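
The recovery tests pin down the contract: a checkpoint key such as wb:c:default:posts:1:views stores {delta, base}, and after startup get() returns the database base plus the pending delta (100 + 7 = 107). A minimal sketch of a startup replay that would satisfy them is shown below; it assumes the key layout from the tests and the increment()/getRange() signatures seen above, and is not the actual WriteBuffer source.

// Sketch only: replays persisted counter checkpoints into the live buffer on startup.
async function recoverCounterCheckpoints(storage, writeBuffer) {
  // ';' is the first character after ':' in ASCII, so this range matches every
  // checkpoint key written as wb:c:<conn>:<table>:<id>:<column>.
  for (const {key, value} of storage.getRange({start: 'wb:c:', end: 'wb:c;'})) {
    const [, , conn, table, id, column] = key.split(':')
    await writeBuffer.increment(conn, table, Number(id), column, value.delta)
  }
}
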