odac 1.4.7 → 1.4.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,143 @@
1
'use strict'

const cluster = require('node:cluster')

/**
 * Tests the chainable buffer API exposed via Database.js proxy.
 * Why: Validates that the Odac.DB.table.buffer.where(id).update(data) pattern
 * correctly delegates to WriteBuffer's internal methods.
 */

let knexLib
let db

beforeEach(async () => {
  // Fresh module registry so WriteBuffer/Database singletons start clean per test.
  jest.resetModules()

  // In-memory SQLite keeps every test hermetic and fast.
  knexLib = require('knex')
  db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})

  await db.schema.createTable('posts', table => {
    table.integer('id').primary()
    table.integer('views').defaultTo(0)
    table.string('title', 255)
  })

  await db.schema.createTable('activity_log', table => {
    table.increments('id')
    table.integer('user_id')
    table.string('action', 50)
  })

  // Seed the rows the proxy tests mutate.
  await db('posts').insert([
    {id: 1, views: 100, title: 'First Post'},
    {id: 2, views: 200, title: 'Second Post'}
  ])

  // WriteBuffer takes its primary-process code path only when cluster reports primary.
  Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})

  const Ipc = require('../../../src/Ipc')
  global.Odac = {
    // Intervals are effectively disabled so timer-driven flushes cannot race assertions.
    Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
    Storage: {
      isReady: () => false,
      put: jest.fn(),
      remove: jest.fn(),
      getRange: () => []
    },
    Ipc
  }
  await Ipc.init()

  // Initialize WriteBuffer with our test DB
  const writeBuffer = require('../../../src/Database/WriteBuffer')
  await writeBuffer.init({default: db})

  // Wire up Database.js proxy
  const DB = require('../../../src/Database')
  DB.connections = {default: db}
})

afterEach(async () => {
  // Tear down in reverse order of setup: buffer, IPC, DB connection, then the global.
  const writeBuffer = require('../../../src/Database/WriteBuffer')
  await writeBuffer.close()
  await Odac.Ipc.close()
  await db.destroy()
  delete global.Odac
})
67
+
68
describe('Database.js Proxy - buffer.where().update()', () => {
  it('should buffer and flush via Odac.DB.posts.buffer.where(id).update()', async () => {
    const DB = require('../../../src/Database')

    // Queue a single update, then force it to disk.
    await DB.posts.buffer.where(1).update({title: 'Updated Title'})
    await DB.posts.buffer.flush()

    // Verify directly against SQLite, bypassing the proxy entirely.
    const persisted = await db('posts').where({id: 1}).first()
    expect(persisted.title).toBe('Updated Title')
  })

  it('should merge multiple updates via chainable API', async () => {
    const DB = require('../../../src/Database')

    // Two updates to the same row: the later one must win after flush.
    await DB.posts.buffer.where(1).update({title: 'New Title'})
    await DB.posts.buffer.where(1).update({title: 'Final Title'})
    await DB.posts.buffer.flush()

    const persisted = await db('posts').where({id: 1}).first()
    expect(persisted.title).toBe('Final Title')
  })
})
90
+
91
describe('Database.js Proxy - buffer.where().increment()', () => {
  it('should increment via Odac.DB.posts.buffer.where(id).increment(col)', async () => {
    const DB = require('../../../src/Database')

    // Default delta is 1; seeded base for post 1 is 100.
    const total = await DB.posts.buffer.where(1).increment('views')
    expect(total).toBe(101)
  })

  it('should support custom delta', async () => {
    const DB = require('../../../src/Database')

    // Explicit delta of 5 on the seeded base of 100.
    const total = await DB.posts.buffer.where(1).increment('views', 5)
    expect(total).toBe(105)
  })
})
106
+
107
describe('Database.js Proxy - buffer.where().get()', () => {
  it('should get buffered value via Odac.DB.posts.buffer.where(id).get(col)', async () => {
    const DB = require('../../../src/Database')

    // get() must report DB base + unflushed delta without touching the database.
    await DB.posts.buffer.where(1).increment('views', 10)
    const current = await DB.posts.buffer.where(1).get('views')
    expect(current).toBe(110)
  })
})
116
+
117
describe('Database.js Proxy - buffer.insert()', () => {
  it('should buffer insert via Odac.DB.activity_log.buffer.insert(row)', async () => {
    const DB = require('../../../src/Database')

    // Two queued inserts should land in a single flush.
    await DB.activity_log.buffer.insert({user_id: 1, action: 'view'})
    await DB.activity_log.buffer.insert({user_id: 2, action: 'click'})
    await DB.activity_log.buffer.flush()

    const persisted = await db('activity_log').select()
    expect(persisted).toHaveLength(2)
  })
})
129
+
130
describe('Database.js Proxy - buffer.flush()', () => {
  it('should flush all buffered data for the table', async () => {
    const DB = require('../../../src/Database')

    // Mix a counter delta and a field update; flush must persist both.
    await DB.posts.buffer.where(1).increment('views', 5)
    await DB.posts.buffer.where(2).update({title: 'Changed'})
    await DB.posts.buffer.flush()

    const first = await db('posts').where({id: 1}).first()
    const second = await db('posts').where({id: 2}).first()
    expect(first.views).toBe(105)
    expect(second.title).toBe('Changed')
  })
})
@@ -0,0 +1,192 @@
1
'use strict'

const cluster = require('node:cluster')

/**
 * Tests WriteBuffer flush operations: counter flush (batch UPDATE) and queue flush (batch INSERT).
 * Why: Validates that buffered data is correctly persisted to the database,
 * with proper transaction handling and error recovery.
 */

let knexLib
let db
let WriteBuffer

beforeEach(async () => {
  // Reset the registry so WriteBuffer module state never leaks across tests.
  jest.resetModules()

  knexLib = require('knex')
  db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})

  // Two counter columns so multi-column flushes can be exercised.
  await db.schema.createTable('posts', table => {
    table.integer('id').primary()
    table.integer('views').defaultTo(0)
    table.integer('likes').defaultTo(0)
    table.string('title', 255)
  })

  await db('posts').insert([
    {id: 1, views: 100, likes: 10, title: 'First Post'},
    {id: 2, views: 200, likes: 20, title: 'Second Post'}
  ])

  // Pretend to be the cluster primary so WriteBuffer runs its primary code path.
  Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})

  const Ipc = require('../../../src/Ipc')
  global.Odac = {
    // Timer intervals are effectively disabled; flushes only happen when called explicitly.
    Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
    Storage: {
      isReady: () => false,
      put: jest.fn(),
      remove: jest.fn(),
      getRange: () => []
    },
    Ipc
  }
  await Ipc.init()

  WriteBuffer = require('../../../src/Database/WriteBuffer')
  await WriteBuffer.init({default: db})
})

afterEach(async () => {
  // Reverse-order teardown: buffer, IPC, DB connection, global.
  await WriteBuffer.close()
  await Odac.Ipc.close()
  await db.destroy()
  delete global.Odac
})
56
+
57
describe('WriteBuffer - Counter Flush', () => {
  it('should persist accumulated deltas to the database', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    await WriteBuffer.increment('default', 'posts', 2, 'views', 10)

    await WriteBuffer.flush()

    const first = await db('posts').where({id: 1}).first()
    const second = await db('posts').where({id: 2}).first()
    expect(first.views).toBe(105) // 100 + 5
    expect(second.views).toBe(210) // 200 + 10
  })

  it('should flush multiple columns for the same row', async () => {
    // Deltas on two different columns of one row must both land.
    await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
    await WriteBuffer.increment('default', 'posts', 1, 'likes', 7)

    await WriteBuffer.flush()

    const row = await db('posts').where({id: 1}).first()
    expect(row.views).toBe(103) // 100 + 3
    expect(row.likes).toBe(17) // 10 + 7
  })

  it('should clear counter index after successful flush', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    await WriteBuffer.flush()

    // Counter index should be empty after flush
    const leftover = await Odac.Ipc.smembers('wb:idx:counters')
    expect(leftover).toHaveLength(0)
  })

  it('should update base after flush so subsequent reads are correct', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    await WriteBuffer.flush()

    // New increment after flush
    const total = await WriteBuffer.increment('default', 'posts', 1, 'views', 2)
    expect(total).toBe(107) // base updated to 105, + 2

    await WriteBuffer.flush()
    const row = await db('posts').where({id: 1}).first()
    expect(row.views).toBe(107)
  })

  it('should accumulate new deltas during flush correctly', async () => {
    // NOTE(review): the increments here happen after the first flush completes,
    // not concurrently with it — the name slightly overstates what is exercised.
    await WriteBuffer.increment('default', 'posts', 1, 'views', 10)
    await WriteBuffer.flush()

    await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
    await WriteBuffer.increment('default', 'posts', 1, 'views', 2)
    await WriteBuffer.flush()

    const row = await db('posts').where({id: 1}).first()
    expect(row.views).toBe(115) // 100 + 10 + (3 + 2)
  })

  it('should scope flush to specific table when provided', async () => {
    await db.schema.createTable('comments', table => {
      table.integer('id').primary()
      table.integer('votes').defaultTo(0)
    })
    await db('comments').insert({id: 1, votes: 50})

    await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    await WriteBuffer.increment('default', 'comments', 1, 'votes', 3)

    // Flush only posts
    await WriteBuffer.flush('default', 'posts')

    const post = await db('posts').where({id: 1}).first()
    const comment = await db('comments').where({id: 1}).first()
    expect(post.views).toBe(105) // Flushed
    expect(comment.votes).toBe(50) // Not flushed
  })
})
134
+
135
describe('WriteBuffer - Queue Flush (Batch Insert)', () => {
  it('should batch insert queued rows', async () => {
    await db.schema.createTable('activity_log', table => {
      table.increments('id')
      table.integer('user_id')
      table.string('action', 50)
    })

    // Three queued rows should be written as one batch, preserving order.
    await WriteBuffer.insert('default', 'activity_log', {user_id: 1, action: 'view'})
    await WriteBuffer.insert('default', 'activity_log', {user_id: 2, action: 'click'})
    await WriteBuffer.insert('default', 'activity_log', {user_id: 1, action: 'scroll'})

    await WriteBuffer.flush()

    const persisted = await db('activity_log').select()
    expect(persisted).toHaveLength(3)
    expect(persisted[0].action).toBe('view')
    expect(persisted[1].action).toBe('click')
    expect(persisted[2].action).toBe('scroll')
  })

  it('should clear queue after successful flush', async () => {
    await db.schema.createTable('events', table => {
      table.increments('id')
      table.string('type', 50)
    })

    await WriteBuffer.insert('default', 'events', {type: 'pageview'})
    await WriteBuffer.flush()

    // The IPC-backed queue list must be drained once the rows are persisted.
    const pending = await Odac.Ipc.lrange('wb:q:default:events', 0, -1)
    expect(pending).toHaveLength(0)
  })

  it('should handle empty queues gracefully', async () => {
    // Flushing with nothing buffered must be a no-op, not an error.
    await expect(WriteBuffer.flush()).resolves.not.toThrow()
  })

  it('should auto-flush when maxQueueSize is reached', async () => {
    await db.schema.createTable('logs', table => {
      table.increments('id')
      table.string('msg', 50)
    })

    // Set low threshold for testing
    WriteBuffer._config.maxQueueSize = 3

    await WriteBuffer.insert('default', 'logs', {msg: 'a'})
    await WriteBuffer.insert('default', 'logs', {msg: 'b'})
    await WriteBuffer.insert('default', 'logs', {msg: 'c'}) // Triggers auto-flush

    // Wait a tick for async auto-flush
    await new Promise(r => setTimeout(r, 50))

    const persisted = await db('logs').select()
    expect(persisted.length).toBeGreaterThanOrEqual(3)
  })
})
@@ -0,0 +1,72 @@
1
'use strict'

const cluster = require('node:cluster')

/**
 * Tests WriteBuffer.get().
 * Why: Validates that get() returns the accurate current value (DB base + buffered delta)
 * without flushing to the database.
 */

let knexLib
let db
let WriteBuffer

beforeEach(async () => {
  // Fresh modules each test so buffered state cannot bleed between cases.
  jest.resetModules()

  knexLib = require('knex')
  db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})

  await db.schema.createTable('posts', table => {
    table.integer('id').primary()
    table.integer('views').defaultTo(0)
    table.string('title', 255)
  })

  await db('posts').insert([
    {id: 1, views: 100, title: 'First Post'},
    {id: 2, views: 200, title: 'Second Post'}
  ])

  // Present as cluster primary so WriteBuffer follows its primary code path.
  Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})

  const Ipc = require('../../../src/Ipc')
  global.Odac = {
    // Effectively-infinite intervals keep background flushes out of the picture.
    Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
    Storage: {
      isReady: () => false,
      put: jest.fn(),
      remove: jest.fn(),
      getRange: () => []
    },
    Ipc
  }
  await Ipc.init()

  WriteBuffer = require('../../../src/Database/WriteBuffer')
  await WriteBuffer.init({default: db})
})

afterEach(async () => {
  // Reverse-order teardown: buffer, IPC, DB connection, global.
  await WriteBuffer.close()
  await Odac.Ipc.close()
  await db.destroy()
  delete global.Odac
})
55
+
56
describe('WriteBuffer - get()', () => {
  it('should return base value from DB when no buffer exists', async () => {
    // Nothing buffered yet, so get() falls through to the seeded DB value.
    const value = await WriteBuffer.get('default', 'posts', 1, 'views')
    expect(value).toBe(100)
  })

  it('should return base + delta when buffer exists', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 7)
    const value = await WriteBuffer.get('default', 'posts', 1, 'views')
    expect(value).toBe(107) // 100 base + 7 buffered
  })

  it('should return 0 for non-existent rows', async () => {
    // Missing rows read as zero rather than throwing.
    const value = await WriteBuffer.get('default', 'posts', 999, 'views')
    expect(value).toBe(0)
  })
})
@@ -0,0 +1,118 @@
1
'use strict'

const cluster = require('node:cluster')

/**
 * Tests WriteBuffer.increment().
 * Why: Validates that the Write-Behind Cache correctly accumulates deltas
 * and returns accurate current totals (base + buffered delta) without a DB write.
 */

let knexLib
let db
let WriteBuffer

beforeEach(async () => {
  // Reset the module registry so each test starts with a pristine WriteBuffer.
  jest.resetModules()

  knexLib = require('knex')
  db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})

  // Create test table
  await db.schema.createTable('posts', table => {
    table.integer('id').primary()
    table.integer('views').defaultTo(0)
    table.integer('likes').defaultTo(0)
    table.string('title', 255)
  })

  // Seed data
  await db('posts').insert([
    {id: 1, views: 100, likes: 10, title: 'First Post'},
    {id: 2, views: 200, likes: 20, title: 'Second Post'}
  ])

  // Mock cluster as primary
  Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})

  const Ipc = require('../../../src/Ipc')
  global.Odac = {
    // Oversized intervals disable timer-driven flushing during the tests.
    Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
    Storage: {
      isReady: () => false,
      put: jest.fn(),
      remove: jest.fn(),
      getRange: () => []
    },
    Ipc
  }
  await Ipc.init()

  WriteBuffer = require('../../../src/Database/WriteBuffer')
  await WriteBuffer.init({default: db})
})

afterEach(async () => {
  // Reverse-order teardown: buffer, IPC, DB connection, global.
  await WriteBuffer.close()
  await Odac.Ipc.close()
  await db.destroy()
  delete global.Odac
})
59
+
60
describe('WriteBuffer - increment()', () => {
  it('should increment and return base + delta on first call', async () => {
    // Default delta is 1 on top of the seeded base of 100.
    const total = await WriteBuffer.increment('default', 'posts', 1, 'views')
    expect(total).toBe(101) // DB base 100 + delta 1
  })

  it('should accumulate multiple increments correctly', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views')
    await WriteBuffer.increment('default', 'posts', 1, 'views')
    const total = await WriteBuffer.increment('default', 'posts', 1, 'views')
    expect(total).toBe(103) // 100 + 3
  })

  it('should support custom delta values', async () => {
    const total = await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    expect(total).toBe(105) // 100 + 5
  })

  it('should handle different columns independently', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
    await WriteBuffer.increment('default', 'posts', 1, 'likes', 2)

    const views = await WriteBuffer.get('default', 'posts', 1, 'views')
    const likes = await WriteBuffer.get('default', 'posts', 1, 'likes')

    expect(views).toBe(103) // 100 + 3
    expect(likes).toBe(12) // 10 + 2
  })

  it('should handle different rows independently', async () => {
    await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
    await WriteBuffer.increment('default', 'posts', 2, 'views', 10)

    const first = await WriteBuffer.get('default', 'posts', 1, 'views')
    const second = await WriteBuffer.get('default', 'posts', 2, 'views')

    expect(first).toBe(105) // 100 + 5
    expect(second).toBe(210) // 200 + 10
  })

  it('should handle composite where keys', async () => {
    // Create a table with composite key
    await db.schema.createTable('post_stats', table => {
      table.integer('post_id')
      table.string('date', 10)
      table.integer('views').defaultTo(0)
      table.primary(['post_id', 'date'])
    })
    await db('post_stats').insert({post_id: 1, date: '2026-04-01', views: 50})

    // The where key is an object here rather than a scalar id.
    const total = await WriteBuffer.increment('default', 'post_stats', {post_id: 1, date: '2026-04-01'}, 'views', 3)
    expect(total).toBe(53) // 50 + 3
  })

  it('should return 0 + delta for non-existent rows', async () => {
    const total = await WriteBuffer.increment('default', 'posts', 999, 'views')
    expect(total).toBe(1) // 0 base + 1
  })
})