odac 1.4.7 → 1.4.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +45 -0
  2. package/client/odac.js +1 -1
  3. package/docs/ai/README.md +3 -2
  4. package/docs/ai/skills/SKILL.md +3 -2
  5. package/docs/ai/skills/backend/authentication.md +12 -6
  6. package/docs/ai/skills/backend/database.md +183 -12
  7. package/docs/ai/skills/backend/ipc.md +71 -12
  8. package/docs/ai/skills/backend/migrations.md +23 -0
  9. package/docs/ai/skills/backend/odac-var.md +155 -0
  10. package/docs/ai/skills/backend/utilities.md +1 -1
  11. package/docs/ai/skills/frontend/forms.md +23 -1
  12. package/docs/backend/04-routing/09-websocket-quick-reference.md +21 -1
  13. package/docs/backend/04-routing/09-websocket.md +22 -1
  14. package/docs/backend/08-database/05-write-behind-cache.md +230 -0
  15. package/docs/backend/08-database/06-read-through-cache.md +206 -0
  16. package/docs/backend/10-authentication/01-authentication-basics.md +53 -0
  17. package/docs/backend/10-authentication/05-session-management.md +12 -3
  18. package/docs/backend/13-utilities/01-odac-var.md +13 -19
  19. package/docs/backend/13-utilities/02-ipc.md +117 -0
  20. package/docs/frontend/03-forms/01-form-handling.md +15 -2
  21. package/docs/index.json +1 -1
  22. package/package.json +1 -1
  23. package/src/Auth.js +17 -0
  24. package/src/Database/Migration.js +219 -3
  25. package/src/Database/ReadCache.js +174 -0
  26. package/src/Database/WriteBuffer.js +605 -0
  27. package/src/Database.js +95 -1
  28. package/src/Ipc.js +343 -81
  29. package/src/Odac.js +2 -1
  30. package/src/Storage.js +4 -2
  31. package/src/Validator.js +1 -1
  32. package/src/Var.js +1 -0
  33. package/src/WebSocket.js +80 -23
  34. package/test/Database/Migration/migrate_column.test.js +168 -0
  35. package/test/Database/ReadCache/crossTable.test.js +179 -0
  36. package/test/Database/ReadCache/get.test.js +128 -0
  37. package/test/Database/ReadCache/invalidate.test.js +103 -0
  38. package/test/Database/ReadCache/proxy.test.js +184 -0
  39. package/test/Database/WriteBuffer/_recoverFromCheckpoint.test.js +207 -0
  40. package/test/Database/WriteBuffer/buffer.test.js +143 -0
  41. package/test/Database/WriteBuffer/flush.test.js +192 -0
  42. package/test/Database/WriteBuffer/get.test.js +72 -0
  43. package/test/Database/WriteBuffer/increment.test.js +118 -0
  44. package/test/Database/WriteBuffer/update.test.js +178 -0
  45. package/test/Database/insert.test.js +98 -0
  46. package/test/Ipc/hset.test.js +59 -0
  47. package/test/Ipc/incrBy.test.js +65 -0
  48. package/test/Ipc/lock.test.js +62 -0
  49. package/test/Ipc/rpush.test.js +68 -0
  50. package/test/Ipc/sadd.test.js +68 -0
  51. package/test/WebSocket/Client/fragmentation.test.js +130 -0
  52. package/test/WebSocket/Client/limits.test.js +10 -4
  53. package/test/WebSocket/Client/readyState.test.js +154 -0
  54. package/docs/backend/10-authentication/01-user-logins-with-authjs.md +0 -55
@@ -0,0 +1,192 @@
1
+ 'use strict'
2
+
3
+ const cluster = require('node:cluster')
4
+
5
+ /**
6
+ * Tests WriteBuffer flush operations: counter flush (batch UPDATE) and queue flush (batch INSERT).
7
+ * Why: Validates that buffered data is correctly persisted to the database,
8
+ * with proper transaction handling and error recovery.
9
+ */
10
+
11
+ let knexLib, db, WriteBuffer
12
+
13
+ beforeEach(async () => {
14
+ jest.resetModules()
15
+
16
+ knexLib = require('knex')
17
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
18
+
19
+ await db.schema.createTable('posts', table => {
20
+ table.integer('id').primary()
21
+ table.integer('views').defaultTo(0)
22
+ table.integer('likes').defaultTo(0)
23
+ table.string('title', 255)
24
+ })
25
+
26
+ await db('posts').insert([
27
+ {id: 1, views: 100, likes: 10, title: 'First Post'},
28
+ {id: 2, views: 200, likes: 20, title: 'Second Post'}
29
+ ])
30
+
31
+ Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
32
+
33
+ const Ipc = require('../../../src/Ipc')
34
+ global.Odac = {
35
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
36
+ Storage: {
37
+ isReady: () => false,
38
+ put: jest.fn(),
39
+ remove: jest.fn(),
40
+ getRange: () => []
41
+ },
42
+ Ipc
43
+ }
44
+ await Ipc.init()
45
+
46
+ WriteBuffer = require('../../../src/Database/WriteBuffer')
47
+ await WriteBuffer.init({default: db})
48
+ })
49
+
50
+ afterEach(async () => {
51
+ await WriteBuffer.close()
52
+ await Odac.Ipc.close()
53
+ await db.destroy()
54
+ delete global.Odac
55
+ })
56
+
57
+ describe('WriteBuffer - Counter Flush', () => {
58
+ it('should persist accumulated deltas to the database', async () => {
59
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
60
+ await WriteBuffer.increment('default', 'posts', 2, 'views', 10)
61
+
62
+ await WriteBuffer.flush()
63
+
64
+ const row1 = await db('posts').where({id: 1}).first()
65
+ const row2 = await db('posts').where({id: 2}).first()
66
+ expect(row1.views).toBe(105)
67
+ expect(row2.views).toBe(210)
68
+ })
69
+
70
+ it('should flush multiple columns for the same row', async () => {
71
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
72
+ await WriteBuffer.increment('default', 'posts', 1, 'likes', 7)
73
+
74
+ await WriteBuffer.flush()
75
+
76
+ const row = await db('posts').where({id: 1}).first()
77
+ expect(row.views).toBe(103)
78
+ expect(row.likes).toBe(17)
79
+ })
80
+
81
+ it('should clear counter index after successful flush', async () => {
82
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
83
+ await WriteBuffer.flush()
84
+
85
+ // Counter index should be empty after flush
86
+ const remaining = await Odac.Ipc.smembers('wb:idx:counters')
87
+ expect(remaining).toHaveLength(0)
88
+ })
89
+
90
+ it('should update base after flush so subsequent reads are correct', async () => {
91
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
92
+ await WriteBuffer.flush()
93
+
94
+ // New increment after flush
95
+ const result = await WriteBuffer.increment('default', 'posts', 1, 'views', 2)
96
+ expect(result).toBe(107) // base updated to 105, + 2
97
+
98
+ await WriteBuffer.flush()
99
+ const row = await db('posts').where({id: 1}).first()
100
+ expect(row.views).toBe(107)
101
+ })
102
+
103
+ it('should accumulate new deltas during flush correctly', async () => {
104
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 10)
105
+ await WriteBuffer.flush()
106
+
107
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
108
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 2)
109
+ await WriteBuffer.flush()
110
+
111
+ const row = await db('posts').where({id: 1}).first()
112
+ expect(row.views).toBe(115) // 100 + 10 + 5
113
+ })
114
+
115
+ it('should scope flush to specific table when provided', async () => {
116
+ await db.schema.createTable('comments', table => {
117
+ table.integer('id').primary()
118
+ table.integer('votes').defaultTo(0)
119
+ })
120
+ await db('comments').insert({id: 1, votes: 50})
121
+
122
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
123
+ await WriteBuffer.increment('default', 'comments', 1, 'votes', 3)
124
+
125
+ // Flush only posts
126
+ await WriteBuffer.flush('default', 'posts')
127
+
128
+ const post = await db('posts').where({id: 1}).first()
129
+ const comment = await db('comments').where({id: 1}).first()
130
+ expect(post.views).toBe(105) // Flushed
131
+ expect(comment.votes).toBe(50) // Not flushed
132
+ })
133
+ })
134
+
135
+ describe('WriteBuffer - Queue Flush (Batch Insert)', () => {
136
+ it('should batch insert queued rows', async () => {
137
+ await db.schema.createTable('activity_log', table => {
138
+ table.increments('id')
139
+ table.integer('user_id')
140
+ table.string('action', 50)
141
+ })
142
+
143
+ await WriteBuffer.insert('default', 'activity_log', {user_id: 1, action: 'view'})
144
+ await WriteBuffer.insert('default', 'activity_log', {user_id: 2, action: 'click'})
145
+ await WriteBuffer.insert('default', 'activity_log', {user_id: 1, action: 'scroll'})
146
+
147
+ await WriteBuffer.flush()
148
+
149
+ const rows = await db('activity_log').select()
150
+ expect(rows).toHaveLength(3)
151
+ expect(rows[0].action).toBe('view')
152
+ expect(rows[1].action).toBe('click')
153
+ expect(rows[2].action).toBe('scroll')
154
+ })
155
+
156
+ it('should clear queue after successful flush', async () => {
157
+ await db.schema.createTable('events', table => {
158
+ table.increments('id')
159
+ table.string('type', 50)
160
+ })
161
+
162
+ await WriteBuffer.insert('default', 'events', {type: 'pageview'})
163
+ await WriteBuffer.flush()
164
+
165
+ const queue = await Odac.Ipc.lrange('wb:q:default:events', 0, -1)
166
+ expect(queue).toHaveLength(0)
167
+ })
168
+
169
+ it('should handle empty queues gracefully', async () => {
170
+ await expect(WriteBuffer.flush()).resolves.not.toThrow()
171
+ })
172
+
173
+ it('should auto-flush when maxQueueSize is reached', async () => {
174
+ await db.schema.createTable('logs', table => {
175
+ table.increments('id')
176
+ table.string('msg', 50)
177
+ })
178
+
179
+ // Set low threshold for testing
180
+ WriteBuffer._config.maxQueueSize = 3
181
+
182
+ await WriteBuffer.insert('default', 'logs', {msg: 'a'})
183
+ await WriteBuffer.insert('default', 'logs', {msg: 'b'})
184
+ await WriteBuffer.insert('default', 'logs', {msg: 'c'}) // Triggers auto-flush
185
+
186
+ // Wait a tick for async auto-flush
187
+ await new Promise(r => setTimeout(r, 50))
188
+
189
+ const rows = await db('logs').select()
190
+ expect(rows.length).toBeGreaterThanOrEqual(3)
191
+ })
192
+ })
@@ -0,0 +1,72 @@
1
+ 'use strict'
2
+
3
+ const cluster = require('node:cluster')
4
+
5
+ /**
6
+ * Tests WriteBuffer.get().
7
+ * Why: Validates that get() returns the accurate current value (DB base + buffered delta)
8
+ * without flushing to the database.
9
+ */
10
+
11
+ let knexLib, db, WriteBuffer
12
+
13
+ beforeEach(async () => {
14
+ jest.resetModules()
15
+
16
+ knexLib = require('knex')
17
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
18
+
19
+ await db.schema.createTable('posts', table => {
20
+ table.integer('id').primary()
21
+ table.integer('views').defaultTo(0)
22
+ table.string('title', 255)
23
+ })
24
+
25
+ await db('posts').insert([
26
+ {id: 1, views: 100, title: 'First Post'},
27
+ {id: 2, views: 200, title: 'Second Post'}
28
+ ])
29
+
30
+ Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
31
+
32
+ const Ipc = require('../../../src/Ipc')
33
+ global.Odac = {
34
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
35
+ Storage: {
36
+ isReady: () => false,
37
+ put: jest.fn(),
38
+ remove: jest.fn(),
39
+ getRange: () => []
40
+ },
41
+ Ipc
42
+ }
43
+ await Ipc.init()
44
+
45
+ WriteBuffer = require('../../../src/Database/WriteBuffer')
46
+ await WriteBuffer.init({default: db})
47
+ })
48
+
49
+ afterEach(async () => {
50
+ await WriteBuffer.close()
51
+ await Odac.Ipc.close()
52
+ await db.destroy()
53
+ delete global.Odac
54
+ })
55
+
56
+ describe('WriteBuffer - get()', () => {
57
+ it('should return base value from DB when no buffer exists', async () => {
58
+ const result = await WriteBuffer.get('default', 'posts', 1, 'views')
59
+ expect(result).toBe(100)
60
+ })
61
+
62
+ it('should return base + delta when buffer exists', async () => {
63
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 7)
64
+ const result = await WriteBuffer.get('default', 'posts', 1, 'views')
65
+ expect(result).toBe(107)
66
+ })
67
+
68
+ it('should return 0 for non-existent rows', async () => {
69
+ const result = await WriteBuffer.get('default', 'posts', 999, 'views')
70
+ expect(result).toBe(0)
71
+ })
72
+ })
@@ -0,0 +1,118 @@
1
+ 'use strict'
2
+
3
+ const cluster = require('node:cluster')
4
+
5
+ /**
6
+ * Tests WriteBuffer.increment().
7
+ * Why: Validates that the Write-Behind Cache correctly accumulates deltas
8
+ * and returns accurate current totals (base + buffered delta) without a DB write.
9
+ */
10
+
11
+ let knexLib, db, WriteBuffer
12
+
13
+ beforeEach(async () => {
14
+ jest.resetModules()
15
+
16
+ knexLib = require('knex')
17
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
18
+
19
+ // Create test table
20
+ await db.schema.createTable('posts', table => {
21
+ table.integer('id').primary()
22
+ table.integer('views').defaultTo(0)
23
+ table.integer('likes').defaultTo(0)
24
+ table.string('title', 255)
25
+ })
26
+
27
+ // Seed data
28
+ await db('posts').insert([
29
+ {id: 1, views: 100, likes: 10, title: 'First Post'},
30
+ {id: 2, views: 200, likes: 20, title: 'Second Post'}
31
+ ])
32
+
33
+ // Mock cluster as primary
34
+ Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
35
+
36
+ const Ipc = require('../../../src/Ipc')
37
+ global.Odac = {
38
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
39
+ Storage: {
40
+ isReady: () => false,
41
+ put: jest.fn(),
42
+ remove: jest.fn(),
43
+ getRange: () => []
44
+ },
45
+ Ipc
46
+ }
47
+ await Ipc.init()
48
+
49
+ WriteBuffer = require('../../../src/Database/WriteBuffer')
50
+ await WriteBuffer.init({default: db})
51
+ })
52
+
53
+ afterEach(async () => {
54
+ await WriteBuffer.close()
55
+ await Odac.Ipc.close()
56
+ await db.destroy()
57
+ delete global.Odac
58
+ })
59
+
60
+ describe('WriteBuffer - increment()', () => {
61
+ it('should increment and return base + delta on first call', async () => {
62
+ const result = await WriteBuffer.increment('default', 'posts', 1, 'views')
63
+ expect(result).toBe(101) // DB base 100 + delta 1
64
+ })
65
+
66
+ it('should accumulate multiple increments correctly', async () => {
67
+ await WriteBuffer.increment('default', 'posts', 1, 'views')
68
+ await WriteBuffer.increment('default', 'posts', 1, 'views')
69
+ const result = await WriteBuffer.increment('default', 'posts', 1, 'views')
70
+ expect(result).toBe(103) // 100 + 3
71
+ })
72
+
73
+ it('should support custom delta values', async () => {
74
+ const result = await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
75
+ expect(result).toBe(105) // 100 + 5
76
+ })
77
+
78
+ it('should handle different columns independently', async () => {
79
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 3)
80
+ await WriteBuffer.increment('default', 'posts', 1, 'likes', 2)
81
+
82
+ const views = await WriteBuffer.get('default', 'posts', 1, 'views')
83
+ const likes = await WriteBuffer.get('default', 'posts', 1, 'likes')
84
+
85
+ expect(views).toBe(103) // 100 + 3
86
+ expect(likes).toBe(12) // 10 + 2
87
+ })
88
+
89
+ it('should handle different rows independently', async () => {
90
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
91
+ await WriteBuffer.increment('default', 'posts', 2, 'views', 10)
92
+
93
+ const row1 = await WriteBuffer.get('default', 'posts', 1, 'views')
94
+ const row2 = await WriteBuffer.get('default', 'posts', 2, 'views')
95
+
96
+ expect(row1).toBe(105) // 100 + 5
97
+ expect(row2).toBe(210) // 200 + 10
98
+ })
99
+
100
+ it('should handle composite where keys', async () => {
101
+ // Create a table with composite key
102
+ await db.schema.createTable('post_stats', table => {
103
+ table.integer('post_id')
104
+ table.string('date', 10)
105
+ table.integer('views').defaultTo(0)
106
+ table.primary(['post_id', 'date'])
107
+ })
108
+ await db('post_stats').insert({post_id: 1, date: '2026-04-01', views: 50})
109
+
110
+ const result = await WriteBuffer.increment('default', 'post_stats', {post_id: 1, date: '2026-04-01'}, 'views', 3)
111
+ expect(result).toBe(53) // 50 + 3
112
+ })
113
+
114
+ it('should return 0 + delta for non-existent rows', async () => {
115
+ const result = await WriteBuffer.increment('default', 'posts', 999, 'views')
116
+ expect(result).toBe(1) // 0 base + 1
117
+ })
118
+ })
@@ -0,0 +1,178 @@
1
+ 'use strict'
2
+
3
+ const cluster = require('node:cluster')
4
+
5
+ /**
6
+ * Tests WriteBuffer.update() — last-write-wins coalescing.
7
+ * Why: Validates that repeated updates to the same row collapse into one UPDATE query,
8
+ * and that different rows/tables are isolated correctly.
9
+ */
10
+
11
+ let knexLib, db, WriteBuffer
12
+
13
+ beforeEach(async () => {
14
+ jest.resetModules()
15
+
16
+ knexLib = require('knex')
17
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
18
+
19
+ await db.schema.createTable('posts', table => {
20
+ table.integer('id').primary()
21
+ table.integer('views').defaultTo(0)
22
+ table.string('title', 255)
23
+ table.string('slug', 255)
24
+ })
25
+
26
+ await db('posts').insert([
27
+ {id: 1, views: 100, title: 'First Post', slug: 'first-post'},
28
+ {id: 2, views: 200, title: 'Second Post', slug: 'second-post'}
29
+ ])
30
+
31
+ Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
32
+
33
+ const Ipc = require('../../../src/Ipc')
34
+ global.Odac = {
35
+ Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
36
+ Storage: {
37
+ isReady: () => false,
38
+ put: jest.fn(),
39
+ remove: jest.fn(),
40
+ getRange: () => []
41
+ },
42
+ Ipc
43
+ }
44
+ await Ipc.init()
45
+
46
+ WriteBuffer = require('../../../src/Database/WriteBuffer')
47
+ await WriteBuffer.init({default: db})
48
+ })
49
+
50
+ afterEach(async () => {
51
+ await WriteBuffer.close()
52
+ await Odac.Ipc.close()
53
+ await db.destroy()
54
+ delete global.Odac
55
+ })
56
+
57
+ describe('WriteBuffer - update()', () => {
58
+ it('should buffer and flush a single update', async () => {
59
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Updated Title'})
60
+ await WriteBuffer.flush()
61
+
62
+ const row = await db('posts').where({id: 1}).first()
63
+ expect(row.title).toBe('Updated Title')
64
+ expect(row.slug).toBe('first-post') // Untouched
65
+ })
66
+
67
+ it('should merge multiple updates (last-write-wins)', async () => {
68
+ await WriteBuffer.update('default', 'posts', 1, {title: 'First Update'})
69
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Second Update'})
70
+ await WriteBuffer.flush()
71
+
72
+ const row = await db('posts').where({id: 1}).first()
73
+ expect(row.title).toBe('Second Update')
74
+ })
75
+
76
+ it('should merge different columns from multiple updates', async () => {
77
+ await WriteBuffer.update('default', 'posts', 1, {title: 'New Title'})
78
+ await WriteBuffer.update('default', 'posts', 1, {slug: 'new-slug'})
79
+ await WriteBuffer.flush()
80
+
81
+ const row = await db('posts').where({id: 1}).first()
82
+ expect(row.title).toBe('New Title')
83
+ expect(row.slug).toBe('new-slug')
84
+ })
85
+
86
+ it('should handle different rows independently', async () => {
87
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Row 1'})
88
+ await WriteBuffer.update('default', 'posts', 2, {title: 'Row 2'})
89
+ await WriteBuffer.flush()
90
+
91
+ const row1 = await db('posts').where({id: 1}).first()
92
+ const row2 = await db('posts').where({id: 2}).first()
93
+ expect(row1.title).toBe('Row 1')
94
+ expect(row2.title).toBe('Row 2')
95
+ })
96
+
97
+ it('should handle composite where key', async () => {
98
+ await db.schema.createTable('user_prefs', table => {
99
+ table.string('pref_key', 50)
100
+ table.integer('user_id')
101
+ table.string('value', 255)
102
+ table.primary(['pref_key', 'user_id'])
103
+ })
104
+ await db('user_prefs').insert({pref_key: 'theme', user_id: 1, value: 'light'})
105
+
106
+ await WriteBuffer.update('default', 'user_prefs', {pref_key: 'theme', user_id: 1}, {value: 'dark'})
107
+ await WriteBuffer.flush()
108
+
109
+ const row = await db('user_prefs').where({pref_key: 'theme', user_id: 1}).first()
110
+ expect(row.value).toBe('dark')
111
+ })
112
+
113
+ it('should return true when buffered', async () => {
114
+ const result = await WriteBuffer.update('default', 'posts', 1, {title: 'Test'})
115
+ expect(result).toBe(true)
116
+ })
117
+
118
+ it('should clear update index after successful flush', async () => {
119
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Test'})
120
+ await WriteBuffer.flush()
121
+
122
+ const remaining = await Odac.Ipc.smembers('wb:idx:updates')
123
+ expect(remaining).toHaveLength(0)
124
+ })
125
+
126
+ it('should combine increment and update on same row during flush', async () => {
127
+ await WriteBuffer.increment('default', 'posts', 1, 'views', 5)
128
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Combo Test'})
129
+ await WriteBuffer.flush()
130
+
131
+ const row = await db('posts').where({id: 1}).first()
132
+ expect(row.views).toBe(105)
133
+ expect(row.title).toBe('Combo Test')
134
+ })
135
+
136
+ it('should handle different tables independently', async () => {
137
+ await db.schema.createTable('comments', table => {
138
+ table.integer('id').primary()
139
+ table.string('body', 255)
140
+ })
141
+ await db('comments').insert({id: 1, body: 'Original'})
142
+
143
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Post Updated'})
144
+ await WriteBuffer.update('default', 'comments', 1, {body: 'Comment Updated'})
145
+ await WriteBuffer.flush()
146
+
147
+ const post = await db('posts').where({id: 1}).first()
148
+ const comment = await db('comments').where({id: 1}).first()
149
+ expect(post.title).toBe('Post Updated')
150
+ expect(comment.body).toBe('Comment Updated')
151
+ })
152
+
153
+ it('should scope flush to specific table when provided', async () => {
154
+ await db.schema.createTable('comments', table => {
155
+ table.integer('id').primary()
156
+ table.string('body', 255)
157
+ })
158
+ await db('comments').insert({id: 1, body: 'Original'})
159
+
160
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Post Updated'})
161
+ await WriteBuffer.update('default', 'comments', 1, {body: 'Comment Updated'})
162
+
163
+ // Flush only posts
164
+ await WriteBuffer.flush('default', 'posts')
165
+
166
+ const post = await db('posts').where({id: 1}).first()
167
+ const comment = await db('comments').where({id: 1}).first()
168
+ expect(post.title).toBe('Post Updated')
169
+ expect(comment.body).toBe('Original') // Not flushed
170
+ })
171
+
172
+ it('should not modify DB before flush is called', async () => {
173
+ await WriteBuffer.update('default', 'posts', 1, {title: 'Buffered Only'})
174
+
175
+ const row = await db('posts').where({id: 1}).first()
176
+ expect(row.title).toBe('First Post') // Still original
177
+ })
178
+ })
@@ -0,0 +1,98 @@
1
+ 'use strict'
2
+
3
+ const knexLib = require('knex')
4
+
5
+ /**
6
+ * Tests the wrapWithInvalidation thenable returned by write operations (insert/update/delete/truncate).
7
+ * Why: Ensures the thenable is fully Promise-compatible — supporting both await and .catch() chaining.
8
+ * This prevents TypeError when consumers call .insert(...).catch() or .update(...).catch().
9
+ */
10
+
11
+ let db
12
+
13
+ beforeEach(async () => {
14
+ db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
15
+ await db.schema.createTable('tokens', table => {
16
+ table.string('id', 21).primary()
17
+ table.string('user', 100)
18
+ table.string('token_x', 64)
19
+ })
20
+ })
21
+
22
+ afterEach(async () => {
23
+ await db.destroy()
24
+ jest.resetModules()
25
+ })
26
+
27
+ describe('Database.js - wrapWithInvalidation thenable', () => {
28
+ it('insert().catch() should be a function', () => {
29
+ const DB = require('../../src/Database')
30
+ DB.connections = {default: db}
31
+ db._odacConnectionKey = 'default'
32
+ DB._nanoidColumns = {}
33
+
34
+ const result = DB.tokens.insert({id: 'test1', user: 'u1', token_x: 'tx1'})
35
+ expect(typeof result.catch).toBe('function')
36
+ })
37
+
38
+ it('insert().catch() should resolve on success', async () => {
39
+ const DB = require('../../src/Database')
40
+ DB.connections = {default: db}
41
+ db._odacConnectionKey = 'default'
42
+ DB._nanoidColumns = {}
43
+
44
+ const result = await DB.tokens.insert({id: 'test2', user: 'u2', token_x: 'tx2'}).catch(() => false)
45
+ expect(result).not.toBe(false)
46
+
47
+ const rows = await db('tokens').where('id', 'test2')
48
+ expect(rows).toHaveLength(1)
49
+ expect(rows[0].user).toBe('u2')
50
+ })
51
+
52
+ it('insert().catch() should catch errors gracefully', async () => {
53
+ const DB = require('../../src/Database')
54
+ DB.connections = {default: db}
55
+ db._odacConnectionKey = 'default'
56
+ DB._nanoidColumns = {}
57
+
58
+ // Insert first row
59
+ await DB.tokens.insert({id: 'dup1', user: 'u1', token_x: 'tx1'})
60
+
61
+ // Duplicate primary key — should trigger catch
62
+ const result = await DB.tokens.insert({id: 'dup1', user: 'u2', token_x: 'tx2'}).catch(() => 'caught')
63
+ expect(result).toBe('caught')
64
+ })
65
+
66
+ it('update().catch() should be a function', () => {
67
+ const DB = require('../../src/Database')
68
+ DB.connections = {default: db}
69
+ db._odacConnectionKey = 'default'
70
+ DB._nanoidColumns = {}
71
+
72
+ const result = DB.tokens.where('id', 'x').update({user: 'new'})
73
+ expect(typeof result.catch).toBe('function')
74
+ })
75
+
76
+ it('delete().catch() should be a function', () => {
77
+ const DB = require('../../src/Database')
78
+ DB.connections = {default: db}
79
+ db._odacConnectionKey = 'default'
80
+ DB._nanoidColumns = {}
81
+
82
+ const result = DB.tokens.where('id', 'x').delete()
83
+ expect(typeof result.catch).toBe('function')
84
+ })
85
+
86
+ it('insert() should work with await (no .catch)', async () => {
87
+ const DB = require('../../src/Database')
88
+ DB.connections = {default: db}
89
+ db._odacConnectionKey = 'default'
90
+ DB._nanoidColumns = {}
91
+
92
+ await DB.tokens.insert({id: 'await1', user: 'u_await', token_x: 'tx_await'})
93
+
94
+ const rows = await db('tokens').where('id', 'await1')
95
+ expect(rows).toHaveLength(1)
96
+ expect(rows[0].user).toBe('u_await')
97
+ })
98
+ })