odac 1.4.9 → 1.4.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -7,7 +7,7 @@
     "email": "mail@emre.red",
     "url": "https://emre.red"
   },
-  "version": "1.4.9",
+  "version": "1.4.11",
   "license": "MIT",
   "engines": {
     "node": ">=18.0.0"
@@ -22,6 +22,7 @@
   },
   "dependencies": {
     "@tailwindcss/cli": "^4.1.18",
+    "esbuild": "^0.25.12",
     "knex": "^3.1.0",
     "lmdb": "^3.4.4",
     "tailwindcss": "^4.1.18"
package/src/Config.js CHANGED
@@ -27,6 +27,13 @@ module.exports = {
     driver: 'memory',
     redis: 'default'
   },
+  js: {
+    target: 'es2020',
+    minify: true,
+    sourcemap: false,
+    bundle: true,
+    obfuscate: false
+  },
   debug: process.env.NODE_ENV !== 'production',
 
   init: function () {
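The new js block holds the build defaults for the esbuild pipeline added in this release. Projects can presumably override them the same way other Config sections are overridden; a minimal sketch, assuming ODAC merges a project-level config object over these defaults (the file name and merge depth are assumptions, not confirmed by this diff):

    // config.js: hypothetical project-level override of the new defaults
    module.exports = {
      js: {
        target: 'es2022',  // newer esbuild syntax target
        minify: false,     // keep output readable while debugging
        sourcemap: true
      }
    }

The option names (target, minify, sourcemap, bundle) mirror esbuild's own flags; obfuscate is an ODAC-level extra.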
@@ -524,7 +524,7 @@ class Migration {
       if (!currentColumns[colName]) continue // New column, handled above
 
       if (this._columnNeedsAlter(colDef, currentColumns[colName])) {
-        ops.push({type: 'alter_column', column: colName, definition: colDef})
+        ops.push({type: 'alter_column', column: colName, definition: colDef, currentNullable: currentColumns[colName].nullable})
       }
     }
 
@@ -582,18 +582,35 @@ class Migration {
   /**
    * Checks if a column definition differs from the current DB metadata enough to warrant ALTER.
    * Conservative: only alters when there is a clear type or constraint mismatch.
+   * Why: Without type comparison, changing a column from e.g. 'string' to 'text' in the
+   * schema file would be silently ignored — the DB would never receive the ALTER.
    * @param {object} desired - Column definition from schema file
    * @param {object} current - Column metadata from introspection
    * @returns {boolean}
    */
   _columnNeedsAlter(desired, current) {
+    // Type mismatch — map the raw DB type back to an ODAC type and compare.
+    // nanoid is stored as 'string' (varchar) in the DB, so normalize before comparison.
+    // specificType uses the raw DB type directly (def.length holds the actual PG type),
+    // so compare against the raw introspected type instead of reverse-mapping.
+    if (desired.type === 'specificType') {
+      const rawDesired = (desired.length || '').toLowerCase().trim()
+      const rawCurrent = (current.type || '').toLowerCase().trim()
+      if (rawDesired !== rawCurrent) return true
+    } else {
+      const desiredType = desired.type === 'nanoid' ? 'string' : desired.type
+      const currentType = this._reverseMapType(current.type)
+      if (desiredType !== currentType) return true
+    }
+
     // Nullable mismatch
     if (desired.nullable === false && current.nullable === true) return true
     if (desired.nullable === true && current.nullable === false) return true
 
     // Length mismatch for string types — use Number() coercion since some
     // drivers (SQLite) return maxLength as a string, e.g. '100' vs 100.
-    if (desired.length && current.maxLength && Number(desired.length) !== Number(current.maxLength)) return true
+    if (desired.type !== 'specificType' && desired.length && current.maxLength && Number(desired.length) !== Number(current.maxLength))
+      return true
 
     // Default value mismatch — normalize both sides before comparing because
     // drivers return defaults as strings (e.g. "'active'" in PG, "active" in SQLite).
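The practical effect: editing a column's type in a schema file now yields an ALTER instead of a silent no-op. A small illustration of the comparison above, assuming current comes from Knex columnInfo() and _reverseMapType maps raw DB types such as 'varchar' back to ODAC names:

    // Schema file changed body from 'string' to 'text'
    const desired = {type: 'text'}
    const current = {type: 'varchar', maxLength: 255, nullable: true}
    // desiredType 'text' !== _reverseMapType('varchar'), which yields 'string',
    // so _columnNeedsAlter returns true and migrate() queues an alter_column op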
@@ -686,10 +703,17 @@ class Migration {
     const indexOps = diff.filter(op => op.type === 'add_index' || op.type === 'drop_index')
     const fkOps = diff.filter(op => op.type === 'add_foreign_key' || op.type === 'drop_foreign_key')
 
-    // Phase 1: Column operations — atomic batch
-    if (columnOps.length > 0) {
+    // Separate primary key alter ops — PostgreSQL's ALTER COLUMN via Knex emits
+    // DROP NOT NULL before SET NOT NULL, which PG rejects on PK columns (42P16).
+    // These must be handled with raw ALTER COLUMN ... TYPE ... USING instead.
+    const isPG = knex.client?.config?.client === 'pg' || knex.client?.config?.client === 'postgresql'
+    const pkAlterOps = isPG ? columnOps.filter(op => op.type === 'alter_column' && op.definition.primary) : []
+    const batchOps = isPG ? columnOps.filter(op => !(op.type === 'alter_column' && op.definition.primary)) : columnOps
+
+    // Phase 1a: Batch column operations (non-PK alters + adds + drops)
+    if (batchOps.length > 0) {
       await knex.schema.alterTable(tableName, table => {
-        for (const op of columnOps) {
+        for (const op of batchOps) {
           switch (op.type) {
             case 'add_column':
               this._addColumn(table, op.column, op.definition)
@@ -698,13 +722,31 @@ class Migration {
             table.dropColumn(op.column)
             break
           case 'alter_column':
-            this._alterColumn(table, op.column, op.definition)
+            this._alterColumn(table, op.column, op.definition, op.currentNullable)
             break
         }
       }
      })
    }
 
+    // Phase 1b: Primary key column type changes on PostgreSQL — raw SQL.
+    // Why: Knex .alter() generates "DROP NOT NULL" + "SET NOT NULL" sequence,
+    // but PG forbids DROP NOT NULL on primary key columns. Raw ALTER COLUMN TYPE
+    // changes the type without touching the NOT NULL constraint.
+    for (const op of pkAlterOps) {
+      const sqlType = this._pgColumnType(op.definition)
+      await knex.raw(`ALTER TABLE ?? ALTER COLUMN ?? TYPE ${sqlType} USING ??::${sqlType}`, [tableName, op.column, op.column])
+
+      // Apply default value change if specified
+      if (op.definition.default !== undefined) {
+        if (op.definition.default === 'now()') {
+          await knex.raw(`ALTER TABLE ?? ALTER COLUMN ?? SET DEFAULT now()`, [tableName, op.column])
+        } else {
+          await knex.raw(`ALTER TABLE ?? ALTER COLUMN ?? SET DEFAULT ?`, [tableName, op.column, op.definition.default])
+        }
+      }
+    }
+
     // Phase 2: Foreign key operations — drop before add to handle replacements
     for (const op of fkOps) {
       if (op.type === 'drop_foreign_key') {
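Illustrative only: for a nanoid primary key widened from 21 to 30 characters, the Phase 1b path binds roughly the following (?? is Knex identifier interpolation; 'domains' and 'id' are example names, not requirements):

    const sqlType = 'varchar(30)' // from _pgColumnType({type: 'nanoid', length: 30})
    await knex.raw(`ALTER TABLE ?? ALTER COLUMN ?? TYPE ${sqlType} USING ??::${sqlType}`, ['domains', 'id', 'id'])
    // emits: ALTER TABLE "domains" ALTER COLUMN "id" TYPE varchar(30) USING "id"::varchar(30)
    // NOT NULL is never dropped, so PG's 42P16 error on PK columns cannot occur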
@@ -723,6 +765,51 @@ class Migration {
     }
   }
 
+  /**
+   * Maps an ODAC column definition to a PostgreSQL type string for raw ALTER COLUMN TYPE.
+   * @param {object} def - Column definition from schema
+   * @returns {string} PostgreSQL type name
+   */
+  _pgColumnType(def) {
+    switch (def.type) {
+      case 'nanoid':
+      case 'string':
+        return `varchar(${def.length || (def.type === 'nanoid' ? 21 : 255)})`
+      case 'text':
+        return 'text'
+      case 'integer':
+        return 'integer'
+      case 'bigInteger':
+        return 'bigint'
+      case 'boolean':
+        return 'boolean'
+      case 'float':
+        return 'double precision'
+      case 'decimal':
+        return `numeric(${def.precision || 10},${def.scale || 2})`
+      case 'uuid':
+        return 'uuid'
+      case 'json':
+        return 'json'
+      case 'jsonb':
+        return 'jsonb'
+      case 'timestamp':
+        return 'timestamp'
+      case 'datetime':
+        return 'timestamp'
+      case 'date':
+        return 'date'
+      case 'time':
+        return 'time'
+      case 'binary':
+        return 'bytea'
+      case 'specificType':
+        return def.length || def.specificType || def.type
+      default:
+        return def.type
+    }
+  }
+
   /**
    * Why: PostgreSQL introspection can miss existing constraints across PG versions
    * (int2vector cast edge cases, search_path mismatches, expression indexes).
@@ -902,6 +989,8 @@ class Migration {
         return table.uuid(colName)
       case 'enum':
         return table.enum(colName, def.values || [])
+      case 'specificType':
+        return table.specificType(colName, def.length || def.specificType || def.type)
       default:
         return table.specificType(colName, def.type)
     }
@@ -950,12 +1039,18 @@ class Migration {
    * @param {string} colName - Column name
    * @param {object} def - Column definition
    */
-  _alterColumn(table, colName, def) {
+  _alterColumn(table, colName, def, currentNullable) {
     const col = this._createColumnBuilder(table, colName, def)
     if (!col) return
 
+    // Knex .alter() defaults to nullable when no explicit nullable/notNullable is set,
+    // which generates "ALTER COLUMN ... DROP NOT NULL" — PostgreSQL rejects this on
+    // primary key columns (error 42P16). When the schema doesn't specify nullable,
+    // preserve the column's current DB state to avoid destructive no-op alterations.
     if (def.nullable === false) col.notNullable()
     else if (def.nullable === true) col.nullable()
+    else if (currentNullable === false) col.notNullable()
+    else if (currentNullable === true) col.nullable()
 
     if (def.default !== undefined) col.defaultTo(def.default)
 
package/src/Database/WriteBuffer.js CHANGED
@@ -1,5 +1,6 @@
 'use strict'
 const cluster = require('node:cluster')
+const nanoid = require('./nanoid')
 
 /**
  * Write-Behind Cache with Write Coalescing for ODAC Database layer.
@@ -56,12 +57,15 @@ class WriteBuffer {
    * Why: Initializes the WriteBuffer. Called from Database.init() after Ipc is ready.
    * Primary: recovers LMDB checkpoint, starts flush/checkpoint timers.
    * All processes: stores connection references for flush DB writes.
+   * @param {object} connections - Knex connection map {connectionKey: knexInstance}
+   * @param {object} nanoidColumns - NanoID column metadata from DatabaseManager {connectionKey: {tableName: [{column, size}]}}
    */
-  async init(connections) {
+  async init(connections, nanoidColumns = {}) {
     if (this._initialized) return
     this._initialized = true
 
     this._connections = connections
+    this._nanoidColumns = nanoidColumns
     this._config = {...DEFAULT_CONFIG, ...Odac.Config.buffer}
 
     if (cluster.isPrimary) {
@@ -127,6 +131,16 @@ class WriteBuffer {
    * that are drained to the database in a single INSERT batch.
    */
   async insert(connection, table, row) {
+    // Auto-generate nanoid values for columns defined as type 'nanoid' in schema.
+    // Why: The Database.js proxy nanoid injection only covers direct QB calls.
+    // WriteBuffer bypasses that proxy — rows must be populated here before queuing.
+    const nanoidCols = this._nanoidColumns?.[connection]?.[table]
+    if (nanoidCols) {
+      for (const {column, size} of nanoidCols) {
+        if (!row[column]) row[column] = nanoid(size)
+      }
+    }
+
     const queueKey = `${connection}:${table}`
     const length = await Odac.Ipc.rpush(`wb:q:${queueKey}`, row)
     await Odac.Ipc.sadd('wb:idx:queues', queueKey)
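For callers this means buffered inserts no longer need hand-rolled ids. A usage sketch based on the buffered-insert API exercised in the new test file further down ('activity' and its columns are the test fixtures, not requirements):

    // 'activity' defines a nanoid primary key 'id' (size 21) in its schema file
    await DB.activity.buffer.insert({user: 'alice', action: 'login'}) // id auto-filled
    await DB.activity.buffer.flush() // drained to the DB in one INSERT batch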
package/src/Database.js CHANGED
@@ -38,7 +38,7 @@ class DatabaseManager {
     readCache.init()
 
     // Initialize Write-Behind Cache (Primary holds state, Workers communicate via IPC)
-    await writeBuffer.init(this.connections)
+    await writeBuffer.init(this.connections, this._nanoidColumns)
   }
 
   /**
@@ -281,8 +281,22 @@ const tableProxyHandler = {
   }
 
   // Cache invalidation for insert — applied AFTER nanoid wrap so both paths are covered.
-  const currentInsert = qb.insert
-  qb.insert = wrapWithInvalidation(currentInsert)
+  // IMPORTANT: Unlike update/delete/truncate, insert is NOT terminal — it supports
+  // chaining (e.g. .insert().onConflict().merge()). So we cannot use wrapWithInvalidation
+  // which returns a plain thenable. Instead, override .then() on the query builder to
+  // inject invalidation at execution time, preserving the full Knex chain.
+  const insertBeforeInvalidation = qb.insert
+  qb.insert = function (...args) {
+    const result = insertBeforeInvalidation.apply(this, args)
+    const origThen = result.then
+    result.then = function (resolve, reject) {
+      return origThen
+        .call(this)
+        .then(res => readCache.invalidate(connectionKey, prop).then(() => res))
+        .then(resolve, reject)
+    }
+    return result
+  }
 
   const originalThen = qb.then
   qb.then = function (resolve, reject) {
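Because only .then() is patched, .insert() still returns the live Knex builder, so upsert chains keep working and invalidation fires when the chain is finally awaited. A sketch in the proxied-table style used by the tests (DB.users is a hypothetical table):

    await DB.users
      .insert({id: 'u1', name: 'Alice'})
      .onConflict('id')
      .merge() // readCache.invalidate(...) runs after the statement resolves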
package/src/Odac.js CHANGED
@@ -156,6 +156,9 @@ module.exports = {
     _odac.write = function (value) {
       return _odac.Request.write(value)
     }
+    _odac.cache = function (seconds) {
+      return _odac.Request.cache(seconds)
+    }
     _odac.stream = function (input) {
       _odac.Request.clearTimeout()
       return new (require('./Stream'))(_odac.Request.req, _odac.Request.res, input, _odac)
package/src/Request.js CHANGED
@@ -310,6 +310,27 @@ class OdacRequest {
     }
   }
 
+  // - SET PROXY CACHE
+  /**
+   * Enables ODAC Proxy caching for the current response.
+   * Sets the X-ODAC-Cache header with the specified TTL (in seconds)
+   * and updates Cache-Control to allow proxy caching.
+   *
+   * Why: Allows controllers to declaratively opt-in to proxy-level
+   * caching for static or semi-static HTML responses, offloading
+   * repeated rendering from the application server.
+   *
+   * @param {number} seconds - Cache TTL in seconds (must be a positive integer)
+   * @throws {TypeError} If seconds is not a positive integer
+   */
+  cache(seconds) {
+    if (!Number.isInteger(seconds) || seconds < 1) {
+      throw new TypeError('Odac.cache() requires a positive integer (seconds)')
+    }
+    this.header('X-ODAC-Cache', seconds)
+    this.header('Cache-Control', `public, max-age=${seconds}`)
+  }
+
   // - HTTP CODE
   status(code) {
     this.#status = code
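A controller can now opt a response into proxy caching with a single call. A minimal sketch, assuming a conventional controller that receives the request context (the controller signature is an assumption; only cache() and write() are confirmed by this diff):

    module.exports = async Odac => {
      Odac.cache(600) // X-ODAC-Cache: 600, Cache-Control: public, max-age=600
      return Odac.write('<h1>Semi-static page</h1>')
    }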
@@ -1,8 +1,14 @@
-/* global Odac */
 /**
  * ODAC Template - Client-Side Application
  *
- * This file demonstrates odac.js features including:
+ * This file is automatically compiled by ODAC's JS/TS pipeline.
+ * Write your frontend logic here — TypeScript (.ts) and plain JavaScript (.js) both work.
+ *
+ * - Place files in view/js/ to create entry points
+ * - Files starting with _ are ignored (use them as shared imports)
+ * - Output goes to public/assets/js/{name}.js
+ *
+ * Features demonstrated:
  * - AJAX page loading with Odac.loader() for smooth navigation
  * - History API integration
  * - Event delegation
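The entry-point rules above imply a layout like the following; the file names are illustrative:

    // view/js/app.js      → public/assets/js/app.js   (entry point)
    // view/js/admin.ts    → public/assets/js/admin.js (entry point)
    // view/js/_helpers.js → no output; the underscore marks a shared import

    // view/js/app.js
    import {formatDate} from './_helpers' // inlined by esbuild (js.bundle: true)
    console.log(formatDate(new Date()))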
@@ -218,3 +218,146 @@ describe('Migration.migrate() - Foreign Key Diff', () => {
     warnSpy.mockRestore()
   })
 })
+
+describe('Migration.migrate() - Column Type Change', () => {
+  it('should alter a column when its type changes (string → text)', async () => {
+    writeSchema('articles', {columns: {id: {type: 'increments'}, body: {type: 'string'}}})
+    await Migration.migrate()
+
+    writeSchema('articles', {columns: {id: {type: 'increments'}, body: {type: 'text'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toEqual(expect.arrayContaining([expect.objectContaining({type: 'alter_column', column: 'body', table: 'articles'})]))
+  })
+
+  it('should alter a column when its type changes (integer → bigInteger)', async () => {
+    writeSchema('counters', {columns: {id: {type: 'increments'}, value: {type: 'integer'}}})
+    await Migration.migrate()
+
+    writeSchema('counters', {columns: {id: {type: 'increments'}, value: {type: 'bigInteger'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toEqual(expect.arrayContaining([expect.objectContaining({type: 'alter_column', column: 'value', table: 'counters'})]))
+  })
+
+  it('should not alter a column when its type is unchanged', async () => {
+    writeSchema('logs', {columns: {id: {type: 'increments'}, message: {type: 'text'}}})
+    await Migration.migrate()
+
+    writeSchema('logs', {columns: {id: {type: 'increments'}, message: {type: 'text'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toHaveLength(0)
+  })
+
+  it('should not produce alter for nanoid columns stored as string', async () => {
+    writeSchema('tokens', {columns: {id: {type: 'nanoid', length: 21}, name: {type: 'string'}}})
+    await Migration.migrate()
+
+    // Re-run with same schema — nanoid maps to varchar in DB, should not trigger false alter
+    writeSchema('tokens', {columns: {id: {type: 'nanoid', length: 21}, name: {type: 'string'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toHaveLength(0)
+  })
+})
+
+describe('Migration.migrate() - Nullable Preservation on Alter', () => {
+  it('should preserve NOT NULL when altering a column that has no explicit nullable in schema', async () => {
+    // Create table with a NOT NULL column
+    writeSchema('domains', {columns: {id: {type: 'increments'}, code: {type: 'string', nullable: false, default: 'A'}}})
+    await Migration.migrate()
+
+    // Change default value but omit nullable — should preserve NOT NULL from DB
+    writeSchema('domains', {columns: {id: {type: 'increments'}, code: {type: 'string', default: 'B'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toHaveLength(1)
+    expect(alterOps[0]).toMatchObject({column: 'code', currentNullable: false})
+  })
+
+  it('should preserve NULLABLE when altering a column that has no explicit nullable in schema', async () => {
+    // Create table with a NULLABLE column
+    writeSchema('logs', {columns: {id: {type: 'increments'}, note: {type: 'string', nullable: true, default: 'x'}}})
+    await Migration.migrate()
+
+    // Change default but omit nullable — should preserve nullable from DB
+    writeSchema('logs', {columns: {id: {type: 'increments'}, note: {type: 'string', default: 'y'}}})
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toHaveLength(1)
+    expect(alterOps[0]).toMatchObject({column: 'note', currentNullable: true})
+  })
+})
+
+describe('Migration - PG Primary Key Alter Safety', () => {
+  it('should carry primary flag in alter_column diff for PK columns', async () => {
+    // Create table with a primary nanoid column
+    writeSchema('domains', {columns: {id: {type: 'nanoid', primary: true}, name: {type: 'string'}}})
+    await Migration.migrate()
+
+    // Simulate a type mismatch by changing to a different length — triggers alter
+    writeSchema('domains', {columns: {id: {type: 'nanoid', primary: true, length: 30}, name: {type: 'string'}}})
+    const result = await Migration.migrate({dryRun: true})
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column' && op.column === 'id')
+
+    expect(alterOps).toHaveLength(1)
+    expect(alterOps[0].definition.primary).toBe(true)
+  })
+
+  it('should map ODAC types to PG types correctly via _pgColumnType', () => {
+    const m = Migration
+    expect(m._pgColumnType({type: 'nanoid'})).toBe('varchar(21)')
+    expect(m._pgColumnType({type: 'nanoid', length: 30})).toBe('varchar(30)')
+    expect(m._pgColumnType({type: 'string'})).toBe('varchar(255)')
+    expect(m._pgColumnType({type: 'string', length: 100})).toBe('varchar(100)')
+    expect(m._pgColumnType({type: 'text'})).toBe('text')
+    expect(m._pgColumnType({type: 'integer'})).toBe('integer')
+    expect(m._pgColumnType({type: 'bigInteger'})).toBe('bigint')
+    expect(m._pgColumnType({type: 'boolean'})).toBe('boolean')
+    expect(m._pgColumnType({type: 'uuid'})).toBe('uuid')
+    expect(m._pgColumnType({type: 'jsonb'})).toBe('jsonb')
+    expect(m._pgColumnType({type: 'timestamp'})).toBe('timestamp')
+    expect(m._pgColumnType({type: 'binary'})).toBe('bytea')
+    expect(m._pgColumnType({type: 'decimal'})).toBe('numeric(10,2)')
+    expect(m._pgColumnType({type: 'decimal', precision: 8, scale: 4})).toBe('numeric(8,4)')
+    expect(m._pgColumnType({type: 'specificType', length: 'text[]'})).toBe('text[]')
+  })
+})
+
+describe('Migration - specificType handling', () => {
+  it('should create a specificType column using the length field as the raw DB type', async () => {
+    writeSchema('events', {
+      columns: {
+        id: {type: 'increments'},
+        tags: {type: 'specificType', length: 'text'}
+      }
+    })
+    await Migration.migrate()
+
+    const info = await db('events').columnInfo()
+    expect(info).toHaveProperty('tags')
+  })
+
+  it('should not produce false alter for specificType when DB type matches', async () => {
+    writeSchema('events', {
+      columns: {
+        id: {type: 'increments'},
+        tags: {type: 'specificType', length: 'text'}
+      }
+    })
+    await Migration.migrate()
+
+    // Re-run with same schema — should not trigger alter
+    const result = await Migration.migrate()
+    const alterOps = result.default.schema.filter(op => op.type === 'alter_column')
+
+    expect(alterOps).toHaveLength(0)
+  })
+})
@@ -0,0 +1,118 @@
+'use strict'
+
+const cluster = require('node:cluster')
+
+/**
+ * Tests WriteBuffer.insert() nanoid auto-generation.
+ * Why: WriteBuffer bypasses the Database.js proxy QB nanoid injection.
+ * Rows must be populated with nanoid values before being queued to IPC,
+ * otherwise flush writes to DB with a null primary key and violates NOT NULL.
+ */
+
+let knexLib, db
+
+beforeEach(async () => {
+  jest.resetModules()
+
+  knexLib = require('knex')
+  db = knexLib({client: 'sqlite3', connection: {filename: ':memory:'}, useNullAsDefault: true})
+
+  await db.schema.createTable('activity', table => {
+    table.string('id', 21).primary().notNullable()
+    table.string('user', 255).notNullable()
+    table.string('action', 50).notNullable()
+  })
+
+  await db.schema.createTable('events', table => {
+    table.string('eid', 12).primary().notNullable()
+    table.string('name', 100)
+  })
+
+  Object.defineProperty(cluster, 'isPrimary', {value: true, configurable: true})
+
+  const Ipc = require('../../../src/Ipc')
+  global.Odac = {
+    Config: {buffer: {flushInterval: 999999, checkpointInterval: 999999}},
+    Storage: {
+      isReady: () => false,
+      put: jest.fn(),
+      remove: jest.fn(),
+      getRange: () => []
+    },
+    Ipc
+  }
+  await Ipc.init()
+
+  const writeBuffer = require('../../../src/Database/WriteBuffer')
+  await writeBuffer.init(
+    {default: db},
+    {
+      default: {
+        activity: [{column: 'id', size: 21}],
+        events: [{column: 'eid', size: 12}]
+      }
+    }
+  )
+
+  const DB = require('../../../src/Database')
+  DB.connections = {default: db}
+})
+
+afterEach(async () => {
+  const writeBuffer = require('../../../src/Database/WriteBuffer')
+  await writeBuffer.close()
+  await Odac.Ipc.close()
+  await db.destroy()
+  delete global.Odac
+})
+
+describe('WriteBuffer.insert() - NanoID auto-generation', () => {
+  it('should auto-generate nanoid for a column when not provided', async () => {
+    const DB = require('../../../src/Database')
+
+    await DB.activity.buffer.insert({user: 'alice', action: 'login'})
+    await DB.activity.buffer.flush()
+
+    const rows = await db('activity').select()
+    expect(rows).toHaveLength(1)
+    expect(rows[0].id).toBeTruthy()
+    expect(rows[0].id).toHaveLength(21)
+  })
+
+  it('should not overwrite an explicitly provided id', async () => {
+    const DB = require('../../../src/Database')
+
+    await DB.activity.buffer.insert({id: 'my-custom-id-00000', user: 'bob', action: 'logout'})
+    await DB.activity.buffer.flush()
+
+    const row = await db('activity').first()
+    expect(row.id).toBe('my-custom-id-00000')
+  })
+
+  it('should respect custom nanoid length from schema metadata', async () => {
+    const DB = require('../../../src/Database')
+
+    await DB.events.buffer.insert({name: 'page_view'})
+    await DB.events.buffer.flush()
+
+    const row = await db('events').first()
+    expect(row.eid).toBeTruthy()
+    expect(row.eid).toHaveLength(12)
+  })
+
+  it('should generate unique ids for multiple buffered inserts', async () => {
+    const DB = require('../../../src/Database')
+
+    await DB.activity.buffer.insert({user: 'alice', action: 'login'})
+    await DB.activity.buffer.insert({user: 'bob', action: 'view'})
+    await DB.activity.buffer.insert({user: 'carol', action: 'logout'})
+    await DB.activity.buffer.flush()
+
+    const rows = await db('activity').select()
+    expect(rows).toHaveLength(3)
+
+    const ids = rows.map(r => r.id)
+    expect(new Set(ids).size).toBe(3) // all unique
+    ids.forEach(id => expect(id).toHaveLength(21))
+  })
+})
@@ -0,0 +1,54 @@
+const Odac = require('../../src/Odac')
+
+describe('Odac.cache()', () => {
+  let mockOdac
+
+  beforeEach(() => {
+    mockOdac = {
+      Config: {request: {timeout: 1000}},
+      Route: {routes: {www: {}}},
+      Storage: {get: jest.fn(), put: jest.fn()}
+    }
+    global.Odac = mockOdac
+    global.__dir = '/mock'
+  })
+
+  afterEach(() => {
+    delete global.Odac
+    delete global.__dir
+  })
+
+  it('should expose cache() as a shorthand on the instance', () => {
+    const mockReq = {
+      method: 'GET',
+      url: '/',
+      headers: {host: 'www.example.com'},
+      connection: {remoteAddress: '127.0.0.1'},
+      on: jest.fn()
+    }
+    const mockRes = {on: jest.fn(), writeHead: jest.fn(), end: jest.fn()}
+
+    const ctx = Odac.instance('id', mockReq, mockRes)
+
+    expect(typeof ctx.cache).toBe('function')
+  })
+
+  it('should delegate to Request.cache()', () => {
+    const mockReq = {
+      method: 'GET',
+      url: '/',
+      headers: {host: 'www.example.com'},
+      connection: {remoteAddress: '127.0.0.1'},
+      on: jest.fn()
+    }
+    const mockRes = {on: jest.fn(), writeHead: jest.fn(), end: jest.fn(), finished: false}
+
+    const ctx = Odac.instance('id', mockReq, mockRes)
+    ctx.cache(3600)
+
+    ctx.Request.print()
+    const headers = mockRes.writeHead.mock.calls[0][1]
+    expect(headers['X-ODAC-Cache']).toBe(3600)
+    expect(headers['Cache-Control']).toBe('public, max-age=3600')
+  })
+})