odac 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. package/.agent/rules/memory.md +8 -0
  2. package/.github/workflows/release.yml +1 -1
  3. package/.releaserc.js +9 -2
  4. package/CHANGELOG.md +61 -0
  5. package/README.md +10 -0
  6. package/bin/odac.js +193 -2
  7. package/client/odac.js +32 -13
  8. package/docs/ai/skills/SKILL.md +4 -3
  9. package/docs/ai/skills/backend/authentication.md +7 -0
  10. package/docs/ai/skills/backend/config.md +7 -0
  11. package/docs/ai/skills/backend/controllers.md +7 -0
  12. package/docs/ai/skills/backend/cron.md +9 -2
  13. package/docs/ai/skills/backend/database.md +37 -2
  14. package/docs/ai/skills/backend/forms.md +112 -11
  15. package/docs/ai/skills/backend/ipc.md +7 -0
  16. package/docs/ai/skills/backend/mail.md +7 -0
  17. package/docs/ai/skills/backend/migrations.md +86 -0
  18. package/docs/ai/skills/backend/request_response.md +7 -0
  19. package/docs/ai/skills/backend/routing.md +7 -0
  20. package/docs/ai/skills/backend/storage.md +7 -0
  21. package/docs/ai/skills/backend/streaming.md +7 -0
  22. package/docs/ai/skills/backend/structure.md +8 -1
  23. package/docs/ai/skills/backend/translations.md +7 -0
  24. package/docs/ai/skills/backend/utilities.md +7 -0
  25. package/docs/ai/skills/backend/validation.md +138 -31
  26. package/docs/ai/skills/backend/views.md +7 -0
  27. package/docs/ai/skills/frontend/core.md +7 -0
  28. package/docs/ai/skills/frontend/forms.md +48 -13
  29. package/docs/ai/skills/frontend/navigation.md +7 -0
  30. package/docs/ai/skills/frontend/realtime.md +7 -0
  31. package/docs/backend/08-database/02-basics.md +49 -9
  32. package/docs/backend/08-database/04-migrations.md +259 -37
  33. package/package.json +1 -1
  34. package/src/Auth.js +82 -43
  35. package/src/Config.js +1 -1
  36. package/src/Database/ConnectionFactory.js +70 -0
  37. package/src/Database/Migration.js +1228 -0
  38. package/src/Database/nanoid.js +30 -0
  39. package/src/Database.js +157 -46
  40. package/src/Ipc.js +37 -0
  41. package/src/Odac.js +1 -1
  42. package/src/Route/Cron.js +11 -0
  43. package/src/Route.js +8 -0
  44. package/src/Server.js +77 -23
  45. package/src/Storage.js +15 -1
  46. package/src/Validator.js +22 -20
  47. package/template/schema/users.js +23 -0
  48. package/test/{Auth.test.js → Auth/check.test.js} +153 -6
  49. package/test/Client/data.test.js +91 -0
  50. package/test/Client/get.test.js +90 -0
  51. package/test/Client/storage.test.js +87 -0
  52. package/test/Client/token.test.js +82 -0
  53. package/test/Client/ws.test.js +86 -0
  54. package/test/Config/deepMerge.test.js +14 -0
  55. package/test/Config/init.test.js +66 -0
  56. package/test/Config/interpolate.test.js +35 -0
  57. package/test/Database/ConnectionFactory/buildConnectionConfig.test.js +13 -0
  58. package/test/Database/ConnectionFactory/buildConnections.test.js +31 -0
  59. package/test/Database/ConnectionFactory/resolveClient.test.js +12 -0
  60. package/test/Database/Migration/migrate_column.test.js +52 -0
  61. package/test/Database/Migration/migrate_files.test.js +70 -0
  62. package/test/Database/Migration/migrate_index.test.js +89 -0
  63. package/test/Database/Migration/migrate_nanoid.test.js +160 -0
  64. package/test/Database/Migration/migrate_seed.test.js +77 -0
  65. package/test/Database/Migration/migrate_table.test.js +88 -0
  66. package/test/Database/Migration/rollback.test.js +61 -0
  67. package/test/Database/Migration/snapshot.test.js +38 -0
  68. package/test/Database/Migration/status.test.js +41 -0
  69. package/test/Database/autoNanoid.test.js +215 -0
  70. package/test/Database/nanoid.test.js +19 -0
  71. package/test/Lang/constructor.test.js +25 -0
  72. package/test/Lang/get.test.js +65 -0
  73. package/test/Lang/set.test.js +49 -0
  74. package/test/Odac/init.test.js +42 -0
  75. package/test/Odac/instance.test.js +58 -0
  76. package/test/Route/{Middleware.test.js → Middleware/chaining.test.js} +5 -29
  77. package/test/Route/Middleware/use.test.js +35 -0
  78. package/test/{Route.test.js → Route/check.test.js} +4 -55
  79. package/test/Route/set.test.js +52 -0
  80. package/test/Route/ws.test.js +23 -0
  81. package/test/View/EarlyHints/cache.test.js +32 -0
  82. package/test/View/EarlyHints/extractFromHtml.test.js +143 -0
  83. package/test/View/EarlyHints/formatLinkHeader.test.js +33 -0
  84. package/test/View/EarlyHints/send.test.js +99 -0
  85. package/test/View/{Form.test.js → Form/generateFieldHtml.test.js} +2 -2
  86. package/test/View/constructor.test.js +22 -0
  87. package/test/View/print.test.js +19 -0
  88. package/test/WebSocket/Client/limits.test.js +55 -0
  89. package/test/WebSocket/Server/broadcast.test.js +33 -0
  90. package/test/WebSocket/Server/route.test.js +37 -0
  91. package/test/Client.test.js +0 -197
  92. package/test/Config.test.js +0 -112
  93. package/test/Lang.test.js +0 -92
  94. package/test/Odac.test.js +0 -88
  95. package/test/View/EarlyHints.test.js +0 -282
  96. package/test/WebSocket.test.js +0 -238
@@ -0,0 +1,1228 @@
1
+ 'use strict'
2
+
3
+ const fs = require('node:fs')
4
+ const path = require('node:path')
5
+ const nanoid = require('./nanoid')
6
+
7
+ /**
8
+ * ODAC Migration Engine — "Schema-First with Auto-Diff"
9
+ *
10
+ * Why: AI agents and developers need a single source of truth for database state.
11
+ * Instead of scanning hundreds of migration files, read `schema/` to know the final state.
12
+ * The engine diffs desired state vs current DB state and applies changes automatically.
13
+ */
14
+ class Migration {
15
+ constructor() {
16
+ this.schemaDir = null
17
+ this.migrationDir = null
18
+ this.connections = null
19
+ this.trackingTable = '_odac_migrations'
20
+ }
21
+
22
+ /**
23
+ * Initializes the migration engine with the project directory context.
24
+ * @param {string} projectDir - Absolute path to the project root
25
+ * @param {object} connections - DatabaseManager.connections map
26
+ */
27
+ init(projectDir, connections) {
28
+ this.schemaDir = path.join(projectDir, 'schema')
29
+ this.migrationDir = path.join(projectDir, 'migration')
30
+ this.connections = connections
31
+ }
32
+
33
+ // ---------------------------------------------------------------------------
34
+ // PUBLIC API
35
+ // ---------------------------------------------------------------------------
36
+
37
+ /**
38
+ * Runs all pending migrations: schema diff + imperative migration files + seeds.
39
+ * @param {object} options
40
+ * @param {string} [options.db] - Target a specific connection key (default: all)
41
+ * @param {boolean} [options.dryRun=false] - Only show changes, don't apply
42
+ * @returns {Promise<object>} Summary of applied changes per connection
43
+ */
44
+ async migrate(options = {}) {
45
+ const targetDb = options.db || null
46
+ const dryRun = options.dryRun || false
47
+ const summary = {}
48
+
49
+ const connectionKeys = targetDb ? [targetDb] : Object.keys(this.connections)
50
+
51
+ for (const key of connectionKeys) {
52
+ const knex = this.connections[key]
53
+ if (!knex) throw new Error(`ODAC Migration: Unknown database connection '${key}'.`)
54
+
55
+ await this._ensureTrackingTable(knex)
56
+
57
+ const schemaChanges = await this._applySchemaChanges(knex, key, dryRun)
58
+ const fileChanges = await this._applyMigrationFiles(knex, key, dryRun)
59
+ const seedChanges = await this._applySeeds(knex, key, dryRun)
60
+
61
+ summary[key] = {schema: schemaChanges, files: fileChanges, seeds: seedChanges}
62
+ }
63
+
64
+ return summary
65
+ }
66
+
67
+ /**
68
+ * Shows pending changes without applying them.
69
+ * @param {object} options
70
+ * @param {string} [options.db] - Target a specific connection key
71
+ * @returns {Promise<object>} Pending changes per connection
72
+ */
73
+ async status(options = {}) {
74
+ return this.migrate({...options, dryRun: true})
75
+ }
76
+
77
+ /**
78
+ * Rolls back the last batch of imperative migration files.
79
+ * Schema changes are NOT rolled back (use schema files to revert).
80
+ * @param {object} options
81
+ * @param {string} [options.db] - Target a specific connection key
82
+ * @returns {Promise<object>} Rolled-back migrations per connection
83
+ */
84
+ async rollback(options = {}) {
85
+ const targetDb = options.db || null
86
+ const result = {}
87
+
88
+ const connectionKeys = targetDb ? [targetDb] : Object.keys(this.connections)
89
+
90
+ for (const key of connectionKeys) {
91
+ const knex = this.connections[key]
92
+ if (!knex) throw new Error(`ODAC Migration: Unknown database connection '${key}'.`)
93
+
94
+ await this._ensureTrackingTable(knex)
95
+ result[key] = await this._rollbackLastBatch(knex, key)
96
+ }
97
+
98
+ return result
99
+ }
100
+
101
+ /**
102
+ * Reverse-engineers the current database into schema/ files.
103
+ * @param {object} options
104
+ * @param {string} [options.db] - Target a specific connection key
105
+ * @returns {Promise<object>} Generated file paths per connection
106
+ */
107
+ async snapshot(options = {}) {
108
+ const targetDb = options.db || null
109
+ const result = {}
110
+
111
+ const connectionKeys = targetDb ? [targetDb] : Object.keys(this.connections)
112
+
113
+ for (const key of connectionKeys) {
114
+ const knex = this.connections[key]
115
+ if (!knex) throw new Error(`ODAC Migration: Unknown database connection '${key}'.`)
116
+
117
+ result[key] = await this._snapshotDatabase(knex, key)
118
+ }
119
+
120
+ return result
121
+ }
122
+
123
+ // ---------------------------------------------------------------------------
124
+ // SCHEMA DIFF PIPELINE
125
+ // ---------------------------------------------------------------------------
126
+
127
+ /**
128
+ * Reads schema files, diffs against DB, and applies structural changes.
129
+ * @param {object} knex - Knex connection instance
130
+ * @param {string} connectionKey - Connection identifier
131
+ * @param {boolean} dryRun - If true, only compute changes
132
+ * @returns {Promise<Array>} List of applied operations
133
+ */
134
+ async _applySchemaChanges(knex, connectionKey, dryRun) {
135
+ const desiredSchemas = this._loadSchemaFiles(connectionKey)
136
+ const operations = []
137
+
138
+ for (const [tableName, desired] of Object.entries(desiredSchemas)) {
139
+ const exists = await knex.schema.hasTable(tableName)
140
+
141
+ if (!exists) {
142
+ const op = {type: 'create_table', table: tableName, columns: desired.columns, indexes: desired.indexes}
143
+ operations.push(op)
144
+
145
+ if (!dryRun) {
146
+ await this._createTable(knex, tableName, desired)
147
+ }
148
+ } else {
149
+ const currentColumns = await this._introspectColumns(knex, tableName)
150
+ const currentIndexes = await this._introspectIndexes(knex, tableName)
151
+ const diff = this._computeDiff(desired, currentColumns, currentIndexes)
152
+
153
+ if (diff.length > 0) {
154
+ operations.push(...diff.map(d => ({...d, table: tableName})))
155
+
156
+ if (!dryRun) {
157
+ await this._applyDiff(knex, tableName, diff)
158
+ }
159
+ }
160
+ }
161
+ }
162
+
163
+ return operations
164
+ }
165
+
166
+ /**
167
+ * Loads and parses schema definition files from the schema/ directory.
168
+ * Root-level files map to the 'default' connection.
169
+ * Subdirectories map to named connections.
170
+ * @param {string} connectionKey - Which connection to load schemas for
171
+ * @returns {object} Map of tableName -> schema definition
172
+ */
173
+ _loadSchemaFiles(connectionKey) {
174
+ const schemas = {}
175
+
176
+ if (!fs.existsSync(this.schemaDir)) return schemas
177
+
178
+ if (connectionKey === 'default') {
179
+ const files = fs.readdirSync(this.schemaDir).filter(f => f.endsWith('.js') && fs.statSync(path.join(this.schemaDir, f)).isFile())
180
+ for (const file of files) {
181
+ const tableName = path.basename(file, '.js')
182
+ const filePath = path.join(this.schemaDir, file)
183
+ schemas[tableName] = this._normalizeSchema(this._requireSchema(filePath))
184
+ }
185
+ } else {
186
+ const subDir = path.join(this.schemaDir, connectionKey)
187
+ if (!fs.existsSync(subDir)) return schemas
188
+
189
+ const files = fs.readdirSync(subDir).filter(f => f.endsWith('.js') && fs.statSync(path.join(subDir, f)).isFile())
190
+ for (const file of files) {
191
+ const tableName = path.basename(file, '.js')
192
+ const filePath = path.join(subDir, file)
193
+ schemas[tableName] = this._normalizeSchema(this._requireSchema(filePath))
194
+ }
195
+ }
196
+
197
+ return schemas
198
+ }
199
+
200
+ /**
201
+ * Why: Column-level `unique: true` creates a DB constraint during CREATE but is
202
+ * invisible to the diff engine's index comparison. This caused two bugs:
203
+ * 1. Silent constraint DROP on subsequent runs (not in desiredIndexes).
204
+ * 2. Duplicate constraint ADD if also listed explicitly in indexes array.
205
+ * Normalizing once at load time gives every downstream path (create, diff, apply)
206
+ * a single, deduplicated source of truth for indexes.
207
+ * @param {object} schema - Raw schema definition from file
208
+ * @returns {object} Schema with column-level unique constraints merged into indexes
209
+ */
210
+ _normalizeSchema(schema) {
211
+ const columns = schema.columns || {}
212
+ const indexes = [...(schema.indexes || [])]
213
+ const existingSignatures = new Set(indexes.map(idx => this._indexSignature(idx)))
214
+
215
+ for (const [colName, colDef] of Object.entries(columns)) {
216
+ if (!colDef.unique) continue
217
+ if (colDef.type === 'timestamps' || colDef.type === 'increments' || colDef.type === 'bigIncrements' || colDef.type === 'nanoid')
218
+ continue
219
+
220
+ const implicitIdx = {columns: [colName], unique: true}
221
+ const sig = this._indexSignature(implicitIdx)
222
+
223
+ if (!existingSignatures.has(sig)) {
224
+ indexes.push(implicitIdx)
225
+ existingSignatures.add(sig)
226
+ }
227
+ }
228
+
229
+ return {...schema, indexes}
230
+ }
231
+
232
+ /**
233
+ * Loads a schema/migration file from disk without relying on require.cache.
234
+ * Why: Node's require cache (and Jest's module registry) can serve stale modules
235
+ * when files are overwritten at the same path. Reading raw source avoids this.
236
+ * @param {string} filePath - Absolute path to schema file
237
+ * @returns {object} Parsed module exports
238
+ */
239
+ _requireSchema(filePath) {
240
+ const Module = require('node:module')
241
+ const source = fs.readFileSync(filePath, 'utf8')
242
+ const m = new Module(filePath)
243
+ m.filename = filePath
244
+ m.paths = Module._nodeModulePaths(path.dirname(filePath))
245
+ m._compile(source, filePath)
246
+ return m.exports
247
+ }
248
+
249
+ // ---------------------------------------------------------------------------
250
+ // INTROSPECTION — Read current DB state
251
+ // ---------------------------------------------------------------------------
252
+
253
+ /**
254
+ * Reads column metadata from the database for a given table.
255
+ * Uses knex.columnInfo() augmented with raw queries for precision.
256
+ * @param {object} knex - Knex connection instance
257
+ * @param {string} tableName - Table to introspect
258
+ * @returns {Promise<object>} Normalized column map
259
+ */
260
+ async _introspectColumns(knex, tableName) {
261
+ const info = await knex(tableName).columnInfo()
262
+ const columns = {}
263
+
264
+ for (const [colName, meta] of Object.entries(info)) {
265
+ columns[colName] = {
266
+ type: meta.type,
267
+ maxLength: meta.maxLength,
268
+ nullable: meta.nullable,
269
+ defaultValue: meta.defaultValue
270
+ }
271
+ }
272
+
273
+ return columns
274
+ }
275
+
276
+ /**
277
+ * Reads index metadata from the database for a given table.
278
+ * Supports MySQL, PostgreSQL, and SQLite.
279
+ * @param {object} knex - Knex connection instance
280
+ * @param {string} tableName - Table to introspect
281
+ * @returns {Promise<Array>} Normalized index list
282
+ */
283
+ async _introspectIndexes(knex, tableName) {
284
+ const client = knex.client.config.client
285
+
286
+ if (client === 'mysql2' || client === 'mysql') {
287
+ return this._introspectIndexesMySQL(knex, tableName)
288
+ } else if (client === 'pg') {
289
+ return this._introspectIndexesPG(knex, tableName)
290
+ } else if (client === 'sqlite3') {
291
+ return this._introspectIndexesSQLite(knex, tableName)
292
+ }
293
+
294
+ return []
295
+ }
296
+
297
+ async _introspectIndexesMySQL(knex, tableName) {
298
+ const [rows] = await knex.raw('SHOW INDEX FROM ??', [tableName])
299
+ const indexMap = {}
300
+
301
+ for (const row of rows) {
302
+ const name = row.Key_name
303
+ if (name === 'PRIMARY') continue
304
+
305
+ if (!indexMap[name]) {
306
+ indexMap[name] = {
307
+ name,
308
+ columns: [],
309
+ unique: !row.Non_unique
310
+ }
311
+ }
312
+ indexMap[name].columns.push(row.Column_name)
313
+ }
314
+
315
+ return Object.values(indexMap)
316
+ }
317
+
318
+ /**
319
+ * Why: The previous pg_class + pg_index + pg_attribute + int2vector::int[] cast
320
+ * approach broke across PostgreSQL versions and non-default search_path configs.
321
+ * Joining pg_index/pg_class/pg_attribute with LATERAL unnest(ix.indkey)
322
+ * WITH ORDINALITY resolves index column names in definition order without
323
+ * any int2vector casting, and filtering on current_schema() keeps the
324
+ * lookup correct under non-default search_path configurations.
325
+ * @param {object} knex - Knex connection instance
326
+ * @param {string} tableName - Table to introspect
327
+ * @returns {Promise<Array>} Normalized index list
328
+ */
329
+ async _introspectIndexesPG(knex, tableName) {
330
+ const result = await knex.raw(
331
+ `
332
+ SELECT
333
+ i.relname AS index_name,
334
+ ix.indisunique AS is_unique,
335
+ array_agg(a.attname ORDER BY a.attnum) AS columns
336
+ FROM pg_index ix
337
+ JOIN pg_class t ON t.oid = ix.indrelid
338
+ JOIN pg_class i ON i.oid = ix.indexrelid
339
+ JOIN pg_namespace n ON n.oid = t.relnamespace
340
+ JOIN LATERAL unnest(ix.indkey) WITH ORDINALITY AS k(attnum, ord) ON true
341
+ JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = k.attnum
342
+ WHERE t.relname = ?
343
+ AND n.nspname = current_schema()
344
+ AND ix.indisprimary = false
345
+ AND a.attnum > 0
346
+ GROUP BY i.relname, ix.indisunique
347
+ `,
348
+ [tableName]
349
+ )
350
+
351
+ return result.rows.map(row => ({
352
+ name: row.index_name,
353
+ columns: Array.isArray(row.columns) ? row.columns : [],
354
+ unique: !!row.is_unique
355
+ }))
356
+ }
357
+
358
+ async _introspectIndexesSQLite(knex, tableName) {
359
+ const safeTableName = this._quoteSQLiteIdentifier(tableName)
360
+ const rawIndexes = await knex.raw(`PRAGMA index_list(${safeTableName})`)
361
+ const indexes = Array.isArray(rawIndexes) ? rawIndexes : []
362
+ const result = []
363
+
364
+ for (const idx of indexes) {
365
+ if (idx.origin === 'pk') continue
366
+ // Unique-constraint indexes created by Knex .unique() have origin='c';
367
+ // they are user-defined, so we track them — only the implicit primary-key index (origin='pk') is skipped above
368
+
369
+ const safeIndexName = this._quoteSQLiteIdentifier(idx.name)
370
+ const rawCols = await knex.raw(`PRAGMA index_info(${safeIndexName})`)
371
+ const cols = Array.isArray(rawCols) ? rawCols : []
372
+ result.push({
373
+ name: idx.name,
374
+ columns: cols.map(c => c.name),
375
+ unique: !!idx.unique
376
+ })
377
+ }
378
+
379
+ return result
380
+ }
381
+
382
+ // ---------------------------------------------------------------------------
383
+ // DIFF ENGINE — Compute desired vs current delta
384
+ // ---------------------------------------------------------------------------
385
+
386
+ /**
387
+ * Computes the structural diff between desired schema and current DB state.
388
+ * Produces a list of atomic operations to reconcile the two.
389
+ * @param {object} desired - Schema definition from file
390
+ * @param {object} currentColumns - Introspected column map
391
+ * @param {Array} currentIndexes - Introspected index list
392
+ * @returns {Array} Ordered list of diff operations
393
+ */
394
+ _computeDiff(desired, currentColumns, currentIndexes) {
395
+ const ops = []
396
+ const desiredColumns = desired.columns || {}
397
+ const desiredIndexes = desired.indexes || []
398
+ const currentColNames = Object.keys(currentColumns)
399
+
400
+ // --- Column additions ---
401
+ for (const [colName, colDef] of Object.entries(desiredColumns)) {
402
+ if (colDef.type === 'timestamps') continue // Virtual type handled separately
403
+ if (!currentColumns[colName]) {
404
+ ops.push({type: 'add_column', column: colName, definition: colDef})
405
+ }
406
+ }
407
+
408
+ // Handle timestamps virtual type
409
+ if (this._hasTimestamps(desiredColumns)) {
410
+ if (!currentColumns['created_at']) {
411
+ ops.push({type: 'add_column', column: 'created_at', definition: {type: 'timestamp'}})
412
+ }
413
+ if (!currentColumns['updated_at']) {
414
+ ops.push({type: 'add_column', column: 'updated_at', definition: {type: 'timestamp'}})
415
+ }
416
+ }
417
+
418
+ // --- Column removals ---
419
+ const desiredColNames = new Set()
420
+ for (const [colName, colDef] of Object.entries(desiredColumns)) {
421
+ if (colDef.type === 'timestamps') {
422
+ desiredColNames.add('created_at')
423
+ desiredColNames.add('updated_at')
424
+ } else {
425
+ desiredColNames.add(colName)
426
+ }
427
+ }
428
+
429
+ for (const colName of currentColNames) {
430
+ if (!desiredColNames.has(colName)) {
431
+ ops.push({type: 'drop_column', column: colName})
432
+ }
433
+ }
434
+
435
+ // --- Column modifications ---
436
+ for (const [colName, colDef] of Object.entries(desiredColumns)) {
437
+ if (colDef.type === 'timestamps' || colDef.type === 'increments') continue
438
+ if (!currentColumns[colName]) continue // New column, handled above
439
+
440
+ if (this._columnNeedsAlter(colDef, currentColumns[colName])) {
441
+ ops.push({type: 'alter_column', column: colName, definition: colDef})
442
+ }
443
+ }
444
+
445
+ // --- Index synchronization ---
446
+ const desiredIndexSignatures = new Set(desiredIndexes.map(idx => this._indexSignature(idx)))
447
+ const currentIndexSignatures = new Set(currentIndexes.map(idx => this._indexSignature(idx)))
448
+
449
+ // Indexes to add
450
+ for (const idx of desiredIndexes) {
451
+ const sig = this._indexSignature(idx)
452
+ if (!currentIndexSignatures.has(sig)) {
453
+ ops.push({type: 'add_index', index: idx})
454
+ }
455
+ }
456
+
457
+ // Indexes to drop
458
+ for (const idx of currentIndexes) {
459
+ const sig = this._indexSignature(idx)
460
+ if (!desiredIndexSignatures.has(sig)) {
461
+ ops.push({type: 'drop_index', index: idx})
462
+ }
463
+ }
464
+
465
+ return ops
466
+ }
467
+
468
+ /**
469
+ * Checks if a column definition differs from the current DB metadata enough to warrant ALTER.
470
+ * Conservative: only alters when there is a clear type or constraint mismatch.
471
+ * @param {object} desired - Column definition from schema file
472
+ * @param {object} current - Column metadata from introspection
473
+ * @returns {boolean}
474
+ */
475
+ _columnNeedsAlter(desired, current) {
476
+ // Nullable mismatch
477
+ if (desired.nullable === false && current.nullable === true) return true
478
+ if (desired.nullable === true && current.nullable === false) return true
479
+
480
+ // Length mismatch for string types — use Number() coercion since some
481
+ // drivers (SQLite) return maxLength as a string, e.g. '100' vs 100.
482
+ if (desired.length && current.maxLength && Number(desired.length) !== Number(current.maxLength)) return true
483
+
484
+ return false
485
+ }
486
+
487
+ /**
488
+ * Generates a deterministic signature for an index to enable set comparison.
489
+ * @param {object} idx - Index definition {columns, unique}
490
+ * @returns {string} Canonical signature string
491
+ */
492
+ _indexSignature(idx) {
493
+ const cols = [...idx.columns].sort().join(',')
494
+ return `${idx.unique ? 'U' : 'I'}:${cols}`
495
+ }
496
+
497
+ /**
498
+ * Checks if the desired columns include a 'timestamps' virtual type.
499
+ * @param {object} columns - Desired column definitions
500
+ * @returns {boolean}
501
+ */
502
+ _hasTimestamps(columns) {
503
+ for (const colDef of Object.values(columns)) {
504
+ if (colDef.type === 'timestamps') return true
505
+ }
506
+ return false
507
+ }
508
+
509
+ // ---------------------------------------------------------------------------
510
+ // APPLY CHANGES — Execute DDL operations
511
+ // ---------------------------------------------------------------------------
512
+
513
+ /**
514
+ * Creates a new table from a schema definition.
515
+ * @param {object} knex - Knex connection instance
516
+ * @param {string} tableName - Table name
517
+ * @param {object} schema - Full schema definition
518
+ */
519
+ async _createTable(knex, tableName, schema) {
520
+ await knex.schema.createTable(tableName, table => {
521
+ this._buildColumns(table, schema.columns)
522
+ this._buildIndexes(table, schema.indexes)
523
+ })
524
+ }
525
+
526
+ /**
527
+ * Applies a list of diff operations to an existing table.
528
+ * Why split into two phases: Knex wraps all alterTable operations into a single
529
+ * statement batch. If one index DDL fails (e.g. "already exists" due to introspection
530
+ * gaps across PG versions), the entire batch — including column changes — is aborted.
531
+ * Phase 1 handles column ops in a single alterTable. Phase 2 handles index ops
532
+ * individually with idempotent error handling so duplicate/missing index errors
533
+ * never crash the migration pipeline.
534
+ * @param {object} knex - Knex connection instance
535
+ * @param {string} tableName - Table name
536
+ * @param {Array} diff - List of operations from _computeDiff
537
+ */
538
+ async _applyDiff(knex, tableName, diff) {
539
+ const columnOps = diff.filter(op => op.type === 'add_column' || op.type === 'drop_column' || op.type === 'alter_column')
540
+ const indexOps = diff.filter(op => op.type === 'add_index' || op.type === 'drop_index')
541
+
542
+ // Phase 1: Column operations — atomic batch
543
+ if (columnOps.length > 0) {
544
+ await knex.schema.alterTable(tableName, table => {
545
+ for (const op of columnOps) {
546
+ switch (op.type) {
547
+ case 'add_column':
548
+ this._addColumn(table, op.column, op.definition)
549
+ break
550
+ case 'drop_column':
551
+ table.dropColumn(op.column)
552
+ break
553
+ case 'alter_column':
554
+ this._alterColumn(table, op.column, op.definition)
555
+ break
556
+ }
557
+ }
558
+ })
559
+ }
560
+
561
+ // Phase 2: Index operations — each applied individually for idempotent safety
562
+ for (const op of indexOps) {
563
+ await this._applyIndexOp(knex, tableName, op)
564
+ }
565
+ }
566
+
567
+ /**
568
+ * Why: PostgreSQL introspection can miss existing constraints across PG versions
569
+ * (int2vector cast edge cases, search_path mismatches, expression indexes).
570
+ * Rather than silently crashing the entire migration, we catch "already exists"
571
+ * (42P07) and "does not exist" (42704/3F000) errors that indicate the DB is
572
+ * already in the desired state.
573
+ * @param {object} knex - Knex connection instance
574
+ * @param {string} tableName - Table name
575
+ * @param {object} op - Single index diff operation
576
+ */
577
+ async _applyIndexOp(knex, tableName, op) {
578
+ try {
579
+ if (op.type === 'add_index') {
580
+ await knex.schema.alterTable(tableName, table => {
581
+ if (op.index.unique) {
582
+ table.unique(op.index.columns)
583
+ } else {
584
+ table.index(op.index.columns)
585
+ }
586
+ })
587
+ } else if (op.type === 'drop_index') {
588
+ await knex.schema.alterTable(tableName, table => {
589
+ if (op.index.unique) {
590
+ table.dropUnique(op.index.columns)
591
+ } else {
592
+ table.dropIndex(op.index.columns)
593
+ }
594
+ })
595
+ }
596
+ } catch (e) {
597
+ const isDuplicate = e.code === '42P07' || e.code === 'ER_DUP_KEYNAME' || (e.message && e.message.includes('already exists'))
598
+ const isNotFound = e.code === '42704' || e.code === '3F000' || (e.message && e.message.includes('does not exist'))
599
+
600
+ if ((op.type === 'add_index' && isDuplicate) || (op.type === 'drop_index' && isNotFound)) {
601
+ // DB is already in the desired state — safe no-op
602
+ return
603
+ }
604
+
605
+ throw e
606
+ }
607
+ }
608
+
609
+ /**
610
+ * Translates schema column definitions into Knex schema builder calls.
611
+ * Supports all common column types with their modifiers.
612
+ * @param {object} table - Knex TableBuilder instance
613
+ * @param {object} columns - Column definition map
614
+ */
615
+ _buildColumns(table, columns) {
616
+ if (!columns) return
617
+
618
+ for (const [colName, def] of Object.entries(columns)) {
619
+ if (def.type === 'timestamps') {
620
+ table.timestamps(true, true)
621
+ continue
622
+ }
623
+
624
+ const col = this._createColumnBuilder(table, colName, def)
625
+ if (!col) continue
626
+
627
+ if (def.nullable === false) col.notNullable()
628
+ else if (def.nullable === true) col.nullable()
629
+
630
+ if (def.default !== undefined) col.defaultTo(def.default)
631
+ if (def.unsigned) col.unsigned()
632
+ // Column-level unique is handled via _normalizeSchema → _buildIndexes.
633
+ // Applying it here as well would create duplicate constraints.
634
+ if (def.primary) col.primary()
635
+ if (def.references) col.references(def.references.column).inTable(def.references.table)
636
+ if (def.onDelete) col.onDelete(def.onDelete)
637
+ if (def.onUpdate) col.onUpdate(def.onUpdate)
638
+ if (def.comment) col.comment(def.comment)
639
+ }
640
+ }
641
+
642
+ /**
643
+ * Creates a Knex column builder call for a given type.
644
+ * @param {object} table - Knex TableBuilder
645
+ * @param {string} colName - Column name
646
+ * @param {object} def - Column definition
647
+ * @returns {object|null} Knex ColumnBuilder or null
648
+ */
649
+ _createColumnBuilder(table, colName, def) {
650
+ switch (def.type) {
651
+ case 'increments':
652
+ return table.increments(colName)
653
+ case 'bigIncrements':
654
+ return table.bigIncrements(colName)
655
+ case 'integer':
656
+ return table.integer(colName)
657
+ case 'bigInteger':
658
+ return table.bigInteger(colName)
659
+ case 'float':
660
+ return table.float(colName, def.precision, def.scale)
661
+ case 'decimal':
662
+ return table.decimal(colName, def.precision || 10, def.scale || 2)
663
+ case 'string':
664
+ return table.string(colName, def.length || 255)
665
+ case 'text':
666
+ return table.text(colName, def.textType || 'text')
667
+ case 'boolean':
668
+ return table.boolean(colName)
669
+ case 'date':
670
+ return table.date(colName)
671
+ case 'datetime':
672
+ return table.datetime(colName)
673
+ case 'timestamp':
674
+ return table.timestamp(colName)
675
+ case 'time':
676
+ return table.time(colName)
677
+ case 'binary':
678
+ return table.binary(colName, def.length)
679
+ case 'json':
680
+ return table.json(colName)
681
+ case 'jsonb':
682
+ return table.jsonb(colName)
683
+ case 'nanoid':
684
+ return table.string(colName, def.length || 21)
685
+ case 'uuid':
686
+ return table.uuid(colName)
687
+ case 'enum':
688
+ return table.enum(colName, def.values || [])
689
+ default:
690
+ return table.specificType(colName, def.type)
691
+ }
692
+ }
693
+
694
+ /**
695
+ * Builds index definitions on a table during creation.
696
+ * @param {object} table - Knex TableBuilder
697
+ * @param {Array} indexes - Index definition array
698
+ */
699
+ _buildIndexes(table, indexes) {
700
+ if (!indexes || !Array.isArray(indexes)) return
701
+
702
+ for (const idx of indexes) {
703
+ if (idx.unique) {
704
+ table.unique(idx.columns)
705
+ } else {
706
+ table.index(idx.columns)
707
+ }
708
+ }
709
+ }
710
+
711
+ /**
712
+ * Adds a single column to an existing table via ALTER.
713
+ * @param {object} table - Knex TableBuilder (alter context)
714
+ * @param {string} colName - Column name
715
+ * @param {object} def - Column definition
716
+ */
717
+ _addColumn(table, colName, def) {
718
+ const col = this._createColumnBuilder(table, colName, def)
719
+ if (!col) return
720
+
721
+ if (def.nullable === false) col.notNullable()
722
+ else col.nullable()
723
+
724
+ if (def.default !== undefined) col.defaultTo(def.default)
725
+ if (def.unsigned) col.unsigned()
726
+ if (def.references) col.references(def.references.column).inTable(def.references.table)
727
+ if (def.onDelete) col.onDelete(def.onDelete)
728
+ if (def.onUpdate) col.onUpdate(def.onUpdate)
729
+ }
730
+
731
+ /**
732
+ * Alters an existing column to match the desired definition.
733
+ * @param {object} table - Knex TableBuilder (alter context)
734
+ * @param {string} colName - Column name
735
+ * @param {object} def - Column definition
736
+ */
737
+ _alterColumn(table, colName, def) {
738
+ const col = this._createColumnBuilder(table, colName, def)
739
+ if (!col) return
740
+
741
+ if (def.nullable === false) col.notNullable()
742
+ else if (def.nullable === true) col.nullable()
743
+
744
+ if (def.default !== undefined) col.defaultTo(def.default)
745
+
746
+ col.alter()
747
+ }
748
+
749
+ // ---------------------------------------------------------------------------
750
+ // IMPERATIVE MIGRATION FILES
751
+ // ---------------------------------------------------------------------------
752
+
753
/**
 * Runs pending imperative migration files (developer-written data migrations).
 * Files are applied sequentially in filename (timestamp) order; each applied
 * file is recorded in the tracking table so it never runs twice.
 * @param {object} knex - Knex connection instance
 * @param {string} connectionKey - Connection identifier
 * @param {boolean} dryRun - If true, only list pending files
 * @returns {Promise<Array>} Applied migration file names
 */
async _applyMigrationFiles(knex, connectionKey, dryRun) {
  const migrationFiles = this._loadMigrationFiles(connectionKey)
  if (migrationFiles.length === 0) return []

  // Rows with type 'file' track imperative migrations, separate from schema sync.
  const applied = await knex(this.trackingTable).where('connection', connectionKey).where('type', 'file').select('name')

  const appliedNames = new Set(applied.map(r => r.name))
  const pending = migrationFiles.filter(f => !appliedNames.has(f.name))

  if (dryRun) {
    return pending.map(f => ({type: 'pending_file', name: f.name}))
  }

  // Determine the next batch number. All files applied in this run share one
  // batch so _rollbackLastBatch can revert them together.
  // NOTE(review): max('batch') is not filtered by type here, unlike the query
  // in _rollbackLastBatch — presumably intentional so batch numbers stay
  // unique across both migration types; confirm.
  const lastBatch = await knex(this.trackingTable).where('connection', connectionKey).max('batch as maxBatch').first()
  const batch = (lastBatch?.maxBatch || 0) + 1

  const results = []

  // Sequential on purpose: migrations must run in order, and a failure stops
  // the run before later files execute.
  for (const file of pending) {
    const migration = this._requireSchema(file.path)

    if (typeof migration.up !== 'function') {
      throw new Error(`ODAC Migration: File '${file.name}' is missing an 'up' function.`)
    }

    await migration.up(knex)

    // Record only after a successful up() so a failed migration stays pending.
    await knex(this.trackingTable).insert({
      name: file.name,
      connection: connectionKey,
      type: 'file',
      batch,
      applied_at: new Date()
    })

    results.push({type: 'applied_file', name: file.name})
  }

  return results
}
801
+
802
+ /**
803
+ * Loads imperative migration files sorted by filename (timestamp order).
804
+ * @param {string} connectionKey - Connection identifier
805
+ * @returns {Array<{name: string, path: string}>} Sorted migration file descriptors
806
+ */
807
+ _loadMigrationFiles(connectionKey) {
808
+ let dir
809
+
810
+ if (connectionKey === 'default') {
811
+ dir = this.migrationDir
812
+ } else {
813
+ dir = path.join(this.migrationDir, connectionKey)
814
+ }
815
+
816
+ if (!fs.existsSync(dir)) return []
817
+
818
+ return fs
819
+ .readdirSync(dir)
820
+ .filter(f => f.endsWith('.js') && !fs.statSync(path.join(dir, f)).isDirectory())
821
+ .sort()
822
+ .map(f => ({name: f, path: path.join(dir, f)}))
823
+ }
824
+
825
/**
 * Rolls back the last batch of imperative migration files.
 * Files are reverted in reverse filename order (newest first), mirroring
 * the order in which they were applied.
 * @param {object} knex - Knex connection instance
 * @param {string} connectionKey - Connection identifier
 * @returns {Promise<Array>} Rolled-back migration names
 */
async _rollbackLastBatch(knex, connectionKey) {
  // Find the most recent batch of 'file'-type migrations for this connection.
  const lastBatch = await knex(this.trackingTable)
    .where('connection', connectionKey)
    .where('type', 'file')
    .max('batch as maxBatch')
    .first()

  // No file migrations recorded — nothing to roll back.
  if (!lastBatch?.maxBatch) return []

  const migrations = await knex(this.trackingTable)
    .where('connection', connectionKey)
    .where('type', 'file')
    .where('batch', lastBatch.maxBatch)
    .orderBy('name', 'desc')
    .select('name')

  const results = []

  for (const row of migrations) {
    // NOTE(review): if the file was deleted from disk the migration is
    // skipped AND its tracking row is kept — confirm this is intentional.
    const filePath = this._resolveMigrationFilePath(connectionKey, row.name)
    if (!filePath) continue

    const migration = this._requireSchema(filePath)

    // down() is optional; a file without one just loses its tracking row.
    if (typeof migration.down === 'function') {
      await migration.down(knex)
    }

    // Remove the tracking row so the file counts as pending again.
    await knex(this.trackingTable).where('connection', connectionKey).where('name', row.name).where('type', 'file').del()

    results.push({type: 'rolled_back', name: row.name})
  }

  return results
}
866
+
867
+ /**
868
+ * Resolves the absolute file path for a migration file by name.
869
+ * @param {string} connectionKey - Connection identifier
870
+ * @param {string} name - Migration file name (e.g. '20260225_001_auto.js')
871
+ * @returns {string|null} Absolute path or null if not found
872
+ */
873
+ _resolveMigrationFilePath(connectionKey, name) {
874
+ const dir = connectionKey === 'default' ? this.migrationDir : path.join(this.migrationDir, connectionKey)
875
+
876
+ const filePath = path.join(dir, name)
877
+ return fs.existsSync(filePath) ? filePath : null
878
+ }
879
+
880
+ // ---------------------------------------------------------------------------
881
+ // SEED DATA
882
+ // ---------------------------------------------------------------------------
883
+
884
/**
 * Applies seed data from schema definitions using idempotent upsert logic.
 * Each seed row is matched against the DB by its schema-defined seedKey:
 * missing rows are inserted, differing rows are updated, identical rows
 * are left untouched.
 * @param {object} knex - Knex connection instance
 * @param {string} connectionKey - Connection identifier
 * @param {boolean} dryRun - If true, only list pending seeds
 * @returns {Promise<Array>} Seed operation results
 */
async _applySeeds(knex, connectionKey, dryRun) {
  const schemas = this._loadSchemaFiles(connectionKey)
  const results = []

  for (const [tableName, schema] of Object.entries(schemas)) {
    if (!schema.seed || !Array.isArray(schema.seed) || schema.seed.length === 0) continue

    // seedKey names the natural-key column used to match seed rows to DB rows.
    const seedKey = schema.seedKey

    if (!seedKey) {
      throw new Error(`ODAC Migration: Schema '${tableName}' has seed data but no seedKey defined.`)
    }

    for (const row of schema.seed) {
      const keyValue = row[seedKey]
      // Rows without a key value cannot be matched idempotently — skip them.
      if (keyValue === undefined) continue

      // JSON/JSONB values are pre-stringified to avoid driver array mangling.
      const preparedRow = this._prepareSeedRow(row, schema)
      const existing = await knex(tableName).where(seedKey, keyValue).first()

      if (!existing) {
        // Auto-generate nanoid for columns with type 'nanoid' that are missing from seed data
        this._fillNanoidColumns(preparedRow, schema)

        if (!dryRun) {
          await knex(tableName).insert(preparedRow)
        }
        results.push({type: 'seed_insert', table: tableName, key: keyValue})
      } else {
        // Compare the raw seed row (not the stringified one): the comparator
        // normalizes both sides to canonical JSON itself.
        const needsUpdate = this._seedRowNeedsUpdate(row, existing, seedKey)

        if (needsUpdate) {
          if (!dryRun) {
            await knex(tableName).where(seedKey, keyValue).update(preparedRow)
          }
          results.push({type: 'seed_update', table: tableName, key: keyValue})
        }
      }
    }
  }

  return results
}
934
+
935
+ /**
936
+ * Prepares a seed row for insertion/updating by stringifying JSON columns.
937
+ * Why: Knex/pg driver converts JavaScript arrays to PostgreSQL array literals (e.g. {1,2,3})
938
+ * instead of JSON arrays (e.g. [1,2,3]). This causes "invalid input syntax for type json"
939
+ * when seeding JSONB columns with arrays. Stringifying them explicitly fixes this.
940
+ * @param {object} row - Raw seed row
941
+ * @param {object} schema - Table schema definition
942
+ * @returns {object} Prepared row
943
+ */
944
+ _prepareSeedRow(row, schema) {
945
+ const prepared = {...row}
946
+ const columns = schema.columns || {}
947
+
948
+ for (const [key, value] of Object.entries(prepared)) {
949
+ const colDef = columns[key]
950
+ if (colDef && (colDef.type === 'json' || colDef.type === 'jsonb')) {
951
+ if (value !== null && typeof value !== 'string') {
952
+ prepared[key] = JSON.stringify(value)
953
+ }
954
+ }
955
+ }
956
+
957
+ return prepared
958
+ }
959
+
960
+ /**
961
+ * Why: The previous `String()` coercion broke for JSON/JSONB columns in two ways:
962
+ * 1. `String({})` produces "[object Object]" — useless for deep comparison.
963
+ * 2. PG may return parsed objects while seeds hold raw objects — identical data
964
+ * compared as different → false-positive UPDATE → Knex double-stringifies
965
+ * the already-serialized JSON → PG throws "invalid input syntax for type json".
966
+ *
967
+ * This method normalizes both sides to canonical JSON before comparing, which
968
+ * handles: objects, arrays, numbers-as-strings (SQLite), null vs undefined,
969
+ * and Date objects.
970
+ * @param {object} seedRow - Desired seed row from schema file
971
+ * @param {object} existingRow - Current row from DB
972
+ * @param {string} seedKey - The key column to skip during comparison
973
+ * @returns {boolean} True if the DB row needs updating
974
+ */
975
+ _seedRowNeedsUpdate(seedRow, existingRow, seedKey) {
976
+ for (const key of Object.keys(seedRow)) {
977
+ if (key === seedKey) continue
978
+
979
+ const desired = seedRow[key]
980
+ const current = existingRow[key]
981
+
982
+ // Both nullish — no change
983
+ if (desired == null && current == null) continue
984
+
985
+ // One nullish, other not — changed
986
+ if (desired == null || current == null) return true
987
+
988
+ // Both primitives — numeric-safe loose comparison
989
+ if (typeof desired !== 'object' && typeof current !== 'object') {
990
+ if (String(desired) !== String(current)) return true
991
+ continue
992
+ }
993
+
994
+ // At least one side is an object/array — canonical JSON comparison
995
+ const desiredJson = typeof desired === 'string' ? desired : JSON.stringify(desired)
996
+ const currentJson = typeof current === 'string' ? current : JSON.stringify(current)
997
+
998
+ if (desiredJson !== currentJson) return true
999
+ }
1000
+
1001
+ return false
1002
+ }
1003
+
1004
+ // ---------------------------------------------------------------------------
1005
+ // SNAPSHOT — Reverse-engineer DB into schema files
1006
+ // ---------------------------------------------------------------------------
1007
+
1008
+ /**
1009
+ * Reads the current database structure and generates schema/ files.
1010
+ * @param {object} knex - Knex connection instance
1011
+ * @param {string} connectionKey - Connection identifier
1012
+ * @returns {Promise<Array>} Generated file paths
1013
+ */
1014
+ async _snapshotDatabase(knex, connectionKey) {
1015
+ const tables = await this._listTables(knex)
1016
+ const generatedFiles = []
1017
+ const targetDir = connectionKey === 'default' ? this.schemaDir : path.join(this.schemaDir, connectionKey)
1018
+
1019
+ if (!fs.existsSync(targetDir)) {
1020
+ fs.mkdirSync(targetDir, {recursive: true})
1021
+ }
1022
+
1023
+ for (const tableName of tables) {
1024
+ if (tableName === this.trackingTable) continue
1025
+
1026
+ const columns = await this._introspectColumns(knex, tableName)
1027
+ const indexes = await this._introspectIndexes(knex, tableName)
1028
+ const schemaContent = this._generateSchemaFileContent(tableName, columns, indexes)
1029
+ const safeFileStem = this._toSafeFileStem(tableName)
1030
+ const filePath = path.resolve(targetDir, `${safeFileStem}.js`)
1031
+ const targetRoot = path.resolve(targetDir) + path.sep
1032
+
1033
+ if (!filePath.startsWith(targetRoot)) {
1034
+ throw new Error(`ODAC Migration: Unsafe snapshot path generated for table '${tableName}'.`)
1035
+ }
1036
+
1037
+ fs.writeFileSync(filePath, schemaContent, 'utf8')
1038
+ generatedFiles.push(filePath)
1039
+ }
1040
+
1041
+ return generatedFiles
1042
+ }
1043
+
1044
+ /**
1045
+ * Lists all user tables in the current database (excluding system tables).
1046
+ * @param {object} knex - Knex connection
1047
+ * @returns {Promise<string[]>} Table name list
1048
+ */
1049
+ async _listTables(knex) {
1050
+ const client = knex.client.config.client
1051
+
1052
+ if (client === 'mysql2' || client === 'mysql') {
1053
+ const [rows] = await knex.raw('SHOW TABLES')
1054
+ return rows.map(row => Object.values(row)[0])
1055
+ } else if (client === 'pg') {
1056
+ const result = await knex.raw("SELECT tablename FROM pg_tables WHERE schemaname = 'public'")
1057
+ return result.rows.map(r => r.tablename)
1058
+ } else if (client === 'sqlite3') {
1059
+ const rows = await knex.raw("SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'")
1060
+ return rows.map(r => r.name)
1061
+ }
1062
+
1063
+ return []
1064
+ }
1065
+
1066
+ /**
1067
+ * Generates a human-readable schema file from introspected metadata.
1068
+ * @param {string} tableName - Table name
1069
+ * @param {object} columns - Introspected column map
1070
+ * @param {Array} indexes - Introspected index list
1071
+ * @returns {string} JavaScript module source code
1072
+ */
1073
+ _generateSchemaFileContent(tableName, columns, indexes) {
1074
+ const lines = []
1075
+ const safeTableLabel = this._toJsLiteral(String(tableName))
1076
+ lines.push(`// Schema definition for ${safeTableLabel} — auto-generated by ODAC snapshot`)
1077
+ lines.push(`// Review and adjust types/constraints as needed before using as source of truth.`)
1078
+ lines.push(`'use strict'`)
1079
+ lines.push('')
1080
+ lines.push('module.exports = {')
1081
+ lines.push(' columns: {')
1082
+
1083
+ const colEntries = Object.entries(columns)
1084
+ for (let i = 0; i < colEntries.length; i++) {
1085
+ const [colName, meta] = colEntries[i]
1086
+ const parts = []
1087
+
1088
+ const mappedType = this._reverseMapType(meta.type)
1089
+ parts.push(`type: ${this._toJsLiteral(mappedType)}`)
1090
+
1091
+ if (meta.maxLength) {
1092
+ const parsedLength = Number(meta.maxLength)
1093
+ if (Number.isFinite(parsedLength) && parsedLength > 0) {
1094
+ parts.push(`length: ${Math.trunc(parsedLength)}`)
1095
+ }
1096
+ }
1097
+ if (meta.nullable === false) parts.push('nullable: false')
1098
+ if (meta.defaultValue !== null && meta.defaultValue !== undefined) {
1099
+ parts.push(`default: ${this._toJsLiteral(meta.defaultValue)}`)
1100
+ }
1101
+
1102
+ const comma = i < colEntries.length - 1 ? ',' : ''
1103
+ lines.push(` ${this._toObjectKey(colName)}: {${parts.join(', ')}}${comma}`)
1104
+ }
1105
+
1106
+ lines.push(' },')
1107
+ lines.push('')
1108
+
1109
+ if (indexes.length > 0) {
1110
+ lines.push(' indexes: [')
1111
+ for (let i = 0; i < indexes.length; i++) {
1112
+ const idx = indexes[i]
1113
+ const colsStr = idx.columns.map(c => this._toJsLiteral(String(c))).join(', ')
1114
+ const uniqueStr = idx.unique ? ', unique: true' : ''
1115
+ const comma = i < indexes.length - 1 ? ',' : ''
1116
+ lines.push(` {columns: [${colsStr}]${uniqueStr}}${comma}`)
1117
+ }
1118
+ lines.push(' ]')
1119
+ } else {
1120
+ lines.push(' indexes: []')
1121
+ }
1122
+
1123
+ lines.push('}')
1124
+ lines.push('')
1125
+
1126
+ return lines.join('\n')
1127
+ }
1128
+
1129
+ _toJsLiteral(value) {
1130
+ if (typeof value === 'bigint') return `${value}n`
1131
+ return JSON.stringify(value)
1132
+ }
1133
+
1134
+ _toObjectKey(key) {
1135
+ const normalized = String(key)
1136
+ if (/^[A-Za-z_$][A-Za-z0-9_$]*$/.test(normalized)) return normalized
1137
+ return this._toJsLiteral(normalized)
1138
+ }
1139
+
1140
+ _toSafeFileStem(name) {
1141
+ const normalized = String(name)
1142
+ .normalize('NFKC')
1143
+ .replace(/[\\/\0]/g, '_')
1144
+ .replace(/\.+/g, '.')
1145
+ .replace(/[^A-Za-z0-9._-]/g, '_')
1146
+ .replace(/^\.+/, '')
1147
+ .trim()
1148
+
1149
+ return normalized.length > 0 ? normalized : 'table'
1150
+ }
1151
+
1152
+ _quoteSQLiteIdentifier(value) {
1153
+ const normalized = String(value)
1154
+ return `"${normalized.replace(/"/g, '""')}"`
1155
+ }
1156
+
1157
+ /**
1158
+ * Maps raw database type strings back to ODAC schema type names.
1159
+ * @param {string} rawType - Database-reported type string
1160
+ * @returns {string} ODAC schema type
1161
+ */
1162
+ _reverseMapType(rawType) {
1163
+ if (!rawType) return 'string'
1164
+ const t = rawType.toLowerCase()
1165
+
1166
+ if (t.includes('int') && t.includes('auto')) return 'increments'
1167
+ if (t === 'bigint') return 'bigInteger'
1168
+ if (t.includes('int')) return 'integer'
1169
+ if (t.includes('varchar') || t.includes('character varying')) return 'string'
1170
+ if (t === 'text' || t === 'mediumtext' || t === 'longtext') return 'text'
1171
+ if (t === 'boolean' || t === 'tinyint(1)') return 'boolean'
1172
+ if (t === 'date') return 'date'
1173
+ if (t.includes('datetime')) return 'datetime'
1174
+ if (t.includes('timestamp')) return 'timestamp'
1175
+ if (t === 'time') return 'time'
1176
+ if (t.includes('decimal') || t.includes('numeric')) return 'decimal'
1177
+ if (t.includes('float') || t.includes('double') || t.includes('real')) return 'float'
1178
+ if (t === 'json' || t === 'jsonb') return t
1179
+ if (t === 'uuid') return 'uuid'
1180
+ if (t.includes('blob') || t.includes('binary') || t.includes('bytea')) return 'binary'
1181
+ if (t.includes('enum')) return 'enum'
1182
+
1183
+ return 'string'
1184
+ }
1185
+
1186
+ // ---------------------------------------------------------------------------
1187
+ // TRACKING TABLE
1188
+ // ---------------------------------------------------------------------------
1189
+
1190
+ /**
1191
+ * Ensures the migration tracking table exists in the given connection.
1192
+ * @param {object} knex - Knex connection instance
1193
+ */
1194
+ async _ensureTrackingTable(knex) {
1195
+ const exists = await knex.schema.hasTable(this.trackingTable)
1196
+ if (exists) return
1197
+
1198
+ await knex.schema.createTable(this.trackingTable, table => {
1199
+ table.increments('id')
1200
+ table.string('name').notNullable()
1201
+ table.string('connection').notNullable()
1202
+ table.string('type').notNullable() // 'file' or 'schema'
1203
+ table.integer('batch').notNullable()
1204
+ table.timestamp('applied_at').defaultTo(knex.fn.now())
1205
+ table.index(['connection', 'type'])
1206
+ })
1207
+ }
1208
+
1209
+ /**
1210
+ * Populates missing nanoid columns in a data row before insertion.
1211
+ * Why: Zero-config DX — developers should not manually call nanoid() for every insert.
1212
+ * When a schema defines a column as type 'nanoid', the framework auto-generates
1213
+ * the value if the caller did not provide one.
1214
+ * @param {object} row - Data row to mutate in-place
1215
+ * @param {object} schema - Table schema definition
1216
+ */
1217
+ _fillNanoidColumns(row, schema) {
1218
+ const columns = schema.columns || {}
1219
+
1220
+ for (const [colName, colDef] of Object.entries(columns)) {
1221
+ if (colDef.type === 'nanoid' && !row[colName]) {
1222
+ row[colName] = nanoid(colDef.length || 21)
1223
+ }
1224
+ }
1225
+ }
1226
+ }
1227
+
1228
// Singleton: every consumer shares one Migration instance (and its state).
module.exports = new Migration()