@naturalcycles/db-lib 10.42.0 → 10.42.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/adapter/file/file.db.d.ts +1 -1
  2. package/dist/adapter/file/file.db.js +1 -1
  3. package/dist/adapter/file/localFile.persistence.plugin.js +1 -1
  4. package/dist/cnst.js +2 -1
  5. package/dist/commondao/common.dao.d.ts +69 -7
  6. package/dist/commondao/common.dao.js +202 -66
  7. package/dist/commondao/common.dao.model.d.ts +6 -5
  8. package/dist/commondao/common.dao.model.js +2 -1
  9. package/dist/commondb/common.db.js +2 -1
  10. package/dist/db.model.js +4 -2
  11. package/dist/inmemory/inMemory.db.d.ts +1 -1
  12. package/dist/inmemory/inMemory.db.js +1 -1
  13. package/dist/inmemory/inMemoryKeyValueDB.d.ts +2 -2
  14. package/dist/kv/commonKeyValueDao.d.ts +1 -1
  15. package/dist/kv/commonKeyValueDao.js +7 -7
  16. package/dist/pipeline/dbPipelineBackup.js +1 -1
  17. package/dist/pipeline/dbPipelineCopy.js +2 -2
  18. package/dist/pipeline/dbPipelineRestore.d.ts +2 -1
  19. package/dist/pipeline/dbPipelineRestore.js +4 -4
  20. package/dist/testing/commonDBTest.js +3 -3
  21. package/dist/testing/commonDaoTest.js +4 -4
  22. package/dist/testing/test.model.d.ts +1 -1
  23. package/dist/timeseries/commonTimeSeriesDao.js +1 -1
  24. package/dist/validation/index.d.ts +0 -2
  25. package/dist/validation/index.js +2 -2
  26. package/package.json +2 -1
  27. package/src/adapter/file/file.db.ts +2 -5
  28. package/src/commondao/common.dao.model.ts +6 -5
  29. package/src/commondao/common.dao.ts +208 -68
  30. package/src/inmemory/inMemory.db.ts +2 -7
  31. package/src/kv/commonKeyValueDao.ts +2 -1
  32. package/src/pipeline/dbPipelineRestore.ts +4 -5
  33. package/src/testing/test.model.ts +2 -1
  34. package/src/validation/index.ts +4 -10
@@ -1,4 +1,4 @@
1
- import { type ObjectWithId } from '@naturalcycles/js-lib/types';
1
+ import type { ObjectWithId } from '@naturalcycles/js-lib/types';
2
2
  import type { JsonSchema } from '@naturalcycles/nodejs-lib/ajv';
3
3
  import { Pipeline } from '@naturalcycles/nodejs-lib/stream';
4
4
  import { BaseCommonDB } from '../../commondb/base.common.db.js';
@@ -3,7 +3,7 @@ import { _by, _sortBy } from '@naturalcycles/js-lib/array';
3
3
  import { _since, localTime } from '@naturalcycles/js-lib/datetime';
4
4
  import { _assert } from '@naturalcycles/js-lib/error/assert.js';
5
5
  import { _deepEquals, _filterUndefinedValues, _sortObjectDeep } from '@naturalcycles/js-lib/object';
6
- import { _stringMapValues, } from '@naturalcycles/js-lib/types';
6
+ import { _stringMapValues } from '@naturalcycles/js-lib/types';
7
7
  import { generateJsonSchemaFromData } from '@naturalcycles/nodejs-lib/ajv';
8
8
  import { dimGrey } from '@naturalcycles/nodejs-lib/colors';
9
9
  import { Pipeline } from '@naturalcycles/nodejs-lib/stream';
@@ -28,7 +28,7 @@ export class LocalFilePersistencePlugin {
28
28
  return await Pipeline.fromNDJsonFile(filePath).toArray();
29
29
  }
30
30
  async saveFiles(ops) {
31
- await pMap(ops, async (op) => await this.saveFile(op.table, op.rows), { concurrency: 32 });
31
+ await pMap(ops, async op => await this.saveFile(op.table, op.rows), { concurrency: 32 });
32
32
  }
33
33
  async saveFile(table, rows) {
34
34
  await fs2.ensureDirAsync(this.cfg.storagePath);
package/dist/cnst.js CHANGED
@@ -1,4 +1,5 @@
1
- export var DBLibError;
1
+ export { DBLibError };
2
+ var DBLibError;
2
3
  (function (DBLibError) {
3
4
  DBLibError["DB_ROW_REQUIRED"] = "DB_ROW_REQUIRED";
4
5
  DBLibError["DAO_IS_READ_ONLY"] = "DAO_IS_READ_ONLY";
@@ -1,4 +1,4 @@
1
- import { type BaseDBEntity, type NonNegativeInteger, type StringMap, type Unsaved } from '@naturalcycles/js-lib/types';
1
+ import type { BaseDBEntity, NonNegativeInteger, ObjectWithId, StringMap, Unsaved } from '@naturalcycles/js-lib/types';
2
2
  import type { JsonSchema } from '@naturalcycles/nodejs-lib/ajv';
3
3
  import type { Pipeline } from '@naturalcycles/nodejs-lib/stream';
4
4
  import type { CommonDBTransactionOptions, RunQueryResult } from '../db.model.js';
@@ -9,9 +9,12 @@ import { CommonDaoTransaction } from './commonDaoTransaction.js';
9
9
  /**
10
10
  * Lowest common denominator API between supported Databases.
11
11
  *
12
- * DBM = Database model (how it's stored in DB)
13
12
  * BM = Backend model (optimized for API access)
13
+ * DBM = Database model (logical representation, before compression)
14
14
  * TM = Transport model (optimized to be sent over the wire)
15
+ *
16
+ * Note: When auto-compression is enabled, the physical storage format differs from DBM.
17
+ * Compression/decompression is handled transparently at the storage boundary.
15
18
  */
16
19
  export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM, ID extends string = BM['id']> {
17
20
  cfg: CommonDaoCfg<BM, DBM, ID>;
@@ -46,7 +49,6 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
46
49
  runQueryCount(q: DBQuery<DBM>, opt?: CommonDaoReadOptions): Promise<number>;
47
50
  streamQueryAsDBM(q: DBQuery<DBM>, opt?: CommonDaoStreamOptions<DBM>): Pipeline<DBM>;
48
51
  streamQuery(q: DBQuery<DBM>, opt?: CommonDaoStreamOptions<BM>): Pipeline<BM>;
49
- private streamQueryRaw;
50
52
  queryIds(q: DBQuery<DBM>, opt?: CommonDaoReadOptions): Promise<ID[]>;
51
53
  streamQueryIds(q: DBQuery<DBM>, opt?: CommonDaoStreamOptions<ID>): Pipeline<ID>;
52
54
  /**
@@ -132,13 +134,36 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
132
134
  bmToDBM(bm?: BM, opt?: CommonDaoOptions): Promise<DBM>;
133
135
  bmsToDBM(bms: BM[], opt?: CommonDaoOptions): Promise<DBM[]>;
134
136
  /**
135
- * Mutates `dbm`.
137
+ * Converts a DBM to storage format, applying compression if configured.
138
+ *
139
+ * Use this when you need to write directly to the database, bypassing the DAO save methods.
140
+ * The returned value is opaque and should only be passed to db.saveBatch() or similar.
141
+ *
142
+ * @example
143
+ * const storageRow = await dao.dbmToStorageRow(dbm)
144
+ * await db.saveBatch(table, [storageRow])
145
+ */
146
+ dbmToStorageRow(dbm: DBM): Promise<ObjectWithId>;
147
+ /**
148
+ * Converts multiple DBMs to storage rows.
149
+ */
150
+ dbmsToStorageRows(dbms: DBM[]): Promise<ObjectWithId[]>;
151
+ /**
152
+ * Converts a storage row back to a DBM, applying decompression if needed.
153
+ *
154
+ * Use this when you need to read directly from the database, bypassing the DAO load methods.
155
+ *
156
+ * @example
157
+ * const rows = await db.getByIds(table, ids)
158
+ * const dbms = await Promise.all(rows.map(row => dao.storageRowToDBM(row)))
136
159
  */
137
- compress(dbm: DBM): Promise<void>;
160
+ storageRowToDBM(row: ObjectWithId): Promise<DBM>;
138
161
  /**
139
- * Mutates `dbm`.
162
+ * Converts multiple storage rows to DBMs.
140
163
  */
141
- decompress(dbm: DBM): Promise<void>;
164
+ storageRowsToDBMs(rows: ObjectWithId[]): Promise<DBM[]>;
165
+ private compress;
166
+ private decompress;
142
167
  anyToDBM(dbm: undefined, opt?: CommonDaoOptions): Promise<null>;
143
168
  anyToDBM(dbm?: any, opt?: CommonDaoOptions): Promise<DBM>;
144
169
  anyToDBMs(rows: DBM[], opt?: CommonDaoOptions): Promise<DBM[]>;
@@ -158,6 +183,43 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
158
183
  withIds(ids: ID[]): DaoWithIds<CommonDao<BM, DBM, ID>>;
159
184
  withRowsToSave(rows: Unsaved<BM>[]): DaoWithRows<CommonDao<BM, DBM, ID>>;
160
185
  withRowToSave(row: Unsaved<BM>, opt?: DaoWithRowOptions<BM>): DaoWithRow<CommonDao<BM, DBM, ID>>;
186
+ /**
187
+ * Helper to decompress legacy compressed data when migrating away from auto-compression.
188
+ * Use as your `beforeDBMToBM` hook to decompress legacy rows on read.
189
+ *
190
+ * @example
191
+ * const dao = new CommonDao({
192
+ * hooks: {
193
+ * beforeDBMToBM: CommonDao.decompressLegacyRow,
194
+ * }
195
+ * })
196
+ *
197
+ * // Or within an existing hook:
198
+ * beforeDBMToBM: async (dbm) => {
199
+ * await CommonDao.decompressLegacyRow(dbm)
200
+ * // ... other transformations
201
+ * return dbm
202
+ * }
203
+ */
204
+ static decompressLegacyRow<T extends ObjectWithId>(row: T): Promise<T>;
205
+ /**
206
+ * Temporary helper to migrate from the old `data` compressed property to the new `__compressed` property.
207
+ * Use as your `beforeDBMToBM` hook during the migration period.
208
+ *
209
+ * Migration steps:
210
+ * 1. Add `beforeDBMToBM: CommonDao.migrateCompressedDataProperty` to your hooks
211
+ * 2. Deploy - old data (with `data` property) will be decompressed on read and recompressed to `__compressed` on write
212
+ * 3. Once all data has been naturally rewritten, remove the hook
213
+ *
214
+ * @example
215
+ * const dao = new CommonDao({
216
+ * compress: { keys: ['field1', 'field2'] },
217
+ * hooks: {
218
+ * beforeDBMToBM: CommonDao.migrateCompressedDataProperty,
219
+ * }
220
+ * })
221
+ */
222
+ static migrateCompressedDataProperty<T extends ObjectWithId>(row: T): Promise<T>;
161
223
  /**
162
224
  * Load rows (by their ids) from Multiple tables at once.
163
225
  * An optimized way to load data, minimizing DB round-trips.
@@ -14,9 +14,12 @@ import { CommonDaoTransaction } from './commonDaoTransaction.js';
14
14
  /**
15
15
  * Lowest common denominator API between supported Databases.
16
16
  *
17
- * DBM = Database model (how it's stored in DB)
18
17
  * BM = Backend model (optimized for API access)
18
+ * DBM = Database model (logical representation, before compression)
19
19
  * TM = Transport model (optimized to be sent over the wire)
20
+ *
21
+ * Note: When auto-compression is enabled, the physical storage format differs from DBM.
22
+ * Compression/decompression is handled transparently at the storage boundary.
20
23
  */
21
24
  export class CommonDao {
22
25
  cfg;
@@ -44,6 +47,15 @@ export class CommonDao {
44
47
  else {
45
48
  delete this.cfg.hooks.createRandomId;
46
49
  }
50
+ // If the auto-compression is enabled,
51
+ // then we need to ensure that the '__compressed' property is part of the index exclusion list.
52
+ if (this.cfg.compress?.keys) {
53
+ const current = this.cfg.excludeFromIndexes;
54
+ this.cfg.excludeFromIndexes = current ? [...current] : [];
55
+ if (!this.cfg.excludeFromIndexes.includes('__compressed')) {
56
+ this.cfg.excludeFromIndexes.push('__compressed');
57
+ }
58
+ }
47
59
  }
48
60
  // CREATE
49
61
  create(part = {}, opt = {}) {
@@ -92,7 +104,8 @@ export class CommonDao {
92
104
  if (!ids.length)
93
105
  return [];
94
106
  const table = opt.table || this.cfg.table;
95
- return await (opt.tx || this.cfg.db).getByIds(table, ids, opt);
107
+ const rows = await (opt.tx || this.cfg.db).getByIds(table, ids, opt);
108
+ return await this.storageRowsToDBMs(rows);
96
109
  }
97
110
  async getBy(by, value, limit = 0, opt) {
98
111
  return await this.query().filterEq(by, value).limit(limit).runQuery(opt);
@@ -127,14 +140,15 @@ export class CommonDao {
127
140
  * Order is not guaranteed, as queries run in parallel.
128
141
  */
129
142
  async runUnionQueries(queries, opt) {
130
- const results = (await pMap(queries, async (q) => (await this.runQueryExtended(q, opt)).rows)).flat();
143
+ const results = (await pMap(queries, async q => (await this.runQueryExtended(q, opt)).rows)).flat();
131
144
  return _uniqBy(results, r => r.id);
132
145
  }
133
146
  async runQueryExtended(q, opt = {}) {
134
147
  this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
135
148
  q.table = opt.table || q.table;
136
- const { rows, ...queryResult } = await this.cfg.db.runQuery(q, opt);
149
+ const { rows: rawRows, ...queryResult } = await this.cfg.db.runQuery(q, opt);
137
150
  const isPartialQuery = !!q._selectedFieldNames;
151
+ const rows = isPartialQuery ? rawRows : await this.storageRowsToDBMs(rawRows);
138
152
  const bms = isPartialQuery ? rows : await this.dbmsToBM(rows, opt);
139
153
  return {
140
154
  rows: bms,
@@ -148,8 +162,9 @@ export class CommonDao {
148
162
  async runQueryExtendedAsDBM(q, opt = {}) {
149
163
  this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
150
164
  q.table = opt.table || q.table;
151
- const { rows, ...queryResult } = await this.cfg.db.runQuery(q, opt);
165
+ const { rows: rawRows, ...queryResult } = await this.cfg.db.runQuery(q, opt);
152
166
  const isPartialQuery = !!q._selectedFieldNames;
167
+ const rows = isPartialQuery ? rawRows : await this.storageRowsToDBMs(rawRows);
153
168
  const dbms = isPartialQuery ? rows : await this.anyToDBMs(rows, opt);
154
169
  return { rows: dbms, ...queryResult };
155
170
  }
@@ -159,27 +174,32 @@ export class CommonDao {
159
174
  return await this.cfg.db.runQueryCount(q, opt);
160
175
  }
161
176
  streamQueryAsDBM(q, opt = {}) {
162
- const pipeline = this.streamQueryRaw(q, opt);
177
+ this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
178
+ q.table = opt.table || q.table;
179
+ let pipeline = this.cfg.db.streamQuery(q, opt);
180
+ if (this.cfg.compress?.keys.length) {
181
+ pipeline = pipeline.map(async row => await this.storageRowToDBM(row));
182
+ }
163
183
  const isPartialQuery = !!q._selectedFieldNames;
164
184
  if (isPartialQuery)
165
185
  return pipeline;
166
186
  opt.skipValidation ??= true;
167
187
  opt.errorMode ||= ErrorMode.SUPPRESS;
168
- return pipeline.map(async (dbm) => await this.anyToDBM(dbm, opt), { errorMode: opt.errorMode });
188
+ return pipeline.map(async dbm => await this.anyToDBM(dbm, opt), { errorMode: opt.errorMode });
169
189
  }
170
190
  streamQuery(q, opt = {}) {
171
- const pipeline = this.streamQueryRaw(q, opt);
191
+ this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
192
+ q.table = opt.table || q.table;
193
+ let pipeline = this.cfg.db.streamQuery(q, opt);
194
+ if (this.cfg.compress?.keys.length) {
195
+ pipeline = pipeline.map(async row => await this.storageRowToDBM(row));
196
+ }
172
197
  const isPartialQuery = !!q._selectedFieldNames;
173
198
  if (isPartialQuery)
174
199
  return pipeline;
175
200
  opt.skipValidation ??= true;
176
201
  opt.errorMode ||= ErrorMode.SUPPRESS;
177
- return pipeline.map(async (dbm) => await this.dbmToBM(dbm, opt), { errorMode: opt.errorMode });
178
- }
179
- streamQueryRaw(q, opt = {}) {
180
- this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
181
- q.table = opt.table || q.table;
182
- return this.cfg.db.streamQuery(q, opt);
202
+ return pipeline.map(async dbm => await this.dbmToBM(dbm, opt), { errorMode: opt.errorMode });
183
203
  }
184
204
  async queryIds(q, opt = {}) {
185
205
  this.validateQueryIndexes(q); // throws if query uses `excludeFromIndexes` property
@@ -254,7 +274,7 @@ export class CommonDao {
254
274
  * Like patchById, but runs all operations within a Transaction.
255
275
  */
256
276
  async patchByIdInTransaction(id, patch, opt) {
257
- return await this.runInTransaction(async (daoTx) => {
277
+ return await this.runInTransaction(async daoTx => {
258
278
  return await this.patchById(id, patch, { ...opt, tx: daoTx.tx });
259
279
  });
260
280
  }
@@ -306,7 +326,7 @@ export class CommonDao {
306
326
  * Like patch, but runs all operations within a Transaction.
307
327
  */
308
328
  async patchInTransaction(bm, patch, opt) {
309
- return await this.runInTransaction(async (daoTx) => {
329
+ return await this.runInTransaction(async daoTx => {
310
330
  return await this.patch(bm, patch, { ...opt, tx: daoTx.tx });
311
331
  });
312
332
  }
@@ -331,7 +351,8 @@ export class CommonDao {
331
351
  this.cfg.hooks.beforeSave?.(dbm);
332
352
  const table = opt.table || this.cfg.table;
333
353
  const saveOptions = this.prepareSaveOptions(opt);
334
- await (opt.tx || this.cfg.db).saveBatch(table, [dbm], saveOptions);
354
+ const row = await this.dbmToStorageRow(dbm);
355
+ await (opt.tx || this.cfg.db).saveBatch(table, [row], saveOptions);
335
356
  if (saveOptions.assignGeneratedIds) {
336
357
  bm.id = dbm.id;
337
358
  }
@@ -340,15 +361,16 @@ export class CommonDao {
340
361
  async saveAsDBM(dbm, opt = {}) {
341
362
  this.requireWriteAccess();
342
363
  this.assignIdCreatedUpdated(dbm, opt); // mutates
343
- const row = await this.anyToDBM(dbm, opt);
344
- this.cfg.hooks.beforeSave?.(row);
364
+ const validDbm = await this.anyToDBM(dbm, opt);
365
+ this.cfg.hooks.beforeSave?.(validDbm);
345
366
  const table = opt.table || this.cfg.table;
346
367
  const saveOptions = this.prepareSaveOptions(opt);
368
+ const row = await this.dbmToStorageRow(validDbm);
347
369
  await (opt.tx || this.cfg.db).saveBatch(table, [row], saveOptions);
348
370
  if (saveOptions.assignGeneratedIds) {
349
- dbm.id = row.id;
371
+ dbm.id = validDbm.id;
350
372
  }
351
- return row;
373
+ return validDbm;
352
374
  }
353
375
  async saveBatch(bms, opt = {}) {
354
376
  if (!bms.length)
@@ -361,7 +383,8 @@ export class CommonDao {
361
383
  }
362
384
  const table = opt.table || this.cfg.table;
363
385
  const saveOptions = this.prepareSaveOptions(opt);
364
- await (opt.tx || this.cfg.db).saveBatch(table, dbms, saveOptions);
386
+ const rows = await this.dbmsToStorageRows(dbms);
387
+ await (opt.tx || this.cfg.db).saveBatch(table, rows, saveOptions);
365
388
  if (saveOptions.assignGeneratedIds) {
366
389
  dbms.forEach((dbm, i) => (bms[i].id = dbm.id));
367
390
  }
@@ -372,24 +395,28 @@ export class CommonDao {
372
395
  return [];
373
396
  this.requireWriteAccess();
374
397
  dbms.forEach(dbm => this.assignIdCreatedUpdated(dbm, opt));
375
- const rows = await this.anyToDBMs(dbms, opt);
398
+ const validDbms = await this.anyToDBMs(dbms, opt);
376
399
  if (this.cfg.hooks.beforeSave) {
377
- rows.forEach(row => this.cfg.hooks.beforeSave(row));
400
+ validDbms.forEach(dbm => this.cfg.hooks.beforeSave(dbm));
378
401
  }
379
402
  const table = opt.table || this.cfg.table;
380
403
  const saveOptions = this.prepareSaveOptions(opt);
404
+ const rows = await this.dbmsToStorageRows(validDbms);
381
405
  await (opt.tx || this.cfg.db).saveBatch(table, rows, saveOptions);
382
406
  if (saveOptions.assignGeneratedIds) {
383
- rows.forEach((row, i) => (dbms[i].id = row.id));
407
+ validDbms.forEach((dbm, i) => (dbms[i].id = dbm.id));
384
408
  }
385
- return rows;
409
+ return validDbms;
386
410
  }
387
411
  prepareSaveOptions(opt) {
388
412
  let { saveMethod, assignGeneratedIds = this.cfg.assignGeneratedIds, excludeFromIndexes = this.cfg.excludeFromIndexes, } = opt;
413
+ // If the user passed in custom `excludeFromIndexes` with the save() call,
414
+ // and the auto-compression is enabled,
415
+ // then we need to ensure that the '__compressed' property is part of the list.
389
416
  if (this.cfg.compress?.keys) {
390
417
  excludeFromIndexes ??= [];
391
- if (!excludeFromIndexes.includes('data')) {
392
- excludeFromIndexes.push('data');
418
+ if (!excludeFromIndexes.includes('__compressed')) {
419
+ excludeFromIndexes.push('__compressed');
393
420
  }
394
421
  }
395
422
  if (this.cfg.immutable && !opt.allowMutability && !opt.saveMethod) {
@@ -397,7 +424,7 @@ export class CommonDao {
397
424
  }
398
425
  return {
399
426
  ...opt,
400
- excludeFromIndexes,
427
+ excludeFromIndexes: excludeFromIndexes,
401
428
  saveMethod,
402
429
  assignGeneratedIds,
403
430
  };
@@ -416,25 +443,19 @@ export class CommonDao {
416
443
  const table = opt.table || this.cfg.table;
417
444
  opt.skipValidation ??= true;
418
445
  opt.errorMode ||= ErrorMode.SUPPRESS;
419
- if (this.cfg.immutable && !opt.allowMutability && !opt.saveMethod) {
420
- opt = { ...opt, saveMethod: 'insert' };
421
- }
422
- const excludeFromIndexes = opt.excludeFromIndexes || this.cfg.excludeFromIndexes;
446
+ const saveOptions = this.prepareSaveOptions(opt);
423
447
  const { beforeSave } = this.cfg.hooks;
424
448
  const { chunkSize = 500, chunkConcurrency = 32, errorMode } = opt;
425
449
  await p
426
- .map(async (bm) => {
450
+ .map(async bm => {
427
451
  this.assignIdCreatedUpdated(bm, opt);
428
452
  const dbm = await this.bmToDBM(bm, opt);
429
453
  beforeSave?.(dbm);
430
- return dbm;
454
+ return await this.dbmToStorageRow(dbm);
431
455
  }, { errorMode })
432
456
  .chunk(chunkSize)
433
- .map(async (batch) => {
434
- await this.cfg.db.saveBatch(table, batch, {
435
- ...opt,
436
- excludeFromIndexes,
437
- });
457
+ .map(async batch => {
458
+ await this.cfg.db.saveBatch(table, batch, saveOptions);
438
459
  return batch;
439
460
  }, {
440
461
  concurrency: chunkConcurrency,
@@ -480,7 +501,7 @@ export class CommonDao {
480
501
  .streamQuery(q.select(['id']), opt)
481
502
  .mapSync(r => r.id)
482
503
  .chunk(chunkSize)
483
- .map(async (ids) => {
504
+ .map(async ids => {
484
505
  await this.cfg.db.deleteByIds(q.table, ids, opt);
485
506
  deleted += ids.length;
486
507
  }, {
@@ -545,15 +566,13 @@ export class CommonDao {
545
566
  // optimization: no need to run full joi DBM validation, cause BM validation will be run
546
567
  // const dbm = this.anyToDBM(_dbm, opt)
547
568
  const dbm = { ..._dbm, ...this.cfg.hooks.parseNaturalId(_dbm.id) };
548
- // Decompress
549
- await this.decompress(dbm);
550
569
  // DBM > BM
551
570
  const bm = ((await this.cfg.hooks.beforeDBMToBM?.(dbm)) || dbm);
552
571
  // Validate/convert BM
553
572
  return this.validateAndConvert(bm, 'load', opt);
554
573
  }
555
574
  async dbmsToBM(dbms, opt = {}) {
556
- return await pMap(dbms, async (dbm) => await this.dbmToBM(dbm, opt));
575
+ return await pMap(dbms, async dbm => await this.dbmToBM(dbm, opt));
557
576
  }
558
577
  async bmToDBM(bm, opt) {
559
578
  if (bm === undefined)
@@ -562,14 +581,64 @@ export class CommonDao {
562
581
  bm = this.validateAndConvert(bm, 'save', opt);
563
582
  // BM > DBM
564
583
  const dbm = ((await this.cfg.hooks.beforeBMToDBM?.(bm)) || bm);
565
- // Compress
566
- if (this.cfg.compress)
567
- await this.compress(dbm);
568
584
  return dbm;
569
585
  }
570
586
  async bmsToDBM(bms, opt = {}) {
571
587
  // try/catch?
572
- return await pMap(bms, async (bm) => await this.bmToDBM(bm, opt));
588
+ return await pMap(bms, async bm => await this.bmToDBM(bm, opt));
589
+ }
590
+ // STORAGE LAYER (compression/decompression at DB boundary)
591
+ // These methods convert between DBM (logical model) and storage format (physical, possibly compressed).
592
+ // Public methods allow external code to bypass the DAO layer for direct DB access
593
+ // (e.g., cross-environment data copy).
594
+ /**
595
+ * Converts a DBM to storage format, applying compression if configured.
596
+ *
597
+ * Use this when you need to write directly to the database, bypassing the DAO save methods.
598
+ * The returned value is opaque and should only be passed to db.saveBatch() or similar.
599
+ *
600
+ * @example
601
+ * const storageRow = await dao.dbmToStorageRow(dbm)
602
+ * await db.saveBatch(table, [storageRow])
603
+ */
604
+ async dbmToStorageRow(dbm) {
605
+ if (!this.cfg.compress?.keys.length)
606
+ return dbm;
607
+ const row = { ...dbm };
608
+ await this.compress(row);
609
+ return row;
610
+ }
611
+ /**
612
+ * Converts multiple DBMs to storage rows.
613
+ */
614
+ async dbmsToStorageRows(dbms) {
615
+ if (!this.cfg.compress?.keys.length)
616
+ return dbms;
617
+ return await pMap(dbms, async dbm => await this.dbmToStorageRow(dbm));
618
+ }
619
+ /**
620
+ * Converts a storage row back to a DBM, applying decompression if needed.
621
+ *
622
+ * Use this when you need to read directly from the database, bypassing the DAO load methods.
623
+ *
624
+ * @example
625
+ * const rows = await db.getByIds(table, ids)
626
+ * const dbms = await Promise.all(rows.map(row => dao.storageRowToDBM(row)))
627
+ */
628
+ async storageRowToDBM(row) {
629
+ if (!this.cfg.compress?.keys.length)
630
+ return row;
631
+ const dbm = { ...row };
632
+ await this.decompress(dbm);
633
+ return dbm;
634
+ }
635
+ /**
636
+ * Converts multiple storage rows to DBMs.
637
+ */
638
+ async storageRowsToDBMs(rows) {
639
+ if (!this.cfg.compress?.keys.length)
640
+ return rows;
641
+ return await pMap(rows, async row => await this.storageRowToDBM(row));
573
642
  }
574
643
  /**
575
644
  * Mutates `dbm`.
@@ -579,26 +648,22 @@ export class CommonDao {
579
648
  return; // No compression requested
580
649
  const { keys } = this.cfg.compress;
581
650
  const properties = _pick(dbm, keys);
582
- _assert(!('data' in dbm) || 'data' in properties, `Data (${dbm.id}) already has a "data" property. When using compression, this property must be included in the compression keys list.`);
583
651
  const bufferString = JSON.stringify(properties);
584
- const data = await zstdCompress(bufferString);
652
+ const __compressed = await zstdCompress(bufferString);
585
653
  _omitWithUndefined(dbm, _objectKeys(properties), { mutate: true });
586
- Object.assign(dbm, { data });
654
+ Object.assign(dbm, { __compressed });
587
655
  }
588
656
  /**
589
657
  * Mutates `dbm`.
590
658
  */
591
659
  async decompress(dbm) {
592
660
  _typeCast(dbm);
593
- if (!this.cfg.compress)
594
- return; // Auto-compression not turned on
595
- if (!Buffer.isBuffer(dbm.data))
661
+ if (!Buffer.isBuffer(dbm.__compressed))
596
662
  return; // No compressed data
597
- // try-catch to avoid a `data` with Buffer which is not compressed, but legit data
598
663
  try {
599
- const bufferString = await decompressZstdOrInflateToString(dbm.data);
664
+ const bufferString = await decompressZstdOrInflateToString(dbm.__compressed);
600
665
  const properties = JSON.parse(bufferString);
601
- dbm.data = undefined;
666
+ dbm.__compressed = undefined;
602
667
  Object.assign(dbm, properties);
603
668
  }
604
669
  catch { }
@@ -609,14 +674,12 @@ export class CommonDao {
609
674
  // this shouldn't be happening on load! but should on save!
610
675
  // this.assignIdCreatedUpdated(dbm, opt)
611
676
  dbm = { ...dbm, ...this.cfg.hooks.parseNaturalId(dbm.id) };
612
- // Decompress
613
- await this.decompress(dbm);
614
677
  // Validate/convert DBM
615
678
  // return this.validateAndConvert(dbm, this.cfg.dbmSchema, DBModelType.DBM, opt)
616
679
  return dbm;
617
680
  }
618
681
  async anyToDBMs(rows, opt = {}) {
619
- return await pMap(rows, async (entity) => await this.anyToDBM(entity, opt));
682
+ return await pMap(rows, async entity => await this.anyToDBM(entity, opt));
620
683
  }
621
684
  /**
622
685
  * Returns *converted value* (NOT the same reference).
@@ -688,6 +751,73 @@ export class CommonDao {
688
751
  opt: opt,
689
752
  };
690
753
  }
754
+ /**
755
+ * Helper to decompress legacy compressed data when migrating away from auto-compression.
756
+ * Use as your `beforeDBMToBM` hook to decompress legacy rows on read.
757
+ *
758
+ * @example
759
+ * const dao = new CommonDao({
760
+ * hooks: {
761
+ * beforeDBMToBM: CommonDao.decompressLegacyRow,
762
+ * }
763
+ * })
764
+ *
765
+ * // Or within an existing hook:
766
+ * beforeDBMToBM: async (dbm) => {
767
+ * await CommonDao.decompressLegacyRow(dbm)
768
+ * // ... other transformations
769
+ * return dbm
770
+ * }
771
+ */
772
+ static async decompressLegacyRow(row) {
773
+ // Check both __compressed (current) and data (legacy) for backward compatibility
774
+ const compressed = row.__compressed ?? row.data;
775
+ if (!Buffer.isBuffer(compressed))
776
+ return row;
777
+ try {
778
+ const bufferString = await decompressZstdOrInflateToString(compressed);
779
+ const properties = JSON.parse(bufferString);
780
+ row.__compressed = undefined;
781
+ row.data = undefined;
782
+ Object.assign(row, properties);
783
+ }
784
+ catch {
785
+ // Decompression failed - field is not compressed, leave as-is
786
+ }
787
+ return row;
788
+ }
789
+ /**
790
+ * Temporary helper to migrate from the old `data` compressed property to the new `__compressed` property.
791
+ * Use as your `beforeDBMToBM` hook during the migration period.
792
+ *
793
+ * Migration steps:
794
+ * 1. Add `beforeDBMToBM: CommonDao.migrateCompressedDataProperty` to your hooks
795
+ * 2. Deploy - old data (with `data` property) will be decompressed on read and recompressed to `__compressed` on write
796
+ * 3. Once all data has been naturally rewritten, remove the hook
797
+ *
798
+ * @example
799
+ * const dao = new CommonDao({
800
+ * compress: { keys: ['field1', 'field2'] },
801
+ * hooks: {
802
+ * beforeDBMToBM: CommonDao.migrateCompressedDataProperty,
803
+ * }
804
+ * })
805
+ */
806
+ static async migrateCompressedDataProperty(row) {
807
+ const data = row.data;
808
+ if (!Buffer.isBuffer(data))
809
+ return row;
810
+ try {
811
+ const bufferString = await decompressZstdOrInflateToString(data);
812
+ const properties = JSON.parse(bufferString);
813
+ row.data = undefined;
814
+ Object.assign(row, properties);
815
+ }
816
+ catch {
817
+ // Decompression failed - data field is not compressed, leave as-is
818
+ }
819
+ return row;
820
+ }
691
821
  /**
692
822
  * Load rows (by their ids) from Multiple tables at once.
693
823
  * An optimized way to load data, minimizing DB round-trips.
@@ -748,14 +878,18 @@ export class CommonDao {
748
878
  const { table } = dao.cfg;
749
879
  if ('id' in input) {
750
880
  // Singular
751
- const dbm = dbmByTableById[table][input.id];
881
+ const row = dbmByTableById[table][input.id];
882
+ // Decompress before converting to BM
883
+ const dbm = row ? await dao.storageRowToDBM(row) : undefined;
752
884
  bmsByProp[prop] = (await dao.dbmToBM(dbm, opt)) || null;
753
885
  }
754
886
  else {
755
887
  // Plural
756
888
  // We apply filtering, to be able to support multiple input props fetching from the same table.
757
889
  // Without filtering - every prop will get ALL rows from that table.
758
- const dbms = input.ids.map(id => dbmByTableById[table][id]).filter(_isTruthy);
890
+ const rows = input.ids.map(id => dbmByTableById[table][id]).filter(_isTruthy);
891
+ // Decompress before converting to BM
892
+ const dbms = await dao.storageRowsToDBMs(rows);
759
893
  bmsByProp[prop] = await dao.dbmsToBM(dbms, opt);
760
894
  }
761
895
  });
@@ -789,7 +923,7 @@ export class CommonDao {
789
923
  return;
790
924
  const { db } = inputs[0].dao.cfg;
791
925
  const dbmsByTable = {};
792
- await pMap(inputs, async (input) => {
926
+ await pMap(inputs, async input => {
793
927
  const { dao } = input;
794
928
  const { table } = dao.cfg;
795
929
  dbmsByTable[table] ||= [];
@@ -809,7 +943,8 @@ export class CommonDao {
809
943
  dao.assignIdCreatedUpdated(row, opt);
810
944
  const dbm = await dao.bmToDBM(row, opt);
811
945
  dao.cfg.hooks.beforeSave?.(dbm);
812
- dbmsByTable[table].push(dbm);
946
+ const storageRow = await dao.dbmToStorageRow(dbm);
947
+ dbmsByTable[table].push(storageRow);
813
948
  }
814
949
  else {
815
950
  // Plural
@@ -818,7 +953,8 @@ export class CommonDao {
818
953
  if (dao.cfg.hooks.beforeSave) {
819
954
  dbms.forEach(dbm => dao.cfg.hooks.beforeSave(dbm));
820
955
  }
821
- dbmsByTable[table].push(...dbms);
956
+ const storageRows = await dao.dbmsToStorageRows(dbms);
957
+ dbmsByTable[table].push(...storageRows);
822
958
  }
823
959
  });
824
960
  await db.multiSave(dbmsByTable);
@@ -829,7 +965,7 @@ export class CommonDao {
829
965
  }
830
966
  async runInTransaction(fn, opt) {
831
967
  let r;
832
- await this.cfg.db.runInTransaction(async (tx) => {
968
+ await this.cfg.db.runInTransaction(async tx => {
833
969
  const daoTx = new CommonDaoTransaction(tx, this.cfg.logger);
834
970
  try {
835
971
  r = await fn(daoTx);