@naturalcycles/db-lib 9.11.0 → 9.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -112,9 +112,9 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
112
112
  saveBatchAsDBM(dbms: Unsaved<DBM>[], opt?: CommonDaoSaveBatchOptions<DBM>): Promise<DBM[]>;
113
113
  /**
114
114
  * "Streaming" is implemented by buffering incoming rows into **batches**
115
- * (of size opt.batchSize, which defaults to 500),
116
- * and then executing db.saveBatch(batch) with the concurrency
117
- * of opt.batchConcurrency (which defaults to 16).
115
+ * (of size opt.chunkSize, which defaults to 500),
116
+ * and then executing db.saveBatch(chunk) with the concurrency
117
+ * of opt.chunkConcurrency (which defaults to 16).
118
118
  */
119
119
  streamSaveTransform(opt?: CommonDaoStreamSaveOptions<DBM>): Transform[];
120
120
  /**
@@ -123,9 +123,9 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
123
123
  deleteById(id?: string | null, opt?: CommonDaoOptions): Promise<number>;
124
124
  deleteByIds(ids: string[], opt?: CommonDaoOptions): Promise<number>;
125
125
  /**
126
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
127
- * `deleteByIds` for each batch concurrently (infinite concurrency).
128
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
126
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
127
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
128
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
129
129
  */
130
130
  deleteByQuery(q: DBQuery<DBM>, opt?: CommonDaoStreamDeleteOptions<DBM>): Promise<number>;
131
131
  updateById(id: string, patch: DBPatch<DBM>, opt?: CommonDaoOptions): Promise<number>;
@@ -448,7 +448,7 @@ class CommonDao {
448
448
  * "Returns", just to have a type of "Saved"
449
449
  */
450
450
  assignIdCreatedUpdated(obj, opt = {}) {
451
- const now = Math.floor(Date.now() / 1000);
451
+ const now = (0, js_lib_1.nowUnix)();
452
452
  if (this.cfg.useCreatedProperty) {
453
453
  obj.created ||= obj.updated || now;
454
454
  }
@@ -697,9 +697,9 @@ class CommonDao {
697
697
  }
698
698
  /**
699
699
  * "Streaming" is implemented by buffering incoming rows into **batches**
700
- * (of size opt.batchSize, which defaults to 500),
701
- * and then executing db.saveBatch(batch) with the concurrency
702
- * of opt.batchConcurrency (which defaults to 16).
700
+ * (of size opt.chunkSize, which defaults to 500),
701
+ * and then executing db.saveBatch(chunk) with the concurrency
702
+ * of opt.chunkConcurrency (which defaults to 16).
703
703
  */
704
704
  streamSaveTransform(opt = {}) {
705
705
  this.requireWriteAccess();
@@ -711,7 +711,7 @@ class CommonDao {
711
711
  }
712
712
  const excludeFromIndexes = opt.excludeFromIndexes || this.cfg.excludeFromIndexes;
713
713
  const { beforeSave } = this.cfg.hooks;
714
- const { batchSize = 500, batchConcurrency = 16, errorMode } = opt;
714
+ const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt;
715
715
  return [
716
716
  (0, nodejs_lib_1.transformMap)(async (bm) => {
717
717
  this.assignIdCreatedUpdated(bm, opt); // mutates
@@ -725,7 +725,7 @@ class CommonDao {
725
725
  }, {
726
726
  errorMode,
727
727
  }),
728
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
728
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
729
729
  (0, nodejs_lib_1.transformMap)(async (batch) => {
730
730
  await this.cfg.db.saveBatch(table, batch, {
731
731
  ...opt,
@@ -733,7 +733,7 @@ class CommonDao {
733
733
  });
734
734
  return batch;
735
735
  }, {
736
- concurrency: batchConcurrency,
736
+ concurrency: chunkConcurrency,
737
737
  errorMode,
738
738
  flattenArrayOutput: true,
739
739
  }),
@@ -768,9 +768,9 @@ class CommonDao {
768
768
  return count;
769
769
  }
770
770
  /**
771
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
772
- * `deleteByIds` for each batch concurrently (infinite concurrency).
773
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
771
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
772
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
773
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
774
774
  */
775
775
  async deleteByQuery(q, opt = {}) {
776
776
  this.requireWriteAccess();
@@ -779,25 +779,25 @@ class CommonDao {
779
779
  const op = `deleteByQuery(${q.pretty()})`;
780
780
  const started = this.logStarted(op, q.table);
781
781
  let deleted = 0;
782
- if (opt.batchSize) {
783
- const { batchSize, batchConcurrency = 16 } = opt;
782
+ if (opt.chunkSize) {
783
+ const { chunkSize, chunkConcurrency = 16 } = opt;
784
784
  await (0, nodejs_lib_1._pipeline)([
785
785
  this.cfg.db.streamQuery(q.select(['id']), opt),
786
786
  (0, nodejs_lib_1.transformMapSimple)(r => r.id, {
787
787
  errorMode: js_lib_1.ErrorMode.SUPPRESS,
788
788
  }),
789
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
789
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
790
790
  (0, nodejs_lib_1.transformMap)(async (ids) => {
791
791
  deleted += await this.cfg.db.deleteByQuery(dbQuery_1.DBQuery.create(q.table).filterIn('id', ids), opt);
792
792
  }, {
793
793
  predicate: js_lib_1._passthroughPredicate,
794
- concurrency: batchConcurrency,
794
+ concurrency: chunkConcurrency,
795
795
  }),
796
796
  // LogProgress should be AFTER the mapper, to be able to report correct stats
797
797
  (0, nodejs_lib_1.transformLogProgress)({
798
798
  metric: q.table,
799
799
  logEvery: 2, // 500 * 2 === 1000
800
- batchSize,
800
+ chunkSize,
801
801
  ...opt,
802
802
  }),
803
803
  (0, nodejs_lib_1.writableVoid)(),
@@ -250,15 +250,15 @@ export interface CommonDaoStreamOptions<IN> extends CommonDaoOptions, TransformL
250
250
  errorMode?: ErrorMode;
251
251
  /**
252
252
  * Applicable to some of stream operations, e.g deleteByQuery.
253
- * If set - `deleteByQuery` won't execute it "all at once", but in batches.
253
+ * If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
254
254
  *
255
255
  * Defaults to undefined, so the operation is executed "all at once".
256
256
  */
257
- batchSize?: number;
257
+ chunkSize?: number;
258
258
  /**
259
- * When batchSize is set - this option controls how many batches to run concurrently.
259
+ * When chunkSize is set - this option controls how many chunks to run concurrently.
260
260
  * Defaults to 16, "the magic number of JavaScript concurrency".
261
261
  */
262
- batchConcurrency?: number;
262
+ chunkConcurrency?: number;
263
263
  }
264
264
  export type CommonDaoCreateOptions = CommonDBCreateOptions;
@@ -3,7 +3,7 @@ import { CommonLogger, KeyValueTuple } from '@naturalcycles/js-lib';
3
3
  import { ReadableTyped } from '@naturalcycles/nodejs-lib';
4
4
  import { CommonDaoLogLevel } from '../commondao/common.dao.model';
5
5
  import { CommonDBCreateOptions } from '../db.model';
6
- import { CommonKeyValueDB, KeyValueDBTuple } from './commonKeyValueDB';
6
+ import { CommonKeyValueDB, CommonKeyValueDBSaveBatchOptions, KeyValueDBTuple } from './commonKeyValueDB';
7
7
  export interface CommonKeyValueDaoCfg<T> {
8
8
  db: CommonKeyValueDB;
9
9
  table: string;
@@ -36,6 +36,7 @@ export interface CommonKeyValueDaoCfg<T> {
36
36
  */
37
37
  deflatedJsonValue?: boolean;
38
38
  }
39
+ export type CommonKeyValueDaoSaveOptions = CommonKeyValueDBSaveBatchOptions;
39
40
  export declare class CommonKeyValueDao<T> {
40
41
  constructor(cfg: CommonKeyValueDaoCfg<T>);
41
42
  cfg: CommonKeyValueDaoCfg<T> & {
@@ -50,13 +51,13 @@ export declare class CommonKeyValueDao<T> {
50
51
  requireById(id: string): Promise<T>;
51
52
  requireByIdAsBuffer(id: string): Promise<Buffer>;
52
53
  getByIdOrEmpty(id: string, part?: Partial<T>): Promise<T>;
53
- patch(id: string, patch: Partial<T>): Promise<T>;
54
+ patch(id: string, patch: Partial<T>, opt?: CommonKeyValueDaoSaveOptions): Promise<T>;
54
55
  getByIds(ids: string[]): Promise<KeyValueTuple<string, T>[]>;
55
56
  getByIdsAsBuffer(ids: string[]): Promise<KeyValueTuple<string, Buffer>[]>;
56
- save(id: string, value: T): Promise<void>;
57
- saveAsBuffer(id: string, value: Buffer): Promise<void>;
58
- saveBatch(entries: KeyValueTuple<string, T>[]): Promise<void>;
59
- saveBatchAsBuffer(entries: KeyValueDBTuple[]): Promise<void>;
57
+ save(id: string, value: T, opt?: CommonKeyValueDaoSaveOptions): Promise<void>;
58
+ saveAsBuffer(id: string, value: Buffer, opt?: CommonKeyValueDaoSaveOptions): Promise<void>;
59
+ saveBatch(entries: KeyValueTuple<string, T>[], opt?: CommonKeyValueDaoSaveOptions): Promise<void>;
60
+ saveBatchAsBuffer(entries: KeyValueDBTuple[], opt?: CommonKeyValueDaoSaveOptions): Promise<void>;
60
61
  deleteByIds(ids: string[]): Promise<void>;
61
62
  deleteById(id: string): Promise<void>;
62
63
  streamIds(limit?: number): ReadableTyped<string>;
@@ -74,12 +74,12 @@ class CommonKeyValueDao {
74
74
  ...part,
75
75
  };
76
76
  }
77
- async patch(id, patch) {
77
+ async patch(id, patch, opt) {
78
78
  const v = {
79
79
  ...(await this.getByIdOrEmpty(id)),
80
80
  ...patch,
81
81
  };
82
- await this.save(id, v);
82
+ await this.save(id, v, opt);
83
83
  return v;
84
84
  }
85
85
  async getByIds(ids) {
@@ -94,27 +94,25 @@ class CommonKeyValueDao {
94
94
  async getByIdsAsBuffer(ids) {
95
95
  return await this.cfg.db.getByIds(this.cfg.table, ids);
96
96
  }
97
- async save(id, value) {
98
- await this.saveBatch([[id, value]]);
97
+ async save(id, value, opt) {
98
+ await this.saveBatch([[id, value]], opt);
99
99
  }
100
- async saveAsBuffer(id, value) {
101
- await this.cfg.db.saveBatch(this.cfg.table, [[id, value]]);
100
+ async saveAsBuffer(id, value, opt) {
101
+ await this.cfg.db.saveBatch(this.cfg.table, [[id, value]], opt);
102
102
  }
103
- async saveBatch(entries) {
103
+ async saveBatch(entries, opt) {
104
+ const { mapValueToBuffer } = this.cfg.hooks;
104
105
  let bufferEntries;
105
- if (!this.cfg.hooks.mapValueToBuffer) {
106
+ if (!mapValueToBuffer) {
106
107
  bufferEntries = entries;
107
108
  }
108
109
  else {
109
- bufferEntries = await (0, js_lib_1.pMap)(entries, async ([id, v]) => [
110
- id,
111
- await this.cfg.hooks.mapValueToBuffer(v),
112
- ]);
110
+ bufferEntries = await (0, js_lib_1.pMap)(entries, async ([id, v]) => [id, await mapValueToBuffer(v)]);
113
111
  }
114
- await this.cfg.db.saveBatch(this.cfg.table, bufferEntries);
112
+ await this.cfg.db.saveBatch(this.cfg.table, bufferEntries, opt);
115
113
  }
116
- async saveBatchAsBuffer(entries) {
117
- await this.cfg.db.saveBatch(this.cfg.table, entries);
114
+ async saveBatchAsBuffer(entries, opt) {
115
+ await this.cfg.db.saveBatch(this.cfg.table, entries, opt);
118
116
  }
119
117
  async deleteByIds(ids) {
120
118
  await this.cfg.db.deleteByIds(this.cfg.table, ids);
@@ -130,10 +128,7 @@ class CommonKeyValueDao {
130
128
  if (!mapBufferToValue) {
131
129
  return this.cfg.db.streamValues(this.cfg.table, limit);
132
130
  }
133
- const stream = this.cfg.db
134
- .streamValues(this.cfg.table, limit)
135
- // .on('error', err => stream.emit('error', err))
136
- .flatMap(async (buf) => {
131
+ return this.cfg.db.streamValues(this.cfg.table, limit).flatMap(async (buf) => {
137
132
  try {
138
133
  return [await mapBufferToValue(buf)];
139
134
  }
@@ -141,18 +136,16 @@ class CommonKeyValueDao {
141
136
  this.cfg.logger.error(err);
142
137
  return []; // SKIP
143
138
  }
139
+ }, {
140
+ concurrency: 16,
144
141
  });
145
- return stream;
146
142
  }
147
143
  streamEntries(limit) {
148
144
  const { mapBufferToValue } = this.cfg.hooks;
149
145
  if (!mapBufferToValue) {
150
146
  return this.cfg.db.streamEntries(this.cfg.table, limit);
151
147
  }
152
- const stream = this.cfg.db
153
- .streamEntries(this.cfg.table, limit)
154
- // .on('error', err => stream.emit('error', err))
155
- .flatMap(async ([id, buf]) => {
148
+ return this.cfg.db.streamEntries(this.cfg.table, limit).flatMap(async ([id, buf]) => {
156
149
  try {
157
150
  return [[id, await mapBufferToValue(buf)]];
158
151
  }
@@ -160,8 +153,9 @@ class CommonKeyValueDao {
160
153
  this.cfg.logger.error(err);
161
154
  return []; // SKIP
162
155
  }
156
+ }, {
157
+ concurrency: 16,
163
158
  });
164
- return stream;
165
159
  }
166
160
  }
167
161
  exports.CommonKeyValueDao = CommonKeyValueDao;
@@ -1,5 +1,12 @@
1
- import { AsyncMemoCache, MISS } from '@naturalcycles/js-lib';
1
+ import { AsyncMemoCache, MISS, NumberOfSeconds } from '@naturalcycles/js-lib';
2
2
  import { CommonKeyValueDao } from './commonKeyValueDao';
3
+ export interface CommonKeyValueDaoMemoCacheCfg<VALUE> {
4
+ dao: CommonKeyValueDao<VALUE>;
5
+ /**
6
+ * If set, every `set()` will set `expireAt` (TTL) option.
7
+ */
8
+ ttl?: NumberOfSeconds;
9
+ }
3
10
  /**
4
11
  * AsyncMemoCache implementation, backed by CommonKeyValueDao.
5
12
  *
@@ -9,8 +16,8 @@ import { CommonKeyValueDao } from './commonKeyValueDao';
9
16
  * clear the whole table/cache.
10
17
  */
11
18
  export declare class CommonKeyValueDaoMemoCache<VALUE = any> implements AsyncMemoCache<string, VALUE> {
12
- private dao;
13
- constructor(dao: CommonKeyValueDao<VALUE>);
19
+ private cfg;
20
+ constructor(cfg: CommonKeyValueDaoMemoCacheCfg<VALUE>);
14
21
  get(k: string): Promise<VALUE | typeof MISS>;
15
22
  set(k: string, v: VALUE): Promise<void>;
16
23
  clear(): Promise<void>;
@@ -11,14 +11,15 @@ const js_lib_1 = require("@naturalcycles/js-lib");
11
11
  * clear the whole table/cache.
12
12
  */
13
13
  class CommonKeyValueDaoMemoCache {
14
- constructor(dao) {
15
- this.dao = dao;
14
+ constructor(cfg) {
15
+ this.cfg = cfg;
16
16
  }
17
17
  async get(k) {
18
- return (await this.dao.getById(k)) || js_lib_1.MISS;
18
+ return (await this.cfg.dao.getById(k)) || js_lib_1.MISS;
19
19
  }
20
20
  async set(k, v) {
21
- await this.dao.save(k, v);
21
+ const opt = this.cfg.ttl ? { expireAt: (0, js_lib_1.nowUnix)() + this.cfg.ttl } : undefined;
22
+ await this.cfg.dao.save(k, v, opt);
22
23
  }
23
24
  async clear() {
24
25
  throw new Error('CommonKeyValueDaoMemoCache.clear is not supported, because cache is expected to be persistent');
@@ -1,9 +1,10 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.serializeJsonField = exports.deserializeJsonField = exports.createdUpdatedIdFields = exports.createdUpdatedFields = void 0;
4
+ const js_lib_1 = require("@naturalcycles/js-lib");
4
5
  const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
5
6
  function createdUpdatedFields(existingObject) {
6
- const now = Math.floor(Date.now() / 1000);
7
+ const now = (0, js_lib_1.nowUnix)();
7
8
  return {
8
9
  created: existingObject?.created || now,
9
10
  updated: now,
@@ -11,7 +12,7 @@ function createdUpdatedFields(existingObject) {
11
12
  }
12
13
  exports.createdUpdatedFields = createdUpdatedFields;
13
14
  function createdUpdatedIdFields(existingObject) {
14
- const now = Math.floor(Date.now() / 1000);
15
+ const now = (0, js_lib_1.nowUnix)();
15
16
  return {
16
17
  created: existingObject?.created || now,
17
18
  id: existingObject?.id || (0, nodejs_lib_1.stringId)(),
@@ -21,7 +21,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
21
21
  *
22
22
  * Determines the size of .saveBatch()
23
23
  */
24
- batchSize?: number;
24
+ chunkSize?: number;
25
25
  /**
26
26
  * @default ErrorMode.SUPPRESS
27
27
  *
@@ -11,7 +11,7 @@ const dbQuery_1 = require("../query/dbQuery");
11
11
  * Handles backpressure.
12
12
  */
13
13
  async function dbPipelineCopy(opt) {
14
- const { batchSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
14
+ const { chunkSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
15
15
  let { tables } = opt;
16
16
  const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';
17
17
  console.log(`>> ${(0, nodejs_lib_1.dimWhite)('dbPipelineCopy')} started...${sinceUpdatedStr}`);
@@ -42,7 +42,7 @@ async function dbPipelineCopy(opt) {
42
42
  metric: table,
43
43
  }),
44
44
  (0, nodejs_lib_1.transformTap)(() => rows++),
45
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
45
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
46
46
  (0, nodejs_lib_1.writableForEach)(async (dbms) => {
47
47
  await dbOutput.saveBatch(table, dbms, saveOptions);
48
48
  }),
@@ -14,7 +14,7 @@ const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
14
14
  * Optionally you can provide mapperPerTable and @param transformMapOptions (one for all mappers) - it will run for each table.
15
15
  */
16
16
  async function dbPipelineRestore(opt) {
17
- const { db, concurrency = 16, batchSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
17
+ const { db, concurrency = 16, chunkSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
18
18
  const strict = errorMode !== js_lib_1.ErrorMode.SUPPRESS;
19
19
  const onlyTables = opt.tables && new Set(opt.tables);
20
20
  const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';
@@ -87,7 +87,7 @@ async function dbPipelineRestore(opt) {
87
87
  ...transformMapOptions,
88
88
  metric: table,
89
89
  }),
90
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
90
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
91
91
  (0, nodejs_lib_1.writableForEach)(async (dbms) => {
92
92
  await db.saveBatch(table, dbms, saveOptions);
93
93
  }),
package/package.json CHANGED
@@ -40,7 +40,7 @@
40
40
  "engines": {
41
41
  "node": ">=18.12"
42
42
  },
43
- "version": "9.11.0",
43
+ "version": "9.12.1",
44
44
  "description": "Lowest Common Denominator API to supported Databases",
45
45
  "keywords": [
46
46
  "db",
@@ -313,17 +313,17 @@ export interface CommonDaoStreamOptions<IN>
313
313
 
314
314
  /**
315
315
  * Applicable to some of stream operations, e.g deleteByQuery.
316
- * If set - `deleteByQuery` won't execute it "all at once", but in batches.
316
+ * If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
317
317
  *
318
318
  * Defaults to undefined, so the operation is executed "all at once".
319
319
  */
320
- batchSize?: number
320
+ chunkSize?: number
321
321
 
322
322
  /**
323
- * When batchSize is set - this option controls how many batches to run concurrently.
323
+ * When chunkSize is set - this option controls how many chunks to run concurrently.
324
324
  * Defaults to 16, "the magic number of JavaScript concurrency".
325
325
  */
326
- batchConcurrency?: number
326
+ chunkConcurrency?: number
327
327
  }
328
328
 
329
329
  export type CommonDaoCreateOptions = CommonDBCreateOptions
@@ -18,6 +18,7 @@ import {
18
18
  ErrorMode,
19
19
  JsonSchemaObject,
20
20
  JsonSchemaRootObject,
21
+ nowUnix,
21
22
  ObjectWithId,
22
23
  pMap,
23
24
  SKIP,
@@ -36,7 +37,7 @@ import {
36
37
  ObjectSchema,
37
38
  ReadableTyped,
38
39
  stringId,
39
- transformBuffer,
40
+ transformChunk,
40
41
  transformLogProgress,
41
42
  transformMap,
42
43
  transformMapSimple,
@@ -606,7 +607,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
606
607
  * "Returns", just to have a type of "Saved"
607
608
  */
608
609
  assignIdCreatedUpdated<T extends BaseDBEntity>(obj: Partial<T>, opt: CommonDaoOptions = {}): T {
609
- const now = Math.floor(Date.now() / 1000)
610
+ const now = nowUnix()
610
611
 
611
612
  if (this.cfg.useCreatedProperty) {
612
613
  obj.created ||= obj.updated || now
@@ -917,9 +918,9 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
917
918
 
918
919
  /**
919
920
  * "Streaming" is implemented by buffering incoming rows into **batches**
920
- * (of size opt.batchSize, which defaults to 500),
921
- * and then executing db.saveBatch(batch) with the concurrency
922
- * of opt.batchConcurrency (which defaults to 16).
921
+ * (of size opt.chunkSize, which defaults to 500),
922
+ * and then executing db.saveBatch(chunk) with the concurrency
923
+ * of opt.chunkConcurrency (which defaults to 16).
923
924
  */
924
925
  streamSaveTransform(opt: CommonDaoStreamSaveOptions<DBM> = {}): Transform[] {
925
926
  this.requireWriteAccess()
@@ -935,7 +936,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
935
936
  const excludeFromIndexes = opt.excludeFromIndexes || this.cfg.excludeFromIndexes
936
937
  const { beforeSave } = this.cfg.hooks!
937
938
 
938
- const { batchSize = 500, batchConcurrency = 16, errorMode } = opt
939
+ const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt
939
940
 
940
941
  return [
941
942
  transformMap<BM, DBM>(
@@ -955,7 +956,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
955
956
  errorMode,
956
957
  },
957
958
  ),
958
- transformBuffer<DBM>({ batchSize }),
959
+ transformChunk<DBM>({ chunkSize }),
959
960
  transformMap<DBM[], DBM[]>(
960
961
  async batch => {
961
962
  await this.cfg.db.saveBatch(table, batch, {
@@ -965,7 +966,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
965
966
  return batch
966
967
  },
967
968
  {
968
- concurrency: batchConcurrency,
969
+ concurrency: chunkConcurrency,
969
970
  errorMode,
970
971
  flattenArrayOutput: true,
971
972
  },
@@ -1002,9 +1003,9 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1002
1003
  }
1003
1004
 
1004
1005
  /**
1005
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
1006
- * `deleteByIds` for each batch concurrently (infinite concurrency).
1007
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
1006
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
1007
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
1008
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
1008
1009
  */
1009
1010
  async deleteByQuery(
1010
1011
  q: DBQuery<DBM>,
@@ -1017,15 +1018,15 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1017
1018
  const started = this.logStarted(op, q.table)
1018
1019
  let deleted = 0
1019
1020
 
1020
- if (opt.batchSize) {
1021
- const { batchSize, batchConcurrency = 16 } = opt
1021
+ if (opt.chunkSize) {
1022
+ const { chunkSize, chunkConcurrency = 16 } = opt
1022
1023
 
1023
1024
  await _pipeline([
1024
1025
  this.cfg.db.streamQuery<DBM>(q.select(['id']), opt),
1025
1026
  transformMapSimple<ObjectWithId, string>(r => r.id, {
1026
1027
  errorMode: ErrorMode.SUPPRESS,
1027
1028
  }),
1028
- transformBuffer<string>({ batchSize }),
1029
+ transformChunk<string>({ chunkSize }),
1029
1030
  transformMap<string[], void>(
1030
1031
  async ids => {
1031
1032
  deleted += await this.cfg.db.deleteByQuery(
@@ -1035,14 +1036,14 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1035
1036
  },
1036
1037
  {
1037
1038
  predicate: _passthroughPredicate,
1038
- concurrency: batchConcurrency,
1039
+ concurrency: chunkConcurrency,
1039
1040
  },
1040
1041
  ),
1041
1042
  // LogProgress should be AFTER the mapper, to be able to report correct stats
1042
1043
  transformLogProgress({
1043
1044
  metric: q.table,
1044
1045
  logEvery: 2, // 500 * 2 === 1000
1045
- batchSize,
1046
+ chunkSize,
1046
1047
  ...opt,
1047
1048
  }),
1048
1049
  writableVoid(),
@@ -2,7 +2,11 @@ import { AppError, CommonLogger, KeyValueTuple, pMap } from '@naturalcycles/js-l
2
2
  import { deflateString, inflateToString, ReadableTyped } from '@naturalcycles/nodejs-lib'
3
3
  import { CommonDaoLogLevel } from '../commondao/common.dao.model'
4
4
  import { CommonDBCreateOptions } from '../db.model'
5
- import { CommonKeyValueDB, KeyValueDBTuple } from './commonKeyValueDB'
5
+ import {
6
+ CommonKeyValueDB,
7
+ CommonKeyValueDBSaveBatchOptions,
8
+ KeyValueDBTuple,
9
+ } from './commonKeyValueDB'
6
10
 
7
11
  export interface CommonKeyValueDaoCfg<T> {
8
12
  db: CommonKeyValueDB
@@ -44,6 +48,8 @@ export interface CommonKeyValueDaoCfg<T> {
44
48
  deflatedJsonValue?: boolean
45
49
  }
46
50
 
51
+ export type CommonKeyValueDaoSaveOptions = CommonKeyValueDBSaveBatchOptions
52
+
47
53
  // todo: logging
48
54
  // todo: readonly
49
55
 
@@ -133,13 +139,13 @@ export class CommonKeyValueDao<T> {
133
139
  } as T
134
140
  }
135
141
 
136
- async patch(id: string, patch: Partial<T>): Promise<T> {
142
+ async patch(id: string, patch: Partial<T>, opt?: CommonKeyValueDaoSaveOptions): Promise<T> {
137
143
  const v: T = {
138
144
  ...(await this.getByIdOrEmpty(id)),
139
145
  ...patch,
140
146
  }
141
147
 
142
- await this.save(id, v)
148
+ await this.save(id, v, opt)
143
149
 
144
150
  return v
145
151
  }
@@ -158,31 +164,35 @@ export class CommonKeyValueDao<T> {
158
164
  return await this.cfg.db.getByIds(this.cfg.table, ids)
159
165
  }
160
166
 
161
- async save(id: string, value: T): Promise<void> {
162
- await this.saveBatch([[id, value]])
167
+ async save(id: string, value: T, opt?: CommonKeyValueDaoSaveOptions): Promise<void> {
168
+ await this.saveBatch([[id, value]], opt)
163
169
  }
164
170
 
165
- async saveAsBuffer(id: string, value: Buffer): Promise<void> {
166
- await this.cfg.db.saveBatch(this.cfg.table, [[id, value]])
171
+ async saveAsBuffer(id: string, value: Buffer, opt?: CommonKeyValueDaoSaveOptions): Promise<void> {
172
+ await this.cfg.db.saveBatch(this.cfg.table, [[id, value]], opt)
167
173
  }
168
174
 
169
- async saveBatch(entries: KeyValueTuple<string, T>[]): Promise<void> {
175
+ async saveBatch(
176
+ entries: KeyValueTuple<string, T>[],
177
+ opt?: CommonKeyValueDaoSaveOptions,
178
+ ): Promise<void> {
179
+ const { mapValueToBuffer } = this.cfg.hooks
170
180
  let bufferEntries: KeyValueDBTuple[]
171
181
 
172
- if (!this.cfg.hooks.mapValueToBuffer) {
182
+ if (!mapValueToBuffer) {
173
183
  bufferEntries = entries as any
174
184
  } else {
175
- bufferEntries = await pMap(entries, async ([id, v]) => [
176
- id,
177
- await this.cfg.hooks.mapValueToBuffer!(v),
178
- ])
185
+ bufferEntries = await pMap(entries, async ([id, v]) => [id, await mapValueToBuffer(v)])
179
186
  }
180
187
 
181
- await this.cfg.db.saveBatch(this.cfg.table, bufferEntries)
188
+ await this.cfg.db.saveBatch(this.cfg.table, bufferEntries, opt)
182
189
  }
183
190
 
184
- async saveBatchAsBuffer(entries: KeyValueDBTuple[]): Promise<void> {
185
- await this.cfg.db.saveBatch(this.cfg.table, entries)
191
+ async saveBatchAsBuffer(
192
+ entries: KeyValueDBTuple[],
193
+ opt?: CommonKeyValueDaoSaveOptions,
194
+ ): Promise<void> {
195
+ await this.cfg.db.saveBatch(this.cfg.table, entries, opt)
186
196
  }
187
197
 
188
198
  async deleteByIds(ids: string[]): Promise<void> {
@@ -204,19 +214,19 @@ export class CommonKeyValueDao<T> {
204
214
  return this.cfg.db.streamValues(this.cfg.table, limit) as ReadableTyped<T>
205
215
  }
206
216
 
207
- const stream: ReadableTyped<T> = this.cfg.db
208
- .streamValues(this.cfg.table, limit)
209
- // .on('error', err => stream.emit('error', err))
210
- .flatMap(async buf => {
217
+ return this.cfg.db.streamValues(this.cfg.table, limit).flatMap(
218
+ async buf => {
211
219
  try {
212
220
  return [await mapBufferToValue(buf)]
213
221
  } catch (err) {
214
222
  this.cfg.logger.error(err)
215
223
  return [] // SKIP
216
224
  }
217
- })
218
-
219
- return stream
225
+ },
226
+ {
227
+ concurrency: 16,
228
+ },
229
+ )
220
230
  }
221
231
 
222
232
  streamEntries(limit?: number): ReadableTyped<KeyValueTuple<string, T>> {
@@ -228,18 +238,18 @@ export class CommonKeyValueDao<T> {
228
238
  >
229
239
  }
230
240
 
231
- const stream: ReadableTyped<KeyValueTuple<string, T>> = this.cfg.db
232
- .streamEntries(this.cfg.table, limit)
233
- // .on('error', err => stream.emit('error', err))
234
- .flatMap(async ([id, buf]) => {
241
+ return this.cfg.db.streamEntries(this.cfg.table, limit).flatMap(
242
+ async ([id, buf]) => {
235
243
  try {
236
244
  return [[id, await mapBufferToValue(buf)]]
237
245
  } catch (err) {
238
246
  this.cfg.logger.error(err)
239
247
  return [] // SKIP
240
248
  }
241
- })
242
-
243
- return stream
249
+ },
250
+ {
251
+ concurrency: 16,
252
+ },
253
+ )
244
254
  }
245
255
  }
@@ -1,6 +1,15 @@
1
- import { AsyncMemoCache, MISS } from '@naturalcycles/js-lib'
1
+ import { AsyncMemoCache, MISS, nowUnix, NumberOfSeconds } from '@naturalcycles/js-lib'
2
2
  import { CommonKeyValueDao } from './commonKeyValueDao'
3
3
 
4
+ export interface CommonKeyValueDaoMemoCacheCfg<VALUE> {
5
+ dao: CommonKeyValueDao<VALUE>
6
+
7
+ /**
8
+ * If set, every `set()` will set `expireAt` (TTL) option.
9
+ */
10
+ ttl?: NumberOfSeconds
11
+ }
12
+
4
13
  /**
5
14
  * AsyncMemoCache implementation, backed by CommonKeyValueDao.
6
15
  *
@@ -10,14 +19,16 @@ import { CommonKeyValueDao } from './commonKeyValueDao'
10
19
  * clear the whole table/cache.
11
20
  */
12
21
  export class CommonKeyValueDaoMemoCache<VALUE = any> implements AsyncMemoCache<string, VALUE> {
13
- constructor(private dao: CommonKeyValueDao<VALUE>) {}
22
+ constructor(private cfg: CommonKeyValueDaoMemoCacheCfg<VALUE>) {}
14
23
 
15
24
  async get(k: string): Promise<VALUE | typeof MISS> {
16
- return (await this.dao.getById(k)) || MISS
25
+ return (await this.cfg.dao.getById(k)) || MISS
17
26
  }
18
27
 
19
28
  async set(k: string, v: VALUE): Promise<void> {
20
- await this.dao.save(k, v)
29
+ const opt = this.cfg.ttl ? { expireAt: nowUnix() + this.cfg.ttl } : undefined
30
+
31
+ await this.cfg.dao.save(k, v, opt)
21
32
  }
22
33
 
23
34
  async clear(): Promise<void> {
package/src/model.util.ts CHANGED
@@ -1,10 +1,10 @@
1
- import { CreatedUpdated, CreatedUpdatedId } from '@naturalcycles/js-lib'
1
+ import { CreatedUpdated, CreatedUpdatedId, nowUnix } from '@naturalcycles/js-lib'
2
2
  import { stringId } from '@naturalcycles/nodejs-lib'
3
3
 
4
4
  export function createdUpdatedFields(
5
5
  existingObject?: Partial<CreatedUpdated> | null,
6
6
  ): CreatedUpdated {
7
- const now = Math.floor(Date.now() / 1000)
7
+ const now = nowUnix()
8
8
  return {
9
9
  created: existingObject?.created || now,
10
10
  updated: now,
@@ -14,7 +14,7 @@ export function createdUpdatedFields(
14
14
  export function createdUpdatedIdFields(
15
15
  existingObject?: Partial<CreatedUpdatedId> | null,
16
16
  ): CreatedUpdatedId {
17
- const now = Math.floor(Date.now() / 1000)
17
+ const now = nowUnix()
18
18
  return {
19
19
  created: existingObject?.created || now,
20
20
  id: existingObject?.id || stringId(),
@@ -8,7 +8,7 @@ import {
8
8
  } from '@naturalcycles/js-lib'
9
9
  import {
10
10
  NDJsonStats,
11
- transformBuffer,
11
+ transformChunk,
12
12
  transformLogProgress,
13
13
  TransformLogProgressOptions,
14
14
  transformMap,
@@ -47,7 +47,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
47
47
  *
48
48
  * Determines the size of .saveBatch()
49
49
  */
50
- batchSize?: number
50
+ chunkSize?: number
51
51
 
52
52
  /**
53
53
  * @default ErrorMode.SUPPRESS
@@ -98,7 +98,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
98
98
  */
99
99
  export async function dbPipelineCopy(opt: DBPipelineCopyOptions): Promise<NDJsonStats> {
100
100
  const {
101
- batchSize = 100,
101
+ chunkSize = 100,
102
102
  dbInput,
103
103
  dbOutput,
104
104
  concurrency = 16,
@@ -153,7 +153,7 @@ export async function dbPipelineCopy(opt: DBPipelineCopyOptions): Promise<NDJson
153
153
  metric: table,
154
154
  }),
155
155
  transformTap(() => rows++),
156
- transformBuffer({ batchSize }),
156
+ transformChunk({ chunkSize }),
157
157
  writableForEach(async dbms => {
158
158
  await dbOutput.saveBatch(table, dbms, saveOptions)
159
159
  }),
@@ -13,7 +13,7 @@ import {
13
13
  } from '@naturalcycles/js-lib'
14
14
  import {
15
15
  NDJsonStats,
16
- transformBuffer,
16
+ transformChunk,
17
17
  transformFilterSync,
18
18
  transformJsonParse,
19
19
  transformLimit,
@@ -125,7 +125,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
125
125
  const {
126
126
  db,
127
127
  concurrency = 16,
128
- batchSize = 100,
128
+ chunkSize = 100,
129
129
  limit,
130
130
  sinceUpdated,
131
131
  inputDirPath,
@@ -224,7 +224,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
224
224
  ...transformMapOptions,
225
225
  metric: table,
226
226
  }),
227
- transformBuffer({ batchSize }),
227
+ transformChunk({ chunkSize }),
228
228
  writableForEach(async dbms => {
229
229
  await db.saveBatch(table, dbms, saveOptions)
230
230
  }),