@naturalcycles/db-lib 9.12.0 → 9.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -112,9 +112,9 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
112
112
  saveBatchAsDBM(dbms: Unsaved<DBM>[], opt?: CommonDaoSaveBatchOptions<DBM>): Promise<DBM[]>;
113
113
  /**
114
114
  * "Streaming" is implemented by buffering incoming rows into **batches**
115
- * (of size opt.batchSize, which defaults to 500),
116
- * and then executing db.saveBatch(batch) with the concurrency
117
- * of opt.batchConcurrency (which defaults to 16).
115
+ * (of size opt.chunkSize, which defaults to 500),
116
+ * and then executing db.saveBatch(chunk) with the concurrency
117
+ * of opt.chunkConcurrency (which defaults to 16).
118
118
  */
119
119
  streamSaveTransform(opt?: CommonDaoStreamSaveOptions<DBM>): Transform[];
120
120
  /**
@@ -123,9 +123,9 @@ export declare class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity
123
123
  deleteById(id?: string | null, opt?: CommonDaoOptions): Promise<number>;
124
124
  deleteByIds(ids: string[], opt?: CommonDaoOptions): Promise<number>;
125
125
  /**
126
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
127
- * `deleteByIds` for each batch concurrently (infinite concurrency).
128
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
126
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
127
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
128
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
129
129
  */
130
130
  deleteByQuery(q: DBQuery<DBM>, opt?: CommonDaoStreamDeleteOptions<DBM>): Promise<number>;
131
131
  updateById(id: string, patch: DBPatch<DBM>, opt?: CommonDaoOptions): Promise<number>;
@@ -697,9 +697,9 @@ class CommonDao {
697
697
  }
698
698
  /**
699
699
  * "Streaming" is implemented by buffering incoming rows into **batches**
700
- * (of size opt.batchSize, which defaults to 500),
701
- * and then executing db.saveBatch(batch) with the concurrency
702
- * of opt.batchConcurrency (which defaults to 16).
700
+ * (of size opt.chunkSize, which defaults to 500),
701
+ * and then executing db.saveBatch(chunk) with the concurrency
702
+ * of opt.chunkConcurrency (which defaults to 16).
703
703
  */
704
704
  streamSaveTransform(opt = {}) {
705
705
  this.requireWriteAccess();
@@ -711,7 +711,7 @@ class CommonDao {
711
711
  }
712
712
  const excludeFromIndexes = opt.excludeFromIndexes || this.cfg.excludeFromIndexes;
713
713
  const { beforeSave } = this.cfg.hooks;
714
- const { batchSize = 500, batchConcurrency = 16, errorMode } = opt;
714
+ const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt;
715
715
  return [
716
716
  (0, nodejs_lib_1.transformMap)(async (bm) => {
717
717
  this.assignIdCreatedUpdated(bm, opt); // mutates
@@ -725,7 +725,7 @@ class CommonDao {
725
725
  }, {
726
726
  errorMode,
727
727
  }),
728
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
728
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
729
729
  (0, nodejs_lib_1.transformMap)(async (batch) => {
730
730
  await this.cfg.db.saveBatch(table, batch, {
731
731
  ...opt,
@@ -733,7 +733,7 @@ class CommonDao {
733
733
  });
734
734
  return batch;
735
735
  }, {
736
- concurrency: batchConcurrency,
736
+ concurrency: chunkConcurrency,
737
737
  errorMode,
738
738
  flattenArrayOutput: true,
739
739
  }),
@@ -768,9 +768,9 @@ class CommonDao {
768
768
  return count;
769
769
  }
770
770
  /**
771
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
772
- * `deleteByIds` for each batch concurrently (infinite concurrency).
773
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
771
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
772
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
773
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
774
774
  */
775
775
  async deleteByQuery(q, opt = {}) {
776
776
  this.requireWriteAccess();
@@ -779,25 +779,25 @@ class CommonDao {
779
779
  const op = `deleteByQuery(${q.pretty()})`;
780
780
  const started = this.logStarted(op, q.table);
781
781
  let deleted = 0;
782
- if (opt.batchSize) {
783
- const { batchSize, batchConcurrency = 16 } = opt;
782
+ if (opt.chunkSize) {
783
+ const { chunkSize, chunkConcurrency = 16 } = opt;
784
784
  await (0, nodejs_lib_1._pipeline)([
785
785
  this.cfg.db.streamQuery(q.select(['id']), opt),
786
786
  (0, nodejs_lib_1.transformMapSimple)(r => r.id, {
787
787
  errorMode: js_lib_1.ErrorMode.SUPPRESS,
788
788
  }),
789
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
789
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
790
790
  (0, nodejs_lib_1.transformMap)(async (ids) => {
791
791
  deleted += await this.cfg.db.deleteByQuery(dbQuery_1.DBQuery.create(q.table).filterIn('id', ids), opt);
792
792
  }, {
793
793
  predicate: js_lib_1._passthroughPredicate,
794
- concurrency: batchConcurrency,
794
+ concurrency: chunkConcurrency,
795
795
  }),
796
796
  // LogProgress should be AFTER the mapper, to be able to report correct stats
797
797
  (0, nodejs_lib_1.transformLogProgress)({
798
798
  metric: q.table,
799
799
  logEvery: 2, // 500 * 2 === 1000
800
- batchSize,
800
+ chunkSize,
801
801
  ...opt,
802
802
  }),
803
803
  (0, nodejs_lib_1.writableVoid)(),
@@ -250,15 +250,15 @@ export interface CommonDaoStreamOptions<IN> extends CommonDaoOptions, TransformL
250
250
  errorMode?: ErrorMode;
251
251
  /**
252
252
  * Applicable to some of stream operations, e.g deleteByQuery.
253
- * If set - `deleteByQuery` won't execute it "all at once", but in batches.
253
+ * If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
254
254
  *
255
255
  * Defaults to undefined, so the operation is executed "all at once".
256
256
  */
257
- batchSize?: number;
257
+ chunkSize?: number;
258
258
  /**
259
- * When batchSize is set - this option controls how many batches to run concurrently.
259
+ * When chunkSize is set - this option controls how many chunks to run concurrently.
260
260
  * Defaults to 16, "the magic number of JavaScript concurrency".
261
261
  */
262
- batchConcurrency?: number;
262
+ chunkConcurrency?: number;
263
263
  }
264
264
  export type CommonDaoCreateOptions = CommonDBCreateOptions;
@@ -21,7 +21,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
21
21
  *
22
22
  * Determines the size of .saveBatch()
23
23
  */
24
- batchSize?: number;
24
+ chunkSize?: number;
25
25
  /**
26
26
  * @default ErrorMode.SUPPRESS
27
27
  *
@@ -11,7 +11,7 @@ const dbQuery_1 = require("../query/dbQuery");
11
11
  * Handles backpressure.
12
12
  */
13
13
  async function dbPipelineCopy(opt) {
14
- const { batchSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
14
+ const { chunkSize = 100, dbInput, dbOutput, concurrency = 16, limit = 0, sinceUpdated, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, } = opt;
15
15
  let { tables } = opt;
16
16
  const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';
17
17
  console.log(`>> ${(0, nodejs_lib_1.dimWhite)('dbPipelineCopy')} started...${sinceUpdatedStr}`);
@@ -42,7 +42,7 @@ async function dbPipelineCopy(opt) {
42
42
  metric: table,
43
43
  }),
44
44
  (0, nodejs_lib_1.transformTap)(() => rows++),
45
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
45
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
46
46
  (0, nodejs_lib_1.writableForEach)(async (dbms) => {
47
47
  await dbOutput.saveBatch(table, dbms, saveOptions);
48
48
  }),
@@ -14,7 +14,7 @@ const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
14
14
  * Optionally you can provide mapperPerTable and @param transformMapOptions (one for all mappers) - it will run for each table.
15
15
  */
16
16
  async function dbPipelineRestore(opt) {
17
- const { db, concurrency = 16, batchSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
17
+ const { db, concurrency = 16, chunkSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
18
18
  const strict = errorMode !== js_lib_1.ErrorMode.SUPPRESS;
19
19
  const onlyTables = opt.tables && new Set(opt.tables);
20
20
  const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';
@@ -87,7 +87,7 @@ async function dbPipelineRestore(opt) {
87
87
  ...transformMapOptions,
88
88
  metric: table,
89
89
  }),
90
- (0, nodejs_lib_1.transformBuffer)({ batchSize }),
90
+ (0, nodejs_lib_1.transformChunk)({ chunkSize }),
91
91
  (0, nodejs_lib_1.writableForEach)(async (dbms) => {
92
92
  await db.saveBatch(table, dbms, saveOptions);
93
93
  }),
package/package.json CHANGED
@@ -40,7 +40,7 @@
40
40
  "engines": {
41
41
  "node": ">=18.12"
42
42
  },
43
- "version": "9.12.0",
43
+ "version": "9.12.1",
44
44
  "description": "Lowest Common Denominator API to supported Databases",
45
45
  "keywords": [
46
46
  "db",
@@ -313,17 +313,17 @@ export interface CommonDaoStreamOptions<IN>
313
313
 
314
314
  /**
315
315
  * Applicable to some of stream operations, e.g deleteByQuery.
316
- * If set - `deleteByQuery` won't execute it "all at once", but in batches.
316
+ * If set - `deleteByQuery` won't execute it "all at once", but in batches (chunks).
317
317
  *
318
318
  * Defaults to undefined, so the operation is executed "all at once".
319
319
  */
320
- batchSize?: number
320
+ chunkSize?: number
321
321
 
322
322
  /**
323
- * When batchSize is set - this option controls how many batches to run concurrently.
323
+ * When chunkSize is set - this option controls how many chunks to run concurrently.
324
324
  * Defaults to 16, "the magic number of JavaScript concurrency".
325
325
  */
326
- batchConcurrency?: number
326
+ chunkConcurrency?: number
327
327
  }
328
328
 
329
329
  export type CommonDaoCreateOptions = CommonDBCreateOptions
@@ -37,7 +37,7 @@ import {
37
37
  ObjectSchema,
38
38
  ReadableTyped,
39
39
  stringId,
40
- transformBuffer,
40
+ transformChunk,
41
41
  transformLogProgress,
42
42
  transformMap,
43
43
  transformMapSimple,
@@ -918,9 +918,9 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
918
918
 
919
919
  /**
920
920
  * "Streaming" is implemented by buffering incoming rows into **batches**
921
- * (of size opt.batchSize, which defaults to 500),
922
- * and then executing db.saveBatch(batch) with the concurrency
923
- * of opt.batchConcurrency (which defaults to 16).
921
+ * (of size opt.chunkSize, which defaults to 500),
922
+ * and then executing db.saveBatch(chunk) with the concurrency
923
+ * of opt.chunkConcurrency (which defaults to 16).
924
924
  */
925
925
  streamSaveTransform(opt: CommonDaoStreamSaveOptions<DBM> = {}): Transform[] {
926
926
  this.requireWriteAccess()
@@ -936,7 +936,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
936
936
  const excludeFromIndexes = opt.excludeFromIndexes || this.cfg.excludeFromIndexes
937
937
  const { beforeSave } = this.cfg.hooks!
938
938
 
939
- const { batchSize = 500, batchConcurrency = 16, errorMode } = opt
939
+ const { chunkSize = 500, chunkConcurrency = 16, errorMode } = opt
940
940
 
941
941
  return [
942
942
  transformMap<BM, DBM>(
@@ -956,7 +956,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
956
956
  errorMode,
957
957
  },
958
958
  ),
959
- transformBuffer<DBM>({ batchSize }),
959
+ transformChunk<DBM>({ chunkSize }),
960
960
  transformMap<DBM[], DBM[]>(
961
961
  async batch => {
962
962
  await this.cfg.db.saveBatch(table, batch, {
@@ -966,7 +966,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
966
966
  return batch
967
967
  },
968
968
  {
969
- concurrency: batchConcurrency,
969
+ concurrency: chunkConcurrency,
970
970
  errorMode,
971
971
  flattenArrayOutput: true,
972
972
  },
@@ -1003,9 +1003,9 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1003
1003
  }
1004
1004
 
1005
1005
  /**
1006
- * Pass `stream: true` option to use Streaming: it will Stream the query, batch by 500, and execute
1007
- * `deleteByIds` for each batch concurrently (infinite concurrency).
1008
- * This is expected to be more memory-efficient way of deleting big numbers of rows.
1006
+ * Pass `chunkSize: number` (e.g. 500) option to use Streaming: it will Stream the query, chunk by 500, and execute
1007
+ * `deleteByIds` for each chunk concurrently (infinite concurrency).
1008
+ * This is expected to be a more memory-efficient way of deleting large numbers of rows.
1009
1009
  */
1010
1010
  async deleteByQuery(
1011
1011
  q: DBQuery<DBM>,
@@ -1018,15 +1018,15 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1018
1018
  const started = this.logStarted(op, q.table)
1019
1019
  let deleted = 0
1020
1020
 
1021
- if (opt.batchSize) {
1022
- const { batchSize, batchConcurrency = 16 } = opt
1021
+ if (opt.chunkSize) {
1022
+ const { chunkSize, chunkConcurrency = 16 } = opt
1023
1023
 
1024
1024
  await _pipeline([
1025
1025
  this.cfg.db.streamQuery<DBM>(q.select(['id']), opt),
1026
1026
  transformMapSimple<ObjectWithId, string>(r => r.id, {
1027
1027
  errorMode: ErrorMode.SUPPRESS,
1028
1028
  }),
1029
- transformBuffer<string>({ batchSize }),
1029
+ transformChunk<string>({ chunkSize }),
1030
1030
  transformMap<string[], void>(
1031
1031
  async ids => {
1032
1032
  deleted += await this.cfg.db.deleteByQuery(
@@ -1036,14 +1036,14 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
1036
1036
  },
1037
1037
  {
1038
1038
  predicate: _passthroughPredicate,
1039
- concurrency: batchConcurrency,
1039
+ concurrency: chunkConcurrency,
1040
1040
  },
1041
1041
  ),
1042
1042
  // LogProgress should be AFTER the mapper, to be able to report correct stats
1043
1043
  transformLogProgress({
1044
1044
  metric: q.table,
1045
1045
  logEvery: 2, // 500 * 2 === 1000
1046
- batchSize,
1046
+ chunkSize,
1047
1047
  ...opt,
1048
1048
  }),
1049
1049
  writableVoid(),
@@ -8,7 +8,7 @@ import {
8
8
  } from '@naturalcycles/js-lib'
9
9
  import {
10
10
  NDJsonStats,
11
- transformBuffer,
11
+ transformChunk,
12
12
  transformLogProgress,
13
13
  TransformLogProgressOptions,
14
14
  transformMap,
@@ -47,7 +47,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
47
47
  *
48
48
  * Determines the size of .saveBatch()
49
49
  */
50
- batchSize?: number
50
+ chunkSize?: number
51
51
 
52
52
  /**
53
53
  * @default ErrorMode.SUPPRESS
@@ -98,7 +98,7 @@ export interface DBPipelineCopyOptions extends TransformLogProgressOptions {
98
98
  */
99
99
  export async function dbPipelineCopy(opt: DBPipelineCopyOptions): Promise<NDJsonStats> {
100
100
  const {
101
- batchSize = 100,
101
+ chunkSize = 100,
102
102
  dbInput,
103
103
  dbOutput,
104
104
  concurrency = 16,
@@ -153,7 +153,7 @@ export async function dbPipelineCopy(opt: DBPipelineCopyOptions): Promise<NDJson
153
153
  metric: table,
154
154
  }),
155
155
  transformTap(() => rows++),
156
- transformBuffer({ batchSize }),
156
+ transformChunk({ chunkSize }),
157
157
  writableForEach(async dbms => {
158
158
  await dbOutput.saveBatch(table, dbms, saveOptions)
159
159
  }),
@@ -13,7 +13,7 @@ import {
13
13
  } from '@naturalcycles/js-lib'
14
14
  import {
15
15
  NDJsonStats,
16
- transformBuffer,
16
+ transformChunk,
17
17
  transformFilterSync,
18
18
  transformJsonParse,
19
19
  transformLimit,
@@ -125,7 +125,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
125
125
  const {
126
126
  db,
127
127
  concurrency = 16,
128
- batchSize = 100,
128
+ chunkSize = 100,
129
129
  limit,
130
130
  sinceUpdated,
131
131
  inputDirPath,
@@ -224,7 +224,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
224
224
  ...transformMapOptions,
225
225
  metric: table,
226
226
  }),
227
- transformBuffer({ batchSize }),
227
+ transformChunk({ chunkSize }),
228
228
  writableForEach(async dbms => {
229
229
  await db.saveBatch(table, dbms, saveOptions)
230
230
  }),