@naturalcycles/db-lib 9.13.0 → 9.14.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
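The unifying change in 9.14.0: hand-assembled NDJSON/gzip stream pipelines (node:fs, node:zlib, transformSplit, transformJsonParse, transformToNDJson, writablePushToArray) are replaced with the fs2 NDJSON helpers from @naturalcycles/nodejs-lib, and direct node:fs calls are replaced with fs2 wrappers (readdirAsync, statAsync, pathExists, ...). A minimal sketch of the new pattern as it recurs in the hunks below — the Row shape and paths are hypothetical, and the assumption (suggested by the removed cfg.gzip checks) is that the helpers switch gzip on or off based on the .gz file extension:

import { Readable } from 'node:stream'
import { _pipeline, fs2 } from '@naturalcycles/nodejs-lib'

interface Row { id: string }

// Write: createWriteStreamAsNDJSON returns the whole transform chain
// (NDJSON-stringify, optional gzip, file write), spread into _pipeline.
async function saveRows(rows: Row[], filePath: string): Promise<void> {
  await _pipeline([Readable.from(rows), ...fs2.createWriteStreamAsNDJSON(filePath)])
}

// Read: createReadStreamAsNDJSON emits parsed rows and supports the Node
// Readable iterator helpers seen below (.map, .take, .toArray).
async function loadRows(filePath: string): Promise<Row[]> {
  return await fs2.createReadStreamAsNDJSON(filePath).toArray()
}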
@@ -1,11 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.LocalFilePersistencePlugin = void 0;
-const tslib_1 = require("tslib");
-const node_fs_1 = tslib_1.__importDefault(require("node:fs"));
-const promises_1 = tslib_1.__importDefault(require("node:fs/promises"));
 const node_stream_1 = require("node:stream");
-const node_zlib_1 = require("node:zlib");
 const js_lib_1 = require("@naturalcycles/js-lib");
 const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
 /**
@@ -21,7 +17,7 @@ class LocalFilePersistencePlugin {
     }
     async ping() { }
     async getTables() {
-        return (await promises_1.default.readdir(this.cfg.storagePath))
+        return (await nodejs_lib_1.fs2.readdirAsync(this.cfg.storagePath))
             .filter(f => f.includes('.ndjson'))
             .map(f => f.split('.ndjson')[0]);
     }
@@ -31,16 +27,7 @@ class LocalFilePersistencePlugin {
         const filePath = `${this.cfg.storagePath}/${table}.${ext}`;
         if (!(await nodejs_lib_1.fs2.pathExistsAsync(filePath)))
             return [];
-        const transformUnzip = this.cfg.gzip ? [(0, node_zlib_1.createUnzip)()] : [];
-        const rows = [];
-        await (0, nodejs_lib_1._pipeline)([
-            node_fs_1.default.createReadStream(filePath),
-            ...transformUnzip,
-            (0, nodejs_lib_1.transformSplit)(), // splits by \n
-            (0, nodejs_lib_1.transformJsonParse)(),
-            (0, nodejs_lib_1.writablePushToArray)(rows),
-        ]);
-        return rows;
+        return await nodejs_lib_1.fs2.createReadStreamAsNDJSON(filePath).toArray();
     }
     async saveFiles(ops) {
         await (0, js_lib_1.pMap)(ops, async (op) => await this.saveFile(op.table, op.rows), { concurrency: 32 });
@@ -49,13 +36,7 @@ class LocalFilePersistencePlugin {
         await nodejs_lib_1.fs2.ensureDirAsync(this.cfg.storagePath);
         const ext = `ndjson${this.cfg.gzip ? '.gz' : ''}`;
         const filePath = `${this.cfg.storagePath}/${table}.${ext}`;
-        const transformZip = this.cfg.gzip ? [(0, node_zlib_1.createGzip)()] : [];
-        await (0, nodejs_lib_1._pipeline)([
-            node_stream_1.Readable.from(rows),
-            (0, nodejs_lib_1.transformToNDJson)(),
-            ...transformZip,
-            node_fs_1.default.createWriteStream(filePath),
-        ]);
+        await (0, nodejs_lib_1._pipeline)([node_stream_1.Readable.from(rows), ...nodejs_lib_1.fs2.createWriteStreamAsNDJSON(filePath)]);
     }
 }
 exports.LocalFilePersistencePlugin = LocalFilePersistencePlugin;
@@ -29,7 +29,7 @@ export interface InMemoryDBCfg {
      */
     persistenceEnabled: boolean;
     /**
-     * @default ./tmp/inmemorydb
+     * @default ./tmp/inmemorydb.ndjson.gz
      *
      * Will store one ndjson file per table.
      * Will only flush on demand (see .flushToDisk() and .restoreFromDisk() methods).
@@ -1,11 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.InMemoryDBTransaction = exports.InMemoryDB = void 0;
-const tslib_1 = require("tslib");
-const node_fs_1 = tslib_1.__importDefault(require("node:fs"));
-const promises_1 = tslib_1.__importDefault(require("node:fs/promises"));
 const node_stream_1 = require("node:stream");
-const node_zlib_1 = require("node:zlib");
 const js_lib_1 = require("@naturalcycles/js-lib");
 const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
 const __1 = require("../..");
@@ -169,7 +165,6 @@ class InMemoryDB {
         const { persistentStoragePath, persistZip } = this.cfg;
         const started = Date.now();
         await nodejs_lib_1.fs2.emptyDirAsync(persistentStoragePath);
-        const transformZip = persistZip ? [(0, node_zlib_1.createGzip)()] : [];
         let tables = 0;
         // infinite concurrency for now
         await (0, js_lib_1.pMap)(Object.keys(this.data), async (table) => {
@@ -178,12 +173,7 @@ class InMemoryDB {
                 return; // 0 rows
             tables++;
             const fname = `${persistentStoragePath}/${table}.ndjson${persistZip ? '.gz' : ''}`;
-            await (0, nodejs_lib_1._pipeline)([
-                node_stream_1.Readable.from(rows),
-                (0, nodejs_lib_1.transformToNDJson)(),
-                ...transformZip,
-                node_fs_1.default.createWriteStream(fname),
-            ]);
+            await (0, nodejs_lib_1._pipeline)([node_stream_1.Readable.from(rows), ...nodejs_lib_1.fs2.createWriteStreamAsNDJSON(fname)]);
         });
         this.cfg.logger.log(`flushToDisk took ${(0, nodejs_lib_1.dimGrey)((0, js_lib_1._since)(started))} to save ${(0, nodejs_lib_1.yellow)(tables)} tables`);
     }
@@ -196,20 +186,12 @@ class InMemoryDB {
         const started = Date.now();
         await nodejs_lib_1.fs2.ensureDirAsync(persistentStoragePath);
         this.data = {}; // empty it in the beginning!
-        const files = (await promises_1.default.readdir(persistentStoragePath)).filter(f => f.includes('.ndjson'));
+        const files = (await nodejs_lib_1.fs2.readdirAsync(persistentStoragePath)).filter(f => f.includes('.ndjson'));
         // infinite concurrency for now
         await (0, js_lib_1.pMap)(files, async (file) => {
             const fname = `${persistentStoragePath}/${file}`;
             const table = file.split('.ndjson')[0];
-            const transformUnzip = file.endsWith('.gz') ? [(0, node_zlib_1.createUnzip)()] : [];
-            const rows = [];
-            await (0, nodejs_lib_1._pipeline)([
-                node_fs_1.default.createReadStream(fname),
-                ...transformUnzip,
-                (0, nodejs_lib_1.transformSplit)(), // splits by \n
-                (0, nodejs_lib_1.transformJsonParse)(),
-                (0, nodejs_lib_1.writablePushToArray)(rows),
-            ]);
+            const rows = await nodejs_lib_1.fs2.createReadStreamAsNDJSON(fname).toArray();
             this.data[table] = (0, js_lib_1._by)(rows, r => r.id);
         });
         this.cfg.logger.log(`restoreFromDisk took ${(0, nodejs_lib_1.dimGrey)((0, js_lib_1._since)(started))} to read ${(0, nodejs_lib_1.yellow)(files.length)} tables`);
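flushToDisk and restoreFromDisk are the on-demand persistence pair these hunks simplify: one write pipeline per non-empty table, and one .toArray() read per *.ndjson(.gz) file, re-indexed by id. A hypothetical usage sketch (the constructor shape is assumed from the cfg fields visible above):

import { InMemoryDB } from '@naturalcycles/db-lib'

const db = new InMemoryDB({
  persistenceEnabled: true,
  persistentStoragePath: './tmp/inmemorydb',
  persistZip: true, // each table saved as <table>.ndjson.gz
})

await db.flushToDisk() // _pipeline + fs2.createWriteStreamAsNDJSON per table
await db.restoreFromDisk() // fs2.createReadStreamAsNDJSON(fname).toArray() per file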
@@ -423,8 +423,7 @@ class CommonDao {
         const started = this.logStarted(op, q.table, true);
         let count = 0;
         await (0, nodejs_lib_1._pipeline)([
-            this.cfg.db.streamQuery(q.select(['id']), opt),
-            (0, nodejs_lib_1.transformMapSimple)(r => {
+            this.cfg.db.streamQuery(q.select(['id']), opt).map(r => {
                 count++;
                 return r.id;
             }),
@@ -782,13 +781,11 @@ class CommonDao {
         if (opt.chunkSize) {
             const { chunkSize, chunkConcurrency = 32 } = opt;
             await (0, nodejs_lib_1._pipeline)([
-                this.cfg.db.streamQuery(q.select(['id']), opt),
-                (0, nodejs_lib_1.transformMapSimple)(r => r.id, {
-                    errorMode: js_lib_1.ErrorMode.SUPPRESS,
-                }),
+                this.cfg.db.streamQuery(q.select(['id']), opt).map(r => r.id),
                 (0, nodejs_lib_1.transformChunk)({ chunkSize }),
                 (0, nodejs_lib_1.transformMap)(async (ids) => {
-                    deleted += await this.cfg.db.deleteByQuery(dbQuery_1.DBQuery.create(q.table).filterIn('id', ids), opt);
+                    await this.cfg.db.deleteByIds(q.table, ids, opt);
+                    deleted += ids.length;
                 }, {
                     predicate: js_lib_1._passthroughPredicate,
                     concurrency: chunkConcurrency,
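Both CommonDao hunks swap transformMapSimple for Node's built-in Readable iterator helper .map() on the query stream. Note the semantic shift in the chunked delete path: deleted is now incremented by ids.length (the ids attempted via db.deleteByIds) rather than by the count previously returned from db.deleteByQuery. A tiny illustration of the stream-helper substitution (data hypothetical; .map/.toArray are the Node >=18 Readable helpers):

import { Readable } from 'node:stream'

const ids = await Readable.from([{ id: 'a' }, { id: 'b' }])
  .map(r => r.id) // plain per-item mapping, replacing transformMapSimple
  .toArray()
// ids => ['a', 'b']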
@@ -1,5 +1,3 @@
-/// <reference types="node" />
-import { ZlibOptions } from 'node:zlib';
 import { AsyncMapper, ErrorMode, UnixTimestampNumber, StringMap } from '@naturalcycles/js-lib';
 import { NDJsonStats, TransformLogProgressOptions, TransformMapOptions } from '@naturalcycles/nodejs-lib';
 import { CommonDB } from '../common.db';
@@ -65,8 +63,8 @@ export interface DBPipelineBackupOptions extends TransformLogProgressOptions {
     gzip?: boolean;
     /**
      * Only applicable if `gzip` is enabled
+     * Currently not available.
      */
-    zlibOptions?: ZlibOptions;
     /**
      * Optionally you can provide mapper that is going to run for each table.
      *
@@ -96,10 +94,6 @@ export interface DBPipelineBackupOptions extends TransformLogProgressOptions {
      * If true - will use CommonDB.getTableSchema() and emit schema.
      */
     emitSchemaFromDB?: boolean;
-    /**
-     * @default false
-     */
-    sortObjects?: boolean;
 }
 /**
  * Pipeline from input stream(s) to a NDJSON file (optionally gzipped).
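For TypeScript consumers these removed declarations are a compile-time break: option objects passing zlibOptions or sortObjects no longer type-check against DBPipelineBackupOptions. A hypothetical call site:

import { dbPipelineBackup } from '@naturalcycles/db-lib'

declare const db: any // your CommonDB implementation

await dbPipelineBackup({
  db,
  outputDirPath: './backup',
  // zlibOptions: { level: 9 }, // removed in 9.14.0 - would no longer compile
  // sortObjects: true, // removed in 9.14.0 - would no longer compile
})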
@@ -1,10 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.dbPipelineBackup = void 0;
-const tslib_1 = require("tslib");
-const node_fs_1 = tslib_1.__importDefault(require("node:fs"));
-const promises_1 = tslib_1.__importDefault(require("node:fs/promises"));
-const node_zlib_1 = require("node:zlib");
 const js_lib_1 = require("@naturalcycles/js-lib");
 const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
 const index_1 = require("../index");
@@ -18,8 +14,7 @@ const index_1 = require("../index");
  * Optionally you can provide mapperPerTable and @param transformMapOptions (one for all mappers) - it will run for each table.
  */
 async function dbPipelineBackup(opt) {
-    const { db, concurrency = 16, limit = 0, outputDirPath, protectFromOverwrite = false, zlibOptions, mapperPerTable = {}, queryPerTable = {}, logEveryPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, emitSchemaFromDB = false, sortObjects = false, } = opt;
-    const strict = errorMode !== js_lib_1.ErrorMode.SUPPRESS;
+    const { db, concurrency = 16, limit = 0, outputDirPath, protectFromOverwrite = false, mapperPerTable = {}, queryPerTable = {}, logEveryPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, emitSchemaFromDB = false, } = opt;
     const gzip = opt.gzip !== false; // default to true
     let { tables } = opt;
     console.log(`>> ${(0, nodejs_lib_1.dimWhite)('dbPipelineBackup')} started in ${(0, nodejs_lib_1.grey)(outputDirPath)}...`);
@@ -74,11 +69,9 @@ async function dbPipelineBackup(opt) {
             (0, nodejs_lib_1.transformTap)(() => {
                 rows++;
             }),
-            (0, nodejs_lib_1.transformToNDJson)({ strict, sortObjects }),
-            ...(gzip ? [(0, node_zlib_1.createGzip)(zlibOptions)] : []), // optional gzip
-            node_fs_1.default.createWriteStream(filePath),
+            ...nodejs_lib_1.fs2.createWriteStreamAsNDJSON(filePath),
         ]);
-        const { size: sizeBytes } = await promises_1.default.stat(filePath);
+        const { size: sizeBytes } = await nodejs_lib_1.fs2.statAsync(filePath);
         const stats = nodejs_lib_1.NDJsonStats.create({
             tookMillis: Date.now() - started,
             rows,
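The per-table backup pipeline collapses to tap -> fs2.createWriteStreamAsNDJSON, with gzip still defaulting to true (opt.gzip !== false) and compression presumably driven by the .gz extension inside the helper. A sketch of the resulting shape, with a stand-in source instead of db.streamQuery (table name and directory hypothetical):

import { Readable } from 'node:stream'
import { _pipeline, fs2, transformTap } from '@naturalcycles/nodejs-lib'

await fs2.ensureDirAsync('./backup')

let rows = 0
await _pipeline([
  Readable.from([{ id: 'row1' }, { id: 'row2' }]), // stands in for db.streamQuery(...)
  transformTap(() => {
    rows++ // row counting stays outside the write helper
  }),
  ...fs2.createWriteStreamAsNDJSON('./backup/TABLE1.ndjson.gz'),
])
const { size: sizeBytes } = await fs2.statAsync('./backup/TABLE1.ndjson.gz')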
@@ -1,9 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.dbPipelineRestore = void 0;
-const tslib_1 = require("tslib");
-const node_fs_1 = tslib_1.__importDefault(require("node:fs"));
-const node_zlib_1 = require("node:zlib");
 const js_lib_1 = require("@naturalcycles/js-lib");
 const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
 /**
@@ -15,7 +12,6 @@ const nodejs_lib_1 = require("@naturalcycles/nodejs-lib");
  */
 async function dbPipelineRestore(opt) {
     const { db, concurrency = 16, chunkSize = 100, limit, sinceUpdated, inputDirPath, mapperPerTable = {}, saveOptionsPerTable = {}, transformMapOptions, errorMode = js_lib_1.ErrorMode.SUPPRESS, recreateTables = false, } = opt;
-    const strict = errorMode !== js_lib_1.ErrorMode.SUPPRESS;
     const onlyTables = opt.tables && new Set(opt.tables);
     const sinceUpdatedStr = sinceUpdated ? ' since ' + (0, nodejs_lib_1.grey)((0, js_lib_1.localTime)(sinceUpdated).toPretty()) : '';
     console.log(`>> ${(0, nodejs_lib_1.dimWhite)('dbPipelineRestore')} started in ${(0, nodejs_lib_1.grey)(inputDirPath)}...${sinceUpdatedStr}`);
@@ -24,7 +20,7 @@ async function dbPipelineRestore(opt) {
     const sizeByTable = {};
     const statsPerTable = {};
     const tables = [];
-    node_fs_1.default.readdirSync(inputDirPath).forEach(f => {
+    nodejs_lib_1.fs2.readdir(inputDirPath).forEach(f => {
         let table;
         let gzip = false;
         if (f.endsWith('.ndjson')) {
@@ -42,7 +38,7 @@ async function dbPipelineRestore(opt) {
         tables.push(table);
         if (gzip)
             tablesToGzip.add(table);
-        sizeByTable[table] = node_fs_1.default.statSync(`${inputDirPath}/${f}`).size;
+        sizeByTable[table] = nodejs_lib_1.fs2.stat(`${inputDirPath}/${f}`).size;
     });
     const sizeStrByTable = (0, js_lib_1._mapValues)(sizeByTable, (_k, b) => (0, js_lib_1._hb)(b));
     console.log(`${(0, nodejs_lib_1.yellow)(tables.length)} ${(0, nodejs_lib_1.boldWhite)('table(s)')}:\n`, sizeStrByTable);
@@ -50,7 +46,7 @@ async function dbPipelineRestore(opt) {
     if (recreateTables) {
         await (0, js_lib_1.pMap)(tables, async (table) => {
             const schemaFilePath = `${inputDirPath}/${table}.schema.json`;
-            if (!node_fs_1.default.existsSync(schemaFilePath)) {
+            if (!nodejs_lib_1.fs2.pathExists(schemaFilePath)) {
                 console.warn(`${schemaFilePath} does not exist!`);
                 return;
             }
@@ -67,17 +63,13 @@ async function dbPipelineRestore(opt) {
         const sizeBytes = sizeByTable[table];
         console.log(`<< ${(0, nodejs_lib_1.grey)(filePath)} ${(0, nodejs_lib_1.dimWhite)((0, js_lib_1._hb)(sizeBytes))} started...`);
         await (0, nodejs_lib_1._pipeline)([
-            node_fs_1.default.createReadStream(filePath),
-            ...(gzip ? [(0, node_zlib_1.createUnzip)()] : []),
-            (0, nodejs_lib_1.transformSplit)(), // splits by \n
-            (0, nodejs_lib_1.transformJsonParse)({ strict }),
+            nodejs_lib_1.fs2.createReadStreamAsNDJSON(filePath).take(limit || Number.POSITIVE_INFINITY),
             (0, nodejs_lib_1.transformTap)(() => rows++),
             (0, nodejs_lib_1.transformLogProgress)({
                 logEvery: 1000,
                 ...opt,
                 metric: table,
             }),
-            (0, nodejs_lib_1.transformLimit)({ limit }),
             ...(sinceUpdated
                 ? [(0, nodejs_lib_1.transformFilterSync)(r => r.updated >= sinceUpdated)]
                 : []),
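The limit handling moves as well: transformLimit({ limit }) is gone, and the cap is applied directly on the NDJSON read stream via .take(limit || Number.POSITIVE_INFINITY), where a falsy limit means no cap. The same idiom on a plain stream (values hypothetical):

import { Readable } from 'node:stream'

const limit = 0 // falsy => take(Infinity) => pass everything through
const rows = await Readable.from([1, 2, 3])
  .take(limit || Number.POSITIVE_INFINITY)
  .toArray()
// rows => [1, 2, 3]; with limit = 2 it would be [1, 2]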
package/package.json CHANGED
@@ -40,7 +40,7 @@
   "engines": {
     "node": ">=18.12"
   },
-  "version": "9.13.0",
+  "version": "9.14.0",
   "description": "Lowest Common Denominator API to supported Databases",
   "keywords": [
     "db",
@@ -1,16 +1,6 @@
-import fs from 'node:fs'
-import fsp from 'node:fs/promises'
 import { Readable } from 'node:stream'
-import { createGzip, createUnzip } from 'node:zlib'
 import { ObjectWithId, pMap } from '@naturalcycles/js-lib'
-import {
-  transformJsonParse,
-  transformSplit,
-  transformToNDJson,
-  writablePushToArray,
-  _pipeline,
-  fs2,
-} from '@naturalcycles/nodejs-lib'
+import { _pipeline, fs2 } from '@naturalcycles/nodejs-lib'
 import { DBSaveBatchOperation } from '../../db.model'
 import { FileDBPersistencePlugin } from './file.db.model'
 
@@ -43,7 +33,7 @@ export class LocalFilePersistencePlugin implements FileDBPersistencePlugin {
   async ping(): Promise<void> {}
 
   async getTables(): Promise<string[]> {
-    return (await fsp.readdir(this.cfg.storagePath))
+    return (await fs2.readdirAsync(this.cfg.storagePath))
       .filter(f => f.includes('.ndjson'))
       .map(f => f.split('.ndjson')[0]!)
   }
@@ -55,19 +45,7 @@ export class LocalFilePersistencePlugin implements FileDBPersistencePlugin {
 
     if (!(await fs2.pathExistsAsync(filePath))) return []
 
-    const transformUnzip = this.cfg.gzip ? [createUnzip()] : []
-
-    const rows: ROW[] = []
-
-    await _pipeline([
-      fs.createReadStream(filePath),
-      ...transformUnzip,
-      transformSplit(), // splits by \n
-      transformJsonParse(),
-      writablePushToArray(rows),
-    ])
-
-    return rows
+    return await fs2.createReadStreamAsNDJSON(filePath).toArray()
   }
 
   async saveFiles(ops: DBSaveBatchOperation<any>[]): Promise<void> {
@@ -78,13 +56,7 @@ export class LocalFilePersistencePlugin implements FileDBPersistencePlugin {
     await fs2.ensureDirAsync(this.cfg.storagePath)
     const ext = `ndjson${this.cfg.gzip ? '.gz' : ''}`
     const filePath = `${this.cfg.storagePath}/${table}.${ext}`
-    const transformZip = this.cfg.gzip ? [createGzip()] : []
 
-    await _pipeline([
-      Readable.from(rows),
-      transformToNDJson(),
-      ...transformZip,
-      fs.createWriteStream(filePath),
-    ])
+    await _pipeline([Readable.from(rows), ...fs2.createWriteStreamAsNDJSON(filePath)])
   }
 }
@@ -1,7 +1,4 @@
-import fs from 'node:fs'
-import fsp from 'node:fs/promises'
 import { Readable } from 'node:stream'
-import { createGzip, createUnzip } from 'node:zlib'
 import {
   generateJsonSchemaFromData,
   JsonSchemaObject,
@@ -20,10 +17,6 @@ import {
 import {
   bufferReviver,
   ReadableTyped,
-  transformJsonParse,
-  transformSplit,
-  transformToNDJson,
-  writablePushToArray,
   _pipeline,
   dimGrey,
   yellow,
@@ -78,7 +71,7 @@ export interface InMemoryDBCfg {
   persistenceEnabled: boolean
 
   /**
-   * @default ./tmp/inmemorydb
+   * @default ./tmp/inmemorydb.ndjson.gz
    *
    * Will store one ndjson file per table.
   * Will only flush on demand (see .flushToDisk() and .restoreFromDisk() methods).
@@ -312,7 +305,6 @@ export class InMemoryDB implements CommonDB {
 
     await fs2.emptyDirAsync(persistentStoragePath)
 
-    const transformZip = persistZip ? [createGzip()] : []
     let tables = 0
 
     // infinite concurrency for now
@@ -323,12 +315,7 @@ export class InMemoryDB implements CommonDB {
       tables++
       const fname = `${persistentStoragePath}/${table}.ndjson${persistZip ? '.gz' : ''}`
 
-      await _pipeline([
-        Readable.from(rows),
-        transformToNDJson(),
-        ...transformZip,
-        fs.createWriteStream(fname),
-      ])
+      await _pipeline([Readable.from(rows), ...fs2.createWriteStreamAsNDJSON(fname)])
     })
 
     this.cfg.logger!.log(
@@ -349,24 +336,14 @@ export class InMemoryDB implements CommonDB {
 
     this.data = {} // empty it in the beginning!
 
-    const files = (await fsp.readdir(persistentStoragePath)).filter(f => f.includes('.ndjson'))
+    const files = (await fs2.readdirAsync(persistentStoragePath)).filter(f => f.includes('.ndjson'))
 
     // infinite concurrency for now
     await pMap(files, async file => {
       const fname = `${persistentStoragePath}/${file}`
       const table = file.split('.ndjson')[0]!
 
-      const transformUnzip = file.endsWith('.gz') ? [createUnzip()] : []
-
-      const rows: any[] = []
-
-      await _pipeline([
-        fs.createReadStream(fname),
-        ...transformUnzip,
-        transformSplit(), // splits by \n
-        transformJsonParse(),
-        writablePushToArray(rows),
-      ])
+      const rows = await fs2.createReadStreamAsNDJSON(fname).toArray()
 
       this.data[table] = _by(rows, r => r.id)
     })
@@ -40,7 +40,6 @@ import {
   transformChunk,
   transformLogProgress,
   transformMap,
-  transformMapSimple,
   transformNoOp,
   writableVoid,
 } from '@naturalcycles/nodejs-lib'
@@ -580,8 +579,7 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
     let count = 0
 
     await _pipeline([
-      this.cfg.db.streamQuery<DBM>(q.select(['id']), opt),
-      transformMapSimple<DBM, string>(r => {
+      this.cfg.db.streamQuery<DBM>(q.select(['id']), opt).map(r => {
         count++
         return r.id
       }),
@@ -1022,17 +1020,12 @@ export class CommonDao<BM extends BaseDBEntity, DBM extends BaseDBEntity = BM> {
       const { chunkSize, chunkConcurrency = 32 } = opt
 
       await _pipeline([
-        this.cfg.db.streamQuery<DBM>(q.select(['id']), opt),
-        transformMapSimple<ObjectWithId, string>(r => r.id, {
-          errorMode: ErrorMode.SUPPRESS,
-        }),
+        this.cfg.db.streamQuery<DBM>(q.select(['id']), opt).map(r => r.id),
         transformChunk<string>({ chunkSize }),
         transformMap<string[], void>(
           async ids => {
-            deleted += await this.cfg.db.deleteByQuery(
-              DBQuery.create(q.table).filterIn('id', ids),
-              opt,
-            )
+            await this.cfg.db.deleteByIds(q.table, ids, opt)
+            deleted += ids.length
           },
           {
             predicate: _passthroughPredicate,
@@ -1,6 +1,3 @@
-import fs from 'node:fs'
-import fsp from 'node:fs/promises'
-import { createGzip, ZlibOptions } from 'node:zlib'
 import {
   AppError,
   AsyncMapper,
@@ -18,7 +15,6 @@ import {
   transformMap,
   TransformMapOptions,
   transformTap,
-  transformToNDJson,
   _pipeline,
   boldWhite,
   dimWhite,
@@ -101,8 +97,9 @@ export interface DBPipelineBackupOptions extends TransformLogProgressOptions {
 
   /**
    * Only applicable if `gzip` is enabled
+   * Currently not available.
    */
-  zlibOptions?: ZlibOptions
+  // zlibOptions?: ZlibOptions
 
   /**
    * Optionally you can provide mapper that is going to run for each table.
@@ -138,11 +135,6 @@ export interface DBPipelineBackupOptions extends TransformLogProgressOptions {
    * If true - will use CommonDB.getTableSchema() and emit schema.
    */
   emitSchemaFromDB?: boolean
-
-  /**
-   * @default false
-   */
-  sortObjects?: boolean
 }
 
 /**
@@ -161,16 +153,13 @@ export async function dbPipelineBackup(opt: DBPipelineBackupOptions): Promise<ND
     limit = 0,
     outputDirPath,
     protectFromOverwrite = false,
-    zlibOptions,
     mapperPerTable = {},
     queryPerTable = {},
     logEveryPerTable = {},
     transformMapOptions,
     errorMode = ErrorMode.SUPPRESS,
     emitSchemaFromDB = false,
-    sortObjects = false,
   } = opt
-  const strict = errorMode !== ErrorMode.SUPPRESS
   const gzip = opt.gzip !== false // default to true
 
   let { tables } = opt
@@ -243,12 +232,10 @@ export async function dbPipelineBackup(opt: DBPipelineBackupOptions): Promise<ND
         transformTap(() => {
           rows++
         }),
-        transformToNDJson({ strict, sortObjects }),
-        ...(gzip ? [createGzip(zlibOptions)] : []), // optional gzip
-        fs.createWriteStream(filePath),
+        ...fs2.createWriteStreamAsNDJSON(filePath),
       ])
 
-      const { size: sizeBytes } = await fsp.stat(filePath)
+      const { size: sizeBytes } = await fs2.statAsync(filePath)
 
       const stats = NDJsonStats.create({
         tookMillis: Date.now() - started,
@@ -1,5 +1,3 @@
-import fs from 'node:fs'
-import { createUnzip } from 'node:zlib'
 import {
   AsyncMapper,
   ErrorMode,
@@ -15,13 +13,10 @@ import {
   NDJsonStats,
   transformChunk,
   transformFilterSync,
-  transformJsonParse,
-  transformLimit,
   transformLogProgress,
   TransformLogProgressOptions,
   transformMap,
   TransformMapOptions,
-  transformSplit,
   transformTap,
   writableForEach,
   _pipeline,
@@ -135,7 +130,6 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
     errorMode = ErrorMode.SUPPRESS,
     recreateTables = false,
   } = opt
-  const strict = errorMode !== ErrorMode.SUPPRESS
   const onlyTables = opt.tables && new Set(opt.tables)
 
   const sinceUpdatedStr = sinceUpdated ? ' since ' + grey(localTime(sinceUpdated).toPretty()) : ''
@@ -150,7 +144,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
   const sizeByTable: Record<string, number> = {}
   const statsPerTable: Record<string, NDJsonStats> = {}
   const tables: string[] = []
-  fs.readdirSync(inputDirPath).forEach(f => {
+  fs2.readdir(inputDirPath).forEach(f => {
     let table: string
     let gzip = false
 
@@ -167,7 +161,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
 
     tables.push(table)
     if (gzip) tablesToGzip.add(table)
-    sizeByTable[table] = fs.statSync(`${inputDirPath}/${f}`).size
+    sizeByTable[table] = fs2.stat(`${inputDirPath}/${f}`).size
   })
 
   const sizeStrByTable = _mapValues(sizeByTable, (_k, b) => _hb(b))
@@ -179,7 +173,7 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
   if (recreateTables) {
     await pMap(tables, async table => {
       const schemaFilePath = `${inputDirPath}/${table}.schema.json`
-      if (!fs.existsSync(schemaFilePath)) {
+      if (!fs2.pathExists(schemaFilePath)) {
        console.warn(`${schemaFilePath} does not exist!`)
        return
      }
@@ -204,17 +198,13 @@ export async function dbPipelineRestore(opt: DBPipelineRestoreOptions): Promise<
    console.log(`<< ${grey(filePath)} ${dimWhite(_hb(sizeBytes))} started...`)
 
    await _pipeline([
-      fs.createReadStream(filePath),
-      ...(gzip ? [createUnzip()] : []),
-      transformSplit(), // splits by \n
-      transformJsonParse({ strict }),
+      fs2.createReadStreamAsNDJSON(filePath).take(limit || Number.POSITIVE_INFINITY),
      transformTap(() => rows++),
      transformLogProgress({
        logEvery: 1000,
        ...opt,
        metric: table,
      }),
-      transformLimit({ limit }),
      ...(sinceUpdated
        ? [transformFilterSync<BaseDBEntity>(r => r.updated >= sinceUpdated)]
        : []),