@engine9/input-tools 2.0.7

import fs from 'node:fs';
import path from 'node:path';
import zlib from 'node:zlib';
import nodestream from 'node:stream';
import promises from 'node:stream/promises';
import { parse, stringify } from 'csv';
import debug$0 from 'debug';
import xlstream from 'xlstream';
import JSON5 from 'json5';
import languageEncoding from 'detect-file-encoding-and-language';
import R2Worker from './R2.js';
import S3Worker from './S3.js';
import ParquetWorker from './Parquet.js';
import { bool, getTempFilename, getStringArray, getTempDir, makeStrings, streamPacket, relativeDate } from './tools.js';

const fsp = fs.promises;
const { Readable, Transform, PassThrough, Writable } = nodestream;
const { pipeline } = promises;

const debug = debug$0('@engine9-io/file');
const { getXlsxStream } = xlstream;

function Worker({ accountId }) {
  this.accountId = accountId;
}

class LineReaderTransform extends Transform {
  constructor(options = {}) {
    super({ ...options, readableObjectMode: true });
    this.buffer = '';
  }

  _transform(chunk, encoding, callback) {
    this.buffer += chunk.toString();
    const lines = this.buffer.split(/\r?\n/);
    this.buffer = lines.pop();
    lines.forEach((line) => this.push(line));
    callback();
  }

  _flush(callback) {
    if (this.buffer) {
      this.push(this.buffer);
    }
    callback();
  }
}
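
// Editorial sketch (not part of the package): LineReaderTransform splits a
// byte stream into lines, so it can sit between a raw file stream and any
// line-oriented parser, e.g.:
//   fs.createReadStream('data.jsonl')
//     .pipe(new LineReaderTransform())
//     .on('data', (line) => console.log(line));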

Worker.prototype.csvToObjectTransforms = function (options) {
  const transforms = [];
  const delimiter = options.delimiter || ',';
  const headerMapping = options.headerMapping || function (d) { return d; };
  let lastLine = null;
  let head = null;
  const skipLinesWithError = bool(options.skip_lines_with_error, false);
  const parserOptions = {
    relax: true,
    skip_empty_lines: true,
    delimiter,
    max_limit_on_data_read: 10000000,
    skip_lines_with_error: skipLinesWithError
  };
  if (options.skip) parserOptions.from_line = options.skip;
  if (options.relax_column_count) parserOptions.relax_column_count = true;
  if (options.quote_escape) {
    parserOptions.escape = options.quote_escape;
  }
  if (options.limit) {
    parserOptions.to = options.limit;
  }
  debug('Parser options=', parserOptions);
  const parser = parse(parserOptions);
  parser.on('error', (error) => {
    debug('fileToObjectStream: Error parsing csv file');
    debug(lastLine);
    throw error;
  });
  const blankAndHeaderCheck = new Transform({
    objectMode: true,
    transform(row, enc, cb) {
      // Skip blank rows
      if (row.length === 0) return cb();
      if (row.length === 1 && !row[0]) return cb();
      // The first non-blank row supplies the headers
      if (!head) {
        head = row.map(headerMapping);
        return cb();
      }
      const o = {};
      head.forEach((_h, i) => {
        const h = _h.trim();
        if (h) {
          o[h] = row[i];
        }
      });
      lastLine = row.join(delimiter);
      return cb(null, o);
    }
  });
  transforms.push(parser);
  transforms.push(blankAndHeaderCheck);
  return { transforms };
};
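
// Editorial sketch (not part of the package): the returned transforms are
// meant to be piped, in order, onto a decoded text stream, e.g.:
//   const { transforms } = worker.csvToObjectTransforms({ delimiter: ',' });
//   let stream = fs.createReadStream('people.csv', { encoding: 'utf-8' });
//   transforms.forEach((t) => { stream = stream.pipe(t); });
//   // stream now emits one object per row, keyed by the header line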

Worker.prototype.detectEncoding = async function (options) {
  if (options.encoding_override) return { encoding: options.encoding_override };
  // Limit to only the top N bytes -- for performance.
  // Be wary, though, as gzip files may require a certain minimum number of bytes to decompress
  const bytes = 64 * 1024;
  const buff = Buffer.alloc(bytes);
  const fd = await fsp.open(options.filename);
  await fd.read(buff, 0, bytes);
  await fd.close(); // release the file handle once the sample is read
  let finalBuff = buff;
  if (options.filename.slice(-3) === '.gz') {
    // This code deals with scenarios where the buffer coming in may not be exactly the gzip
    // needed chunk size.
    finalBuff = await new Promise((resolve, reject) => {
      const bufferBuilder = [];
      const decompressStream = zlib
        .createGunzip()
        .on('data', (chunk) => {
          bufferBuilder.push(chunk);
        })
        .on('close', () => {
          resolve(Buffer.concat(bufferBuilder));
        })
        .on('error', (err) => {
          // errno -5 is an unexpected-EOF error, which is expected here
          // because we only feed the gunzip a truncated sample
          if (err.errno !== -5) {
            reject(err);
          }
        });
      decompressStream.write(buff);
      decompressStream.end();
    });
  }
  return languageEncoding(finalBuff);
};

Worker.prototype.detectEncoding.metadata = {
  options: {
    filename: { required: true }
  }
};
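
// Editorial sketch (not part of the package): detectEncoding returns the
// result shape of detect-file-encoding-and-language, e.g.:
//   const { encoding } = await worker.detectEncoding({ filename: 'export.csv.gz' });
//   // encoding is something like 'UTF-8' or 'ISO-8859-1'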

Worker.prototype.xlsxToObjectStream = async function (options) {
  let { filename } = options;
  if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
    // xlstream needs a local file path, so copy remote files down first
    let worker = null;
    if (filename.startsWith('r2://')) {
      worker = new R2Worker(this);
    } else {
      worker = new S3Worker(this);
    }
    const target = getTempFilename({ targetFilename: filename.split('/').pop() });
    await worker.copy({ filename, target });
    filename = target;
  }
  let stream = await getXlsxStream({
    filePath: filename,
    sheet: 0
  });
  let keys = null;
  stream = stream.pipe(
    new Transform({
      objectMode: true,
      transform(d, enc, cb) {
        // The first row supplies the keys; subsequent rows become objects
        if (!keys) {
          keys = d?.raw?.arr;
          cb();
        } else {
          const o = {};
          keys.forEach((k, i) => {
            o[k] = d?.raw?.arr?.[i];
          });
          cb(null, o);
        }
      }
    })
  );
  return { stream };
};

Worker.prototype.getFormat = async function (options) {
  const { sourcePostfix, filename, format: formatOverride } = options;
  let postfix = sourcePostfix || filename.toLowerCase().split('.').pop();
  if (postfix === 'gz') {
    // For .gz files the format is the extension before .gz (e.g. data.csv.gz -> csv)
    postfix = filename.toLowerCase().split('.');
    postfix = postfix[postfix.length - 2];
  }
  return formatOverride || postfix;
};

/*
  Commonly used method to transform a file into a stream of objects.
*/
Worker.prototype.fileToObjectStream = async function (options) {
  const { filename, columns, limit: limitOption, format: formatOverride } = options;
  // Handle the case where a stream (or array) was passed in directly
  if (options.stream) {
    if (Array.isArray(options.stream)) {
      return { stream: Readable.from(options.stream) };
    }
    // probably already a stream
    if (typeof options.stream === 'object') return { stream: options.stream };
    throw new Error(`Invalid stream type:${typeof options.stream}`);
  }
  let limit;
  if (limitOption) limit = parseInt(limitOption, 10);
  if (!filename) throw new Error('fileToObjectStream: filename is required');
  if (filename.split('.').pop().toLowerCase() === 'xlsx') {
    return this.xlsxToObjectStream(options);
  }
  let postfix = options.sourcePostfix || filename.toLowerCase().split('.').pop();
  if (postfix === 'zip') {
    debug('Invalid filename:', { filename });
    throw new Error('Cowardly refusing to turn a .zip file into an object stream, turn into a csv first');
  }
  const streamInfo = await this.stream({
    filename,
    columns,
    limit
  });
  const { encoding } = streamInfo;
  let { stream } = streamInfo;
  if (!stream) throw new Error(`No stream found in fileToObjectStream from filename ${filename}`);
  if (encoding === 'object') {
    // Already a stream of objects, nothing more to do
    return { stream };
  }
  let count = 0;
  let transforms = [];
  if (postfix === 'gz') {
    const gunzip = zlib.createGunzip();
    transforms.push(gunzip);
    gunzip.setEncoding(encoding);
    postfix = filename.toLowerCase().split('.');
    postfix = postfix[postfix.length - 2];
    debug(`Using gunzip parser because postfix is .gz, encoding=${encoding}`);
  } else {
    stream.setEncoding(encoding);
  }
  const format = formatOverride || postfix;
  debug(`Reading file ${filename} with encoding: ${encoding} and format ${format}`);
  if (format === 'csv') {
    const csvTransforms = this.csvToObjectTransforms({ ...options });
    transforms = transforms.concat(csvTransforms.transforms);
  } else if (format === 'txt') {
    const csvTransforms = this.csvToObjectTransforms({ ...options, delimiter: '\t' });
    transforms = transforms.concat(csvTransforms.transforms);
  } else if (format === 'jsonl') {
    /* JSON Lines: one JSON value per line. The first line may optionally be
       an array of field names, with the values in JSON arrays thereafter. */
    let headers = null;
    const lineReader = new LineReaderTransform();
    const jsonlTransform = new Transform({
      objectMode: true,
      transform(d, enc, cb) {
        if (!d) return cb();
        let obj;
        try {
          obj = JSON5.parse(d);
        } catch (e) {
          debug('Invalid line:');
          debug(d);
          throw e;
        }
        // If the first line is an array of names, map the subsequent
        // value arrays onto those names
        if (headers === null) {
          if (Array.isArray(obj)) {
            headers = obj;
            return cb();
          }
          headers = false;
        }
        if (headers) {
          const mapped = {};
          headers.forEach((name, i) => {
            mapped[name] = obj[i];
          });
          this.push(mapped);
        } else {
          this.push(obj);
        }
        return cb();
      }
    });
    transforms.push(lineReader);
    transforms.push(jsonlTransform);
  } else {
    throw new Error(`Unsupported file type: ${postfix}`);
  }
  const countAndDebug = new Transform({
    objectMode: true,
    transform(d, enc, cb) {
      if (count === 0) {
        debug('Sample object from file:', d);
      }
      count += 1;
      if ((count < 5000 && count % 1000 === 0) || count % 50000 === 0) {
        debug(`fileToObjectStream transformed ${count} lines`);
      }
      this.push(d);
      cb();
    },
    flush(cb) {
      // Legacy versions pushed a placeholder record for empty files;
      // now we just report the final count
      debug(`Completed reading file, records=${count}`);
      cb();
    }
  });
  transforms.push(countAndDebug);
  transforms.forEach((t) => {
    stream = stream.pipe(t);
  });
  return { stream };
};
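
// Editorial sketch (not part of the package), assuming a worker constructed
// with an accountId: fileToObjectStream accepts a local path, an s3:// or
// r2:// URL, or an existing stream/array:
//   const worker = new Worker({ accountId: 'engine9' });
//   const { stream } = await worker.fileToObjectStream({ filename: 'people.csv.gz', limit: 100 });
//   for await (const row of stream) {
//     // row is a plain object keyed by the CSV header
//   }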

Worker.prototype.getFileWriterStream = async function (options = {}) {
  const accountId = options.accountId || this.accountId;
  if (!accountId) throw new Error('getFileWriterStream has no accountId');
  const targetFormat = options.targetFormat || 'csv';
  const tempDir = await getTempDir({ accountId });
  let { fileExtendedType } = options;
  if (fileExtendedType) fileExtendedType += '.';
  else fileExtendedType = '';
  // This could change, but dates are easier to read in a filename
  // than UUIDs, so this is a unique-ish filename generator
  const uniqueNumberedDate = `${new Date().toISOString().replace(/[^0-9]*/g, '')}.${Math.floor(Math.random() * 1000)}`;
  let filename = `${tempDir}${path.sep}${uniqueNumberedDate}.${fileExtendedType}${targetFormat}`;
  if (bool(options.gzip, false)) filename += '.gz';
  const stream = fs.createWriteStream(filename);
  debug('FileWriterStream writing to file ', filename);
  return { filename, stream };
};

Worker.prototype.getOutputStreams = async function (options) {
  const { filename, stream: fileWriterStream } = await this.getFileWriterStream(options);
  let { transform } = options;
  if (typeof options.transform === 'function') {
    if (options.transform.length === 3) {
      // A 3-argument function is treated as an (item, encoding, callback) transform
      transform = new Transform({
        objectMode: true,
        transform(item, encoding, cb) {
          options.transform(item, encoding, cb);
        }
      });
    } else {
      // Otherwise it's a plain synchronous item -> item mapping
      transform = new Transform({
        objectMode: true,
        transform(item, encoding, cb) {
          cb(null, options.transform(item));
        }
      });
    }
  } else if (options.transform) {
    transform = options.transform;
  }
  const { flatten } = options;
  let flattenTransform = null;
  if (bool(flatten, false)) {
    flattenTransform = new Transform({
      objectMode: true,
      transform(item, enc, cb) {
        // Flatten one level: object values are merged into the output,
        // and for arrays the first element is used
        let o = {};
        Object.keys(item).forEach((k) => {
          let v = item[k];
          if (!o[k]) {
            if (typeof v === 'object') {
              while (Array.isArray(v)) [v] = v; // get first array item
              o = { ...o, ...v };
            } else {
              o[k] = v;
            }
          }
        });
        cb(null, o);
      }
    });
  }
  const stats = {
    records: 0
  };
  let stringifier;
  if (options.targetFormat === 'jsonl') {
    stringifier = new Transform({
      objectMode: true,
      transform(d, encoding, cb) {
        cb(null, `${JSON.stringify(d)}\n`);
      }
    });
  } else {
    stringifier = stringify({ header: true });
  }
  let gzip = new PassThrough();
  if (options.gzip) {
    gzip = zlib.createGzip();
  }
  const streams = [
    transform,
    flattenTransform,
    new Transform({
      objectMode: true,
      transform(d, enc, cb) {
        stats.records += 1;
        cb(null, d);
      }
    }),
    stringifier,
    gzip,
    fileWriterStream
  ].filter(Boolean);
  return { filename, streams, stats };
};

Worker.prototype.objectStreamToFile = async function (options) {
  const { filename, streams, stats } = await this.getOutputStreams(options);
  const { stream: inStream } = options;
  streams.unshift(inStream);
  await pipeline(streams);
  return { filename, records: stats.records };
};
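
// Editorial sketch (not part of the package): objectStreamToFile is the
// write-side counterpart of fileToObjectStream:
//   const { filename, records } = await worker.objectStreamToFile({
//     stream: Readable.from([{ id: 1 }, { id: 2 }]),
//     targetFormat: 'jsonl',
//     gzip: true,
//   });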

Worker.prototype.transform = async function (options) {
  const worker = this;
  const { filename } = options;
  debug(`Transforming ${filename}`);
  options.filename = filename;
  let { stream } = await worker.fileToObjectStream(options);
  if (typeof stream.pipe !== 'function') {
    debug(stream);
    throw new Error('No pipe in stream');
  }
  let t = options.transform;
  // Remove the transform so downstream calls don't re-apply it
  delete options.transform;
  if (!t) {
    t = function (d, enc, cb) {
      d.is_test_transform = true;
      cb(null, d);
    };
  }
  if (!Array.isArray(t)) t = [t];
  t.forEach((item) => {
    let f = item;
    if (typeof f === 'function') {
      f = new Transform({
        objectMode: true,
        transform: f
      });
    }
    stream = stream.pipe(f);
  });
  const { targetFormat } = options;
  if (
    !targetFormat &&
    (filename.toLowerCase().slice(-4) === '.csv' || filename.toLowerCase().slice(-7) === '.csv.gz')
  ) {
    options.targetFormat = 'csv';
  }
  return worker.objectStreamToFile({ ...options, stream });
};

Worker.prototype.transform.metadata = {
  options: {
    sourcePostfix: { description: "Override the source postfix, if for example it's a csv" },
    encoding: { description: 'Manual override of source file encoding' },
    names: { description: 'Target field names (e.g. my_new_field,x,y,z)' },
    values: {
      description:
        "Comma delimited source field name, or Handlebars [[ ]] merge fields (e.g. 'my_field,x,y,z', '[[field1]]-[[field2]]', etc)"
    },
    targetFilename: { description: 'Custom name of the output file (default auto-generated)' },
    targetFormat: { description: 'Output format -- csv supported, or none for txt (default)' },
    targetRowDelimiter: { description: 'Row delimiter (default \\n)' },
    targetFieldDelimiter: { description: 'Field delimiter (default \\t or ,)' }
  }
};
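
// Editorial sketch (not part of the package): transforms are written in the
// Node Transform callback style, i.e. (record, encoding, callback):
//   const { filename, records } = await worker.transform({
//     filename: 'input.csv',
//     transform(d, enc, cb) {
//       d.loaded_at = new Date().toISOString();
//       cb(null, d);
//     },
//   });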

Worker.prototype.testTransform = async function (options) {
  return this.transform({
    ...options,
    transform(d, enc, cb) {
      d.transform_time = new Date();
      cb(null, d);
    }
  });
};

Worker.prototype.testTransform.metadata = {
  options: {
    filename: true
  }
};

/* Get a stream from an actual stream, or an array, or a file */
Worker.prototype.stream = async function (options) {
  const { stream: inputStream, packet, type, columns, limit, filename: filenameOpt } = options;
  let filename = filenameOpt;
  if (inputStream) {
    if (Array.isArray(inputStream)) {
      return { stream: Readable.from(inputStream) };
    }
    // probably already a stream
    if (typeof inputStream === 'object') return { stream: inputStream, encoding: 'object' };
    throw new Error(`Invalid stream type:${typeof inputStream}`);
  } else if (filename) {
    if (filename.startsWith('engine9-accounts/')) {
      filename = `${process.env.ENGINE9_ACCOUNT_DIR}/${filename.slice('engine9-accounts/'.length)}`;
    }
    let encoding;
    let stream;
    if (filename.slice(-8) === '.parquet') {
      const pq = new ParquetWorker(this);
      stream = (await pq.stream({ filename, columns, limit })).stream;
      encoding = 'object';
    } else if (filename.startsWith('s3://')) {
      const s3Worker = new S3Worker(this);
      stream = (await s3Worker.stream({ filename, columns, limit })).stream;
      encoding = 'UTF-8';
    } else if (filename.startsWith('r2://')) {
      const r2Worker = new R2Worker(this);
      stream = (await r2Worker.stream({ filename, columns, limit })).stream;
      encoding = 'UTF-8';
    } else {
      // Check if the file exists, and fail fast if not --
      // otherwise the stream hangs around as an open handle
      try {
        await fsp.stat(filename);
      } catch (e) {
        debug(
          `Error reading file ${filename}, current directory: ${process.cwd()},import.meta.dirname:${
            import.meta.dirname
          }`
        );
        throw e;
      }
      stream = fs.createReadStream(filename);
      encoding = (await this.detectEncoding({ filename })).encoding;
    }
    return { stream, encoding };
  } else if (packet) {
    let { stream: packetStream } = await streamPacket({ packet, type, limit });
    const { transforms } = this.csvToObjectTransforms({});
    transforms.forEach((t) => {
      packetStream = packetStream.pipe(t);
    });
    return { stream: packetStream };
  } else {
    throw new Error('stream must be passed a stream, filename, or packet');
  }
};
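
// Editorial sketch (not part of the package): stream() is the lowest-level
// reader; the returned encoding tells the caller whether parsing remains:
//   const { stream, encoding } = await worker.stream({ filename: 's3://bucket/key.csv' });
//   // encoding === 'object' means rows are already parsed (e.g. parquet sources)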

Worker.prototype.sample = async function (opts) {
  opts.limit = opts.limit || 10;
  const { stream } = await this.fileToObjectStream(opts);
  return stream.toArray();
};

Worker.prototype.sample.metadata = {
  options: {
    filename: {}
  }
};

Worker.prototype.toArray = async function (opts) {
  const { stream } = await this.fileToObjectStream(opts);
  return stream.toArray();
};

Worker.prototype.toArray.metadata = {
  options: {
    filename: {}
  }
};

Worker.prototype.write = async function (opts) {
  const { filename, content } = opts;
  if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
    const worker = new (filename.startsWith('r2://') ? R2Worker : S3Worker)(this);
    const parts = filename.split('/');
    const directory = parts.slice(0, -1).join('/');
    const file = parts.slice(-1)[0];
    await worker.write({
      directory,
      file,
      content
    });
  } else {
    const directory = path.dirname(filename);
    await fsp.mkdir(directory, { recursive: true });
    await fsp.writeFile(filename, content);
  }
  return { success: true, filename };
};

Worker.prototype.write.metadata = {
  options: {
    filename: { description: 'Location to write content to, can be local or s3:// or r2://' },
    content: {}
  }
};
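
// Editorial sketch (not part of the package), with hypothetical names:
//   await worker.write({ filename: 'r2://bucket/exports/hello.txt', content: 'hello' });
//   await worker.write({ filename: '/tmp/exports/hello.txt', content: 'hello' });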

async function streamToString(stream) {
  // Collect all chunks from the stream and decode as UTF-8
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(Buffer.from(chunk));
  }
  return Buffer.concat(chunks).toString('utf-8');
}

/*
  Retrieves a file (or stream) and parses the contents as JSON/JSON5.
*/
Worker.prototype.json = async function (opts) {
  const { stream } = await this.stream(opts);
  const str = await streamToString(stream);
  try {
    return JSON5.parse(str);
  } catch (e) {
    debug(e);
    throw new Error(`Unparseable JSON received: ${opts.filename || '(native stream)'}`);
  }
};

Worker.prototype.json.metadata = {
  options: {
    filename: { description: 'Get a javascript object from a file' }
  }
};
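
// Editorial sketch (not part of the package): since parsing goes through
// JSON5, files with comments and trailing commas are accepted:
//   const config = await worker.json({ filename: 'config.json5' });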

Worker.prototype.list = async function ({ directory, start: s, end: e }) {
  if (!directory) throw new Error('directory is required');
  let start = null;
  let end = null;
  if (s) start = relativeDate(s);
  if (e) end = relativeDate(e);
  if (directory.startsWith('s3://') || directory.startsWith('r2://')) {
    const worker = new (directory.startsWith('r2://') ? R2Worker : S3Worker)(this);
    return worker.list({ directory, start, end });
  }
  const a = await fsp.readdir(directory, { withFileTypes: true });
  const withModified = [];
  for (const file of a) {
    const fullPath = path.join(directory, file.name);
    const stats = await fsp.stat(fullPath);
    // Only include files modified within the start/end window
    if (start && stats.mtime.getTime() < start.getTime()) {
      // before the window -- do not include
    } else if (end && stats.mtime.getTime() > end.getTime()) {
      // after the window -- do not include
    } else {
      withModified.push({
        name: file.name,
        type: file.isDirectory() ? 'directory' : 'file',
        modifiedAt: new Date(stats.mtime).toISOString()
      });
    }
  }
  return withModified;
};

Worker.prototype.list.metadata = {
  options: {
    directory: { required: true }
  }
};

Worker.prototype.listAll = async function ({ directory, start: s, end: e }) {
  if (!directory) throw new Error('directory is required');
  let start = null;
  let end = null;
  if (s) start = relativeDate(s).getTime();
  if (e) end = relativeDate(e).getTime();
  if (directory.startsWith('s3://') || directory.startsWith('r2://')) {
    const worker = new (directory.startsWith('r2://') ? R2Worker : S3Worker)(this);
    return worker.listAll({ directory, start, end });
  }
  const a = await fsp.readdir(directory, { recursive: true });
  const files = a.map((f) => `${directory}/${f}`);
  if (!start && !end) {
    return files;
  }
  // Stat files with bounded concurrency to avoid exhausting file handles
  const pLimit = await import('p-limit');
  const limitedMethod = pLimit.default(10);
  const filesWithinLimit = [];
  await Promise.all(
    files.map((filename) =>
      limitedMethod(async () => {
        const stats = await fsp.stat(filename);
        if (start && stats.mtime.getTime() < start) {
          // before the window -- do not include
        } else if (end && stats.mtime.getTime() > end) {
          // after the window -- do not include
        } else {
          filesWithinLimit.push({
            name: filename,
            type: stats.isDirectory() ? 'directory' : 'file',
            modifiedAt: new Date(stats.mtime).toISOString()
          });
        }
      })
    )
  );
  return filesWithinLimit;
};

Worker.prototype.listAll.metadata = {
  options: {
    directory: { required: true },
    start: {},
    end: {}
  }
};

Worker.prototype.moveAll = async function (options) {
  const { directory, targetDirectory } = options;
  if (!directory) throw new Error('directory is required');
  if (directory.startsWith('s3://') || directory.startsWith('r2://')) {
    const worker = new (directory.startsWith('r2://') ? R2Worker : S3Worker)(this);
    return worker.moveAll(options);
  }
  const a = await this.listAll(options);
  // listAll returns strings when no window is given, objects with a name otherwise
  const configs = a.map((f) => {
    const filename = typeof f === 'string' ? f : f.name;
    return {
      filename,
      target: filename.replace(directory, targetDirectory)
    };
  });
  const pLimit = await import('p-limit');
  const limitedMethod = pLimit.default(10);
  return Promise.all(configs.map(({ filename, target }) => limitedMethod(async () => this.move({ filename, target }))));
};

Worker.prototype.moveAll.metadata = {
  options: {
    directory: { required: true },
    targetDirectory: { required: true }
  }
};

Worker.prototype.empty = async function ({ directory }) {
  if (!directory) throw new Error('directory is required');
  if (directory.startsWith('s3://') || directory.startsWith('r2://')) {
    // Deliberately not emptying S3/R2 this way -- too dangerous
    throw new Error('Cannot empty an s3:// or r2:// directory');
  }
  const removed = [];
  for (const file of await fsp.readdir(directory)) {
    removed.push(file);
    await fsp.unlink(path.join(directory, file));
  }
  return { directory, removed };
};

Worker.prototype.empty.metadata = {
  options: {
    directory: { required: true }
  }
};

Worker.prototype.removeAll = async function (options) {
  const files = await this.listAll(options);
  const pLimit = await import('p-limit');
  const limitedMethod = pLimit.default(10);
  // listAll returns strings when no window is given, objects with a name otherwise
  return Promise.all(
    files.map((f) => {
      const filename = typeof f === 'string' ? f : f.name;
      return limitedMethod(async () => this.remove({ filename }));
    })
  );
};

Worker.prototype.removeAll.metadata = {
  options: {
    directory: { required: true },
    start: {},
    end: {}
  }
};

Worker.prototype.remove = async function ({ filename }) {
  if (!filename) throw new Error('filename is required');
  if (typeof filename !== 'string') throw new Error(`filename isn't a string:${JSON.stringify(filename)}`);
  if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
    let worker = null;
    if (filename.startsWith('r2://')) {
      worker = new R2Worker(this);
    } else {
      worker = new S3Worker(this);
    }
    await worker.remove({ filename });
  } else {
    await fsp.unlink(filename);
  }
  return { removed: filename };
};

Worker.prototype.remove.metadata = {
  options: {
    filename: {}
  }
};

Worker.prototype.move = async function ({ filename, target, remove = true }) {
  if (!target) throw new Error('target is required');
  if (typeof target !== 'string') throw new Error(`target isn't a string:${JSON.stringify(target)}`);
  if (target.startsWith('s3://') || target.startsWith('r2://')) {
    if (
      (target.startsWith('s3://') && filename.startsWith('r2://')) ||
      (target.startsWith('r2://') && filename.startsWith('s3://'))
    ) {
      throw new Error('Cowardly not copying between services');
    }
    let worker = null;
    if (target.startsWith('r2://')) {
      worker = new R2Worker(this);
    } else {
      worker = new S3Worker(this);
    }
    if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
      // Remote to remote within the same service: copy, then delete
      const output = await worker.copy({ filename, target });
      if (remove) await worker.remove({ filename });
      return output;
    }
    const parts = target.split('/');
    return worker.put({ filename, directory: parts.slice(0, -1).join('/'), file: parts.slice(-1)[0] });
  }
  await fsp.mkdir(path.dirname(target), { recursive: true });
  if (remove) {
    try {
      await fsp.rename(filename, target);
    } catch (e) {
      // rename can fail when moving across filesystems,
      // so fall back to copy + unlink
      debug('Assuming this is a cross-filesystem move error, ignoring ', e);
      await fsp.copyFile(filename, target);
      await fsp.unlink(filename);
    }
  } else {
    await fsp.copyFile(filename, target);
  }
  return { filename: target };
};

Worker.prototype.move.metadata = {
  options: {
    filename: {},
    target: {}
  }
};
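
// Editorial sketch (not part of the package), with hypothetical names:
//   await worker.move({ filename: '/tmp/a.csv', target: 's3://bucket/incoming/a.csv' });
//   // copy() below is move() with remove=false, so the source is preserved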

Worker.prototype.copy = async function (opts) {
  return this.move({ ...opts, remove: false });
};

Worker.prototype.copy.metadata = {
  options: {
    filename: {},
    target: {}
  }
};

Worker.prototype.stat = async function ({ filename }) {
  if (!filename) throw new Error('filename is required');
  const output = {};
  if (filename.slice(-8) === '.parquet') {
    const pq = new ParquetWorker(this);
    output.schema = (await pq.schema({ filename }))?.schema;
    output.records = (await pq.meta({ filename }))?.records;
  }
  if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
    const worker = new (filename.startsWith('r2://') ? R2Worker : S3Worker)(this);
    Object.assign(output, await worker.stat({ filename }));
  } else {
    const { ctime, birthtime, size } = await fsp.stat(filename);
    const modifiedAt = new Date(ctime);
    // Some filesystems don't track a birthtime, so fall back to ctime
    let createdAt = birthtime;
    if (createdAt === 0 || !createdAt) createdAt = ctime;
    createdAt = new Date(createdAt);
    Object.assign(output, {
      createdAt,
      modifiedAt,
      size
    });
  }
  return output;
};

Worker.prototype.stat.metadata = {
  options: {
    filename: {}
  }
};

Worker.prototype.download = async function ({ filename }) {
  if (!filename) throw new Error('filename is required');
  if (filename.startsWith('s3://') || filename.startsWith('r2://')) {
    const worker = new (filename.startsWith('r2://') ? R2Worker : S3Worker)(this);
    return worker.download({ filename });
  }
  throw new Error('Cannot download a local file');
};

Worker.prototype.download.metadata = {
  options: {
    filename: {}
  }
};

Worker.prototype.head = async function (options) {
  const limit = options.limit || 3;
  const { stream } = await this.fileToObjectStream({ ...options, limit });
  const chunks = [];
  let counter = 0;
  for await (const chunk of stream) {
    chunks.push(chunk);
    counter += 1;
    if (counter >= limit) break;
  }
  return chunks;
};

Worker.prototype.head.metadata = {
  options: {
    filename: { required: true }
  }
};

Worker.prototype.columns = async function (options) {
  const head = await this.head(options);
  if (head.length === 0) {
    return {
      records: 0,
      likelyHeaderLines: 0,
      columns: []
    };
  }
  let likelyHeaderLines = 1;
  const columns = Object.keys(head[0]);
  // Heuristic: header names rarely contain these symbols, so if they
  // appear, the first line is probably data, not a header
  const s = columns.join(',');
  if (s.match(/[()@#%!]/)) {
    likelyHeaderLines = 0;
  }
  return {
    likelyHeaderLines,
    columns
  };
};

Worker.prototype.columns.metadata = {
  options: {
    filename: { required: true }
  }
};

Worker.prototype.count = async function (options) {
  const { stream } = await this.fileToObjectStream(options);
  const sample = [];
  const limit = options.limit || 5;
  let records = 0;
  for await (const chunk of stream) {
    records += 1;
    if (records < limit) {
      sample.push(chunk);
    }
  }
  return { sample, records };
};

Worker.prototype.count.metadata = {
  options: {
    filename: { required: true }
  }
};

// Get a set of unique keys, as computed by a uniqueFunction, across a list
// of files. Note the set is held in memory, so it can grow large.
Worker.prototype.getUniqueSet = async function (options) {
  const existingFiles = getStringArray(options.filenames);
  const sample = {};
  let { uniqueFunction } = options;
  if (!uniqueFunction) {
    uniqueFunction = (o) => JSON.stringify(o);
  }
  const uniqueSet = new Set();
  for (const filename of existingFiles) {
    const { stream: existsStream } = await this.fileToObjectStream({ filename });
    await pipeline(
      existsStream,
      new Transform({
        objectMode: true,
        transform(d, enc, cb) {
          const v = uniqueFunction(makeStrings(d)) || '';
          if (uniqueSet.size < 3) {
            sample[v] = d;
          }
          uniqueSet.add(v);
          cb(null, d);
        }
      }),
      new Writable({
        objectMode: true,
        write(d, enc, cb) {
          cb();
        }
      })
    );
    debug(`Finished loading ${filename}`);
  }
  return { uniqueFunction, uniqueSet, sample };
};

Worker.prototype.getUniqueStream = async function (options) {
  const includeDuplicateSourceRecords = bool(options.includeDuplicateSourceRecords, false);
  const { uniqueSet, uniqueFunction, sample } = await this.getUniqueSet({
    filenames: options.existingFiles,
    uniqueFunction: options.uniqueFunction
  });
  const { stream: inStream } = await this.fileToObjectStream(options);
  const uniqueStream = inStream.pipe(
    new Transform({
      objectMode: true,
      transform(d, enc, cb) {
        const v = uniqueFunction(makeStrings(d)) || '';
        if (!v) {
          // A falsey unique value is always included
          cb(null, d);
        } else if (uniqueSet.has(v)) {
          // Already seen -- drop it
          cb();
        } else {
          if (!includeDuplicateSourceRecords) {
            // Add it to the set so later source duplicates are dropped too
            uniqueSet.add(v);
          }
          cb(null, d);
        }
      }
    })
  );
  return { stream: uniqueStream, sample };
};

Worker.prototype.getUniqueStream.metadata = {
  options: {
    existingFiles: {},
    uniqueFunction: {},
    filename: { description: 'Specify a source filename or a stream' },
    stream: { description: 'Specify a source filename or a stream' },
    includeDuplicateSourceRecords: {
      description: 'Sometimes you want the output to include source dupes, sometimes not, default false'
    }
  }
};

Worker.prototype.getUniqueFile = async function (options) {
  const { stream, sample } = await this.getUniqueStream(options);
  const { filename, records } = await this.objectStreamToFile({ stream });
  return { filename, records, sample };
};

Worker.prototype.getUniqueFile.metadata = {
  options: {
    existingFiles: {},
    uniqueFunction: {},
    filename: { description: 'Specify a source filename or a stream' },
    stream: { description: 'Specify a source filename or a stream' },
    includeDuplicateSourceRecords: {
      description: 'Sometimes you want the output to include source dupes, sometimes not, default false'
    }
  }
};
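
// Editorial sketch (not part of the package): getUniqueFile writes only the
// records of filename whose unique key does not appear in existingFiles:
//   const { filename, records } = await worker.getUniqueFile({
//     filename: 'today.csv',
//     existingFiles: ['yesterday.csv'],
//     uniqueFunction: (o) => o.email,
//   });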

/*
  A diff that allows for unordered files and doesn't store full objects in
  memory. Requires two passes of the files, but that's a better tradeoff
  than trying to hold huge files in memory.
*/
Worker.prototype.diff = async function (options) {
  const { fileA, fileB, uniqueFunction: ufOpt, columns, includeDuplicateSourceRecords } = options;
  if (options.fields) throw new Error('fields is deprecated, use columns');
  if (ufOpt && columns) throw new Error('columns and uniqueFunction cannot both be specified');
  let uniqueFunction = ufOpt;
  if (!uniqueFunction && columns) {
    const farr = getStringArray(columns);
    uniqueFunction = (o) => farr.map((f) => o[f] || '').join('.');
  }
  // left: records in fileA that aren't in fileB; right: the reverse
  const left = await this.getUniqueFile({
    existingFiles: [fileB],
    filename: fileA,
    uniqueFunction,
    includeDuplicateSourceRecords
  });
  const right = await this.getUniqueFile({
    existingFiles: [fileA],
    filename: fileB,
    uniqueFunction,
    includeDuplicateSourceRecords
  });
  return {
    left,
    right
  };
};

Worker.prototype.diff.metadata = {
  options: {
    fileA: {},
    fileB: {},
    columns: { description: 'Columns to use for uniqueness -- aka primary key. Defaults to JSON of line' },
    uniqueFunction: {},
    includeDuplicateSourceRecords: {
      description: 'Sometimes you want the output to include source dupes, sometimes not, default false'
    }
  }
};
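
// Editorial sketch (not part of the package):
//   const { left, right } = await worker.diff({ fileA: 'old.csv', fileB: 'new.csv', columns: 'id' });
//   // left.filename contains rows unique to old.csv; right.filename rows unique to new.csv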

export default Worker;