b2-cloud-storage 1.0.4 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +407 -347
  2. package/package.json +14 -15
package/index.js CHANGED
@@ -1,13 +1,14 @@
1
1
  /* eslint-disable unicorn/explicit-length-check */
2
2
  'use strict';
3
- const url = require('url');
4
- const crypto = require('crypto');
5
- const os = require('os');
6
- const fs = require('fs');
3
+ const crypto = require('node:crypto');
4
+ const fs = require('node:fs');
5
+ const os = require('node:os');
6
+ const { Transform } = require('node:stream');
7
+ const url = require('node:url');
7
8
 
8
- const request = require('request');
9
- const _ = require('lodash');
10
9
  const async = require('async');
10
+ const _ = require('lodash');
11
+ const request = require('request');
11
12
 
12
13
  const nodeVersion = process.version;
13
14
  const packageVersion = require('./package.json').version;
@@ -28,33 +29,34 @@ const b2CloudStorage = class {
28
29
  * @param {number} options.maxPartAttempts Maximum retries each part can reattempt before erroring when uploading a Large File.
29
30
  * @param {number} options.maxTotalErrors Maximum total errors the collective list of file parts can trigger (below the individual maxPartAttempts) before the Large File upload is considered failed.
30
31
  * @param {number} options.maxReauthAttempts Maximum times this library will try to reauthenticate if an auth token expires, before assuming failure.
32
+ * @param {number} options.defaultUploadConcurrency Default number of concurrent part uploads for large files. Defaults to 4.
31
33
  * @return {undefined}
32
34
  */
33
- constructor(options){
34
- if(!options || !options.auth){
35
+ constructor(options) {
36
+ if (!options || !options.auth) {
35
37
  throw new Error('Missing authentication object');
36
38
  }
37
- if(!options.auth.accountId){
39
+ if (!options.auth.accountId) {
38
40
  throw new Error('Missing authentication accountId');
39
41
  }
40
- if(!options.auth.applicationKey){
42
+ if (!options.auth.applicationKey) {
41
43
  throw new Error('Missing authentication applicationKey');
42
44
  }
43
45
 
44
46
  this.maxSmallFileSize = options.maxSmallFileSize || 100_000_000; // default to 100MB
45
- if(this.maxSmallFileSize > 5_000_000_000){
47
+ if (this.maxSmallFileSize > 5_000_000_000) {
46
48
  throw new Error('maxSmallFileSize can not exceed 5GB');
47
49
  }
48
- if(this.maxSmallFileSize < 100_000_000){
50
+ if (this.maxSmallFileSize < 100_000_000) {
49
51
  throw new Error('maxSmallFileSize can not be less than 100MB');
50
52
  }
51
53
 
52
54
  this.maxCopyWorkers = options.maxCopyWorkers || (os.cpus().length * 5); // default to the number of available CPUs * 5 (web requests are cheap)
53
55
  this.maxSmallCopyFileSize = options.maxSmallCopyFileSize || 100_000_000; // default to 5GB
54
- if(this.maxSmallCopyFileSize > 5_000_000_000){
56
+ if (this.maxSmallCopyFileSize > 5_000_000_000) {
55
57
  throw new Error('maxSmallFileSize can not exceed 5GB');
56
58
  }
57
- if(this.maxSmallCopyFileSize < 5_000_000){
59
+ if (this.maxSmallCopyFileSize < 5_000_000) {
58
60
  throw new Error('maxSmallFileSize can not be less than 5MB');
59
61
  }
60
62
 
@@ -64,6 +66,7 @@ const b2CloudStorage = class {
64
66
  this.maxPartAttempts = options.maxPartAttempts || 3; // retry each chunk up to 3 times
65
67
  this.maxTotalErrors = options.maxTotalErrors || 10; // quit if 10 chunks fail
66
68
  this.maxReauthAttempts = options.maxReauthAttempts || 3; // quit if 3 re-auth attempts fail
69
+ this.defaultUploadConcurrency = options.defaultUploadConcurrency || 4;
67
70
  }
68
71
 
69
72
  /**
@@ -71,7 +74,7 @@ const b2CloudStorage = class {
71
74
  * @param {string} fileName File name for upload
72
75
  * @returns {string} Returns a safe and URL encoded file name for upload
73
76
  */
74
- static getUrlEncodedFileName(fileName){
77
+ static getUrlEncodedFileName(fileName) {
75
78
  return fileName.split('/').map(component => encodeURIComponent(component)).join('/');
76
79
  }
77
80
 
@@ -79,7 +82,7 @@ const b2CloudStorage = class {
79
82
  * `b2_authorize_account` method, required before calling any B2 API routes.
80
83
  * @param {Function} [callback]
81
84
  */
82
- authorize(callback){
85
+ authorize(callback) {
83
86
  this.request({
84
87
  auth: {
85
88
  user: this.auth.accountId,
@@ -88,7 +91,7 @@ const b2CloudStorage = class {
88
91
  apiUrl: 'https://api.backblazeb2.com',
89
92
  url: 'b2_authorize_account',
90
93
  }, (err, results) => {
91
- if(err){
94
+ if (err) {
92
95
  return callback(err);
93
96
  }
94
97
  this.authData = results;
@@ -100,7 +103,7 @@ const b2CloudStorage = class {
100
103
 
101
104
  /**
102
105
  * Upload file with `b2_upload_file` or as several parts of a large file upload.
103
- * This method also will get the filesize & sha1 hash of the entire file.
106
+ * This method also will get the filesize & sha1 hash of the entire file (unless `data.hash` is already provided or set to `false`).
104
107
  * @param {String} filename Path to filename to for upload.
105
108
  * @param {Object} data Configuration data passed from the `uploadFile` method.
106
109
  * @param {String} data.bucketId The target bucket the file is to be uploaded.
@@ -113,18 +116,18 @@ const b2CloudStorage = class {
113
116
  * @param {Number} [data.progressInterval] How frequently the `onUploadProgress` callback is fired during upload
114
117
  * @param {Number} [data.partSize] Overwrite the default part size as defined by the b2 authorization process
115
118
  * @param {Object} [data.info] File info metadata for the file.
116
- * @param {String} [data.hash] Skips the sha1 hash step with hash already provided.
119
+ * @param {String|false} [data.hash] When a string is provided, skips the whole-file sha1 computation and uses the given hash. Set to `false` to skip hashing entirely; small files will use `do_not_verify`, while large file parts are always verified post-upload against B2's response.
117
120
  * @param {('fail_some_uploads'|'expire_some_account_authorization_tokens'|'force_cap_exceeded')} [data.testMode] Enables B2 test mode by setting the `X-Bz-Test-Mode` header, which will cause intermittent artificial failures.
118
121
  * @param {Function} [callback]
119
122
  * @returns {object} Returns an object with 3 helper methods: `cancel()`, `progress()`, & `info()`
120
123
  */
121
- uploadFile(filename, data, callback = function(){}){
122
- if(!this.authData){
124
+ uploadFile(filename, data, callback = function() {}) {
125
+ if (!this.authData) {
123
126
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
124
127
  }
125
128
 
126
129
  // todo: check if allowed (access) to upload files
127
- if(data.partSize < 5_000_000){
130
+ if (data.partSize < 5_000_000) {
128
131
  return callback(new Error('partSize can not be lower than 5MB'));
129
132
  }
130
133
 
@@ -134,14 +137,14 @@ const b2CloudStorage = class {
134
137
 
135
138
  let fileFuncs = {};
136
139
  const returnFuncs = {
137
- cancel: function(){
140
+ cancel: function() {
138
141
  cancel = true;
139
- if(fileFuncs.cancel){
142
+ if (fileFuncs.cancel) {
140
143
  return fileFuncs.cancel();
141
144
  }
142
145
  },
143
- progress: function(){
144
- if(fileFuncs.progress){
146
+ progress: function() {
147
+ if (fileFuncs.progress) {
145
148
  return fileFuncs.progress();
146
149
  }
147
150
  return {
@@ -150,35 +153,35 @@ const b2CloudStorage = class {
150
153
  bytesTotal: data.size || 0,
151
154
  };
152
155
  },
153
- info: function(){
154
- if(fileFuncs.info){
156
+ info: function() {
157
+ if (fileFuncs.info) {
155
158
  return fileFuncs.info();
156
159
  }
157
160
  return null;
158
161
  },
159
162
  };
160
163
  async.series([
161
- function(cb){
162
- if(cancel){
164
+ function(cb) {
165
+ if (cancel) {
163
166
  return cb(new Error('B2 upload canceled'));
164
167
  }
165
- if(data.hash){
168
+ if (typeof data.hash === 'string' || data.hash === false) {
166
169
  return cb();
167
170
  }
168
- self.getFileHash(filename, function(err, hash){
169
- if(err){
171
+ self.getFileHash(filename, function(err, hash) {
172
+ if (err) {
170
173
  return cb(err);
171
174
  }
172
175
  data.hash = hash;
173
176
  return cb();
174
177
  });
175
178
  },
176
- function(cb){
177
- if(cancel){
179
+ function(cb) {
180
+ if (cancel) {
178
181
  return cb(new Error('B2 upload canceled'));
179
182
  }
180
- self.getStat(filename, function(err, stat){
181
- if(err){
183
+ self.getStat(filename, function(err, stat) {
184
+ if (err) {
182
185
  return cb(err);
183
186
  }
184
187
  data.stat = stat;
@@ -187,18 +190,18 @@ const b2CloudStorage = class {
187
190
  return cb();
188
191
  });
189
192
  },
190
- ], function(err){
191
- if(cancel){
193
+ ], function(err) {
194
+ if (cancel) {
192
195
  return callback(new Error('B2 upload canceled'));
193
196
  }
194
- if(err){
197
+ if (err) {
195
198
  return callback(err);
196
199
  }
197
200
  // properly encode file name for upload
198
- if(data.fileName){
201
+ if (data.fileName) {
199
202
  data.fileName = b2CloudStorage.getUrlEncodedFileName(data.fileName);
200
203
  }
201
- if(smallFile){
204
+ if (smallFile) {
202
205
  fileFuncs = self.uploadFileSmall(filename, data, callback);
203
206
  return;
204
207
  }
@@ -215,7 +218,7 @@ const b2CloudStorage = class {
215
218
  * @param {Number} [data.maxPartCount] The maximum number of parts to return from this call. The default value is 100, and the maximum allowed is 1000.
216
219
  * @param {Function} [callback]
217
220
  */
218
- listParts(data, callback){
221
+ listParts(data, callback) {
219
222
  return this.request({
220
223
  url: 'b2_list_parts',
221
224
  method: 'POST',
@@ -232,7 +235,7 @@ const b2CloudStorage = class {
232
235
  * @param {Number} [data.maxFileCount] The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 100.
233
236
  * @param {Function} [callback]
234
237
  */
235
- listUnfinishedLargeFiles(data, callback){
238
+ listUnfinishedLargeFiles(data, callback) {
236
239
  return this.request({
237
240
  url: 'b2_list_unfinished_large_files',
238
241
  method: 'POST',
@@ -246,7 +249,7 @@ const b2CloudStorage = class {
246
249
  * @param {String} data.fileId The ID returned by b2_start_large_file.
247
250
  * @param {Function} [callback]
248
251
  */
249
- cancelLargeFile(data, callback){
252
+ cancelLargeFile(data, callback) {
250
253
  return this.request({
251
254
  url: 'b2_cancel_large_file',
252
255
  method: 'POST',
@@ -259,7 +262,7 @@ const b2CloudStorage = class {
259
262
  * @param {String} fileId The ID of the file, as returned by `b2_upload_file`, `b2_hide_file`, `b2_list_file_names`, or `b2_list_file_versions`.
260
263
  * @param {Function} [callback]
261
264
  */
262
- getFileInfo(fileId, callback){
265
+ getFileInfo(fileId, callback) {
263
266
  return this.request({
264
267
  url: 'b2_get_file_info',
265
268
  method: 'POST',
@@ -277,12 +280,12 @@ const b2CloudStorage = class {
277
280
  * @param {Array} [data.bucketTypes] One of: "allPublic", "allPrivate", "snapshot", or other values added in the future. "allPublic" means that anybody can download the files is the bucket; "allPrivate" means that you need an authorization token to download them; "snapshot" means that it's a private bucket containing snapshots created on the B2 web site.
278
281
  * @param {Function} [callback]
279
282
  */
280
- listBuckets(data, callback){
281
- if(!callback && data){
283
+ listBuckets(data, callback) {
284
+ if (!callback && data) {
282
285
  callback = data;
283
286
  data = {};
284
287
  }
285
- if(!data.accountId){
288
+ if (!data.accountId) {
286
289
  data.accountId = this.authData.accountId;
287
290
  }
288
291
  return this.request({
@@ -303,7 +306,7 @@ const b2CloudStorage = class {
303
306
  * @param {Object} [data.range] The range of bytes to copy. If not provided, the whole source file will be copied.
304
307
  * @param {Function} [callback]
305
308
  */
306
- copyFilePart(data, callback){
309
+ copyFilePart(data, callback) {
307
310
  return this.request({
308
311
  url: 'b2_copy_part',
309
312
  method: 'POST',
@@ -328,8 +331,8 @@ const b2CloudStorage = class {
328
331
  * @param {Function} [callback]
329
332
  * @returns {object} Returns an object with 3 helper methods: `cancel()`, `progress()`, & `info()`
330
333
  */
331
- copyFile(data, callback){
332
- if(!this.authData){
334
+ copyFile(data, callback) {
335
+ if (!this.authData) {
333
336
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
334
337
  }
335
338
 
@@ -340,14 +343,14 @@ const b2CloudStorage = class {
340
343
  let fileFuncs = {};
341
344
 
342
345
  const returnFuncs = {
343
- cancel: function(){
346
+ cancel: function() {
344
347
  cancel = true;
345
- if(fileFuncs.cancel){
348
+ if (fileFuncs.cancel) {
346
349
  return fileFuncs.cancel();
347
350
  }
348
351
  },
349
- progress: function(){
350
- if(fileFuncs.progress){
352
+ progress: function() {
353
+ if (fileFuncs.progress) {
351
354
  return fileFuncs.progress();
352
355
  }
353
356
  return {
@@ -356,8 +359,8 @@ const b2CloudStorage = class {
356
359
  bytesTotal: data.size || 0,
357
360
  };
358
361
  },
359
- info: function(){
360
- if(fileFuncs.info){
362
+ info: function() {
363
+ if (fileFuncs.info) {
361
364
  return fileFuncs.info();
362
365
  }
363
366
  return null;
@@ -365,15 +368,15 @@ const b2CloudStorage = class {
365
368
  };
366
369
 
367
370
  async.series([
368
- function(cb){
369
- if(cancel){
371
+ function(cb) {
372
+ if (cancel) {
370
373
  return cb(new Error('B2 copy canceled'));
371
374
  }
372
- if(data.size && data.hash && data.destinationBucketId && data.contentType){
375
+ if (data.size && data.hash && data.destinationBucketId && data.contentType) {
373
376
  return cb();
374
377
  }
375
- self.getFileInfo(data.sourceFileId, function(err, results){
376
- if(err){
378
+ self.getFileInfo(data.sourceFileId, function(err, results) {
379
+ if (err) {
377
380
  return cb(err);
378
381
  }
379
382
  data.size = data.size || results.contentLength;
@@ -383,13 +386,13 @@ const b2CloudStorage = class {
383
386
  return cb();
384
387
  });
385
388
  },
386
- function(cb){
387
- if(cancel){
389
+ function(cb) {
390
+ if (cancel) {
388
391
  return cb(new Error('B2 copy canceled'));
389
392
  }
390
- if(data.size > self.maxSmallCopyFileSize){
391
- fileFuncs = self.copyLargeFile(data, function(err, results){
392
- if(err){
393
+ if (data.size > self.maxSmallCopyFileSize) {
394
+ fileFuncs = self.copyLargeFile(data, function(err, results) {
395
+ if (err) {
393
396
  return cb(err);
394
397
  }
395
398
  returnData = results;
@@ -405,19 +408,19 @@ const b2CloudStorage = class {
405
408
  'metadataDirective',
406
409
  ];
407
410
  // only required for metadata replace
408
- if(data.metadataDirective === 'REPLACE'){
411
+ if (data.metadataDirective === 'REPLACE') {
409
412
  fields.push('contentType', 'fileInfo');
410
413
  }
411
- fileFuncs = self.copySmallFile(_.pick(data, fields), function(err, results){
412
- if(err){
414
+ fileFuncs = self.copySmallFile(_.pick(data, fields), function(err, results) {
415
+ if (err) {
413
416
  return cb(err);
414
417
  }
415
418
  returnData = results;
416
419
  return cb();
417
420
  });
418
421
  },
419
- ], function(err){
420
- if(err){
422
+ ], function(err) {
423
+ if (err) {
421
424
  return callback(err);
422
425
  }
423
426
  return callback(null, returnData);
@@ -436,11 +439,11 @@ const b2CloudStorage = class {
436
439
  * @param {Array} [data.lifecycleRules] The initial list (a JSON array) of lifecycle rules for this bucket. Structure defined below. See Lifecycle Rules.
437
440
  * @param {Function} [callback]
438
441
  */
439
- createBucket(data, callback){
440
- if(!this.authData){
442
+ createBucket(data, callback) {
443
+ if (!this.authData) {
441
444
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
442
445
  }
443
- if(!data.accountId){
446
+ if (!data.accountId) {
444
447
  data.accountId = this.authData.accountId;
445
448
  }
446
449
  return this.request({
@@ -462,11 +465,11 @@ const b2CloudStorage = class {
462
465
  * @param {Array} [data.ifRevisionIs] When set, the update will only happen if the revision number stored in the B2 service matches the one passed in. This can be used to avoid having simultaneous updates make conflicting changes.
463
466
  * @param {Function} [callback]
464
467
  */
465
- updateBucket(data, callback){
466
- if(!this.authData){
468
+ updateBucket(data, callback) {
469
+ if (!this.authData) {
467
470
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
468
471
  }
469
- if(!data.accountId){
472
+ if (!data.accountId) {
470
473
  data.accountId = this.authData.accountId;
471
474
  }
472
475
  return this.request({
@@ -483,16 +486,16 @@ const b2CloudStorage = class {
483
486
  * @param {String} [data.accountId] The ID of your account. When unset will use the `b2_authorize` results `accountId`.
484
487
  * @param {Function} [callback]
485
488
  */
486
- deleteBucket(data, callback){
487
- if(!this.authData){
489
+ deleteBucket(data, callback) {
490
+ if (!this.authData) {
488
491
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
489
492
  }
490
- if(typeof(data) === 'string'){
493
+ if (typeof(data) === 'string') {
491
494
  data = {
492
495
  bucketId: data,
493
496
  };
494
497
  }
495
- if(!data.accountId){
498
+ if (!data.accountId) {
496
499
  data.accountId = this.authData.accountId;
497
500
  }
498
501
  return this.request({
@@ -514,7 +517,7 @@ const b2CloudStorage = class {
514
517
  * @param {String} [data.delimiter] files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
515
518
  * @param {Function} [callback]
516
519
  */
517
- listFileNames(data, callback){
520
+ listFileNames(data, callback) {
518
521
  return this.request({
519
522
  url: 'b2_list_file_names',
520
523
  method: 'POST',
@@ -533,7 +536,7 @@ const b2CloudStorage = class {
533
536
  * @param {String} [data.delimiter] files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
534
537
  * @param {Function} [callback]
535
538
  */
536
- listFileVersions(data, callback){
539
+ listFileVersions(data, callback) {
537
540
  return this.request({
538
541
  url: 'b2_list_file_versions',
539
542
  method: 'POST',
@@ -549,15 +552,15 @@ const b2CloudStorage = class {
549
552
  * @param {String} [data.startApplicationKeyId] The first key to return. Used when a query hits the maxKeyCount, and you want to get more. Set to the value returned as the nextApplicationKeyId in the previous query.
550
553
  * @param {Function} [callback]
551
554
  */
552
- listKeys(data, callback){
553
- if(!this.authData){
555
+ listKeys(data, callback) {
556
+ if (!this.authData) {
554
557
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
555
558
  }
556
- if(!callback && data){
559
+ if (!callback && data) {
557
560
  callback = data;
558
561
  data = {};
559
562
  }
560
- if(!data.accountId){
563
+ if (!data.accountId) {
561
564
  data.accountId = this.authData.accountId;
562
565
  }
563
566
  return this.request({
@@ -578,11 +581,11 @@ const b2CloudStorage = class {
578
581
  * @param {String} [data.namePrefix] When present, restricts access to files whose names start with the prefix. You must set `bucketId` when setting this.
579
582
  * @param {Function} [callback]
580
583
  */
581
- createKey(data, callback){
582
- if(!this.authData){
584
+ createKey(data, callback) {
585
+ if (!this.authData) {
583
586
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
584
587
  }
585
- if(!data.accountId){
588
+ if (!data.accountId) {
586
589
  data.accountId = this.authData.accountId;
587
590
  }
588
591
  return this.request({
@@ -597,7 +600,7 @@ const b2CloudStorage = class {
597
600
  * @param {String} applicationKeyId The key to delete.
598
601
  * @param {Function} [callback]
599
602
  */
600
- deleteKey(applicationKeyId, callback){
603
+ deleteKey(applicationKeyId, callback) {
601
604
  return this.request({
602
605
  url: 'b2_delete_key',
603
606
  method: 'POST',
@@ -616,7 +619,7 @@ const b2CloudStorage = class {
616
619
  * @param {String} data.fileId The ID of the file, as returned by `b2_upload_file`, `b2_list_file_names`, or `b2_list_file_versions`.
617
620
  * @param {Function} [callback]
618
621
  */
619
- deleteFileVersion(data, callback){
622
+ deleteFileVersion(data, callback) {
620
623
  return this.request({
621
624
  url: 'b2_delete_file_version',
622
625
  method: 'POST',
@@ -635,8 +638,8 @@ const b2CloudStorage = class {
635
638
  * @param {String} [data.b2ContentDisposition] If this is present, B2 will use it as the value of the 'Content-Disposition' header, overriding any 'b2-content-disposition' specified when the file was uploaded.
636
639
  * @param {Function} [callback]
637
640
  */
638
- downloadFileById(data, callback){
639
- if(!callback && typeof(callback) === 'function'){
641
+ downloadFileById(data, callback) {
642
+ if (!callback && typeof(callback) === 'function') {
640
643
  callback = data;
641
644
  data = {};
642
645
  }
@@ -650,13 +653,13 @@ const b2CloudStorage = class {
650
653
  fileId: data.fileId,
651
654
  },
652
655
  };
653
- if(data.Authorization){
656
+ if (data.Authorization) {
654
657
  requestData.headers.Authorization = data.Authorization;
655
658
  }
656
- if(data.Range){
659
+ if (data.Range) {
657
660
  requestData.headers.Range = data.Range;
658
661
  }
659
- if(data.b2ContentDisposition){
662
+ if (data.b2ContentDisposition) {
660
663
  requestData.headers.b2ContentDisposition = data.b2ContentDisposition;
661
664
  }
662
665
  return this.request(requestData, callback);
@@ -673,20 +676,20 @@ const b2CloudStorage = class {
673
676
  * @param {String} [data.b2ContentDisposition] If this is present, B2 will use it as the value of the 'Content-Disposition' header, overriding any 'b2-content-disposition' specified when the file was uploaded.
674
677
  * @param {Function} [callback]
675
678
  */
676
- downloadFileByName(data, callback){
679
+ downloadFileByName(data, callback) {
677
680
  const requestData = {
678
681
  apiUrl: `${this.downloadUrl}/file/${data.bucket}/${data.fileName}`,
679
682
  json: false,
680
683
  appendPath: false,
681
684
  headers: {},
682
685
  };
683
- if(data.Authorization){
686
+ if (data.Authorization) {
684
687
  requestData.headers.Authorization = data.Authorization;
685
688
  }
686
- if(data.Range){
689
+ if (data.Range) {
687
690
  requestData.headers.Range = data.Range;
688
691
  }
689
- if(data.b2ContentDisposition){
692
+ if (data.b2ContentDisposition) {
690
693
  requestData.headers.b2ContentDisposition = data.b2ContentDisposition;
691
694
  }
692
695
  return this.request(requestData, callback);
@@ -701,7 +704,7 @@ const b2CloudStorage = class {
701
704
  * @param {Number} [data.b2ContentDisposition] If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition. The value must match the grammar specified in RFC 6266 (except that parameter names that contain an '*' are not allowed).
702
705
  * @param {Function} [callback]
703
706
  */
704
- getDownloadAuthorization(data, callback){
707
+ getDownloadAuthorization(data, callback) {
705
708
  return this.request({
706
709
  url: 'b2_get_download_authorization',
707
710
  method: 'POST',
@@ -716,7 +719,7 @@ const b2CloudStorage = class {
716
719
  * @param {String} data.fileName The name of the file to hide.
717
720
  * @param {Function} [callback]
718
721
  */
719
- hideFile(data, callback){
722
+ hideFile(data, callback) {
720
723
  return this.request({
721
724
  url: 'b2_hide_file',
722
725
  method: 'POST',
@@ -732,10 +735,10 @@ const b2CloudStorage = class {
732
735
  * @param {boolean} data.apiUrl (internal) Full URL path or hostname to replace. Most useful when combined with `appendPath`.
733
736
  * @param {Function} callback [description]
734
737
  */
735
- request(data, callback){
738
+ request(data, callback) {
736
739
  const apiUrl = new url.URL(data.apiUrl || this.url);
737
740
 
738
- if(data.appendPath !== false){
741
+ if (data.appendPath !== false) {
739
742
  apiUrl.pathname += `b2api/${this.version}/${data.url}`;
740
743
  }
741
744
  const requestData = _.defaults(data, {
@@ -745,54 +748,54 @@ const b2CloudStorage = class {
745
748
  });
746
749
  requestData.url = apiUrl.toString();
747
750
  // if auth data is set from `authorize` function and we haven't overridden it via `data.auth` or request headers, set it for this request
748
- if(this.authData && !data.auth && !requestData.headers.Authorization){
751
+ if (this.authData && !data.auth && !requestData.headers.Authorization) {
749
752
  requestData.headers.Authorization = this.authData.authorizationToken;
750
753
  }
751
754
  requestData.headers.Accept = 'application/json';
752
- if(!requestData.headers.Authorization && !requestData.auth){
755
+ if (!requestData.headers.Authorization && !requestData.auth) {
753
756
  return callback(new Error('Not yet authorised. Call `.authorize` before running any functions.'));
754
757
  }
755
758
  // default user agent to package version and node version if not already set
756
- if(!requestData.headers['User-Agent']){
759
+ if (!requestData.headers['User-Agent']) {
757
760
  requestData.headers['User-Agent'] = `b2-cloud-storage/${packageVersion}+node/${nodeVersion}`;
758
761
  }
759
762
  let reqCount = 0;
760
763
  const doRequest = () => {
761
- if(reqCount >= this.maxReauthAttempts){
764
+ if (reqCount >= this.maxReauthAttempts) {
762
765
  return callback(new Error('Auth token expired, and unable to re-authenticate to acquire new token.'));
763
766
  }
764
767
  reqCount++;
765
768
  return request(requestData, (err, res, body) => {
766
- if(err){
769
+ if (err) {
767
770
  return callback(err, null, res);
768
771
  }
769
- if(res.headers['content-type'].includes('application/json') && typeof(body) === 'string'){
770
- try{
772
+ if (res.headers['content-type'] && res.headers['content-type'].includes('application/json') && typeof(body) === 'string') {
773
+ try {
771
774
  body = JSON.parse(body);
772
- }catch{
775
+ } catch {
773
776
  // we tried
774
777
  }
775
778
  }
776
779
  // auth expired, re-authorize and then make request again
777
- if(res.statusCode === 401 && body && body.code === 'expired_auth_token'){
780
+ if (res.statusCode === 401 && body && body.code === 'expired_auth_token') {
778
781
  return this.authorize(doRequest);
779
782
  }
780
- if(res.statusCode === 403 || (body && body.code === 'storage_cap_exceeded')){
783
+ if (res.statusCode === 403 || (body && body.code === 'storage_cap_exceeded')) {
781
784
  return callback(new Error('B2 Cap Exceeded. Check your Backblaze account for more details.'), body, res);
782
785
  }
783
786
  // todo: handle more response codes.
784
- if(res.statusCode !== 200){
787
+ if (res.statusCode !== 200) {
785
788
  let error = null;
786
- if(typeof(body) === 'string'){
789
+ if (typeof(body) === 'string') {
787
790
  error = new Error(body);
788
791
  }
789
- if(body && body.code && !body.message){
792
+ if (body && body.code && !body.message) {
790
793
  error = new Error('API returned error code: ' + body.code);
791
794
  }
792
- if(body && body.message){
795
+ if (body && body.message) {
793
796
  error = new Error(body.message);
794
797
  }
795
- if(!error){
798
+ if (!error) {
796
799
  error = new Error('Invalid response from API.');
797
800
  }
798
801
  return callback(error, body, res);
@@ -809,11 +812,11 @@ const b2CloudStorage = class {
809
812
  * @param {Stream} fileStream File stream from `fs.readFileStream`.
810
813
  * @param {Function} [callback]
811
814
  */
812
- getHash(fileStream, callback){
815
+ getHash(fileStream, callback) {
813
816
  const hash = crypto.createHash('sha1');
814
- fileStream.on('data', function(chunk){
817
+ fileStream.on('data', function(chunk) {
815
818
  hash.update(chunk);
816
- }).on('error', err => callback(err)).on('end', function(){
819
+ }).on('error', err => callback(err)).on('end', function() {
817
820
  return callback(null, hash.digest('hex'));
818
821
  });
819
822
  }
@@ -824,7 +827,7 @@ const b2CloudStorage = class {
824
827
  * @param {String} Path to filename to get sha1 hash.
825
828
  * @param {Function} [callback]
826
829
  */
827
- getFileHash(filename, callback){
830
+ getFileHash(filename, callback) {
828
831
  return this.getHash(fs.createReadStream(filename), callback);
829
832
  }
830
833
 
@@ -834,7 +837,7 @@ const b2CloudStorage = class {
834
837
  * @param {String} Path to filename to get file stats.
835
838
  * @param {Function} [callback]
836
839
  */
837
- getStat(filename, callback){
840
+ getStat(filename, callback) {
838
841
  return fs.stat(filename, callback);
839
842
  }
840
843
 
@@ -851,7 +854,7 @@ const b2CloudStorage = class {
851
854
  * @param {Function} [callback]
852
855
  * @returns {object} Returns an object with 1 helper method: `cancel()`
853
856
  */
854
- copySmallFile(data, callback){
857
+ copySmallFile(data, callback) {
855
858
  const req = this.request({
856
859
  url: 'b2_copy_file',
857
860
  method: 'POST',
@@ -860,7 +863,7 @@ const b2CloudStorage = class {
860
863
 
861
864
  // If we had a progress and info we could return those as well
862
865
  return {
863
- cancel: function(){
866
+ cancel: function() {
864
867
  req.abort();
865
868
  },
866
869
  };
@@ -881,8 +884,8 @@ const b2CloudStorage = class {
881
884
  * @param {Object} [data.fileInfo] Must only be supplied if the metadataDirective is REPLACE. This field stores the metadata that will be stored with the file.
882
885
  * @param {Function} [callback]
883
886
  */
884
- copyLargeFile(data, callback){
885
- if(!this.authData){
887
+ copyLargeFile(data, callback) {
888
+ if (!this.authData) {
886
889
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
887
890
  }
888
891
  const self = this;
@@ -892,7 +895,7 @@ const b2CloudStorage = class {
892
895
 
893
896
  let interval = null;
894
897
  async.series([
895
- function(cb){
898
+ function(cb) {
896
899
  self.request({
897
900
  url: 'b2_start_large_file',
898
901
  method: 'POST',
@@ -900,23 +903,27 @@ const b2CloudStorage = class {
900
903
  bucketId: data.destinationBucketId,
901
904
  fileName: data.fileName,
902
905
  contentType: data.contentType,
903
- fileInfo: _.defaults(data.fileInfo, {
906
+ fileInfo: _.defaults({}, data.fileInfo, data.hash ? {
904
907
  large_file_sha1: data.hash,
905
908
  hash_sha1: data.hash,
909
+ } : {}, {
906
910
  src_last_modified_millis: String(Date.now()),
907
911
  }),
908
912
  },
909
913
  }, (err, results) => {
910
- if(err){
914
+ if (err) {
911
915
  return cb(err);
912
916
  }
913
917
  info.fileId = results.fileId;
914
918
  return cb();
915
919
  });
916
920
  },
917
- function(cb){
918
- // todo: maybe tweak recommendedPartSize if the total number of chunks exceeds the total backblaze limit (10000)
919
- const partSize = data.partSize || self.authData.recommendedPartSize;
921
+ function(cb) {
922
+ let partSize = data.partSize || self.authData.recommendedPartSize;
923
+ const minPartSize = Math.ceil(data.size / 10000);
924
+ if (minPartSize > partSize) {
925
+ partSize = minPartSize;
926
+ }
920
927
 
921
928
  // track the current chunk
922
929
  const fsOptions = {
@@ -930,8 +937,8 @@ const b2CloudStorage = class {
930
937
  info.chunks = [];
931
938
  info.lastPart = 1;
932
939
  // create array with calculated number of chunks (floored)
933
- const pushChunks = Array.from({length: Math.floor(data.size / partSize)});
934
- _.each(pushChunks, function(){
940
+ const pushChunks = Array.from({ length: Math.floor(data.size / partSize) });
941
+ _.each(pushChunks, function() {
935
942
  info.chunks.push(_.clone(fsOptions));
936
943
  fsOptions.part++;
937
944
  fsOptions.start += partSize;
@@ -939,9 +946,9 @@ const b2CloudStorage = class {
939
946
  });
940
947
  // calculate remainder left (less than single chunk)
941
948
  const remainder = data.size % partSize;
942
- if(remainder > 0){
949
+ if (remainder > 0) {
943
950
  const item = _.clone(fsOptions);
944
- item.end = data.size;
951
+ item.end = data.size - 1;
945
952
  item.size = remainder;
946
953
  info.chunks.push(item);
947
954
  }
@@ -949,20 +956,20 @@ const b2CloudStorage = class {
949
956
 
950
957
  return process.nextTick(cb);
951
958
  },
952
- function(cb){
959
+ function(cb) {
953
960
  info.shaParts = {};
954
961
  info.totalCopied = 0;
955
962
 
956
963
  let queue = null; // initialise queue to avoid no-use-before-define eslint error
957
- const reQueue = function(task, incrementCount = true){
958
- if(incrementCount){
964
+ const reQueue = function(task, incrementCount = true) {
965
+ if (incrementCount) {
959
966
  task.attempts++;
960
967
  }
961
968
  queue.push(task);
962
969
  };
963
- queue = async.queue(function(task, queueCB){
970
+ queue = async.queue(function(task, queueCB) {
964
971
  // if the queue has already errored, just callback immediately
965
- if(info.error){
972
+ if (info.error) {
966
973
  return process.nextTick(queueCB);
967
974
  }
968
975
  self.request({
@@ -974,14 +981,14 @@ const b2CloudStorage = class {
974
981
  partNumber: task.part,
975
982
  range: `bytes=${task.start}-${task.end}`,
976
983
  },
977
- }, function(err, results){
978
- if(err){
984
+ }, function(err, results) {
985
+ if (err) {
979
986
  // if upload fails, error if exceeded max attempts, else requeue
980
- if(task.attempts > self.maxPartAttempts || info.totalErrors > self.maxTotalErrors){
987
+ info.totalErrors++;
988
+ if (task.attempts > self.maxPartAttempts || info.totalErrors >= self.maxTotalErrors) {
981
989
  info.error = err;
982
990
  return queueCB(err);
983
991
  }
984
- info.totalErrors++;
985
992
  reQueue(task);
986
993
  return queueCB();
987
994
  }
@@ -992,20 +999,20 @@ const b2CloudStorage = class {
992
999
  }, self.maxCopyWorkers);
993
1000
 
994
1001
  // callback when queue has completed
995
- queue.drain(function(){
1002
+ queue.drain(function() {
996
1003
  clearInterval(interval);
997
- if(info.error){
1004
+ if (info.error) {
998
1005
  return cb();
999
1006
  }
1000
1007
  info.partSha1Array = [];
1001
1008
  let i = 1;
1002
- while(i <= info.lastPart){
1009
+ while (i <= info.lastPart) {
1003
1010
  info.partSha1Array.push(info.shaParts[i++]);
1004
1011
  }
1005
1012
  return cb();
1006
1013
  });
1007
- interval = setInterval(function(){
1008
- if(!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function'){
1014
+ interval = setInterval(function() {
1015
+ if (!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function') {
1009
1016
  return;
1010
1017
  }
1011
1018
  const percent = Math.floor((info.totalCopied / data.size) * 100);
@@ -1018,13 +1025,13 @@ const b2CloudStorage = class {
1018
1025
 
1019
1026
  queue.push(info.chunks);
1020
1027
  },
1021
- function(cb){
1022
- if(interval){
1028
+ function(cb) {
1029
+ if (interval) {
1023
1030
  clearInterval(interval);
1024
1031
  }
1025
1032
 
1026
1033
  // cleanup large file upload if error occurred
1027
- if(!info.error){
1034
+ if (!info.error) {
1028
1035
  return cb();
1029
1036
  }
1030
1037
 
@@ -1036,8 +1043,8 @@ const b2CloudStorage = class {
1036
1043
  },
1037
1044
  }, cb);
1038
1045
  },
1039
- function(cb){
1040
- if(info.error){
1046
+ function(cb) {
1047
+ if (info.error) {
1041
1048
  return cb(info.error);
1042
1049
  }
1043
1050
  self.request({
@@ -1047,34 +1054,34 @@ const b2CloudStorage = class {
1047
1054
  fileId: info.fileId,
1048
1055
  partSha1Array: info.partSha1Array,
1049
1056
  },
1050
- }, function(err, results){
1051
- if(err){
1057
+ }, function(err, results) {
1058
+ if (err) {
1052
1059
  return cb(err);
1053
1060
  }
1054
1061
  info.returnData = results;
1055
1062
  return cb();
1056
1063
  });
1057
1064
  },
1058
- ], function(err){
1059
- if(interval){
1065
+ ], function(err) {
1066
+ if (interval) {
1060
1067
  clearInterval(interval);
1061
1068
  }
1062
- if(err || info.error){
1069
+ if (err || info.error) {
1063
1070
  return callback(err || info.error);
1064
1071
  }
1065
1072
  return callback(null, info.returnData);
1066
1073
  });
1067
1074
 
1068
1075
  return {
1069
- cancel: function(){
1076
+ cancel: function() {
1070
1077
  info.error = new Error('B2 upload canceled');
1071
1078
  // TODO: cancel all concurrent copy part requests
1072
1079
  },
1073
- progress: function(){
1080
+ progress: function() {
1074
1081
  return info.progress;
1075
1082
  },
1076
- info: function(){
1077
- if(info.returnData){
1083
+ info: function() {
1084
+ if (info.returnData) {
1078
1085
  return info.returnData;
1079
1086
  }
1080
1087
  return {
@@ -1091,7 +1098,7 @@ const b2CloudStorage = class {
1091
1098
  * @param {Object} data Configuration data passed from the `uploadFile` method.
1092
1099
  * @param {Function} [callback]
1093
1100
  */
1094
- uploadFileSmall(filename, data, callback = function(){}){
1101
+ uploadFileSmall(filename, data, callback = function() {}) {
1095
1102
  let req = null;
1096
1103
  const info = {};
1097
1104
  let attempts = 0;
@@ -1103,7 +1110,7 @@ const b2CloudStorage = class {
1103
1110
  bucketId: data.bucketId,
1104
1111
  },
1105
1112
  }, (err, results) => {
1106
- if(err){
1113
+ if (err) {
1107
1114
  return callback(err);
1108
1115
  }
1109
1116
  const requestData = {
@@ -1116,43 +1123,43 @@ const b2CloudStorage = class {
1116
1123
  'Content-Type': data.contentType,
1117
1124
  'Content-Length': data.size,
1118
1125
  'X-Bz-File-Name': data.fileName,
1119
- 'X-Bz-Content-Sha1': data.hash,
1126
+ 'X-Bz-Content-Sha1': data.hash === false ? 'do_not_verify' : data.hash,
1120
1127
  },
1121
1128
  body: fs.createReadStream(filename),
1122
1129
  };
1123
- if(data.testMode){
1130
+ if (data.testMode) {
1124
1131
  requestData.headers['X-Bz-Test-Mode'] = data.testMode;
1125
1132
  }
1126
- data.info = _.defaults({
1127
- 'hash_sha1': data.hash,
1128
- }, data.info, {
1129
- 'src_last_modified_millis': data.stat.mtime.getTime(),
1133
+ data.info = _.defaults(data.hash ? {
1134
+ hash_sha1: data.hash,
1135
+ } : {}, data.info, {
1136
+ src_last_modified_millis: data.stat.mtime.getTime(),
1130
1137
  });
1131
- _.each(data.info || {}, function(value, key){
1138
+ _.each(data.info || {}, function(value, key) {
1132
1139
  requestData.headers['X-Bz-Info-' + key] = value;
1133
1140
  });
1134
1141
  data.info = _.mapValues(data.info, _.toString);
1135
1142
 
1136
1143
  let interval = null;
1137
1144
  callback = _.once(callback);
1138
- req = this.request(requestData, function(err, results, res){
1145
+ req = this.request(requestData, function(err, results, res) {
1139
1146
  attempts++;
1140
- if(err){
1141
- if(attempts > data.maxPartAttempts || attempts > data.maxTotalErrors){
1147
+ if (err) {
1148
+ if (attempts > data.maxPartAttempts || attempts > data.maxTotalErrors) {
1142
1149
  return callback(new Error('Exceeded max retry attempts for upload'));
1143
1150
  }
1144
1151
  // handle connection failures that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1145
- if(err.code === 'EPIPE' || err.code === 'ETIMEDOUT' || err.code === 'ESOCKETTIMEDOUT'){
1152
+ if (err.code === 'EPIPE' || err.code === 'ETIMEDOUT' || err.code === 'ESOCKETTIMEDOUT') {
1146
1153
  return upload();
1147
1154
  }
1148
1155
  // handle status codes that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1149
- if(res && (res.statusCode === 408 || (res.statusCode >= 500 && res.statusCode <= 599))){
1156
+ if (res && (res.statusCode === 408 || (res.statusCode >= 500 && res.statusCode <= 599))) {
1150
1157
  return upload();
1151
1158
  }
1152
1159
  return callback(err);
1153
1160
  }
1154
1161
  info.returnData = results;
1155
- if(data.onFileId && typeof(data.onFileId) === 'function'){
1162
+ if (data.onFileId && typeof(data.onFileId) === 'function') {
1156
1163
  data.onFileId(results.fileId);
1157
1164
  }
1158
1165
  return callback(null, results);
@@ -1160,16 +1167,17 @@ const b2CloudStorage = class {
1160
1167
  clearInterval(interval);
1161
1168
  }).on('error', () => {
1162
1169
  clearInterval(interval);
1163
- }).on('abort', () => {
1164
- clearInterval(interval);
1165
- return callback(new Error('B2 upload canceled'));
1166
- });
1167
- interval = setInterval(function(){
1168
- if(!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function'){
1170
+ })
1171
+ .on('abort', () => {
1172
+ clearInterval(interval);
1173
+ return callback(new Error('B2 upload canceled'));
1174
+ });
1175
+ interval = setInterval(function() {
1176
+ if (!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function') {
1169
1177
  return;
1170
1178
  }
1171
1179
  let bytesDispatched = 0;
1172
- if(req.req && req.req.connection && req.req.connection._bytesDispatched){
1180
+ if (req.req && req.req.connection && req.req.connection._bytesDispatched) {
1173
1181
  bytesDispatched = req.req.connection._bytesDispatched;
1174
1182
  }
1175
1183
  const percent = Math.floor((bytesDispatched / data.size) * 100);
@@ -1184,15 +1192,15 @@ const b2CloudStorage = class {
1184
1192
  };
1185
1193
  upload();
1186
1194
  return {
1187
- cancel: function(){
1188
- if(req && req.abort){
1195
+ cancel: function() {
1196
+ if (req && req.abort) {
1189
1197
  req.abort();
1190
1198
  }
1191
1199
  },
1192
- progress: function(){
1200
+ progress: function() {
1193
1201
  return info.progress;
1194
1202
  },
1195
- info: function(){
1203
+ info: function() {
1196
1204
  return info.returnData;
1197
1205
  },
1198
1206
  };
@@ -1200,15 +1208,16 @@ const b2CloudStorage = class {
1200
1208
 
1201
1209
  /**
1202
1210
  * Helper method: Uploads a large file as several parts
1203
- * This method will split the large files into several chunks & sha1 hash each part.
1204
- * These chunks are uploaded in parallel to B2 and will retry on fail.
1211
+ * This method will split the large file into several chunks, uploading them in parallel to B2.
1212
+ * Each part's sha1 hash is computed inline during upload via a transform stream and verified against B2's response.
1213
+ * Parts will retry on failure.
1205
1214
  * @private
1206
1215
  * @param {String} filename Path to filename for upload.
1207
1216
  * @param {Object} data Configuration data passed from the `uploadFile` method.
1208
1217
  * @param {Function} [callback]
1209
1218
  */
1210
- uploadFileLarge(filename, data, callback = function(){}){
1211
- if(!this.authData){
1219
+ uploadFileLarge(filename, data, callback = function() {}) {
1220
+ if (!this.authData) {
1212
1221
  return callback(new Error('Not authenticated. Did you forget to call authorize()?'));
1213
1222
  }
1214
1223
  const self = this;
@@ -1224,17 +1233,17 @@ const b2CloudStorage = class {
1224
1233
  };
1225
1234
  // TODO: handle update callbacks
1226
1235
 
1227
- data.limit = data.limit || 4; // todo: calculate / dynamic or something
1236
+ data.limit = data.limit || self.defaultUploadConcurrency;
1228
1237
 
1229
- const generateUploadURL = function(num, callback){
1238
+ const generateUploadURL = function(num, callback) {
1230
1239
  self.request({
1231
1240
  url: 'b2_get_upload_part_url',
1232
1241
  method: 'POST',
1233
1242
  json: {
1234
1243
  fileId: info.fileId,
1235
1244
  },
1236
- }, function(err, results){
1237
- if(err){
1245
+ }, function(err, results) {
1246
+ if (err) {
1238
1247
  return callback(err);
1239
1248
  }
1240
1249
  info.upload_urls[num] = {
@@ -1247,28 +1256,28 @@ const b2CloudStorage = class {
1247
1256
  };
1248
1257
  let interval = null;
1249
1258
  async.series([
1250
- function(cb){
1251
- if(!data.largeFileId){
1259
+ function(cb) {
1260
+ if (!data.largeFileId) {
1252
1261
  return cb();
1253
1262
  }
1254
1263
  // resuming a file upload
1255
1264
  const parts = {};
1256
1265
  let startPartNumber = 0;
1257
1266
  let validFileId = false;
1258
- async.whilst(function(wcb){
1267
+ async.whilst(function(wcb) {
1259
1268
  return wcb(null, startPartNumber !== null);
1260
- }, function(wcb){
1269
+ }, function(wcb) {
1261
1270
  const partsData = {
1262
1271
  fileId: data.largeFileId,
1263
1272
  maxPartCount: 1000,
1264
1273
  };
1265
- if(startPartNumber){
1274
+ if (startPartNumber) {
1266
1275
  partsData.startPartNumber = startPartNumber;
1267
1276
  }
1268
- self.listParts(partsData, function(err, results){
1269
- if(err){
1277
+ self.listParts(partsData, function(err, results) {
1278
+ if (err) {
1270
1279
  // failed to find the fileId or invalid fileId
1271
- if(results.status === 400 && data.ignoreFileIdError){
1280
+ if (results.status === 400 && data.ignoreFileIdError) {
1272
1281
  startPartNumber = null;
1273
1282
  return wcb();
1274
1283
  }
@@ -1277,14 +1286,14 @@ const b2CloudStorage = class {
1277
1286
  validFileId = true;
1278
1287
  startPartNumber = results.nextPartNumber; // will return null or the next number
1279
1288
  let partTrack = 1;
1280
- _.each(results.parts, function(part){
1281
- if(info.lastUploadedPart < part.partNumber){
1289
+ _.each(results.parts, function(part) {
1290
+ if (info.lastUploadedPart < part.partNumber) {
1282
1291
  info.lastUploadedPart = part.partNumber;
1283
1292
  }
1284
- if(partTrack !== part.partNumber){
1293
+ if (partTrack !== part.partNumber) {
1285
1294
  return;
1286
1295
  } // ignore gaps in upload, TODO: check for order?
1287
- if(info.lastConsecutivePart < part.partNumber){
1296
+ if (info.lastConsecutivePart < part.partNumber) {
1288
1297
  info.lastConsecutivePart = part.partNumber;
1289
1298
  }
1290
1299
  parts[part.partNumber] = part.contentLength;
@@ -1293,14 +1302,14 @@ const b2CloudStorage = class {
1293
1302
  });
1294
1303
  return wcb();
1295
1304
  });
1296
- }, function(err){
1297
- if(err){
1305
+ }, function(err) {
1306
+ if (err) {
1298
1307
  // TODO detect when invalid file ID, don't error
1299
1308
  return cb(err);
1300
1309
  }
1301
- if(validFileId){
1310
+ if (validFileId) {
1302
1311
  info.fileId = data.largeFileId;
1303
- if(data.onFileId && typeof(data.onFileId) === 'function'){
1312
+ if (data.onFileId && typeof(data.onFileId) === 'function') {
1304
1313
  data.onFileId(info.fileId);
1305
1314
  }
1306
1315
  info.uploadedParts = parts;
@@ -1309,10 +1318,13 @@ const b2CloudStorage = class {
1309
1318
  return cb();
1310
1319
  });
1311
1320
  },
1312
- function(cb){
1321
+ function(cb) {
1313
1322
  // check our parts
1314
- // todo: maybe tweak recommendedPartSize if the total number of chunks exceeds the total backblaze limit (10000)
1315
- const partSize = data.partSize || self.authData.recommendedPartSize;
1323
+ let partSize = data.partSize || self.authData.recommendedPartSize;
1324
+ const minPartSize = Math.ceil(data.size / 10000);
1325
+ if (minPartSize > partSize) {
1326
+ partSize = minPartSize;
1327
+ }
1316
1328
 
1317
1329
  // track the current chunk
1318
1330
  const partTemplate = {
@@ -1325,29 +1337,29 @@ const b2CloudStorage = class {
1325
1337
  info.chunks = [];
1326
1338
  info.lastPart = 1;
1327
1339
  let chunkError = null;
1328
- while(!chunkError && data.size > partTemplate.end){
1340
+ while (!chunkError && data.size > partTemplate.end) {
1329
1341
  partTemplate.part++;
1330
1342
 
1331
1343
  let currentPartSize = partSize; // default to recommended size
1332
1344
  // check previously uploaded parts
1333
- if(info.uploadedParts[partTemplate.part]){
1345
+ if (info.uploadedParts[partTemplate.part]) {
1334
1346
  currentPartSize = info.uploadedParts[partTemplate.part];
1335
1347
  }
1336
1348
  // calculates at least how big each chunk has to be to fit into the chunks previously uploaded
1337
1349
  // we don't know the start/end of those chunks and they MUST be overwritten
1338
- if(partTemplate.part > info.lastConsecutivePart && partTemplate.part < info.lastUploadedPart){
1339
- if(!info.missingPartSize){
1350
+ if (partTemplate.part > info.lastConsecutivePart && partTemplate.part < info.lastUploadedPart) {
1351
+ if (!info.missingPartSize) {
1340
1352
  const accountedForParts = partTemplate.end + 1; // last uploaded part
1341
1353
  info.missingPartSize = Math.ceil((data.size - accountedForParts) / (info.lastUploadedPart - info.lastConsecutivePart));
1342
1354
  // if this exceeds the recommended size, we can lower the part size and write more chunks after the
1343
1355
  // higher number of chunks previously uploaded
1344
- if(info.missingPartSize > partSize){
1356
+ if (info.missingPartSize > partSize) {
1345
1357
  info.missingPartSize = partSize;
1346
1358
  }
1347
1359
  }
1348
1360
  currentPartSize = info.missingPartSize;
1349
1361
  }
1350
- if(currentPartSize <= 0){
1362
+ if (currentPartSize <= 0) {
1351
1363
  chunkError = new Error('B2 part size cannot be zero');
1352
1364
  chunkError.chunk = partTemplate;
1353
1365
  break;
@@ -1355,42 +1367,42 @@ const b2CloudStorage = class {
1355
1367
 
1356
1368
  partTemplate.end += currentPartSize; // minus 1 to prevent overlapping chunks
1357
1369
  // check for end of file, adjust part size
1358
- if(partTemplate.end + 1 >= data.size){
1370
+ if (partTemplate.end + 1 >= data.size) {
1359
1371
  // calculate the part size with the remainder
1360
1372
  // started with -1, so needs to be padded to prevent off by 1 errors
1361
1373
  currentPartSize = currentPartSize - (partTemplate.end + 1 - data.size);
1362
- partTemplate.end = data.size;
1374
+ partTemplate.end = data.size - 1;
1363
1375
  }
1364
1376
  partTemplate.start += partTemplate.size; // last part size
1365
1377
  partTemplate.size = currentPartSize;
1366
- if(partTemplate.part === 1){
1378
+ if (partTemplate.part === 1) {
1367
1379
  partTemplate.start = 0;
1368
1380
  }
1369
- if(partTemplate.size > partSize){
1381
+ if (partTemplate.size > partSize) {
1370
1382
  chunkError = new Error('B2 part size overflows maximum recommended chunk to resume upload.');
1371
1383
  chunkError.chunk = partTemplate;
1372
1384
  break;
1373
1385
  }
1374
- if(info.lastPart < partTemplate.part){
1386
+ if (info.lastPart < partTemplate.part) {
1375
1387
  info.lastPart = partTemplate.part;
1376
1388
  }
1377
1389
  info.chunks.push(_.clone(partTemplate));
1378
1390
  }
1379
- return process.nextTick(function(){
1380
- if(chunkError){
1391
+ return process.nextTick(function() {
1392
+ if (chunkError) {
1381
1393
  return cb(chunkError);
1382
1394
  }
1383
1395
  return cb();
1384
1396
  });
1385
1397
  },
1386
- function(cb){
1387
- if(info.fileId){
1398
+ function(cb) {
1399
+ if (info.fileId) {
1388
1400
  return cb();
1389
1401
  }
1390
- let fileInfo = _.defaults({
1402
+ let fileInfo = _.defaults(data.hash ? {
1391
1403
  large_file_sha1: data.hash,
1392
1404
  hash_sha1: data.hash,
1393
- }, data.info, {
1405
+ } : {}, data.info, {
1394
1406
  src_last_modified_millis: data.stat.mtime.getTime(),
1395
1407
  });
1396
1408
  fileInfo = _.mapValues(fileInfo, _.toString);
@@ -1404,39 +1416,41 @@ const b2CloudStorage = class {
1404
1416
  fileInfo: fileInfo,
1405
1417
  },
1406
1418
  }, (err, results) => {
1407
- if(err){
1419
+ if (err) {
1408
1420
  return cb(err);
1409
1421
  }
1410
1422
  info.fileId = results.fileId;
1411
- if(data.onFileId && typeof(data.onFileId) === 'function'){
1423
+ if (data.onFileId && typeof(data.onFileId) === 'function') {
1412
1424
  data.onFileId(info.fileId);
1413
1425
  }
1414
1426
  return cb();
1415
1427
  });
1416
1428
  },
1417
- function(cb){
1418
- async.times(data.limit, function(num, next){
1429
+ function(cb) {
1430
+ async.times(data.limit, function(num, next) {
1419
1431
  return generateUploadURL(num, next);
1420
1432
  }, cb);
1421
1433
  },
1422
- function(cb){
1434
+ function(cb) {
1423
1435
  info.totalUploaded = 0;
1424
1436
 
1425
1437
  let queue = null; // initialise queue to avoid no-use-before-define eslint error
1426
- const reQueue = function(task, incrementCount = true){
1427
- if(incrementCount){
1438
+ const reQueue = function(task, incrementCount = true) {
1439
+ if (incrementCount) {
1428
1440
  task.attempts++;
1429
1441
  }
1430
1442
  queue.push(task);
1431
1443
  };
1432
- queue = async.queue(function(task, queueCB){
1444
+ queue = async.queue(function(task, queueCB) {
1445
+ queueCB = _.once(queueCB);
1446
+
1433
1447
  // if the queue has already errored, just callback immediately
1434
- if(info.error){
1448
+ if (info.error) {
1435
1449
  return process.nextTick(queueCB);
1436
1450
  }
1437
1451
 
1438
1452
  // check for previously uploaded
1439
- if(info.uploadedParts[task.part]){
1453
+ if (info.uploadedParts[task.part]) {
1440
1454
  // already uploaded
1441
1455
  info.totalUploaded += task.size;
1442
1456
  return process.nextTick(queueCB);
@@ -1446,122 +1460,168 @@ const b2CloudStorage = class {
1446
1460
  // re-queue if no url found (shouldn't ever happen)
1447
1461
  let url = null;
1448
1462
  let urlIndex = null;
1449
- for(const key in info.upload_urls){
1450
- if(url){ break; }
1451
- if(info.upload_urls[key].in_use === false){
1463
+ for (const key in info.upload_urls) {
1464
+ if (url) { break; }
1465
+ if (info.upload_urls[key].in_use === false) {
1452
1466
  url = info.upload_urls[key];
1453
1467
  urlIndex = key;
1454
1468
  }
1455
1469
  }
1456
- if(!urlIndex || !url){
1470
+ if (!urlIndex || !url) {
1457
1471
  return reQueue(task, false);
1458
1472
  }
1459
1473
  url.in_use = true;
1460
-
1461
- // create file hash stream
1462
- const hashStream = fs.createReadStream(filename, {
1474
+ url.request = null;
1475
+
1476
+ // single-read: hash the part data while streaming the upload, then verify against B2's response
1477
+ const sha1 = crypto.createHash('sha1');
1478
+ const hashTransform = new Transform({
1479
+ transform(chunk, encoding, cb) {
1480
+ sha1.update(chunk);
1481
+ cb(null, chunk);
1482
+ },
1483
+ });
1484
+ const fileStream = fs.createReadStream(filename, {
1463
1485
  start: task.start,
1464
1486
  end: task.end,
1465
1487
  encoding: null,
1466
1488
  });
1467
1489
 
1468
- // get hash
1469
- self.getHash(hashStream, function(err, hash){
1470
- // if hash fails, error if exceeded max attempts, else requeue
1471
- if(err){
1472
- url.in_use = false;
1473
- if(task.attempts > self.maxPartAttempts || info.totalErrors > self.maxTotalErrors){
1474
- info.error = err;
1475
- return queueCB(err);
1476
- }
1477
- info.totalErrors++;
1478
- reQueue(task);
1479
- return queueCB();
1490
+ let streamErrorHandled = false;
1491
+ const cleanupStreams = function() {
1492
+ fileStream.destroy();
1493
+ hashTransform.destroy();
1494
+ if (url.request && url.request.abort) {
1495
+ url.request.abort();
1480
1496
  }
1497
+ url.in_use = false;
1498
+ url.request = null;
1499
+ };
1481
1500
 
1482
- // create file stream for upload
1483
- const fileStream = fs.createReadStream(filename, {
1484
- start: task.start,
1485
- end: task.end,
1486
- encoding: null,
1487
- });
1488
- queueCB = _.once(queueCB);
1489
- const reqOptions = {
1490
- apiUrl: url.uploadUrl,
1491
- appendPath: false,
1492
- method: 'POST',
1493
- json: false,
1494
- headers: {
1495
- 'Authorization': url.authorizationToken,
1496
- 'X-Bz-Part-Number': task.part,
1497
- 'X-Bz-Content-Sha1': hash,
1498
- 'Content-Length': task.size,
1499
- },
1500
- body: fileStream,
1501
+ const handleStreamError = function(err) {
1502
+ if (streamErrorHandled) { return; }
1503
+ streamErrorHandled = true;
1504
+ cleanupStreams();
1505
+ info.totalErrors++;
1506
+ if (task.attempts > self.maxPartAttempts || info.totalErrors >= self.maxTotalErrors) {
1507
+ info.error = err;
1508
+ return queueCB(err);
1509
+ }
1510
+ reQueue(task);
1511
+ return queueCB();
1512
+ };
1513
+
1514
+ fileStream.on('error', handleStreamError);
1515
+ hashTransform.on('error', handleStreamError);
1516
+ fileStream.pipe(hashTransform);
1517
+
1518
+ const reqOptions = {
1519
+ apiUrl: url.uploadUrl,
1520
+ appendPath: false,
1521
+ method: 'POST',
1522
+ json: false,
1523
+ headers: {
1524
+ 'Authorization': url.authorizationToken,
1525
+ 'X-Bz-Part-Number': task.part,
1526
+ 'X-Bz-Content-Sha1': 'do_not_verify',
1527
+ 'Content-Length': task.size,
1528
+ },
1529
+ body: hashTransform,
1530
+ };
1531
+ if (data.testMode) {
1532
+ reqOptions.headers['X-Bz-Test-Mode'] = data.testMode;
1533
+ }
1534
+ url.request = self.request(reqOptions, function(err, body, res) {
1535
+ // release upload url
1536
+ url.in_use = false;
1537
+ url.request = null;
1538
+
1539
+ const retry = function() {
1540
+ return generateUploadURL(urlIndex, function(err) {
1541
+ // if we're unable to get an upload URL from B2, we can't attempt to retry
1542
+ if (err) { return queueCB(err); }
1543
+ reQueue(task);
1544
+ return queueCB();
1545
+ });
1501
1546
  };
1502
- if(data.testMode){
1503
- reqOptions.headers['X-Bz-Test-Mode'] = data.testMode;
1547
+ // if upload fails, error if exceeded max attempts, else requeue
1548
+ if (err) {
1549
+ if (!streamErrorHandled) {
1550
+ info.totalErrors++;
1551
+ }
1552
+ // fail immediately if max errors exceeded
1553
+ if (task.attempts > self.maxPartAttempts || info.totalErrors >= self.maxTotalErrors) {
1554
+ info.error = err;
1555
+ return queueCB(err);
1556
+ }
1557
+ // handle connection failures that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1558
+ if (err.code === 'EPIPE' || err.code === 'ETIMEDOUT' || err.code === 'ESOCKETTIMEDOUT') {
1559
+ return retry();
1560
+ }
1561
+ // handle status codes that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1562
+ if (res && (res.statusCode === 408 || (res.statusCode >= 500 && res.statusCode <= 599))) {
1563
+ return retry();
1564
+ }
1565
+ return queueCB(err);
1504
1566
  }
1505
- url.request = self.request(reqOptions, function(err, body, res){
1506
- // release upload url
1507
- url.in_use = false;
1508
- url.request = null;
1509
-
1510
- const retry = function(){
1511
- return generateUploadURL(urlIndex, function(err){
1512
- // if we're unable to get an upload URL from B2, we can't attempt to retry
1513
- if(err){ return queueCB(err); }
1514
- reQueue(task);
1515
- return queueCB();
1516
- });
1517
- };
1518
- // if upload fails, error if exceeded max attempts, else requeue
1519
- if(err){
1520
- // handle connection failures that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1567
+ // verify locally computed hash matches B2's response
1568
+ if (typeof body === 'string') {
1569
+ try {
1570
+ body = JSON.parse(body);
1571
+ } catch {
1521
1572
  info.totalErrors++;
1522
- if(err.code === 'EPIPE' || err.code === 'ETIMEDOUT' || err.code === 'ESOCKETTIMEDOUT'){
1523
- return retry();
1524
- }
1525
- // handle status codes that should trigger a retry (https://www.backblaze.com/b2/docs/integration_checklist.html)
1526
- if(res && (res.statusCode === 408 || (res.statusCode >= 500 && res.statusCode <= 599))){
1527
- return retry();
1528
- }
1529
- // push back to queue
1530
- if(task.attempts > self.maxPartAttempts || info.totalErrors > self.maxTotalErrors){
1531
- info.error = err;
1532
- return queueCB(err);
1573
+ const parseErr = new Error('Failed to parse B2 upload response as JSON');
1574
+ if (task.attempts > self.maxPartAttempts || info.totalErrors >= self.maxTotalErrors) {
1575
+ info.error = parseErr;
1576
+ return queueCB(parseErr);
1533
1577
  }
1534
- return queueCB(err);
1578
+ reQueue(task);
1579
+ return queueCB();
1535
1580
  }
1536
- info.shaParts[task.part] = hash;
1537
- info.totalUploaded += task.size;
1581
+ }
1582
+ const localHash = sha1.digest('hex');
1583
+ const remoteHash = body && body.contentSha1;
1584
+ if (!remoteHash || (remoteHash !== 'do_not_verify' && remoteHash !== localHash)) {
1585
+ info.totalErrors++;
1586
+ if (task.attempts > self.maxPartAttempts || info.totalErrors >= self.maxTotalErrors) {
1587
+ const hashErr = !remoteHash
1588
+ ? new Error('B2 response missing contentSha1 for hash verification')
1589
+ : new Error('SHA1 mismatch: local ' + localHash + ' != remote ' + remoteHash);
1590
+ info.error = hashErr;
1591
+ return queueCB(hashErr);
1592
+ }
1593
+ reQueue(task);
1538
1594
  return queueCB();
1539
- }).on('error', () => {
1540
- // do nothing. Error is handled by callback above, but we need(?) to catch this to prevent it throwing
1541
- }).on('abort', () => queueCB());
1542
- });
1595
+ }
1596
+ info.shaParts[task.part] = localHash;
1597
+ info.totalUploaded += task.size;
1598
+ return queueCB();
1599
+ }).on('error', () => {
1600
+ // Error is handled by the request callback with proper retry logic.
1601
+ // This handler only prevents unhandled 'error' event crashes.
1602
+ }).on('abort', () => queueCB());
1543
1603
  }, _.size(info.upload_urls));
1544
1604
 
1545
1605
  // callback when queue has completed
1546
- queue.drain(function(){
1606
+ queue.drain(function() {
1547
1607
  clearInterval(interval);
1548
- if(info.error){
1608
+ if (info.error) {
1549
1609
  return cb();
1550
1610
  }
1551
1611
  info.partSha1Array = [];
1552
1612
  let i = 1;
1553
- while(i <= info.lastPart){
1613
+ while (i <= info.lastPart) {
1554
1614
  info.partSha1Array.push(info.shaParts[i++]);
1555
1615
  }
1556
1616
  return cb();
1557
1617
  });
1558
- interval = setInterval(function(){
1559
- if(!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function'){
1618
+ interval = setInterval(function() {
1619
+ if (!data.onUploadProgress || typeof(data.onUploadProgress) !== 'function') {
1560
1620
  return;
1561
1621
  }
1562
1622
  let bytesDispatched = 0;
1563
- bytesDispatched = _.sumBy(Object.values(info.upload_urls), function(url){
1564
- if(url && url.request && url.request.req && url.request.req.connection && url.request.req.connection._bytesDispatched){
1623
+ bytesDispatched = _.sumBy(Object.values(info.upload_urls), function(url) {
1624
+ if (url && url.request && url.request.req && url.request.req.connection && url.request.req.connection._bytesDispatched) {
1565
1625
  return url.request.req.connection._bytesDispatched;
1566
1626
  }
1567
1627
  return 0;
@@ -1577,13 +1637,13 @@ const b2CloudStorage = class {
1577
1637
 
1578
1638
  queue.push(info.chunks);
1579
1639
  },
1580
- function(cb){
1581
- if(interval){
1640
+ function(cb) {
1641
+ if (interval) {
1582
1642
  clearInterval(interval);
1583
1643
  }
1584
1644
 
1585
1645
  // cleanup large file upload if error occurred
1586
- if(!info.error){
1646
+ if (!info.error) {
1587
1647
  return cb();
1588
1648
  }
1589
1649
 
@@ -1595,8 +1655,8 @@ const b2CloudStorage = class {
1595
1655
  },
1596
1656
  }, cb);
1597
1657
  },
1598
- function(cb){
1599
- if(info.error){
1658
+ function(cb) {
1659
+ if (info.error) {
1600
1660
  return cb(info.error);
1601
1661
  }
1602
1662
  self.request({
@@ -1606,37 +1666,37 @@ const b2CloudStorage = class {
1606
1666
  fileId: info.fileId,
1607
1667
  partSha1Array: info.partSha1Array,
1608
1668
  },
1609
- }, function(err, results){
1610
- if(err){
1669
+ }, function(err, results) {
1670
+ if (err) {
1611
1671
  return cb(err);
1612
1672
  }
1613
1673
  info.returnData = results;
1614
1674
  return cb();
1615
1675
  });
1616
1676
  },
1617
- ], function(err){
1618
- if(interval){
1677
+ ], function(err) {
1678
+ if (interval) {
1619
1679
  clearInterval(interval);
1620
1680
  }
1621
- if(err || info.error){
1681
+ if (err || info.error) {
1622
1682
  return callback(err || info.error);
1623
1683
  }
1624
1684
  return callback(null, info.returnData);
1625
1685
  });
1626
1686
  return {
1627
- cancel: function(){
1687
+ cancel: function() {
1628
1688
  info.error = new Error('B2 upload canceled');
1629
- _.each(info.upload_urls, function(url){
1630
- if(url.request && url.request.abort){
1689
+ _.each(info.upload_urls, function(url) {
1690
+ if (url.request && url.request.abort) {
1631
1691
  url.request.abort();
1632
1692
  }
1633
1693
  });
1634
1694
  },
1635
- progress: function(){
1695
+ progress: function() {
1636
1696
  return info.progress;
1637
1697
  },
1638
- info: function(){
1639
- if(info.returnData){
1698
+ info: function() {
1699
+ if (info.returnData) {
1640
1700
  return info.returnData;
1641
1701
  }
1642
1702
  return {
@@ -1647,4 +1707,4 @@ const b2CloudStorage = class {
1647
1707
  }
1648
1708
  };
1649
1709
 
1650
- module.exports = b2CloudStorage;
1710
+ module.exports = b2CloudStorage;