files.com 1.0.413 → 1.0.415

package/_VERSION CHANGED
@@ -1 +1 @@
- 1.0.413
+ 1.0.415
package/lib/Api.js CHANGED
@@ -28,11 +28,14 @@ var _fetchWithTimeout = function _fetchWithTimeout(url) {
  var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},
  timeoutSecs = _ref.timeoutSecs,
  options = (0, _objectWithoutProperties2.default)(_ref, _excluded);
+ var timeoutId;
  return timeoutSecs <= 0 ? (0, _crossFetch.default)(url, options) : Promise.race([(0, _crossFetch.default)(url, options), new Promise(function (_, reject) {
- setTimeout(function () {
+ timeoutId = setTimeout(function () {
  return reject(new errors.FilesError('Request timed out'));
  }, timeoutSecs * 1000);
- })]);
+ })]).finally(function () {
+ return clearTimeout(timeoutId);
+ });
  };
  var fetchWithRetry = /*#__PURE__*/function () {
  var _ref2 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee(url, options) {
package/lib/Files.js CHANGED
@@ -11,7 +11,7 @@ var endpointPrefix = '/api/rest/v1';
  var apiKey;
  var baseUrl = 'https://app.files.com';
  var sessionId = null;
- var version = "1.0.413";
+ var version = "1.0.415";
  var userAgent = "Files.com JavaScript SDK v".concat(version);
  var logLevel = _Logger.LogLevel.INFO;
  var debugRequest = false;
@@ -936,118 +936,165 @@ _class = File;
  var chunks = [];
  var length = 0;
  var concurrentUploads = [];
+ var chunkBuffer = null;
+ var streamEnded = false;
+ var handleStreamEnd = /*#__PURE__*/function () {
+ var _ref19 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee15() {
+ var _options$getAgentForU, buffer, nextFileUploadPart, upload_uri, agent, response, createdFile;
+ return _regenerator.default.wrap(function _callee15$(_context15) {
+ while (1) switch (_context15.prev = _context15.next) {
+ case 0:
+ if (!(chunkBuffer !== null || !streamEnded)) {
+ _context15.next = 2;
+ break;
+ }
+ return _context15.abrupt("return");
+ case 2:
+ _context15.prev = 2;
+ if (!(chunks.length > 0)) {
+ _context15.next = 11;
+ break;
+ }
+ buffer = _safeBuffer.Buffer.concat(chunks);
+ _context15.next = 7;
+ return _class._continueUpload(destinationPath, ++part, firstFileUploadPart, options);
+ case 7:
+ nextFileUploadPart = _context15.sent;
+ upload_uri = determinePartUploadUri(nextFileUploadPart); // instantiate an httpsAgent dynamically if needed
+ agent = ((_options$getAgentForU = options.getAgentForUrl) === null || _options$getAgentForU === void 0 ? void 0 : _options$getAgentForU.call(options, upload_uri)) || (options === null || options === void 0 ? void 0 : options.agent);
+ concurrentUploads.push(_Api.default.sendFilePart(upload_uri, 'PUT', buffer, {
+ agent: agent
+ }));
+ case 11:
+ _context15.next = 13;
+ return Promise.all(concurrentUploads);
+ case 13:
+ _context15.next = 15;
+ return _class._completeUpload(firstFileUploadPart, options);
+ case 15:
+ response = _context15.sent;
+ createdFile = new _class(response.data, options);
+ resolve(createdFile);
+ _context15.next = 23;
+ break;
+ case 20:
+ _context15.prev = 20;
+ _context15.t0 = _context15["catch"](2);
+ reject(_context15.t0);
+ case 23:
+ case "end":
+ return _context15.stop();
+ }
+ }, _callee15, null, [[2, 20]]);
+ }));
+ return function handleStreamEnd() {
+ return _ref19.apply(this, arguments);
+ };
+ }();
  readableStream.on('error', function (error) {
  reject(error);
  });
+
+ // note that for a network stream, each chunk is typically less than partsize * 2, but
+ // if a stream has been created based on very large data, it's possible for a chunk to
+ // contain the entire file and we could get a single chunk with length >= partsize * 3
  readableStream.on('data', /*#__PURE__*/function () {
- var _ref19 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee15(chunk) {
- var nextLength, excessLength, chunkBuffer, _options$getAgentForU, tailLength, lastChunkForPart, firstChunkForNextPart, buffer, nextFileUploadPart, upload_uri, agent, uploadPromise;
- return _regenerator.default.wrap(function _callee15$(_context15) {
- while (1) switch (_context15.prev = _context15.next) {
+ var _ref20 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee16(chunk) {
+ var excessLength, _options$getAgentForU2, lengthForEndOfCurrentPart, lastChunkForCurrentPart, chunkBufferAfterCurrentPart, buffer, nextFileUploadPart, upload_uri, agent, uploadPromise, isNextChunkAtLeastOnePart;
+ return _regenerator.default.wrap(function _callee16$(_context16) {
+ while (1) switch (_context16.prev = _context16.next) {
  case 0:
- _context15.prev = 0;
- nextLength = length + chunk.length;
- excessLength = nextLength - firstFileUploadPart.partsize;
+ _context16.prev = 0;
+ excessLength = length + chunk.length - firstFileUploadPart.partsize;
  chunkBuffer = _safeBuffer.Buffer.from(chunk);
  if (!(excessLength > 0)) {
- _context15.next = 28;
+ _context16.next = 30;
  break;
  }
  readableStream.pause();
-
+ case 5:
+ if (!chunkBuffer) {
+ _context16.next = 27;
+ break;
+ }
  // the amount to append this last part with to make it exactly the full partsize
- tailLength = chunkBuffer.length - excessLength;
- lastChunkForPart = chunkBuffer.subarray(0, tailLength);
- firstChunkForNextPart = chunkBuffer.subarray(tailLength);
- chunks.push(lastChunkForPart);
+ lengthForEndOfCurrentPart = chunkBuffer.length - excessLength;
+ lastChunkForCurrentPart = chunkBuffer.subarray(0, lengthForEndOfCurrentPart);
+ chunkBufferAfterCurrentPart = chunkBuffer.subarray(lengthForEndOfCurrentPart);
+ chunks.push(lastChunkForCurrentPart);
  buffer = _safeBuffer.Buffer.concat(chunks);
- _context15.next = 13;
+ _context16.next = 13;
  return _class._continueUpload(destinationPath, ++part, firstFileUploadPart, options);
  case 13:
- nextFileUploadPart = _context15.sent;
+ nextFileUploadPart = _context16.sent;
  upload_uri = determinePartUploadUri(nextFileUploadPart); // instantiate an httpsAgent dynamically if needed
- agent = ((_options$getAgentForU = options.getAgentForUrl) === null || _options$getAgentForU === void 0 ? void 0 : _options$getAgentForU.call(options, upload_uri)) || (options === null || options === void 0 ? void 0 : options.agent);
+ agent = ((_options$getAgentForU2 = options.getAgentForUrl) === null || _options$getAgentForU2 === void 0 ? void 0 : _options$getAgentForU2.call(options, upload_uri)) || (options === null || options === void 0 ? void 0 : options.agent);
  uploadPromise = _Api.default.sendFilePart(upload_uri, 'PUT', buffer, {
  agent: agent
  });
  if (!firstFileUploadPart.parallel_parts) {
- _context15.next = 21;
+ _context16.next = 21;
  break;
  }
  concurrentUploads.push(uploadPromise);
- _context15.next = 23;
+ _context16.next = 23;
  break;
  case 21:
- _context15.next = 23;
+ _context16.next = 23;
  return uploadPromise;
  case 23:
- chunks = [firstChunkForNextPart];
- length = firstChunkForNextPart.length;
+ // determine if the remainder of the excess chunk data is too large to be a single part
+ isNextChunkAtLeastOnePart = chunkBufferAfterCurrentPart.length >= firstFileUploadPart.partsize; // the excess data contains >= 1 full part, so we'll loop again to enqueue
+ // the next part for upload and continue processing any excess beyond that
+ if (isNextChunkAtLeastOnePart) {
+ chunks = [];
+ length = 0;
+ chunkBuffer = chunkBufferAfterCurrentPart;
+ excessLength = chunkBuffer.length - firstFileUploadPart.partsize;
+ // the excess data is less than a full part, so we'll enqueue it
+ } else if (chunkBufferAfterCurrentPart.length > 0) {
+ chunks = [chunkBufferAfterCurrentPart];
+ length = chunkBufferAfterCurrentPart.length;
+ chunkBuffer = null;
+ } else {
+ chunkBuffer = null;
+ }
+ _context16.next = 5;
+ break;
+ case 27:
  readableStream.resume();
- _context15.next = 30;
+ _context16.next = 33;
  break;
- case 28:
+ case 30:
  chunks.push(chunkBuffer);
  length += chunk.length;
- case 30:
- _context15.next = 35;
+ chunkBuffer = null;
+ case 33:
+ if (streamEnded) {
+ handleStreamEnd();
+ }
+ _context16.next = 39;
  break;
- case 32:
- _context15.prev = 32;
- _context15.t0 = _context15["catch"](0);
- reject(_context15.t0);
- case 35:
+ case 36:
+ _context16.prev = 36;
+ _context16.t0 = _context16["catch"](0);
+ reject(_context16.t0);
+ case 39:
  case "end":
- return _context15.stop();
+ return _context16.stop();
  }
- }, _callee15, null, [[0, 32]]);
+ }, _callee16, null, [[0, 36]]);
  }));
  return function (_x20) {
- return _ref19.apply(this, arguments);
+ return _ref20.apply(this, arguments);
  };
  }());
- readableStream.on('end', /*#__PURE__*/(0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee16() {
- var _options$getAgentForU2, buffer, nextFileUploadPart, upload_uri, agent, response, createdFile;
- return _regenerator.default.wrap(function _callee16$(_context16) {
- while (1) switch (_context16.prev = _context16.next) {
- case 0:
- _context16.prev = 0;
- if (!(chunks.length > 0)) {
- _context16.next = 9;
- break;
- }
- buffer = _safeBuffer.Buffer.concat(chunks);
- _context16.next = 5;
- return _class._continueUpload(destinationPath, ++part, firstFileUploadPart, options);
- case 5:
- nextFileUploadPart = _context16.sent;
- upload_uri = determinePartUploadUri(nextFileUploadPart); // instantiate an httpsAgent dynamically if needed
- agent = ((_options$getAgentForU2 = options.getAgentForUrl) === null || _options$getAgentForU2 === void 0 ? void 0 : _options$getAgentForU2.call(options, upload_uri)) || (options === null || options === void 0 ? void 0 : options.agent);
- concurrentUploads.push(_Api.default.sendFilePart(upload_uri, 'PUT', buffer, {
- agent: agent
- }));
- case 9:
- _context16.next = 11;
- return Promise.all(concurrentUploads);
- case 11:
- _context16.next = 13;
- return _class._completeUpload(firstFileUploadPart, options);
- case 13:
- response = _context16.sent;
- createdFile = new _class(response.data, options);
- resolve(createdFile);
- _context16.next = 21;
- break;
- case 18:
- _context16.prev = 18;
- _context16.t0 = _context16["catch"](0);
- reject(_context16.t0);
- case 21:
- case "end":
- return _context16.stop();
- }
- }, _callee16, null, [[0, 18]]);
- })));
+
+ // note that this event may occur while there is still data being processed above
+ readableStream.on('end', function () {
+ streamEnded = true;
+ handleStreamEnd();
+ });
  });
  case 10:
  file = _context17.sent;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "files.com",
- "version": "1.0.413",
+ "version": "1.0.415",
  "description": "Files.com SDK for JavaScript",
  "keywords": [
  "files.com",
package/src/Api.js CHANGED
@@ -5,15 +5,17 @@ import * as errors from './Errors'
  import Logger from './Logger'
  import { isEmpty, isObject } from './utils'

- const _fetchWithTimeout = (url, { timeoutSecs, ...options } = {}) =>
- timeoutSecs <= 0
+ const _fetchWithTimeout = (url, { timeoutSecs, ...options } = {}) => {
+ let timeoutId
+ return timeoutSecs <= 0
  ? fetch(url, options)
  : Promise.race([
  fetch(url, options),
  new Promise((_, reject) => {
- setTimeout(() => reject(new errors.FilesError('Request timed out')), timeoutSecs * 1000)
+ timeoutId = setTimeout(() => reject(new errors.FilesError('Request timed out')), timeoutSecs * 1000)
  })
- ])
+ ]).finally(() => clearTimeout(timeoutId))
+ }

  const fetchWithRetry = async (url, options, retries = 0) => {
  const maxRetries = Files.getMaxNetworkRetries()
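The change to _fetchWithTimeout keeps the timeout rejection but clears the timer once the race settles, so a request that finishes before the deadline no longer leaves a pending setTimeout behind. A minimal, self-contained sketch of the same pattern, using the global fetch and a plain Error in place of the SDK's cross-fetch wrapper and FilesError:

const fetchWithTimeout = (url, { timeoutSecs, ...options } = {}) => {
  let timeoutId
  return timeoutSecs <= 0
    ? fetch(url, options)
    : Promise.race([
      fetch(url, options),
      new Promise((_, reject) => {
        // reject the race if the request is still pending when the deadline hits
        timeoutId = setTimeout(() => reject(new Error('Request timed out')), timeoutSecs * 1000)
      })
      // whichever branch of the race settles first, clear the timer so it cannot fire later
    ]).finally(() => clearTimeout(timeoutId))
}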
package/src/Files.js CHANGED
@@ -5,7 +5,7 @@ const endpointPrefix = '/api/rest/v1'
  let apiKey
  let baseUrl = 'https://app.files.com'
  let sessionId = null
- let version = "1.0.413"
+ let version = "1.0.415"
  let userAgent = `Files.com JavaScript SDK v${version}`

  let logLevel = LogLevel.INFO
@@ -97,79 +97,121 @@ class File {
  let length = 0
  const concurrentUploads = []

+ let chunkBuffer = null
+ let streamEnded = false
+
+ const handleStreamEnd = async () => {
+ if (chunkBuffer !== null || !streamEnded) {
+ return
+ }
+
+ try {
+ if (chunks.length > 0) {
+ const buffer = Buffer.concat(chunks)
+ const nextFileUploadPart = await File._continueUpload(destinationPath, ++part, firstFileUploadPart, options)
+
+ const upload_uri = determinePartUploadUri(nextFileUploadPart)
+
+ // instantiate an httpsAgent dynamically if needed
+ const agent = options.getAgentForUrl?.(upload_uri) || options?.agent
+
+ concurrentUploads.push(Api.sendFilePart(upload_uri, 'PUT', buffer, { agent }))
+ }
+
+ await Promise.all(concurrentUploads)
+
+ const response = await File._completeUpload(firstFileUploadPart, options)
+ const createdFile = new File(response.data, options)
+
+ resolve(createdFile)
+ } catch (error) {
+ reject(error)
+ }
+ }
+
  readableStream.on('error', error => { reject(error) })

+ // note that for a network stream, each chunk is typically less than partsize * 2, but
+ // if a stream has been created based on very large data, it's possible for a chunk to
+ // contain the entire file and we could get a single chunk with length >= partsize * 3
  readableStream.on('data', async chunk => {
  try {
- const nextLength = length + chunk.length
- const excessLength = nextLength - firstFileUploadPart.partsize
+ let excessLength = (length + chunk.length) - firstFileUploadPart.partsize

- const chunkBuffer = Buffer.from(chunk)
+ chunkBuffer = Buffer.from(chunk)

  if (excessLength > 0) {
  readableStream.pause()

- // the amount to append this last part with to make it exactly the full partsize
- const tailLength = chunkBuffer.length - excessLength
+ while (chunkBuffer) {
+ // the amount to append this last part with to make it exactly the full partsize
+ const lengthForEndOfCurrentPart = chunkBuffer.length - excessLength

- const lastChunkForPart = chunkBuffer.subarray(0, tailLength)
- const firstChunkForNextPart = chunkBuffer.subarray(tailLength)
+ const lastChunkForCurrentPart = chunkBuffer.subarray(0, lengthForEndOfCurrentPart)
+ const chunkBufferAfterCurrentPart = chunkBuffer.subarray(lengthForEndOfCurrentPart)

- chunks.push(lastChunkForPart)
+ chunks.push(lastChunkForCurrentPart)

- const buffer = Buffer.concat(chunks)
- const nextFileUploadPart = await File._continueUpload(destinationPath, ++part, firstFileUploadPart, options)
+ const buffer = Buffer.concat(chunks)
+ const nextFileUploadPart = await File._continueUpload(destinationPath, ++part, firstFileUploadPart, options)

- const upload_uri = determinePartUploadUri(nextFileUploadPart)
+ const upload_uri = determinePartUploadUri(nextFileUploadPart)

- // instantiate an httpsAgent dynamically if needed
- const agent = options.getAgentForUrl?.(upload_uri) || options?.agent
+ // instantiate an httpsAgent dynamically if needed
+ const agent = options.getAgentForUrl?.(upload_uri) || options?.agent

- const uploadPromise = Api.sendFilePart(upload_uri, 'PUT', buffer, { agent })
+ const uploadPromise = Api.sendFilePart(upload_uri, 'PUT', buffer, { agent })

- if (firstFileUploadPart.parallel_parts) {
- concurrentUploads.push(uploadPromise)
- } else {
- await uploadPromise
- }
+ if (firstFileUploadPart.parallel_parts) {
+ concurrentUploads.push(uploadPromise)
+ } else {
+ await uploadPromise
+ }
+
+ // determine if the remainder of the excess chunk data is too large to be a single part
+ const isNextChunkAtLeastOnePart = chunkBufferAfterCurrentPart.length >= firstFileUploadPart.partsize
+
+ // the excess data contains >= 1 full part, so we'll loop again to enqueue
+ // the next part for upload and continue processing any excess beyond that
+ if (isNextChunkAtLeastOnePart) {
+ chunks = []
+ length = 0

- chunks = [firstChunkForNextPart]
- length = firstChunkForNextPart.length
+ chunkBuffer = chunkBufferAfterCurrentPart
+ excessLength = chunkBuffer.length - firstFileUploadPart.partsize
+ // the excess data is less than a full part, so we'll enqueue it
+ } else if (chunkBufferAfterCurrentPart.length > 0) {
+ chunks = [chunkBufferAfterCurrentPart]
+ length = chunkBufferAfterCurrentPart.length
+
+ chunkBuffer = null
+ } else {
+ chunkBuffer = null
+ }
+ }

  readableStream.resume()
  } else {
  chunks.push(chunkBuffer)
  length += chunk.length
- }
- } catch (error) {
- reject(error)
- }
- })
-
- readableStream.on('end', async () => {
- try {
- if (chunks.length > 0) {
- const buffer = Buffer.concat(chunks)
- const nextFileUploadPart = await File._continueUpload(destinationPath, ++part, firstFileUploadPart, options)
-
- const upload_uri = determinePartUploadUri(nextFileUploadPart)

- // instantiate an httpsAgent dynamically if needed
- const agent = options.getAgentForUrl?.(upload_uri) || options?.agent
-
- concurrentUploads.push(Api.sendFilePart(upload_uri, 'PUT', buffer, { agent }))
+ chunkBuffer = null
  }

- await Promise.all(concurrentUploads)
-
- const response = await File._completeUpload(firstFileUploadPart, options)
- const createdFile = new File(response.data, options)
-
- resolve(createdFile)
+ if (streamEnded) {
+ handleStreamEnd()
+ }
  } catch (error) {
  reject(error)
  }
  })
+
+ // note that this event may occur while there is still data being processed above
+ readableStream.on('end', () => {
+ streamEnded = true
+
+ handleStreamEnd()
+ })
  })

  return file
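The reworked 'data' handler replaces the single split (lastChunkForPart / firstChunkForNextPart) with a while loop, so a chunk that contains one or more complete parts is carved into full parts before the remainder is buffered, and handleStreamEnd only finalizes the upload once all buffered data has been consumed. A simplified sketch of just the splitting step; splitIntoParts and its names are illustrative, not SDK API, and the real handler also uploads each carved part and pauses/resumes the stream:

const splitIntoParts = (buffered, chunk, partsize) => {
  const parts = []
  let buffer = Buffer.concat([buffered, chunk])
  // a single chunk can hold more than one complete part (e.g. a stream created from
  // in-memory data), so keep carving off full parts until less than one part remains
  while (buffer.length >= partsize) {
    parts.push(buffer.subarray(0, partsize))
    buffer = buffer.subarray(partsize)
  }
  return { parts, remainder: buffer }
}

// e.g. with a 5 MiB partsize, a 12 MiB chunk yields two full parts and a 2 MiB remainder
const { parts, remainder } = splitIntoParts(Buffer.alloc(0), Buffer.alloc(12 * 1024 * 1024), 5 * 1024 * 1024)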
package/test/src/index.js CHANGED
@@ -142,7 +142,7 @@ const testSuite = async () => {
  }

  /* to run this test, put a file (or symlink) at huge-file.ext * /
- const testUploadHugeFile = async () => {
+ const testUploadFileForHugeFile = async () => {
  const sourceFilePath = '../huge-file.ext'

  const displayName = `huge-file__${nonce}.ext`
@@ -161,7 +161,33 @@ const testSuite = async () => {

  await file.delete()

- Logger.info('***** testUploadHugeFile() succeeded! *****')
+ Logger.info('***** testUploadFileForHugeFile() succeeded! *****')
+ }
+
+ /* to run this test, put a file (or symlink) at huge-file.ext * /
+ const testUploadDataForHugeFile = async () => {
+ const sourceFilePath = '../huge-file.ext'
+
+ const displayName = `huge-file__${nonce}.ext`
+ const destinationPath = `${SDK_TEST_ROOT_FOLDER}/${displayName}`
+
+ const fs = require('fs/promises')
+ const data = await fs.readFile(sourceFilePath, { encoding: "utf8" })
+
+ const file = await File.uploadData(destinationPath, data)
+
+ invariant(!!file.path, 'Uploaded file response object should have a path')
+ invariant(file.display_name === displayName, 'Uploaded file response object should have the same display_name as the file we uploaded')
+
+ const foundFile = await File.find(destinationPath)
+
+ invariant(foundFile.path === destinationPath, 'Found file should have the same path as the file we uploaded')
+ invariant(foundFile.display_name === displayName, 'Found file should have the same display_name as the file we uploaded')
+ invariant(typeof foundFile.getDownloadUri() === 'undefined', 'Found file should not have a download uri yet')
+
+ await file.delete()
+
+ Logger.info('***** testUploadDataForHugeFile() succeeded! *****')
  }
  /**/

@@ -242,7 +268,8 @@ const testSuite = async () => {
  await testFolderListAutoPagination()
  await testUploadAndDownloadToFile()
  await testUploadAndDownloadToString()
- // await testUploadHugeFile() // to run this test, put a file (or symlink) at huge-file.ext
+ // await testUploadDataForHugeFile() // to run this test, put a file (or symlink) at huge-file.ext
+ // await testUploadFileForHugeFile() // to run this test, put a file (or symlink) at huge-file.ext
  await testSession()
  await testFailure()
  await testUserListAndUpdate()