@karpeleslab/klbfw 0.1.13 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CLAUDE.md +50 -0
  2. package/README.md +199 -35
  3. package/cookies.js +107 -41
  4. package/coverage/clover.xml +835 -0
  5. package/coverage/coverage-final.json +9 -0
  6. package/coverage/lcov-report/base.css +224 -0
  7. package/coverage/lcov-report/block-navigation.js +87 -0
  8. package/coverage/lcov-report/cookies.js.html +334 -0
  9. package/coverage/lcov-report/favicon.png +0 -0
  10. package/coverage/lcov-report/fw-wrapper.js.html +163 -0
  11. package/coverage/lcov-report/index.html +131 -0
  12. package/coverage/lcov-report/index.js.html +196 -0
  13. package/coverage/lcov-report/internal.js.html +604 -0
  14. package/coverage/lcov-report/klbfw/cookies.js.html +490 -0
  15. package/coverage/lcov-report/klbfw/fw-wrapper.js.html +745 -0
  16. package/coverage/lcov-report/klbfw/index.html +206 -0
  17. package/coverage/lcov-report/klbfw/index.js.html +235 -0
  18. package/coverage/lcov-report/klbfw/internal.js.html +811 -0
  19. package/coverage/lcov-report/klbfw/rest.js.html +565 -0
  20. package/coverage/lcov-report/klbfw/test/index.html +116 -0
  21. package/coverage/lcov-report/klbfw/test/setup.js.html +1105 -0
  22. package/coverage/lcov-report/klbfw/upload.js.html +3487 -0
  23. package/coverage/lcov-report/klbfw/util.js.html +388 -0
  24. package/coverage/lcov-report/prettify.css +1 -0
  25. package/coverage/lcov-report/prettify.js +2 -0
  26. package/coverage/lcov-report/rest.js.html +472 -0
  27. package/coverage/lcov-report/sort-arrow-sprite.png +0 -0
  28. package/coverage/lcov-report/sorter.js +196 -0
  29. package/coverage/lcov-report/upload.js.html +1789 -0
  30. package/coverage/lcov-report/util.js.html +313 -0
  31. package/coverage/lcov.info +1617 -0
  32. package/fw-wrapper.js +221 -26
  33. package/index.js +16 -2
  34. package/internal.js +186 -102
  35. package/package.json +21 -3
  36. package/rest.js +129 -81
  37. package/test/README.md +62 -0
  38. package/test/api.test.js +102 -0
  39. package/test/cookies.test.js +65 -0
  40. package/test/integration.test.js +481 -0
  41. package/test/rest.test.js +93 -0
  42. package/test/setup.js +341 -0
  43. package/test/upload.test.js +689 -0
  44. package/test/util.test.js +46 -0
  45. package/upload.js +987 -421
  46. package/util.js +59 -21
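
The headline change in this release is cross-environment support: upload.js (shown below) now runs its upload queue in Node.js as well as in the browser, with `node-fetch` and `xmldom` as optional dependencies, and the new `test/` directory adds a Jest-style suite around it. A minimal sketch of the new Node.js entry point, based on the usage notes in the JSDoc added to the module (the `Misc/Debug:testUpload` endpoint is the demo target used in those docs; the require path assumes the package's root-level upload.js resolves as a subpath):

```js
// Sketch only - assumes @karpeleslab/klbfw 0.2.0 plus the optional
// Node.js dependencies: npm install node-fetch xmldom
const { upload } = require('@karpeleslab/klbfw/upload');

// In Node.js, upload.init(path) returns a function taking file paths
// instead of opening a file picker as it does in the browser.
upload.init('Misc/Debug:testUpload')(['./file1.txt', './file2.jpg'])
    .then(() => console.log('all uploads processed'));
```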
package/upload.js CHANGED
@@ -1,354 +1,822 @@
+ /**
+  * KLB Upload Module
+  *
+  * This module handles file uploads to KLB API endpoints.
+  * It supports both browser and Node.js environments with a unified API.
+  *
+  * The module handles:
+  * - File upload to KLB API endpoints
+  * - Multiple upload protocols (PUT and AWS multipart)
+  * - Progress tracking
+  * - Pause, resume, retry, and cancel operations
+  * - Browser and Node.js compatibility
+  *
+  * Browser usage:
+  * ```js
+  * // Open file picker and upload selected files
+  * upload.upload.init('Misc/Debug:testUpload')()
+  *   .then(result => console.log('Upload complete', result));
+  *
+  * // Upload a specific File object
+  * upload.upload.append('Misc/Debug:testUpload', fileObject)
+  *   .then(result => console.log('Upload complete', result));
+  *
+  * // Track progress
+  * upload.upload.onprogress = (status) => {
+  *   console.log('Progress:', status.running.map(i => i.status));
+  * };
+  *
+  * // Cancel an upload
+  * upload.upload.cancelItem(uploadId);
+  * ```
+  *
+  * Node.js usage:
+  * ```js
+  * // For Node.js environments, first install dependencies:
+  * // npm install node-fetch xmldom
+  *
+  * // Initialize upload with specific file paths
+  * upload.upload.init('Misc/Debug:testUpload')(['./file1.txt', './file2.jpg'])
+  *   .then(result => console.log('Upload complete', result));
+  *
+  * // Or create a custom file object with path
+  * const file = {
+  *   name: 'test.txt',
+  *   size: 1024,
+  *   type: 'text/plain',
+  *   path: '/path/to/file.txt'
+  * };
+  * upload.upload.append('Misc/Debug:testUpload', file)
+  *   .then(result => console.log('Upload complete', result));
+  * ```
+  *
+  * @module upload
+  */
+
  const rest = require('./rest');
  const fwWrapper = require('./fw-wrapper');
- var sha256 = require('js-sha256').sha256;
-
- // retunr time in amz format, eg 20180930T132108Z
- function getAmzTime() {
-     var t = new Date();
-     return t.getUTCFullYear() +
-         '' + pad(t.getUTCMonth() + 1) +
-         pad(t.getUTCDate()) +
-         'T' + pad(t.getUTCHours()) +
-         pad(t.getUTCMinutes()) +
-         pad(t.getUTCSeconds()) +
-         'Z';
+ const sha256 = require('js-sha256').sha256;
+
+ /**
+  * Environment detection and cross-platform utilities
+  */
+ const env = {
+     /**
+      * Detect if running in a browser environment
+      */
+     isBrowser: typeof window !== 'undefined' && typeof document !== 'undefined',
+
+     /**
+      * Detect if running in a Node.js environment
+      */
+     isNode: typeof process !== 'undefined' && process.versions && process.versions.node,
+
+     /**
+      * Node.js specific modules (lazy-loaded)
+      */
+     node: {
+         fetch: null,
+         xmlParser: null,
+         fs: null,
+         path: null,
+         EventEmitter: null,
+         eventEmitter: null
+     }
+ };
+
+ /**
+  * Initialize Node.js dependencies when in Node environment
+  */
+ if (env.isNode && !env.isBrowser) {
+     try {
+         env.node.fetch = require('node-fetch');
+         env.node.xmlParser = require('xmldom');
+         env.node.fs = require('fs');
+         env.node.path = require('path');
+         env.node.EventEmitter = require('events');
+         env.node.eventEmitter = new (env.node.EventEmitter)();
+     } catch (e) {
+         console.warn('Node.js dependencies not available. Some functionality may be limited:', e.message);
+         console.warn('To use in Node.js, install: npm install node-fetch xmldom');
+     }
  }
 
- function pad(number) {
-     if (number < 10) {
-         return '0' + number;
+ /**
+  * Cross-platform utilities
+  */
+ const utils = {
+     /**
+      * Environment-agnostic fetch implementation
+      * @param {string} url - The URL to fetch
+      * @param {Object} options - Fetch options
+      * @returns {Promise} - Fetch promise
+      */
+     fetch(url, options) {
+         if (env.isBrowser && typeof window.fetch === 'function') {
+             return window.fetch(url, options);
+         } else if (env.isNode && env.node.fetch) {
+             return env.node.fetch(url, options);
+         } else if (typeof fetch === 'function') {
+             // For environments where fetch is globally available
+             return fetch(url, options);
          }
-     return number;
- }
-
- // perform call against AWS S3 with the appropriate signature obtained from server
+         return Promise.reject(new Error('fetch not available in this environment'));
+     },
+
+     /**
+      * Environment-agnostic XML parser
+      * @param {string} xmlString - XML string to parse
+      * @returns {Document} - DOM-like document
+      */
+     parseXML(xmlString) {
+         if (env.isBrowser) {
+             return new DOMParser().parseFromString(xmlString, 'text/xml');
+         } else if (env.isNode && env.node.xmlParser) {
+             const DOMParserNode = env.node.xmlParser.DOMParser;
+             const dom = new DOMParserNode().parseFromString(xmlString, 'text/xml');
+
+             // Add querySelector interface for compatibility
+             dom.querySelector = function(selector) {
+                 if (selector === 'UploadId') {
+                     const elements = this.getElementsByTagName('UploadId');
+                     return elements.length > 0 ? { innerHTML: elements[0].textContent } : null;
+                 }
+                 return null;
+             };
+
+             return dom;
+         }
+         throw new Error('XML parsing not available in this environment');
+     },
+
+     /**
+      * Read a file as ArrayBuffer in any environment
+      * @param {File|Object} file - File object or file-like object with path
+      * @param {Function} callback - Callback function(buffer, error)
+      */
+     readFileAsArrayBuffer(file, callback) {
+         if (env.isBrowser) {
+             const reader = new FileReader();
+             reader.addEventListener('loadend', () => callback(reader.result));
+             reader.addEventListener('error', (e) => callback(null, e));
+             reader.readAsArrayBuffer(file);
+         } else if (env.isNode && env.node.fs) {
+             if (file.path) {
+                 // Read from filesystem
+                 const readStream = env.node.fs.createReadStream(file.path, {
+                     start: file.start || 0,
+                     end: file.end || undefined
+                 });
+
+                 const chunks = [];
+                 readStream.on('data', chunk => chunks.push(chunk));
+                 readStream.on('end', () => {
+                     const buffer = Buffer.concat(chunks);
+                     callback(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength));
+                 });
+                 readStream.on('error', err => callback(null, err));
+             } else if (file.content) {
+                 // Memory buffer
+                 const buffer = Buffer.from(file.content);
+                 callback(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength));
+             } else {
+                 callback(null, new Error('No file path or content provided'));
+             }
+         } else {
+             callback(null, new Error('File reading not available in this environment'));
+         }
+     },
+
+     /**
+      * Dispatch a custom event in any environment
+      * @param {string} eventName - Event name
+      * @param {Object} detail - Event details
+      */
+     dispatchEvent(eventName, detail) {
+         if (env.isBrowser) {
+             const evt = new CustomEvent(eventName, { detail });
+             document.dispatchEvent(evt);
+         } else if (env.isNode && env.node.eventEmitter) {
+             env.node.eventEmitter.emit(eventName, detail);
+         }
+         // In other environments, events are silently ignored
+     },
+
+     /**
+      * Format a date for AWS (YYYYMMDDTHHMMSSZ)
+      * @returns {string} Formatted date
+      */
+     getAmzTime() {
+         const t = new Date();
+         return t.getUTCFullYear() +
+             this.pad(t.getUTCMonth() + 1) +
+             this.pad(t.getUTCDate()) +
+             'T' + this.pad(t.getUTCHours()) +
+             this.pad(t.getUTCMinutes()) +
+             this.pad(t.getUTCSeconds()) +
+             'Z';
+     },
+
+     /**
+      * Pad a number with leading zero if needed
+      * @param {number} number - Number to pad
+      * @returns {string} Padded number
+      */
+     pad(number) {
+         return number < 10 ? '0' + number : String(number);
+     }
+ };
+
+ /**
+  * AWS S3 request handler
+  * Performs a signed request to AWS S3 using a signature obtained from the server
+  *
+  * @param {Object} upInfo - Upload info including bucket endpoint and key
+  * @param {string} method - HTTP method (GET, POST, PUT)
+  * @param {string} query - Query parameters
+  * @param {*} body - Request body
+  * @param {Object} headers - Request headers
+  * @param {Object} context - Request context
+  * @returns {Promise} - Request promise
+  */
  function awsReq(upInfo, method, query, body, headers, context) {
      headers = headers || {};
      context = context || {};
 
-     if (body == "") {
-         var bodyHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; // sha256('')
+     // Calculate body hash for AWS signature
+     let bodyHash;
+
+     if (!body || body === "") {
+         // Empty body hash
+         bodyHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
      } else {
-         var bodyHash = sha256(body);
+         try {
+             // Handle different body types
+             let bodyForHash = body;
+
+             if (body instanceof ArrayBuffer || (body.constructor && body.constructor.name === 'ArrayBuffer')) {
+                 bodyForHash = new Uint8Array(body);
+             } else if (body.constructor && body.constructor.name === 'Buffer') {
+                 bodyForHash = Buffer.from(body).toString();
+             }
+
+             bodyHash = sha256(bodyForHash);
+         } catch (e) {
+             console.error("Error calculating hash:", e.message);
+             bodyHash = "UNSIGNED-PAYLOAD";
+         }
      }
 
-     var ts = getAmzTime(); // aws format, eg 20180930T132108Z
-     var ts_d = ts.substring(0, 8);
+     // Create AWS timestamp
+     const timestamp = utils.getAmzTime();
+     const datestamp = timestamp.substring(0, 8);
 
+     // Set AWS headers
      headers["X-Amz-Content-Sha256"] = bodyHash;
-     headers["X-Amz-Date"] = ts;
+     headers["X-Amz-Date"] = timestamp;
 
-     // prepare auth string
-     var aws_auth_str = [
+     // Prepare the string to sign
+     const authStringParts = [
          "AWS4-HMAC-SHA256",
-         ts,
-         ts_d + "/" + upInfo.Bucket_Endpoint.Region + "/s3/aws4_request",
+         timestamp,
+         `${datestamp}/${upInfo.Bucket_Endpoint.Region}/s3/aws4_request`,
          method,
-         "/" + upInfo.Bucket_Endpoint.Name + "/" + upInfo.Key,
+         `/${upInfo.Bucket_Endpoint.Name}/${upInfo.Key}`,
          query,
-         "host:" + upInfo.Bucket_Endpoint.Host,
+         `host:${upInfo.Bucket_Endpoint.Host}`
      ];
 
-     // list headers to sign (host and anything starting with x-)
-     var sign_head = ['host'];
-     var k = Object.keys(headers).sort();
-     for (var i = 0; i < k.length; i++) {
-         var s = k[i].toLowerCase();
-         if (s.substring(0, 2) != "x-") {
-             continue;
+     // Add x-* headers to sign
+     const headersToSign = ['host'];
+     const sortedHeaderKeys = Object.keys(headers).sort();
+
+     for (const key of sortedHeaderKeys) {
+         const lowerKey = key.toLowerCase();
+         if (lowerKey.startsWith('x-')) {
+             headersToSign.push(lowerKey);
+             authStringParts.push(`${lowerKey}:${headers[key]}`);
          }
-         sign_head.push(s);
-         aws_auth_str.push(s + ":" + headers[k[i]]);
      }
-     aws_auth_str.push("");
-     aws_auth_str.push(sign_head.join(";"));
-     aws_auth_str.push(bodyHash);
-
-     var promise = new Promise(function (resolve, reject) {
-
-         rest.rest("Cloud/Aws/Bucket/Upload/" + upInfo.Cloud_Aws_Bucket_Upload__ + ":signV4", "POST", {headers: aws_auth_str.join("\n")}, context)
-             .then(function (ares) {
-                 var u = "https://" + upInfo.Bucket_Endpoint.Host + "/" + upInfo.Bucket_Endpoint.Name + "/" + upInfo.Key;
-                 if (query != "") u = u + "?" + query;
-
-                 headers["Authorization"] = ares.data.authorization;
-
-                 fetch(u, {
-                     method: method,
-                     body: body,
-                     headers: headers
-                 })
-                     .then(resolve, reject)
-                     .catch(reject);
-
-
-             }, reject)
-             .catch(reject);
-
+
+     // Complete the string to sign
+     authStringParts.push('');
+     authStringParts.push(headersToSign.join(';'));
+     authStringParts.push(bodyHash);
+
+     return new Promise((resolve, reject) => {
+         // Get signature from server
+         rest.rest(
+             `Cloud/Aws/Bucket/Upload/${upInfo.Cloud_Aws_Bucket_Upload__}:signV4`,
+             "POST",
+             { headers: authStringParts.join("\n") },
+             context
+         )
+             .then(response => {
+                 // Construct the S3 URL
+                 let url = `https://${upInfo.Bucket_Endpoint.Host}/${upInfo.Bucket_Endpoint.Name}/${upInfo.Key}`;
+                 if (query) url += `?${query}`;
+
+                 // Add the authorization header
+                 headers["Authorization"] = response.data.authorization;
+
+                 // Make the actual request to S3
+                 return utils.fetch(url, {
+                     method,
+                     body,
+                     headers
+                 });
+             })
+             .then(resolve)
+             .catch(reject);
      });
-
-     return promise;
  }
 
+ /**
+  * Upload module (IIFE pattern)
+  * @returns {Object} Upload interface
+  */
  module.exports.upload = (function () {
-     var upload = {};
-     var upload_queue = []; // queue of uploads to run
-     var upload_failed = []; // failed upload(s)
-     var upload_running = {}; // currently processing uploads
-     var up_id = 0; // next upload id
-     var last_input = null;
-
-
-     function sendprogress() {
-         if (typeof upload.onprogress === "undefined") return;
-
-         upload.onprogress(upload.getStatus());
+     /**
+      * Upload state
+      */
+     const state = {
+         queue: [], // Queued uploads
+         failed: [], // Failed uploads
+         running: {}, // Currently processing uploads
+         nextId: 0, // Next upload ID
+         lastInput: null // Last created file input element (browser only)
+     };
+
+     // Public API object
+     const upload = {};
+
+     /**
+      * Helper Functions
+      */
+
+     /**
+      * Notify progress to listeners
+      * Calls onprogress callback and dispatches events
+      */
+     function sendProgress() {
+         const status = upload.getStatus();
+
+         // Call the onprogress callback if defined
+         if (typeof upload.onprogress === "function") {
+             upload.onprogress(status);
+         }
+
+         // Dispatch event for listeners
+         utils.dispatchEvent("upload:progress", status);
      }
-
-     function do_process_pending(up) {
-         up["status"] = "pending-wip";
-         // up is an object with api path, file, dfd
-         var params = up.params;
-
-         // set params for upload
-         params["filename"] = up.file.name;
-         params["size"] = up.file.size;
-         params["lastModified"] = up.file.lastModified / 1000;
-         params["type"] = up.file.type;
-
-         rest.rest(up.path, "POST", params, up.context).then(function (res) {
-             // Method 1: aws signed multipart upload
-             if (res["data"]["Cloud_Aws_Bucket_Upload__"]) {
-                 up.info = res["data"]; // contains stuff like Bucket_Endpoint, Key, etc
-
-                 // ok we are ready to upload - this will initiate an upload
-                 awsReq(up.info, "POST", "uploads=", "", {"Content-Type": up.file.type, "X-Amz-Acl": "private"}, up.context)
-                     .then(response => response.text())
-                     .then(str => (new DOMParser()).parseFromString(str, "text/xml"))
-                     .then(dom => dom.querySelector('UploadId').innerHTML)
-                     .then(function (uploadId) {
-                         up.uploadId = uploadId;
-
-                         // ok, let's compute block size so we know how many parts we need to send
-                         var fsize = up.file.size;
-                         var bsize = Math.ceil(fsize / 10000); // we want ~10k parts
-                         if (bsize < 5242880) bsize = 5242880; // minimum block size = 5MB
-
-                         up.method = 'aws';
-                         up.bsize = bsize;
-                         up.blocks = Math.ceil(fsize / bsize);
-                         up.b = {};
-                         up['status'] = 'uploading';
-                         upload.run();
-                     }).catch(res => failure(up, res))
-                 return;
+
+     /**
+      * Handle upload failure
+      * @param {Object} up - Upload object
+      * @param {*} error - Error data
+      */
+     function handleFailure(up, error) {
+         // Skip if upload is no longer running
+         if (!(up.up_id in state.running)) return;
+
+         // Check if already in failed list
+         for (const failedItem of state.failed) {
+             if (failedItem.up_id === up.up_id) {
+                 return; // Already recorded as failed
             }
-         // Method 2: PUT requests
-         if (res["data"]["PUT"]) {
-             var fsize = up.file.size;
-             var bsize = fsize; // upload file in a single block
-             if (res["data"]["Blocksize"]) {
-                 // this upload target supports multipart PUT upload
-                 bsize = res["data"]["Blocksize"]; // multipart upload
+         }
+
+         // Record failure
+         up.failure = error;
+         state.failed.push(up);
+         delete state.running[up.up_id];
+
+         // Continue processing queue
+         upload.run();
+
+         // Notify progress
+         sendProgress();
+
+         // Dispatch failure event
+         utils.dispatchEvent("upload:failed", {
+             item: up,
+             res: error
+         });
+     }
+
+     /**
+      * Process a pending upload
+      * Initiates the upload process with the server
+      * @param {Object} up - Upload object
+      */
+     function processUpload(up) {
+         // Mark as processing
+         up.status = "pending-wip";
+
+         // Prepare parameters
+         const params = up.params || {};
+
+         // Set file metadata
+         params.filename = up.file.name;
+         params.size = up.file.size;
+         params.lastModified = up.file.lastModified / 1000;
+         params.type = up.file.type;
+
+         // Initialize upload with the server
+         rest.rest(up.path, "POST", params, up.context)
+             .then(function(response) {
+                 // Method 1: AWS signed multipart upload
+                 if (response.data.Cloud_Aws_Bucket_Upload__) {
+                     return handleAwsMultipartUpload(up, response.data);
                 }
-
-             up.info = res["data"];
-             up.method = 'put';
-             up.bsize = bsize;
-             up.blocks = Math.ceil(fsize / bsize);
-             up.b = {};
-             up['status'] = 'uploading';
-             upload.run();
-             return;
-         }
-         // invalid data
-         delete upload_running[up.up_id];
-         upload_failed.push(up);
-         up.reject();
-         return;
+
+                 // Method 2: Direct PUT upload
+                 if (response.data.PUT) {
+                     return handlePutUpload(up, response.data);
+                 }
+
+                 // Invalid response format
+                 delete state.running[up.up_id];
+                 state.failed.push(up);
+                 up.reject(new Error('Invalid upload response format'));
+             })
+             .catch(error => handleFailure(up, error));
+     }
+
+     /**
+      * Set up AWS multipart upload
+      * @param {Object} up - Upload object
+      * @param {Object} data - Server response data
+      */
+     function handleAwsMultipartUpload(up, data) {
+         // Store upload info
+         up.info = data;
+
+         // Initialize multipart upload
+         return awsReq(
+             up.info,
+             "POST",
+             "uploads=",
+             "",
+             {"Content-Type": up.file.type, "X-Amz-Acl": "private"},
+             up.context
+         )
+             .then(response => response.text())
+             .then(str => utils.parseXML(str))
+             .then(dom => dom.querySelector('UploadId').innerHTML)
+             .then(uploadId => {
+                 up.uploadId = uploadId;
+
+                 // Calculate optimal block size
+                 const fileSize = up.file.size;
+
+                 // Target ~10k parts, but minimum 5MB per AWS requirements
+                 let blockSize = Math.ceil(fileSize / 10000);
+                 if (blockSize < 5242880) blockSize = 5242880;
+
+                 // Set up upload parameters
+                 up.method = 'aws';
+                 up.bsize = blockSize;
+                 up.blocks = Math.ceil(fileSize / blockSize);
+                 up.b = {};
+                 up.status = 'uploading';
+
+                 // Continue upload process
+                 upload.run();
             })
-             .catch(res => failure(up, res));
+             .catch(error => handleFailure(up, error));
      }
-
-
-     function failure(up, data) {
-         if (!(up.up_id in upload_running)) return;
-
-         for (var i = 0, len = upload_failed.length; i < len; i++) {
-             if (upload_failed[i].up_id === up.up_id) {
-                 //already in
-                 return;
-             }
+
+     /**
+      * Set up direct PUT upload
+      * @param {Object} up - Upload object
+      * @param {Object} data - Server response data
+      */
+     function handlePutUpload(up, data) {
+         // Store upload info
+         up.info = data;
+
+         // Calculate block size (if multipart PUT is supported)
+         const fileSize = up.file.size;
+         let blockSize = fileSize; // Default: single block
+
+         if (data.Blocksize) {
+             // Server supports multipart upload
+             blockSize = data.Blocksize;
         }
-
-         up.failure = data;
-         upload_failed.push(up);
-         delete upload_running[up.up_id];
+
+         // Set up upload parameters
+         up.method = 'put';
+         up.bsize = blockSize;
+         up.blocks = Math.ceil(fileSize / blockSize);
+         up.b = {};
+         up.status = 'uploading';
+
+         // Continue upload process
          upload.run();
-         sendprogress();
-         if (typeof document !== "undefined") {
-             setTimeout(function () {
-                 var evt = new CustomEvent("upload:failed", {
-                     detail: {
-                         item: up,
-                         res: data
-                     }
-                 });
-                 document.dispatchEvent(evt);
-             }, 10);
-         }
      }
 
-     function do_upload_part(up, partno) {
-         // ok, need to start this!
-         up.b[partno] = "pending";
-         var start = partno * up.bsize;
-         var part = up.file.slice(start, start + up.bsize);
-
-         var reader = new FileReader();
-         reader.addEventListener("loadend", function () {
-             switch(up.method) {
-                 case 'aws':
-                     awsReq(up.info, "PUT", "partNumber=" + (partno + 1) + "&uploadId=" + up.uploadId, reader.result, null, up.context)
-                         .then(function (response) {
-                             up.b[partno] = response.headers.get("ETag");
-                             sendprogress();
-                             upload.run();
-                         }).catch(res => failure(up, res));
-                     break;
-                 case 'put':
-                     let headers = {};
-                     headers["Content-Type"] = up.file.type;
-                     if (up.blocks > 1) {
-                         // add Content-Range header
-                         // Content-Range: bytes start-end/*
-                         const end = start + reader.result.byteLength - 1; // inclusive
-                         headers["Content-Range"] = "bytes "+start+"-"+end+"/*";
-                     }
+     /**
+      * Upload a single part of a file
+      * Handles both AWS multipart and direct PUT methods
+      * @param {Object} up - Upload object
+      * @param {number} partNumber - Part number (0-based)
+      */
+     function uploadPart(up, partNumber) {
+         // Mark part as pending
+         up.b[partNumber] = "pending";
+
+         // Calculate byte range for this part
+         const startByte = partNumber * up.bsize;
+         const endByte = Math.min(startByte + up.bsize, up.file.size);
+
+         // Get file slice based on environment
+         let filePart;
+
+         if (env.isBrowser) {
+             // Browser: use native File.slice
+             filePart = up.file.slice(startByte, endByte);
+         } else if (env.isNode) {
+             // Node.js: create a reference with start/end positions
+             filePart = {
+                 path: up.file.path,
+                 start: startByte,
+                 end: endByte,
+                 type: up.file.type,
+                 content: up.file.content // For memory buffer based files
+             };
+         } else {
+             handleFailure(up, new Error('Environment not supported'));
+             return;
+         }
 
-         fetch(up.info["PUT"], {
-             method: "PUT",
-             body: reader.result,
-             headers: headers,
-         }).then(function (response) {
-             up.b[partno] = "done";
-             sendprogress();
-             upload.run();
-         }).catch(res => failure(up, res));
-         break;
+         // Read the file part as ArrayBuffer
+         utils.readFileAsArrayBuffer(filePart, (arrayBuffer, error) => {
+             if (error) {
+                 handleFailure(up, error);
+                 return;
+             }
+
+             // Choose upload method based on protocol
+             if (up.method === 'aws') {
+                 uploadAwsPart(up, partNumber, arrayBuffer);
+             } else if (up.method === 'put') {
+                 uploadPutPart(up, partNumber, startByte, arrayBuffer);
+             } else {
+                 handleFailure(up, new Error(`Unknown upload method: ${up.method}`));
             }
         });
+     }
+
+     /**
+      * Upload a part using AWS multipart upload
+      * @param {Object} up - Upload object
+      * @param {number} partNumber - Part number (0-based)
+      * @param {ArrayBuffer} data - Part data
+      */
+     function uploadAwsPart(up, partNumber, data) {
+         // AWS part numbers are 1-based
+         const awsPartNumber = partNumber + 1;
+
+         awsReq(
+             up.info,
+             "PUT",
+             `partNumber=${awsPartNumber}&uploadId=${up.uploadId}`,
+             data,
+             null,
+             up.context
+         )
+             .then(response => {
+                 // Store ETag for this part (needed for completion)
+                 up.b[partNumber] = response.headers.get("ETag");
+
+                 // Update progress and continue processing
+                 sendProgress();
+                 upload.run();
+             })
+             .catch(error => handleFailure(up, error));
+     }
+
+     /**
+      * Upload a part using direct PUT
+      * @param {Object} up - Upload object
+      * @param {number} partNumber - Part number (0-based)
+      * @param {number} startByte - Starting byte position
+      * @param {ArrayBuffer} data - Part data
+      */
+     function uploadPutPart(up, partNumber, startByte, data) {
+         // Set up headers
+         const headers = {
+             "Content-Type": up.file.type
+         };
+
+         // Add Content-Range header for multipart PUT
+         if (up.blocks > 1) {
+             const endByte = startByte + data.byteLength - 1; // inclusive
+             headers["Content-Range"] = `bytes ${startByte}-${endByte}/*`;
+         }
 
-         reader.addEventListener("error", function (e) {
-             failure(up, e);
-         });
-
-         reader.readAsArrayBuffer(part);
+         // Perform the PUT request
+         utils.fetch(up.info.PUT, {
+             method: "PUT",
+             body: data,
+             headers: headers,
+         })
+             .then(response => {
+                 // Mark part as done
+                 up.b[partNumber] = "done";
+
+                 // Update progress and continue processing
+                 sendProgress();
+                 upload.run();
+             })
+             .catch(error => handleFailure(up, error));
      }
 
 
-     function do_process_uploading(up) {
+     /**
+      * Process an upload in progress
+      * Manages uploading parts and completing the upload
+      * @param {Object} up - Upload object
+      */
+     function processActiveUpload(up) {
+         // Skip if paused or canceled
          if (up.paused || up.canceled) return;
 
-         var p = 0; // pending
-         var d = 0; // done
-         for (var i = 0; i < up.blocks; i++) {
-             if (up.b[i] == undefined) {
-                 if (up.paused) break; // do not start new parts if paused
-                 do_upload_part(up, i);
-             } else if (up.b[i] != "pending") {
-                 d += 1;
+         // Track upload progress
+         let pendingParts = 0;
+         let completedParts = 0;
+
+         // Process each part
+         for (let i = 0; i < up.blocks; i++) {
+             if (up.b[i] === undefined) {
+                 // Part not started yet
+                 if (up.paused) break; // Don't start new parts when paused
+
+                 // Start uploading this part
+                 uploadPart(up, i);
+                 pendingParts++;
+             } else if (up.b[i] !== "pending") {
+                 // Part completed
+                 completedParts++;
                  continue;
+             } else {
+                 // Part in progress
+                 pendingParts++;
             }
-             p += 1;
-             if (p >= 3) break;
+
+             // Limit concurrent uploads
+             if (pendingParts >= 3) break;
         }
 
-         up["done"] = d;
-
-         if (p == 0) {
-             up["status"] = "validating";
-             switch(up.method) {
-                 case 'aws':
-                     // complete, see https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
-                     var xml = "<CompleteMultipartUpload>";
-                     for (var i = 0; i < up.blocks; i++) {
-                         xml += "<Part><PartNumber>" + (i + 1) + "</PartNumber><ETag>" + up.b[i] + "</ETag></Part>";
-                     }
-                     xml += "</CompleteMultipartUpload>";
-                     awsReq(up.info, "POST", "uploadId=" + up.uploadId, xml, null, up.context)
-                         .then(response => response.text())
-                         .then(function (r) {
-                             // if success, need to call finalize
-                             rest.rest("Cloud/Aws/Bucket/Upload/" + up.info.Cloud_Aws_Bucket_Upload__ + ":handleComplete", "POST", {}, up.context).then(function (ares) {
-                                 // SUCCESS!
-                                 up["status"] = "complete";
-                                 up["final"] = ares["data"];
-                                 sendprogress();
-                                 up.resolve(up);
-                                 delete upload_running[up.up_id];
-                                 upload.run();
-                             }).catch(res => failure(up, res));
-                         }).catch(res => failure(up, res));
-                     break;
-                 case 'put':
-                     // complete, directly call handleComplete
-                     rest.rest(up.info.Complete, "POST", {}, up.context).then(function (ares) {
-                         // success!
-                         up["status"] = "complete";
-                         up["final"] = ares["data"];
-                         sendprogress();
-                         delete upload_running[up.up_id];
-                         up.resolve(up);
-                         upload.run();
-                     }).catch(res => failure(up, res));
-                     break;
+         // Update upload progress
+         up.done = completedParts;
+
+         // Check if all parts are complete
+         if (pendingParts === 0) {
+             // All parts complete, finalize the upload
+             up.status = "validating";
+
+             if (up.method === 'aws') {
+                 completeAwsUpload(up);
+             } else if (up.method === 'put') {
+                 completePutUpload(up);
             }
         }
     }
-
-     // take tasks from queue and run them if needed
-     function fillqueue() {
-         if (Object.keys(upload_running).length >= 3) return; // nothing yet
-         // if (upload_failed.length > 0) return; // need to push "retry" to resume
-
-         // max 3 uploading files
-         while (Object.keys(upload_running).length < 3) {
-             if (upload_queue.length == 0) return;
-             var up = upload_queue.shift();
-             upload_running[up.up_id] = up;
+
+     /**
+      * Complete AWS multipart upload
+      * @param {Object} up - Upload object
+      */
+     function completeAwsUpload(up) {
+         // Create completion XML
+         // See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
+         let xml = "<CompleteMultipartUpload>";
+
+         for (let i = 0; i < up.blocks; i++) {
+             // AWS part numbers are 1-based
+             xml += `<Part><PartNumber>${i + 1}</PartNumber><ETag>${up.b[i]}</ETag></Part>`;
         }
-         sendprogress();
+
+         xml += "</CompleteMultipartUpload>";
+
+         // Send completion request
+         awsReq(up.info, "POST", `uploadId=${up.uploadId}`, xml, null, up.context)
+             .then(response => response.text())
+             .then(() => {
+                 // Call server-side completion handler
+                 return rest.rest(
+                     `Cloud/Aws/Bucket/Upload/${up.info.Cloud_Aws_Bucket_Upload__}:handleComplete`,
+                     "POST",
+                     {},
+                     up.context
+                 );
+             })
+             .then(response => {
+                 // Mark upload as complete
+                 up.status = "complete";
+                 up.final = response.data;
+
+                 // Notify listeners
+                 sendProgress();
+
+                 // Remove from running uploads
+                 delete state.running[up.up_id];
+
+                 // Resolve the upload promise
+                 up.resolve(up);
+
+                 // Continue processing queue
+                 upload.run();
+             })
+             .catch(error => handleFailure(up, error));
+     }
+
+     /**
+      * Complete direct PUT upload
+      * @param {Object} up - Upload object
+      */
+     function completePutUpload(up) {
+         // Call completion endpoint
+         rest.rest(up.info.Complete, "POST", {}, up.context)
+             .then(response => {
+                 // Mark upload as complete
+                 up.status = "complete";
+                 up.final = response.data;
+
+                 // Notify listeners
+                 sendProgress();
+
+                 // Remove from running uploads
+                 delete state.running[up.up_id];
+
+                 // Resolve the upload promise
+                 up.resolve(up);
+
+                 // Continue processing queue
+                 upload.run();
+             })
+             .catch(error => handleFailure(up, error));
      }
 
-
-     upload.getStatus = function () {
-         var prog = {
-             "queue": upload_queue,
-             "running": Object.keys(upload_running).map(function (e) {
-                 return upload_running[e]
-             }),
-             "failed": upload_failed,
+     /**
+      * Fill the upload queue with new upload tasks
+      * Takes items from the queue and adds them to running uploads
+      */
+     function fillUploadQueue() {
+         // Skip if we're already running the maximum number of uploads
+         if (Object.keys(state.running).length >= 3) return;
+
+         // Maximum of 3 concurrent uploads
+         while (Object.keys(state.running).length < 3 && state.queue.length > 0) {
+             // Get next upload from queue
+             const upload = state.queue.shift();
+
+             // Add to running uploads
+             state.running[upload.up_id] = upload;
+         }
+
+         // Notify progress
+         sendProgress();
+     }
+
+     // No need for backward compatibility for private methods
+
+     /**
+      * Get current upload status
+      * @returns {Object} Status object with queued, running and failed uploads
+      */
+     upload.getStatus = function() {
+         return {
+             queue: state.queue,
+             running: Object.keys(state.running).map(id => state.running[id]),
+             failed: state.failed
          };
-
-         return prog;
      };
-
-     upload.resume = function () {
-         // put failed stuff at end of queue, resume upload
-         while (upload_failed.length > 0) {
-             upload_queue.push(upload_failed.shift());
+
+     /**
+      * Resume all failed uploads
+      * Moves failed uploads back to the queue
+      */
+     upload.resume = function() {
+         // Move all failed uploads back to the queue
+         while (state.failed.length > 0) {
+             state.queue.push(state.failed.shift());
         }
-
+
+         // Restart upload process
          upload.run();
      };
 
-     if (typeof document !== "undefined") {
-         upload.init = function (path, params, notify) {
-             // perform upload to a given API, for example Drive/Item/<id>:upload
-             // will allow multiple files to be uploaded
-             params = params || {};
-
+     // Environment-specific initialization
+     upload.init = function (path, params, notify) {
+         // perform upload to a given API, for example Drive/Item/<id>:upload
+         // will allow multiple files to be uploaded
+         params = params || {};
+
+         if (isBrowser) {
+             // Browser implementation
              if (last_input != null) {
                  last_input.parentNode.removeChild(last_input);
                  last_input = null;
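
Before the second hunk, one practical note: the rewritten helpers above route all notifications through `utils.dispatchEvent`, so browser code keeps receiving the same DOM events as in 0.1.x, while Node.js delivers them on an internal EventEmitter. A browser-side sketch of listening to the events emitted by `sendProgress()` and `handleFailure()` as defined in this diff:

```js
// Sketch: "upload:progress" carries upload.getStatus() as the event detail,
// i.e. { queue, running, failed } arrays of upload objects.
document.addEventListener('upload:progress', (e) => {
    const { queue, running, failed } = e.detail;
    console.log(`queued=${queue.length} running=${running.length} failed=${failed.length}`);
});

// "upload:failed" carries { item, res }: the upload object and the recorded error.
document.addEventListener('upload:failed', (e) => {
    console.warn('upload failed:', e.detail.item.file.name, e.detail.res);
});
```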
@@ -386,179 +854,277 @@ module.exports.upload = (function () {
 
              input.click();
              return promise;
-     };
- }
+         } else if (isNode) {
+             // Node.js implementation
+             return function(filePaths) {
+                 // Convert string to array if single file path provided
+                 if (typeof filePaths === 'string') {
+                     filePaths = [filePaths];
+                 }
+
+                 if (!Array.isArray(filePaths)) {
+                     throw new Error('filePaths must be a string or array of strings');
+                 }
+
+                 return new Promise(function(resolve, reject) {
+                     const count = filePaths.length;
+                     if (count === 0) {
+                         return resolve();
+                     }
+
+                     if (notify !== undefined) notify({status: 'init', count: count});
+
+                     let remainingCount = count;
+
+                     filePaths.forEach(filePath => {
+                         try {
+                             // Get file info
+                             const stats = nodeFs.statSync(filePath);
+                             const fileName = nodePath.basename(filePath);
+
+                             // Create a file-like object
+                             const file = {
+                                 name: fileName,
+                                 size: stats.size,
+                                 lastModified: stats.mtimeMs,
+                                 type: 'application/octet-stream', // Default type
+                                 path: filePath, // For Node.js reading
+                                 // Mock methods needed by upload.js
+                                 slice: function(start, end) {
+                                     return {
+                                         path: filePath,
+                                         start: start,
+                                         end: end || stats.size
+                                     };
+                                 }
+                             };
+
+                             upload.append(path, file, params, fwWrapper.getContext())
+                                 .then(function(obj) {
+                                     remainingCount -= 1;
+                                     if (notify !== undefined) notify(obj);
+                                     if (remainingCount === 0) resolve();
+                                 })
+                                 .catch(function(err) {
+                                     remainingCount -= 1;
+                                     console.error('Error uploading file:', err);
+                                     if (remainingCount === 0) resolve();
+                                 });
+                         } catch (err) {
+                             remainingCount -= 1;
+                             console.error('Error processing file:', err);
+                             if (remainingCount === 0) resolve();
+                         }
+                     });
+
+                     upload.run();
+                 });
+             };
+         } else {
+             // Default implementation for other environments
+             return function() {
+                 return Promise.reject(new Error('File upload not supported in this environment'));
+             };
+         }
+     };
 
 
-     upload.append = function (path, file, params, context) {
-         var promise = new Promise(function (resolve, reject) {
+     /**
+      * Add a file to the upload queue
+      * @param {string} path - API path to upload to
+      * @param {File|Object} file - File to upload
+      * @param {Object} params - Upload parameters
+      * @param {Object} context - Request context
+      * @returns {Promise} - Upload promise
+      */
+     upload.append = function(path, file, params, context) {
+         return new Promise((resolve, reject) => {
+             // Process parameters
              params = params || {};
-             context = context || fwWrapper.getContext(); // refer to https://git.atonline.com/templates/atonline_drive_2018/issues/58
-
-             var ctx = {...{}, ...context};
-             upload_queue.push({
+             context = context || fwWrapper.getContext();
+
+             // Create an upload object
+             const uploadObject = {
                  path: path,
                  file: file,
                  resolve: resolve,
                  reject: reject,
-                 "status": "pending",
+                 status: "pending",
                  paused: false,
-                 up_id: up_id++,
+                 up_id: state.nextId++,
                  params: params,
-                 context: ctx
-             });
+                 context: { ...context } // Create a copy to avoid modification
+             };
+
+             // Add to queue
+             state.queue.push(uploadObject);
          });
-
-         return promise;
      };
 
 
-     upload.cancelItem = function (up_id) {
-         var itemKey = -1;
-         for (var i in upload_running) {
-             if (upload_running[i].up_id == up_id) {
-                 itemKey = i;
-                 break;
-             }
-         }
-         if (itemKey >= 0) {
-             upload_running[itemKey].canceled = true;
-         } else { // /!\ we should be able to cancel the upload of an item even if it's pending, so we're going to look at the queued items
-             for (var i = 0; i < upload_queue.length; i++) {
-                 if (upload_queue[i].up_id == up_id) {
-                     upload_queue[i].canceled = true;
+     /**
+      * Cancel an upload in progress or in queue
+      * @param {number} uploadId - Upload ID to cancel
+      */
+     upload.cancelItem = function(uploadId) {
+         // Check running uploads
+         if (state.running[uploadId]) {
+             // Mark running upload as canceled
+             state.running[uploadId].canceled = true;
+         } else {
+             // Check queued uploads
+             for (let i = 0; i < state.queue.length; i++) {
+                 if (state.queue[i].up_id === uploadId) {
+                     state.queue[i].canceled = true;
                      break;
                  }
             }
         }
-         sendprogress();
+
+         // Update progress
+         sendProgress();
      };
-
-     // removes the canceled item of given ID from the queue or running list.
-     upload.deleteItem = function (up_id) {
-         var itemKey = -1;
-         for (var i in upload_running) {
-             if (upload_running[i].up_id == up_id) {
-                 itemKey = i;
-                 break;
+
+     /**
+      * Delete an upload from queue or failed list
+      * Only canceled uploads can be removed from running list
+      * @param {number} uploadId - Upload ID to delete
+      */
+     upload.deleteItem = function(uploadId) {
+         // Check running uploads
+         if (state.running[uploadId]) {
+             // Only delete if canceled
+             if (state.running[uploadId].canceled) {
+                 delete state.running[uploadId];
             }
-         }
-         if (itemKey >= 0) {
-             if (upload_running[itemKey].canceled)
-                 delete upload_running[itemKey];
-         } else { // /!\ we should be able to cancel the upload of an item even if it's pending, so we're going to look at the queued items
-             for (var i = 0; i < upload_queue.length; i++) {
-                 if (upload_queue[i].up_id == up_id) {
-                     upload_queue.splice(i, 1);
+         } else {
+             // Check queue
+             for (let i = 0; i < state.queue.length; i++) {
+                 if (state.queue[i].up_id === uploadId) {
+                     state.queue.splice(i, 1);
                      break;
                  }
             }
-
-             for (var i = 0; i < upload_failed.length; i++) {
-                 if (upload_failed[i].up_id == up_id) {
-                     upload_failed.splice(i, 1);
+
+             // Check failed uploads
+             for (let i = 0; i < state.failed.length; i++) {
+                 if (state.failed[i].up_id === uploadId) {
+                     state.failed.splice(i, 1);
                      break;
                  }
             }
         }
-         sendprogress();
+
+         // Update progress
+         sendProgress();
      };
-
-
-     // changes the status of the item of given ID to "pause" so it stops triggering "do_process_uploading"
-     upload.pauseItem = function (up_id) {
-         var itemKey = -1;
-         for (var i in upload_running) {
-             if (upload_running[i].up_id == up_id) {
-                 itemKey = i;
-                 break;
-             }
+
+     /**
+      * Pause an active upload
+      * @param {number} uploadId - Upload ID to pause
+      */
+     upload.pauseItem = function(uploadId) {
+         // Find upload in running list
+         const upload = state.running[uploadId];
+
+         // Only pause if active
+         if (upload && upload.status === "uploading") {
+             upload.paused = true;
         }
-         if (itemKey >= 0 && upload_running[itemKey].status == "uploading") // if the item we're willing to pause exists in the running list and is currently uploading
-             upload_running[itemKey].paused = true;
-
-         sendprogress();
+
+         // Update progress
+         sendProgress();
      };
-
-
-     // changes the status of the item of given ID to "uploading" and triggers "do_process_uploading" on it
-     upload.resumeItem = function (up_id) {
-         var itemKey = -1;
-         for (var i in upload_running) {
-             if (upload_running[i].up_id == up_id) {
-                 itemKey = i;
-                 break;
-             }
-         }
-         if (itemKey >= 0 && upload_running[itemKey].paused) { // if the item we're willing to resume exists in the running list and is currently paused
-             upload_running[itemKey].paused = false;
-             do_process_uploading(upload_running[itemKey]);
+
+     /**
+      * Resume a paused upload
+      * @param {number} uploadId - Upload ID to resume
+      */
+     upload.resumeItem = function(uploadId) {
+         // Find upload in running list
+         const upload = state.running[uploadId];
+
+         // Only resume if paused
+         if (upload && upload.paused) {
+             upload.paused = false;
+             processActiveUpload(upload);
         }
-         sendprogress();
+
+         // Update progress
+         sendProgress();
      };
-
-
-     upload.retryItem = function (up_id) {
-         var itemKey = -1;
-         var up = undefined;
-         for (var i in upload_failed) {
-             if (upload_failed[i].up_id == up_id) {
-                 itemKey = i;
-                 up = upload_failed[i];
+
+     /**
+      * Retry a failed upload
+      * @param {number} uploadId - Upload ID to retry
+      */
+     upload.retryItem = function(uploadId) {
+         // Find upload in failed list
+         let failedUpload = null;
+         let failedIndex = -1;
+
+         for (let i = 0; i < state.failed.length; i++) {
+             if (state.failed[i].up_id === uploadId) {
+                 failedUpload = state.failed[i];
+                 failedIndex = i;
                  break;
             }
         }
-         if (itemKey >= 0) {
-             up.failure = {};
-             for (var i = 0, len = upload_queue.length; i < len; i++) {
-                 if (upload_queue[i].up_id === up.up_id) {
-                     //already in queue what ?
-                     return;
-                 }
+
+         // Skip if not found
+         if (!failedUpload) return;
+
+         // Check if already in queue
+         for (let i = 0; i < state.queue.length; i++) {
+             if (state.queue[i].up_id === uploadId) {
+                 return; // Already in queue
             }
-
-             //reset pending partNumbers
-             for (var i = 0; i < up.blocks; i++) {
-                 if (up.b[i] == "pending") {
-                     up.b[i] = undefined
-                 }
-             }
-
-
-             upload_failed.splice(itemKey, 1);
-             upload_queue.push(up);
-
-             upload.run();
-             if (typeof document !== "undefined") {
-                 setTimeout(function () {
-                     var evt = new CustomEvent("upload:retry", {
-                         detail: {
-                             item: up,
-                         }
-                     });
-                     document.dispatchEvent(evt);
-                 }, 10);
+         }
+
+         // Reset failure data
+         failedUpload.failure = {};
+
+         // Reset pending parts
+         for (let i = 0; i < failedUpload.blocks; i++) {
+             if (failedUpload.b[i] === "pending") {
+                 failedUpload.b[i] = undefined;
             }
         }
-         sendprogress();
+
+         // Move from failed to queue
+         state.failed.splice(failedIndex, 1);
+         state.queue.push(failedUpload);
+
+         // Restart upload
+         upload.run();
+
+         // Dispatch retry event
+         utils.dispatchEvent("upload:retry", { item: failedUpload });
+
+         // Update progress
+         sendProgress();
      };
 
 
-     // perform an upload following a response to upload a file from an API.
-     //
-     // TODO: if file is small enough, we can skip the multipart upload and just perform a straight PUT (will fail over 5GB, but we probably want a smaller cutoff, like 32MB or less)
-     upload.run = function () {
-         fillqueue();
-
-         // check for elements in "q", start uploads we can start
-         for (var up_id in upload_running) {
-             var up = upload_running[up_id];
-             switch (up['status']) {
+     /**
+      * Start or continue the upload process
+      * Processes queued uploads and continues running uploads
+      */
+     upload.run = function() {
+         // Fill queue with new uploads
+         fillUploadQueue();
+
+         // Process running uploads
+         for (const uploadId in state.running) {
+             const upload = state.running[uploadId];
+
+             // Process based on status
+             switch (upload.status) {
                  case "pending":
-                     do_process_pending(up);
+                     processUpload(upload);
                      break;
                  case "uploading":
-                     do_process_uploading(up);
+                     processActiveUpload(upload);
                      break;
             }
         }
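
The part-sizing rule in `handleAwsMultipartUpload` is easy to reason about in isolation: target roughly 10,000 parts, but never go below S3's 5 MB minimum part size, then upload at most three parts (and three files) at a time. A standalone sketch of that calculation (`planAwsParts` is a hypothetical helper name, not part of the package):

```js
// Reproduces the block-size math from handleAwsMultipartUpload:
// ~10k parts targeted, clamped to the 5 MB (5242880 byte) AWS minimum.
function planAwsParts(fileSize) {
    const blockSize = Math.max(Math.ceil(fileSize / 10000), 5242880);
    return { blockSize, blocks: Math.ceil(fileSize / blockSize) };
}

console.log(planAwsParts(100 * 1024 * 1024)); // { blockSize: 5242880, blocks: 20 }
```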