@karpeleslab/klbfw 0.1.12 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CLAUDE.md +50 -0
  2. package/README.md +199 -35
  3. package/cookies.js +107 -41
  4. package/coverage/clover.xml +835 -0
  5. package/coverage/coverage-final.json +9 -0
  6. package/coverage/lcov-report/base.css +224 -0
  7. package/coverage/lcov-report/block-navigation.js +87 -0
  8. package/coverage/lcov-report/cookies.js.html +334 -0
  9. package/coverage/lcov-report/favicon.png +0 -0
  10. package/coverage/lcov-report/fw-wrapper.js.html +163 -0
  11. package/coverage/lcov-report/index.html +131 -0
  12. package/coverage/lcov-report/index.js.html +196 -0
  13. package/coverage/lcov-report/internal.js.html +604 -0
  14. package/coverage/lcov-report/klbfw/cookies.js.html +490 -0
  15. package/coverage/lcov-report/klbfw/fw-wrapper.js.html +745 -0
  16. package/coverage/lcov-report/klbfw/index.html +206 -0
  17. package/coverage/lcov-report/klbfw/index.js.html +235 -0
  18. package/coverage/lcov-report/klbfw/internal.js.html +811 -0
  19. package/coverage/lcov-report/klbfw/rest.js.html +565 -0
  20. package/coverage/lcov-report/klbfw/test/index.html +116 -0
  21. package/coverage/lcov-report/klbfw/test/setup.js.html +1105 -0
  22. package/coverage/lcov-report/klbfw/upload.js.html +3487 -0
  23. package/coverage/lcov-report/klbfw/util.js.html +388 -0
  24. package/coverage/lcov-report/prettify.css +1 -0
  25. package/coverage/lcov-report/prettify.js +2 -0
  26. package/coverage/lcov-report/rest.js.html +472 -0
  27. package/coverage/lcov-report/sort-arrow-sprite.png +0 -0
  28. package/coverage/lcov-report/sorter.js +196 -0
  29. package/coverage/lcov-report/upload.js.html +1789 -0
  30. package/coverage/lcov-report/util.js.html +313 -0
  31. package/coverage/lcov.info +1617 -0
  32. package/fw-wrapper.js +221 -26
  33. package/index.js +16 -2
  34. package/internal.js +186 -102
  35. package/package.json +21 -3
  36. package/rest.js +129 -81
  37. package/test/README.md +62 -0
  38. package/test/api.test.js +102 -0
  39. package/test/cookies.test.js +65 -0
  40. package/test/integration.test.js +481 -0
  41. package/test/rest.test.js +93 -0
  42. package/test/setup.js +341 -0
  43. package/test/upload.test.js +689 -0
  44. package/test/util.test.js +46 -0
  45. package/upload.js +1012 -442
  46. package/util.js +59 -21
package/upload.js CHANGED
@@ -1,560 +1,1130 @@
+ /**
+ * KLB Upload Module
+ *
+ * This module handles file uploads to KLB API endpoints.
+ * It supports both browser and Node.js environments with a unified API.
+ *
+ * The module handles:
+ * - File upload to KLB API endpoints
+ * - Multiple upload protocols (PUT and AWS multipart)
+ * - Progress tracking
+ * - Pause, resume, retry, and cancel operations
+ * - Browser and Node.js compatibility
+ *
+ * Browser usage:
+ * ```js
+ * // Open file picker and upload selected files
+ * upload.upload.init('Misc/Debug:testUpload')()
+ * .then(result => console.log('Upload complete', result));
+ *
+ * // Upload a specific File object
+ * upload.upload.append('Misc/Debug:testUpload', fileObject)
+ * .then(result => console.log('Upload complete', result));
+ *
+ * // Track progress
+ * upload.upload.onprogress = (status) => {
+ * console.log('Progress:', status.running.map(i => i.status));
+ * };
+ *
+ * // Cancel an upload
+ * upload.upload.cancelItem(uploadId);
+ * ```
+ *
+ * Node.js usage:
+ * ```js
+ * // For Node.js environments, first install dependencies:
+ * // npm install node-fetch xmldom
+ *
+ * // Initialize upload with specific file paths
+ * upload.upload.init('Misc/Debug:testUpload')(['./file1.txt', './file2.jpg'])
+ * .then(result => console.log('Upload complete', result));
+ *
+ * // Or create a custom file object with path
+ * const file = {
+ * name: 'test.txt',
+ * size: 1024,
+ * type: 'text/plain',
+ * path: '/path/to/file.txt'
+ * };
+ * upload.upload.append('Misc/Debug:testUpload', file)
+ * .then(result => console.log('Upload complete', result));
+ * ```
+ *
+ * @module upload
+ */
+
  const rest = require('./rest');
  const fwWrapper = require('./fw-wrapper');
- var sha256 = require('js-sha256').sha256;
-
- // retunr time in amz format, eg 20180930T132108Z
- function getAmzTime() {
- var t = new Date();
- return t.getUTCFullYear() +
- '' + pad(t.getUTCMonth() + 1) +
- pad(t.getUTCDate()) +
- 'T' + pad(t.getUTCHours()) +
- pad(t.getUTCMinutes()) +
- pad(t.getUTCSeconds()) +
- 'Z';
+ const sha256 = require('js-sha256').sha256;
+
+ /**
+ * Environment detection and cross-platform utilities
+ */
+ const env = {
+ /**
+ * Detect if running in a browser environment
+ */
+ isBrowser: typeof window !== 'undefined' && typeof document !== 'undefined',
+
+ /**
+ * Detect if running in a Node.js environment
+ */
+ isNode: typeof process !== 'undefined' && process.versions && process.versions.node,
+
+ /**
+ * Node.js specific modules (lazy-loaded)
+ */
+ node: {
+ fetch: null,
+ xmlParser: null,
+ fs: null,
+ path: null,
+ EventEmitter: null,
+ eventEmitter: null
+ }
+ };
+
+ /**
+ * Initialize Node.js dependencies when in Node environment
+ */
+ if (env.isNode && !env.isBrowser) {
+ try {
+ env.node.fetch = require('node-fetch');
+ env.node.xmlParser = require('xmldom');
+ env.node.fs = require('fs');
+ env.node.path = require('path');
+ env.node.EventEmitter = require('events');
+ env.node.eventEmitter = new (env.node.EventEmitter)();
+ } catch (e) {
+ console.warn('Node.js dependencies not available. Some functionality may be limited:', e.message);
+ console.warn('To use in Node.js, install: npm install node-fetch xmldom');
+ }
  }

- function pad(number) {
- if (number < 10) {
- return '0' + number;
+ /**
+ * Cross-platform utilities
+ */
+ const utils = {
+ /**
+ * Environment-agnostic fetch implementation
+ * @param {string} url - The URL to fetch
+ * @param {Object} options - Fetch options
+ * @returns {Promise} - Fetch promise
+ */
+ fetch(url, options) {
+ if (env.isBrowser && typeof window.fetch === 'function') {
+ return window.fetch(url, options);
+ } else if (env.isNode && env.node.fetch) {
+ return env.node.fetch(url, options);
+ } else if (typeof fetch === 'function') {
+ // For environments where fetch is globally available
+ return fetch(url, options);
  }
- return number;
- }
-
- // perform call against AWS S3 with the appropriate signature obtained from server
+ return Promise.reject(new Error('fetch not available in this environment'));
+ },
+
+ /**
+ * Environment-agnostic XML parser
+ * @param {string} xmlString - XML string to parse
+ * @returns {Document} - DOM-like document
+ */
+ parseXML(xmlString) {
+ if (env.isBrowser) {
+ return new DOMParser().parseFromString(xmlString, 'text/xml');
+ } else if (env.isNode && env.node.xmlParser) {
+ const DOMParserNode = env.node.xmlParser.DOMParser;
+ const dom = new DOMParserNode().parseFromString(xmlString, 'text/xml');
+
+ // Add querySelector interface for compatibility
+ dom.querySelector = function(selector) {
+ if (selector === 'UploadId') {
+ const elements = this.getElementsByTagName('UploadId');
+ return elements.length > 0 ? { innerHTML: elements[0].textContent } : null;
+ }
+ return null;
+ };
+
+ return dom;
+ }
+ throw new Error('XML parsing not available in this environment');
+ },
+
+ /**
+ * Read a file as ArrayBuffer in any environment
+ * @param {File|Object} file - File object or file-like object with path
+ * @param {Function} callback - Callback function(buffer, error)
+ */
+ readFileAsArrayBuffer(file, callback) {
+ if (env.isBrowser) {
+ const reader = new FileReader();
+ reader.addEventListener('loadend', () => callback(reader.result));
+ reader.addEventListener('error', (e) => callback(null, e));
+ reader.readAsArrayBuffer(file);
+ } else if (env.isNode && env.node.fs) {
+ if (file.path) {
+ // Read from filesystem
+ const readStream = env.node.fs.createReadStream(file.path, {
+ start: file.start || 0,
+ end: file.end || undefined
+ });
+
+ const chunks = [];
+ readStream.on('data', chunk => chunks.push(chunk));
+ readStream.on('end', () => {
+ const buffer = Buffer.concat(chunks);
+ callback(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength));
+ });
+ readStream.on('error', err => callback(null, err));
+ } else if (file.content) {
+ // Memory buffer
+ const buffer = Buffer.from(file.content);
+ callback(buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength));
+ } else {
+ callback(null, new Error('No file path or content provided'));
+ }
+ } else {
+ callback(null, new Error('File reading not available in this environment'));
+ }
+ },
+
+ /**
+ * Dispatch a custom event in any environment
+ * @param {string} eventName - Event name
+ * @param {Object} detail - Event details
+ */
+ dispatchEvent(eventName, detail) {
+ if (env.isBrowser) {
+ const evt = new CustomEvent(eventName, { detail });
+ document.dispatchEvent(evt);
+ } else if (env.isNode && env.node.eventEmitter) {
+ env.node.eventEmitter.emit(eventName, detail);
+ }
+ // In other environments, events are silently ignored
+ },
+
+ /**
+ * Format a date for AWS (YYYYMMDDTHHMMSSZ)
+ * @returns {string} Formatted date
+ */
+ getAmzTime() {
+ const t = new Date();
+ return t.getUTCFullYear() +
+ this.pad(t.getUTCMonth() + 1) +
+ this.pad(t.getUTCDate()) +
+ 'T' + this.pad(t.getUTCHours()) +
+ this.pad(t.getUTCMinutes()) +
+ this.pad(t.getUTCSeconds()) +
+ 'Z';
+ },
+
+ /**
+ * Pad a number with leading zero if needed
+ * @param {number} number - Number to pad
+ * @returns {string} Padded number
+ */
+ pad(number) {
+ return number < 10 ? '0' + number : String(number);
+ }
+ };
+
+ /**
+ * AWS S3 request handler
+ * Performs a signed request to AWS S3 using a signature obtained from the server
+ *
+ * @param {Object} upInfo - Upload info including bucket endpoint and key
+ * @param {string} method - HTTP method (GET, POST, PUT)
+ * @param {string} query - Query parameters
+ * @param {*} body - Request body
+ * @param {Object} headers - Request headers
+ * @param {Object} context - Request context
+ * @returns {Promise} - Request promise
+ */
  function awsReq(upInfo, method, query, body, headers, context) {
  headers = headers || {};
  context = context || {};

- if (body == "") {
- var bodyHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; // sha256('')
+ // Calculate body hash for AWS signature
+ let bodyHash;
+
+ if (!body || body === "") {
+ // Empty body hash
+ bodyHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
  } else {
- var bodyHash = sha256(body);
+ try {
+ // Handle different body types
+ let bodyForHash = body;
+
+ if (body instanceof ArrayBuffer || (body.constructor && body.constructor.name === 'ArrayBuffer')) {
+ bodyForHash = new Uint8Array(body);
+ } else if (body.constructor && body.constructor.name === 'Buffer') {
+ bodyForHash = Buffer.from(body).toString();
+ }
+
+ bodyHash = sha256(bodyForHash);
+ } catch (e) {
+ console.error("Error calculating hash:", e.message);
+ bodyHash = "UNSIGNED-PAYLOAD";
+ }
  }

- var ts = getAmzTime(); // aws format, eg 20180930T132108Z
- var ts_d = ts.substring(0, 8);
+ // Create AWS timestamp
+ const timestamp = utils.getAmzTime();
+ const datestamp = timestamp.substring(0, 8);

+ // Set AWS headers
  headers["X-Amz-Content-Sha256"] = bodyHash;
- headers["X-Amz-Date"] = ts;
+ headers["X-Amz-Date"] = timestamp;

- // prepare auth string
- var aws_auth_str = [
+ // Prepare the string to sign
+ const authStringParts = [
  "AWS4-HMAC-SHA256",
- ts,
- ts_d + "/" + upInfo.Bucket_Endpoint.Region + "/s3/aws4_request",
+ timestamp,
+ `${datestamp}/${upInfo.Bucket_Endpoint.Region}/s3/aws4_request`,
  method,
- "/" + upInfo.Bucket_Endpoint.Name + "/" + upInfo.Key,
+ `/${upInfo.Bucket_Endpoint.Name}/${upInfo.Key}`,
  query,
- "host:" + upInfo.Bucket_Endpoint.Host,
+ `host:${upInfo.Bucket_Endpoint.Host}`
  ];

- // list headers to sign (host and anything starting with x-)
- var sign_head = ['host'];
- var k = Object.keys(headers).sort();
- for (var i = 0; i < k.length; i++) {
- var s = k[i].toLowerCase();
- if (s.substring(0, 2) != "x-") {
- continue;
+ // Add x-* headers to sign
+ const headersToSign = ['host'];
+ const sortedHeaderKeys = Object.keys(headers).sort();
+
+ for (const key of sortedHeaderKeys) {
+ const lowerKey = key.toLowerCase();
+ if (lowerKey.startsWith('x-')) {
+ headersToSign.push(lowerKey);
+ authStringParts.push(`${lowerKey}:${headers[key]}`);
  }
- sign_head.push(s);
- aws_auth_str.push(s + ":" + headers[k[i]]);
  }
- aws_auth_str.push("");
- aws_auth_str.push(sign_head.join(";"));
- aws_auth_str.push(bodyHash);
-
- var promise = new Promise(function (resolve, reject) {
-
- rest.rest("Cloud/Aws/Bucket/Upload/" + upInfo.Cloud_Aws_Bucket_Upload__ + ":signV4", "POST", {headers: aws_auth_str.join("\n")}, context)
- .then(function (ares) {
- var u = "https://" + upInfo.Bucket_Endpoint.Host + "/" + upInfo.Bucket_Endpoint.Name + "/" + upInfo.Key;
- if (query != "") u = u + "?" + query;
-
- headers["Authorization"] = ares.data.authorization;
-
- fetch(u, {
- method: method,
- body: body,
- headers: headers
- })
- .then(resolve, reject)
- .catch(reject);
-
-
- }, reject)
- .catch(reject);
-
+
+ // Complete the string to sign
+ authStringParts.push('');
+ authStringParts.push(headersToSign.join(';'));
+ authStringParts.push(bodyHash);
+
+ return new Promise((resolve, reject) => {
+ // Get signature from server
+ rest.rest(
+ `Cloud/Aws/Bucket/Upload/${upInfo.Cloud_Aws_Bucket_Upload__}:signV4`,
+ "POST",
+ { headers: authStringParts.join("\n") },
+ context
+ )
+ .then(response => {
+ // Construct the S3 URL
+ let url = `https://${upInfo.Bucket_Endpoint.Host}/${upInfo.Bucket_Endpoint.Name}/${upInfo.Key}`;
+ if (query) url += `?${query}`;
+
+ // Add the authorization header
+ headers["Authorization"] = response.data.authorization;
+
+ // Make the actual request to S3
+ return utils.fetch(url, {
+ method,
+ body,
+ headers
+ });
+ })
+ .then(resolve)
+ .catch(reject);
  });
-
- return promise;
  }

+ /**
+ * Upload module (IIFE pattern)
+ * @returns {Object} Upload interface
+ */
  module.exports.upload = (function () {
- var upload = {};
- var upload_queue = []; // queue of uploads to run
- var upload_failed = []; // failed upload(s)
- var upload_running = {}; // currently processing uploads
- var up_id = 0; // next upload id
- var last_input = null;
-
-
- function sendprogress() {
- if (typeof upload.onprogress === "undefined") return;
-
- upload.onprogress(upload.getStatus());
+ /**
+ * Upload state
+ */
+ const state = {
+ queue: [], // Queued uploads
+ failed: [], // Failed uploads
+ running: {}, // Currently processing uploads
+ nextId: 0, // Next upload ID
+ lastInput: null // Last created file input element (browser only)
+ };
+
+ // Public API object
+ const upload = {};
+
+ /**
+ * Helper Functions
+ */
+
+ /**
+ * Notify progress to listeners
+ * Calls onprogress callback and dispatches events
+ */
+ function sendProgress() {
+ const status = upload.getStatus();
+
+ // Call the onprogress callback if defined
+ if (typeof upload.onprogress === "function") {
+ upload.onprogress(status);
+ }
+
+ // Dispatch event for listeners
+ utils.dispatchEvent("upload:progress", status);
  }
-
- function do_process_pending(up) {
- up["status"] = "pending-wip";
- // up is an object with api path, file, dfd
- var params = up.params;
-
- // set params for upload
- params["filename"] = up.file.name;
- params["size"] = up.file.size;
- params["lastModified"] = up.file.lastModified / 1000;
- params["type"] = up.file.type;
-
- rest.rest(up.path, "POST", params, up.context).then(function (res) {
- // Method 1: aws signed multipart upload
- if (res["data"]["Cloud_Aws_Bucket_Upload__"]) {
- up.info = res["data"]; // contains stuff like Bucket_Endpoint, Key, etc
-
- // ok we are ready to upload - this will initiate an upload
- awsReq(up.info, "POST", "uploads=", "", {"Content-Type": up.file.type, "X-Amz-Acl": "private"}, up.context)
- .then(response => response.text())
- .then(str => (new DOMParser()).parseFromString(str, "text/xml"))
- .then(dom => dom.querySelector('UploadId').innerHTML)
- .then(function (uploadId) {
- up.uploadId = uploadId;
-
- // ok, let's compute block size so we know how many parts we need to send
- var fsize = up.file.size;
- var bsize = Math.ceil(fsize / 10000); // we want ~10k parts
- if (bsize < 5242880) bsize = 5242880; // minimum block size = 5MB
-
- up.method = 'aws';
- up.bsize = bsize;
- up.blocks = Math.ceil(fsize / bsize);
- up.b = {};
- up['status'] = 'uploading';
- upload.run();
- }).catch(res => failure(up, res))
- return;
+
+ /**
+ * Handle upload failure
+ * @param {Object} up - Upload object
+ * @param {*} error - Error data
+ */
+ function handleFailure(up, error) {
+ // Skip if upload is no longer running
+ if (!(up.up_id in state.running)) return;
+
+ // Check if already in failed list
+ for (const failedItem of state.failed) {
+ if (failedItem.up_id === up.up_id) {
+ return; // Already recorded as failed
  }
- // Method 2: PUT requests
- if (res["data"]["PUT"]) {
- var fsize = up.file.size;
- var bsize = fsize; // upload file in a single block
- if (res["data"]["Blocksize"]) {
- // this upload target supports multipart PUT upload
- bsize = res["data"]["Blocksize"]; // multipart upload
+ }
+
+ // Record failure
+ up.failure = error;
+ state.failed.push(up);
+ delete state.running[up.up_id];
+
+ // Continue processing queue
+ upload.run();
+
+ // Notify progress
+ sendProgress();
+
+ // Dispatch failure event
+ utils.dispatchEvent("upload:failed", {
+ item: up,
+ res: error
+ });
+ }
+
+ /**
+ * Process a pending upload
+ * Initiates the upload process with the server
+ * @param {Object} up - Upload object
+ */
+ function processUpload(up) {
+ // Mark as processing
+ up.status = "pending-wip";
+
+ // Prepare parameters
+ const params = up.params || {};
+
+ // Set file metadata
+ params.filename = up.file.name;
+ params.size = up.file.size;
+ params.lastModified = up.file.lastModified / 1000;
+ params.type = up.file.type;
+
+ // Initialize upload with the server
+ rest.rest(up.path, "POST", params, up.context)
+ .then(function(response) {
+ // Method 1: AWS signed multipart upload
+ if (response.data.Cloud_Aws_Bucket_Upload__) {
+ return handleAwsMultipartUpload(up, response.data);
  }
-
- up.info = res["data"];
- up.method = 'put';
- up.bsize = bsize;
- up.blocks = Math.ceil(fsize / bsize);
- up.b = {};
- up['status'] = 'uploading';
- upload.run();
- return;
- }
- // invalid data
- delete upload_running[up.up_id];
- upload_failed.push(up);
- up.reject();
- return;
+
+ // Method 2: Direct PUT upload
+ if (response.data.PUT) {
+ return handlePutUpload(up, response.data);
+ }
+
+ // Invalid response format
+ delete state.running[up.up_id];
+ state.failed.push(up);
+ up.reject(new Error('Invalid upload response format'));
+ })
+ .catch(error => handleFailure(up, error));
+ }
+
+ /**
+ * Set up AWS multipart upload
+ * @param {Object} up - Upload object
+ * @param {Object} data - Server response data
+ */
+ function handleAwsMultipartUpload(up, data) {
+ // Store upload info
+ up.info = data;
+
+ // Initialize multipart upload
+ return awsReq(
+ up.info,
+ "POST",
+ "uploads=",
+ "",
+ {"Content-Type": up.file.type, "X-Amz-Acl": "private"},
+ up.context
+ )
+ .then(response => response.text())
+ .then(str => utils.parseXML(str))
+ .then(dom => dom.querySelector('UploadId').innerHTML)
+ .then(uploadId => {
+ up.uploadId = uploadId;
+
+ // Calculate optimal block size
+ const fileSize = up.file.size;
+
+ // Target ~10k parts, but minimum 5MB per AWS requirements
+ let blockSize = Math.ceil(fileSize / 10000);
+ if (blockSize < 5242880) blockSize = 5242880;
+
+ // Set up upload parameters
+ up.method = 'aws';
+ up.bsize = blockSize;
+ up.blocks = Math.ceil(fileSize / blockSize);
+ up.b = {};
+ up.status = 'uploading';
+
+ // Continue upload process
+ upload.run();
  })
- .catch(res => failure(up, res));
+ .catch(error => handleFailure(up, error));
  }
-
-
- function failure(up, data) {
- if (!(up.up_id in upload_running)) return;
-
- for (var i = 0, len = upload_failed.length; i < len; i++) {
- if (upload_failed[i].up_id === up.up_id) {
- //already in
- return;
- }
+
+ /**
+ * Set up direct PUT upload
+ * @param {Object} up - Upload object
+ * @param {Object} data - Server response data
+ */
+ function handlePutUpload(up, data) {
+ // Store upload info
+ up.info = data;
+
+ // Calculate block size (if multipart PUT is supported)
+ const fileSize = up.file.size;
+ let blockSize = fileSize; // Default: single block
+
+ if (data.Blocksize) {
+ // Server supports multipart upload
+ blockSize = data.Blocksize;
  }
-
- up.failure = data;
- upload_failed.push(up);
- delete upload_running[up.up_id];
+
+ // Set up upload parameters
+ up.method = 'put';
+ up.bsize = blockSize;
+ up.blocks = Math.ceil(fileSize / blockSize);
+ up.b = {};
+ up.status = 'uploading';
+
+ // Continue upload process
  upload.run();
- sendprogress();
- setTimeout(function () {
- var evt = new CustomEvent("upload:failed", {
- detail: {
- item: up,
- res: data
- }
- });
- document.dispatchEvent(evt);
- }, 10);
  }

- function do_upload_part(up, partno) {
- // ok, need to start this!
- up.b[partno] = "pending";
- var start = partno * up.bsize;
- var part = up.file.slice(start, start + up.bsize);
-
- var reader = new FileReader();
- reader.addEventListener("loadend", function () {
- switch(up.method) {
- case 'aws':
- awsReq(up.info, "PUT", "partNumber=" + (partno + 1) + "&uploadId=" + up.uploadId, reader.result, null, up.context)
- .then(function (response) {
- up.b[partno] = response.headers.get("ETag");
- sendprogress();
- upload.run();
- }).catch(res => failure(up, res));
- break;
- case 'put':
- let headers = {};
- headers["Content-Type"] = up.file.type;
- if (up.blocks > 1) {
- // add Content-Range header
- // Content-Range: bytes start-end/*
- const end = start + reader.result.byteLength - 1; // inclusive
- headers["Content-Range"] = "bytes "+start+"-"+end+"/*";
- }
+ /**
+ * Upload a single part of a file
+ * Handles both AWS multipart and direct PUT methods
+ * @param {Object} up - Upload object
+ * @param {number} partNumber - Part number (0-based)
+ */
+ function uploadPart(up, partNumber) {
+ // Mark part as pending
+ up.b[partNumber] = "pending";
+
+ // Calculate byte range for this part
+ const startByte = partNumber * up.bsize;
+ const endByte = Math.min(startByte + up.bsize, up.file.size);
+
+ // Get file slice based on environment
+ let filePart;
+
+ if (env.isBrowser) {
+ // Browser: use native File.slice
+ filePart = up.file.slice(startByte, endByte);
+ } else if (env.isNode) {
+ // Node.js: create a reference with start/end positions
+ filePart = {
+ path: up.file.path,
+ start: startByte,
+ end: endByte,
+ type: up.file.type,
+ content: up.file.content // For memory buffer based files
+ };
+ } else {
+ handleFailure(up, new Error('Environment not supported'));
+ return;
+ }

- fetch(up.info["PUT"], {
- method: "PUT",
- body: reader.result,
- headers: headers,
- }).then(function (response) {
- up.b[partno] = "done";
- sendprogress();
- upload.run();
- }).catch(res => failure(up, res));
- break;
+ // Read the file part as ArrayBuffer
+ utils.readFileAsArrayBuffer(filePart, (arrayBuffer, error) => {
+ if (error) {
+ handleFailure(up, error);
+ return;
+ }
+
+ // Choose upload method based on protocol
+ if (up.method === 'aws') {
+ uploadAwsPart(up, partNumber, arrayBuffer);
+ } else if (up.method === 'put') {
+ uploadPutPart(up, partNumber, startByte, arrayBuffer);
+ } else {
+ handleFailure(up, new Error(`Unknown upload method: ${up.method}`));
  }
  });
+ }
+
+ /**
+ * Upload a part using AWS multipart upload
+ * @param {Object} up - Upload object
+ * @param {number} partNumber - Part number (0-based)
+ * @param {ArrayBuffer} data - Part data
+ */
+ function uploadAwsPart(up, partNumber, data) {
+ // AWS part numbers are 1-based
+ const awsPartNumber = partNumber + 1;
+
+ awsReq(
+ up.info,
+ "PUT",
+ `partNumber=${awsPartNumber}&uploadId=${up.uploadId}`,
+ data,
+ null,
+ up.context
+ )
+ .then(response => {
+ // Store ETag for this part (needed for completion)
+ up.b[partNumber] = response.headers.get("ETag");
+
+ // Update progress and continue processing
+ sendProgress();
+ upload.run();
+ })
+ .catch(error => handleFailure(up, error));
+ }
+
+ /**
+ * Upload a part using direct PUT
+ * @param {Object} up - Upload object
+ * @param {number} partNumber - Part number (0-based)
+ * @param {number} startByte - Starting byte position
+ * @param {ArrayBuffer} data - Part data
+ */
+ function uploadPutPart(up, partNumber, startByte, data) {
+ // Set up headers
+ const headers = {
+ "Content-Type": up.file.type
+ };
+
+ // Add Content-Range header for multipart PUT
+ if (up.blocks > 1) {
+ const endByte = startByte + data.byteLength - 1; // inclusive
+ headers["Content-Range"] = `bytes ${startByte}-${endByte}/*`;
+ }

- reader.addEventListener("error", function (e) {
- failure(up, e);
- });
-
- reader.readAsArrayBuffer(part);
+ // Perform the PUT request
+ utils.fetch(up.info.PUT, {
+ method: "PUT",
+ body: data,
+ headers: headers,
+ })
+ .then(response => {
+ // Mark part as done
+ up.b[partNumber] = "done";
+
+ // Update progress and continue processing
+ sendProgress();
+ upload.run();
+ })
+ .catch(error => handleFailure(up, error));
  }


- function do_process_uploading(up) {
+ /**
+ * Process an upload in progress
+ * Manages uploading parts and completing the upload
+ * @param {Object} up - Upload object
+ */
+ function processActiveUpload(up) {
+ // Skip if paused or canceled
  if (up.paused || up.canceled) return;

- var p = 0; // pending
- var d = 0; // done
- for (var i = 0; i < up.blocks; i++) {
- if (up.b[i] == undefined) {
- if (up.paused) break; // do not start new parts if paused
- do_upload_part(up, i);
- } else if (up.b[i] != "pending") {
- d += 1;
+ // Track upload progress
+ let pendingParts = 0;
+ let completedParts = 0;
+
+ // Process each part
+ for (let i = 0; i < up.blocks; i++) {
+ if (up.b[i] === undefined) {
+ // Part not started yet
+ if (up.paused) break; // Don't start new parts when paused
+
+ // Start uploading this part
+ uploadPart(up, i);
+ pendingParts++;
+ } else if (up.b[i] !== "pending") {
+ // Part completed
+ completedParts++;
  continue;
+ } else {
+ // Part in progress
+ pendingParts++;
  }
- p += 1;
- if (p >= 3) break;
+
+ // Limit concurrent uploads
+ if (pendingParts >= 3) break;
  }

- up["done"] = d;
-
- if (p == 0) {
- up["status"] = "validating";
- switch(up.method) {
- case 'aws':
- // complete, see https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
- var xml = "<CompleteMultipartUpload>";
- for (var i = 0; i < up.blocks; i++) {
- xml += "<Part><PartNumber>" + (i + 1) + "</PartNumber><ETag>" + up.b[i] + "</ETag></Part>";
- }
- xml += "</CompleteMultipartUpload>";
- awsReq(up.info, "POST", "uploadId=" + up.uploadId, xml, null, up.context)
- .then(response => response.text())
- .then(function (r) {
- // if success, need to call finalize
- rest.rest("Cloud/Aws/Bucket/Upload/" + up.info.Cloud_Aws_Bucket_Upload__ + ":handleComplete", "POST", {}, up.context).then(function (ares) {
- // SUCCESS!
- up["status"] = "complete";
- up["final"] = ares["data"];
- sendprogress();
- up.resolve(up);
- delete upload_running[up.up_id];
- upload.run();
- }).catch(res => failure(up, res));
- }).catch(res => failure(up, res));
- break;
- case 'put':
- // complete, directly call handleComplete
- rest.rest(up.info.Complete, "POST", {}, up.context).then(function (ares) {
- // success!
- up["status"] = "complete";
- up["final"] = ares["data"];
- sendprogress();
- delete upload_running[up.up_id];
- up.resolve(up);
- upload.run();
- }).catch(res => failure(up, res));
- break;
+ // Update upload progress
+ up.done = completedParts;
+
+ // Check if all parts are complete
+ if (pendingParts === 0) {
+ // All parts complete, finalize the upload
+ up.status = "validating";
+
+ if (up.method === 'aws') {
+ completeAwsUpload(up);
+ } else if (up.method === 'put') {
+ completePutUpload(up);
  }
  }
  }
-
- // take tasks from queue and run them if needed
- function fillqueue() {
- if (Object.keys(upload_running).length >= 3) return; // nothing yet
- // if (upload_failed.length > 0) return; // need to push "retry" to resume
-
- // max 3 uploading files
- while (Object.keys(upload_running).length < 3) {
- if (upload_queue.length == 0) return;
- var up = upload_queue.shift();
- upload_running[up.up_id] = up;
+
+ /**
+ * Complete AWS multipart upload
+ * @param {Object} up - Upload object
+ */
+ function completeAwsUpload(up) {
+ // Create completion XML
+ // See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
+ let xml = "<CompleteMultipartUpload>";
+
+ for (let i = 0; i < up.blocks; i++) {
+ // AWS part numbers are 1-based
+ xml += `<Part><PartNumber>${i + 1}</PartNumber><ETag>${up.b[i]}</ETag></Part>`;
  }
- sendprogress();
+
+ xml += "</CompleteMultipartUpload>";
+
+ // Send completion request
+ awsReq(up.info, "POST", `uploadId=${up.uploadId}`, xml, null, up.context)
+ .then(response => response.text())
+ .then(() => {
+ // Call server-side completion handler
+ return rest.rest(
+ `Cloud/Aws/Bucket/Upload/${up.info.Cloud_Aws_Bucket_Upload__}:handleComplete`,
+ "POST",
+ {},
+ up.context
+ );
+ })
+ .then(response => {
+ // Mark upload as complete
+ up.status = "complete";
+ up.final = response.data;
+
+ // Notify listeners
+ sendProgress();
+
+ // Remove from running uploads
+ delete state.running[up.up_id];
+
+ // Resolve the upload promise
+ up.resolve(up);
+
+ // Continue processing queue
+ upload.run();
+ })
+ .catch(error => handleFailure(up, error));
+ }
+
+ /**
+ * Complete direct PUT upload
+ * @param {Object} up - Upload object
+ */
+ function completePutUpload(up) {
+ // Call completion endpoint
+ rest.rest(up.info.Complete, "POST", {}, up.context)
+ .then(response => {
+ // Mark upload as complete
+ up.status = "complete";
+ up.final = response.data;
+
+ // Notify listeners
+ sendProgress();
+
+ // Remove from running uploads
+ delete state.running[up.up_id];
+
+ // Resolve the upload promise
+ up.resolve(up);
+
+ // Continue processing queue
+ upload.run();
+ })
+ .catch(error => handleFailure(up, error));
  }

-
- upload.getStatus = function () {
- var prog = {
- "queue": upload_queue,
- "running": Object.keys(upload_running).map(function (e) {
- return upload_running[e]
- }),
- "failed": upload_failed,
+ /**
+ * Fill the upload queue with new upload tasks
+ * Takes items from the queue and adds them to running uploads
+ */
+ function fillUploadQueue() {
+ // Skip if we're already running the maximum number of uploads
+ if (Object.keys(state.running).length >= 3) return;
+
+ // Maximum of 3 concurrent uploads
+ while (Object.keys(state.running).length < 3 && state.queue.length > 0) {
+ // Get next upload from queue
+ const upload = state.queue.shift();
+
+ // Add to running uploads
+ state.running[upload.up_id] = upload;
+ }
+
+ // Notify progress
+ sendProgress();
+ }
+
+ // No need for backward compatibility for private methods
+
+ /**
+ * Get current upload status
+ * @returns {Object} Status object with queued, running and failed uploads
+ */
+ upload.getStatus = function() {
+ return {
+ queue: state.queue,
+ running: Object.keys(state.running).map(id => state.running[id]),
+ failed: state.failed
  };
-
- return prog;
  };
-
- upload.resume = function () {
- // put failed stuff at end of queue, resume upload
- while (upload_failed.length > 0) {
- upload_queue.push(upload_failed.shift());
+
+ /**
+ * Resume all failed uploads
+ * Moves failed uploads back to the queue
+ */
+ upload.resume = function() {
+ // Move all failed uploads back to the queue
+ while (state.failed.length > 0) {
+ state.queue.push(state.failed.shift());
  }
-
+
+ // Restart upload process
  upload.run();
  };

+ // Environment-specific initialization
  upload.init = function (path, params, notify) {
  // perform upload to a given API, for example Drive/Item/<id>:upload
  // will allow multiple files to be uploaded
  params = params || {};
+
+ if (env.isBrowser) {
+ // Browser implementation
+ if (state.lastInput != null) {
+ state.lastInput.parentNode.removeChild(state.lastInput);
+ state.lastInput = null;
+ }

- if (last_input != null) {
- last_input.parentNode.removeChild(last_input);
- last_input = null;
- }
+ var input = document.createElement("input");
+ input.type = "file";
+ input.style.display = "none";
+ if (!params["single"]) {
+ input.multiple = "multiple";
+ }

- var input = document.createElement("input");
- input.type = "file";
- input.style.display = "none";
- if (!params["single"]) {
- input.multiple = "multiple";
- }
+ document.getElementsByTagName('body')[0].appendChild(input);
+ state.lastInput = input;

- document.getElementsByTagName('body')[0].appendChild(input);
- last_input = input;
+ var promise = new Promise(function (resolve, reject) {
+ input.onchange = function () {
+ if (this.files.length == 0) {
+ resolve();
+ }

- var promise = new Promise(function (resolve, reject) {
- input.onchange = function () {
- if (this.files.length == 0) {
- resolve();
- }
+ var count = this.files.length;
+ if (notify !== undefined) notify({status: 'init', count: count});
+ for (var i = 0; i < this.files.length; i++) {
+ upload.append(path, this.files[i], params, fwWrapper.getContext()).then(function (obj) {
+ count -= 1;
+ // Todo notify process
+ if (notify !== undefined) notify(obj);
+ if (count == 0) resolve();
+ });
+ }
+ upload.run();
+ };
+ });

- var count = this.files.length;
- if (notify !== undefined) notify({status: 'init', count: count});
- for (var i = 0; i < this.files.length; i++) {
- upload.append(path, this.files[i], params, fwWrapper.getContext()).then(function (obj) {
- count -= 1;
- // Todo notify process
- if (notify !== undefined) notify(obj);
- if (count == 0) resolve();
- });
+ input.click();
+ return promise;
+ } else if (env.isNode) {
+ // Node.js implementation
+ return function(filePaths) {
+ // Convert string to array if single file path provided
+ if (typeof filePaths === 'string') {
+ filePaths = [filePaths];
  }
- upload.run();
+
+ if (!Array.isArray(filePaths)) {
+ throw new Error('filePaths must be a string or array of strings');
+ }
+
+ return new Promise(function(resolve, reject) {
+ const count = filePaths.length;
+ if (count === 0) {
+ return resolve();
+ }
+
+ if (notify !== undefined) notify({status: 'init', count: count});
+
+ let remainingCount = count;
+
+ filePaths.forEach(filePath => {
+ try {
+ // Get file info
+ const stats = env.node.fs.statSync(filePath);
+ const fileName = env.node.path.basename(filePath);
+
+ // Create a file-like object
+ const file = {
+ name: fileName,
+ size: stats.size,
+ lastModified: stats.mtimeMs,
+ type: 'application/octet-stream', // Default type
+ path: filePath, // For Node.js reading
+ // Mock methods needed by upload.js
+ slice: function(start, end) {
+ return {
+ path: filePath,
+ start: start,
+ end: end || stats.size
+ };
+ }
+ };
+
+ upload.append(path, file, params, fwWrapper.getContext())
+ .then(function(obj) {
+ remainingCount -= 1;
+ if (notify !== undefined) notify(obj);
+ if (remainingCount === 0) resolve();
+ })
+ .catch(function(err) {
+ remainingCount -= 1;
+ console.error('Error uploading file:', err);
+ if (remainingCount === 0) resolve();
+ });
+ } catch (err) {
+ remainingCount -= 1;
+ console.error('Error processing file:', err);
+ if (remainingCount === 0) resolve();
+ }
+ });
+
+ upload.run();
+ });
  };
- });
-
- input.click();
- return promise;
+ } else {
+ // Default implementation for other environments
+ return function() {
+ return Promise.reject(new Error('File upload not supported in this environment'));
+ };
+ }
  };


- upload.append = function (path, file, params, context) {
- var promise = new Promise(function (resolve, reject) {
+ /**
+ * Add a file to the upload queue
+ * @param {string} path - API path to upload to
+ * @param {File|Object} file - File to upload
+ * @param {Object} params - Upload parameters
+ * @param {Object} context - Request context
+ * @returns {Promise} - Upload promise
+ */
+ upload.append = function(path, file, params, context) {
+ return new Promise((resolve, reject) => {
+ // Process parameters
  params = params || {};
- context = context || fwWrapper.getContext(); // refer to https://git.atonline.com/templates/atonline_drive_2018/issues/58
-
- var ctx = {...{}, ...context};
- upload_queue.push({
+ context = context || fwWrapper.getContext();
+
+ // Create an upload object
+ const uploadObject = {
  path: path,
  file: file,
  resolve: resolve,
  reject: reject,
- "status": "pending",
+ status: "pending",
  paused: false,
- up_id: up_id++,
+ up_id: state.nextId++,
  params: params,
- context: ctx
- });
+ context: { ...context } // Create a copy to avoid modification
+ };
+
+ // Add to queue
+ state.queue.push(uploadObject);
  });
-
- return promise;
  };


- upload.cancelItem = function (up_id) {
- var itemKey = -1;
- for (var i in upload_running) {
- if (upload_running[i].up_id == up_id) {
- itemKey = i;
- break;
- }
- }
- if (itemKey >= 0) {
- upload_running[itemKey].canceled = true;
- } else { // /!\ we should be able to cancel the upload of an item even if it's pending, so we're going to look at the queued items
- for (var i = 0; i < upload_queue.length; i++) {
- if (upload_queue[i].up_id == up_id) {
- upload_queue[i].canceled = true;
+ /**
+ * Cancel an upload in progress or in queue
+ * @param {number} uploadId - Upload ID to cancel
+ */
+ upload.cancelItem = function(uploadId) {
+ // Check running uploads
+ if (state.running[uploadId]) {
+ // Mark running upload as canceled
+ state.running[uploadId].canceled = true;
+ } else {
+ // Check queued uploads
+ for (let i = 0; i < state.queue.length; i++) {
+ if (state.queue[i].up_id === uploadId) {
+ state.queue[i].canceled = true;
  break;
  }
  }
  }
- sendprogress();
+
+ // Update progress
+ sendProgress();
  };
-
- // removes the canceled item of given ID from the queue or running list.
- upload.deleteItem = function (up_id) {
- var itemKey = -1;
- for (var i in upload_running) {
- if (upload_running[i].up_id == up_id) {
- itemKey = i;
- break;
+
+ /**
+ * Delete an upload from queue or failed list
+ * Only canceled uploads can be removed from running list
+ * @param {number} uploadId - Upload ID to delete
+ */
+ upload.deleteItem = function(uploadId) {
+ // Check running uploads
+ if (state.running[uploadId]) {
+ // Only delete if canceled
+ if (state.running[uploadId].canceled) {
+ delete state.running[uploadId];
  }
- }
- if (itemKey >= 0) {
- if (upload_running[itemKey].canceled)
- delete upload_running[itemKey];
- } else { // /!\ we should be able to cancel the upload of an item even if it's pending, so we're going to look at the queued items
- for (var i = 0; i < upload_queue.length; i++) {
- if (upload_queue[i].up_id == up_id) {
- upload_queue.splice(i, 1);
+ } else {
+ // Check queue
+ for (let i = 0; i < state.queue.length; i++) {
+ if (state.queue[i].up_id === uploadId) {
+ state.queue.splice(i, 1);
  break;
  }
  }
-
- for (var i = 0; i < upload_failed.length; i++) {
- if (upload_failed[i].up_id == up_id) {
- upload_failed.splice(i, 1);
+
+ // Check failed uploads
+ for (let i = 0; i < state.failed.length; i++) {
+ if (state.failed[i].up_id === uploadId) {
+ state.failed.splice(i, 1);
  break;
  }
  }
  }
- sendprogress();
+
+ // Update progress
+ sendProgress();
  };
-
-
- // changes the status of the item of given ID to "pause" so it stops triggering "do_process_uploading"
- upload.pauseItem = function (up_id) {
- var itemKey = -1;
- for (var i in upload_running) {
- if (upload_running[i].up_id == up_id) {
- itemKey = i;
- break;
- }
+
+ /**
+ * Pause an active upload
+ * @param {number} uploadId - Upload ID to pause
+ */
+ upload.pauseItem = function(uploadId) {
+ // Find upload in running list
+ const upload = state.running[uploadId];
+
+ // Only pause if active
+ if (upload && upload.status === "uploading") {
+ upload.paused = true;
  }
- if (itemKey >= 0 && upload_running[itemKey].status == "uploading") // if the item we're willing to pause exists in the running list and is currently uploading
- upload_running[itemKey].paused = true;
-
- sendprogress();
+
+ // Update progress
+ sendProgress();
  };
-
-
- // changes the status of the item of given ID to "uploading" and triggers "do_process_uploading" on it
- upload.resumeItem = function (up_id) {
- var itemKey = -1;
- for (var i in upload_running) {
- if (upload_running[i].up_id == up_id) {
- itemKey = i;
- break;
- }
- }
- if (itemKey >= 0 && upload_running[itemKey].paused) { // if the item we're willing to resume exists in the running list and is currently paused
- upload_running[itemKey].paused = false;
- do_process_uploading(upload_running[itemKey]);
+
+ /**
+ * Resume a paused upload
+ * @param {number} uploadId - Upload ID to resume
+ */
+ upload.resumeItem = function(uploadId) {
+ // Find upload in running list
+ const upload = state.running[uploadId];
+
+ // Only resume if paused
+ if (upload && upload.paused) {
+ upload.paused = false;
+ processActiveUpload(upload);
  }
- sendprogress();
+
+ // Update progress
+ sendProgress();
  };
-
-
- upload.retryItem = function (up_id) {
- var itemKey = -1;
- var up = undefined;
- for (var i in upload_failed) {
- if (upload_failed[i].up_id == up_id) {
- itemKey = i;
- up = upload_failed[i];
+
+ /**
+ * Retry a failed upload
+ * @param {number} uploadId - Upload ID to retry
+ */
+ upload.retryItem = function(uploadId) {
+ // Find upload in failed list
+ let failedUpload = null;
+ let failedIndex = -1;
+
+ for (let i = 0; i < state.failed.length; i++) {
+ if (state.failed[i].up_id === uploadId) {
+ failedUpload = state.failed[i];
+ failedIndex = i;
  break;
  }
  }
- if (itemKey >= 0) {
- up.failure = {};
- for (var i = 0, len = upload_queue.length; i < len; i++) {
- if (upload_queue[i].up_id === up.up_id) {
- //already in queue what ?
- return;
- }
+
+ // Skip if not found
+ if (!failedUpload) return;
+
+ // Check if already in queue
+ for (let i = 0; i < state.queue.length; i++) {
+ if (state.queue[i].up_id === uploadId) {
+ return; // Already in queue
  }
-
- //reset pending partNumbers
- for (var i = 0; i < up.blocks; i++) {
- if (up.b[i] == "pending") {
- up.b[i] = undefined
- }
+ }
+
+ // Reset failure data
+ failedUpload.failure = {};
+
+ // Reset pending parts
+ for (let i = 0; i < failedUpload.blocks; i++) {
+ if (failedUpload.b[i] === "pending") {
+ failedUpload.b[i] = undefined;
  }
-
-
- upload_failed.splice(itemKey, 1);
- upload_queue.push(up);
-
- upload.run();
- setTimeout(function () {
- var evt = new CustomEvent("upload:retry", {
- detail: {
- item: up,
- }
- });
- document.dispatchEvent(evt);
- }, 10);
-
-
  }
- sendprogress();
+
+ // Move from failed to queue
+ state.failed.splice(failedIndex, 1);
+ state.queue.push(failedUpload);
+
+ // Restart upload
+ upload.run();
+
+ // Dispatch retry event
+ utils.dispatchEvent("upload:retry", { item: failedUpload });
+
+ // Update progress
+ sendProgress();
  };


- // perform an upload following a response to upload a file from an API.
- //
- // TODO: if file is small enough, we can skip the multipart upload and just perform a straight PUT (will fail over 5GB, but we probably want a smaller cutoff, like 32MB or less)
- upload.run = function () {
- fillqueue();
-
- // check for elements in "q", start uploads we can start
- for (var up_id in upload_running) {
- var up = upload_running[up_id];
- switch (up['status']) {
+ /**
+ * Start or continue the upload process
+ * Processes queued uploads and continues running uploads
+ */
+ upload.run = function() {
+ // Fill queue with new uploads
+ fillUploadQueue();
+
+ // Process running uploads
+ for (const uploadId in state.running) {
+ const upload = state.running[uploadId];
+
+ // Process based on status
+ switch (upload.status) {
  case "pending":
- do_process_pending(up);
+ processUpload(upload);
  break;
  case "uploading":
- do_process_uploading(up);
+ processActiveUpload(upload);
  break;
  }
  }
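
The reworked `upload.js` is easiest to see end to end from Node.js. The sketch below is a minimal consumer of the 0.2.0 API as it appears in this diff; it assumes the module resolves as `@karpeleslab/klbfw/upload` (the diff only shows the file `package/upload.js`), that `node-fetch` and `xmldom` are installed as the module's own warning suggests, and it reuses the `Misc/Debug:testUpload` endpoint from the module docblock:

```js
// Minimal Node.js usage sketch for the 0.2.0 upload API shown in this diff.
// Assumptions: the require path below is hypothetical, and the endpoint name
// (taken from the module's docblock) accepts uploads in your environment.
const { upload } = require('@karpeleslab/klbfw/upload');

// getStatus() returns { queue, running, failed }; each item carries up_id,
// status ("pending", "uploading", "validating", "complete"), done (completed
// parts) and blocks (total parts).
upload.onprogress = (status) => {
  for (const item of status.running) {
    console.log(`#${item.up_id} ${item.status}: ${item.done || 0}/${item.blocks || '?'} parts`);
  }
};

// In Node, init() returns a function that takes one path or an array of paths.
upload.init('Misc/Debug:testUpload')(['./file1.txt', './file2.jpg'])
  .then(() => console.log('all uploads settled'));

// Failed items stay in getStatus().failed until retried, resumed or deleted.
const failed = upload.getStatus().failed[0];
if (failed) upload.retryItem(failed.up_id); // requeue one item
// upload.resume();                         // or requeue all failed items
```

Note that `handleFailure()` records a failure and emits `upload:failed` without rejecting the per-file promise from `append()`, so callers should watch `getStatus().failed` (or the failure event) rather than rely on a rejection.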