s3mini 0.4.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -4
- package/dist/s3mini.d.ts +179 -51
- package/dist/s3mini.js +439 -229
- package/dist/s3mini.js.map +1 -1
- package/dist/s3mini.min.js +1 -1
- package/dist/s3mini.min.js.map +1 -1
- package/package.json +22 -19
- package/src/S3.ts +491 -256
- package/src/consts.ts +1 -1
- package/src/index.ts +2 -2
- package/src/types.ts +62 -13
- package/src/utils.ts +67 -22
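
The most visible API changes in this range are a fully async, WebCrypto-based request signer, a new trailing `additionalHeaders` parameter on `putObject`, and new `copyObject`/`moveObject` methods. A minimal usage sketch assembled from the JSDoc examples embedded in the new `dist/s3mini.js` below — endpoint, credentials, keys, and the extra header are placeholders, not values taken from this diff:

```js
import { S3mini } from 's3mini';

// Sketch only — values below are placeholders based on the JSDoc examples in the 0.6.0 dist.
const s3 = new S3mini({
    accessKeyId: 'your-access-key',
    secretAccessKey: 'your-secret-key',
    endpoint: 'https://your-s3-endpoint.com/bucket-name', // bucket name is now derived from the endpoint
    region: 'auto', // 'auto' by default
});

// putObject gains a trailing additionalHeaders argument for extra x-amz-* headers.
const png = Buffer.from([0x89, 0x50, 0x4e, 0x47]);
await s3.putObject('image.png', png, 'image/png', undefined, {
    'x-amz-storage-class': 'STANDARD', // illustrative extra header
});

// New in 0.6.0: server-side copy, and move (copy + delete) within the same bucket.
const copied = await s3.copyObject('report-2024.pdf', 'archive/report-2024.pdf');
console.log(`Copied with ETag: ${copied.etag}`);
await s3.moveObject('temp/upload.tmp', 'files/document.pdf');
```

Note that `moveObject` is documented in the diff as copy-then-delete, not an atomic rename.
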
package/dist/s3mini.js
CHANGED
@@ -11,6 +11,7 @@ const SENSITIVE_KEYS_REDACTED = ['accessKeyId', 'secretAccessKey', 'sessionToken
 const DEFAULT_REQUEST_SIZE_IN_BYTES = 8 * 1024 * 1024;
 // Headers
 const HEADER_AMZ_CONTENT_SHA256 = 'x-amz-content-sha256';
+const HEADER_AMZ_CHECKSUM_SHA256 = 'x-amz-checksum-sha256';
 const HEADER_AMZ_DATE = 'x-amz-date';
 const HEADER_HOST = 'host';
 const HEADER_AUTHORIZATION = 'authorization';
@@ -26,35 +27,70 @@ const ERROR_ENDPOINT_FORMAT = `${ERROR_PREFIX}endpoint must be a valid URL. Expe
 const ERROR_KEY_REQUIRED = `${ERROR_PREFIX}key must be a non-empty string`;
 const ERROR_UPLOAD_ID_REQUIRED = `${ERROR_PREFIX}uploadId must be a non-empty string`;
 const ERROR_DATA_BUFFER_REQUIRED = `${ERROR_PREFIX}data must be a Buffer or string`;
-// const ERROR_PATH_REQUIRED = `${ERROR_PREFIX}path must be a string`;
 const ERROR_PREFIX_TYPE = `${ERROR_PREFIX}prefix must be a string`;
 const ERROR_DELIMITER_REQUIRED = `${ERROR_PREFIX}delimiter must be a string`;
 
-
-
-const
-const
+const ENCODR = new TextEncoder();
+const chunkSize = 0x8000; // 32KB chunks
+const HEXS = '0123456789abcdef';
+const getByteSize = (data) => {
+    if (typeof data === 'string') {
+        return ENCODR.encode(data).byteLength;
+    }
+    if (data instanceof ArrayBuffer || data instanceof Uint8Array) {
+        return data.byteLength;
+    }
+    if (data instanceof Blob) {
+        return data.size;
+    }
+    throw new Error('Unsupported data type');
+};
 /**
- *
- * @param {
- * @returns {string}
+ * Turn a raw ArrayBuffer into its hexadecimal representation.
+ * @param {ArrayBuffer} buffer The raw bytes.
+ * @returns {string} Hexadecimal string
  */
-const
-
+const hexFromBuffer = (buffer) => {
+    const bytes = new Uint8Array(buffer);
+    let hex = '';
+    for (const byte of bytes) {
+        hex += HEXS[byte >> 4] + HEXS[byte & 0x0f];
+    }
+    return hex;
+};
+/**
+ * Turn a raw ArrayBuffer into its base64 representation.
+ * @param {ArrayBuffer} buffer The raw bytes.
+ * @returns {string} Base64 string
+ */
+const base64FromBuffer = (buffer) => {
+    const bytes = new Uint8Array(buffer);
+    let result = '';
+    for (let i = 0; i < bytes.length; i += chunkSize) {
+        const chunk = bytes.subarray(i, i + chunkSize);
+        result += btoa(String.fromCharCode.apply(null, chunk));
+    }
+    return result;
 };
-
-
+/**
+ * Compute SHA-256 hash of arbitrary string data.
+ * @param {string} content The content to be hashed.
+ * @returns {ArrayBuffer} The raw hash
+ */
+const sha256 = async (content) => {
+    const data = ENCODR.encode(content);
+    return await globalThis.crypto.subtle.digest('SHA-256', data);
 };
 /**
- * Compute HMAC-SHA-256 of arbitrary data
- * @param {string|
- * @param {string
- * @
- * @returns {string | Buffer} hex encoded HMAC
+ * Compute HMAC-SHA-256 of arbitrary data.
+ * @param {string|ArrayBuffer} key The key used to sign the content.
+ * @param {string} content The content to be signed.
+ * @returns {ArrayBuffer} The raw signature
  */
-const hmac = (key, content
-const
-
+const hmac = async (key, content) => {
+    const secret = await globalThis.crypto.subtle.importKey('raw', typeof key === 'string' ? ENCODR.encode(key) : key, { name: 'HMAC', hash: 'SHA-256' }, false, ['sign']);
+    const data = ENCODR.encode(content);
+    return await globalThis.crypto.subtle.sign('HMAC', secret, data);
 };
 /**
  * Sanitize ETag value by removing quotes and XML entities
@@ -69,7 +105,7 @@ const sanitizeETag = (etag) => {
         '&quot;': '',
         '&#34;': '',
     };
-    return etag.replace(
+    return etag.replace(/(^("|&quot;|&#34;))|(("|&quot;|&#34;)$)/g, m => replaceChars[m]);
 };
 const entityMap = {
     '&quot;': '"',
@@ -228,8 +264,8 @@ const runInBatches = async (tasks, batchSize = 30, minIntervalMs = 0) => {
  * const s3 = new CoreS3({
  * accessKeyId: 'your-access-key',
  * secretAccessKey: 'your-secret-key',
- * endpoint: 'https://your-s3-endpoint.com',
- * region: '
+ * endpoint: 'https://your-s3-endpoint.com/bucket-name',
+ * region: 'auto' // by default is auto
  * });
  *
  * // Upload a file
@@ -260,6 +296,7 @@ class S3mini {
     secretAccessKey;
     endpoint;
     region;
+    bucketName;
     requestSizeInBytes;
     requestAbortTimeout;
     logger;
@@ -269,8 +306,9 @@ class S3mini {
         this._validateConstructorParams(accessKeyId, secretAccessKey, endpoint);
         this.accessKeyId = accessKeyId;
         this.secretAccessKey = secretAccessKey;
-        this.endpoint = this._ensureValidUrl(endpoint);
+        this.endpoint = new URL(this._ensureValidUrl(endpoint));
         this.region = region;
+        this.bucketName = this._extractBucketName();
        this.requestSizeInBytes = requestSizeInBytes;
        this.requestAbortTimeout = requestAbortTimeout;
        this.logger = logger;
@@ -307,7 +345,7 @@ class S3mini {
             // Include some general context, but sanitize sensitive parts
             context: this._sanitize({
                 region: this.region,
-                endpoint: this.endpoint,
+                endpoint: this.endpoint.toString(),
                 // Only include the first few characters of the access key, if it exists
                 accessKeyId: this.accessKeyId ? `${this.accessKeyId.substring(0, 4)}...` : undefined,
             }),
@@ -395,12 +433,15 @@ class S3mini {
         }
         return { filteredOpts, conditionalHeaders };
     }
-
-
-        if (!(data instanceof Buffer || typeof data === 'string')) {
+    _validateData(data) {
+        if (!((globalThis.Buffer && data instanceof globalThis.Buffer) || typeof data === 'string')) {
             this._log('error', ERROR_DATA_BUFFER_REQUIRED);
             throw new TypeError(ERROR_DATA_BUFFER_REQUIRED);
         }
+        return data;
+    }
+    _validateUploadPartParams(key, uploadId, data, partNumber, opts) {
+        this._checkKey(key);
         if (typeof uploadId !== 'string' || uploadId.trim().length === 0) {
             this._log('error', ERROR_UPLOAD_ID_REQUIRED);
             throw new TypeError(ERROR_UPLOAD_ID_REQUIRED);
@@ -410,8 +451,9 @@ class S3mini {
             throw new TypeError(`${ERROR_PREFIX}partNumber must be a positive integer`);
         }
         this._checkOpts(opts);
+        return this._validateData(data);
     }
-    _sign(method, keyPath, query = {}, headers = {}) {
+    async _sign(method, keyPath, query = {}, headers = {}) {
         // Create URL without appending keyPath first
         const url = new URL(this.endpoint);
         // Properly format the pathname to avoid double slashes
@@ -419,76 +461,53 @@ class S3mini {
            url.pathname =
                url.pathname === '/' ? `/${keyPath.replace(/^\/+/, '')}` : `${url.pathname}/${keyPath.replace(/^\/+/, '')}`;
        }
-        const
-        const
-        const
-
+        const d = new Date();
+        const year = d.getUTCFullYear();
+        const month = String(d.getUTCMonth() + 1).padStart(2, '0');
+        const day = String(d.getUTCDate()).padStart(2, '0');
+        const shortDatetime = `${year}${month}${day}`;
+        const fullDatetime = `${shortDatetime}T${String(d.getUTCHours()).padStart(2, '0')}${String(d.getUTCMinutes()).padStart(2, '0')}${String(d.getUTCSeconds()).padStart(2, '0')}Z`;
+        const credentialScope = `${shortDatetime}/${this.region}/${S3_SERVICE}/${AWS_REQUEST_TYPE}`;
+        headers[HEADER_AMZ_CONTENT_SHA256] = UNSIGNED_PAYLOAD;
         headers[HEADER_AMZ_DATE] = fullDatetime;
         headers[HEADER_HOST] = url.host;
-
-
-        let
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return Object.entries(headers)
-            .map(([key, value]) => `${key.toLowerCase()}:${String(value).trim()}`)
-            .join('\n');
-    }
-    _buildCanonicalRequest(method, url, query, canonicalHeaders, signedHeaders) {
-        const parts = [
-            method,
-            url.pathname,
-            this._buildCanonicalQueryString(query),
-            canonicalHeaders + '\n', // Canonical headers end with extra newline
-            signedHeaders,
-            UNSIGNED_PAYLOAD,
-        ];
-        return parts.join('\n');
-    }
-    _buildCredentialScope(shortDatetime) {
-        return [shortDatetime, this.region, S3_SERVICE, AWS_REQUEST_TYPE].join('/');
-    }
-    _buildStringToSign(fullDatetime, credentialScope, canonicalRequest) {
-        return [AWS_ALGORITHM, fullDatetime, credentialScope, hash(canonicalRequest)].join('\n');
-    }
-    _calculateSignature(shortDatetime, stringToSign) {
-        if (shortDatetime !== this.signingKeyDate) {
+        const ignoredHeaders = new Set(['authorization', 'content-length', 'content-type', 'user-agent']);
+        let canonicalHeaders = '';
+        let signedHeaders = '';
+        for (const [key, value] of Object.entries(headers).sort(([a], [b]) => a.localeCompare(b))) {
+            const lowerKey = key.toLowerCase();
+            if (!ignoredHeaders.has(lowerKey)) {
+                if (canonicalHeaders) {
+                    canonicalHeaders += '\n';
+                    signedHeaders += ';';
+                }
+                canonicalHeaders += `${lowerKey}:${String(value).trim()}`;
+                signedHeaders += lowerKey;
+            }
+        }
+        const canonicalRequest = `${method}\n${url.pathname}\n${this._buildCanonicalQueryString(query)}\n${canonicalHeaders}\n\n${signedHeaders}\n${UNSIGNED_PAYLOAD}`;
+        const stringToSign = `${AWS_ALGORITHM}\n${fullDatetime}\n${credentialScope}\n${hexFromBuffer(await sha256(canonicalRequest))}`;
+        if (shortDatetime !== this.signingKeyDate || !this.signingKey) {
             this.signingKeyDate = shortDatetime;
-            this.signingKey = this._getSignatureKey(shortDatetime);
+            this.signingKey = await this._getSignatureKey(shortDatetime);
         }
-
-
-
-        return
-            `${AWS_ALGORITHM} Credential=${this.accessKeyId}/${credentialScope}`,
-            `SignedHeaders=${signedHeaders}`,
-            `Signature=${signature}`,
-        ].join(', ');
+        const signature = hexFromBuffer(await hmac(this.signingKey, stringToSign));
+        headers[HEADER_AUTHORIZATION] =
+            `${AWS_ALGORITHM} Credential=${this.accessKeyId}/${credentialScope}, SignedHeaders=${signedHeaders}, Signature=${signature}`;
+        return { url: url.toString(), headers };
     }
     async _signedRequest(method, // 'GET' | 'HEAD' | 'PUT' | 'POST' | 'DELETE'
     key, // ‘’ allowed for bucket‑level ops
     { query = {}, // ?query=string
-    body = '', //
+    body = '', // BodyInit | undefined
     headers = {}, // extra/override headers
     tolerated = [], // [200, 404] etc.
     withQuery = false, // append query string to signed URL
     } = {}) {
         // Basic validation
-        if (!['GET', 'HEAD', 'PUT', 'POST', 'DELETE'].includes(method)) {
-
-        }
+        // if (!['GET', 'HEAD', 'PUT', 'POST', 'DELETE'].includes(method)) {
+        // throw new Error(`${C.ERROR_PREFIX}Unsupported HTTP method ${method as string}`);
+        // }
         const { filteredOpts, conditionalHeaders } = ['GET', 'HEAD'].includes(method)
             ? this._filterIfHeaders(query)
             : { filteredOpts: query, conditionalHeaders: {} };
@@ -499,7 +518,7 @@ class S3mini {
             ...conditionalHeaders,
         };
         const encodedKey = key ? uriResourceEscape(key) : '';
-        const { url, headers: signedHeaders } = this._sign(method, encodedKey, filteredOpts, baseHeaders);
+        const { url, headers: signedHeaders } = await this._sign(method, encodedKey, filteredOpts, baseHeaders);
         if (Object.keys(query).length > 0) {
             withQuery = true; // append query string to signed URL
         }
@@ -508,53 +527,6 @@ class S3mini {
         const signedHeadersString = Object.fromEntries(Object.entries(signedHeaders).map(([k, v]) => [k, String(v)]));
         return this._sendRequest(finalUrl, method, signedHeadersString, body, tolerated);
     }
-    /**
-     * Gets the current configuration properties of the S3 instance.
-     * @returns {IT.S3Config} The current S3 configuration object containing all settings.
-     * @example
-     * const config = s3.getProps();
-     * console.log(config.endpoint); // 'https://s3.amazonaws.com/my-bucket'
-     */
-    getProps() {
-        return {
-            accessKeyId: this.accessKeyId,
-            secretAccessKey: this.secretAccessKey,
-            endpoint: this.endpoint,
-            region: this.region,
-            requestSizeInBytes: this.requestSizeInBytes,
-            requestAbortTimeout: this.requestAbortTimeout,
-            logger: this.logger,
-        };
-    }
-    /**
-     * Updates the configuration properties of the S3 instance.
-     * @param {IT.S3Config} props - The new configuration object.
-     * @param {string} props.accessKeyId - The access key ID for authentication.
-     * @param {string} props.secretAccessKey - The secret access key for authentication.
-     * @param {string} props.endpoint - The endpoint URL of the S3-compatible service.
-     * @param {string} [props.region='auto'] - The region of the S3 service.
-     * @param {number} [props.requestSizeInBytes=8388608] - The request size of a single request in bytes.
-     * @param {number} [props.requestAbortTimeout] - The timeout in milliseconds after which a request should be aborted.
-     * @param {IT.Logger} [props.logger] - A logger object with methods like info, warn, error.
-     * @throws {TypeError} Will throw an error if required parameters are missing or of incorrect type.
-     * @example
-     * s3.setProps({
-     * accessKeyId: 'new-access-key',
-     * secretAccessKey: 'new-secret-key',
-     * endpoint: 'https://new-endpoint.com/my-bucket',
-     * region: 'us-west-2' // by default is auto
-     * });
-     */
-    setProps(props) {
-        this._validateConstructorParams(props.accessKeyId, props.secretAccessKey, props.endpoint);
-        this.accessKeyId = props.accessKeyId;
-        this.secretAccessKey = props.secretAccessKey;
-        this.region = props.region || 'auto';
-        this.endpoint = props.endpoint;
-        this.requestSizeInBytes = props.requestSizeInBytes || DEFAULT_REQUEST_SIZE_IN_BYTES;
-        this.requestAbortTimeout = props.requestAbortTimeout;
-        this.logger = props.logger;
-    }
     /**
      * Sanitizes an ETag value by removing surrounding quotes and whitespace.
      * Still returns RFC compliant ETag. https://www.rfc-editor.org/rfc/rfc9110#section-8.8.3
@@ -579,7 +551,7 @@ class S3mini {
         `;
         const headers = {
             [HEADER_CONTENT_TYPE]: XML_CONTENT_TYPE,
-            [HEADER_CONTENT_LENGTH]:
+            [HEADER_CONTENT_LENGTH]: getByteSize(xmlBody),
         };
         const res = await this._signedRequest('PUT', '', {
             body: xmlBody,
@@ -588,6 +560,35 @@ class S3mini {
         });
         return res.status === 200;
     }
+    _extractBucketName() {
+        const url = this.endpoint;
+        // First check if bucket is in the pathname (path-style URLs)
+        const pathSegments = url.pathname.split('/').filter(p => p);
+        if (pathSegments.length > 0) {
+            if (typeof pathSegments[0] === 'string') {
+                return pathSegments[0];
+            }
+        }
+        // Otherwise extract from subdomain (virtual-hosted-style URLs)
+        const hostParts = url.hostname.split('.');
+        // Common patterns:
+        // bucket-name.s3.amazonaws.com
+        // bucket-name.s3.region.amazonaws.com
+        // bucket-name.region.digitaloceanspaces.com
+        // bucket-name.region.cdn.digitaloceanspaces.com
+        if (hostParts.length >= 3) {
+            // Check if it's a known S3-compatible service
+            const domain = hostParts.slice(-2).join('.');
+            const knownDomains = ['amazonaws.com', 'digitaloceanspaces.com', 'cloudflare.com'];
+            if (knownDomains.some(d => domain.includes(d))) {
+                if (typeof hostParts[0] === 'string') {
+                    return hostParts[0];
+                }
+            }
+        }
+        // Fallback: use the first subdomain
+        return hostParts[0] || '';
+    }
     /**
      * Checks if a bucket exists.
      * This method sends a request to check if the specified bucket exists in the S3-compatible service.
@@ -612,9 +613,7 @@ class S3mini {
      * // List objects with prefix
      * const photos = await s3.listObjects('/', 'photos/', 100);
      */
-    async listObjects(delimiter = '/', prefix = '', maxKeys,
-    // method: IT.HttpMethod = 'GET', // 'GET' or 'HEAD'
-    opts = {}) {
+    async listObjects(delimiter = '/', prefix = '', maxKeys, opts = {}) {
         this._checkDelimiter(delimiter);
         this._checkPrefix(prefix);
         this._checkOpts(opts);
@@ -624,51 +623,80 @@ class S3mini {
         let token;
         const all = [];
         do {
-            const
-
-
-                'max-keys': String(batchSize),
-                ...(prefix ? { prefix } : {}),
-                ...(token ? { 'continuation-token': token } : {}),
-                ...opts,
-            };
-            const res = await this._signedRequest('GET', keyPath, {
-                query,
-                withQuery: true,
-                tolerated: [200, 404],
-            });
-            if (res.status === 404) {
-                return null;
-            }
-            if (res.status !== 200) {
-                const errorBody = await res.text();
-                const errorCode = res.headers.get('x-amz-error-code') || 'Unknown';
-                const errorMessage = res.headers.get('x-amz-error-message') || res.statusText;
-                this._log('error', `${ERROR_PREFIX}Request failed with status ${res.status}: ${errorCode} - ${errorMessage}, err body: ${errorBody}`);
-                throw new Error(`${ERROR_PREFIX}Request failed with status ${res.status}: ${errorCode} - ${errorMessage}, err body: ${errorBody}`);
+            const batchResult = await this._fetchObjectBatch(keyPath, prefix, remaining, token, opts);
+            if (batchResult === null) {
+                return null; // 404 - bucket not found
             }
-
-            if (
-
-                throw new Error(`${ERROR_PREFIX}Unexpected listObjects response shape`);
+            all.push(...batchResult.objects);
+            if (!unlimited) {
+                remaining -= batchResult.objects.length;
             }
-
-            /* accumulate Contents */
-            const contents = out.Contents || out.contents; // S3 v2 vs v1
-            if (contents) {
-                const batch = Array.isArray(contents) ? contents : [contents];
-                all.push(...batch);
-                if (!unlimited) {
-                    remaining -= batch.length;
-                }
-            }
-            const truncated = out.IsTruncated === 'true' || out.isTruncated === 'true' || false;
-            token = truncated
-                ? (out.NextContinuationToken || out.nextContinuationToken || out.NextMarker || out.nextMarker)
-                : undefined;
+            token = batchResult.continuationToken;
         } while (token && remaining > 0);
         return all;
     }
+    async _fetchObjectBatch(keyPath, prefix, remaining, token, opts) {
+        const query = this._buildListObjectsQuery(prefix, remaining, token, opts);
+        const res = await this._signedRequest('GET', keyPath, {
+            query,
+            withQuery: true,
+            tolerated: [200, 404],
+        });
+        if (res.status === 404) {
+            return null;
+        }
+        if (res.status !== 200) {
+            await this._handleListObjectsError(res);
+        }
+        const xmlText = await res.text();
+        return this._parseListObjectsResponse(xmlText);
+    }
+    _buildListObjectsQuery(prefix, remaining, token, opts) {
+        const batchSize = Math.min(remaining, 1000); // S3 ceiling
+        return {
+            'list-type': LIST_TYPE, // =2 for V2
+            'max-keys': String(batchSize),
+            ...(prefix ? { prefix } : {}),
+            ...(token ? { 'continuation-token': token } : {}),
+            ...opts,
+        };
+    }
+    async _handleListObjectsError(res) {
+        const errorBody = await res.text();
+        const parsedErrorBody = this._parseErrorXml(res.headers, errorBody);
+        const errorCode = res.headers.get('x-amz-error-code') ?? parsedErrorBody.svcCode ?? 'Unknown';
+        const errorMessage = res.headers.get('x-amz-error-message') ?? parsedErrorBody.errorMessage ?? res.statusText;
+        this._log('error', `${ERROR_PREFIX}Request failed with status ${res.status}: ${errorCode} - ${errorMessage}, err body: ${errorBody}`);
+        throw new Error(`${ERROR_PREFIX}Request failed with status ${res.status}: ${errorCode} - ${errorMessage}, err body: ${errorBody}`);
+    }
+    _parseListObjectsResponse(xmlText) {
+        const raw = parseXml(xmlText);
+        if (typeof raw !== 'object' || !raw || 'error' in raw) {
+            this._log('error', `${ERROR_PREFIX}Unexpected listObjects response shape: ${JSON.stringify(raw)}`);
+            throw new Error(`${ERROR_PREFIX}Unexpected listObjects response shape`);
+        }
+        const out = (raw.ListBucketResult || raw.listBucketResult || raw);
+        const objects = this._extractObjectsFromResponse(out);
+        const continuationToken = this._extractContinuationToken(out);
+        return { objects, continuationToken };
+    }
+    _extractObjectsFromResponse(response) {
+        const contents = response.Contents || response.contents; // S3 v2 vs v1
+        if (!contents) {
+            return [];
+        }
+        return Array.isArray(contents) ? contents : [contents];
+    }
+    _extractContinuationToken(response) {
+        const truncated = response.IsTruncated === 'true' || response.isTruncated === 'true' || false;
+        if (!truncated) {
+            return undefined;
+        }
+        return (response.NextContinuationToken ||
+            response.nextContinuationToken ||
+            response.NextMarker ||
+            response.nextMarker);
+    }
     /**
      * Lists multipart uploads in the bucket.
      * This method sends a request to list multipart uploads in the specified bucket.
@@ -908,6 +936,7 @@ class S3mini {
      * @param {string | Buffer} data - The data to upload (string or Buffer).
      * @param {string} [fileType='application/octet-stream'] - The MIME type of the file.
      * @param {IT.SSECHeaders} [ssecHeaders] - Server-Side Encryption headers, if any.
+     * @param {IT.AWSHeaders} [additionalHeaders] - Additional x-amz-* headers specific to this request, if any.
      * @returns {Promise<Response>} A promise that resolves to the Response object from the upload request.
      * @throws {TypeError} If data is not a string or Buffer.
      * @example
@@ -918,15 +947,13 @@ class S3mini {
      * const buffer = Buffer.from([0x89, 0x50, 0x4e, 0x47]);
      * await s3.putObject('image.png', buffer, 'image/png');
      */
-    async putObject(key, data, fileType = DEFAULT_STREAM_CONTENT_TYPE, ssecHeaders) {
-        if (!(data instanceof Buffer || typeof data === 'string')) {
-            throw new TypeError(ERROR_DATA_BUFFER_REQUIRED);
-        }
+    async putObject(key, data, fileType = DEFAULT_STREAM_CONTENT_TYPE, ssecHeaders, additionalHeaders) {
         return this._signedRequest('PUT', key, {
-            body: data,
+            body: this._validateData(data),
             headers: {
-                [HEADER_CONTENT_LENGTH]:
+                [HEADER_CONTENT_LENGTH]: getByteSize(data),
                 [HEADER_CONTENT_TYPE]: fileType,
+                ...additionalHeaders,
                 ...ssecHeaders,
             },
             tolerated: [200],
@@ -957,15 +984,6 @@ class S3mini {
             withQuery: true,
         });
         const parsed = parseXml(await res.text());
-        // if (
-        // parsed &&
-        // typeof parsed === 'object' &&
-        // 'initiateMultipartUploadResult' in parsed &&
-        // parsed.initiateMultipartUploadResult &&
-        // 'uploadId' in (parsed.initiateMultipartUploadResult as { uploadId: string })
-        // ) {
-        // return (parsed.initiateMultipartUploadResult as { uploadId: string }).uploadId;
-        // }
         if (parsed && typeof parsed === 'object') {
             // Check for both cases of InitiateMultipartUploadResult
             const uploadResult = parsed.initiateMultipartUploadResult ||
@@ -1000,13 +1018,13 @@ class S3mini {
      * console.log(`Part ${part.partNumber} uploaded with ETag: ${part.etag}`);
      */
     async uploadPart(key, uploadId, data, partNumber, opts = {}, ssecHeaders) {
-        this._validateUploadPartParams(key, uploadId, data, partNumber, opts);
+        const body = this._validateUploadPartParams(key, uploadId, data, partNumber, opts);
         const query = { uploadId, partNumber, ...opts };
         const res = await this._signedRequest('PUT', key, {
             query,
-            body
+            body,
             headers: {
-                [HEADER_CONTENT_LENGTH]:
+                [HEADER_CONTENT_LENGTH]: getByteSize(data),
                 ...ssecHeaders,
             },
         });
@@ -1035,7 +1053,7 @@ class S3mini {
         const xmlBody = this._buildCompleteMultipartUploadXml(parts);
         const headers = {
             [HEADER_CONTENT_TYPE]: XML_CONTENT_TYPE,
-            [HEADER_CONTENT_LENGTH]:
+            [HEADER_CONTENT_LENGTH]: getByteSize(xmlBody),
         };
         const res = await this._signedRequest('POST', key, {
             query,
@@ -1054,7 +1072,7 @@ class S3mini {
         if (etag && typeof etag === 'string') {
             return {
                 ...resultObj,
-                etag:
+                etag: sanitizeETag(etag),
             };
         }
         return result;
@@ -1102,18 +1120,193 @@ class S3mini {
         return { status: 'Aborted', key, uploadId, response: parsed };
     }
     _buildCompleteMultipartUploadXml(parts) {
-
-
-
-
-
-
-
-
-
-
-
-
+        let xml = '<CompleteMultipartUpload>';
+        for (const part of parts) {
+            xml += `<Part><PartNumber>${part.partNumber}</PartNumber><ETag>${part.etag}</ETag></Part>`;
+        }
+        xml += '</CompleteMultipartUpload>';
+        return xml;
+    }
+    /**
+     * Executes the copy operation for local copying (same bucket/endpoint).
+     * @private
+     */
+    async _executeCopyOperation(destinationKey, copySource, options) {
+        const { metadataDirective = 'COPY', metadata = {}, contentType, storageClass, taggingDirective, websiteRedirectLocation, sourceSSECHeaders = {}, destinationSSECHeaders = {}, additionalHeaders = {}, } = options;
+        const headers = {
+            'x-amz-copy-source': copySource,
+            'x-amz-metadata-directive': metadataDirective,
+            ...additionalHeaders,
+            ...(contentType && { [HEADER_CONTENT_TYPE]: contentType }),
+            ...(storageClass && { 'x-amz-storage-class': storageClass }),
+            ...(taggingDirective && { 'x-amz-tagging-directive': taggingDirective }),
+            ...(websiteRedirectLocation && { 'x-amz-website-redirect-location': websiteRedirectLocation }),
+            ...this._buildSSECHeaders(sourceSSECHeaders, destinationSSECHeaders),
+            ...(metadataDirective === 'REPLACE' ? this._buildMetadataHeaders(metadata) : {}),
+        };
+        try {
+            const res = await this._signedRequest('PUT', destinationKey, {
+                headers,
+                tolerated: [200],
+            });
+            return this._parseCopyObjectResponse(await res.text());
+        }
+        catch (err) {
+            this._log('error', `Error in copy operation to ${destinationKey}`, {
+                error: String(err),
+            });
+            throw err;
+        }
+    }
+    /**
+     * Copies an object within the same bucket.
+     *
+     * @param {string} sourceKey - The key of the source object to copy
+     * @param {string} destinationKey - The key where the object will be copied to
+     * @param {IT.CopyObjectOptions} [options={}] - Copy operation options
+     * @param {string} [options.metadataDirective='COPY'] - How to handle metadata ('COPY' | 'REPLACE')
+     * @param {Record<string,string>} [options.metadata={}] - New metadata (only used if metadataDirective='REPLACE')
+     * @param {string} [options.contentType] - New content type for the destination object
+     * @param {string} [options.storageClass] - Storage class for the destination object
+     * @param {string} [options.taggingDirective] - How to handle object tags ('COPY' | 'REPLACE')
+     * @param {string} [options.websiteRedirectLocation] - Website redirect location for the destination
+     * @param {IT.SSECHeaders} [options.sourceSSECHeaders={}] - Encryption headers for reading source (if encrypted)
+     * @param {IT.SSECHeaders} [options.destinationSSECHeaders={}] - Encryption headers for destination
+     * @param {IT.AWSHeaders} [options.additionalHeaders={}] - Extra x-amz-* headers
+     *
+     * @returns {Promise<IT.CopyObjectResult>} Copy result with etag and lastModified date
+     * @throws {TypeError} If sourceKey or destinationKey is invalid
+     * @throws {Error} If copy operation fails or S3 returns an error
+     *
+     * @example
+     * // Simple copy
+     * const result = await s3.copyObject('report-2024.pdf', 'archive/report-2024.pdf');
+     * console.log(`Copied with ETag: ${result.etag}`);
+     *
+     * @example
+     * // Copy with new metadata and content type
+     * const result = await s3.copyObject('data.csv', 'processed/data.csv', {
+     * metadataDirective: 'REPLACE',
+     * metadata: {
+     * 'processed-date': new Date().toISOString(),
+     * 'original-name': 'data.csv'
+     * },
+     * contentType: 'text/csv; charset=utf-8'
+     * });
+     *
+     * @example
+     * // Copy encrypted object (Cloudflare R2 SSE-C)
+     * const ssecKey = 'n1TKiTaVHlYLMX9n0zHXyooMr026vOiTEFfT+719Hho=';
+     * await s3.copyObject('sensitive.json', 'backup/sensitive.json', {
+     * sourceSSECHeaders: {
+     * 'x-amz-copy-source-server-side-encryption-customer-algorithm': 'AES256',
+     * 'x-amz-copy-source-server-side-encryption-customer-key': ssecKey,
+     * 'x-amz-copy-source-server-side-encryption-customer-key-md5': 'gepZmzgR7Be/1+K1Aw+6ow=='
+     * },
+     * destinationSSECHeaders: {
+     * 'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+     * 'x-amz-server-side-encryption-customer-key': ssecKey,
+     * 'x-amz-server-side-encryption-customer-key-md5': 'gepZmzgR7Be/1+K1Aw+6ow=='
+     * }
+     * });
+     */
+    copyObject(sourceKey, destinationKey, options = {}) {
+        // Validate parameters
+        this._checkKey(sourceKey);
+        this._checkKey(destinationKey);
+        const copySource = `/${this.bucketName}/${uriEscape(sourceKey)}`;
+        return this._executeCopyOperation(destinationKey, copySource, options);
+    }
+    _buildSSECHeaders(sourceHeaders, destHeaders) {
+        const headers = {};
+        Object.entries({ ...sourceHeaders, ...destHeaders }).forEach(([k, v]) => {
+            if (v !== undefined) {
+                headers[k] = v;
+            }
+        });
+        return headers;
+    }
+    /**
+     * Moves an object within the same bucket (copy + delete atomic-like operation).
+     *
+     * WARNING: Not truly atomic - if delete fails after successful copy, the object
+     * will exist in both locations. Consider your use case carefully.
+     *
+     * @param {string} sourceKey - The key of the source object to move
+     * @param {string} destinationKey - The key where the object will be moved to
+     * @param {IT.CopyObjectOptions} [options={}] - Options passed to the copy operation
+     *
+     * @returns {Promise<IT.CopyObjectResult>} Result from the copy operation
+     * @throws {TypeError} If sourceKey or destinationKey is invalid
+     * @throws {Error} If copy succeeds but delete fails (includes copy result in error)
+     *
+     * @example
+     * // Simple move
+     * await s3.moveObject('temp/upload.tmp', 'files/document.pdf');
+     *
+     * @example
+     * // Move with metadata update
+     * await s3.moveObject('unprocessed/image.jpg', 'processed/image.jpg', {
+     * metadataDirective: 'REPLACE',
+     * metadata: {
+     * 'status': 'processed',
+     * 'processed-at': Date.now().toString()
+     * },
+     * contentType: 'image/jpeg'
+     * });
+     *
+     * @example
+     * // Safe move with error handling
+     * try {
+     * const result = await s3.moveObject('inbox/file.dat', 'archive/file.dat');
+     * console.log(`Moved successfully: ${result.etag}`);
+     * } catch (error) {
+     * // Check if copy succeeded but delete failed
+     * if (error.message.includes('delete source object after successful copy')) {
+     * console.warn('File copied but not deleted from source - manual cleanup needed');
+     * }
+     * }
+     */
+    async moveObject(sourceKey, destinationKey, options = {}) {
+        try {
+            // First copy the object
+            const copyResult = await this.copyObject(sourceKey, destinationKey, options);
+            // Then delete the source
+            const deleteSuccess = await this.deleteObject(sourceKey);
+            if (!deleteSuccess) {
+                throw new Error(`${ERROR_PREFIX}Failed to delete source object after successful copy`);
+            }
+            return copyResult;
+        }
+        catch (err) {
+            this._log('error', `Error moving object from ${sourceKey} to ${destinationKey}`, {
+                error: String(err),
+            });
+            throw err;
+        }
+    }
+    _buildMetadataHeaders(metadata) {
+        const headers = {};
+        Object.entries(metadata).forEach(([k, v]) => {
+            headers[k.startsWith('x-amz-meta-') ? k : `x-amz-meta-${k}`] = v;
+        });
+        return headers;
+    }
+    _parseCopyObjectResponse(xmlText) {
+        const parsed = parseXml(xmlText);
+        if (!parsed || typeof parsed !== 'object') {
+            throw new Error(`${ERROR_PREFIX}Unexpected copyObject response format`);
+        }
+        const result = (parsed.CopyObjectResult || parsed.copyObjectResult || parsed);
+        const etag = result.ETag || result.eTag || result.etag;
+        const lastModified = result.LastModified || result.lastModified;
+        if (!etag || typeof etag !== 'string') {
+            throw new Error(`${ERROR_PREFIX}ETag not found in copyObject response`);
+        }
+        return {
+            etag: sanitizeETag(etag),
+            lastModified: lastModified ? new Date(lastModified) : undefined,
+        };
     }
     /**
      * Deletes an object from the bucket.
@@ -1126,13 +1319,14 @@ class S3mini {
         return res.status === 200 || res.status === 204;
     }
     async _deleteObjectsProcess(keys) {
-        const
+        const objectsXml = keys.map(key => `<Object><Key>${escapeXml(key)}</Key></Object>`).join('');
+        const xmlBody = '<Delete>' + objectsXml + '</Delete>';
         const query = { delete: '' };
-        const
+        const sha256base64 = base64FromBuffer(await sha256(xmlBody));
         const headers = {
             [HEADER_CONTENT_TYPE]: XML_CONTENT_TYPE,
-            [HEADER_CONTENT_LENGTH]:
-
+            [HEADER_CONTENT_LENGTH]: getByteSize(xmlBody),
+            [HEADER_AMZ_CHECKSUM_SHA256]: sha256base64,
         };
         const res = await this._signedRequest('POST', '', {
             query,
@@ -1220,9 +1414,10 @@ class S3mini {
             signal: this.requestAbortTimeout !== undefined ? AbortSignal.timeout(this.requestAbortTimeout) : undefined,
         });
         this._log('info', `Response status: ${res.status}, tolerated: ${toleratedStatusCodes.join(',')}`);
-        if (
-
+        if (res.ok || toleratedStatusCodes.includes(res.status)) {
+            return res;
         }
+        await this._handleErrorResponse(res);
         return res;
     }
     catch (err) {
@@ -1233,10 +1428,29 @@ class S3mini {
             throw err;
         }
     }
+    _parseErrorXml(headers, body) {
+        if (headers.get('content-type') !== 'application/xml') {
+            return {};
+        }
+        const parsedBody = parseXml(body);
+        if (!parsedBody ||
+            typeof parsedBody !== 'object' ||
+            !('Error' in parsedBody) ||
+            !parsedBody.Error ||
+            typeof parsedBody.Error !== 'object') {
+            return {};
+        }
+        const error = parsedBody.Error;
+        return {
+            svcCode: 'Code' in error && typeof error.Code === 'string' ? error.Code : undefined,
+            errorMessage: 'Message' in error && typeof error.Message === 'string' ? error.Message : undefined,
+        };
+    }
     async _handleErrorResponse(res) {
         const errorBody = await res.text();
-        const
-        const
+        const parsedErrorBody = this._parseErrorXml(res.headers, errorBody);
+        const svcCode = res.headers.get('x-amz-error-code') ?? parsedErrorBody.svcCode ?? 'Unknown';
+        const errorMessage = res.headers.get('x-amz-error-message') ?? parsedErrorBody.errorMessage ?? res.statusText;
         this._log('error', `${ERROR_PREFIX}Request failed with status ${res.status}: ${svcCode} - ${errorMessage},err body: ${errorBody}`);
         throw new S3ServiceError(`S3 returned ${res.status} – ${svcCode}`, res.status, svcCode, errorBody);
     }
@@ -1246,20 +1460,16 @@ class S3mini {
         }
         return Object.keys(queryParams)
             .map(key => `${encodeURIComponent(key)}=${encodeURIComponent(queryParams[key])}`)
-            .sort()
+            .sort((a, b) => a.localeCompare(b))
             .join('&');
     }
-    _getSignatureKey(dateStamp) {
-        const kDate = hmac(`AWS4${this.secretAccessKey}`, dateStamp);
-        const kRegion = hmac(kDate, this.region);
-        const kService = hmac(kRegion, S3_SERVICE);
-        return hmac(kService, AWS_REQUEST_TYPE);
+    async _getSignatureKey(dateStamp) {
+        const kDate = await hmac(`AWS4${this.secretAccessKey}`, dateStamp);
+        const kRegion = await hmac(kDate, this.region);
+        const kService = await hmac(kRegion, S3_SERVICE);
+        return await hmac(kService, AWS_REQUEST_TYPE);
     }
 }
-/**
- * @deprecated Use `S3mini` instead.
- */
-const s3mini = S3mini;
 
-export { S3mini, S3mini as default, runInBatches,
+export { S3mini, S3mini as default, runInBatches, sanitizeETag };
 //# sourceMappingURL=s3mini.js.map
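
The diff above replaces the old synchronous signing helpers with `async` functions built on `globalThis.crypto.subtle`, which is why `_sign`, `_getSignatureKey`, and `_signedRequest` now `await` the signature. A condensed, standalone sketch of the SigV4 key-derivation chain shown in the new code — the `getSignatureKey` name and the date/region arguments here are illustrative, not part of the package's public API:

```js
// Standalone restatement of the hmac()/_getSignatureKey() logic from the 0.6.0 dist above.
const enc = new TextEncoder();

const hmac = async (key, content) => {
    const cryptoKey = await globalThis.crypto.subtle.importKey(
        'raw',
        typeof key === 'string' ? enc.encode(key) : key, // string secret or raw ArrayBuffer
        { name: 'HMAC', hash: 'SHA-256' },
        false,
        ['sign'],
    );
    return globalThis.crypto.subtle.sign('HMAC', cryptoKey, enc.encode(content));
};

const getSignatureKey = async (secretAccessKey, dateStamp, region) => {
    const kDate = await hmac(`AWS4${secretAccessKey}`, dateStamp); // e.g. '20240101'
    const kRegion = await hmac(kDate, region);                     // e.g. 'auto'
    const kService = await hmac(kRegion, 's3');
    return hmac(kService, 'aws4_request');                         // raw ArrayBuffer signing key
};
```

Because `_sign` caches the derived key and only recomputes it when `signingKeyDate` changes, this four-HMAC chain runs at most once per UTC day per client instance.
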