@pi-r/aws-v3 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +7 -0
- package/README.md +5 -0
- package/client/index.js +285 -0
- package/download/index.js +38 -0
- package/package.json +31 -0
- package/upload/index.js +153 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
Copyright 2023 An Pham
|
|
2
|
+
|
|
3
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
4
|
+
|
|
5
|
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
|
6
|
+
|
|
7
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
package/client/index.js
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.executeBatchQuery = exports.executeQuery = exports.deleteObjectsV2 = exports.deleteObjects = exports.setBucketWebsite = exports.setBucketPolicy = exports.createBucketV2 = exports.createBucket = exports.validateDatabase = exports.validateStorage = exports.createDatabaseClient = exports.createStorageClient = void 0;
|
|
4
|
+
const aws_1 = require("@pi-r/aws");
|
|
5
|
+
const util_1 = require("@e-mc/cloud/util");
|
|
6
|
+
const types_1 = require("@e-mc/types");
|
|
7
|
+
const Module = require("@e-mc/module");
|
|
8
|
+
const Cloud = require("@e-mc/cloud");
|
|
9
|
+
function setCannedAcl(S3, client, Bucket, ACL, service = 'aws-v3', recursive) {
    // Translate the canned-ACL selector into an equivalent bucket policy document.
    let Policy;
    if (ACL === 1) {
        Policy = (0, aws_1.getBucketPublicReadPolicy)(Bucket);
    }
    else if (ACL === 'private') {
        Policy = (0, aws_1.getPrivatePolicy)(Bucket);
    }
    else {
        Policy = (0, aws_1.getPublicReadPolicy)(Bucket, ACL === 'authenticated-read', ACL === 'public-read-write');
    }
    const logOptions = recursive ? Cloud.LOG_CLOUD_DELAYED : Cloud.LOG_CLOUD_COMMAND;
    return client.send(new S3.PutBucketPolicyCommand({ Bucket, Policy }))
        .then(() => {
            this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ['Grant ' + ACL, Bucket], '', { ...logOptions });
        })
        .catch((err) => {
            // Only the first attempt reacts to failures; the delayed retry is best-effort.
            if (recursive || !(err instanceof Error)) {
                return;
            }
            if (err.name === 'OperationAborted') {
                // S3 rejects policy updates while a conflicting bucket operation is
                // in progress; retry exactly once after a minute.
                setTimeout(() => setCannedAcl.call(this, S3, client, Bucket, ACL, service, true), 60000 /* TIME.m */);
                this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, [`Grant ${ACL} (delayed)`, err.Endpoint || Bucket], err, { ...Cloud.LOG_CLOUD_DELAYED });
            }
            else {
                this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ['Unable to grant ' + ACL, err.Endpoint || Bucket], err, { ...Cloud.LOG_CLOUD_WARN });
            }
        });
}
|
|
38
|
+
function sanitizeCredentials(credential) {
    // The AWS v3 SDK expects access keys nested under "credentials"; move any
    // flat (v2-style) top-level keys there so legacy credential objects work.
    // Mutates "credential" in place and returns the nested credentials value,
    // which may be a provider function (left untouched in that case).
    const result = credential.credentials || (credential.credentials = {});
    const movable = ['accessKeyId', 'secretAccessKey', 'sessionToken', 'expiration'];
    for (const attr of movable) {
        if (attr in credential) {
            if (typeof result !== 'function') {
                result[attr] = credential[attr];
            }
            delete credential[attr];
        }
    }
    return result;
}
|
|
55
|
+
const isNoSuchBucket = (err) => {
    // True only for genuine Error instances surfaced by S3 as "NoSuchBucket".
    return err instanceof Error && err.name === 'NoSuchBucket';
};
|
|
56
|
+
/**
 * Creates an S3 client from a (possibly v2-style flat) credential object.
 * @param credential Mutated in place: flat access keys are moved into "credentials".
 * @param service Service label used for log/error reporting.
 * @param sdk Module specifier of the S3 client package to load.
 * @returns [client, module] tuple: the S3Client instance and the loaded SDK module.
 * @throws Rethrows (after reporting via checkPackage) when the SDK cannot be loaded.
 */
function createStorageClient(credential, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    try {
        sanitizeCredentials(credential);
        const expiration = credential.credentials?.expiration;
        if (expiration && !(expiration instanceof Date)) {
            // The v3 SDK requires "expiration" to be a Date. FIX: the invalid
            // value lives at credentials.expiration -- sanitizeCredentials has
            // already removed the top-level copy, so the original
            // "delete credential.expiration" was a no-op.
            delete credential.credentials.expiration;
        }
        const AWS = require(sdk);
        return [new AWS.S3Client(credential), AWS];
    }
    catch (err) {
        // Report the missing package scope (e.g. "@aws-sdk") before propagating.
        this.checkPackage(err, sdk.split('/')[0], { passThrough: true });
        throw err;
    }
}
exports.createStorageClient = createStorageClient;
|
|
72
|
+
/**
 * Creates a DynamoDB Document client wrapper.
 * @returns [documentClient, module] tuple with the lib-dynamodb module.
 * @throws Rethrows (after reporting via checkPackage) when an SDK package is missing.
 */
function createDatabaseClient(credential) {
    try {
        const lib = require('@aws-sdk/lib-dynamodb');
        const { DynamoDBClient } = require('@aws-sdk/client-dynamodb');
        const base = new DynamoDBClient(credential);
        return [lib.DynamoDBDocumentClient.from(base, credential.translateConfig), lib];
    }
    catch (err) {
        this.checkPackage(err, '@aws-sdk', { passThrough: true });
        throw err;
    }
}
exports.createDatabaseClient = createDatabaseClient;
|
|
84
|
+
/**
 * Checks whether enough AWS credential information is present for storage use.
 * Mutates "credential": flat keys are normalized, and when an ini profile is
 * requested (or AWS_SDK_LOAD_CONFIG is enabled) a provider is installed.
 */
function validateStorage(credential) {
    const credentials = sanitizeCredentials(credential);
    const useIni = credential.profile || !credentials && process.env.AWS_SDK_LOAD_CONFIG === '1';
    if (useIni) {
        const { fromIni } = require('@aws-sdk/credential-provider-ini');
        credential.credentials = fromIni({ profile: credential.profile });
        return true;
    }
    if (!credentials) {
        return false;
    }
    // A provider function, explicit keys, or environment variables all qualify.
    return typeof credentials === 'function' || (0, aws_1.isAccessDefined)(credentials) || (0, aws_1.isEnvDefined)();
}
exports.validateStorage = validateStorage;
|
|
94
|
+
/**
 * A database credential is valid only when both the DynamoDB settings and the
 * underlying storage credentials check out.
 */
function validateDatabase(credential, data) {
    if (!(0, aws_1.isDatabaseDefined)(credential, data)) {
        return false;
    }
    return validateStorage(credential);
}
exports.validateDatabase = validateDatabase;
|
|
98
|
+
/**
 * Legacy entry point kept for API compatibility; defers to createBucketV2,
 * mapping the boolean "publicRead" flag onto the 'public-read' canned ACL.
 */
function createBucket(credential, Bucket, publicRead, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const ACL = publicRead ? 'public-read' : undefined;
    return createBucketV2.call(this, credential, Bucket, ACL, undefined, service, sdk);
}
exports.createBucket = createBucket;
|
|
102
|
+
/**
 * Ensures a bucket exists, creating it when missing, and applies the canned
 * ACL (expressed as a bucket policy via setCannedAcl) when one was requested.
 * @returns Promise resolving true on success, false when creation failed.
 */
function createBucketV2(credential, Bucket, ACL, options, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    // ACL === 1 is a special selector handled by setCannedAcl; everything else
    // is normalized through checkBucketCannedACL.
    ACL = ACL === 1 ? 1 : (0, aws_1.checkBucketCannedACL)(ACL);
    const [client, AWS] = createStorageClient.call(this, credential, service, sdk);
    return client.send(new AWS.HeadBucketCommand({ Bucket }))
        .then(async () => {
        // Bucket already exists and is reachable; only (re)apply the ACL.
        if (ACL) {
            await setCannedAcl.call(this, AWS, client, Bucket, ACL, service);
        }
        return true;
    })
        .catch(() => {
        // HeadBucket failed (bucket missing or inaccessible) -- attempt creation.
        const input = { ...options, Bucket };
        const region = credential.region;
        // us-east-1 must NOT be sent as a LocationConstraint.
        if (!input.CreateBucketConfiguration && typeof region === 'string' && region !== 'us-east-1') {
            input.CreateBucketConfiguration = { LocationConstraint: region };
        }
        return client.send(new AWS.CreateBucketCommand(input))
            .then(async () => {
            this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ["Bucket created" /* VAL_CLOUD.CREATE_BUCKET */, Bucket], '', { ...Cloud.LOG_CLOUD_COMMAND });
            if (ACL) {
                await setCannedAcl.call(this, AWS, client, Bucket, ACL, service);
            }
            return true;
        })
            .catch(async (err) => {
            switch (err instanceof Error && err.name) {
                case 'BucketAlreadyExists':
                case 'BucketAlreadyOwnedByYou':
                    // Lost a race with another writer -- treat as success.
                    if (ACL) {
                        await setCannedAcl.call(this, AWS, client, Bucket, ACL, service);
                    }
                    return true;
                default:
                    this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Unable to create bucket" /* ERR_CLOUD.CREATE_BUCKET */, Bucket], err, { ...Cloud.LOG_CLOUD_FAIL });
                    return false;
            }
        });
    });
}
exports.createBucketV2 = createBucketV2;
|
|
142
|
+
/**
 * Applies either a bucket policy (options carries "Policy" without "ACL") or a
 * bucket ACL to an existing bucket. Mutates "options" by assigning Bucket.
 * FIX: defaults corrected from the v2 SDK values ('aws' / 'aws-sdk/clients/s3')
 * to the v3 values used throughout this package -- the v2 module exposes no
 * S3Client/*Command classes, so the old defaults always threw.
 * @returns Promise resolving true on success, false on failure.
 */
function setBucketPolicy(credential, Bucket, options, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const [client, AWS] = createStorageClient.call(this, credential, service, sdk);
    const policy = 'Policy' in options && !('ACL' in options);
    options.Bucket = Bucket;
    return (policy ? client.send(new AWS.PutBucketPolicyCommand(options)) : client.send(new AWS.PutBucketAclCommand(options)))
        .then(() => {
        this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, [policy ? "Bucket policy configured" /* VAL_CLOUD.POLICY_BUCKET */ : "Bucket ACL configured" /* VAL_CLOUD.ACL_BUCKET */, Bucket], '', { ...Cloud.LOG_CLOUD_COMMAND });
        return true;
    })
        .catch(err => {
        // A missing bucket is silently ignored; everything else is reported.
        if (!isNoSuchBucket(err)) {
            this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Unable to update bucket policy" /* ERR_CLOUD.POLICY_BUCKET */, Bucket], err, { ...Cloud.LOG_CLOUD_FAIL, fatal: false });
        }
        return false;
    });
}
exports.setBucketPolicy = setBucketPolicy;
|
|
159
|
+
/**
 * Configures static-website hosting (index/error documents) on a bucket.
 * FIX: defaults corrected from the v2 SDK values ('aws' / 'aws-sdk/clients/s3')
 * to the v3 values used throughout this package -- the v2 module exposes no
 * S3Client/*Command classes, so the old defaults always threw.
 * @returns Promise resolving true on success, false on failure.
 */
function setBucketWebsite(credential, Bucket, options, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const [client, AWS] = createStorageClient.call(this, credential, service, sdk);
    const WebsiteConfiguration = {};
    if ((0, types_1.isString)(options.indexPage)) {
        WebsiteConfiguration.IndexDocument = { Suffix: options.indexPage };
    }
    if ((0, types_1.isString)(options.errorPage)) {
        WebsiteConfiguration.ErrorDocument = { Key: options.errorPage };
    }
    return client.send(new AWS.PutBucketWebsiteCommand({ Bucket, WebsiteConfiguration }))
        .then(() => {
        this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ["Bucket configured" /* VAL_CLOUD.CONFIGURE_BUCKET */, Bucket], WebsiteConfiguration, { ...Cloud.LOG_CLOUD_COMMAND });
        return true;
    })
        .catch(err => {
        // A missing bucket is silently ignored; everything else is reported.
        if (!isNoSuchBucket(err)) {
            this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Unable to configure bucket" /* ERR_CLOUD.CONFIGURE_BUCKET */, Bucket], err, { ...Cloud.LOG_CLOUD_FAIL, fatal: false });
        }
        return false;
    });
}
exports.setBucketWebsite = setBucketWebsite;
|
|
181
|
+
/**
 * Backward-compatible alias: empties the bucket recursively.
 */
function deleteObjects(credential, Bucket, service, sdk) {
    const recursive = true;
    return deleteObjectsV2.call(this, credential, Bucket, recursive, service, sdk);
}
exports.deleteObjects = deleteObjects;
|
|
185
|
+
/**
 * Empties a bucket: lists its objects and batch-deletes them.
 * NOTE(review): ListObjects returns at most one page (up to 1000 keys), so
 * larger buckets are only partially emptied per call -- confirm intended.
 * @param recursive When false, only top-level keys (no "/") are removed.
 */
async function deleteObjectsV2(credential, Bucket, recursive = true, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const [client, AWS] = createStorageClient.call(this, credential, service, sdk);
    return client.send(new AWS.ListObjectsCommand({ Bucket }))
        .then(({ Contents }) => {
        if (Contents?.length) {
            let Objects = Contents.map(data => ({ Key: data.Key }));
            if (!recursive) {
                Objects = Objects.filter(value => value.Key.indexOf('/') === -1);
            }
            return client.send(new AWS.DeleteObjectsCommand({ Bucket, Delete: { Objects } }))
                .then(data => {
                if ((0, types_1.isArray)(data.Deleted)) {
                    const files = data.Deleted.length + ' files';
                    this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ["Bucket emptied" /* VAL_CLOUD.EMPTY_BUCKET */ + ` (${recursive ? 'recursive' : files})`, Bucket], recursive ? files : '', { ...Cloud.LOG_CLOUD_COMMAND });
                }
            })
                .catch(err => {
                this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Unable to empty bucket" /* ERR_CLOUD.DELETE_BUCKET */, Bucket], err, { ...Cloud.LOG_CLOUD_FAIL, fatal: false });
            });
        }
    })
        .catch(err => {
        // A missing bucket is treated as already empty.
        if (!isNoSuchBucket(err)) {
            this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Unable to list bucket" /* ERR_CLOUD.LIST_BUCKET */, Bucket], err, { ...Cloud.LOG_CLOUD_FAIL, fatal: false });
        }
    });
}
exports.deleteObjectsV2 = deleteObjectsV2;
|
|
213
|
+
/**
 * Runs a single query by delegating to executeBatchQuery.
 * @returns The first (only) result set, or an empty array when none.
 */
async function executeQuery(credential, data, sessionKey) {
    const results = await executeBatchQuery.call(this, credential, [data], sessionKey);
    return results[0] || [];
}
exports.executeQuery = executeQuery;
|
|
217
|
+
/**
 * Executes a batch of DynamoDB lookups with optional result caching. Each item
 * either fetches a single row by key (GetCommand, optionally preceded by an
 * UpdateCommand) or runs a QueryCommand.
 * @returns Array parallel to "batch" with the rows for each item.
 * @throws When an item is missing its table, or has neither key nor query.
 */
async function executeBatchQuery(credential, batch, sessionKey) {
    const length = batch.length;
    const result = new Array(length);
    // Cache eligibility is decided once from the first item's service.
    const caching = length > 0 && this.hasCache(batch[0].service, sessionKey);
    const cacheValue = { value: this.valueOfKey(credential, 'cache'), sessionKey };
    let client;
    // Client is created lazily (only when a cache miss forces a real call) and
    // shared across the batch; credential is copied when the batch has >1 item.
    const createClient = () => client || (client = createDatabaseClient.call(this, length === 1 ? credential : { ...credential }));
    const closeClient = () => client?.[0].destroy();
    for (let i = 0; i < length; ++i) {
        const item = batch[i];
        const { service, table, id = '', query, partitionKey, key = partitionKey, limit = 0, update, ignoreCache } = item;
        if (!table) {
            closeClient();
            throw (0, util_1.formatError)(item, "Missing database table" /* ERR_DB.TABLE */);
        }
        (0, aws_1.setDatabaseEndpoint)(credential);
        // ignoreCache === 0 means "refresh": bypass the read but renew the entry.
        const renewCache = ignoreCache === 0;
        const getCache = (value) => {
            // ignoreCache === 1 skips the cache read entirely (write still happens).
            if (ignoreCache === 1) {
                return;
            }
            cacheValue.renewCache = renewCache;
            return this.getQueryResult(service, credential, value, cacheValue);
        };
        // A non-empty queryString doubles as the cache key; it is built only when
        // caching applies to this item.
        let rows, queryString = caching && ignoreCache !== true || ignoreCache === false || ignoreCache === 1 || renewCache ? table + '_' : '';
        if (key && (id || (0, types_1.isPlainObject)(key))) {
            // Keyed single-row path.
            if (queryString) {
                queryString += Module.asString(key, true) + id;
                // Updates always go to the database; the cache is only consulted
                // for plain reads.
                if (!update && (rows = getCache(queryString))) {
                    result[i] = rows;
                    continue;
                }
            }
            const [db, AWS] = createClient();
            const Key = (0, types_1.isPlainObject)(key) ? key : { [key]: id };
            const command = { TableName: table, Key };
            if (update) {
                await db.send(new AWS.UpdateCommand({ ...command, ...update }));
            }
            const output = await db.send(new AWS.GetCommand(command));
            if (output.Item) {
                rows = [output.Item];
            }
        }
        else if ((0, types_1.isPlainObject)(query)) {
            // Query path: the cache key also encodes the query and the limit.
            if (queryString && (rows = getCache(queryString += Module.asString(query, true) + limit))) {
                result[i] = rows;
                continue;
            }
            query.TableName = table;
            if (limit > 0) {
                query.Limit = limit;
            }
            const [db, AWS] = createClient();
            const output = await db.send(new AWS.QueryCommand(query));
            if (output.Count && output.Items) {
                rows = output.Items;
            }
        }
        else {
            closeClient();
            throw (0, util_1.formatError)(item, "Missing database query" /* ERR_DB.QUERY */);
        }
        // Store (and possibly cache) the fresh rows for this item.
        result[i] = this.setQueryResult(service, credential, queryString, rows, cacheValue);
    }
    closeClient();
    return result;
}
exports.executeBatchQuery = executeBatchQuery;
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const client_1 = require("../client");
|
|
4
|
+
const util_1 = require("@e-mc/cloud/util");
|
|
5
|
+
const types_1 = require("@e-mc/types");
|
|
6
|
+
const Module = require("@e-mc/module");
|
|
7
|
+
const Cloud = require("@e-mc/cloud");
|
|
8
|
+
/**
 * Factory returning a download handler bound to one storage client.
 * The handler buffers an S3 object and hands it to the callback; when
 * "deleteObject" is set, the source object is removed best-effort.
 */
function download(config, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const [client, AWS] = client_1.createStorageClient.call(this, config, service, sdk);
    return (data, callback) => {
        const { bucket: Bucket, download: target } = data;
        const Key = target.filename;
        if (!Bucket || !Key) {
            callback((0, types_1.errorValue)('Missing property', !Bucket ? 'Bucket' : 'Key'));
            return;
        }
        const location = Module.joinPath(Bucket, Key);
        let input = { Bucket, Key, VersionId: target.versionId };
        // @ts-ignore
        client
            .send(new AWS.GetObjectCommand(input), { abortSignal: this.signal })
            .then(result => {
            // Buffer the body and surface either the data or the read error.
            (0, util_1.readableAsBuffer)(result.Body).then(buffer => callback(null, buffer)).catch(err => callback(err));
            const deleteObject = target.deleteObject;
            if (deleteObject) {
                if ((0, types_1.isPlainObject)(deleteObject)) {
                    // Extra DeleteObject options may be supplied; Bucket/Key/VersionId win.
                    input = Object.assign(deleteObject, input);
                }
                // NOTE(review): the delete is issued even if buffering the body
                // later fails -- confirm that is intended.
                client.send(new AWS.DeleteObjectCommand(input))
                    .then(() => this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, "Delete success" /* VAL_CLOUD.DELETE_FILE */, location, { ...Cloud.LOG_CLOUD_DELETE }))
                    .catch(err => this.formatFail(64 /* LOG_TYPE.CLOUD */, service, ["Delete failed" /* ERR_CLOUD.DELETE_FAIL */, location], err, { ...Cloud.LOG_CLOUD_FAIL, fatal: !!target.active }));
            }
        })
            .catch(err => callback(err));
    };
}

module.exports = download;
|
package/package.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@pi-r/aws-v3",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "AWS V3 cloud functions for E-mc.",
|
|
5
|
+
"main": "client/index.js",
|
|
6
|
+
"publishConfig": {
|
|
7
|
+
"access": "public"
|
|
8
|
+
},
|
|
9
|
+
"repository": {
|
|
10
|
+
"type": "git",
|
|
11
|
+
"url": "https://github.com/anpham6/pi-r.git",
|
|
12
|
+
"directory": "src/cloud/aws-v3"
|
|
13
|
+
},
|
|
14
|
+
"keywords": [
|
|
15
|
+
"squared",
|
|
16
|
+
"e-mc",
|
|
17
|
+
"squared-functions"
|
|
18
|
+
],
|
|
19
|
+
"author": "An Pham <anpham6@gmail.com>",
|
|
20
|
+
"license": "MIT",
|
|
21
|
+
"homepage": "https://github.com/anpham6/pi-r#readme",
|
|
22
|
+
"dependencies": {
|
|
23
|
+
"@e-mc/cloud": "^0.4.0",
|
|
24
|
+
"@e-mc/module": "^0.4.0",
|
|
25
|
+
"@e-mc/types": "^0.4.0",
|
|
26
|
+
"@pi-r/aws": "^0.0.1",
|
|
27
|
+
"@aws-sdk/client-dynamodb": "^3.293.0",
|
|
28
|
+
"@aws-sdk/client-s3": "^3.293.0",
|
|
29
|
+
"@aws-sdk/lib-dynamodb": "^3.293.0"
|
|
30
|
+
}
|
|
31
|
+
}
|
package/upload/index.js
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
const path = require("path");
|
|
4
|
+
const fs = require("fs");
|
|
5
|
+
const stream = require("stream");
|
|
6
|
+
const client_1 = require("../client");
|
|
7
|
+
const util_1 = require("@e-mc/cloud/util");
|
|
8
|
+
const aws_1 = require("@pi-r/aws");
|
|
9
|
+
const types_1 = require("@e-mc/types");
|
|
10
|
+
const Module = require("@e-mc/module");
|
|
11
|
+
const Cloud = require("@e-mc/cloud");
|
|
12
|
+
const BUCKET_SESSION = new Set();
|
|
13
|
+
const BUCKET_RESPONSE = {};
|
|
14
|
+
/**
 * Factory returning an upload handler bound to one storage client.
 * The handler ensures the bucket exists (memoized per service+bucket), avoids
 * key collisions unless "overwrite" is set, then uploads the primary buffer
 * plus any companion files ("fileGroup"), reporting the first object's URL via
 * the callback.
 * FIXES applied:
 *  - Object Key now uses Module.joinPath(pathname, name) ("objectKey"), as the
 *    HeadObject collision probe and the reported URL already did; the original
 *    "pathname + Key[i]" dropped the "/" separator.
 *  - The endpoint URL now interpolates the actual region; the original ternary
 *    only ever inserted "us-east-1." or nothing, so non-us-east-1 buckets got
 *    no region segment at all.
 */
function upload(credential, service = 'aws-v3', sdk = '@aws-sdk/client-s3') {
    const [client, AWS] = client_1.createStorageClient.call(this, credential, service, sdk);
    return async (data, callback) => {
        var _a;
        const { bucket: Bucket, localUri } = data;
        const { pathname = '', fileGroup, contentType, metadata, endpoint, active, publicRead, acl, admin = {}, overwrite, options } = data.upload;
        let filename = data.upload.filename || path.basename(localUri), bucketKey;
        // Release the per-bucket session guard and any memoized create response.
        const cleanup = () => {
            BUCKET_SESSION.delete(service + Bucket);
            if (bucketKey && bucketKey in BUCKET_RESPONSE) {
                delete BUCKET_RESPONSE[bucketKey];
            }
        };
        const errorResponse = (err) => {
            cleanup();
            callback(err);
            return false;
        };
        if (!BUCKET_SESSION.has(service + Bucket)) {
            // First upload for this bucket in the session: ensure it exists
            // (memoized so concurrent uploads share one create call).
            const bucketAcl = admin.publicRead ? 'public-read' : admin.acl;
            const response = BUCKET_RESPONSE[_a = bucketKey = (0, aws_1.getBucketKey)(credential, Bucket, bucketAcl, service, sdk)] || (BUCKET_RESPONSE[_a] = client_1.createBucketV2.call(this, credential, Bucket, bucketAcl, admin.configBucket?.create, service, sdk));
            if (!await response) {
                errorResponse(null);
                return;
            }
            BUCKET_SESSION.add(service + Bucket);
        }
        if (!overwrite) {
            // Probe for an unused object key, renaming with a numeric suffix
            // (or a UUID when HeadObject errors unexpectedly).
            const current = filename;
            const next = (0, util_1.generateFilename)(filename);
            let i = 0, exists;
            do {
                if (i > 0) {
                    [filename, exists] = next(i);
                    if (!exists) {
                        break;
                    }
                }
                exists = await client.send(new AWS.HeadObjectCommand({ Bucket, Key: pathname ? Module.joinPath(pathname, filename) : filename }))
                    .then(() => true)
                    .catch(err => {
                        if (err instanceof Error && err.name !== 'NotFound') {
                            filename = (0, types_1.generateUUID)() + path.extname(current);
                            return true;
                        }
                        return false;
                    });
            } while (exists && ++i);
            if (i > 0) {
                this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, ["File renamed" /* VAL_CLOUD.RENAME_FILE */, current], filename, { ...Cloud.LOG_CLOUD_WARN });
            }
        }
        if (pathname) {
            // Materialize the folder placeholder object; failures are non-fatal.
            await client.send(new AWS.PutObjectCommand({ Bucket, Key: pathname, Body: Buffer.from(''), ContentLength: 0 })).catch(() => { });
        }
        const Key = [filename];
        const Body = [data.buffer];
        const ContentType = [contentType];
        const addLog = (err) => err instanceof Error && this.addLog(this.statusType.WARN, err.message, service + ': ' + Bucket);
        if (fileGroup) {
            // Companion files (e.g. source maps) ride along under derived names.
            for (const [content, ext, localFile] of fileGroup) {
                try {
                    Body.push(typeof content === 'string' ? fs.readFileSync(content) : content);
                    Key.push(ext === '.map' && localFile ? path.basename(localFile) : filename + ext);
                }
                catch (err) {
                    addLog(err);
                }
            }
        }
        for (let i = 0; i < Key.length; ++i) {
            const first = i === 0;
            if (this.aborted) {
                if (first) {
                    errorResponse((0, types_1.createAbortError)());
                }
                return;
            }
            const objectKey = Module.joinPath(pathname, Key[i]);
            let params;
            try {
                // FIX: use the joined objectKey (was "pathname + Key[i]").
                params = { ...options, Bucket, Key: objectKey, Body: stream.Readable.from(Body[i]) };
                const readable = publicRead || active && publicRead !== false && !acl;
                if (first) {
                    params.ContentType || (params.ContentType = ContentType[i]);
                    if (readable) {
                        params.ACL = 'public-read';
                    }
                    else if (acl) {
                        params.ACL = acl;
                    }
                    if (metadata) {
                        params.Metadata = metadata;
                    }
                }
                else {
                    params.ContentType = ContentType[i];
                    if (!params.ACL) {
                        if (readable) {
                            params.ACL = 'public-read';
                        }
                        else if (acl) {
                            params.ACL = acl;
                        }
                    }
                }
            }
            catch (err) {
                // Only the primary object's failure aborts the whole upload.
                if (first) {
                    errorResponse(err);
                    return;
                }
                if (err instanceof Error) {
                    this.addLog(this.statusType.WARN, err.message, service + ': ' + Bucket);
                }
                continue;
            }
            // @ts-ignore
            client.send(new AWS.PutObjectCommand(params), { abortSignal: this.signal })
                .then(() => {
                // FIX: build the regional virtual-hosted endpoint from the actual
                // region (defaulting to us-east-1, matching the original's output
                // for that region).
                const url = endpoint ? Module.joinPath(endpoint, objectKey) : Module.joinPath(`https://${Bucket}.s3.${credential.region || 'us-east-1'}.amazonaws.com`, objectKey);
                if (first) {
                    cleanup();
                    callback(null, url);
                }
                this.formatMessage(64 /* LOG_TYPE.CLOUD */, service, "Upload success" /* VAL_CLOUD.UPLOAD_FILE */, url, { ...Cloud.LOG_CLOUD_UPLOAD });
            })
                .catch(err => {
                if (first) {
                    errorResponse(err);
                }
                else {
                    addLog(err);
                }
            });
        }
    };
}

module.exports = upload;
|