@es-labs/jslib 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +4 -0
- package/README.md +42 -0
- package/__test__/services.test.js +32 -0
- package/auth/index.js +226 -0
- package/auth/keyv.js +23 -0
- package/auth/knex.js +29 -0
- package/auth/redis.js +23 -0
- package/comms/email.js +123 -0
- package/comms/nexmo.js +44 -0
- package/comms/telegram.js +43 -0
- package/comms/telegram2/inbound.js +314 -0
- package/comms/telegram2/outbound.js +574 -0
- package/comms/webpush.js +60 -0
- package/config.js +37 -0
- package/express/controller/auth/oauth.js +39 -0
- package/express/controller/auth/oidc.js +87 -0
- package/express/controller/auth/own.js +100 -0
- package/express/controller/auth/saml.js +74 -0
- package/express/upload.js +48 -0
- package/index.js +1 -0
- package/iso/README.md +4 -0
- package/iso/__tests__/csv-utils.spec.js +128 -0
- package/iso/__tests__/datetime.spec.js +101 -0
- package/iso/__tests__/fetch.spec.js +270 -0
- package/iso/csv-utils.js +206 -0
- package/iso/datetime.js +103 -0
- package/iso/fetch.js +129 -0
- package/iso/fetch2.js +180 -0
- package/iso/log-filter.js +17 -0
- package/iso/sleep.js +6 -0
- package/iso/ws.js +63 -0
- package/node/oss-files/oss-uploader-client-fetch.js +258 -0
- package/node/oss-files/oss-uploader-client-fetch.md +31 -0
- package/node/oss-files/oss-uploader-client.js +219 -0
- package/node/oss-files/oss-uploader-server.js +199 -0
- package/node/oss-files/oss-uploader-usage.js +121 -0
- package/node/oss-files/oss-uploader-usage.md +34 -0
- package/node/oss-files/s3-uploader-client.js +217 -0
- package/node/oss-files/s3-uploader-server.js +123 -0
- package/node/oss-files/s3-uploader-usage.js +77 -0
- package/node/oss-files/s3-uploader-usage.md +34 -0
- package/package.json +53 -0
- package/packageInfo.js +9 -0
- package/services/ali.js +279 -0
- package/services/aws.js +194 -0
- package/services/db/__tests__/keyv.spec.js +31 -0
- package/services/db/keyv.js +14 -0
- package/services/db/knex.js +67 -0
- package/services/db/redis.js +51 -0
- package/services/index.js +57 -0
- package/services/mq/README.md +8 -0
- package/services/websocket.js +139 -0
- package/t4t/README.md +1 -0
- package/traps.js +20 -0
- package/utils/__tests__/aes.spec.js +52 -0
- package/utils/aes.js +23 -0
- package/web/UI.md +71 -0
- package/web/bwc-autocomplete.js +211 -0
- package/web/bwc-combobox.js +343 -0
- package/web/bwc-fileupload.js +87 -0
- package/web/bwc-loading-overlay.js +54 -0
- package/web/bwc-t4t-form.js +511 -0
- package/web/bwc-table.js +756 -0
- package/web/fetch.js +129 -0
- package/web/i18n.js +24 -0
- package/web/idle.js +49 -0
- package/web/parse-jwt.js +15 -0
- package/web/pwa.js +84 -0
- package/web/sign-pad.js +164 -0
- package/web/t4t-fe.js +164 -0
- package/web/util.js +126 -0
- package/web/web-cam.js +182 -0
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
/**
 * S3 Large File Uploader (Client-Side)
 * Supports multipart upload for files > 5MB using pre-signed URLs
 *
 * Usage:
 *   const uploader = new S3Uploader({ getSignedUrlEndpoint: '/api/s3/sign' });
 *   await uploader.upload(file, { onProgress: (pct) => console.log(pct) });
 */

const CHUNK_SIZE = 10 * 1024 * 1024; // 10MB per part (min 5MB for S3 multipart)
const SINGLE_UPLOAD_LIMIT = 5 * 1024 * 1024; // Use multipart above 5MB

class S3Uploader {
  /**
   * @param {object} options
   * @param {string} options.getSignedUrlEndpoint - Your backend endpoint that returns signed URLs
   * @param {number} [options.chunkSize] - Bytes per part (default: 10MB)
   * @param {number} [options.maxConcurrent] - Parallel part uploads (default: 3)
   * @throws {Error} if getSignedUrlEndpoint is missing
   */
  constructor(options = {}) {
    this.endpoint = options.getSignedUrlEndpoint;
    this.chunkSize = options.chunkSize || CHUNK_SIZE;
    this.maxConcurrent = options.maxConcurrent || 3;

    if (!this.endpoint) {
      throw new Error('getSignedUrlEndpoint is required');
    }
  }

  // ─── Public API ─────────────────────────────────────────────────────────────

  /**
   * Upload a File or Blob to S3.
   * @param {File|Blob} file
   * @param {object} [opts]
   * @param {string} [opts.key] - S3 object key (defaults to file.name)
   * @param {Function} [opts.onProgress] - Called with 0–100 progress percentage
   * @param {AbortSignal} [opts.signal] - AbortController signal to cancel upload
   * @returns {Promise<{ key: string, location: string }>}
   */
  async upload(file, opts = {}) {
    const key = opts.key || file.name;
    const onProgress = opts.onProgress || (() => {});
    const signal = opts.signal || null;

    if (file.size <= SINGLE_UPLOAD_LIMIT) {
      return this._singleUpload(file, key, onProgress, signal);
    }
    return this._multipartUpload(file, key, onProgress, signal);
  }

  // ─── Single-Part Upload (≤5MB) ───────────────────────────────────────────────

  async _singleUpload(file, key, onProgress, signal) {
    onProgress(0);

    // 1. Get signed PUT URL from your backend
    const { signedUrl, location } = await this._requestSignedUrl({
      type: 'single',
      key,
      contentType: file.type,
      size: file.size,
    });

    // 2. PUT the file directly to S3.
    // _putToS3 reports raw bytes uploaded — convert to the 0–100 percentage
    // this class's onProgress contract promises (previously bytes leaked
    // through as "percent" for small files).
    await this._putToS3(
      signedUrl,
      file,
      file.type,
      (bytesDone) => {
        if (file.size > 0) onProgress(Math.round((bytesDone / file.size) * 100));
      },
      signal
    );

    onProgress(100);
    return { key, location };
  }

  // ─── Multipart Upload (>5MB) ─────────────────────────────────────────────────

  async _multipartUpload(file, key, onProgress, signal) {
    // 1. Initiate multipart upload — get uploadId from your backend
    const { uploadId } = await this._requestSignedUrl({
      type: 'initiate',
      key,
      contentType: file.type,
    });

    const chunks = this._splitFile(file);
    const totalParts = chunks.length;
    // partProgress[i] holds bytes uploaded so far for part i; the sum drives
    // the overall percentage.
    const partProgress = new Array(totalParts).fill(0);

    const updateProgress = () => {
      const totalUploaded = partProgress.reduce((a, b) => a + b, 0);
      onProgress(Math.round((totalUploaded / file.size) * 100));
    };

    // 2. Upload parts with concurrency control: a shared work queue drained
    // by maxConcurrent worker loops. S3 part numbers are 1-based.
    const completedParts = [];
    const queue = [...chunks.entries()]; // [[index, blob], ...]

    const uploadWorker = async () => {
      while (queue.length > 0) {
        const [index, chunk] = queue.shift();
        const currentPart = index + 1;

        if (signal?.aborted) throw new DOMException('Upload aborted', 'AbortError');

        // Get a signed URL for this part
        const { signedUrl } = await this._requestSignedUrl({
          type: 'part',
          key,
          uploadId,
          partNumber: currentPart,
        });

        // Upload the part and capture the ETag (required by CompleteMultipartUpload)
        const etag = await this._putToS3(
          signedUrl,
          chunk,
          file.type,
          (bytesDone) => {
            partProgress[index] = bytesDone;
            updateProgress();
          },
          signal,
          true // returnEtag
        );

        // Progress events may stop short of the final byte; pin this part to
        // fully-uploaded once the PUT has succeeded.
        partProgress[index] = chunk.size;
        updateProgress();

        completedParts.push({ PartNumber: currentPart, ETag: etag });
      }
    };

    // Run N workers in parallel
    const workers = Array.from({ length: this.maxConcurrent }, () => uploadWorker());
    await Promise.all(workers);

    // 3. Complete the multipart upload. Workers finish out of order, so sort
    // by PartNumber — S3 requires ascending part order.
    completedParts.sort((a, b) => a.PartNumber - b.PartNumber);

    const { location } = await this._requestSignedUrl({
      type: 'complete',
      key,
      uploadId,
      parts: completedParts,
    });

    onProgress(100);
    return { key, location };
  }

  // ─── Helpers ─────────────────────────────────────────────────────────────────

  /** Split a File/Blob into chunks of at most this.chunkSize bytes. */
  _splitFile(file) {
    const chunks = [];
    let offset = 0;
    while (offset < file.size) {
      chunks.push(file.slice(offset, offset + this.chunkSize));
      offset += this.chunkSize;
    }
    return chunks;
  }

  /**
   * Call your backend to get signed URLs / manage multipart lifecycle.
   * Adapt the request/response shape to match your backend API.
   * @param {object} payload - { type, key, contentType?, size?, uploadId?, partNumber?, parts? }
   * @returns {Promise<object>} parsed JSON response
   * @throws {Error} on non-2xx backend response
   */
  async _requestSignedUrl(payload) {
    const res = await fetch(this.endpoint, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });

    if (!res.ok) {
      const text = await res.text();
      throw new Error(`Signed URL request failed (${res.status}): ${text}`);
    }
    return res.json();
  }

  /**
   * PUT a blob to a pre-signed S3 URL, reporting progress in BYTES uploaded.
   * Returns ETag if returnEtag=true (needed for multipart complete).
   * Uses XMLHttpRequest because fetch() has no upload-progress events.
   */
  _putToS3(signedUrl, blob, contentType, onChunkProgress, signal, returnEtag = false) {
    return new Promise((resolve, reject) => {
      // An already-aborted signal fires no 'abort' event — bail out up front
      // instead of silently uploading anyway.
      if (signal?.aborted) {
        reject(new DOMException('Upload aborted', 'AbortError'));
        return;
      }

      const xhr = new XMLHttpRequest();

      xhr.open('PUT', signedUrl);
      xhr.setRequestHeader('Content-Type', contentType);

      xhr.upload.addEventListener('progress', (e) => {
        if (e.lengthComputable) onChunkProgress(e.loaded);
      });

      xhr.addEventListener('load', () => {
        if (xhr.status >= 200 && xhr.status < 300) {
          if (returnEtag) {
            const etag = xhr.getResponseHeader('ETag');
            if (!etag) {
              // Without the ETag, CompleteMultipartUpload will fail later with
              // a much less actionable error — fail fast here instead.
              reject(new Error(
                'S3 response has no ETag header — add "ETag" to ExposeHeaders in the bucket CORS config'
              ));
              return;
            }
            resolve(etag);
          } else {
            resolve(xhr.responseURL || signedUrl); // rough "location"
          }
        } else {
          reject(new Error(`S3 upload failed: HTTP ${xhr.status}`));
        }
      });

      xhr.addEventListener('error', () => reject(new Error('Network error during upload')));
      xhr.addEventListener('abort', () => reject(new DOMException('Upload aborted', 'AbortError')));

      if (signal) {
        signal.addEventListener('abort', () => xhr.abort(), { once: true });
      }

      xhr.send(blob);
    });
  }
}

export default S3Uploader;
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
/**
 * S3 Signed URL Backend (Node.js / Express)
 *
 * Install deps:
 *   npm install @aws-sdk/client-s3 @aws-sdk/s3-request-presigner express
 *
 * Env vars required:
 *   AWS_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, S3_BUCKET_NAME
 */

import express from 'express';
import {
  S3Client,
  CreateMultipartUploadCommand,
  UploadPartCommand,
  CompleteMultipartUploadCommand,
  AbortMultipartUploadCommand,
  PutObjectCommand,
} from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';

const app = express();
app.use(express.json());

const s3 = new S3Client({
  region: process.env.AWS_REGION || 'us-east-1',
  credentials: {
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  },
});

const BUCKET = process.env.S3_BUCKET_NAME;
const URL_EXPIRY = 3600; // signed URL valid for 1 hour
const MAX_PART_NUMBER = 10000; // S3 hard limit on multipart part count

/**
 * Public object URL for a key: each path segment is percent-encoded,
 * '/' separators are preserved (the raw key previously went into the URL
 * unencoded, producing broken URLs for keys with spaces, '#', '?', etc.).
 */
const objectLocation = (key) =>
  `https://${BUCKET}.s3.amazonaws.com/${key.split('/').map(encodeURIComponent).join('/')}`;

// ─── Single endpoint that handles all upload phases ──────────────────────────
// POST /api/s3/sign
// Body: { type, key, contentType?, size?, uploadId?, partNumber?, parts? }

app.post('/api/s3/sign', async (req, res) => {
  const { type, key, contentType, size, uploadId, partNumber, parts } = req.body;

  // `key` is untrusted client input used as the S3 object key in every
  // phase — require it up front so we never sign a URL for the literal
  // key "undefined".
  if (!key || typeof key !== 'string') {
    return res.status(400).json({ error: 'key is required' });
  }

  try {
    switch (type) {
      // ── 1. Single-file upload (≤5MB) ─────────────────────────────────────
      case 'single': {
        const command = new PutObjectCommand({
          Bucket: BUCKET,
          Key: key,
          ContentType: contentType,
          ContentLength: size,
          // Optional: enforce public/private ACL, SSE, etc.
          // ServerSideEncryption: 'AES256',
        });
        const signedUrl = await getSignedUrl(s3, command, { expiresIn: URL_EXPIRY });
        return res.json({ signedUrl, location: objectLocation(key) });
      }

      // ── 2. Initiate multipart upload ──────────────────────────────────────
      case 'initiate': {
        const command = new CreateMultipartUploadCommand({
          Bucket: BUCKET,
          Key: key,
          ContentType: contentType,
        });
        const response = await s3.send(command);
        return res.json({ uploadId: response.UploadId });
      }

      // ── 3. Sign an individual part ────────────────────────────────────────
      case 'part': {
        // S3 part numbers are integers in [1, 10000].
        if (!uploadId || !Number.isInteger(partNumber) || partNumber < 1 || partNumber > MAX_PART_NUMBER) {
          return res.status(400).json({ error: 'uploadId and partNumber (1-10000) are required' });
        }
        const command = new UploadPartCommand({
          Bucket: BUCKET,
          Key: key,
          UploadId: uploadId,
          PartNumber: partNumber,
        });
        const signedUrl = await getSignedUrl(s3, command, { expiresIn: URL_EXPIRY });
        return res.json({ signedUrl });
      }

      // ── 4. Complete multipart upload ──────────────────────────────────────
      case 'complete': {
        if (!uploadId || !parts?.length) {
          return res.status(400).json({ error: 'uploadId and parts[] are required' });
        }
        const command = new CompleteMultipartUploadCommand({
          Bucket: BUCKET,
          Key: key,
          UploadId: uploadId,
          MultipartUpload: { Parts: parts }, // [{ PartNumber, ETag }, ...]
        });
        const response = await s3.send(command);
        return res.json({ location: response.Location });
      }

      // ── 5. Abort multipart upload (cleanup on error/cancel) ───────────────
      case 'abort': {
        if (!uploadId) return res.status(400).json({ error: 'uploadId is required' });
        const command = new AbortMultipartUploadCommand({
          Bucket: BUCKET,
          Key: key,
          UploadId: uploadId,
        });
        await s3.send(command);
        return res.json({ success: true });
      }

      default:
        return res.status(400).json({ error: `Unknown type: ${type}` });
    }
  } catch (err) {
    console.error('[S3 sign error]', err);
    res.status(500).json({ error: err.message });
  }
});

app.listen(3000, () => console.log('Server running on http://localhost:3000'));
export default app;
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
/**
 * Usage example — drop this in your frontend
 * Works with vanilla JS, React, Vue, etc.
 */
import S3Uploader from './s3-uploader-client.js';

// ─── Setup ────────────────────────────────────────────────────────────────────

const uploader = new S3Uploader({
  getSignedUrlEndpoint: '/api/s3/sign', // your backend
  chunkSize: 10 * 1024 * 1024, // 10MB parts
  maxConcurrent: 3, // 3 parallel part uploads
});

// ─── Basic upload ─────────────────────────────────────────────────────────────

async function uploadFile(file) {
  // Mirror progress into the console and the <progress> element.
  const reportProgress = (pct) => {
    console.log(`Upload progress: ${pct}%`);
    document.getElementById('progress').value = pct;
  };

  try {
    const result = await uploader.upload(file, {
      key: `uploads/${Date.now()}-${file.name}`, // custom S3 key (optional)
      onProgress: reportProgress,
    });
    console.log('Upload complete!', result);
    // result = { key: 'uploads/...', location: 'https://bucket.s3.amazonaws.com/...' }
  } catch (err) {
    console.error('Upload failed:', err.message);
  }
}

// ─── With cancellation support ────────────────────────────────────────────────

let controller;

async function uploadWithCancel(file) {
  controller = new AbortController();

  try {
    const result = await uploader.upload(file, {
      onProgress: (pct) => console.log(`${pct}%`),
      signal: controller.signal,
    });
    console.log('Done:', result);
  } catch (err) {
    if (err.name !== 'AbortError') {
      console.error('Error:', err);
      return;
    }
    console.log('Upload cancelled by user');
    // Optionally call your backend to abort the multipart upload in S3
    // to avoid storage costs for incomplete uploads
  }
}

function cancelUpload() {
  if (controller) controller.abort();
}

// ─── Wire up to a file input ──────────────────────────────────────────────────

document.getElementById('fileInput').addEventListener('change', (event) => {
  const [file] = event.target.files;
  if (file) uploadWithCancel(file);
});

document.getElementById('cancelBtn').addEventListener('click', cancelUpload);

/*
Minimal HTML to go with this:

<input type="file" id="fileInput" />
<progress id="progress" max="100" value="0"></progress>
<button id="cancelBtn">Cancel</button>
*/
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
Here's a comprehensive JavaScript solution for reading and uploading large files to S3 using signed URLs, with multipart upload support for reliability. Here's a breakdown of what's in each file:
|
|
2
|
+
|
|
3
|
+
---
|
|
4
|
+
|
|
5
|
+
**`s3-uploader-client.js`** — Browser-side `S3Uploader` class
|
|
6
|
+
- Files **≤ 5MB** → single `PUT` with a signed URL
|
|
7
|
+
- Files **> 5MB** → S3 multipart upload (10MB chunks by default)
|
|
8
|
+
- Concurrent part uploads (default: 3 in parallel) for speed
|
|
9
|
+
- `XMLHttpRequest`-based for accurate `upload.progress` events
|
|
10
|
+
- `AbortController` support to cancel mid-upload
|
|
11
|
+
|
|
12
|
+
**`s3-uploader-server.js`** — Node.js/Express backend
|
|
13
|
+
- Single `/api/s3/sign` endpoint handles all 5 phases: `single`, `initiate`, `part`, `complete`, `abort`
|
|
14
|
+
- Uses the AWS SDK v3 (`@aws-sdk/client-s3` + `@aws-sdk/s3-request-presigner`)
|
|
15
|
+
- Your AWS credentials never touch the browser
|
|
16
|
+
|
|
17
|
+
**`s3-uploader-usage.js`** — Integration examples with cancel support
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
**Quick setup:**
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
# Backend
|
|
25
|
+
npm install @aws-sdk/client-s3 @aws-sdk/s3-request-presigner express
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
Set env vars: `AWS_REGION`, `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `S3_BUCKET_NAME`
|
|
29
|
+
|
|
30
|
+
**S3 CORS config** you'll also need on your bucket:
|
|
31
|
+
```json
|
|
32
|
+
[{ "AllowedOrigins": ["*"], "AllowedMethods": ["PUT"], "AllowedHeaders": ["*"], "ExposeHeaders": ["ETag"] }]
|
|
33
|
+
```
|
|
34
|
+
The `ETag` exposure is critical — S3 multipart complete requires the ETags from each part.
|
package/package.json
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@es-labs/jslib",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"author": "Aaron Gong",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"description": "Shareable JS library using ES modules",
|
|
7
|
+
"keywords": [
|
|
8
|
+
"javascript",
|
|
9
|
+
"module"
|
|
10
|
+
],
|
|
11
|
+
"homepage": "https://github.com/es-labs/jscommon#readme",
|
|
12
|
+
"bugs": {
|
|
13
|
+
"url": "https://github.com/es-labs/jscommon/issues"
|
|
14
|
+
},
|
|
15
|
+
"repository": {
|
|
16
|
+
"type": "git",
|
|
17
|
+
"url": "git+https://github.com/es-labs/jscommon.git"
|
|
18
|
+
},
|
|
19
|
+
"type": "module",
|
|
20
|
+
"exports": {
|
|
21
|
+
"./auth": "./auth/index.js",
|
|
22
|
+
"./auth/*": "./auth/*.js",
|
|
23
|
+
"./comms/*": "./comms/*.js",
|
|
24
|
+
"./express/controller/auth/*": "./express/controller/auth/*.js",
|
|
25
|
+
"./express/*": "./express/*.js",
|
|
26
|
+
"./iso/*": "./iso/*.js",
|
|
27
|
+
"./services": "./services/index.js",
|
|
28
|
+
"./services/*": "./services/*.js",
|
|
29
|
+
"./utils/*": "./utils/*.js",
|
|
30
|
+
"./web/*": "./web/*.js",
|
|
31
|
+
"./*": "./*.js"
|
|
32
|
+
},
|
|
33
|
+
"main": "index.js",
|
|
34
|
+
"scripts": {
|
|
35
|
+
"test": "node --test-reporter=spec --experimental-test-coverage --test **/__tests__/*.spec.js",
|
|
36
|
+
"cover": "node --experimental-test-coverage --test-reporter=lcov --test-reporter-destination=lcov.info --test __tests__/**/*.spec.js"
|
|
37
|
+
},
|
|
38
|
+
"dependencies": {
|
|
39
|
+
"@aws-sdk/client-s3": "^3.1019.0",
|
|
40
|
+
"@node-saml/node-saml": "^5.1.0",
|
|
41
|
+
"ali-oss": "^6.23.0",
|
|
42
|
+
"bcryptjs": "^3.0.3",
|
|
43
|
+
"dotenv": "^17.3.1",
|
|
44
|
+
"ioredis": "^5.10.1",
|
|
45
|
+
"jsonwebtoken": "^9.0.3",
|
|
46
|
+
"keyv": "^5.6.0",
|
|
47
|
+
"knex": "^3.2.7",
|
|
48
|
+
"multer": "^2.1.1",
|
|
49
|
+
"otplib": "^13.4.0",
|
|
50
|
+
"web-push": "^3.6.7",
|
|
51
|
+
"ws": "^8.20.0"
|
|
52
|
+
}
|
|
53
|
+
}
|