@storecraft/storage-s3-compatible 1.0.0 → 1.0.1
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +17 -6
- package/adapter.js +28 -35
- package/jsconfig.json +13 -0
- package/package.json +3 -3
- package/tests/storage.aws-s3.test.js +90 -0
- package/tests/storage.r2.test.js +7 -5
- package/types.public.d.ts +11 -1
- package/index.js +0 -1
- package/tsconfig.json +0 -14
package/README.md
CHANGED

````diff
@@ -1,5 +1,10 @@
 # Storecraft S3 compatible storage
 
+<div style="text-align:center">
+<img src='https://storecraft.app/storecraft-color.svg'
+width='90%'' />
+</div><hr/><br/>
+
 `fetch` ready support for an `S3` like storage:
 - `Amazon S3`
 - `Cloudflare R2`
@@ -11,15 +16,21 @@ Features:
 - Supports streaming `Get` / `Put` / `Delete`
 - Supports `presigned` `Get` / `Put` requests to offload to client
 
+```bash
+npm i @storecraft/storage-s3-compatible
+```
+
 ## usage
 
 ```js
-import { R2 } from '@storecraft/storage-s3-compatible'
-
-const storage = new R2(
-
-process.env.
-
+import { R2, S3, DigitalOceanSpaces, S3CompatibleStorage } from '@storecraft/storage-s3-compatible'
+
+const storage = new R2({
+  accessKeyId: process.env.R2_ACCESS_KEY_ID,
+  account_id: process.env.R2_ACCOUNT_ID,
+  bucket: process.env.R2_BUCKET,
+  secretAccessKey: process.env.R2_SECRET_ACCESS_KEY
+});
 
 // write
 await storage.putBlob(
````
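The new README example covers only `R2`, but the import line now exposes all four classes, and each takes the same kind of config object. A sketch of the equivalent setup for `S3` and `DigitalOceanSpaces` (the `S3_*` env names appear in the new test below; the `DO_SPACES_*` names are hypothetical):

```ts
import { S3, DigitalOceanSpaces } from '@storecraft/storage-s3-compatible';

// AWS S3: the adapter derives the endpoint from `region` (see adapter.js below).
const s3 = new S3({
  accessKeyId: process.env.S3_ACCESS_KEY_ID,
  secretAccessKey: process.env.S3_SECRET_KEY,
  bucket: process.env.S3_BUCKET,
  region: process.env.S3_REGION,
  forcePathStyle: false,
});

// DigitalOcean Spaces: `region` is the Spaces slug, e.g. 'nyc3'.
const spaces = new DigitalOceanSpaces({
  accessKeyId: process.env.DO_SPACES_KEY,        // hypothetical env var
  secretAccessKey: process.env.DO_SPACES_SECRET, // hypothetical env var
  bucket: process.env.DO_SPACES_BUCKET,          // hypothetical env var
  region: 'nyc3',
});
```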
package/adapter.js
CHANGED

```diff
@@ -25,7 +25,7 @@ const infer_content_type = (name) => {
 
 
 /**
- * @typedef {import('./types.public.
+ * @typedef {import('./types.public.d.ts').Config} Config
  */
 
 /**
@@ -259,18 +259,16 @@ export class S3CompatibleStorage {
 export class R2 extends S3CompatibleStorage {
 
   /**
-   *
-   * @param {string} bucket
-   * @param {string} account_id
-   * @param {string} access_key_id
-   * @param {string} secret_access_key
+   * @param {import('./types.public.d.ts').R2Config} config
    */
-  constructor(bucket, account_id,
-    super(
-
-
-
+  constructor({bucket, account_id, accessKeyId, secretAccessKey}) {
+    super(
+      {
+        endpoint: `https://${account_id}.r2.cloudflarestorage.com`,
+        accessKeyId, secretAccessKey, bucket,
+        forcePathStyle: true, region: 'auto'
+      }
+    )
   }
 
 }
@@ -281,19 +279,16 @@ export class R2 extends S3CompatibleStorage {
 export class S3 extends S3CompatibleStorage {
 
   /**
-   *
-   * @param {string} bucket
-   * @param {string} region
-   * @param {string} access_key_id
-   * @param {string} secret_access_key
-   * @param {boolean} forcePathStyle
+   * @param {import('./types.public.d.ts').AwsS3Config} config
    */
-  constructor(bucket, region,
-    super(
-
-
-
-
+  constructor({bucket, region, accessKeyId, secretAccessKey, forcePathStyle=false}) {
+    super(
+      {
+        endpoint: `https://s3${region ? ('.'+region) : ''}.amazonaws.com`,
+        accessKeyId, secretAccessKey,
+        bucket, forcePathStyle, region
+      }
+    )
   }
 
 }
@@ -304,18 +299,16 @@ export class S3 extends S3CompatibleStorage {
 export class DigitalOceanSpaces extends S3CompatibleStorage {
 
   /**
-   *
-   * @param {string} bucket
-   * @param {string} region 'nyc3' for example
-   * @param {string} access_key_id
-   * @param {string} secret_access_key
+   * @param {Omit<import('./types.public.d.ts').Config, 'endpoint' | 'forcePathStyle'>} config
    */
-  constructor(bucket, region,
-    super(
-
-
-
-
+  constructor({bucket, region, accessKeyId, secretAccessKey}) {
+    super(
+      {
+        endpoint: `https://${region}.digitaloceanspaces.com`,
+        accessKeyId, secretAccessKey,
+        bucket, forcePathStyle: false, region: 'auto'
+      }
+    )
   }
 
 }
```
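Each subclass now just fills in the endpoint and forwards one config object to `S3CompatibleStorage`, which stays exported for any other S3-compatible service. A minimal sketch of direct use, assuming a self-hosted MinIO server (URL and credentials are placeholders, not from the package):

```ts
import { S3CompatibleStorage } from '@storecraft/storage-s3-compatible';

// Direct use of the base class: supply the endpoint yourself, exactly as the
// R2 / S3 / DigitalOceanSpaces constructors do internally.
const storage = new S3CompatibleStorage({
  endpoint: 'http://localhost:9000', // placeholder: a local MinIO server
  accessKeyId: 'minioadmin',         // placeholder credentials
  secretAccessKey: 'minioadmin',
  bucket: 'test-bucket',
  forcePathStyle: true,              // path-style is typical for self-hosted S3
  region: 'auto',
});
```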
package/jsconfig.json
ADDED
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@storecraft/storage-s3-compatible",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "Official S3-Compatible Storage adapter for storecraft",
   "license": "MIT",
   "author": "Tomer Shalev (https://github.com/store-craft)",
@@ -21,8 +21,8 @@
     "storage-s3-compatible:publish": "npm publish --access public"
   },
   "type": "module",
-  "main": "
-  "types": "
+  "main": "adapter.js",
+  "types": "types.public.d.ts",
   "dependencies": {
     "@storecraft/core": "^1.0.0"
   },
```
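With `index.js` gone (deleted at the end of this diff), `main` points straight at `adapter.js` and `types` at `types.public.d.ts`, so root imports keep working unchanged:

```ts
// Resolves via the new "main": "adapter.js" entry; no consumer change needed.
import { R2, S3, DigitalOceanSpaces, S3CompatibleStorage } from '@storecraft/storage-s3-compatible';
```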
package/tests/storage.aws-s3.test.js
ADDED

```diff
@@ -0,0 +1,90 @@
+import 'dotenv/config';
+import { test } from 'uvu';
+import * as assert from 'uvu/assert';
+import { S3 } from '@storecraft/storage-s3-compatible'
+import { readFile } from 'node:fs/promises';
+import { homedir } from 'node:os'
+import * as path from 'node:path';
+
+const areBlobsEqual = async (blob1, blob2) => {
+  return !Buffer.from(await blob1.arrayBuffer()).compare(
+    Buffer.from(await blob2.arrayBuffer())
+  );
+};
+
+const FORCE_PATH_STYLE = true;
+
+const storage = new S3({
+  accessKeyId: process.env.S3_ACCESS_KEY_ID,
+  bucket: process.env.S3_BUCKET,
+  forcePathStyle: FORCE_PATH_STYLE,
+  region: process.env.S3_REGION,
+  secretAccessKey: process.env.S3_SECRET_KEY
+});
+
+test.before(async () => { await storage.init(null) })
+
+test('blob put/get/delete', async () => {
+  const data = [
+    // {
+    //   key: 'folder1/tomer.txt',
+    //   blob: new Blob(['this is some text from tomer :)']),
+    // },
+    {
+      key: 'node2222.png',
+      blob: new Blob([await readFile('./node.png')])
+    }
+  ];
+
+  data.forEach(
+    async d => {
+      // write
+      await storage.putBlob(d.key, d.blob);
+      // read
+      const { value: blob_read } = await storage.getBlob(d.key);
+      const url = await storage.getSigned(d.key);
+      console.log('presign GET url ', url);
+
+      // compare
+      const equal = await areBlobsEqual(blob_read, d.blob);
+      assert.ok(equal, 'Blobs are not equal !!!');
+
+      // delete
+      // await storage.remove(d.key);
+    }
+  );
+
+});
+
+test('blob put (presign)', async () => {
+  const data = [
+    // {
+    //   key: 'folder1/tomer.txt',
+    //   blob: new Blob(['this is some text from tomer :)']),
+    // },
+    {
+      key: 'node_test2.png',
+      blob: new Blob([await readFile('./node.png')])
+    }
+  ];
+
+  data.forEach(
+    async d => {
+      // get put presigned url
+      const { url, method, headers } = await storage.putSigned(d.key);
+      // now let's use it to upload
+      const r = await fetch(
+        url, {
+          method,
+          headers,
+          body: d.blob
+        }
+      );
+
+      assert.ok(r.ok, 'upload failed')
+    }
+  );
+
+});
+
+test.run();
```
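One caveat in this new test: `Array.prototype.forEach` does not await async callbacks, so the assertions can fire after `uvu` has already considered the test finished. A sequential variant of the first loop (an editorial sketch reusing this file's `storage`, `areBlobsEqual`, and imports, not code from the package):

```ts
test('blob put/get/delete (sequential)', async () => {
  const data = [
    { key: 'node2222.png', blob: new Blob([await readFile('./node.png')]) }
  ];
  // Awaiting each item inside for...of keeps failures inside the test body.
  for (const d of data) {
    await storage.putBlob(d.key, d.blob);                      // write
    const { value: blob_read } = await storage.getBlob(d.key); // read back
    assert.ok(await areBlobsEqual(blob_read, d.blob), 'Blobs are not equal !!!');
  }
});
```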
package/tests/storage.r2.test.js
CHANGED

```diff
@@ -12,12 +12,14 @@ const areBlobsEqual = async (blob1, blob2) => {
   );
 };
 
-const storage = new R2(
-
-process.env.
-
+const storage = new R2({
+  accessKeyId: process.env.R2_ACCESS_KEY_ID,
+  account_id: process.env.R2_ACCOUNT_ID,
+  bucket: process.env.R2_BUCKET,
+  secretAccessKey: process.env.R2_SECRET_ACCESS_KEY
+});
 
-test.before(async () => await storage.init())
+test.before(async () => { await storage.init(null) })
 
 test('blob put/get/delete', async () => {
   const data = [
```
package/types.public.d.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-export
+export { DigitalOceanSpaces, R2, S3, S3CompatibleStorage } from './adapter.js';
 
 export type Config = {
   endpoint: string;
@@ -9,3 +9,13 @@ export type Config = {
   forcePathStyle: boolean;
 }
 
+export type R2Config = Omit<Config, 'region' | 'forcePathStyle' | 'endpoint'> & {
+  /**
+   * @description cloudflare account id
+   */
+  account_id: string;
+};
+
+
+export type AwsS3Config = Omit<Config, 'endpoint'>;
+
```
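Spelled out, the two new aliases resolve as below, assuming the root `types` entry exposes them; the `Config` fields not visible in this diff (`accessKeyId`, `secretAccessKey`, `bucket`, `region`) are inferred from the constructors in adapter.js:

```ts
import type { R2Config, AwsS3Config } from '@storecraft/storage-s3-compatible';

// R2Config drops endpoint/region/forcePathStyle (the R2 class derives them)
// and adds the Cloudflare account id. All values below are placeholders.
const r2: R2Config = {
  accessKeyId: 'key',
  secretAccessKey: 'secret',
  bucket: 'my-bucket',
  account_id: 'my-account-id',
};

// AwsS3Config keeps everything except `endpoint`, which S3 derives from `region`.
const aws: AwsS3Config = {
  accessKeyId: 'key',
  secretAccessKey: 'secret',
  bucket: 'my-bucket',
  region: 'us-east-1',
  forcePathStyle: false,
};
```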
package/index.js
DELETED

```diff
@@ -1 +0,0 @@
-export * from './adapter.js'
```
package/tsconfig.json
DELETED

```diff
@@ -1,14 +0,0 @@
-{
-  "compileOnSave": false,
-  "compilerOptions": {
-    "noEmit": true,
-    "allowJs": true,
-    "checkJs": true,
-    "target": "ESNext",
-    "resolveJsonModule": true,
-    "moduleResolution": "NodeNext",
-    "module": "NodeNext",
-    "composite": true,
-  },
-  "include": ["*", "src/*"]
-}
```