@ethersphere/bee-js 6.8.0 → 6.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/modules/bytes.js +5 -3
- package/dist/cjs/modules/bzz.js +12 -7
- package/dist/cjs/types/index.js +30 -1
- package/dist/cjs/utils/expose.js +6 -2
- package/dist/cjs/utils/headers.js +31 -5
- package/dist/cjs/utils/redundancy.js +130 -0
- package/dist/index.browser.min.js +1 -1
- package/dist/index.browser.min.js.map +1 -1
- package/dist/mjs/modules/bytes.js +8 -6
- package/dist/mjs/modules/bzz.js +18 -10
- package/dist/mjs/types/index.js +29 -0
- package/dist/mjs/utils/expose.js +2 -1
- package/dist/mjs/utils/headers.js +32 -4
- package/dist/mjs/utils/redundancy.js +88 -0
- package/dist/types/bee.d.ts +6 -6
- package/dist/types/modules/bytes.d.ts +4 -4
- package/dist/types/modules/bzz.d.ts +5 -5
- package/dist/types/types/index.d.ts +44 -0
- package/dist/types/utils/expose.d.ts +2 -1
- package/dist/types/utils/headers.d.ts +3 -1
- package/dist/types/utils/redundancy.d.ts +21 -0
- package/package.json +3 -2
package/dist/mjs/modules/bytes.js CHANGED

@@ -1,5 +1,5 @@
 import { wrapBytesWithHelpers } from "../utils/bytes.js";
-import {
+import { extractDownloadHeaders, extractRedundantUploadHeaders } from "../utils/headers.js";
 import { http } from "../utils/http.js";
 import { makeTagUid } from "../utils/type.js";
 const endpoint = 'bytes';
@@ -19,7 +19,7 @@ export async function upload(requestOptions, data, postageBatchId, options) {
         data,
         headers: {
             'content-type': 'application/octet-stream',
-            ...
+            ...extractRedundantUploadHeaders(postageBatchId, options)
         }
     });
     return {
@@ -33,10 +33,11 @@ export async function upload(requestOptions, data, postageBatchId, options) {
  * @param ky
  * @param hash Bee content reference
  */
-export async function download(requestOptions, hash) {
+export async function download(requestOptions, hash, options) {
     const response = await http(requestOptions, {
         responseType: 'arraybuffer',
-        url: `${endpoint}/${hash}`
+        url: `${endpoint}/${hash}`,
+        headers: extractDownloadHeaders(options)
     });
     return wrapBytesWithHelpers(new Uint8Array(response.data));
 }
@@ -46,10 +47,11 @@ export async function download(requestOptions, hash) {
  * @param ky
  * @param hash Bee content reference
  */
-export async function downloadReadable(requestOptions, hash) {
+export async function downloadReadable(requestOptions, hash, options) {
     const response = await http(requestOptions, {
         responseType: 'stream',
-        url: `${endpoint}/${hash}`
+        url: `${endpoint}/${hash}`,
+        headers: extractDownloadHeaders(options)
     });
     return response.data;
 }
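On the wire, the new third parameter means `GET /bytes/<hash>` can now carry per-request redundancy headers. Below is a minimal self-contained sketch of that mechanism, using a stand-in fetch-based `download` and simplified option types rather than the library's internals; the node URL and reference in the usage comment are placeholders.

```ts
// Stand-in option type mirroring DownloadRedundancyOptions from this release.
interface DownloadOptions {
  redundancyStrategy?: number // 0 NONE, 1 DATA, 2 PROX, 3 RACE
  fallback?: boolean
  timeoutMs?: number
}

// Mirrors the extractDownloadHeaders helper introduced in 6.9.0.
function extractDownloadHeaders(options?: DownloadOptions): Record<string, string> {
  const headers: Record<string, string> = {}
  if (options?.redundancyStrategy) {
    headers['swarm-redundancy-strategy'] = String(options.redundancyStrategy)
  }
  if (options?.fallback === false) {
    headers['swarm-redundancy-fallback-mode'] = 'false'
  }
  if (options?.timeoutMs !== undefined) {
    headers['swarm-chunk-retrieval-timeout'] = String(options.timeoutMs)
  }
  return headers
}

// Shape of the request the patched download() now makes.
async function download(baseUrl: string, hash: string, options?: DownloadOptions): Promise<Uint8Array> {
  const response = await fetch(`${baseUrl}/bytes/${hash}`, { headers: extractDownloadHeaders(options) })
  return new Uint8Array(await response.arrayBuffer())
}

// e.g. await download('http://localhost:1633', '36b7…cfe8f', { redundancyStrategy: 3 /* RACE */ })
```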
package/dist/mjs/modules/bzz.js CHANGED

@@ -1,13 +1,13 @@
 import { wrapBytesWithHelpers } from "../utils/bytes.js";
 import { assertCollection } from "../utils/collection.js";
-import {
+import { extractDownloadHeaders, extractRedundantUploadHeaders, readFileHeaders } from "../utils/headers.js";
 import { http } from "../utils/http.js";
 import { isReadable } from "../utils/stream.js";
 import { makeTar } from "../utils/tar.js";
 import { makeTagUid } from "../utils/type.js";
 const bzzEndpoint = 'bzz';
 function extractFileUploadHeaders(postageBatchId, options) {
-    const headers =
+    const headers = extractRedundantUploadHeaders(postageBatchId, options);
     if (options?.size) headers['content-length'] = String(options.size);
     if (options?.contentType) headers['content-type'] = options.contentType;
     return headers;
@@ -23,7 +23,9 @@ function extractFileUploadHeaders(postageBatchId, options) {
  */
 export async function uploadFile(requestOptions, data, postageBatchId, name, options) {
     if (isReadable(data) && !options?.contentType) {
-        if (!options)
+        if (!options) {
+            options = {};
+        }
         options.contentType = 'application/octet-stream';
     }
     const response = await http(requestOptions, {
@@ -50,11 +52,12 @@ export async function uploadFile(requestOptions, data, postageBatchId, name, options) {
  * @param hash Bee file or collection hash
  * @param path If hash is collection then this defines path to a single file in the collection
  */
-export async function downloadFile(requestOptions, hash, path = '') {
+export async function downloadFile(requestOptions, hash, path = '', options) {
     const response = await http(requestOptions, {
         method: 'GET',
         responseType: 'arraybuffer',
-        url: `${bzzEndpoint}/${hash}/${path}`
+        url: `${bzzEndpoint}/${hash}/${path}`,
+        headers: extractDownloadHeaders(options)
     });
     const file = {
         ...readFileHeaders(response.headers),
@@ -69,11 +72,12 @@ export async function downloadFile(requestOptions, hash, path = '') {
  * @param hash Bee file or collection hash
  * @param path If hash is collection then this defines path to a single file in the collection
  */
-export async function downloadFileReadable(requestOptions, hash, path = '') {
+export async function downloadFileReadable(requestOptions, hash, path = '', options) {
     const response = await http(requestOptions, {
         method: 'GET',
         responseType: 'stream',
-        url: `${bzzEndpoint}/${hash}/${path}`
+        url: `${bzzEndpoint}/${hash}/${path}`,
+        headers: extractDownloadHeaders(options)
     });
     const file = {
         ...readFileHeaders(response.headers),
@@ -82,9 +86,13 @@ export async function downloadFileReadable(requestOptions, hash, path = '') {
     return file;
 }
 function extractCollectionUploadHeaders(postageBatchId, options) {
-    const headers =
-    if (options?.indexDocument)
-
+    const headers = extractRedundantUploadHeaders(postageBatchId, options);
+    if (options?.indexDocument) {
+        headers['swarm-index-document'] = options.indexDocument;
+    }
+    if (options?.errorDocument) {
+        headers['swarm-error-document'] = options.errorDocument;
+    }
     return headers;
 }
 /**
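For callers, the bzz changes surface through the `Bee` class (see the `bee.d.ts` hunks further down). A hedged usage sketch, assuming `RedundancyLevel` is re-exported from the package root as the type declarations suggest; the node URL and postage batch ID are placeholders.

```ts
import { Bee, RedundancyLevel } from '@ethersphere/bee-js'

const bee = new Bee('http://localhost:1633') // placeholder node URL
const batchId = 'f1e4ff753ea1cb923269ed0cda909d13a10d624719edf261e196584e9e764e50' // placeholder

// redundancyLevel rides alongside the usual FileUploadOptions and is turned
// into the swarm-redundancy-level header by extractFileUploadHeaders above.
const { reference } = await bee.uploadFile(batchId, 'hello redundancy', 'hello.txt', {
  contentType: 'text/plain',
  redundancyLevel: RedundancyLevel.MEDIUM,
})
console.log(reference)
```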
package/dist/mjs/types/index.js CHANGED

@@ -22,6 +22,35 @@ export const STAMPS_DEPTH_MAX = 255;
 export const TAGS_LIMIT_MIN = 1;
 export const TAGS_LIMIT_MAX = 1000;
 export const FEED_INDEX_HEX_LENGTH = 16;
+/**
+ * Add redundancy to the data being uploaded so that downloaders can download it with better UX.
+ * 0 value is default and does not add any redundancy to the file.
+ */
+export var RedundancyLevel;
+(function (RedundancyLevel) {
+    RedundancyLevel[RedundancyLevel["OFF"] = 0] = "OFF";
+    RedundancyLevel[RedundancyLevel["MEDIUM"] = 1] = "MEDIUM";
+    RedundancyLevel[RedundancyLevel["STRONG"] = 2] = "STRONG";
+    RedundancyLevel[RedundancyLevel["INSANE"] = 3] = "INSANE";
+    RedundancyLevel[RedundancyLevel["PARANOID"] = 4] = "PARANOID";
+})(RedundancyLevel || (RedundancyLevel = {}));
+/**
+ * Specify the retrieve strategy on redundant data.
+ * The possible values are NONE, DATA, PROX and RACE.
+ * Strategy NONE means no prefetching takes place.
+ * Strategy DATA means only data chunks are prefetched.
+ * Strategy PROX means only chunks that are close to the node are prefetched.
+ * Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
+ * Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
+ * The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
+ */
+export var RedundancyStrategy;
+(function (RedundancyStrategy) {
+    RedundancyStrategy[RedundancyStrategy["NONE"] = 0] = "NONE";
+    RedundancyStrategy[RedundancyStrategy["DATA"] = 1] = "DATA";
+    RedundancyStrategy[RedundancyStrategy["PROX"] = 2] = "PROX";
+    RedundancyStrategy[RedundancyStrategy["RACE"] = 3] = "RACE";
+})(RedundancyStrategy || (RedundancyStrategy = {}));
 /*********************************************************
  * Writers and Readers interfaces
  */
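These are TypeScript's standard compiled numeric enums, so forward and reverse lookups both work at runtime. A brief illustration, assuming the enums are re-exported from the package root:

```ts
import { RedundancyLevel, RedundancyStrategy } from '@ethersphere/bee-js'

console.log(RedundancyLevel.PARANOID) // 4
console.log(RedundancyLevel[4])       // 'PARANOID' — reverse mapping
console.log(RedundancyStrategy.PROX)  // 2
```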
package/dist/mjs/utils/expose.js CHANGED

@@ -6,4 +6,5 @@ export { ethToSwarmAddress, fromLittleEndian, isHexEthAddress, makeEthAddress, m
 export { isNodeReadable, isReadable, isReadableStream, normalizeToReadableStream, readableNodeToWeb, readableWebToNode } from "./stream.js";
 export { keccak256Hash } from "./hash.js";
 export { makeMaxTarget } from "./pss.js";
-export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur,
+export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampEffectiveBytes, getStampMaximumCapacityBytes, getStampTtlSeconds, getStampUsage } from "./stamps.js";
+export { approximateOverheadForRedundancyLevel, getRedundancyStat, getRedundancyStats } from "./redundancy.js";
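Assuming `expose.js` backs the package's public `Utils` namespace (as in earlier releases), the newly exported helpers become reachable like the existing stamp utilities:

```ts
import { Utils } from '@ethersphere/bee-js'

// Error tolerance per level, as defined in utils/redundancy.js below.
const stats = Utils.getRedundancyStats()
console.log(stats.medium.errorTolerance)   // 0.01
console.log(stats.paranoid.errorTolerance) // 0.5
```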
package/dist/mjs/utils/headers.js CHANGED

@@ -42,9 +42,37 @@ export function extractUploadHeaders(postageBatchId, options) {
     const headers = {
         'swarm-postage-batch-id': postageBatchId
     };
-    if (options?.pin)
-
-
-    if (
+    if (options?.pin) {
+        headers['swarm-pin'] = String(options.pin);
+    }
+    if (options?.encrypt) {
+        headers['swarm-encrypt'] = String(options.encrypt);
+    }
+    if (options?.tag) {
+        headers['swarm-tag'] = String(options.tag);
+    }
+    if (typeof options?.deferred === 'boolean') {
+        headers['swarm-deferred-upload'] = options.deferred.toString();
+    }
+    return headers;
+}
+export function extractRedundantUploadHeaders(postageBatchId, options) {
+    const headers = extractUploadHeaders(postageBatchId, options);
+    if (options?.redundancyLevel) {
+        headers['swarm-redundancy-level'] = String(options.redundancyLevel);
+    }
+    return headers;
+}
+export function extractDownloadHeaders(options) {
+    const headers = {};
+    if (options?.redundancyStrategy) {
+        headers['swarm-redundancy-strategy'] = String(options.redundancyStrategy);
+    }
+    if (options?.fallback === false) {
+        headers['swarm-redundancy-fallback-mode'] = 'false';
+    }
+    if (options?.timeoutMs !== undefined) {
+        headers['swarm-chunk-retrieval-timeout'] = String(options.timeoutMs);
+    }
     return headers;
 }
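Note the truthiness checks above: `RedundancyLevel.OFF` (0) and `RedundancyStrategy.NONE` (0) are falsy, so neither emits a header and the node's defaults apply. A self-contained mirror of the upload-side extractor (simplified types, not the library's code verbatim) makes the resulting header maps concrete; the batch ID is a placeholder.

```ts
enum RedundancyLevel { OFF = 0, MEDIUM = 1, STRONG = 2, INSANE = 3, PARANOID = 4 }

// Mirrors extractRedundantUploadHeaders from the hunk above.
function uploadHeaders(postageBatchId: string, options?: { redundancyLevel?: RedundancyLevel }) {
  const headers: Record<string, string> = { 'swarm-postage-batch-id': postageBatchId }
  if (options?.redundancyLevel) {
    headers['swarm-redundancy-level'] = String(options.redundancyLevel)
  }
  return headers
}

console.log(uploadHeaders('f1e4…', { redundancyLevel: RedundancyLevel.STRONG }))
// { 'swarm-postage-batch-id': 'f1e4…', 'swarm-redundancy-level': '2' }
console.log(uploadHeaders('f1e4…', { redundancyLevel: RedundancyLevel.OFF }))
// { 'swarm-postage-batch-id': 'f1e4…' } — OFF is falsy, so no header is sent
```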
package/dist/mjs/utils/redundancy.js ADDED

@@ -0,0 +1,88 @@
+import { RedundancyLevel } from "../index.js";
+const mediumTable = [[94, 68, 46, 28, 14, 5, 1], [9, 8, 7, 6, 5, 4, 3]];
+const encMediumTable = [[47, 34, 23, 14, 7, 2], [9, 8, 7, 6, 5, 4]];
+const strongTable = [[104, 95, 86, 77, 69, 61, 53, 46, 39, 32, 26, 20, 15, 10, 6, 3, 1], [21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5]];
+const encStrongTable = [[52, 47, 43, 38, 34, 30, 26, 23, 19, 16, 13, 10, 7, 5, 3, 1], [21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6]];
+const insaneTable = [[92, 87, 82, 77, 73, 68, 63, 59, 54, 50, 45, 41, 37, 33, 29, 26, 22, 19, 16, 13, 10, 8, 5, 3, 2, 1], [31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6]];
+const encInsaneTable = [[46, 43, 41, 38, 36, 34, 31, 29, 27, 25, 22, 20, 18, 16, 14, 13, 11, 9, 8, 6, 5, 4, 2, 1], [31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 7]];
+const paranoidTable = [[37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [90, 88, 87, 85, 84, 82, 81, 79, 77, 76, 74, 72, 71, 69, 67, 66, 64, 62, 60, 59, 57, 55, 53, 51, 49, 48, 46, 44, 41, 39, 37, 35, 32, 30, 27, 24, 20]];
+const encParanoidTable = [[18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], [88, 85, 82, 79, 76, 72, 69, 66, 62, 59, 55, 51, 48, 44, 39, 35, 30, 24]];
+const tables = {
+    [RedundancyLevel.MEDIUM]: [mediumTable, encMediumTable],
+    [RedundancyLevel.STRONG]: [strongTable, encStrongTable],
+    [RedundancyLevel.INSANE]: [insaneTable, encInsaneTable],
+    [RedundancyLevel.PARANOID]: [paranoidTable, encParanoidTable]
+};
+/**
+ * Returns an approximate multiplier for the overhead of a given redundancy level.
+ * Redundancy level is a tradeoff between storage overhead and fault tolerance.
+ * Use this number to estimate the amount of chunks that will be stored for a given
+ * redundancy level.
+ */
+export function approximateOverheadForRedundancyLevel(chunks, level, encrypted) {
+    const tableType = level === RedundancyLevel.MEDIUM ? tables[RedundancyLevel.MEDIUM] : level === RedundancyLevel.STRONG ? tables[RedundancyLevel.STRONG] : level === RedundancyLevel.INSANE ? tables[RedundancyLevel.INSANE] : tables[RedundancyLevel.PARANOID];
+    const table = encrypted ? tableType[1] : tableType[0];
+    const [supportedChunks, parities] = table;
+    for (let i = 0; i < supportedChunks.length; i++) {
+        if (chunks >= supportedChunks[i]) {
+            return parities[i] / supportedChunks[i];
+        }
+    }
+    return parities[parities.length - 1] / supportedChunks[supportedChunks.length - 1];
+}
+const medium = {
+    label: 'medium',
+    value: RedundancyLevel.MEDIUM,
+    errorTolerance: 0.01
+};
+const strong = {
+    label: 'strong',
+    value: RedundancyLevel.STRONG,
+    errorTolerance: 0.05
+};
+const insane = {
+    label: 'insane',
+    value: RedundancyLevel.INSANE,
+    errorTolerance: 0.1
+};
+const paranoid = {
+    label: 'paranoid',
+    value: RedundancyLevel.PARANOID,
+    errorTolerance: 0.5
+};
+export function getRedundancyStats() {
+    return {
+        medium,
+        strong,
+        insane,
+        paranoid
+    };
+}
+export function getRedundancyStat(level) {
+    if (typeof level === 'string') {
+        switch (level.toLowerCase()) {
+            case 'medium':
+                return medium;
+            case 'strong':
+                return strong;
+            case 'insane':
+                return insane;
+            case 'paranoid':
+                return paranoid;
+            default:
+                throw new Error(`Unknown redundancy level '${level}'`);
+        }
+    }
+    switch (level) {
+        case RedundancyLevel.MEDIUM:
+            return medium;
+        case RedundancyLevel.STRONG:
+            return strong;
+        case RedundancyLevel.INSANE:
+            return insane;
+        case RedundancyLevel.PARANOID:
+            return paranoid;
+        default:
+            throw new Error(`Unknown redundancy level '${level}'`);
+    }
+}
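The tables map a chunk count to a parity/data ratio, so the returned multiplier estimates how many extra parity chunks an upload will produce. A usage sketch, again assuming the helpers surface through the `Utils` namespace:

```ts
import { Utils, RedundancyLevel } from '@ethersphere/bee-js'

const chunks = 1000 // roughly 4 MB of data at 4 kB Swarm chunks

// STRONG, unencrypted: 1000 ≥ 104, so the first table entry applies → 21/104 ≈ 0.2.
const multiplier = Utils.approximateOverheadForRedundancyLevel(chunks, RedundancyLevel.STRONG, false)
console.log(`~${Math.ceil(chunks * multiplier)} parity chunks on top of ${chunks}`) // ~202

console.log(Utils.getRedundancyStat('paranoid'))
// { label: 'paranoid', value: 4, errorTolerance: 0.5 }
```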
package/dist/types/bee.d.ts CHANGED

@@ -1,6 +1,6 @@
 import { Index, IndexBytes } from './feed';
 import { FeedType } from './feed/type';
-import type { AddressPrefix, AnyJson, BatchId, BeeOptions, BeeRequestOptions, CollectionUploadOptions, Data, FeedReader, FeedWriter, FileData, FileUploadOptions, JsonFeedOptions, Pin, PssMessageHandler, PssSubscription, PublicKey, Reference, SOCReader, SOCWriter, Signer, Tag, Topic, UploadOptions, UploadResultWithCid } from './types';
+import type { AddressPrefix, AnyJson, BatchId, BeeOptions, BeeRequestOptions, CollectionUploadOptions, Data, FeedReader, FeedWriter, FileData, FileUploadOptions, JsonFeedOptions, Pin, PssMessageHandler, PssSubscription, PublicKey, Reference, SOCReader, SOCWriter, Signer, Tag, Topic, UploadOptions, UploadRedundancyOptions, UploadResultWithCid } from './types';
 import { AllTagsOptions, Collection, FeedManifestResult, Readable, ReferenceCidOrEns, ReferenceOrEns, UploadResult } from './types';
 import { EthAddress } from './utils/eth';
 /**
@@ -39,7 +39,7 @@ export declare class Bee {
      * @see [Bee docs - Upload and download](https://docs.ethswarm.org/docs/access-the-swarm/upload-and-download)
      * @see [Bee API reference - `POST /bytes`](https://docs.ethswarm.org/api/#tag/Bytes/paths/~1bytes/post)
      */
-    uploadData(postageBatchId: string | BatchId, data: string | Uint8Array, options?: UploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResult>;
+    uploadData(postageBatchId: string | BatchId, data: string | Uint8Array, options?: UploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResult>;
     /**
      * Download data as a byte array
      *
@@ -101,7 +101,7 @@ export declare class Bee {
      * @see [Bee API reference - `POST /bzz`](https://docs.ethswarm.org/api/#tag/File/paths/~1bzz/post)
      * @returns reference is a content hash of the file
      */
-    uploadFile(postageBatchId: string | BatchId, data: string | Uint8Array | Readable | File, name?: string, options?: FileUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
+    uploadFile(postageBatchId: string | BatchId, data: string | Uint8Array | Readable | File, name?: string, options?: FileUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
     /**
      * Download single file.
      *
@@ -144,7 +144,7 @@ export declare class Bee {
      * @see [Bee docs - Upload directory](https://docs.ethswarm.org/docs/access-the-swarm/upload-a-directory/)
      * @see [Bee API reference - `POST /bzz`](https://docs.ethswarm.org/api/#tag/Collection/paths/~1bzz/post)
      */
-    uploadFiles(postageBatchId: string | BatchId, fileList: FileList | File[], options?: CollectionUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
+    uploadFiles(postageBatchId: string | BatchId, fileList: FileList | File[], options?: CollectionUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
     /**
      * Upload Collection that you can assembly yourself.
      *
@@ -155,7 +155,7 @@ export declare class Bee {
      * @param collection
      * @param options Collections and request options
      */
-    uploadCollection(postageBatchId: string | BatchId, collection: Collection<Uint8Array | Readable>, options?: CollectionUploadOptions): Promise<UploadResultWithCid>;
+    uploadCollection(postageBatchId: string | BatchId, collection: Collection<Uint8Array | Readable>, options?: CollectionUploadOptions & UploadRedundancyOptions): Promise<UploadResultWithCid>;
     /**
      * Upload collection of files.
      *
@@ -172,7 +172,7 @@ export declare class Bee {
      * @see [Bee docs - Upload directory](https://docs.ethswarm.org/docs/access-the-swarm/upload-a-directory/)
      * @see [Bee API reference - `POST /bzz`](https://docs.ethswarm.org/api/#tag/Collection/paths/~1bzz/post)
      */
-    uploadFilesFromDirectory(postageBatchId: string | BatchId, dir: string, options?: CollectionUploadOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
+    uploadFilesFromDirectory(postageBatchId: string | BatchId, dir: string, options?: CollectionUploadOptions & UploadRedundancyOptions, requestOptions?: BeeRequestOptions): Promise<UploadResultWithCid>;
     /**
      * Create a new Tag which is meant for tracking progres of syncing data across network.
     *
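Because each signature widens by intersection (`UploadOptions & UploadRedundancyOptions`), existing call sites keep compiling and the new field is purely additive. A hedged sketch against `uploadData`; the URL and batch ID are placeholders.

```ts
import { Bee, RedundancyLevel } from '@ethersphere/bee-js'

const bee = new Bee('http://localhost:1633')
const batchId = 'f1e4ff753ea1cb923269ed0cda909d13a10d624719edf261e196584e9e764e50'

await bee.uploadData(batchId, new TextEncoder().encode('payload'), {
  pin: true,                               // existing UploadOptions field
  redundancyLevel: RedundancyLevel.INSANE, // new UploadRedundancyOptions field
})
```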
package/dist/types/modules/bytes.d.ts CHANGED

@@ -1,4 +1,4 @@
-import type { BatchId, BeeRequestOptions, Data, ReferenceOrEns, UploadOptions } from '../types';
+import type { BatchId, BeeRequestOptions, Data, DownloadRedundancyOptions, ReferenceOrEns, UploadOptions, UploadRedundancyOptions } from '../types';
 import { UploadResult } from '../types';
 /**
  * Upload data to a Bee node
@@ -8,18 +8,18 @@ import { UploadResult } from '../types';
  * @param postageBatchId Postage BatchId that will be assigned to uploaded data
  * @param options Additional options like tag, encryption, pinning
  */
-export declare function upload(requestOptions: BeeRequestOptions, data: string | Uint8Array, postageBatchId: BatchId, options?: UploadOptions): Promise<UploadResult>;
+export declare function upload(requestOptions: BeeRequestOptions, data: string | Uint8Array, postageBatchId: BatchId, options?: UploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
 /**
  * Download data as a byte array
  *
  * @param ky
  * @param hash Bee content reference
  */
-export declare function download(requestOptions: BeeRequestOptions, hash: ReferenceOrEns): Promise<Data>;
+export declare function download(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, options?: DownloadRedundancyOptions): Promise<Data>;
 /**
  * Download data as a readable stream
  *
  * @param ky
  * @param hash Bee content reference
  */
-export declare function downloadReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns): Promise<ReadableStream<Uint8Array>>;
+export declare function downloadReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, options?: DownloadRedundancyOptions): Promise<ReadableStream<Uint8Array>>;
package/dist/types/modules/bzz.d.ts CHANGED

@@ -1,4 +1,4 @@
-import { BatchId, BeeRequestOptions, Collection, CollectionUploadOptions, Data, FileData, FileUploadOptions, Readable, ReferenceOrEns, UploadResult } from '../types';
+import { BatchId, BeeRequestOptions, Collection, CollectionUploadOptions, Data, DownloadRedundancyOptions, FileData, FileUploadOptions, Readable, ReferenceOrEns, UploadRedundancyOptions, UploadResult } from '../types';
 /**
  * Upload single file
  *
@@ -8,7 +8,7 @@ import { BatchId, BeeRequestOptions, Collection, CollectionUploadOptions, Data,
  * @param name Name that will be attached to the uploaded file. Wraps the data into manifest with set index document.
  * @param options
  */
-export declare function uploadFile(requestOptions: BeeRequestOptions, data: string | Uint8Array | Readable | ArrayBuffer, postageBatchId: BatchId, name?: string, options?: FileUploadOptions): Promise<UploadResult>;
+export declare function uploadFile(requestOptions: BeeRequestOptions, data: string | Uint8Array | Readable | ArrayBuffer, postageBatchId: BatchId, name?: string, options?: FileUploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
 /**
  * Download single file as a buffer
  *
@@ -16,7 +16,7 @@ export declare function uploadFile(requestOptions: BeeRequestOptions, data: stri
  * @param hash Bee file or collection hash
  * @param path If hash is collection then this defines path to a single file in the collection
  */
-export declare function downloadFile(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string): Promise<FileData<Data>>;
+export declare function downloadFile(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string, options?: DownloadRedundancyOptions): Promise<FileData<Data>>;
 /**
  * Download single file as a readable stream
  *
@@ -24,7 +24,7 @@ export declare function downloadFile(requestOptions: BeeRequestOptions, hash: Re
  * @param hash Bee file or collection hash
  * @param path If hash is collection then this defines path to a single file in the collection
  */
-export declare function downloadFileReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string): Promise<FileData<ReadableStream<Uint8Array>>>;
+export declare function downloadFileReadable(requestOptions: BeeRequestOptions, hash: ReferenceOrEns, path?: string, options?: DownloadRedundancyOptions): Promise<FileData<ReadableStream<Uint8Array>>>;
 /**
  * Upload collection
  * @param kyOptions Ky Options for making requests
@@ -32,4 +32,4 @@ export declare function downloadFileReadable(requestOptions: BeeRequestOptions,
  * @param postageBatchId Postage BatchId that will be assigned to uploaded data
  * @param options
  */
-export declare function uploadCollection(requestOptions: BeeRequestOptions, collection: Collection<Uint8Array>, postageBatchId: BatchId, options?: CollectionUploadOptions): Promise<UploadResult>;
+export declare function uploadCollection(requestOptions: BeeRequestOptions, collection: Collection<Uint8Array>, postageBatchId: BatchId, options?: CollectionUploadOptions & UploadRedundancyOptions): Promise<UploadResult>;
package/dist/types/types/index.d.ts CHANGED

@@ -148,6 +148,50 @@ export interface UploadOptions {
      */
     deferred?: boolean;
 }
+/**
+ * Add redundancy to the data being uploaded so that downloaders can download it with better UX.
+ * 0 value is default and does not add any redundancy to the file.
+ */
+export declare enum RedundancyLevel {
+    OFF = 0,
+    MEDIUM = 1,
+    STRONG = 2,
+    INSANE = 3,
+    PARANOID = 4
+}
+export interface UploadRedundancyOptions {
+    redundancyLevel?: RedundancyLevel;
+}
+/**
+ * Specify the retrieve strategy on redundant data.
+ * The possible values are NONE, DATA, PROX and RACE.
+ * Strategy NONE means no prefetching takes place.
+ * Strategy DATA means only data chunks are prefetched.
+ * Strategy PROX means only chunks that are close to the node are prefetched.
+ * Strategy RACE means all chunks are prefetched: n data chunks and k parity chunks. The first n chunks to arrive are used to reconstruct the file.
+ * Multiple strategies can be used in a fallback cascade if the swarm redundancy fallback mode is set to true.
+ * The default strategy is NONE, DATA, falling back to PROX, falling back to RACE
+ */
+export declare enum RedundancyStrategy {
+    NONE = 0,
+    DATA = 1,
+    PROX = 2,
+    RACE = 3
+}
+export interface DownloadRedundancyOptions {
+    /**
+     * Specify the retrieve strategy on redundant data.
+     */
+    redundancyStrategy?: RedundancyStrategy;
+    /**
+     * Specify if the retrieve strategies (chunk prefetching on redundant data) are used in a fallback cascade. The default is true.
+     */
+    fallback?: boolean;
+    /**
+     * Specify the timeout for chunk retrieval. The default is 30 seconds.
+     */
+    timeoutMs?: number;
+}
 export interface FileUploadOptions extends UploadOptions {
     /**
      * Specifies Content-Length for the given data. It is required when uploading with Readable.
package/dist/types/utils/expose.d.ts CHANGED

@@ -6,4 +6,5 @@ export { EthAddress, ethToSwarmAddress, fromLittleEndian, isHexEthAddress, makeE
 export { isNodeReadable, isReadable, isReadableStream, normalizeToReadableStream, readableNodeToWeb, readableWebToNode, } from './stream';
 export { keccak256Hash } from './hash';
 export { makeMaxTarget } from './pss';
-export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur,
+export { getAmountForTtl, getDepthForCapacity, getStampCostInBzz, getStampCostInPlur, getStampEffectiveBytes, getStampMaximumCapacityBytes, getStampTtlSeconds, getStampUsage, } from './stamps';
+export { approximateOverheadForRedundancyLevel, getRedundancyStat, getRedundancyStats } from './redundancy';
package/dist/types/utils/headers.d.ts CHANGED

@@ -1,3 +1,5 @@
-import { BatchId, FileHeaders, UploadOptions } from '../types';
+import { BatchId, DownloadRedundancyOptions, FileHeaders, UploadOptions, UploadRedundancyOptions } from '../types';
 export declare function readFileHeaders(headers: Record<string, string>): FileHeaders;
 export declare function extractUploadHeaders(postageBatchId: BatchId, options?: UploadOptions): Record<string, string>;
+export declare function extractRedundantUploadHeaders(postageBatchId: BatchId, options?: UploadOptions & UploadRedundancyOptions): Record<string, string>;
+export declare function extractDownloadHeaders(options?: DownloadRedundancyOptions): Record<string, string>;
package/dist/types/utils/redundancy.d.ts ADDED

@@ -0,0 +1,21 @@
+import { RedundancyLevel } from '..';
+/**
+ * Returns an approximate multiplier for the overhead of a given redundancy level.
+ * Redundancy level is a tradeoff between storage overhead and fault tolerance.
+ * Use this number to estimate the amount of chunks that will be stored for a given
+ * redundancy level.
+ */
+export declare function approximateOverheadForRedundancyLevel(chunks: number, level: RedundancyLevel, encrypted: boolean): number;
+interface RedundancyStats {
+    label: string;
+    value: RedundancyLevel;
+    errorTolerance: number;
+}
+export declare function getRedundancyStats(): {
+    medium: RedundancyStats;
+    strong: RedundancyStats;
+    insane: RedundancyStats;
+    paranoid: RedundancyStats;
+};
+export declare function getRedundancyStat(level?: string | RedundancyLevel): RedundancyStats;
+export {};
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@ethersphere/bee-js",
-  "version": "6.8.0",
+  "version": "6.9.0",
   "description": "Javascript client for Bee",
   "keywords": [
     "bee",
@@ -27,7 +27,8 @@
   "exports": {
     ".": {
       "import": "./dist/mjs/index.js",
-      "require": "./dist/cjs/index.js"
+      "require": "./dist/cjs/index.js",
+      "types": "./dist/types/index.d.ts"
     }
   },
   "types": "dist/types/index.d.ts",