@net-protocol/storage 0.1.5 → 0.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -4
- package/dist/index.js +44 -13
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +44 -13
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/README.md
CHANGED

@@ -19,7 +19,7 @@ Net Storage supports three storage patterns for different file sizes:
 
 ### Regular Storage
 
-**Best for**: Small data (
+**Best for**: Small data (≤ 20KB)
 **How it works**: Stores data directly as Net messages
 **Use cases**: User settings, configuration, small metadata
 
@@ -27,12 +27,13 @@ Net Storage supports three storage patterns for different file sizes:
 
 **Best for**: Medium files (20KB-80KB)
 **How it works**: Compresses data (gzip) and splits into 20KB chunks
-**Use cases**: Images, documents, medium-sized data
+**Use cases**: Images, documents, medium-sized data
+**Note**: ChunkedStorage is typically used internally by XML Storage. For direct usage, see `StorageClient.prepareChunkedPut()`.
 
 ### XML Storage
 
-**Best for**: Large files (
-**How it works**: Splits large files into 80KB pieces, stores each using ChunkedStorage, maintains references as XML metadata
+**Best for**: Large files (> 20KB) or files containing XML references
+**How it works**: Splits large files into 80KB pieces, stores each using ChunkedStorage (compressed and chunked into 20KB pieces), maintains references as XML metadata
 **Use cases**: Videos, large images, datasets, any large file
 
 ## What can you do with this package?
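Taken together, the updated README text above implies a simple size-based dispatch between the two caller-facing patterns. The sketch below restates that rule; `choosePattern` is an invented helper for illustration, not part of the package's API, and the 20KB threshold comes from the README text.

```ts
// A minimal sketch, not the package's API: the updated README's size rule.
const KB = 1024;

type StoragePattern = "regular" | "xml";

function choosePattern(byteLength: number): StoragePattern {
  // Regular Storage stores <= 20KB directly as Net messages; anything
  // larger goes through XML Storage, which internally compresses and
  // chunks via ChunkedStorage (80KB pieces -> gzip -> 20KB chunks).
  return byteLength <= 20 * KB ? "regular" : "xml";
}

console.log(choosePattern(4 * KB));   // "regular"
console.log(choosePattern(500 * KB)); // "xml"
```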
package/dist/index.js
CHANGED

@@ -1088,17 +1088,37 @@ function useStorageTotalWrites({
 var MAX_XML_DEPTH = 3;
 var CONCURRENT_XML_FETCHES = 3;
 function assembleXmlData(metadata, chunks, references) {
+  const tagPositions = [];
+  for (let i = 0; i < references.length; i++) {
+    const ref = references[i];
+    const chunkData2 = chunks[i];
+    if (!chunkData2) continue;
+    const indexAttr = ref.index !== void 0 ? ` i="${ref.index}"` : "";
+    const operatorAttr = ref.operator ? ` o="${ref.operator}"` : "";
+    const sourceAttr = ref.source ? ` s="${ref.source}"` : "";
+    const xmlTag = `<net k="${ref.hash}" v="${ref.version}"${indexAttr}${operatorAttr}${sourceAttr} />`;
+    const tagIndex = metadata.indexOf(xmlTag);
+    if (tagIndex === -1) {
+      continue;
+    }
+    tagPositions.push({
+      ref,
+      chunk: chunkData2,
+      start: tagIndex,
+      end: tagIndex + xmlTag.length,
+      tag: xmlTag
+    });
+  }
+  tagPositions.sort((a, b) => b.start - a.start);
   let result = metadata;
-
-    const
-
-
-
-
-    const xmlTag = `<net k="${ref.hash}" v="${ref.version}"${indexAttr}${operatorAttr}${sourceAttr} />`;
-    result = result.replace(xmlTag, chunkData2);
+  for (let i = 0; i < tagPositions.length; i++) {
+    const { ref, chunk, start, end } = tagPositions[i];
+    try {
+      result = result.substring(0, start) + chunk + result.substring(end);
+    } catch (error) {
+      throw error;
     }
-  }
+  }
   return result;
 }
 async function fetchFromDirectStorage(reference, operator, client) {
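The rewritten `assembleXmlData` above replaces the old per-tag `String.prototype.replace` loop with a two-pass approach: it first records the position of every `<net ... />` placeholder, then splices the chunk data in by index, working from the highest offset down so that earlier offsets remain valid after each splice. Index-based splicing also sidesteps `replace`'s special handling of `$`-sequences (`$&`, `$'`, and friends) in the replacement string, which can corrupt chunk data that happens to contain them. A minimal sketch of that technique, with illustrative names (`TagPosition`, `spliceAll`) not taken from the package:

```ts
// Sketch of splice-from-the-end replacement: record each placeholder's
// [start, end) range first, then replace from the highest offset down so
// earlier offsets stay valid. Substring concatenation treats "$&"-style
// sequences in the chunk data literally, unlike String.prototype.replace.
interface TagPosition {
  start: number;
  end: number;
  chunk: string;
}

function spliceAll(text: string, positions: TagPosition[]): string {
  const sorted = [...positions].sort((a, b) => b.start - a.start);
  let result = text;
  for (const { start, end, chunk } of sorted) {
    result = result.substring(0, start) + chunk + result.substring(end);
  }
  return result;
}

// Both placeholders resolve, and the "$&" in the first chunk survives
// intact because nothing is interpreted as a replacement pattern.
console.log(spliceAll("<a/><b/>", [
  { start: 0, end: 4, chunk: "payload with $& intact " },
  { start: 4, end: 8, chunk: "second payload" },
]));
```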
@@ -1929,9 +1949,7 @@ var StorageClient = class {
     const transactionConfigs = [];
     for (const xmlChunk of params.xmlChunks) {
       const chunks = chunkDataForStorage(xmlChunk);
-      const chunkedHash = core.keccak256HashString(
-        xmlChunk + params.operatorAddress
-      );
+      const chunkedHash = core.keccak256HashString(xmlChunk);
       chunkedStorageHashes.push(chunkedHash);
       const config = this.prepareChunkedPut({
         key: chunkedHash,
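The hunk above makes chunk keys purely content-derived: `keccak256HashString` is now applied to the chunk alone, where it previously mixed in `params.operatorAddress`, so identical content now yields the same key for every operator and can be deduplicated. A sketch of that content-addressing property, using the js-sha3 package as a stand-in for the package's own `core.keccak256HashString`:

```ts
// Sketch of the content-addressing idea behind the change, not the
// package's code. js-sha3's keccak256 stands in for core.keccak256HashString.
import { keccak256 } from "js-sha3";

const chunkA = "same payload";
const chunkB = "same payload";

// Same content, same key, with no operator address mixed in. Under the
// old scheme (hashing xmlChunk + operatorAddress), two operators storing
// identical content produced different keys.
console.log(keccak256(chunkA) === keccak256(chunkB)); // true
```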
@@ -2141,9 +2159,22 @@ function detectFileTypeFromBase64(base64Data) {
     } catch {
     }
   }
-  if (base64Data.startsWith("SUQz")
+  if (base64Data.startsWith("SUQz")) {
     return "audio/mpeg";
   }
+  if (base64Data.length >= 4) {
+    try {
+      const decoded = atob(base64Data.substring(0, 4));
+      const bytes = new Uint8Array(decoded.length);
+      for (let i = 0; i < decoded.length; i++) {
+        bytes[i] = decoded.charCodeAt(i);
+      }
+      if (bytes[0] === 255 && (bytes[1] & 240) === 240) {
+        return "audio/mpeg";
+      }
+    } catch {
+    }
+  }
   if (base64Data.length > 20) {
     try {
       const decoded = atob(base64Data.substring(0, Math.min(50, base64Data.length)));