rac-delta 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +1 -0
- package/dist/core/adapters/index.d.ts +2 -0
- package/dist/core/adapters/index.d.ts.map +1 -0
- package/dist/core/adapters/index.js +17 -0
- package/dist/core/adapters/storage-adapter.d.ts +125 -0
- package/dist/core/adapters/storage-adapter.d.ts.map +1 -0
- package/dist/core/adapters/storage-adapter.js +14 -0
- package/dist/core/config/index.d.ts +2 -0
- package/dist/core/config/index.d.ts.map +1 -0
- package/dist/core/config/index.js +17 -0
- package/dist/core/config/rac-delta-config.d.ts +132 -0
- package/dist/core/config/rac-delta-config.d.ts.map +1 -0
- package/dist/core/config/rac-delta-config.js +2 -0
- package/dist/core/exceptions.d.ts +25 -0
- package/dist/core/exceptions.d.ts.map +1 -0
- package/dist/core/exceptions.js +51 -0
- package/dist/core/models/chunk.d.ts +12 -0
- package/dist/core/models/chunk.d.ts.map +1 -0
- package/dist/core/models/chunk.js +2 -0
- package/dist/core/models/delta-plan.d.ts +12 -0
- package/dist/core/models/delta-plan.d.ts.map +1 -0
- package/dist/core/models/delta-plan.js +2 -0
- package/dist/core/models/file-entry.d.ts +9 -0
- package/dist/core/models/file-entry.d.ts.map +1 -0
- package/dist/core/models/file-entry.js +2 -0
- package/dist/core/models/index.d.ts +5 -0
- package/dist/core/models/index.d.ts.map +1 -0
- package/dist/core/models/index.js +20 -0
- package/dist/core/models/rd-index.d.ts +8 -0
- package/dist/core/models/rd-index.d.ts.map +1 -0
- package/dist/core/models/rd-index.js +2 -0
- package/dist/core/pipelines/download-pipeline.d.ts +142 -0
- package/dist/core/pipelines/download-pipeline.d.ts.map +1 -0
- package/dist/core/pipelines/download-pipeline.js +64 -0
- package/dist/core/pipelines/index.d.ts +3 -0
- package/dist/core/pipelines/index.d.ts.map +1 -0
- package/dist/core/pipelines/index.js +18 -0
- package/dist/core/pipelines/upload-pipeline.d.ts +60 -0
- package/dist/core/pipelines/upload-pipeline.d.ts.map +1 -0
- package/dist/core/pipelines/upload-pipeline.js +34 -0
- package/dist/core/services/delta-service.d.ts +76 -0
- package/dist/core/services/delta-service.d.ts.map +1 -0
- package/dist/core/services/delta-service.js +2 -0
- package/dist/core/services/hasher-service.d.ts +47 -0
- package/dist/core/services/hasher-service.d.ts.map +1 -0
- package/dist/core/services/hasher-service.js +2 -0
- package/dist/core/services/index.d.ts +5 -0
- package/dist/core/services/index.d.ts.map +1 -0
- package/dist/core/services/index.js +20 -0
- package/dist/core/services/reconstruction-service.d.ts +99 -0
- package/dist/core/services/reconstruction-service.d.ts.map +1 -0
- package/dist/core/services/reconstruction-service.js +4 -0
- package/dist/core/services/validation-service.d.ts +18 -0
- package/dist/core/services/validation-service.d.ts.map +1 -0
- package/dist/core/services/validation-service.js +2 -0
- package/dist/core/types/index.d.ts +2 -0
- package/dist/core/types/index.d.ts.map +1 -0
- package/dist/core/types/index.js +17 -0
- package/dist/core/types/types.d.ts +3 -0
- package/dist/core/types/types.d.ts.map +1 -0
- package/dist/core/types/types.js +2 -0
- package/dist/core/utils/index.d.ts +3 -0
- package/dist/core/utils/index.d.ts.map +1 -0
- package/dist/core/utils/index.js +18 -0
- package/dist/core/utils/invariant.d.ts +2 -0
- package/dist/core/utils/invariant.d.ts.map +1 -0
- package/dist/core/utils/invariant.js +11 -0
- package/dist/core/utils/stream-to-buffer.d.ts +3 -0
- package/dist/core/utils/stream-to-buffer.d.ts.map +1 -0
- package/dist/core/utils/stream-to-buffer.js +10 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +29 -0
- package/dist/infrastructure/adapters/azure-blob-storage-adapter.d.ts +24 -0
- package/dist/infrastructure/adapters/azure-blob-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/azure-blob-storage-adapter.js +149 -0
- package/dist/infrastructure/adapters/gcs-storage-adapter.d.ts +20 -0
- package/dist/infrastructure/adapters/gcs-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/gcs-storage-adapter.js +101 -0
- package/dist/infrastructure/adapters/http-storage-adapter.d.ts +23 -0
- package/dist/infrastructure/adapters/http-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/http-storage-adapter.js +154 -0
- package/dist/infrastructure/adapters/local-storage-adapter.d.ts +23 -0
- package/dist/infrastructure/adapters/local-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/local-storage-adapter.js +124 -0
- package/dist/infrastructure/adapters/s3-storage-adapter.d.ts +24 -0
- package/dist/infrastructure/adapters/s3-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/s3-storage-adapter.js +139 -0
- package/dist/infrastructure/adapters/ssh-storage-adapter.d.ts +28 -0
- package/dist/infrastructure/adapters/ssh-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/ssh-storage-adapter.js +237 -0
- package/dist/infrastructure/adapters/url-storage-adapter.d.ts +14 -0
- package/dist/infrastructure/adapters/url-storage-adapter.d.ts.map +1 -0
- package/dist/infrastructure/adapters/url-storage-adapter.js +92 -0
- package/dist/infrastructure/chunk-sources/disk-chunk-source.d.ts +12 -0
- package/dist/infrastructure/chunk-sources/disk-chunk-source.d.ts.map +1 -0
- package/dist/infrastructure/chunk-sources/disk-chunk-source.js +61 -0
- package/dist/infrastructure/chunk-sources/index.d.ts +4 -0
- package/dist/infrastructure/chunk-sources/index.d.ts.map +1 -0
- package/dist/infrastructure/chunk-sources/index.js +19 -0
- package/dist/infrastructure/chunk-sources/memory-chunk-source.d.ts +9 -0
- package/dist/infrastructure/chunk-sources/memory-chunk-source.d.ts.map +1 -0
- package/dist/infrastructure/chunk-sources/memory-chunk-source.js +29 -0
- package/dist/infrastructure/chunk-sources/storage-chunk-source.d.ts +21 -0
- package/dist/infrastructure/chunk-sources/storage-chunk-source.d.ts.map +1 -0
- package/dist/infrastructure/chunk-sources/storage-chunk-source.js +150 -0
- package/dist/infrastructure/client.d.ts +45 -0
- package/dist/infrastructure/client.d.ts.map +1 -0
- package/dist/infrastructure/client.js +52 -0
- package/dist/infrastructure/factories/pipeline-factory.d.ts +15 -0
- package/dist/infrastructure/factories/pipeline-factory.d.ts.map +1 -0
- package/dist/infrastructure/factories/pipeline-factory.js +26 -0
- package/dist/infrastructure/factories/service-factory.d.ts +11 -0
- package/dist/infrastructure/factories/service-factory.d.ts.map +1 -0
- package/dist/infrastructure/factories/service-factory.js +17 -0
- package/dist/infrastructure/factories/storage-adpater-factory.d.ts +41 -0
- package/dist/infrastructure/factories/storage-adpater-factory.d.ts.map +1 -0
- package/dist/infrastructure/factories/storage-adpater-factory.js +33 -0
- package/dist/infrastructure/pipelines/default-hash-download-pipeline.d.ts +27 -0
- package/dist/infrastructure/pipelines/default-hash-download-pipeline.d.ts.map +1 -0
- package/dist/infrastructure/pipelines/default-hash-download-pipeline.js +211 -0
- package/dist/infrastructure/pipelines/default-hash-upload-pipeline.d.ts +19 -0
- package/dist/infrastructure/pipelines/default-hash-upload-pipeline.d.ts.map +1 -0
- package/dist/infrastructure/pipelines/default-hash-upload-pipeline.js +170 -0
- package/dist/infrastructure/pipelines/default-url-download-pipeline.d.ts +30 -0
- package/dist/infrastructure/pipelines/default-url-download-pipeline.d.ts.map +1 -0
- package/dist/infrastructure/pipelines/default-url-download-pipeline.js +198 -0
- package/dist/infrastructure/pipelines/default-url-upload-pipeline.d.ts +20 -0
- package/dist/infrastructure/pipelines/default-url-upload-pipeline.d.ts.map +1 -0
- package/dist/infrastructure/pipelines/default-url-upload-pipeline.js +126 -0
- package/dist/infrastructure/services/hash-wasm-hasher-service.d.ts +13 -0
- package/dist/infrastructure/services/hash-wasm-hasher-service.d.ts.map +1 -0
- package/dist/infrastructure/services/hash-wasm-hasher-service.js +113 -0
- package/dist/infrastructure/services/memory-delta-service.d.ts +17 -0
- package/dist/infrastructure/services/memory-delta-service.d.ts.map +1 -0
- package/dist/infrastructure/services/memory-delta-service.js +198 -0
- package/dist/infrastructure/services/memory-reconstruction-service.d.ts +25 -0
- package/dist/infrastructure/services/memory-reconstruction-service.d.ts.map +1 -0
- package/dist/infrastructure/services/memory-reconstruction-service.js +329 -0
- package/dist/infrastructure/services/memory-validation-service.d.ts +9 -0
- package/dist/infrastructure/services/memory-validation-service.d.ts.map +1 -0
- package/dist/infrastructure/services/memory-validation-service.js +33 -0
- package/package.json +43 -0
package/dist/infrastructure/services/memory-delta-service.js
ADDED
@@ -0,0 +1,198 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.MemoryDeltaService = void 0;
+const path_1 = require("path");
+const promises_1 = require("fs/promises");
+const invariant_1 = require("../../core/utils/invariant");
+class MemoryDeltaService {
+    hasher;
+    constructor(hasher) {
+        this.hasher = hasher;
+    }
+    async createIndexFromDirectory(rootPath, chunkSize, concurrency = 6, ignorePatterns) {
+        (0, invariant_1.invariant)('rootPath must be a valid string', typeof rootPath === 'string' && rootPath !== '');
+        const rootDir = (0, path_1.isAbsolute)(rootPath) ? rootPath : (0, path_1.resolve)(process.cwd(), rootPath);
+        const filePaths = [];
+        for await (const relativePath of this.walkFiles(rootDir, undefined, ignorePatterns)) {
+            filePaths.push(relativePath.replace(/\\/g, '/'));
+        }
+        const ignoredFiles = new Set(['rd-index.json']);
+        const filteredPaths = filePaths.filter((p) => !ignoredFiles.has(p));
+        const results = new Array(filteredPaths.length);
+        let currentIndex = 0;
+        const worker = async () => {
+            while (true) {
+                const i = currentIndex++;
+                if (i >= filteredPaths.length) {
+                    break;
+                }
+                const relativePath = filteredPaths[i];
+                const fullPath = (0, path_1.join)(rootDir, relativePath);
+                const stats = await (0, promises_1.stat)(fullPath);
+                const fileEntry = await this.hasher.hashFile(relativePath, rootDir, chunkSize);
+                results[i] = {
+                    ...fileEntry,
+                    path: relativePath,
+                    modifiedAt: stats.mtimeMs,
+                };
+            }
+        };
+        await Promise.all(Array.from({ length: concurrency }, () => worker()));
+        return {
+            version: 1,
+            createdAt: Date.now(),
+            chunkSize,
+            files: results,
+        };
+    }
+    async createFileEntryFromStream(stream, path) {
+        const fileHasher = await this.hasher.createStreamingHasher();
+        const chunks = await this.hasher.hashStream(stream, (chunk) => {
+            fileHasher.update(chunk);
+        });
+        const fileHash = fileHasher.digest('hex');
+        const totalSize = chunks.reduce((total, chunk) => total + chunk.size, 0);
+        const fileEntry = {
+            path: path.replace(/\\/g, '/'),
+            size: totalSize,
+            hash: fileHash,
+            modifiedAt: Date.now(),
+            chunks,
+        };
+        return fileEntry;
+    }
+    async *walkFiles(dir, prefix = '', ignorePatterns) {
+        const entries = await (0, promises_1.readdir)(dir, { withFileTypes: true });
+        for (const entry of entries) {
+            const absPath = (0, path_1.join)(dir, entry.name);
+            const relPath = (0, path_1.join)(prefix, entry.name).replace(/\\/g, '/');
+            if (ignorePatterns?.length && this.matchesAnyPattern(relPath, ignorePatterns)) {
+                continue;
+            }
+            if (entry.isFile()) {
+                yield relPath;
+            }
+            else if (entry.isDirectory()) {
+                yield* this.walkFiles(absPath, relPath, ignorePatterns);
+            }
+        }
+    }
+    matchesAnyPattern(path, patterns) {
+        for (const pattern of patterns) {
+            const regex = this.globToRegex(pattern);
+            if (regex.test(path)) {
+                return true;
+            }
+        }
+        return false;
+    }
+    globToRegex(glob) {
+        glob = glob.replace(/\\/g, '/');
+        let pattern = glob.replace(/([.+^${}()|[\]\\])/g, '\\$1');
+        pattern = pattern.replace(/\*\*\/?/g, '(.*/)?');
+        pattern = pattern.replace(/\*/g, '[^/]*');
+        pattern = '^' + pattern;
+        return new RegExp(pattern, 'i');
+    }
+    // source: where the changes come
+    // target: where the changes will apply
+    compare(source, target) {
+        const deltaPlan = {
+            deletedFiles: [],
+            missingChunks: [],
+            obsoleteChunks: [],
+            newAndModifiedFiles: [],
+        };
+        const targetFilesMap = new Map();
+        const sourcePaths = new Set(source.files.map((f) => f.path));
+        if (target) {
+            for (const file of target.files) {
+                targetFilesMap.set(file.path, file);
+                // Check for deleted files (they exist in target but not in source, so they will be removed from target)
+                if (!sourcePaths.has(file.path)) {
+                    deltaPlan.deletedFiles.push(file.path);
+                    for (const chunk of file.chunks) {
+                        deltaPlan.obsoleteChunks.push({ ...chunk, filePath: file.path });
+                    }
+                }
+            }
+        }
+        // Process source files
+        for (const srcFile of source.files) {
+            const targetFile = targetFilesMap.get(srcFile.path);
+            // New file (not in target)
+            if (!targetFile) {
+                deltaPlan.missingChunks.push(...srcFile.chunks.map((c) => ({ ...c, filePath: srcFile.path })));
+                deltaPlan.newAndModifiedFiles.push(srcFile);
+                continue;
+            }
+            const targetChunks = new Map(targetFile.chunks.map((c) => [`${c.hash}@${c.offset}`, c]));
+            const sourceChunkKeys = new Set(srcFile.chunks.map((c) => `${c.hash}@${c.offset}`));
+            // File exists -> compare chunks
+            for (const chunk of srcFile.chunks) {
+                const key = `${chunk.hash}@${chunk.offset}`;
+                if (!targetChunks.has(key)) {
+                    deltaPlan.missingChunks.push({ ...chunk, filePath: srcFile.path });
+                    deltaPlan.newAndModifiedFiles.push(srcFile);
+                }
+            }
+            // Check obsolete chunks in target that are not in source
+            for (const chunk of targetFile.chunks) {
+                const key = `${chunk.hash}@${chunk.offset}`;
+                if (!sourceChunkKeys.has(key)) {
+                    deltaPlan.obsoleteChunks.push({ ...chunk, filePath: srcFile.path });
+                }
+            }
+        }
+        return deltaPlan;
+    }
+    mergePlans(base, updates) {
+        const mergeFiles = (a, b) => {
+            const seen = new Set();
+            const result = [];
+            for (const file of [...a, ...b]) {
+                if (!seen.has(file.path)) {
+                    seen.add(file.path);
+                    result.push(file);
+                }
+            }
+            return result;
+        };
+        const mergeChunks = (a, b) => {
+            const seen = new Set();
+            const result = [];
+            for (const chunk of [...a, ...b]) {
+                const key = `${chunk.hash}@${chunk.offset}`;
+                if (!seen.has(key)) {
+                    seen.add(key);
+                    result.push(chunk);
+                }
+            }
+            return result;
+        };
+        const mergeStrings = (a, b) => {
+            return Array.from(new Set([...a, ...b]));
+        };
+        return {
+            deletedFiles: mergeStrings(base.deletedFiles, updates.deletedFiles),
+            missingChunks: mergeChunks(base.missingChunks, updates.missingChunks),
+            obsoleteChunks: mergeChunks(base.obsoleteChunks, updates.obsoleteChunks),
+            newAndModifiedFiles: mergeFiles(base.newAndModifiedFiles, updates.newAndModifiedFiles),
+        };
+    }
+    async compareForUpload(localIndex, remoteIndex) {
+        const deltaPlan = this.compare(localIndex, remoteIndex);
+        const allNeededHashes = new Set();
+        for (const file of localIndex.files) {
+            for (const chunk of file.chunks) {
+                allNeededHashes.add(chunk.hash);
+            }
+        }
+        deltaPlan.obsoleteChunks = deltaPlan.obsoleteChunks.filter((chunk) => !allNeededHashes.has(chunk.hash));
+        return deltaPlan;
+    }
+    async compareForDownload(localIndex, remoteIndex) {
+        return this.compare(remoteIndex, localIndex);
+    }
+}
+exports.MemoryDeltaService = MemoryDeltaService;
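For orientation, here is a minimal usage sketch of the MemoryDeltaService compiled above; it is not taken from the package's documentation. It assumes that MemoryDeltaService and the HasherService, RDIndex and DeltaPlan types are re-exported from the package root (the root index is not shown in this diff) and that a HasherService implementation, such as the bundled hash-wasm hasher, has already been constructed.

    // Hypothetical usage sketch -- import path and re-exported names are assumptions.
    import { MemoryDeltaService } from 'rac-delta';
    import type { DeltaPlan, HasherService, RDIndex } from 'rac-delta';

    export async function planUpload(
        hasher: HasherService,       // e.g. the bundled hash-wasm based hasher service
        localDir: string,
        remoteIndex: RDIndex,
    ): Promise<DeltaPlan> {
        const delta = new MemoryDeltaService(hasher);
        // Hash every file under localDir into an index: 1 MiB chunks,
        // 6 concurrent workers, *.log files ignored.
        const localIndex = await delta.createIndexFromDirectory(localDir, 1024 * 1024, 6, ['**/*.log']);
        // compareForUpload() keeps a chunk out of the obsolete list while any
        // local file still references its hash.
        return delta.compareForUpload(localIndex, remoteIndex);
    }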
package/dist/infrastructure/services/memory-reconstruction-service.d.ts
ADDED
@@ -0,0 +1,25 @@
+import { Readable } from 'stream';
+import { ChunkSource, HasherService, ReconstructionOptions, ReconstructionService } from '../../core/services';
+import { DeltaPlan, FileEntry } from '../../core/models';
+import { Nullish } from '../../core/types';
+export declare class MemoryReconstructionService implements ReconstructionService {
+    private readonly hasher;
+    constructor(hasher: HasherService);
+    reconstructAll(plan: DeltaPlan, outputDir: string, chunkSource: ChunkSource, options?: Nullish<ReconstructionOptions>): Promise<void>;
+    reconstructFile(entry: FileEntry, outputPath: string, chunkSource: ChunkSource, options?: ReconstructionOptions): Promise<void>;
+    reconstructToStream(entry: FileEntry, chunkSource: ChunkSource): Promise<Readable>;
+    private fileExists;
+    /** Performs partial reconstruction directly in the existing file */
+    private reconstructInPlace;
+    /** Reconstructs file fully or partially via .tmp file and replaces it atomically */
+    private reconstructToTemp;
+    /**
+     * Tries to read an existing chunk from a file descriptor if possible.
+     * Falls back to fetching from chunkSource if hash does not match.
+     */
+    private processChunkDataSmart;
+    private getChunkData;
+    private writeToStream;
+    private fetchChunksSmart;
+}
+//# sourceMappingURL=memory-reconstruction-service.d.ts.map
package/dist/infrastructure/services/memory-reconstruction-service.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"memory-reconstruction-service.d.ts","sourceRoot":"","sources":["../../../src/infrastructure/services/memory-reconstruction-service.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,QAAQ,EAAyB,MAAM,QAAQ,CAAC;AAIzD,OAAO,EACL,WAAW,EAEX,aAAa,EACb,qBAAqB,EACrB,qBAAqB,EACtB,MAAM,qBAAqB,CAAC;AAE7B,OAAO,EAAE,SAAS,EAAE,SAAS,EAAS,MAAM,mBAAmB,CAAC;AAEhE,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAE3C,qBAAa,2BAA4B,YAAW,qBAAqB;IAC3D,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAN,MAAM,EAAE,aAAa;IAE5C,cAAc,CAClB,IAAI,EAAE,SAAS,EACf,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,WAAW,EACxB,OAAO,CAAC,EAAE,OAAO,CAAC,qBAAqB,CAAC,GACvC,OAAO,CAAC,IAAI,CAAC;IAmFV,eAAe,CACnB,KAAK,EAAE,SAAS,EAChB,UAAU,EAAE,MAAM,EAClB,WAAW,EAAE,WAAW,EACxB,OAAO,GAAE,qBAIR,GACA,OAAO,CAAC,IAAI,CAAC;IAgDV,mBAAmB,CAAC,KAAK,EAAE,SAAS,EAAE,WAAW,EAAE,WAAW,GAAG,OAAO,CAAC,QAAQ,CAAC;YAsB1E,UAAU;IASxB,oEAAoE;YACtD,kBAAkB;IAiChC,oFAAoF;YACtE,iBAAiB;IAmF/B;;;OAGG;YACW,qBAAqB;YA2BrB,YAAY;YAcZ,aAAa;YAuDZ,gBAAgB;CAmChC"}
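Before the implementation below, a brief hedged sketch of the declared reconstructToStream API: it rebuilds a single FileEntry as a Readable instead of writing it to disk, which is useful for piping a file straight to an HTTP response or another destination. The import path and the origin of entry and chunkSource are assumptions.

    // Hypothetical sketch -- assumes root re-exports and an existing chunk source.
    import { createWriteStream } from 'fs';
    import { pipeline } from 'stream/promises';
    import { MemoryReconstructionService } from 'rac-delta';
    import type { ChunkSource, FileEntry, HasherService } from 'rac-delta';

    export async function streamEntryToDisk(
        hasher: HasherService,
        entry: FileEntry,          // one entry from an rd-index
        chunkSource: ChunkSource,  // disk, memory or storage backed
        destination: string,
    ): Promise<void> {
        const reconstruction = new MemoryReconstructionService(hasher);
        // reconstructToStream() returns a Readable that is fed chunk by chunk.
        const readable = await reconstruction.reconstructToStream(entry, chunkSource);
        await pipeline(readable, createWriteStream(destination));
    }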
package/dist/infrastructure/services/memory-reconstruction-service.js
ADDED
@@ -0,0 +1,329 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.MemoryReconstructionService = void 0;
+const promises_1 = require("fs/promises");
+const path_1 = require("path");
+const stream_1 = require("stream");
+const promises_2 = require("stream/promises");
+const fs_1 = require("fs");
+const services_1 = require("../../core/services");
+const exceptions_1 = require("../../core/exceptions");
+const utils_1 = require("../../core/utils");
+class MemoryReconstructionService {
+    hasher;
+    constructor(hasher) {
+        this.hasher = hasher;
+    }
+    async reconstructAll(plan, outputDir, chunkSource, options) {
+        const dir = (0, path_1.isAbsolute)(outputDir) ? outputDir : (0, path_1.resolve)(process.cwd(), outputDir);
+        await (0, promises_1.mkdir)(dir, { recursive: true });
+        const files = [...plan.newAndModifiedFiles];
+        const total = files.length;
+        let completed = 0;
+        let globalBytesWritten = 0;
+        let globalBytesReceived = 0;
+        const startTime = Date.now();
+        let error = null;
+        const queue = files.map((entry, index) => ({ entry, index }));
+        const fileProgressMap = new Map();
+        const isNetworkSource = typeof chunkSource.streamChunks === 'function';
+        const totalNetworkBytes = isNetworkSource
+            ? plan.missingChunks.reduce((accumulator, chunk) => accumulator + chunk.size, 0)
+            : 0;
+        const totalBytesToWrite = files.reduce((accumulator, file) => accumulator + (file.size ?? 0), 0);
+        const next = async () => {
+            while (queue.length && !error) {
+                const { entry } = queue.shift();
+                const outputPath = (0, path_1.join)(dir, entry.path);
+                try {
+                    await this.reconstructFile(entry, outputPath, chunkSource, {
+                        ...options,
+                        onProgress: (fileProgress, fileBytesWritten, fileBytesReceived) => {
+                            fileProgressMap.set(entry.path, fileProgress);
+                            globalBytesWritten += fileBytesWritten ?? 0;
+                            if (isNetworkSource) {
+                                globalBytesReceived += fileBytesReceived ?? 0;
+                            }
+                            const reconstructProgress = Math.min((globalBytesWritten / totalBytesToWrite) * 100, 100);
+                            const networkProgress = isNetworkSource
+                                ? Math.min((globalBytesReceived / totalNetworkBytes) * 100, 100)
+                                : undefined;
+                            const elapsed = Math.max((Date.now() - startTime) / 1000, 0.001);
+                            const diskSpeed = globalBytesWritten / elapsed;
+                            const netSpeed = isNetworkSource ? globalBytesReceived / elapsed : 0;
+                            if (isNetworkSource) {
+                                options?.onProgress?.(reconstructProgress, diskSpeed, networkProgress, netSpeed);
+                            }
+                            else {
+                                options?.onProgress?.(reconstructProgress, diskSpeed);
+                            }
+                        },
+                    });
+                    completed++;
+                    fileProgressMap.set(entry.path, 100);
+                }
+                catch (err) {
+                    error = err instanceof Error ? err : new Error(String(err));
+                    break;
+                }
+            }
+        };
+        const workers = Array.from({ length: Math.min(options?.fileConcurrency ?? 5, total) }, next);
+        await Promise.allSettled(workers);
+        if (error) {
+            throw error;
+        }
+    }
+    async reconstructFile(entry, outputPath, chunkSource, options = {
+        forceRebuild: false,
+        verifyAfterRebuild: true,
+        inPlaceReconstructionThreshold: services_1.DEFAULT_IN_PLACE_RECONSTRUCTION_THRESHOLD,
+    }) {
+        const defOutputPath = (0, path_1.isAbsolute)(outputPath) ? outputPath : (0, path_1.resolve)(process.cwd(), outputPath);
+        const { forceRebuild, verifyAfterRebuild, inPlaceReconstructionThreshold } = options;
+        const tempPath = `${defOutputPath}.tmp`;
+        await (0, promises_1.mkdir)((0, path_1.dirname)(defOutputPath), { recursive: true });
+        const exists = await this.fileExists(defOutputPath);
+        if (exists && !forceRebuild) {
+            const matches = await this.hasher.verifyFile(defOutputPath, entry.hash);
+            if (matches) {
+                return;
+            }
+        }
+        const stats = exists ? await (0, promises_1.stat)(defOutputPath) : null;
+        const isLargeFile = stats && stats.size > inPlaceReconstructionThreshold;
+        const existingLargeFileWithoutRebuild = exists && !forceRebuild && isLargeFile;
+        const progressCb = (chunkBytes, netBytes, processedChunks) => {
+            const fileProgress = (processedChunks / entry.chunks.length) * 100;
+            options.onProgress?.(fileProgress, chunkBytes, netBytes);
+        };
+        try {
+            if (existingLargeFileWithoutRebuild && inPlaceReconstructionThreshold !== 0) {
+                await this.reconstructInPlace(entry, defOutputPath, chunkSource, progressCb);
+            }
+            else {
+                await this.reconstructToTemp(entry, defOutputPath, tempPath, chunkSource, !!verifyAfterRebuild, exists, !!options.forceRebuild, progressCb);
+            }
+        }
+        catch (err) {
+            await (0, promises_1.rm)(tempPath, { force: true });
+            throw err;
+        }
+    }
+    // This will reconstruct to stream, not to disk
+    async reconstructToStream(entry, chunkSource) {
+        const output = new stream_1.PassThrough({ highWaterMark: 1024 * 1024 });
+        const chunks = entry.chunks ?? [];
+        // Starts async reconstruction without blocking stream return
+        const processChunks = async () => {
+            try {
+                for await (const { data } of this.fetchChunksSmart(chunks, chunkSource)) {
+                    await this.writeToStream(data, output);
+                }
+                output.end();
+            }
+            catch (err) {
+                output.destroy(err instanceof Error ? err : new Error(String(err)));
+            }
+        };
+        void processChunks();
+        return output;
+    }
+    async fileExists(path) {
+        try {
+            await (0, promises_1.access)(path);
+            return true;
+        }
+        catch {
+            return false;
+        }
+    }
+    /** Performs partial reconstruction directly in the existing file */
+    async reconstructInPlace(entry, outputPath, chunkSource, progressCb) {
+        const fd = await (0, promises_1.open)(outputPath, 'r+');
+        const isNetworkSource = typeof chunkSource.streamChunks === 'function';
+        try {
+            const chunkMap = new Map(entry.chunks.map((c) => [c.hash, c]));
+            let processed = 0;
+            for await (const { hash, data } of this.fetchChunksSmart(entry.chunks, chunkSource, false)) {
+                const chunk = chunkMap.get(hash);
+                if (!chunk) {
+                    continue;
+                }
+                const buffer = Buffer.isBuffer(data) ? data : await (0, utils_1.streamToBuffer)(data);
+                await fd.write(buffer, 0, buffer.length, chunk.offset);
+                processed++;
+                const netBytes = isNetworkSource ? buffer.length : 0;
+                progressCb?.(buffer.length, netBytes, processed);
+            }
+        }
+        finally {
+            await fd.close();
+        }
+    }
+    /** Reconstructs file fully or partially via .tmp file and replaces it atomically */
+    async reconstructToTemp(entry, outputPath, tempPath, chunkSource, verifyAfterRebuild, fileExists, force, progressCb) {
+        const writeStream = (0, fs_1.createWriteStream)(tempPath, { flags: 'w' });
+        let writeError = null;
+        const onWriteError = (err) => {
+            writeError = err;
+        };
+        writeStream.once('error', onWriteError);
+        let processed = 0;
+        const isNetworkSource = typeof chunkSource.streamChunks === 'function';
+        try {
+            // Partial reconstruction via reading from existing file
+            if (fileExists && !force) {
+                const readFd = await (0, promises_1.open)(outputPath, 'r');
+                try {
+                    for (const chunk of entry.chunks) {
+                        await this.processChunkDataSmart(chunk, readFd, chunkSource, writeStream);
+                        processed++;
+                        progressCb?.(chunk.size, 0, processed);
+                    }
+                }
+                finally {
+                    await readFd.close();
+                }
+            }
+            // Full reconstruction
+            if (!fileExists || force) {
+                try {
+                    for await (const { data } of this.fetchChunksSmart(entry.chunks, chunkSource)) {
+                        let totalWritten = 0;
+                        await this.writeToStream(data, writeStream, (totalBytes) => {
+                            totalWritten = totalBytes;
+                        });
+                        processed++;
+                        progressCb?.(totalWritten, isNetworkSource ? totalWritten : 0, processed);
+                    }
+                    if (writeError) {
+                        throw writeError;
+                    }
+                }
+                catch (err) {
+                    writeStream.destroy();
+                    throw err;
+                }
+                finally {
+                    writeStream.removeListener('error', onWriteError);
+                    writeStream.end();
+                }
+            }
+            writeStream.end();
+            await new Promise((resolve, reject) => {
+                writeStream.on('finish', resolve);
+                writeStream.on('error', reject);
+            });
+            if (verifyAfterRebuild) {
+                const valid = await this.hasher.verifyFile(tempPath, entry.hash);
+                if (!valid) {
+                    throw new Error(`Hash mismatch after reconstructing ${entry.path}`);
+                }
+            }
+            await (0, promises_1.rename)(tempPath, outputPath);
+        }
+        catch (err) {
+            throw err;
+        }
+    }
+    /**
+     * Tries to read an existing chunk from a file descriptor if possible.
+     * Falls back to fetching from chunkSource if hash does not match.
+     */
+    async processChunkDataSmart(chunk, fd, chunkSource, writeStream) {
+        const buffer = Buffer.alloc(chunk.size);
+        await fd.read(buffer, 0, chunk.size, chunk.offset);
+        if (await this.hasher.verifyChunk(buffer, chunk.hash)) {
+            await this.writeToStream(buffer, writeStream);
+            return;
+        }
+        // fallback
+        const data = await this.getChunkData(chunk, chunkSource);
+        if (Buffer.isBuffer(data)) {
+            await this.writeToStream(data, writeStream);
+            return;
+        }
+        if (writeStream) {
+            await (0, promises_2.pipeline)(data, writeStream, { end: false });
+        }
+    }
+    async getChunkData(chunk, chunkSource) {
+        if (!chunkSource) {
+            throw new Error(`ChunkSource not provided for chunk ${chunk.hash}`);
+        }
+        const data = await chunkSource.getChunk(chunk.hash);
+        if (!data) {
+            throw new exceptions_1.ChunkNotFoundException(`${chunk.hash} not found in storage`);
+        }
+        return data;
+    }
+    async writeToStream(data, stream, onFinish) {
+        if (!stream) {
+            return;
+        }
+        if (Buffer.isBuffer(data)) {
+            const canContinue = stream.write(data);
+            if (!canContinue) {
+                await new Promise((resolve) => stream.once('drain', resolve));
+            }
+            onFinish?.(data.length);
+            return;
+        }
+        let totalBytes = 0;
+        await new Promise((resolve, reject) => {
+            const onError = (err) => {
+                cleanup();
+                reject(err instanceof Error ? err : new Error(String(err)));
+            };
+            const onEnd = () => {
+                cleanup();
+                onFinish?.(totalBytes);
+                resolve();
+            };
+            const cleanup = () => {
+                data.off('error', onError);
+                data.off('end', onEnd);
+                stream.off('error', onError);
+            };
+            data.on('error', onError);
+            stream.on('error', onError);
+            data.on('end', onEnd);
+            data.on('data', (chunk) => {
+                totalBytes += chunk.length;
+                const canContinue = stream.write(chunk);
+                if (!canContinue) {
+                    data.pause();
+                    stream.once('drain', () => data.resume());
+                }
+            });
+        });
+    }
+    async *fetchChunksSmart(chunks, chunkSource, preserveOrder = true) {
+        const hashes = chunks.map((c) => c.hash);
+        // 1. streamChunks available: streaming
+        if (chunkSource.streamChunks) {
+            for await (const { hash, data } of chunkSource.streamChunks(hashes, { preserveOrder })) {
+                yield { hash: hash, data };
+            }
+            return;
+        }
+        // 2. getChunks available: download all chunks
+        if (chunkSource.getChunks) {
+            const map = await chunkSource.getChunks(hashes);
+            for (const hash of hashes) {
+                const data = map.get(hash);
+                if (data) {
+                    yield { hash, data };
+                }
+            }
+            return;
+        }
+        // 3. fallback: individual getChunk
+        for (const chunk of chunks) {
+            const data = await chunkSource.getChunk(chunk.hash);
+            yield { hash: chunk.hash, data };
+        }
+    }
+}
+exports.MemoryReconstructionService = MemoryReconstructionService;
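To show how the reconstruction service above might be driven end to end, here is a hedged sketch that applies a download-style DeltaPlan to a local directory. The fileConcurrency and onProgress option names are the ones read by the compiled code above; the import path and the origin of plan and chunkSource are assumptions, and the options object may need adjusting if ReconstructionOptions types these fields differently.

    // Hypothetical sketch -- assumes root re-exports of these names.
    import { MemoryReconstructionService } from 'rac-delta';
    import type { ChunkSource, DeltaPlan, HasherService } from 'rac-delta';

    export async function applyDownloadPlan(
        hasher: HasherService,
        plan: DeltaPlan,           // e.g. from MemoryDeltaService.compareForDownload()
        chunkSource: ChunkSource,  // disk, memory or storage backed
        outputDir: string,
    ): Promise<void> {
        const reconstruction = new MemoryReconstructionService(hasher);
        await reconstruction.reconstructAll(plan, outputDir, chunkSource, {
            fileConcurrency: 5,
            // Called with overall progress (%) and disk write speed (bytes/s);
            // network progress and speed are also passed for network-backed sources.
            onProgress: (progress, diskSpeed) => {
                console.log(`rebuild ${progress.toFixed(1)}% @ ${(diskSpeed / 1e6).toFixed(1)} MB/s`);
            },
        });
    }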
package/dist/infrastructure/services/memory-validation-service.d.ts
ADDED
@@ -0,0 +1,9 @@
+import { ValidationService, HasherService } from '../../core/services';
+import { FileEntry, RDIndex } from '../../core/models';
+export declare class MemoryValidationService implements ValidationService {
+    private readonly hasher;
+    constructor(hasher: HasherService);
+    validateFile(entry: FileEntry, path: string): Promise<boolean>;
+    validateIndex(index: RDIndex, basePath: string): Promise<boolean>;
+}
+//# sourceMappingURL=memory-validation-service.d.ts.map
package/dist/infrastructure/services/memory-validation-service.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"memory-validation-service.d.ts","sourceRoot":"","sources":["../../../src/infrastructure/services/memory-validation-service.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,iBAAiB,EAAE,aAAa,EAAE,MAAM,qBAAqB,CAAC;AACvE,OAAO,EAAE,SAAS,EAAE,OAAO,EAAE,MAAM,mBAAmB,CAAC;AAEvD,qBAAa,uBAAwB,YAAW,iBAAiB;IACnD,OAAO,CAAC,QAAQ,CAAC,MAAM;gBAAN,MAAM,EAAE,aAAa;IAE5C,YAAY,CAAC,KAAK,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC;IAY9D,aAAa,CAAC,KAAK,EAAE,OAAO,EAAE,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC;CAcxE"}
package/dist/infrastructure/services/memory-validation-service.js
ADDED
@@ -0,0 +1,33 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.MemoryValidationService = void 0;
+const path_1 = require("path");
+const promises_1 = require("fs/promises");
+class MemoryValidationService {
+    hasher;
+    constructor(hasher) {
+        this.hasher = hasher;
+    }
+    async validateFile(entry, path) {
+        try {
+            const finalPath = (0, path_1.isAbsolute)(path) ? path : (0, path_1.resolve)(process.cwd(), path);
+            const stats = await (0, promises_1.stat)(finalPath);
+            return stats.size === entry.size && (await this.hasher.verifyFile(finalPath, entry.hash));
+        }
+        catch {
+            return false;
+        }
+    }
+    async validateIndex(index, basePath) {
+        const directory = (0, path_1.isAbsolute)(basePath) ? basePath : (0, path_1.resolve)(process.cwd(), basePath);
+        for (const file of index.files) {
+            const filePath = (0, path_1.join)(directory, file.path);
+            const valid = await this.validateFile(file, filePath);
+            if (!valid) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
+exports.MemoryValidationService = MemoryValidationService;
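Finally, a short hypothetical sketch of the validator above, checking a reconstructed directory against an index (again assuming these names are re-exported from the package root).

    // Hypothetical sketch -- verifies file sizes and full-file hashes after a rebuild.
    import { MemoryValidationService } from 'rac-delta';
    import type { HasherService, RDIndex } from 'rac-delta';

    export async function assertDirectoryMatchesIndex(
        hasher: HasherService,
        index: RDIndex,
        basePath: string,
    ): Promise<void> {
        const validator = new MemoryValidationService(hasher);
        // validateIndex() walks every entry and compares its size plus hash.
        const ok = await validator.validateIndex(index, basePath);
        if (!ok) {
            throw new Error(`Directory ${basePath} does not match the rd-index`);
        }
    }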
package/package.json
ADDED
@@ -0,0 +1,43 @@
+{
+  "name": "rac-delta",
+  "description": "Storage agnostic delta patching implementation of rac-delta protocol for NodeJs. With streaming support and file reconstruction.",
+  "version": "1.0.0",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "files": [
+    "dist"
+  ],
+  "scripts": {
+    "build": "tsc",
+    "test": "vitest run",
+    "test:watch": "vitest"
+  },
+  "keywords": [
+    "delta",
+    "open",
+    "protocol",
+    "diff",
+    "delta-patching"
+  ],
+  "author": "Rubén Cruz contact@raccreativegames.com",
+  "license": "MIT",
+  "dependencies": {
+    "hash-wasm": "^4.12.0"
+  },
+  "peerDependencies": {
+    "@aws-sdk/client-s3": "^3.908.0",
+    "@azure/identity": "^4.13.0",
+    "@azure/storage-blob": "^12.28.0",
+    "@google-cloud/storage": "^7.17.2",
+    "ssh2": "^1.17.0"
+  },
+  "devDependencies": {
+    "@types/ssh2": "^1.15.5",
+    "@vitest/coverage-v8": "^3.2.4",
+    "eslint": "^9.37.0",
+    "prettier": "^3.6.2",
+    "ts-node": "^10.9.2",
+    "typescript": "^5.9.3",
+    "vitest": "^3.2.4"
+  }
+}