@twin.org/synchronised-storage-service 0.0.1-next.8 → 0.0.3-next.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/es/data/verifiableStorageKeys.json +5 -0
- package/dist/es/entities/syncSnapshotEntry.js +93 -0
- package/dist/es/entities/syncSnapshotEntry.js.map +1 -0
- package/dist/es/helpers/blobStorageHelper.js +185 -0
- package/dist/es/helpers/blobStorageHelper.js.map +1 -0
- package/dist/es/helpers/changeSetHelper.js +215 -0
- package/dist/es/helpers/changeSetHelper.js.map +1 -0
- package/dist/es/helpers/localSyncStateHelper.js +384 -0
- package/dist/es/helpers/localSyncStateHelper.js.map +1 -0
- package/dist/es/helpers/remoteSyncStateHelper.js +560 -0
- package/dist/es/helpers/remoteSyncStateHelper.js.map +1 -0
- package/dist/es/helpers/versions.js +6 -0
- package/dist/es/helpers/versions.js.map +1 -0
- package/dist/es/index.js +13 -0
- package/dist/es/index.js.map +1 -0
- package/dist/es/models/ISyncPointerStore.js +4 -0
- package/dist/es/models/ISyncPointerStore.js.map +1 -0
- package/dist/es/models/ISyncSnapshot.js +4 -0
- package/dist/es/models/ISyncSnapshot.js.map +1 -0
- package/dist/es/models/ISyncState.js +2 -0
- package/dist/es/models/ISyncState.js.map +1 -0
- package/dist/es/models/ISynchronisedStorageServiceConfig.js +4 -0
- package/dist/es/models/ISynchronisedStorageServiceConfig.js.map +1 -0
- package/dist/es/models/ISynchronisedStorageServiceConstructorOptions.js +2 -0
- package/dist/es/models/ISynchronisedStorageServiceConstructorOptions.js.map +1 -0
- package/dist/es/restEntryPoints.js +10 -0
- package/dist/es/restEntryPoints.js.map +1 -0
- package/dist/es/schema.js +11 -0
- package/dist/es/schema.js.map +1 -0
- package/dist/es/synchronisedStorageRoutes.js +153 -0
- package/dist/es/synchronisedStorageRoutes.js.map +1 -0
- package/dist/es/synchronisedStorageService.js +554 -0
- package/dist/es/synchronisedStorageService.js.map +1 -0
- package/dist/types/entities/syncSnapshotEntry.d.ts +3 -3
- package/dist/types/helpers/blobStorageHelper.d.ts +3 -3
- package/dist/types/helpers/changeSetHelper.d.ts +16 -32
- package/dist/types/helpers/localSyncStateHelper.d.ts +11 -11
- package/dist/types/helpers/remoteSyncStateHelper.d.ts +18 -14
- package/dist/types/index.d.ts +10 -10
- package/dist/types/models/ISyncState.d.ts +1 -1
- package/dist/types/models/ISynchronisedStorageServiceConfig.d.ts +3 -8
- package/dist/types/models/ISynchronisedStorageServiceConstructorOptions.d.ts +7 -6
- package/dist/types/synchronisedStorageRoutes.d.ts +1 -1
- package/dist/types/synchronisedStorageService.d.ts +18 -21
- package/docs/architecture.md +168 -12
- package/docs/changelog.md +149 -0
- package/docs/open-api/spec.json +62 -57
- package/docs/reference/classes/SyncSnapshotEntry.md +4 -10
- package/docs/reference/classes/SynchronisedStorageService.md +38 -50
- package/docs/reference/interfaces/ISynchronisedStorageServiceConfig.md +3 -17
- package/docs/reference/interfaces/ISynchronisedStorageServiceConstructorOptions.md +9 -8
- package/locales/en.json +11 -16
- package/package.json +26 -9
- package/dist/cjs/index.cjs +0 -2233
- package/dist/esm/index.mjs +0 -2225
package/dist/cjs/index.cjs
DELETED
|
@@ -1,2233 +0,0 @@
|
|
|
1
|
-
'use strict';
|
|
2
|
-
|
|
3
|
-
var entity = require('@twin.org/entity');
|
|
4
|
-
var core = require('@twin.org/core');
|
|
5
|
-
var web = require('@twin.org/web');
|
|
6
|
-
var blobStorageModels = require('@twin.org/blob-storage-models');
|
|
7
|
-
var entityStorageModels = require('@twin.org/entity-storage-models');
|
|
8
|
-
var identityModels = require('@twin.org/identity-models');
|
|
9
|
-
var standardsW3cDid = require('@twin.org/standards-w3c-did');
|
|
10
|
-
var synchronisedStorageModels = require('@twin.org/synchronised-storage-models');
|
|
11
|
-
var vaultModels = require('@twin.org/vault-models');
|
|
12
|
-
var verifiableStorageModels = require('@twin.org/verifiable-storage-models');
|
|
13
|
-
var crypto = require('@twin.org/crypto');
|
|
14
|
-
|
|
15
|
-
// Copyright 2024 IOTA Stiftung.
|
|
16
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
17
|
-
/**
|
|
18
|
-
* Class representing an entry for the sync snapshot.
|
|
19
|
-
*/
|
|
20
|
-
exports.SyncSnapshotEntry = class SyncSnapshotEntry {
|
|
21
|
-
/**
|
|
22
|
-
* The id for the snapshot.
|
|
23
|
-
*/
|
|
24
|
-
id;
|
|
25
|
-
/**
|
|
26
|
-
* The version for the snapshot.
|
|
27
|
-
*/
|
|
28
|
-
version;
|
|
29
|
-
/**
|
|
30
|
-
* The storage key for the snapshot i.e. which entity is being synchronized.
|
|
31
|
-
*/
|
|
32
|
-
storageKey;
|
|
33
|
-
/**
|
|
34
|
-
* The date the snapshot was created.
|
|
35
|
-
*/
|
|
36
|
-
dateCreated;
|
|
37
|
-
/**
|
|
38
|
-
* The date the snapshot was last modified.
|
|
39
|
-
*/
|
|
40
|
-
dateModified;
|
|
41
|
-
/**
|
|
42
|
-
* The flag to determine if this is the snapshot is the local one containing changes for this node.
|
|
43
|
-
*/
|
|
44
|
-
isLocal;
|
|
45
|
-
/**
|
|
46
|
-
* The flag to determine if this is a consolidated snapshot.
|
|
47
|
-
*/
|
|
48
|
-
isConsolidated;
|
|
49
|
-
/**
|
|
50
|
-
* The epoch for the changeset.
|
|
51
|
-
*/
|
|
52
|
-
epoch;
|
|
53
|
-
/**
|
|
54
|
-
* The ids of the storage for the change sets in the snapshot, if this is not a local snapshot.
|
|
55
|
-
*/
|
|
56
|
-
changeSetStorageIds;
|
|
57
|
-
/**
|
|
58
|
-
* The changes that were made in this snapshot, if this is a local snapshot.
|
|
59
|
-
*/
|
|
60
|
-
changes;
|
|
61
|
-
};
|
|
62
|
-
__decorate([
|
|
63
|
-
entity.property({ type: "string", isPrimary: true }),
|
|
64
|
-
__metadata("design:type", String)
|
|
65
|
-
], exports.SyncSnapshotEntry.prototype, "id", void 0);
|
|
66
|
-
__decorate([
|
|
67
|
-
entity.property({ type: "string" }),
|
|
68
|
-
__metadata("design:type", String)
|
|
69
|
-
], exports.SyncSnapshotEntry.prototype, "version", void 0);
|
|
70
|
-
__decorate([
|
|
71
|
-
entity.property({ type: "string", isSecondary: true }),
|
|
72
|
-
__metadata("design:type", String)
|
|
73
|
-
], exports.SyncSnapshotEntry.prototype, "storageKey", void 0);
|
|
74
|
-
__decorate([
|
|
75
|
-
entity.property({ type: "string" }),
|
|
76
|
-
__metadata("design:type", String)
|
|
77
|
-
], exports.SyncSnapshotEntry.prototype, "dateCreated", void 0);
|
|
78
|
-
__decorate([
|
|
79
|
-
entity.property({ type: "string" }),
|
|
80
|
-
__metadata("design:type", String)
|
|
81
|
-
], exports.SyncSnapshotEntry.prototype, "dateModified", void 0);
|
|
82
|
-
__decorate([
|
|
83
|
-
entity.property({ type: "boolean" }),
|
|
84
|
-
__metadata("design:type", Boolean)
|
|
85
|
-
], exports.SyncSnapshotEntry.prototype, "isLocal", void 0);
|
|
86
|
-
__decorate([
|
|
87
|
-
entity.property({ type: "boolean" }),
|
|
88
|
-
__metadata("design:type", Boolean)
|
|
89
|
-
], exports.SyncSnapshotEntry.prototype, "isConsolidated", void 0);
|
|
90
|
-
__decorate([
|
|
91
|
-
entity.property({ type: "number" }),
|
|
92
|
-
__metadata("design:type", Number)
|
|
93
|
-
], exports.SyncSnapshotEntry.prototype, "epoch", void 0);
|
|
94
|
-
__decorate([
|
|
95
|
-
entity.property({ type: "array", itemType: "string", optional: true }),
|
|
96
|
-
__metadata("design:type", Array)
|
|
97
|
-
], exports.SyncSnapshotEntry.prototype, "changeSetStorageIds", void 0);
|
|
98
|
-
__decorate([
|
|
99
|
-
entity.property({ type: "array", itemType: "object", optional: true }),
|
|
100
|
-
__metadata("design:type", Array)
|
|
101
|
-
], exports.SyncSnapshotEntry.prototype, "changes", void 0);
|
|
102
|
-
exports.SyncSnapshotEntry = __decorate([
|
|
103
|
-
entity.entity()
|
|
104
|
-
], exports.SyncSnapshotEntry);
|
|
105
|
-
|
|
106
|
-
/**
|
|
107
|
-
* The source used when communicating about these routes.
|
|
108
|
-
*/
|
|
109
|
-
const ROUTES_SOURCE = "synchronisedStorageRoutes";
|
|
110
|
-
/**
|
|
111
|
-
* The tag to associate with the routes.
|
|
112
|
-
*/
|
|
113
|
-
const tagsSynchronisedStorage = [
|
|
114
|
-
{
|
|
115
|
-
name: "Synchronised Storage",
|
|
116
|
-
description: "Endpoints which are modelled to access a synchronised storage contract."
|
|
117
|
-
}
|
|
118
|
-
];
|
|
119
|
-
/**
|
|
120
|
-
* The REST routes for synchronised storage.
|
|
121
|
-
* @param baseRouteName Prefix to prepend to the paths.
|
|
122
|
-
* @param componentName The name of the component to use in the routes stored in the ComponentFactory.
|
|
123
|
-
* @returns The generated routes.
|
|
124
|
-
*/
|
|
125
|
-
function generateRestRoutesSynchronisedStorage(baseRouteName, componentName) {
|
|
126
|
-
const syncChangeSetRoute = {
|
|
127
|
-
operationId: "synchronisedStorageSyncChangeSetRequest",
|
|
128
|
-
summary: "Request that the node perform a sync request for a changeset.",
|
|
129
|
-
tag: tagsSynchronisedStorage[0].name,
|
|
130
|
-
method: "POST",
|
|
131
|
-
path: `${baseRouteName}/sync-changeset`,
|
|
132
|
-
handler: async (httpRequestContext, request) => synchronisedStorageSyncChangeSetRequest(httpRequestContext, componentName, request),
|
|
133
|
-
requestType: {
|
|
134
|
-
type: "ISyncChangeSetRequest",
|
|
135
|
-
examples: [
|
|
136
|
-
{
|
|
137
|
-
id: "synchronisedStorageSyncChangeSetRequestExample",
|
|
138
|
-
request: {
|
|
139
|
-
body: {
|
|
140
|
-
id: "0909090909090909090909090909090909090909090909090909090909090909",
|
|
141
|
-
dateCreated: "2025-05-29T01:00:00.000Z",
|
|
142
|
-
dateModified: "2025-05-29T01:00:00.000Z",
|
|
143
|
-
nodeIdentity: "did:entity-storage:0xd2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2",
|
|
144
|
-
changes: [
|
|
145
|
-
{
|
|
146
|
-
entity: {
|
|
147
|
-
dateModified: "2025-01-01T00:00:00.000Z"
|
|
148
|
-
},
|
|
149
|
-
id: "test-id-1",
|
|
150
|
-
operation: "set"
|
|
151
|
-
}
|
|
152
|
-
],
|
|
153
|
-
proof: {
|
|
154
|
-
"@context": "https://www.w3.org/ns/credentials/v2",
|
|
155
|
-
created: "2025-05-29T01:00:00.000Z",
|
|
156
|
-
cryptosuite: "eddsa-jcs-2022",
|
|
157
|
-
proofPurpose: "assertionMethod",
|
|
158
|
-
proofValue: "z5efBErQs3YBLZoH7jgKMQaRc9YjAxA5XSYKmW3FmTBDw9WionT2NS2x1SMvcRyBvw53cSSoaCT1xQH9tkWngGCX3",
|
|
159
|
-
type: "DataIntegrityProof",
|
|
160
|
-
verificationMethod: "did:entity-storage:0xd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0#synchronised-storage-assertion"
|
|
161
|
-
},
|
|
162
|
-
storageKey: "test-type"
|
|
163
|
-
}
|
|
164
|
-
}
|
|
165
|
-
}
|
|
166
|
-
]
|
|
167
|
-
},
|
|
168
|
-
responseType: [
|
|
169
|
-
{
|
|
170
|
-
type: "INoContentResponse"
|
|
171
|
-
}
|
|
172
|
-
],
|
|
173
|
-
// Authentication is provided by the proof in the request body.
|
|
174
|
-
skipAuth: true
|
|
175
|
-
};
|
|
176
|
-
const getDecryptionKeyRoute = {
|
|
177
|
-
operationId: "synchronisedStorageGetDecryptionKeyRequest",
|
|
178
|
-
summary: "Request the decryption key.",
|
|
179
|
-
tag: tagsSynchronisedStorage[0].name,
|
|
180
|
-
method: "POST",
|
|
181
|
-
path: `${baseRouteName}/decryption-key`,
|
|
182
|
-
handler: async (httpRequestContext, request) => synchronisedStorageGetDecryptionKeyRequest(httpRequestContext, componentName, request),
|
|
183
|
-
requestType: {
|
|
184
|
-
type: "ISyncChangeSetRequest",
|
|
185
|
-
examples: [
|
|
186
|
-
{
|
|
187
|
-
id: "synchronisedStorageSyncGetDecryptionKeyRequestExample",
|
|
188
|
-
request: {
|
|
189
|
-
body: {
|
|
190
|
-
nodeIdentity: "did:entity-storage:0xd2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2",
|
|
191
|
-
proof: {
|
|
192
|
-
"@context": "https://www.w3.org/ns/credentials/v2",
|
|
193
|
-
created: "2025-05-29T01:00:00.000Z",
|
|
194
|
-
cryptosuite: "eddsa-jcs-2022",
|
|
195
|
-
proofPurpose: "assertionMethod",
|
|
196
|
-
proofValue: "z5efBErQs3YBLZoH7jgKMQaRc9YjAxA5XSYKmW3FmTBDw9WionT2NS2x1SMvcRyBvw53cSSoaCT1xQH9tkWngGCX3",
|
|
197
|
-
type: "DataIntegrityProof",
|
|
198
|
-
verificationMethod: "did:entity-storage:0xd0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0#synchronised-storage-assertion"
|
|
199
|
-
}
|
|
200
|
-
}
|
|
201
|
-
}
|
|
202
|
-
}
|
|
203
|
-
]
|
|
204
|
-
},
|
|
205
|
-
responseType: [
|
|
206
|
-
{
|
|
207
|
-
type: "ISyncDecryptionKeyResponse",
|
|
208
|
-
examples: [
|
|
209
|
-
{
|
|
210
|
-
id: "synchronisedStorageSyncGetDecryptionKeyResponseExample",
|
|
211
|
-
response: {
|
|
212
|
-
body: {
|
|
213
|
-
decryptionKey: "z5efBErQs3YBLZoH7jgKMQaRc9YjAxA5XSYKmW3FmTBDw9WionT2NS2x1SMvcRyBvw53cSSoaCT1xQH9tkWngGCX3"
|
|
214
|
-
}
|
|
215
|
-
}
|
|
216
|
-
}
|
|
217
|
-
]
|
|
218
|
-
},
|
|
219
|
-
{
|
|
220
|
-
type: "IUnauthorizedResponse"
|
|
221
|
-
}
|
|
222
|
-
],
|
|
223
|
-
// Authentication is provided by the proof in the request body.
|
|
224
|
-
skipAuth: true
|
|
225
|
-
};
|
|
226
|
-
return [syncChangeSetRoute, getDecryptionKeyRoute];
|
|
227
|
-
}
|
|
228
|
-
/**
|
|
229
|
-
* Perform the sync change set operation.
|
|
230
|
-
* @param httpRequestContext The request context for the API.
|
|
231
|
-
* @param componentName The name of the component to use in the routes.
|
|
232
|
-
* @param request The request.
|
|
233
|
-
* @returns The response object with additional http response properties.
|
|
234
|
-
*/
|
|
235
|
-
async function synchronisedStorageSyncChangeSetRequest(httpRequestContext, componentName, request) {
|
|
236
|
-
core.Guards.object(ROUTES_SOURCE, "request", request);
|
|
237
|
-
core.Guards.object(ROUTES_SOURCE, "request.body", request.body);
|
|
238
|
-
const component = core.ComponentFactory.get(componentName);
|
|
239
|
-
await component.syncChangeSet(request.body);
|
|
240
|
-
return {
|
|
241
|
-
statusCode: web.HttpStatusCode.noContent
|
|
242
|
-
};
|
|
243
|
-
}
|
|
244
|
-
/**
|
|
245
|
-
* Request the decryption key.
|
|
246
|
-
* @param httpRequestContext The request context for the API.
|
|
247
|
-
* @param componentName The name of the component to use in the routes.
|
|
248
|
-
* @param request The request.
|
|
249
|
-
* @returns The response object with additional http response properties.
|
|
250
|
-
*/
|
|
251
|
-
async function synchronisedStorageGetDecryptionKeyRequest(httpRequestContext, componentName, request) {
|
|
252
|
-
core.Guards.object(ROUTES_SOURCE, "request", request);
|
|
253
|
-
core.Guards.object(ROUTES_SOURCE, "request.body", request.body);
|
|
254
|
-
const component = core.ComponentFactory.get(componentName);
|
|
255
|
-
const key = await component.getDecryptionKey(request.body.nodeIdentity, request.body.proof);
|
|
256
|
-
return {
|
|
257
|
-
body: {
|
|
258
|
-
decryptionKey: key
|
|
259
|
-
}
|
|
260
|
-
};
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
const restEntryPoints = [
|
|
264
|
-
{
|
|
265
|
-
name: "synchronised-storage",
|
|
266
|
-
defaultBaseRoute: "synchronised-storage",
|
|
267
|
-
tags: tagsSynchronisedStorage,
|
|
268
|
-
generateRoutes: generateRestRoutesSynchronisedStorage
|
|
269
|
-
}
|
|
270
|
-
];
|
|
271
|
-
|
|
272
|
-
// Copyright 2024 IOTA Stiftung.
|
|
273
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
274
|
-
/**
|
|
275
|
-
* Initialize the schema for the synchronised service.
|
|
276
|
-
*/
|
|
277
|
-
function initSchema() {
|
|
278
|
-
entity.EntitySchemaFactory.register("SyncSnapshotEntry", () => entity.EntitySchemaHelper.getSchema(exports.SyncSnapshotEntry));
|
|
279
|
-
}
|
|
280
|
-
|
|
281
|
-
var mainnet = "";
|
|
282
|
-
var testnet = "";
|
|
283
|
-
var devnet = "";
|
|
284
|
-
var verifiableStorageKeys = {
|
|
285
|
-
mainnet: mainnet,
|
|
286
|
-
testnet: testnet,
|
|
287
|
-
devnet: devnet
|
|
288
|
-
};
|
|
289
|
-
|
|
290
|
-
/**
|
|
291
|
-
* Class for performing blob storage operations.
|
|
292
|
-
*/
|
|
293
|
-
class BlobStorageHelper {
|
|
294
|
-
/**
|
|
295
|
-
* Runtime name for the class.
|
|
296
|
-
*/
|
|
297
|
-
CLASS_NAME = "BlobStorageHelper";
|
|
298
|
-
/**
|
|
299
|
-
* The logging component to use for logging.
|
|
300
|
-
* @internal
|
|
301
|
-
*/
|
|
302
|
-
_loggingComponent;
|
|
303
|
-
/**
|
|
304
|
-
* The vault connector.
|
|
305
|
-
* @internal
|
|
306
|
-
*/
|
|
307
|
-
_vaultConnector;
|
|
308
|
-
/**
|
|
309
|
-
* The blob storage connector to use.
|
|
310
|
-
* @internal
|
|
311
|
-
*/
|
|
312
|
-
_blobStorageConnector;
|
|
313
|
-
/**
|
|
314
|
-
* The id of the vault key to use for encrypting/decrypting blobs.
|
|
315
|
-
* @internal
|
|
316
|
-
*/
|
|
317
|
-
_blobStorageEncryptionKeyId;
|
|
318
|
-
/**
|
|
319
|
-
* Is this a trusted node.
|
|
320
|
-
* @internal
|
|
321
|
-
*/
|
|
322
|
-
_isTrustedNode;
|
|
323
|
-
/**
|
|
324
|
-
* Create a new instance of BlobStorageHelper.
|
|
325
|
-
* @param loggingComponent The logging connector to use for logging.
|
|
326
|
-
* @param vaultConnector The vault connector to use for for the encryption key.
|
|
327
|
-
* @param blobStorageConnector The blob storage component to use.
|
|
328
|
-
* @param blobStorageEncryptionKeyId The id of the vault key to use for encrypting/decrypting blobs.
|
|
329
|
-
* @param isTrustedNode Is this a trusted node.
|
|
330
|
-
*/
|
|
331
|
-
constructor(loggingComponent, vaultConnector, blobStorageConnector, blobStorageEncryptionKeyId, isTrustedNode) {
|
|
332
|
-
this._loggingComponent = loggingComponent;
|
|
333
|
-
this._vaultConnector = vaultConnector;
|
|
334
|
-
this._blobStorageConnector = blobStorageConnector;
|
|
335
|
-
this._blobStorageEncryptionKeyId = blobStorageEncryptionKeyId;
|
|
336
|
-
this._isTrustedNode = isTrustedNode;
|
|
337
|
-
}
|
|
338
|
-
/**
|
|
339
|
-
* Load a blob from storage.
|
|
340
|
-
* @param blobId The id of the blob to apply.
|
|
341
|
-
* @returns The blob.
|
|
342
|
-
*/
|
|
343
|
-
async loadBlob(blobId) {
|
|
344
|
-
await this._loggingComponent?.log({
|
|
345
|
-
level: "info",
|
|
346
|
-
source: this.CLASS_NAME,
|
|
347
|
-
message: "loadBlob",
|
|
348
|
-
data: {
|
|
349
|
-
blobId
|
|
350
|
-
}
|
|
351
|
-
});
|
|
352
|
-
try {
|
|
353
|
-
const encryptedBlob = await this._blobStorageConnector.get(blobId);
|
|
354
|
-
if (core.Is.uint8Array(encryptedBlob)) {
|
|
355
|
-
let compressedBlob;
|
|
356
|
-
// If this is a trusted node, we can decrypt the blob using the vault
|
|
357
|
-
if (this._isTrustedNode) {
|
|
358
|
-
compressedBlob = await this._vaultConnector.decrypt(this._blobStorageEncryptionKeyId, vaultModels.VaultEncryptionType.Rsa2048, encryptedBlob);
|
|
359
|
-
}
|
|
360
|
-
else {
|
|
361
|
-
// Otherwise we need the public key stored as a secret in the vault
|
|
362
|
-
const key = await this._vaultConnector.getSecret(this._blobStorageEncryptionKeyId);
|
|
363
|
-
const rsa = new crypto.RSA(core.Converter.base64ToBytes(key));
|
|
364
|
-
compressedBlob = rsa.decrypt(encryptedBlob);
|
|
365
|
-
}
|
|
366
|
-
const decompressedBlob = await core.Compression.decompress(compressedBlob, core.CompressionType.Gzip);
|
|
367
|
-
await this._loggingComponent?.log({
|
|
368
|
-
level: "info",
|
|
369
|
-
source: this.CLASS_NAME,
|
|
370
|
-
message: "loadedBlob",
|
|
371
|
-
data: {
|
|
372
|
-
blobId
|
|
373
|
-
}
|
|
374
|
-
});
|
|
375
|
-
return core.ObjectHelper.fromBytes(decompressedBlob);
|
|
376
|
-
}
|
|
377
|
-
}
|
|
378
|
-
catch (error) {
|
|
379
|
-
await this._loggingComponent?.log({
|
|
380
|
-
level: "error",
|
|
381
|
-
source: this.CLASS_NAME,
|
|
382
|
-
message: "loadBlobFailed",
|
|
383
|
-
data: {
|
|
384
|
-
blobId
|
|
385
|
-
},
|
|
386
|
-
error: core.BaseError.fromError(error)
|
|
387
|
-
});
|
|
388
|
-
}
|
|
389
|
-
await this._loggingComponent?.log({
|
|
390
|
-
level: "info",
|
|
391
|
-
source: this.CLASS_NAME,
|
|
392
|
-
message: "loadBlobEmpty",
|
|
393
|
-
data: {
|
|
394
|
-
blobId
|
|
395
|
-
}
|
|
396
|
-
});
|
|
397
|
-
}
|
|
398
|
-
/**
|
|
399
|
-
* Save a blob.
|
|
400
|
-
* @param blob The blob to save.
|
|
401
|
-
* @returns The id of the blob.
|
|
402
|
-
*/
|
|
403
|
-
async saveBlob(blob) {
|
|
404
|
-
await this._loggingComponent?.log({
|
|
405
|
-
level: "info",
|
|
406
|
-
source: this.CLASS_NAME,
|
|
407
|
-
message: "saveBlob"
|
|
408
|
-
});
|
|
409
|
-
if (!this._isTrustedNode) {
|
|
410
|
-
throw new core.GeneralError(this.CLASS_NAME, "notTrustedNode");
|
|
411
|
-
}
|
|
412
|
-
const compressedBlob = await core.Compression.compress(core.ObjectHelper.toBytes(blob), core.CompressionType.Gzip);
|
|
413
|
-
const encryptedBlob = await this._vaultConnector.encrypt(this._blobStorageEncryptionKeyId, vaultModels.VaultEncryptionType.Rsa2048, compressedBlob);
|
|
414
|
-
try {
|
|
415
|
-
const blobId = await this._blobStorageConnector.set(encryptedBlob);
|
|
416
|
-
await this._loggingComponent?.log({
|
|
417
|
-
level: "info",
|
|
418
|
-
source: this.CLASS_NAME,
|
|
419
|
-
message: "savedBlob",
|
|
420
|
-
data: {
|
|
421
|
-
blobId
|
|
422
|
-
}
|
|
423
|
-
});
|
|
424
|
-
return blobId;
|
|
425
|
-
}
|
|
426
|
-
catch (error) {
|
|
427
|
-
await this._loggingComponent?.log({
|
|
428
|
-
level: "error",
|
|
429
|
-
source: this.CLASS_NAME,
|
|
430
|
-
message: "saveBlobFailed",
|
|
431
|
-
error: core.BaseError.fromError(error)
|
|
432
|
-
});
|
|
433
|
-
throw error;
|
|
434
|
-
}
|
|
435
|
-
}
|
|
436
|
-
/**
|
|
437
|
-
* Remove a blob from storage.
|
|
438
|
-
* @param blobId The id of the blob to remove.
|
|
439
|
-
* @returns Nothing.
|
|
440
|
-
*/
|
|
441
|
-
async removeBlob(blobId) {
|
|
442
|
-
await this._loggingComponent?.log({
|
|
443
|
-
level: "info",
|
|
444
|
-
source: this.CLASS_NAME,
|
|
445
|
-
message: "removeBlob",
|
|
446
|
-
data: {
|
|
447
|
-
blobId
|
|
448
|
-
}
|
|
449
|
-
});
|
|
450
|
-
try {
|
|
451
|
-
await this._blobStorageConnector.remove(blobId);
|
|
452
|
-
await this._loggingComponent?.log({
|
|
453
|
-
level: "info",
|
|
454
|
-
source: this.CLASS_NAME,
|
|
455
|
-
message: "removedBlob",
|
|
456
|
-
data: {
|
|
457
|
-
blobId
|
|
458
|
-
}
|
|
459
|
-
});
|
|
460
|
-
}
|
|
461
|
-
catch (error) {
|
|
462
|
-
await this._loggingComponent?.log({
|
|
463
|
-
level: "error",
|
|
464
|
-
source: this.CLASS_NAME,
|
|
465
|
-
message: "removeBlobFailed",
|
|
466
|
-
data: {
|
|
467
|
-
blobId
|
|
468
|
-
},
|
|
469
|
-
error: core.BaseError.fromError(error)
|
|
470
|
-
});
|
|
471
|
-
}
|
|
472
|
-
await this._loggingComponent?.log({
|
|
473
|
-
level: "info",
|
|
474
|
-
source: this.CLASS_NAME,
|
|
475
|
-
message: "removeBlobEmpty",
|
|
476
|
-
data: {
|
|
477
|
-
blobId
|
|
478
|
-
}
|
|
479
|
-
});
|
|
480
|
-
}
|
|
481
|
-
}
|
|
482
|
-
|
|
483
|
-
// Copyright 2024 IOTA Stiftung.
|
|
484
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
485
|
-
/**
|
|
486
|
-
* Class for performing change set operations.
|
|
487
|
-
*/
|
|
488
|
-
class ChangeSetHelper {
|
|
489
|
-
/**
|
|
490
|
-
* Runtime name for the class.
|
|
491
|
-
*/
|
|
492
|
-
CLASS_NAME = "ChangeSetHelper";
|
|
493
|
-
/**
|
|
494
|
-
* The logging component to use for logging.
|
|
495
|
-
* @internal
|
|
496
|
-
*/
|
|
497
|
-
_loggingComponent;
|
|
498
|
-
/**
|
|
499
|
-
* The event bus component.
|
|
500
|
-
* @internal
|
|
501
|
-
*/
|
|
502
|
-
_eventBusComponent;
|
|
503
|
-
/**
|
|
504
|
-
* The blob storage helper to use for remote sync states.
|
|
505
|
-
* @internal
|
|
506
|
-
*/
|
|
507
|
-
_blobStorageHelper;
|
|
508
|
-
/**
|
|
509
|
-
* The identity connector to use for signing/verifying changesets.
|
|
510
|
-
* @internal
|
|
511
|
-
*/
|
|
512
|
-
_identityConnector;
|
|
513
|
-
/**
|
|
514
|
-
* The id of the identity method to use when signing/verifying changesets.
|
|
515
|
-
* @internal
|
|
516
|
-
*/
|
|
517
|
-
_decentralisedStorageMethodId;
|
|
518
|
-
/**
|
|
519
|
-
* The identity of the node that is performing the update.
|
|
520
|
-
* @internal
|
|
521
|
-
*/
|
|
522
|
-
_nodeIdentity;
|
|
523
|
-
/**
|
|
524
|
-
* Create a new instance of ChangeSetHelper.
|
|
525
|
-
* @param loggingComponent The logging connector to use for logging.
|
|
526
|
-
* @param eventBusComponent The event bus component to use for events.
|
|
527
|
-
* @param identityConnector The identity connector to use for signing/verifying changesets.
|
|
528
|
-
* @param blobStorageHelper The blob storage component to use for remote sync states.
|
|
529
|
-
* @param decentralisedStorageMethodId The id of the identity method to use when signing/verifying changesets.
|
|
530
|
-
*/
|
|
531
|
-
constructor(loggingComponent, eventBusComponent, identityConnector, blobStorageHelper, decentralisedStorageMethodId) {
|
|
532
|
-
this._loggingComponent = loggingComponent;
|
|
533
|
-
this._eventBusComponent = eventBusComponent;
|
|
534
|
-
this._decentralisedStorageMethodId = decentralisedStorageMethodId;
|
|
535
|
-
this._blobStorageHelper = blobStorageHelper;
|
|
536
|
-
this._identityConnector = identityConnector;
|
|
537
|
-
}
|
|
538
|
-
/**
|
|
539
|
-
* Set the node identity to use for signing changesets.
|
|
540
|
-
* @param nodeIdentity The identity of the node that is performing the update.
|
|
541
|
-
*/
|
|
542
|
-
setNodeIdentity(nodeIdentity) {
|
|
543
|
-
this._nodeIdentity = nodeIdentity;
|
|
544
|
-
}
|
|
545
|
-
/**
|
|
546
|
-
* Get and verify a changeset.
|
|
547
|
-
* @param changeSetStorageId The id of the sync changeset to apply.
|
|
548
|
-
* @returns The changeset if it was verified.
|
|
549
|
-
*/
|
|
550
|
-
async getAndVerifyChangeset(changeSetStorageId) {
|
|
551
|
-
await this._loggingComponent?.log({
|
|
552
|
-
level: "info",
|
|
553
|
-
source: this.CLASS_NAME,
|
|
554
|
-
message: "getChangeSet",
|
|
555
|
-
data: {
|
|
556
|
-
changeSetStorageId
|
|
557
|
-
}
|
|
558
|
-
});
|
|
559
|
-
try {
|
|
560
|
-
const syncChangeSet = await this._blobStorageHelper.loadBlob(changeSetStorageId);
|
|
561
|
-
if (core.Is.object(syncChangeSet)) {
|
|
562
|
-
const verified = await this.verifyChangesetProof(syncChangeSet);
|
|
563
|
-
return verified ? syncChangeSet : undefined;
|
|
564
|
-
}
|
|
565
|
-
}
|
|
566
|
-
catch (error) {
|
|
567
|
-
await this._loggingComponent?.log({
|
|
568
|
-
level: "warn",
|
|
569
|
-
source: this.CLASS_NAME,
|
|
570
|
-
message: "getChangeSetError",
|
|
571
|
-
data: {
|
|
572
|
-
changeSetStorageId
|
|
573
|
-
},
|
|
574
|
-
error: core.BaseError.fromError(error)
|
|
575
|
-
});
|
|
576
|
-
}
|
|
577
|
-
await this._loggingComponent?.log({
|
|
578
|
-
level: "info",
|
|
579
|
-
source: this.CLASS_NAME,
|
|
580
|
-
message: "getChangeSetEmpty",
|
|
581
|
-
data: {
|
|
582
|
-
changeSetStorageId
|
|
583
|
-
}
|
|
584
|
-
});
|
|
585
|
-
}
|
|
586
|
-
/**
|
|
587
|
-
* Apply a sync changeset.
|
|
588
|
-
* @param changeSetStorageId The id of the sync changeset to apply.
|
|
589
|
-
* @returns The changeset if it existed.
|
|
590
|
-
*/
|
|
591
|
-
async getAndApplyChangeset(changeSetStorageId) {
|
|
592
|
-
const syncChangeset = await this.getAndVerifyChangeset(changeSetStorageId);
|
|
593
|
-
// Only apply changesets from other nodes, we don't want to overwrite
|
|
594
|
-
// any changes we have made to local entity storage
|
|
595
|
-
if (!core.Is.empty(syncChangeset) && syncChangeset.nodeIdentity !== this._nodeIdentity) {
|
|
596
|
-
await this.applyChangeset(syncChangeset);
|
|
597
|
-
}
|
|
598
|
-
return syncChangeset;
|
|
599
|
-
}
|
|
600
|
-
/**
|
|
601
|
-
* Apply a sync changeset.
|
|
602
|
-
* @param syncChangeset The sync changeset to apply.
|
|
603
|
-
* @returns Nothing.
|
|
604
|
-
*/
|
|
605
|
-
async applyChangeset(syncChangeset) {
|
|
606
|
-
if (core.Is.arrayValue(syncChangeset.changes)) {
|
|
607
|
-
for (const change of syncChangeset.changes) {
|
|
608
|
-
await this._loggingComponent?.log({
|
|
609
|
-
level: "info",
|
|
610
|
-
source: this.CLASS_NAME,
|
|
611
|
-
message: "changeSetApplyingChange",
|
|
612
|
-
data: {
|
|
613
|
-
operation: change.operation,
|
|
614
|
-
id: change.id
|
|
615
|
-
}
|
|
616
|
-
});
|
|
617
|
-
switch (change.operation) {
|
|
618
|
-
case synchronisedStorageModels.SyncChangeOperation.Set:
|
|
619
|
-
if (!core.Is.empty(change.entity)) {
|
|
620
|
-
// The id was stripped from the entity as it is part of the operation
|
|
621
|
-
// we make sure we reinstate it in the publish
|
|
622
|
-
// Also the node identity was stripped when stored in the changeset
|
|
623
|
-
// as the changeset is signed with the node identity.
|
|
624
|
-
// so we need to restore it here.
|
|
625
|
-
await this._eventBusComponent.publish(synchronisedStorageModels.SynchronisedStorageTopics.RemoteItemSet, {
|
|
626
|
-
storageKey: syncChangeset.storageKey,
|
|
627
|
-
entity: {
|
|
628
|
-
...change.entity,
|
|
629
|
-
id: change.id,
|
|
630
|
-
nodeIdentity: syncChangeset.nodeIdentity
|
|
631
|
-
}
|
|
632
|
-
});
|
|
633
|
-
}
|
|
634
|
-
break;
|
|
635
|
-
case synchronisedStorageModels.SyncChangeOperation.Delete:
|
|
636
|
-
if (!core.Is.empty(change.id)) {
|
|
637
|
-
await this._eventBusComponent.publish(synchronisedStorageModels.SynchronisedStorageTopics.RemoteItemRemove, {
|
|
638
|
-
storageKey: syncChangeset.storageKey,
|
|
639
|
-
id: change.id,
|
|
640
|
-
nodeIdentity: syncChangeset.nodeIdentity
|
|
641
|
-
});
|
|
642
|
-
}
|
|
643
|
-
break;
|
|
644
|
-
}
|
|
645
|
-
}
|
|
646
|
-
}
|
|
647
|
-
}
|
|
648
|
-
/**
|
|
649
|
-
* Store the changeset.
|
|
650
|
-
* @param syncChangeSet The sync change set to store.
|
|
651
|
-
* @returns The id of the change set.
|
|
652
|
-
*/
|
|
653
|
-
async storeChangeSet(syncChangeSet) {
|
|
654
|
-
await this._loggingComponent?.log({
|
|
655
|
-
level: "info",
|
|
656
|
-
source: this.CLASS_NAME,
|
|
657
|
-
message: "changeSetStoring",
|
|
658
|
-
data: {
|
|
659
|
-
id: syncChangeSet.id
|
|
660
|
-
}
|
|
661
|
-
});
|
|
662
|
-
return this._blobStorageHelper.saveBlob(syncChangeSet);
|
|
663
|
-
}
|
|
664
|
-
/**
|
|
665
|
-
* Verify the proof of a sync changeset.
|
|
666
|
-
* @param syncChangeset The sync changeset to verify.
|
|
667
|
-
* @returns True if the proof is valid, false otherwise.
|
|
668
|
-
*/
|
|
669
|
-
async verifyChangesetProof(syncChangeset) {
|
|
670
|
-
if (core.Is.empty(syncChangeset.proof)) {
|
|
671
|
-
await this._loggingComponent?.log({
|
|
672
|
-
level: "info",
|
|
673
|
-
source: this.CLASS_NAME,
|
|
674
|
-
message: "verifyChangeSetProofMissing",
|
|
675
|
-
data: {
|
|
676
|
-
snapshotId: syncChangeset.id
|
|
677
|
-
}
|
|
678
|
-
});
|
|
679
|
-
return false;
|
|
680
|
-
}
|
|
681
|
-
// If the proof or verification method is missing, the proof is invalid
|
|
682
|
-
const verificationMethod = syncChangeset.proof?.verificationMethod;
|
|
683
|
-
if (!core.Is.stringValue(verificationMethod)) {
|
|
684
|
-
await this._loggingComponent?.log({
|
|
685
|
-
level: "error",
|
|
686
|
-
source: this.CLASS_NAME,
|
|
687
|
-
message: "verifyChangeSetProofMissing",
|
|
688
|
-
data: {
|
|
689
|
-
id: syncChangeset.id
|
|
690
|
-
}
|
|
691
|
-
});
|
|
692
|
-
}
|
|
693
|
-
// Parse the verification method and extract the node identity
|
|
694
|
-
// this should match the node identity of the changeset
|
|
695
|
-
// otherwise you could sign a changeset for another node
|
|
696
|
-
const changeSetNodeIdentity = identityModels.DocumentHelper.parseId(verificationMethod ?? "");
|
|
697
|
-
if (changeSetNodeIdentity.id !== syncChangeset.nodeIdentity) {
|
|
698
|
-
await this._loggingComponent?.log({
|
|
699
|
-
level: "error",
|
|
700
|
-
source: this.CLASS_NAME,
|
|
701
|
-
message: "verifyChangeSetProofNodeIdentityMismatch",
|
|
702
|
-
data: {
|
|
703
|
-
id: syncChangeset.id
|
|
704
|
-
}
|
|
705
|
-
});
|
|
706
|
-
}
|
|
707
|
-
const changeSetWithoutProof = core.ObjectHelper.clone(syncChangeset);
|
|
708
|
-
delete changeSetWithoutProof.proof;
|
|
709
|
-
const isValid = await this._identityConnector.verifyProof(changeSetWithoutProof, syncChangeset.proof);
|
|
710
|
-
if (!isValid) {
|
|
711
|
-
await this._loggingComponent?.log({
|
|
712
|
-
level: "error",
|
|
713
|
-
source: this.CLASS_NAME,
|
|
714
|
-
message: "verifyChangeSetProofInvalid",
|
|
715
|
-
data: {
|
|
716
|
-
id: syncChangeset.id
|
|
717
|
-
}
|
|
718
|
-
});
|
|
719
|
-
}
|
|
720
|
-
else {
|
|
721
|
-
await this._loggingComponent?.log({
|
|
722
|
-
level: "error",
|
|
723
|
-
source: this.CLASS_NAME,
|
|
724
|
-
message: "verifyChangeSetProofValid",
|
|
725
|
-
data: {
|
|
726
|
-
id: syncChangeset.id
|
|
727
|
-
}
|
|
728
|
-
});
|
|
729
|
-
}
|
|
730
|
-
return isValid;
|
|
731
|
-
}
|
|
732
|
-
/**
|
|
733
|
-
* Create the proof of a sync change set.
|
|
734
|
-
* @param syncChangeset The sync changeset to create the proof for.
|
|
735
|
-
* @returns The proof.
|
|
736
|
-
*/
|
|
737
|
-
async createChangeSetProof(syncChangeset) {
|
|
738
|
-
core.Guards.stringValue(this.CLASS_NAME, "nodeIdentity", this._nodeIdentity);
|
|
739
|
-
const changeSetWithoutProof = core.ObjectHelper.clone(syncChangeset);
|
|
740
|
-
delete changeSetWithoutProof.proof;
|
|
741
|
-
const proof = await this._identityConnector.createProof(this._nodeIdentity, identityModels.DocumentHelper.joinId(this._nodeIdentity, this._decentralisedStorageMethodId), standardsW3cDid.ProofTypes.DataIntegrityProof, changeSetWithoutProof);
|
|
742
|
-
await this._loggingComponent?.log({
|
|
743
|
-
level: "info",
|
|
744
|
-
source: this.CLASS_NAME,
|
|
745
|
-
message: "createdChangeSetProof",
|
|
746
|
-
data: {
|
|
747
|
-
id: syncChangeset.id,
|
|
748
|
-
...proof
|
|
749
|
-
}
|
|
750
|
-
});
|
|
751
|
-
return proof;
|
|
752
|
-
}
|
|
753
|
-
/**
|
|
754
|
-
* Copy a change set.
|
|
755
|
-
* @param syncChangeSet The sync changeset to copy.
|
|
756
|
-
* @returns The id of the updated change set.
|
|
757
|
-
*/
|
|
758
|
-
async copyChangeset(syncChangeSet) {
|
|
759
|
-
if (core.Is.stringValue(this._nodeIdentity)) {
|
|
760
|
-
const verified = await this.verifyChangesetProof(syncChangeSet);
|
|
761
|
-
if (verified) {
|
|
762
|
-
await this._loggingComponent?.log({
|
|
763
|
-
level: "info",
|
|
764
|
-
source: this.CLASS_NAME,
|
|
765
|
-
message: "copyChangeSet",
|
|
766
|
-
data: {
|
|
767
|
-
changeSetStorageId: syncChangeSet.id
|
|
768
|
-
}
|
|
769
|
-
});
|
|
770
|
-
// Allocate a new id to the changeset copy and re-create a proof using this nodes identity
|
|
771
|
-
const copy = core.ObjectHelper.clone(syncChangeSet);
|
|
772
|
-
copy.id = core.Converter.bytesToHex(core.RandomHelper.generate(32));
|
|
773
|
-
copy.proof = await this.createChangeSetProof(copy);
|
|
774
|
-
// Store the copy
|
|
775
|
-
return {
|
|
776
|
-
syncChangeSet: copy,
|
|
777
|
-
changeSetStorageId: await this.storeChangeSet(copy)
|
|
778
|
-
};
|
|
779
|
-
}
|
|
780
|
-
}
|
|
781
|
-
}
|
|
782
|
-
/**
|
|
783
|
-
* Reset the storage for a given storage key.
|
|
784
|
-
* @param storageKey The key of the storage to reset.
|
|
785
|
-
* @param resetMode The reset mode, this will use the nodeIdentity in the entities to determine which are local/remote.
|
|
786
|
-
* @returns Nothing.
|
|
787
|
-
*/
|
|
788
|
-
async reset(storageKey, resetMode) {
|
|
789
|
-
// If we are applying a consolidation we need to reset the local db
|
|
790
|
-
// but keep any entries from the local node, as they might have been updated
|
|
791
|
-
await this._loggingComponent?.log({
|
|
792
|
-
level: "info",
|
|
793
|
-
source: this.CLASS_NAME,
|
|
794
|
-
message: "storageReset",
|
|
795
|
-
data: {
|
|
796
|
-
storageKey
|
|
797
|
-
}
|
|
798
|
-
});
|
|
799
|
-
await this._eventBusComponent.publish(synchronisedStorageModels.SynchronisedStorageTopics.Reset, {
|
|
800
|
-
storageKey,
|
|
801
|
-
resetMode
|
|
802
|
-
});
|
|
803
|
-
}
|
|
804
|
-
}
|
|
805
|
-
|
|
806
|
-
// Copyright 2024 IOTA Stiftung.
|
|
807
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
808
|
-
const SYNC_STATE_VERSION = "1";
|
|
809
|
-
const SYNC_POINTER_STORE_VERSION = "1";
|
|
810
|
-
const SYNC_SNAPSHOT_VERSION = "1";
|
|
811
|
-
|
|
812
|
-
// Copyright 2024 IOTA Stiftung.
|
|
813
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
814
|
-
/**
|
|
815
|
-
* Class for performing entity storage operations in decentralised storage.
|
|
816
|
-
*/
|
|
817
|
-
class LocalSyncStateHelper {
|
|
818
|
-
/**
|
|
819
|
-
* Runtime name for the class.
|
|
820
|
-
*/
|
|
821
|
-
CLASS_NAME = "LocalSyncStateHelper";
|
|
822
|
-
/**
|
|
823
|
-
* The logging component to use for logging.
|
|
824
|
-
* @internal
|
|
825
|
-
*/
|
|
826
|
-
_loggingComponent;
|
|
827
|
-
/**
|
|
828
|
-
* The storage connector for the sync snapshot entries.
|
|
829
|
-
* @internal
|
|
830
|
-
*/
|
|
831
|
-
_snapshotEntryEntityStorage;
|
|
832
|
-
/**
|
|
833
|
-
* The change set helper to use for applying changesets.
|
|
834
|
-
* @internal
|
|
835
|
-
*/
|
|
836
|
-
_changeSetHelper;
|
|
837
|
-
/**
|
|
838
|
-
* Create a new instance of LocalSyncStateHelper.
|
|
839
|
-
* @param loggingComponent The logging connector to use for logging.
|
|
840
|
-
* @param snapshotEntryEntityStorage The storage connector for the sync snapshot entries.
|
|
841
|
-
* @param changeSetHelper The change set helper to use for applying changesets.
|
|
842
|
-
*/
|
|
843
|
-
constructor(loggingComponent, snapshotEntryEntityStorage, changeSetHelper) {
|
|
844
|
-
this._loggingComponent = loggingComponent;
|
|
845
|
-
this._snapshotEntryEntityStorage = snapshotEntryEntityStorage;
|
|
846
|
-
this._changeSetHelper = changeSetHelper;
|
|
847
|
-
}
|
|
848
|
-
/**
|
|
849
|
-
* Add a new change to the local snapshot.
|
|
850
|
-
* @param storageKey The storage key of the snapshot to add the change for.
|
|
851
|
-
* @param operation The operation to perform.
|
|
852
|
-
* @param id The id of the entity to add the change for.
|
|
853
|
-
* @returns Nothing.
|
|
854
|
-
*/
|
|
855
|
-
async addLocalChange(storageKey, operation, id) {
|
|
856
|
-
await this._loggingComponent?.log({
|
|
857
|
-
level: "info",
|
|
858
|
-
source: this.CLASS_NAME,
|
|
859
|
-
message: "addLocalChange",
|
|
860
|
-
data: {
|
|
861
|
-
storageKey,
|
|
862
|
-
operation,
|
|
863
|
-
id
|
|
864
|
-
}
|
|
865
|
-
});
|
|
866
|
-
const localChangeSnapshots = await this.getSnapshots(storageKey, true);
|
|
867
|
-
if (localChangeSnapshots.length > 0) {
|
|
868
|
-
const localChangeSnapshot = localChangeSnapshots[0];
|
|
869
|
-
localChangeSnapshot.changes ??= [];
|
|
870
|
-
// If we already have a change for this id we are
|
|
871
|
-
// about to supersede it, we remove the previous change
|
|
872
|
-
// to avoid having multiple changes for the same id
|
|
873
|
-
const previousChangeIndex = localChangeSnapshot.changes.findIndex(change => change.id === id);
|
|
874
|
-
if (previousChangeIndex !== -1) {
|
|
875
|
-
localChangeSnapshot.changes.splice(previousChangeIndex, 1);
|
|
876
|
-
}
|
|
877
|
-
// If we already have changes from previous updates
|
|
878
|
-
// then make sure we update the dateModified, otherwise
|
|
879
|
-
// we assume this is the first change and setting modified is not necessary
|
|
880
|
-
if (localChangeSnapshot.changes.length > 0) {
|
|
881
|
-
localChangeSnapshot.dateModified = new Date(Date.now()).toISOString();
|
|
882
|
-
}
|
|
883
|
-
localChangeSnapshot.changes.push({ operation, id });
|
|
884
|
-
await this.setLocalChangeSnapshot(localChangeSnapshot);
|
|
885
|
-
}
|
|
886
|
-
}
|
|
887
|
-
/**
|
|
888
|
-
* Get the snapshot which contains just the changes for this node.
|
|
889
|
-
* @param storageKey The storage key of the snapshot to get.
|
|
890
|
-
* @param isLocal Whether to get the local snapshot or not.
|
|
891
|
-
* @returns The local snapshot entry.
|
|
892
|
-
*/
|
|
893
|
-
async getSnapshots(storageKey, isLocal) {
|
|
894
|
-
await this._loggingComponent?.log({
|
|
895
|
-
level: "info",
|
|
896
|
-
source: this.CLASS_NAME,
|
|
897
|
-
message: "getSnapshots",
|
|
898
|
-
data: {
|
|
899
|
-
storageKey
|
|
900
|
-
}
|
|
901
|
-
});
|
|
902
|
-
const queryResult = await this._snapshotEntryEntityStorage.query({
|
|
903
|
-
conditions: [
|
|
904
|
-
{
|
|
905
|
-
property: "isLocal",
|
|
906
|
-
value: isLocal,
|
|
907
|
-
comparison: entity.ComparisonOperator.Equals
|
|
908
|
-
},
|
|
909
|
-
{
|
|
910
|
-
property: "storageKey",
|
|
911
|
-
value: storageKey,
|
|
912
|
-
comparison: entity.ComparisonOperator.Equals
|
|
913
|
-
}
|
|
914
|
-
]
|
|
915
|
-
});
|
|
916
|
-
if (queryResult.entities.length > 0) {
|
|
917
|
-
await this._loggingComponent?.log({
|
|
918
|
-
level: "info",
|
|
919
|
-
source: this.CLASS_NAME,
|
|
920
|
-
message: "getSnapshotsExists",
|
|
921
|
-
data: {
|
|
922
|
-
storageKey
|
|
923
|
-
}
|
|
924
|
-
});
|
|
925
|
-
return queryResult.entities;
|
|
926
|
-
}
|
|
927
|
-
await this._loggingComponent?.log({
|
|
928
|
-
level: "info",
|
|
929
|
-
source: this.CLASS_NAME,
|
|
930
|
-
message: "getSnapshotsDoesNotExist",
|
|
931
|
-
data: {
|
|
932
|
-
storageKey
|
|
933
|
-
}
|
|
934
|
-
});
|
|
935
|
-
const now = new Date(Date.now()).toISOString();
|
|
936
|
-
return [
|
|
937
|
-
{
|
|
938
|
-
version: SYNC_SNAPSHOT_VERSION,
|
|
939
|
-
id: core.Converter.bytesToHex(core.RandomHelper.generate(32)),
|
|
940
|
-
storageKey,
|
|
941
|
-
dateCreated: now,
|
|
942
|
-
dateModified: now,
|
|
943
|
-
changeSetStorageIds: [],
|
|
944
|
-
isLocal,
|
|
945
|
-
isConsolidated: false,
|
|
946
|
-
epoch: 0
|
|
947
|
-
}
|
|
948
|
-
];
|
|
949
|
-
}
|
|
950
|
-
/**
|
|
951
|
-
* Set the current local snapshot with changes for this node.
|
|
952
|
-
* @param localChangeSnapshot The local change snapshot to set.
|
|
953
|
-
* @returns Nothing.
|
|
954
|
-
*/
|
|
955
|
-
async setLocalChangeSnapshot(localChangeSnapshot) {
|
|
956
|
-
await this._loggingComponent?.log({
|
|
957
|
-
level: "info",
|
|
958
|
-
source: this.CLASS_NAME,
|
|
959
|
-
message: "setLocalChangeSnapshot",
|
|
960
|
-
data: {
|
|
961
|
-
storageKey: localChangeSnapshot.storageKey
|
|
962
|
-
}
|
|
963
|
-
});
|
|
964
|
-
await this._snapshotEntryEntityStorage.set(localChangeSnapshot);
|
|
965
|
-
}
|
|
966
|
-
/**
|
|
967
|
-
* Get the current local snapshot with the changes for this node.
|
|
968
|
-
* @param localChangeSnapshot The local change snapshot to remove.
|
|
969
|
-
* @returns Nothing.
|
|
970
|
-
*/
|
|
971
|
-
async removeLocalChangeSnapshot(localChangeSnapshot) {
|
|
972
|
-
await this._loggingComponent?.log({
|
|
973
|
-
level: "info",
|
|
974
|
-
source: this.CLASS_NAME,
|
|
975
|
-
message: "removeLocalChangeSnapshot",
|
|
976
|
-
data: {
|
|
977
|
-
snapshotId: localChangeSnapshot.id
|
|
978
|
-
}
|
|
979
|
-
});
|
|
980
|
-
await this._snapshotEntryEntityStorage.remove(localChangeSnapshot.id);
|
|
981
|
-
}
|
|
982
|
-
/**
|
|
983
|
-
* Apply a sync state to the local node.
|
|
984
|
-
* @param storageKey The storage key of the snapshot to sync with.
|
|
985
|
-
* @param syncState The sync state to sync with.
|
|
986
|
-
* @returns Nothing.
|
|
987
|
-
*/
|
|
988
|
-
async applySyncState(storageKey, syncState) {
|
|
989
|
-
await this._loggingComponent?.log({
|
|
990
|
-
level: "info",
|
|
991
|
-
source: this.CLASS_NAME,
|
|
992
|
-
message: "applySyncState",
|
|
993
|
-
data: {
|
|
994
|
-
snapshotCount: syncState.snapshots.length
|
|
995
|
-
}
|
|
996
|
-
});
|
|
997
|
-
// Get all the existing snapshots that we have processed previously
|
|
998
|
-
let existingSnapshots = await this.getSnapshots(storageKey, false);
|
|
999
|
-
// Sort from newest to oldest
|
|
1000
|
-
existingSnapshots = existingSnapshots.sort((a, b) => new Date(b.dateCreated).getTime() - new Date(a.dateCreated).getTime());
|
|
1001
|
-
// Sort from newest to oldest
|
|
1002
|
-
const syncStateSnapshots = syncState.snapshots.sort((a, b) => new Date(b.dateCreated).getTime() - new Date(a.dateCreated).getTime());
|
|
1003
|
-
// Get the newest epoch from the local storage
|
|
1004
|
-
const newestExistingEpoch = existingSnapshots[0]?.epoch ?? 0;
|
|
1005
|
-
// Get the oldest epoch from the remote storage
|
|
1006
|
-
const oldestSyncStateEpoch = syncStateSnapshots[syncStateSnapshots.length - 1]?.epoch ?? 0;
|
|
1007
|
-
// If there is a gap between the largest epoch we have locally
|
|
1008
|
-
// and the smallest epoch we have remotely then we have missed
|
|
1009
|
-
// data so we need to perform a full sync
|
|
1010
|
-
const hasEpochGap = newestExistingEpoch + 1 < oldestSyncStateEpoch;
|
|
1011
|
-
// If we have an epoch gap or no existing snapshots then we need to apply
|
|
1012
|
-
// a full sync from a consolidation
|
|
1013
|
-
if (!existingSnapshots.some(s => s.isConsolidated) || hasEpochGap) {
|
|
1014
|
-
await this._loggingComponent?.log({
|
|
1015
|
-
level: "info",
|
|
1016
|
-
source: this.CLASS_NAME,
|
|
1017
|
-
message: "applySnapshotNoExisting",
|
|
1018
|
-
data: {
|
|
1019
|
-
storageKey
|
|
1020
|
-
}
|
|
1021
|
-
});
|
|
1022
|
-
const mostRecentConsolidation = syncStateSnapshots.findIndex(snapshot => snapshot.isConsolidated);
|
|
1023
|
-
if (mostRecentConsolidation !== -1) {
|
|
1024
|
-
// We found the most recent consolidated snapshot, we can use it
|
|
1025
|
-
await this._loggingComponent?.log({
|
|
1026
|
-
level: "info",
|
|
1027
|
-
source: this.CLASS_NAME,
|
|
1028
|
-
message: "applySnapshotFoundConsolidated",
|
|
1029
|
-
data: {
|
|
1030
|
-
storageKey,
|
|
1031
|
-
snapshotId: syncStateSnapshots[mostRecentConsolidation].id
|
|
1032
|
-
}
|
|
1033
|
-
});
|
|
1034
|
-
// We need to reset the entity storage and remove all the remote items
|
|
1035
|
-
// so that we use just the ones from the consolidation, since
|
|
1036
|
-
// we don't have any existing there shouldn't be any remote entries
|
|
1037
|
-
// but we reset nonetheless
|
|
1038
|
-
await this._changeSetHelper.reset(storageKey, synchronisedStorageModels.SyncNodeIdentityMode.Remote);
|
|
1039
|
-
// We need to process the most recent consolidation and all changes
|
|
1040
|
-
// that were made since then, from newest to oldest (so newer changes override older ones)
|
|
1041
|
-
// Process snapshots from the consolidation point (most recent) back to the newest
|
|
1042
|
-
for (let i = mostRecentConsolidation; i >= 0; i--) {
|
|
1043
|
-
await this.processNewSnapshots([
|
|
1044
|
-
{
|
|
1045
|
-
...syncStateSnapshots[i],
|
|
1046
|
-
storageKey,
|
|
1047
|
-
isLocal: false
|
|
1048
|
-
}
|
|
1049
|
-
]);
|
|
1050
|
-
}
|
|
1051
|
-
}
|
|
1052
|
-
else {
|
|
1053
|
-
await this._loggingComponent?.log({
|
|
1054
|
-
level: "info",
|
|
1055
|
-
source: this.CLASS_NAME,
|
|
1056
|
-
message: "applySnapshotNoConsolidated",
|
|
1057
|
-
data: {
|
|
1058
|
-
storageKey
|
|
1059
|
-
}
|
|
1060
|
-
});
|
|
1061
|
-
}
|
|
1062
|
-
}
|
|
1063
|
-
else {
|
|
1064
|
-
// We have existing consolidated remote snapshots, so we can assume that we have
|
|
1065
|
-
// applied at least one consolidation snapshot, in this case we need to look at the changes since
|
|
1066
|
-
// then and apply them if we haven't already
|
|
1067
|
-
// We don't need to apply any additional consolidated snapshots, just the changesets
|
|
1068
|
-
// Create a lookup map for the existing snapshots
|
|
1069
|
-
const existingSnapshotsMap = {};
|
|
1070
|
-
for (const snapshot of existingSnapshots) {
|
|
1071
|
-
existingSnapshotsMap[snapshot.id] = snapshot;
|
|
1072
|
-
}
|
|
1073
|
-
const newSnapshots = [];
|
|
1074
|
-
const modifiedSnapshots = [];
|
|
1075
|
-
const referencedExistingSnapshots = Object.keys(existingSnapshotsMap);
|
|
1076
|
-
let completedProcessing = false;
|
|
1077
|
-
for (const snapshot of syncStateSnapshots) {
|
|
1078
|
-
await this._loggingComponent?.log({
|
|
1079
|
-
level: "info",
|
|
1080
|
-
source: this.CLASS_NAME,
|
|
1081
|
-
message: "applySnapshot",
|
|
1082
|
-
data: {
|
|
1083
|
-
snapshotId: snapshot.id,
|
|
1084
|
-
dateCreated: new Date(snapshot.dateCreated).toISOString()
|
|
1085
|
-
}
|
|
1086
|
-
});
|
|
1087
|
-
// See if we have the snapshot stored locally
|
|
1088
|
-
const currentSnapshot = existingSnapshotsMap[snapshot.id];
|
|
1089
|
-
// As we are referencing an existing snapshot, we need to remove it from the list
|
|
1090
|
-
// to allow us to cleanup any unreferenced snapshots later
|
|
1091
|
-
const idx = referencedExistingSnapshots.indexOf(snapshot.id);
|
|
1092
|
-
if (idx !== -1) {
|
|
1093
|
-
referencedExistingSnapshots.splice(idx, 1);
|
|
1094
|
-
}
|
|
1095
|
-
// No need to apply consolidated snapshots
|
|
1096
|
-
if (!snapshot.isConsolidated && !completedProcessing) {
|
|
1097
|
-
const updatedSnapshot = {
|
|
1098
|
-
...snapshot,
|
|
1099
|
-
storageKey,
|
|
1100
|
-
isLocal: false
|
|
1101
|
-
};
|
|
1102
|
-
if (core.Is.empty(currentSnapshot)) {
|
|
1103
|
-
// We don't have the snapshot locally, so we need to process all of it
|
|
1104
|
-
newSnapshots.push(updatedSnapshot);
|
|
1105
|
-
}
|
|
1106
|
-
else if (currentSnapshot.dateModified !== snapshot.dateModified) {
|
|
1107
|
-
// If the local snapshot has a different dateModified, we need to update it
|
|
1108
|
-
modifiedSnapshots.push({
|
|
1109
|
-
currentSnapshot,
|
|
1110
|
-
updatedSnapshot
|
|
1111
|
-
});
|
|
1112
|
-
}
|
|
1113
|
-
else {
|
|
1114
|
-
// we sorted the snapshots from newest to oldest, so if we found a local snapshot
|
|
1115
|
-
// with the same dateModified as the remote snapshot, we can stop processing further
|
|
1116
|
-
completedProcessing = true;
|
|
1117
|
-
}
|
|
1118
|
-
}
|
|
1119
|
-
}
|
|
1120
|
-
// We reverse the order of the snapshots to process them from oldest to newest
|
|
1121
|
-
// because we want to apply the changes in the order they were created
|
|
1122
|
-
await this.processModifiedSnapshots(modifiedSnapshots.reverse());
|
|
1123
|
-
await this.processNewSnapshots(newSnapshots.reverse());
|
|
1124
|
-
// Any ids remaining in this list are no longer referenced in the global state
|
|
1125
|
-
// so we should remove them from the local storage as they will never be updated again
|
|
1126
|
-
for (const referencedSnapshotId of referencedExistingSnapshots) {
|
|
1127
|
-
await this._snapshotEntryEntityStorage.remove(referencedSnapshotId);
|
|
1128
|
-
}
|
|
1129
|
-
}
|
|
1130
|
-
}
|
|
1131
|
-
/**
|
|
1132
|
-
* Process the modified snapshots and store them in the local storage.
|
|
1133
|
-
* @param modifiedSnapshots The modified snapshots to process.
|
|
1134
|
-
* @returns Nothing.
|
|
1135
|
-
* @internal
|
|
1136
|
-
*/
|
|
1137
|
-
async processModifiedSnapshots(modifiedSnapshots) {
|
|
1138
|
-
for (const modifiedSnapshot of modifiedSnapshots) {
|
|
1139
|
-
await this._loggingComponent?.log({
|
|
1140
|
-
level: "info",
|
|
1141
|
-
source: this.CLASS_NAME,
|
|
1142
|
-
message: "processModifiedSnapshot",
|
|
1143
|
-
data: {
|
|
1144
|
-
snapshotId: modifiedSnapshot.updatedSnapshot.id,
|
|
1145
|
-
localModified: new Date(modifiedSnapshot.currentSnapshot.dateModified ??
|
|
1146
|
-
modifiedSnapshot.currentSnapshot.dateCreated).toISOString(),
|
|
1147
|
-
remoteModified: new Date(modifiedSnapshot.updatedSnapshot.dateModified ??
|
|
1148
|
-
modifiedSnapshot.updatedSnapshot.dateCreated).toISOString()
|
|
1149
|
-
}
|
|
1150
|
-
});
|
|
1151
|
-
const remoteChangeSetStorageIds = modifiedSnapshot.updatedSnapshot.changeSetStorageIds;
|
|
1152
|
-
const localChangeSetStorageIds = modifiedSnapshot.currentSnapshot.changeSetStorageIds ?? [];
|
|
1153
|
-
if (core.Is.arrayValue(remoteChangeSetStorageIds)) {
|
|
1154
|
-
for (const storageId of remoteChangeSetStorageIds) {
|
|
1155
|
-
// Check if the local snapshot does not have the storageId
|
|
1156
|
-
if (!localChangeSetStorageIds.includes(storageId)) {
|
|
1157
|
-
await this._changeSetHelper.getAndApplyChangeset(storageId);
|
|
1158
|
-
}
|
|
1159
|
-
}
|
|
1160
|
-
}
|
|
1161
|
-
await this._snapshotEntryEntityStorage.set(modifiedSnapshot.updatedSnapshot);
|
|
1162
|
-
}
|
|
1163
|
-
}
|
|
1164
|
-
/**
|
|
1165
|
-
* Process the new snapshots and store them in the local storage.
|
|
1166
|
-
* @param newSnapshots The new snapshots to process.
|
|
1167
|
-
* @returns Nothing.
|
|
1168
|
-
* @internal
|
|
1169
|
-
*/
|
|
1170
|
-
async processNewSnapshots(newSnapshots) {
|
|
1171
|
-
for (const newSnapshot of newSnapshots) {
|
|
1172
|
-
await this._loggingComponent?.log({
|
|
1173
|
-
level: "info",
|
|
1174
|
-
source: this.CLASS_NAME,
|
|
1175
|
-
message: "processNewSnapshot",
|
|
1176
|
-
data: {
|
|
1177
|
-
snapshotId: newSnapshot.id,
|
|
1178
|
-
dateCreated: newSnapshot.dateCreated
|
|
1179
|
-
}
|
|
1180
|
-
});
|
|
1181
|
-
const newSnapshotChangeSetStorageIds = newSnapshot.changeSetStorageIds ?? [];
|
|
1182
|
-
if (core.Is.arrayValue(newSnapshotChangeSetStorageIds)) {
|
|
1183
|
-
for (const storageId of newSnapshotChangeSetStorageIds) {
|
|
1184
|
-
await this._changeSetHelper.getAndApplyChangeset(storageId);
|
|
1185
|
-
}
|
|
1186
|
-
}
|
|
1187
|
-
await this._snapshotEntryEntityStorage.set(newSnapshot);
|
|
1188
|
-
}
|
|
1189
|
-
}
|
|
1190
|
-
}
|
|
1191
|
-
|
|
1192
|
-
// Copyright 2024 IOTA Stiftung.
|
|
1193
|
-
// SPDX-License-Identifier: Apache-2.0.
|
|
1194
|
-
/**
|
|
1195
|
-
* Class for performing entity storage operations in decentralised storage.
|
|
1196
|
-
*/
|
|
1197
|
-
class RemoteSyncStateHelper {
|
|
1198
|
-
/**
|
|
1199
|
-
* Runtime name for the class.
|
|
1200
|
-
*/
|
|
1201
|
-
CLASS_NAME = "RemoteSyncStateHelper";
|
|
1202
|
-
/**
|
|
1203
|
-
* The logging component to use for logging.
|
|
1204
|
-
* @internal
|
|
1205
|
-
*/
|
|
1206
|
-
_loggingComponent;
|
|
1207
|
-
/**
|
|
1208
|
-
* The event bus component.
|
|
1209
|
-
* @internal
|
|
1210
|
-
*/
|
|
1211
|
-
_eventBusComponent;
|
|
1212
|
-
/**
|
|
1213
|
-
* The blob storage helper.
|
|
1214
|
-
* @internal
|
|
1215
|
-
     */
    _blobStorageHelper;
    /**
     * The verifiable storage connector to use for storing sync pointers.
     * @internal
     */
    _verifiableSyncPointerStorageConnector;
    /**
     * The change set helper to use for applying changesets.
     * @internal
     */
    _changeSetHelper;
    /**
     * The storage ids of the batch responses for each storage key.
     * @internal
     */
    _batchResponseStorageIds;
    /**
     * The full changes for each storage key.
     * @internal
     */
    _populateFullChanges;
    /**
     * The synchronised storage key to use for verified storage operations.
     * @internal
     */
    _synchronisedStorageKey;
    /**
     * The identity of the node that is performing the update.
     * @internal
     */
    _nodeIdentity;
    /**
     * Whether the node is trusted or not.
     * @internal
     */
    _isTrustedNode;
    /**
     * Maximum number of consolidations to keep in storage.
     * @internal
     */
    _maxConsolidations;
    /**
     * Create a new instance of RemoteSyncStateHelper.
     * @param loggingComponent The logging component to use for logging.
     * @param eventBusComponent The event bus component to use for events.
     * @param verifiableSyncPointerStorageConnector The verifiable storage connector to use for storing sync pointers.
     * @param blobStorageHelper The blob storage helper to use for remote sync states.
     * @param changeSetHelper The change set helper to use for managing changesets.
     * @param isTrustedNode Whether the node is trusted or not.
     * @param maxConsolidations The maximum number of consolidations to keep in storage.
     */
    constructor(loggingComponent, eventBusComponent, verifiableSyncPointerStorageConnector, blobStorageHelper, changeSetHelper, isTrustedNode, maxConsolidations) {
        this._loggingComponent = loggingComponent;
        this._eventBusComponent = eventBusComponent;
        this._verifiableSyncPointerStorageConnector = verifiableSyncPointerStorageConnector;
        this._changeSetHelper = changeSetHelper;
        this._blobStorageHelper = blobStorageHelper;
        this._isTrustedNode = isTrustedNode;
        this._maxConsolidations = maxConsolidations;
        this._batchResponseStorageIds = {};
        this._populateFullChanges = {};
        this._eventBusComponent.subscribe(synchronisedStorageModels.SynchronisedStorageTopics.BatchResponse, async (response) => {
            await this.handleBatchResponse(response.data);
        });
        this._eventBusComponent.subscribe(synchronisedStorageModels.SynchronisedStorageTopics.LocalItemResponse, async (response) => {
            await this.handleLocalItemResponse(response.data);
        });
    }
    /**
     * Set the node identity to use for signing changesets.
     * @param nodeIdentity The identity of the node that is performing the update.
     */
    setNodeIdentity(nodeIdentity) {
        this._nodeIdentity = nodeIdentity;
    }
    /**
     * Set the synchronised storage key.
     * @param synchronisedStorageKey The synchronised storage key to use.
     */
    setSynchronisedStorageKey(synchronisedStorageKey) {
        this._synchronisedStorageKey = synchronisedStorageKey;
    }
    /**
     * Build a changeset.
     * @param storageKey The storage key of the change set.
     * @param changes The changes to apply.
     * @param completeCallback The callback to call when the changeset is created and stored.
     * @returns The storage id of the change set if created.
     */
    async buildChangeSet(storageKey, changes, completeCallback) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "buildingChangeSet",
            data: {
                storageKey,
                changeCount: changes.length
            }
        });
        this._populateFullChanges[storageKey] = {
            changes,
            entities: {},
            requestIds: [],
            completeCallback: async () => this.finaliseFullChanges(storageKey, completeCallback)
        };
        const setChanges = changes.filter(c => c.operation === synchronisedStorageModels.SyncChangeOperation.Set);
        if (setChanges.length === 0) {
            // If we don't need to request any full details, we can just call the complete callback
            await this.finaliseFullChanges(storageKey, completeCallback);
        }
        else {
            // Otherwise we need to request the full details for each change
            this._populateFullChanges[storageKey].requestIds = setChanges.map(change => change.id);
            // Once all the requests are handled the callback will be called
            for (const change of setChanges) {
                // Create a request for each change to populate the full details
                await this._loggingComponent?.log({
                    level: "info",
                    source: this.CLASS_NAME,
                    message: "createChangeSetRequestingItem",
                    data: {
                        storageKey,
                        id: change.id
                    }
                });
                this._eventBusComponent.publish(synchronisedStorageModels.SynchronisedStorageTopics.LocalItemRequest, {
                    storageKey,
                    id: change.id
                });
            }
        }
    }
    /**
     * Finalise the full details for the sync change set.
     * @param storageKey The storage key of the change set.
     * @param completeCallback The callback to call when the changeset is populated.
     * @returns Nothing.
     */
    async finaliseFullChanges(storageKey, completeCallback) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "finalisingSyncChanges",
            data: {
                storageKey
            }
        });
        if (core.Is.stringValue(this._nodeIdentity)) {
            const changes = this._populateFullChanges[storageKey].changes;
            for (const change of changes) {
                change.entity = this._populateFullChanges[storageKey].entities[change.id] ?? change.entity;
                if (change.operation === synchronisedStorageModels.SyncChangeOperation.Set && core.Is.objectValue(change.entity)) {
                    // Remove the id from the entity as this is stored in the operation
                    // and will be reinstated when the changeset is reconstituted
                    core.ObjectHelper.propertyDelete(change.entity, "id");
                    // Remove the node identity as the changeset has this stored at the top level
                    // and we do not want to store it in the change itself to reduce redundancy
                    core.ObjectHelper.propertyDelete(change.entity, "nodeIdentity");
                }
            }
            const now = new Date(Date.now()).toISOString();
            const syncChangeSet = {
                id: core.Converter.bytesToHex(core.RandomHelper.generate(32)),
                dateCreated: now,
                dateModified: now,
                storageKey,
                changes,
                nodeIdentity: this._nodeIdentity
            };
            try {
                // And sign it with the node identity
                syncChangeSet.proof = await this._changeSetHelper.createChangeSetProof(syncChangeSet);
                // If this is a trusted node, we also store the changeset
                let changeSetStorageId;
                if (this._isTrustedNode) {
                    changeSetStorageId = await this._changeSetHelper.storeChangeSet(syncChangeSet);
                }
                await completeCallback(syncChangeSet, changeSetStorageId);
            }
            catch (err) {
                await this._loggingComponent?.log({
                    level: "error",
                    source: this.CLASS_NAME,
                    message: "finalisingSyncChangesFailed",
                    data: {
                        storageKey
                    },
                    error: core.BaseError.fromError(err)
                });
                await completeCallback();
            }
        }
        else {
            await completeCallback();
        }
    }
    /**
     * Add a new changeset into the sync state.
     * @param storageKey The storage key of the change set to add.
     * @param changeSetStorageId The id of the change set to add to the current state.
     * @returns Nothing.
     */
    async addChangeSetToSyncState(storageKey, changeSetStorageId) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "addChangeSetToSyncState",
            data: {
                storageKey,
                changeSetStorageId
            }
        });
        // First load the sync pointer store to get the current sync pointer for the storage key
        const syncPointerStore = await this.getVerifiableSyncPointerStore();
        let syncState;
        if (!core.Is.empty(syncPointerStore.syncPointers[storageKey])) {
            syncState = await this.getSyncState(syncPointerStore.syncPointers[storageKey]);
        }
        // No current sync state, so we create a new one
        if (core.Is.empty(syncState)) {
            syncState = { version: SYNC_STATE_VERSION, storageKey, snapshots: [] };
        }
        // Sort the snapshots so the newest snapshot is last in the array
        const sortedSnapshots = syncState.snapshots.sort((a, b) => a.dateCreated.localeCompare(b.dateCreated));
        // Get the current snapshot, if it does not exist we create a new one
        let currentSnapshot = sortedSnapshots[sortedSnapshots.length - 1];
        const currentEpoch = currentSnapshot?.epoch ?? 0;
        const now = new Date(Date.now()).toISOString();
        // If there is no snapshot or the current one is a consolidation
        // we start a new snapshot
        if (core.Is.empty(currentSnapshot) || currentSnapshot.isConsolidated) {
            currentSnapshot = {
                version: SYNC_SNAPSHOT_VERSION,
                id: core.Converter.bytesToHex(core.RandomHelper.generate(32)),
                dateCreated: now,
                dateModified: now,
                isConsolidated: false,
                epoch: currentEpoch + 1,
                changeSetStorageIds: []
            };
            syncState.snapshots.push(currentSnapshot);
        }
        else {
            // Snapshot exists, we update the dateModified
            currentSnapshot.dateModified = now;
        }
        // Add the changeset storage id to the current snapshot
        currentSnapshot.changeSetStorageIds.push(changeSetStorageId);
        // Store the sync state in the blob storage
        syncPointerStore.syncPointers[storageKey] = await this.storeRemoteSyncState(syncState);
        // Store the verifiable sync pointer store in the verifiable storage
        await this.storeVerifiableSyncPointerStore(syncPointerStore);
    }
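    // Editor's note, illustrative only and not part of the published file: starting from an
    // empty sync state, the first call to addChangeSetToSyncState creates a snapshot with
    // epoch 1 whose changeSetStorageIds holds the single new id. Later calls keep appending
    // to that snapshot until the newest snapshot is a consolidation, at which point the next
    // call opens a fresh snapshot with the following epoch number.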
    /**
     * Create a consolidated snapshot for the entire storage.
     * @param storageKey The storage key of the snapshot to create.
     * @param batchSize The batch size to use for consolidation.
     * @returns Nothing.
     */
    async consolidationStart(storageKey, batchSize) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "consolidationStarting"
        });
        // Perform a batch request to start the consolidation
        await this._eventBusComponent.publish(synchronisedStorageModels.SynchronisedStorageTopics.BatchRequest, { storageKey, batchSize, requestMode: synchronisedStorageModels.SyncNodeIdentityMode.All });
    }
    /**
     * Get the sync pointer store.
     * @returns The sync pointer store.
     */
    async getVerifiableSyncPointerStore() {
        if (core.Is.stringValue(this._synchronisedStorageKey)) {
            try {
                await this._loggingComponent?.log({
                    level: "info",
                    source: this.CLASS_NAME,
                    message: "verifiableSyncPointerStoreRetrieving",
                    data: {
                        key: this._synchronisedStorageKey
                    }
                });
                const syncPointerStore = await this._verifiableSyncPointerStorageConnector.get(this._synchronisedStorageKey, { includeData: true });
                if (core.Is.uint8Array(syncPointerStore.data)) {
                    const syncPointer = core.ObjectHelper.fromBytes(syncPointerStore.data);
                    await this._loggingComponent?.log({
                        level: "info",
                        source: this.CLASS_NAME,
                        message: "verifiableSyncPointerStoreRetrieved",
                        data: {
                            key: this._synchronisedStorageKey
                        }
                    });
                    return syncPointer;
                }
            }
            catch (err) {
                if (!core.BaseError.someErrorName(err, core.NotFoundError.CLASS_NAME)) {
                    throw err;
                }
            }
            await this._loggingComponent?.log({
                level: "info",
                source: this.CLASS_NAME,
                message: "verifiableSyncPointerStoreNotFound",
                data: {
                    key: this._synchronisedStorageKey
                }
            });
        }
        // If no sync pointer store exists, we return an empty one
        return {
            version: SYNC_POINTER_STORE_VERSION,
            syncPointers: {}
        };
    }
    /**
     * Store the verifiable sync pointer in the verifiable storage.
     * @param syncPointerStore The sync pointer store to store.
     * @returns Nothing.
     */
    async storeVerifiableSyncPointerStore(syncPointerStore) {
        if (core.Is.stringValue(this._nodeIdentity) && core.Is.stringValue(this._synchronisedStorageKey)) {
            await this._loggingComponent?.log({
                level: "info",
                source: this.CLASS_NAME,
                message: "verifiableSyncPointerStoreStoring",
                data: {
                    key: this._synchronisedStorageKey
                }
            });
            // Store the verifiable sync pointer in the verifiable storage
            await this._verifiableSyncPointerStorageConnector.update(this._nodeIdentity, this._synchronisedStorageKey, core.ObjectHelper.toBytes(syncPointerStore));
        }
    }
    /**
     * Store the remote sync state.
     * @param syncState The sync state to store.
     * @returns The id of the sync state.
     */
    async storeRemoteSyncState(syncState) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "syncStateStoring",
            data: {
                snapshotCount: syncState.snapshots.length
            }
        });
        // Limits the number of consolidations in the list so that we can shrink decentralised
        // storage requirements, sort from newest to oldest so that we can easily find the
        // oldest snapshots to remove.
        const snapshots = syncState.snapshots.sort((a, b) => new Date(a.dateCreated).getTime() - new Date(b.dateCreated).getTime());
        // Find all the consolidation indexes
        const consolidationIndexes = [];
        for (let i = 0; i < snapshots.length; i++) {
            const snapshot = snapshots[i];
            if (snapshot.isConsolidated) {
                consolidationIndexes.push(i);
            }
        }
        if (consolidationIndexes.length > this._maxConsolidations) {
            // Once we have reached the max for consolidations we need to remove
            // all the snapshots, including non consolidated ones, beyond this point
            const toRemove = snapshots.slice(consolidationIndexes[this._maxConsolidations - 1] + 1);
            syncState.snapshots = snapshots.slice(0, consolidationIndexes[this._maxConsolidations - 1] + 1);
            for (const snapshot of toRemove) {
                // We need to remove all the storage ids associated with the snapshot
                if (core.Is.arrayValue(snapshot.changeSetStorageIds)) {
                    for (const storageId of snapshot.changeSetStorageIds) {
                        await this._blobStorageHelper.removeBlob(storageId);
                    }
                }
            }
        }
        return this._blobStorageHelper.saveBlob(syncState);
    }
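    // Editor's note, illustrative only and not part of the published file: the retention rule
    // above keeps everything up to and including the _maxConsolidations-th consolidation in
    // the sorted snapshot list and removes the rest. For example, with _maxConsolidations = 2
    // and three consolidated snapshots present, every snapshot after the second consolidation
    // in sorted order is removed, and the change set blobs referenced by each removed snapshot
    // are deleted before the trimmed state is saved.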
    /**
     * Get the remote sync state.
     * @param syncPointerId The id of the sync pointer to retrieve the state for.
     * @returns The remote sync state.
     */
    async getSyncState(syncPointerId) {
        try {
            await this._loggingComponent?.log({
                level: "info",
                source: this.CLASS_NAME,
                message: "syncStateRetrieving",
                data: {
                    syncPointerId
                }
            });
            const syncState = await this._blobStorageHelper.loadBlob(syncPointerId);
            if (core.Is.object(syncState)) {
                await this._loggingComponent?.log({
                    level: "info",
                    source: this.CLASS_NAME,
                    message: "syncStateRetrieved",
                    data: {
                        syncPointerId,
                        snapshotCount: syncState.snapshots.length
                    }
                });
                return syncState;
            }
        }
        catch (error) {
            await this._loggingComponent?.log({
                level: "warn",
                source: this.CLASS_NAME,
                message: "getSyncStateError",
                data: {
                    syncPointerId
                },
                error: core.BaseError.fromError(error)
            });
        }
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "syncStateNotFound",
            data: {
                syncPointerId
            }
        });
    }
    /**
     * Handle the batch response which is triggered from a consolidation request.
     * @param response The batch response to handle.
     */
    async handleBatchResponse(response) {
        if (core.Is.stringValue(this._nodeIdentity)) {
            const now = new Date(Date.now()).toISOString();
            // Create a new snapshot entry for the current batch
            const syncChangeSet = {
                id: core.Converter.bytesToHex(core.RandomHelper.generate(32)),
                dateCreated: now,
                dateModified: now,
                changes: response.entities.map(change => ({
                    operation: synchronisedStorageModels.SyncChangeOperation.Set,
                    id: change.id
                })),
                storageKey: response.storageKey,
                nodeIdentity: this._nodeIdentity
            };
            // And sign it with the node identity
            syncChangeSet.proof = await this._changeSetHelper.createChangeSetProof(syncChangeSet);
            // Store the changeset in the blob storage
            const changeSetStorageId = await this._changeSetHelper.storeChangeSet(syncChangeSet);
            // Add the changeset storage id to the snapshot ids
            this._batchResponseStorageIds[response.storageKey] ??= [];
            this._batchResponseStorageIds[response.storageKey].push(changeSetStorageId);
            // If this is the last entry in the batch response, we can create the consolidated snapshot
            if (response.lastEntry) {
                // Get the current sync pointer store
                const syncPointerStore = await this.getVerifiableSyncPointerStore();
                let syncState;
                if (core.Is.stringValue(syncPointerStore.syncPointers[response.storageKey])) {
                    // If the sync pointer exists, we load the current sync state
                    syncState = await this.getSyncState(syncPointerStore.syncPointers[response.storageKey]);
                }
                // If the sync state does not exist, we create a new one
                syncState ??= {
                    version: SYNC_STATE_VERSION,
                    storageKey: response.storageKey,
                    snapshots: []
                };
                // Sort the snapshots so the newest snapshot is last in the array
                const sortedSnapshots = syncState.snapshots.sort((a, b) => a.dateCreated.localeCompare(b.dateCreated));
                const currentSnapshot = sortedSnapshots[sortedSnapshots.length - 1];
                const currentEpoch = currentSnapshot?.epoch ?? 0;
                const batchSnapshot = {
                    version: SYNC_SNAPSHOT_VERSION,
                    id: core.Converter.bytesToHex(core.RandomHelper.generate(32)),
                    dateCreated: now,
                    dateModified: now,
                    isConsolidated: true,
                    epoch: currentEpoch + 1,
                    changeSetStorageIds: this._batchResponseStorageIds[response.storageKey]
                };
                syncState.snapshots.push(batchSnapshot);
                // Store the updated sync state
                const syncStateId = await this.storeRemoteSyncState(syncState);
                syncPointerStore.syncPointers[response.storageKey] = syncStateId;
                // Store the verifiable sync pointer in the verifiable storage
                await this.storeVerifiableSyncPointerStore(syncPointerStore);
                // Remove the batch response storage ids for the storage key
                // as we have consolidated the changes
                delete this._batchResponseStorageIds[response.storageKey];
                await this._loggingComponent?.log({
                    level: "info",
                    source: this.CLASS_NAME,
                    message: "consolidationCompleted"
                });
            }
        }
    }
    /**
     * Handle the item response.
     * @param response The item response to handle.
     */
    async handleLocalItemResponse(response) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "createChangeSetRespondingItem",
            data: {
                storageKey: response.storageKey,
                id: response.id
            }
        });
        // We have received a response to an item request, find the right storage
        // for the request id
        if (!core.Is.empty(this._populateFullChanges[response.storageKey])) {
            const idx = this._populateFullChanges[response.storageKey].requestIds.indexOf(response.id);
            if (idx !== -1) {
                this._populateFullChanges[response.storageKey].requestIds.splice(idx, 1);
                this._populateFullChanges[response.storageKey].entities[response.id] = response.entity;
                // If there are no request ids remaining we can complete the population
                if (this._populateFullChanges[response.storageKey].requestIds.length === 0) {
                    await this._populateFullChanges[response.storageKey].completeCallback();
                }
            }
        }
    }
}
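
// Editor's sketch, illustrative only and not part of the published file: the approximate
// shape of the sync state that RemoteSyncStateHelper maintains. The version numbers, ids,
// dates and blob ids below are placeholders rather than values defined by the package.
const exampleSyncState = {
    version: 1,
    storageKey: "example-storage-key",
    snapshots: [
        {
            version: 1,
            id: "0a1b2c",
            dateCreated: "2025-01-01T00:00:00.000Z",
            dateModified: "2025-01-01T00:00:00.000Z",
            isConsolidated: true,
            epoch: 1,
            changeSetStorageIds: ["example-blob-id-1"]
        },
        {
            version: 1,
            id: "3d4e5f",
            dateCreated: "2025-01-01T01:00:00.000Z",
            dateModified: "2025-01-01T01:30:00.000Z",
            isConsolidated: false,
            epoch: 2,
            changeSetStorageIds: ["example-blob-id-2", "example-blob-id-3"]
        }
    ]
};
// Mirrors the dateCreated sort used in addChangeSetToSyncState and handleBatchResponse to
// find the newest snapshot, but copies the array first instead of sorting in place.
// For the data above, exampleLatestSnapshot(exampleSyncState) returns the epoch 2 snapshot.
function exampleLatestSnapshot(syncState) {
    const sorted = [...syncState.snapshots].sort((a, b) => a.dateCreated.localeCompare(b.dateCreated));
    return sorted[sorted.length - 1];
}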

/**
 * Class for performing synchronised storage operations.
 */
class SynchronisedStorageService {
    /**
     * The default interval to check for entity updates.
     * @internal
     */
    static _DEFAULT_ENTITY_UPDATE_INTERVAL_MINUTES = 5;
    /**
     * The default interval to perform consolidation.
     * @internal
     */
    static _DEFAULT_CONSOLIDATION_INTERVAL_MINUTES = 60;
    /**
     * The default size of a consolidation batch.
     * @internal
     */
    static _DEFAULT_CONSOLIDATION_BATCH_SIZE = 100;
    /**
     * The default max number of consolidations to keep in storage.
     * @internal
     */
    static _DEFAULT_MAX_CONSOLIDATIONS = 5;
    /**
     * Runtime name for the class.
     */
    CLASS_NAME = "SynchronisedStorageService";
    /**
     * The logging component to use for logging.
     * @internal
     */
    _loggingComponent;
    /**
     * The event bus component.
     * @internal
     */
    _eventBusComponent;
    /**
     * The vault connector.
     * @internal
     */
    _vaultConnector;
    /**
     * The storage connector for the sync snapshot entries.
     * @internal
     */
    _localSyncSnapshotEntryEntityStorage;
    /**
     * The blob storage connector to use for remote sync states.
     * @internal
     */
    _blobStorageConnector;
    /**
     * The verifiable storage connector to use for storing sync pointers.
     * @internal
     */
    _verifiableSyncPointerStorageConnector;
    /**
     * The identity connector to use for signing/verifying changesets.
     * @internal
     */
    _identityConnector;
    /**
     * The task scheduler component.
     * @internal
     */
    _taskSchedulerComponent;
    /**
     * The synchronised storage service to use when this is not a trusted node.
     * @internal
     */
    _trustedSynchronisedStorageComponent;
    /**
     * The blob storage helper.
     * @internal
     */
    _blobStorageHelper;
    /**
     * The change set helper.
     * @internal
     */
    _changeSetHelper;
    /**
     * The local sync state helper to use for applying changesets.
     * @internal
     */
    _localSyncStateHelper;
    /**
     * The remote sync state helper to use for applying changesets.
     * @internal
     */
    _remoteSyncStateHelper;
    /**
     * The options for the connector.
     * @internal
     */
    _config;
    /**
     * The synchronised storage key to use for the remote synchronised storage.
     * @internal
     */
    _synchronisedStorageKey;
    /**
     * The flag to determine if the service has been started.
     * @internal
     */
    _serviceStarted;
    /**
     * The active storage keys for the synchronised storage service.
     * @internal
     */
    _activeStorageKeys;
    /**
     * The identity of the node this connector is running on.
     * @internal
     */
    _nodeIdentity;
    /**
     * Create a new instance of SynchronisedStorageService.
     * @param options The options for the service.
     */
    constructor(options) {
        core.Guards.object(this.CLASS_NAME, "options", options);
        core.Guards.object(this.CLASS_NAME, "options.config", options.config);
        this._eventBusComponent = core.ComponentFactory.get(options.eventBusComponentType ?? "event-bus");
        this._loggingComponent = core.ComponentFactory.getIfExists(options.loggingComponentType ?? "logging");
        this._vaultConnector = vaultModels.VaultConnectorFactory.get(options.vaultConnectorType ?? "vault");
        this._localSyncSnapshotEntryEntityStorage = entityStorageModels.EntityStorageConnectorFactory.get(options.syncSnapshotStorageConnectorType ?? "sync-snapshot-entry");
        this._verifiableSyncPointerStorageConnector = verifiableStorageModels.VerifiableStorageConnectorFactory.get(options.verifiableStorageConnectorType ?? "verifiable-storage");
        this._blobStorageConnector = blobStorageModels.BlobStorageConnectorFactory.get(options.blobStorageConnectorType ?? "blob-storage");
        this._identityConnector = identityModels.IdentityConnectorFactory.get(options.identityConnectorType ?? "identity");
        this._taskSchedulerComponent = core.ComponentFactory.get(options.taskSchedulerComponentType ?? "task-scheduler");
        // If this is empty we assume the local node has the rights to write to the verifiable storage.
        let isTrustedNode = true;
        if (!core.Is.empty(options.trustedSynchronisedStorageComponentType)) {
            isTrustedNode = false;
            // If it is set then we use the trusted component to send changesets to
            this._trustedSynchronisedStorageComponent =
                core.ComponentFactory.get(options.trustedSynchronisedStorageComponentType);
        }
        this._config = {
            synchronisedStorageMethodId: options.config.synchronisedStorageMethodId ?? "synchronised-storage-assertion",
            entityUpdateIntervalMinutes: options.config.entityUpdateIntervalMinutes ??
                SynchronisedStorageService._DEFAULT_ENTITY_UPDATE_INTERVAL_MINUTES,
            consolidationIntervalMinutes: options.config.consolidationIntervalMinutes ??
                SynchronisedStorageService._DEFAULT_CONSOLIDATION_INTERVAL_MINUTES,
            consolidationBatchSize: options.config.consolidationBatchSize ??
                SynchronisedStorageService._DEFAULT_CONSOLIDATION_BATCH_SIZE,
            maxConsolidations: options.config.maxConsolidations ?? SynchronisedStorageService._DEFAULT_MAX_CONSOLIDATIONS,
            blobStorageEncryptionKeyId: options.config.blobStorageEncryptionKeyId ?? "synchronised-storage-blob-encryption-key",
            verifiableStorageKeyId: options.config.verifiableStorageKeyId
        };
        this._synchronisedStorageKey =
            verifiableStorageKeys[options.config.verifiableStorageKeyId] ?? options.config.verifiableStorageKeyId;
        this._blobStorageHelper = new BlobStorageHelper(this._loggingComponent, this._vaultConnector, this._blobStorageConnector, this._config.blobStorageEncryptionKeyId, isTrustedNode);
        this._changeSetHelper = new ChangeSetHelper(this._loggingComponent, this._eventBusComponent, this._identityConnector, this._blobStorageHelper, this._config.synchronisedStorageMethodId);
        this._localSyncStateHelper = new LocalSyncStateHelper(this._loggingComponent, this._localSyncSnapshotEntryEntityStorage, this._changeSetHelper);
        this._remoteSyncStateHelper = new RemoteSyncStateHelper(this._loggingComponent, this._eventBusComponent, this._verifiableSyncPointerStorageConnector, this._blobStorageHelper, this._changeSetHelper, isTrustedNode, this._config.maxConsolidations);
        this._serviceStarted = false;
        this._activeStorageKeys = {};
        this._eventBusComponent.subscribe(synchronisedStorageModels.SynchronisedStorageTopics.RegisterStorageKey, async (event) => this.registerStorageKey(event.data));
        this._eventBusComponent.subscribe(synchronisedStorageModels.SynchronisedStorageTopics.LocalItemChange, async (event) => {
            // Make sure the change event is from this node
            if (core.Is.stringValue(this._nodeIdentity) && this._nodeIdentity === event.data.nodeIdentity) {
                await this._localSyncStateHelper.addLocalChange(event.data.storageKey, event.data.operation, event.data.id);
            }
        });
    }
    /**
     * The component needs to be started when the node is initialized.
     * @param nodeIdentity The identity of the node starting the component.
     * @param nodeLoggingConnectorType The node logging connector type, defaults to "node-logging".
     * @param componentState A persistent state which can be modified by the method.
     * @returns Nothing.
     */
    async start(nodeIdentity, nodeLoggingConnectorType, componentState) {
        this._nodeIdentity = nodeIdentity;
        this._remoteSyncStateHelper.setNodeIdentity(nodeIdentity);
        this._changeSetHelper.setNodeIdentity(nodeIdentity);
        this._remoteSyncStateHelper.setSynchronisedStorageKey(this._synchronisedStorageKey);
        this._serviceStarted = true;
        // If this is not a trusted node we need to request the decryption key from a trusted node
        if (!core.Is.empty(this._trustedSynchronisedStorageComponent)) {
            const proof = await this._identityConnector.createProof(this._nodeIdentity, identityModels.DocumentHelper.joinId(this._nodeIdentity, this._config.synchronisedStorageMethodId), standardsW3cDid.ProofTypes.DataIntegrityProof, { nodeIdentity });
            const decryptionKey = await this._trustedSynchronisedStorageComponent.getDecryptionKey(this._nodeIdentity, proof);
            // We don't have the private key so instead we store the key as a secret in the vault
            await this._vaultConnector.setSecret(this._config.blobStorageEncryptionKeyId, decryptionKey);
        }
        // If there are already storage keys registered, we need to activate them
        for (const storageKey in this._activeStorageKeys) {
            await this.activateStorageKey(storageKey);
        }
    }
    /**
     * The component needs to be stopped when the node is closed.
     * @param nodeIdentity The identity of the node stopping the component.
     * @param nodeLoggingConnectorType The node logging connector type, defaults to "node-logging".
     * @param componentState A persistent state which can be modified by the method.
     * @returns Nothing.
     */
    async stop(nodeIdentity, nodeLoggingConnectorType, componentState) {
        for (const storageKey in this._activeStorageKeys) {
            this._activeStorageKeys[storageKey] = false;
            this._taskSchedulerComponent.removeTask(`synchronised-storage-update-${storageKey}`);
            this._taskSchedulerComponent.removeTask(`synchronised-storage-consolidation-${storageKey}`);
        }
    }
    /**
     * Get the decryption key for the synchronised storage.
     * This is used to decrypt the data stored in the synchronised storage.
     * @param nodeIdentity The identity of the node requesting the decryption key.
     * @param proof The proof of the request so we know the request is from the specified node.
     * @returns The decryption key.
     */
    async getDecryptionKey(nodeIdentity, proof) {
        if (!core.Is.empty(this._trustedSynchronisedStorageComponent)) {
            throw new core.GeneralError(this.CLASS_NAME, "notTrustedNode");
        }
        core.Guards.stringValue(this.CLASS_NAME, "nodeIdentity", nodeIdentity);
        core.Guards.object(this.CLASS_NAME, "proof", proof);
        const isValid = await this._identityConnector.verifyProof({ nodeIdentity }, proof);
        if (!isValid) {
            throw new core.UnauthorizedError(this.CLASS_NAME, "invalidProof");
        }
        // TODO: We need to check if the node has permissions to access the decryption key
        // using rights-management
        const key = await this._vaultConnector.getKey(this._config.blobStorageEncryptionKeyId);
        if (core.Is.undefined(key.publicKey)) {
            throw new core.UnauthorizedError(this.CLASS_NAME, "decryptionKeyNotFound");
        }
        return core.Converter.bytesToBase64(key.publicKey);
    }
    /**
     * Synchronise a set of changes from an untrusted node, assumes this is a trusted node.
     * @param syncChangeSet The change set to synchronise.
     * @returns Nothing.
     */
    async syncChangeSet(syncChangeSet) {
        if (!core.Is.empty(this._trustedSynchronisedStorageComponent)) {
            throw new core.GeneralError(this.CLASS_NAME, "notTrustedNode");
        }
        core.Guards.object(this.CLASS_NAME, "syncChangeSet", syncChangeSet);
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "syncChangeSetForRemoteNode",
            data: {
                changeSetStorageId: syncChangeSet.id
            }
        });
        // TODO: The change set has a proof signed by the originating node identity
        // The proof is verified that the change set is valid and has not been tampered with.
        // but we also need to check that the originating node has permissions
        // to store the change set in the synchronised storage.
        // This will be performed using rights-management
        const copy = await this._changeSetHelper.copyChangeset(syncChangeSet);
        if (!core.Is.empty(copy)) {
            // Apply the changes to this node
            await this._changeSetHelper.applyChangeset(copy.syncChangeSet);
            // And update the sync state with the latest changes
            await this._remoteSyncStateHelper.addChangeSetToSyncState(copy.syncChangeSet.storageKey, copy.changeSetStorageId);
        }
    }
    /**
     * Start the sync with further updates after an interval.
     * @param storageKey The storage key to sync.
     * @returns Nothing.
     * @internal
     */
    async startEntitySync(storageKey) {
        try {
            await this._loggingComponent?.log({
                level: "info",
                source: this.CLASS_NAME,
                message: "startEntitySync",
                data: {
                    storageKey
                }
            });
            // First we check for remote changes
            await this.updateFromRemoteSyncState(storageKey);
            // Now send any updates we have to the remote storage
            await this.updateFromLocalSyncState(storageKey);
        }
        catch (error) {
            await this._loggingComponent?.log({
                level: "error",
                source: this.CLASS_NAME,
                message: "entitySyncFailed",
                error: core.BaseError.fromError(error)
            });
        }
    }
    /**
     * Check for updates in the remote storage.
     * @param storageKey The storage key to check for updates.
     * @returns Nothing.
     * @internal
     */
    async updateFromRemoteSyncState(storageKey) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "updateFromRemoteSyncState",
            data: {
                storageKey
            }
        });
        // Get the verifiable sync pointer store from the verifiable storage
        const verifiableSyncPointerStore = await this._remoteSyncStateHelper.getVerifiableSyncPointerStore();
        if (!core.Is.empty(verifiableSyncPointerStore.syncPointers[storageKey])) {
            // Load the sync state from the remote blob storage using the sync pointer
            // to load the sync state
            const remoteSyncState = await this._remoteSyncStateHelper.getSyncState(verifiableSyncPointerStore.syncPointers[storageKey]);
            // If we got the sync state we can try and sync from it
            if (!core.Is.undefined(remoteSyncState)) {
                await this._localSyncStateHelper.applySyncState(storageKey, remoteSyncState);
            }
        }
    }
    /**
     * Find any local updates and send them to the remote storage.
     * @param storageKey The storage key to check for local updates.
     * @returns Nothing.
     * @internal
     */
    async updateFromLocalSyncState(storageKey) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "updateFromLocalSyncState",
            data: {
                storageKey
            }
        });
        const localChangeSnapshots = await this._localSyncStateHelper.getSnapshots(storageKey, true);
        if (localChangeSnapshots.length > 0) {
            const localChangeSnapshot = localChangeSnapshots[0];
            if (core.Is.arrayValue(localChangeSnapshot.changes)) {
                await this._remoteSyncStateHelper.buildChangeSet(storageKey, localChangeSnapshot.changes, async (syncChangeSet, changeSetStorageId) => {
                    if (core.Is.empty(syncChangeSet) && core.Is.empty(changeSetStorageId)) {
                        await this._loggingComponent?.log({
                            level: "info",
                            source: this.CLASS_NAME,
                            message: "builtStorageChangeSetNone",
                            data: {
                                storageKey
                            }
                        });
                    }
                    else {
                        await this._loggingComponent?.log({
                            level: "info",
                            source: this.CLASS_NAME,
                            message: "builtStorageChangeSet",
                            data: {
                                storageKey,
                                changeSetStorageId
                            }
                        });
                        // Send the local changes to the remote storage if we are a trusted node
                        if (core.Is.empty(this._trustedSynchronisedStorageComponent) &&
                            core.Is.stringValue(changeSetStorageId)) {
                            // If we are a trusted node, we can add the change set to the sync state
                            // and remove the local change snapshot
                            await this._remoteSyncStateHelper.addChangeSetToSyncState(storageKey, changeSetStorageId);
                            await this._localSyncStateHelper.removeLocalChangeSnapshot(localChangeSnapshot);
                        }
                        else if (!core.Is.empty(this._trustedSynchronisedStorageComponent) &&
                            core.Is.object(syncChangeSet)) {
                            // If we are not a trusted node, we need to send the changes to the trusted node
                            // and then remove the local change snapshot
                            await this._loggingComponent?.log({
                                level: "info",
                                source: this.CLASS_NAME,
                                message: "sendingChangeSetToTrustedNode",
                                data: {
                                    storageKey,
                                    changeSetStorageId
                                }
                            });
                            await this._trustedSynchronisedStorageComponent.syncChangeSet(syncChangeSet);
                            await this._localSyncStateHelper.removeLocalChangeSnapshot(localChangeSnapshot);
                        }
                    }
                });
            }
            else {
                await this._loggingComponent?.log({
                    level: "info",
                    source: this.CLASS_NAME,
                    message: "updateFromLocalSyncStateNoChanges",
                    data: {
                        storageKey
                    }
                });
            }
        }
    }
    /**
     * Start the consolidation sync.
     * @param storageKey The storage key to consolidate.
     * @returns Nothing.
     * @internal
     */
    async startConsolidationSync(storageKey) {
        try {
            // If we are going to perform a consolidation first take any local updates
            // we have and create a changeset from them, so that anybody applying
            // just changes since a consolidation can use the changeset
            // and skip the consolidation
            await this.updateFromLocalSyncState(storageKey);
            // Now start the consolidation
            await this._remoteSyncStateHelper.consolidationStart(storageKey, this._config.consolidationBatchSize ??
                SynchronisedStorageService._DEFAULT_CONSOLIDATION_BATCH_SIZE);
        }
        catch (error) {
            await this._loggingComponent?.log({
                level: "error",
                source: this.CLASS_NAME,
                message: "consolidationSyncFailed",
                error: core.BaseError.fromError(error)
            });
        }
    }
    /**
     * Register a new sync type.
     * @param syncRegisterStorageKey The sync register type to register.
     * @internal
     */
    async registerStorageKey(syncRegisterStorageKey) {
        await this._loggingComponent?.log({
            level: "info",
            source: this.CLASS_NAME,
            message: "registerStorageKey",
            data: {
                storageKey: syncRegisterStorageKey.storageKey
            }
        });
        if (core.Is.empty(this._activeStorageKeys[syncRegisterStorageKey.storageKey])) {
            this._activeStorageKeys[syncRegisterStorageKey.storageKey] = false;
            if (this._serviceStarted) {
                await this.activateStorageKey(syncRegisterStorageKey.storageKey);
            }
        }
    }
    /**
     * Activate a storage key.
     * @param storageKey The storage key to activate.
     * @internal
     */
    async activateStorageKey(storageKey) {
        if (!core.Is.empty(this._activeStorageKeys[storageKey]) && !this._activeStorageKeys[storageKey]) {
            await this._loggingComponent?.log({
                level: "info",
                source: this.CLASS_NAME,
                message: "activateStorageKey",
                data: {
                    storageKey
                }
            });
            this._activeStorageKeys[storageKey] = true;
            if (this._config.entityUpdateIntervalMinutes > 0) {
                await this._taskSchedulerComponent.addTask(`synchronised-storage-update-${storageKey}`, [
                    {
                        nextTriggerTime: Date.now(),
                        intervalMinutes: this._config.entityUpdateIntervalMinutes
                    }
                ], async () => this.startEntitySync(storageKey));
            }
            if (!core.Is.empty(this._trustedSynchronisedStorageComponent) &&
                this._config.consolidationIntervalMinutes > 0) {
                await this._taskSchedulerComponent.addTask(`synchronised-storage-consolidation-${storageKey}`, [
                    {
                        nextTriggerTime: Date.now(),
                        intervalMinutes: this._config.consolidationIntervalMinutes
                    }
                ], async () => this.startConsolidationSync(storageKey));
            }
        }
    }
}

exports.SynchronisedStorageService = SynchronisedStorageService;
exports.generateRestRoutesSynchronisedStorage = generateRestRoutesSynchronisedStorage;
exports.initSchema = initSchema;
exports.restEntryPoints = restEntryPoints;
exports.synchronisedStorageGetDecryptionKeyRequest = synchronisedStorageGetDecryptionKeyRequest;
exports.synchronisedStorageSyncChangeSetRequest = synchronisedStorageSyncChangeSetRequest;
exports.tagsSynchronisedStorage = tagsSynchronisedStorage;
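
// Editor's usage sketch, illustrative only and not part of the published file. In an
// application the class is imported from @twin.org/synchronised-storage-service; here it is
// referenced directly. The sketch assumes the factories used by the constructor above
// ("event-bus", "task-scheduler", "vault", "sync-snapshot-entry", "verifiable-storage",
// "blob-storage" and "identity") have already been registered elsewhere, and the key id and
// node identity values are placeholders.
async function exampleStartSynchronisedStorage() {
    const service = new SynchronisedStorageService({
        config: {
            // Looked up in the bundled verifiableStorageKeys data, otherwise used as-is.
            verifiableStorageKeyId: "example-verifiable-storage-key",
            entityUpdateIntervalMinutes: 5,
            consolidationIntervalMinutes: 60,
            consolidationBatchSize: 100,
            maxConsolidations: 5
        }
    });
    // start() records the node identity used to sign changesets and activates any storage
    // keys that were registered before startup.
    await service.start("did:example:node-identity");
    return service;
}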