@peerbit/shared-log 9.1.2 → 9.2.0
- package/dist/benchmark/get-samples.js +2 -3
- package/dist/benchmark/get-samples.js.map +1 -1
- package/dist/benchmark/index.js +4 -6
- package/dist/benchmark/index.js.map +1 -1
- package/dist/benchmark/memory/child.d.ts +2 -0
- package/dist/benchmark/memory/child.d.ts.map +1 -0
- package/dist/benchmark/memory/child.js +149 -0
- package/dist/benchmark/memory/child.js.map +1 -0
- package/dist/benchmark/memory/index.d.ts +2 -0
- package/dist/benchmark/memory/index.d.ts.map +1 -0
- package/dist/benchmark/memory/index.js +81 -0
- package/dist/benchmark/memory/index.js.map +1 -0
- package/dist/benchmark/memory/utils.d.ts +13 -0
- package/dist/benchmark/memory/utils.d.ts.map +1 -0
- package/dist/benchmark/memory/utils.js +2 -0
- package/dist/benchmark/memory/utils.js.map +1 -0
- package/dist/benchmark/replication-prune.js +27 -25
- package/dist/benchmark/replication-prune.js.map +1 -1
- package/dist/benchmark/replication.js +15 -16
- package/dist/benchmark/replication.js.map +1 -1
- package/dist/src/debounce.d.ts +25 -0
- package/dist/src/debounce.d.ts.map +1 -0
- package/dist/src/debounce.js +130 -0
- package/dist/src/debounce.js.map +1 -0
- package/dist/src/index.d.ts +55 -21
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +867 -390
- package/dist/src/index.js.map +1 -1
- package/dist/src/pid.d.ts.map +1 -1
- package/dist/src/pid.js +23 -21
- package/dist/src/pid.js.map +1 -1
- package/dist/src/ranges.d.ts +104 -8
- package/dist/src/ranges.d.ts.map +1 -1
- package/dist/src/ranges.js +518 -76
- package/dist/src/ranges.js.map +1 -1
- package/dist/src/replication-domain-hash.d.ts.map +1 -1
- package/dist/src/replication-domain-hash.js.map +1 -1
- package/dist/src/replication-domain-time.d.ts.map +1 -1
- package/dist/src/replication-domain-time.js.map +1 -1
- package/dist/src/replication-domain.d.ts +22 -2
- package/dist/src/replication-domain.d.ts.map +1 -1
- package/dist/src/replication-domain.js +33 -0
- package/dist/src/replication-domain.js.map +1 -1
- package/dist/src/replication.d.ts +1 -55
- package/dist/src/replication.d.ts.map +1 -1
- package/dist/src/replication.js +5 -215
- package/dist/src/replication.js.map +1 -1
- package/dist/src/role.d.ts +1 -0
- package/dist/src/role.d.ts.map +1 -1
- package/dist/src/role.js +1 -0
- package/dist/src/role.js.map +1 -1
- package/dist/src/utils.d.ts +6 -0
- package/dist/src/utils.d.ts.map +1 -0
- package/dist/src/utils.js +39 -0
- package/dist/src/utils.js.map +1 -0
- package/package.json +5 -5
- package/src/debounce.ts +172 -0
- package/src/index.ts +1282 -562
- package/src/pid.ts +27 -25
- package/src/ranges.ts +794 -181
- package/src/replication-domain-hash.ts +3 -1
- package/src/replication-domain-time.ts +2 -1
- package/src/replication-domain.ts +68 -5
- package/src/replication.ts +9 -235
- package/src/role.ts +1 -0
- package/src/utils.ts +49 -0
package/dist/src/index.js
CHANGED
|
@@ -8,36 +8,39 @@ var __metadata = (this && this.__metadata) || function (k, v) {
|
|
|
8
8
|
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
|
|
9
9
|
};
|
|
10
10
|
import { BorshError, field, variant } from "@dao-xyz/borsh";
|
|
11
|
-
import { CustomEvent } from "@libp2p/interface";
|
|
12
11
|
import { AnyBlockStore, RemoteBlocks } from "@peerbit/blocks";
|
|
13
12
|
import { Cache } from "@peerbit/cache";
|
|
14
13
|
import { AccessError, PublicSignKey, sha256Base64Sync, sha256Sync, } from "@peerbit/crypto";
|
|
15
|
-
import { And, ByteMatchQuery,
|
|
14
|
+
import { And, ByteMatchQuery, Or, Sort, StringMatch, } from "@peerbit/indexer-interface";
|
|
16
15
|
import { Entry, Log, ShallowEntry, } from "@peerbit/log";
|
|
17
16
|
import { logger as loggerFn } from "@peerbit/logger";
|
|
18
17
|
import { ClosedError, Program } from "@peerbit/program";
|
|
19
18
|
import { SubscriptionEvent, UnsubcriptionEvent, } from "@peerbit/pubsub-interface";
|
|
20
19
|
import { RPC } from "@peerbit/rpc";
|
|
21
20
|
import { AcknowledgeDelivery, DeliveryMode, NotStartedError, SilentDelivery, } from "@peerbit/stream-interface";
|
|
22
|
-
import { AbortError,
|
|
23
|
-
|
|
21
|
+
import { AbortError,
|
|
22
|
+
/* delay, */
|
|
23
|
+
waitFor, } from "@peerbit/time";
|
|
24
24
|
import pDefer, {} from "p-defer";
|
|
25
25
|
import PQueue from "p-queue";
|
|
26
26
|
import { concat } from "uint8arrays";
|
|
27
27
|
import { BlocksMessage } from "./blocks.js";
|
|
28
28
|
import { CPUUsageIntervalLag } from "./cpu.js";
|
|
29
|
+
import { debounceAcculmulator, debounceFixedInterval, debouncedAccumulatorMap, } from "./debounce.js";
|
|
29
30
|
import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune, RequestMaybeSync, ResponseIPrune, ResponseMaybeSync, createExchangeHeadsMessages, } from "./exchange-heads.js";
|
|
30
31
|
import { TransportMessage } from "./message.js";
|
|
31
32
|
import { PIDReplicationController } from "./pid.js";
|
|
32
|
-
import { getCoverSet, getSamples, hasCoveringRange, isMatured, minimumWidthToCover, } from "./ranges.js";
|
|
33
|
+
import { EntryReplicated, ReplicationIntent, ReplicationRange, ReplicationRangeIndexable, getCoverSet, getEvenlySpacedU32, getSamples, hasCoveringRange, isMatured, minimumWidthToCover, shouldAssigneToRangeBoundary, toRebalance, } from "./ranges.js";
|
|
33
34
|
import { createReplicationDomainHash, hashToU32, } from "./replication-domain-hash.js";
|
|
34
35
|
import { createReplicationDomainTime, } from "./replication-domain-time.js";
|
|
35
|
-
import {} from "./replication-domain.js";
|
|
36
|
-
import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, ReplicationError,
|
|
36
|
+
import { debounceAggregationChanges, mergeReplicationChanges, } from "./replication-domain.js";
|
|
37
|
+
import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, ReplicationError, RequestReplicationInfoMessage, ResponseRoleMessage, StoppedReplicating, decodeReplicas, encodeReplicas, maxReplicas, } from "./replication.js";
|
|
37
38
|
import { MAX_U32, Observer, Replicator, scaleToU32 } from "./role.js";
|
|
39
|
+
import { groupByGid } from "./utils.js";
|
|
38
40
|
export { createReplicationDomainHash, createReplicationDomainTime, };
|
|
39
41
|
export { CPUUsageIntervalLag };
|
|
40
42
|
export * from "./replication.js";
|
|
43
|
+
export { EntryReplicated };
|
|
41
44
|
export const logger = loggerFn({ module: "shared-log" });
|
|
42
45
|
const getLatestEntry = (entries) => {
|
|
43
46
|
let latest = undefined;
|
|
@@ -50,23 +53,6 @@ const getLatestEntry = (entries) => {
|
|
|
50
53
|
}
|
|
51
54
|
return latest;
|
|
52
55
|
};
|
|
53
|
-
const groupByGid = async (entries) => {
|
|
54
|
-
const groupByGid = new Map();
|
|
55
|
-
for (const head of entries) {
|
|
56
|
-
const gid = await (head instanceof Entry
|
|
57
|
-
? (await head.getMeta()).gid
|
|
58
|
-
: head instanceof ShallowEntry
|
|
59
|
-
? head.meta.gid
|
|
60
|
-
: (await head.entry.getMeta()).gid);
|
|
61
|
-
let value = groupByGid.get(gid);
|
|
62
|
-
if (!value) {
|
|
63
|
-
value = [];
|
|
64
|
-
groupByGid.set(gid, value);
|
|
65
|
-
}
|
|
66
|
-
value.push(head);
|
|
67
|
-
}
|
|
68
|
-
return groupByGid;
|
|
69
|
-
};
|
|
70
56
|
const isAdaptiveReplicatorOption = (options) => {
|
|
71
57
|
if (typeof options === "number") {
|
|
72
58
|
return false;
|
|
@@ -86,11 +72,35 @@ const isUnreplicationOptions = (options) => options === false ||
|
|
|
86
72
|
options === 0 ||
|
|
87
73
|
(options?.offset === undefined &&
|
|
88
74
|
options?.factor === 0);
|
|
75
|
+
const isReplicationOptionsDependentOnPreviousState = (options) => {
|
|
76
|
+
if (options === true) {
|
|
77
|
+
return true;
|
|
78
|
+
}
|
|
79
|
+
if (options == null) {
|
|
80
|
+
// when not providing options, we assume previous behaviour
|
|
81
|
+
return true;
|
|
82
|
+
}
|
|
83
|
+
// if empty object but with no keys
|
|
84
|
+
if (typeof options === "object" && Object.keys(options).length === 0) {
|
|
85
|
+
return true;
|
|
86
|
+
}
|
|
87
|
+
return false;
|
|
88
|
+
};
|
|
89
89
|
export const DEFAULT_MIN_REPLICAS = 2;
|
|
90
90
|
export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000;
|
|
91
91
|
export const WAIT_FOR_ROLE_MATURITY = 5000;
|
|
92
|
-
const
|
|
92
|
+
const PRUNE_DEBOUNCE_INTERVAL = 500;
|
|
93
|
+
// DONT SET THIS ANY LOWER, because it will make the pid controller unstable as the system responses are not fast enough to updates from the pid controller
|
|
94
|
+
const RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL = 1000;
|
|
93
95
|
const DEFAULT_DISTRIBUTION_DEBOUNCE_TIME = 500;
|
|
96
|
+
const getIdForDynamicRange = (publicKey) => {
|
|
97
|
+
return sha256Sync(concat([publicKey.bytes, new TextEncoder().encode("dynamic")]));
|
|
98
|
+
};
|
|
99
|
+
const checkMinReplicasLimit = (minReplicas) => {
|
|
100
|
+
if (minReplicas > 100) {
|
|
101
|
+
throw new Error("Higher replication degree than 100 is not recommended for performance reasons");
|
|
102
|
+
}
|
|
103
|
+
};
|
|
94
104
|
let SharedLog = class SharedLog extends Program {
|
|
95
105
|
log;
|
|
96
106
|
rpc;
|
|
@@ -98,6 +108,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
98
108
|
_isReplicating;
|
|
99
109
|
_isAdaptiveReplicating;
|
|
100
110
|
_replicationRangeIndex;
|
|
111
|
+
_entryCoordinatesIndex;
|
|
101
112
|
/* private _totalParticipation!: number; */
|
|
102
113
|
_gidPeersHistory;
|
|
103
114
|
_onSubscriptionFn;
|
|
@@ -109,6 +120,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
109
120
|
_respondToIHaveTimeout;
|
|
110
121
|
_pendingDeletes;
|
|
111
122
|
_pendingIHave;
|
|
123
|
+
pendingMaturity; // map of peerId to timeout
|
|
112
124
|
latestReplicationInfoMessage;
|
|
113
125
|
remoteBlocks;
|
|
114
126
|
openTime;
|
|
@@ -116,8 +128,11 @@ let SharedLog = class SharedLog extends Program {
|
|
|
116
128
|
sync;
|
|
117
129
|
// A fn that we can call many times that recalculates the participation role
|
|
118
130
|
rebalanceParticipationDebounced;
|
|
131
|
+
// A fn for debouncing the calls for pruning
|
|
132
|
+
pruneDebouncedFn;
|
|
133
|
+
responseToPruneDebouncedFn;
|
|
134
|
+
replicationChangeDebounceFn;
|
|
119
135
|
// regular distribution checks
|
|
120
|
-
distributeInterval;
|
|
121
136
|
distributeQueue;
|
|
122
137
|
// Syncing and dedeplucation work
|
|
123
138
|
syncMoreInterval;
|
|
@@ -134,7 +149,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
134
149
|
replicationController;
|
|
135
150
|
history;
|
|
136
151
|
domain;
|
|
137
|
-
|
|
152
|
+
interval;
|
|
138
153
|
constructor(properties) {
|
|
139
154
|
super();
|
|
140
155
|
this.log = new Log(properties);
|
|
@@ -180,21 +195,29 @@ let SharedLog = class SharedLog extends Program {
|
|
|
180
195
|
return this._totalParticipation;
|
|
181
196
|
} */
|
|
182
197
|
async calculateTotalParticipation() {
|
|
183
|
-
const sum = await this.replicationIndex.sum(
|
|
198
|
+
const sum = await this.replicationIndex.sum({ key: "width" });
|
|
184
199
|
return Number(sum) / MAX_U32;
|
|
185
200
|
}
|
|
186
201
|
async countReplicationSegments() {
|
|
187
|
-
const count = await this.replicationIndex.count(
|
|
202
|
+
const count = await this.replicationIndex.count({
|
|
188
203
|
query: new StringMatch({
|
|
189
204
|
key: "hash",
|
|
190
205
|
value: this.node.identity.publicKey.hashcode(),
|
|
191
206
|
}),
|
|
192
|
-
})
|
|
207
|
+
});
|
|
193
208
|
return count;
|
|
194
209
|
}
|
|
195
|
-
setupRebalanceDebounceFunction() {
|
|
210
|
+
setupRebalanceDebounceFunction(interval = RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL) {
|
|
196
211
|
this.rebalanceParticipationDebounced = undefined;
|
|
197
|
-
|
|
212
|
+
// make the rebalancing to respect warmup time
|
|
213
|
+
let intervalTime = interval * 2;
|
|
214
|
+
let timeout = setTimeout(() => {
|
|
215
|
+
intervalTime = interval;
|
|
216
|
+
}, this.timeUntilRoleMaturity);
|
|
217
|
+
this._closeController.signal.addEventListener("abort", () => {
|
|
218
|
+
clearTimeout(timeout);
|
|
219
|
+
});
|
|
220
|
+
this.rebalanceParticipationDebounced = debounceFixedInterval(() => this.rebalanceParticipation(),
|
|
198
221
|
/* Math.max(
|
|
199
222
|
REBALANCE_DEBOUNCE_INTERVAL,
|
|
200
223
|
Math.log(
|
|
@@ -202,7 +225,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
202
225
|
REBALANCE_DEBOUNCE_INTERVAL
|
|
203
226
|
)
|
|
204
227
|
) */
|
|
205
|
-
|
|
228
|
+
() => intervalTime);
|
|
206
229
|
}
|
|
207
230
|
async _replicate(options, { reset, checkDuplicates, announce, } = {}) {
|
|
208
231
|
let offsetWasProvided = false;
|
|
@@ -261,6 +284,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
261
284
|
let factor = rangeArg.factor;
|
|
262
285
|
let width = normalized ? 1 : scaleToU32(1);
|
|
263
286
|
ranges.push(new ReplicationRangeIndexable({
|
|
287
|
+
id: rangeArg.id,
|
|
264
288
|
normalized,
|
|
265
289
|
offset: offset,
|
|
266
290
|
length: typeof factor === "number"
|
|
@@ -291,6 +315,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
291
315
|
checkDuplicates,
|
|
292
316
|
announce,
|
|
293
317
|
});
|
|
318
|
+
return ranges;
|
|
294
319
|
}
|
|
295
320
|
}
|
|
296
321
|
setupDebouncedRebalancing(options) {
|
|
@@ -312,7 +337,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
312
337
|
? options?.limits?.cpu?.monitor || new CPUUsageIntervalLag()
|
|
313
338
|
: new CPUUsageIntervalLag();
|
|
314
339
|
this.cpuUsage?.start?.();
|
|
315
|
-
this.setupRebalanceDebounceFunction();
|
|
340
|
+
this.setupRebalanceDebounceFunction(options?.limits?.interval);
|
|
316
341
|
}
|
|
317
342
|
async replicate(rangeOrEntry, options) {
|
|
318
343
|
let range = undefined;
|
|
@@ -345,10 +370,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
345
370
|
else {
|
|
346
371
|
range = rangeOrEntry ?? true;
|
|
347
372
|
}
|
|
348
|
-
|
|
349
|
-
// assume new role
|
|
350
|
-
await this.distribute();
|
|
351
|
-
return newRanges;
|
|
373
|
+
return this._replicate(range, options);
|
|
352
374
|
}
|
|
353
375
|
async unreplicate(rangeOrEntry) {
|
|
354
376
|
let range;
|
|
@@ -372,13 +394,13 @@ let SharedLog = class SharedLog extends Program {
|
|
|
372
394
|
// TODO support this by never deleting the range with the segment id that is generated by the dynamic replication method
|
|
373
395
|
throw new Error("Unsupported when adaptive replicating");
|
|
374
396
|
}
|
|
375
|
-
const indexed =
|
|
397
|
+
const indexed = this.replicationIndex.iterate({
|
|
376
398
|
query: {
|
|
377
399
|
width: 1,
|
|
378
400
|
start1: range.offset,
|
|
379
401
|
},
|
|
380
|
-
})
|
|
381
|
-
const segmentIds = indexed.
|
|
402
|
+
});
|
|
403
|
+
const segmentIds = (await indexed.all()).map((x) => x.id.key);
|
|
382
404
|
await this.removeReplicationRange(segmentIds, this.node.identity.publicKey);
|
|
383
405
|
await this.rpc.send(new StoppedReplicating({ segmentIds }), {
|
|
384
406
|
priority: 1,
|
|
@@ -386,7 +408,12 @@ let SharedLog = class SharedLog extends Program {
|
|
|
386
408
|
}
|
|
387
409
|
async removeReplicator(key) {
|
|
388
410
|
const fn = async () => {
|
|
389
|
-
await this.replicationIndex
|
|
411
|
+
const deleted = await this.replicationIndex
|
|
412
|
+
.iterate({
|
|
413
|
+
query: { hash: key.hashcode() },
|
|
414
|
+
})
|
|
415
|
+
.all();
|
|
416
|
+
await this.replicationIndex.del({ query: { hash: key.hashcode() } });
|
|
390
417
|
await this.updateOldestTimestampFromIndex();
|
|
391
418
|
if (this.node.identity.publicKey.equals(key)) {
|
|
392
419
|
// announce that we are no longer replicating
|
|
@@ -395,32 +422,54 @@ let SharedLog = class SharedLog extends Program {
|
|
|
395
422
|
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
396
423
|
detail: { publicKey: key },
|
|
397
424
|
}));
|
|
425
|
+
deleted.forEach((x) => {
|
|
426
|
+
return this.replicationChangeDebounceFn.add({
|
|
427
|
+
range: x.value,
|
|
428
|
+
type: "removed",
|
|
429
|
+
});
|
|
430
|
+
});
|
|
431
|
+
const pendingMaturity = this.pendingMaturity.get(key.hashcode());
|
|
432
|
+
if (pendingMaturity) {
|
|
433
|
+
clearTimeout(pendingMaturity.timeout);
|
|
434
|
+
this.pendingMaturity.delete(key.hashcode());
|
|
435
|
+
}
|
|
398
436
|
if (!key.equals(this.node.identity.publicKey)) {
|
|
399
437
|
this.rebalanceParticipationDebounced?.();
|
|
400
438
|
}
|
|
401
439
|
};
|
|
402
|
-
return
|
|
440
|
+
return fn();
|
|
403
441
|
}
|
|
404
442
|
async updateOldestTimestampFromIndex() {
|
|
405
|
-
const
|
|
406
|
-
fetch: 1,
|
|
443
|
+
const iterator = await this.replicationIndex.iterate({
|
|
407
444
|
sort: [new Sort({ key: "timestamp", direction: "asc" })],
|
|
408
|
-
}
|
|
445
|
+
}, { reference: true });
|
|
446
|
+
const oldestTimestampFromDB = (await iterator.next(1))[0]?.value.timestamp;
|
|
447
|
+
await iterator.close();
|
|
409
448
|
this.oldestOpenTime =
|
|
410
449
|
oldestTimestampFromDB != null
|
|
411
450
|
? Number(oldestTimestampFromDB)
|
|
412
451
|
: +new Date();
|
|
413
452
|
}
|
|
414
|
-
async removeReplicationRange(
|
|
453
|
+
async removeReplicationRange(ids, from) {
|
|
415
454
|
const fn = async () => {
|
|
416
|
-
let idMatcher = new Or(
|
|
455
|
+
let idMatcher = new Or(ids.map((x) => new ByteMatchQuery({ key: "id", value: x })));
|
|
417
456
|
// make sure we are not removing something that is owned by the replicator
|
|
418
457
|
let identityMatcher = new StringMatch({
|
|
419
458
|
key: "hash",
|
|
420
459
|
value: from.hashcode(),
|
|
421
460
|
});
|
|
422
461
|
let query = new And([idMatcher, identityMatcher]);
|
|
423
|
-
|
|
462
|
+
const pendingMaturity = this.pendingMaturity.get(from.hashcode());
|
|
463
|
+
if (pendingMaturity) {
|
|
464
|
+
for (const id of ids) {
|
|
465
|
+
pendingMaturity.ranges.delete(id.toString());
|
|
466
|
+
}
|
|
467
|
+
if (pendingMaturity.ranges.size === 0) {
|
|
468
|
+
clearTimeout(pendingMaturity.timeout);
|
|
469
|
+
this.pendingMaturity.delete(from.hashcode());
|
|
470
|
+
}
|
|
471
|
+
}
|
|
472
|
+
await this.replicationIndex.del({ query });
|
|
424
473
|
await this.updateOldestTimestampFromIndex();
|
|
425
474
|
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
426
475
|
detail: { publicKey: from },
|
|
@@ -429,35 +478,137 @@ let SharedLog = class SharedLog extends Program {
|
|
|
429
478
|
this.rebalanceParticipationDebounced?.();
|
|
430
479
|
}
|
|
431
480
|
};
|
|
432
|
-
return
|
|
481
|
+
return fn();
|
|
433
482
|
}
|
|
434
483
|
async addReplicationRange(ranges, from, { reset, checkDuplicates, } = {}) {
|
|
435
484
|
const fn = async () => {
|
|
436
485
|
if (this._isTrustedReplicator &&
|
|
437
486
|
!(await this._isTrustedReplicator(from))) {
|
|
438
|
-
return
|
|
487
|
+
return undefined;
|
|
439
488
|
}
|
|
440
|
-
let
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
const isNewReplicator = prevCount === 0;
|
|
489
|
+
let isNewReplicator = false;
|
|
490
|
+
let diffs;
|
|
491
|
+
let deleted = undefined;
|
|
444
492
|
if (reset) {
|
|
445
|
-
await this.replicationIndex
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
493
|
+
deleted = (await this.replicationIndex
|
|
494
|
+
.iterate({
|
|
495
|
+
query: { hash: from.hashcode() },
|
|
496
|
+
})
|
|
497
|
+
.all()).map((x) => x.value);
|
|
498
|
+
await this.replicationIndex.del({ query: { hash: from.hashcode() } });
|
|
499
|
+
diffs = [
|
|
500
|
+
...deleted.map((x) => {
|
|
501
|
+
return { range: x, type: "removed" };
|
|
502
|
+
}),
|
|
503
|
+
...ranges.map((x) => {
|
|
504
|
+
return { range: x, type: "added" };
|
|
505
|
+
}),
|
|
506
|
+
];
|
|
507
|
+
let prevCount = await this.replicationIndex.count({
|
|
508
|
+
query: new StringMatch({ key: "hash", value: from.hashcode() }),
|
|
509
|
+
});
|
|
510
|
+
isNewReplicator = prevCount === 0;
|
|
511
|
+
}
|
|
512
|
+
else {
|
|
513
|
+
let existing = await this.replicationIndex
|
|
514
|
+
.iterate({
|
|
515
|
+
query: ranges.map((x) => new ByteMatchQuery({ key: "id", value: x.id })),
|
|
516
|
+
}, { reference: true })
|
|
517
|
+
.all();
|
|
518
|
+
if (existing.length === 0) {
|
|
519
|
+
let prevCount = await this.replicationIndex.count({
|
|
520
|
+
query: new StringMatch({ key: "hash", value: from.hashcode() }),
|
|
521
|
+
});
|
|
522
|
+
isNewReplicator = prevCount === 0;
|
|
523
|
+
}
|
|
524
|
+
else {
|
|
525
|
+
isNewReplicator = false;
|
|
526
|
+
}
|
|
527
|
+
if (checkDuplicates) {
|
|
528
|
+
let deduplicated = [];
|
|
529
|
+
// TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
|
|
530
|
+
for (const range of ranges) {
|
|
531
|
+
if (!(await hasCoveringRange(this.replicationIndex, range))) {
|
|
532
|
+
deduplicated.push(range);
|
|
533
|
+
}
|
|
453
534
|
}
|
|
535
|
+
ranges = deduplicated;
|
|
454
536
|
}
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
537
|
+
let existingMap = new Map();
|
|
538
|
+
for (const result of existing) {
|
|
539
|
+
existingMap.set(result.value.idString, result.value);
|
|
540
|
+
}
|
|
541
|
+
let changes = ranges
|
|
542
|
+
.map((x) => {
|
|
543
|
+
const prev = existingMap.get(x.idString);
|
|
544
|
+
if (prev) {
|
|
545
|
+
if (prev.equalRange(x)) {
|
|
546
|
+
return undefined;
|
|
547
|
+
}
|
|
548
|
+
return { range: x, prev, type: "updated" };
|
|
549
|
+
}
|
|
550
|
+
else {
|
|
551
|
+
return { range: x, type: "added" };
|
|
552
|
+
}
|
|
553
|
+
})
|
|
554
|
+
.filter((x) => x != null);
|
|
555
|
+
diffs = changes;
|
|
556
|
+
}
|
|
557
|
+
let now = +new Date();
|
|
558
|
+
let minRoleAge = await this.getDefaultMinRoleAge();
|
|
559
|
+
let isAllMature = true;
|
|
560
|
+
for (const diff of diffs) {
|
|
561
|
+
if (diff.type === "added" || diff.type === "updated") {
|
|
562
|
+
await this.replicationIndex.put(diff.range);
|
|
563
|
+
if (!reset) {
|
|
564
|
+
this.oldestOpenTime = Math.min(Number(diff.range.timestamp), this.oldestOpenTime);
|
|
565
|
+
}
|
|
566
|
+
const isMature = isMatured(diff.range, now, minRoleAge);
|
|
567
|
+
if (!isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */) {
|
|
568
|
+
// second condition is to avoid the case where we are adding a range that we own
|
|
569
|
+
isAllMature = false;
|
|
570
|
+
let prevPendingMaturity = this.pendingMaturity.get(diff.range.hash);
|
|
571
|
+
let map;
|
|
572
|
+
let waitForMaturityTime = Math.max(minRoleAge - (now - Number(diff.range.timestamp)), 0);
|
|
573
|
+
if (prevPendingMaturity) {
|
|
574
|
+
map = prevPendingMaturity.ranges;
|
|
575
|
+
if (prevPendingMaturity.timestamp < diff.range.timestamp) {
|
|
576
|
+
// something has changed so we need to reset the timeout
|
|
577
|
+
clearTimeout(prevPendingMaturity.timeout);
|
|
578
|
+
prevPendingMaturity.timestamp = diff.range.timestamp;
|
|
579
|
+
prevPendingMaturity.timeout = setTimeout(() => {
|
|
580
|
+
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
581
|
+
detail: { publicKey: from },
|
|
582
|
+
}));
|
|
583
|
+
for (const value of map.values()) {
|
|
584
|
+
this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
|
|
585
|
+
}
|
|
586
|
+
}, waitForMaturityTime);
|
|
587
|
+
}
|
|
588
|
+
}
|
|
589
|
+
else {
|
|
590
|
+
map = new Map();
|
|
591
|
+
this.pendingMaturity.set(diff.range.hash, {
|
|
592
|
+
timestamp: diff.range.timestamp,
|
|
593
|
+
ranges: map,
|
|
594
|
+
timeout: setTimeout(() => {
|
|
595
|
+
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
596
|
+
detail: { publicKey: from },
|
|
597
|
+
}));
|
|
598
|
+
for (const value of map.values()) {
|
|
599
|
+
this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
|
|
600
|
+
}
|
|
601
|
+
}, waitForMaturityTime),
|
|
602
|
+
});
|
|
603
|
+
}
|
|
604
|
+
map.set(diff.range.idString, diff);
|
|
605
|
+
}
|
|
606
|
+
}
|
|
607
|
+
else {
|
|
608
|
+
const prev = this.pendingMaturity.get(diff.range.hash);
|
|
609
|
+
if (prev) {
|
|
610
|
+
prev.ranges.delete(diff.range.idString);
|
|
611
|
+
}
|
|
461
612
|
}
|
|
462
613
|
}
|
|
463
614
|
if (reset) {
|
|
@@ -470,23 +621,33 @@ let SharedLog = class SharedLog extends Program {
|
|
|
470
621
|
this.events.dispatchEvent(new CustomEvent("replicator:join", {
|
|
471
622
|
detail: { publicKey: from },
|
|
472
623
|
}));
|
|
624
|
+
if (isAllMature) {
|
|
625
|
+
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
626
|
+
detail: { publicKey: from },
|
|
627
|
+
}));
|
|
628
|
+
}
|
|
473
629
|
}
|
|
630
|
+
if (ranges.length === 0 && deleted?.length === 0) {
|
|
631
|
+
throw new Error("Unexpected");
|
|
632
|
+
}
|
|
633
|
+
diffs.length > 0 &&
|
|
634
|
+
diffs.map((x) => this.replicationChangeDebounceFn.add(x));
|
|
474
635
|
if (!from.equals(this.node.identity.publicKey)) {
|
|
475
636
|
this.rebalanceParticipationDebounced?.();
|
|
476
637
|
}
|
|
477
|
-
return
|
|
638
|
+
return diffs;
|
|
478
639
|
};
|
|
479
640
|
// we sequialize this because we are going to queries to check wether to add or not
|
|
480
641
|
// if two processes do the same this both process might add a range while only one in practice should
|
|
481
|
-
return
|
|
642
|
+
return fn();
|
|
482
643
|
}
|
|
483
644
|
async startAnnounceReplicating(range, options = {}) {
|
|
484
|
-
const
|
|
485
|
-
if (!
|
|
645
|
+
const change = await this.addReplicationRange(range, this.node.identity.publicKey, options);
|
|
646
|
+
if (!change) {
|
|
486
647
|
logger.warn("Not allowed to replicate by canReplicate");
|
|
487
648
|
}
|
|
488
649
|
let message;
|
|
489
|
-
if (
|
|
650
|
+
if (change) {
|
|
490
651
|
if (options.reset) {
|
|
491
652
|
message = new AllReplicatingSegmentsMessage({
|
|
492
653
|
segments: range.map((x) => x.toReplicationRange()),
|
|
@@ -509,11 +670,14 @@ let SharedLog = class SharedLog extends Program {
|
|
|
509
670
|
}
|
|
510
671
|
async append(data, options) {
|
|
511
672
|
const appendOptions = { ...options };
|
|
512
|
-
const
|
|
673
|
+
const minReplicas = options?.replicas
|
|
513
674
|
? typeof options.replicas === "number"
|
|
514
675
|
? new AbsoluteReplicas(options.replicas)
|
|
515
676
|
: options.replicas
|
|
516
|
-
: this.replicas.min
|
|
677
|
+
: this.replicas.min;
|
|
678
|
+
const minReplicasValue = minReplicas.getValue(this);
|
|
679
|
+
const minReplicasData = encodeReplicas(minReplicas);
|
|
680
|
+
checkMinReplicasLimit(minReplicasValue);
|
|
517
681
|
if (!appendOptions.meta) {
|
|
518
682
|
appendOptions.meta = {
|
|
519
683
|
data: minReplicasData,
|
|
@@ -541,40 +705,55 @@ let SharedLog = class SharedLog extends Program {
|
|
|
541
705
|
if (options?.replicate) {
|
|
542
706
|
await this.replicate(result.entry, { checkDuplicates: true });
|
|
543
707
|
}
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
708
|
+
let { leaders, isLeader } = await this.findLeadersPersist({
|
|
709
|
+
entry: result.entry,
|
|
710
|
+
minReplicas: minReplicas.getValue(this),
|
|
711
|
+
}, result.entry, { persist: {} });
|
|
712
|
+
// --------------
|
|
713
|
+
if (options?.target !== "none") {
|
|
714
|
+
for (const message of await createExchangeHeadsMessages(this.log, [result.entry], this._gidParentCache)) {
|
|
715
|
+
if (options?.target === "replicators" || !options?.target) {
|
|
716
|
+
if (message.heads[0].gidRefrences.length > 0) {
|
|
717
|
+
const newAndOldLeaders = new Map(leaders);
|
|
718
|
+
for (const ref of message.heads[0].gidRefrences) {
|
|
719
|
+
const entryFromGid = this.log.entryIndex.getHeads(ref, false);
|
|
720
|
+
for (const entry of await entryFromGid.all()) {
|
|
721
|
+
let coordinate = await this.getCoordinates(entry);
|
|
722
|
+
if (coordinate == null) {
|
|
723
|
+
coordinate = await this.createCoordinates(entry, minReplicasValue);
|
|
724
|
+
// TODO are we every to come here?
|
|
725
|
+
}
|
|
726
|
+
for (const [hash, features] of await this.findLeaders(coordinate)) {
|
|
727
|
+
newAndOldLeaders.set(hash, features);
|
|
728
|
+
}
|
|
556
729
|
}
|
|
557
730
|
}
|
|
731
|
+
leaders = newAndOldLeaders;
|
|
558
732
|
}
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
set.add(receiver);
|
|
733
|
+
let set = this._gidPeersHistory.get(result.entry.meta.gid);
|
|
734
|
+
if (!set) {
|
|
735
|
+
set = new Set(leaders.keys());
|
|
736
|
+
this._gidPeersHistory.set(result.entry.meta.gid, set);
|
|
737
|
+
}
|
|
738
|
+
else {
|
|
739
|
+
for (const [receiver, _features] of leaders) {
|
|
740
|
+
set.add(receiver);
|
|
741
|
+
}
|
|
569
742
|
}
|
|
743
|
+
mode = isLeader
|
|
744
|
+
? new SilentDelivery({ redundancy: 1, to: leaders.keys() })
|
|
745
|
+
: new AcknowledgeDelivery({ redundancy: 1, to: leaders.keys() });
|
|
570
746
|
}
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
747
|
+
// TODO add options for waiting ?
|
|
748
|
+
this.rpc.send(message, {
|
|
749
|
+
mode,
|
|
750
|
+
});
|
|
574
751
|
}
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
752
|
+
}
|
|
753
|
+
if (!isLeader) {
|
|
754
|
+
this.pruneDebouncedFn.add({
|
|
755
|
+
key: result.entry.hash,
|
|
756
|
+
value: result.entry,
|
|
578
757
|
});
|
|
579
758
|
}
|
|
580
759
|
this.rebalanceParticipationDebounced?.();
|
|
@@ -594,7 +773,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
594
773
|
: undefined,
|
|
595
774
|
};
|
|
596
775
|
this.domain = options?.domain ?? createReplicationDomainHash();
|
|
597
|
-
this._respondToIHaveTimeout = options?.respondToIHaveTimeout ??
|
|
776
|
+
this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4;
|
|
598
777
|
this._pendingDeletes = new Map();
|
|
599
778
|
this._pendingIHave = new Map();
|
|
600
779
|
this.latestReplicationInfoMessage = new Map();
|
|
@@ -614,7 +793,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
614
793
|
this._isTrustedReplicator = options?.canReplicate;
|
|
615
794
|
this.sync = options?.sync;
|
|
616
795
|
this._logProperties = options;
|
|
617
|
-
this.
|
|
796
|
+
this.pendingMaturity = new Map();
|
|
618
797
|
const id = sha256Base64Sync(this.log.id);
|
|
619
798
|
const storage = await this.node.storage.sublevel(id);
|
|
620
799
|
const localBlocks = await new AnyBlockStore(await storage.sublevel("blocks"));
|
|
@@ -634,16 +813,73 @@ let SharedLog = class SharedLog extends Program {
|
|
|
634
813
|
this._replicationRangeIndex = await replicationIndex.init({
|
|
635
814
|
schema: ReplicationRangeIndexable,
|
|
636
815
|
});
|
|
816
|
+
this._entryCoordinatesIndex = await replicationIndex.init({
|
|
817
|
+
schema: EntryReplicated,
|
|
818
|
+
});
|
|
637
819
|
const logIndex = await logScope.scope("log");
|
|
638
820
|
await this.node.indexer.start(); // TODO why do we need to start the indexer here?
|
|
639
|
-
const hasIndexedReplicationInfo = (await this.replicationIndex.
|
|
821
|
+
const hasIndexedReplicationInfo = (await this.replicationIndex.count({
|
|
822
|
+
query: [
|
|
823
|
+
new StringMatch({
|
|
824
|
+
key: "hash",
|
|
825
|
+
value: this.node.identity.publicKey.hashcode(),
|
|
826
|
+
}),
|
|
827
|
+
],
|
|
828
|
+
})) > 0;
|
|
640
829
|
/* this._totalParticipation = await this.calculateTotalParticipation(); */
|
|
641
830
|
this._gidPeersHistory = new Map();
|
|
831
|
+
this.replicationChangeDebounceFn = debounceAggregationChanges((change) => this.onReplicationChange(change).then(() => this.rebalanceParticipationDebounced?.()), this.distributionDebounceTime);
|
|
832
|
+
this.pruneDebouncedFn = debouncedAccumulatorMap((map) => {
|
|
833
|
+
this.prune(map);
|
|
834
|
+
}, PRUNE_DEBOUNCE_INTERVAL);
|
|
835
|
+
this.responseToPruneDebouncedFn = debounceAcculmulator((result) => {
|
|
836
|
+
let allRequestingPeers = new Set();
|
|
837
|
+
let hashes = [];
|
|
838
|
+
for (const [hash, requestingPeers] of result) {
|
|
839
|
+
for (const peer of requestingPeers) {
|
|
840
|
+
allRequestingPeers.add(peer);
|
|
841
|
+
}
|
|
842
|
+
hashes.push(hash);
|
|
843
|
+
}
|
|
844
|
+
hashes.length > 0 &&
|
|
845
|
+
this.rpc.send(new ResponseIPrune({ hashes }), {
|
|
846
|
+
mode: new SilentDelivery({
|
|
847
|
+
to: allRequestingPeers,
|
|
848
|
+
redundancy: 1,
|
|
849
|
+
}),
|
|
850
|
+
priority: 1,
|
|
851
|
+
});
|
|
852
|
+
}, () => {
|
|
853
|
+
let accumulator = new Map();
|
|
854
|
+
return {
|
|
855
|
+
add: (props) => {
|
|
856
|
+
for (const hash of props.hashes) {
|
|
857
|
+
let prev = accumulator.get(hash);
|
|
858
|
+
if (!prev) {
|
|
859
|
+
prev = new Set();
|
|
860
|
+
accumulator.set(hash, prev);
|
|
861
|
+
}
|
|
862
|
+
for (const peer of props.peers) {
|
|
863
|
+
prev.add(peer);
|
|
864
|
+
}
|
|
865
|
+
}
|
|
866
|
+
},
|
|
867
|
+
delete: (hash) => {
|
|
868
|
+
accumulator.delete(hash);
|
|
869
|
+
},
|
|
870
|
+
finalize: () => {
|
|
871
|
+
return undefined;
|
|
872
|
+
},
|
|
873
|
+
size: () => accumulator.size,
|
|
874
|
+
clear: () => accumulator.clear(),
|
|
875
|
+
value: accumulator,
|
|
876
|
+
};
|
|
877
|
+
}, PRUNE_DEBOUNCE_INTERVAL);
|
|
642
878
|
await this.log.open(this.remoteBlocks, this.node.identity, {
|
|
643
879
|
keychain: this.node.services.keychain,
|
|
644
880
|
...this._logProperties,
|
|
645
|
-
onChange: (change) => {
|
|
646
|
-
this.onChange(change);
|
|
881
|
+
onChange: async (change) => {
|
|
882
|
+
await this.onChange(change);
|
|
647
883
|
return this._logProperties?.onChange?.(change);
|
|
648
884
|
},
|
|
649
885
|
canAppend: async (entry) => {
|
|
@@ -661,7 +897,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
661
897
|
await this.rpc.open({
|
|
662
898
|
queryType: TransportMessage,
|
|
663
899
|
responseType: TransportMessage,
|
|
664
|
-
responseHandler: this._onMessage
|
|
900
|
+
responseHandler: (query, context) => this._onMessage(query, context),
|
|
665
901
|
topic: this.topic,
|
|
666
902
|
});
|
|
667
903
|
this._onSubscriptionFn =
|
|
@@ -671,13 +907,6 @@ let SharedLog = class SharedLog extends Program {
|
|
|
671
907
|
this._onUnsubscriptionFn || this._onUnsubscription.bind(this);
|
|
672
908
|
await this.node.services.pubsub.addEventListener("unsubscribe", this._onUnsubscriptionFn);
|
|
673
909
|
await this.rpc.subscribe();
|
|
674
|
-
// await this.log.load();
|
|
675
|
-
// TODO (do better)
|
|
676
|
-
// we do this distribution interval to eliminate the sideeffects arriving from updating roles and joining entries continously.
|
|
677
|
-
// an alternative to this would be to call distribute/maybe prune after every join if our role has changed
|
|
678
|
-
this.distributeInterval = setInterval(() => {
|
|
679
|
-
this.distribute();
|
|
680
|
-
}, 7.5 * 1000);
|
|
681
910
|
const requestSync = async () => {
|
|
682
911
|
/**
|
|
683
912
|
* This method fetches entries that we potentially want.
|
|
@@ -693,7 +922,16 @@ let SharedLog = class SharedLog extends Program {
|
|
|
693
922
|
// TODO test that this if statement actually does anymeaningfull
|
|
694
923
|
if (value.length > 0) {
|
|
695
924
|
requestHashes.push(key);
|
|
696
|
-
|
|
925
|
+
const publicKeyHash = value.shift().hashcode();
|
|
926
|
+
from.add(publicKeyHash);
|
|
927
|
+
const invertedSet = this.syncInFlightQueueInverted.get(publicKeyHash);
|
|
928
|
+
if (invertedSet) {
|
|
929
|
+
if (invertedSet.delete(key)) {
|
|
930
|
+
if (invertedSet.size === 0) {
|
|
931
|
+
this.syncInFlightQueueInverted.delete(publicKeyHash);
|
|
932
|
+
}
|
|
933
|
+
}
|
|
934
|
+
}
|
|
697
935
|
}
|
|
698
936
|
if (value.length === 0) {
|
|
699
937
|
this.syncInFlightQueue.delete(key); // no-one more to ask for this entry
|
|
@@ -719,17 +957,30 @@ let SharedLog = class SharedLog extends Program {
|
|
|
719
957
|
if (this.closed) {
|
|
720
958
|
return;
|
|
721
959
|
}
|
|
722
|
-
this.syncMoreInterval = setTimeout(requestSync,
|
|
960
|
+
this.syncMoreInterval = setTimeout(requestSync, 3e3);
|
|
723
961
|
});
|
|
724
962
|
};
|
|
725
963
|
// if we had a previous session with replication info, and new replication info dictates that we unreplicate
|
|
726
964
|
// we should do that. Otherwise if options is a unreplication we dont need to do anything because
|
|
727
965
|
// we are already unreplicated (as we are just opening)
|
|
728
|
-
|
|
729
|
-
|
|
966
|
+
let isUnreplicationOptionsDefined = isUnreplicationOptions(options?.replicate);
|
|
967
|
+
if (hasIndexedReplicationInfo && isUnreplicationOptionsDefined) {
|
|
730
968
|
await this.replicate(options?.replicate, { checkDuplicates: true });
|
|
731
969
|
}
|
|
970
|
+
else if (isReplicationOptionsDependentOnPreviousState(options?.replicate) &&
|
|
971
|
+
hasIndexedReplicationInfo) {
|
|
972
|
+
// dont do anthing since we are alread replicating stuff
|
|
973
|
+
}
|
|
974
|
+
else {
|
|
975
|
+
await this.replicate(options?.replicate, {
|
|
976
|
+
checkDuplicates: true,
|
|
977
|
+
reset: true,
|
|
978
|
+
});
|
|
979
|
+
}
|
|
732
980
|
requestSync();
|
|
981
|
+
this.interval = setInterval(() => {
|
|
982
|
+
this.rebalanceParticipationDebounced?.();
|
|
983
|
+
}, RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL);
|
|
733
984
|
}
|
|
734
985
|
async afterOpen() {
|
|
735
986
|
await super.afterOpen();
|
|
@@ -759,6 +1010,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
759
1010
|
this.onEntryAdded(added.entry);
|
|
760
1011
|
}
|
|
761
1012
|
for (const removed of change.removed) {
|
|
1013
|
+
await this.deleteCoordinates({ hash: removed.hash });
|
|
762
1014
|
this.onEntryRemoved(removed.hash);
|
|
763
1015
|
}
|
|
764
1016
|
}
|
|
@@ -772,6 +1024,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
772
1024
|
if (Number.isFinite(replicas) === false) {
|
|
773
1025
|
return false;
|
|
774
1026
|
}
|
|
1027
|
+
checkMinReplicasLimit(replicas);
|
|
775
1028
|
// Don't verify entries that we have created (TODO should we? perf impact?)
|
|
776
1029
|
if (!entry.createdLocally && !(await entry.verifySignatures())) {
|
|
777
1030
|
return false;
|
|
@@ -807,9 +1060,13 @@ let SharedLog = class SharedLog extends Program {
|
|
|
807
1060
|
}
|
|
808
1061
|
async _close() {
|
|
809
1062
|
clearTimeout(this.syncMoreInterval);
|
|
810
|
-
|
|
1063
|
+
for (const [_key, value] of this.pendingMaturity) {
|
|
1064
|
+
clearTimeout(value.timeout);
|
|
1065
|
+
}
|
|
1066
|
+
this.pendingMaturity.clear();
|
|
811
1067
|
this.distributeQueue?.clear();
|
|
812
1068
|
this._closeController.abort();
|
|
1069
|
+
clearInterval(this.interval);
|
|
813
1070
|
this.node.services.pubsub.removeEventListener("subscribe", this._onSubscriptionFn);
|
|
814
1071
|
this.node.services.pubsub.removeEventListener("unsubscribe", this._onUnsubscriptionFn);
|
|
815
1072
|
for (const [_k, v] of this._pendingDeletes) {
|
|
@@ -828,10 +1085,12 @@ let SharedLog = class SharedLog extends Program {
|
|
|
828
1085
|
this.syncInFlight.clear();
|
|
829
1086
|
this.latestReplicationInfoMessage.clear();
|
|
830
1087
|
this._gidPeersHistory.clear();
|
|
1088
|
+
this.pruneDebouncedFn = undefined;
|
|
1089
|
+
this.rebalanceParticipationDebounced = undefined;
|
|
831
1090
|
this._replicationRangeIndex = undefined;
|
|
1091
|
+
this._entryCoordinatesIndex = undefined;
|
|
832
1092
|
this.cpuUsage?.stop?.();
|
|
833
1093
|
/* this._totalParticipation = 0; */
|
|
834
|
-
this.pq.clear();
|
|
835
1094
|
}
|
|
836
1095
|
async close(from) {
|
|
837
1096
|
const superClosed = await super.close(from);
|
|
@@ -885,9 +1144,6 @@ let SharedLog = class SharedLog extends Program {
|
|
|
885
1144
|
if (filteredHeads.length === 0) {
|
|
886
1145
|
return;
|
|
887
1146
|
}
|
|
888
|
-
const toMerge = [];
|
|
889
|
-
let toDelete = undefined;
|
|
890
|
-
let maybeDelete = undefined;
|
|
891
1147
|
const groupedByGid = await groupByGid(filteredHeads);
|
|
892
1148
|
const promises = [];
|
|
893
1149
|
for (const [gid, entries] of groupedByGid) {
|
|
@@ -900,19 +1156,38 @@ let SharedLog = class SharedLog extends Program {
|
|
|
900
1156
|
? maxReplicas(this, [...headsWithGid.values()])
|
|
901
1157
|
: this.replicas.min.getValue(this);
|
|
902
1158
|
const maxReplicasFromNewEntries = maxReplicas(this, entries.map((x) => x.entry));
|
|
1159
|
+
const maxMaxReplicas = Math.max(maxReplicasFromHead, maxReplicasFromNewEntries);
|
|
1160
|
+
const cursor = await this.createCoordinates(latestEntry, maxMaxReplicas);
|
|
903
1161
|
const isReplicating = await this.isReplicating();
|
|
904
1162
|
let isLeader;
|
|
905
1163
|
if (isReplicating) {
|
|
906
|
-
isLeader = await this.waitForIsLeader(
|
|
1164
|
+
isLeader = await this.waitForIsLeader(cursor, this.node.identity.publicKey.hashcode());
|
|
907
1165
|
}
|
|
908
1166
|
else {
|
|
909
|
-
isLeader = await this.findLeaders(
|
|
910
|
-
isLeader = isLeader.
|
|
1167
|
+
isLeader = await this.findLeaders(cursor);
|
|
1168
|
+
isLeader = isLeader.has(this.node.identity.publicKey.hashcode())
|
|
911
1169
|
? isLeader
|
|
912
1170
|
: false;
|
|
913
1171
|
}
|
|
1172
|
+
if (this.closed) {
|
|
1173
|
+
return;
|
|
1174
|
+
}
|
|
1175
|
+
let maybeDelete;
|
|
1176
|
+
let toMerge = [];
|
|
1177
|
+
let toDelete;
|
|
914
1178
|
if (isLeader) {
|
|
915
|
-
|
|
1179
|
+
for (const entry of entries) {
|
|
1180
|
+
this.pruneDebouncedFn.delete(entry.entry.hash);
|
|
1181
|
+
}
|
|
1182
|
+
for (const entry of entries) {
|
|
1183
|
+
await this.persistCoordinate({
|
|
1184
|
+
leaders: isLeader,
|
|
1185
|
+
coordinates: cursor,
|
|
1186
|
+
entry: entry.entry,
|
|
1187
|
+
});
|
|
1188
|
+
}
|
|
1189
|
+
const fromIsLeader = isLeader.get(context.from.hashcode());
|
|
1190
|
+
if (fromIsLeader) {
|
|
916
1191
|
let peerSet = this._gidPeersHistory.get(gid);
|
|
917
1192
|
if (!peerSet) {
|
|
918
1193
|
peerSet = new Set();
|
|
@@ -940,95 +1215,112 @@ let SharedLog = class SharedLog extends Program {
|
|
|
940
1215
|
}
|
|
941
1216
|
logger.debug(`${this.node.identity.publicKey.hashcode()}: Dropping heads with gid: ${entry.entry.meta.gid}. Because not leader`);
|
|
942
1217
|
}
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
}
|
|
946
|
-
await Promise.all(promises);
|
|
947
|
-
if (this.closed) {
|
|
948
|
-
return;
|
|
949
|
-
}
|
|
950
|
-
if (toMerge.length > 0) {
|
|
951
|
-
await this.log.join(toMerge);
|
|
952
|
-
toDelete &&
|
|
953
|
-
Promise.all(this.prune(toDelete)).catch((e) => {
|
|
954
|
-
logger.info(e.toString());
|
|
955
|
-
});
|
|
956
|
-
this.rebalanceParticipationDebounced?.();
|
|
957
|
-
}
|
|
958
|
-
/// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
|
|
959
|
-
for (const head of heads) {
|
|
960
|
-
const set = this.syncInFlight.get(context.from.hashcode());
|
|
961
|
-
if (set) {
|
|
962
|
-
set.delete(head.entry.hash);
|
|
963
|
-
if (set?.size === 0) {
|
|
964
|
-
this.syncInFlight.delete(context.from.hashcode());
|
|
1218
|
+
if (this.closed) {
|
|
1219
|
+
return;
|
|
965
1220
|
}
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
});
|
|
1221
|
+
if (toMerge.length > 0) {
|
|
1222
|
+
await this.log.join(toMerge);
|
|
1223
|
+
toDelete?.map((x) => this.pruneDebouncedFn.add({ key: x.hash, value: x }));
|
|
1224
|
+
this.rebalanceParticipationDebounced?.();
|
|
1225
|
+
}
|
|
1226
|
+
/// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
|
|
1227
|
+
for (const entry of entries) {
|
|
1228
|
+
const set = this.syncInFlight.get(context.from.hashcode());
|
|
1229
|
+
if (set) {
|
|
1230
|
+
set.delete(entry.entry.hash);
|
|
1231
|
+
if (set?.size === 0) {
|
|
1232
|
+
this.syncInFlight.delete(context.from.hashcode());
|
|
1233
|
+
}
|
|
980
1234
|
}
|
|
981
1235
|
}
|
|
982
|
-
|
|
1236
|
+
if (maybeDelete) {
|
|
1237
|
+
for (const entries of maybeDelete) {
|
|
1238
|
+
const headsWithGid = await this.log.entryIndex
|
|
1239
|
+
.getHeads(entries[0].entry.meta.gid)
|
|
1240
|
+
.all();
|
|
1241
|
+
if (headsWithGid && headsWithGid.length > 0) {
|
|
1242
|
+
const minReplicas = maxReplicas(this, headsWithGid.values());
|
|
1243
|
+
const isLeader = await this.isLeader({
|
|
1244
|
+
entry: entries[0].entry,
|
|
1245
|
+
replicas: minReplicas,
|
|
1246
|
+
});
|
|
1247
|
+
if (!isLeader) {
|
|
1248
|
+
entries.map((x) => this.pruneDebouncedFn.add({
|
|
1249
|
+
key: x.entry.hash,
|
|
1250
|
+
value: x.entry,
|
|
1251
|
+
}));
|
|
1252
|
+
}
|
|
1253
|
+
}
|
|
1254
|
+
}
|
|
1255
|
+
}
|
|
1256
|
+
};
|
|
1257
|
+
promises.push(fn()); // we do this concurrently since waitForIsLeader might be a blocking operation for some entries
|
|
983
1258
|
}
|
|
1259
|
+
await Promise.all(promises);
|
|
984
1260
|
}
|
|
985
1261
|
}
|
|
986
1262
|
else if (msg instanceof RequestIPrune) {
|
|
987
1263
|
const hasAndIsLeader = [];
|
|
1264
|
+
// await delay(3000)
|
|
988
1265
|
for (const hash of msg.hashes) {
|
|
989
1266
|
const indexedEntry = await this.log.entryIndex.getShallow(hash);
|
|
990
1267
|
if (indexedEntry &&
|
|
991
|
-
(await this.
|
|
1268
|
+
(await this.findLeadersPersist({
|
|
1269
|
+
entry: indexedEntry.value,
|
|
1270
|
+
minReplicas: decodeReplicas(indexedEntry.value).getValue(this),
|
|
1271
|
+
}, indexedEntry.value)).isLeader) {
|
|
992
1272
|
this._gidPeersHistory
|
|
993
1273
|
.get(indexedEntry.value.meta.gid)
|
|
994
1274
|
?.delete(context.from.hashcode());
|
|
995
1275
|
hasAndIsLeader.push(hash);
|
|
1276
|
+
hasAndIsLeader.length > 0 &&
|
|
1277
|
+
this.responseToPruneDebouncedFn.add({
|
|
1278
|
+
hashes: hasAndIsLeader,
|
|
1279
|
+
peers: [context.from.hashcode()],
|
|
1280
|
+
});
|
|
996
1281
|
}
|
|
997
1282
|
else {
|
|
998
1283
|
const prevPendingIHave = this._pendingIHave.get(hash);
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
this._gidPeersHistory
|
|
1007
|
-
.get(entry.meta.gid)
|
|
1008
|
-
?.delete(context.from.hashcode());
|
|
1009
|
-
this.rpc.send(new ResponseIPrune({ hashes: [entry.hash] }), {
|
|
1010
|
-
mode: new SilentDelivery({
|
|
1011
|
-
to: [context.from],
|
|
1012
|
-
redundancy: 1,
|
|
1013
|
-
}),
|
|
1014
|
-
});
|
|
1015
|
-
}
|
|
1016
|
-
prevPendingIHave && prevPendingIHave.callback(entry);
|
|
1017
|
-
this._pendingIHave.delete(entry.hash);
|
|
1018
|
-
},
|
|
1019
|
-
};
|
|
1020
|
-
const timeout = setTimeout(() => {
|
|
1021
|
-
const pendingIHaveRef = this._pendingIHave.get(hash);
|
|
1022
|
-
if (pendingIHave === pendingIHaveRef) {
|
|
1284
|
+
if (prevPendingIHave) {
|
|
1285
|
+
prevPendingIHave.requesting.add(context.from.hashcode());
|
|
1286
|
+
prevPendingIHave.resetTimeout();
|
|
1287
|
+
}
|
|
1288
|
+
else {
|
|
1289
|
+
const requesting = new Set([context.from.hashcode()]);
|
|
1290
|
+
let timeout = setTimeout(() => {
|
|
1023
1291
|
this._pendingIHave.delete(hash);
|
|
1024
|
-
}
|
|
1025
|
-
|
|
1026
|
-
|
|
1292
|
+
}, this._respondToIHaveTimeout);
|
|
1293
|
+
const pendingIHave = {
|
|
1294
|
+
requesting,
|
|
1295
|
+
resetTimeout: () => {
|
|
1296
|
+
clearTimeout(timeout);
|
|
1297
|
+
timeout = setTimeout(() => {
|
|
1298
|
+
this._pendingIHave.delete(hash);
|
|
1299
|
+
}, this._respondToIHaveTimeout);
|
|
1300
|
+
},
|
|
1301
|
+
clear: () => {
|
|
1302
|
+
clearTimeout(timeout);
|
|
1303
|
+
},
|
|
1304
|
+
callback: async (entry) => {
|
|
1305
|
+
if ((await this.findLeadersPersist({
|
|
1306
|
+
entry,
|
|
1307
|
+
minReplicas: decodeReplicas(entry).getValue(this),
|
|
1308
|
+
}, entry)).isLeader) {
|
|
1309
|
+
for (const peer of requesting) {
|
|
1310
|
+
this._gidPeersHistory.get(entry.meta.gid)?.delete(peer);
|
|
1311
|
+
}
|
|
1312
|
+
this.responseToPruneDebouncedFn.add({
|
|
1313
|
+
hashes: [entry.hash],
|
|
1314
|
+
peers: requesting,
|
|
1315
|
+
});
|
|
1316
|
+
this._pendingIHave.delete(hash);
|
|
1317
|
+
}
|
|
1318
|
+
},
|
|
1319
|
+
};
|
|
1320
|
+
this._pendingIHave.set(hash, pendingIHave);
|
|
1321
|
+
}
|
|
1027
1322
|
}
|
|
1028
1323
|
}
|
|
1029
|
-
await this.rpc.send(new ResponseIPrune({ hashes: hasAndIsLeader }), {
|
|
1030
|
-
mode: new SilentDelivery({ to: [context.from], redundancy: 1 }),
|
|
1031
|
-
});
|
|
1032
1324
|
}
|
|
1033
1325
|
else if (msg instanceof ResponseIPrune) {
|
|
1034
1326
|
for (const hash of msg.hashes) {
|
|
@@ -1040,20 +1332,23 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1040
1332
|
for (const hash of msg.hashes) {
|
|
1041
1333
|
const inFlight = this.syncInFlightQueue.get(hash);
|
|
1042
1334
|
if (inFlight) {
|
|
1043
|
-
inFlight.
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1335
|
+
if (!inFlight.find((x) => x.hashcode() === context.from.hashcode())) {
|
|
1336
|
+
inFlight.push(context.from);
|
|
1337
|
+
let inverted = this.syncInFlightQueueInverted.get(context.from.hashcode());
|
|
1338
|
+
if (!inverted) {
|
|
1339
|
+
inverted = new Set();
|
|
1340
|
+
this.syncInFlightQueueInverted.set(context.from.hashcode(), inverted);
|
|
1341
|
+
}
|
|
1342
|
+
inverted.add(hash);
|
|
1048
1343
|
}
|
|
1049
|
-
inverted.add(hash);
|
|
1050
1344
|
}
|
|
1051
1345
|
else if (!(await this.log.has(hash))) {
|
|
1052
1346
|
this.syncInFlightQueue.set(hash, []);
|
|
1053
1347
|
requestHashes.push(hash); // request immediately (first time we have seen this hash)
|
|
1054
1348
|
}
|
|
1055
1349
|
}
|
|
1056
|
-
|
|
1350
|
+
requestHashes.length > 0 &&
|
|
1351
|
+
(await this.requestSync(requestHashes, [context.from.hashcode()]));
|
|
1057
1352
|
}
|
|
1058
1353
|
else if (msg instanceof ResponseMaybeSync) {
|
|
1059
1354
|
// TODO better choice of step size
|
|
@@ -1120,8 +1415,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1120
1415
|
}
|
|
1121
1416
|
this.latestReplicationInfoMessage.set(context.from.hashcode(), context.timestamp);
|
|
1122
1417
|
let reset = msg instanceof AllReplicatingSegmentsMessage;
|
|
1123
|
-
|
|
1124
|
-
added && (await this.distribute());
|
|
1418
|
+
await this.addReplicationRange(replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from)), context.from, { reset, checkDuplicates: true });
|
|
1125
1419
|
/* await this._modifyReplicators(msg.role, context.from!); */
|
|
1126
1420
|
})
|
|
1127
1421
|
.catch((e) => {
|
|
@@ -1161,16 +1455,15 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1161
1455
|
}
|
|
1162
1456
|
}
|
|
1163
1457
|
async getMyReplicationSegments() {
|
|
1164
|
-
const ranges = await this.replicationIndex
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
|
|
1172
|
-
|
|
1173
|
-
return ranges.results.map((x) => x.value);
|
|
1458
|
+
const ranges = await this.replicationIndex
|
|
1459
|
+
.iterate({
|
|
1460
|
+
query: new StringMatch({
|
|
1461
|
+
key: "hash",
|
|
1462
|
+
value: this.node.identity.publicKey.hashcode(),
|
|
1463
|
+
}),
|
|
1464
|
+
})
|
|
1465
|
+
.all();
|
|
1466
|
+
return ranges.map((x) => x.value);
|
|
1174
1467
|
}
|
|
1175
1468
|
async getMyTotalParticipation() {
|
|
1176
1469
|
// sum all of my replicator rects
|
|
@@ -1182,13 +1475,21 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1182
1475
|
}
|
|
1183
1476
|
return this._replicationRangeIndex;
|
|
1184
1477
|
}
|
|
1478
|
+
get entryCoordinatesIndex() {
|
|
1479
|
+
if (!this._entryCoordinatesIndex) {
|
|
1480
|
+
throw new ClosedError();
|
|
1481
|
+
}
|
|
1482
|
+
return this._entryCoordinatesIndex;
|
|
1483
|
+
}
|
|
1185
1484
|
/**
|
|
1186
1485
|
* TODO improve efficiency
|
|
1187
1486
|
*/
|
|
1188
1487
|
async getReplicators() {
|
|
1189
1488
|
let set = new Set();
|
|
1190
|
-
const results = await this.replicationIndex
|
|
1191
|
-
|
|
1489
|
+
const results = await this.replicationIndex
|
|
1490
|
+
.iterate({}, { reference: true, shape: { hash: true } })
|
|
1491
|
+
.all();
|
|
1492
|
+
results.forEach((result) => {
|
|
1192
1493
|
set.add(result.value.hash);
|
|
1193
1494
|
});
|
|
1194
1495
|
return set;
|
|
@@ -1196,10 +1497,10 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1196
1497
|
async waitForReplicator(...keys) {
|
|
1197
1498
|
const check = async () => {
|
|
1198
1499
|
for (const k of keys) {
|
|
1199
|
-
const rects = await this.replicationIndex
|
|
1200
|
-
query:
|
|
1201
|
-
|
|
1202
|
-
const rect =
|
|
1500
|
+
const rects = await this.replicationIndex
|
|
1501
|
+
?.iterate({ query: new StringMatch({ key: "hash", value: k.hashcode() }) }, { reference: true })
|
|
1502
|
+
.all();
|
|
1503
|
+
const rect = rects[0]?.value;
|
|
1203
1504
|
if (!rect ||
|
|
1204
1505
|
!isMatured(rect, +new Date(), await this.getDefaultMinRoleAge())) {
|
|
1205
1506
|
return false;
|
|
@@ -1292,15 +1593,52 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1292
1593
|
});
|
|
1293
1594
|
}
|
|
1294
1595
|
}
|
|
1295
|
-
async
|
|
1296
|
-
const
|
|
1297
|
-
|
|
1596
|
+
async findLeadersPersist(cursor, entry, options) {
|
|
1597
|
+
const coordinates = Array.isArray(cursor)
|
|
1598
|
+
? cursor
|
|
1599
|
+
: await this.createCoordinates(cursor.entry, cursor.minReplicas);
|
|
1600
|
+
const leaders = await this.findLeaders(coordinates, options);
|
|
1601
|
+
const isLeader = leaders.has(this.node.identity.publicKey.hashcode());
|
|
1602
|
+
if (isLeader || options?.persist) {
|
|
1603
|
+
let assignToRangeBoundary = undefined;
|
|
1604
|
+
if (options?.persist?.prev) {
|
|
1605
|
+
assignToRangeBoundary = shouldAssigneToRangeBoundary(leaders);
|
|
1606
|
+
const prev = options.persist.prev;
|
|
1607
|
+
// dont do anthing if nothing has changed
|
|
1608
|
+
if (prev.length > 0) {
|
|
1609
|
+
let allTheSame = true;
|
|
1610
|
+
for (const element of prev) {
|
|
1611
|
+
if (element.assignedToRangeBoundary !== assignToRangeBoundary) {
|
|
1612
|
+
allTheSame = false;
|
|
1613
|
+
break;
|
|
1614
|
+
}
|
|
1615
|
+
}
|
|
1616
|
+
if (allTheSame) {
|
|
1617
|
+
return { leaders, isLeader };
|
|
1618
|
+
}
|
|
1619
|
+
}
|
|
1620
|
+
}
|
|
1621
|
+
!this.closed &&
|
|
1622
|
+
(await this.persistCoordinate({
|
|
1623
|
+
leaders,
|
|
1624
|
+
coordinates,
|
|
1625
|
+
entry,
|
|
1626
|
+
}, {
|
|
1627
|
+
assignToRangeBoundary: assignToRangeBoundary,
|
|
1628
|
+
}));
|
|
1629
|
+
}
|
|
1630
|
+
return { leaders, isLeader };
|
|
1631
|
+
}
|
|
1632
|
+
async isLeader(cursor, options) {
|
|
1633
|
+
const leaders = await this.findLeaders(cursor, options);
|
|
1634
|
+
return leaders.has(this.node.identity.publicKey.hashcode());
|
|
1298
1635
|
}
|
|
1299
|
-
async waitForIsLeader(
|
|
1636
|
+
async waitForIsLeader(cursor, hash, options = { timeout: this.waitForReplicatorTimeout }) {
|
|
1300
1637
|
return new Promise((resolve, reject) => {
|
|
1301
1638
|
const removeListeners = () => {
|
|
1302
1639
|
this.events.removeEventListener("replication:change", roleListener);
|
|
1303
|
-
this.
|
|
1640
|
+
this.events.removeEventListener("replicator:mature", roleListener); // TODO replication:change event ?
|
|
1641
|
+
this._closeController.signal.removeEventListener("abort", abortListener);
|
|
1304
1642
|
};
|
|
1305
1643
|
const abortListener = () => {
|
|
1306
1644
|
removeListeners();
|
|
@@ -1310,9 +1648,9 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1310
1648
|
const timer = setTimeout(() => {
|
|
1311
1649
|
removeListeners();
|
|
1312
1650
|
resolve(false);
|
|
1313
|
-
}, timeout);
|
|
1314
|
-
const check = () => this.findLeaders(
|
|
1315
|
-
const isLeader = leaders.
|
|
1651
|
+
}, options.timeout);
|
|
1652
|
+
const check = () => this.findLeaders(cursor).then((leaders) => {
|
|
1653
|
+
const isLeader = leaders.has(hash);
|
|
1316
1654
|
if (isLeader) {
|
|
1317
1655
|
removeListeners();
|
|
1318
1656
|
clearTimeout(timer);
|
|
@@ -1323,16 +1661,65 @@ let SharedLog = class SharedLog extends Program {
                 check();
             };
             this.events.addEventListener("replication:change", roleListener); // TODO replication:change event ?
+            this.events.addEventListener("replicator:mature", roleListener); // TODO replication:change event ?
             this._closeController.signal.addEventListener("abort", abortListener);
             check();
         });
     }
-    async findLeaders(
+    async findLeaders(cursor, options) {
         if (this.closed) {
-
+            const map = new Map(); // Assumption: if the store is closed, always assume we have responsibility over the data
+            map.set(this.node.identity.publicKey.hashcode(), { intersecting: false });
+            return map;
         }
+        const coordinates = Array.isArray(cursor)
+            ? cursor
+            : await this.createCoordinates(cursor.entry, cursor.replicas);
+        const leaders = await this.findLeadersFromU32(coordinates, options);
+        return leaders;
+    }
+    async groupByLeaders(cursors, options) {
+        const leaders = await Promise.all(cursors.map((x) => this.findLeaders(x, options)));
+        const map = new Map();
+        leaders.forEach((leader, i) => {
+            for (const [hash] of leader) {
+                const arr = map.get(hash) ?? [];
+                arr.push(i);
+                map.set(hash, arr);
+            }
+        });
+        return map;
+    }
+    async createCoordinates(entry, minReplicas) {
         const cursor = await this.domain.fromEntry(entry);
-
+        const out = getEvenlySpacedU32(cursor, minReplicas);
+        return out;
+    }
+    async getCoordinates(entry) {
+        const result = await this.entryCoordinatesIndex
+            .iterate({ query: { hash: entry.hash } })
+            .all();
+        return result.map((x) => x.value.coordinate);
+    }
+    async persistCoordinate(properties, options) {
+        let assignedToRangeBoundary = options?.assignToRangeBoundary ??
+            shouldAssigneToRangeBoundary(properties.leaders);
+        for (const coordinate of properties.coordinates) {
+            await this.entryCoordinatesIndex.put(new EntryReplicated({
+                assignedToRangeBoundary,
+                coordinate,
+                meta: properties.entry.meta,
+                hash: properties.entry.hash,
+            }));
+        }
+        if (properties.entry.meta.next.length > 0) {
+            await this.entryCoordinatesIndex.del({
+                query: new Or(properties.entry.meta.next.map((x) => new StringMatch({ key: "hash", value: x }))),
+            });
+        }
+    }
+    async deleteCoordinates(properties) {
+        await this.entryCoordinatesIndex.del({ query: properties });
     }
     async getDefaultMinRoleAge() {
         if ((await this.isReplicating()) === false) {
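groupByLeaders, added above, inverts the per-cursor leader maps into a map from leader hash to the indices of the cursors that peer covers; the prune path further down uses it to batch one RPC per peer. The same inversion as a standalone TypeScript sketch with simplified types:

// Standalone sketch of the inversion groupByLeaders performs.
function groupByLeaders(leadersPerCursor: Map<string, unknown>[]): Map<string, number[]> {
    const byLeader = new Map<string, number[]>();
    leadersPerCursor.forEach((leaders, i) => {
        for (const hash of leaders.keys()) {
            const arr = byLeader.get(hash) ?? [];
            arr.push(i); // cursor i is covered by this leader
            byLeader.set(hash, arr);
        }
    });
    return byLeader;
}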
@@ -1343,12 +1730,12 @@ let SharedLog = class SharedLog extends Program {
         const diffToOldest = replLength > 1 ? now - this.oldestOpenTime - 1 : Number.MAX_SAFE_INTEGER;
         return Math.min(this.timeUntilRoleMaturity, Math.max(diffToOldest, this.timeUntilRoleMaturity), Math.max(Math.round((this.timeUntilRoleMaturity * Math.log(replLength + 1)) / 3), this.timeUntilRoleMaturity)); // / 3 so that if 2 replicators and timeUntilRoleMaturity = 1e4 the result will be 1
     }
-    async findLeadersFromU32(cursor,
+    async findLeadersFromU32(cursor, options) {
         const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO: -500 is added so that if someone else is just as new as us, we treat them as mature as us; without -500 we might be slower syncing if two nodes start almost at the same time
-        return getSamples(cursor, this.replicationIndex,
+        return getSamples(cursor, this.replicationIndex, roleAge);
     }
     async isReplicator(entry, options) {
-        return this.isLeader(entry, decodeReplicas(entry).getValue(this), options);
+        return this.isLeader({ entry, replicas: decodeReplicas(entry).getValue(this) }, options);
     }
     async handleSubscriptionChange(publicKey, topics, subscribed) {
         for (const topic of topics) {
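The isReplicator change is the call-shape migration that recurs throughout this release: the replica count moves from a positional argument into the cursor object. For a hypothetical external caller:

// Call-shape migration implied above (hypothetical caller):
// 9.1.x: log.isLeader(entry, decodeReplicas(entry).getValue(log), options)
// 9.2.x: log.isLeader({ entry, replicas: decodeReplicas(entry).getValue(log) }, options)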
@@ -1360,20 +1747,7 @@ let SharedLog = class SharedLog extends Program {
             for (const [_a, b] of this._gidPeersHistory) {
                 b.delete(publicKey.hashcode());
             }
-            this.
-            const waitingHashes = this.syncInFlightQueueInverted.get(publicKey.hashcode());
-            if (waitingHashes) {
-                for (const hash of waitingHashes) {
-                    let arr = this.syncInFlightQueue.get(hash);
-                    if (arr) {
-                        arr = arr.filter((x) => !x.equals(publicKey));
-                    }
-                    if (this.syncInFlightQueue.size === 0) {
-                        this.syncInFlightQueue.delete(hash);
-                    }
-                }
-            }
-            this.syncInFlightQueueInverted.delete(publicKey.hashcode());
+            this.clearSyncProcessPublicKey(publicKey);
         }
         if (subscribed) {
             const replicationSegments = await this.getMyReplicationSegments();
@@ -1401,7 +1775,7 @@ let SharedLog = class SharedLog extends Program {
     }
     prune(entries, options) {
         if (options?.unchecked) {
-            return entries.map((x) => {
+            return [...entries.values()].map((x) => {
                 this._gidPeersHistory.delete(x.meta.gid);
                 return this.log.remove(x, {
                     recursively: true,
@@ -1416,7 +1790,8 @@ let SharedLog = class SharedLog extends Program {
         // - Peers join and leave, which means we might not be a replicator anymore
         const promises = [];
         const filteredEntries = [];
-
+        const deleted = new Set();
+        for (const entry of entries.values()) {
             const pendingPrev = this._pendingDeletes.get(entry.hash);
             if (pendingPrev) {
                 promises.push(pendingPrev.promise.promise);
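prune now iterates its input via entries.values() and records completed deletions in a deleted set, so a Map keyed by entry hash (deduplicated by construction) works as input. A hedged sketch, with the Entry type reduced to the fields this diff touches:

// Sketch only; `log` is an open SharedLog and `entry` a fetched entry.
type Entry = { hash: string; meta: { gid: string } };
const batch = new Map<string, Entry>([[entry.hash, entry]]);
await Promise.all(log.prune(batch, { timeout: 10_000 }));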
@@ -1442,9 +1817,10 @@ let SharedLog = class SharedLog extends Program {
                 clear();
                 deferredPromise.reject(e);
             };
-
+            let cursor = undefined;
+            const timeout = setTimeout(async () => {
                 reject(new Error("Timeout for checked pruning: Closed: " + this.closed));
-            }, options?.timeout ??
+            }, options?.timeout ?? 1e4);
             this._pendingDeletes.set(entry.hash, {
                 promise: deferredPromise,
                 clear: () => {
@@ -1456,12 +1832,13 @@ let SharedLog = class SharedLog extends Program {
             const minMinReplicasValue = this.replicas.max
                 ? Math.min(minReplicasValue, this.replicas.max.getValue(this))
                 : minReplicasValue;
-            const leaders = await this.
-
-
-
-
-
+            const leaders = await this.waitForIsLeader(cursor ??
+                (cursor = await this.createCoordinates(entry, minMinReplicasValue)), publicKeyHash);
+            if (leaders) {
+                if (leaders.has(this.node.identity.publicKey.hashcode())) {
+                    reject(new Error("Failed to delete, is leader"));
+                    return;
+                }
                 existCounter.add(publicKeyHash);
                 if (minMinReplicasValue <= existCounter.size) {
                     clear();
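The `cursor ?? (cursor = await this.createCoordinates(...))` expression above computes the entry's coordinates at most once, however many times leadership is re-checked while the delete is pending. The idiom in isolation, with declared stand-ins for everything this sketch assumes:

declare const entry: unknown;                  // stand-in for a log entry
declare const minReplicas: number;
declare function createCoordinates(entry: unknown, minReplicas: number): Promise<number[]>;

let cursor: number[] | undefined;
// Computes once, then reuses the cached coordinates on later calls.
const coordinatesOnce = async (): Promise<number[]> =>
    (cursor ??= await createCoordinates(entry, minReplicas));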
@@ -1471,7 +1848,8 @@ let SharedLog = class SharedLog extends Program {
                         recursively: true,
                     })
                         .then(() => {
-
+                        deleted.add(entry.hash);
+                        return resolve();
                     })
                         .catch((e) => {
                         reject(new Error("Failed to delete entry: " + e.toString()));
@@ -1485,23 +1863,47 @@ let SharedLog = class SharedLog extends Program {
         if (filteredEntries.length === 0) {
             return promises;
         }
-
-
+        const emitMessages = (entries, to) => {
+            this.rpc.send(new RequestIPrune({
+                hashes: entries,
+            }), {
+                mode: new SilentDelivery({
+                    to: [to], // TODO group by peers?
+                    redundancy: 1,
+                }),
+                priority: 1,
+            });
+        };
+        const maxReplicasValue = maxReplicas(this, filteredEntries);
+        this.groupByLeaders(filteredEntries.map((x) => {
+            return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined?
+        })).then((map) => {
+            for (const [peer, idx] of map) {
+                emitMessages(idx.map((i) => filteredEntries[i].hash), peer);
+            }
+        });
+        const onPeersChange = async (e) => {
             if (e.detail.publicKey.equals(this.node.identity.publicKey) === false) {
-                await this.
-
-
-
-
-
+                const peerEntries = (await this.groupByLeaders(filteredEntries
+                    .filter((x) => !deleted.has(x.hash))
+                    .map((x) => {
+                    return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined?
+                }))).get(e.detail.publicKey.hashcode());
+                if (peerEntries && peerEntries.length > 0) {
+                    emitMessages(peerEntries.map((x) => filteredEntries[x].hash), e.detail.publicKey.hashcode());
+                }
             }
         };
         // check joining peers
-        this.events.addEventListener("replicator:
-
+        this.events.addEventListener("replicator:mature", onPeersChange);
+        this.events.addEventListener("replicator:join", onPeersChange);
+        Promise.allSettled(promises).finally(() => {
+            this.events.removeEventListener("replicator:mature", onPeersChange);
+            this.events.removeEventListener("replicator:join", onPeersChange);
+        });
         return promises;
     }
-    async distribute() {
+    /* async distribute() {
         // if there is one or more items waiting for run, don't bother adding a new item just wait for the queue to empty
         if (this.distributeQueue && this.distributeQueue?.size > 0) {
             return this.distributeQueue.onEmpty();
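Checked pruning above now messages the current leaders once (RequestIPrune, grouped via groupByLeaders) and then re-notifies any peer that joins or matures while deletes are still pending, detaching both listeners once every pending delete settles. The listener lifecycle in isolation, with declared stand-ins:

declare const events: EventTarget;
declare const pending: Promise<void>[];        // one promise per pending delete
declare function resend(peerHash: string): void;

const onPeersChange = (e: Event) =>
    resend((e as CustomEvent<{ publicKey: { hashcode(): string } }>).detail.publicKey.hashcode());
events.addEventListener("replicator:mature", onPeersChange);
events.addEventListener("replicator:join", onPeersChange);
// Detach once every pending delete has either resolved or been rejected.
Promise.allSettled(pending).finally(() => {
    events.removeEventListener("replicator:mature", onPeersChange);
    events.removeEventListener("replicator:join", onPeersChange);
});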
@@ -1509,15 +1911,57 @@ let SharedLog = class SharedLog extends Program {
         if (this.closed) {
             return;
         }
-        const queue =
+        const queue =
+            this.distributeQueue ||
             (this.distributeQueue = new PQueue({ concurrency: 1 }));
         return queue
-            .add(() =>
-
-
+            .add(() =>
+                delay(Math.min(this.log.length, this.distributionDebounceTime), {
+                    signal: this._closeController.signal,
+                }).then(() => this._distribute()),
+            )
             .catch(() => { }); // catch/ignore delay abort error
+    } */
+    /**
+     * For debugging
+     */
+    async getPrunable() {
+        const heads = await this.log.getHeads(true).all();
+        let prunable = [];
+        for (const head of heads) {
+            const isLeader = await this.isLeader({
+                entry: head,
+                replicas: maxReplicas(this, [head]),
+            });
+            if (!isLeader) {
+                prunable.push(head);
+            }
+        }
+        return prunable;
+    }
+    async getNonPrunable() {
+        const heads = await this.log.getHeads(true).all();
+        let nonPrunable = [];
+        for (const head of heads) {
+            const isLeader = await this.isLeader({
+                entry: head,
+                replicas: maxReplicas(this, [head]),
+            });
+            if (isLeader) {
+                nonPrunable.push(head);
+            }
+        }
+        return nonPrunable;
     }
-    async
+    async rebalanceAll() {
+        this.onReplicationChange((await this.getMyReplicationSegments()).map((x) => {
+            return { range: x, type: "added" };
+        }));
+    }
+    async waitForPruned() {
+        await waitFor(() => this._pendingDeletes.size === 0);
+    }
+    async onReplicationChange(changeOrChanges) {
         /**
          * TODO use information of new joined/leaving peer to create a subset of heads
          * that we potentially need to share with other peers
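With the queue-based distribute() commented out above, redistribution now flows through onReplicationChange; rebalanceAll replays this node's own segments as "added" changes, waitForPruned drains _pendingDeletes, and the two heads walkers are explicitly for debugging. A hedged usage sketch against an assumed open instance:

// Sketch only; `log` is an assumed open SharedLog instance.
await log.rebalanceAll();                 // replay own segments as "added" changes
await log.waitForPruned();                // resolves once _pendingDeletes is empty
const prunable = await log.getPrunable(); // heads this node no longer leads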
@@ -1525,69 +1969,81 @@ let SharedLog = class SharedLog extends Program {
         if (this.closed) {
             return;
         }
+        const change = mergeReplicationChanges(changeOrChanges);
         const changed = false;
-
-
-
-
-
-
-
-
-
-
-
-            }
-            const oldPeersSet = this._gidPeersHistory.get(gid);
-            const currentPeers = await this.findLeaders(getLatestEntry(entries), maxReplicas(this, entries));
-            const isLeader = currentPeers.find((x) => x === this.node.identity.publicKey.hashcode());
-            const currentPeersSet = new Set(currentPeers);
-            this._gidPeersHistory.set(gid, currentPeersSet);
-            for (const currentPeer of currentPeers) {
-                if (currentPeer === this.node.identity.publicKey.hashcode()) {
-                    continue;
+        try {
+            await this.log.trim();
+            const uncheckedDeliver = new Map();
+            const allEntriesToDelete = [];
+            for await (const { gid, entries: coordinates } of toRebalance(change, this.entryCoordinatesIndex)) {
+                if (this.closed) {
+                    break;
+                }
+                const oldPeersSet = this._gidPeersHistory.get(gid);
+                if (this.closed) {
+                    return;
                 }
-
-
-
-
-
+                let { isLeader, leaders: currentPeers } = await this.findLeadersPersist(coordinates.map((x) => x.coordinate), coordinates[0], {
+                    roleAge: 0,
+                    persist: {
+                        prev: coordinates,
+                    },
+                });
+                if (isLeader) {
+                    for (const entry of coordinates) {
+                        this.pruneDebouncedFn.delete(entry.hash);
                     }
-
-
+                }
+                const currentPeersSet = new Set(currentPeers.keys());
+                this._gidPeersHistory.set(gid, currentPeersSet);
+                for (const [currentPeer] of currentPeers) {
+                    if (currentPeer === this.node.identity.publicKey.hashcode()) {
+                        continue;
+                    }
+                    if (!oldPeersSet?.has(currentPeer)) {
+                        let arr = uncheckedDeliver.get(currentPeer);
+                        if (!arr) {
+                            arr = [];
+                            uncheckedDeliver.set(currentPeer, arr);
+                        }
+                        for (const entry of coordinates) {
+                            arr.push(entry);
+                        }
                     }
                 }
-
-
-
-
-
-
-
-
+                if (!isLeader) {
+                    if (currentPeers.size > 0) {
+                        // If we are an observer, never prune locally created entries, since we don't really know who can store them.
+                        // If we are a replicator, we will always persist entries that we need to, so filtering on createdLocally will not make a difference.
+                        let entriesToDelete = coordinates;
+                        if (this.sync) {
+                            entriesToDelete = entriesToDelete.filter((entry) => this.sync(entry) === false);
+                        }
+                        allEntriesToDelete.push(...entriesToDelete);
                     }
-                    allEntriesToDelete.push(...entriesToDelete);
                 }
-
-
-
-
-
-
+                else {
+                    for (const entry of coordinates) {
+                        await this._pendingDeletes
+                            .get(entry.hash)
+                            ?.reject(new Error("Failed to delete, is leader again. Closed: " + this.closed));
+                    }
                 }
             }
+            for (const [target, entries] of uncheckedDeliver) {
+                this.rpc.send(new RequestMaybeSync({ hashes: entries.map((x) => x.hash) }), {
+                    mode: new SilentDelivery({ to: [target], redundancy: 1 }),
+                });
+            }
+            if (allEntriesToDelete.length > 0) {
+                allEntriesToDelete.map((x) => this.pruneDebouncedFn.add({ key: x.hash, value: x }));
+            }
+            return changed;
         }
-
-
-
-            });
-        }
-        if (allEntriesToDelete.length > 0) {
-            Promise.allSettled(this.prune(allEntriesToDelete)).catch((e) => {
-                logger.info(e.toString());
-            });
+        catch (error) {
+            logger.error(error.toString());
+            throw error;
         }
-        return changed;
     }
     async requestSync(hashes, to) {
         const now = +new Date();
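onReplicationChange now consumes toRebalance(...) as an async iterator and hands non-leader entries to pruneDebouncedFn, a keyed debounced accumulator: add({ key, value }) (re)schedules a flush, and delete(key) retracts an entry in O(1) if leadership returns first. A minimal sketch of such an accumulator, inferred from these call sites rather than taken from the new src/debounce.ts:

// Minimal keyed debounce sketch (API inferred from the call sites above).
function debouncedAccumulator<V>(flush: (batch: Map<string, V>) => void, waitMs: number) {
    let pending = new Map<string, V>();
    let timer: ReturnType<typeof setTimeout> | undefined;
    return {
        add(item: { key: string; value: V }) {
            pending.set(item.key, item.value); // last write per key wins
            clearTimeout(timer);
            timer = setTimeout(() => {
                const batch = pending;
                pending = new Map();
                flush(batch); // one flush per quiet period
            }, waitMs);
        },
        delete(key: string) {
            return pending.delete(key); // retract before the flush fires
        },
    };
}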
@@ -1608,7 +2064,7 @@ let SharedLog = class SharedLog extends Program {
         });
     }
     async _onUnsubscription(evt) {
-        logger.debug(`Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(evt.detail.unsubscriptions.map((x) => x))}'`);
+        logger.debug(`Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(evt.detail.unsubscriptions.map((x) => x))} '`);
         this.latestReplicationInfoMessage.delete(evt.detail.from.hashcode());
         // TODO only emit this if the peer is actually replicating anything
         this.events.dispatchEvent(new CustomEvent("replicator:leave", {
@@ -1645,78 +2101,80 @@ let SharedLog = class SharedLog extends Program {
         return (factorChanges.reduce((sum, change) => sum + change, 0) /
             factorChanges.length);
     }
-    async rebalanceParticipation(
+    async rebalanceParticipation() {
        // update more participation rate to converge to the average expected rate or bounded by
        // resources such as memory and/or cpu
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        const peersSize = (await peers.getSize()) || 1;
-        const totalParticipation = await this.calculateTotalParticipation();
-        const newFactor = this.replicationController.step({
-            memoryUsage: usedMemory,
-            currentFactor: dynamicRange.widthNormalized,
-            totalFactor: totalParticipation, // TODO use this._totalParticipation when flakiness is fixed
-            peerCount: peersSize,
-            cpuUsage: this.cpuUsage?.value(),
-        });
-        const relativeDifference = Math.abs(dynamicRange.widthNormalized - newFactor) /
-            dynamicRange.widthNormalized;
-        if (relativeDifference > 0.0001) {
-            // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
-            dynamicRange = new ReplicationRangeIndexable({
-                offset: hashToU32(this.node.identity.publicKey.bytes),
-                length: scaleToU32(newFactor),
-                publicKeyHash: dynamicRange.hash,
-                id: dynamicRange.id,
-                mode: dynamicRange.mode,
-                timestamp: dynamicRange.timestamp,
-            });
-            const canReplicate = !this._isTrustedReplicator ||
-                (await this._isTrustedReplicator(this.node.identity.publicKey));
-            if (!canReplicate) {
-                return false;
+        const fn = async () => {
+            if (this.closed) {
+                return false;
+            }
+            // The role is fixed (no changes depending on memory usage or peer count etc)
+            if (!this._isReplicating) {
+                return false;
+            }
+            if (this._isAdaptiveReplicating) {
+                const peers = this.replicationIndex;
+                const usedMemory = await this.getMemoryUsage();
+                let dynamicRange = await this.getDynamicRange();
+                if (!dynamicRange) {
+                    return; // not allowed to replicate
             }
-            await
-
-
+                const peersSize = (await peers.getSize()) || 1;
+                const totalParticipation = await this.calculateTotalParticipation();
+                const newFactor = this.replicationController.step({
+                    memoryUsage: usedMemory,
+                    currentFactor: dynamicRange.widthNormalized,
+                    totalFactor: totalParticipation, // TODO use this._totalParticipation when flakiness is fixed
+                    peerCount: peersSize,
+                    cpuUsage: this.cpuUsage?.value(),
             });
-
-
-
-
-
-
+                const relativeDifference = Math.abs(dynamicRange.widthNormalized - newFactor) /
+                    dynamicRange.widthNormalized;
+                if (relativeDifference > 0.0001) {
+                    // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
+                    dynamicRange = new ReplicationRangeIndexable({
+                        offset: hashToU32(this.node.identity.publicKey.bytes),
+                        length: scaleToU32(newFactor),
+                        publicKeyHash: dynamicRange.hash,
+                        id: dynamicRange.id,
+                        mode: dynamicRange.mode,
+                        timestamp: dynamicRange.timestamp,
+                    });
+                    const canReplicate = !this._isTrustedReplicator ||
+                        (await this._isTrustedReplicator(this.node.identity.publicKey));
+                    if (!canReplicate) {
+                        return false;
+                    }
+                    await this.startAnnounceReplicating([dynamicRange], {
+                        checkDuplicates: false,
+                        reset: false,
+                    });
+                    /* await this._updateRole(newRole, onRoleChange); */
+                    this.rebalanceParticipationDebounced?.();
+                    return true;
+                }
+                else {
+                    this.rebalanceParticipationDebounced?.();
+                }
+                return false;
             }
             return false;
-    }
-
+        };
+        const resp = await fn();
+        return resp;
     }
     async getDynamicRange() {
-        let dynamicRangeId =
-
-
-        ]));
-        let range = (await this.replicationIndex.query(new SearchRequest({
+        let dynamicRangeId = getIdForDynamicRange(this.node.identity.publicKey);
+        let range = (await this.replicationIndex
+            .iterate({
             query: [
                 new ByteMatchQuery({
                     key: "id",
                     value: dynamicRangeId,
                 }),
             ],
-
-
+            })
+            .all())?.[0]?.value;
         if (!range) {
             range = new ReplicationRangeIndexable({
                 normalized: true,
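rebalanceParticipation above re-announces the dynamic range only when the controller's new factor differs from the current normalized width by more than 0.01% in relative terms, so controller noise does not cause announcement churn. The gate in isolation:

// The same 0.01% relative-change gate used above.
function shouldReannounce(currentFactor: number, newFactor: number): boolean {
    return Math.abs(currentFactor - newFactor) / currentFactor > 0.0001;
}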
@@ -1750,7 +2208,26 @@ let SharedLog = class SharedLog extends Program {
                 this.syncInFlightQueue.delete(hash);
             }
         }
-
+    clearSyncProcessPublicKey(publicKey) {
+        this.syncInFlight.delete(publicKey.hashcode());
+        const map = this.syncInFlightQueueInverted.get(publicKey.hashcode());
+        if (map) {
+            for (const hash of map) {
+                const arr = this.syncInFlightQueue.get(hash);
+                if (arr) {
+                    const filtered = arr.filter((x) => !x.equals(publicKey));
+                    if (filtered.length > 0) {
+                        this.syncInFlightQueue.set(hash, filtered);
+                    }
+                    else {
+                        this.syncInFlightQueue.delete(hash);
+                    }
+                }
+            }
+            this.syncInFlightQueueInverted.delete(publicKey.hashcode());
+        }
+    }
+    async onEntryAdded(entry) {
         const ih = this._pendingIHave.get(entry.hash);
         if (ih) {
             ih.clear();
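The extracted clearSyncProcessPublicKey also repairs the old bookkeeping (the removed code in the handleSubscriptionChange hunk tested this.syncInFlightQueue.size === 0 on the whole map before deleting a key): per hash it now keeps the filtered peer list when it is non-empty and deletes the key otherwise. The same two-sided cleanup over simplified string-keyed maps:

// Sketch with simplified types: hash -> waiting peers, peer -> hashes.
function clearPeer(
    queue: Map<string, string[]>,
    inverted: Map<string, Set<string>>,
    peer: string,
): void {
    const hashes = inverted.get(peer);
    if (!hashes) return;
    for (const hash of hashes) {
        const filtered = (queue.get(hash) ?? []).filter((p) => p !== peer);
        if (filtered.length > 0) queue.set(hash, filtered); // others still waiting
        else queue.delete(hash);                            // drop the emptied key
    }
    inverted.delete(peer);
}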