@peerbit/shared-log 9.2.13 → 10.0.0-05f4bef
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/benchmark/get-samples.js +190 -64
- package/dist/benchmark/get-samples.js.map +1 -1
- package/dist/benchmark/index.js +16 -38
- package/dist/benchmark/index.js.map +1 -1
- package/dist/benchmark/memory/child.js.map +1 -1
- package/dist/benchmark/partial-sync.d.ts +3 -0
- package/dist/benchmark/partial-sync.d.ts.map +1 -0
- package/dist/benchmark/partial-sync.js +121 -0
- package/dist/benchmark/partial-sync.js.map +1 -0
- package/dist/benchmark/replication-prune.js.map +1 -1
- package/dist/benchmark/replication.js.map +1 -1
- package/dist/benchmark/to-rebalance.d.ts +2 -0
- package/dist/benchmark/to-rebalance.d.ts.map +1 -0
- package/dist/benchmark/to-rebalance.js +117 -0
- package/dist/benchmark/to-rebalance.js.map +1 -0
- package/dist/benchmark/utils.d.ts +24 -0
- package/dist/benchmark/utils.d.ts.map +1 -0
- package/dist/benchmark/utils.js +47 -0
- package/dist/benchmark/utils.js.map +1 -0
- package/dist/src/debounce.d.ts +2 -2
- package/dist/src/debounce.d.ts.map +1 -1
- package/dist/src/debounce.js +17 -47
- package/dist/src/debounce.js.map +1 -1
- package/dist/src/exchange-heads.d.ts +1 -13
- package/dist/src/exchange-heads.d.ts.map +1 -1
- package/dist/src/exchange-heads.js +0 -32
- package/dist/src/exchange-heads.js.map +1 -1
- package/dist/src/index.d.ts +119 -60
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +1116 -762
- package/dist/src/index.js.map +1 -1
- package/dist/src/integers.d.ts +22 -0
- package/dist/src/integers.d.ts.map +1 -0
- package/dist/src/integers.js +76 -0
- package/dist/src/integers.js.map +1 -0
- package/dist/src/pid.d.ts.map +1 -1
- package/dist/src/pid.js +22 -22
- package/dist/src/pid.js.map +1 -1
- package/dist/src/ranges.d.ts +168 -38
- package/dist/src/ranges.d.ts.map +1 -1
- package/dist/src/ranges.js +869 -272
- package/dist/src/ranges.js.map +1 -1
- package/dist/src/replication-domain-hash.d.ts +2 -3
- package/dist/src/replication-domain-hash.d.ts.map +1 -1
- package/dist/src/replication-domain-hash.js +40 -15
- package/dist/src/replication-domain-hash.js.map +1 -1
- package/dist/src/replication-domain-time.d.ts +5 -5
- package/dist/src/replication-domain-time.d.ts.map +1 -1
- package/dist/src/replication-domain-time.js +2 -0
- package/dist/src/replication-domain-time.js.map +1 -1
- package/dist/src/replication-domain.d.ts +17 -19
- package/dist/src/replication-domain.d.ts.map +1 -1
- package/dist/src/replication-domain.js +2 -6
- package/dist/src/replication-domain.js.map +1 -1
- package/dist/src/replication.d.ts +6 -6
- package/dist/src/replication.d.ts.map +1 -1
- package/dist/src/replication.js +4 -4
- package/dist/src/replication.js.map +1 -1
- package/dist/src/role.d.ts +3 -6
- package/dist/src/role.d.ts.map +1 -1
- package/dist/src/role.js +4 -5
- package/dist/src/role.js.map +1 -1
- package/dist/src/sync/index.d.ts +40 -0
- package/dist/src/sync/index.d.ts.map +1 -0
- package/dist/src/sync/index.js +2 -0
- package/dist/src/sync/index.js.map +1 -0
- package/dist/src/sync/rateless-iblt.d.ts +124 -0
- package/dist/src/sync/rateless-iblt.d.ts.map +1 -0
- package/dist/src/sync/rateless-iblt.js +495 -0
- package/dist/src/sync/rateless-iblt.js.map +1 -0
- package/dist/src/sync/simple.d.ts +69 -0
- package/dist/src/sync/simple.d.ts.map +1 -0
- package/dist/src/sync/simple.js +338 -0
- package/dist/src/sync/simple.js.map +1 -0
- package/dist/src/sync/wasm-init.browser.d.ts +1 -0
- package/dist/src/sync/wasm-init.browser.d.ts.map +1 -0
- package/dist/src/sync/wasm-init.browser.js +3 -0
- package/dist/src/sync/wasm-init.browser.js.map +1 -0
- package/dist/src/sync/wasm-init.d.ts +2 -0
- package/dist/src/sync/wasm-init.d.ts.map +1 -0
- package/dist/src/sync/wasm-init.js +13 -0
- package/dist/src/sync/wasm-init.js.map +1 -0
- package/dist/src/utils.d.ts +3 -3
- package/dist/src/utils.d.ts.map +1 -1
- package/dist/src/utils.js +2 -2
- package/dist/src/utils.js.map +1 -1
- package/package.json +73 -69
- package/src/debounce.ts +16 -51
- package/src/exchange-heads.ts +1 -23
- package/src/index.ts +1532 -1038
- package/src/integers.ts +102 -0
- package/src/pid.ts +23 -22
- package/src/ranges.ts +1204 -413
- package/src/replication-domain-hash.ts +43 -18
- package/src/replication-domain-time.ts +9 -9
- package/src/replication-domain.ts +21 -31
- package/src/replication.ts +10 -9
- package/src/role.ts +4 -6
- package/src/sync/index.ts +51 -0
- package/src/sync/rateless-iblt.ts +617 -0
- package/src/sync/simple.ts +403 -0
- package/src/sync/wasm-init.browser.ts +1 -0
- package/src/sync/wasm-init.ts +14 -0
- package/src/utils.ts +10 -4
package/dist/src/index.js
CHANGED
|
@@ -9,37 +9,39 @@ var __metadata = (this && this.__metadata) || function (k, v) {
|
|
|
9
9
|
};
|
|
10
10
|
import { BorshError, field, variant } from "@dao-xyz/borsh";
|
|
11
11
|
import { AnyBlockStore, RemoteBlocks } from "@peerbit/blocks";
|
|
12
|
+
import { Cache } from "@peerbit/cache";
|
|
12
13
|
import { AccessError, PublicSignKey, sha256Base64Sync, sha256Sync, } from "@peerbit/crypto";
|
|
13
|
-
import { And, ByteMatchQuery, Or, Sort, StringMatch, } from "@peerbit/indexer-interface";
|
|
14
|
-
import { Entry, Log, ShallowEntry, } from "@peerbit/log";
|
|
14
|
+
import { And, ByteMatchQuery, NotStartedError as IndexNotStartedError, Or, Sort, StringMatch, toId, } from "@peerbit/indexer-interface";
|
|
15
|
+
import { Entry, Log, Meta, ShallowEntry, } from "@peerbit/log";
|
|
15
16
|
import { logger as loggerFn } from "@peerbit/logger";
|
|
16
17
|
import { ClosedError, Program } from "@peerbit/program";
|
|
17
18
|
import { SubscriptionEvent, UnsubcriptionEvent, } from "@peerbit/pubsub-interface";
|
|
18
19
|
import { RPC } from "@peerbit/rpc";
|
|
19
|
-
import { AcknowledgeDelivery, DeliveryMode, NotStartedError, SilentDelivery, } from "@peerbit/stream-interface";
|
|
20
|
-
import { AbortError,
|
|
21
|
-
/* delay, */
|
|
22
|
-
waitFor, } from "@peerbit/time";
|
|
20
|
+
import { AcknowledgeDelivery, DeliveryMode, NotStartedError, SeekDelivery, SilentDelivery, } from "@peerbit/stream-interface";
|
|
21
|
+
import { AbortError, waitFor } from "@peerbit/time";
|
|
23
22
|
import pDefer, {} from "p-defer";
|
|
24
23
|
import PQueue from "p-queue";
|
|
25
24
|
import { concat } from "uint8arrays";
|
|
26
25
|
import { BlocksMessage } from "./blocks.js";
|
|
27
26
|
import { CPUUsageIntervalLag } from "./cpu.js";
|
|
28
|
-
import {
|
|
29
|
-
import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune,
|
|
27
|
+
import { debounceAccumulator, debounceFixedInterval, debouncedAccumulatorMap, } from "./debounce.js";
|
|
28
|
+
import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune, ResponseIPrune, createExchangeHeadsMessages, } from "./exchange-heads.js";
|
|
29
|
+
import { MAX_U32, bytesToNumber, createNumbers, denormalizer, } from "./integers.js";
|
|
30
30
|
import { TransportMessage } from "./message.js";
|
|
31
31
|
import { PIDReplicationController } from "./pid.js";
|
|
32
|
-
import {
|
|
33
|
-
import { createReplicationDomainHash,
|
|
32
|
+
import { EntryReplicatedU32, EntryReplicatedU64, ReplicationIntent, ReplicationRangeIndexableU32, ReplicationRangeIndexableU64, ReplicationRangeMessage, SyncStatus, appromixateCoverage, getCoverSet, getSamples, iHaveCoveringRange, isMatured, isReplicationRangeMessage, mergeRanges, minimumWidthToCover, shouldAssigneToRangeBoundary as shouldAssignToRangeBoundary, toRebalance, } from "./ranges.js";
|
|
33
|
+
import { createReplicationDomainHash, } from "./replication-domain-hash.js";
|
|
34
34
|
import { createReplicationDomainTime, } from "./replication-domain-time.js";
|
|
35
35
|
import { debounceAggregationChanges, mergeReplicationChanges, } from "./replication-domain.js";
|
|
36
|
-
import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, ReplicationError, RequestReplicationInfoMessage, ResponseRoleMessage, StoppedReplicating, decodeReplicas, encodeReplicas, maxReplicas, } from "./replication.js";
|
|
37
|
-
import {
|
|
36
|
+
import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, MinReplicas, ReplicationError, RequestReplicationInfoMessage, ResponseRoleMessage, StoppedReplicating, decodeReplicas, encodeReplicas, maxReplicas, } from "./replication.js";
|
|
37
|
+
import { Observer, Replicator } from "./role.js";
|
|
38
|
+
import { RatelessIBLTSynchronizer } from "./sync/rateless-iblt.js";
|
|
39
|
+
import { SimpleSyncronizer } from "./sync/simple.js";
|
|
38
40
|
import { groupByGid } from "./utils.js";
|
|
39
41
|
export { createReplicationDomainHash, createReplicationDomainTime, };
|
|
40
42
|
export { CPUUsageIntervalLag };
|
|
41
43
|
export * from "./replication.js";
|
|
42
|
-
export {
|
|
44
|
+
export { EntryReplicatedU32, EntryReplicatedU64, };
|
|
43
45
|
export const logger = loggerFn({ module: "shared-log" });
|
|
44
46
|
const getLatestEntry = (entries) => {
|
|
45
47
|
let latest = undefined;
|
|
@@ -85,9 +87,33 @@ const isReplicationOptionsDependentOnPreviousState = (options) => {
|
|
|
85
87
|
}
|
|
86
88
|
return false;
|
|
87
89
|
};
|
|
90
|
+
const createIndexableDomainFromResolution = (resolution) => {
|
|
91
|
+
const denormalizerFn = denormalizer(resolution);
|
|
92
|
+
const byteToNumberFn = bytesToNumber(resolution);
|
|
93
|
+
if (resolution === "u32") {
|
|
94
|
+
return {
|
|
95
|
+
constructorEntry: EntryReplicatedU32,
|
|
96
|
+
constructorRange: ReplicationRangeIndexableU32,
|
|
97
|
+
denormalize: denormalizerFn,
|
|
98
|
+
bytesToNumber: byteToNumberFn,
|
|
99
|
+
numbers: createNumbers(resolution),
|
|
100
|
+
};
|
|
101
|
+
}
|
|
102
|
+
else if (resolution === "u64") {
|
|
103
|
+
return {
|
|
104
|
+
constructorEntry: EntryReplicatedU64,
|
|
105
|
+
constructorRange: ReplicationRangeIndexableU64,
|
|
106
|
+
denormalize: denormalizerFn,
|
|
107
|
+
bytesToNumber: byteToNumberFn,
|
|
108
|
+
numbers: createNumbers(resolution),
|
|
109
|
+
};
|
|
110
|
+
}
|
|
111
|
+
throw new Error("Unsupported resolution");
|
|
112
|
+
};
|
|
88
113
|
export const DEFAULT_MIN_REPLICAS = 2;
|
|
89
114
|
export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000;
|
|
90
115
|
export const WAIT_FOR_ROLE_MATURITY = 5000;
|
|
116
|
+
export const WAIT_FOR_PRUNE_DELAY = 5000;
|
|
91
117
|
const PRUNE_DEBOUNCE_INTERVAL = 500;
|
|
92
118
|
// DONT SET THIS ANY LOWER, because it will make the pid controller unstable as the system responses are not fast enough to updates from the pid controller
|
|
93
119
|
const RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL = 1000;
|
|
@@ -108,7 +134,10 @@ let SharedLog = class SharedLog extends Program {
|
|
|
108
134
|
_isAdaptiveReplicating;
|
|
109
135
|
_replicationRangeIndex;
|
|
110
136
|
_entryCoordinatesIndex;
|
|
137
|
+
coordinateToHash;
|
|
138
|
+
uniqueReplicators;
|
|
111
139
|
/* private _totalParticipation!: number; */
|
|
140
|
+
// gid -> coordinate -> publicKeyHash list (of owners)
|
|
112
141
|
_gidPeersHistory;
|
|
113
142
|
_onSubscriptionFn;
|
|
114
143
|
_onUnsubscriptionFn;
|
|
@@ -118,6 +147,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
118
147
|
_respondToIHaveTimeout;
|
|
119
148
|
_pendingDeletes;
|
|
120
149
|
_pendingIHave;
|
|
150
|
+
// public key hash to range id to range
|
|
121
151
|
pendingMaturity; // map of peerId to timeout
|
|
122
152
|
latestReplicationInfoMessage;
|
|
123
153
|
remoteBlocks;
|
|
@@ -129,24 +159,22 @@ let SharedLog = class SharedLog extends Program {
|
|
|
129
159
|
// A fn for debouncing the calls for pruning
|
|
130
160
|
pruneDebouncedFn;
|
|
131
161
|
responseToPruneDebouncedFn;
|
|
162
|
+
_requestIPruneSent; // tracks entry hash to peer hash for requesting I prune messages
|
|
163
|
+
_requestIPruneResponseReplicatorSet; // tracks entry hash to peer hash
|
|
132
164
|
replicationChangeDebounceFn;
|
|
133
165
|
// regular distribution checks
|
|
134
166
|
distributeQueue;
|
|
135
|
-
|
|
136
|
-
syncMoreInterval;
|
|
137
|
-
// map of hash to public keys that we can ask for entries
|
|
138
|
-
syncInFlightQueue;
|
|
139
|
-
syncInFlightQueueInverted;
|
|
140
|
-
// map of hash to public keys that we have asked for entries
|
|
141
|
-
syncInFlight;
|
|
167
|
+
syncronizer;
|
|
142
168
|
replicas;
|
|
143
169
|
cpuUsage;
|
|
144
170
|
timeUntilRoleMaturity;
|
|
145
171
|
waitForReplicatorTimeout;
|
|
172
|
+
waitForPruneDelay;
|
|
146
173
|
distributionDebounceTime;
|
|
147
174
|
replicationController;
|
|
148
175
|
history;
|
|
149
176
|
domain;
|
|
177
|
+
indexableDomain;
|
|
150
178
|
interval;
|
|
151
179
|
constructor(properties) {
|
|
152
180
|
super();
|
|
@@ -168,6 +196,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
168
196
|
if (segments.length > 0) {
|
|
169
197
|
const segment = segments[0].toReplicationRange();
|
|
170
198
|
return new Replicator({
|
|
199
|
+
// TODO types
|
|
171
200
|
factor: segment.factor / MAX_U32,
|
|
172
201
|
offset: segment.offset / MAX_U32,
|
|
173
202
|
});
|
|
@@ -179,32 +208,8 @@ let SharedLog = class SharedLog extends Program {
|
|
|
179
208
|
if (!this._isReplicating) {
|
|
180
209
|
return false;
|
|
181
210
|
}
|
|
182
|
-
/*
|
|
183
|
-
if (isAdaptiveReplicatorOption(this._replicationSettings)) {
|
|
184
|
-
return true;
|
|
185
|
-
}
|
|
186
|
-
|
|
187
|
-
if ((this.replicationSettings as FixedReplicationOptions).factor !== 0) {
|
|
188
|
-
return true;
|
|
189
|
-
} */
|
|
190
211
|
return (await this.countReplicationSegments()) > 0;
|
|
191
212
|
}
|
|
192
|
-
/* get totalParticipation(): number {
|
|
193
|
-
return this._totalParticipation;
|
|
194
|
-
} */
|
|
195
|
-
async calculateTotalParticipation() {
|
|
196
|
-
const sum = await this.replicationIndex.sum({ key: "width" });
|
|
197
|
-
return Number(sum) / MAX_U32;
|
|
198
|
-
}
|
|
199
|
-
async countReplicationSegments() {
|
|
200
|
-
const count = await this.replicationIndex.count({
|
|
201
|
-
query: new StringMatch({
|
|
202
|
-
key: "hash",
|
|
203
|
-
value: this.node.identity.publicKey.hashcode(),
|
|
204
|
-
}),
|
|
205
|
-
});
|
|
206
|
-
return count;
|
|
207
|
-
}
|
|
208
213
|
setupRebalanceDebounceFunction(interval = RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL) {
|
|
209
214
|
this.rebalanceParticipationDebounced = undefined;
|
|
210
215
|
// make the rebalancing to respect warmup time
|
|
@@ -225,7 +230,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
225
230
|
) */
|
|
226
231
|
() => intervalTime);
|
|
227
232
|
}
|
|
228
|
-
async _replicate(options, { reset, checkDuplicates, announce, } = {}) {
|
|
233
|
+
async _replicate(options, { reset, checkDuplicates, syncStatus, announce, mergeSegments, } = {}) {
|
|
229
234
|
let offsetWasProvided = false;
|
|
230
235
|
if (isUnreplicationOptions(options)) {
|
|
231
236
|
await this.unreplicate();
|
|
@@ -252,7 +257,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
252
257
|
ranges = [maybeRange];
|
|
253
258
|
offsetWasProvided = true;
|
|
254
259
|
}
|
|
255
|
-
else if (options
|
|
260
|
+
else if (isReplicationRangeMessage(options)) {
|
|
256
261
|
ranges = [
|
|
257
262
|
options.toReplicationRangeIndexable(this.node.identity.publicKey),
|
|
258
263
|
];
|
|
@@ -275,28 +280,56 @@ let SharedLog = class SharedLog extends Program {
|
|
|
275
280
|
return;
|
|
276
281
|
}
|
|
277
282
|
for (const rangeArg of rangeArgs) {
|
|
283
|
+
let timestamp = undefined;
|
|
284
|
+
if (rangeArg.id != null) {
|
|
285
|
+
// fetch the previous timestamp if it exists
|
|
286
|
+
const indexed = await this.replicationIndex.get(toId(rangeArg.id), {
|
|
287
|
+
shape: { id: true, timestamp: true },
|
|
288
|
+
});
|
|
289
|
+
if (indexed) {
|
|
290
|
+
timestamp = indexed.value.timestamp;
|
|
291
|
+
}
|
|
292
|
+
}
|
|
278
293
|
const normalized = rangeArg.normalized ?? true;
|
|
279
294
|
offsetWasProvided = rangeArg.offset != null;
|
|
280
|
-
const offset = rangeArg.offset
|
|
281
|
-
|
|
295
|
+
const offset = rangeArg.offset != null
|
|
296
|
+
? normalized
|
|
297
|
+
? this.indexableDomain.numbers.denormalize(rangeArg.offset)
|
|
298
|
+
: rangeArg.offset
|
|
299
|
+
: this.indexableDomain.numbers.random();
|
|
282
300
|
let factor = rangeArg.factor;
|
|
283
|
-
let
|
|
284
|
-
|
|
301
|
+
let fullWidth = this.indexableDomain.numbers.maxValue;
|
|
302
|
+
let factorDenormalized = !normalized
|
|
303
|
+
? factor
|
|
304
|
+
: this.indexableDomain.numbers.denormalize(factor);
|
|
305
|
+
ranges.push(new this.indexableDomain.constructorRange({
|
|
285
306
|
id: rangeArg.id,
|
|
286
|
-
|
|
307
|
+
// @ts-ignore
|
|
287
308
|
offset: offset,
|
|
288
|
-
|
|
309
|
+
// @ts-ignore
|
|
310
|
+
length: (factor === "all"
|
|
311
|
+
? fullWidth
|
|
312
|
+
: factor === "right"
|
|
313
|
+
? // @ts-ignore
|
|
314
|
+
fullWidth - offset
|
|
315
|
+
: factorDenormalized),
|
|
316
|
+
/* typeof factor === "number"
|
|
289
317
|
? factor
|
|
290
318
|
: factor === "all"
|
|
291
319
|
? width
|
|
292
|
-
|
|
320
|
+
// @ts-ignore
|
|
321
|
+
: width - offset, */
|
|
293
322
|
publicKeyHash: this.node.identity.publicKey.hashcode(),
|
|
294
323
|
mode: rangeArg.strict
|
|
295
324
|
? ReplicationIntent.Strict
|
|
296
325
|
: ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour
|
|
297
|
-
timestamp: BigInt(+new Date()),
|
|
326
|
+
timestamp: timestamp ?? BigInt(+new Date()),
|
|
298
327
|
}));
|
|
299
328
|
}
|
|
329
|
+
if (mergeSegments && ranges.length > 1) {
|
|
330
|
+
const mergedSegment = mergeRanges(ranges, this.indexableDomain.numbers);
|
|
331
|
+
ranges = [mergedSegment];
|
|
332
|
+
}
|
|
300
333
|
}
|
|
301
334
|
for (const range of ranges) {
|
|
302
335
|
this.oldestOpenTime = Math.min(Number(range.timestamp), this.oldestOpenTime);
|
|
@@ -312,6 +345,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
312
345
|
reset: resetRanges ?? false,
|
|
313
346
|
checkDuplicates,
|
|
314
347
|
announce,
|
|
348
|
+
syncStatus,
|
|
315
349
|
});
|
|
316
350
|
return ranges;
|
|
317
351
|
}
|
|
@@ -339,7 +373,8 @@ let SharedLog = class SharedLog extends Program {
|
|
|
339
373
|
}
|
|
340
374
|
async replicate(rangeOrEntry, options) {
|
|
341
375
|
let range = undefined;
|
|
342
|
-
|
|
376
|
+
let syncStatus = SyncStatus.Unsynced;
|
|
377
|
+
if (rangeOrEntry instanceof ReplicationRangeMessage) {
|
|
343
378
|
range = rangeOrEntry;
|
|
344
379
|
}
|
|
345
380
|
else if (rangeOrEntry instanceof Entry) {
|
|
@@ -348,6 +383,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
348
383
|
offset: await this.domain.fromEntry(rangeOrEntry),
|
|
349
384
|
normalized: false,
|
|
350
385
|
};
|
|
386
|
+
syncStatus = SyncStatus.Synced; /// we already have the entries
|
|
351
387
|
}
|
|
352
388
|
else if (Array.isArray(rangeOrEntry)) {
|
|
353
389
|
let ranges = [];
|
|
@@ -358,6 +394,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
358
394
|
offset: await this.domain.fromEntry(entry),
|
|
359
395
|
normalized: false,
|
|
360
396
|
});
|
|
397
|
+
syncStatus = SyncStatus.Synced; /// we already have the entries
|
|
361
398
|
}
|
|
362
399
|
else {
|
|
363
400
|
ranges.push(entry);
|
|
@@ -368,7 +405,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
368
405
|
else {
|
|
369
406
|
range = rangeOrEntry ?? true;
|
|
370
407
|
}
|
|
371
|
-
return this._replicate(range, options);
|
|
408
|
+
return this._replicate(range, { ...options, syncStatus });
|
|
372
409
|
}
|
|
373
410
|
async unreplicate(rangeOrEntry) {
|
|
374
411
|
let range;
|
|
@@ -378,7 +415,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
378
415
|
offset: await this.domain.fromEntry(rangeOrEntry),
|
|
379
416
|
};
|
|
380
417
|
}
|
|
381
|
-
else if (rangeOrEntry instanceof
|
|
418
|
+
else if (rangeOrEntry instanceof ReplicationRangeMessage) {
|
|
382
419
|
range = rangeOrEntry;
|
|
383
420
|
}
|
|
384
421
|
else {
|
|
@@ -405,46 +442,48 @@ let SharedLog = class SharedLog extends Program {
|
|
|
405
442
|
});
|
|
406
443
|
}
|
|
407
444
|
async removeReplicator(key, options) {
|
|
408
|
-
const
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
445
|
+
const keyHash = typeof key === "string" ? key : key.hashcode();
|
|
446
|
+
const deleted = await this.replicationIndex
|
|
447
|
+
.iterate({
|
|
448
|
+
query: { hash: keyHash },
|
|
449
|
+
})
|
|
450
|
+
.all();
|
|
451
|
+
this.uniqueReplicators.delete(keyHash);
|
|
452
|
+
await this.replicationIndex.del({ query: { hash: keyHash } });
|
|
453
|
+
await this.updateOldestTimestampFromIndex();
|
|
454
|
+
const isMe = this.node.identity.publicKey.hashcode() === keyHash;
|
|
455
|
+
if (isMe) {
|
|
456
|
+
// announce that we are no longer replicating
|
|
457
|
+
await this.rpc.send(new AllReplicatingSegmentsMessage({ segments: [] }), {
|
|
458
|
+
priority: 1,
|
|
459
|
+
});
|
|
460
|
+
}
|
|
461
|
+
if (options?.noEvent !== true) {
|
|
462
|
+
if (key instanceof PublicSignKey) {
|
|
463
|
+
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
464
|
+
detail: { publicKey: key },
|
|
465
|
+
}));
|
|
421
466
|
}
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
425
|
-
detail: { publicKey: key },
|
|
426
|
-
}));
|
|
427
|
-
}
|
|
428
|
-
else {
|
|
429
|
-
throw new Error("Key was not a PublicSignKey");
|
|
430
|
-
}
|
|
467
|
+
else {
|
|
468
|
+
throw new Error("Key was not a PublicSignKey");
|
|
431
469
|
}
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
470
|
+
}
|
|
471
|
+
for (const x of deleted) {
|
|
472
|
+
this.replicationChangeDebounceFn.add({
|
|
473
|
+
range: x.value,
|
|
474
|
+
type: "removed",
|
|
437
475
|
});
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
if (!isMe) {
|
|
444
|
-
this.rebalanceParticipationDebounced?.();
|
|
476
|
+
}
|
|
477
|
+
const pendingMaturity = this.pendingMaturity.get(keyHash);
|
|
478
|
+
if (pendingMaturity) {
|
|
479
|
+
for (const [_k, v] of pendingMaturity) {
|
|
480
|
+
clearTimeout(v.timeout);
|
|
445
481
|
}
|
|
446
|
-
|
|
447
|
-
|
|
482
|
+
this.pendingMaturity.delete(keyHash);
|
|
483
|
+
}
|
|
484
|
+
if (!isMe) {
|
|
485
|
+
this.rebalanceParticipationDebounced?.();
|
|
486
|
+
}
|
|
448
487
|
}
|
|
449
488
|
async updateOldestTimestampFromIndex() {
|
|
450
489
|
const iterator = await this.replicationIndex.iterate({
|
|
@@ -458,190 +497,196 @@ let SharedLog = class SharedLog extends Program {
|
|
|
458
497
|
: +new Date();
|
|
459
498
|
}
|
|
460
499
|
async removeReplicationRange(ids, from) {
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
clearTimeout(pendingMaturity.timeout);
|
|
476
|
-
this.pendingMaturity.delete(from.hashcode());
|
|
500
|
+
let idMatcher = new Or(ids.map((x) => new ByteMatchQuery({ key: "id", value: x })));
|
|
501
|
+
// make sure we are not removing something that is owned by the replicator
|
|
502
|
+
let identityMatcher = new StringMatch({
|
|
503
|
+
key: "hash",
|
|
504
|
+
value: from.hashcode(),
|
|
505
|
+
});
|
|
506
|
+
let query = new And([idMatcher, identityMatcher]);
|
|
507
|
+
const pendingMaturity = this.pendingMaturity.get(from.hashcode());
|
|
508
|
+
if (pendingMaturity) {
|
|
509
|
+
for (const id of ids) {
|
|
510
|
+
const info = pendingMaturity.get(id.toString());
|
|
511
|
+
if (info) {
|
|
512
|
+
clearTimeout(info.timeout);
|
|
513
|
+
pendingMaturity.delete(id.toString());
|
|
477
514
|
}
|
|
478
515
|
}
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
482
|
-
detail: { publicKey: from },
|
|
483
|
-
}));
|
|
484
|
-
if (!from.equals(this.node.identity.publicKey)) {
|
|
485
|
-
this.rebalanceParticipationDebounced?.();
|
|
516
|
+
if (pendingMaturity.size === 0) {
|
|
517
|
+
this.pendingMaturity.delete(from.hashcode());
|
|
486
518
|
}
|
|
487
|
-
}
|
|
488
|
-
|
|
519
|
+
}
|
|
520
|
+
await this.replicationIndex.del({ query });
|
|
521
|
+
const otherSegmentsIterator = this.replicationIndex.iterate({ query: { hash: from.hashcode() } }, { shape: { id: true } });
|
|
522
|
+
if ((await otherSegmentsIterator.next(1)).length === 0) {
|
|
523
|
+
this.uniqueReplicators.delete(from.hashcode());
|
|
524
|
+
}
|
|
525
|
+
await otherSegmentsIterator.close();
|
|
526
|
+
await this.updateOldestTimestampFromIndex();
|
|
527
|
+
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
528
|
+
detail: { publicKey: from },
|
|
529
|
+
}));
|
|
530
|
+
if (!from.equals(this.node.identity.publicKey)) {
|
|
531
|
+
this.rebalanceParticipationDebounced?.();
|
|
532
|
+
}
|
|
489
533
|
}
|
|
490
534
|
async addReplicationRange(ranges, from, { reset, checkDuplicates, } = {}) {
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
}
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
535
|
+
if (this._isTrustedReplicator && !(await this._isTrustedReplicator(from))) {
|
|
536
|
+
return undefined;
|
|
537
|
+
}
|
|
538
|
+
let isNewReplicator = false;
|
|
539
|
+
let diffs;
|
|
540
|
+
let deleted = undefined;
|
|
541
|
+
if (reset) {
|
|
542
|
+
deleted = (await this.replicationIndex
|
|
543
|
+
.iterate({
|
|
544
|
+
query: { hash: from.hashcode() },
|
|
545
|
+
})
|
|
546
|
+
.all()).map((x) => x.value);
|
|
547
|
+
let prevCount = deleted.length;
|
|
548
|
+
await this.replicationIndex.del({ query: { hash: from.hashcode() } });
|
|
549
|
+
diffs = [
|
|
550
|
+
...deleted.map((x) => {
|
|
551
|
+
return { range: x, type: "removed" };
|
|
552
|
+
}),
|
|
553
|
+
...ranges.map((x) => {
|
|
554
|
+
return { range: x, type: "added" };
|
|
555
|
+
}),
|
|
556
|
+
];
|
|
557
|
+
isNewReplicator = prevCount === 0 && ranges.length > 0;
|
|
558
|
+
}
|
|
559
|
+
else {
|
|
560
|
+
let existing = await this.replicationIndex
|
|
561
|
+
.iterate({
|
|
562
|
+
query: ranges.map((x) => new ByteMatchQuery({ key: "id", value: x.id })),
|
|
563
|
+
}, { reference: true })
|
|
564
|
+
.all();
|
|
565
|
+
if (existing.length === 0) {
|
|
566
|
+
let prevCount = await this.replicationIndex.count({
|
|
567
|
+
query: new StringMatch({ key: "hash", value: from.hashcode() }),
|
|
568
|
+
});
|
|
569
|
+
isNewReplicator = prevCount === 0;
|
|
516
570
|
}
|
|
517
571
|
else {
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
}
|
|
527
|
-
isNewReplicator = prevCount === 0;
|
|
528
|
-
}
|
|
529
|
-
else {
|
|
530
|
-
isNewReplicator = false;
|
|
572
|
+
isNewReplicator = false;
|
|
573
|
+
}
|
|
574
|
+
if (checkDuplicates) {
|
|
575
|
+
let deduplicated = [];
|
|
576
|
+
// TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
|
|
577
|
+
for (const range of ranges) {
|
|
578
|
+
if (!(await iHaveCoveringRange(this.replicationIndex, range))) {
|
|
579
|
+
deduplicated.push(range);
|
|
580
|
+
}
|
|
531
581
|
}
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
582
|
+
ranges = deduplicated;
|
|
583
|
+
}
|
|
584
|
+
let existingMap = new Map();
|
|
585
|
+
for (const result of existing) {
|
|
586
|
+
existingMap.set(result.value.idString, result.value);
|
|
587
|
+
}
|
|
588
|
+
let changes = ranges
|
|
589
|
+
.map((x) => {
|
|
590
|
+
const prev = existingMap.get(x.idString);
|
|
591
|
+
if (prev) {
|
|
592
|
+
if (prev.equalRange(x)) {
|
|
593
|
+
return undefined;
|
|
539
594
|
}
|
|
540
|
-
|
|
595
|
+
return { range: x, prev, type: "updated" };
|
|
541
596
|
}
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
existingMap.set(result.value.idString, result.value);
|
|
597
|
+
else {
|
|
598
|
+
return { range: x, type: "added" };
|
|
545
599
|
}
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
600
|
+
})
|
|
601
|
+
.filter((x) => x != null);
|
|
602
|
+
diffs = changes;
|
|
603
|
+
}
|
|
604
|
+
this.uniqueReplicators.add(from.hashcode());
|
|
605
|
+
let now = +new Date();
|
|
606
|
+
let minRoleAge = await this.getDefaultMinRoleAge();
|
|
607
|
+
let isAllMature = true;
|
|
608
|
+
for (const diff of diffs) {
|
|
609
|
+
if (diff.type === "added" || diff.type === "updated") {
|
|
610
|
+
/* if (this.closed) {
|
|
611
|
+
return;
|
|
612
|
+
} */
|
|
613
|
+
await this.replicationIndex.put(diff.range);
|
|
614
|
+
if (!reset) {
|
|
615
|
+
this.oldestOpenTime = Math.min(Number(diff.range.timestamp), this.oldestOpenTime);
|
|
616
|
+
}
|
|
617
|
+
const isMature = isMatured(diff.range, now, minRoleAge);
|
|
618
|
+
if (!isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */) {
|
|
619
|
+
// second condition is to avoid the case where we are adding a range that we own
|
|
620
|
+
isAllMature = false;
|
|
621
|
+
let pendingRanges = this.pendingMaturity.get(diff.range.hash);
|
|
622
|
+
if (!pendingRanges) {
|
|
623
|
+
pendingRanges = new Map();
|
|
624
|
+
this.pendingMaturity.set(diff.range.hash, pendingRanges);
|
|
625
|
+
}
|
|
626
|
+
let waitForMaturityTime = Math.max(minRoleAge - (now - Number(diff.range.timestamp)), 0);
|
|
627
|
+
const setupTimeout = () => setTimeout(async () => {
|
|
628
|
+
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
629
|
+
detail: { publicKey: from },
|
|
630
|
+
}));
|
|
631
|
+
this.replicationChangeDebounceFn.add(diff); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
|
|
632
|
+
pendingRanges.delete(diff.range.idString);
|
|
633
|
+
if (pendingRanges.size === 0) {
|
|
634
|
+
this.pendingMaturity.delete(diff.range.hash);
|
|
552
635
|
}
|
|
553
|
-
|
|
636
|
+
}, waitForMaturityTime);
|
|
637
|
+
let prevPendingMaturity = pendingRanges.get(diff.range.idString);
|
|
638
|
+
if (prevPendingMaturity) {
|
|
639
|
+
// only reset the timer if the new range is older than the previous one, this means that waitForMaturityTime less than the previous one
|
|
640
|
+
clearTimeout(prevPendingMaturity.timeout);
|
|
641
|
+
prevPendingMaturity.timeout = setupTimeout();
|
|
554
642
|
}
|
|
555
643
|
else {
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
diffs = changes;
|
|
561
|
-
}
|
|
562
|
-
let now = +new Date();
|
|
563
|
-
let minRoleAge = await this.getDefaultMinRoleAge();
|
|
564
|
-
let isAllMature = true;
|
|
565
|
-
for (const diff of diffs) {
|
|
566
|
-
if (diff.type === "added" || diff.type === "updated") {
|
|
567
|
-
await this.replicationIndex.put(diff.range);
|
|
568
|
-
if (!reset) {
|
|
569
|
-
this.oldestOpenTime = Math.min(Number(diff.range.timestamp), this.oldestOpenTime);
|
|
570
|
-
}
|
|
571
|
-
const isMature = isMatured(diff.range, now, minRoleAge);
|
|
572
|
-
if (!isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */) {
|
|
573
|
-
// second condition is to avoid the case where we are adding a range that we own
|
|
574
|
-
isAllMature = false;
|
|
575
|
-
let prevPendingMaturity = this.pendingMaturity.get(diff.range.hash);
|
|
576
|
-
let map;
|
|
577
|
-
let waitForMaturityTime = Math.max(minRoleAge - (now - Number(diff.range.timestamp)), 0);
|
|
578
|
-
if (prevPendingMaturity) {
|
|
579
|
-
map = prevPendingMaturity.ranges;
|
|
580
|
-
if (prevPendingMaturity.timestamp < diff.range.timestamp) {
|
|
581
|
-
// something has changed so we need to reset the timeout
|
|
582
|
-
clearTimeout(prevPendingMaturity.timeout);
|
|
583
|
-
prevPendingMaturity.timestamp = diff.range.timestamp;
|
|
584
|
-
prevPendingMaturity.timeout = setTimeout(() => {
|
|
585
|
-
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
586
|
-
detail: { publicKey: from },
|
|
587
|
-
}));
|
|
588
|
-
for (const value of map.values()) {
|
|
589
|
-
this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
|
|
590
|
-
}
|
|
591
|
-
}, waitForMaturityTime);
|
|
592
|
-
}
|
|
593
|
-
}
|
|
594
|
-
else {
|
|
595
|
-
map = new Map();
|
|
596
|
-
this.pendingMaturity.set(diff.range.hash, {
|
|
597
|
-
timestamp: diff.range.timestamp,
|
|
598
|
-
ranges: map,
|
|
599
|
-
timeout: setTimeout(() => {
|
|
600
|
-
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
601
|
-
detail: { publicKey: from },
|
|
602
|
-
}));
|
|
603
|
-
for (const value of map.values()) {
|
|
604
|
-
this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
|
|
605
|
-
}
|
|
606
|
-
}, waitForMaturityTime),
|
|
607
|
-
});
|
|
608
|
-
}
|
|
609
|
-
map.set(diff.range.idString, diff);
|
|
644
|
+
pendingRanges.set(diff.range.idString, {
|
|
645
|
+
range: diff,
|
|
646
|
+
timeout: setupTimeout(),
|
|
647
|
+
});
|
|
610
648
|
}
|
|
611
649
|
}
|
|
612
|
-
|
|
613
|
-
|
|
650
|
+
}
|
|
651
|
+
else {
|
|
652
|
+
const pendingFromPeer = this.pendingMaturity.get(diff.range.hash);
|
|
653
|
+
if (pendingFromPeer) {
|
|
654
|
+
const prev = pendingFromPeer.get(diff.range.idString);
|
|
614
655
|
if (prev) {
|
|
615
|
-
prev.
|
|
656
|
+
clearTimeout(prev.timeout);
|
|
657
|
+
pendingFromPeer.delete(diff.range.idString);
|
|
658
|
+
}
|
|
659
|
+
if (pendingFromPeer.size === 0) {
|
|
660
|
+
this.pendingMaturity.delete(diff.range.hash);
|
|
616
661
|
}
|
|
617
662
|
}
|
|
618
663
|
}
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
664
|
+
}
|
|
665
|
+
if (reset) {
|
|
666
|
+
await this.updateOldestTimestampFromIndex();
|
|
667
|
+
}
|
|
668
|
+
this.events.dispatchEvent(new CustomEvent("replication:change", {
|
|
669
|
+
detail: { publicKey: from },
|
|
670
|
+
}));
|
|
671
|
+
if (isNewReplicator) {
|
|
672
|
+
this.events.dispatchEvent(new CustomEvent("replicator:join", {
|
|
623
673
|
detail: { publicKey: from },
|
|
624
674
|
}));
|
|
625
|
-
if (
|
|
626
|
-
this.events.dispatchEvent(new CustomEvent("replicator:
|
|
675
|
+
if (isAllMature) {
|
|
676
|
+
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
627
677
|
detail: { publicKey: from },
|
|
628
678
|
}));
|
|
629
|
-
if (isAllMature) {
|
|
630
|
-
this.events.dispatchEvent(new CustomEvent("replicator:mature", {
|
|
631
|
-
detail: { publicKey: from },
|
|
632
|
-
}));
|
|
633
|
-
}
|
|
634
679
|
}
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
this.
|
|
680
|
+
}
|
|
681
|
+
if (diffs.length > 0) {
|
|
682
|
+
for (const diff of diffs) {
|
|
683
|
+
this.replicationChangeDebounceFn.add(diff);
|
|
639
684
|
}
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
return
|
|
685
|
+
}
|
|
686
|
+
if (!from.equals(this.node.identity.publicKey)) {
|
|
687
|
+
this.rebalanceParticipationDebounced?.();
|
|
688
|
+
}
|
|
689
|
+
return diffs;
|
|
645
690
|
}
|
|
646
691
|
async startAnnounceReplicating(range, options = {}) {
|
|
647
692
|
const change = await this.addReplicationRange(range, this.node.identity.publicKey, options);
|
|
@@ -670,15 +715,47 @@ let SharedLog = class SharedLog extends Program {
|
|
|
670
715
|
}
|
|
671
716
|
}
|
|
672
717
|
}
|
|
718
|
+
removePeerFromGidPeerHistory(publicKeyHash, gid) {
|
|
719
|
+
if (gid) {
|
|
720
|
+
const gidMap = this._gidPeersHistory.get(gid);
|
|
721
|
+
if (gidMap) {
|
|
722
|
+
gidMap.delete(publicKeyHash);
|
|
723
|
+
if (gidMap.size === 0) {
|
|
724
|
+
this._gidPeersHistory.delete(gid);
|
|
725
|
+
}
|
|
726
|
+
}
|
|
727
|
+
}
|
|
728
|
+
else {
|
|
729
|
+
for (const key of this._gidPeersHistory.keys()) {
|
|
730
|
+
this.removePeerFromGidPeerHistory(publicKeyHash, key);
|
|
731
|
+
}
|
|
732
|
+
}
|
|
733
|
+
}
|
|
734
|
+
addPeersToGidPeerHistory(gid, publicKeys, reset) {
|
|
735
|
+
let set = this._gidPeersHistory.get(gid);
|
|
736
|
+
if (!set) {
|
|
737
|
+
set = new Set();
|
|
738
|
+
this._gidPeersHistory.set(gid, set);
|
|
739
|
+
}
|
|
740
|
+
else {
|
|
741
|
+
if (reset) {
|
|
742
|
+
set.clear();
|
|
743
|
+
}
|
|
744
|
+
}
|
|
745
|
+
for (const key of publicKeys) {
|
|
746
|
+
set.add(key);
|
|
747
|
+
}
|
|
748
|
+
return set;
|
|
749
|
+
}
|
|
673
750
|
async append(data, options) {
|
|
674
751
|
const appendOptions = { ...options };
|
|
675
|
-
const minReplicas = options?.replicas
|
|
752
|
+
const minReplicas = this.getClampedReplicas(options?.replicas
|
|
676
753
|
? typeof options.replicas === "number"
|
|
677
754
|
? new AbsoluteReplicas(options.replicas)
|
|
678
755
|
: options.replicas
|
|
679
|
-
:
|
|
680
|
-
const minReplicasValue = minReplicas.getValue(this);
|
|
756
|
+
: undefined);
|
|
681
757
|
const minReplicasData = encodeReplicas(minReplicas);
|
|
758
|
+
const minReplicasValue = minReplicas.getValue(this);
|
|
682
759
|
checkMinReplicasLimit(minReplicasValue);
|
|
683
760
|
if (!appendOptions.meta) {
|
|
684
761
|
appendOptions.meta = {
|
|
@@ -707,10 +784,14 @@ let SharedLog = class SharedLog extends Program {
|
|
|
707
784
|
if (options?.replicate) {
|
|
708
785
|
await this.replicate(result.entry, { checkDuplicates: true });
|
|
709
786
|
}
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
787
|
+
const coordinates = await this.createCoordinates(result.entry, minReplicasValue);
|
|
788
|
+
let isLeader = false;
|
|
789
|
+
let leaders = await this.findLeaders(coordinates, result.entry, {
|
|
790
|
+
persist: {},
|
|
791
|
+
onLeader: (key) => {
|
|
792
|
+
isLeader = isLeader || this.node.identity.publicKey.hashcode() === key;
|
|
793
|
+
},
|
|
794
|
+
});
|
|
714
795
|
// --------------
|
|
715
796
|
if (options?.target !== "none") {
|
|
716
797
|
for await (const message of createExchangeHeadsMessages(this.log, [
|
|
@@ -718,35 +799,25 @@ let SharedLog = class SharedLog extends Program {
|
|
|
718
799
|
])) {
|
|
719
800
|
if (options?.target === "replicators" || !options?.target) {
|
|
720
801
|
if (message.heads[0].gidRefrences.length > 0) {
|
|
721
|
-
const newAndOldLeaders = new Map(leaders);
|
|
722
802
|
for (const ref of message.heads[0].gidRefrences) {
|
|
723
803
|
const entryFromGid = this.log.entryIndex.getHeads(ref, false);
|
|
724
804
|
for (const entry of await entryFromGid.all()) {
|
|
725
|
-
let
|
|
726
|
-
if (
|
|
727
|
-
|
|
805
|
+
let coordinates = await this.getCoordinates(entry);
|
|
806
|
+
if (coordinates == null) {
|
|
807
|
+
coordinates = await this.createCoordinates(entry, minReplicasValue);
|
|
728
808
|
// TODO are we every to come here?
|
|
729
809
|
}
|
|
730
|
-
|
|
731
|
-
|
|
810
|
+
const result = await this._findLeaders(coordinates);
|
|
811
|
+
for (const [k, v] of result) {
|
|
812
|
+
leaders.set(k, v);
|
|
732
813
|
}
|
|
733
814
|
}
|
|
734
815
|
}
|
|
735
|
-
leaders = newAndOldLeaders;
|
|
736
|
-
}
|
|
737
|
-
let set = this._gidPeersHistory.get(result.entry.meta.gid);
|
|
738
|
-
if (!set) {
|
|
739
|
-
set = new Set(leaders.keys());
|
|
740
|
-
this._gidPeersHistory.set(result.entry.meta.gid, set);
|
|
741
|
-
}
|
|
742
|
-
else {
|
|
743
|
-
for (const [receiver, _features] of leaders) {
|
|
744
|
-
set.add(receiver);
|
|
745
|
-
}
|
|
746
816
|
}
|
|
817
|
+
const set = this.addPeersToGidPeerHistory(result.entry.meta.gid, leaders.keys());
|
|
747
818
|
mode = isLeader
|
|
748
|
-
? new SilentDelivery({ redundancy: 1, to:
|
|
749
|
-
: new AcknowledgeDelivery({ redundancy: 1, to:
|
|
819
|
+
? new SilentDelivery({ redundancy: 1, to: set })
|
|
820
|
+
: new AcknowledgeDelivery({ redundancy: 1, to: set });
|
|
750
821
|
}
|
|
751
822
|
// TODO add options for waiting ?
|
|
752
823
|
this.rpc.send(message, {
|
|
@@ -757,7 +828,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
757
828
|
if (!isLeader) {
|
|
758
829
|
this.pruneDebouncedFn.add({
|
|
759
830
|
key: result.entry.hash,
|
|
760
|
-
value: result.entry,
|
|
831
|
+
value: { entry: result.entry, leaders },
|
|
761
832
|
});
|
|
762
833
|
}
|
|
763
834
|
this.rebalanceParticipationDebounced?.();
|
|
@@ -765,7 +836,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
765
836
|
}
|
|
766
837
|
async open(options) {
|
|
767
838
|
this.replicas = {
|
|
768
|
-
min: options?.replicas?.min
|
|
839
|
+
min: options?.replicas?.min != null
|
|
769
840
|
? typeof options?.replicas?.min === "number"
|
|
770
841
|
? new AbsoluteReplicas(options?.replicas?.min)
|
|
771
842
|
: options?.replicas?.min
|
|
@@ -776,14 +847,18 @@ let SharedLog = class SharedLog extends Program {
|
|
|
776
847
|
: options.replicas.max
|
|
777
848
|
: undefined,
|
|
778
849
|
};
|
|
779
|
-
this.
|
|
850
|
+
this._logProperties = options;
|
|
851
|
+
// TODO types
|
|
852
|
+
this.domain = options?.domain
|
|
853
|
+
? options.domain
|
|
854
|
+
: createReplicationDomainHash(options?.compatibility && options?.compatibility < 10 ? "u32" : "u64");
|
|
855
|
+
this.indexableDomain = createIndexableDomainFromResolution(this.domain.resolution);
|
|
780
856
|
this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4;
|
|
781
857
|
this._pendingDeletes = new Map();
|
|
782
858
|
this._pendingIHave = new Map();
|
|
783
859
|
this.latestReplicationInfoMessage = new Map();
|
|
784
|
-
this.
|
|
785
|
-
this.
|
|
786
|
-
this.syncInFlight = new Map();
|
|
860
|
+
this.coordinateToHash = new Cache({ max: 1e6, ttl: 1e4 });
|
|
861
|
+
this.uniqueReplicators = new Set();
|
|
787
862
|
this.openTime = +new Date();
|
|
788
863
|
this.oldestOpenTime = this.openTime;
|
|
789
864
|
this.distributionDebounceTime =
|
|
@@ -792,10 +867,13 @@ let SharedLog = class SharedLog extends Program {
|
|
|
792
867
|
options?.timeUntilRoleMaturity ?? WAIT_FOR_ROLE_MATURITY;
|
|
793
868
|
this.waitForReplicatorTimeout =
|
|
794
869
|
options?.waitForReplicatorTimeout || WAIT_FOR_REPLICATOR_TIMEOUT;
|
|
870
|
+
this.waitForPruneDelay = options?.waitForPruneDelay || WAIT_FOR_PRUNE_DELAY;
|
|
871
|
+
if (this.waitForReplicatorTimeout < this.timeUntilRoleMaturity) {
|
|
872
|
+
this.waitForReplicatorTimeout = this.timeUntilRoleMaturity; // does not makes sense to expect a replicator to mature faster than it is reachable
|
|
873
|
+
}
|
|
795
874
|
this._closeController = new AbortController();
|
|
796
875
|
this._isTrustedReplicator = options?.canReplicate;
|
|
797
876
|
this.sync = options?.sync;
|
|
798
|
-
this._logProperties = options;
|
|
799
877
|
this.pendingMaturity = new Map();
|
|
800
878
|
const id = sha256Base64Sync(this.log.id);
|
|
801
879
|
const storage = await this.node.storage.sublevel(id);
|
|
@@ -810,14 +888,13 @@ let SharedLog = class SharedLog extends Program {
|
|
|
810
888
|
waitFor: this.rpc.waitFor.bind(this.rpc),
|
|
811
889
|
});
|
|
812
890
|
await this.remoteBlocks.start();
|
|
813
|
-
/* this._totalParticipation = 0; */
|
|
814
891
|
const logScope = await this.node.indexer.scope(id);
|
|
815
892
|
const replicationIndex = await logScope.scope("replication");
|
|
816
893
|
this._replicationRangeIndex = await replicationIndex.init({
|
|
817
|
-
schema:
|
|
894
|
+
schema: this.indexableDomain.constructorRange,
|
|
818
895
|
});
|
|
819
896
|
this._entryCoordinatesIndex = await replicationIndex.init({
|
|
820
|
-
schema:
|
|
897
|
+
schema: this.indexableDomain.constructorEntry,
|
|
821
898
|
});
|
|
822
899
|
const logIndex = await logScope.scope("log");
|
|
823
900
|
await this.node.indexer.start(); // TODO why do we need to start the indexer here?
|
|
@@ -829,13 +906,21 @@ let SharedLog = class SharedLog extends Program {
|
|
|
829
906
|
}),
|
|
830
907
|
],
|
|
831
908
|
})) > 0;
|
|
832
|
-
/* this._totalParticipation = await this.calculateTotalParticipation(); */
|
|
833
909
|
this._gidPeersHistory = new Map();
|
|
910
|
+
this._requestIPruneSent = new Map();
|
|
911
|
+
this._requestIPruneResponseReplicatorSet = new Map();
|
|
834
912
|
this.replicationChangeDebounceFn = debounceAggregationChanges((change) => this.onReplicationChange(change).then(() => this.rebalanceParticipationDebounced?.()), this.distributionDebounceTime);
|
|
835
913
|
this.pruneDebouncedFn = debouncedAccumulatorMap((map) => {
|
|
836
914
|
this.prune(map);
|
|
837
|
-
}, PRUNE_DEBOUNCE_INTERVAL
|
|
838
|
-
|
|
915
|
+
}, PRUNE_DEBOUNCE_INTERVAL, // TODO make this dynamic on the number of replicators
|
|
916
|
+
(into, from) => {
|
|
917
|
+
for (const [k, v] of from.leaders) {
|
|
918
|
+
if (!into.leaders.has(k)) {
|
|
919
|
+
into.leaders.set(k, v);
|
|
920
|
+
}
|
|
921
|
+
}
|
|
922
|
+
});
|
|
923
|
+
this.responseToPruneDebouncedFn = debounceAccumulator((result) => {
|
|
839
924
|
let allRequestingPeers = new Set();
|
|
840
925
|
let hashes = [];
|
|
841
926
|
for (const [hash, requestingPeers] of result) {
|
|
@@ -896,6 +981,40 @@ let SharedLog = class SharedLog extends Program {
|
|
|
896
981
|
},
|
|
897
982
|
indexer: logIndex,
|
|
898
983
|
});
|
|
984
|
+
if (options?.syncronizer) {
|
|
985
|
+
this.syncronizer = new options.syncronizer({
|
|
986
|
+
numbers: this.indexableDomain.numbers,
|
|
987
|
+
entryIndex: this.entryCoordinatesIndex,
|
|
988
|
+
log: this.log,
|
|
989
|
+
rangeIndex: this._replicationRangeIndex,
|
|
990
|
+
rpc: this.rpc,
|
|
991
|
+
coordinateToHash: this.coordinateToHash,
|
|
992
|
+
});
|
|
993
|
+
}
|
|
994
|
+
else {
|
|
995
|
+
if (this._logProperties?.compatibility &&
|
|
996
|
+
this._logProperties.compatibility < 10) {
|
|
997
|
+
this.syncronizer = new SimpleSyncronizer({
|
|
998
|
+
log: this.log,
|
|
999
|
+
rpc: this.rpc,
|
|
1000
|
+
entryIndex: this.entryCoordinatesIndex,
|
|
1001
|
+
coordinateToHash: this.coordinateToHash,
|
|
1002
|
+
});
|
|
1003
|
+
}
|
|
1004
|
+
else {
|
|
1005
|
+
if (this.domain.resolution === "u32") {
|
|
1006
|
+
logger.warn("u32 resolution is not recommended for RatelessIBLTSynchronizer");
|
|
1007
|
+
}
|
|
1008
|
+
this.syncronizer = new RatelessIBLTSynchronizer({
|
|
1009
|
+
numbers: this.indexableDomain.numbers,
|
|
1010
|
+
entryIndex: this.entryCoordinatesIndex,
|
|
1011
|
+
log: this.log,
|
|
1012
|
+
rangeIndex: this._replicationRangeIndex,
|
|
1013
|
+
rpc: this.rpc,
|
|
1014
|
+
coordinateToHash: this.coordinateToHash,
|
|
1015
|
+
});
|
|
1016
|
+
}
|
|
1017
|
+
}
|
|
899
1018
|
// Open for communcation
|
|
900
1019
|
await this.rpc.open({
|
|
901
1020
|
queryType: TransportMessage,
|
|
@@ -910,59 +1029,6 @@ let SharedLog = class SharedLog extends Program {
|
|
|
910
1029
|
this._onUnsubscriptionFn || this._onUnsubscription.bind(this);
|
|
911
1030
|
await this.node.services.pubsub.addEventListener("unsubscribe", this._onUnsubscriptionFn);
|
|
912
1031
|
await this.rpc.subscribe();
|
|
913
|
-
const requestSync = async () => {
|
|
914
|
-
/**
|
|
915
|
-
* This method fetches entries that we potentially want.
|
|
916
|
-
* In a case in which we become replicator of a segment,
|
|
917
|
-
* multiple remote peers might want to send us entries
|
|
918
|
-
* This method makes sure that we only request on entry from the remotes at a time
|
|
919
|
-
* so we don't get flooded with the same entry
|
|
920
|
-
*/
|
|
921
|
-
const requestHashes = [];
|
|
922
|
-
const from = new Set();
|
|
923
|
-
for (const [key, value] of this.syncInFlightQueue) {
|
|
924
|
-
if (!(await this.log.has(key))) {
|
|
925
|
-
// TODO test that this if statement actually does anymeaningfull
|
|
926
|
-
if (value.length > 0) {
|
|
927
|
-
requestHashes.push(key);
|
|
928
|
-
const publicKeyHash = value.shift().hashcode();
|
|
929
|
-
from.add(publicKeyHash);
|
|
930
|
-
const invertedSet = this.syncInFlightQueueInverted.get(publicKeyHash);
|
|
931
|
-
if (invertedSet) {
|
|
932
|
-
if (invertedSet.delete(key)) {
|
|
933
|
-
if (invertedSet.size === 0) {
|
|
934
|
-
this.syncInFlightQueueInverted.delete(publicKeyHash);
|
|
935
|
-
}
|
|
936
|
-
}
|
|
937
|
-
}
|
|
938
|
-
}
|
|
939
|
-
if (value.length === 0) {
|
|
940
|
-
this.syncInFlightQueue.delete(key); // no-one more to ask for this entry
|
|
941
|
-
}
|
|
942
|
-
}
|
|
943
|
-
else {
|
|
944
|
-
this.syncInFlightQueue.delete(key);
|
|
945
|
-
}
|
|
946
|
-
}
|
|
947
|
-
const nowMin10s = +new Date() - 1e4;
|
|
948
|
-
for (const [key, map] of this.syncInFlight) {
|
|
949
|
-
// cleanup "old" missing syncs
|
|
950
|
-
for (const [hash, { timestamp }] of map) {
|
|
951
|
-
if (timestamp < nowMin10s) {
|
|
952
|
-
map.delete(hash);
|
|
953
|
-
}
|
|
954
|
-
}
|
|
955
|
-
if (map.size === 0) {
|
|
956
|
-
this.syncInFlight.delete(key);
|
|
957
|
-
}
|
|
958
|
-
}
|
|
959
|
-
this.requestSync(requestHashes, from).finally(() => {
|
|
960
|
-
if (this.closed) {
|
|
961
|
-
return;
|
|
962
|
-
}
|
|
963
|
-
this.syncMoreInterval = setTimeout(requestSync, 3e3);
|
|
964
|
-
});
|
|
965
|
-
};
|
|
966
1032
|
// if we had a previous session with replication info, and new replication info dictates that we unreplicate
|
|
967
1033
|
// we should do that. Otherwise if options is a unreplication we dont need to do anything because
|
|
968
1034
|
// we are already unreplicated (as we are just opening)
|
|
@@ -980,7 +1046,7 @@ let SharedLog = class SharedLog extends Program {
|
|
|
980
1046
|
reset: true,
|
|
981
1047
|
});
|
|
982
1048
|
}
|
|
983
|
-
|
|
1049
|
+
await this.syncronizer.open();
|
|
984
1050
|
this.interval = setInterval(() => {
|
|
985
1051
|
this.rebalanceParticipationDebounced?.();
|
|
986
1052
|
}, RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL);
|
|
@@ -1005,14 +1071,14 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1005
1071
|
// go through all segments and for waitForAll replicators to become reachable if not prune them away
|
|
1006
1072
|
const promises = [];
|
|
1007
1073
|
const iterator = this.replicationIndex.iterate();
|
|
1008
|
-
let
|
|
1074
|
+
let checkedIsAlive = new Set();
|
|
1009
1075
|
while (!iterator.done()) {
|
|
1010
1076
|
for (const segment of await iterator.next(1000)) {
|
|
1011
|
-
if (
|
|
1077
|
+
if (checkedIsAlive.has(segment.value.hash) ||
|
|
1012
1078
|
this.node.identity.publicKey.hashcode() === segment.value.hash) {
|
|
1013
1079
|
continue;
|
|
1014
1080
|
}
|
|
1015
|
-
|
|
1081
|
+
checkedIsAlive.add(segment.value.hash);
|
|
1016
1082
|
promises.push(this.waitFor(segment.value.hash, {
|
|
1017
1083
|
timeout: this.waitForReplicatorTimeout,
|
|
1018
1084
|
signal: this._closeController.signal,
|
|
@@ -1090,24 +1156,28 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1090
1156
|
peers: this.replicationIndex,
|
|
1091
1157
|
start: range.offset,
|
|
1092
1158
|
widthToCoverScaled: range.length ??
|
|
1093
|
-
(await minimumWidthToCover(this.replicas.min.getValue(this))),
|
|
1159
|
+
(await minimumWidthToCover(this.replicas.min.getValue(this), this.indexableDomain.numbers)),
|
|
1094
1160
|
roleAge,
|
|
1095
1161
|
eager,
|
|
1096
|
-
|
|
1162
|
+
numbers: this.indexableDomain.numbers,
|
|
1097
1163
|
});
|
|
1098
1164
|
// add all in flight
|
|
1099
|
-
for (const [key, _] of this.syncInFlight) {
|
|
1165
|
+
for (const [key, _] of this.syncronizer.syncInFlight) {
|
|
1100
1166
|
set.add(key);
|
|
1101
1167
|
}
|
|
1102
1168
|
return [...set];
|
|
1103
1169
|
}
|
|
1104
1170
|
async _close() {
|
|
1105
|
-
|
|
1106
|
-
for (const [_key,
|
|
1107
|
-
|
|
1171
|
+
await this.syncronizer.close();
|
|
1172
|
+
for (const [_key, peerMap] of this.pendingMaturity) {
|
|
1173
|
+
for (const [_key2, info] of peerMap) {
|
|
1174
|
+
clearTimeout(info.timeout);
|
|
1175
|
+
}
|
|
1108
1176
|
}
|
|
1109
1177
|
this.pendingMaturity.clear();
|
|
1110
1178
|
this.distributeQueue?.clear();
|
|
1179
|
+
this.coordinateToHash.clear();
|
|
1180
|
+
this.uniqueReplicators.clear();
|
|
1111
1181
|
this._closeController.abort();
|
|
1112
1182
|
clearInterval(this.interval);
|
|
1113
1183
|
this.node.services.pubsub.removeEventListener("subscribe", this._onSubscriptionFn);
|
|
@@ -1122,13 +1192,14 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1122
1192
|
await this.remoteBlocks.stop();
|
|
1123
1193
|
this._pendingDeletes.clear();
|
|
1124
1194
|
this._pendingIHave.clear();
|
|
1125
|
-
this.syncInFlightQueue.clear();
|
|
1126
|
-
this.syncInFlightQueueInverted.clear();
|
|
1127
|
-
this.syncInFlight.clear();
|
|
1128
1195
|
this.latestReplicationInfoMessage.clear();
|
|
1129
1196
|
this._gidPeersHistory.clear();
|
|
1197
|
+
this._requestIPruneSent.clear();
|
|
1198
|
+
this._requestIPruneResponseReplicatorSet.clear();
|
|
1130
1199
|
this.pruneDebouncedFn = undefined;
|
|
1131
1200
|
this.rebalanceParticipationDebounced = undefined;
|
|
1201
|
+
this._replicationRangeIndex.stop();
|
|
1202
|
+
this._entryCoordinatesIndex.stop();
|
|
1132
1203
|
this._replicationRangeIndex = undefined;
|
|
1133
1204
|
this._entryCoordinatesIndex = undefined;
|
|
1134
1205
|
this.cpuUsage?.stop?.();
|
|
@@ -1148,6 +1219,8 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1148
1219
|
if (!superDropped) {
|
|
1149
1220
|
return superDropped;
|
|
1150
1221
|
}
|
|
1222
|
+
await this._entryCoordinatesIndex.drop();
|
|
1223
|
+
await this._replicationRangeIndex.drop();
|
|
1151
1224
|
await this.log.drop();
|
|
1152
1225
|
await this._close();
|
|
1153
1226
|
return true;
|
|
@@ -1190,6 +1263,11 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1190
1263
|
const promises = [];
|
|
1191
1264
|
for (const [gid, entries] of groupedByGid) {
|
|
1192
1265
|
const fn = async () => {
|
|
1266
|
+
/// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
|
|
1267
|
+
await this.syncronizer.onReceivedEntries({
|
|
1268
|
+
entries,
|
|
1269
|
+
from: context.from,
|
|
1270
|
+
});
|
|
1193
1271
|
const headsWithGid = await this.log.entryIndex
|
|
1194
1272
|
.getHeads(gid)
|
|
1195
1273
|
.all();
|
|
@@ -1200,16 +1278,40 @@ let SharedLog = class SharedLog extends Program {
|
|
|
1200
1278
|
const maxReplicasFromNewEntries = maxReplicas(this, entries.map((x) => x.entry));
|
|
1201
1279
|
const maxMaxReplicas = Math.max(maxReplicasFromHead, maxReplicasFromNewEntries);
|
|
1202
1280
|
const cursor = await this.createCoordinates(latestEntry, maxMaxReplicas);
|
|
1203
|
-
const isReplicating =
|
|
1204
|
-
let isLeader;
|
|
1281
|
+
const isReplicating = this._isReplicating;
|
|
1282
|
+
let isLeader = false;
|
|
1283
|
+
let fromIsLeader = false;
|
|
1284
|
+
let leaders;
|
|
1205
1285
|
if (isReplicating) {
|
|
1206
|
-
|
|
1286
|
+
leaders = await this._waitForReplicators(cursor, latestEntry, [
|
|
1287
|
+
{
|
|
1288
|
+
key: this.node.identity.publicKey.hashcode(),
|
|
1289
|
+
replicator: true,
|
|
1290
|
+
},
|
|
1291
|
+
], {
|
|
1292
|
+
// we do this here so that we quickly assume leader role (and also so that 'from' is also assumed to be leader)
|
|
1293
|
+
// TODO potential side effects?
|
|
1294
|
+
roleAge: 0,
|
|
1295
|
+
timeout: 2e4,
|
|
1296
|
+
onLeader: (key) => {
|
|
1297
|
+
isLeader =
|
|
1298
|
+
isLeader ||
|
|
1299
|
+
this.node.identity.publicKey.hashcode() === key;
|
|
1300
|
+
fromIsLeader =
|
|
1301
|
+
fromIsLeader || context.from.hashcode() === key;
|
|
1302
|
+
},
|
|
1303
|
+
});
|
|
1207
1304
|
}
|
|
1208
1305
|
else {
|
|
1209
|
-
|
|
1210
|
-
|
|
1211
|
-
|
|
1212
|
-
|
|
1306
|
+
leaders = await this.findLeaders(cursor, latestEntry, {
|
|
1307
|
+
onLeader: (key) => {
|
|
1308
|
+
fromIsLeader =
|
|
1309
|
+
fromIsLeader || context.from.hashcode() === key;
|
|
1310
|
+
isLeader =
|
|
1311
|
+
isLeader ||
|
|
1312
|
+
this.node.identity.publicKey.hashcode() === key;
|
|
1313
|
+
},
|
|
1314
|
+
});
|
|
1213
1315
|
}
|
|
1214
1316
|
if (this.closed) {
|
|
1215
1317
|
return;
|
|
@@ -1220,22 +1322,13 @@ let SharedLog = class SharedLog extends Program {
  if (isLeader) {
  for (const entry of entries) {
  this.pruneDebouncedFn.delete(entry.entry.hash);
-
-
-
-
-
-
- });
- }
- const fromIsLeader = isLeader.get(context.from.hashcode());
- if (fromIsLeader) {
- let peerSet = this._gidPeersHistory.get(gid);
- if (!peerSet) {
- peerSet = new Set();
- this._gidPeersHistory.set(gid, peerSet);
+ this._requestIPruneSent.delete(entry.entry.hash);
+ this._requestIPruneResponseReplicatorSet.delete(entry.entry.hash);
+ if (fromIsLeader) {
+ this.addPeersToGidPeerHistory(gid, [
+ context.from.hashcode(),
+ ]);
  }
- peerSet.add(context.from.hashcode());
  }
  if (maxReplicasFromNewEntries < maxReplicasFromHead) {
  (maybeDelete || (maybeDelete = [])).push(entries);
@@ -1262,19 +1355,14 @@ let SharedLog = class SharedLog extends Program {
  }
  if (toMerge.length > 0) {
  await this.log.join(toMerge);
- toDelete?.map((x) =>
+ toDelete?.map((x) =>
+ // TODO types
+ this.pruneDebouncedFn.add({
+ key: x.hash,
+ value: { entry: x, leaders: leaders },
+ }));
  this.rebalanceParticipationDebounced?.();
  }
- /// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
- for (const entry of entries) {
- const set = this.syncInFlight.get(context.from.hashcode());
- if (set) {
- set.delete(entry.entry.hash);
- if (set?.size === 0) {
- this.syncInFlight.delete(context.from.hashcode());
- }
- }
- }
  if (maybeDelete) {
  for (const entries of maybeDelete) {
  const headsWithGid = await this.log.entryIndex
@@ -1287,10 +1375,16 @@ let SharedLog = class SharedLog extends Program {
  replicas: minReplicas,
  });
  if (!isLeader) {
-
-
-
-
+ for (const x of entries) {
+ this.pruneDebouncedFn.add({
+ key: x.entry.hash,
+ // TODO types
+ value: {
+ entry: x.entry,
+ leaders: leaders,
+ },
+ });
+ }
  }
  }
  }
@@ -1303,17 +1397,30 @@ let SharedLog = class SharedLog extends Program {
  }
  else if (msg instanceof RequestIPrune) {
  const hasAndIsLeader = [];
- // await delay(3000)
  for (const hash of msg.hashes) {
+ // if we expect the remote to be owner of this entry because we are to prune ourselves, then we need to remove the remote
+ // this is due to that the remote has previously indicated to be a replicator to help us prune but now has changed their mind
+ const outGoingPrunes = this._requestIPruneResponseReplicatorSet.get(hash);
+ if (outGoingPrunes) {
+ outGoingPrunes.delete(context.from.hashcode());
+ }
  const indexedEntry = await this.log.entryIndex.getShallow(hash);
-
-
-
-
-
-
-
-
+ let isLeader = false;
+ if (indexedEntry) {
+ this.removePeerFromGidPeerHistory(context.from.hashcode(), indexedEntry.value.meta.gid);
+ await this._waitForReplicators(await this.createCoordinates(indexedEntry.value, decodeReplicas(indexedEntry.value).getValue(this)), indexedEntry.value, [
+ {
+ key: this.node.identity.publicKey.hashcode(),
+ replicator: true,
+ },
+ ], {
+ onLeader: (key) => {
+ isLeader =
+ isLeader || key === this.node.identity.publicKey.hashcode();
+ },
+ });
+ }
+ if (isLeader) {
  hasAndIsLeader.push(hash);
  hasAndIsLeader.length > 0 &&
  this.responseToPruneDebouncedFn.add({
@@ -1344,13 +1451,16 @@ let SharedLog = class SharedLog extends Program {
  clearTimeout(timeout);
  },
  callback: async (entry) => {
-
-
-
-
-
-
-
+ this.removePeerFromGidPeerHistory(context.from.hashcode(), entry.meta.gid);
+ let isLeader = false;
+ await this.findLeaders(await this.createCoordinates(entry, decodeReplicas(entry).getValue(this)), entry, {
+ onLeader: (key) => {
+ isLeader =
+ isLeader ||
+ key === this.node.identity.publicKey.hashcode();
+ },
+ });
+ if (isLeader) {
  this.responseToPruneDebouncedFn.add({
  hashes: [entry.hash],
  peers: requesting,
@@ -1369,38 +1479,49 @@ let SharedLog = class SharedLog extends Program {
  this._pendingDeletes.get(hash)?.resolve(context.from.hashcode());
  }
  }
- else if (msg
-
+ else if (await this.syncronizer.onMessage(msg, context)) {
+ return; // the syncronizer has handled the message
+ } /* else if (msg instanceof RequestMaybeSync) {
+ const requestHashes: string[] = [];
+
  for (const hash of msg.hashes) {
  const inFlight = this.syncInFlightQueue.get(hash);
  if (inFlight) {
- if (
+ if (
+ !inFlight.find((x) => x.hashcode() === context.from!.hashcode())
+ ) {
  inFlight.push(context.from);
- let inverted = this.syncInFlightQueueInverted.get(
+ let inverted = this.syncInFlightQueueInverted.get(
+ context.from.hashcode(),
+ );
  if (!inverted) {
  inverted = new Set();
- this.syncInFlightQueueInverted.set(
+ this.syncInFlightQueueInverted.set(
+ context.from.hashcode(),
+ inverted,
+ );
  }
  inverted.add(hash);
  }
- }
- else if (!(await this.log.has(hash))) {
+ } else if (!(await this.log.has(hash))) {
  this.syncInFlightQueue.set(hash, []);
  requestHashes.push(hash); // request immediately (first time we have seen this hash)
  }
  }
  requestHashes.length > 0 &&
  (await this.requestSync(requestHashes, [context.from.hashcode()]));
- }
- else if (msg instanceof ResponseMaybeSync) {
+ } else if (msg instanceof ResponseMaybeSync) {
  // TODO perhaps send less messages to more receivers for performance reasons?
  // TODO wait for previous send to target before trying to send more?
- for await (const message of createExchangeHeadsMessages(
+ for await (const message of createExchangeHeadsMessages(
+ this.log,
+ msg.hashes,
+ )) {
  await this.rpc.send(message, {
- mode: new SilentDelivery({ to: [context.from], redundancy: 1 }),
+ mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }),
  });
  }
- }
+ } */
  else if (msg instanceof BlocksMessage) {
  await this.remoteBlocks.onMessage(msg.message);
  }
@@ -1453,6 +1574,9 @@ let SharedLog = class SharedLog extends Program {
  }
  this.latestReplicationInfoMessage.set(context.from.hashcode(), context.timestamp);
  let reset = msg instanceof AllReplicatingSegmentsMessage;
+ if (this.closed) {
+ return;
+ }
  await this.addReplicationRange(replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from)), context.from, { reset, checkDuplicates: true });
  /* await this._modifyReplicators(msg.role, context.from!); */
  })
@@ -1463,6 +1587,9 @@ let SharedLog = class SharedLog extends Program {
  if (e instanceof NotStartedError) {
  return;
  }
+ if (e instanceof IndexNotStartedError) {
+ return;
+ }
  logger.error("Failed to find peer who updated replication settings: " +
  e?.message);
  });
@@ -1478,7 +1605,9 @@ let SharedLog = class SharedLog extends Program {
  }
  }
  catch (e) {
- if (e instanceof AbortError
+ if (e instanceof AbortError ||
+ e instanceof NotStartedError ||
+ e instanceof IndexNotStartedError) {
  return;
  }
  if (e instanceof BorshError) {
@@ -1492,6 +1621,39 @@ let SharedLog = class SharedLog extends Program {
  logger.error(e);
  }
  }
+ async calculateTotalParticipation(options) {
+ if (options?.sum) {
+ const ranges = await this.replicationIndex.iterate().all();
+ let sum = 0;
+ for (const range of ranges) {
+ sum += range.value.widthNormalized;
+ }
+ return sum;
+ }
+ return appromixateCoverage({
+ peers: this._replicationRangeIndex,
+ numbers: this.indexableDomain.numbers,
+ samples: 25,
+ });
+ }
+ /* async calculateTotalParticipation() {
+ const sum = await this.replicationIndex.sum({ key: "width" });
+ return Number(sum) / MAX_U32;
+ }
+ */
+ async countReplicationSegments() {
+ const count = await this.replicationIndex.count({
+ query: new StringMatch({
+ key: "hash",
+ value: this.node.identity.publicKey.hashcode(),
+ }),
+ });
+ return count;
+ }
+ async getAllReplicationSegments() {
+ const ranges = await this.replicationIndex.iterate().all();
+ return ranges.map((x) => x.value);
+ }
  async getMyReplicationSegments() {
  const ranges = await this.replicationIndex
  .iterate({
@@ -1503,7 +1665,7 @@ let SharedLog = class SharedLog extends Program {
  .all();
  return ranges.map((x) => x.value);
  }
- async
+ async calculateMyTotalParticipation() {
  // sum all of my replicator rects
  return (await this.getMyReplicationSegments()).reduce((acc, { widthNormalized }) => acc + widthNormalized, 0);
  }
@@ -1535,9 +1697,9 @@ let SharedLog = class SharedLog extends Program {
  async waitForReplicator(...keys) {
  const check = async () => {
  for (const k of keys) {
- const
-
-
+ const iterator = this.replicationIndex?.iterate({ query: new StringMatch({ key: "hash", value: k.hashcode() }) }, { reference: true });
+ const rects = await iterator?.next(1);
+ await iterator.close();
  const rect = rects[0]?.value;
  if (!rect ||
  !isMatured(rect, +new Date(), await this.getDefaultMinRoleAge())) {
@@ -1546,6 +1708,7 @@ let SharedLog = class SharedLog extends Program {
  }
  return true;
  };
+ // TODO do event based
  return waitFor(() => check(), {
  signal: this._closeController.signal,
  }).catch((e) => {
@@ -1557,119 +1720,137 @@ let SharedLog = class SharedLog extends Program {
  });
  }
  async join(entries, options) {
- let
+ let entriesToReplicate = [];
  if (options?.replicate) {
  // TODO this block should perhaps be called from a callback on the this.log.join method on all the ignored element because already joined, like "onAlreadyJoined"
  // check which entrise we already have but not are replicating, and replicate them
-
+ // we can not just do the 'join' call because it will ignore the already joined entries
  for (const element of entries) {
  if (typeof element === "string") {
  const entry = await this.log.get(element);
  if (entry) {
-
+ entriesToReplicate.push(entry);
  }
  }
  else if (element instanceof Entry) {
  if (await this.log.has(element.hash)) {
-
+ entriesToReplicate.push(element);
  }
  }
  else {
  const entry = await this.log.get(element.hash);
  if (entry) {
-
+ entriesToReplicate.push(entry);
  }
  }
  }
- // assume is heads
- await this.replicate(alreadyJoined, {
- checkDuplicates: true,
- announce: (msg) => {
- messageToSend = msg;
- },
- });
  }
-
- ? {
-
-
-
-
- if (entry.head) {
- await this.replicate(entry.entry, {
- checkDuplicates: true,
- // we override the announce step here to make sure we announce all new replication info
- // in one large message instead
- announce: (msg) => {
- if (msg instanceof AllReplicatingSegmentsMessage) {
- throw new Error("Unexpected");
- }
- if (messageToSend) {
- // merge segments to make it into one messages
- for (const segment of msg.segments) {
- messageToSend.segments.push(segment);
- }
- }
- else {
- messageToSend = msg;
- }
- },
- });
- }
+ const onChangeForReplication = options?.replicate
+ ? async (change) => {
+ if (change.added) {
+ for (const entry of change.added) {
+ if (entry.head) {
+ entriesToReplicate.push(entry.entry);
  }
  }
- }
+ }
  }
- :
+ : undefined;
+ const persistCoordinate = async (entry) => {
+ const minReplicas = decodeReplicas(entry).getValue(this);
+ await this.findLeaders(await this.createCoordinates(entry, minReplicas), entry, { persist: {} });
+ };
+ let entriesToPersist = [];
+ let joinOptions = {
+ ...options,
+ onChange: async (change) => {
+ await onChangeForReplication?.(change);
+ for (const entry of change.added) {
+ if (!entry.head) {
+ continue;
+ }
+ if (!options?.replicate) {
+ // we persist coordinates for all added entries here
+ await persistCoordinate(entry.entry);
+ }
+ else {
+ // else we persist after replication range update has been done so that
+ // the indexed info becomes up to date
+ entriesToPersist.push(entry.entry);
+ }
+ }
+ },
+ };
  await this.log.join(entries, joinOptions);
- if (
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- const prev = options.persist.prev;
- // dont do anthing if nothing has changed
- if (prev.length > 0) {
- let allTheSame = true;
- for (const element of prev) {
- if (element.assignedToRangeBoundary !== assignToRangeBoundary) {
- allTheSame = false;
- break;
+ if (options?.replicate) {
+ let messageToSend = undefined;
+ await this.replicate(entriesToReplicate, {
+ checkDuplicates: true,
+ mergeSegments: typeof options.replicate !== "boolean" && options.replicate
+ ? options.replicate.mergeSegments
+ : false,
+ // we override the announce step here to make sure we announce all new replication info
+ // in one large message instead
+ announce: (msg) => {
+ if (msg instanceof AllReplicatingSegmentsMessage) {
+ throw new Error("Unexpected");
+ }
+ if (messageToSend) {
+ // merge segments to make it into one messages
+ for (const segment of msg.segments) {
+ messageToSend.segments.push(segment);
  }
  }
-
-
+ else {
+ messageToSend = msg;
  }
- }
+ },
+ });
+ for (const entry of entriesToPersist) {
+ await persistCoordinate(entry);
+ }
+ if (messageToSend) {
+ await this.rpc.send(messageToSend, {
+ priority: 1,
+ });
  }
- !this.closed &&
- (await this.persistCoordinate({
- leaders,
- coordinates,
- entry,
- }, {
- assignToRangeBoundary,
- }));
  }
- return { leaders, isLeader };
  }
-
-
-
-
-
+ /*
+ private async updateLeaders(
+ cursor: NumberFromType<R>,
+ prev: EntryReplicated<R>,
+ options?: {
+ roleAge?: number;
+ },
+ ): Promise<{
+ isLeader: boolean;
+ leaders: Map<string, { intersecting: boolean }>;
+ }> {
+ // we consume a list of coordinates in this method since if we are leader of one coordinate we want to persist all of them
+ const leaders = await this._findLeaders(cursor, options);
+ const isLeader = leaders.has(this.node.identity.publicKey.hashcode());
+ const isAtRangeBoundary = shouldAssignToRangeBoundary(leaders, 1);
+
+ // dont do anthing if nothing has changed
+ if (prev.assignedToRangeBoundary !== isAtRangeBoundary) {
+ return { isLeader, leaders };
+ }
+
+ await this.entryCoordinatesIndex.put(
+ new this.indexableDomain.constructorEntry({
+ assignedToRangeBoundary: isAtRangeBoundary,
+ coordinate: cursor,
+ meta: prev.meta,
+ hash: prev.hash,
+ }),
+ );
+
+ return { isLeader, leaders };
+ }
+ */
+ async _waitForReplicators(cursors, entry, waitFor, options = { timeout: this.waitForReplicatorTimeout }) {
+ const timeout = options.timeout ?? this.waitForReplicatorTimeout;
  return new Promise((resolve, reject) => {
  const removeListeners = () => {
  this.events.removeEventListener("replication:change", roleListener);
@@ -1681,18 +1862,32 @@ let SharedLog = class SharedLog extends Program {
  clearTimeout(timer);
  resolve(false);
  };
- const timer = setTimeout(() => {
+ const timer = setTimeout(async () => {
  removeListeners();
  resolve(false);
- },
- const check =
-
-
-
-
+ }, timeout);
+ const check = async () => {
+ let leaderKeys = new Set();
+ const leaders = await this.findLeaders(cursors, entry, {
+ ...options,
+ onLeader: (key) => {
+ options?.onLeader && options.onLeader(key);
+ leaderKeys.add(key);
+ },
+ });
+ for (const waitForKey of waitFor) {
+ if (waitForKey.replicator && !leaderKeys.has(waitForKey.key)) {
+ return;
+ }
+ if (!waitForKey.replicator && leaderKeys.has(waitForKey.key)) {
+ return;
+ }
  }
-
+ options?.onLeader && leaderKeys.forEach(options.onLeader);
+ removeListeners();
+ clearTimeout(timer);
+ resolve(leaders);
+ };
  const roleListener = () => {
  check();
  };
@@ -1702,51 +1897,135 @@ let SharedLog = class SharedLog extends Program {
  check();
  });
  }
-
+ /*
+ private async waitForIsLeader(
+ cursors: NumberFromType<R>[],
+ hash: string,
+ options: {
+ timeout: number;
+ } = { timeout: this.waitForReplicatorTimeout },
+ ): Promise<Map<string, { intersecting: boolean }> | false> {
+ return new Promise((resolve, reject) => {
+ const removeListeners = () => {
+ this.events.removeEventListener("replication:change", roleListener);
+ this.events.removeEventListener("replicator:mature", roleListener); // TODO replication:change event ?
+ this._closeController.signal.removeEventListener(
+ "abort",
+ abortListener,
+ );
+ };
+ const abortListener = () => {
+ removeListeners();
+ clearTimeout(timer);
+ resolve(false);
+ };
+
+ const timer = setTimeout(() => {
+ removeListeners();
+ resolve(false);
+ }, options.timeout);
+
+ const check = async () => {
+ const leaders = await this.mergeLeadersMap(await Promise.all(cursors.map(x => this.findLeaders(x))));
+ const isLeader = leaders.has(hash);
+ if (isLeader) {
+ removeListeners();
+ clearTimeout(timer);
+ resolve(leaders);
+ }
+ }
+
+ const roleListener = () => {
+ check();
+ };
+
+ this.events.addEventListener("replication:change", roleListener); // TODO replication:change event ?
+ this.events.addEventListener("replicator:mature", roleListener); // TODO replication:change event ?
+ this._closeController.signal.addEventListener("abort", abortListener);
+ check();
+ });
+ } */
+ /* async findLeaders(
+ cursor:
+ | NumberFromType<R>[]
+ | {
+ entry: ShallowOrFullEntry<any> | EntryReplicated<R>;
+ replicas: number;
+ },
+ options?: {
+ roleAge?: number;
+ },
+ ): Promise<Map<string, { intersecting: boolean }>> {
  if (this.closed) {
  const map = new Map(); // Assumption: if the store is closed, always assume we have responsibility over the data
  map.set(this.node.identity.publicKey.hashcode(), { intersecting: false });
  return map;
  }
+
  const coordinates = Array.isArray(cursor)
  ? cursor
  : await this.createCoordinates(cursor.entry, cursor.replicas);
- const leaders = await this.
+ const leaders = await this.findLeadersFromN(coordinates, options);
+
  return leaders;
- }
- async groupByLeaders(
-
-
-
-
-
-
+ } */
+ /* private async groupByLeaders(
+ entries: (ShallowOrFullEntry<any> | EntryReplicated<R>)[],
+ options?: {
+ roleAge?: number;
+ },
+ ) {
+ try {
+ const leaders = await Promise.all(
+ entries.map(async (x) => {
+ return this.findLeadersFromEntry(x, decodeReplicas(x).getValue(this), options);
+ }),
+ );
+ const map = new Map<string, number[]>();
+ leaders.forEach((leader, i) => {
+ for (const [hash] of leader) {
+ const arr = map.get(hash) ?? [];
+ arr.push(i);
+ map.set(hash, arr);
+ }
+ });
+ return map;
+ } catch (error) {
+ if (error instanceof NotStartedError || error instanceof IndexNotStartedError) {
+ // ignore because we are shutting down
+ return new Map<string, number[]>();
+ } else {
+ throw error;
  }
- }
-
- }
+ }
+ } */
  async createCoordinates(entry, minReplicas) {
- const cursor =
-
+ const cursor = typeof entry === "number" || typeof entry === "bigint"
+ ? entry
+ : await this.domain.fromEntry(entry);
+ const out = this.indexableDomain.numbers.getGrid(cursor, minReplicas);
  return out;
  }
  async getCoordinates(entry) {
  const result = await this.entryCoordinatesIndex
  .iterate({ query: { hash: entry.hash } })
  .all();
- return result.
- }
- async persistCoordinate(properties
- let assignedToRangeBoundary =
-
+ return result[0].value.coordinates;
+ }
+ async persistCoordinate(properties) {
+ let assignedToRangeBoundary = shouldAssignToRangeBoundary(properties.leaders, properties.replicas);
+ if (properties.prev &&
+ properties.prev.assignedToRangeBoundary === assignedToRangeBoundary) {
+ return; // no change
+ }
+ await this.entryCoordinatesIndex.put(new this.indexableDomain.constructorEntry({
+ assignedToRangeBoundary,
+ coordinates: properties.coordinates,
+ meta: properties.entry.meta,
+ hash: properties.entry.hash,
+ }));
  for (const coordinate of properties.coordinates) {
-
- assignedToRangeBoundary,
- coordinate,
- meta: properties.entry.meta,
- hash: properties.entry.hash,
- }));
+ this.coordinateToHash.add(coordinate, properties.entry.hash);
  }
  if (properties.entry.meta.next.length > 0) {
  await this.entryCoordinatesIndex.del({
@@ -1758,30 +2037,85 @@ let SharedLog = class SharedLog extends Program {
  await this.entryCoordinatesIndex.del({ query: properties });
  }
  async getDefaultMinRoleAge() {
- if (
+ if (this._isReplicating === false) {
  return 0;
  }
  const now = +new Date();
- const
-
-
+ const subscribers = (await this.node.services.pubsub.getSubscribers(this.rpc.topic))
+ ?.length ?? 1;
+ const diffToOldest = subscribers > 1 ? now - this.oldestOpenTime - 1 : Number.MAX_SAFE_INTEGER;
+ const result = Math.min(this.timeUntilRoleMaturity, Math.max(diffToOldest, this.timeUntilRoleMaturity), Math.max(Math.round((this.timeUntilRoleMaturity * Math.log(subscribers + 1)) / 3), this.timeUntilRoleMaturity)); // / 3 so that if 2 replicators and timeUntilRoleMaturity = 1e4 the result will be 1
+ return result;
+ /* return Math.min(1e3, this.timeUntilRoleMaturity); */
+ }
+ async findLeaders(cursors, entry, options) {
+ // we consume a list of coordinates in this method since if we are leader of one coordinate we want to persist all of them
+ let isLeader = false;
+ const set = await this._findLeaders(cursors, options);
+ for (const key of set.keys()) {
+ if (options?.onLeader) {
+ options.onLeader(key);
+ isLeader = isLeader || key === this.node.identity.publicKey.hashcode();
+ }
+ }
+ if (options?.persist !== false) {
+ if (isLeader || options?.persist) {
+ !this.closed &&
+ (await this.persistCoordinate({
+ leaders: set,
+ coordinates: cursors,
+ replicas: cursors.length,
+ entry,
+ prev: options?.persist?.prev,
+ }));
+ }
+ }
+ return set;
+ }
+ async isLeader(properties, options) {
+ let cursors = await this.createCoordinates(properties.entry, properties.replicas);
+ const leaders = await this.findLeaders(cursors, properties.entry, options);
+ if (leaders.has(this.node.identity.publicKey.hashcode())) {
+ return true;
+ }
+ return false;
  }
- async
+ async _findLeaders(cursors, options) {
  const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO -500 as is added so that i f someone else is just as new as us, then we treat them as mature as us. without -500 we might be slower syncing if two nodes starts almost at the same time
- return getSamples(
+ return getSamples(cursors, this.replicationIndex, roleAge, this.indexableDomain.numbers, {
+ uniqueReplicators: this.uniqueReplicators,
+ });
+ }
+ async findLeadersFromEntry(entry, replicas, options) {
+ const coordinates = await this.createCoordinates(entry, replicas);
+ const result = await this._findLeaders(coordinates, options);
+ return result;
  }
  async isReplicator(entry, options) {
- return this.isLeader({
+ return this.isLeader({
+ entry,
+ replicas: maxReplicas(this, [entry]),
+ }, options);
  }
  async handleSubscriptionChange(publicKey, topics, subscribed) {
  if (!topics.includes(this.topic)) {
  return;
  }
  if (!subscribed) {
-
-
+ this.removePeerFromGidPeerHistory(publicKey.hashcode());
+ for (const [k, v] of this._requestIPruneSent) {
+ v.delete(publicKey.hashcode());
+ if (v.size === 0) {
+ this._requestIPruneSent.delete(k);
+ }
  }
- this.
+ for (const [k, v] of this._requestIPruneResponseReplicatorSet) {
+ v.delete(publicKey.hashcode());
+ if (v.size === 0) {
+ this._requestIPruneSent.delete(k);
+ }
+ }
+ this.syncronizer.onPeerDisconnected(publicKey);
  (await this.replicationIndex.count({
  query: { hash: publicKey.hashcode() },
  })) > 0 &&
@@ -1796,14 +2130,14 @@ let SharedLog = class SharedLog extends Program {
  .send(new AllReplicatingSegmentsMessage({
  segments: replicationSegments.map((x) => x.toReplicationRange()),
  }), {
- mode: new
+ mode: new SeekDelivery({ redundancy: 1, to: [publicKey] }),
  })
  .catch((e) => logger.error(e.toString()));
  if (this.v8Behaviour) {
  // for backwards compatibility
  this.rpc
  .send(new ResponseRoleMessage({ role: await this.getRole() }), {
- mode: new
+ mode: new SeekDelivery({ redundancy: 1, to: [publicKey] }),
  })
  .catch((e) => logger.error(e.toString()));
  }
@@ -1813,11 +2147,24 @@ let SharedLog = class SharedLog extends Program {
  await this.removeReplicator(publicKey);
  }
  }
+ getClampedReplicas(customValue) {
+ if (!customValue) {
+ return this.replicas.min;
+ }
+ const min = customValue.getValue(this);
+ const maxValue = Math.max(this.replicas.min.getValue(this), min);
+ if (this.replicas.max) {
+ return new AbsoluteReplicas(Math.min(maxValue, this.replicas.max.getValue(this)));
+ }
+ return new AbsoluteReplicas(maxValue);
+ }
  prune(entries, options) {
  if (options?.unchecked) {
  return [...entries.values()].map((x) => {
- this._gidPeersHistory.delete(x.meta.gid);
-
+ this._gidPeersHistory.delete(x.entry.meta.gid);
+ this._requestIPruneSent.delete(x.entry.hash);
+ this._requestIPruneResponseReplicatorSet.delete(x.entry.hash);
+ return this.log.remove(x.entry, {
  recursively: true,
  });
  });
@@ -1829,16 +2176,22 @@ let SharedLog = class SharedLog extends Program {
  // - An entry is joined, where min replicas is lower than before (for all heads for this particular gid) and therefore we are not replicating anymore for this particular gid
  // - Peers join and leave, which means we might not be a replicator anymore
  const promises = [];
-
-
- for (const entry of entries.values()) {
+ let peerToEntries = new Map();
+ let cleanupTimer = [];
+ for (const { entry, leaders } of entries.values()) {
+ for (const leader of leaders.keys()) {
+ let set = peerToEntries.get(leader);
+ if (!set) {
+ set = [];
+ peerToEntries.set(leader, set);
+ }
+ set.push(entry.hash);
+ }
  const pendingPrev = this._pendingDeletes.get(entry.hash);
  if (pendingPrev) {
  promises.push(pendingPrev.promise.promise);
  continue;
  }
- filteredEntries.push(entry);
- const existCounter = new Set();
  const minReplicas = decodeReplicas(entry);
  const deferredPromise = pDefer();
  const clear = () => {
@@ -1851,10 +2204,45 @@ let SharedLog = class SharedLog extends Program {
  };
  const resolve = () => {
  clear();
-
+ cleanupTimer.push(setTimeout(async () => {
+ if (await this.isLeader({
+ entry,
+ replicas: minReplicas.getValue(this),
+ })) {
+ deferredPromise.reject(new Error("Failed to delete, is leader again"));
+ return;
+ }
+ this._gidPeersHistory.delete(entry.meta.gid);
+ this._requestIPruneSent.delete(entry.hash);
+ this._requestIPruneResponseReplicatorSet.delete(entry.hash);
+ return this.log
+ .remove(entry, {
+ recursively: true,
+ })
+ .then(() => {
+ deferredPromise.resolve();
+ })
+ .catch((e) => {
+ deferredPromise.reject(e);
+ })
+ .finally(async () => {
+ this._gidPeersHistory.delete(entry.meta.gid);
+ this._requestIPruneSent.delete(entry.hash);
+ this._requestIPruneResponseReplicatorSet.delete(entry.hash);
+ // TODO in the case we become leader again here we need to re-add the entry
+ if (await this.isLeader({
+ entry,
+ replicas: minReplicas.getValue(this),
+ })) {
+ logger.error("Unexpected: Is leader after delete");
+ }
+ });
+ }, this.waitForPruneDelay));
  };
  const reject = (e) => {
  clear();
+ this._requestIPruneSent.delete(entry.hash);
+ this._requestIPruneResponseReplicatorSet.delete(entry.hash);
  deferredPromise.reject(e);
  };
  let cursor = undefined;
@@ -1868,106 +2256,143 @@ let SharedLog = class SharedLog extends Program {
  },
  reject,
  resolve: async (publicKeyHash) => {
- const
- const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- .catch((e) => {
- reject(new Error("Failed to delete entry: " + e.toString()));
- });
- }
+ const minReplicasObj = this.getClampedReplicas(minReplicas);
+ const minReplicasValue = minReplicasObj.getValue(this);
+ // TODO is this check necessary
+ if (!(await this._waitForReplicators(cursor ??
+ (cursor = await this.createCoordinates(entry, minReplicasValue)), entry, [
+ { key: publicKeyHash, replicator: true },
+ {
+ key: this.node.identity.publicKey.hashcode(),
+ replicator: false,
+ },
+ ], {
+ persist: false,
+ }))) {
+ return;
+ }
+ let existCounter = this._requestIPruneResponseReplicatorSet.get(entry.hash);
+ if (!existCounter) {
+ existCounter = new Set();
+ this._requestIPruneResponseReplicatorSet.set(entry.hash, existCounter);
+ }
+ existCounter.add(publicKeyHash);
+ if (minReplicasValue <= existCounter.size) {
+ resolve();
  }
  },
  });
  promises.push(deferredPromise.promise);
  }
-
-
+ const emitMessages = async (entries, to) => {
+ const filteredSet = [];
+ for (const entry of entries) {
+ let set = this._requestIPruneSent.get(entry);
+ if (!set) {
+ set = new Set();
+ this._requestIPruneSent.set(entry, set);
+ }
+ /* if (set.has(to)) {
+ continue;
+ } */
+ set.add(to);
+ filteredSet.push(entry);
+ }
+ if (filteredSet.length > 0) {
+ return this.rpc.send(new RequestIPrune({
+ hashes: filteredSet,
+ }), {
+ mode: new SilentDelivery({
+ to: [to], // TODO group by peers?
+ redundancy: 1,
+ }),
+ priority: 1,
+ });
+ }
+ };
+ for (const [k, v] of peerToEntries) {
+ emitMessages(v, k);
  }
- const
- this.rpc.send(
-
-
- mode: new SilentDelivery({
- to: [to], // TODO group by peers?
- redundancy: 1,
+ /* const fn = async () => {
+ this.rpc.send(
+ new RequestIPrune({
+ hashes: filteredEntries.map(x => x.hash),
  }),
-
-
+ {
+ mode: new SilentDelivery({
+ to: [...await this.getReplicators()],
+ redundancy: 1,
+ }),
+ priority: 1,
+ },
+ )
  };
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ fn() */
+ /* const onPeersChange = async (
+ e?: CustomEvent<ReplicatorJoinEvent>,
+ reason?: string,
+ ) => {
+ if (
+ true // e.detail.publicKey.equals(this.node.identity.publicKey) === false // TODO proper condition
+ ) {
+
+ const peerToEntryMap = await this.groupByLeaders(
+ filteredEntries
+ .filter((x) => !readyToDelete.has(x.hash))
+ .map((x) => {
+ return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined?
+ }),
+ );
+ for (const receiver of peerToEntryMap.keys()) {
+ if (receiver === this.node.identity.publicKey.hashcode()) {
+ continue;
+ }
+ const peerEntries = peerToEntryMap.get(receiver);
+ if (peerEntries && peerEntries.length > 0) {
+ emitMessages(
+ peerEntries.map((x) => filteredEntries[x].hash),
+ receiver,
+ );
+ }
  }
  }
- };
+ }; */
  // check joining peers
+ /* this.events.addEventListener("replication:change", onPeersChange);
  this.events.addEventListener("replicator:mature", onPeersChange);
- this.events.addEventListener("replicator:join", onPeersChange);
-
+ this.events.addEventListener("replicator:join", onPeersChange); */
+ let cleanup = () => {
+ for (const timer of cleanupTimer) {
+ clearTimeout(timer);
+ }
+ /* this.events.removeEventListener("replication:change", onPeersChange);
  this.events.removeEventListener("replicator:mature", onPeersChange);
- this.events.removeEventListener("replicator:join", onPeersChange);
-
+ this.events.removeEventListener("replicator:join", onPeersChange); */
+ this._closeController.signal.removeEventListener("abort", cleanup);
+ };
+ Promise.allSettled(promises).finally(cleanup);
+ this._closeController.signal.addEventListener("abort", cleanup);
  return promises;
  }
  /**
  * For debugging
  */
- async getPrunable() {
+ async getPrunable(roleAge) {
  const heads = await this.log.getHeads(true).all();
  let prunable = [];
  for (const head of heads) {
- const isLeader = await this.isLeader({
- entry: head,
- replicas: maxReplicas(this, [head]),
- });
+ const isLeader = await this.isLeader({ entry: head, replicas: maxReplicas(this, [head]) }, { roleAge });
  if (!isLeader) {
  prunable.push(head);
  }
  }
  return prunable;
  }
- async getNonPrunable() {
+ async getNonPrunable(roleAge) {
  const heads = await this.log.getHeads(true).all();
  let nonPrunable = [];
  for (const head of heads) {
- const isLeader = await this.isLeader({
- entry: head,
- replicas: maxReplicas(this, [head]),
- });
+ const isLeader = await this.isLeader({ entry: head, replicas: maxReplicas(this, [head]) }, { roleAge });
  if (isLeader) {
  nonPrunable.push(head);
  }
@@ -1978,7 +2403,7 @@ let SharedLog = class SharedLog extends Program {
  if (options?.clearCache) {
  this._gidPeersHistory.clear();
  }
- this.onReplicationChange((await this.
+ this.onReplicationChange((await this.getAllReplicationSegments()).map((x) => {
  return { range: x, type: "added" };
  }));
  }
@@ -1993,75 +2418,70 @@ let SharedLog = class SharedLog extends Program {
  if (this.closed) {
  return;
  }
+ await this.log.trim();
  const change = mergeReplicationChanges(changeOrChanges);
  const changed = false;
  try {
- await this.log.trim();
  const uncheckedDeliver = new Map();
- const
- for await (const { gid, entries: coordinates } of toRebalance(change, this.entryCoordinatesIndex)) {
+ for await (const entryReplicated of toRebalance(change, this.entryCoordinatesIndex)) {
  if (this.closed) {
  break;
  }
-
-
-
-
-
+ let oldPeersSet = this._gidPeersHistory.get(entryReplicated.gid);
+ let isLeader = false;
+ let currentPeers = await this.findLeaders(entryReplicated.coordinates, entryReplicated, {
+ // we do this to make sure new replicators get data even though they are not mature so they can figure out if they want to replicate more or less
+ // TODO make this smarter because if a new replicator is not mature and want to replicate too much data the syncing overhead can be bad
  roleAge: 0,
- persist: {
- prev: coordinates,
- },
  });
- if (isLeader) {
- for (const entry of coordinates) {
- this.pruneDebouncedFn.delete(entry.hash);
- }
- }
- const currentPeersSet = new Set(currentPeers.keys());
- this._gidPeersHistory.set(gid, currentPeersSet);
  for (const [currentPeer] of currentPeers) {
  if (currentPeer === this.node.identity.publicKey.hashcode()) {
+ isLeader = true;
  continue;
  }
  if (!oldPeersSet?.has(currentPeer)) {
  let set = uncheckedDeliver.get(currentPeer);
  if (!set) {
- set = new
+ set = new Map();
  uncheckedDeliver.set(currentPeer, set);
  }
-
- set.
+ if (!set.has(entryReplicated.hash)) {
+ set.set(entryReplicated.hash, entryReplicated);
  }
+ /* for (const entry of coordinates) {
+ let arr = set.get(entry.hash);
+ if (!arr) {
+ arr = [];
+ set.set(entry.hash, arr);
+ }
+ arr.push(entry);
+ } */
  }
  }
+ this.addPeersToGidPeerHistory(entryReplicated.gid, currentPeers.keys(), true);
  if (!isLeader) {
- if (
-
-
-
-
- entriesToDelete = entriesToDelete.filter((entry) => this.sync(entry) === false);
- }
- allEntriesToDelete.push(...entriesToDelete);
+ if (!this.sync || this.sync(entryReplicated) === false) {
+ this.pruneDebouncedFn.add({
+ key: entryReplicated.hash,
+ value: { entry: entryReplicated, leaders: currentPeers },
+ });
  }
+ this.responseToPruneDebouncedFn.delete(entryReplicated.hash); // don't allow others to prune because of expecting me to replicating this entry
  }
  else {
-
-
-
-
-
+ this.pruneDebouncedFn.delete(entryReplicated.hash);
+ await this._pendingDeletes
+ .get(entryReplicated.hash)
+ ?.reject(new Error("Failed to delete, is leader again"));
+ this._requestIPruneSent.delete(entryReplicated.hash);
  }
  }
  for (const [target, entries] of uncheckedDeliver) {
- this.
-
+ this.syncronizer.onMaybeMissingEntries({
+ entries,
+ targets: [target],
  });
  }
- if (allEntriesToDelete.length > 0) {
- allEntriesToDelete.map((x) => this.pruneDebouncedFn.add({ key: x.hash, value: x }));
- }
  return changed;
  }
  catch (error) {
@@ -2069,58 +2489,16 @@ let SharedLog = class SharedLog extends Program {
  throw error;
  }
  }
- async requestSync(hashes, to) {
- const now = +new Date();
- for (const node of to) {
- let map = this.syncInFlight.get(node);
- if (!map) {
- map = new Map();
- this.syncInFlight.set(node, map);
- }
- for (const hash of hashes) {
- map.set(hash, { timestamp: now });
- }
- }
- await this.rpc.send(new ResponseMaybeSync({
- hashes: hashes,
- }), {
- mode: new SilentDelivery({ to, redundancy: 1 }),
- });
- }
  async _onUnsubscription(evt) {
- logger.
+ logger.trace(`Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(evt.detail.unsubscriptions.map((x) => x))} '`);
  this.latestReplicationInfoMessage.delete(evt.detail.from.hashcode());
  return this.handleSubscriptionChange(evt.detail.from, evt.detail.unsubscriptions, false);
  }
  async _onSubscription(evt) {
- logger.
+ logger.trace(`New peer '${evt.detail.from.hashcode()}' connected to '${JSON.stringify(evt.detail.subscriptions.map((x) => x))}'`);
  this.remoteBlocks.onReachable(evt.detail.from);
  return this.handleSubscriptionChange(evt.detail.from, evt.detail.subscriptions, true);
  }
- async addToHistory(usedMemory, factor) {
- (this.history || (this.history = [])).push({ usedMemory, factor });
- // Keep only the last N entries in the history array (you can adjust N based on your needs)
- const maxHistoryLength = 10;
- if (this.history.length > maxHistoryLength) {
- this.history.shift();
- }
- }
- async calculateTrend() {
- // Calculate the average change in factor per unit change in memory usage
- const factorChanges = this.history.map((entry, index) => {
- if (index > 0) {
- const memoryChange = entry.usedMemory - this.history[index - 1].usedMemory;
- if (memoryChange !== 0) {
- const factorChange = entry.factor - this.history[index - 1].factor;
- return factorChange / memoryChange;
- }
- }
- return 0;
- });
- // Return the average factor change per unit memory change
- return (factorChanges.reduce((sum, change) => sum + change, 0) /
- factorChanges.length);
- }
  async rebalanceParticipation() {
  // update more participation rate to converge to the average expected rate or bounded by
  // resources such as memory and or cpu
@@ -2152,9 +2530,9 @@ let SharedLog = class SharedLog extends Program {
  dynamicRange.widthNormalized;
  if (relativeDifference > 0.0001) {
  // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
- dynamicRange = new
- offset:
- length:
+ dynamicRange = new this.indexableDomain.constructorRange({
+ offset: dynamicRange.start1,
+ length: this.indexableDomain.numbers.denormalize(newFactor),
  publicKeyHash: dynamicRange.hash,
  id: dynamicRange.id,
  mode: dynamicRange.mode,
@@ -2183,6 +2561,17 @@ let SharedLog = class SharedLog extends Program {
  const resp = await fn();
  return resp;
  }
+ getDynamicRangeOffset() {
+ const options = this._logProperties
+ ?.replicate;
+ if (options?.offset != null) {
+ const normalized = options.normalized ?? true;
+ return (normalized
+ ? this.indexableDomain.numbers.denormalize(Number(options.offset))
+ : options.offset);
+ }
+ return this.indexableDomain.numbers.bytesToNumber(this.node.identity.publicKey.bytes);
+ }
  async getDynamicRange() {
  let dynamicRangeId = getIdForDynamicRange(this.node.identity.publicKey);
  let range = (await this.replicationIndex
@@ -2196,10 +2585,9 @@ let SharedLog = class SharedLog extends Program {
  })
  .all())?.[0]?.value;
  if (!range) {
- range = new
-
-
- length: 0,
+ range = new this.indexableDomain.constructorRange({
+ offset: this.getDynamicRangeOffset(),
+ length: this.indexableDomain.numbers.zero,
  publicKeyHash: this.node.identity.publicKey.hashcode(),
  mode: ReplicationIntent.NonStrict,
  timestamp: BigInt(+new Date()),
@@ -2213,50 +2601,16 @@ let SharedLog = class SharedLog extends Program {
  }
  return range;
  }
- clearSyncProcess(hash) {
- const inflight = this.syncInFlightQueue.get(hash);
- if (inflight) {
- for (const key of inflight) {
- const map = this.syncInFlightQueueInverted.get(key.hashcode());
- if (map) {
- map.delete(hash);
- if (map.size === 0) {
- this.syncInFlightQueueInverted.delete(key.hashcode());
- }
- }
- }
- this.syncInFlightQueue.delete(hash);
- }
- }
- clearSyncProcessPublicKey(publicKey) {
- this.syncInFlight.delete(publicKey.hashcode());
- const map = this.syncInFlightQueueInverted.get(publicKey.hashcode());
- if (map) {
- for (const hash of map) {
- const arr = this.syncInFlightQueue.get(hash);
- if (arr) {
- const filtered = arr.filter((x) => !x.equals(publicKey));
- if (filtered.length > 0) {
- this.syncInFlightQueue.set(hash, filtered);
- }
- else {
- this.syncInFlightQueue.delete(hash);
- }
- }
- }
- this.syncInFlightQueueInverted.delete(publicKey.hashcode());
- }
- }
  async onEntryAdded(entry) {
  const ih = this._pendingIHave.get(entry.hash);
  if (ih) {
  ih.clear();
  ih.callback(entry);
  }
- this.
+ this.syncronizer.onEntryAdded(entry);
  }
  onEntryRemoved(hash) {
- this.
+ this.syncronizer.onEntryRemoved(hash);
  }
  };
  __decorate([