@peerbit/shared-log 10.0.6 → 10.1.0
- package/dist/benchmark/get-samples.js +1 -1
- package/dist/benchmark/get-samples.js.map +1 -1
- package/dist/benchmark/utils.js +1 -1
- package/dist/benchmark/utils.js.map +1 -1
- package/dist/src/index.d.ts +15 -10
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +138 -65
- package/dist/src/index.js.map +1 -1
- package/dist/src/ranges.d.ts +95 -11
- package/dist/src/ranges.d.ts.map +1 -1
- package/dist/src/ranges.js +437 -83
- package/dist/src/ranges.js.map +1 -1
- package/dist/src/replication-domain-hash.d.ts +2 -2
- package/dist/src/replication-domain-hash.d.ts.map +1 -1
- package/dist/src/replication-domain-hash.js +2 -17
- package/dist/src/replication-domain-hash.js.map +1 -1
- package/dist/src/replication-domain-time.d.ts +7 -2
- package/dist/src/replication-domain-time.d.ts.map +1 -1
- package/dist/src/replication-domain-time.js +7 -12
- package/dist/src/replication-domain-time.js.map +1 -1
- package/dist/src/replication-domain.d.ts +3 -20
- package/dist/src/replication-domain.d.ts.map +1 -1
- package/dist/src/replication-domain.js +0 -33
- package/dist/src/replication-domain.js.map +1 -1
- package/package.json +4 -4
- package/src/index.ts +205 -107
- package/src/ranges.ts +669 -127
- package/src/replication-domain-hash.ts +16 -29
- package/src/replication-domain-time.ts +46 -40
- package/src/replication-domain.ts +7 -59
package/dist/src/index.js (CHANGED)
@@ -26,13 +26,13 @@ import { BlocksMessage } from "./blocks.js";
  import { CPUUsageIntervalLag } from "./cpu.js";
  import { debounceAccumulator, debounceFixedInterval, debouncedAccumulatorMap, } from "./debounce.js";
  import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune, ResponseIPrune, createExchangeHeadsMessages, } from "./exchange-heads.js";
- import { MAX_U32, bytesToNumber, createNumbers, denormalizer, } from "./integers.js";
+ import { MAX_U32, MAX_U64, bytesToNumber, createNumbers, denormalizer, } from "./integers.js";
  import { TransportMessage } from "./message.js";
  import { PIDReplicationController } from "./pid.js";
- import { EntryReplicatedU32, EntryReplicatedU64, ReplicationIntent, ReplicationRangeIndexableU32, ReplicationRangeIndexableU64, ReplicationRangeMessage, SyncStatus, appromixateCoverage, getCoverSet, getSamples,
+ import { EntryReplicatedU32, EntryReplicatedU64, ReplicationIntent, ReplicationRangeIndexableU32, ReplicationRangeIndexableU64, ReplicationRangeMessage, SyncStatus, appromixateCoverage, countCoveringRangesSameOwner, debounceAggregationChanges, getAllMergeCandiates, getCoverSet, getSamples, isMatured, isReplicationRangeMessage, mergeRanges, minimumWidthToCover, shouldAssigneToRangeBoundary as shouldAssignToRangeBoundary, toRebalance, } from "./ranges.js";
  import { createReplicationDomainHash, } from "./replication-domain-hash.js";
  import { createReplicationDomainTime, } from "./replication-domain-time.js";
- import {
+ import {} from "./replication-domain.js";
  import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, MinReplicas, ReplicationError, RequestReplicationInfoMessage, ResponseRoleMessage, StoppedReplicating, decodeReplicas, encodeReplicas, maxReplicas, } from "./replication.js";
  import { Observer, Replicator } from "./role.js";
  import { RatelessIBLTSynchronizer } from "./sync/rateless-iblt.js";
@@ -42,6 +42,7 @@ export { createReplicationDomainHash, createReplicationDomainTime, };
  export { CPUUsageIntervalLag };
  export * from "./replication.js";
  export { EntryReplicatedU32, EntryReplicatedU64, };
+ export { MAX_U32, MAX_U64 };
  export const logger = loggerFn({ module: "shared-log" });
  const getLatestEntry = (entries) => {
  let latest = undefined;
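
The hunk above adds `MAX_U32` and `MAX_U64` to the module's public exports. A minimal consumption sketch, assuming the constants are re-exported from the package root exactly as the added `export` line suggests:

```ts
// Sketch only; the import path assumes the package-root re-export added above.
import { MAX_U32, MAX_U64 } from "@peerbit/shared-log";

// Upper bounds of the u32 and u64 coordinate spaces used by the replication domains.
console.log(MAX_U32, MAX_U64);
```
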
@@ -135,6 +136,7 @@ let SharedLog = class SharedLog extends Program {
  _replicationRangeIndex;
  _entryCoordinatesIndex;
  coordinateToHash;
+ recentlyRebalanced;
  uniqueReplicators;
  /* private _totalParticipation!: number; */
  // gid -> coordinate -> publicKeyHash list (of owners)
@@ -230,13 +232,14 @@
  ) */
  () => intervalTime);
  }
- async _replicate(options, { reset, checkDuplicates,
+ async _replicate(options, { reset, checkDuplicates, announce, mergeSegments, } = {}) {
  let offsetWasProvided = false;
  if (isUnreplicationOptions(options)) {
  await this.unreplicate();
  }
  else {
- let
+ let rangesToReplicate = [];
+ let rangesToUnreplicate = [];
  if (options == null) {
  options = {};
  }
@@ -254,11 +257,11 @@
  // not allowed
  return;
  }
-
+ rangesToReplicate = [maybeRange];
  offsetWasProvided = true;
  }
  else if (isReplicationRangeMessage(options)) {
-
+ rangesToReplicate = [
  options.toReplicationRangeIndexable(this.node.identity.publicKey),
  ];
  offsetWasProvided = true;
@@ -302,23 +305,17 @@
  let factorDenormalized = !normalized
  ? factor
  : this.indexableDomain.numbers.denormalize(factor);
-
+ rangesToReplicate.push(new this.indexableDomain.constructorRange({
  id: rangeArg.id,
  // @ts-ignore
  offset: offset,
  // @ts-ignore
-
+ width: (factor === "all"
  ? fullWidth
  : factor === "right"
  ? // @ts-ignore
  fullWidth - offset
  : factorDenormalized),
- /* typeof factor === "number"
- ? factor
- : factor === "all"
- ? width
- // @ts-ignore
- : width - offset, */
  publicKeyHash: this.node.identity.publicKey.hashcode(),
  mode: rangeArg.strict
  ? ReplicationIntent.Strict
@@ -326,12 +323,30 @@
  timestamp: timestamp ?? BigInt(+new Date()),
  }));
  }
- if (mergeSegments
-
-
+ if (mergeSegments) {
+ let range = rangesToReplicate.length > 1
+ ? mergeRanges(rangesToReplicate, this.indexableDomain.numbers)
+ : rangesToReplicate[0];
+ // also merge segments that are already in the index
+ if (this.domain.canMerge) {
+ const mergable = await getAllMergeCandiates(this.replicationIndex, range, this.indexableDomain.numbers);
+ const mergeableFiltered = [range];
+ for (const mergeCandidate of mergable) {
+ if (this.domain.canMerge(mergeCandidate, range)) {
+ mergeableFiltered.push(mergeCandidate);
+ if (mergeCandidate.idString !== range.idString) {
+ rangesToUnreplicate.push(mergeCandidate);
+ }
+ }
+ }
+ if (mergeableFiltered.length > 1) {
+ range = mergeRanges(mergeableFiltered, this.indexableDomain.numbers);
+ }
+ }
+ rangesToReplicate = [range];
  }
  }
- for (const range of
+ for (const range of rangesToReplicate) {
  this.oldestOpenTime = Math.min(Number(range.timestamp), this.oldestOpenTime);
  }
  let resetRanges = reset;
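
The new `mergeSegments` flag makes `_replicate` collapse the requested ranges via `mergeRanges` and, when the active domain defines `canMerge`, also fold in mergeable segments already stored in the replication index; absorbed segments are queued in `rangesToUnreplicate` and announced as `StoppedReplicating` in the following hunk. A hedged usage sketch, assuming the flag is forwarded unchanged from the public `replicate(rangeOrEntry, options)` call (as the later `return this._replicate(range, options)` hunk indicates):

```ts
import type { SharedLog } from "@peerbit/shared-log";

// Hypothetical setup; `log` stands for an already-open SharedLog instance.
declare const log: SharedLog<any>;

await log.replicate({ factor: 0.2, offset: 0.0 });
const merged = await log.replicate(
  { factor: 0.2, offset: 0.1 },
  { mergeSegments: true }, // segments the domain considers mergeable collapse into one range
);
// replicate() now resolves to the ranges that ended up being replicated (see below).
```
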
@@ -341,13 +356,20 @@
  // but ({ replicate: 0.5, offset: 0.5 }) means that we want to add a range
  // TODO make behaviour more clear
  }
- await this.startAnnounceReplicating(
+ await this.startAnnounceReplicating(rangesToReplicate, {
  reset: resetRanges ?? false,
  checkDuplicates,
  announce,
- syncStatus,
  });
-
+ if (rangesToUnreplicate.length > 0) {
+ await this.removeReplicationRanges(rangesToUnreplicate, this.node.identity.publicKey);
+ await this.rpc.send(new StoppedReplicating({
+ segmentIds: rangesToUnreplicate.map((x) => x.id),
+ }), {
+ priority: 1,
+ });
+ }
+ return rangesToReplicate;
  }
  }
  setupDebouncedRebalancing(options) {
@@ -373,7 +395,6 @@
  }
  async replicate(rangeOrEntry, options) {
  let range = undefined;
- let syncStatus = SyncStatus.Unsynced;
  if (rangeOrEntry instanceof ReplicationRangeMessage) {
  range = rangeOrEntry;
  }
@@ -383,7 +404,6 @@
  offset: await this.domain.fromEntry(rangeOrEntry),
  normalized: false,
  };
- syncStatus = SyncStatus.Synced; /// we already have the entries
  }
  else if (Array.isArray(rangeOrEntry)) {
  let ranges = [];
@@ -393,8 +413,8 @@
  factor: 1,
  offset: await this.domain.fromEntry(entry),
  normalized: false,
+ strict: true,
  });
- syncStatus = SyncStatus.Synced; /// we already have the entries
  }
  else {
  ranges.push(entry);
@@ -405,18 +425,34 @@
  else {
  range = rangeOrEntry ?? true;
  }
- return this._replicate(range,
+ return this._replicate(range, options);
  }
  async unreplicate(rangeOrEntry) {
- let
+ let segmentIds;
  if (rangeOrEntry instanceof Entry) {
- range = {
+ let range = {
  factor: 1,
  offset: await this.domain.fromEntry(rangeOrEntry),
  };
+ const indexed = this.replicationIndex.iterate({
+ query: {
+ width: 1,
+ start1: range.offset /* ,
+ hash: this.node.identity.publicKey.hashcode(), */,
+ },
+ });
+ segmentIds = (await indexed.all()).map((x) => x.id.key);
+ if (segmentIds.length === 0) {
+ logger.warn("No segment found to unreplicate");
+ return;
+ }
  }
- else if (rangeOrEntry
-
+ else if (Array.isArray(rangeOrEntry)) {
+ segmentIds = rangeOrEntry.map((x) => x.id);
+ if (segmentIds.length === 0) {
+ logger.warn("No segment found to unreplicate");
+ return;
+ }
  }
  else {
  this._isReplicating = false;
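
`unreplicate` now resolves segment ids itself: an `Entry` is mapped through `domain.fromEntry` to a coordinate and matched against the local replication index, while an array of ranges contributes its ids directly; both branches warn and return early when nothing matches. A hedged sketch of the two call shapes, assuming `log` is an open `SharedLog` and `entry` one of its entries:

```ts
import type { SharedLog } from "@peerbit/shared-log";
import type { Entry } from "@peerbit/log";

// Hypothetical setup; both call forms mirror the branches in the hunk above.
declare const log: SharedLog<any>;
declare const entry: Entry<any>;

await log.unreplicate(entry); // drop the own segment covering this entry's coordinate

const ranges = await log.replicate({ factor: 0.1, offset: 0.5 });
await log.unreplicate(ranges); // drop explicit ranges previously returned by replicate()
```
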
@@ -429,14 +465,8 @@
  // TODO support this by never deleting the range with the segment id that is generated by the dynamic replication method
  throw new Error("Unsupported when adaptive replicating");
  }
- const
-
- width: 1,
- start1: range.offset,
- },
- });
- const segmentIds = (await indexed.all()).map((x) => x.id.key);
- await this.removeReplicationRange(segmentIds, this.node.identity.publicKey);
+ const rangesToRemove = await this.resolveReplicationRangesFromIdsAndKey(segmentIds, this.node.identity.publicKey);
+ await this.removeReplicationRanges(rangesToRemove, this.node.identity.publicKey);
  await this.rpc.send(new StoppedReplicating({ segmentIds }), {
  priority: 1,
  });
@@ -468,10 +498,12 @@
  throw new Error("Key was not a PublicSignKey");
  }
  }
+ const timestamp = BigInt(+new Date());
  for (const x of deleted) {
  this.replicationChangeDebounceFn.add({
  range: x.value,
  type: "removed",
+ timestamp,
  });
  }
  const pendingMaturity = this.pendingMaturity.get(keyHash);
@@ -496,7 +528,7 @@
  ? Number(oldestTimestampFromDB)
  : +new Date();
  }
- async
+ async resolveReplicationRangesFromIdsAndKey(ids, from) {
  let idMatcher = new Or(ids.map((x) => new ByteMatchQuery({ key: "id", value: x })));
  // make sure we are not removing something that is owned by the replicator
  let identityMatcher = new StringMatch({
@@ -504,9 +536,12 @@
  value: from.hashcode(),
  });
  let query = new And([idMatcher, identityMatcher]);
+ return (await this.replicationIndex.iterate({ query }).all()).map((x) => x.value);
+ }
+ async removeReplicationRanges(ranges, from) {
  const pendingMaturity = this.pendingMaturity.get(from.hashcode());
  if (pendingMaturity) {
- for (const id of
+ for (const id of ranges) {
  const info = pendingMaturity.get(id.toString());
  if (info) {
  clearTimeout(info.timeout);
@@ -517,7 +552,9 @@
  this.pendingMaturity.delete(from.hashcode());
  }
  }
- await this.replicationIndex.del({
+ await this.replicationIndex.del({
+ query: new Or(ranges.map((x) => new ByteMatchQuery({ key: "id", value: x.id }))),
+ });
  const otherSegmentsIterator = this.replicationIndex.iterate({ query: { hash: from.hashcode() } }, { shape: { id: true } });
  if ((await otherSegmentsIterator.next(1)).length === 0) {
  this.uniqueReplicators.delete(from.hashcode());
@@ -531,11 +568,12 @@
  this.rebalanceParticipationDebounced?.();
  }
  }
- async addReplicationRange(ranges, from, { reset, checkDuplicates, } = {}) {
+ async addReplicationRange(ranges, from, { reset, checkDuplicates, timestamp: ts, } = {}) {
  if (this._isTrustedReplicator && !(await this._isTrustedReplicator(from))) {
  return undefined;
  }
  let isNewReplicator = false;
+ let timestamp = BigInt(ts ?? +new Date());
  let diffs;
  let deleted = undefined;
  if (reset) {
@@ -548,10 +586,10 @@
  await this.replicationIndex.del({ query: { hash: from.hashcode() } });
  diffs = [
  ...deleted.map((x) => {
- return { range: x, type: "removed" };
+ return { range: x, type: "removed", timestamp };
  }),
  ...ranges.map((x) => {
- return { range: x, type: "added" };
+ return { range: x, type: "added", timestamp };
  }),
  ];
  isNewReplicator = prevCount === 0 && ranges.length > 0;
@@ -575,7 +613,7 @@
  let deduplicated = [];
  // TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
  for (const range of ranges) {
- if (!(await
+ if (!(await countCoveringRangesSameOwner(this.replicationIndex, range))) {
  deduplicated.push(range);
  }
  }
@@ -590,15 +628,27 @@
  const prev = existingMap.get(x.idString);
  if (prev) {
  if (prev.equalRange(x)) {
- return
+ return [];
  }
- return
+ return [
+ {
+ range: prev,
+ timestamp: x.timestamp - 1n,
+ prev,
+ type: "replaced",
+ },
+ {
+ range: x,
+ timestamp: x.timestamp,
+ type: "added",
+ },
+ ];
  }
  else {
- return { range: x, type: "added" };
+ return { range: x, timestamp: x.timestamp, type: "added" };
  }
  })
- .
+ .flat();
  diffs = changes;
  }
  this.uniqueReplicators.add(from.hashcode());
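
Taken together, the hunks above and below change the shape of the records pushed to `replicationChangeDebounceFn`: every record now carries a `timestamp`, an updated existing segment is reported as a `"replaced"` record followed by an `"added"` one, and ranges that mature later are re-emitted with `matured: true`. A type sketch inferred from this file; the actual exported type, if any, may use a different name and optionality:

```ts
// Inferred shape, not a declaration copied from the package.
type ReplicationChange<R> = {
  range: R;
  type: "added" | "removed" | "replaced";
  timestamp: bigint; // BigInt(+new Date()) or the range's own timestamp
  prev?: R;          // set when type === "replaced"
  matured?: boolean; // set when a pending range matures
};
```
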
@@ -606,7 +656,7 @@
  let minRoleAge = await this.getDefaultMinRoleAge();
  let isAllMature = true;
  for (const diff of diffs) {
- if (diff.type === "added"
+ if (diff.type === "added") {
  /* if (this.closed) {
  return;
  } */
@@ -628,7 +678,7 @@
  this.events.dispatchEvent(new CustomEvent("replicator:mature", {
  detail: { publicKey: from },
  }));
- this.replicationChangeDebounceFn.add(diff); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
+ this.replicationChangeDebounceFn.add({ ...diff, matured: true }); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
  pendingRanges.delete(diff.range.idString);
  if (pendingRanges.size === 0) {
  this.pendingMaturity.delete(diff.range.hash);
@@ -648,7 +698,7 @@
  }
  }
  }
- else {
+ else if (diff.type === "removed") {
  const pendingFromPeer = this.pendingMaturity.get(diff.range.hash);
  if (pendingFromPeer) {
  const prev = pendingFromPeer.get(diff.range.idString);
@@ -661,6 +711,7 @@
  }
  }
  }
+ // else replaced, do nothing
  }
  if (reset) {
  await this.updateOldestTimestampFromIndex();
@@ -850,14 +901,15 @@
  this._logProperties = options;
  // TODO types
  this.domain = options?.domain
- ? options.domain
- : createReplicationDomainHash(options?.compatibility && options?.compatibility < 10 ? "u32" : "u64");
+ ? options.domain(this)
+ : createReplicationDomainHash(options?.compatibility && options?.compatibility < 10 ? "u32" : "u64")(this);
  this.indexableDomain = createIndexableDomainFromResolution(this.domain.resolution);
  this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4;
  this._pendingDeletes = new Map();
  this._pendingIHave = new Map();
  this.latestReplicationInfoMessage = new Map();
  this.coordinateToHash = new Cache({ max: 1e6, ttl: 1e4 });
+ this.recentlyRebalanced = new Cache({ max: 1e4, ttl: 1e5 });
  this.uniqueReplicators = new Set();
  this.openTime = +new Date();
  this.oldestOpenTime = this.openTime;
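
`options.domain` is now treated as a factory that receives the log instance (`options.domain(this)`), and the built-in `createReplicationDomainHash(...)` fallback is invoked the same way. A sketch of what that means for callers, assuming `createReplicationDomainHash` keeps the exported signature shown in the imports above; the surrounding setup wiring is illustrative only:

```ts
import { createReplicationDomainHash } from "@peerbit/shared-log";

// 10.0.x accepted a ready-made domain object; 10.1.0 expects a factory and calls it
// with the SharedLog instance itself during setup.
const domain = createReplicationDomainHash("u64");
// Passed along as `{ domain }` in the log's setup options; internally this becomes
// `this.domain = options.domain(this)`.
```
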
@@ -1151,12 +1203,13 @@
  async getCover(args, options) {
  let roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge());
  let eager = options?.eager ?? false;
- const range = await this.domain.fromArgs(args
+ const range = await this.domain.fromArgs(args);
+ const width = range.length ??
+ (await minimumWidthToCover(this.replicas.min.getValue(this), this.indexableDomain.numbers));
  const set = await getCoverSet({
  peers: this.replicationIndex,
  start: range.offset,
- widthToCoverScaled:
- (await minimumWidthToCover(this.replicas.min.getValue(this), this.indexableDomain.numbers)),
+ widthToCoverScaled: width,
  roleAge,
  eager,
  numbers: this.indexableDomain.numbers,
@@ -1177,6 +1230,7 @@
  this.pendingMaturity.clear();
  this.distributeQueue?.clear();
  this.coordinateToHash.clear();
+ this.recentlyRebalanced.clear();
  this.uniqueReplicators.clear();
  this._closeController.abort();
  clearInterval(this.interval);
@@ -1586,7 +1640,11 @@
  if (this.closed) {
  return;
  }
- await this.addReplicationRange(replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from)), context.from, {
+ await this.addReplicationRange(replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from)), context.from, {
+ reset,
+ checkDuplicates: true,
+ timestamp: Number(context.timestamp),
+ });
  /* await this._modifyReplicators(msg.role, context.from!); */
  })
  .catch((e) => {
@@ -1607,7 +1665,16 @@
  if (context.from.equals(this.node.identity.publicKey)) {
  return;
  }
- await this.
+ const rangesToRemove = await this.resolveReplicationRangesFromIdsAndKey(msg.segmentIds, context.from);
+ await this.removeReplicationRanges(rangesToRemove, context.from);
+ const timestamp = BigInt(+new Date());
+ for (const range of rangesToRemove) {
+ this.replicationChangeDebounceFn.add({
+ range,
+ type: "removed",
+ timestamp,
+ });
+ }
  }
  else {
  throw new Error("Unexpected message");
@@ -1767,7 +1834,13 @@
  : undefined;
  const persistCoordinate = async (entry) => {
  const minReplicas = decodeReplicas(entry).getValue(this);
- await this.findLeaders(await this.createCoordinates(entry, minReplicas), entry, { persist: {} });
+ const leaders = await this.findLeaders(await this.createCoordinates(entry, minReplicas), entry, { persist: {} });
+ if (options?.replicate &&
+ typeof options.replicate !== "boolean" &&
+ options.replicate.assumeSynced) {
+ // make sure we dont start to initate syncing process outwards for this entry
+ this.addPeersToGidPeerHistory(entry.meta.gid, leaders.keys());
+ }
  };
  let entriesToPersist = [];
  let joinOptions = {
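
When entries are added with a `replicate` option object carrying the new `assumeSynced` flag, the leaders returned by `findLeaders` are pre-seeded into the gid/peer history so no outward sync is initiated for data the caller asserts is already in place. A heavily hedged sketch; the entry point (`join`) and option nesting are assumptions based on the surrounding code, and only the `assumeSynced` flag itself comes from this diff:

```ts
import type { SharedLog } from "@peerbit/shared-log";
import type { Entry } from "@peerbit/log";

// Hypothetical call shape; verify against the package's typings before relying on it.
declare const log: SharedLog<any>;
declare const heads: Entry<any>[];

await log.join(heads, { replicate: { assumeSynced: true } });
// Leaders for these heads are recorded as already holding the data, so the node
// skips initiating an outward sync for them.
```
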
@@ -2376,8 +2449,9 @@
  if (options?.clearCache) {
  this._gidPeersHistory.clear();
  }
+ const timestamp = BigInt(+new Date());
  this.onReplicationChange((await this.getAllReplicationSegments()).map((x) => {
- return { range: x, type: "added" };
+ return { range: x, type: "added", timestamp };
  }));
  }
  async waitForPruned() {
@@ -2392,11 +2466,10 @@
  return;
  }
  await this.log.trim();
- const change = mergeReplicationChanges(changeOrChanges);
  const changed = false;
  try {
  const uncheckedDeliver = new Map();
- for await (const entryReplicated of toRebalance(
+ for await (const entryReplicated of toRebalance(changeOrChanges, this.entryCoordinatesIndex, this.recentlyRebalanced)) {
  if (this.closed) {
  break;
  }
@@ -2505,7 +2578,7 @@
  // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
  dynamicRange = new this.indexableDomain.constructorRange({
  offset: dynamicRange.start1,
-
+ width: this.indexableDomain.numbers.denormalize(newFactor),
  publicKeyHash: dynamicRange.hash,
  id: dynamicRange.id,
  mode: dynamicRange.mode,
@@ -2560,7 +2633,7 @@
  if (!range) {
  range = new this.indexableDomain.constructorRange({
  offset: this.getDynamicRangeOffset(),
-
+ width: this.indexableDomain.numbers.zero,
  publicKeyHash: this.node.identity.publicKey.hashcode(),
  mode: ReplicationIntent.NonStrict,
  timestamp: BigInt(+new Date()),