@peerbit/shared-log 9.1.2 → 9.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/benchmark/get-samples.js +2 -3
  2. package/dist/benchmark/get-samples.js.map +1 -1
  3. package/dist/benchmark/index.js +4 -6
  4. package/dist/benchmark/index.js.map +1 -1
  5. package/dist/benchmark/memory/child.d.ts +2 -0
  6. package/dist/benchmark/memory/child.d.ts.map +1 -0
  7. package/dist/benchmark/memory/child.js +149 -0
  8. package/dist/benchmark/memory/child.js.map +1 -0
  9. package/dist/benchmark/memory/index.d.ts +2 -0
  10. package/dist/benchmark/memory/index.d.ts.map +1 -0
  11. package/dist/benchmark/memory/index.js +81 -0
  12. package/dist/benchmark/memory/index.js.map +1 -0
  13. package/dist/benchmark/memory/utils.d.ts +13 -0
  14. package/dist/benchmark/memory/utils.d.ts.map +1 -0
  15. package/dist/benchmark/memory/utils.js +2 -0
  16. package/dist/benchmark/memory/utils.js.map +1 -0
  17. package/dist/benchmark/replication-prune.js +27 -25
  18. package/dist/benchmark/replication-prune.js.map +1 -1
  19. package/dist/benchmark/replication.js +15 -16
  20. package/dist/benchmark/replication.js.map +1 -1
  21. package/dist/src/debounce.d.ts +25 -0
  22. package/dist/src/debounce.d.ts.map +1 -0
  23. package/dist/src/debounce.js +130 -0
  24. package/dist/src/debounce.js.map +1 -0
  25. package/dist/src/index.d.ts +55 -21
  26. package/dist/src/index.d.ts.map +1 -1
  27. package/dist/src/index.js +867 -390
  28. package/dist/src/index.js.map +1 -1
  29. package/dist/src/pid.d.ts.map +1 -1
  30. package/dist/src/pid.js +23 -21
  31. package/dist/src/pid.js.map +1 -1
  32. package/dist/src/ranges.d.ts +104 -8
  33. package/dist/src/ranges.d.ts.map +1 -1
  34. package/dist/src/ranges.js +518 -76
  35. package/dist/src/ranges.js.map +1 -1
  36. package/dist/src/replication-domain-hash.d.ts.map +1 -1
  37. package/dist/src/replication-domain-hash.js.map +1 -1
  38. package/dist/src/replication-domain-time.d.ts.map +1 -1
  39. package/dist/src/replication-domain-time.js.map +1 -1
  40. package/dist/src/replication-domain.d.ts +22 -2
  41. package/dist/src/replication-domain.d.ts.map +1 -1
  42. package/dist/src/replication-domain.js +33 -0
  43. package/dist/src/replication-domain.js.map +1 -1
  44. package/dist/src/replication.d.ts +1 -55
  45. package/dist/src/replication.d.ts.map +1 -1
  46. package/dist/src/replication.js +5 -215
  47. package/dist/src/replication.js.map +1 -1
  48. package/dist/src/role.d.ts +1 -0
  49. package/dist/src/role.d.ts.map +1 -1
  50. package/dist/src/role.js +1 -0
  51. package/dist/src/role.js.map +1 -1
  52. package/dist/src/utils.d.ts +6 -0
  53. package/dist/src/utils.d.ts.map +1 -0
  54. package/dist/src/utils.js +39 -0
  55. package/dist/src/utils.js.map +1 -0
  56. package/package.json +5 -5
  57. package/src/debounce.ts +172 -0
  58. package/src/index.ts +1282 -562
  59. package/src/pid.ts +27 -25
  60. package/src/ranges.ts +794 -181
  61. package/src/replication-domain-hash.ts +3 -1
  62. package/src/replication-domain-time.ts +2 -1
  63. package/src/replication-domain.ts +68 -5
  64. package/src/replication.ts +9 -235
  65. package/src/role.ts +1 -0
  66. package/src/utils.ts +49 -0
package/src/index.ts CHANGED
@@ -1,5 +1,4 @@
1
1
  import { BorshError, field, variant } from "@dao-xyz/borsh";
2
- import { CustomEvent } from "@libp2p/interface";
3
2
  import { AnyBlockStore, RemoteBlocks } from "@peerbit/blocks";
4
3
  import { Cache } from "@peerbit/cache";
5
4
  import {
@@ -11,14 +10,10 @@ import {
11
10
  import {
12
11
  And,
13
12
  ByteMatchQuery,
14
- CountRequest,
15
- DeleteRequest,
16
13
  type Index,
17
14
  Or,
18
- SearchRequest,
19
15
  Sort,
20
16
  StringMatch,
21
- SumRequest,
22
17
  } from "@peerbit/indexer-interface";
23
18
  import {
24
19
  type AppendOptions,
@@ -43,13 +38,22 @@ import {
43
38
  NotStartedError,
44
39
  SilentDelivery,
45
40
  } from "@peerbit/stream-interface";
46
- import { AbortError, delay, waitFor } from "@peerbit/time";
47
- import debounce from "p-debounce";
41
+ import {
42
+ AbortError,
43
+ /* delay, */
44
+ waitFor,
45
+ } from "@peerbit/time";
48
46
  import pDefer, { type DeferredPromise } from "p-defer";
49
47
  import PQueue from "p-queue";
50
48
  import { concat } from "uint8arrays";
51
49
  import { BlocksMessage } from "./blocks.js";
52
50
  import { type CPUUsage, CPUUsageIntervalLag } from "./cpu.js";
51
+ import {
52
+ type DebouncedAccumulatorMap,
53
+ debounceAcculmulator,
54
+ debounceFixedInterval,
55
+ debouncedAccumulatorMap,
56
+ } from "./debounce.js";
53
57
  import {
54
58
  EntryWithRefs,
55
59
  ExchangeHeadsMessage,
@@ -62,11 +66,18 @@ import {
62
66
  import { TransportMessage } from "./message.js";
63
67
  import { PIDReplicationController } from "./pid.js";
64
68
  import {
69
+ EntryReplicated,
70
+ ReplicationIntent,
71
+ ReplicationRange,
72
+ ReplicationRangeIndexable,
65
73
  getCoverSet,
74
+ getEvenlySpacedU32,
66
75
  getSamples,
67
76
  hasCoveringRange,
68
77
  isMatured,
69
78
  minimumWidthToCover,
79
+ shouldAssigneToRangeBoundary,
80
+ toRebalance,
70
81
  } from "./ranges.js";
71
82
  import {
72
83
  type ReplicationDomainHash,
@@ -79,7 +90,11 @@ import {
79
90
  } from "./replication-domain-time.js";
80
91
  import {
81
92
  type ExtractDomainArgs,
93
+ type ReplicationChange,
94
+ type ReplicationChanges,
82
95
  type ReplicationDomain,
96
+ debounceAggregationChanges,
97
+ mergeReplicationChanges,
83
98
  type u32,
84
99
  } from "./replication-domain.js";
85
100
  import {
@@ -87,10 +102,7 @@ import {
87
102
  AddedReplicationSegmentMessage,
88
103
  AllReplicatingSegmentsMessage,
89
104
  ReplicationError,
90
- ReplicationIntent,
91
105
  type ReplicationLimits,
92
- ReplicationRange,
93
- ReplicationRangeIndexable,
94
106
  RequestReplicationInfoMessage,
95
107
  ResponseRoleMessage,
96
108
  StoppedReplicating,
@@ -99,6 +111,7 @@ import {
99
111
  maxReplicas,
100
112
  } from "./replication.js";
101
113
  import { MAX_U32, Observer, Replicator, scaleToU32 } from "./role.js";
114
+ import { groupByGid } from "./utils.js";
102
115
 
103
116
  export {
104
117
  type ReplicationDomain,
@@ -109,7 +122,7 @@ export {
109
122
  };
110
123
  export { type CPUUsage, CPUUsageIntervalLag };
111
124
  export * from "./replication.js";
112
-
125
+ export { EntryReplicated };
113
126
  export const logger = loggerFn({ module: "shared-log" });
114
127
 
115
128
  const getLatestEntry = (
@@ -128,40 +141,20 @@ const getLatestEntry = (
128
141
  return latest;
129
142
  };
130
143
 
131
- const groupByGid = async <
132
- T extends ShallowEntry | Entry<any> | EntryWithRefs<any>,
133
- >(
134
- entries: T[],
135
- ): Promise<Map<string, T[]>> => {
136
- const groupByGid: Map<string, T[]> = new Map();
137
- for (const head of entries) {
138
- const gid = await (head instanceof Entry
139
- ? (await head.getMeta()).gid
140
- : head instanceof ShallowEntry
141
- ? head.meta.gid
142
- : (await head.entry.getMeta()).gid);
143
- let value = groupByGid.get(gid);
144
- if (!value) {
145
- value = [];
146
- groupByGid.set(gid, value);
147
- }
148
- value.push(head);
149
- }
150
- return groupByGid;
151
- };
152
-
153
144
  export type ReplicationLimitsOptions =
154
145
  | Partial<ReplicationLimits>
155
146
  | { min?: number; max?: number };
156
147
 
157
148
  export type DynamicReplicationOptions = {
158
149
  limits?: {
150
+ interval?: number;
159
151
  storage?: number;
160
152
  cpu?: number | { max: number; monitor?: CPUUsage };
161
153
  };
162
154
  };
163
155
 
164
156
  export type FixedReplicationOptions = {
157
+ id?: Uint8Array;
165
158
  normalized?: boolean;
166
159
  factor: number | "all" | "right";
167
160
  strict?: boolean; // if true, only this range will be replicated
@@ -199,12 +192,32 @@ const isUnreplicationOptions = (options?: ReplicationOptions): boolean =>
199
192
  ((options as FixedReplicationOptions)?.offset === undefined &&
200
193
  (options as FixedReplicationOptions)?.factor === 0);
201
194
 
195
+ const isReplicationOptionsDependentOnPreviousState = (
196
+ options?: ReplicationOptions,
197
+ ): boolean => {
198
+ if (options === true) {
199
+ return true;
200
+ }
201
+
202
+ if (options == null) {
203
+ // when not providing options, we assume previous behaviour
204
+ return true;
205
+ }
206
+
207
+ // if empty object but with no keys
208
+ if (typeof options === "object" && Object.keys(options).length === 0) {
209
+ return true;
210
+ }
211
+
212
+ return false;
213
+ };
214
+
202
215
  export type SharedLogOptions<T, D extends ReplicationDomain<any, T>> = {
203
216
  replicate?: ReplicationOptions;
204
217
  replicas?: ReplicationLimitsOptions;
205
218
  respondToIHaveTimeout?: number;
206
219
  canReplicate?: (publicKey: PublicSignKey) => Promise<boolean> | boolean;
207
- sync?: (entry: Entry<T> | ShallowEntry) => boolean;
220
+ sync?: (entry: ShallowOrFullEntry<T> | EntryReplicated) => boolean;
208
221
  timeUntilRoleMaturity?: number;
209
222
  waitForReplicatorTimeout?: number;
210
223
  distributionDebounceTime?: number;
@@ -215,9 +228,27 @@ export type SharedLogOptions<T, D extends ReplicationDomain<any, T>> = {
215
228
  export const DEFAULT_MIN_REPLICAS = 2;
216
229
  export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000;
217
230
  export const WAIT_FOR_ROLE_MATURITY = 5000;
218
- const REBALANCE_DEBOUNCE_INTERVAL = 100;
231
+ const PRUNE_DEBOUNCE_INTERVAL = 500;
232
+
233
+ // DONT SET THIS ANY LOWER, because it will make the pid controller unstable as the system responses are not fast enough to updates from the pid controller
234
+ const RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL = 1000;
235
+
219
236
  const DEFAULT_DISTRIBUTION_DEBOUNCE_TIME = 500;
220
237
 
238
+ const getIdForDynamicRange = (publicKey: PublicSignKey) => {
239
+ return sha256Sync(
240
+ concat([publicKey.bytes, new TextEncoder().encode("dynamic")]),
241
+ );
242
+ };
243
+
244
+ const checkMinReplicasLimit = (minReplicas: number) => {
245
+ if (minReplicas > 100) {
246
+ throw new Error(
247
+ "Higher replication degree than 100 is not recommended for performance reasons",
248
+ );
249
+ }
250
+ };
251
+
221
252
  export type Args<
222
253
  T,
223
254
  D extends ReplicationDomain<any, T> = ReplicationDomainHash,
@@ -226,17 +257,19 @@ export type Args<
226
257
  export type SharedAppendOptions<T> = AppendOptions<T> & {
227
258
  replicas?: AbsoluteReplicas | number;
228
259
  replicate?: boolean;
229
- target?: "all" | "replicators";
260
+ target?: "all" | "replicators" | "none";
230
261
  };
231
262
 
232
263
  type ReplicatorJoinEvent = { publicKey: PublicSignKey };
233
264
  type ReplicatorLeaveEvent = { publicKey: PublicSignKey };
234
- type ReplicationChange = { publicKey: PublicSignKey };
265
+ type ReplicationChangeEvent = { publicKey: PublicSignKey };
266
+ type ReplicatorMatureEvent = { publicKey: PublicSignKey };
235
267
 
236
268
  export interface SharedLogEvents extends ProgramEvents {
237
269
  "replicator:join": CustomEvent<ReplicatorJoinEvent>;
238
270
  "replicator:leave": CustomEvent<ReplicatorLeaveEvent>;
239
- "replication:change": CustomEvent<ReplicationChange>;
271
+ "replication:change": CustomEvent<ReplicationChangeEvent>;
272
+ "replicator:mature": CustomEvent<ReplicatorMatureEvent>;
240
273
  }
241
274
 
242
275
  @variant("shared_log")
@@ -251,10 +284,12 @@ export class SharedLog<
251
284
  rpc: RPC<TransportMessage, TransportMessage>;
252
285
 
253
286
  // options
254
- private _isReplicating: boolean;
255
- private _isAdaptiveReplicating: boolean;
287
+ private _isReplicating!: boolean;
288
+ private _isAdaptiveReplicating!: boolean;
256
289
 
257
290
  private _replicationRangeIndex!: Index<ReplicationRangeIndexable>;
291
+ private _entryCoordinatesIndex!: Index<EntryReplicated>;
292
+
258
293
  /* private _totalParticipation!: number; */
259
294
  private _gidPeersHistory!: Map<string, Set<string>>;
260
295
 
@@ -283,9 +318,23 @@ export class SharedLog<
283
318
 
284
319
  private _pendingIHave!: Map<
285
320
  string,
286
- { clear: () => void; callback: (entry: Entry<T>) => void }
321
+ {
322
+ resetTimeout: () => void;
323
+ requesting: Set<string>;
324
+ clear: () => void;
325
+ callback: (entry: Entry<T>) => void;
326
+ }
287
327
  >;
288
328
 
329
+ private pendingMaturity!: Map<
330
+ string,
331
+ {
332
+ timestamp: bigint;
333
+ ranges: Map<string, ReplicationChange>;
334
+ timeout: ReturnType<typeof setTimeout>;
335
+ }
336
+ >; // map of peerId to timeout
337
+
289
338
  private latestReplicationInfoMessage!: Map<string, bigint>;
290
339
 
291
340
  private remoteBlocks!: RemoteBlocks;
@@ -293,15 +342,32 @@ export class SharedLog<
293
342
  private openTime!: number;
294
343
  private oldestOpenTime!: number;
295
344
 
296
- private sync?: (entry: Entry<T> | ShallowEntry) => boolean;
345
+ private sync?: (entry: ShallowOrFullEntry<T> | EntryReplicated) => boolean;
297
346
 
298
347
  // A fn that we can call many times that recalculates the participation role
299
348
  private rebalanceParticipationDebounced:
300
- | ReturnType<typeof debounce>
349
+ | ReturnType<typeof debounceFixedInterval>
301
350
  | undefined;
302
351
 
352
+ // A fn for debouncing the calls for pruning
353
+ pruneDebouncedFn!: DebouncedAccumulatorMap<
354
+ Entry<T> | ShallowEntry | EntryReplicated
355
+ >;
356
+ private responseToPruneDebouncedFn!: ReturnType<
357
+ typeof debounceAcculmulator<
358
+ string,
359
+ {
360
+ hashes: string[];
361
+ peers: string[] | Set<string>;
362
+ },
363
+ Map<string, Set<string>>
364
+ >
365
+ >;
366
+ private replicationChangeDebounceFn!: ReturnType<
367
+ typeof debounceAggregationChanges
368
+ >;
369
+
303
370
  // regular distribution checks
304
- private distributeInterval!: ReturnType<typeof setInterval>;
305
371
  private distributeQueue?: PQueue;
306
372
 
307
373
  // Syncing and dedeplucation work
@@ -324,9 +390,8 @@ export class SharedLog<
324
390
 
325
391
  replicationController!: PIDReplicationController;
326
392
  history!: { usedMemory: number; factor: number }[];
327
- domain: D;
328
-
329
- private pq: PQueue<any>;
393
+ domain!: D;
394
+ interval: any;
330
395
 
331
396
  constructor(properties?: { id?: Uint8Array }) {
332
397
  super();
@@ -367,6 +432,7 @@ export class SharedLog<
367
432
  if (!this._isReplicating) {
368
433
  return false;
369
434
  }
435
+
370
436
  /*
371
437
  if (isAdaptiveReplicatorOption(this._replicationSettings)) {
372
438
  return true;
@@ -384,27 +450,35 @@ export class SharedLog<
384
450
  } */
385
451
 
386
452
  async calculateTotalParticipation() {
387
- const sum = await this.replicationIndex.sum(
388
- new SumRequest({ key: "width" }),
389
- );
453
+ const sum = await this.replicationIndex.sum({ key: "width" });
390
454
  return Number(sum) / MAX_U32;
391
455
  }
392
456
 
393
457
  async countReplicationSegments() {
394
- const count = await this.replicationIndex.count(
395
- new CountRequest({
396
- query: new StringMatch({
397
- key: "hash",
398
- value: this.node.identity.publicKey.hashcode(),
399
- }),
458
+ const count = await this.replicationIndex.count({
459
+ query: new StringMatch({
460
+ key: "hash",
461
+ value: this.node.identity.publicKey.hashcode(),
400
462
  }),
401
- );
463
+ });
402
464
  return count;
403
465
  }
404
466
 
405
- private setupRebalanceDebounceFunction() {
467
+ private setupRebalanceDebounceFunction(
468
+ interval = RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL,
469
+ ) {
406
470
  this.rebalanceParticipationDebounced = undefined;
407
- this.rebalanceParticipationDebounced = debounce(
471
+
472
+ // make the rebalancing to respect warmup time
473
+ let intervalTime = interval * 2;
474
+ let timeout = setTimeout(() => {
475
+ intervalTime = interval;
476
+ }, this.timeUntilRoleMaturity);
477
+ this._closeController.signal.addEventListener("abort", () => {
478
+ clearTimeout(timeout);
479
+ });
480
+
481
+ this.rebalanceParticipationDebounced = debounceFixedInterval(
408
482
  () => this.rebalanceParticipation(),
409
483
  /* Math.max(
410
484
  REBALANCE_DEBOUNCE_INTERVAL,
@@ -413,9 +487,10 @@ export class SharedLog<
413
487
  REBALANCE_DEBOUNCE_INTERVAL
414
488
  )
415
489
  ) */
416
- REBALANCE_DEBOUNCE_INTERVAL, // TODO make this dynamic on the number of replicators
490
+ () => intervalTime, // TODO make this dynamic on the number of replicators
417
491
  );
418
492
  }
493
+
419
494
  private async _replicate(
420
495
  options?: ReplicationOptions,
421
496
  {
@@ -493,6 +568,7 @@ export class SharedLog<
493
568
  let width = normalized ? 1 : scaleToU32(1);
494
569
  ranges.push(
495
570
  new ReplicationRangeIndexable({
571
+ id: rangeArg.id,
496
572
  normalized,
497
573
  offset: offset,
498
574
  length:
@@ -530,11 +606,14 @@ export class SharedLog<
530
606
  checkDuplicates,
531
607
  announce,
532
608
  });
609
+
610
+ return ranges;
533
611
  }
534
612
  }
535
613
 
536
614
  setupDebouncedRebalancing(options?: DynamicReplicationOptions) {
537
615
  this.cpuUsage?.stop?.();
616
+
538
617
  this.replicationController = new PIDReplicationController(
539
618
  this.node.identity.publicKey.hashcode(),
540
619
  {
@@ -559,15 +638,11 @@ export class SharedLog<
559
638
  ? options?.limits?.cpu?.monitor || new CPUUsageIntervalLag()
560
639
  : new CPUUsageIntervalLag();
561
640
  this.cpuUsage?.start?.();
562
- this.setupRebalanceDebounceFunction();
641
+ this.setupRebalanceDebounceFunction(options?.limits?.interval);
563
642
  }
564
643
 
565
644
  async replicate(
566
- rangeOrEntry?:
567
- | ReplicationRange
568
- | ReplicationOptions
569
- | Entry<T>
570
- | Entry<T>[],
645
+ rangeOrEntry?: ReplicationOptions | Entry<T> | Entry<T>[],
571
646
  options?: {
572
647
  reset?: boolean;
573
648
  checkDuplicates?: boolean;
@@ -604,12 +679,7 @@ export class SharedLog<
604
679
  range = rangeOrEntry ?? true;
605
680
  }
606
681
 
607
- const newRanges = await this._replicate(range, options);
608
-
609
- // assume new role
610
- await this.distribute();
611
-
612
- return newRanges;
682
+ return this._replicate(range, options);
613
683
  }
614
684
 
615
685
  async unreplicate(rangeOrEntry?: Entry<T> | ReplicationRange) {
@@ -634,16 +704,14 @@ export class SharedLog<
634
704
  throw new Error("Unsupported when adaptive replicating");
635
705
  }
636
706
 
637
- const indexed = await this.replicationIndex.query(
638
- new SearchRequest({
639
- query: {
640
- width: 1,
641
- start1: range.offset,
642
- },
643
- }),
644
- );
707
+ const indexed = this.replicationIndex.iterate({
708
+ query: {
709
+ width: 1,
710
+ start1: range.offset,
711
+ },
712
+ });
645
713
 
646
- const segmentIds = indexed.results.map((x) => x.id.key as Uint8Array);
714
+ const segmentIds = (await indexed.all()).map((x) => x.id.key as Uint8Array);
647
715
  await this.removeReplicationRange(segmentIds, this.node.identity.publicKey);
648
716
  await this.rpc.send(new StoppedReplicating({ segmentIds }), {
649
717
  priority: 1,
@@ -652,14 +720,19 @@ export class SharedLog<
652
720
 
653
721
  private async removeReplicator(key: PublicSignKey) {
654
722
  const fn = async () => {
655
- await this.replicationIndex.del(
656
- new DeleteRequest({ query: { hash: key.hashcode() } }),
657
- );
723
+ const deleted = await this.replicationIndex
724
+ .iterate({
725
+ query: { hash: key.hashcode() },
726
+ })
727
+ .all();
728
+
729
+ await this.replicationIndex.del({ query: { hash: key.hashcode() } });
658
730
 
659
731
  await this.updateOldestTimestampFromIndex();
660
732
 
661
733
  if (this.node.identity.publicKey.equals(key)) {
662
734
  // announce that we are no longer replicating
735
+
663
736
  await this.rpc.send(
664
737
  new AllReplicatingSegmentsMessage({ segments: [] }),
665
738
  { priority: 1 },
@@ -667,39 +740,52 @@ export class SharedLog<
667
740
  }
668
741
 
669
742
  this.events.dispatchEvent(
670
- new CustomEvent<ReplicationChange>("replication:change", {
743
+ new CustomEvent<ReplicationChangeEvent>("replication:change", {
671
744
  detail: { publicKey: key },
672
745
  }),
673
746
  );
674
747
 
748
+ deleted.forEach((x) => {
749
+ return this.replicationChangeDebounceFn.add({
750
+ range: x.value,
751
+ type: "removed",
752
+ });
753
+ });
754
+
755
+ const pendingMaturity = this.pendingMaturity.get(key.hashcode());
756
+ if (pendingMaturity) {
757
+ clearTimeout(pendingMaturity.timeout);
758
+ this.pendingMaturity.delete(key.hashcode());
759
+ }
760
+
675
761
  if (!key.equals(this.node.identity.publicKey)) {
676
762
  this.rebalanceParticipationDebounced?.();
677
763
  }
678
764
  };
679
765
 
680
- return this.pq.add(fn);
766
+ return fn();
681
767
  }
682
768
 
683
769
  private async updateOldestTimestampFromIndex() {
684
- const oldestTimestampFromDB = (
685
- await this.replicationIndex.query(
686
- new SearchRequest({
687
- fetch: 1,
688
- sort: [new Sort({ key: "timestamp", direction: "asc" })],
689
- }),
690
- { reference: true },
691
- )
692
- ).results[0]?.value.timestamp;
770
+ const iterator = await this.replicationIndex.iterate(
771
+ {
772
+ sort: [new Sort({ key: "timestamp", direction: "asc" })],
773
+ },
774
+ { reference: true },
775
+ );
776
+ const oldestTimestampFromDB = (await iterator.next(1))[0]?.value.timestamp;
777
+ await iterator.close();
778
+
693
779
  this.oldestOpenTime =
694
780
  oldestTimestampFromDB != null
695
781
  ? Number(oldestTimestampFromDB)
696
782
  : +new Date();
697
783
  }
698
784
 
699
- private async removeReplicationRange(id: Uint8Array[], from: PublicSignKey) {
785
+ private async removeReplicationRange(ids: Uint8Array[], from: PublicSignKey) {
700
786
  const fn = async () => {
701
787
  let idMatcher = new Or(
702
- id.map((x) => new ByteMatchQuery({ key: "id", value: x })),
788
+ ids.map((x) => new ByteMatchQuery({ key: "id", value: x })),
703
789
  );
704
790
 
705
791
  // make sure we are not removing something that is owned by the replicator
@@ -710,12 +796,23 @@ export class SharedLog<
710
796
 
711
797
  let query = new And([idMatcher, identityMatcher]);
712
798
 
713
- await this.replicationIndex.del(new DeleteRequest({ query }));
799
+ const pendingMaturity = this.pendingMaturity.get(from.hashcode());
800
+ if (pendingMaturity) {
801
+ for (const id of ids) {
802
+ pendingMaturity.ranges.delete(id.toString());
803
+ }
804
+ if (pendingMaturity.ranges.size === 0) {
805
+ clearTimeout(pendingMaturity.timeout);
806
+ this.pendingMaturity.delete(from.hashcode());
807
+ }
808
+ }
809
+
810
+ await this.replicationIndex.del({ query });
714
811
 
715
812
  await this.updateOldestTimestampFromIndex();
716
813
 
717
814
  this.events.dispatchEvent(
718
- new CustomEvent<ReplicationChange>("replication:change", {
815
+ new CustomEvent<ReplicationChangeEvent>("replication:change", {
719
816
  detail: { publicKey: from },
720
817
  }),
721
818
  );
@@ -725,7 +822,7 @@ export class SharedLog<
725
822
  }
726
823
  };
727
824
 
728
- return this.pq.add(fn);
825
+ return fn();
729
826
  }
730
827
 
731
828
  private async addReplicationRange(
@@ -741,39 +838,164 @@ export class SharedLog<
741
838
  this._isTrustedReplicator &&
742
839
  !(await this._isTrustedReplicator(from))
743
840
  ) {
744
- return false;
841
+ return undefined;
745
842
  }
746
843
 
747
- let prevCount = await this.replicationIndex.count(
748
- new CountRequest({
749
- query: new StringMatch({ key: "hash", value: from.hashcode() }),
750
- }),
751
- );
752
- const isNewReplicator = prevCount === 0;
844
+ let isNewReplicator = false;
753
845
 
846
+ let diffs: ReplicationChanges;
847
+ let deleted: ReplicationRangeIndexable[] | undefined = undefined;
754
848
  if (reset) {
755
- await this.replicationIndex.del(
756
- new DeleteRequest({ query: { hash: from.hashcode() } }),
757
- );
758
- } else if (checkDuplicates) {
759
- let deduplicated: any[] = [];
849
+ deleted = (
850
+ await this.replicationIndex
851
+ .iterate({
852
+ query: { hash: from.hashcode() },
853
+ })
854
+ .all()
855
+ ).map((x) => x.value);
856
+ await this.replicationIndex.del({ query: { hash: from.hashcode() } });
857
+
858
+ diffs = [
859
+ ...deleted.map((x) => {
860
+ return { range: x, type: "removed" as const };
861
+ }),
862
+ ...ranges.map((x) => {
863
+ return { range: x, type: "added" as const };
864
+ }),
865
+ ];
866
+
867
+ let prevCount = await this.replicationIndex.count({
868
+ query: new StringMatch({ key: "hash", value: from.hashcode() }),
869
+ });
870
+ isNewReplicator = prevCount === 0;
871
+ } else {
872
+ let existing = await this.replicationIndex
873
+ .iterate(
874
+ {
875
+ query: ranges.map(
876
+ (x) => new ByteMatchQuery({ key: "id", value: x.id }),
877
+ ),
878
+ },
879
+ { reference: true },
880
+ )
881
+ .all();
882
+ if (existing.length === 0) {
883
+ let prevCount = await this.replicationIndex.count({
884
+ query: new StringMatch({ key: "hash", value: from.hashcode() }),
885
+ });
886
+ isNewReplicator = prevCount === 0;
887
+ } else {
888
+ isNewReplicator = false;
889
+ }
760
890
 
761
- // TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
762
- for (const range of ranges) {
763
- if (!(await hasCoveringRange(this.replicationIndex, range))) {
764
- deduplicated.push(range);
891
+ if (checkDuplicates) {
892
+ let deduplicated: ReplicationRangeIndexable[] = [];
893
+
894
+ // TODO also deduplicate/de-overlap among the ranges that ought to be inserted?
895
+ for (const range of ranges) {
896
+ if (!(await hasCoveringRange(this.replicationIndex, range))) {
897
+ deduplicated.push(range);
898
+ }
765
899
  }
900
+ ranges = deduplicated;
766
901
  }
767
- ranges = deduplicated;
902
+ let existingMap = new Map<string, ReplicationRangeIndexable>();
903
+ for (const result of existing) {
904
+ existingMap.set(result.value.idString, result.value);
905
+ }
906
+
907
+ let changes: ReplicationChanges = ranges
908
+ .map((x) => {
909
+ const prev = existingMap.get(x.idString);
910
+ if (prev) {
911
+ if (prev.equalRange(x)) {
912
+ return undefined;
913
+ }
914
+ return { range: x, prev, type: "updated" };
915
+ } else {
916
+ return { range: x, type: "added" };
917
+ }
918
+ })
919
+ .filter((x) => x != null) as ReplicationChanges;
920
+ diffs = changes;
768
921
  }
769
922
 
770
- for (const range of ranges) {
771
- await this.replicationIndex.put(range);
772
- if (!reset) {
773
- this.oldestOpenTime = Math.min(
774
- Number(range.timestamp),
775
- this.oldestOpenTime,
776
- );
923
+ let now = +new Date();
924
+ let minRoleAge = await this.getDefaultMinRoleAge();
925
+ let isAllMature = true;
926
+
927
+ for (const diff of diffs) {
928
+ if (diff.type === "added" || diff.type === "updated") {
929
+ await this.replicationIndex.put(diff.range);
930
+ if (!reset) {
931
+ this.oldestOpenTime = Math.min(
932
+ Number(diff.range.timestamp),
933
+ this.oldestOpenTime,
934
+ );
935
+ }
936
+
937
+ const isMature = isMatured(diff.range, now, minRoleAge);
938
+
939
+ if (
940
+ !isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */
941
+ ) {
942
+ // second condition is to avoid the case where we are adding a range that we own
943
+ isAllMature = false;
944
+ let prevPendingMaturity = this.pendingMaturity.get(diff.range.hash);
945
+ let map: Map<string, ReplicationChange>;
946
+ let waitForMaturityTime = Math.max(
947
+ minRoleAge - (now - Number(diff.range.timestamp)),
948
+ 0,
949
+ );
950
+
951
+ if (prevPendingMaturity) {
952
+ map = prevPendingMaturity.ranges;
953
+ if (prevPendingMaturity.timestamp < diff.range.timestamp) {
954
+ // something has changed so we need to reset the timeout
955
+ clearTimeout(prevPendingMaturity.timeout);
956
+ prevPendingMaturity.timestamp = diff.range.timestamp;
957
+ prevPendingMaturity.timeout = setTimeout(() => {
958
+ this.events.dispatchEvent(
959
+ new CustomEvent<ReplicationChangeEvent>(
960
+ "replicator:mature",
961
+ {
962
+ detail: { publicKey: from },
963
+ },
964
+ ),
965
+ );
966
+ for (const value of map.values()) {
967
+ this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
968
+ }
969
+ }, waitForMaturityTime);
970
+ }
971
+ } else {
972
+ map = new Map();
973
+ this.pendingMaturity.set(diff.range.hash, {
974
+ timestamp: diff.range.timestamp,
975
+ ranges: map,
976
+ timeout: setTimeout(() => {
977
+ this.events.dispatchEvent(
978
+ new CustomEvent<ReplicationChangeEvent>(
979
+ "replicator:mature",
980
+ {
981
+ detail: { publicKey: from },
982
+ },
983
+ ),
984
+ );
985
+ for (const value of map.values()) {
986
+ this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable!
987
+ }
988
+ }, waitForMaturityTime),
989
+ });
990
+ }
991
+
992
+ map.set(diff.range.idString, diff);
993
+ }
994
+ } else {
995
+ const prev = this.pendingMaturity.get(diff.range.hash);
996
+ if (prev) {
997
+ prev.ranges.delete(diff.range.idString);
998
+ }
777
999
  }
778
1000
  }
779
1001
 
@@ -782,7 +1004,7 @@ export class SharedLog<
782
1004
  }
783
1005
 
784
1006
  this.events.dispatchEvent(
785
- new CustomEvent<ReplicationChange>("replication:change", {
1007
+ new CustomEvent<ReplicationChangeEvent>("replication:change", {
786
1008
  detail: { publicKey: from },
787
1009
  }),
788
1010
  );
@@ -793,17 +1015,33 @@ export class SharedLog<
793
1015
  detail: { publicKey: from },
794
1016
  }),
795
1017
  );
1018
+
1019
+ if (isAllMature) {
1020
+ this.events.dispatchEvent(
1021
+ new CustomEvent<ReplicatorMatureEvent>("replicator:mature", {
1022
+ detail: { publicKey: from },
1023
+ }),
1024
+ );
1025
+ }
1026
+ }
1027
+
1028
+ if (ranges.length === 0 && deleted?.length === 0) {
1029
+ throw new Error("Unexpected");
796
1030
  }
797
1031
 
1032
+ diffs.length > 0 &&
1033
+ diffs.map((x) => this.replicationChangeDebounceFn.add(x));
1034
+
798
1035
  if (!from.equals(this.node.identity.publicKey)) {
799
1036
  this.rebalanceParticipationDebounced?.();
800
1037
  }
801
- return true;
1038
+
1039
+ return diffs;
802
1040
  };
803
1041
 
804
1042
  // we sequialize this because we are going to queries to check wether to add or not
805
1043
  // if two processes do the same this both process might add a range while only one in practice should
806
- return this.pq.add(fn);
1044
+ return fn();
807
1045
  }
808
1046
 
809
1047
  async startAnnounceReplicating(
@@ -816,17 +1054,19 @@ export class SharedLog<
816
1054
  ) => void;
817
1055
  } = {},
818
1056
  ) {
819
- const added = await this.addReplicationRange(
1057
+ const change = await this.addReplicationRange(
820
1058
  range,
821
1059
  this.node.identity.publicKey,
822
1060
  options,
823
1061
  );
824
- if (!added) {
1062
+
1063
+ if (!change) {
825
1064
  logger.warn("Not allowed to replicate by canReplicate");
826
1065
  }
827
1066
 
828
1067
  let message: AllReplicatingSegmentsMessage | AddedReplicationSegmentMessage;
829
- if (added) {
1068
+
1069
+ if (change) {
830
1070
  if (options.reset) {
831
1071
  message = new AllReplicatingSegmentsMessage({
832
1072
  segments: range.map((x) => x.toReplicationRange()),
@@ -855,13 +1095,15 @@ export class SharedLog<
855
1095
  removed: ShallowOrFullEntry<T>[];
856
1096
  }> {
857
1097
  const appendOptions: AppendOptions<T> = { ...options };
858
- const minReplicasData = encodeReplicas(
859
- options?.replicas
860
- ? typeof options.replicas === "number"
861
- ? new AbsoluteReplicas(options.replicas)
862
- : options.replicas
863
- : this.replicas.min,
864
- );
1098
+ const minReplicas = options?.replicas
1099
+ ? typeof options.replicas === "number"
1100
+ ? new AbsoluteReplicas(options.replicas)
1101
+ : options.replicas
1102
+ : this.replicas.min;
1103
+ const minReplicasValue = minReplicas.getValue(this);
1104
+ const minReplicasData = encodeReplicas(minReplicas);
1105
+
1106
+ checkMinReplicasLimit(minReplicasValue);
865
1107
 
866
1108
  if (!appendOptions.meta) {
867
1109
  appendOptions.meta = {
@@ -893,53 +1135,73 @@ export class SharedLog<
893
1135
  await this.replicate(result.entry, { checkDuplicates: true });
894
1136
  }
895
1137
 
896
- for (const message of await createExchangeHeadsMessages(
897
- this.log,
898
- [result.entry],
899
- this._gidParentCache,
900
- )) {
901
- if (options?.target === "replicators" || !options?.target) {
902
- const minReplicas = decodeReplicas(result.entry).getValue(this);
903
-
904
- let leaders: string[] | Set<string> = await this.findLeaders(
905
- result.entry,
906
- minReplicas,
907
- );
908
-
909
- const isLeader = leaders.includes(
910
- this.node.identity.publicKey.hashcode(),
911
- );
1138
+ let { leaders, isLeader } = await this.findLeadersPersist(
1139
+ {
1140
+ entry: result.entry,
1141
+ minReplicas: minReplicas.getValue(this),
1142
+ },
1143
+ result.entry,
1144
+ { persist: {} },
1145
+ );
912
1146
 
913
- if (message.heads[0].gidRefrences.length > 0) {
914
- const newAndOldLeaders = new Set(leaders);
915
- for (const ref of message.heads[0].gidRefrences) {
916
- const entryFromGid = this.log.entryIndex.getHeads(ref, false);
917
- for (const entry of await entryFromGid.all()) {
918
- for (const hash of await this.findLeaders(entry, minReplicas)) {
919
- newAndOldLeaders.add(hash);
1147
+ // --------------
1148
+
1149
+ if (options?.target !== "none") {
1150
+ for (const message of await createExchangeHeadsMessages(
1151
+ this.log,
1152
+ [result.entry],
1153
+ this._gidParentCache,
1154
+ )) {
1155
+ if (options?.target === "replicators" || !options?.target) {
1156
+ if (message.heads[0].gidRefrences.length > 0) {
1157
+ const newAndOldLeaders = new Map(leaders);
1158
+ for (const ref of message.heads[0].gidRefrences) {
1159
+ const entryFromGid = this.log.entryIndex.getHeads(ref, false);
1160
+ for (const entry of await entryFromGid.all()) {
1161
+ let coordinate = await this.getCoordinates(entry);
1162
+ if (coordinate == null) {
1163
+ coordinate = await this.createCoordinates(
1164
+ entry,
1165
+ minReplicasValue,
1166
+ );
1167
+ // TODO are we every to come here?
1168
+ }
1169
+ for (const [hash, features] of await this.findLeaders(
1170
+ coordinate,
1171
+ )) {
1172
+ newAndOldLeaders.set(hash, features);
1173
+ }
920
1174
  }
921
1175
  }
1176
+ leaders = newAndOldLeaders;
922
1177
  }
923
- leaders = newAndOldLeaders;
924
- }
925
1178
 
926
- let set = this._gidPeersHistory.get(result.entry.meta.gid);
927
- if (!set) {
928
- set = new Set(leaders);
929
- this._gidPeersHistory.set(result.entry.meta.gid, set);
930
- } else {
931
- for (const receiver of leaders) {
932
- set.add(receiver);
1179
+ let set = this._gidPeersHistory.get(result.entry.meta.gid);
1180
+ if (!set) {
1181
+ set = new Set(leaders.keys());
1182
+ this._gidPeersHistory.set(result.entry.meta.gid, set);
1183
+ } else {
1184
+ for (const [receiver, _features] of leaders) {
1185
+ set.add(receiver);
1186
+ }
933
1187
  }
1188
+
1189
+ mode = isLeader
1190
+ ? new SilentDelivery({ redundancy: 1, to: leaders.keys() })
1191
+ : new AcknowledgeDelivery({ redundancy: 1, to: leaders.keys() });
934
1192
  }
935
- mode = isLeader
936
- ? new SilentDelivery({ redundancy: 1, to: leaders })
937
- : new AcknowledgeDelivery({ redundancy: 1, to: leaders });
1193
+
1194
+ // TODO add options for waiting ?
1195
+ this.rpc.send(message, {
1196
+ mode,
1197
+ });
938
1198
  }
1199
+ }
939
1200
 
940
- // TODO add options for waiting ?
941
- this.rpc.send(message, {
942
- mode,
1201
+ if (!isLeader) {
1202
+ this.pruneDebouncedFn.add({
1203
+ key: result.entry.hash,
1204
+ value: result.entry,
943
1205
  });
944
1206
  }
945
1207
  this.rebalanceParticipationDebounced?.();
@@ -961,7 +1223,7 @@ export class SharedLog<
961
1223
  : undefined,
962
1224
  };
963
1225
  this.domain = options?.domain ?? (createReplicationDomainHash() as D);
964
- this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 10 * 1000; // TODO make into arg
1226
+ this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4;
965
1227
  this._pendingDeletes = new Map();
966
1228
  this._pendingIHave = new Map();
967
1229
  this.latestReplicationInfoMessage = new Map();
@@ -981,7 +1243,7 @@ export class SharedLog<
981
1243
  this._isTrustedReplicator = options?.canReplicate;
982
1244
  this.sync = options?.sync;
983
1245
  this._logProperties = options;
984
- this.pq = new PQueue({ concurrency: 1000 });
1246
+ this.pendingMaturity = new Map();
985
1247
 
986
1248
  const id = sha256Base64Sync(this.log.id);
987
1249
  const storage = await this.node.storage.sublevel(id);
@@ -1009,22 +1271,103 @@ export class SharedLog<
1009
1271
  schema: ReplicationRangeIndexable,
1010
1272
  });
1011
1273
 
1274
+ this._entryCoordinatesIndex = await replicationIndex.init({
1275
+ schema: EntryReplicated,
1276
+ });
1277
+
1012
1278
  const logIndex = await logScope.scope("log");
1013
1279
 
1014
1280
  await this.node.indexer.start(); // TODO why do we need to start the indexer here?
1015
1281
 
1016
1282
  const hasIndexedReplicationInfo =
1017
- (await this.replicationIndex.getSize()) > 0;
1283
+ (await this.replicationIndex.count({
1284
+ query: [
1285
+ new StringMatch({
1286
+ key: "hash",
1287
+ value: this.node.identity.publicKey.hashcode(),
1288
+ }),
1289
+ ],
1290
+ })) > 0;
1018
1291
 
1019
1292
  /* this._totalParticipation = await this.calculateTotalParticipation(); */
1020
1293
 
1021
1294
  this._gidPeersHistory = new Map();
1022
1295
 
1296
+ this.replicationChangeDebounceFn = debounceAggregationChanges(
1297
+ (change) =>
1298
+ this.onReplicationChange(change).then(() =>
1299
+ this.rebalanceParticipationDebounced?.(),
1300
+ ),
1301
+ this.distributionDebounceTime,
1302
+ );
1303
+
1304
+ this.pruneDebouncedFn = debouncedAccumulatorMap(
1305
+ (map) => {
1306
+ this.prune(map);
1307
+ },
1308
+ PRUNE_DEBOUNCE_INTERVAL, // TODO make this dynamic on the number of replicators
1309
+ );
1310
+
1311
+ this.responseToPruneDebouncedFn = debounceAcculmulator<
1312
+ string,
1313
+ {
1314
+ hashes: string[];
1315
+ peers: string[] | Set<string>;
1316
+ },
1317
+ Map<string, Set<string>>
1318
+ >(
1319
+ (result) => {
1320
+ let allRequestingPeers = new Set<string>();
1321
+ let hashes: string[] = [];
1322
+ for (const [hash, requestingPeers] of result) {
1323
+ for (const peer of requestingPeers) {
1324
+ allRequestingPeers.add(peer);
1325
+ }
1326
+ hashes.push(hash);
1327
+ }
1328
+ hashes.length > 0 &&
1329
+ this.rpc.send(new ResponseIPrune({ hashes }), {
1330
+ mode: new SilentDelivery({
1331
+ to: allRequestingPeers,
1332
+ redundancy: 1,
1333
+ }),
1334
+ priority: 1,
1335
+ });
1336
+ },
1337
+ () => {
1338
+ let accumulator = new Map<string, Set<string>>();
1339
+ return {
1340
+ add: (props: { hashes: string[]; peers: string[] | Set<string> }) => {
1341
+ for (const hash of props.hashes) {
1342
+ let prev = accumulator.get(hash);
1343
+ if (!prev) {
1344
+ prev = new Set<string>();
1345
+ accumulator.set(hash, prev);
1346
+ }
1347
+ for (const peer of props.peers) {
1348
+ prev.add(peer);
1349
+ }
1350
+ }
1351
+ },
1352
+ delete: (hash: string) => {
1353
+ accumulator.delete(hash);
1354
+ },
1355
+ finalize: () => {
1356
+ return undefined as any;
1357
+ },
1358
+ size: () => accumulator.size,
1359
+ clear: () => accumulator.clear(),
1360
+ value: accumulator,
1361
+ };
1362
+ },
1363
+ PRUNE_DEBOUNCE_INTERVAL, // TODO make this dynamic on the number of replicators
1364
+ );
1365
+
1023
1366
  await this.log.open(this.remoteBlocks, this.node.identity, {
1024
1367
  keychain: this.node.services.keychain,
1025
1368
  ...this._logProperties,
1026
- onChange: (change) => {
1027
- this.onChange(change);
1369
+ onChange: async (change) => {
1370
+ await this.onChange(change);
1028
1371
  return this._logProperties?.onChange?.(change);
1029
1372
  },
1030
1373
  canAppend: async (entry) => {
@@ -1043,7 +1386,7 @@ export class SharedLog<
1043
1386
  await this.rpc.open({
1044
1387
  queryType: TransportMessage,
1045
1388
  responseType: TransportMessage,
1046
- responseHandler: this._onMessage.bind(this),
1389
+ responseHandler: (query, context) => this._onMessage(query, context),
1047
1390
  topic: this.topic,
1048
1391
  });
1049
1392
 
@@ -1063,15 +1406,6 @@ export class SharedLog<
1063
1406
 
1064
1407
  await this.rpc.subscribe();
1065
1408
 
1066
- // await this.log.load();
1067
-
1068
- // TODO (do better)
1069
- // we do this distribution interval to eliminate the sideeffects arriving from updating roles and joining entries continously.
1070
- // an alternative to this would be to call distribute/maybe prune after every join if our role has changed
1071
- this.distributeInterval = setInterval(() => {
1072
- this.distribute();
1073
- }, 7.5 * 1000);
1074
-
1075
1409
  const requestSync = async () => {
1076
1410
  /**
1077
1411
  * This method fetches entries that we potentially want.
@@ -1087,7 +1421,17 @@ export class SharedLog<
1087
1421
  // TODO test that this if statement actually does anymeaningfull
1088
1422
  if (value.length > 0) {
1089
1423
  requestHashes.push(key);
1090
- from.add(value.shift()!.hashcode());
1424
+ const publicKeyHash = value.shift()!.hashcode();
1425
+ from.add(publicKeyHash);
1426
+ const invertedSet =
1427
+ this.syncInFlightQueueInverted.get(publicKeyHash);
1428
+ if (invertedSet) {
1429
+ if (invertedSet.delete(key)) {
1430
+ if (invertedSet.size === 0) {
1431
+ this.syncInFlightQueueInverted.delete(publicKeyHash);
1432
+ }
1433
+ }
1434
+ }
1091
1435
  }
1092
1436
  if (value.length === 0) {
1093
1437
  this.syncInFlightQueue.delete(key); // no-one more to ask for this entry
@@ -1113,20 +1457,36 @@ export class SharedLog<
1113
1457
  if (this.closed) {
1114
1458
  return;
1115
1459
  }
1116
- this.syncMoreInterval = setTimeout(requestSync, 1e4);
1460
+ this.syncMoreInterval = setTimeout(requestSync, 3e3);
1117
1461
  });
1118
1462
  };
1119
1463
 
1120
1464
  // if we had a previous session with replication info, and new replication info dictates that we unreplicate
1121
1465
  // we should do that. Otherwise if options is a unreplication we dont need to do anything because
1122
1466
  // we are already unreplicated (as we are just opening)
1123
- if (
1124
- hasIndexedReplicationInfo ||
1125
- isUnreplicationOptions(options?.replicate) === false
1126
- ) {
1467
+
1468
+ let isUnreplicationOptionsDefined = isUnreplicationOptions(
1469
+ options?.replicate,
1470
+ );
1471
+ if (hasIndexedReplicationInfo && isUnreplicationOptionsDefined) {
1127
1472
  await this.replicate(options?.replicate, { checkDuplicates: true });
1473
+ } else if (
1474
+ isReplicationOptionsDependentOnPreviousState(options?.replicate) &&
1475
+ hasIndexedReplicationInfo
1476
+ ) {
1477
+ // dont do anthing since we are alread replicating stuff
1478
+ } else {
1479
+ await this.replicate(options?.replicate, {
1480
+ checkDuplicates: true,
1481
+ reset: true,
1482
+ });
1128
1483
  }
1484
+
1129
1485
  requestSync();
1486
+
1487
+ this.interval = setInterval(() => {
1488
+ this.rebalanceParticipationDebounced?.();
1489
+ }, RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL);
1130
1490
  }
1131
1491
 
1132
1492
  async afterOpen(): Promise<void> {
@@ -1165,6 +1525,7 @@ export class SharedLog<
1165
1525
  this.onEntryAdded(added.entry);
1166
1526
  }
1167
1527
  for (const removed of change.removed) {
1528
+ await this.deleteCoordinates({ hash: removed.hash });
1168
1529
  this.onEntryRemoved(removed.hash);
1169
1530
  }
1170
1531
  }
@@ -1180,6 +1541,8 @@ export class SharedLog<
1180
1541
  return false;
1181
1542
  }
1182
1543
 
1544
+ checkMinReplicasLimit(replicas);
1545
+
1183
1546
  // Don't verify entries that we have created (TODO should we? perf impact?)
1184
1547
  if (!entry.createdLocally && !(await entry.verifySignatures())) {
1185
1548
  return false;
@@ -1230,11 +1593,18 @@ export class SharedLog<
1230
1593
 
1231
1594
  private async _close() {
1232
1595
  clearTimeout(this.syncMoreInterval);
1233
- clearInterval(this.distributeInterval);
1596
+
1597
+ for (const [_key, value] of this.pendingMaturity) {
1598
+ clearTimeout(value.timeout);
1599
+ }
1600
+ this.pendingMaturity.clear();
1601
+
1234
1602
  this.distributeQueue?.clear();
1235
1603
 
1236
1604
  this._closeController.abort();
1237
1605
 
1606
+ clearInterval(this.interval);
1607
+
1238
1608
  this.node.services.pubsub.removeEventListener(
1239
1609
  "subscribe",
1240
1610
  this._onSubscriptionFn,
@@ -1262,11 +1632,13 @@ export class SharedLog<
1262
1632
  this.syncInFlight.clear();
1263
1633
  this.latestReplicationInfoMessage.clear();
1264
1634
  this._gidPeersHistory.clear();
1265
-
1635
+ this.pruneDebouncedFn = undefined as any;
1636
+ this.rebalanceParticipationDebounced = undefined;
1266
1637
  this._replicationRangeIndex = undefined as any;
1638
+ this._entryCoordinatesIndex = undefined as any;
1639
+
1267
1640
  this.cpuUsage?.stop?.();
1268
1641
  /* this._totalParticipation = 0; */
1269
- this.pq.clear();
1270
1642
  }
1271
1643
  async close(from?: Program): Promise<boolean> {
1272
1644
  const superClosed = await super.close(from);
@@ -1337,10 +1709,6 @@ export class SharedLog<
1337
1709
  return;
1338
1710
  }
1339
1711
 
1340
- const toMerge: Entry<any>[] = [];
1341
- let toDelete: Entry<any>[] | undefined = undefined;
1342
- let maybeDelete: EntryWithRefs<any>[][] | undefined = undefined;
1343
-
1344
1712
  const groupedByGid = await groupByGid(filteredHeads);
1345
1713
  const promises: Promise<void>[] = [];
1346
1714
 
@@ -1361,30 +1729,62 @@ export class SharedLog<
1361
1729
  entries.map((x) => x.entry),
1362
1730
  );
1363
1731
 
1732
+ const maxMaxReplicas = Math.max(
1733
+ maxReplicasFromHead,
1734
+ maxReplicasFromNewEntries,
1735
+ );
1736
+
1737
+ const cursor = await this.createCoordinates(
1738
+ latestEntry,
1739
+ maxMaxReplicas,
1740
+ );
1741
+
1364
1742
  const isReplicating = await this.isReplicating();
1365
1743
 
1366
- let isLeader: string[] | false;
1744
+ let isLeader:
1745
+ | Map<
1746
+ string,
1747
+ {
1748
+ intersecting: boolean;
1749
+ }
1750
+ >
1751
+ | false;
1367
1752
 
1368
1753
  if (isReplicating) {
1369
1754
  isLeader = await this.waitForIsLeader(
1370
- latestEntry,
1371
- Math.max(maxReplicasFromHead, maxReplicasFromNewEntries),
1755
+ cursor,
1756
+ this.node.identity.publicKey.hashcode(),
1372
1757
  );
1373
1758
  } else {
1374
- isLeader = await this.findLeaders(
1375
- latestEntry,
1376
- Math.max(maxReplicasFromHead, maxReplicasFromNewEntries),
1377
- );
1759
+ isLeader = await this.findLeaders(cursor);
1378
1760
 
1379
- isLeader = isLeader.includes(
1380
- this.node.identity.publicKey.hashcode(),
1381
- )
1761
+ isLeader = isLeader.has(this.node.identity.publicKey.hashcode())
1382
1762
  ? isLeader
1383
1763
  : false;
1384
1764
  }
1385
1765
 
1766
+ if (this.closed) {
1767
+ return;
1768
+ }
1769
+
1770
+ let maybeDelete: EntryWithRefs<any>[][] | undefined;
1771
+ let toMerge: Entry<any>[] = [];
1772
+ let toDelete: Entry<any>[] | undefined;
1386
1773
  if (isLeader) {
1387
- if (isLeader.find((x) => x === context.from!.hashcode())) {
1774
+ for (const entry of entries) {
1775
+ this.pruneDebouncedFn.delete(entry.entry.hash);
1776
+ }
1777
+
1778
+ for (const entry of entries) {
1779
+ await this.persistCoordinate({
1780
+ leaders: isLeader,
1781
+ coordinates: cursor,
1782
+ entry: entry.entry,
1783
+ });
1784
+ }
1785
+
1786
+ const fromIsLeader = isLeader.get(context.from!.hashcode());
1787
+ if (fromIsLeader) {
1388
1788
  let peerSet = this._gidPeersHistory.get(gid);
1389
1789
  if (!peerSet) {
1390
1790
  peerSet = new Set();
@@ -1418,151 +1818,183 @@ export class SharedLog<
1418
1818
  }. Because not leader`,
1419
1819
  );
1420
1820
  }
1421
- };
1422
- promises.push(fn());
1423
- }
1424
- await Promise.all(promises);
1425
-
1426
- if (this.closed) {
1427
- return;
1428
- }
1429
-
1430
- if (toMerge.length > 0) {
1431
- await this.log.join(toMerge);
1432
- toDelete &&
1433
- Promise.all(this.prune(toDelete)).catch((e) => {
1434
- logger.info(e.toString());
1435
- });
1436
- this.rebalanceParticipationDebounced?.();
1437
- }
1438
1821
 
1439
- /// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
1440
- for (const head of heads) {
1441
- const set = this.syncInFlight.get(context.from.hashcode());
1442
- if (set) {
1443
- set.delete(head.entry.hash);
1444
- if (set?.size === 0) {
1445
- this.syncInFlight.delete(context.from.hashcode());
1822
+ if (this.closed) {
1823
+ return;
1446
1824
  }
1447
- }
1448
- }
1449
1825
 
1450
- if (maybeDelete) {
1451
- for (const entries of maybeDelete as EntryWithRefs<any>[][]) {
1452
- const headsWithGid = await this.log.entryIndex
1453
- .getHeads(entries[0].entry.meta.gid)
1454
- .all();
1455
- if (headsWithGid && headsWithGid.length > 0) {
1456
- const minReplicas = maxReplicas(this, headsWithGid.values());
1826
+ if (toMerge.length > 0) {
1827
+ await this.log.join(toMerge);
1457
1828
 
1458
- const isLeader = await this.isLeader(
1459
- entries[0].entry,
1460
- minReplicas,
1829
+ toDelete?.map((x) =>
1830
+ this.pruneDebouncedFn.add({ key: x.hash, value: x }),
1461
1831
  );
1832
+ this.rebalanceParticipationDebounced?.();
1833
+ }
1462
1834
 
1463
- if (!isLeader) {
1464
- Promise.all(this.prune(entries.map((x) => x.entry))).catch(
1465
- (e) => {
1466
- logger.info(e.toString());
1467
- },
1468
- );
1835
+ /// we clear sync in flight here because we want to join before that, so that entries are totally accounted for
1836
+ for (const entry of entries) {
1837
+ const set = this.syncInFlight.get(context.from!.hashcode());
1838
+ if (set) {
1839
+ set.delete(entry.entry.hash);
1840
+ if (set?.size === 0) {
1841
+ this.syncInFlight.delete(context.from!.hashcode());
1842
+ }
1469
1843
  }
1470
1844
  }
1471
- }
1845
+
1846
+ if (maybeDelete) {
1847
+ for (const entries of maybeDelete as EntryWithRefs<any>[][]) {
1848
+ const headsWithGid = await this.log.entryIndex
1849
+ .getHeads(entries[0].entry.meta.gid)
1850
+ .all();
1851
+ if (headsWithGid && headsWithGid.length > 0) {
1852
+ const minReplicas = maxReplicas(
1853
+ this,
1854
+ headsWithGid.values(),
1855
+ );
1856
+
1857
+ const isLeader = await this.isLeader({
1858
+ entry: entries[0].entry,
1859
+ replicas: minReplicas,
1860
+ });
1861
+
1862
+ if (!isLeader) {
1863
+ entries.map((x) =>
1864
+ this.pruneDebouncedFn.add({
1865
+ key: x.entry.hash,
1866
+ value: x.entry,
1867
+ }),
1868
+ );
1869
+ }
1870
+ }
1871
+ }
1872
+ }
1873
+ };
1874
+ promises.push(fn()); // we do this concurrently since waitForIsLeader might be a blocking operation for some entries
1472
1875
  }
1876
+ await Promise.all(promises);
1473
1877
  }
1474
1878
  } else if (msg instanceof RequestIPrune) {
1475
1879
  const hasAndIsLeader: string[] = [];
1880
+ // await delay(3000)
1476
1881
  for (const hash of msg.hashes) {
1477
1882
  const indexedEntry = await this.log.entryIndex.getShallow(hash);
1478
1883
  if (
1479
1884
  indexedEntry &&
1480
- (await this.isLeader(
1481
- indexedEntry.value,
1482
- decodeReplicas(indexedEntry.value).getValue(this),
1483
- ))
1885
+ (
1886
+ await this.findLeadersPersist(
1887
+ {
1888
+ entry: indexedEntry.value,
1889
+ minReplicas: decodeReplicas(indexedEntry.value).getValue(
1890
+ this,
1891
+ ),
1892
+ },
1893
+ indexedEntry.value,
1894
+ )
1895
+ ).isLeader
1484
1896
  ) {
1485
1897
  this._gidPeersHistory
1486
1898
  .get(indexedEntry.value.meta.gid)
1487
1899
  ?.delete(context.from.hashcode());
1488
1900
  hasAndIsLeader.push(hash);
1901
+
1902
+ hasAndIsLeader.length > 0 &&
1903
+ this.responseToPruneDebouncedFn.add({
1904
+ hashes: hasAndIsLeader,
1905
+ peers: [context.from!.hashcode()],
1906
+ });
1489
1907
  } else {
1490
1908
  const prevPendingIHave = this._pendingIHave.get(hash);
1491
- const pendingIHave = {
1492
- clear: () => {
1493
- clearTimeout(timeout);
1494
- prevPendingIHave?.clear();
1495
- },
1496
- callback: async (entry: any) => {
1497
- if (
1498
- await this.isLeader(
1499
- entry,
1500
- decodeReplicas(entry).getValue(this),
1501
- )
1502
- ) {
1503
- this._gidPeersHistory
1504
- .get(entry.meta.gid)
1505
- ?.delete(context.from!.hashcode());
1506
- this.rpc.send(new ResponseIPrune({ hashes: [entry.hash] }), {
1507
- mode: new SilentDelivery({
1508
- to: [context.from!],
1509
- redundancy: 1,
1510
- }),
1511
- });
1512
- }
1909
+ if (prevPendingIHave) {
1910
+ prevPendingIHave.requesting.add(context.from.hashcode());
1911
+ prevPendingIHave.resetTimeout();
1912
+ } else {
1913
+ const requesting = new Set([context.from.hashcode()]);
1513
1914
 
1514
- prevPendingIHave && prevPendingIHave.callback(entry);
1515
- this._pendingIHave.delete(entry.hash);
1516
- },
1517
- };
1518
- const timeout = setTimeout(() => {
1519
- const pendingIHaveRef = this._pendingIHave.get(hash);
1520
- if (pendingIHave === pendingIHaveRef) {
1915
+ let timeout = setTimeout(() => {
1521
1916
  this._pendingIHave.delete(hash);
1522
- }
1523
- }, this._respondToIHaveTimeout);
1917
+ }, this._respondToIHaveTimeout);
1918
+
1919
+ const pendingIHave = {
1920
+ requesting,
1921
+ resetTimeout: () => {
1922
+ clearTimeout(timeout);
1923
+ timeout = setTimeout(() => {
1924
+ this._pendingIHave.delete(hash);
1925
+ }, this._respondToIHaveTimeout);
1926
+ },
1927
+ clear: () => {
1928
+ clearTimeout(timeout);
1929
+ },
1930
+ callback: async (entry: Entry<T>) => {
1931
+ if (
1932
+ (
1933
+ await this.findLeadersPersist(
1934
+ {
1935
+ entry,
1936
+ minReplicas: decodeReplicas(entry).getValue(this),
1937
+ },
1938
+ entry,
1939
+ )
1940
+ ).isLeader
1941
+ ) {
1942
+ for (const peer of requesting) {
1943
+ this._gidPeersHistory.get(entry.meta.gid)?.delete(peer);
1944
+ }
1524
1945
 
1525
- this._pendingIHave.set(hash, pendingIHave);
1946
+ this.responseToPruneDebouncedFn.add({
1947
+ hashes: [entry.hash],
1948
+ peers: requesting,
1949
+ });
1950
+ this._pendingIHave.delete(hash);
1951
+ }
1952
+ },
1953
+ };
1954
+
1955
+ this._pendingIHave.set(hash, pendingIHave);
1956
+ }
1526
1957
  }
1527
1958
  }
1528
-
1529
- await this.rpc.send(new ResponseIPrune({ hashes: hasAndIsLeader }), {
1530
- mode: new SilentDelivery({ to: [context.from], redundancy: 1 }),
1531
- });
1532
1959
  } else if (msg instanceof ResponseIPrune) {
1533
1960
  for (const hash of msg.hashes) {
1534
1961
  this._pendingDeletes.get(hash)?.resolve(context.from.hashcode());
1535
1962
  }
1536
1963
  } else if (msg instanceof RequestMaybeSync) {
1537
1964
  const requestHashes: string[] = [];
1965
+
1538
1966
  for (const hash of msg.hashes) {
1539
1967
  const inFlight = this.syncInFlightQueue.get(hash);
1540
1968
  if (inFlight) {
1541
- inFlight.push(context.from);
1542
- let inverted = this.syncInFlightQueueInverted.get(
1543
- context.from.hashcode(),
1544
- );
1545
- if (!inverted) {
1546
- inverted = new Set();
1547
- this.syncInFlightQueueInverted.set(
1969
+ if (
1970
+ !inFlight.find((x) => x.hashcode() === context.from!.hashcode())
1971
+ ) {
1972
+ inFlight.push(context.from);
1973
+ let inverted = this.syncInFlightQueueInverted.get(
1548
1974
  context.from.hashcode(),
1549
- inverted,
1550
1975
  );
1976
+ if (!inverted) {
1977
+ inverted = new Set();
1978
+ this.syncInFlightQueueInverted.set(
1979
+ context.from.hashcode(),
1980
+ inverted,
1981
+ );
1982
+ }
1983
+ inverted.add(hash);
1551
1984
  }
1552
- inverted.add(hash);
1553
1985
  } else if (!(await this.log.has(hash))) {
1554
1986
  this.syncInFlightQueue.set(hash, []);
1555
1987
  requestHashes.push(hash); // request immediately (first time we have seen this hash)
1556
1988
  }
1557
1989
  }
1558
-
1559
- await this.requestSync(requestHashes, [context.from.hashcode()]);
1990
+ requestHashes.length > 0 &&
1991
+ (await this.requestSync(requestHashes, [context.from.hashcode()]));
1560
1992
  } else if (msg instanceof ResponseMaybeSync) {
1561
1993
  // TODO better choice of step size
1562
-
1563
1994
  const entries = (
1564
1995
  await Promise.all(msg.hashes.map((x) => this.log.get(x)))
1565
1996
  ).filter((x): x is Entry<any> => !!x);
1997
+
1566
1998
  const messages = await createExchangeHeadsMessages(
1567
1999
  this.log,
1568
2000
  entries,
@@ -1647,13 +2079,15 @@ export class SharedLog<
1647
2079
  if (prev && prev > context.timestamp) {
1648
2080
  return;
1649
2081
  }
2082
+
1650
2083
  this.latestReplicationInfoMessage.set(
1651
2084
  context.from!.hashcode(),
1652
2085
  context.timestamp,
1653
2086
  );
1654
2087
 
1655
2088
  let reset = msg instanceof AllReplicatingSegmentsMessage;
1656
- const added = await this.addReplicationRange(
2089
+
2090
+ await this.addReplicationRange(
1657
2091
  replicationInfoMessage.segments.map((x) =>
1658
2092
  x.toReplicationRangeIndexable(context.from!),
1659
2093
  ),
@@ -1661,8 +2095,6 @@ export class SharedLog<
1661
2095
  { reset, checkDuplicates: true },
1662
2096
  );
1663
2097
 
1664
- added && (await this.distribute());
1665
-
1666
2098
  /* await this._modifyReplicators(msg.role, context.from!); */
1667
2099
  })
1668
2100
  .catch((e) => {
@@ -1713,18 +2145,15 @@ export class SharedLog<
1713
2145
  }
1714
2146
 
1715
2147
  async getMyReplicationSegments() {
1716
- const ranges = await this.replicationIndex.query(
1717
- new SearchRequest({
1718
- query: [
1719
- new StringMatch({
1720
- key: "hash",
1721
- value: this.node.identity.publicKey.hashcode(),
1722
- }),
1723
- ],
1724
- fetch: 0xffffffff,
1725
- }),
1726
- );
1727
- return ranges.results.map((x) => x.value);
2148
+ const ranges = await this.replicationIndex
2149
+ .iterate({
2150
+ query: new StringMatch({
2151
+ key: "hash",
2152
+ value: this.node.identity.publicKey.hashcode(),
2153
+ }),
2154
+ })
2155
+ .all();
2156
+ return ranges.map((x) => x.value);
1728
2157
  }
1729
2158
 
1730
2159
  async getMyTotalParticipation() {
@@ -1742,16 +2171,22 @@ export class SharedLog<
1742
2171
  return this._replicationRangeIndex;
1743
2172
  }
1744
2173
 
2174
+ get entryCoordinatesIndex(): Index<EntryReplicated> {
2175
+ if (!this._entryCoordinatesIndex) {
2176
+ throw new ClosedError();
2177
+ }
2178
+ return this._entryCoordinatesIndex;
2179
+ }
2180
+
1745
2181
  /**
1746
2182
  * TODO improve efficiency
1747
2183
  */
1748
2184
  async getReplicators() {
1749
- let set = new Set();
1750
- const results = await this.replicationIndex.query(
1751
- new SearchRequest({ fetch: 0xfffffff }),
1752
- { reference: true, shape: { hash: true } },
1753
- );
1754
- results.results.forEach((result) => {
2185
+ let set = new Set<string>();
2186
+ const results = await this.replicationIndex
2187
+ .iterate({}, { reference: true, shape: { hash: true } })
2188
+ .all();
2189
+ results.forEach((result) => {
1755
2190
  set.add(result.value.hash);
1756
2191
  });
1757
2192
 
@@ -1761,14 +2196,13 @@ export class SharedLog<
1761
2196
  async waitForReplicator(...keys: PublicSignKey[]) {
1762
2197
  const check = async () => {
1763
2198
  for (const k of keys) {
1764
- const rects = await this.replicationIndex?.query(
1765
- new SearchRequest({
1766
- query: [new StringMatch({ key: "hash", value: k.hashcode() })],
1767
- }),
1768
- { reference: true },
1769
- );
1770
- const rect = await rects.results[0]?.value;
1771
-
2199
+ const rects = await this.replicationIndex
2200
+ ?.iterate(
2201
+ { query: new StringMatch({ key: "hash", value: k.hashcode() }) },
2202
+ { reference: true },
2203
+ )
2204
+ .all();
2205
+ const rect = rects[0]?.value;
1772
2206
  if (
1773
2207
  !rect ||
1774
2208
  !isMatured(rect, +new Date(), await this.getDefaultMinRoleAge())
@@ -1877,29 +2311,99 @@ export class SharedLog<
1877
2311
  }
1878
2312
  }
1879
2313
 
2314
+ private async findLeadersPersist(
2315
+ cursor:
2316
+ | number[]
2317
+ | {
2318
+ entry: ShallowOrFullEntry<any> | EntryReplicated;
2319
+ minReplicas: number;
2320
+ },
2321
+ entry: ShallowOrFullEntry<any> | EntryReplicated,
2322
+ options?: {
2323
+ roleAge?: number;
2324
+ // persist even if not leader
2325
+ persist?: {
2326
+ prev?: EntryReplicated[];
2327
+ };
2328
+ },
2329
+ ): Promise<{
2330
+ leaders: Map<string, { intersecting: boolean }>;
2331
+ isLeader: boolean;
2332
+ }> {
2333
+ const coordinates = Array.isArray(cursor)
2334
+ ? cursor
2335
+ : await this.createCoordinates(cursor.entry, cursor.minReplicas);
2336
+ const leaders = await this.findLeaders(coordinates, options);
2337
+ const isLeader = leaders.has(this.node.identity.publicKey.hashcode());
2338
+
2339
+ if (isLeader || options?.persist) {
2340
+ let assignToRangeBoundary: boolean | undefined = undefined;
2341
+ if (options?.persist?.prev) {
2342
+ assignToRangeBoundary = shouldAssigneToRangeBoundary(leaders);
2343
+ const prev = options.persist.prev;
2344
+ // dont do anthing if nothing has changed
2345
+ if (prev.length > 0) {
2346
+ let allTheSame = true;
2347
+
2348
+ for (const element of prev) {
2349
+ if (element.assignedToRangeBoundary !== assignToRangeBoundary) {
2350
+ allTheSame = false;
2351
+ break;
2352
+ }
2353
+ }
2354
+
2355
+ if (allTheSame) {
2356
+ return { leaders, isLeader };
2357
+ }
2358
+ }
2359
+ }
2360
+
2361
+ !this.closed &&
2362
+ (await this.persistCoordinate(
2363
+ {
2364
+ leaders,
2365
+ coordinates,
2366
+ entry,
2367
+ },
2368
+ {
2369
+ assignToRangeBoundary: assignToRangeBoundary,
2370
+ },
2371
+ ));
2372
+ }
2373
+
2374
+ return { leaders, isLeader };
2375
+ }
2376
+
1880
2377
  async isLeader(
1881
- entry: ShallowOrFullEntry<any>,
1882
- numberOfLeaders: number,
2378
+ cursor:
2379
+ | number[]
2380
+ | {
2381
+ entry: ShallowOrFullEntry<any> | EntryReplicated;
2382
+ replicas: number;
2383
+ },
1883
2384
  options?: {
1884
- candidates?: string[];
1885
2385
  roleAge?: number;
1886
2386
  },
1887
2387
  ): Promise<boolean> {
1888
- const isLeader = (
1889
- await this.findLeaders(entry, numberOfLeaders, options)
1890
- ).find((l) => l === this.node.identity.publicKey.hashcode());
1891
- return !!isLeader;
2388
+ const leaders = await this.findLeaders(cursor, options);
2389
+ return leaders.has(this.node.identity.publicKey.hashcode());
1892
2390
  }
1893
2391
 
1894
2392
  private async waitForIsLeader(
1895
- entry: ShallowOrFullEntry<T>,
1896
- numberOfLeaders: number,
1897
- timeout = this.waitForReplicatorTimeout,
1898
- ): Promise<string[] | false> {
2393
+ cursor: number[],
2394
+ hash: string,
2395
+ options: {
2396
+ timeout: number;
2397
+ } = { timeout: this.waitForReplicatorTimeout },
2398
+ ): Promise<Map<string, { intersecting: boolean }> | false> {
1899
2399
  return new Promise((resolve, reject) => {
1900
2400
  const removeListeners = () => {
1901
2401
  this.events.removeEventListener("replication:change", roleListener);
1902
- this._closeController.signal.addEventListener("abort", abortListener);
2402
+ this.events.removeEventListener("replicator:mature", roleListener); // TODO replication:change event ?
2403
+ this._closeController.signal.removeEventListener(
2404
+ "abort",
2405
+ abortListener,
2406
+ );
1903
2407
  };
1904
2408
  const abortListener = () => {
1905
2409
  removeListeners();
@@ -1910,13 +2414,11 @@ export class SharedLog<
1910
2414
  const timer = setTimeout(() => {
1911
2415
  removeListeners();
1912
2416
  resolve(false);
1913
- }, timeout);
2417
+ }, options.timeout);
1914
2418
 
1915
2419
  const check = () =>
1916
- this.findLeaders(entry, numberOfLeaders).then((leaders) => {
1917
- const isLeader = leaders.find(
1918
- (l) => l === this.node.identity.publicKey.hashcode(),
1919
- );
2420
+ this.findLeaders(cursor).then((leaders) => {
2421
+ const isLeader = leaders.has(hash);
1920
2422
  if (isLeader) {
1921
2423
  removeListeners();
1922
2424
  clearTimeout(timer);
@@ -1929,25 +2431,127 @@ export class SharedLog<
1929
2431
  };
1930
2432
 
1931
2433
  this.events.addEventListener("replication:change", roleListener); // TODO replication:change event ?
2434
+ this.events.addEventListener("replicator:mature", roleListener); // TODO replication:change event ?
1932
2435
  this._closeController.signal.addEventListener("abort", abortListener);
1933
-
1934
2436
  check();
1935
2437
  });
1936
2438
  }
1937
2439
 
1938
2440
  async findLeaders(
1939
- entry: ShallowOrFullEntry<any>,
1940
- numberOfLeaders: number,
2441
+ cursor:
2442
+ | number[]
2443
+ | {
2444
+ entry: ShallowOrFullEntry<any> | EntryReplicated;
2445
+ replicas: number;
2446
+ },
1941
2447
  options?: {
1942
2448
  roleAge?: number;
1943
2449
  },
1944
- ): Promise<string[]> {
2450
+ ): Promise<Map<string, { intersecting: boolean }>> {
1945
2451
  if (this.closed) {
1946
- return [this.node.identity.publicKey.hashcode()]; // Assumption: if the store is closed, always assume we have responsibility over the data
2452
+ const map = new Map(); // Assumption: if the store is closed, always assume we have responsibility over the data
2453
+ map.set(this.node.identity.publicKey.hashcode(), { intersecting: false });
2454
+ return map;
1947
2455
  }
1948
2456
 
2457
+ const coordinates = Array.isArray(cursor)
2458
+ ? cursor
2459
+ : await this.createCoordinates(cursor.entry, cursor.replicas);
2460
+ const leaders = await this.findLeadersFromU32(coordinates, options);
2461
+
2462
+ return leaders;
2463
+ }
2464
+
2465
+ private async groupByLeaders(
2466
+ cursors: (
2467
+ | number[]
2468
+ | {
2469
+ entry: ShallowOrFullEntry<any> | EntryReplicated;
2470
+ replicas: number;
2471
+ }
2472
+ )[],
2473
+ options?: {
2474
+ roleAge?: number;
2475
+ },
2476
+ ) {
2477
+ const leaders = await Promise.all(
2478
+ cursors.map((x) => this.findLeaders(x, options)),
2479
+ );
2480
+ const map = new Map<string, number[]>();
2481
+ leaders.forEach((leader, i) => {
2482
+ for (const [hash] of leader) {
2483
+ const arr = map.get(hash) ?? [];
2484
+ arr.push(i);
2485
+ map.set(hash, arr);
2486
+ }
2487
+ });
2488
+
2489
+ return map;
2490
+ }
2491
+
2492
+ private async createCoordinates(
2493
+ entry: ShallowOrFullEntry<any> | EntryReplicated,
2494
+ minReplicas: number,
2495
+ ) {
1949
2496
  const cursor = await this.domain.fromEntry(entry);
1950
- return this.findLeadersFromU32(cursor, numberOfLeaders, options);
2497
+ const out = getEvenlySpacedU32(cursor, minReplicas);
2498
+ return out;
2499
+ }
2500
+
2501
+ private async getCoordinates(entry: { hash: string }) {
2502
+ const result = await this.entryCoordinatesIndex
2503
+ .iterate({ query: { hash: entry.hash } })
2504
+ .all();
2505
+ return result.map((x) => x.value.coordinate);
2506
+ }
2507
+
2508
+ private async persistCoordinate(
2509
+ properties: {
2510
+ coordinates: number[];
2511
+ entry: ShallowOrFullEntry<any> | EntryReplicated;
2512
+ leaders:
2513
+ | Map<
2514
+ string,
2515
+ {
2516
+ intersecting: boolean;
2517
+ }
2518
+ >
2519
+ | false;
2520
+ },
2521
+ options?: {
2522
+ assignToRangeBoundary?: boolean;
2523
+ },
2524
+ ) {
2525
+ let assignedToRangeBoundary =
2526
+ options?.assignToRangeBoundary ??
2527
+ shouldAssigneToRangeBoundary(properties.leaders);
2528
+
2529
+ for (const coordinate of properties.coordinates) {
2530
+ await this.entryCoordinatesIndex.put(
2531
+ new EntryReplicated({
2532
+ assignedToRangeBoundary,
2533
+ coordinate,
2534
+ meta: properties.entry.meta,
2535
+ hash: properties.entry.hash,
2536
+ }),
2537
+ );
2538
+ }
2539
+
2540
+ if (properties.entry.meta.next.length > 0) {
2541
+ await this.entryCoordinatesIndex.del({
2542
+ query: new Or(
2543
+ properties.entry.meta.next.map(
2544
+ (x) => new StringMatch({ key: "hash", value: x }),
2545
+ ),
2546
+ ),
2547
+ });
2548
+ }
2549
+ }
2550
+
2551
+ private async deleteCoordinates(
2552
+ properties: { gid: string } | { hash: string },
2553
+ ) {
2554
+ await this.entryCoordinatesIndex.del({ query: properties });
1951
2555
  }
1952
2556
 
1953
2557
  async getDefaultMinRoleAge(): Promise<number> {
@@ -1970,14 +2574,13 @@ export class SharedLog<
1970
2574
  }
1971
2575
 
1972
2576
  private async findLeadersFromU32(
1973
- cursor: u32,
1974
- numberOfLeaders: number,
2577
+ cursor: u32[],
1975
2578
  options?: {
1976
2579
  roleAge?: number;
1977
2580
  },
1978
- ) {
2581
+ ): Promise<Map<string, { intersecting: boolean }>> {
1979
2582
  const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO -500 as is added so that i f someone else is just as new as us, then we treat them as mature as us. without -500 we might be slower syncing if two nodes starts almost at the same time
1980
- return getSamples(cursor, this.replicationIndex, numberOfLeaders, roleAge);
2583
+ return getSamples(cursor, this.replicationIndex, roleAge);
1981
2584
  }
1982
2585
 
1983
2586
  async isReplicator(
@@ -1987,7 +2590,10 @@ export class SharedLog<
1987
2590
  roleAge?: number;
1988
2591
  },
1989
2592
  ) {
1990
- return this.isLeader(entry, decodeReplicas(entry).getValue(this), options);
2593
+ return this.isLeader(
2594
+ { entry, replicas: decodeReplicas(entry).getValue(this) },
2595
+ options,
2596
+ );
1991
2597
  }
1992
2598
 
1993
2599
  async handleSubscriptionChange(
@@ -2005,22 +2611,7 @@ export class SharedLog<
2005
2611
  for (const [_a, b] of this._gidPeersHistory) {
2006
2612
  b.delete(publicKey.hashcode());
2007
2613
  }
2008
- this.syncInFlight.delete(publicKey.hashcode());
2009
- const waitingHashes = this.syncInFlightQueueInverted.get(
2010
- publicKey.hashcode(),
2011
- );
2012
- if (waitingHashes) {
2013
- for (const hash of waitingHashes) {
2014
- let arr = this.syncInFlightQueue.get(hash);
2015
- if (arr) {
2016
- arr = arr.filter((x) => !x.equals(publicKey));
2017
- }
2018
- if (this.syncInFlightQueue.size === 0) {
2019
- this.syncInFlightQueue.delete(hash);
2020
- }
2021
- }
2022
- }
2023
- this.syncInFlightQueueInverted.delete(publicKey.hashcode());
2614
+ this.clearSyncProcessPublicKey(publicKey);
2024
2615
  }
2025
2616
 
2026
2617
  if (subscribed) {
@@ -2052,17 +2643,20 @@ export class SharedLog<
2052
2643
  }
2053
2644
 
2054
2645
  prune(
2055
- entries: (Entry<any> | ShallowEntry)[],
2646
+ entries:
2647
+ | (EntryReplicated | ShallowOrFullEntry<any>)[]
2648
+ | Map<string, EntryReplicated | ShallowOrFullEntry<any>>,
2056
2649
  options?: { timeout?: number; unchecked?: boolean },
2057
2650
  ): Promise<any>[] {
2058
2651
  if (options?.unchecked) {
2059
- return entries.map((x) => {
2652
+ return [...entries.values()].map((x) => {
2060
2653
  this._gidPeersHistory.delete(x.meta.gid);
2061
2654
  return this.log.remove(x, {
2062
2655
  recursively: true,
2063
2656
  });
2064
2657
  });
2065
2658
  }
2659
+
2066
2660
  // ask network if they have they entry,
2067
2661
  // so I can delete it
2068
2662
 
@@ -2073,8 +2667,10 @@ export class SharedLog<
2073
2667
  // - Peers join and leave, which means we might not be a replicator anymore
2074
2668
 
2075
2669
  const promises: Promise<any>[] = [];
2076
- const filteredEntries: (Entry<any> | ShallowEntry)[] = [];
2077
- for (const entry of entries) {
2670
+ const filteredEntries: (EntryReplicated | ShallowOrFullEntry<any>)[] = [];
2671
+ const deleted = new Set();
2672
+
2673
+ for (const entry of entries.values()) {
2078
2674
  const pendingPrev = this._pendingDeletes.get(entry.hash);
2079
2675
  if (pendingPrev) {
2080
2676
  promises.push(pendingPrev.promise.promise);
@@ -2104,14 +2700,13 @@ export class SharedLog<
2104
2700
  deferredPromise.reject(e);
2105
2701
  };
2106
2702
 
2107
- const timeout = setTimeout(
2108
- () => {
2109
- reject(
2110
- new Error("Timeout for checked pruning: Closed: " + this.closed),
2111
- );
2112
- },
2113
- options?.timeout ?? 10 * 1000,
2114
- );
2703
+ let cursor: number[] | undefined = undefined;
2704
+
2705
+ const timeout = setTimeout(async () => {
2706
+ reject(
2707
+ new Error("Timeout for checked pruning: Closed: " + this.closed),
2708
+ );
2709
+ }, options?.timeout ?? 1e4);
2115
2710
 
2116
2711
  this._pendingDeletes.set(entry.hash, {
2117
2712
  promise: deferredPromise,
@@ -2125,16 +2720,20 @@ export class SharedLog<
2125
2720
  ? Math.min(minReplicasValue, this.replicas.max.getValue(this))
2126
2721
  : minReplicasValue;
2127
2722
 
2128
- const leaders = await this.findLeaders(entry, minMinReplicasValue);
2129
-
2130
- if (
2131
- leaders.find((x) => x === this.node.identity.publicKey.hashcode())
2132
- ) {
2133
- reject(new Error("Failed to delete, is leader"));
2134
- return;
2135
- }
2723
+ const leaders = await this.waitForIsLeader(
2724
+ cursor ??
2725
+ (cursor = await this.createCoordinates(
2726
+ entry,
2727
+ minMinReplicasValue,
2728
+ )),
2729
+ publicKeyHash,
2730
+ );
2731
+ if (leaders) {
2732
+ if (leaders.has(this.node.identity.publicKey.hashcode())) {
2733
+ reject(new Error("Failed to delete, is leader"));
2734
+ return;
2735
+ }
2136
2736
 
2137
- if (leaders.find((x) => x === publicKeyHash)) {
2138
2737
  existCounter.add(publicKeyHash);
2139
2738
  if (minMinReplicasValue <= existCounter.size) {
2140
2739
  clear();
@@ -2144,7 +2743,8 @@ export class SharedLog<
2144
2743
  recursively: true,
2145
2744
  })
2146
2745
  .then(() => {
2147
- resolve();
2746
+ deleted.add(entry.hash);
2747
+ return resolve();
2148
2748
  })
2149
2749
  .catch((e: any) => {
2150
2750
  reject(new Error("Failed to delete entry: " + e.toString()));
@@ -2161,34 +2761,67 @@ export class SharedLog<
2161
2761
  return promises;
2162
2762
  }
2163
2763
 
2164
- this.rpc.send(
2165
- new RequestIPrune({ hashes: filteredEntries.map((x) => x.hash) }),
2166
- );
2764
+ const emitMessages = (entries: string[], to: string) => {
2765
+ this.rpc.send(
2766
+ new RequestIPrune({
2767
+ hashes: entries,
2768
+ }),
2769
+ {
2770
+ mode: new SilentDelivery({
2771
+ to: [to], // TODO group by peers?
2772
+ redundancy: 1,
2773
+ }),
2774
+ priority: 1,
2775
+ },
2776
+ );
2777
+ };
2167
2778
 
2168
- const onNewPeer = async (e: CustomEvent<ReplicatorJoinEvent>) => {
2169
- if (e.detail.publicKey.equals(this.node.identity.publicKey) === false) {
2170
- await this.rpc.send(
2171
- new RequestIPrune({ hashes: filteredEntries.map((x) => x.hash) }),
2172
- {
2173
- mode: new SilentDelivery({
2174
- to: [e.detail.publicKey.hashcode()],
2175
- redundancy: 1,
2176
- }),
2177
- },
2779
+ const maxReplicasValue = maxReplicas(this, filteredEntries);
2780
+ this.groupByLeaders(
2781
+ filteredEntries.map((x) => {
2782
+ return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined?
2783
+ }),
2784
+ ).then((map) => {
2785
+ for (const [peer, idx] of map) {
2786
+ emitMessages(
2787
+ idx.map((i) => filteredEntries[i].hash),
2788
+ peer,
2178
2789
  );
2179
2790
  }
2791
+ });
2792
+
2793
+ const onPeersChange = async (e: CustomEvent<ReplicatorJoinEvent>) => {
2794
+ if (e.detail.publicKey.equals(this.node.identity.publicKey) === false) {
2795
+ const peerEntries = (
2796
+ await this.groupByLeaders(
2797
+ filteredEntries
2798
+ .filter((x) => !deleted.has(x.hash))
2799
+ .map((x) => {
2800
+ return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined?
2801
+ }),
2802
+ )
2803
+ ).get(e.detail.publicKey.hashcode());
2804
+ if (peerEntries && peerEntries.length > 0) {
2805
+ emitMessages(
2806
+ peerEntries.map((x) => filteredEntries[x].hash),
2807
+ e.detail.publicKey.hashcode(),
2808
+ );
2809
+ }
2810
+ }
2180
2811
  };
2181
2812
 
2182
2813
  // check joining peers
2183
- this.events.addEventListener("replicator:join", onNewPeer);
2184
- Promise.allSettled(promises).finally(() =>
2185
- this.events.removeEventListener("replicator:join", onNewPeer),
2186
- );
2814
+ this.events.addEventListener("replicator:mature", onPeersChange);
2815
+ this.events.addEventListener("replicator:join", onPeersChange);
2816
+ Promise.allSettled(promises).finally(() => {
2817
+ this.events.removeEventListener("replicator:mature", onPeersChange);
2818
+ this.events.removeEventListener("replicator:join", onPeersChange);
2819
+ });
2187
2820
 
2188
2821
  return promises;
2189
2822
  }
2190
2823
 
2191
- async distribute() {
2824
+ /* async distribute() {
2192
2825
  // if there is one or more items waiting for run, don't bother adding a new item just wait for the queue to empty
2193
2826
  if (this.distributeQueue && this.distributeQueue?.size > 0) {
2194
2827
  return this.distributeQueue.onEmpty();
@@ -2205,10 +2838,59 @@ export class SharedLog<
2205
2838
  signal: this._closeController.signal,
2206
2839
  }).then(() => this._distribute()),
2207
2840
  )
2208
- .catch(() => {}); // catch ignore delay abort errror
2841
+ .catch(() => { }); // catch ignore delay abort errror
2842
+ } */
2843
+
2844
+ /**
2845
+ * For debugging
2846
+ */
2847
+ async getPrunable() {
2848
+ const heads = await this.log.getHeads(true).all();
2849
+ let prunable: Entry<any>[] = [];
2850
+ for (const head of heads) {
2851
+ const isLeader = await this.isLeader({
2852
+ entry: head,
2853
+ replicas: maxReplicas(this, [head]),
2854
+ });
2855
+
2856
+ if (!isLeader) {
2857
+ prunable.push(head);
2858
+ }
2859
+ }
2860
+ return prunable;
2209
2861
  }
2210
2862
 
2211
- async _distribute() {
2863
+ async getNonPrunable() {
2864
+ const heads = await this.log.getHeads(true).all();
2865
+ let nonPrunable: Entry<any>[] = [];
2866
+ for (const head of heads) {
2867
+ const isLeader = await this.isLeader({
2868
+ entry: head,
2869
+ replicas: maxReplicas(this, [head]),
2870
+ });
2871
+
2872
+ if (isLeader) {
2873
+ nonPrunable.push(head);
2874
+ }
2875
+ }
2876
+ return nonPrunable;
2877
+ }
2878
+
2879
+ async rebalanceAll() {
2880
+ this.onReplicationChange(
2881
+ (await this.getMyReplicationSegments()).map((x) => {
2882
+ return { range: x, type: "added" };
2883
+ }),
2884
+ );
2885
+ }
2886
+
2887
+ async waitForPruned() {
2888
+ await waitFor(() => this._pendingDeletes.size === 0);
2889
+ }
2890
+
2891
+ async onReplicationChange(
2892
+ changeOrChanges: ReplicationChanges | ReplicationChanges[],
2893
+ ) {
2212
2894
  /**
2213
2895
  * TODO use information of new joined/leaving peer to create a subset of heads
2214
2896
  * that we potentially need to share with other peers
@@ -2218,95 +2900,112 @@ export class SharedLog<
2218
2900
  return;
2219
2901
  }
2220
2902
 
2903
+ const change = mergeReplicationChanges(changeOrChanges);
2221
2904
  const changed = false;
2222
- await this.log.trim();
2223
- const heads = await this.log.getHeads().all();
2224
2905
 
2225
- const groupedByGid = await groupByGid(heads);
2226
- const uncheckedDeliver: Map<string, (Entry<any> | ShallowEntry)[]> =
2227
- new Map();
2228
- const allEntriesToDelete: (Entry<any> | ShallowEntry)[] = [];
2906
+ try {
2907
+ await this.log.trim();
2229
2908
 
2230
- for (const [gid, entries] of groupedByGid) {
2231
- if (this.closed) {
2232
- break;
2233
- }
2909
+ const uncheckedDeliver: Map<string, EntryReplicated[]> = new Map();
2234
2910
 
2235
- if (entries.length === 0) {
2236
- continue; // TODO maybe close store?
2237
- }
2911
+ const allEntriesToDelete: EntryReplicated[] = [];
2238
2912
 
2239
- const oldPeersSet = this._gidPeersHistory.get(gid);
2240
- const currentPeers = await this.findLeaders(
2241
- getLatestEntry(entries)!,
2242
- maxReplicas(this, entries), // pick max replication policy of all entries, so all information is treated equally important as the most important
2243
- );
2913
+ for await (const { gid, entries: coordinates } of toRebalance(
2914
+ change,
2915
+ this.entryCoordinatesIndex,
2916
+ )) {
2917
+ if (this.closed) {
2918
+ break;
2919
+ }
2920
+ const oldPeersSet = this._gidPeersHistory.get(gid);
2244
2921
 
2245
- const isLeader = currentPeers.find(
2246
- (x) => x === this.node.identity.publicKey.hashcode(),
2247
- );
2248
- const currentPeersSet = new Set(currentPeers);
2249
- this._gidPeersHistory.set(gid, currentPeersSet);
2922
+ if (this.closed) {
2923
+ return;
2924
+ }
2250
2925
 
2251
- for (const currentPeer of currentPeers) {
2252
- if (currentPeer === this.node.identity.publicKey.hashcode()) {
2253
- continue;
2926
+ let { isLeader, leaders: currentPeers } = await this.findLeadersPersist(
2927
+ coordinates.map((x) => x.coordinate),
2928
+ coordinates[0],
2929
+ {
2930
+ roleAge: 0,
2931
+ persist: {
2932
+ prev: coordinates,
2933
+ },
2934
+ },
2935
+ );
2936
+
2937
+ if (isLeader) {
2938
+ for (const entry of coordinates) {
2939
+ this.pruneDebouncedFn.delete(entry.hash);
2940
+ }
2254
2941
  }
2255
2942
 
2256
- if (!oldPeersSet?.has(currentPeer)) {
2257
- let arr = uncheckedDeliver.get(currentPeer);
2258
- if (!arr) {
2259
- arr = [];
2260
- uncheckedDeliver.set(currentPeer, arr);
2943
+ const currentPeersSet = new Set<string>(currentPeers.keys());
2944
+ this._gidPeersHistory.set(gid, currentPeersSet);
2945
+
2946
+ for (const [currentPeer] of currentPeers) {
2947
+ if (currentPeer === this.node.identity.publicKey.hashcode()) {
2948
+ continue;
2261
2949
  }
2262
2950
 
2263
- for (const entry of entries) {
2264
- arr.push(entry);
2951
+ if (!oldPeersSet?.has(currentPeer)) {
2952
+ let arr = uncheckedDeliver.get(currentPeer);
2953
+ if (!arr) {
2954
+ arr = [];
2955
+ uncheckedDeliver.set(currentPeer, arr);
2956
+ }
2957
+
2958
+ for (const entry of coordinates) {
2959
+ arr.push(entry);
2960
+ }
2265
2961
  }
2266
2962
  }
2267
- }
2268
2963
 
2269
- if (!isLeader) {
2270
- if (currentPeers.length > 0) {
2271
- // If we are observer, never prune locally created entries, since we dont really know who can store them
2272
- // if we are replicator, we will always persist entries that we need to so filtering on createdLocally will not make a difference
2273
- let entriesToDelete = entries;
2274
-
2275
- if (this.sync) {
2276
- entriesToDelete = entriesToDelete.filter(
2277
- (entry) => this.sync!(entry) === false,
2278
- );
2964
+ if (!isLeader) {
2965
+ if (currentPeers.size > 0) {
2966
+ // If we are observer, never prune locally created entries, since we dont really know who can store them
2967
+ // if we are replicator, we will always persist entries that we need to so filtering on createdLocally will not make a difference
2968
+ let entriesToDelete = coordinates;
2969
+
2970
+ if (this.sync) {
2971
+ entriesToDelete = entriesToDelete.filter(
2972
+ (entry) => this.sync!(entry) === false,
2973
+ );
2974
+ }
2975
+ allEntriesToDelete.push(...entriesToDelete);
2976
+ }
2977
+ } else {
2978
+ for (const entry of coordinates) {
2979
+ await this._pendingDeletes
2980
+ .get(entry.hash)
2981
+ ?.reject(
2982
+ new Error(
2983
+ "Failed to delete, is leader again. Closed: " + this.closed,
2984
+ ),
2985
+ );
2279
2986
  }
2280
- allEntriesToDelete.push(...entriesToDelete);
2281
- }
2282
- } else {
2283
- for (const entry of entries) {
2284
- await this._pendingDeletes
2285
- .get(entry.hash)
2286
- ?.reject(
2287
- new Error(
2288
- "Failed to delete, is leader again. Closed: " + this.closed,
2289
- ),
2290
- );
2291
2987
  }
2292
2988
  }
2293
- }
2294
2989
 
2295
- for (const [target, entries] of uncheckedDeliver) {
2296
- this.rpc.send(
2297
- new RequestMaybeSync({ hashes: entries.map((x) => x.hash) }),
2298
- {
2299
- mode: new SilentDelivery({ to: [target], redundancy: 1 }),
2300
- },
2301
- );
2302
- }
2990
+ for (const [target, entries] of uncheckedDeliver) {
2991
+ this.rpc.send(
2992
+ new RequestMaybeSync({ hashes: entries.map((x) => x.hash) }),
2993
+ {
2994
+ mode: new SilentDelivery({ to: [target], redundancy: 1 }),
2995
+ },
2996
+ );
2997
+ }
2303
2998
 
2304
- if (allEntriesToDelete.length > 0) {
2305
- Promise.allSettled(this.prune(allEntriesToDelete)).catch((e) => {
2306
- logger.info(e.toString());
2307
- });
2999
+ if (allEntriesToDelete.length > 0) {
3000
+ allEntriesToDelete.map((x) =>
3001
+ this.pruneDebouncedFn.add({ key: x.hash, value: x }),
3002
+ );
3003
+ }
3004
+ return changed;
3005
+ } catch (error: any) {
3006
+ logger.error(error.toString());
3007
+ throw error;
2308
3008
  }
2309
- return changed;
2310
3009
  }
2311
3010
 
2312
3011
  private async requestSync(hashes: string[], to: Set<string> | string[]) {
@@ -2336,7 +3035,7 @@ export class SharedLog<
2336
3035
  logger.debug(
2337
3036
  `Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify(
2338
3037
  evt.detail.unsubscriptions.map((x) => x),
2339
- )}'`,
3038
+ )} '`,
2340
3039
  );
2341
3040
  this.latestReplicationInfoMessage.delete(evt.detail.from.hashcode());
2342
3041
 
@@ -2400,97 +3099,97 @@ export class SharedLog<
2400
3099
  );
2401
3100
  }
2402
3101
 
2403
- async rebalanceParticipation(onRoleChange = true) {
3102
+ async rebalanceParticipation() {
2404
3103
  // update more participation rate to converge to the average expected rate or bounded by
2405
3104
  // resources such as memory and or cpu
2406
3105
 
2407
- if (this.closed) {
2408
- return false;
2409
- }
2410
-
2411
- // The role is fixed (no changes depending on memory usage or peer count etc)
2412
- if (!this._isReplicating) {
2413
- return false;
2414
- }
2415
-
2416
- if (this._isAdaptiveReplicating) {
2417
- const peers = this.replicationIndex;
2418
- const usedMemory = await this.getMemoryUsage();
2419
- let dynamicRange = await this.getDynamicRange();
3106
+ const fn = async () => {
3107
+ if (this.closed) {
3108
+ return false;
3109
+ }
2420
3110
 
2421
- if (!dynamicRange) {
2422
- return; // not allowed to replicate
3111
+ // The role is fixed (no changes depending on memory usage or peer count etc)
3112
+ if (!this._isReplicating) {
3113
+ return false;
2423
3114
  }
2424
3115
 
2425
- const peersSize = (await peers.getSize()) || 1;
2426
- const totalParticipation = await this.calculateTotalParticipation();
3116
+ if (this._isAdaptiveReplicating) {
3117
+ const peers = this.replicationIndex;
3118
+ const usedMemory = await this.getMemoryUsage();
3119
+ let dynamicRange = await this.getDynamicRange();
2427
3120
 
2428
- const newFactor = this.replicationController.step({
2429
- memoryUsage: usedMemory,
2430
- currentFactor: dynamicRange.widthNormalized,
2431
- totalFactor: totalParticipation, // TODO use this._totalParticipation when flakiness is fixed
2432
- peerCount: peersSize,
2433
- cpuUsage: this.cpuUsage?.value(),
2434
- });
3121
+ if (!dynamicRange) {
3122
+ return; // not allowed to replicate
3123
+ }
3124
+
3125
+ const peersSize = (await peers.getSize()) || 1;
3126
+ const totalParticipation = await this.calculateTotalParticipation();
2435
3127
 
2436
- const relativeDifference =
2437
- Math.abs(dynamicRange.widthNormalized - newFactor) /
2438
- dynamicRange.widthNormalized;
2439
-
2440
- if (relativeDifference > 0.0001) {
2441
- // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
2442
- dynamicRange = new ReplicationRangeIndexable({
2443
- offset: hashToU32(this.node.identity.publicKey.bytes),
2444
- length: scaleToU32(newFactor),
2445
- publicKeyHash: dynamicRange.hash,
2446
- id: dynamicRange.id,
2447
- mode: dynamicRange.mode,
2448
- timestamp: dynamicRange.timestamp,
3128
+ const newFactor = this.replicationController.step({
3129
+ memoryUsage: usedMemory,
3130
+ currentFactor: dynamicRange.widthNormalized,
3131
+ totalFactor: totalParticipation, // TODO use this._totalParticipation when flakiness is fixed
3132
+ peerCount: peersSize,
3133
+ cpuUsage: this.cpuUsage?.value(),
2449
3134
  });
2450
3135
 
2451
- const canReplicate =
2452
- !this._isTrustedReplicator ||
2453
- (await this._isTrustedReplicator(this.node.identity.publicKey));
2454
- if (!canReplicate) {
2455
- return false;
2456
- }
3136
+ const relativeDifference =
3137
+ Math.abs(dynamicRange.widthNormalized - newFactor) /
3138
+ dynamicRange.widthNormalized;
3139
+
3140
+ if (relativeDifference > 0.0001) {
3141
+ // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects
3142
+ dynamicRange = new ReplicationRangeIndexable({
3143
+ offset: hashToU32(this.node.identity.publicKey.bytes),
3144
+ length: scaleToU32(newFactor),
3145
+ publicKeyHash: dynamicRange.hash,
3146
+ id: dynamicRange.id,
3147
+ mode: dynamicRange.mode,
3148
+ timestamp: dynamicRange.timestamp,
3149
+ });
2457
3150
 
2458
- await this.startAnnounceReplicating([dynamicRange], {
2459
- checkDuplicates: false,
2460
- reset: false,
2461
- });
3151
+ const canReplicate =
3152
+ !this._isTrustedReplicator ||
3153
+ (await this._isTrustedReplicator(this.node.identity.publicKey));
3154
+ if (!canReplicate) {
3155
+ return false;
3156
+ }
2462
3157
 
2463
- /* await this._updateRole(newRole, onRoleChange); */
2464
- this.rebalanceParticipationDebounced?.();
3158
+ await this.startAnnounceReplicating([dynamicRange], {
3159
+ checkDuplicates: false,
3160
+ reset: false,
3161
+ });
2465
3162
 
2466
- return true;
2467
- } else {
2468
- this.rebalanceParticipationDebounced?.();
3163
+ /* await this._updateRole(newRole, onRoleChange); */
3164
+ this.rebalanceParticipationDebounced?.();
3165
+
3166
+ return true;
3167
+ } else {
3168
+ this.rebalanceParticipationDebounced?.();
3169
+ }
3170
+ return false;
2469
3171
  }
2470
3172
  return false;
2471
- }
2472
- return false;
3173
+ };
3174
+
3175
+ const resp = await fn();
3176
+
3177
+ return resp;
2473
3178
  }
2474
3179
  async getDynamicRange() {
2475
- let dynamicRangeId = sha256Sync(
2476
- concat([
2477
- this.node.identity.publicKey.bytes,
2478
- new TextEncoder().encode("dynamic"),
2479
- ]),
2480
- );
3180
+ let dynamicRangeId = getIdForDynamicRange(this.node.identity.publicKey);
2481
3181
  let range = (
2482
- await this.replicationIndex.query(
2483
- new SearchRequest({
3182
+ await this.replicationIndex
3183
+ .iterate({
2484
3184
  query: [
2485
3185
  new ByteMatchQuery({
2486
3186
  key: "id",
2487
3187
  value: dynamicRangeId,
2488
3188
  }),
2489
3189
  ],
2490
- fetch: 1,
2491
- }),
2492
- )
2493
- )?.results[0]?.value;
3190
+ })
3191
+ .all()
3192
+ )?.[0]?.value;
2494
3193
  if (!range) {
2495
3194
  range = new ReplicationRangeIndexable({
2496
3195
  normalized: true,
@@ -2530,12 +3229,33 @@ export class SharedLog<
2530
3229
  this.syncInFlightQueue.delete(hash);
2531
3230
  }
2532
3231
  }
2533
- private onEntryAdded(entry: Entry<any>) {
3232
+
3233
+ private clearSyncProcessPublicKey(publicKey: PublicSignKey) {
3234
+ this.syncInFlight.delete(publicKey.hashcode());
3235
+ const map = this.syncInFlightQueueInverted.get(publicKey.hashcode());
3236
+ if (map) {
3237
+ for (const hash of map) {
3238
+ const arr = this.syncInFlightQueue.get(hash);
3239
+ if (arr) {
3240
+ const filtered = arr.filter((x) => !x.equals(publicKey));
3241
+ if (filtered.length > 0) {
3242
+ this.syncInFlightQueue.set(hash, filtered);
3243
+ } else {
3244
+ this.syncInFlightQueue.delete(hash);
3245
+ }
3246
+ }
3247
+ }
3248
+ this.syncInFlightQueueInverted.delete(publicKey.hashcode());
3249
+ }
3250
+ }
3251
+
3252
+ private async onEntryAdded(entry: Entry<any>) {
2534
3253
  const ih = this._pendingIHave.get(entry.hash);
2535
3254
  if (ih) {
2536
3255
  ih.clear();
2537
3256
  ih.callback(entry);
2538
3257
  }
3258
+
2539
3259
  this.clearSyncProcess(entry.hash);
2540
3260
  }
2541
3261