@peerbit/shared-log 13.1.0 → 13.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/pid.ts CHANGED
@@ -49,9 +49,14 @@ export class PIDReplicationController {
49
49
  let errorMemory = 0;
50
50
 
51
51
  if (this.maxMemoryLimit != null) {
52
+ // Treat the configured storage limit as a ceiling, not the exact control
53
+ // target. A small reserve keeps discrete entry sizes and delayed prune
54
+ // checks from repeatedly leaving usage just above the hard budget.
55
+ const effectiveMemoryLimit =
56
+ this.maxMemoryLimit > 0 ? this.maxMemoryLimit * 0.95 : 0;
52
57
  errorMemory =
53
58
  currentFactor > 0 && memoryUsage > 0
54
- ? Math.max(Math.min(1, this.maxMemoryLimit / estimatedTotalSize), 0) -
59
+ ? Math.max(Math.min(1, effectiveMemoryLimit / estimatedTotalSize), 0) -
55
60
  currentFactor
56
61
  : 0;
57
62
  // Math.max(Math.min((this.maxMemoryLimit - memoryUsage) / 100e5, 1), -1)// Math.min(Math.max((this.maxMemoryLimit - memoryUsage, 0) / 10e5, 0), 1);
@@ -99,7 +104,7 @@ export class PIDReplicationController {
99
104
  // TODO make these self-optimizing
100
105
 
101
106
  let totalError: number;
102
- const errorMemoryFactor = 0.9;
107
+ let errorMemoryFactor = 0.9;
103
108
  const errorBalanceFactor = 0.6;
104
109
 
105
110
  totalError =
@@ -108,6 +113,21 @@ export class PIDReplicationController {
108
113
 
109
114
  // Computer is getting too full?
110
115
  if (errorMemory < 0) {
116
+ if (
117
+ this.maxMemoryLimit != null &&
118
+ this.maxMemoryLimit > 0 &&
119
+ coverageDeficit > 0
120
+ ) {
121
+ // When the ring is materially under-covered, shrinking a memory-limited
122
+ // range can increase gap-boundary assignments and make local memory usage
123
+ // worse, not better. Let the coverage term dominate until the floor is
124
+ // restored, while preserving the hard shrink behavior for zero-capacity peers.
125
+ errorMemoryFactor = Math.max(
126
+ 0.2,
127
+ errorMemoryFactor -
128
+ 0.7 * Math.min(1, coverageDeficit / 0.25),
129
+ );
130
+ }
111
131
  totalError =
112
132
  errorMemory * errorMemoryFactor + totalError * (1 - errorMemoryFactor);
113
133
  }
package/src/ranges.ts CHANGED
@@ -2568,7 +2568,15 @@ export const debounceAggregationChanges = <
2568
2568
  // Keep different change types for the same segment id. In particular, range
2569
2569
  // updates produce a `replaced` + `added` pair; collapsing by id would drop the
2570
2570
  // "removed" portion and prevent correct rebalancing/pruning.
2571
- const key = `${change.type}:${change.range.idString}`;
2571
+ //
2572
+ // Preserve each distinct replaced range as well. Adaptive replication can
2573
+ // shrink a segment several times before the debounced rebalance runs; if we
2574
+ // keep only the newest `replaced` range, entries from earlier wider ranges are
2575
+ // never revisited and can stay resident after they become prunable.
2576
+ const key =
2577
+ change.type === "replaced"
2578
+ ? `${change.type}:${change.range.idString}:${change.range.rangeHash}`
2579
+ : `${change.type}:${change.range.idString}`;
2572
2580
  const prev = aggregated.get(key);
2573
2581
  if (prev) {
2574
2582
  if (prev.range.timestamp < change.range.timestamp) {
@@ -58,6 +58,17 @@ export class RequestMaybeSyncCoordinate extends TransportMessage {
58
58
  }
59
59
  }
60
60
 
61
+ @variant([0, 6])
62
+ export class ConfirmEntriesMessage extends TransportMessage {
63
+ @field({ type: vec("string") })
64
+ hashes: string[];
65
+
66
+ constructor(props: { hashes: string[] }) {
67
+ super();
68
+ this.hashes = props.hashes;
69
+ }
70
+ }
71
+
61
72
  const getHashesFromSymbols = async (
62
73
  symbols: bigint[],
63
74
  entryIndex: Index<EntryReplicated<any>, any>,
@@ -109,6 +120,10 @@ const SESSION_POLL_INTERVAL_MS = 100;
109
120
  const DEFAULT_MAX_HASHES_PER_MESSAGE = 1_024;
110
121
  const DEFAULT_MAX_COORDINATES_PER_MESSAGE = 1_024;
111
122
  const DEFAULT_MAX_CONVERGENT_TRACKED_HASHES = 4_096;
123
+ // Retry missing entry requests when the first response was lost (for example, due to
124
+ // pubsub stream warmup). Keep it coarse-grained so we do not hammer the network under
125
+ // large historical backfills.
126
+ const SIMPLE_SYNC_RETRY_AFTER_MS = 10_000;
112
127
 
113
128
  const createDeferred = <T>() => {
114
129
  let resolve!: (value: T | PromiseLike<T>) => void;
@@ -612,7 +627,14 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
612
627
  options?.skipCheck ||
613
628
  !(await this.checkHasCoordinateOrHash(coordinateOrHash))
614
629
  ) {
615
- this.syncInFlightQueue.set(coordinateOrHash, []);
630
+ // Track the initial sender so we can retry if the first request is lost.
631
+ this.syncInFlightQueue.set(coordinateOrHash, [from]);
632
+ let inverted = this.syncInFlightQueueInverted.get(from.hashcode());
633
+ if (!inverted) {
634
+ inverted = new Set();
635
+ this.syncInFlightQueueInverted.set(from.hashcode(), inverted);
636
+ }
637
+ inverted.add(coordinateOrHash);
616
638
  requestHashes.push(coordinateOrHash); // request immediately (first time we have seen this hash)
617
639
  }
618
640
  }
@@ -688,6 +710,7 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
688
710
 
689
711
  const requestHashes: SyncableKey[] = [];
690
712
  const from: Set<string> = new Set();
713
+ const now = Date.now();
691
714
  for (const [key, value] of this.syncInFlightQueue) {
692
715
  if (this.closed) {
693
716
  return;
@@ -696,26 +719,32 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
696
719
  const has = await this.checkHasCoordinateOrHash(key);
697
720
 
698
721
  if (!has) {
699
- // TODO test that this if statement actually does anything meaningful
700
- if (value.length > 0) {
722
+ if (value.length === 0) {
723
+ // No remaining peers to ask; drop the pending key to avoid leaking.
724
+ this.clearSyncProcessKey(key);
725
+ continue;
726
+ }
727
+
728
+ // Ask one peer per key per loop. If a previous request is still considered
729
+ // "recent", wait until the retry window elapses.
730
+ const candidate = value[0]!;
731
+ const publicKeyHash = candidate.hashcode();
732
+ const inflightTimestamp = this.syncInFlight
733
+ .get(publicKeyHash)
734
+ ?.get(key)?.timestamp;
735
+ if (
736
+ inflightTimestamp == null ||
737
+ now - inflightTimestamp >= SIMPLE_SYNC_RETRY_AFTER_MS
738
+ ) {
701
739
  requestHashes.push(key);
702
- const publicKeyHash = value.shift()!.hashcode();
703
740
  from.add(publicKeyHash);
704
- const invertedSet =
705
- this.syncInFlightQueueInverted.get(publicKeyHash);
706
- if (invertedSet) {
707
- if (invertedSet.delete(key)) {
708
- if (invertedSet.size === 0) {
709
- this.syncInFlightQueueInverted.delete(publicKeyHash);
710
- }
711
- }
741
+ if (value.length > 1) {
742
+ // Rotate for fairness across multiple possible sources.
743
+ value.push(value.shift()!);
712
744
  }
713
745
  }
714
- if (value.length === 0) {
715
- this.syncInFlightQueue.delete(key); // no-one more to ask for this entry
716
- }
717
746
  } else {
718
- this.syncInFlightQueue.delete(key);
747
+ this.clearSyncProcessKey(key);
719
748
  }
720
749
  }
721
750
 
@@ -761,23 +790,27 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
761
790
  return this.clearSyncProcess(hash);
762
791
  }
763
792
 
764
- private clearSyncProcess(hash: string) {
765
- const inflight = this.syncInFlightQueue.get(hash);
793
+ private clearSyncProcessKey(key: SyncableKey) {
794
+ const inflight = this.syncInFlightQueue.get(key);
766
795
  if (inflight) {
767
- for (const key of inflight) {
768
- const map = this.syncInFlightQueueInverted.get(key.hashcode());
796
+ for (const peer of inflight) {
797
+ const map = this.syncInFlightQueueInverted.get(peer.hashcode());
769
798
  if (map) {
770
- map.delete(hash);
799
+ map.delete(key);
771
800
  if (map.size === 0) {
772
- this.syncInFlightQueueInverted.delete(key.hashcode());
801
+ this.syncInFlightQueueInverted.delete(peer.hashcode());
773
802
  }
774
803
  }
775
804
  }
776
805
 
777
- this.syncInFlightQueue.delete(hash);
806
+ this.syncInFlightQueue.delete(key);
778
807
  }
779
808
  }
780
809
 
810
+ private clearSyncProcess(hash: string) {
811
+ this.clearSyncProcessKey(hash);
812
+ }
813
+
781
814
  onPeerDisconnected(key: PublicSignKey | string): Promise<void> | void {
782
815
  const publicKeyHash = typeof key === "string" ? key : key.hashcode();
783
816
  return this.clearSyncProcessPublicKeyHash(publicKeyHash);