@peerbit/shared-log 13.1.0 → 13.1.2

package/src/pid.ts CHANGED
@@ -1,3 +1,5 @@
+const MIN_MEMORY_HEADROOM_BALANCE_SCALER = 0.25;
+
 export class PIDReplicationController {
 	integral!: number;
 	prevError!: number;
@@ -49,16 +51,23 @@ export class PIDReplicationController {
 		let errorMemory = 0;
 
 		if (this.maxMemoryLimit != null) {
+			// Treat the configured storage limit as a ceiling, not the exact control
+			// target. A small reserve prevents discrete entry sizes and delayed checked
+			// prunes from repeatedly settling just above the hard budget.
+			const effectiveMemoryLimit =
+				this.maxMemoryLimit > 0 ? this.maxMemoryLimit * 0.95 : 0;
 			errorMemory =
 				currentFactor > 0 && memoryUsage > 0
-					? Math.max(Math.min(1, this.maxMemoryLimit / estimatedTotalSize), 0) -
-						currentFactor
+					? Math.max(
+							Math.min(1, effectiveMemoryLimit / estimatedTotalSize),
+							0,
+						) - currentFactor
 					: 0;
 			// Math.max(Math.min((this.maxMemoryLimit - memoryUsage) / 100e5, 1), -1)// Math.min(Math.max((this.maxMemoryLimit - memoryUsage, 0) / 10e5, 0), 1);
 		}
 
 		const errorCoverageUnmodified = Math.min(1 - totalFactor, 1);
-		const errorCoverage =
+		let errorCoverage =
 			(this.maxMemoryLimit ? 1 - Math.sqrt(Math.abs(errorMemory)) : 1) *
 			errorCoverageUnmodified;
 
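Read on its own, the new memory term targets a factor 5% below the configured budget rather than the budget itself. A minimal standalone sketch of that rule (hypothetical function and parameter names; the real controller additionally gates this on nonzero usage and blends it with coverage and balance terms):

// errorMemory > 0 means headroom remains (the factor may grow);
// errorMemory < 0 means the peer is over budget (the factor should shrink).
const memoryError = (
	maxMemoryLimit: number, // configured ceiling in bytes
	estimatedTotalSize: number, // estimated total data size in bytes
	currentFactor: number, // current replication factor in [0, 1]
): number => {
	// Aim 5% below the hard budget so discrete entry sizes and delayed
	// prunes do not settle the controller just above the ceiling.
	const effectiveLimit = maxMemoryLimit > 0 ? maxMemoryLimit * 0.95 : 0;
	return (
		Math.max(Math.min(1, effectiveLimit / estimatedTotalSize), 0) - currentFactor
	);
};

// Example: a 100 MB budget over 400 MB of data targets a factor of
// 95 MB / 400 MB = 0.2375, so a peer currently at 0.3 sees a negative error.
console.log(memoryError(100e6, 400e6, 0.3)); // -0.0625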
@@ -71,14 +80,26 @@ export class PIDReplicationController {
 		// is material. This avoids oscillations around `totalFactor ~= 1`.
 		const coverageDeficit = Math.max(0, errorCoverageUnmodified); // ~= max(0, 1 - totalFactor)
 		const negativeBalanceScale =
-			coverageDeficit <= 0
-				? 1
-				: 1 - Math.min(1, coverageDeficit / 0.1); // full clamp at 10% deficit
+			coverageDeficit <= 0 ? 1 : 1 - Math.min(1, coverageDeficit / 0.1); // full clamp at 10% deficit
 		const errorFromEvenForBalance =
 			errorFromEven >= 0 ? errorFromEven : errorFromEven * negativeBalanceScale;
 
+		const hasMemoryHeadroom =
+			this.maxMemoryLimit != null && this.maxMemoryLimit > 0 && errorMemory > 0;
+		if (hasMemoryHeadroom && errorFromEvenForBalance > 0) {
+			// Coverage surplus often means another peer has not pruned yet. Do not let
+			// that transient surplus cancel a constrained peer that is still below an
+			// even share and has storage headroom to take more work.
+			errorCoverage = Math.max(errorCoverage, 0);
+		}
+
 		const balanceErrorScaler = this.maxMemoryLimit
-			? Math.abs(errorMemory)
+			? hasMemoryHeadroom
+				? Math.max(
+						Math.abs(errorMemory),
+						MIN_MEMORY_HEADROOM_BALANCE_SCALER,
+					)
+				: Math.abs(errorMemory)
 			: 1 - Math.abs(errorCoverage);
 
 		// Balance should be symmetric (allow negative error) so a peer can *reduce*
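The floor matters because `balanceErrorScaler` multiplies the balance term: near the effective memory target, `|errorMemory|` approaches zero, which previously silenced balancing exactly when a constrained peer had settled. A sketch of the new scaler in isolation (a hypothetical standalone form of the expression above):

const MIN_MEMORY_HEADROOM_BALANCE_SCALER = 0.25;

const balanceScaler = (errorMemory: number, hasMemoryHeadroom: boolean) =>
	hasMemoryHeadroom
		? Math.max(Math.abs(errorMemory), MIN_MEMORY_HEADROOM_BALANCE_SCALER)
		: Math.abs(errorMemory);

console.log(balanceScaler(0.01, true)); // 0.25: balance still acts near the target
console.log(balanceScaler(-0.4, false)); // 0.4: over-budget peers keep the raw scaler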
@@ -99,7 +120,7 @@ export class PIDReplicationController {
 		// TODO make these self-optimizing
 
 		let totalError: number;
-		const errorMemoryFactor = 0.9;
+		let errorMemoryFactor = 0.9;
 		const errorBalanceFactor = 0.6;
 
 		totalError =
@@ -108,6 +129,20 @@ export class PIDReplicationController {
 
 		// Computer is getting too full?
 		if (errorMemory < 0) {
+			if (
+				this.maxMemoryLimit != null &&
+				this.maxMemoryLimit > 0 &&
+				coverageDeficit > 0
+			) {
+				// When the ring is materially under-covered, shrinking a memory-limited
+				// range can increase gap-boundary assignments and make local memory usage
+				// worse, not better. Let the coverage term dominate until the floor is
+				// restored, while preserving the hard shrink behavior for zero-capacity peers.
+				errorMemoryFactor = Math.max(
+					0.2,
+					errorMemoryFactor - 0.7 * Math.min(1, coverageDeficit / 0.25),
+				);
+			}
 			totalError =
 				errorMemory * errorMemoryFactor + totalError * (1 - errorMemoryFactor);
 		}
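The damping above is linear in the coverage deficit. A sketch of the resulting weight (a hypothetical standalone form; in context it only applies when `errorMemory < 0` and a positive memory limit is configured):

const memoryWeight = (coverageDeficit: number) =>
	Math.max(0.2, 0.9 - 0.7 * Math.min(1, coverageDeficit / 0.25));

console.log(memoryWeight(0)); // 0.9: fully covered ring, memory pressure dominates
console.log(memoryWeight(0.1)); // 0.62: partial deficit, coverage gains influence
console.log(memoryWeight(0.25)); // 0.2: floor reached, coverage dominates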
@@ -138,7 +173,19 @@ export class PIDReplicationController {
 
 		// Calculate the new replication factor
 		const change = pTerm + iTerm + dTerm;
-		const newFactor = currentFactor + change;
+		let newFactor = currentFactor + change;
+
+		if (this.maxCPUUsage != null && this.maxMemoryLimit == null) {
+			// CPU pressure may shed surplus replicas, but it must not create a
+			// coverage gap where the network no longer has one full copy.
+			const coverageSurplus = Math.max(0, totalFactor - 1);
+			if (newFactor < currentFactor) {
+				newFactor =
+					coverageSurplus <= 0
+						? currentFactor
+						: Math.max(newFactor, currentFactor - coverageSurplus);
+			}
+		}
 
 		// Update state for the next iteration
 		this.prevError = totalError;
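The clamp limits only downward movement, and only by the network-wide surplus; the real code applies it just when a CPU limit is set and no memory limit is. A standalone sketch of the guard (hypothetical names; `totalFactor` is the summed replication factor across peers, as used elsewhere in the controller):

const clampCpuShed = (
	currentFactor: number,
	proposedFactor: number,
	totalFactor: number,
) => {
	if (proposedFactor >= currentFactor) return proposedFactor; // growth is never clamped
	const coverageSurplus = Math.max(0, totalFactor - 1);
	return coverageSurplus <= 0
		? currentFactor // no surplus: hold position rather than open a coverage gap
		: Math.max(proposedFactor, currentFactor - coverageSurplus);
};

console.log(clampCpuShed(0.5, 0.3, 1.05)); // 0.45: shed at most the 0.05 surplus
console.log(clampCpuShed(0.5, 0.3, 1.0)); // 0.5: no surplus, no shedding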
package/src/ranges.ts CHANGED
@@ -2568,7 +2568,15 @@ export const debounceAggregationChanges = <
 	// Keep different change types for the same segment id. In particular, range
 	// updates produce a `replaced` + `added` pair; collapsing by id would drop the
 	// "removed" portion and prevent correct rebalancing/pruning.
-	const key = `${change.type}:${change.range.idString}`;
+	//
+	// Preserve each distinct replaced range as well. Adaptive replication can
+	// shrink a segment several times before the debounced rebalance runs; if we
+	// keep only the newest `replaced` range, entries from earlier wider ranges are
+	// never revisited and can stay resident after they become prunable.
+	const key =
+		change.type === "replaced"
+			? `${change.type}:${change.range.idString}:${change.range.rangeHash}`
+			: `${change.type}:${change.range.idString}`;
 	const prev = aggregated.get(key);
 	if (prev) {
 		if (prev.range.timestamp < change.range.timestamp) {
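The effect of the key change is easiest to see with two successive shrinks of the same segment. A sketch with simplified types (hypothetical; the real `change.range` values are indexed range objects):

type RangeChange = {
	type: "added" | "removed" | "replaced";
	range: { idString: string; rangeHash: string };
};

const aggregationKey = (change: RangeChange) =>
	change.type === "replaced"
		? `${change.type}:${change.range.idString}:${change.range.rangeHash}`
		: `${change.type}:${change.range.idString}`;

// Two shrinks of segment "seg-1" no longer collapse into one key, so the
// debounced rebalance revisits entries from the earlier, wider range too.
const first = { type: "replaced" as const, range: { idString: "seg-1", rangeHash: "h1" } };
const second = { type: "replaced" as const, range: { idString: "seg-1", rangeHash: "h2" } };
console.log(aggregationKey(first) !== aggregationKey(second)); // true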
@@ -58,6 +58,17 @@ export class RequestMaybeSyncCoordinate extends TransportMessage {
 	}
 }
 
+@variant([0, 6])
+export class ConfirmEntriesMessage extends TransportMessage {
+	@field({ type: vec("string") })
+	hashes: string[];
+
+	constructor(props: { hashes: string[] }) {
+		super();
+		this.hashes = props.hashes;
+	}
+}
+
 const getHashesFromSymbols = async (
 	symbols: bigint[],
 	entryIndex: Index<EntryReplicated<any>, any>,
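`ConfirmEntriesMessage` follows the package's borsh wire format: the `@variant([0, 6])` path tags the message so a receiver can dispatch on the base class. A self-contained sketch of the same pattern using @dao-xyz/borsh (the `Base` class and the variant index here are stand-ins, not the package's actual tags):

import { deserialize, field, serialize, variant, vec } from "@dao-xyz/borsh";

abstract class Base {}

@variant(0)
class Confirm extends Base {
	@field({ type: vec("string") })
	hashes: string[];

	constructor(props: { hashes: string[] }) {
		super();
		this.hashes = props.hashes;
	}
}

const bytes = serialize(new Confirm({ hashes: ["hashA", "hashB"] }));
const back = deserialize(bytes, Base);
console.log(back instanceof Confirm); // true: dispatched via the variant tag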
@@ -109,6 +120,10 @@ const SESSION_POLL_INTERVAL_MS = 100;
 const DEFAULT_MAX_HASHES_PER_MESSAGE = 1_024;
 const DEFAULT_MAX_COORDINATES_PER_MESSAGE = 1_024;
 const DEFAULT_MAX_CONVERGENT_TRACKED_HASHES = 4_096;
+// Retry missing entry requests when the first response was lost (for example, due to
+// pubsub stream warmup). Keep it coarse-grained so we do not hammer the network under
+// large historical backfills.
+const SIMPLE_SYNC_RETRY_AFTER_MS = 10_000;
 
 const createDeferred = <T>() => {
 	let resolve!: (value: T | PromiseLike<T>) => void;
@@ -612,7 +627,14 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
 				options?.skipCheck ||
 				!(await this.checkHasCoordinateOrHash(coordinateOrHash))
 			) {
-				this.syncInFlightQueue.set(coordinateOrHash, []);
+				// Track the initial sender so we can retry if the first request is lost.
+				this.syncInFlightQueue.set(coordinateOrHash, [from]);
+				let inverted = this.syncInFlightQueueInverted.get(from.hashcode());
+				if (!inverted) {
+					inverted = new Set();
+					this.syncInFlightQueueInverted.set(from.hashcode(), inverted);
+				}
+				inverted.add(coordinateOrHash);
 				requestHashes.push(coordinateOrHash); // request immediately (first time we have seen this hash)
 			}
 		}
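The queue and its inverted index form a two-way mapping: key to candidate peers, and peer to pending keys, so a disconnect can clear only that peer's entries. A reduced sketch with string peer ids (a hypothetical simplification of the real `PublicSignKey`-keyed structures):

const syncQueue = new Map<string, string[]>(); // key -> candidate peer ids
const invertedIndex = new Map<string, Set<string>>(); // peer id -> pending keys

const track = (key: string, peer: string) => {
	syncQueue.set(key, [peer]);
	let keys = invertedIndex.get(peer);
	if (!keys) {
		keys = new Set();
		invertedIndex.set(peer, keys);
	}
	keys.add(key);
};

track("entry-hash-1", "peerA");
console.log(invertedIndex.get("peerA")?.has("entry-hash-1")); // true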
@@ -688,6 +710,7 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
 
 		const requestHashes: SyncableKey[] = [];
 		const from: Set<string> = new Set();
+		const now = Date.now();
 		for (const [key, value] of this.syncInFlightQueue) {
 			if (this.closed) {
 				return;
@@ -696,26 +719,32 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
 			const has = await this.checkHasCoordinateOrHash(key);
 
 			if (!has) {
-				// TODO test that this if statement actually does anymeaningfull
-				if (value.length > 0) {
+				if (value.length === 0) {
+					// No remaining peers to ask; drop the pending key to avoid leaking.
+					this.clearSyncProcessKey(key);
+					continue;
+				}
+
+				// Ask one peer per key per loop. If a previous request is still considered
+				// "recent", wait until the retry window elapses.
+				const candidate = value[0]!;
+				const publicKeyHash = candidate.hashcode();
+				const inflightTimestamp = this.syncInFlight
+					.get(publicKeyHash)
+					?.get(key)?.timestamp;
+				if (
+					inflightTimestamp == null ||
+					now - inflightTimestamp >= SIMPLE_SYNC_RETRY_AFTER_MS
+				) {
 					requestHashes.push(key);
-					const publicKeyHash = value.shift()!.hashcode();
 					from.add(publicKeyHash);
-					const invertedSet =
-						this.syncInFlightQueueInverted.get(publicKeyHash);
-					if (invertedSet) {
-						if (invertedSet.delete(key)) {
-							if (invertedSet.size === 0) {
-								this.syncInFlightQueueInverted.delete(publicKeyHash);
-							}
-						}
+					if (value.length > 1) {
+						// Rotate for fairness across multiple possible sources.
+						value.push(value.shift()!);
 					}
 				}
-				if (value.length === 0) {
-					this.syncInFlightQueue.delete(key); // no-one more to ask for this entry
-				}
 			} else {
-				this.syncInFlightQueue.delete(key);
+				this.clearSyncProcessKey(key);
 			}
 		}
 
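Taken together, the loop implements a retry policy: at most one candidate is asked per key per pass, a key is re-requested only after the window elapses, and candidates rotate so retries fan out across sources. A condensed sketch of those three rules (a hypothetical standalone form):

const RETRY_AFTER_MS = 10_000; // mirrors SIMPLE_SYNC_RETRY_AFTER_MS

const shouldRequest = (lastRequestedAt: number | undefined, now: number) =>
	lastRequestedAt == null || now - lastRequestedAt >= RETRY_AFTER_MS;

const rotate = <T>(candidates: T[]) => {
	if (candidates.length > 1) candidates.push(candidates.shift()!);
};

const candidates = ["peerA", "peerB"];
if (shouldRequest(undefined, Date.now())) {
	// The first attempt targets candidates[0] ("peerA") ...
	rotate(candidates); // ... so a later retry will target "peerB".
}
console.log(candidates[0]); // "peerB"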
@@ -761,23 +790,27 @@ export class SimpleSyncronizer<R extends "u32" | "u64">
 		return this.clearSyncProcess(hash);
 	}
 
-	private clearSyncProcess(hash: string) {
-		const inflight = this.syncInFlightQueue.get(hash);
+	private clearSyncProcessKey(key: SyncableKey) {
+		const inflight = this.syncInFlightQueue.get(key);
 		if (inflight) {
-			for (const key of inflight) {
-				const map = this.syncInFlightQueueInverted.get(key.hashcode());
+			for (const peer of inflight) {
+				const map = this.syncInFlightQueueInverted.get(peer.hashcode());
 				if (map) {
-					map.delete(hash);
+					map.delete(key);
 					if (map.size === 0) {
-						this.syncInFlightQueueInverted.delete(key.hashcode());
+						this.syncInFlightQueueInverted.delete(peer.hashcode());
 					}
 				}
 			}
 
-			this.syncInFlightQueue.delete(hash);
+			this.syncInFlightQueue.delete(key);
 		}
 	}
 
+	private clearSyncProcess(hash: string) {
+		this.clearSyncProcessKey(hash);
+	}
+
 	onPeerDisconnected(key: PublicSignKey | string): Promise<void> | void {
 		const publicKeyHash = typeof key === "string" ? key : key.hashcode();
 		return this.clearSyncProcessPublicKeyHash(publicKeyHash);