@peerbit/shared-log 12.3.4 → 12.3.5-3f16953

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/pid.ts CHANGED
@@ -63,16 +63,34 @@ export class PIDReplicationController {
63
63
  errorCoverageUnmodified;
64
64
 
65
65
  const errorFromEven = 1 / peerCount - currentFactor;
66
+ // When the network is under-covered (`totalFactor < 1`) balancing "down" (negative
67
+ // error) can further reduce coverage and force constrained peers (memory/CPU limited)
68
+ // to take boundary assignments that exceed their budgets.
69
+ //
70
+ // Use a soft clamp: only suppress negative balance strongly when the coverage deficit
71
+ // is material. This avoids oscillations around `totalFactor ~= 1`.
72
+ const coverageDeficit = Math.max(0, errorCoverageUnmodified); // ~= max(0, 1 - totalFactor)
73
+ const negativeBalanceScale =
74
+ coverageDeficit <= 0
75
+ ? 1
76
+ : 1 - Math.min(1, coverageDeficit / 0.1); // full clamp at 10% deficit
77
+ const errorFromEvenForBalance =
78
+ errorFromEven >= 0 ? errorFromEven : errorFromEven * negativeBalanceScale;
66
79
 
67
80
  const balanceErrorScaler = this.maxMemoryLimit
68
81
  ? Math.abs(errorMemory)
69
82
  : 1 - Math.abs(errorCoverage);
70
83
 
71
- const errorBalance = (this.maxMemoryLimit ? errorMemory > 0 : true)
72
- ? errorFromEven > 0
73
- ? balanceErrorScaler * errorFromEven
84
+ // Balance should be symmetric (allow negative error) so a peer can *reduce*
85
+ // participation when peerCount increases. Otherwise early joiners can get
86
+ // "stuck" over-replicating even after new peers join (in the case where no memory/CPU limits are set).
87
+ const errorBalance = this.maxMemoryLimit
88
+ ? // Only balance when we have spare memory headroom. When memory is
89
+ // constrained (`errorMemory < 0`) the memory term will dominate anyway.
90
+ errorMemory > 0
91
+ ? balanceErrorScaler * errorFromEvenForBalance
74
92
  : 0
75
- : 0;
93
+ : balanceErrorScaler * errorFromEvenForBalance;
76
94
 
77
95
  const errorCPU = peerCount > 1 ? -1 * (cpuUsage || 0) : 0;
78
96
 
package/src/ranges.ts CHANGED
@@ -2565,13 +2565,17 @@ export const debounceAggregationChanges = <
2565
2565
  let aggregated: Map<string, ReplicationChange<T>> = new Map();
2566
2566
  return {
2567
2567
  add: (change: ReplicationChange<T>) => {
2568
- const prev = aggregated.get(change.range.idString);
2568
+ // Keep different change types for the same segment id. In particular, range
2569
+ // updates produce a `replaced` + `added` pair; collapsing by id would drop the
2570
+ // `replaced` (i.e. removed) portion and prevent correct rebalancing/pruning.
2571
+ const key = `${change.type}:${change.range.idString}`;
2572
+ const prev = aggregated.get(key);
2569
2573
  if (prev) {
2570
2574
  if (prev.range.timestamp < change.range.timestamp) {
2571
- aggregated.set(change.range.idString, change);
2575
+ aggregated.set(key, change);
2572
2576
  }
2573
2577
  } else {
2574
- aggregated.set(change.range.idString, change);
2578
+ aggregated.set(key, change);
2575
2579
  }
2576
2580
  },
2577
2581
  delete: (key: string) => {
@@ -1,7 +1,13 @@
1
1
  import { field, variant, vec } from "@dao-xyz/borsh";
2
2
  import { Cache } from "@peerbit/cache";
3
3
  import { type PublicSignKey, randomBytes, toBase64 } from "@peerbit/crypto";
4
- import { type Index } from "@peerbit/indexer-interface";
4
+ import {
5
+ And,
6
+ type Index,
7
+ IntegerCompare,
8
+ Or,
9
+ type Query,
10
+ } from "@peerbit/indexer-interface";
5
11
  import type { Entry, Log } from "@peerbit/log";
6
12
  import { logger as loggerFn } from "@peerbit/logger";
7
13
  import {
@@ -13,7 +19,7 @@ import type { RPC, RequestContext } from "@peerbit/rpc";
13
19
  import { SilentDelivery } from "@peerbit/stream-interface";
14
20
  import { type EntryWithRefs } from "../exchange-heads.js";
15
21
  import { TransportMessage } from "../message.js";
16
- import { type EntryReplicated, matchEntriesInRangeQuery } from "../ranges.js";
22
+ import { type EntryReplicated } from "../ranges.js";
17
23
  import type {
18
24
  SyncableKey,
19
25
  SynchronizerComponents,
@@ -131,6 +137,50 @@ export interface SSymbol {
131
137
  symbol: bigint;
132
138
  }
133
139
 
140
+ const matchEntriesByHashNumberInRangeQuery = (range: {
141
+ start1: number | bigint;
142
+ end1: number | bigint;
143
+ start2: number | bigint;
144
+ end2: number | bigint;
145
+ }): Query => {
146
+ const c1 = new And([
147
+ new IntegerCompare({
148
+ key: "hashNumber",
149
+ compare: "gte",
150
+ value: range.start1,
151
+ }),
152
+ new IntegerCompare({
153
+ key: "hashNumber",
154
+ compare: "lt",
155
+ value: range.end1,
156
+ }),
157
+ ]);
158
+
159
+ // If range2 has length 0, or range2 is equal to range1, only make one query.
160
+ if (
161
+ range.start2 === range.end2 ||
162
+ (range.start1 === range.start2 && range.end1 === range.end2)
163
+ ) {
164
+ return c1;
165
+ }
166
+
167
+ return new Or([
168
+ c1,
169
+ new And([
170
+ new IntegerCompare({
171
+ key: "hashNumber",
172
+ compare: "gte",
173
+ value: range.start2,
174
+ }),
175
+ new IntegerCompare({
176
+ key: "hashNumber",
177
+ compare: "lt",
178
+ value: range.end2,
179
+ }),
180
+ ]),
181
+ ]);
182
+ };
183
+
134
184
  const buildEncoderOrDecoderFromRange = async <
135
185
  T extends "encoder" | "decoder",
136
186
  E = T extends "encoder" ? EncoderWrapper : DecoderWrapper,
@@ -152,7 +202,8 @@ const buildEncoderOrDecoderFromRange = async <
152
202
  const entries = await entryIndex
153
203
  .iterate(
154
204
  {
155
- query: matchEntriesInRangeQuery({
205
+ // Range sync for IBLT is done in hashNumber space.
206
+ query: matchEntriesByHashNumberInRangeQuery({
156
207
  end1: ranges.end1,
157
208
  start1: ranges.start1,
158
209
  end2: ranges.end2,
@@ -478,9 +529,13 @@ export class RatelessIBLTSynchronizer<D extends "u32" | "u64">
478
529
  }
479
530
  }
480
531
 
532
+ // For smaller sets, the original `sqrt(n)` heuristic can occasionally under-provision
533
+ // low-degree symbols early, causing an unnecessary `MoreSymbols` round-trip. Use a
534
+ // small floor to make small-delta syncs more reliable without affecting large-n behavior.
481
535
  let initialSymbols = Math.round(
482
536
  Math.sqrt(allCoordinatesToSyncWithIblt.length),
483
537
  ); // TODO choose better
538
+ initialSymbols = Math.max(64, initialSymbols);
484
539
  for (let i = 0; i < initialSymbols; i++) {
485
540
  startSync.symbols.push(
486
541
  new SymbolSerialized(encoder.produce_next_coded_symbol()),