@peerbit/shared-log 13.0.23 → 13.0.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@peerbit/shared-log",
3
- "version": "13.0.23",
3
+ "version": "13.0.24",
4
4
  "description": "Shared log",
5
5
  "sideEffects": false,
6
6
  "type": "module",
@@ -62,18 +62,18 @@
62
62
  "pino": "^9.4.0",
63
63
  "uint8arrays": "^5.1.0",
64
64
  "@peerbit/any-store": "2.2.9",
65
- "@peerbit/blocks": "4.0.11",
66
- "@peerbit/crypto": "3.1.1",
67
65
  "@peerbit/blocks-interface": "2.0.8",
68
66
  "@peerbit/cache": "3.0.0",
67
+ "@peerbit/blocks": "4.0.11",
68
+ "@peerbit/crypto": "3.1.1",
69
69
  "@peerbit/indexer-interface": "3.0.3",
70
70
  "@peerbit/indexer-sqlite3": "3.0.6",
71
71
  "@peerbit/log": "6.0.21",
72
72
  "@peerbit/logger": "2.0.1",
73
+ "@peerbit/program": "6.0.17",
73
74
  "@peerbit/pubsub": "5.1.6",
74
75
  "@peerbit/pubsub-interface": "5.1.1",
75
76
  "@peerbit/rpc": "6.0.21",
76
- "@peerbit/program": "6.0.17",
77
77
  "@peerbit/stream-interface": "6.0.7",
78
78
  "@peerbit/time": "3.0.0",
79
79
  "@peerbit/riblt": "1.2.0"
package/src/index.ts CHANGED
@@ -495,6 +495,7 @@ const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE = 0.01;
495
495
  const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_CPU_LIMIT = 0.005;
496
496
  const RECALCULATE_PARTICIPATION_MIN_RELATIVE_CHANGE_WITH_MEMORY_LIMIT = 0.001;
497
497
  const RECALCULATE_PARTICIPATION_RELATIVE_DENOMINATOR_FLOOR = 1e-3;
498
+ const TOPIC_SUBSCRIBERS_CACHE_TTL_MS = 250;
498
499
  const ADAPTIVE_REBALANCE_IDLE_INTERVAL_MULTIPLIER = 5;
499
500
  const ADAPTIVE_REBALANCE_MIN_IDLE_AFTER_LOCAL_APPEND_MS = 10_000;
500
501
 
@@ -752,6 +753,10 @@ export class SharedLog<
752
753
  private _repairSweepRunning!: boolean;
753
754
  private _repairSweepForceFreshPending!: boolean;
754
755
  private _repairSweepAddedPeersPending!: Set<string>;
756
+ private _topicSubscribersCache!: Map<
757
+ string,
758
+ { expiresAt: number; keys: PublicSignKey[] }
759
+ >;
755
760
 
756
761
  // regular distribution checks
757
762
  private distributeQueue?: PQueue;
@@ -1364,14 +1369,26 @@ export class SharedLog<
1364
1369
  private async _getTopicSubscribers(
1365
1370
  topic: string,
1366
1371
  ): Promise<PublicSignKey[] | undefined> {
1372
+ const cached = this._topicSubscribersCache.get(topic);
1373
+ if (cached && cached.expiresAt > Date.now()) {
1374
+ return cached.keys.slice();
1375
+ }
1376
+
1367
1377
  const maxPeers = 64;
1378
+ const cache = (keys: PublicSignKey[]) => {
1379
+ this._topicSubscribersCache.set(topic, {
1380
+ expiresAt: Date.now() + TOPIC_SUBSCRIBERS_CACHE_TTL_MS,
1381
+ keys,
1382
+ });
1383
+ return keys.slice();
1384
+ };
1368
1385
 
1369
1386
  // Prefer the bounded peer set we already know from the fanout overlay.
1370
1387
  if (this._fanoutChannel && (topic === this.topic || topic === this.rpc.topic)) {
1371
1388
  const hashes = this._fanoutChannel
1372
1389
  .getPeerHashes({ includeSelf: false })
1373
1390
  .slice(0, maxPeers);
1374
- if (hashes.length === 0) return [];
1391
+ if (hashes.length === 0) return cache([]);
1375
1392
 
1376
1393
  const keys = await Promise.all(
1377
1394
  hashes.map((hash) => this._resolvePublicKeyFromHash(hash)),
@@ -1387,7 +1404,7 @@ export class SharedLog<
1387
1404
  seen.add(hash);
1388
1405
  uniqueKeys.push(key);
1389
1406
  }
1390
- return uniqueKeys;
1407
+ return cache(uniqueKeys);
1391
1408
  }
1392
1409
 
1393
1410
  const selfHash = this.node.identity.publicKey.hashcode();
@@ -1444,7 +1461,7 @@ export class SharedLog<
1444
1461
  }
1445
1462
  }
1446
1463
 
1447
- if (hashes.length === 0) return [];
1464
+ if (hashes.length === 0) return cache([]);
1448
1465
 
1449
1466
  const uniqueHashes: string[] = [];
1450
1467
  const seen = new Set<string>();
@@ -1465,7 +1482,18 @@ export class SharedLog<
1465
1482
  if (hash === selfHash) continue;
1466
1483
  uniqueKeys.push(key);
1467
1484
  }
1468
- return uniqueKeys;
1485
+ return cache(uniqueKeys);
1486
+ }
1487
+
1488
+ private invalidateTopicSubscribersCache(...topics: (string | undefined)[]) {
1489
+ for (const topic of topics) {
1490
+ if (!topic) continue;
1491
+ this._topicSubscribersCache.delete(topic);
1492
+ }
1493
+ }
1494
+
1495
+ private invalidateSharedLogTopicSubscribersCache() {
1496
+ this.invalidateTopicSubscribersCache(this.topic, this.rpc.topic);
1469
1497
  }
1470
1498
 
1471
1499
  // @deprecated
@@ -2934,6 +2962,7 @@ export class SharedLog<
2934
2962
  this._repairSweepRunning = false;
2935
2963
  this._repairSweepForceFreshPending = false;
2936
2964
  this._repairSweepAddedPeersPending = new Set();
2965
+ this._topicSubscribersCache = new Map();
2937
2966
  this.coordinateToHash = new Cache<string>({ max: 1e6, ttl: 1e4 });
2938
2967
  this.recentlyRebalanced = new Cache<string>({ max: 1e4, ttl: 1e5 });
2939
2968
 
@@ -3958,39 +3987,40 @@ export class SharedLog<
3958
3987
  this.coordinateToHash.clear();
3959
3988
  this.recentlyRebalanced.clear();
3960
3989
  this.uniqueReplicators.clear();
3961
- this._closeController.abort();
3990
+ this._topicSubscribersCache.clear();
3991
+ this._closeController.abort();
3962
3992
 
3963
- clearInterval(this.interval);
3964
- this.stopReplicatorLivenessSweep();
3993
+ clearInterval(this.interval);
3994
+ this.stopReplicatorLivenessSweep();
3965
3995
 
3966
- this.node.services.pubsub.removeEventListener(
3967
- "subscribe",
3968
- this._onSubscriptionFn,
3996
+ this.node.services.pubsub.removeEventListener(
3997
+ "subscribe",
3998
+ this._onSubscriptionFn,
3969
3999
  );
3970
4000
 
3971
4001
  this.node.services.pubsub.removeEventListener(
3972
4002
  "unsubscribe",
3973
4003
  this._onUnsubscriptionFn,
3974
4004
  );
3975
- for (const timer of this._repairRetryTimers) {
3976
- clearTimeout(timer);
3977
- }
3978
- this._repairRetryTimers.clear();
3979
- this._recentRepairDispatch.clear();
3980
- this._repairSweepRunning = false;
3981
- this._repairSweepForceFreshPending = false;
3982
- this._repairSweepAddedPeersPending.clear();
4005
+ for (const timer of this._repairRetryTimers) {
4006
+ clearTimeout(timer);
4007
+ }
4008
+ this._repairRetryTimers.clear();
4009
+ this._recentRepairDispatch.clear();
4010
+ this._repairSweepRunning = false;
4011
+ this._repairSweepForceFreshPending = false;
4012
+ this._repairSweepAddedPeersPending.clear();
3983
4013
 
3984
4014
  for (const [_k, v] of this._pendingDeletes) {
3985
4015
  v.clear();
3986
4016
  v.promise.resolve(); // TODO or reject?
3987
4017
  }
3988
- for (const [_k, v] of this._pendingIHave) {
3989
- v.clear();
3990
- }
3991
- for (const [_k, v] of this._checkedPruneRetries) {
3992
- if (v.timer) clearTimeout(v.timer);
3993
- }
4018
+ for (const [_k, v] of this._pendingIHave) {
4019
+ v.clear();
4020
+ }
4021
+ for (const [_k, v] of this._checkedPruneRetries) {
4022
+ if (v.timer) clearTimeout(v.timer);
4023
+ }
3994
4024
 
3995
4025
  await this.remoteBlocks.stop();
3996
4026
  this._pendingDeletes.clear();
@@ -5390,6 +5420,7 @@ export class SharedLog<
5390
5420
  entry: Entry<T> | EntryReplicated<R> | ShallowEntry,
5391
5421
  options?: {
5392
5422
  roleAge?: number;
5423
+ candidates?: Iterable<string>;
5393
5424
  onLeader?: (key: string) => void;
5394
5425
  // persist even if not leader
5395
5426
  persist?:
@@ -5433,6 +5464,7 @@ export class SharedLog<
5433
5464
  },
5434
5465
  options?: {
5435
5466
  roleAge?: number;
5467
+ candidates?: Iterable<string>;
5436
5468
  onLeader?: (key: string) => void;
5437
5469
  // persist even if not leader
5438
5470
  persist?:
@@ -5458,6 +5490,7 @@ export class SharedLog<
5458
5490
  cursors: NumberFromType<R>[],
5459
5491
  options?: {
5460
5492
  roleAge?: number;
5493
+ candidates?: Iterable<string>;
5461
5494
  },
5462
5495
  ): Promise<Map<string, { intersecting: boolean }>> {
5463
5496
  const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO: -500 is added so that if someone else is just as new as us, then we treat them as mature as us. Without -500 we might be slower syncing if two nodes start almost at the same time
@@ -5467,44 +5500,48 @@ export class SharedLog<
5467
5500
  // If it is still warming up (for example, only contains self), supplement with
5468
5501
  // current subscribers until we have enough candidates for this decision.
5469
5502
  let peerFilter: Set<string> | undefined = undefined;
5470
- const selfReplicating = await this.isReplicating();
5471
- if (this.uniqueReplicators.size > 0) {
5472
- peerFilter = new Set(this.uniqueReplicators);
5473
- if (selfReplicating) {
5474
- peerFilter.add(selfHash);
5475
- } else {
5476
- peerFilter.delete(selfHash);
5477
- }
5503
+ if (options?.candidates) {
5504
+ peerFilter = new Set(options.candidates);
5505
+ } else {
5506
+ const selfReplicating = await this.isReplicating();
5507
+ if (this.uniqueReplicators.size > 0) {
5508
+ peerFilter = new Set(this.uniqueReplicators);
5509
+ if (selfReplicating) {
5510
+ peerFilter.add(selfHash);
5511
+ } else {
5512
+ peerFilter.delete(selfHash);
5513
+ }
5478
5514
 
5479
- try {
5480
- const subscribers = await this._getTopicSubscribers(this.topic);
5481
- if (subscribers && subscribers.length > 0) {
5482
- for (const subscriber of subscribers) {
5483
- peerFilter.add(subscriber.hashcode());
5484
- }
5485
- if (selfReplicating) {
5486
- peerFilter.add(selfHash);
5487
- } else {
5488
- peerFilter.delete(selfHash);
5515
+ try {
5516
+ const subscribers = await this._getTopicSubscribers(this.topic);
5517
+ if (subscribers && subscribers.length > 0) {
5518
+ for (const subscriber of subscribers) {
5519
+ peerFilter.add(subscriber.hashcode());
5520
+ }
5521
+ if (selfReplicating) {
5522
+ peerFilter.add(selfHash);
5523
+ } else {
5524
+ peerFilter.delete(selfHash);
5525
+ }
5489
5526
  }
5527
+ } catch {
5528
+ // Best-effort only; keep current peerFilter.
5490
5529
  }
5491
- } catch {
5492
- // Best-effort only; keep current peerFilter.
5493
- }
5494
- } else {
5495
- try {
5496
- const subscribers =
5497
- (await this._getTopicSubscribers(this.topic)) ?? undefined;
5498
- if (subscribers && subscribers.length > 0) {
5499
- peerFilter = new Set(subscribers.map((key) => key.hashcode()));
5500
- if (selfReplicating) {
5501
- peerFilter.add(selfHash);
5502
- } else {
5503
- peerFilter.delete(selfHash);
5530
+ } else {
5531
+ try {
5532
+ const subscribers =
5533
+ (await this._getTopicSubscribers(this.topic)) ?? undefined;
5534
+ if (subscribers && subscribers.length > 0) {
5535
+ peerFilter = new Set(subscribers.map((key) => key.hashcode()));
5536
+ if (selfReplicating) {
5537
+ peerFilter.add(selfHash);
5538
+ } else {
5539
+ peerFilter.delete(selfHash);
5540
+ }
5504
5541
  }
5542
+ } catch {
5543
+ // Best-effort only; if pubsub isn't ready, do a full scan.
5505
5544
  }
5506
- } catch {
5507
- // Best-effort only; if pubsub isn't ready, do a full scan.
5508
5545
  }
5509
5546
  }
5510
5547
  return getSamples<R>(
@@ -6156,30 +6193,48 @@ export class SharedLog<
6156
6193
  }
6157
6194
  }
6158
6195
 
6159
- const changed = false;
6160
- const replacedPeers = new Set<string>();
6161
- for (const change of changes) {
6162
- if (change.type === "replaced" && change.range.hash !== selfHash) {
6163
- replacedPeers.add(change.range.hash);
6164
- }
6165
- }
6166
- const addedPeers = new Set<string>();
6167
- for (const change of changes) {
6168
- if (change.type === "added" || change.type === "replaced") {
6169
- const hash = change.range.hash;
6170
- if (hash !== selfHash) {
6171
- // Range updates can reassign entries to an existing peer shortly after it
6172
- // already received a subset. Avoid suppressing legitimate follow-up repair.
6173
- this._recentRepairDispatch.delete(hash);
6196
+ const changed = false;
6197
+ const addedPeers = new Set<string>();
6198
+ const warmupPeers = new Set<string>();
6199
+ const hasSelfWarmupChange = changes.some(
6200
+ (change) =>
6201
+ change.range.hash === selfHash &&
6202
+ (change.type === "added" || change.type === "replaced"),
6203
+ );
6204
+ for (const change of changes) {
6205
+ if (change.type === "added" || change.type === "replaced") {
6206
+ const hash = change.range.hash;
6207
+ if (hash !== selfHash) {
6208
+ // Range updates can reassign entries to an existing peer shortly after it
6209
+ // already received a subset. Avoid suppressing legitimate follow-up repair.
6210
+ this._recentRepairDispatch.delete(hash);
6211
+ }
6174
6212
  }
6175
- }
6176
- if (change.type === "added") {
6177
- const hash = change.range.hash;
6178
- if (hash !== selfHash && !replacedPeers.has(hash)) {
6179
- addedPeers.add(hash);
6213
+ if (change.type === "added") {
6214
+ const hash = change.range.hash;
6215
+ if (hash !== selfHash) {
6216
+ addedPeers.add(hash);
6217
+ warmupPeers.add(hash);
6218
+ }
6180
6219
  }
6181
6220
  }
6182
- }
6221
+ const hasAdaptiveStorageLimit =
6222
+ this._isAdaptiveReplicating &&
6223
+ this.replicationController?.maxMemoryLimit != null;
6224
+ const useJoinWarmupFastPath =
6225
+ !forceFreshDelivery &&
6226
+ warmupPeers.size > 0 &&
6227
+ !hasSelfWarmupChange &&
6228
+ !hasAdaptiveStorageLimit;
6229
+ const immediateRebalanceChanges = useJoinWarmupFastPath
6230
+ ? changes.filter(
6231
+ (change) =>
6232
+ !(
6233
+ change.range.hash === selfHash &&
6234
+ (change.type === "added" || change.type === "replaced")
6235
+ ),
6236
+ )
6237
+ : changes;
6183
6238
 
6184
6239
  try {
6185
6240
  const uncheckedDeliver: Map<
@@ -6191,15 +6246,15 @@ export class SharedLog<
6191
6246
  if (!entries || entries.size === 0) {
6192
6247
  return;
6193
6248
  }
6194
- const isJoinWarmupTarget = addedPeers.has(target);
6195
- const bypassRecentDedupe = isJoinWarmupTarget || forceFreshDelivery;
6196
- this.dispatchMaybeMissingEntries(target, entries, {
6197
- bypassRecentDedupe,
6198
- retryScheduleMs: isJoinWarmupTarget
6199
- ? JOIN_WARMUP_RETRY_SCHEDULE_MS
6200
- : undefined,
6201
- forceFreshDelivery,
6202
- });
6249
+ const isWarmupTarget = warmupPeers.has(target);
6250
+ const bypassRecentDedupe = isWarmupTarget || forceFreshDelivery;
6251
+ this.dispatchMaybeMissingEntries(target, entries, {
6252
+ bypassRecentDedupe,
6253
+ retryScheduleMs: isWarmupTarget
6254
+ ? JOIN_WARMUP_RETRY_SCHEDULE_MS
6255
+ : undefined,
6256
+ forceFreshDelivery,
6257
+ });
6203
6258
  uncheckedDeliver.delete(target);
6204
6259
  };
6205
6260
  const queueUncheckedDeliver = (
@@ -6220,18 +6275,85 @@ export class SharedLog<
6220
6275
  }
6221
6276
  };
6222
6277
 
6223
- for await (const entryReplicated of toRebalance<R>(
6224
- changes,
6225
- this.entryCoordinatesIndex,
6226
- this.recentlyRebalanced,
6227
- { forceFresh: forceFreshDelivery },
6228
- )) {
6278
+ if (immediateRebalanceChanges.length > 0) {
6279
+ for await (const entryReplicated of toRebalance<R>(
6280
+ immediateRebalanceChanges,
6281
+ this.entryCoordinatesIndex,
6282
+ this.recentlyRebalanced,
6283
+ {
6284
+ forceFresh: forceFreshDelivery || useJoinWarmupFastPath,
6285
+ },
6286
+ )) {
6229
6287
  if (this.closed) {
6230
6288
  break;
6231
6289
  }
6232
6290
 
6233
- let oldPeersSet: Set<string> | undefined;
6234
- if (!forceFreshDelivery) {
6291
+ if (useJoinWarmupFastPath) {
6292
+ let oldPeersSet: Set<string> | undefined;
6293
+ const gid = entryReplicated.gid;
6294
+ oldPeersSet = gidPeersHistorySnapshot.get(gid);
6295
+ if (!gidPeersHistorySnapshot.has(gid)) {
6296
+ const existing = this._gidPeersHistory.get(gid);
6297
+ oldPeersSet = existing ? new Set(existing) : undefined;
6298
+ gidPeersHistorySnapshot.set(gid, oldPeersSet);
6299
+ }
6300
+
6301
+ for (const target of warmupPeers) {
6302
+ queueUncheckedDeliver(target, entryReplicated);
6303
+ }
6304
+
6305
+ const candidatePeers = new Set<string>([selfHash]);
6306
+ for (const target of warmupPeers) {
6307
+ candidatePeers.add(target);
6308
+ }
6309
+ if (oldPeersSet) {
6310
+ for (const oldPeer of oldPeersSet) {
6311
+ candidatePeers.add(oldPeer);
6312
+ }
6313
+ }
6314
+
6315
+ const currentPeers = await this.findLeaders(
6316
+ entryReplicated.coordinates,
6317
+ entryReplicated,
6318
+ {
6319
+ roleAge: 0,
6320
+ candidates: candidatePeers,
6321
+ persist: false,
6322
+ },
6323
+ );
6324
+
6325
+ if (oldPeersSet) {
6326
+ for (const oldPeer of oldPeersSet) {
6327
+ if (!currentPeers.has(oldPeer)) {
6328
+ this.removePruneRequestSent(entryReplicated.hash);
6329
+ }
6330
+ }
6331
+ }
6332
+
6333
+ this.addPeersToGidPeerHistory(
6334
+ entryReplicated.gid,
6335
+ currentPeers.keys(),
6336
+ true,
6337
+ );
6338
+
6339
+ if (!currentPeers.has(selfHash)) {
6340
+ this.pruneDebouncedFnAddIfNotKeeping({
6341
+ key: entryReplicated.hash,
6342
+ value: { entry: entryReplicated, leaders: currentPeers },
6343
+ });
6344
+
6345
+ this.responseToPruneDebouncedFn.delete(entryReplicated.hash);
6346
+ } else {
6347
+ this.pruneDebouncedFn.delete(entryReplicated.hash);
6348
+ await this._pendingDeletes
6349
+ .get(entryReplicated.hash)
6350
+ ?.reject(new Error("Failed to delete, is leader again"));
6351
+ this.removePruneRequestSent(entryReplicated.hash);
6352
+ }
6353
+ continue;
6354
+ }
6355
+
6356
+ let oldPeersSet: Set<string> | undefined;
6235
6357
  const gid = entryReplicated.gid;
6236
6358
  oldPeersSet = gidPeersHistorySnapshot.get(gid);
6237
6359
  if (!gidPeersHistorySnapshot.has(gid)) {
@@ -6239,18 +6361,18 @@ export class SharedLog<
6239
6361
  oldPeersSet = existing ? new Set(existing) : undefined;
6240
6362
  gidPeersHistorySnapshot.set(gid, oldPeersSet);
6241
6363
  }
6242
- }
6243
- let isLeader = false;
6244
6364
 
6245
- let currentPeers = await this.findLeaders(
6246
- entryReplicated.coordinates,
6247
- entryReplicated,
6248
- {
6249
- // we do this to make sure new replicators get data even though they are not mature so they can figure out if they want to replicate more or less
6250
- // TODO make this smarter because if a new replicator is not mature and want to replicate too much data the syncing overhead can be bad
6251
- roleAge: 0,
6252
- },
6253
- );
6365
+ let isLeader = false;
6366
+ const currentPeers = await this.findLeaders(
6367
+ entryReplicated.coordinates,
6368
+ entryReplicated,
6369
+ {
6370
+ // We do this to make sure new replicators get data even though
6371
+ // they are not mature so they can figure out if they want to
6372
+ // replicate more or less.
6373
+ roleAge: 0,
6374
+ },
6375
+ );
6254
6376
 
6255
6377
  for (const [currentPeer] of currentPeers) {
6256
6378
  if (currentPeer === this.node.identity.publicKey.hashcode()) {
@@ -6263,41 +6385,63 @@ export class SharedLog<
6263
6385
  }
6264
6386
  }
6265
6387
 
6266
- if (oldPeersSet) {
6267
- for (const oldPeer of oldPeersSet) {
6268
- if (!currentPeers.has(oldPeer)) {
6269
- this.removePruneRequestSent(entryReplicated.hash);
6388
+ if (oldPeersSet) {
6389
+ for (const oldPeer of oldPeersSet) {
6390
+ if (!currentPeers.has(oldPeer)) {
6391
+ this.removePruneRequestSent(entryReplicated.hash);
6392
+ }
6270
6393
  }
6271
6394
  }
6272
- }
6273
6395
 
6274
- this.addPeersToGidPeerHistory(
6275
- entryReplicated.gid,
6276
- currentPeers.keys(),
6277
- true,
6278
- );
6396
+ this.addPeersToGidPeerHistory(
6397
+ entryReplicated.gid,
6398
+ currentPeers.keys(),
6399
+ true,
6400
+ );
6279
6401
 
6280
- if (!isLeader) {
6281
- this.pruneDebouncedFnAddIfNotKeeping({
6282
- key: entryReplicated.hash,
6283
- value: { entry: entryReplicated, leaders: currentPeers },
6284
- });
6402
+ if (!isLeader) {
6403
+ this.pruneDebouncedFnAddIfNotKeeping({
6404
+ key: entryReplicated.hash,
6405
+ value: { entry: entryReplicated, leaders: currentPeers },
6406
+ });
6285
6407
 
6286
- this.responseToPruneDebouncedFn.delete(entryReplicated.hash); // don't allow others to prune because of expecting me to replicating this entry
6287
- } else {
6288
- this.pruneDebouncedFn.delete(entryReplicated.hash);
6289
- await this._pendingDeletes
6290
- .get(entryReplicated.hash)
6291
- ?.reject(new Error("Failed to delete, is leader again"));
6292
- this.removePruneRequestSent(entryReplicated.hash);
6408
+ this.responseToPruneDebouncedFn.delete(entryReplicated.hash); // don't allow others to prune because of expecting me to replicating this entry
6409
+ } else {
6410
+ this.pruneDebouncedFn.delete(entryReplicated.hash);
6411
+ await this._pendingDeletes
6412
+ .get(entryReplicated.hash)
6413
+ ?.reject(new Error("Failed to delete, is leader again"));
6414
+ this.removePruneRequestSent(entryReplicated.hash);
6415
+ }
6416
+ }
6293
6417
  }
6294
- }
6295
6418
 
6296
- if (forceFreshDelivery || addedPeers.size > 0) {
6297
- // Schedule a coalesced background sweep for churn/join windows instead of
6298
- // scanning the whole index synchronously on each replication change.
6299
- this.scheduleRepairSweep({ forceFreshDelivery, addedPeers });
6300
- }
6419
+ if (forceFreshDelivery) {
6420
+ // Removed/shrunk ranges still need the authoritative background pass.
6421
+ this.scheduleRepairSweep({ forceFreshDelivery, addedPeers });
6422
+ } else if (useJoinWarmupFastPath) {
6423
+ // Pure join warmup uses the cheap immediate maybe-missing dispatch above,
6424
+ // then defers the authoritative sweep so it does not compete with the
6425
+ // write burst itself.
6426
+ const peers = new Set(addedPeers);
6427
+ const timer = setTimeout(() => {
6428
+ this._repairRetryTimers.delete(timer);
6429
+ if (this.closed) {
6430
+ return;
6431
+ }
6432
+ this.scheduleRepairSweep({
6433
+ forceFreshDelivery: false,
6434
+ addedPeers: peers,
6435
+ });
6436
+ }, 250);
6437
+ timer.unref?.();
6438
+ this._repairRetryTimers.add(timer);
6439
+ } else if (addedPeers.size > 0) {
6440
+ this.scheduleRepairSweep({
6441
+ forceFreshDelivery: false,
6442
+ addedPeers,
6443
+ });
6444
+ }
6301
6445
 
6302
6446
  for (const target of [...uncheckedDeliver.keys()]) {
6303
6447
  flushUncheckedDeliverTarget(target);
@@ -6336,6 +6480,7 @@ export class SharedLog<
6336
6480
  if (!prev || prev < now) {
6337
6481
  this.latestReplicationInfoMessage.set(fromHash, now);
6338
6482
  }
6483
+ this.invalidateSharedLogTopicSubscribersCache();
6339
6484
 
6340
6485
  return this.handleSubscriptionChange(
6341
6486
  evt.detail.from,
@@ -6356,6 +6501,7 @@ export class SharedLog<
6356
6501
 
6357
6502
  this.remoteBlocks.onReachable(evt.detail.from);
6358
6503
  this._replicationInfoBlockedPeers.delete(evt.detail.from.hashcode());
6504
+ this.invalidateSharedLogTopicSubscribersCache();
6359
6505
 
6360
6506
  await this.handleSubscriptionChange(
6361
6507
  evt.detail.from,