@peerbit/shared-log 13.1.3 → 13.1.5

This diff shows the changes between package versions as published to a supported public registry. It is provided for informational purposes only and reflects the publicly released content of each version.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@peerbit/shared-log",
3
- "version": "13.1.3",
3
+ "version": "13.1.5",
4
4
  "description": "Shared log",
5
5
  "sideEffects": false,
6
6
  "type": "module",
@@ -62,27 +62,27 @@
62
62
  "pino": "^9.4.0",
63
63
  "uint8arrays": "^5.1.0",
64
64
  "@peerbit/any-store": "2.2.9",
65
+ "@peerbit/blocks-interface": "2.0.9",
66
+ "@peerbit/blocks": "4.1.2",
65
67
  "@peerbit/cache": "3.0.0",
66
- "@peerbit/blocks-interface": "2.0.8",
67
- "@peerbit/indexer-interface": "3.0.3",
68
68
  "@peerbit/crypto": "3.1.1",
69
- "@peerbit/blocks": "4.1.1",
69
+ "@peerbit/indexer-interface": "3.0.3",
70
70
  "@peerbit/indexer-sqlite3": "3.0.6",
71
+ "@peerbit/log": "6.0.26",
71
72
  "@peerbit/logger": "2.0.1",
72
- "@peerbit/program": "6.0.19",
73
+ "@peerbit/pubsub": "5.2.2",
73
74
  "@peerbit/pubsub-interface": "5.1.1",
74
- "@peerbit/pubsub": "5.2.0",
75
- "@peerbit/rpc": "6.0.23",
75
+ "@peerbit/program": "6.0.21",
76
76
  "@peerbit/riblt": "1.2.0",
77
+ "@peerbit/rpc": "6.0.25",
77
78
  "@peerbit/stream-interface": "6.0.7",
78
- "@peerbit/log": "6.0.24",
79
79
  "@peerbit/time": "3.0.0"
80
80
  },
81
81
  "devDependencies": {
82
82
  "@types/libsodium-wrappers": "^0.7.14",
83
83
  "@types/pidusage": "^2.0.5",
84
84
  "uuid": "^10.0.0",
85
- "@peerbit/test-utils": "3.0.23"
85
+ "@peerbit/test-utils": "3.0.25"
86
86
  },
87
87
  "repository": {
88
88
  "type": "git",
package/src/index.ts CHANGED
@@ -1580,67 +1580,54 @@ export class SharedLog<
1580
1580
  return keys.slice();
1581
1581
  };
1582
1582
 
1583
- // Prefer the bounded peer set we already know from the fanout overlay.
1584
- if (this._fanoutChannel && (topic === this.topic || topic === this.rpc.topic)) {
1585
- const hashes = this._fanoutChannel
1586
- .getPeerHashes({ includeSelf: false })
1587
- .slice(0, maxPeers);
1588
- if (hashes.length === 0) return cache([]);
1589
-
1590
- const keys = await Promise.all(
1591
- hashes.map((hash) => this._resolvePublicKeyFromHash(hash)),
1592
- );
1593
- const uniqueKeys: PublicSignKey[] = [];
1594
- const seen = new Set<string>();
1595
- const selfHash = this.node.identity.publicKey.hashcode();
1596
- for (const key of keys) {
1597
- if (!key) continue;
1598
- const hash = key.hashcode();
1599
- if (hash === selfHash) continue;
1600
- if (seen.has(hash)) continue;
1601
- seen.add(hash);
1602
- uniqueKeys.push(key);
1603
- }
1604
- return cache(uniqueKeys);
1605
- }
1606
-
1607
1583
  const selfHash = this.node.identity.publicKey.hashcode();
1608
- const hashes: string[] = [];
1584
+ const hashes = new Set<string>();
1585
+ const keysByHash = new Map<string, PublicSignKey>();
1586
+ const addHash = (hash: string | undefined) => {
1587
+ if (!hash || hash === selfHash || keysByHash.has(hash)) {
1588
+ return;
1589
+ }
1590
+ hashes.add(hash);
1591
+ };
1592
+ const addKey = (key: PublicSignKey | undefined) => {
1593
+ if (!key) {
1594
+ return;
1595
+ }
1596
+ const hash = key.hashcode();
1597
+ if (hash === selfHash) {
1598
+ return;
1599
+ }
1600
+ hashes.delete(hash);
1601
+ keysByHash.set(hash, key);
1602
+ };
1609
1603
 
1610
- // Best-effort provider discovery (bounded). This requires bootstrap trackers.
1611
- try {
1612
- const fanoutService = getSharedLogFanoutService(this.node.services);
1613
- if (fanoutService?.queryProviders) {
1614
- const ns = `shared-log|${this.topic}`;
1615
- const seed = hashToSeed32(topic);
1616
- const providers: string[] = await fanoutService.queryProviders(ns, {
1617
- want: maxPeers,
1618
- seed,
1619
- });
1620
- for (const h of providers ?? []) {
1621
- if (!h || h === selfHash) continue;
1622
- hashes.push(h);
1623
- if (hashes.length >= maxPeers) break;
1624
- }
1604
+ // Fanout is a useful hint, but it can lag direct pubsub connectivity. Keep
1605
+ // collecting other local views instead of treating an empty fanout snapshot as
1606
+ // authoritative absence.
1607
+ if (this._fanoutChannel && (topic === this.topic || topic === this.rpc.topic)) {
1608
+ for (const hash of this._fanoutChannel.getPeerHashes({
1609
+ includeSelf: false,
1610
+ })) {
1611
+ addHash(hash);
1612
+ if (hashes.size + keysByHash.size >= maxPeers) break;
1625
1613
  }
1626
- } catch {
1627
- // Best-effort only.
1628
1614
  }
1629
1615
 
1630
- // Next, use already-connected peer streams (bounded and cheap).
1631
- const peerMap: Map<string, unknown> | undefined = (this.node.services.pubsub as any)
1632
- ?.peers;
1633
- if (peerMap?.keys) {
1634
- for (const h of peerMap.keys()) {
1635
- if (!h || h === selfHash) continue;
1636
- hashes.push(h);
1637
- if (hashes.length >= maxPeers) break;
1616
+ // Already-connected peer streams are cheap and are the strongest local signal
1617
+ // when fanout/provider membership is stale.
1618
+ const peerMap: Map<string, { publicKey?: PublicSignKey }> | undefined = (this.node
1619
+ .services.pubsub as any)?.peers;
1620
+ if (peerMap?.entries) {
1621
+ for (const [hash, peer] of peerMap.entries()) {
1622
+ addKey(peer?.publicKey);
1623
+ addHash(hash);
1624
+ if (hashes.size + keysByHash.size >= maxPeers) break;
1638
1625
  }
1639
1626
  }
1640
1627
 
1641
- // Finally, fall back to libp2p connections (e.g. bootstrap peers) without requiring
1642
- // any global topic membership view.
1643
- if (hashes.length < maxPeers) {
1628
+ // Libp2p connections cover bootstrap/direct peers even before a higher-level
1629
+ // topic subscriber snapshot has converged.
1630
+ if (hashes.size + keysByHash.size < maxPeers) {
1644
1631
  const connectionManager = (this.node.services.pubsub as any)?.components
1645
1632
  ?.connectionManager;
1646
1633
  const connections = connectionManager?.getConnections?.() ?? [];
@@ -1648,38 +1635,48 @@ export class SharedLog<
1648
1635
  const peerId = conn?.remotePeer;
1649
1636
  if (!peerId) continue;
1650
1637
  try {
1651
- const h = getPublicKeyFromPeerId(peerId).hashcode();
1652
- if (!h || h === selfHash) continue;
1653
- hashes.push(h);
1654
- if (hashes.length >= maxPeers) break;
1638
+ addKey(getPublicKeyFromPeerId(peerId));
1639
+ if (hashes.size + keysByHash.size >= maxPeers) break;
1655
1640
  } catch {
1656
1641
  // Best-effort only.
1657
1642
  }
1658
1643
  }
1659
1644
  }
1660
1645
 
1661
- if (hashes.length === 0) return cache([]);
1662
-
1663
- const uniqueHashes: string[] = [];
1664
- const seen = new Set<string>();
1665
- for (const h of hashes) {
1666
- if (seen.has(h)) continue;
1667
- seen.add(h);
1668
- uniqueHashes.push(h);
1669
- if (uniqueHashes.length >= maxPeers) break;
1646
+ // Best-effort provider discovery (bounded). This requires bootstrap trackers.
1647
+ if (hashes.size + keysByHash.size < maxPeers) {
1648
+ try {
1649
+ const fanoutService = getSharedLogFanoutService(this.node.services);
1650
+ if (fanoutService?.queryProviders) {
1651
+ const ns = `shared-log|${this.topic}`;
1652
+ const seed = hashToSeed32(topic);
1653
+ const providers: string[] = await fanoutService.queryProviders(ns, {
1654
+ want: maxPeers - keysByHash.size - hashes.size,
1655
+ seed,
1656
+ });
1657
+ for (const hash of providers ?? []) {
1658
+ addHash(hash);
1659
+ if (hashes.size + keysByHash.size >= maxPeers) break;
1660
+ }
1661
+ }
1662
+ } catch {
1663
+ // Best-effort only.
1664
+ }
1670
1665
  }
1671
1666
 
1667
+ if (hashes.size === 0 && keysByHash.size === 0) return cache([]);
1668
+
1669
+ const unresolvedHashes = [...hashes].slice(
1670
+ 0,
1671
+ Math.max(0, maxPeers - keysByHash.size),
1672
+ );
1672
1673
  const keys = await Promise.all(
1673
- uniqueHashes.map((hash) => this._resolvePublicKeyFromHash(hash)),
1674
+ unresolvedHashes.map((hash) => this._resolvePublicKeyFromHash(hash)),
1674
1675
  );
1675
- const uniqueKeys: PublicSignKey[] = [];
1676
1676
  for (const key of keys) {
1677
- if (!key) continue;
1678
- const hash = key.hashcode();
1679
- if (hash === selfHash) continue;
1680
- uniqueKeys.push(key);
1677
+ addKey(key);
1681
1678
  }
1682
- return cache(uniqueKeys);
1679
+ return cache([...keysByHash.values()].slice(0, maxPeers));
1683
1680
  }
1684
1681
 
1685
1682
  private invalidateTopicSubscribersCache(...topics: (string | undefined)[]) {
@@ -6556,10 +6553,11 @@ export class SharedLog<
6556
6553
  // If it is still warming up (for example, only contains self), supplement with
6557
6554
  // current subscribers until we have enough candidates for this decision.
6558
6555
  let peerFilter: Set<string> | undefined = undefined;
6556
+ let selfReplicating = false;
6559
6557
  if (options?.candidates) {
6560
6558
  peerFilter = new Set(options.candidates);
6561
6559
  } else {
6562
- const selfReplicating = await this.isReplicating();
6560
+ selfReplicating = await this.isReplicating();
6563
6561
  if (this.uniqueReplicators.size > 0) {
6564
6562
  peerFilter = new Set(this.uniqueReplicators);
6565
6563
  if (selfReplicating) {
@@ -6601,6 +6599,17 @@ export class SharedLog<
6601
6599
  }
6602
6600
  }
6603
6601
 
6602
+ if (!options?.candidates) {
6603
+ // Reachability snapshots can briefly under-report peers. Do not let that
6604
+ // turn a known mature indexed range into a false self-only full replica.
6605
+ peerFilter = await this.includeIndexedLeaderCandidatesWhenUnderfilled(
6606
+ peerFilter,
6607
+ roleAge,
6608
+ cursors.length,
6609
+ selfReplicating,
6610
+ );
6611
+ }
6612
+
6604
6613
  if (!options?.candidates) {
6605
6614
  const fullReplicaLeaders = await this.findFullReplicaLeaders(
6606
6615
  cursors.length,
@@ -6624,6 +6633,47 @@ export class SharedLog<
6624
6633
  );
6625
6634
  }
6626
6635
 
6636
+ private async includeIndexedLeaderCandidatesWhenUnderfilled(
6637
+ peerFilter: Set<string> | undefined,
6638
+ roleAge: number,
6639
+ replicas: number,
6640
+ selfReplicating: boolean,
6641
+ ): Promise<Set<string> | undefined> {
6642
+ if (!peerFilter || peerFilter.size > replicas) {
6643
+ return peerFilter;
6644
+ }
6645
+
6646
+ const selfHash = this.node.identity.publicKey.hashcode();
6647
+ const now = Date.now();
6648
+ const iterator = this.replicationIndex.iterate(
6649
+ {},
6650
+ { shape: { hash: true, timestamp: true }, reference: true },
6651
+ );
6652
+
6653
+ try {
6654
+ for (;;) {
6655
+ const batch = await iterator.next(64);
6656
+ if (batch.length === 0) {
6657
+ break;
6658
+ }
6659
+ for (const result of batch) {
6660
+ const range = result.value;
6661
+ if (range.hash === selfHash && !selfReplicating) {
6662
+ continue;
6663
+ }
6664
+ if (!isMatured(range, now, roleAge)) {
6665
+ continue;
6666
+ }
6667
+ peerFilter.add(range.hash);
6668
+ }
6669
+ }
6670
+ } finally {
6671
+ await iterator.close();
6672
+ }
6673
+
6674
+ return peerFilter;
6675
+ }
6676
+
6627
6677
  private async findFullReplicaLeaders(
6628
6678
  replicas: number,
6629
6679
  roleAge: number,