@peerbit/test-utils 2.3.18 → 2.3.19-3f16953

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/session.ts CHANGED
@@ -1,13 +1,17 @@
1
1
  import { yamux } from "@chainsafe/libp2p-yamux";
2
2
  import { DirectBlock } from "@peerbit/blocks";
3
+ import { createStore } from "@peerbit/any-store";
3
4
  import { keychain } from "@peerbit/keychain";
5
+ import { DefaultCryptoKeychain } from "@peerbit/keychain";
6
+ import { PreHash, SignatureWithKey } from "@peerbit/crypto";
4
7
  import {
5
8
  TestSession as SSession,
6
9
  listenFast,
7
10
  transportsFast,
8
11
  } from "@peerbit/libp2p-test-utils";
12
+ import type { Indices, Index, IndexEngineInitProperties } from "@peerbit/indexer-interface";
9
13
  import { type ProgramClient } from "@peerbit/program";
10
- import { DirectSub } from "@peerbit/pubsub";
14
+ import { FanoutTree, TopicControlPlane, TopicRootControlPlane } from "@peerbit/pubsub";
11
15
  import {
12
16
  type DirectStream,
13
17
  waitForNeighbour as waitForPeersStreams,
@@ -20,17 +24,260 @@ import {
20
24
  type Libp2pExtendServices,
21
25
  } from "peerbit";
22
26
  import { Peerbit } from "peerbit";
27
+ import { InMemoryNetwork, InMemorySession } from "./inmemory-libp2p.js";
23
28
 
24
29
  export type LibP2POptions = Libp2pOptions<Libp2pExtendServices>;
25
30
 
26
31
  type CreateOptions = { libp2p?: Libp2pCreateOptions; directory?: string };
32
+
33
export type InMemoryPeerbitSessionOptions = {
	/**
	 * Create a sparse underlay graph by default (recommended for large n).
	 * If omitted, the session is returned disconnected and you can call `connect*()`.
	 */
	degree?: number;
	/** Seed for the deterministic PRNG used when wiring random topologies. */
	seed?: number;
	/** Maximum number of concurrent dials while wiring the graph. */
	concurrency?: number;

	/**
	 * In-memory transport knobs.
	 */
	network?: ConstructorParameters<typeof InMemoryNetwork>[0];
	/** First listen port; peers are assigned ports counting up from here. */
	basePort?: number;

	/**
	 * Skip expensive crypto (sign/verify) in stream-based services.
	 * Keeps identity/session semantics but uses dummy signatures.
	 */
	mockCrypto?: boolean;

	/**
	 * Use an ultra-light indexer by default so thousands of peers don't each spin up
	 * a sqlite3 instance. Override if you need program/indexer behavior in a test.
	 */
	indexer?: (directory?: string) => Promise<Indices> | Indices;
};
60
+
61
+ class NoopIndices implements Indices {
62
+ private readonly store = createStore();
63
+
64
+ async init<T extends Record<string, any>, NestedType>(
65
+ _properties: IndexEngineInitProperties<T, NestedType>,
66
+ ): Promise<Index<T, NestedType>> {
67
+ throw new Error(
68
+ "NoopIndices: indexing disabled for this session (pass `indexer` to enable).",
69
+ );
70
+ }
71
+
72
+ async scope(_name: string): Promise<Indices> {
73
+ return this;
74
+ }
75
+
76
+ async start(): Promise<void> {
77
+ await this.store.open();
78
+ }
79
+
80
+ async stop(): Promise<void> {
81
+ await this.store.close();
82
+ }
83
+
84
+ async drop(): Promise<void> {
85
+ await this.store.clear();
86
+ }
87
+ }
88
+
89
+ class SimTopicControlPlane extends TopicControlPlane {
90
+ constructor(c: any, opts?: any, mockCrypto = true) {
91
+ super(c, opts);
92
+ if (mockCrypto) {
93
+ this.sign = async () =>
94
+ new SignatureWithKey({
95
+ signature: new Uint8Array([0]),
96
+ publicKey: this.publicKey,
97
+ prehash: PreHash.NONE,
98
+ });
99
+ }
100
+ }
101
+
102
+ public async verifyAndProcess(message: any) {
103
+ const from = message.header.signatures!.publicKeys[0];
104
+ if (!this.peers.has(from.hashcode())) {
105
+ this.updateSession(from, Number(message.header.session));
106
+ }
107
+ return true;
108
+ }
109
+ }
110
+
111
+ class SimFanoutTree extends FanoutTree {
112
+ constructor(c: any, opts?: any, mockCrypto = true) {
113
+ super(c, opts);
114
+ if (mockCrypto) {
115
+ this.sign = async () =>
116
+ new SignatureWithKey({
117
+ signature: new Uint8Array([0]),
118
+ publicKey: this.publicKey,
119
+ prehash: PreHash.NONE,
120
+ });
121
+ }
122
+ }
123
+
124
+ public async verifyAndProcess(message: any) {
125
+ const from = message.header.signatures!.publicKeys[0];
126
+ if (!this.peers.has(from.hashcode())) {
127
+ this.updateSession(from, Number(message.header.session));
128
+ }
129
+ return true;
130
+ }
131
+ }
132
+
133
+ class SimDirectBlock extends DirectBlock {
134
+ constructor(c: any, opts?: any, mockCrypto = true) {
135
+ super(c, opts);
136
+ if (mockCrypto) {
137
+ this.sign = async () =>
138
+ new SignatureWithKey({
139
+ signature: new Uint8Array([0]),
140
+ publicKey: this.publicKey,
141
+ prehash: PreHash.NONE,
142
+ });
143
+ }
144
+ }
145
+
146
+ public async verifyAndProcess(message: any) {
147
+ const from = message.header.signatures!.publicKeys[0];
148
+ if (!this.peers.has(from.hashcode())) {
149
+ this.updateSession(from, Number(message.header.session));
150
+ }
151
+ return true;
152
+ }
153
+ }
154
+
155
/**
 * Minimal session surface `TestSession` needs, satisfied by both the
 * stream-based `SSession` and the in-memory `InMemorySession` wrapper.
 */
type SessionLike = {
	// Transport-level peers, parallel to `TestSession.peers`.
	peers: any[];
	connect(groups?: any[][]): Promise<any>;
	stop(): Promise<any>;
};
160
+
161
+ const mulberry32 = (seed: number) => {
162
+ let t = seed >>> 0;
163
+ return () => {
164
+ t += 0x6d2b79f5;
165
+ let x = t;
166
+ x = Math.imul(x ^ (x >>> 15), x | 1);
167
+ x ^= x + Math.imul(x ^ (x >>> 7), x | 61);
168
+ return ((x ^ (x >>> 14)) >>> 0) / 4294967296;
169
+ };
170
+ };
171
+
172
+ const parseSimPeerIndex = (peerId: any): number => {
173
+ const s = String(peerId?.toString?.() ?? "");
174
+ const m = s.match(/sim-(\d+)/);
175
+ if (!m) return 0;
176
+ const n = Number(m[1]);
177
+ return Number.isFinite(n) ? Math.max(0, Math.floor(n)) : 0;
178
+ };
179
+
180
+ const int = (rng: () => number, maxExclusive: number) =>
181
+ Math.floor(rng() * maxExclusive);
182
+
183
+ const buildRandomGraph = (
184
+ n: number,
185
+ targetDegree: number,
186
+ rng: () => number,
187
+ ): number[][] => {
188
+ if (n <= 0) throw new Error("nodes must be > 0");
189
+ if (targetDegree < 0) throw new Error("degree must be >= 0");
190
+ if (targetDegree >= n) {
191
+ throw new Error("degree must be < nodes for a simple graph");
192
+ }
193
+
194
+ const adj: Set<number>[] = Array.from({ length: n }, () => new Set<number>());
195
+ const degree = new Uint16Array(n);
196
+
197
+ const connect = (a: number, b: number) => {
198
+ if (a === b) return false;
199
+ if (adj[a]!.has(b)) return false;
200
+ if (degree[a]! >= targetDegree || degree[b]! >= targetDegree) return false;
201
+ adj[a]!.add(b);
202
+ adj[b]!.add(a);
203
+ degree[a]! += 1;
204
+ degree[b]! += 1;
205
+ return true;
206
+ };
207
+
208
+ // Seed connectivity (line or ring).
209
+ if (targetDegree >= 2 && n >= 3) {
210
+ for (let i = 0; i < n; i++) connect(i, (i + 1) % n);
211
+ } else if (targetDegree >= 1 && n >= 2) {
212
+ for (let i = 0; i < n - 1; i++) connect(i, i + 1);
213
+ }
214
+
215
+ const available: number[] = [];
216
+ const pos = new Int32Array(n).fill(-1);
217
+ for (let i = 0; i < n; i++) {
218
+ if (degree[i]! < targetDegree) {
219
+ pos[i] = available.length;
220
+ available.push(i);
221
+ }
222
+ }
223
+ const removeAvailable = (id: number) => {
224
+ const p = pos[id]!;
225
+ if (p < 0) return;
226
+ const last = available.pop()!;
227
+ if (last !== id) {
228
+ available[p] = last;
229
+ pos[last] = p;
230
+ }
231
+ pos[id] = -1;
232
+ };
233
+
234
+ const maxAttempts = n * Math.max(1, targetDegree) * 200;
235
+ let attempts = 0;
236
+ while (available.length > 1 && attempts < maxAttempts) {
237
+ attempts++;
238
+ const a = available[int(rng, available.length)]!;
239
+ const b = available[int(rng, available.length)]!;
240
+ if (a === b) continue;
241
+ if (!connect(a, b)) continue;
242
+ if (degree[a]! >= targetDegree) removeAvailable(a);
243
+ if (degree[b]! >= targetDegree) removeAvailable(b);
244
+ }
245
+
246
+ return adj.map((s) => [...s]);
247
+ };
248
+
249
+ const runWithConcurrency = async <T>(
250
+ tasks: Array<() => Promise<T>>,
251
+ concurrency: number,
252
+ ): Promise<T[]> => {
253
+ if (tasks.length === 0) return [];
254
+ const results: T[] = new Array(tasks.length);
255
+ let next = 0;
256
+ const workers = Array.from({ length: Math.min(concurrency, tasks.length) }, async () => {
257
+ for (;;) {
258
+ const i = next++;
259
+ if (i >= tasks.length) return;
260
+ results[i] = await tasks[i]!();
261
+ }
262
+ });
263
+ await Promise.all(workers);
264
+ return results;
265
+ };
266
+
27
267
export class TestSession {
	// Underlying libp2p session (real stream transport or the in-memory shim).
	private session: SessionLike;
	private _peers: Peerbit[];
	// Dial-groups from the most recent connect*() call; consumed by the
	// shard-root configuration logic.
	private connectedGroups: Set<Peerbit>[] | undefined;
	// Populated only for sessions created through the in-memory helpers.
	readonly inMemory?: { session: InMemorySession<any>; network: InMemoryNetwork };

	constructor(
		session: SessionLike,
		peers: Peerbit[],
		opts?: { inMemory?: { session: InMemorySession<any>; network: InMemoryNetwork } },
	) {
		this.session = session;
		this._peers = peers;
		this.inMemory = opts?.inMemory;
		this.wrapPeerStartForReconnect();
	}
36
283
 
@@ -38,14 +285,229 @@ export class TestSession {
38
285
  return this._peers;
39
286
  }
40
287
 
288
	/**
	 * Deterministically pick and configure shard-root ("router") candidates for
	 * every connected component implied by the last `connect*()` call, then ask
	 * routers to host shard roots and all peers to reconcile their overlays.
	 * Best-effort throughout: service hooks are called optionally and errors
	 * are swallowed. Safe to call repeatedly.
	 */
	private async configureFanoutShardRoots() {
		if (!this.connectedGroups) return;

		// Build a view of direct underlay adjacency from the original dial-groups.
		// This is used to pick "universally reachable" routers in sparse topologies
		// (e.g. a line) so that sharded pubsub roots are directly dialable.
		const adjacency = new Map<Peerbit, Set<Peerbit>>();
		const addAdj = (a: Peerbit, b: Peerbit) => {
			let sa = adjacency.get(a);
			if (!sa) {
				sa = new Set();
				adjacency.set(a, sa);
			}
			sa.add(b);
		};
		for (const groupSet of this.connectedGroups) {
			const peers = [...groupSet];
			for (let i = 0; i < peers.length; i++) {
				for (let j = i + 1; j < peers.length; j++) {
					const a = peers[i]!;
					const b = peers[j]!;
					addAdj(a, b);
					addAdj(b, a);
				}
			}
		}

		// `connect(groups)` is sometimes used to form sparse topologies by passing
		// overlapping "dial groups" (e.g. [[a,b],[b,c]] for a line). For shard-root
		// resolution we must instead configure candidates consistently per connected
		// component; otherwise a peer that appears in multiple groups would have its
		// candidate set overwritten and pubsub/rpc can deadlock.
		const mergedComponents: Set<Peerbit>[] = [];
		for (const groupSet of this.connectedGroups) {
			const intersections: number[] = [];
			for (let i = 0; i < mergedComponents.length; i++) {
				const existing = mergedComponents[i]!;
				for (const peer of groupSet) {
					if (existing.has(peer)) {
						intersections.push(i);
						break;
					}
				}
			}
			if (intersections.length === 0) {
				mergedComponents.push(new Set(groupSet));
				continue;
			}

			const union = new Set<Peerbit>(groupSet);
			intersections.sort((a, b) => b - a); // splice from end
			for (const idx of intersections) {
				for (const peer of mergedComponents[idx]!) union.add(peer);
				mergedComponents.splice(idx, 1);
			}
			mergedComponents.push(union);
		}

		// Stable ordering: sort each component by the peer's position in _peers so
		// the chosen router set is deterministic run-to-run.
		const peerIndex = new Map<Peerbit, number>();
		for (let i = 0; i < this._peers.length; i++) {
			peerIndex.set(this._peers[i]!, i);
		}

		for (const groupSet of mergedComponents) {
			const group = [...groupSet].sort(
				(a, b) => (peerIndex.get(a) ?? 0) - (peerIndex.get(b) ?? 0),
			);
			if (group.length === 0) continue;

			// Prefer routers that are directly connected to every peer in this component
			// (e.g. the center node in a line). This avoids picking a root that some peers
			// cannot dial directly, which can cause fanout join timeouts in sparse graphs.
			const universalRouters = group.filter((p) => {
				const n = adjacency.get(p);
				if (!n) return false;
				for (const other of group) {
					if (other === p) continue;
					if (!n.has(other)) return false;
				}
				return true;
			});

			// Keep a small deterministic router set so shard roots are always hosted.
			// If no universally reachable peers exist, fall back to a bounded prefix
			// (~log2(component size), capped at 8).
			const routers =
				universalRouters.length > 0
					? universalRouters.slice(0, 8)
					: group.slice(
							0,
							Math.min(
								8,
								Math.max(1, Math.ceil(Math.log2(Math.max(1, group.length)))),
							),
						);
			const routerHashes = routers
				.map(
					(p) =>
						(p as any)?.services?.pubsub?.publicKeyHash ??
						(p as any)?.identity?.publicKey?.hashcode?.(),
				)
				.filter((x): x is string => typeof x === "string" && x.length > 0);

			if (routerHashes.length === 0) continue;

			for (const peer of group) {
				const servicesAny: any = (peer as any).services;
				// Prefer the pubsub service helper so we also disable its auto-candidate
				// mode (otherwise it can keep gossiping/expanding candidates and shard-root
				// resolution can diverge in sparse topologies).
				try {
					servicesAny?.pubsub?.setTopicRootCandidates?.(routerHashes);
				} catch {
					// ignore
				}

				// Also update the shared topic-root control plane directly for any
				// services that are not wired through pubsub.
				const fanoutPlane = servicesAny?.fanout?.topicRootControlPlane;
				const pubsubPlane = servicesAny?.pubsub?.topicRootControlPlane;
				const planes = [...new Set([fanoutPlane, pubsubPlane].filter(Boolean))];
				for (const plane of planes) {
					try {
						plane.setTopicRootCandidates(routerHashes);
					} catch {
						// ignore
					}
				}
			}

			// Ensure subsequent root resolution (and root hosting) uses the updated
			// candidate set rather than any earlier auto-candidate cached mapping.
			for (const peer of group) {
				try {
					(peer as any)?.services?.pubsub?.shardRootCache?.clear?.();
				} catch {
					// ignore
				}
			}

			// Bring up shard roots on router peers.
			await Promise.all(
				routers.map(async (peer) => {
					try {
						await (peer as any)?.services?.pubsub?.hostShardRootsNow?.();
					} catch {
						// ignore
					}
				}),
			);

			// If programs were opened before `connect()`, they may have subscribed under the
			// initial local-only shard mapping. After updating candidates, force a best-effort
			// reconcile so pubsub overlay joins + subscription announcements converge.
			await Promise.all(
				group.map(async (peer) => {
					try {
						const pubsub: any = (peer as any)?.services?.pubsub;
						pubsub?.shardRootCache?.clear?.();
						await pubsub?.reconcileShardOverlays?.();
					} catch {
						// ignore
					}
				}),
			);
		}
	}
454
+
41
455
  async connect(groups?: ProgramClient[][]) {
42
456
  await this.session.connect(groups?.map((x) => x.map((y) => y)));
43
457
  this.connectedGroups = groups
44
458
  ? groups.map((group) => new Set(group as Peerbit[]))
45
459
  : [new Set(this._peers)];
460
+ await this.configureFanoutShardRoots();
46
461
  return;
47
462
  }
48
463
 
464
+ /**
465
+ * Connect peers in a sparse random graph (bounded degree) to enable large-n sims.
466
+ *
467
+ * Returns the adjacency list (by index within each group).
468
+ */
469
+ async connectRandomGraph(opts: {
470
+ degree: number;
471
+ seed?: number;
472
+ groups?: ProgramClient[][];
473
+ concurrency?: number;
474
+ }): Promise<number[][][]> {
475
+ const groups = opts.groups ?? [this._peers];
476
+ const degree = Math.max(0, Math.floor(opts.degree));
477
+ const seed = Math.max(0, Math.floor(opts.seed ?? 1));
478
+ const concurrency = Math.max(1, Math.floor(opts.concurrency ?? 100));
479
+ const rng = mulberry32(seed);
480
+
481
+ const out: number[][][] = [];
482
+ for (const group of groups) {
483
+ const n = group.length;
484
+ if (n === 0) {
485
+ out.push([]);
486
+ continue;
487
+ }
488
+ const d = Math.min(degree, Math.max(0, n - 1));
489
+ const graph = buildRandomGraph(n, d, rng);
490
+ out.push(graph);
491
+
492
+ const tasks: Array<() => Promise<boolean>> = [];
493
+ for (let a = 0; a < graph.length; a++) {
494
+ for (const b of graph[a]!) {
495
+ if (b <= a) continue;
496
+ const pa = group[a] as Peerbit;
497
+ const pb = group[b] as Peerbit;
498
+ tasks.push(() => pa.dial(pb));
499
+ }
500
+ }
501
+
502
+ await runWithConcurrency(tasks, concurrency);
503
+ }
504
+
505
+ this.connectedGroups = groups.map((group) => new Set(group as Peerbit[]));
506
+ await this.configureFanoutShardRoots();
507
+ return out;
508
+ }
509
+
510
+
49
511
  private wrapPeerStartForReconnect() {
50
512
  const patchedKey = Symbol.for("@peerbit/test-session.reconnect-on-start");
51
513
  for (const peer of this._peers) {
@@ -93,12 +555,23 @@ export class TestSession {
93
555
  target: "neighbor",
94
556
  timeout: 10_000,
95
557
  }),
558
+ other.services.fanout.waitFor(peerHash, {
559
+ target: "neighbor",
560
+ timeout: 10_000,
561
+ }),
96
562
  ]);
97
563
  }),
98
564
  );
565
+
566
+ // A stop/start can leave sharded pubsub/fanout in a stale state (old shard-root
567
+ // cache, roots no longer hosted on restarted routers, overlays not rejoined).
568
+ // Re-apply the session's deterministic shard-root configuration so program-level
569
+ // RPC/pubsub resumes working without each test having to manually reconnect.
570
+ await this.configureFanoutShardRoots();
99
571
  };
100
572
  }
101
573
  }
574
+
102
575
  async stop() {
103
576
  await Promise.all(this._peers.map((peer) => peer.stop()));
104
577
  // `Peerbit.stop()` stops libp2p for sessions created by `Peerbit.create()`,
@@ -131,6 +604,18 @@ export class TestSession {
131
604
  (x) => x.services.blocks as any as DirectStream<any>,
132
605
  ),
133
606
  );
607
+ // Sharded pubsub/fanout uses its own DirectStream protocols; ensure those
608
+ // neighbor streams are established before tests start opening programs.
609
+ await waitForPeersStreams(
610
+ ...session.peers.map(
611
+ (x) => x.services.pubsub as any as DirectStream<any>,
612
+ ),
613
+ );
614
+ await waitForPeersStreams(
615
+ ...session.peers.map(
616
+ (x) => (x.services as any).fanout as any as DirectStream<any>,
617
+ ),
618
+ );
134
619
  return session;
135
620
  }
136
621
 
@@ -184,6 +669,18 @@ export class TestSession {
184
669
  (x) => x.services.blocks as any as DirectStream<any>,
185
670
  ),
186
671
  );
672
+ // Sharded pubsub/fanout uses its own DirectStream protocols; ensure those
673
+ // neighbor streams are established before tests start opening programs.
674
+ await waitForPeersStreams(
675
+ ...session.peers.map(
676
+ (x) => x.services.pubsub as any as DirectStream<any>,
677
+ ),
678
+ );
679
+ await waitForPeersStreams(
680
+ ...session.peers.map(
681
+ (x) => (x.services as any).fanout as any as DirectStream<any>,
682
+ ),
683
+ );
187
684
  return session;
188
685
  }
189
686
 
@@ -196,14 +693,39 @@ export class TestSession {
196
693
  process.env.PEERBIT_TEST_SESSION === "fast" ||
197
694
  process.env.PEERBIT_TEST_SESSION === "tcp";
198
695
 
199
- const m = (o?: CreateOptions): Libp2pCreateOptionsWithServices => {
200
- const blocksDirectory = o?.directory
201
- ? path.join(o.directory, "/blocks").toString()
202
- : undefined;
696
+ const m = (o?: CreateOptions): Libp2pCreateOptionsWithServices => {
697
+ const blocksDirectory = o?.directory
698
+ ? path.join(o.directory, "/blocks").toString()
699
+ : undefined;
203
700
 
204
- const libp2pOptions: Libp2pCreateOptions = {
205
- ...(o?.libp2p ?? {}),
206
- };
701
+ const libp2pOptions: Libp2pCreateOptions = {
702
+ ...(o?.libp2p ?? {}),
703
+ };
704
+
705
+ // `SSession.disconnected(n, opts)` can re-use the same `opts` object across many
706
+ // libp2p peers. Memoizing a single instance in a closure would accidentally
707
+ // share one FanoutTree (and TopicRootControlPlane) across the entire session.
708
+ // That breaks shard-root resolution (everyone would inherit the first peer's
709
+ // candidate set) and can deadlock fanout joins in disconnected tests.
710
+ const perPeer = new WeakMap<
711
+ any,
712
+ { fanout: FanoutTree; topicRootControlPlane: TopicRootControlPlane }
713
+ >();
714
+ const getOrCreatePerPeer = (c: any) => {
715
+ const key = c?.privateKey ?? c;
716
+ let existing = perPeer.get(key);
717
+ if (existing) return existing;
718
+
719
+ const topicRootControlPlane = new TopicRootControlPlane();
720
+ const fanout = new FanoutTree(c, {
721
+ connectionManager: false,
722
+ topicRootControlPlane,
723
+ });
724
+ existing = { fanout, topicRootControlPlane };
725
+ perPeer.set(key, existing);
726
+ return existing;
727
+ };
728
+ const getOrCreateFanout = (c: any) => getOrCreatePerPeer(c).fanout;
207
729
 
208
730
  if (useMockSession) {
209
731
  libp2pOptions.transports = libp2pOptions.transports ?? transportsFast();
@@ -224,7 +746,13 @@ export class TestSession {
224
746
  new DirectBlock(c, {
225
747
  directory: blocksDirectory,
226
748
  }),
227
- pubsub: (c: any) => new DirectSub(c, { canRelayMessage: true }),
749
+ pubsub: (c: any) =>
750
+ new TopicControlPlane(c, {
751
+ canRelayMessage: true,
752
+ topicRootControlPlane: getOrCreatePerPeer(c).topicRootControlPlane,
753
+ fanout: getOrCreatePerPeer(c).fanout,
754
+ }),
755
+ fanout: (c: any) => getOrCreateFanout(c),
228
756
  keychain: keychain(),
229
757
  ...libp2pOptions.services,
230
758
  } as any, /// TODO types
@@ -253,4 +781,118 @@ export class TestSession {
253
781
  )) as Peerbit[],
254
782
  );
255
783
  }
784
+
785
+ /**
786
+ * Create a large-n capable Peerbit session using an in-memory libp2p transport shim.
787
+ *
788
+ * This avoids TCP/noise costs and (optionally) skips expensive crypto verification.
789
+ *
790
+ * Notes:
791
+ * - By default this uses a no-op indexer to avoid spinning up sqlite for each peer.
792
+ * - If you pass `degree`, it will connect a bounded-degree random graph automatically.
793
+ */
794
+ static async disconnectedInMemory(
795
+ n: number,
796
+ opts: InMemoryPeerbitSessionOptions & { directory?: string } = {},
797
+ ) {
798
+ const mockCrypto = opts.mockCrypto !== false;
799
+ const indexer = opts.indexer ?? (() => new NoopIndices());
800
+ const seed = Math.max(0, Math.floor(opts.seed ?? 1));
801
+
802
+ const perPeer = new Map<
803
+ string,
804
+ { fanout: SimFanoutTree; topicRootControlPlane: TopicRootControlPlane }
805
+ >();
806
+ const getOrCreatePerPeer = (c: any) => {
807
+ const key = c?.peerId?.toString?.() ?? "";
808
+ const existing = perPeer.get(key);
809
+ if (existing) return existing;
810
+
811
+ const topicRootControlPlane = new TopicRootControlPlane();
812
+ const fanout = new SimFanoutTree(
813
+ c,
814
+ {
815
+ connectionManager: false,
816
+ random: mulberry32((seed >>> 0) ^ parseSimPeerIndex(c?.peerId)),
817
+ topicRootControlPlane,
818
+ },
819
+ mockCrypto,
820
+ );
821
+ const created = { fanout, topicRootControlPlane };
822
+ perPeer.set(key, created);
823
+ return created;
824
+ };
825
+
826
+ const inMemory = await InMemorySession.disconnected<Libp2pExtendServices>(n, {
827
+ start: false,
828
+ basePort: opts.basePort ?? 30_000,
829
+ networkOpts: opts.network,
830
+ services: {
831
+ blocks: (c: any) =>
832
+ new SimDirectBlock(
833
+ c,
834
+ {
835
+ directory: opts.directory ? path.join(opts.directory, "/blocks") : undefined,
836
+ canRelayMessage: true,
837
+ },
838
+ mockCrypto,
839
+ ),
840
+ pubsub: (c: any) =>
841
+ new SimTopicControlPlane(
842
+ c,
843
+ (() => {
844
+ const { fanout, topicRootControlPlane } = getOrCreatePerPeer(c);
845
+ return {
846
+ canRelayMessage: true,
847
+ topicRootControlPlane,
848
+ fanout,
849
+ };
850
+ })(),
851
+ mockCrypto,
852
+ ),
853
+ fanout: (c: any) => getOrCreatePerPeer(c).fanout,
854
+ keychain: () =>
855
+ // Avoid libp2p keychain (datastore dependency) for large sims.
856
+ // Peerbit only requires the CryptoKeychain surface in these sessions.
857
+ new DefaultCryptoKeychain({ store: createStore() }) as any,
858
+ } as any,
859
+ });
860
+
861
+ const peers = (await Promise.all(
862
+ inMemory.peers.map((x) =>
863
+ Peerbit.create({
864
+ libp2p: x as any,
865
+ directory: opts.directory,
866
+ indexer,
867
+ }),
868
+ ),
869
+ )) as Peerbit[];
870
+
871
+ const sessionLike: SessionLike = {
872
+ peers: inMemory.peers as any[],
873
+ connect: async (groups?: any[][]) => inMemory.connectFully(groups as any),
874
+ stop: async () => inMemory.stop(),
875
+ };
876
+
877
+ return new TestSession(sessionLike, peers, {
878
+ inMemory: { session: inMemory, network: inMemory.network },
879
+ });
880
+ }
881
+
882
+ static async connectedInMemory(
883
+ n: number,
884
+ opts: InMemoryPeerbitSessionOptions & { directory?: string } = {},
885
+ ) {
886
+ const session = await TestSession.disconnectedInMemory(n, opts);
887
+ if (opts.degree != null) {
888
+ await session.connectRandomGraph({
889
+ degree: opts.degree,
890
+ seed: opts.seed,
891
+ concurrency: opts.concurrency,
892
+ });
893
+ return session;
894
+ }
895
+ await session.connect();
896
+ return session;
897
+ }
256
898
  }