@aztec/p2p 2.0.3 → 2.1.0-rc.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/dest/client/factory.d.ts.map +1 -1
  2. package/dest/client/factory.js +3 -0
  3. package/dest/client/interface.d.ts +3 -3
  4. package/dest/client/interface.d.ts.map +1 -1
  5. package/dest/client/p2p_client.d.ts +1 -7
  6. package/dest/client/p2p_client.d.ts.map +1 -1
  7. package/dest/client/p2p_client.js +12 -16
  8. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts +13 -3
  9. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts.map +1 -1
  10. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.js +93 -40
  11. package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts +15 -4
  12. package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts.map +1 -1
  13. package/dest/mem_pools/tx_pool/memory_tx_pool.js +81 -9
  14. package/dest/mem_pools/tx_pool/tx_pool.d.ts +12 -4
  15. package/dest/mem_pools/tx_pool/tx_pool.d.ts.map +1 -1
  16. package/dest/mem_pools/tx_pool/tx_pool_test_suite.d.ts.map +1 -1
  17. package/dest/mem_pools/tx_pool/tx_pool_test_suite.js +130 -6
  18. package/dest/msg_validators/tx_validator/double_spend_validator.js +1 -1
  19. package/dest/services/libp2p/libp2p_service.d.ts +0 -1
  20. package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
  21. package/dest/services/libp2p/libp2p_service.js +6 -10
  22. package/dest/services/peer-manager/peer_manager.d.ts.map +1 -1
  23. package/dest/services/peer-manager/peer_manager.js +15 -8
  24. package/dest/services/reqresp/interface.d.ts +0 -9
  25. package/dest/services/reqresp/interface.d.ts.map +1 -1
  26. package/dest/services/reqresp/interface.js +0 -17
  27. package/dest/services/reqresp/reqresp.d.ts.map +1 -1
  28. package/dest/services/reqresp/reqresp.js +12 -8
  29. package/dest/services/tx_provider.js +1 -1
  30. package/dest/testbench/p2p_client_testbench_worker.js +2 -1
  31. package/package.json +14 -15
  32. package/src/client/factory.ts +7 -0
  33. package/src/client/interface.ts +3 -3
  34. package/src/client/p2p_client.ts +11 -18
  35. package/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +104 -42
  36. package/src/mem_pools/tx_pool/memory_tx_pool.ts +89 -10
  37. package/src/mem_pools/tx_pool/tx_pool.ts +11 -4
  38. package/src/mem_pools/tx_pool/tx_pool_test_suite.ts +114 -7
  39. package/src/msg_validators/tx_validator/double_spend_validator.ts +1 -1
  40. package/src/services/libp2p/libp2p_service.ts +7 -11
  41. package/src/services/peer-manager/peer_manager.ts +9 -10
  42. package/src/services/reqresp/interface.ts +0 -21
  43. package/src/services/reqresp/reqresp.ts +13 -9
  44. package/src/services/tx_provider.ts +1 -1
  45. package/src/testbench/p2p_client_testbench_worker.ts +1 -0
@@ -68,7 +68,7 @@ export interface TxPool extends TypedEventEmitter<TxPoolEvents> {
68
68
  * Deletes transactions from the pool. Tx hashes that are not present are ignored.
69
69
  * @param txHashes - An array of tx hashes to be removed from the tx pool.
70
70
  */
71
- deleteTxs(txHashes: TxHash[]): Promise<void>;
71
+ deleteTxs(txHashes: TxHash[], opts?: { permanently?: boolean }): Promise<void>;
72
72
 
73
73
  /**
74
74
  * Gets all transactions currently in the tx pool.
@@ -98,11 +98,11 @@ export interface TxPool extends TypedEventEmitter<TxPoolEvents> {
98
98
  getMinedTxHashes(): Promise<[tx: TxHash, blockNumber: number][]>;
99
99
 
100
100
  /**
101
- * Returns whether the given tx hash is flagged as pending or mined.
101
+ * Returns whether the given tx hash is flagged as pending, mined, or deleted.
102
102
  * @param txHash - Hash of the tx to query.
103
- * @returns Pending or mined depending on its status, or undefined if not found.
103
+ * @returns Pending, mined, or deleted depending on its status, or undefined if not found.
104
104
  */
105
- getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined>;
105
+ getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined>;
106
106
 
107
107
  /**
108
108
  * Configure the maximum size of the tx pool
@@ -118,4 +118,11 @@ export interface TxPool extends TypedEventEmitter<TxPoolEvents> {
118
118
  * @param txHashes - Hashes of the transactions to mark as non-evictible.
119
119
  */
120
120
  markTxsAsNonEvictable(txHashes: TxHash[]): Promise<void>;
121
+
122
+ /**
123
+ * Permanently deletes deleted mined transactions from blocks up to and including the specified block number.
124
+ * @param blockNumber - Block number threshold. Deleted mined txs from this block or earlier will be permanently deleted.
125
+ * @returns The number of transactions permanently deleted.
126
+ */
127
+ cleanupDeletedMinedTxs(blockNumber: number): Promise<number>;
121
128
  }
@@ -54,14 +54,24 @@ export function describeTxPool(getTxPool: () => TxPool) {
54
54
  expect(txsFromEvent).toEqual(expect.arrayContaining([tx2, tx3]));
55
55
  });
56
56
 
57
- it('removes txs from the pool', async () => {
58
- const tx1 = await mockTx();
59
-
60
- await pool.addTxs([tx1]);
61
- await pool.deleteTxs([tx1.getTxHash()]);
57
+ it('permanently deletes pending txs and soft-deletes mined txs', async () => {
58
+ const pendingTx = await mockTx(1);
59
+ const minedTx = await mockTx(2);
60
+
61
+ await pool.addTxs([pendingTx, minedTx]);
62
+ await pool.markAsMined([minedTx.getTxHash()], minedBlockHeader);
63
+
64
+ // Delete a pending tx - should be permanently deleted
65
+ await pool.deleteTxs([pendingTx.getTxHash()]);
66
+ await expect(pool.getTxByHash(pendingTx.getTxHash())).resolves.toBeUndefined();
67
+ await expect(pool.getTxStatus(pendingTx.getTxHash())).resolves.toBeUndefined();
68
+
69
+ // Delete a mined tx - should be soft-deleted (still in storage)
70
+ await pool.deleteTxs([minedTx.getTxHash()]);
71
+ await expect(pool.getTxByHash(minedTx.getTxHash())).resolves.toBeDefined();
72
+ await expect(pool.getTxStatus(minedTx.getTxHash())).resolves.toEqual('deleted');
73
+ await expect(pool.getMinedTxHashes()).resolves.toEqual([]);
62
74
 
63
- await expect(pool.getTxByHash(tx1.getTxHash())).resolves.toBeFalsy();
64
- await expect(pool.getTxStatus(tx1.getTxHash())).resolves.toBeUndefined();
65
75
  await expect(pool.getPendingTxCount()).resolves.toEqual(0);
66
76
  });
67
77
 
@@ -203,4 +213,101 @@ export function describeTxPool(getTxPool: () => TxPool) {
203
213
  expect(poolTxHashes).toHaveLength(4);
204
214
  expect(poolTxHashes).toEqual([tx4, tx1, tx3, tx2].map(tx => tx.getTxHash()));
205
215
  });
216
+
217
+ describe('soft-delete', () => {
218
+ it('soft-deletes mined txs and keeps them in storage', async () => {
219
+ const txs = await Promise.all([mockTx(1), mockTx(2), mockTx(3)]);
220
+ await pool.addTxs(txs);
221
+
222
+ // Mark first tx as mined
223
+ await pool.markAsMined([txs[0].getTxHash()], minedBlockHeader);
224
+
225
+ // Verify initial state
226
+ await expect(pool.getPendingTxCount()).resolves.toBe(2);
227
+ await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined();
228
+ await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeDefined();
229
+
230
+ // Delete mined tx - should be soft-deleted
231
+ await pool.deleteTxs([txs[0].getTxHash()]);
232
+
233
+ // Delete pending tx - should be permanently deleted
234
+ await pool.deleteTxs([txs[1].getTxHash()]);
235
+
236
+ // Verify mined tx still exists in storage but has 'deleted' status
237
+ await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined();
238
+ await expect(pool.getTxStatus(txs[0].getTxHash())).resolves.toEqual('deleted');
239
+
240
+ // Verify pending tx is permanently deleted
241
+ await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeUndefined();
242
+ await expect(pool.getTxStatus(txs[1].getTxHash())).resolves.toBeUndefined();
243
+
244
+ // Verify remaining pending count
245
+ await expect(pool.getPendingTxCount()).resolves.toBe(1);
246
+
247
+ // Verify pending hashes don't include deleted txs
248
+ const pendingHashes = await pool.getPendingTxHashes();
249
+ expect(pendingHashes).toHaveLength(1);
250
+ expect(pendingHashes.map(h => h.toString())).toContain(txs[2].getTxHash().toString());
251
+ });
252
+
253
+ it('cleans up old deleted mined transactions', async () => {
254
+ const txs = await Promise.all([mockTx(1), mockTx(2), mockTx(3)]);
255
+ await pool.addTxs(txs);
256
+
257
+ // Mark first two as mined in block 1
258
+ await pool.markAsMined([txs[0].getTxHash(), txs[1].getTxHash()], minedBlockHeader);
259
+
260
+ // Soft-delete mined transactions
261
+ await pool.deleteTxs([txs[0].getTxHash(), txs[1].getTxHash()]);
262
+
263
+ // Clean up deleted mined txs from block 1 and earlier
264
+ const deletedCount = await pool.cleanupDeletedMinedTxs(1);
265
+
266
+ // Verify old transactions are permanently deleted
267
+ expect(deletedCount).toBe(2);
268
+ await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeUndefined();
269
+ await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeUndefined();
270
+ await expect(pool.getTxByHash(txs[2].getTxHash())).resolves.toBeDefined();
271
+ });
272
+
273
+ it('does not clean up recent deleted mined transactions', async () => {
274
+ const txs = await Promise.all([mockTx(1), mockTx(2)]);
275
+ await pool.addTxs(txs);
276
+
277
+ // Mark as mined in block 2
278
+ const laterBlockHeader = BlockHeader.empty({
279
+ globalVariables: GlobalVariables.empty({ blockNumber: 2, timestamp: 0n }),
280
+ });
281
+ await pool.markAsMined([txs[0].getTxHash()], laterBlockHeader);
282
+
283
+ // Soft-delete a mined transaction
284
+ await pool.deleteTxs([txs[0].getTxHash()]);
285
+
286
+ // Try to clean up with block 1 (before the mined block)
287
+ const deletedCount = await pool.cleanupDeletedMinedTxs(1);
288
+
289
+ // Verify no transactions were cleaned up
290
+ expect(deletedCount).toBe(0);
291
+ await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined();
292
+ });
293
+
294
+ it('restores deleted mined tx when it is mined again', async () => {
295
+ const tx = await mockTx(1);
296
+ await pool.addTxs([tx]);
297
+
298
+ // Mark as mined
299
+ await pool.markAsMined([tx.getTxHash()], minedBlockHeader);
300
+
301
+ // Soft-delete it
302
+ await pool.deleteTxs([tx.getTxHash()]);
303
+ await expect(pool.getTxStatus(tx.getTxHash())).resolves.toEqual('deleted');
304
+
305
+ // Mark as mined again (e.g., after a reorg)
306
+ await pool.markAsMined([tx.getTxHash()], minedBlockHeader);
307
+
308
+ // Should be back to mined status
309
+ await expect(pool.getTxStatus(tx.getTxHash())).resolves.toEqual('mined');
310
+ await expect(pool.getTxByHash(tx.getTxHash())).resolves.toBeDefined();
311
+ });
312
+ });
206
313
  }
@@ -24,7 +24,7 @@ export class DoubleSpendTxValidator<T extends AnyTx> implements TxValidator<T> {
24
24
  const nullifiers = tx instanceof Tx ? tx.data.getNonEmptyNullifiers() : tx.txEffect.nullifiers;
25
25
 
26
26
  // Ditch this tx if it has repeated nullifiers
27
- const uniqueNullifiers = new Set(nullifiers);
27
+ const uniqueNullifiers = new Set(nullifiers.map(n => n.toBigInt()));
28
28
  if (uniqueNullifiers.size !== nullifiers.length) {
29
29
  this.#log.verbose(`Rejecting tx ${'txHash' in tx ? tx.txHash : tx.hash} for emitting duplicate nullifiers`);
30
30
  return { result: 'invalid', reason: [TX_ERROR_DUPLICATE_NULLIFIER_IN_TX] };
@@ -43,7 +43,6 @@ import { bootstrap } from '@libp2p/bootstrap';
43
43
  import { identify } from '@libp2p/identify';
44
44
  import { type Message, type MultiaddrConnection, type PeerId, TopicValidatorResult } from '@libp2p/interface';
45
45
  import type { ConnectionManager } from '@libp2p/interface-internal';
46
- import '@libp2p/kad-dht';
47
46
  import { mplex } from '@libp2p/mplex';
48
47
  import { tcp } from '@libp2p/tcp';
49
48
  import { createLibp2p } from 'libp2p';
@@ -273,7 +272,7 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
273
272
  // The connection attempts to the node on TCP layer are not necessarily valid Aztec peers so we want to have a bit of leeway here
274
273
  // If we hit the limit, the connection will be temporarily accepted and immediately dropped.
275
274
  // Docs: https://nodejs.org/api/net.html#servermaxconnections
276
- maxConnections: Math.ceil(maxPeerCount * 1.5),
275
+ maxConnections: maxPeerCount * 2,
277
276
  // socket option: the maximum length of the queue of pending connections
278
277
  // https://nodejs.org/dist/latest-v22.x/docs/api/net.html#serverlisten
279
278
  // it's not safe if we increase this number
@@ -284,7 +283,7 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
284
283
  // In case closeAbove is reached, the server stops listening altogether
285
284
  // It's important that there is enough difference between closeAbove and listenAbove,
286
285
  // otherwise the server.listener will flap between being closed and open potentially degrading perf even more
287
- closeAbove: maxPeerCount * 2,
286
+ closeAbove: maxPeerCount * 3,
288
287
  listenBelow: Math.floor(maxPeerCount * 0.9),
289
288
  },
290
289
  }),
@@ -294,8 +293,10 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
294
293
  streamMuxers: [yamux(), mplex()],
295
294
  connectionEncryption: [noise()],
296
295
  connectionManager: {
297
- minConnections: 0,
298
- maxConnections: maxPeerCount,
296
+ minConnections: 0, // Disable libp2p peer dialing, we do it manually
297
+ // We set maxConnections above maxPeerCount because if we hit limit of maxPeerCount
298
+ // libp2p will start aggressively rejecting all new connections, preventing network discovery and crawling.
299
+ maxConnections: maxPeerCount * 2,
299
300
  maxParallelDials: 100,
300
301
  dialTimeout: 30_000,
301
302
  maxPeerAddrsToDial: 5,
@@ -453,10 +454,6 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
453
454
  const goodbyeHandler = reqGoodbyeHandler(this.peerManager);
454
455
  const blockHandler = reqRespBlockHandler(this.archiver);
455
456
  const statusHandler = reqRespStatusHandler(this.protocolVersion, this.worldStateSynchronizer, this.logger);
456
- // In case P2P client doesnt'have attestation pool,
457
- // const blockTxsHandler = this.mempools.attestationPool
458
- // ? reqRespBlockTxsHandler(this.mempools.attestationPool, this.mempools.txPool)
459
- // : def;
460
457
 
461
458
  const requestResponseHandlers: Partial<ReqRespSubProtocolHandlers> = {
462
459
  [ReqRespSubProtocol.PING]: pingHandler,
@@ -811,9 +808,8 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
811
808
  private async processValidBlockProposal(block: BlockProposal, sender: PeerId) {
812
809
  const slot = block.slotNumber.toBigInt();
813
810
  const previousSlot = slot - 1n;
814
- const epoch = slot / 32n;
815
811
  this.logger.verbose(
816
- `Received block ${block.blockNumber} for slot ${slot} epoch ${epoch} from external peer ${sender.toString()}.`,
812
+ `Received block ${block.blockNumber} for slot ${slot} from external peer ${sender.toString()}.`,
817
813
  {
818
814
  p2pMessageIdentifier: await block.p2pMessageIdentifier(),
819
815
  slot: block.slotNumber.toNumber(),
@@ -883,7 +883,7 @@ export class PeerManager implements PeerManagerInterface {
883
883
  const response = await this.reqresp.sendRequestToPeer(peerId, ReqRespSubProtocol.AUTH, authRequest.toBuffer());
884
884
  const { status } = response;
885
885
  if (status !== ReqRespStatus.SUCCESS) {
886
- this.logger.debug(`Disconnecting peer ${peerId} who failed to respond auth handshake`, {
886
+ this.logger.verbose(`Disconnecting peer ${peerId} who failed to respond auth handshake`, {
887
887
  peerId,
888
888
  status: ReqRespStatus[status],
889
889
  });
@@ -899,7 +899,7 @@ export class PeerManager implements PeerManagerInterface {
899
899
 
900
900
  const peerStatusMessage = peerAuthResponse.status;
901
901
  if (!ourStatus.validate(peerStatusMessage)) {
902
- this.logger.debug(`Disconnecting peer ${peerId} due to failed status handshake as part of auth.`, logData);
902
+ this.logger.verbose(`Disconnecting peer ${peerId} due to failed status handshake as part of auth.`, logData);
903
903
  this.markAuthHandshakeFailed(peerId);
904
904
  this.markPeerForDisconnect(peerId);
905
905
  return;
@@ -911,12 +911,9 @@ export class PeerManager implements PeerManagerInterface {
911
911
  const registeredValidators = await this.epochCache.getRegisteredValidators();
912
912
  const found = registeredValidators.find(v => v.toString() === sender.toString()) !== undefined;
913
913
  if (!found) {
914
- this.logger.debug(
914
+ this.logger.verbose(
915
915
  `Disconnecting peer ${peerId} due to failed auth handshake, peer is not a registered validator.`,
916
- {
917
- peerId,
918
- address: sender.toString(),
919
- },
916
+ { ...logData, address: sender.toString() },
920
917
  );
921
918
  this.markAuthHandshakeFailed(peerId);
922
919
  this.markPeerForDisconnect(peerId);
@@ -926,8 +923,9 @@ export class PeerManager implements PeerManagerInterface {
926
923
  // Check to see that this validator address isn't already allocated to a different peer
927
924
  const peerForAddress = this.authenticatedValidatorAddressToPeerId.get(sender.toString());
928
925
  if (peerForAddress !== undefined && peerForAddress.toString() !== peerIdString) {
929
- this.logger.debug(
926
+ this.logger.verbose(
930
927
  `Received auth for validator ${sender.toString()} from peer ${peerIdString}, but this validator is already authenticated to peer ${peerForAddress.toString()}`,
928
+ { ...logData, address: sender.toString() },
931
929
  );
932
930
  return;
933
931
  }
@@ -937,12 +935,13 @@ export class PeerManager implements PeerManagerInterface {
937
935
  this.authenticatedValidatorAddressToPeerId.set(sender.toString(), peerId);
938
936
  this.logger.info(
939
937
  `Successfully completed auth handshake with peer ${peerId}, validator address ${sender.toString()}`,
940
- logData,
938
+ { ...logData, address: sender.toString() },
941
939
  );
942
940
  } catch (err: any) {
943
941
  //TODO: maybe hard ban these peers in the future
944
- this.logger.debug(`Disconnecting peer ${peerId} due to error during auth handshake: ${err.message ?? err}`, {
942
+ this.logger.verbose(`Disconnecting peer ${peerId} due to error during auth handshake: ${err.message}`, {
945
943
  peerId,
944
+ err,
946
945
  });
947
946
  this.markAuthHandshakeFailed(peerId);
948
947
  this.markPeerForDisconnect(peerId);
@@ -122,27 +122,6 @@ export type SubProtocolMap = {
122
122
  >;
123
123
  };
124
124
 
125
- /**
126
- * Default handler for unimplemented sub protocols, this SHOULD be overwritten
127
- * by the service, but is provided as a fallback
128
- */
129
- export const defaultHandler = (_msg: any): Promise<Buffer> => {
130
- return Promise.resolve(Buffer.from('unimplemented'));
131
- };
132
-
133
- /**
134
- * Default sub protocol handlers - this SHOULD be overwritten by the service,
135
- */
136
- export const DEFAULT_SUB_PROTOCOL_HANDLERS: ReqRespSubProtocolHandlers = {
137
- [ReqRespSubProtocol.PING]: defaultHandler,
138
- [ReqRespSubProtocol.STATUS]: defaultHandler,
139
- [ReqRespSubProtocol.TX]: defaultHandler,
140
- [ReqRespSubProtocol.GOODBYE]: defaultHandler,
141
- [ReqRespSubProtocol.BLOCK]: defaultHandler,
142
- [ReqRespSubProtocol.AUTH]: defaultHandler,
143
- [ReqRespSubProtocol.BLOCK_TXS]: defaultHandler,
144
- };
145
-
146
125
  /**
147
126
  * The Request Response Pair interface defines the methods that each
148
127
  * request response pair must implement
@@ -26,7 +26,6 @@ import {
26
26
  import { BatchConnectionSampler } from './connection-sampler/batch_connection_sampler.js';
27
27
  import { ConnectionSampler, RandomSampler } from './connection-sampler/connection_sampler.js';
28
28
  import {
29
- DEFAULT_SUB_PROTOCOL_HANDLERS,
30
29
  DEFAULT_SUB_PROTOCOL_VALIDATORS,
31
30
  type ReqRespInterface,
32
31
  type ReqRespResponse,
@@ -64,9 +63,8 @@ export class ReqResp implements ReqRespInterface {
64
63
  private individualRequestTimeoutMs: number = DEFAULT_INDIVIDUAL_REQUEST_TIMEOUT_MS;
65
64
  private dialTimeoutMs: number = DEFAULT_REQRESP_DIAL_TIMEOUT_MS;
66
65
 
67
- // Warning, if the `start` function is not called as the parent class constructor, then the default sub protocol handlers will be used ( not good )
68
- private subProtocolHandlers: ReqRespSubProtocolHandlers = DEFAULT_SUB_PROTOCOL_HANDLERS;
69
- private subProtocolValidators: ReqRespSubProtocolValidators = DEFAULT_SUB_PROTOCOL_VALIDATORS;
66
+ private subProtocolHandlers: Partial<ReqRespSubProtocolHandlers> = {};
67
+ private subProtocolValidators: Partial<ReqRespSubProtocolValidators> = {};
70
68
 
71
69
  private connectionSampler: ConnectionSampler;
72
70
  private rateLimiter: RequestResponseRateLimiter;
@@ -117,11 +115,12 @@ export class ReqResp implements ReqRespInterface {
117
115
  * Start the reqresp service
118
116
  */
119
117
  async start(subProtocolHandlers: ReqRespSubProtocolHandlers, subProtocolValidators: ReqRespSubProtocolValidators) {
120
- this.subProtocolHandlers = subProtocolHandlers;
121
- this.subProtocolValidators = subProtocolValidators;
118
+ Object.assign(this.subProtocolHandlers, subProtocolHandlers);
119
+ Object.assign(this.subProtocolValidators, subProtocolValidators);
122
120
 
123
121
  // Register all protocol handlers
124
- for (const subProtocol of Object.keys(this.subProtocolHandlers)) {
122
+ for (const subProtocol of Object.keys(subProtocolHandlers)) {
123
+ this.logger.debug(`Registering handler for sub protocol ${subProtocol}`);
125
124
  await this.libp2p.handle(
126
125
  subProtocol,
127
126
  (data: IncomingStreamData) =>
@@ -140,6 +139,7 @@ export class ReqResp implements ReqRespInterface {
140
139
  ): Promise<void> {
141
140
  this.subProtocolHandlers[subProtocol] = handler;
142
141
  this.subProtocolValidators[subProtocol] = validator;
142
+ this.logger.debug(`Registering handler for sub protocol ${subProtocol}`);
143
143
  await this.libp2p.handle(
144
144
  subProtocol,
145
145
  (data: IncomingStreamData) =>
@@ -209,7 +209,7 @@ export class ReqResp implements ReqRespInterface {
209
209
  maxPeers = Math.max(10, Math.ceil(requests.length / 3)),
210
210
  maxRetryAttempts = 3,
211
211
  ): Promise<InstanceType<SubProtocolMap[SubProtocol]['response']>[]> {
212
- const responseValidator = this.subProtocolValidators[subProtocol];
212
+ const responseValidator = this.subProtocolValidators[subProtocol] ?? DEFAULT_SUB_PROTOCOL_VALIDATORS[subProtocol];
213
213
  const responses: InstanceType<SubProtocolMap[SubProtocol]['response']>[] = new Array(requests.length);
214
214
  const requestBuffers = requests.map(req => req.toBuffer());
215
215
 
@@ -594,7 +594,11 @@ export class ReqResp implements ReqRespInterface {
594
594
  *
595
595
  * */
596
596
  private async processStream(protocol: ReqRespSubProtocol, { stream, connection }: IncomingStreamData): Promise<void> {
597
- const handler = this.subProtocolHandlers[protocol]!;
597
+ const handler = this.subProtocolHandlers[protocol];
598
+ if (!handler) {
599
+ throw new Error(`No handler defined for reqresp subprotocol ${protocol}`);
600
+ }
601
+
598
602
  const snappy = this.snappyTransform;
599
603
  const SUCCESS = Uint8Array.of(ReqRespStatus.SUCCESS);
600
604
 
@@ -45,7 +45,7 @@ export class TxProvider implements ITxProvider {
45
45
  if (tx === undefined) {
46
46
  missingTxs.push(txHashes[i]);
47
47
  } else {
48
- txs.push(tx.setTxHash(txHashes[i]));
48
+ txs.push(tx);
49
49
  }
50
50
  }
51
51
 
@@ -54,6 +54,7 @@ function mockTxPool(): TxPool {
54
54
  hasTxs: () => Promise.resolve([]),
55
55
  updateConfig: () => {},
56
56
  markTxsAsNonEvictable: () => Promise.resolve(),
57
+ cleanupDeletedMinedTxs: () => Promise.resolve(0),
57
58
  };
58
59
  return Object.assign(new EventEmitter(), pool);
59
60
  }