@aztec/p2p 0.87.5 → 0.87.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80) hide show
  1. package/dest/client/interface.d.ts +8 -4
  2. package/dest/client/interface.d.ts.map +1 -1
  3. package/dest/client/p2p_client.d.ts +4 -3
  4. package/dest/client/p2p_client.d.ts.map +1 -1
  5. package/dest/client/p2p_client.js +17 -10
  6. package/dest/config.d.ts +10 -0
  7. package/dest/config.d.ts.map +1 -1
  8. package/dest/config.js +12 -2
  9. package/dest/index.d.ts +1 -0
  10. package/dest/index.d.ts.map +1 -1
  11. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts +5 -6
  12. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts.map +1 -1
  13. package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.js +37 -12
  14. package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts +2 -2
  15. package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts.map +1 -1
  16. package/dest/mem_pools/tx_pool/memory_tx_pool.js +1 -3
  17. package/dest/mem_pools/tx_pool/tx_pool.d.ts +6 -1
  18. package/dest/mem_pools/tx_pool/tx_pool.d.ts.map +1 -1
  19. package/dest/msg_validators/msg_seen_validator/msg_seen_validator.d.ts +10 -0
  20. package/dest/msg_validators/msg_seen_validator/msg_seen_validator.d.ts.map +1 -0
  21. package/dest/msg_validators/msg_seen_validator/msg_seen_validator.js +36 -0
  22. package/dest/services/dummy_service.d.ts +1 -1
  23. package/dest/services/dummy_service.d.ts.map +1 -1
  24. package/dest/services/dummy_service.js +1 -1
  25. package/dest/services/index.d.ts +1 -0
  26. package/dest/services/index.d.ts.map +1 -1
  27. package/dest/services/index.js +1 -0
  28. package/dest/services/libp2p/instrumentation.d.ts +11 -0
  29. package/dest/services/libp2p/instrumentation.d.ts.map +1 -0
  30. package/dest/services/libp2p/instrumentation.js +29 -0
  31. package/dest/services/libp2p/libp2p_service.d.ts +8 -5
  32. package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
  33. package/dest/services/libp2p/libp2p_service.js +59 -12
  34. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts +1 -1
  35. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts.map +1 -1
  36. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.js +7 -3
  37. package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts +2 -1
  38. package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts.map +1 -1
  39. package/dest/services/reqresp/connection-sampler/connection_sampler.js +8 -3
  40. package/dest/services/reqresp/protocols/goodbye.d.ts.map +1 -1
  41. package/dest/services/reqresp/protocols/goodbye.js +3 -1
  42. package/dest/services/reqresp/rate-limiter/rate_limiter.d.ts +4 -2
  43. package/dest/services/reqresp/rate-limiter/rate_limiter.d.ts.map +1 -1
  44. package/dest/services/reqresp/rate-limiter/rate_limiter.js +10 -2
  45. package/dest/services/reqresp/rate-limiter/rate_limits.js +1 -1
  46. package/dest/services/reqresp/reqresp.d.ts +3 -3
  47. package/dest/services/reqresp/reqresp.d.ts.map +1 -1
  48. package/dest/services/reqresp/reqresp.js +39 -13
  49. package/dest/services/service.d.ts +3 -2
  50. package/dest/services/service.d.ts.map +1 -1
  51. package/dest/services/tx_collector.d.ts +14 -0
  52. package/dest/services/tx_collector.d.ts.map +1 -0
  53. package/dest/services/tx_collector.js +76 -0
  54. package/dest/test-helpers/reqresp-nodes.d.ts +3 -3
  55. package/dest/test-helpers/reqresp-nodes.d.ts.map +1 -1
  56. package/dest/test-helpers/reqresp-nodes.js +4 -4
  57. package/dest/testbench/p2p_client_testbench_worker.js +1 -1
  58. package/package.json +12 -12
  59. package/src/client/interface.ts +8 -4
  60. package/src/client/p2p_client.ts +22 -10
  61. package/src/config.ts +22 -1
  62. package/src/index.ts +2 -0
  63. package/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +45 -18
  64. package/src/mem_pools/tx_pool/memory_tx_pool.ts +2 -4
  65. package/src/mem_pools/tx_pool/tx_pool.ts +7 -1
  66. package/src/msg_validators/msg_seen_validator/msg_seen_validator.ts +36 -0
  67. package/src/services/dummy_service.ts +3 -1
  68. package/src/services/index.ts +1 -0
  69. package/src/services/libp2p/instrumentation.ts +39 -0
  70. package/src/services/libp2p/libp2p_service.ts +79 -11
  71. package/src/services/reqresp/connection-sampler/batch_connection_sampler.ts +4 -2
  72. package/src/services/reqresp/connection-sampler/connection_sampler.ts +8 -3
  73. package/src/services/reqresp/protocols/goodbye.ts +3 -1
  74. package/src/services/reqresp/rate-limiter/rate_limiter.ts +9 -3
  75. package/src/services/reqresp/rate-limiter/rate_limits.ts +1 -1
  76. package/src/services/reqresp/reqresp.ts +44 -16
  77. package/src/services/service.ts +4 -1
  78. package/src/services/tx_collector.ts +103 -0
  79. package/src/test-helpers/reqresp-nodes.ts +13 -8
  80. package/src/testbench/p2p_client_testbench_worker.ts +1 -1
@@ -11,11 +11,13 @@ import { DatabasePublicStateSource } from '@aztec/stdlib/trees';
11
11
  import { Tx, TxHash } from '@aztec/stdlib/tx';
12
12
  import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
13
13
 
14
+ import assert from 'assert';
15
+
14
16
  import { ArchiveCache } from '../../msg_validators/tx_validator/archive_cache.js';
15
17
  import { GasTxValidator } from '../../msg_validators/tx_validator/gas_validator.js';
16
18
  import { PoolInstrumentation, PoolName } from '../instrumentation.js';
17
19
  import { getPendingTxPriority } from './priority.js';
18
- import type { TxPool } from './tx_pool.js';
20
+ import type { TxPool, TxPoolOptions } from './tx_pool.js';
19
21
 
20
22
  /**
21
23
  * KV implementation of the Transaction Pool.
@@ -27,7 +29,10 @@ export class AztecKVTxPool implements TxPool {
27
29
  #txs: AztecAsyncMap<string, Buffer>;
28
30
 
29
31
  /** The maximum cumulative tx size that the pending txs in the pool take up. */
30
- #maxTxPoolSize: number | undefined;
32
+ #maxTxPoolSize: number = 0;
33
+
34
+ /** The tx eviction logic will kick in after the pool size is greater than maxTxPoolSize * txPoolOverflowFactor */
35
+ txPoolOverflowFactor: number = 1;
31
36
 
32
37
  /** Index from tx hash to the block number in which they were mined, filtered by mined txs. */
33
38
  #minedTxHashToBlock: AztecAsyncMap<string, number>;
@@ -63,7 +68,7 @@ export class AztecKVTxPool implements TxPool {
63
68
  #archivedTxIndices: AztecAsyncMap<number, string>;
64
69
 
65
70
  /** Number of txs to archive. */
66
- #archivedTxLimit: number;
71
+ #archivedTxLimit: number = 0;
67
72
 
68
73
  /** The world state synchronizer used in the node. */
69
74
  #worldStateSynchronizer: WorldStateSynchronizer;
@@ -85,12 +90,12 @@ export class AztecKVTxPool implements TxPool {
85
90
  archive: AztecAsyncKVStore,
86
91
  worldStateSynchronizer: WorldStateSynchronizer,
87
92
  telemetry: TelemetryClient = getTelemetryClient(),
88
- config: {
89
- maxTxPoolSize?: number;
90
- archivedTxLimit?: number;
91
- } = {},
93
+ config: TxPoolOptions = {},
92
94
  log = createLogger('p2p:tx_pool'),
93
95
  ) {
96
+ this.#log = log;
97
+ this.updateConfig(config);
98
+
94
99
  this.#txs = store.openMap('txs');
95
100
  this.#minedTxHashToBlock = store.openMap('txHashToBlockMined');
96
101
  this.#pendingTxPriorityToHash = store.openMultiMap('pendingTxFeeToHash');
@@ -98,18 +103,16 @@ export class AztecKVTxPool implements TxPool {
98
103
  this.#pendingTxHashToHeaderHash = store.openMap('pendingTxHashToHeaderHash');
99
104
  this.#pendingTxSize = store.openSingleton('pendingTxSize');
100
105
  this.#pendingTxCount = store.openSingleton('pendingTxCount');
101
- this.#maxTxPoolSize = config.maxTxPoolSize;
106
+
102
107
  this.#pendingTxs = new Map<string, Tx>();
103
108
  this.#nonEvictableTxs = new Set<string>();
104
109
 
105
110
  this.#archivedTxs = archive.openMap('archivedTxs');
106
111
  this.#archivedTxIndices = archive.openMap('archivedTxIndices');
107
- this.#archivedTxLimit = config.archivedTxLimit ?? 0;
108
112
 
109
113
  this.#store = store;
110
114
  this.#archive = archive;
111
115
  this.#worldStateSynchronizer = worldStateSynchronizer;
112
- this.#log = log;
113
116
  this.#metrics = new PoolInstrumentation(telemetry, PoolName.TX_POOL, () => store.estimateSize());
114
117
  }
115
118
 
@@ -386,9 +389,28 @@ export class AztecKVTxPool implements TxPool {
386
389
  return vals.map(x => TxHash.fromString(x));
387
390
  }
388
391
 
389
- public setMaxTxPoolSize(maxSizeBytes: number | undefined): Promise<void> {
390
- this.#maxTxPoolSize = maxSizeBytes;
391
- return Promise.resolve();
392
+ public updateConfig({ maxTxPoolSize, txPoolOverflowFactor, archivedTxLimit }: TxPoolOptions): void {
393
+ if (typeof maxTxPoolSize === 'number') {
394
+ assert(maxTxPoolSize >= 0, 'maxTxPoolSize must be greater or equal to 0');
395
+ this.#maxTxPoolSize = maxTxPoolSize;
396
+
397
+ if (maxTxPoolSize === 0) {
398
+ this.#log.info(`Disabling maximum tx mempool size. Tx eviction stopped`);
399
+ } else {
400
+ this.#log.info(`Setting maximum tx mempool size`, { maxTxPoolSize });
401
+ }
402
+ }
403
+
404
+ if (typeof txPoolOverflowFactor === 'number') {
405
+ assert(txPoolOverflowFactor >= 1, 'txPoolOveflowFactor must be greater or equal to 1');
406
+ this.txPoolOverflowFactor = txPoolOverflowFactor;
407
+ this.#log.info(`Allowing tx pool size to grow above limit`, { maxTxPoolSize, txPoolOverflowFactor });
408
+ }
409
+
410
+ if (typeof archivedTxLimit === 'number') {
411
+ assert(archivedTxLimit >= 0, 'archivedTxLimit must be greater or equal to 0');
412
+ this.#archivedTxLimit = archivedTxLimit;
413
+ }
392
414
  }
393
415
 
394
416
  public markTxsAsNonEvictable(txHashes: TxHash[]): Promise<void> {
@@ -483,7 +505,7 @@ export class AztecKVTxPool implements TxPool {
483
505
  private async evictLowPriorityTxs(
484
506
  newTxHashes: TxHash[],
485
507
  ): Promise<{ numLowPriorityTxsEvicted: number; numNewTxsEvicted: number }> {
486
- if (this.#maxTxPoolSize === undefined) {
508
+ if (this.#maxTxPoolSize === undefined || this.#maxTxPoolSize === 0) {
487
509
  return { numLowPriorityTxsEvicted: 0, numNewTxsEvicted: 0 };
488
510
  }
489
511
 
@@ -491,17 +513,22 @@ export class AztecKVTxPool implements TxPool {
491
513
  const txsToEvict: TxHash[] = [];
492
514
 
493
515
  let pendingTxsSize = (await this.#pendingTxSize.getAsync()) ?? 0;
494
- if (pendingTxsSize > this.#maxTxPoolSize) {
516
+ if (pendingTxsSize > this.#maxTxPoolSize * this.txPoolOverflowFactor) {
495
517
  for await (const txHash of this.#pendingTxPriorityToHash.valuesAsync()) {
496
518
  if (this.#nonEvictableTxs.has(txHash.toString())) {
497
519
  continue;
498
520
  }
499
- this.#log.verbose(`Evicting tx ${txHash} from pool due to low priority to satisfy max tx size limit`);
500
- txsToEvict.push(TxHash.fromString(txHash));
501
-
502
521
  const txSize =
503
522
  (await this.#pendingTxHashToSize.getAsync(txHash.toString())) ??
504
523
  (await this.getPendingTxByHash(txHash))?.getSize();
524
+
525
+ this.#log.verbose(`Evicting tx ${txHash} from pool due to low priority to satisfy max tx size limit`, {
526
+ txHash,
527
+ txSize,
528
+ });
529
+
530
+ txsToEvict.push(TxHash.fromString(txHash));
531
+
505
532
  if (txSize) {
506
533
  pendingTxsSize -= txSize;
507
534
  if (pendingTxsSize <= this.#maxTxPoolSize) {
@@ -5,7 +5,7 @@ import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-clien
5
5
 
6
6
  import { PoolInstrumentation, PoolName } from '../instrumentation.js';
7
7
  import { getPendingTxPriority } from './priority.js';
8
- import type { TxPool } from './tx_pool.js';
8
+ import type { TxPool, TxPoolOptions } from './tx_pool.js';
9
9
 
10
10
  /**
11
11
  * In-memory implementation of the Transaction Pool.
@@ -190,9 +190,7 @@ export class InMemoryTxPool implements TxPool {
190
190
  return Promise.resolve(Array.from(this.txs.keys()).map(x => TxHash.fromBigInt(x)));
191
191
  }
192
192
 
193
- setMaxTxPoolSize(_maxSizeBytes: number | undefined): Promise<void> {
194
- return Promise.resolve();
195
- }
193
+ updateConfig(_config: TxPoolOptions): void {}
196
194
 
197
195
  markTxsAsNonEvictable(_: TxHash[]): Promise<void> {
198
196
  return Promise.resolve();
@@ -1,5 +1,11 @@
1
1
  import type { Tx, TxHash } from '@aztec/stdlib/tx';
2
2
 
3
+ export type TxPoolOptions = {
4
+ maxTxPoolSize?: number;
5
+ txPoolOverflowFactor?: number;
6
+ archivedTxLimit?: number;
7
+ };
8
+
3
9
  /**
4
10
  * Interface of a transaction pool. The pool includes tx requests and is kept up-to-date by a P2P client.
5
11
  */
@@ -95,7 +101,7 @@ export interface TxPool {
95
101
  * Configure the maximum size of the tx pool
96
102
  * @param maxSizeBytes - The maximum size in bytes of the mempool. Set to undefined to disable it
97
103
  */
98
- setMaxTxPoolSize(maxSizeBytes: number | undefined): Promise<void>;
104
+ updateConfig(config: TxPoolOptions): void;
99
105
 
100
106
  /** Returns whether the pool is empty. */
101
107
  isEmpty(): Promise<boolean>;
@@ -0,0 +1,36 @@
1
+ // Implements a queue of message IDs
2
+ export class MessageSeenValidator {
3
+ private queue: Array<string>;
4
+ private writePointer = 0;
5
+ private seenMessages: Set<string> = new Set();
6
+
7
+ constructor(private queueLength: number) {
8
+ if (this.queueLength <= 0) {
9
+ throw new Error('Queue length must be greater than 0');
10
+ }
11
+ this.queue = new Array<string>(this.queueLength);
12
+ }
13
+
14
+ // Adds a message if not seen before. Returns true if added, false if already seen.
15
+ public addMessage(msgId: string): boolean {
16
+ // Check if the message is already in the cache
17
+ if (this.seenMessages.has(msgId)) {
18
+ return false;
19
+ }
20
+ // If we are at the cache limit, remove the oldest msg ID
21
+ if (this.seenMessages.size >= this.queueLength) {
22
+ const msgToRemove = this.queue[this.writePointer];
23
+ this.seenMessages.delete(msgToRemove);
24
+ }
25
+
26
+ // Insert the message into the cache and the queue
27
+ this.seenMessages.add(msgId);
28
+ this.queue[this.writePointer] = msgId;
29
+ this.writePointer = this.writePointer === this.queueLength - 1 ? 0 : this.writePointer + 1;
30
+ return true;
31
+ }
32
+
33
+ public size() {
34
+ return this.seenMessages.size;
35
+ }
36
+ }
@@ -51,7 +51,9 @@ export class DummyP2PService implements P2PService {
51
51
  /**
52
52
  * Register a callback into the validator client for when a block proposal is received
53
53
  */
54
- public registerBlockReceivedCallback(_: (block: BlockProposal) => Promise<BlockAttestation>) {}
54
+ public registerBlockReceivedCallback(
55
+ _callback: (block: BlockProposal, sender: PeerId) => Promise<BlockAttestation>,
56
+ ) {}
55
57
 
56
58
  /**
57
59
  * Sends a request to a peer.
@@ -1,2 +1,3 @@
1
1
  export * from './service.js';
2
2
  export * from './libp2p/libp2p_service.js';
3
+ export * from './tx_collector.js';
@@ -0,0 +1,39 @@
1
+ import type { Timer } from '@aztec/foundation/timer';
2
+ import type { TopicType } from '@aztec/stdlib/p2p';
3
+ import {
4
+ Attributes,
5
+ type Histogram,
6
+ Metrics,
7
+ type TelemetryClient,
8
+ type UpDownCounter,
9
+ ValueType,
10
+ } from '@aztec/telemetry-client';
11
+
12
+ export class P2PInstrumentation {
13
+ private messageValidationDuration: Histogram;
14
+ private messagePrevalidationCount: UpDownCounter;
15
+
16
+ constructor(client: TelemetryClient, name: string) {
17
+ const meter = client.getMeter(name);
18
+
19
+ this.messageValidationDuration = meter.createHistogram(Metrics.P2P_GOSSIP_MESSAGE_VALIDATION_DURATION, {
20
+ unit: 'ms',
21
+ description: 'How long validating a gossiped message takes',
22
+ valueType: ValueType.INT,
23
+ });
24
+
25
+ this.messagePrevalidationCount = meter.createUpDownCounter(Metrics.P2P_GOSSIP_MESSAGE_PREVALIDATION_COUNT, {
26
+ description: 'How many message pass/fail prevalidation',
27
+ valueType: ValueType.INT,
28
+ });
29
+ }
30
+
31
+ public recordMessageValidation(topicName: TopicType, timerOrMs: Timer | number) {
32
+ const ms = typeof timerOrMs === 'number' ? timerOrMs : timerOrMs.ms();
33
+ this.messageValidationDuration.record(Math.ceil(ms), { [Attributes.TOPIC_NAME]: topicName });
34
+ }
35
+
36
+ public incMessagePrevalidationStatus(passed: boolean, topicName: TopicType | undefined) {
37
+ this.messagePrevalidationCount.add(1, { [Attributes.TOPIC_NAME]: topicName, [Attributes.OK]: passed });
38
+ }
39
+ }
@@ -2,6 +2,7 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache';
2
2
  import { createLibp2pComponentLogger, createLogger } from '@aztec/foundation/log';
3
3
  import { SerialQueue } from '@aztec/foundation/queue';
4
4
  import { RunningPromise } from '@aztec/foundation/running-promise';
5
+ import { Timer } from '@aztec/foundation/timer';
5
6
  import type { AztecAsyncKVStore } from '@aztec/kv-store';
6
7
  import { protocolContractTreeRoot } from '@aztec/protocol-contracts';
7
8
  import type { L2BlockSource } from '@aztec/stdlib/block';
@@ -48,6 +49,7 @@ import { createLibp2p } from 'libp2p';
48
49
  import type { P2PConfig } from '../../config.js';
49
50
  import type { MemPools } from '../../mem_pools/interface.js';
50
51
  import { AttestationValidator, BlockProposalValidator } from '../../msg_validators/index.js';
52
+ import { MessageSeenValidator } from '../../msg_validators/msg_seen_validator/msg_seen_validator.js';
51
53
  import { getDefaultAllowedSetupFunctions } from '../../msg_validators/tx_validator/allowed_public_setup.js';
52
54
  import { type MessageValidator, createTxMessageValidators } from '../../msg_validators/tx_validator/factory.js';
53
55
  import { DoubleSpendTxValidator, TxProofValidator } from '../../msg_validators/tx_validator/index.js';
@@ -63,7 +65,8 @@ import { DEFAULT_SUB_PROTOCOL_VALIDATORS, ReqRespSubProtocol, type SubProtocolMa
63
65
  import { reqGoodbyeHandler } from '../reqresp/protocols/goodbye.js';
64
66
  import { pingHandler, reqRespBlockHandler, reqRespTxHandler, statusHandler } from '../reqresp/protocols/index.js';
65
67
  import { ReqResp } from '../reqresp/reqresp.js';
66
- import type { P2PService, PeerDiscoveryService } from '../service.js';
68
+ import type { P2PBlockReceivedCallback, P2PService, PeerDiscoveryService } from '../service.js';
69
+ import { P2PInstrumentation } from './instrumentation.js';
67
70
 
68
71
  interface ValidationResult {
69
72
  name: string;
@@ -80,6 +83,7 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
80
83
  private jobQueue: SerialQueue = new SerialQueue();
81
84
  private peerManager: PeerManager;
82
85
  private discoveryRunningPromise?: RunningPromise;
86
+ private msgIdSeenValidators: Record<TopicType, MessageSeenValidator> = {} as Record<TopicType, MessageSeenValidator>;
83
87
 
84
88
  // Message validators
85
89
  private attestationValidator: AttestationValidator;
@@ -101,10 +105,12 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
101
105
  * @param block - The block received from the peer.
102
106
  * @returns The attestation for the block, if any.
103
107
  */
104
- private blockReceivedCallback: (block: BlockProposal) => Promise<BlockAttestation | undefined>;
108
+ private blockReceivedCallback: P2PBlockReceivedCallback;
105
109
 
106
110
  private gossipSubEventHandler: (e: CustomEvent<GossipsubMessage>) => void;
107
111
 
112
+ private instrumentation: P2PInstrumentation;
113
+
108
114
  constructor(
109
115
  private clientType: T,
110
116
  private config: P2PConfig,
@@ -120,6 +126,12 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
120
126
  ) {
121
127
  super(telemetry, 'LibP2PService');
122
128
 
129
+ this.instrumentation = new P2PInstrumentation(telemetry, 'LibP2PService');
130
+
131
+ this.msgIdSeenValidators[TopicType.tx] = new MessageSeenValidator(config.seenMessageCacheSize);
132
+ this.msgIdSeenValidators[TopicType.block_proposal] = new MessageSeenValidator(config.seenMessageCacheSize);
133
+ this.msgIdSeenValidators[TopicType.block_attestation] = new MessageSeenValidator(config.seenMessageCacheSize);
134
+
123
135
  const versions = getVersions(config);
124
136
  this.protocolVersion = compressComponentVersions(versions);
125
137
  logger.info(`Started libp2p service with protocol version ${this.protocolVersion}`);
@@ -446,8 +458,9 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
446
458
  sendBatchRequest<SubProtocol extends ReqRespSubProtocol>(
447
459
  protocol: SubProtocol,
448
460
  requests: InstanceType<SubProtocolMap[SubProtocol]['request']>[],
461
+ pinnedPeerId: PeerId | undefined,
449
462
  ): Promise<(InstanceType<SubProtocolMap[SubProtocol]['response']> | undefined)[]> {
450
- return this.reqresp.sendBatchRequest(protocol, requests);
463
+ return this.reqresp.sendBatchRequest(protocol, requests, pinnedPeerId);
451
464
  }
452
465
 
453
466
  /**
@@ -458,9 +471,8 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
458
471
  return this.peerDiscoveryService.getEnr();
459
472
  }
460
473
 
461
- public registerBlockReceivedCallback(callback: (block: BlockProposal) => Promise<BlockAttestation | undefined>) {
474
+ public registerBlockReceivedCallback(callback: P2PBlockReceivedCallback) {
462
475
  this.blockReceivedCallback = callback;
463
- this.logger.verbose('Block received callback registered');
464
476
  }
465
477
 
466
478
  /**
@@ -494,6 +506,37 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
494
506
  return result.recipients.length;
495
507
  }
496
508
 
509
+ protected preValidateReceivedMessage(msg: Message, msgId: string, source: PeerId) {
510
+ let topicType: TopicType | undefined;
511
+
512
+ switch (msg.topic) {
513
+ case this.topicStrings[TopicType.tx]:
514
+ topicType = TopicType.tx;
515
+ break;
516
+ case this.topicStrings[TopicType.block_attestation]:
517
+ topicType = TopicType.block_attestation;
518
+ break;
519
+ case this.topicStrings[TopicType.block_proposal]:
520
+ topicType = TopicType.block_proposal;
521
+ break;
522
+ default:
523
+ this.logger.error(`Received message on unknown topic: ${msg.topic}`);
524
+ break;
525
+ }
526
+
527
+ const validator = topicType ? this.msgIdSeenValidators[topicType] : undefined;
528
+
529
+ if (!validator || !validator.addMessage(msgId)) {
530
+ this.instrumentation.incMessagePrevalidationStatus(false, topicType);
531
+ this.node.services.pubsub.reportMessageValidationResult(msgId, source.toString(), TopicValidatorResult.Ignore);
532
+ return false;
533
+ }
534
+
535
+ this.instrumentation.incMessagePrevalidationStatus(true, topicType);
536
+
537
+ return true;
538
+ }
539
+
497
540
  /**
498
541
  * Handles a new gossip message that was received by the client.
499
542
  * @param topic - The message's topic.
@@ -508,6 +551,11 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
508
551
  messageId: p2pMessage.id,
509
552
  messageLatency,
510
553
  });
554
+
555
+ if (!this.preValidateReceivedMessage(msg, msgId, source)) {
556
+ return;
557
+ }
558
+
511
559
  if (msg.topic === this.topicStrings[TopicType.tx]) {
512
560
  await this.handleGossipedTx(p2pMessage.payload, msgId, source);
513
561
  }
@@ -525,14 +573,20 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
525
573
  validationFunc: () => Promise<{ result: boolean; obj: T }>,
526
574
  msgId: string,
527
575
  source: PeerId,
576
+ topicType: TopicType,
528
577
  ): Promise<{ result: boolean; obj: T | undefined }> {
529
578
  let resultAndObj: { result: boolean; obj: T | undefined } = { result: false, obj: undefined };
579
+ const timer = new Timer();
530
580
  try {
531
581
  resultAndObj = await validationFunc();
532
582
  } catch (err) {
533
583
  this.logger.error(`Error deserialising and validating message `, err);
534
584
  }
535
585
 
586
+ if (resultAndObj.result) {
587
+ this.instrumentation.recordMessageValidation(topicType, timer);
588
+ }
589
+
536
590
  this.node.services.pubsub.reportMessageValidationResult(
537
591
  msgId,
538
592
  source.toString(),
@@ -548,7 +602,7 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
548
602
  return { result, obj: tx };
549
603
  };
550
604
 
551
- const { result, obj: tx } = await this.validateReceivedMessage<Tx>(validationFunc, msgId, source);
605
+ const { result, obj: tx } = await this.validateReceivedMessage<Tx>(validationFunc, msgId, source, TopicType.tx);
552
606
  if (!result || !tx) {
553
607
  return;
554
608
  }
@@ -579,6 +633,7 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
579
633
  validationFunc,
580
634
  msgId,
581
635
  source,
636
+ TopicType.block_attestation,
582
637
  );
583
638
  if (!result || !attestation) {
584
639
  return;
@@ -606,11 +661,16 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
606
661
  return { result, obj: block };
607
662
  };
608
663
 
609
- const { result, obj: block } = await this.validateReceivedMessage<BlockProposal>(validationFunc, msgId, source);
664
+ const { result, obj: block } = await this.validateReceivedMessage<BlockProposal>(
665
+ validationFunc,
666
+ msgId,
667
+ source,
668
+ TopicType.block_proposal,
669
+ );
610
670
  if (!result || !block) {
611
671
  return;
612
672
  }
613
- await this.processValidBlockProposal(block);
673
+ await this.processValidBlockProposal(block, source);
614
674
  }
615
675
 
616
676
  // REVIEW: callback pattern https://github.com/AztecProtocol/aztec-packages/issues/7963
@@ -620,9 +680,12 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
620
680
  [Attributes.BLOCK_ARCHIVE]: block.archive.toString(),
621
681
  [Attributes.P2P_ID]: await block.p2pMessageIdentifier().then(i => i.toString()),
622
682
  }))
623
- private async processValidBlockProposal(block: BlockProposal) {
683
+ private async processValidBlockProposal(block: BlockProposal, sender: PeerId) {
684
+ const slot = block.slotNumber.toBigInt();
685
+ const previousSlot = slot - 1n;
686
+ const epoch = slot / 32n;
624
687
  this.logger.verbose(
625
- `Received block ${block.blockNumber.toNumber()} for slot ${block.slotNumber.toNumber()} from external peer.`,
688
+ `Received block ${block.blockNumber.toNumber()} for slot ${slot}, epoch ${epoch} from external peer.`,
626
689
  {
627
690
  p2pMessageIdentifier: await block.p2pMessageIdentifier(),
628
691
  slot: block.slotNumber.toNumber(),
@@ -630,9 +693,14 @@ export class LibP2PService<T extends P2PClientType = P2PClientType.Full> extends
630
693
  block: block.blockNumber.toNumber(),
631
694
  },
632
695
  );
696
+ const attestationsForPreviousSlot = await this.mempools.attestationPool?.getAttestationsForSlot(previousSlot);
697
+ if (attestationsForPreviousSlot !== undefined) {
698
+ this.logger.verbose(`Received ${attestationsForPreviousSlot.length} attestations for slot ${previousSlot}`);
699
+ }
700
+
633
701
  // Mark the txs in this proposal as non-evictable
634
702
  await this.mempools.txPool.markTxsAsNonEvictable(block.payload.txHashes);
635
- const attestation = await this.blockReceivedCallback(block);
703
+ const attestation = await this.blockReceivedCallback(block, sender);
636
704
 
637
705
  // TODO: fix up this pattern - the abstraction is not nice
638
706
  // The attestation can be undefined if no handler is registered / the validator deems the block invalid
@@ -26,6 +26,7 @@ export class BatchConnectionSampler {
26
26
  private readonly connectionSampler: ConnectionSampler,
27
27
  batchSize: number,
28
28
  maxPeers: number,
29
+ exclude?: PeerId[],
29
30
  ) {
30
31
  if (maxPeers <= 0) {
31
32
  throw new Error('Max peers cannot be 0');
@@ -38,7 +39,8 @@ export class BatchConnectionSampler {
38
39
  this.requestsPerPeer = Math.max(1, Math.floor(batchSize / maxPeers));
39
40
 
40
41
  // Sample initial peers
41
- this.batch = this.connectionSampler.samplePeersBatch(maxPeers);
42
+ const excluding = exclude && new Map(exclude.map(peerId => [peerId.toString(), true] as const));
43
+ this.batch = this.connectionSampler.samplePeersBatch(maxPeers, excluding);
42
44
  }
43
45
 
44
46
  /**
@@ -70,7 +72,7 @@ export class BatchConnectionSampler {
70
72
  }
71
73
 
72
74
  const excluding = new Map([[peerId.toString(), true]]);
73
- const newPeer = this.connectionSampler.getPeer(excluding);
75
+ const newPeer = this.connectionSampler.getPeer(excluding); // Q: Shouldn't we accumulate all excluded peers? Otherwise the sampler could return us a previously excluded peer?
74
76
 
75
77
  if (newPeer) {
76
78
  this.batch[index] = newPeer;
@@ -137,9 +137,10 @@ export class ConnectionSampler {
137
137
  * Samples a batch of unique peers from the libp2p node, prioritizing peers without active connections
138
138
  *
139
139
  * @param numberToSample - The number of peers to sample
140
+ * @param excluding - The peers to exclude from the sampling
140
141
  * @returns Array of unique sampled peers, prioritizing those without active connections
141
142
  */
142
- samplePeersBatch(numberToSample: number): PeerId[] {
143
+ samplePeersBatch(numberToSample: number, excluding?: Map<string, boolean>): PeerId[] {
143
144
  const peers = this.libp2p.getPeers();
144
145
  this.logger.debug('Sampling peers batch', { numberToSample, peers });
145
146
 
@@ -149,7 +150,7 @@ export class ConnectionSampler {
149
150
  const batch: PeerId[] = [];
150
151
  const withActiveConnections: Set<PeerId> = new Set();
151
152
  for (let i = 0; i < numberToSample; i++) {
152
- const { peer, sampledPeers } = this.getPeerFromList(peers, undefined);
153
+ const { peer, sampledPeers } = this.getPeerFromList(peers, excluding);
153
154
  if (peer) {
154
155
  batch.push(peer);
155
156
  }
@@ -252,7 +253,11 @@ export class ConnectionSampler {
252
253
  activeConnectionsCount: updatedActiveConnectionsCount,
253
254
  });
254
255
 
255
- await stream?.close();
256
+ // NOTE: All other status codes indicate a closed stream.
257
+ // Either a graceful close (closed/closing) or a forced close (aborted/reset).
258
+ if (stream.status === 'open') {
259
+ await stream?.close();
260
+ }
256
261
  } catch (error) {
257
262
  this.logger.error(`Failed to close connection to peer with stream id ${streamId}`, error);
258
263
  } finally {
@@ -95,7 +95,9 @@ export function reqGoodbyeHandler(peerManager: PeerManager): ReqRespSubProtocolH
95
95
 
96
96
  peerManager.goodbyeReceived(peerId, reason);
97
97
 
98
- // Return a buffer of length 1 as an acknowledgement: this is allowed to fail
98
+ // NOTE: In the current implementation this won't be sent to the peer,
99
+ // as the connection to the peer has already been closed by peerManager.goodbyeReceived.
100
+ // We keep this just to satisfy the interface.
99
101
  return Promise.resolve(Buffer.from([0x0]));
100
102
  };
101
103
  }
@@ -8,7 +8,7 @@ import { PeerErrorSeverity } from '@aztec/stdlib/p2p';
8
8
  import type { PeerId } from '@libp2p/interface';
9
9
 
10
10
  import type { PeerScoring } from '../../peer-manager/peer_scoring.js';
11
- import type { ReqRespSubProtocol, ReqRespSubProtocolRateLimits } from '../interface.js';
11
+ import type { ProtocolRateLimitQuota, ReqRespSubProtocol, ReqRespSubProtocolRateLimits } from '../interface.js';
12
12
  import { DEFAULT_RATE_LIMITS } from './rate_limits.js';
13
13
 
14
14
  // Check for disconnected peers every 10 minutes
@@ -177,16 +177,18 @@ export class SubProtocolRateLimiter {
177
177
  */
178
178
  export class RequestResponseRateLimiter {
179
179
  private subProtocolRateLimiters: Map<ReqRespSubProtocol, SubProtocolRateLimiter>;
180
+ private rateLimits: ReqRespSubProtocolRateLimits;
180
181
 
181
182
  private cleanupInterval: NodeJS.Timeout | undefined = undefined;
182
183
 
183
184
  constructor(
184
185
  private peerScoring: PeerScoring,
185
- rateLimits: ReqRespSubProtocolRateLimits = DEFAULT_RATE_LIMITS,
186
+ rateLimits: Partial<ReqRespSubProtocolRateLimits> = {},
186
187
  ) {
187
188
  this.subProtocolRateLimiters = new Map();
188
189
 
189
- for (const [subProtocol, protocolLimits] of Object.entries(rateLimits)) {
190
+ this.rateLimits = { ...DEFAULT_RATE_LIMITS, ...rateLimits };
191
+ for (const [subProtocol, protocolLimits] of Object.entries(this.rateLimits)) {
190
192
  this.subProtocolRateLimiters.set(
191
193
  subProtocol as ReqRespSubProtocol,
192
194
  new SubProtocolRateLimiter(
@@ -228,4 +230,8 @@ export class RequestResponseRateLimiter {
228
230
  stop() {
229
231
  clearInterval(this.cleanupInterval);
230
232
  }
233
+
234
+ getRateLimits(protocol: ReqRespSubProtocol): ProtocolRateLimitQuota {
235
+ return this.rateLimits[protocol];
236
+ }
231
237
  }
@@ -29,7 +29,7 @@ export const DEFAULT_RATE_LIMITS: ReqRespSubProtocolRateLimits = {
29
29
  },
30
30
  globalLimit: {
31
31
  quotaTimeMs: 1000,
32
- quotaCount: 20,
32
+ quotaCount: 200,
33
33
  },
34
34
  },
35
35
  [ReqRespSubProtocol.BLOCK]: {