@aztec/p2p 0.0.1-commit.e6bd8901 → 0.0.1-commit.f146247c

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (217)
  1. package/dest/bootstrap/bootstrap.d.ts +4 -3
  2. package/dest/bootstrap/bootstrap.d.ts.map +1 -1
  3. package/dest/bootstrap/bootstrap.js +4 -4
  4. package/dest/client/factory.d.ts +1 -1
  5. package/dest/client/factory.d.ts.map +1 -1
  6. package/dest/client/factory.js +6 -5
  7. package/dest/client/p2p_client.d.ts +1 -1
  8. package/dest/client/p2p_client.d.ts.map +1 -1
  9. package/dest/client/p2p_client.js +9 -2
  10. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts +2 -0
  11. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts.map +1 -0
  12. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.js +305 -0
  13. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts +73 -0
  14. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts.map +1 -0
  15. package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.js +8 -0
  16. package/dest/config.d.ts +8 -2
  17. package/dest/config.d.ts.map +1 -1
  18. package/dest/config.js +2 -0
  19. package/dest/mem_pools/instrumentation.d.ts +1 -1
  20. package/dest/mem_pools/instrumentation.d.ts.map +1 -1
  21. package/dest/mem_pools/instrumentation.js +2 -2
  22. package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts +1 -1
  23. package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts.map +1 -1
  24. package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.js +7 -2
  25. package/dest/msg_validators/proposal_validator/proposal_validator.js +5 -5
  26. package/dest/msg_validators/tx_validator/archive_cache.d.ts +3 -3
  27. package/dest/msg_validators/tx_validator/archive_cache.d.ts.map +1 -1
  28. package/dest/msg_validators/tx_validator/archive_cache.js +1 -1
  29. package/dest/msg_validators/tx_validator/block_header_validator.d.ts +5 -4
  30. package/dest/msg_validators/tx_validator/block_header_validator.d.ts.map +1 -1
  31. package/dest/msg_validators/tx_validator/block_header_validator.js +3 -2
  32. package/dest/msg_validators/tx_validator/data_validator.d.ts +3 -1
  33. package/dest/msg_validators/tx_validator/data_validator.d.ts.map +1 -1
  34. package/dest/msg_validators/tx_validator/data_validator.js +4 -1
  35. package/dest/msg_validators/tx_validator/double_spend_validator.d.ts +3 -2
  36. package/dest/msg_validators/tx_validator/double_spend_validator.d.ts.map +1 -1
  37. package/dest/msg_validators/tx_validator/double_spend_validator.js +3 -2
  38. package/dest/msg_validators/tx_validator/factory.d.ts +8 -3
  39. package/dest/msg_validators/tx_validator/factory.d.ts.map +1 -1
  40. package/dest/msg_validators/tx_validator/factory.js +21 -11
  41. package/dest/msg_validators/tx_validator/gas_validator.d.ts +3 -2
  42. package/dest/msg_validators/tx_validator/gas_validator.d.ts.map +1 -1
  43. package/dest/msg_validators/tx_validator/gas_validator.js +3 -2
  44. package/dest/msg_validators/tx_validator/metadata_validator.d.ts +3 -2
  45. package/dest/msg_validators/tx_validator/metadata_validator.d.ts.map +1 -1
  46. package/dest/msg_validators/tx_validator/metadata_validator.js +2 -2
  47. package/dest/msg_validators/tx_validator/phases_validator.d.ts +3 -2
  48. package/dest/msg_validators/tx_validator/phases_validator.d.ts.map +1 -1
  49. package/dest/msg_validators/tx_validator/phases_validator.js +3 -3
  50. package/dest/msg_validators/tx_validator/size_validator.d.ts +3 -1
  51. package/dest/msg_validators/tx_validator/size_validator.d.ts.map +1 -1
  52. package/dest/msg_validators/tx_validator/size_validator.js +4 -1
  53. package/dest/msg_validators/tx_validator/timestamp_validator.d.ts +3 -2
  54. package/dest/msg_validators/tx_validator/timestamp_validator.d.ts.map +1 -1
  55. package/dest/msg_validators/tx_validator/timestamp_validator.js +2 -2
  56. package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts +3 -2
  57. package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts.map +1 -1
  58. package/dest/msg_validators/tx_validator/tx_permitted_validator.js +2 -2
  59. package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts +3 -2
  60. package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts.map +1 -1
  61. package/dest/msg_validators/tx_validator/tx_proof_validator.js +2 -2
  62. package/dest/services/data_store.d.ts +1 -1
  63. package/dest/services/data_store.d.ts.map +1 -1
  64. package/dest/services/data_store.js +10 -6
  65. package/dest/services/discv5/discV5_service.js +1 -1
  66. package/dest/services/dummy_service.d.ts +13 -1
  67. package/dest/services/dummy_service.d.ts.map +1 -1
  68. package/dest/services/dummy_service.js +39 -0
  69. package/dest/services/libp2p/instrumentation.d.ts +1 -1
  70. package/dest/services/libp2p/instrumentation.d.ts.map +1 -1
  71. package/dest/services/libp2p/instrumentation.js +14 -3
  72. package/dest/services/libp2p/libp2p_service.d.ts +9 -3
  73. package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
  74. package/dest/services/libp2p/libp2p_service.js +37 -28
  75. package/dest/services/peer-manager/metrics.d.ts +2 -2
  76. package/dest/services/peer-manager/metrics.d.ts.map +1 -1
  77. package/dest/services/peer-manager/metrics.js +20 -5
  78. package/dest/services/peer-manager/peer_scoring.d.ts +1 -1
  79. package/dest/services/peer-manager/peer_scoring.d.ts.map +1 -1
  80. package/dest/services/peer-manager/peer_scoring.js +8 -2
  81. package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts +47 -0
  82. package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts.map +1 -0
  83. package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.js +566 -0
  84. package/dest/services/reqresp/batch-tx-requester/config.d.ts +17 -0
  85. package/dest/services/reqresp/batch-tx-requester/config.d.ts.map +1 -0
  86. package/dest/services/reqresp/batch-tx-requester/config.js +27 -0
  87. package/dest/services/reqresp/batch-tx-requester/interface.d.ts +50 -0
  88. package/dest/services/reqresp/batch-tx-requester/interface.d.ts.map +1 -0
  89. package/dest/services/reqresp/batch-tx-requester/interface.js +1 -0
  90. package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts +37 -0
  91. package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts.map +1 -0
  92. package/dest/services/reqresp/batch-tx-requester/missing_txs.js +151 -0
  93. package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts +54 -0
  94. package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts.map +1 -0
  95. package/dest/services/reqresp/batch-tx-requester/peer_collection.js +139 -0
  96. package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts +20 -0
  97. package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts.map +1 -0
  98. package/dest/services/reqresp/batch-tx-requester/tx_validator.js +21 -0
  99. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts +22 -3
  100. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts.map +1 -1
  101. package/dest/services/reqresp/connection-sampler/batch_connection_sampler.js +63 -4
  102. package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts +2 -1
  103. package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts.map +1 -1
  104. package/dest/services/reqresp/connection-sampler/connection_sampler.js +12 -0
  105. package/dest/services/reqresp/interface.d.ts +3 -1
  106. package/dest/services/reqresp/interface.d.ts.map +1 -1
  107. package/dest/services/reqresp/metrics.d.ts +6 -5
  108. package/dest/services/reqresp/metrics.d.ts.map +1 -1
  109. package/dest/services/reqresp/metrics.js +17 -5
  110. package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts +5 -1
  111. package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts.map +1 -1
  112. package/dest/services/reqresp/protocols/block_txs/bitvector.js +5 -0
  113. package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts +1 -1
  114. package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts.map +1 -1
  115. package/dest/services/reqresp/protocols/block_txs/block_txs_handler.js +16 -3
  116. package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts +18 -6
  117. package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts.map +1 -1
  118. package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.js +43 -13
  119. package/dest/services/reqresp/reqresp.d.ts +6 -1
  120. package/dest/services/reqresp/reqresp.d.ts.map +1 -1
  121. package/dest/services/reqresp/reqresp.js +58 -22
  122. package/dest/services/service.d.ts +4 -1
  123. package/dest/services/service.d.ts.map +1 -1
  124. package/dest/services/tx_collection/config.d.ts +4 -1
  125. package/dest/services/tx_collection/config.d.ts.map +1 -1
  126. package/dest/services/tx_collection/config.js +9 -1
  127. package/dest/services/tx_collection/fast_tx_collection.d.ts +6 -4
  128. package/dest/services/tx_collection/fast_tx_collection.d.ts.map +1 -1
  129. package/dest/services/tx_collection/fast_tx_collection.js +16 -5
  130. package/dest/services/tx_collection/index.d.ts +2 -1
  131. package/dest/services/tx_collection/index.d.ts.map +1 -1
  132. package/dest/services/tx_collection/index.js +1 -0
  133. package/dest/services/tx_collection/instrumentation.d.ts +1 -1
  134. package/dest/services/tx_collection/instrumentation.d.ts.map +1 -1
  135. package/dest/services/tx_collection/instrumentation.js +9 -2
  136. package/dest/services/tx_collection/proposal_tx_collector.d.ts +48 -0
  137. package/dest/services/tx_collection/proposal_tx_collector.d.ts.map +1 -0
  138. package/dest/services/tx_collection/proposal_tx_collector.js +50 -0
  139. package/dest/services/tx_collection/tx_collection.d.ts +4 -4
  140. package/dest/services/tx_collection/tx_collection.d.ts.map +1 -1
  141. package/dest/services/tx_collection/tx_collection.js +5 -5
  142. package/dest/services/tx_provider_instrumentation.d.ts +1 -1
  143. package/dest/services/tx_provider_instrumentation.d.ts.map +1 -1
  144. package/dest/services/tx_provider_instrumentation.js +5 -5
  145. package/dest/test-helpers/index.d.ts +3 -1
  146. package/dest/test-helpers/index.d.ts.map +1 -1
  147. package/dest/test-helpers/index.js +2 -0
  148. package/dest/test-helpers/test_tx_provider.d.ts +40 -0
  149. package/dest/test-helpers/test_tx_provider.d.ts.map +1 -0
  150. package/dest/test-helpers/test_tx_provider.js +41 -0
  151. package/dest/test-helpers/testbench-utils.d.ts +158 -0
  152. package/dest/test-helpers/testbench-utils.d.ts.map +1 -0
  153. package/dest/test-helpers/testbench-utils.js +297 -0
  154. package/dest/testbench/p2p_client_testbench_worker.d.ts +28 -2
  155. package/dest/testbench/p2p_client_testbench_worker.d.ts.map +1 -1
  156. package/dest/testbench/p2p_client_testbench_worker.js +212 -131
  157. package/dest/testbench/worker_client_manager.d.ts +51 -6
  158. package/dest/testbench/worker_client_manager.d.ts.map +1 -1
  159. package/dest/testbench/worker_client_manager.js +226 -44
  160. package/package.json +14 -14
  161. package/src/bootstrap/bootstrap.ts +7 -4
  162. package/src/client/factory.ts +6 -10
  163. package/src/client/p2p_client.ts +9 -2
  164. package/src/client/test/tx_proposal_collector/README.md +227 -0
  165. package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts +336 -0
  166. package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.ts +43 -0
  167. package/src/config.ts +6 -1
  168. package/src/mem_pools/instrumentation.ts +2 -1
  169. package/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts +8 -2
  170. package/src/msg_validators/proposal_validator/proposal_validator.ts +5 -5
  171. package/src/msg_validators/tx_validator/archive_cache.ts +3 -3
  172. package/src/msg_validators/tx_validator/block_header_validator.ts +6 -5
  173. package/src/msg_validators/tx_validator/data_validator.ts +6 -2
  174. package/src/msg_validators/tx_validator/double_spend_validator.ts +4 -3
  175. package/src/msg_validators/tx_validator/factory.ts +64 -23
  176. package/src/msg_validators/tx_validator/gas_validator.ts +9 -3
  177. package/src/msg_validators/tx_validator/metadata_validator.ts +6 -3
  178. package/src/msg_validators/tx_validator/phases_validator.ts +5 -3
  179. package/src/msg_validators/tx_validator/size_validator.ts +6 -2
  180. package/src/msg_validators/tx_validator/timestamp_validator.ts +6 -3
  181. package/src/msg_validators/tx_validator/tx_permitted_validator.ts +8 -3
  182. package/src/msg_validators/tx_validator/tx_proof_validator.ts +8 -3
  183. package/src/services/data_store.ts +10 -7
  184. package/src/services/discv5/discV5_service.ts +1 -1
  185. package/src/services/dummy_service.ts +45 -0
  186. package/src/services/libp2p/instrumentation.ts +15 -2
  187. package/src/services/libp2p/libp2p_service.ts +60 -46
  188. package/src/services/peer-manager/metrics.ts +21 -4
  189. package/src/services/peer-manager/peer_scoring.ts +4 -1
  190. package/src/services/reqresp/batch-tx-requester/README.md +305 -0
  191. package/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +706 -0
  192. package/src/services/reqresp/batch-tx-requester/config.ts +40 -0
  193. package/src/services/reqresp/batch-tx-requester/interface.ts +57 -0
  194. package/src/services/reqresp/batch-tx-requester/missing_txs.ts +209 -0
  195. package/src/services/reqresp/batch-tx-requester/peer_collection.ts +205 -0
  196. package/src/services/reqresp/batch-tx-requester/tx_validator.ts +37 -0
  197. package/src/services/reqresp/connection-sampler/batch_connection_sampler.ts +65 -4
  198. package/src/services/reqresp/connection-sampler/connection_sampler.ts +16 -0
  199. package/src/services/reqresp/interface.ts +3 -0
  200. package/src/services/reqresp/metrics.ts +34 -9
  201. package/src/services/reqresp/protocols/block_txs/bitvector.ts +7 -0
  202. package/src/services/reqresp/protocols/block_txs/block_txs_handler.ts +18 -4
  203. package/src/services/reqresp/protocols/block_txs/block_txs_reqresp.ts +51 -9
  204. package/src/services/reqresp/reqresp.ts +66 -19
  205. package/src/services/service.ts +4 -0
  206. package/src/services/tx_collection/config.ts +15 -1
  207. package/src/services/tx_collection/fast_tx_collection.ts +36 -13
  208. package/src/services/tx_collection/index.ts +5 -0
  209. package/src/services/tx_collection/instrumentation.ts +11 -2
  210. package/src/services/tx_collection/proposal_tx_collector.ts +114 -0
  211. package/src/services/tx_collection/tx_collection.ts +4 -4
  212. package/src/services/tx_provider_instrumentation.ts +11 -5
  213. package/src/test-helpers/index.ts +2 -0
  214. package/src/test-helpers/test_tx_provider.ts +64 -0
  215. package/src/test-helpers/testbench-utils.ts +374 -0
  216. package/src/testbench/p2p_client_testbench_worker.ts +321 -122
  217. package/src/testbench/worker_client_manager.ts +304 -47
package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.js
@@ -0,0 +1,566 @@
+ import { chunkWrapAround } from '@aztec/foundation/collection';
+ import { TimeoutError } from '@aztec/foundation/error';
+ import { createLogger } from '@aztec/foundation/log';
+ import { FifoMemoryQueue, Semaphore } from '@aztec/foundation/queue';
+ import { sleep } from '@aztec/foundation/sleep';
+ import { DateProvider, executeTimeout } from '@aztec/foundation/timer';
+ import { PeerErrorSeverity } from '@aztec/stdlib/p2p';
+ import { TxHash } from '@aztec/stdlib/tx';
+ import { peerIdFromString } from '@libp2p/peer-id';
+ import { ReqRespSubProtocol } from '.././interface.js';
+ import { BlockTxsRequest, BlockTxsResponse } from '.././protocols/index.js';
+ import { ReqRespStatus } from '.././status.js';
+ import { DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD, DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT, DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT, DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE } from './config.js';
+ import { MissingTxMetadata, MissingTxMetadataCollection } from './missing_txs.js';
+ import { PeerCollection } from './peer_collection.js';
+ import { BatchRequestTxValidator } from './tx_validator.js';
+ /*
+  * Tries to fetch all missing transactions until the deadline is hit.
+  * Transactions are yielded by calling the run*() method.
+  *
+  * We have a couple of peer types:
+  * - Pinned peer: the one who sent us the block proposal
+  * - Dumb peer:
+  *   - We query this peer blindly because we don't know which txs it has
+  *   - We hope it might have some of the transactions we asked for
+  *   - When this peer sends a response it might become a smart peer
+  * - Smart peer:
+  *   - Initially there are no smart peers; all are considered "dumb"
+  *   - A peer becomes smart when its response tells us exactly which transactions it has
+  *     AND we are missing some of those transactions
+  * - Bad peer:
+  *   - A peer which was unable to send us a successful response N times in a row
+  */
+ export class BatchTxRequester {
34
+ blockProposal;
35
+ pinnedPeer;
36
+ timeoutMs;
37
+ p2pService;
38
+ logger;
39
+ dateProvider;
40
+ opts;
41
+ peers;
42
+ txsMetadata;
43
+ deadline;
44
+ smartRequesterSemaphore;
45
+ txQueue;
46
+ txValidator;
47
+ smartParallelWorkerCount;
48
+ dumbParallelWorkerCount;
49
+ txBatchSize;
50
+ constructor(missingTxs, blockProposal, pinnedPeer, timeoutMs, p2pService, logger, dateProvider, opts){
51
+ this.blockProposal = blockProposal;
52
+ this.pinnedPeer = pinnedPeer;
53
+ this.timeoutMs = timeoutMs;
54
+ this.p2pService = p2pService;
55
+ this.logger = logger ?? createLogger('p2p:reqresp_batch');
56
+ this.dateProvider = dateProvider ?? new DateProvider();
57
+ this.opts = opts ?? {};
58
+ this.smartParallelWorkerCount = this.opts.smartParallelWorkerCount ?? DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT;
59
+ this.dumbParallelWorkerCount = this.opts.dumbParallelWorkerCount ?? DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT;
60
+ this.txBatchSize = this.opts.txBatchSize ?? DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE;
61
+ this.deadline = this.dateProvider.now() + this.timeoutMs;
62
+ this.txQueue = new FifoMemoryQueue(this.logger);
63
+ this.txValidator = this.opts.txValidator ?? new BatchRequestTxValidator(this.p2pService.txValidatorConfig);
64
+ if (this.opts.peerCollection) {
65
+ this.peers = this.opts.peerCollection;
66
+ } else {
67
+ const initialPeers = this.p2pService.connectionSampler.getPeerListSortedByConnectionCountAsc();
68
+ const badPeerThreshold = this.opts.badPeerThreshold ?? DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD;
69
+ this.peers = new PeerCollection(initialPeers, this.pinnedPeer, this.dateProvider, badPeerThreshold, this.p2pService.peerScoring);
70
+ }
71
+ const entries = missingTxs.map((h)=>[
72
+ h.toString(),
73
+ new MissingTxMetadata(h)
74
+ ]);
75
+ this.txsMetadata = new MissingTxMetadataCollection(entries, this.txBatchSize);
76
+ this.smartRequesterSemaphore = this.opts.semaphore ?? new Semaphore(0);
77
+ }
+   /*
+    * Fetches all missing transactions and yields them one by one
+    */
+   async *run() {
+     // Our timeout is represented in milliseconds but queue expects seconds
+     // We also want to make sure we wait at least 1 second in case of very low timeouts
+     const timeoutQueueAfter = Math.max(Math.ceil(this.timeoutMs / 1_000), 1);
+     try {
+       if (this.txsMetadata.getMissingTxHashes().size === 0) {
+         return undefined;
+       }
+       // Start workers in background
+       const workersPromise = executeTimeout(() => Promise.allSettled([
+         this.smartRequester(),
+         this.dumbRequester(),
+         this.pinnedPeerRequester()
+       ]), this.timeoutMs).finally(() => {
+         this.txQueue.end();
+       });
+       while (true) {
+         const tx = await this.txQueue.get(timeoutQueueAfter);
+         // null indicates that the queue has ended
+         if (tx === null) {
+           break;
+         }
+         yield tx;
+         if (this.shouldStop()) {
+           // Drain queue before ending
+           let remaining;
+           while ((remaining = this.txQueue.getImmediate()) !== undefined) {
+             yield remaining;
+           }
+           break;
+         }
+       }
+       this.unlockSmartRequesterSemaphores();
+       await workersPromise;
+     } catch (e) {
+       this.logger.error(`Batch tx requester failed with error: ${e.message}`, { error: e });
+     } finally {
+       this.txQueue.end();
+       this.unlockSmartRequesterSemaphores();
+     }
+   }
+   /*
+    * Fetches all missing transactions
+    * @returns Collection of fetched transactions
+    */
+   static async collectAllTxs(generator) {
+     const txs = [];
+     for await (const tx of generator) {
+       if (tx === undefined) {
+         break;
+       }
+       txs.push(tx);
+     }
+     return txs;
+   }
+   /*
+    * Handles the so-called pinned peer.
+    * The pinned peer is the one who sent us the block proposal.
+    * We expect the pinned peer to have all transactions from the proposal at some point.
+    * This holds because they themselves have to attest to the proposal and thus fetch all missing transactions.
+    *
+    * Given the reasoning above - we query the pinned peer separately from dumb/smart peers.
+    */
+   async pinnedPeerRequester() {
+     if (!this.pinnedPeer) {
+       this.logger.debug('No pinned peer to request from');
+       return;
+     }
+     while (!this.shouldStop()) {
+       // We've hit rate limits on the pinned peer - wait until the rate limit expires (clamped to deadline)
+       const rateLimitDelay = this.peers.getPeerRateLimitDelayMs(this.pinnedPeer);
+       const pinnedPeerIsRateLimited = rateLimitDelay !== undefined;
+       if (pinnedPeerIsRateLimited) {
+         await this.sleepClampedToDeadline(rateLimitDelay);
+         continue;
+       }
+       const pinnedPeerWentBad = this.peers.getBadPeers().has(this.pinnedPeer.toString());
+       if (pinnedPeerWentBad) {
+         return;
+       }
+       // From the pinned peer we always request transactions so that we first request the least requested and not in flight.
+       // This makes sense since the pinned peer should have ALL transactions;
+       // thus if it has all, it is best to ask the pinned peer first for the transactions we have trouble getting from other peers.
+       const txs = this.txsMetadata.getTxsToRequestFromThePeer(this.pinnedPeer);
+       if (txs.length === 0) {
+         this.logger.debug(`Pinned peer ${this.pinnedPeer.toString()} has no txs to request`);
+         return;
+       }
+       const request = BlockTxsRequest.fromBlockProposalAndMissingTxs(this.blockProposal, txs);
+       if (!request) {
+         return;
+       }
+       txs.forEach((tx) => {
+         this.txsMetadata.markRequested(tx);
+         this.txsMetadata.markInFlightBySmartPeer(tx);
+       });
+       await this.requestTxBatch(this.pinnedPeer, request);
+       txs.forEach((tx) => {
+         this.txsMetadata.markNotInFlightBySmartPeer(tx);
+       });
+     }
+   }
+   /*
+    * Starts dumb worker loops
+    */
+   async dumbRequester() {
+     const nextPeerIndex = this.makeRoundRobinIndexer();
+     const nextBatchIndex = this.makeRoundRobinIndexer();
+     // Chunk missing tx hashes into batches of txBatchSize, wrapping around to ensure no peer gets fewer than txBatchSize
+     const txChunks = () => {
+       const missingHashes = Array.from(this.txsMetadata.getMissingTxHashes());
+       return chunkWrapAround(missingHashes, this.txBatchSize);
+     };
+     const makeRequest = (_pid) => {
+       const chunks = txChunks();
+       const idx = nextBatchIndex(() => chunks.length);
+       const noMoreTxsToRequest = idx === undefined;
+       if (noMoreTxsToRequest) {
+         return undefined;
+       }
+       const txs = chunks[idx].map((t) => TxHash.fromString(t));
+       // If the peer is a dumb peer, we don't know yet if they received the full blockProposal.
+       // There is a solid chance the peer didn't receive the proposal yet, thus we must send full hashes.
+       const includeFullHashesInRequestNotJustIndices = true;
+       const blockRequest = BlockTxsRequest.fromBlockProposalAndMissingTxs(this.blockProposal, txs, includeFullHashesInRequestNotJustIndices);
+       const blockRequestHasNoMissingTxsFromTheProposal = !blockRequest;
+       if (blockRequestHasNoMissingTxsFromTheProposal) {
+         return undefined;
+       }
+       return { blockRequest, txs };
+     };
+     const nextPeer = () => {
+       const peers = this.peers.getDumbPeersToQuery();
+       const idx = nextPeerIndex(() => peers.length);
+       return idx === undefined ? undefined : peerIdFromString(peers[idx]);
+     };
+     const workerCount = Math.min(this.dumbParallelWorkerCount, this.peers.getAllPeers().size);
+     const workers = Array.from({ length: workerCount }, (_, index) => this.dumbWorkerLoop(nextPeer, makeRequest, index + 1));
+     await Promise.allSettled(workers);
+   }
+   /*
+    * Dumb worker loop.
+    * It fetches the next available dumb peer and builds a request for that peer.
+    * Loops until the shouldStop condition is met or there are no more dumb peers to query.
+    * This can happen if e.g. all "dumb" peers transition to "smart" or e.g. become "bad".
+    */
+   async dumbWorkerLoop(pickNextPeer, request, workerIndex) {
+     try {
+       this.logger.debug(`Dumb worker ${workerIndex} started`);
+       while (!this.shouldStop()) {
+         const peerId = pickNextPeer();
+         const weRanOutOfPeersToQuery = peerId === undefined;
+         if (weRanOutOfPeersToQuery) {
+           const nextDumbPeerDelay = this.peers.getNextDumbPeerAvailabilityDelayMs();
+           const thereAreSomeRateLimitedDumbPeers = nextDumbPeerDelay !== undefined;
+           if (thereAreSomeRateLimitedDumbPeers) {
+             // There are still some dumb peers to query but they have been rate limited.
+             // Sleep until the earliest one gets unblocked (clamped to deadline).
+             await this.sleepClampedToDeadline(nextDumbPeerDelay);
+             continue;
+           }
+           this.logger.debug(`Worker loop dumb: No more peers to query`);
+           break;
+         }
+         const nextBatchTxRequest = request(peerId);
+         if (!nextBatchTxRequest) {
+           this.logger.debug(`Worker loop dumb: no txs to request, exiting`);
+           break;
+         }
+         const { blockRequest, txs } = nextBatchTxRequest;
+         this.logger.debug(`Worker type dumb: Requesting txs from peer ${peerId.toString()}: ${txs.map((tx) => tx.toString()).join(', ')}`);
+         await this.requestTxBatch(peerId, blockRequest);
+       }
+     } catch (err) {
+       this.logger.error(`Dumb worker ${workerIndex} encountered an error: ${err}`);
+     } finally {
+       this.logger.debug(`Dumb worker ${workerIndex} finished`);
+     }
+   }
+   /*
+    * Starts smart worker loops
+    */
+   async smartRequester() {
+     const nextPeerIndex = this.makeRoundRobinIndexer();
+     const nextPeer = () => {
+       const peers = this.peers.getSmartPeersToQuery();
+       const idx = nextPeerIndex(() => peers.length);
+       return idx === undefined ? undefined : peerIdFromString(peers[idx]);
+     };
+     const makeRequest = (pid) => {
+       const txs = this.txsMetadata.getTxsToRequestFromThePeer(pid);
+       const blockRequest = BlockTxsRequest.fromBlockProposalAndMissingTxs(this.blockProposal, txs);
+       if (!blockRequest) {
+         return undefined;
+       }
+       return { blockRequest, txs };
+     };
+     const workers = Array.from({ length: Math.min(this.smartParallelWorkerCount, this.peers.getAllPeers().size) }, (_, index) => this.smartWorkerLoop(nextPeer, makeRequest, index + 1));
+     await Promise.allSettled(workers);
+   }
+   /*
+    * Smart worker loop.
+    * It fetches the next available smart peer and builds a request for that peer.
+    * Loops until the shouldStop condition is met.
+    *
+    * Notes:
+    * - We don't start the worker loop immediately, but block on a semaphore
+    *   until some dumb peer transitions to a smart peer
+    * - We might run out of smart peers, because:
+    *   - they "went bad"
+    *   - there are fewer smart peers than worker loops
+    *   In such a scenario we either wait for the next dumb peer to become smart or kill the worker loop
+    */
+   async smartWorkerLoop(pickNextPeer, request, workerIndex) {
+     try {
+       this.logger.trace(`Smart worker ${workerIndex} started`);
+       await executeTimeout((_) => this.smartRequesterSemaphore.acquire(), this.timeoutMs);
+       this.logger.trace(`Smart worker ${workerIndex} acquired semaphore`);
+       while (!this.shouldStop()) {
+         const peerId = pickNextPeer();
+         const weRanOutOfPeersToQuery = peerId === undefined;
+         if (weRanOutOfPeersToQuery) {
+           this.logger.debug(`Worker loop smart: No more peers to query`);
+           // If there are no more dumb peers to query then none of our peers can become smart,
+           // thus we can simply exit this worker.
+           const noMoreDumbPeersToQuery = this.peers.getDumbPeersToQuery().length === 0;
+           if (noMoreDumbPeersToQuery) {
+             // These might be smart peers that will get unblocked after _some time_
+             const nextSmartPeerDelay = this.peers.getNextSmartPeerAvailabilityDelayMs();
+             const thereAreSomeRateLimitedSmartPeers = nextSmartPeerDelay !== undefined;
+             if (thereAreSomeRateLimitedSmartPeers) {
+               await this.sleepClampedToDeadline(nextSmartPeerDelay);
+               continue;
+             }
+             this.logger.debug(`Worker loop smart: No more smart peers to query, killing ${workerIndex}`);
+             break;
+           }
+           // Otherwise there are still some dumb peers that could become smart.
+           // We end up here when all known smart peers became temporarily unavailable via a combination of
+           // (bad, in-flight, or rate-limited), or in some weird scenario all current smart peers turn bad (which is permanent)
+           // but dumb peers still exist that could become smart.
+           //
+           // When a dumb peer responds with valid txIndices, it gets
+           // promoted to smart and releases the semaphore, waking this worker.
+           await executeTimeout((_) => this.smartRequesterSemaphore.acquire(), this.timeoutMs);
+           this.logger.debug(`Worker loop smart: acquired next smart peer`);
+           continue;
+         }
+         const nextBatchTxRequest = request(peerId);
+         if (!nextBatchTxRequest) {
+           this.logger.debug(`Worker loop smart: no txs to request, exiting`);
+           break;
+         }
+         const { blockRequest, txs } = nextBatchTxRequest;
+         // We only mark transactions as in flight if queried from a smart peer,
+         // because asking a dumb peer is a shot in the dark (there is a good chance they won't have it).
+         // So we don't gain anything if we mark txs in-flight for dumb peers.
+         txs.forEach((tx) => {
+           this.txsMetadata.markRequested(tx);
+           this.txsMetadata.markInFlightBySmartPeer(tx);
+         });
+         await this.requestTxBatch(peerId, blockRequest);
+         txs.forEach((tx) => {
+           this.txsMetadata.markNotInFlightBySmartPeer(tx);
+         });
+       }
+     } catch (err) {
+       if (err instanceof TimeoutError) {
+         this.logger.debug(`Smart worker ${workerIndex} timed out waiting for semaphore`);
+       } else {
+         this.logger.error(`Smart worker ${workerIndex} encountered an error: ${err}`);
+       }
+     } finally {
+       this.logger.debug(`Smart worker ${workerIndex} finished`);
+     }
+   }
+   /*
+    * Sends the actual request to the peer and handles the response
+    *
+    * @param peerId - the peer to send the request to
+    * @param request - the actual request
+    */
+   async requestTxBatch(peerId, request) {
+     try {
+       this.peers.markPeerInFlight(peerId);
+       const response = await this.p2pService.reqResp.sendRequestToPeer(peerId, ReqRespSubProtocol.BLOCK_TXS, request.toBuffer());
+       if (response.status !== ReqRespStatus.SUCCESS) {
+         this.logger.debug(`Peer ${peerId.toString()} failed to respond with status: ${response.status}`);
+         this.handleFailResponseFromPeer(peerId, response.status);
+         return;
+       }
+       const blockResponse = BlockTxsResponse.fromBuffer(response.data);
+       await this.handleSuccessResponseFromPeer(peerId, blockResponse);
+     } catch (err) {
+       this.logger.error(`Failed to get valid response from peer ${peerId.toString()}: ${err.message}`, { peerId, error: err });
+       this.handleFailResponseFromPeer(peerId, ReqRespStatus.UNKNOWN);
+     } finally {
+       this.peers.unMarkPeerInFlight(peerId);
+     }
+   }
+   /*
+    * Handles a failed response from the peer.
+    * There are 3 scenarios:
+    * - RATE_LIMIT_EXCEEDED: we mark this and don't query this peer again for some time
+    * - FAILURE and UNKNOWN: we penalise the peer; if a peer has been penalised this way N times they are not queried again.
+    *   This implies we will query these peers a couple more times and give them a chance to "redeem" themselves before completely ignoring them.
+    */
+   handleFailResponseFromPeer(peerId, responseStatus) {
+     //TODO: Should we ban these peers?
+     if (responseStatus === ReqRespStatus.FAILURE || responseStatus === ReqRespStatus.UNKNOWN) {
+       this.peers.penalisePeer(peerId, PeerErrorSeverity.HighToleranceError);
+       return;
+     }
+     if (responseStatus === ReqRespStatus.RATE_LIMIT_EXCEEDED) {
+       this.peers.markPeerRateLimitExceeded(peerId);
+     }
+   }
+   /*
+    * Handles a successful response from the peer; this includes
+    * - Handling received transactions
+    * - Deciding if the peer is "smart" or not
+    */
+   async handleSuccessResponseFromPeer(peerId, response) {
+     this.logger.debug(`Received txs: ${response.txs.length} from peer ${peerId.toString()}`);
+     await this.handleReceivedTxs(peerId, response.txs);
+     this.decideIfPeerIsSmart(peerId, response);
+   }
+   /*
+    * Handles received txs.
+    * Transactions are validated and then put on the async queue
+    * to be yielded by the main running loop.
+    */
+   async handleReceivedTxs(peerId, txs) {
+     const newTxs = txs.filter((tx) => !this.txsMetadata.alreadyFetched(tx.txHash));
+     if (newTxs.length === 0) {
+       return;
+     }
+     //TODO: this validation can be slow, maybe spawn a worker just for validation.
+     // We could use the async queue for communication.
+     const validationResults = await Promise.allSettled(newTxs.map(async (tx) => ({
+       tx,
+       isValid: (await this.txValidator.validateRequestedTx(tx)).result === 'valid'
+     })));
+     let hasInvalidTx = false;
+     validationResults.forEach((result) => {
+       if (result.status === 'fulfilled' && result.value.isValid) {
+         if (this.txsMetadata.markFetched(peerId, result.value.tx)) {
+           this.txQueue.put(result.value.tx);
+         }
+       } else {
+         hasInvalidTx = true;
+       }
+     });
+     if (hasInvalidTx) {
+       this.peers.penalisePeer(peerId, PeerErrorSeverity.LowToleranceError);
+     } else {
+       // If we have received a successful response from the peer, they have "redeemed" themselves and are no longer considered bad.
+       this.peers.unMarkPeerAsBad(peerId);
+     }
+     const missingTxHashes = this.txsMetadata.getMissingTxHashes();
+     if (missingTxHashes.size === 0) {
+       // Wake sleepers so they can see shouldStop() and exit before waiting on the timeout.
+       this.unlockSmartRequesterSemaphores();
+     } else {
+       this.logger.trace(`Missing txs: ${Array.from(this.txsMetadata.getMissingTxHashes()).map((tx) => tx.toString()).join(', ')}`);
+     }
+   }
+   /*
+    * A peer is smart if:
+    * - They are not the pinned peer
+    * - They have sent a successful response indicating which txs from the block proposal they have
+    * - They have transactions we are missing
+    */
+   decideIfPeerIsSmart(peerId, response) {
+     const pinnedPeerShouldNeverBeMarkedAsSmart = this.pinnedPeer && peerId.toString() === this.pinnedPeer.toString();
+     if (pinnedPeerShouldNeverBeMarkedAsSmart) {
+       return;
+     }
+     const smartPeersAreDisabled = this.smartParallelWorkerCount === 0;
+     if (smartPeersAreDisabled) {
+       return;
+     }
+     // If the block response is invalid we still want to query this peer in the future,
+     // because they sent a successful response, so they might become a smart peer in the future
+     // or send us needed txs.
+     if (!this.isBlockResponseValid(response)) {
+       return;
+     }
+     // We mark a peer as "smart" only if they have some txs we are missing.
+     // Otherwise we keep them as "dumb" in the hope they'll receive some new txs we are missing in the future.
+     if (!this.peerHasSomeTxsWeAreMissing(peerId, response)) {
+       this.logger.debug(`${peerId.toString()} has no txs we are missing, skipping`);
+       return;
+     }
+     this.peers.markPeerSmart(peerId);
+     this.markTxsPeerHas(peerId, response);
+     // Unblock smart workers
+     if (this.peers.getSmartPeersToQuery().length <= this.smartParallelWorkerCount) {
+       this.smartRequesterSemaphore.release();
+     }
+   }
+   isBlockResponseValid(response) {
+     const archiveRootsMatch = this.blockProposal.archive.toString() === response.archiveRoot.toString();
+     const peerHasSomeTxsFromProposal = !response.txIndices.isEmpty();
+     return archiveRootsMatch && peerHasSomeTxsFromProposal;
+   }
+   peerHasSomeTxsWeAreMissing(_peerId, response) {
+     const txsPeerHas = new Set(this.extractHashesPeerHasFromResponse(response).map((h) => h.toString()));
+     return this.txsMetadata.getMissingTxHashes().intersection(txsPeerHas).size > 0;
+   }
+   markTxsPeerHas(peerId, response) {
+     const txsPeerHas = this.extractHashesPeerHasFromResponse(response);
+     this.logger.debug(`${peerId.toString()} has txs: ${txsPeerHas.map((tx) => tx.toString()).join(', ')}`);
+     this.txsMetadata.markPeerHas(peerId, txsPeerHas);
+   }
+   extractHashesPeerHasFromResponse(response) {
+     const hashes = [];
+     const indicesOfHashesPeerHas = new Set(response.txIndices.getTrueIndices());
+     this.blockProposal.txHashes.forEach((hash, idx) => {
+       if (indicesOfHashesPeerHas.has(idx)) {
+         hashes.push(hash);
+       }
+     });
+     return hashes;
+   }
+   /*
+    * Helper function to create a round-robin indexer -
+    * i.e. the "thing" which returns the next index/number in round-robin fashion.
+    */
+   makeRoundRobinIndexer(start = 0) {
+     let i = start;
+     /*
+      * Function to calculate the next round-robin number.
+      * The idea is that we pass in an array size, and based on it and the previous state we compute the next index.
+      * The array size can change between calls, thus it is passed as a function.
+      *
+      * @returns next index or undefined if size is 0
+      */
+     return (size) => {
+       const length = size();
+       if (length === 0) {
+         return undefined;
+       }
+       const current = i % length;
+       i = (current + 1) % length;
+       return current;
+     };
+   }
+   /*
+    * @returns true if all missing txs have been fetched
+    */
+   fetchedAllTxs() {
+     return Array.from(this.txsMetadata.values()).every((tx) => tx.fetched);
+   }
+   /*
+    * Checks if the BatchTxRequester should stop fetching missing txs.
+    * Conditions for stopping are:
+    * - There have been no missing transactions to start with
+    * - All transactions have been fetched
+    * - The deadline has been hit (no more time to fetch)
+    * - This process has been cancelled via abortSignal
+    *
+    * @returns true if the BatchTxRequester should stop, otherwise false
+    */
+   shouldStop() {
+     const aborted = this.opts.abortSignal?.aborted ?? false;
+     if (aborted) {
+       this.unlockSmartRequesterSemaphores();
+     }
+     return aborted || this.txsMetadata.size === 0 || this.fetchedAllTxs() || this.dateProvider.now() > this.deadline;
+   }
+   /*
+    * Helper function which unlocks all smart requester semaphores.
+    * @note This is needed, otherwise they will block forever.
+    */
+   unlockSmartRequesterSemaphores() {
+     for (let i = 0; i < this.smartParallelWorkerCount; i++) {
+       this.smartRequesterSemaphore.release();
+     }
+   }
+   /*
+    * Sleeps for the given duration, but clamped to the deadline.
+    * This ensures we don't sleep past the deadline.
+    */
+   async sleepClampedToDeadline(durationMs) {
+     const remaining = this.deadline - this.dateProvider.now();
+     const thereIsTimeRemaining = remaining > 0;
+     if (thereIsTimeRemaining) {
+       await sleep(Math.min(durationMs, remaining));
+     }
+   }
+ }
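For orientation, a minimal consumption sketch of the class above. The constructor argument order and the run() / collectAllTxs() APIs come from the diff itself; the surrounding wiring (proposal, proposer, p2pService, and the handleTx callback) is hypothetical and stands in for whatever the node provides.

import { TxHash } from '@aztec/stdlib/tx';
import { BatchTxRequester } from './batch_tx_requester.js';

// Hypothetical wiring: proposal, proposer and p2pService come from the node.
async function fetchProposalTxs(
  missingTxs: TxHash[],
  proposal: any,
  proposer: any,
  p2pService: any,
  handleTx: (tx: any) => void,
) {
  const requester = new BatchTxRequester(
    missingTxs,
    proposal,
    proposer, // pinned peer: the proposer who sent us the proposal
    10_000, // timeoutMs: overall deadline for the fetch
    p2pService,
    undefined, // logger: defaults to createLogger('p2p:reqresp_batch')
    undefined, // dateProvider: defaults to new DateProvider()
    { txBatchSize: 8 }, // opts: see BatchTxRequesterOptions below
  );
  // Stream txs as they are fetched and validated...
  for await (const tx of requester.run()) {
    handleTx(tx);
  }
  // ...or collect everything that arrives before the deadline:
  // const txs = await BatchTxRequester.collectAllTxs(requester.run());
}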
package/dest/services/reqresp/batch-tx-requester/config.d.ts
@@ -0,0 +1,17 @@
+ import { type ConfigMappingsType } from '@aztec/foundation/config';
+ export declare const DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT = 10;
+ export declare const DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT = 10;
+ export declare const DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE = 8;
+ export declare const DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD = 2;
+ export interface BatchTxRequesterConfig {
+   /** Max concurrent requests to smart peers. */
+   batchTxRequesterSmartParallelWorkerCount: number;
+   /** Max concurrent requests to dumb peers. */
+   batchTxRequesterDumbParallelWorkerCount: number;
+   /** Max transactions per request / chunk size. */
+   batchTxRequesterTxBatchSize: number;
+   /** Failures before a peer is considered bad (see > threshold logic). */
+   batchTxRequesterBadPeerThreshold: number;
+ }
+ export declare const batchTxRequesterConfigMappings: ConfigMappingsType<BatchTxRequesterConfig>;
+ //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiY29uZmlnLmQudHMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi8uLi8uLi8uLi9zcmMvc2VydmljZXMvcmVxcmVzcC9iYXRjaC10eC1yZXF1ZXN0ZXIvY29uZmlnLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBLE9BQU8sRUFBRSxLQUFLLGtCQUFrQixFQUFzQixNQUFNLDBCQUEwQixDQUFDO0FBRXZGLGVBQU8sTUFBTSxzREFBc0QsS0FBSyxDQUFDO0FBQ3pFLGVBQU8sTUFBTSxxREFBcUQsS0FBSyxDQUFDO0FBQ3hFLGVBQU8sTUFBTSx3Q0FBd0MsSUFBSSxDQUFDO0FBQzFELGVBQU8sTUFBTSw2Q0FBNkMsSUFBSSxDQUFDO0FBRS9ELE1BQU0sV0FBVyxzQkFBc0I7SUFDckMsOENBQThDO0lBQzlDLHdDQUF3QyxFQUFFLE1BQU0sQ0FBQztJQUNqRCw2Q0FBNkM7SUFDN0MsdUNBQXVDLEVBQUUsTUFBTSxDQUFDO0lBQ2hELGlEQUFpRDtJQUNqRCwyQkFBMkIsRUFBRSxNQUFNLENBQUM7SUFDcEMsd0VBQXdFO0lBQ3hFLGdDQUFnQyxFQUFFLE1BQU0sQ0FBQztDQUMxQztBQUVELGVBQU8sTUFBTSw4QkFBOEIsRUFBRSxrQkFBa0IsQ0FBQyxzQkFBc0IsQ0FxQnJGLENBQUMifQ==
package/dest/services/reqresp/batch-tx-requester/config.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../../../src/services/reqresp/batch-tx-requester/config.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,kBAAkB,EAAsB,MAAM,0BAA0B,CAAC;AAEvF,eAAO,MAAM,sDAAsD,KAAK,CAAC;AACzE,eAAO,MAAM,qDAAqD,KAAK,CAAC;AACxE,eAAO,MAAM,wCAAwC,IAAI,CAAC;AAC1D,eAAO,MAAM,6CAA6C,IAAI,CAAC;AAE/D,MAAM,WAAW,sBAAsB;IACrC,8CAA8C;IAC9C,wCAAwC,EAAE,MAAM,CAAC;IACjD,6CAA6C;IAC7C,uCAAuC,EAAE,MAAM,CAAC;IAChD,iDAAiD;IACjD,2BAA2B,EAAE,MAAM,CAAC;IACpC,wEAAwE;IACxE,gCAAgC,EAAE,MAAM,CAAC;CAC1C;AAED,eAAO,MAAM,8BAA8B,EAAE,kBAAkB,CAAC,sBAAsB,CAqBrF,CAAC"}
package/dest/services/reqresp/batch-tx-requester/config.js
@@ -0,0 +1,27 @@
+ import { numberConfigHelper } from '@aztec/foundation/config';
+ export const DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT = 10;
+ export const DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT = 10;
+ export const DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE = 8;
+ export const DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD = 2;
+ export const batchTxRequesterConfigMappings = {
+   batchTxRequesterSmartParallelWorkerCount: {
+     env: 'P2P_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT',
+     description: 'Max concurrent requests to smart peers for batch tx requester.',
+     ...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT)
+   },
+   batchTxRequesterDumbParallelWorkerCount: {
+     env: 'P2P_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT',
+     description: 'Max concurrent requests to dumb peers for batch tx requester.',
+     ...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT)
+   },
+   batchTxRequesterTxBatchSize: {
+     env: 'P2P_BATCH_TX_REQUESTER_TX_BATCH_SIZE',
+     description: 'Max transactions per request / chunk size for batch tx requester.',
+     ...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE)
+   },
+   batchTxRequesterBadPeerThreshold: {
+     env: 'P2P_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD',
+     description: 'Failures before a peer is considered bad (see > threshold logic).',
+     ...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD)
+   }
+ };
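A quick sketch of how these mappings might be resolved. It assumes a getConfigFromMappings helper in @aztec/foundation/config (the kind of resolver these ConfigMappingsType objects are typically fed to); that helper is not shown in this diff, so treat its name and signature as an assumption.

import { getConfigFromMappings } from '@aztec/foundation/config'; // assumed helper, not in this diff
import { batchTxRequesterConfigMappings, type BatchTxRequesterConfig } from './config.js';

// Override one knob via its env var; the rest fall back to the defaults
// baked in through numberConfigHelper above.
process.env.P2P_BATCH_TX_REQUESTER_TX_BATCH_SIZE = '16';
const config: BatchTxRequesterConfig = getConfigFromMappings(batchTxRequesterConfigMappings);
// config.batchTxRequesterTxBatchSize === 16
// config.batchTxRequesterBadPeerThreshold === DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD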
package/dest/services/reqresp/batch-tx-requester/interface.d.ts
@@ -0,0 +1,50 @@
+ import type { ISemaphore } from '@aztec/foundation/queue';
+ import type { PeerErrorSeverity } from '@aztec/stdlib/p2p';
+ import type { Tx, TxHash } from '@aztec/stdlib/tx';
+ import type { PeerId } from '@libp2p/interface';
+ import type { ConnectionSampler } from '../connection-sampler/connection_sampler.js';
+ import type { ReqRespInterface } from '../interface.js';
+ import type { MissingTxMetadata } from './missing_txs.js';
+ import type { IPeerCollection } from './peer_collection.js';
+ import type { BatchRequestTxValidatorConfig, IBatchRequestTxValidator } from './tx_validator.js';
+ export interface IPeerPenalizer {
+   penalizePeer(peerId: PeerId, penalty: PeerErrorSeverity): void;
+ }
+ export interface ITxMetadataCollection {
+   size: number;
+   values(): IterableIterator<MissingTxMetadata>;
+   getMissingTxHashes(): Set<string>;
+   getTxsToRequestFromThePeer(peer: PeerId): TxHash[];
+   markRequested(txHash: TxHash): void;
+   markInFlightBySmartPeer(txHash: TxHash): void;
+   markNotInFlightBySmartPeer(txHash: TxHash): void;
+   alreadyFetched(txHash: TxHash): boolean;
+   markFetched(peerId: PeerId, tx: Tx): boolean;
+   markPeerHas(peerId: PeerId, txHashes: TxHash[]): void;
+   getFetchedTxs(): Tx[];
+ }
+ /**
+  * Interface for BatchTxRequester dependencies that can be injected from upstream
+  */
+ export interface BatchTxRequesterLibP2PService {
+   /** ReqResp interface for sending requests to peers */
+   reqResp: Pick<ReqRespInterface, 'sendBatchRequest' | 'sendRequestToPeer'>;
+   /** Connection sampler for getting peer lists */
+   connectionSampler: Pick<ConnectionSampler, 'getPeerListSortedByConnectionCountAsc'>;
+   /** Configuration needed for transaction validation */
+   txValidatorConfig: BatchRequestTxValidatorConfig;
+   /** Peer scoring for penalizing peers */
+   peerScoring: IPeerPenalizer;
+ }
+ export interface BatchTxRequesterOptions {
+   smartParallelWorkerCount?: number;
+   dumbParallelWorkerCount?: number;
+   txBatchSize?: number;
+   badPeerThreshold?: number;
+   semaphore?: ISemaphore;
+   peerCollection?: IPeerCollection;
+   abortSignal?: AbortSignal;
+   /** Optional tx validator for testing - if not provided, one is created from p2pService.txValidatorConfig */
+   txValidator?: IBatchRequestTxValidator;
+ }
+ //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiaW50ZXJmYWNlLmQudHMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi8uLi8uLi8uLi9zcmMvc2VydmljZXMvcmVxcmVzcC9iYXRjaC10eC1yZXF1ZXN0ZXIvaW50ZXJmYWNlLnRzIl0sIm5hbWVzIjpbXSwibWFwcGluZ3MiOiJBQUFBLE9BQU8sS0FBSyxFQUFFLFVBQVUsRUFBRSxNQUFNLHlCQUF5QixDQUFDO0FBQzFELE9BQU8sS0FBSyxFQUFFLGlCQUFpQixFQUFFLE1BQU0sbUJBQW1CLENBQUM7QUFDM0QsT0FBTyxLQUFLLEVBQUUsRUFBRSxFQUFFLE1BQU0sRUFBRSxNQUFNLGtCQUFrQixDQUFDO0FBRW5ELE9BQU8sS0FBSyxFQUFFLE1BQU0sRUFBRSxNQUFNLG1CQUFtQixDQUFDO0FBRWhELE9BQU8sS0FBSyxFQUFFLGlCQUFpQixFQUFFLE1BQU0sNkNBQTZDLENBQUM7QUFDckYsT0FBTyxLQUFLLEVBQUUsZ0JBQWdCLEVBQUUsTUFBTSxpQkFBaUIsQ0FBQztBQUN4RCxPQUFPLEtBQUssRUFBRSxpQkFBaUIsRUFBRSxNQUFNLGtCQUFrQixDQUFDO0FBQzFELE9BQU8sS0FBSyxFQUFFLGVBQWUsRUFBRSxNQUFNLHNCQUFzQixDQUFDO0FBQzVELE9BQU8sS0FBSyxFQUFFLDZCQUE2QixFQUFFLHdCQUF3QixFQUFFLE1BQU0sbUJBQW1CLENBQUM7QUFFakcsTUFBTSxXQUFXLGNBQWM7SUFDN0IsWUFBWSxDQUFDLE1BQU0sRUFBRSxNQUFNLEVBQUUsT0FBTyxFQUFFLGlCQUFpQixHQUFHLElBQUksQ0FBQztDQUNoRTtBQUVELE1BQU0sV0FBVyxxQkFBcUI7SUFDcEMsSUFBSSxFQUFFLE1BQU0sQ0FBQztJQUNiLE1BQU0sSUFBSSxnQkFBZ0IsQ0FBQyxpQkFBaUIsQ0FBQyxDQUFDO0lBQzlDLGtCQUFrQixJQUFJLEdBQUcsQ0FBQyxNQUFNLENBQUMsQ0FBQztJQUNsQywwQkFBMEIsQ0FBQyxJQUFJLEVBQUUsTUFBTSxHQUFHLE1BQU0sRUFBRSxDQUFDO0lBQ25ELGFBQWEsQ0FBQyxNQUFNLEVBQUUsTUFBTSxHQUFHLElBQUksQ0FBQztJQUNwQyx1QkFBdUIsQ0FBQyxNQUFNLEVBQUUsTUFBTSxHQUFHLElBQUksQ0FBQztJQUM5QywwQkFBMEIsQ0FBQyxNQUFNLEVBQUUsTUFBTSxHQUFHLElBQUksQ0FBQztJQUNqRCxjQUFjLENBQUMsTUFBTSxFQUFFLE1BQU0sR0FBRyxPQUFPLENBQUM7SUFFeEMsV0FBVyxDQUFDLE1BQU0sRUFBRSxNQUFNLEVBQUUsRUFBRSxFQUFFLEVBQUUsR0FBRyxPQUFPLENBQUM7SUFDN0MsV0FBVyxDQUFDLE1BQU0sRUFBRSxNQUFNLEVBQUUsUUFBUSxFQUFFLE1BQU0sRUFBRSxHQUFHLElBQUksQ0FBQztJQUN0RCxhQUFhLElBQUksRUFBRSxFQUFFLENBQUM7Q0FDdkI7QUFFRDs7R0FFRztBQUNILE1BQU0sV0FBVyw2QkFBNkI7SUFDNUMsc0RBQXNEO0lBQ3RELE9BQU8sRUFBRSxJQUFJLENBQUMsZ0JBQWdCLEVBQUUsa0JBQWtCLEdBQUcsbUJBQW1CLENBQUMsQ0FBQztJQUMxRSxnREFBZ0Q7SUFDaEQsaUJBQWlCLEVBQUUsSUFBSSxDQUFDLGlCQUFpQixFQUFFLHVDQUF1QyxDQUFDLENBQUM7SUFDcEYsc0RBQXNEO0lBQ3RELGlCQUFpQixFQUFFLDZCQUE2QixDQUFDO0lBQ2pELHdDQUF3QztJQUN4QyxXQUFXLEVBQUUsY0FBYyxDQUFDO0NBQzdCO0FBRUQsTUFBTSxXQUFXLHVCQUF1QjtJQUN0Qyx3QkFBd0IsQ0FBQyxFQUFFLE1BQU0sQ0FBQztJQUNsQyx1QkFBdUIsQ0FBQyxFQUFFLE1BQU0sQ0FBQztJQUNqQyxXQUFXLENBQUMsRUFBRSxNQUFNLENBQUM7SUFDckIsZ0JBQWdCLENBQUMsRUFBRSxNQUFNLENBQUM7SUFFMUIsU0FBUyxDQUFDLEVBQUUsVUFBVSxDQUFDO0lBQ3ZCLGNBQWMsQ0FBQyxFQUFFLGVBQWUsQ0FBQztJQUNqQyxXQUFXLENBQUMsRUFBRSxXQUFXLENBQUM7SUFDMUIsNEdBQTRHO0lBQzVHLFdBQVcsQ0FBQyxFQUFFLHdCQUF3QixDQUFDO0NBQ3hDIn0=
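Since these interfaces exist so tests can inject doubles, here is a minimal sketch of one: a peer-scoring stub that records penalties instead of applying them. IPeerPenalizer is taken verbatim from the declaration above; the recording behavior is illustrative, not part of the package.

import type { PeerErrorSeverity } from '@aztec/stdlib/p2p';
import type { PeerId } from '@libp2p/interface';
import type { IPeerPenalizer } from './interface.js';

// Records every penalty so a test can assert on peer punishment without
// touching real peer-scoring state.
class RecordingPeerPenalizer implements IPeerPenalizer {
  readonly penalties: Array<{ peerId: PeerId; penalty: PeerErrorSeverity }> = [];
  penalizePeer(peerId: PeerId, penalty: PeerErrorSeverity): void {
    this.penalties.push({ peerId, penalty });
  }
}

// Usage: pass an instance as `peerScoring` inside a BatchTxRequesterLibP2PService
// test double, then assert on `penalizer.penalties` after exercising the requester.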