@lodestar/beacon-node 1.36.0-dev.d690a62b6c → 1.36.0-dev.d8afb6dc39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/chain/errors/dataColumnSidecarError.d.ts +17 -14
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +4 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/options.d.ts.map +1 -1
- package/lib/chain/options.js +2 -1
- package/lib/chain/options.js.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts +16 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js +31 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +32 -15
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +6 -1
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js.map +1 -1
- package/lib/network/events.d.ts +1 -0
- package/lib/network/events.d.ts.map +1 -1
- package/lib/network/peers/discover.js +2 -2
- package/lib/network/peers/discover.js.map +1 -1
- package/lib/network/processor/gossipHandlers.js +1 -1
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.d.ts.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js +3 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js +11 -2
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js.map +1 -1
- package/lib/network/reqresp/handlers/index.js +6 -6
- package/lib/network/reqresp/handlers/index.js.map +1 -1
- package/lib/network/reqresp/types.d.ts +1 -0
- package/lib/network/reqresp/types.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRange.d.ts +58 -13
- package/lib/sync/utils/downloadByRange.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRange.js +201 -82
- package/lib/sync/utils/downloadByRange.js.map +1 -1
- package/package.json +14 -14
- package/src/chain/errors/dataColumnSidecarError.ts +20 -14
- package/src/chain/options.ts +2 -0
- package/src/chain/stateCache/persistentCheckpointsCache.ts +45 -2
- package/src/chain/validation/dataColumnSidecar.ts +34 -16
- package/src/network/core/networkCoreWorker.ts +7 -2
- package/src/network/core/networkCoreWorkerHandler.ts +1 -1
- package/src/network/events.ts +1 -1
- package/src/network/peers/discover.ts +2 -2
- package/src/network/processor/gossipHandlers.ts +1 -1
- package/src/network/reqresp/ReqRespBeaconNode.ts +3 -1
- package/src/network/reqresp/handlers/beaconBlocksByRange.ts +15 -2
- package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts +13 -1
- package/src/network/reqresp/handlers/dataColumnSidecarsByRoot.ts +13 -1
- package/src/network/reqresp/handlers/index.ts +6 -6
- package/src/network/reqresp/types.ts +1 -0
- package/src/sync/utils/downloadByRange.ts +259 -103
package/src/chain/stateCache/persistentCheckpointsCache.ts
CHANGED

@@ -17,8 +17,10 @@ import {MapTracker} from "./mapMetrics.js";
 import {BlockStateCache, CacheItemType, CheckpointHex, CheckpointStateCache} from "./types.js";

 export type PersistentCheckpointStateCacheOpts = {
-  /** Keep max n
+  /** Keep max n state epochs in memory, persist the rest to disk */
   maxCPStateEpochsInMemory?: number;
+  /** Keep max n state epochs on disk */
+  maxCPStateEpochsOnDisk?: number;
 };

 type PersistentCheckpointStateCacheModules = {
@@ -58,6 +60,14 @@ type LoadedStateBytesData = {persistedKey: DatastoreKey; stateBytes: Uint8Array}
  */
 export const DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY = 3;

+/**
+ * By default we don't prune any persistent checkpoint states as it's not safe to delete them during
+ * long non-finality as we don't know the state of the chain and there could be a deep (hundreds of epochs) reorg
+ * if there two competing chains with similar weight but we wouldn't have a close enough state to pivot to this chain
+ * and instead require a resync from last finalized checkpoint state which could be very far in the past.
+ */
+export const DEFAULT_MAX_CP_STATE_ON_DISK = Infinity;
+
 // TODO GLOAS: re-evaluate this timing
 const PROCESS_CHECKPOINT_STATES_BPS = 6667;

@@ -104,6 +114,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
   private preComputedCheckpoint: string | null = null;
   private preComputedCheckpointHits: number | null = null;
   private readonly maxEpochsInMemory: number;
+  private readonly maxEpochsOnDisk: number;
   private readonly datastore: CPStateDatastore;
   private readonly blockStateCache: BlockStateCache;
   private readonly bufferPool?: BufferPool | null;
@@ -139,10 +150,16 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
     this.logger = logger;
     this.clock = clock;
     this.signal = signal;
+
     if (opts.maxCPStateEpochsInMemory !== undefined && opts.maxCPStateEpochsInMemory < 0) {
       throw new Error("maxEpochsInMemory must be >= 0");
     }
+    if (opts.maxCPStateEpochsOnDisk !== undefined && opts.maxCPStateEpochsOnDisk < 0) {
+      throw new Error("maxCPStateEpochsOnDisk must be >= 0");
+    }
+
     this.maxEpochsInMemory = opts.maxCPStateEpochsInMemory ?? DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY;
+    this.maxEpochsOnDisk = opts.maxCPStateEpochsOnDisk ?? DEFAULT_MAX_CP_STATE_ON_DISK;
     // Specify different datastore for testing
     this.datastore = datastore;
     this.blockStateCache = blockStateCache;
@@ -324,6 +341,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.logger.verbose("Added checkpoint state to memory", {epoch: cp.epoch, rootHex: cpHex.rootHex});
     }
     this.epochIndex.getOrDefault(cp.epoch).add(cpHex.rootHex);
+    this.prunePersistedStates();
   }

   /**
@@ -766,13 +784,38 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.cache.delete(key);
     }
     this.epochIndex.delete(epoch);
-    this.logger.verbose("Pruned
+    this.logger.verbose("Pruned checkpoint states for epoch", {
       epoch,
       persistCount,
       rootHexes: Array.from(rootHexes).join(","),
     });
   }

+  /**
+   * Prune persisted checkpoint states from disk.
+   * Note that this should handle all possible errors and not throw.
+   */
+  private prunePersistedStates(): void {
+    //                       epochsOnDisk                                epochsInMemory
+    // |----------------------------------------------------------|----------------------|
+    const maxTrackedEpochs = this.maxEpochsOnDisk + this.maxEpochsInMemory;
+    if (this.epochIndex.size <= maxTrackedEpochs) {
+      return;
+    }
+
+    const sortedEpochs = Array.from(this.epochIndex.keys()).sort((a, b) => a - b);
+    const pruneEpochs = sortedEpochs.slice(0, sortedEpochs.length - maxTrackedEpochs);
+    for (const epoch of pruneEpochs) {
+      this.deleteAllEpochItems(epoch).catch((e) =>
+        this.logger.debug(
+          "Error delete all epoch items",
+          {epoch, maxEpochsOnDisk: this.maxEpochsOnDisk, maxEpochsInMemory: this.maxEpochsInMemory},
+          e as Error
+        )
+      );
+    }
+  }
+
   /**
    * Serialize validators to bytes leveraging the buffer pool to save memory allocation.
    * - As monitored on holesky as of Jan 2024, it helps save ~500ms state reload time (4.3s vs 3.8s)
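The new maxCPStateEpochsOnDisk option bounds how many epochs of persisted checkpoint states are retained: once more than maxEpochsOnDisk + maxEpochsInMemory epochs are tracked, the oldest epochs are deleted. The following is a minimal standalone sketch of that window arithmetic; the helper name epochsToPrune and the simplified option type are illustrative, not part of the package.

// Hypothetical sketch of the pruning window introduced above (names mirror the diff).
type CacheOpts = {maxCPStateEpochsInMemory?: number; maxCPStateEpochsOnDisk?: number};

const DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY = 3;
const DEFAULT_MAX_CP_STATE_ON_DISK = Infinity;

function epochsToPrune(trackedEpochs: number[], opts: CacheOpts): number[] {
  const maxEpochsInMemory = opts.maxCPStateEpochsInMemory ?? DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY;
  const maxEpochsOnDisk = opts.maxCPStateEpochsOnDisk ?? DEFAULT_MAX_CP_STATE_ON_DISK;
  // Keep the newest (maxEpochsOnDisk + maxEpochsInMemory) epochs; prune everything older.
  // With the default Infinity, nothing is ever pruned.
  const maxTrackedEpochs = maxEpochsOnDisk + maxEpochsInMemory;
  if (trackedEpochs.length <= maxTrackedEpochs) return [];
  const sorted = [...trackedEpochs].sort((a, b) => a - b);
  return sorted.slice(0, sorted.length - maxTrackedEpochs);
}

// epochsToPrune([10, 11, 12, 13, 14, 15], {maxCPStateEpochsInMemory: 2, maxCPStateEpochsOnDisk: 2})
// returns [10, 11]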
package/src/chain/validation/dataColumnSidecar.ts
CHANGED

@@ -39,7 +39,7 @@ export async function validateGossipDataColumnSidecar(
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_SUBNET,
-
+      columnIndex: dataColumnSidecar.index,
       gossipSubnet: gossipSubnet,
     });
   }
@@ -156,7 +156,7 @@ export async function validateGossipDataColumnSidecar(
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }

@@ -173,7 +173,7 @@ export async function validateGossipDataColumnSidecar(
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF,
       slot: blockHeader.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   } finally {
     kzgProofTimer?.();
@@ -193,7 +193,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }

@@ -201,7 +201,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.NO_COMMITMENTS,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }

@@ -212,7 +212,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
       count: dataColumnSidecar.kzgCommitments.length,
       limit: maxBlobsPerBlock,
     });
@@ -293,7 +293,6 @@ export async function validateBlockDataColumnSidecars(
       "Block has no blob commitments but data column sidecars were provided"
     );
   }
-
   // Hash the first sidecar block header and compare the rest via (cheaper) equality
   const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message;
   const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader);
@@ -302,7 +301,7 @@ export async function validateBlockDataColumnSidecars(
       {
         code: DataColumnSidecarErrorCode.INCORRECT_BLOCK,
         slot: blockSlot,
-
+        columnIndex: 0,
         expected: toRootHex(blockRoot),
         actual: toRootHex(firstBlockRoot),
       },
@@ -317,33 +316,52 @@ export async function validateBlockDataColumnSidecars(
   for (let i = 0; i < dataColumnSidecars.length; i++) {
     const columnSidecar = dataColumnSidecars[i];

+    if (!ssz.phase0.BeaconBlockHeader.equals(firstSidecarBlockHeader, columnSidecar.signedBlockHeader.message)) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_HEADER_ROOT,
+        slot: blockSlot,
+        expected: toRootHex(blockRoot),
+        actual: toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message)),
+      });
+    }
+
     if (columnSidecar.index >= NUMBER_OF_COLUMNS) {
       throw new DataColumnSidecarValidationError(
         {
           code: DataColumnSidecarErrorCode.INVALID_INDEX,
           slot: blockSlot,
-
+          columnIndex: columnSidecar.index,
         },
         "DataColumnSidecar has invalid index"
       );
     }

-    if (columnSidecar.
+    if (columnSidecar.column.length !== blockBlobCount) {
       throw new DataColumnSidecarValidationError({
-        code: DataColumnSidecarErrorCode.
+        code: DataColumnSidecarErrorCode.INCORRECT_CELL_COUNT,
         slot: blockSlot,
-
+        columnIndex: columnSidecar.index,
         expected: blockBlobCount,
+        actual: columnSidecar.column.length,
+      });
+    }
+
+    if (columnSidecar.column.length !== columnSidecar.kzgCommitments.length) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT,
+        slot: blockSlot,
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
         actual: columnSidecar.kzgCommitments.length,
       });
     }

-    if (columnSidecar.
+    if (columnSidecar.column.length !== columnSidecar.kzgProofs.length) {
       throw new DataColumnSidecarValidationError({
         code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT,
         slot: blockSlot,
-
-        expected: columnSidecar.
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
         actual: columnSidecar.kzgProofs.length,
       });
     }
@@ -353,7 +371,7 @@ export async function validateBlockDataColumnSidecars(
       {
         code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
         slot: blockSlot,
-
+        columnIndex: columnSidecar.index,
       },
       "DataColumnSidecar has invalid inclusion proof"
     );
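The updated validateBlockDataColumnSidecars now also rejects sidecars whose block header differs from the first sidecar's header and whose cell, commitment, and proof counts disagree with the block's blob count. Below is a condensed, hypothetical sketch of those length checks; the simplified sidecar shape and plain Error throws stand in for the package's DataColumnSidecarValidationError.

// Hypothetical condensed form of the per-sidecar consistency checks added above.
type MinimalColumnSidecar = {
  index: number;
  column: unknown[];
  kzgCommitments: unknown[];
  kzgProofs: unknown[];
};

function assertColumnLengths(sidecar: MinimalColumnSidecar, blockBlobCount: number): void {
  // Every column must carry exactly one cell per blob commitment in the block.
  if (sidecar.column.length !== blockBlobCount) {
    throw new Error(`column ${sidecar.index}: expected ${blockBlobCount} cells, got ${sidecar.column.length}`);
  }
  // Cells, commitments, and proofs must all line up one-to-one.
  if (sidecar.column.length !== sidecar.kzgCommitments.length) {
    throw new Error(`column ${sidecar.index}: cell/commitment count mismatch`);
  }
  if (sidecar.column.length !== sidecar.kzgProofs.length) {
    throw new Error(`column ${sidecar.index}: cell/proof count mismatch`);
  }
}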
package/src/network/core/networkCoreWorker.ts
CHANGED

@@ -98,8 +98,13 @@ const core = await NetworkCore.init({
   metricsRegistry: metricsRegister,
   events,
   clock,
-  getReqRespHandler: (method) => (req, peerId) =>
-    reqRespBridgeRespCaller.getAsyncIterable({
+  getReqRespHandler: (method) => (req, peerId, peerClient) =>
+    reqRespBridgeRespCaller.getAsyncIterable({
+      method,
+      req,
+      peerId: peerIdToString(peerId),
+      peerClient,
+    }),
   activeValidatorCount: workerData.activeValidatorCount,
   initialStatus: workerData.initialStatus,
   initialCustodyGroupCount: workerData.initialCustodyGroupCount,
package/src/network/core/networkCoreWorkerHandler.ts
CHANGED

@@ -73,7 +73,7 @@ export class WorkerNetworkCore implements INetworkCore {
     // Handles ReqResp response from worker and calls async generator in main thread
     this.reqRespBridgeRespHandler = new AsyncIterableBridgeHandler(
       getReqRespBridgeRespEvents(this.reqRespBridgeEventBus),
-      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId))
+      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId), data.peerClient)
     );

     wireEventsOnMainThread<NetworkEventData>(
package/src/network/events.ts
CHANGED

@@ -29,7 +29,7 @@ export type NetworkEventData = {
     clientAgent: string;
   };
   [NetworkEvent.peerDisconnected]: {peer: PeerIdStr};
-  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId};
+  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId; peerClient: string};
   [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage;
   [NetworkEvent.gossipMessageValidationResult]: {
     msgId: string;
package/src/network/peers/discover.ts
CHANGED

@@ -391,8 +391,8 @@ export class PeerDiscovery {
     // tcp multiaddr is known to be be present, checked inside the worker
     const multiaddrTCP = enr.getLocationMultiaddr(ENRKey.tcp);
     if (!multiaddrTCP) {
-      this.logger.
-      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.
+      this.logger.warn("Discv5 worker sent enr without tcp multiaddr", {enr: enr.encodeTxt()});
+      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.no_multiaddrs});
       return;
     }
     // Are this fields mandatory?
package/src/network/processor/gossipHandlers.ts
CHANGED

@@ -307,7 +307,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
       });
       throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
         code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
-
+        columnIndex: dataColumnSidecar.index,
         slot,
       });
     }
package/src/network/reqresp/ReqRespBeaconNode.ts
CHANGED

@@ -19,6 +19,7 @@ import {callInNextEventLoop} from "../../util/eventLoop.js";
 import {NetworkCoreMetrics} from "../core/metrics.js";
 import {INetworkEventBus, NetworkEvent} from "../events.js";
 import {MetadataController} from "../metadata.js";
+import {ClientKind} from "../peers/client.ts";
 import {PeersData} from "../peers/peersData.js";
 import {IPeerRpcScoreStore, PeerAction} from "../peers/score/index.js";
 import {StatusCache} from "../statusCache.js";
@@ -300,10 +301,11 @@ export class ReqRespBeaconNode extends ReqResp {
   }

   protected onIncomingRequestBody(request: RequestTypedContainer, peer: PeerId): void {
+    const peerClient = this.peersData.getPeerKind(peer.toString()) ?? ClientKind.Unknown;
     // Allow onRequest to return and close the stream
     // For Goodbye there may be a race condition where the listener of `receivedGoodbye`
     // disconnects in the same synchronous call, preventing the stream from ending cleanly
-    callInNextEventLoop(() => this.networkEventBus.emit(NetworkEvent.reqRespRequest, {request, peer}));
+    callInNextEventLoop(() => this.networkEventBus.emit(NetworkEvent.reqRespRequest, {request, peer, peerClient}));
   }

   protected onIncomingRequest(peerId: PeerId, protocol: ProtocolDescriptor): void {
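ReqRespBeaconNode now resolves the requesting peer's client kind via peersData.getPeerKind (falling back to ClientKind.Unknown) and attaches it to the reqRespRequest event. A minimal sketch of that fallback resolution follows; the PeersDataLike shape and the concrete ClientKind values are simplified assumptions for illustration.

// Hypothetical sketch of the peerClient resolution shown above.
enum ClientKind {
  Lighthouse = "Lighthouse",
  Lodestar = "Lodestar",
  Unknown = "Unknown", // assumed fallback value
}

type PeersDataLike = {getPeerKind(peerIdStr: string): ClientKind | undefined};

function resolvePeerClient(peersData: PeersDataLike, peerIdStr: string): string {
  // Fall back to Unknown when the peer's user agent has not been identified yet.
  return peersData.getPeerKind(peerIdStr) ?? ClientKind.Unknown;
}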
package/src/network/reqresp/handlers/beaconBlocksByRange.ts
CHANGED

@@ -1,18 +1,22 @@
+import {PeerId} from "@libp2p/interface";
 import {BeaconConfig} from "@lodestar/config";
-import {GENESIS_SLOT, isForkPostDeneb} from "@lodestar/params";
+import {GENESIS_SLOT, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
 import {RespStatus, ResponseError, ResponseOutgoing} from "@lodestar/reqresp";
 import {computeEpochAtSlot} from "@lodestar/state-transition";
 import {deneb, phase0} from "@lodestar/types";
 import {fromHex} from "@lodestar/utils";
 import {IBeaconChain} from "../../../chain/index.js";
 import {IBeaconDb} from "../../../db/index.js";
+import {prettyPrintPeerId} from "../../util.ts";

 // TODO: Unit test

 export async function* onBeaconBlocksByRange(
   request: phase0.BeaconBlocksByRangeRequest,
   chain: IBeaconChain,
-  db: IBeaconDb
+  db: IBeaconDb,
+  peerId: PeerId,
+  peerClient: string
 ): AsyncIterable<ResponseOutgoing> {
   const {startSlot, count} = validateBeaconBlocksByRangeRequest(chain.config, request);
   const endSlot = startSlot + count;
@@ -23,6 +27,15 @@ export async function* onBeaconBlocksByRange(
   // chain.forkChoice.getFinalizeBlock().slot
   const finalizedSlot = chain.forkChoice.getFinalizedCheckpointSlot();

+  const forkName = chain.config.getForkName(startSlot);
+  if (isForkPostFulu(forkName) && startSlot < chain.earliestAvailableSlot) {
+    chain.logger.verbose("Peer did not respect earliestAvailableSlot for BeaconBlocksByRange", {
+      peer: prettyPrintPeerId(peerId),
+      client: peerClient,
+    });
+    return;
+  }
+
   // Finalized range of blocks
   if (startSlot <= finalizedSlot) {
     // Chain of blobs won't change
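Post-Fulu, the by-range handlers now bail out early and log the offending peer when the requested startSlot precedes chain.earliestAvailableSlot, instead of attempting to serve data the node no longer holds. A small sketch of that gate under assumed types follows; the MinimalChain shape and the isPostFulu flag are stand-ins, not the package's actual interfaces.

// Hypothetical sketch of the earliestAvailableSlot gate added to the by-range handlers.
type MinimalChain = {
  earliestAvailableSlot: number;
  logger: {verbose: (msg: string, ctx?: object) => void};
};

function shouldServeRange(chain: MinimalChain, startSlot: number, isPostFulu: boolean, peer: string, client: string): boolean {
  if (isPostFulu && startSlot < chain.earliestAvailableSlot) {
    // Log the peer and its client kind, then return an empty response rather than erroring.
    chain.logger.verbose("Peer did not respect earliestAvailableSlot for BeaconBlocksByRange", {peer, client});
    return false;
  }
  return true;
}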
package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts
CHANGED

@@ -1,3 +1,4 @@
+import {PeerId} from "@libp2p/interface";
 import {ChainConfig} from "@lodestar/config";
 import {GENESIS_SLOT} from "@lodestar/params";
 import {RespStatus, ResponseError, ResponseOutgoing} from "@lodestar/reqresp";
@@ -6,6 +7,7 @@ import {ColumnIndex, fulu} from "@lodestar/types";
 import {fromHex} from "@lodestar/utils";
 import {IBeaconChain} from "../../../chain/index.js";
 import {IBeaconDb} from "../../../db/index.js";
+import {prettyPrintPeerId} from "../../util.ts";
 import {
   handleColumnSidecarUnavailability,
   validateRequestedDataColumns,
@@ -14,7 +16,9 @@ import {
 export async function* onDataColumnSidecarsByRange(
   request: fulu.DataColumnSidecarsByRangeRequest,
   chain: IBeaconChain,
-  db: IBeaconDb
+  db: IBeaconDb,
+  peerId: PeerId,
+  peerClient: string
 ): AsyncIterable<ResponseOutgoing> {
   // Non-finalized range of columns
   const {startSlot, count, columns: requestedColumns} = validateDataColumnSidecarsByRangeRequest(chain.config, request);
@@ -25,6 +29,14 @@ export async function* onDataColumnSidecarsByRange(
     return;
   }

+  if (startSlot < chain.earliestAvailableSlot) {
+    chain.logger.verbose("Peer did not respect earliestAvailableSlot for DataColumnSidecarsByRange", {
+      peer: prettyPrintPeerId(peerId),
+      client: peerClient,
+    });
+    return;
+  }
+
   const finalized = db.dataColumnSidecarArchive;
   const unfinalized = db.dataColumnSidecar;
   const finalizedSlot = chain.forkChoice.getFinalizedBlock().slot;
package/src/network/reqresp/handlers/dataColumnSidecarsByRoot.ts
CHANGED

@@ -1,3 +1,4 @@
+import {PeerId} from "@libp2p/interface";
 import {ResponseOutgoing} from "@lodestar/reqresp";
 import {computeEpochAtSlot} from "@lodestar/state-transition";
 import {ColumnIndex} from "@lodestar/types";
@@ -5,6 +6,7 @@ import {toRootHex} from "@lodestar/utils";
 import {IBeaconChain} from "../../../chain/index.js";
 import {IBeaconDb} from "../../../db/index.js";
 import {DataColumnSidecarsByRootRequest} from "../../../util/types.js";
+import {prettyPrintPeerId} from "../../util.ts";
 import {
   handleColumnSidecarUnavailability,
   validateRequestedDataColumns,
@@ -13,7 +15,9 @@ import {
 export async function* onDataColumnSidecarsByRoot(
   requestBody: DataColumnSidecarsByRootRequest,
   chain: IBeaconChain,
-  db: IBeaconDb
+  db: IBeaconDb,
+  peerId: PeerId,
+  peerClient: string
 ): AsyncIterable<ResponseOutgoing> {
   // SPEC: minimum_request_epoch = max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)
   const currentEpoch = chain.clock.currentEpoch;
@@ -39,6 +43,14 @@ export async function* onDataColumnSidecarsByRoot(
       continue;
     }

+    if (slot < chain.earliestAvailableSlot) {
+      chain.logger.verbose("Peer did not respect earliestAvailableSlot for DataColumnSidecarsByRoot", {
+        peer: prettyPrintPeerId(peerId),
+        client: peerClient,
+      });
+      continue;
+    }
+
     const requestedEpoch = computeEpochAtSlot(slot);

     // SPEC: Clients MUST support requesting sidecars since minimum_request_epoch.
package/src/network/reqresp/handlers/index.ts
CHANGED

@@ -35,9 +35,9 @@ export function getReqRespHandlers({db, chain}: {db: IBeaconDb; chain: IBeaconCh
     [ReqRespMethod.Goodbye]: notImplemented(ReqRespMethod.Goodbye),
     [ReqRespMethod.Ping]: notImplemented(ReqRespMethod.Ping),
     [ReqRespMethod.Metadata]: notImplemented(ReqRespMethod.Metadata),
-    [ReqRespMethod.BeaconBlocksByRange]: (req) => {
+    [ReqRespMethod.BeaconBlocksByRange]: (req, peerId, peerClient) => {
       const body = ssz.phase0.BeaconBlocksByRangeRequest.deserialize(req.data);
-      return onBeaconBlocksByRange(body, chain, db);
+      return onBeaconBlocksByRange(body, chain, db, peerId, peerClient);
     },
     [ReqRespMethod.BeaconBlocksByRoot]: (req) => {
       const fork = chain.config.getForkName(chain.clock.currentSlot);
@@ -53,13 +53,13 @@ export function getReqRespHandlers({db, chain}: {db: IBeaconDb; chain: IBeaconCh
       const body = ssz.deneb.BlobSidecarsByRangeRequest.deserialize(req.data);
       return onBlobSidecarsByRange(body, chain, db);
     },
-    [ReqRespMethod.DataColumnSidecarsByRange]: (req) => {
+    [ReqRespMethod.DataColumnSidecarsByRange]: (req, peerId, peerClient) => {
       const body = ssz.fulu.DataColumnSidecarsByRangeRequest.deserialize(req.data);
-      return onDataColumnSidecarsByRange(body, chain, db);
+      return onDataColumnSidecarsByRange(body, chain, db, peerId, peerClient);
     },
-    [ReqRespMethod.DataColumnSidecarsByRoot]: (req) => {
+    [ReqRespMethod.DataColumnSidecarsByRoot]: (req, peerId, peerClient) => {
       const body = DataColumnSidecarsByRootRequestType(chain.config).deserialize(req.data);
-      return onDataColumnSidecarsByRoot(body, chain, db);
+      return onDataColumnSidecarsByRoot(body, chain, db, peerId, peerClient);
     },

     [ReqRespMethod.LightClientBootstrap]: (req) => {
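Taken together, these changes widen the req/resp handler signature so that each handler receives the requesting peerId and its detected peerClient string alongside the raw request. The sketch below illustrates what that implies for composing handlers; the IncomingRequest/OutgoingResponse types are simplified stand-ins for the package's actual request and ResponseOutgoing wiring, and withClientLogging is a hypothetical helper.

import type {PeerId} from "@libp2p/interface";

// Simplified stand-ins for the package's request/response types.
type IncomingRequest = {data: Uint8Array};
type OutgoingResponse = {data: Uint8Array; fork: string};

// Handlers now take (req, peerId, peerClient) instead of just (req).
type ReqRespHandler = (req: IncomingRequest, peerId: PeerId, peerClient: string) => AsyncIterable<OutgoingResponse>;

// Example: wrap a handler so it tags its log context with the peer's client before delegating.
function withClientLogging(handler: ReqRespHandler, log: (msg: string, ctx: object) => void): ReqRespHandler {
  return async function* (req, peerId, peerClient) {
    log("Serving req/resp request", {peer: peerId.toString(), client: peerClient});
    yield* handler(req, peerId, peerClient);
  };
}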