@lodestar/beacon-node 1.36.0-dev.d9cc6b90f7 → 1.36.0-dev.e6d0f574ee
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/api/impl/beacon/blocks/index.d.ts.map +1 -1
- package/lib/api/impl/beacon/blocks/index.js +41 -22
- package/lib/api/impl/beacon/blocks/index.js.map +1 -1
- package/lib/api/impl/lodestar/index.d.ts +5 -0
- package/lib/api/impl/lodestar/index.d.ts.map +1 -1
- package/lib/api/impl/lodestar/index.js +35 -10
- package/lib/api/impl/lodestar/index.js.map +1 -1
- package/lib/api/impl/node/utils.js +1 -1
- package/lib/api/impl/node/utils.js.map +1 -1
- package/lib/chain/archiveStore/archiveStore.d.ts +9 -0
- package/lib/chain/archiveStore/archiveStore.d.ts.map +1 -1
- package/lib/chain/archiveStore/archiveStore.js +24 -0
- package/lib/chain/archiveStore/archiveStore.js.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts +7 -0
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js +31 -5
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js.map +1 -1
- package/lib/chain/beaconProposerCache.d.ts +3 -0
- package/lib/chain/beaconProposerCache.d.ts.map +1 -1
- package/lib/chain/beaconProposerCache.js +4 -6
- package/lib/chain/beaconProposerCache.js.map +1 -1
- package/lib/chain/chain.d.ts +5 -2
- package/lib/chain/chain.d.ts.map +1 -1
- package/lib/chain/chain.js +32 -16
- package/lib/chain/chain.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +23 -13
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +5 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/forkChoice/index.d.ts +9 -1
- package/lib/chain/forkChoice/index.d.ts.map +1 -1
- package/lib/chain/forkChoice/index.js +109 -4
- package/lib/chain/forkChoice/index.js.map +1 -1
- package/lib/chain/interface.d.ts +2 -0
- package/lib/chain/interface.d.ts.map +1 -1
- package/lib/chain/options.d.ts +0 -2
- package/lib/chain/options.d.ts.map +1 -1
- package/lib/chain/options.js +2 -2
- package/lib/chain/options.js.map +1 -1
- package/lib/chain/stateCache/datastore/db.d.ts +12 -0
- package/lib/chain/stateCache/datastore/db.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/db.js +70 -0
- package/lib/chain/stateCache/datastore/db.js.map +1 -1
- package/lib/chain/stateCache/datastore/file.d.ts +1 -0
- package/lib/chain/stateCache/datastore/file.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/file.js +7 -0
- package/lib/chain/stateCache/datastore/file.js.map +1 -1
- package/lib/chain/stateCache/datastore/types.d.ts +1 -0
- package/lib/chain/stateCache/datastore/types.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts +16 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js +31 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +45 -17
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/index.d.ts +2 -0
- package/lib/index.d.ts.map +1 -1
- package/lib/index.js +2 -0
- package/lib/index.js.map +1 -1
- package/lib/metrics/metrics/lodestar.d.ts +10 -0
- package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
- package/lib/metrics/metrics/lodestar.js +15 -1
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/core/networkCore.d.ts.map +1 -1
- package/lib/network/core/networkCore.js +5 -1
- package/lib/network/core/networkCore.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +8 -8
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js.map +1 -1
- package/lib/network/discv5/worker.js +2 -7
- package/lib/network/discv5/worker.js.map +1 -1
- package/lib/network/events.d.ts +1 -0
- package/lib/network/events.d.ts.map +1 -1
- package/lib/network/gossip/encoding.js +1 -1
- package/lib/network/gossip/encoding.js.map +1 -1
- package/lib/network/gossip/snappy_bun.d.ts +3 -0
- package/lib/network/gossip/snappy_bun.d.ts.map +1 -0
- package/lib/network/gossip/snappy_bun.js +3 -0
- package/lib/network/gossip/snappy_bun.js.map +1 -0
- package/lib/network/metadata.d.ts +1 -1
- package/lib/network/metadata.d.ts.map +1 -1
- package/lib/network/metadata.js +1 -0
- package/lib/network/metadata.js.map +1 -1
- package/lib/network/options.d.ts +0 -1
- package/lib/network/options.d.ts.map +1 -1
- package/lib/network/options.js.map +1 -1
- package/lib/network/peers/discover.js +2 -2
- package/lib/network/peers/discover.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +14 -8
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.d.ts.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js +3 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js +14 -3
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js.map +1 -1
- package/lib/network/reqresp/handlers/index.js +6 -6
- package/lib/network/reqresp/handlers/index.js.map +1 -1
- package/lib/network/reqresp/types.d.ts +1 -0
- package/lib/network/reqresp/types.d.ts.map +1 -1
- package/lib/node/nodejs.d.ts +2 -1
- package/lib/node/nodejs.d.ts.map +1 -1
- package/lib/node/nodejs.js +2 -1
- package/lib/node/nodejs.js.map +1 -1
- package/lib/sync/range/range.d.ts.map +1 -1
- package/lib/sync/range/range.js +2 -1
- package/lib/sync/range/range.js.map +1 -1
- package/lib/sync/utils/downloadByRange.d.ts +58 -13
- package/lib/sync/utils/downloadByRange.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRange.js +201 -82
- package/lib/sync/utils/downloadByRange.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.d.ts +2 -1
- package/lib/sync/utils/remoteSyncType.d.ts.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +19 -4
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/lib/util/blobs.d.ts +1 -1
- package/lib/util/blobs.d.ts.map +1 -1
- package/lib/util/blobs.js +53 -20
- package/lib/util/blobs.js.map +1 -1
- package/lib/util/profile.d.ts +6 -4
- package/lib/util/profile.d.ts.map +1 -1
- package/lib/util/profile.js +40 -3
- package/lib/util/profile.js.map +1 -1
- package/lib/util/sszBytes.d.ts +2 -0
- package/lib/util/sszBytes.d.ts.map +1 -1
- package/lib/util/sszBytes.js +25 -0
- package/lib/util/sszBytes.js.map +1 -1
- package/package.json +32 -25
- package/src/api/impl/beacon/blocks/index.ts +47 -25
- package/src/api/impl/lodestar/index.ts +42 -10
- package/src/api/impl/node/utils.ts +1 -1
- package/src/chain/archiveStore/archiveStore.ts +27 -0
- package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts +32 -5
- package/src/chain/beaconProposerCache.ts +4 -8
- package/src/chain/chain.ts +48 -23
- package/src/chain/errors/dataColumnSidecarError.ts +27 -13
- package/src/chain/forkChoice/index.ts +178 -2
- package/src/chain/interface.ts +2 -0
- package/src/chain/options.ts +2 -3
- package/src/chain/stateCache/datastore/db.ts +89 -1
- package/src/chain/stateCache/datastore/file.ts +8 -0
- package/src/chain/stateCache/datastore/types.ts +1 -0
- package/src/chain/stateCache/persistentCheckpointsCache.ts +45 -2
- package/src/chain/validation/dataColumnSidecar.ts +54 -19
- package/src/index.ts +2 -0
- package/src/metrics/metrics/lodestar.ts +18 -1
- package/src/network/core/networkCore.ts +5 -1
- package/src/network/core/networkCoreWorker.ts +9 -9
- package/src/network/core/networkCoreWorkerHandler.ts +1 -1
- package/src/network/discv5/worker.ts +2 -7
- package/src/network/events.ts +1 -1
- package/src/network/gossip/encoding.ts +1 -1
- package/src/network/gossip/snappy_bun.ts +2 -0
- package/src/network/metadata.ts +3 -1
- package/src/network/options.ts +0 -1
- package/src/network/peers/discover.ts +2 -2
- package/src/network/processor/gossipHandlers.ts +16 -7
- package/src/network/reqresp/ReqRespBeaconNode.ts +3 -1
- package/src/network/reqresp/handlers/beaconBlocksByRange.ts +18 -3
- package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts +13 -1
- package/src/network/reqresp/handlers/dataColumnSidecarsByRoot.ts +13 -1
- package/src/network/reqresp/handlers/index.ts +6 -6
- package/src/network/reqresp/types.ts +1 -0
- package/src/node/nodejs.ts +3 -0
- package/src/sync/range/range.ts +2 -1
- package/src/sync/utils/downloadByRange.ts +259 -103
- package/src/sync/utils/remoteSyncType.ts +23 -4
- package/src/util/blobs.ts +64 -20
- package/src/util/profile.ts +45 -3
- package/src/util/sszBytes.ts +30 -0
package/src/sync/utils/downloadByRange.ts

@@ -1,7 +1,14 @@
 import {ChainForkConfig} from "@lodestar/config";
-import {
+import {
+  ForkPostDeneb,
+  ForkPostFulu,
+  ForkPreFulu,
+  ForkPreGloas,
+  isForkPostFulu,
+  isForkPostGloas,
+} from "@lodestar/params";
 import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types";
-import {LodestarError, Logger, fromHex,
+import {LodestarError, Logger, fromHex, prettyPrintIndices, toRootHex} from "@lodestar/utils";
 import {
   BlockInputSource,
   DAType,
@@ -15,7 +22,6 @@ import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumn
 import {INetwork} from "../../network/index.js";
 import {PeerIdStr} from "../../util/peerId.js";
 import {WarnResult} from "../../util/wrapError.js";
-import {DownloadByRootErrorCode} from "./downloadByRoot.js";

 export type DownloadByRangeRequests = {
   blocksRequest?: phase0.BeaconBlocksByRangeRequest;
@@ -111,7 +117,13 @@ export function cacheByRangeResponses({
   }

   for (const {blockRoot, blobSidecars} of responses.validatedBlobSidecars ?? []) {
-    const
+    const dataSlot = blobSidecars.at(0)?.signedBlockHeader.message.slot;
+    if (dataSlot === undefined) {
+      throw new Error(
+        `Coding Error: empty blobSidecars returned for blockRoot=${toRootHex(blockRoot)} from validation functions`
+      );
+    }
+    const existing = updatedBatchBlocks.get(dataSlot);
     const blockRootHex = toRootHex(blockRoot);

     if (!existing) {
@@ -122,7 +134,7 @@ export function cacheByRangeResponses({
       throw new DownloadByRangeError({
         code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
         slot: existing.slot,
-        blockRoot:
+        blockRoot: existing.blockRootHex,
         expected: DAType.Blobs,
         actual: existing.type,
       });
@@ -143,18 +155,24 @@ export function cacheByRangeResponses({
   }

   for (const {blockRoot, columnSidecars} of responses.validatedColumnSidecars ?? []) {
-    const
+    const dataSlot = columnSidecars.at(0)?.signedBlockHeader.message.slot;
+    if (dataSlot === undefined) {
+      throw new Error(
+        `Coding Error: empty columnSidecars returned for blockRoot=${toRootHex(blockRoot)} from validation functions`
+      );
+    }
+    const existing = updatedBatchBlocks.get(dataSlot);
     const blockRootHex = toRootHex(blockRoot);

     if (!existing) {
-      throw new Error("Coding error: blockInput must exist when adding
+      throw new Error("Coding error: blockInput must exist when adding columns");
     }

     if (!isBlockInputColumns(existing)) {
       throw new DownloadByRangeError({
         code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
         slot: existing.slot,
-        blockRoot:
+        blockRoot: existing.blockRootHex,
         expected: DAType.Columns,
         actual: existing.type,
       });
@@ -290,7 +308,7 @@ export async function validateResponses({
   if ((blobsRequest || columnsRequest) && !(blocks || batchBlocks)) {
     throw new DownloadByRangeError(
       {
-        code: DownloadByRangeErrorCode.
+        code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE,
         ...requestsLogMeta({blobsRequest, columnsRequest}),
       },
       "No blocks to validate data requests against"
@@ -301,24 +319,28 @@ export async function validateResponses({
   let warnings: DownloadByRangeError[] | null = null;

   if (blocksRequest) {
-
+    const result = validateBlockByRangeResponse(config, blocksRequest, blocks ?? []);
+    if (result.warnings?.length) {
+      warnings = result.warnings;
+    }
+    validatedResponses.validatedBlocks = result.result;
   }

   const dataRequest = blobsRequest ?? columnsRequest;
   if (!dataRequest) {
-    return {result: validatedResponses, warnings
+    return {result: validatedResponses, warnings};
   }

-  const
+  const blocksForDataValidation = getBlocksForDataValidation(
     dataRequest,
     batchBlocks,
-
+    validatedResponses.validatedBlocks?.length ? validatedResponses.validatedBlocks : undefined
   );

-  if (!
+  if (!blocksForDataValidation.length) {
     throw new DownloadByRangeError(
       {
-        code: DownloadByRangeErrorCode.
+        code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE,
         ...requestsLogMeta({blobsRequest, columnsRequest}),
       },
       "No blocks in data request slot range to validate data response against"
@@ -336,7 +358,10 @@ export async function validateResponses({
       );
     }

-    validatedResponses.validatedBlobSidecars = await validateBlobsByRangeResponse(
+    validatedResponses.validatedBlobSidecars = await validateBlobsByRangeResponse(
+      blocksForDataValidation,
+      blobSidecars
+    );
   }

   if (columnsRequest) {
@@ -351,8 +376,9 @@ export async function validateResponses({
     }

     const validatedColumnSidecarsResult = await validateColumnsByRangeResponse(
+      config,
       columnsRequest,
-
+      blocksForDataValidation,
       columnSidecars
     );
     validatedResponses.validatedColumnSidecars = validatedColumnSidecarsResult.result;
@@ -375,20 +401,30 @@ export function validateBlockByRangeResponse(
   config: ChainForkConfig,
   blocksRequest: phase0.BeaconBlocksByRangeRequest,
   blocks: SignedBeaconBlock[]
-): ValidatedBlock[] {
+): WarnResult<ValidatedBlock[], DownloadByRangeError> {
   const {startSlot, count} = blocksRequest;

-  //
-  //
-  //
-  //
-
-
-
-
-
-
-
+  // An error was thrown here by @twoeths in #8150 but it breaks for epochs with 0 blocks during chain
+  // liveness issues. See comment https://github.com/ChainSafe/lodestar/issues/8147#issuecomment-3246434697
+  // There are instances where clients return no blocks though. Need to monitor this via the warns to see
+  // if what the correct behavior should be
+  if (!blocks.length) {
+    throw new DownloadByRangeError({
+      code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE,
+      ...requestsLogMeta({blocksRequest}),
+    });
+    // TODO: this was causing deadlock again. need to come back and fix this so that its possible to process through
+    // an empty epoch for periods with poor liveness
+    // return {
+    //   result: [],
+    //   warnings: [
+    //     new DownloadByRangeError({
+    //       code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE,
+    //       ...requestsLogMeta({blocksRequest}),
+    //     }),
+    //   ],
+    // };
+  }

   if (blocks.length > count) {
     throw new DownloadByRangeError(
@@ -445,8 +481,8 @@ export function validateBlockByRangeResponse(
         {
           code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH,
           slot: blocks[i].message.slot,
-          expected:
-          actual:
+          expected: toRootHex(blockRoot),
+          actual: toRootHex(parentRoot),
         },
         `Block parent root does not match the previous block's root in BeaconBlocksByRange response`
       );
@@ -454,7 +490,10 @@ export function validateBlockByRangeResponse(
     }
   }

-  return
+  return {
+    result: response,
+    warnings: null,
+  };
 }

 /**
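Note on the return-type change above (`ValidatedBlock[]` to `WarnResult<ValidatedBlock[], DownloadByRangeError>`): hard failures still throw inside the validator, while soft issues travel back to `validateResponses` as warnings. A minimal standalone sketch of that pattern; the `WarnResult` shape is inferred from how this diff builds and consumes it (`{result, warnings}`) and is not copied from `util/wrapError.js`, and `unwrapWithWarnings` is a hypothetical helper for illustration only.

```ts
// Inferred shape, assumed for this sketch (not the actual util/wrapError.js definition).
type WarnResult<T, E> = {result: T; warnings: E[] | null};

// Hypothetical caller: log warnings without discarding otherwise-valid data,
// and return the validated result for further use.
function unwrapWithWarnings<T, E extends Error>(res: WarnResult<T, E>, log: (e: E) => void): T {
  for (const warning of res.warnings ?? []) {
    log(warning);
  }
  return res.result;
}
```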
@@ -528,76 +567,184 @@ export async function validateBlobsByRangeResponse(

 /**
  * Should not be called directly. Only exported for unit testing purposes
+ *
+ * Spec states:
+ * 1) must be within range [start_slot, start_slot + count]
+ * 2) should respond with all columns in the range or and 3:ResourceUnavailable (and potentially get down-scored)
+ * 3) must response with at least the sidecars of the first blob-carrying block that exists in the range
+ * 4) must include all sidecars from each block from which there are blobs
+ * 5) where they exists, sidecars must be sent in (slot, index) order
+ * 6) clients may limit the number of sidecars in a response
+ * 7) clients may stop responding mid-response if their view of fork-choice changes
+ *
+ * We will interpret the spec as follows
+ * - Errors when validating: 1, 3, 5
+ * - Warnings when validating: 2, 4, 6, 7
+ *
+ * For "warning" cases, where we get a partial response but sidecars are validated and correct with respect to the
+ * blocks, then they will be kept. This loosening of the spec is to help ensure sync goes smoothly and we can find
+ * the data needed in difficult network situations.
+ *
+ * Assume for the following two examples we request indices 5, 10, 15 for a range of slots 32-63
+ *
+ * For slots where we receive no sidecars, example slot 45, but blobs exist we will stop validating subsequent
+ * slots, 45-63. The next round of requests will get structured to pull the from the slot that had columns
+ * missing to the end of the range for all columns indices that were requested for the current partially failed
+ * request (slots 45-63 and indices 5, 10, 15).
+ *
+ * For slots where only some of the requested sidecars are received we will proceed with validation. For simplicity sake
+ * we will assume that if we only get some indices back for a (or several) slot(s) that the indices we get will be
+ * consistent. IE if a peer returns only index 5, they will most likely return that same index for subsequent slot
+ * (index 5 for slots 34, 35, 36, etc). They will not likely return 5 on slot 34, 10 on slot 35, 15 on slot 36, etc.
+ * This assumption makes the code simpler. For both cases the request for the next round will be structured correctly
+ * to pull any missing column indices for whatever range remains. The simplification just leads to re-verification
+ * of the columns but the number of columns downloaded will be the same regardless of if they are validated twice.
+ *
+ * validateColumnsByRangeResponse makes some assumptions about the data being passed in
+ * blocks are:
+ * - slotwise in order
+ * - form a chain
+ * - non-sparse response (any missing block is a skipped slot not a bad response)
+ * - last block is last slot received
  */
 export async function validateColumnsByRangeResponse(
+  config: ChainForkConfig,
   request: fulu.DataColumnSidecarsByRangeRequest,
-
+  blocks: ValidatedBlock[],
   columnSidecars: fulu.DataColumnSidecars
 ): Promise<WarnResult<ValidatedColumnSidecars[], DownloadByRangeError>> {
-  // Expected column count considering currently-validated batch blocks
-  // TODO GLOAS: Post-gloas's blobKzgCommitments is not in beacon block body. Need to source it from somewhere else.
-  const expectedColumnCount = dataRequestBlocks.reduce((acc, {block}) => {
-    return (block as SignedBeaconBlock<ForkPostDeneb & ForkPreGloas>).message.body.blobKzgCommitments.length > 0
-      ? request.columns.length + acc
-      : acc;
-  }, 0);
-  const nextSlot = dataRequestBlocks.length
-    ? (dataRequestBlocks.at(-1) as ValidatedBlock).block.message.slot + 1
-    : request.startSlot;
-  const possiblyMissingBlocks = nextSlot - request.startSlot + request.count;
-
-  // Allow for extra columns if some blocks are missing from the end of a batch
-  // Eg: If we requested 10 blocks but only 8 were returned, allow for up to 2 * columns.length extra columns
-  const maxColumnCount = expectedColumnCount + possiblyMissingBlocks * request.columns.length;
-
-  if (columnSidecars.length > maxColumnCount) {
-    // this never happens on devnet, so throw error for now
-    throw new DownloadByRangeError(
-      {
-        code: DownloadByRangeErrorCode.OVER_COLUMNS,
-        max: maxColumnCount,
-        actual: columnSidecars.length,
-      },
-      "Extra data columns received in DataColumnSidecarsByRange response"
-    );
-  }
-
   const warnings: DownloadByRangeError[] = [];
-  // no need to check for columnSidecars.length vs expectedColumnCount here, will be checked per-block below
-  const requestedColumns = new Set(request.columns);
-  const validateSidecarsPromises: Promise<ValidatedColumnSidecars>[] = [];
-  for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) {
-    const {block, blockRoot} = dataRequestBlocks[blockIndex];
-    const slot = block.message.slot;
-    const blockRootHex = toRootHex(blockRoot);
-    // TODO GLOAS: Post-gloas's blobKzgCommitments is not in beacon block body. Need to source it from somewhere else.
-    const blockKzgCommitments = (block as SignedBeaconBlock<ForkPostFulu & ForkPreGloas>).message.body
-      .blobKzgCommitments;
-    const expectedColumns = blockKzgCommitments.length ? request.columns.length : 0;

-
+  const seenColumns = new Map<Slot, Map<number, fulu.DataColumnSidecar>>();
+  let currentSlot = -1;
+  let currentIndex = -1;
+  // Check for duplicates and order
+  for (const columnSidecar of columnSidecars) {
+    const slot = columnSidecar.signedBlockHeader.message.slot;
+    let seenSlotColumns = seenColumns.get(slot);
+    if (!seenSlotColumns) {
+      seenSlotColumns = new Map();
+      seenColumns.set(slot, seenSlotColumns);
+    }
+
+    if (seenSlotColumns.has(columnSidecar.index)) {
+      warnings.push(
+        new DownloadByRangeError({
+          code: DownloadByRangeErrorCode.DUPLICATE_COLUMN,
+          slot,
+          index: columnSidecar.index,
+        })
+      );
+
       continue;
     }
-
-
-
-
-
-
+
+    if (currentSlot > slot) {
+      warnings.push(
+        new DownloadByRangeError(
+          {
+            code: DownloadByRangeErrorCode.OUT_OF_ORDER_COLUMNS,
+            slot,
+          },
+          "ColumnSidecars received out of slot order"
+        )
+      );
+    }
+
+    if (currentSlot === slot && currentIndex > columnSidecar.index) {
+      warnings.push(
+        new DownloadByRangeError(
+          {
+            code: DownloadByRangeErrorCode.OUT_OF_ORDER_COLUMNS,
+            slot,
+          },
+          "Column indices out of order within a slot"
+        )
+      );
+    }
+
+    seenSlotColumns.set(columnSidecar.index, columnSidecar);
+    if (currentSlot !== slot) {
+      // a new slot has started, reset index
+      currentIndex = -1;
+    } else {
+      currentIndex = columnSidecar.index;
+    }
+    currentSlot = slot;
+  }
+
+  const validationPromises: Promise<ValidatedColumnSidecars>[] = [];
+
+  for (const {blockRoot, block} of blocks) {
+    const slot = block.message.slot;
+    const rootHex = toRootHex(blockRoot);
+    const forkName = config.getForkName(slot);
+    const columnSidecarsMap: Map<number, fulu.DataColumnSidecar> = seenColumns.get(slot) ?? new Map();
+    const columnSidecars = Array.from(columnSidecarsMap.values()).sort((a, b) => a.index - b.index);
+
+    let blobCount: number;
+    if (!isForkPostFulu(forkName)) {
+      const dataSlot = columnSidecars.at(0)?.signedBlockHeader.message.slot;
+      throw new DownloadByRangeError({
+        code: DownloadByRangeErrorCode.MISMATCH_BLOCK_FORK,
+        slot,
+        blockFork: forkName,
+        dataFork: dataSlot ? config.getForkName(dataSlot) : "unknown",
+      });
+    }
+    if (isForkPostGloas(forkName)) {
+      // TODO GLOAS: Post-gloas's blobKzgCommitments is not in beacon block body. Need to source it from somewhere else.
+      // if block without columns is passed default to zero and throw below
+      blobCount = 0;
+    } else {
+      blobCount = (block as SignedBeaconBlock<ForkPostFulu & ForkPreGloas>).message.body.blobKzgCommitments.length;
+    }
+
+    if (columnSidecars.length === 0) {
+      if (!blobCount) {
+        // no columns in the slot
+        continue;
       }
-
-
+
+      /**
+       * If no columns are found for a block and there are commitments on the block then stop checking and just
+       * return early. Even if there were columns returned for subsequent slots that doesn't matter because
+       * we will be re-requesting them again anyway. Leftovers just get ignored
+       */
+      warnings.push(
+        new DownloadByRangeError({
+          code: DownloadByRangeErrorCode.MISSING_COLUMNS,
+          slot,
+          blockRoot: rootHex,
+          missingIndices: prettyPrintIndices(request.columns),
+        })
+      );
+      break;
+    }
+
+    const returnedColumns = Array.from(columnSidecarsMap.keys()).sort();
+    if (!blobCount) {
+      // columns for a block that does not have blobs
+      // TODO(fulu): should this be a hard error with no data retained from peer or just a warning
+      throw new DownloadByRangeError(
+        {
+          code: DownloadByRangeErrorCode.NO_COLUMNS_FOR_BLOCK,
+          slot,
+          blockRoot: rootHex,
+          invalidIndices: prettyPrintIndices(returnedColumns),
+        },
+        "Block has no blob commitments but data column sidecars were provided"
+      );
     }

-    const
-    const missingIndices = request.columns.filter((i) => !returnedColumns.has(i));
+    const missingIndices = request.columns.filter((i) => !columnSidecarsMap.has(i));
     if (missingIndices.length > 0) {
       warnings.push(
         new DownloadByRangeError(
           {
             code: DownloadByRangeErrorCode.MISSING_COLUMNS,
             slot,
-            blockRoot:
+            blockRoot: rootHex,
             missingIndices: prettyPrintIndices(missingIndices),
           },
           "Missing data columns in DataColumnSidecarsByRange response"
@@ -605,14 +752,14 @@ export async function validateColumnsByRangeResponse(
       );
     }

-    const extraIndices =
+    const extraIndices = returnedColumns.filter((i) => !request.columns.includes(i));
     if (extraIndices.length > 0) {
       warnings.push(
         new DownloadByRangeError(
           {
             code: DownloadByRangeErrorCode.EXTRA_COLUMNS,
             slot,
-            blockRoot:
+            blockRoot: rootHex,
             invalidIndices: prettyPrintIndices(extraIndices),
           },
           "Data column in not in requested columns in DataColumnSidecarsByRange response"
@@ -620,17 +767,19 @@ export async function validateColumnsByRangeResponse(
       );
     }

-
-    validateBlockDataColumnSidecars(slot, blockRoot,
+    validationPromises.push(
+      validateBlockDataColumnSidecars(slot, blockRoot, blobCount, columnSidecars).then(() => ({
        blockRoot,
-        columnSidecars
+        columnSidecars,
      }))
    );
  }

-
-
-
+  const validatedColumns = await Promise.all(validationPromises);
+  return {
+    result: validatedColumns,
+    warnings: warnings.length ? warnings : null,
+  };
 }

 /**
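The doc comment above describes how a partially failed columns-by-range response shapes the follow-up request. A hedged sketch of that retry arithmetic; the `{startSlot, count, columns}` request shape matches the fields the diff reads off `fulu.DataColumnSidecarsByRangeRequest`, while `nextColumnsRequest` and `firstMissingSlot` are illustrative names that are not part of the package.

```ts
// Illustrative request shape, assumed from the fields used in the diff above.
type ColumnsByRangeRequest = {startSlot: number; count: number; columns: number[]};

// Build the next-round request: same column indices, range shrunk to start at the
// first slot whose requested columns were missing.
function nextColumnsRequest(prev: ColumnsByRangeRequest, firstMissingSlot: number): ColumnsByRangeRequest {
  const startSlot = firstMissingSlot;
  const count = prev.startSlot + prev.count - firstMissingSlot;
  return {startSlot, count, columns: [...prev.columns]};
}

// Example from the comment: slots 32-63 (count 32) for indices 5, 10, 15, with columns
// first missing at slot 45 -> retry covers slots 45-63 (count 19) for the same indices.
const retry = nextColumnsRequest({startSlot: 32, count: 32, columns: [5, 10, 15]}, 45);
// retry = {startSlot: 45, count: 19, columns: [5, 10, 15]}
```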
@@ -697,7 +846,7 @@ function requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}: Download
 }

 export enum DownloadByRangeErrorCode {
-
+  MISSING_BLOCKS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCK_RESPONSE",
   MISSING_BLOBS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_RESPONSE",
   MISSING_COLUMNS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_RESPONSE",

@@ -718,19 +867,19 @@ export enum DownloadByRangeErrorCode {
   MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS",
   OVER_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_OVER_COLUMNS",
   EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS",
+  NO_COLUMNS_FOR_BLOCK = "DOWNLOAD_BY_RANGE_ERROR_NO_COLUMNS_FOR_BLOCK",
+  DUPLICATE_COLUMN = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_COLUMN",
+  OUT_OF_ORDER_COLUMNS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_COLUMNS",

   /** Cached block input type mismatches new data */
+  MISMATCH_BLOCK_FORK = "DOWNLOAD_BY_RANGE_ERROR_MISMATCH_BLOCK_FORK",
   MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_ERROR_MISMATCH_BLOCK_INPUT_TYPE",
 }

 export type DownloadByRangeErrorType =
-  | {
-      code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE;
-      expectedCount: number;
-    }
   | {
       code:
-        | DownloadByRangeErrorCode.
+        | DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE
         | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE
         | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE;
       blockStartSlot?: number;
@@ -741,12 +890,14 @@ export type DownloadByRangeErrorType =
       columnCount?: number;
     }
   | {
-      code:
-
+      code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS;
+      slot: number;
     }
   | {
-      code: DownloadByRangeErrorCode.
+      code: DownloadByRangeErrorCode.MISMATCH_BLOCK_FORK;
       slot: number;
+      dataFork: string;
+      blockFork: string;
     }
   | {
       code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS;
@@ -778,7 +929,7 @@ export type DownloadByRangeErrorType =
       actual: number;
     }
   | {
-      code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS;
+      code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS | DownloadByRangeErrorCode.OUT_OF_ORDER_COLUMNS;
       slot: number;
     }
   | {
@@ -798,7 +949,12 @@ export type DownloadByRangeErrorType =
       missingIndices: string;
     }
   | {
-      code: DownloadByRangeErrorCode.
+      code: DownloadByRangeErrorCode.DUPLICATE_COLUMN;
+      slot: Slot;
+      index: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_COLUMNS | DownloadByRangeErrorCode.NO_COLUMNS_FOR_BLOCK;
       slot: Slot;
       blockRoot: string;
       invalidIndices: string;
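The new error codes and `DownloadByRangeErrorType` members above extend a discriminated union keyed on `code`. A minimal standalone sketch (not from the package) of why that shape is convenient: switching on `code` narrows to the per-code metadata added in this diff, such as `index` for duplicates or `blockFork`/`dataFork` for fork mismatches.

```ts
// Simplified, self-contained stand-in for the union; codes are shortened strings here.
type RangeError =
  | {code: "DUPLICATE_COLUMN"; slot: number; index: number}
  | {code: "MISMATCH_BLOCK_FORK"; slot: number; blockFork: string; dataFork: string};

// Narrowing on the discriminant gives typed access to each variant's fields.
function describe(err: RangeError): string {
  switch (err.code) {
    case "DUPLICATE_COLUMN":
      return `duplicate column index=${err.index} at slot=${err.slot}`;
    case "MISMATCH_BLOCK_FORK":
      return `fork mismatch at slot=${err.slot}: block=${err.blockFork} data=${err.dataFork}`;
  }
}
```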
package/src/sync/utils/remoteSyncType.ts

@@ -1,6 +1,7 @@
 import {IForkChoice} from "@lodestar/fork-choice";
 import {computeEpochAtSlot, computeStartSlotAtEpoch} from "@lodestar/state-transition";
 import {Slot, Status} from "@lodestar/types";
+import {IBeaconChain} from "../../chain/interface.ts";
 import {ChainTarget} from "../range/utils/index.js";

 /** The type of peer relative to our current state */
@@ -103,8 +104,11 @@ export function getRangeSyncType(local: Status, remote: Status, forkChoice: IFor
 export function getRangeSyncTarget(
   local: Status,
   remote: Status,
-
+  chain: IBeaconChain
 ): {syncType: RangeSyncType; startEpoch: Slot; target: ChainTarget} {
+  const forkChoice = chain.forkChoice;
+
+  // finalized sync
   if (remote.finalizedEpoch > local.finalizedEpoch && !forkChoice.hasBlock(remote.finalizedRoot)) {
     return {
       // If RangeSyncType.Finalized, the range of blocks fetchable from startEpoch and target must allow to switch
@@ -131,11 +135,26 @@
       },
     };
   }
+
+  // we don't want to sync from epoch < minEpoch
+  // if we boot from an unfinalized checkpoint state, we don't want to sync before anchorStateLatestBlockSlot
+  // if we boot from a finalized checkpoint state, anchorStateLatestBlockSlot is trusted and we also don't want to sync before it
+  const minEpoch = Math.max(remote.finalizedEpoch, computeEpochAtSlot(chain.anchorStateLatestBlockSlot));
+
+  // head sync
   return {
     syncType: RangeSyncType.Head,
-    // The new peer has the same finalized
-    // earlier finalized chain from reaching here
-
+    // The new peer has the same finalized `remote.finalizedEpoch == local.finalizedEpoch` since
+    // previous filters should prevent a peer with an earlier finalized chain from reaching here.
+    //
+    // By default and during stable network conditions, the head sync always starts from
+    // the finalized epoch (even though it's the head sync) because finalized epoch is < local head.
+    // This is to prevent the issue noted here https://github.com/ChainSafe/lodestar/pull/7509#discussion_r1984353063.
+    //
+    // During non-finality of the network, when starting from an unfinalized checkpoint state, we don't want
+    // to sync before anchorStateLatestBlockSlot as finalized epoch is too far away. Local head will also be
+    // the same to that value at startup, the head sync always starts from anchorStateLatestBlockSlot in this case.
+    startEpoch: Math.min(computeEpochAtSlot(local.headSlot), minEpoch),
    target: {
      slot: remote.headSlot,
      root: remote.headRoot,
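The head-sync comments above reduce to two clamps on the start epoch. A hedged sketch of that arithmetic with illustrative inputs standing in for the `Status` fields, `computeEpochAtSlot`, and `chain.anchorStateLatestBlockSlot`; the helper name and the 32-slot epoch length are assumptions for the example, not code from the package.

```ts
// Assumed mainnet-style epoch length, used only for this worked example.
const SLOTS_PER_EPOCH = 32;
const epochAt = (slot: number): number => Math.floor(slot / SLOTS_PER_EPOCH);

// Hypothetical helper mirroring the startEpoch expression in the diff:
// startEpoch = min(epochAt(local.headSlot), max(remote.finalizedEpoch, epochAt(anchorStateLatestBlockSlot)))
function headSyncStartEpoch(opts: {
  remoteFinalizedEpoch: number;
  localHeadSlot: number;
  anchorStateLatestBlockSlot: number;
}): number {
  // never sync from before the remote finalized epoch or the anchor state's block slot
  const minEpoch = Math.max(opts.remoteFinalizedEpoch, epochAt(opts.anchorStateLatestBlockSlot));
  // but never start beyond our own head
  return Math.min(epochAt(opts.localHeadSlot), minEpoch);
}

// Stable network: finalized epoch 100, local head at slot 3300 (epoch 103), anchor at slot 3200 (epoch 100)
// -> start epoch 100, i.e. the finalized epoch, matching the "stable conditions" case in the comment.
headSyncStartEpoch({remoteFinalizedEpoch: 100, localHeadSlot: 3300, anchorStateLatestBlockSlot: 3200}); // 100
```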