@lodestar/beacon-node 1.36.0-dev.d9cc6b90f7 → 1.36.0-dev.f259361847
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/chain/archiveStore/archiveStore.d.ts +9 -0
- package/lib/chain/archiveStore/archiveStore.d.ts.map +1 -1
- package/lib/chain/archiveStore/archiveStore.js +24 -0
- package/lib/chain/archiveStore/archiveStore.js.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts +7 -0
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js +31 -5
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js.map +1 -1
- package/lib/chain/beaconProposerCache.d.ts +3 -0
- package/lib/chain/beaconProposerCache.d.ts.map +1 -1
- package/lib/chain/beaconProposerCache.js +4 -6
- package/lib/chain/beaconProposerCache.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +7 -0
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +1 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +14 -3
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/metrics/metrics/lodestar.d.ts +10 -0
- package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
- package/lib/metrics/metrics/lodestar.js +14 -0
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +14 -8
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +2 -2
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/package.json +14 -14
- package/src/chain/archiveStore/archiveStore.ts +27 -0
- package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts +32 -5
- package/src/chain/beaconProposerCache.ts +4 -8
- package/src/chain/errors/dataColumnSidecarError.ts +8 -0
- package/src/chain/validation/dataColumnSidecar.ts +21 -4
- package/src/metrics/metrics/lodestar.ts +17 -0
- package/src/network/processor/gossipHandlers.ts +16 -7
- package/src/sync/utils/remoteSyncType.ts +2 -2
package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts

@@ -17,6 +17,15 @@ import {StateArchiveStrategy, StatesArchiveOpts} from "../interface.js";
  */
 export const PERSIST_TEMP_STATE_EVERY_EPOCHS = 32;
 
+export enum FrequencyStateArchiveStep {
+  LoadLastStoredSlot = "load_last_stored_slot",
+  GetFinalizedState = "get_finalized_state",
+  // SerializeState is tracked via stateSerializeDuration metric
+  PersistState = "persist_state",
+  LoadStoredSlotsToDelete = "load_stored_slots_to_delete",
+  DeleteOldStates = "delete_old_states",
+}
+
 /**
  * Archives finalized states from active bucket to archive bucket.
  *
@@ -47,11 +56,16 @@ export class FrequencyStateArchiveStrategy implements StateArchiveStrategy {
    * ```
    */
   async maybeArchiveState(finalized: CheckpointWithHex, metrics?: Metrics | null): Promise<void> {
+    let timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
     const lastStoredSlot = await this.db.stateArchive.lastKey();
+    timer?.({step: FrequencyStateArchiveStep.LoadLastStoredSlot});
+
     const lastStoredEpoch = computeEpochAtSlot(lastStoredSlot ?? 0);
     const {archiveStateEpochFrequency} = this.opts;
 
+    const logCtx = {finalizedEpoch: finalized.epoch, lastStoredEpoch, archiveStateEpochFrequency};
     if (finalized.epoch - lastStoredEpoch >= Math.min(PERSIST_TEMP_STATE_EVERY_EPOCHS, archiveStateEpochFrequency)) {
+      this.logger.verbose("Start archiving state", logCtx);
       await this.archiveState(finalized, metrics);
 
       // Only check the current and previous intervals
@@ -60,23 +74,29 @@ export class FrequencyStateArchiveStrategy implements StateArchiveStrategy {
         (Math.floor(finalized.epoch / archiveStateEpochFrequency) - 1) * archiveStateEpochFrequency
       );
 
+      timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
       const storedStateSlots = await this.db.stateArchive.keys({
         lt: computeStartSlotAtEpoch(finalized.epoch),
         gte: computeStartSlotAtEpoch(minEpoch),
       });
+      timer?.({step: FrequencyStateArchiveStep.LoadStoredSlotsToDelete});
 
       const statesSlotsToDelete = computeStateSlotsToDelete(storedStateSlots, archiveStateEpochFrequency);
+      timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
       if (statesSlotsToDelete.length > 0) {
        await this.db.stateArchive.batchDelete(statesSlotsToDelete);
       }
+      timer?.({step: FrequencyStateArchiveStep.DeleteOldStates});
 
       // More logs to investigate the rss spike issue https://github.com/ChainSafe/lodestar/issues/5591
       this.logger.verbose("Archived state completed", {
-
+        ...logCtx,
         minEpoch,
         storedStateSlots: storedStateSlots.join(","),
         statesSlotsToDelete: statesSlotsToDelete.join(","),
       });
+    } else {
+      this.logger.verbose("Skip archiving state", logCtx);
     }
   }
 
@@ -86,24 +106,31 @@ export class FrequencyStateArchiveStrategy implements StateArchiveStrategy {
    */
   async archiveState(finalized: CheckpointWithHex, metrics?: Metrics | null): Promise<void> {
     // starting from Mar 2024, the finalized state could be from disk or in memory
+    let timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
     const finalizedStateOrBytes = await this.regen.getCheckpointStateOrBytes(finalized);
+    timer?.({step: FrequencyStateArchiveStep.GetFinalizedState});
+
     const {rootHex} = finalized;
     if (!finalizedStateOrBytes) {
       throw Error(`No state in cache for finalized checkpoint state epoch #${finalized.epoch} root ${rootHex}`);
     }
     if (finalizedStateOrBytes instanceof Uint8Array) {
       const slot = getStateSlotFromBytes(finalizedStateOrBytes);
+      timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
       await this.db.stateArchive.putBinary(slot, finalizedStateOrBytes);
+      timer?.({step: FrequencyStateArchiveStep.PersistState});
       this.logger.verbose("Archived finalized state bytes", {epoch: finalized.epoch, slot, root: rootHex});
     } else {
       // serialize state using BufferPool if provided
-      const
+      const sszTimer = metrics?.stateSerializeDuration.startTimer({source: AllocSource.ARCHIVE_STATE});
       await serializeState(
         finalizedStateOrBytes,
         AllocSource.ARCHIVE_STATE,
-        (stateBytes) => {
-
-
+        async (stateBytes) => {
+          sszTimer?.();
+          timer = metrics?.processFinalizedCheckpoint.frequencyStateArchive.startTimer();
+          await this.db.stateArchive.putBinary(finalizedStateOrBytes.slot, stateBytes);
+          timer?.({step: FrequencyStateArchiveStep.PersistState});
         },
         this.bufferPool
       );
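The per-step timing added above uses Lodestar's register.histogram helpers, which follow prom-client's labelled-histogram API: startTimer() starts the clock and returns an end callback that records the elapsed seconds against the labels passed to it. A minimal standalone sketch of the same step-labelled pattern, assuming only the prom-client package (helper and metric names here are illustrative, not the package's API):

import {Histogram, Registry} from "prom-client";

const registry = new Registry();

// Step-labelled histogram, mirroring the buckets used by the new metric above
const archiveStepSeconds = new Histogram({
  name: "frequency_state_archive_step_seconds",
  help: "Duration of each state-archive step",
  buckets: [0.1, 0.5, 1, 2, 4, 8],
  labelNames: ["step"],
  registers: [registry],
});

async function timedStep<T>(step: string, fn: () => Promise<T>): Promise<T> {
  // startTimer() returns an end() function; labels passed to end() are attached to the observation
  const end = archiveStepSeconds.startTimer();
  try {
    return await fn();
  } finally {
    end({step});
  }
}

// Example: await timedStep("persist_state", () => db.put(slot, bytes));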
package/src/chain/beaconProposerCache.ts

@@ -1,18 +1,14 @@
 import {routes} from "@lodestar/api";
 import {Epoch} from "@lodestar/types";
-import {MapDef} from "@lodestar/utils";
 
 const PROPOSER_PRESERVE_EPOCHS = 2;
 
 export type ProposerPreparationData = routes.validator.ProposerPreparationData;
 
 export class BeaconProposerCache {
-  private readonly feeRecipientByValidatorIndex:
-  constructor(opts: {suggestedFeeRecipient: string}) {
-    this.feeRecipientByValidatorIndex = new
-      epoch: 0,
-      feeRecipient: opts.suggestedFeeRecipient,
-    }));
+  private readonly feeRecipientByValidatorIndex: Map<number, {epoch: Epoch; feeRecipient: string}>;
+  constructor(readonly opts: {suggestedFeeRecipient: string}) {
+    this.feeRecipientByValidatorIndex = new Map();
   }
 
   add(epoch: Epoch, {validatorIndex, feeRecipient}: ProposerPreparationData): void {
@@ -30,7 +26,7 @@ export class BeaconProposerCache {
   }
 
   getOrDefault(proposerIndex: number): string {
-    return this.feeRecipientByValidatorIndex.
+    return this.feeRecipientByValidatorIndex.get(proposerIndex)?.feeRecipient ?? this.opts.suggestedFeeRecipient;
   }
 
   get(proposerIndex: number): string | undefined {
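The rewrite above drops the MapDef-with-default-entry construction and keeps a plain Map, falling back to the configured suggestedFeeRecipient only at lookup time. A minimal self-contained sketch of that lookup pattern (names and addresses are illustrative only):

// Plain-Map-plus-fallback lookup, as the cache now does
const suggestedFeeRecipient = "0x0000000000000000000000000000000000000000";
const feeRecipientByValidatorIndex = new Map<number, {epoch: number; feeRecipient: string}>();

function getOrDefault(proposerIndex: number): string {
  // A registered recipient wins; otherwise fall back to the node-wide default
  return feeRecipientByValidatorIndex.get(proposerIndex)?.feeRecipient ?? suggestedFeeRecipient;
}

feeRecipientByValidatorIndex.set(42, {epoch: 1, feeRecipient: "0x1111111111111111111111111111111111111111"});
getOrDefault(42); // the registered fee recipient
getOrDefault(7);  // the default, since validator 7 never registered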
package/src/chain/errors/dataColumnSidecarError.ts

@@ -8,6 +8,7 @@ export enum DataColumnSidecarErrorCode {
   MISMATCHED_LENGTHS = "DATA_COLUMN_SIDECAR_ERROR_MISMATCHED_LENGTHS",
   INVALID_SUBNET = "DATA_COLUMN_SIDECAR_ERROR_INVALID_SUBNET",
   INVALID_KZG_PROOF = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF",
+  TOO_MANY_KZG_COMMITMENTS = "DATA_COLUMN_SIDECAR_ERROR_TOO_MANY_KZG_COMMITMENTS",
 
   // Validation errors when validating against an existing block
 
@@ -43,6 +44,13 @@ export type DataColumnSidecarErrorType =
       proofsLength: number;
     }
   | {code: DataColumnSidecarErrorCode.INVALID_SUBNET; columnIdx: number; gossipSubnet: SubnetID}
+  | {
+      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS;
+      slot: number;
+      columnIdx: number;
+      count: number;
+      limit: number;
+    }
   | {code: DataColumnSidecarErrorCode.ALREADY_KNOWN; columnIdx: number; slot: Slot}
   | {code: DataColumnSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot}
   | {code: DataColumnSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot}
package/src/chain/validation/dataColumnSidecar.ts

@@ -1,10 +1,14 @@
-import {ChainConfig} from "@lodestar/config";
+import {ChainConfig, ChainForkConfig} from "@lodestar/config";
 import {
   KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
   KZG_COMMITMENTS_SUBTREE_INDEX,
   NUMBER_OF_COLUMNS,
 } from "@lodestar/params";
-import {
+import {
+  computeEpochAtSlot,
+  computeStartSlotAtEpoch,
+  getBlockHeaderProposerSignatureSet,
+} from "@lodestar/state-transition";
 import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types";
 import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
@@ -29,7 +33,7 @@ export async function validateGossipDataColumnSidecar(
   const blockHeader = dataColumnSidecar.signedBlockHeader.message;
 
   // 1) [REJECT] The sidecar is valid as verified by verify_data_column_sidecar
-  verifyDataColumnSidecar(dataColumnSidecar);
+  verifyDataColumnSidecar(chain.config, dataColumnSidecar);
 
   // 2) [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
@@ -184,7 +188,7 @@ export async function validateGossipDataColumnSidecar(
  * SPEC FUNCTION
  * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar
  */
-function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void {
+function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: fulu.DataColumnSidecar): void {
   if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
@@ -201,6 +205,19 @@ function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): voi
     });
   }
 
+  const epoch = computeEpochAtSlot(dataColumnSidecar.signedBlockHeader.message.slot);
+  const maxBlobsPerBlock = config.getMaxBlobsPerBlock(epoch);
+
+  if (dataColumnSidecar.kzgCommitments.length > maxBlobsPerBlock) {
+    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
+      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
+      slot: dataColumnSidecar.signedBlockHeader.message.slot,
+      columnIdx: dataColumnSidecar.index,
+      count: dataColumnSidecar.kzgCommitments.length,
+      limit: maxBlobsPerBlock,
+    });
+  }
+
   if (
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgCommitments.length ||
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgProofs.length
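The new [REJECT] condition bounds the sidecar's commitment count by the per-fork maximum blobs per block at the block's epoch. A stripped-down sketch of just that predicate, with types simplified and the config accessor passed in as a plain function (mainnet's 32 slots per epoch is assumed for the default; the real code throws DataColumnSidecarGossipError instead of returning a boolean):

interface DataColumnSidecarLike {
  kzgCommitments: unknown[];
  signedBlockHeader: {message: {slot: number}};
}

function hasTooManyKzgCommitments(
  sidecar: DataColumnSidecarLike,
  maxBlobsPerBlockAtEpoch: (epoch: number) => number,
  slotsPerEpoch = 32
): boolean {
  // epoch of the block the column belongs to
  const epoch = Math.floor(sidecar.signedBlockHeader.message.slot / slotsPerEpoch);
  return sidecar.kzgCommitments.length > maxBlobsPerBlockAtEpoch(epoch);
}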
package/src/metrics/metrics/lodestar.ts

@@ -1,5 +1,7 @@
 /** biome-ignore-all lint/suspicious/noTemplateCurlyInString: The metric templates requires to have `${}` in a normal string */
 import {NotReorgedReason} from "@lodestar/fork-choice";
+import {ArchiveStoreTask} from "../../chain/archiveStore/archiveStore.js";
+import {FrequencyStateArchiveStep} from "../../chain/archiveStore/strategies/frequencyStateArchiveStrategy.js";
 import {BlockInputSource} from "../../chain/blocks/blockInput/index.js";
 import {JobQueueItemType} from "../../chain/bls/index.js";
 import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js";
@@ -1420,6 +1422,21 @@ export function createLodestarMetrics(
       },
     },
 
+    processFinalizedCheckpoint: {
+      durationByTask: register.histogram<{source: ArchiveStoreTask}>({
+        name: "lodestar_process_finalized_checkpoint_seconds",
+        help: "Histogram of time to process finalized checkpoint",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["source"],
+      }),
+      frequencyStateArchive: register.histogram<{step: FrequencyStateArchiveStep}>({
+        name: "lodestar_process_finalized_checkpoint_frequency_state_archive_seconds",
+        help: "Histogram of FrequencyStateArchive duration by step",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["step"],
+      }),
+    },
+
     regenFnCallTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
       name: "lodestar_regen_fn_call_total",
       help: "Total number of calls for regen functions",
package/src/network/processor/gossipHandlers.ts

@@ -39,6 +39,7 @@ import {
   BlockError,
   BlockErrorCode,
   BlockGossipError,
+  DataColumnSidecarErrorCode,
   DataColumnSidecarGossipError,
   GossipAction,
   GossipActionError,
@@ -304,7 +305,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
           ...blockInput.getLogMeta(),
           index: dataColumnSidecar.index,
         });
-
+        throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+          code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+          columnIdx: dataColumnSidecar.index,
+          slot,
+        });
       }
     }
 
@@ -556,6 +561,16 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
         metrics?.dataColumns.elapsedTimeTillReceived.observe({receivedOrder: receivedColumns}, delaySec);
         break;
       }
+
+      if (!blockInput.hasAllData()) {
+        // immediately attempt fetch of data columns from execution engine
+        chain.getBlobsTracker.triggerGetBlobs(blockInput);
+        // if we've received at least half of the columns, trigger reconstruction of the rest
+        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
+          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
+        }
+      }
+
       if (!blockInput.hasBlockAndAllData()) {
        const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS);
        chain.logger.debug("Received gossip data column, waiting for full data availability", {
@@ -578,12 +593,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
             source: BlockInputSource.gossip,
           });
         });
-        // immediately attempt fetch of data columns from execution engine
-        chain.getBlobsTracker.triggerGetBlobs(blockInput);
-        // if we've received at least half of the columns, trigger reconstruction of the rest
-        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
-          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
-        }
       }
     },
 
package/src/sync/utils/remoteSyncType.ts

@@ -134,8 +134,8 @@ export function getRangeSyncTarget(
     return {
       syncType: RangeSyncType.Head,
       // The new peer has the same finalized (earlier filters should prevent a peer with an
-      // earlier finalized chain from reaching here).
-      startEpoch:
+      // earlier finalized chain from reaching here) and local head will always be >= local finalized.
+      startEpoch: computeEpochAtSlot(local.headSlot),
       target: {
         slot: remote.headSlot,
         root: remote.headRoot,