@lodestar/beacon-node 1.36.0-dev.c7f3e8d129 → 1.36.0-dev.d690a62b6c
This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- package/lib/api/impl/beacon/blocks/index.d.ts.map +1 -1
- package/lib/api/impl/beacon/blocks/index.js +41 -22
- package/lib/api/impl/beacon/blocks/index.js.map +1 -1
- package/lib/api/impl/lodestar/index.d.ts +5 -0
- package/lib/api/impl/lodestar/index.d.ts.map +1 -1
- package/lib/api/impl/lodestar/index.js +35 -10
- package/lib/api/impl/lodestar/index.js.map +1 -1
- package/lib/api/impl/node/utils.js +1 -1
- package/lib/api/impl/node/utils.js.map +1 -1
- package/lib/chain/archiveStore/archiveStore.d.ts +9 -0
- package/lib/chain/archiveStore/archiveStore.d.ts.map +1 -1
- package/lib/chain/archiveStore/archiveStore.js +24 -0
- package/lib/chain/archiveStore/archiveStore.js.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts +7 -0
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js +31 -5
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js.map +1 -1
- package/lib/chain/chain.d.ts +5 -2
- package/lib/chain/chain.d.ts.map +1 -1
- package/lib/chain/chain.js +32 -16
- package/lib/chain/chain.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +7 -0
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +1 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/forkChoice/index.d.ts +9 -1
- package/lib/chain/forkChoice/index.d.ts.map +1 -1
- package/lib/chain/forkChoice/index.js +109 -4
- package/lib/chain/forkChoice/index.js.map +1 -1
- package/lib/chain/interface.d.ts +2 -0
- package/lib/chain/interface.d.ts.map +1 -1
- package/lib/chain/options.d.ts +0 -2
- package/lib/chain/options.d.ts.map +1 -1
- package/lib/chain/options.js +0 -1
- package/lib/chain/options.js.map +1 -1
- package/lib/chain/stateCache/datastore/db.d.ts +12 -0
- package/lib/chain/stateCache/datastore/db.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/db.js +70 -0
- package/lib/chain/stateCache/datastore/db.js.map +1 -1
- package/lib/chain/stateCache/datastore/file.d.ts +1 -0
- package/lib/chain/stateCache/datastore/file.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/file.js +7 -0
- package/lib/chain/stateCache/datastore/file.js.map +1 -1
- package/lib/chain/stateCache/datastore/types.d.ts +1 -0
- package/lib/chain/stateCache/datastore/types.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +14 -3
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/index.d.ts +2 -0
- package/lib/index.d.ts.map +1 -1
- package/lib/index.js +2 -0
- package/lib/index.js.map +1 -1
- package/lib/metrics/metrics/lodestar.d.ts +10 -0
- package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
- package/lib/metrics/metrics/lodestar.js +15 -1
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/core/networkCore.d.ts.map +1 -1
- package/lib/network/core/networkCore.js +5 -1
- package/lib/network/core/networkCore.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +2 -7
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/discv5/worker.js +2 -7
- package/lib/network/discv5/worker.js.map +1 -1
- package/lib/network/gossip/encoding.js +1 -1
- package/lib/network/gossip/encoding.js.map +1 -1
- package/lib/network/gossip/snappy_bun.d.ts +3 -0
- package/lib/network/gossip/snappy_bun.d.ts.map +1 -0
- package/lib/network/gossip/snappy_bun.js +3 -0
- package/lib/network/gossip/snappy_bun.js.map +1 -0
- package/lib/network/metadata.d.ts +1 -1
- package/lib/network/metadata.d.ts.map +1 -1
- package/lib/network/metadata.js +1 -0
- package/lib/network/metadata.js.map +1 -1
- package/lib/network/options.d.ts +0 -1
- package/lib/network/options.d.ts.map +1 -1
- package/lib/network/options.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +14 -8
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js +3 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
- package/lib/node/nodejs.d.ts +2 -1
- package/lib/node/nodejs.d.ts.map +1 -1
- package/lib/node/nodejs.js +2 -1
- package/lib/node/nodejs.js.map +1 -1
- package/lib/sync/range/range.d.ts.map +1 -1
- package/lib/sync/range/range.js +2 -1
- package/lib/sync/range/range.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.d.ts +2 -1
- package/lib/sync/utils/remoteSyncType.d.ts.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +19 -4
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/lib/util/blobs.d.ts +1 -1
- package/lib/util/blobs.d.ts.map +1 -1
- package/lib/util/blobs.js +53 -20
- package/lib/util/blobs.js.map +1 -1
- package/lib/util/profile.d.ts +6 -4
- package/lib/util/profile.d.ts.map +1 -1
- package/lib/util/profile.js +40 -3
- package/lib/util/profile.js.map +1 -1
- package/lib/util/sszBytes.d.ts +2 -0
- package/lib/util/sszBytes.d.ts.map +1 -1
- package/lib/util/sszBytes.js +25 -0
- package/lib/util/sszBytes.js.map +1 -1
- package/package.json +31 -24
- package/src/api/impl/beacon/blocks/index.ts +47 -25
- package/src/api/impl/lodestar/index.ts +42 -10
- package/src/api/impl/node/utils.ts +1 -1
- package/src/chain/archiveStore/archiveStore.ts +27 -0
- package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts +32 -5
- package/src/chain/chain.ts +48 -23
- package/src/chain/errors/dataColumnSidecarError.ts +8 -0
- package/src/chain/forkChoice/index.ts +178 -2
- package/src/chain/interface.ts +2 -0
- package/src/chain/options.ts +0 -3
- package/src/chain/stateCache/datastore/db.ts +89 -1
- package/src/chain/stateCache/datastore/file.ts +8 -0
- package/src/chain/stateCache/datastore/types.ts +1 -0
- package/src/chain/validation/dataColumnSidecar.ts +21 -4
- package/src/index.ts +2 -0
- package/src/metrics/metrics/lodestar.ts +18 -1
- package/src/network/core/networkCore.ts +5 -1
- package/src/network/core/networkCoreWorker.ts +2 -7
- package/src/network/discv5/worker.ts +2 -7
- package/src/network/gossip/encoding.ts +1 -1
- package/src/network/gossip/snappy_bun.ts +2 -0
- package/src/network/metadata.ts +3 -1
- package/src/network/options.ts +0 -1
- package/src/network/processor/gossipHandlers.ts +16 -7
- package/src/network/reqresp/handlers/beaconBlocksByRange.ts +3 -1
- package/src/node/nodejs.ts +3 -0
- package/src/sync/range/range.ts +2 -1
- package/src/sync/utils/remoteSyncType.ts +23 -4
- package/src/util/blobs.ts +64 -20
- package/src/util/profile.ts +45 -3
- package/src/util/sszBytes.ts +30 -0
package/src/chain/options.ts
CHANGED
@@ -41,8 +41,6 @@ export type IChainOptions = BlockProcessOpts &
   maxCachedBlobSidecars?: number;
   /** Max number of produced block roots (blinded or full) cached for broadcast validations */
   maxCachedProducedRoots?: number;
-  /** Subscribe to and custody all data column sidecar subnets */
-  supernode?: boolean;
   initialCustodyGroupCount?: number;
   broadcastValidationStrictness?: string;
   minSameMessageSignatureSetsToBatch: number;
@@ -118,7 +116,6 @@ export const defaultChainOptions: IChainOptions = {
   archiveMode: DEFAULT_ARCHIVE_MODE,
   pruneHistory: false,
   emitPayloadAttributes: false,
-  supernode: false,
   // for gossip block validation, it's unlikely we see a reorg with 32 slots
   // for attestation validation, having this value ensures we don't have to regen states most of the time
   maxSkipSlots: 32,
package/src/chain/stateCache/datastore/db.ts
CHANGED
@@ -1,5 +1,11 @@
-import {
+import {SLOTS_PER_EPOCH} from "@lodestar/params";
+import {Epoch, phase0, ssz} from "@lodestar/types";
+import {MapDef} from "@lodestar/utils";
 import {IBeaconDb} from "../../../db/interface.js";
+import {
+  getLastProcessedSlotFromBeaconStateSerialized,
+  getSlotFromBeaconStateSerialized,
+} from "../../../util/sszBytes.js";
 import {CPStateDatastore, DatastoreKey} from "./types.js";

 /**
@@ -22,6 +28,13 @@ export class DbCPStateDatastore implements CPStateDatastore {
     return this.db.checkpointState.getBinary(serializedCheckpoint);
   }

+  async readLatestSafe(): Promise<Uint8Array | null> {
+    const allKeys = await this.readKeys();
+    if (allKeys.length === 0) return null;
+
+    return getLatestSafeDatastoreKey(allKeys, this.read.bind(this));
+  }
+
   async readKeys(): Promise<DatastoreKey[]> {
     return this.db.checkpointState.keys();
   }
@@ -34,3 +47,78 @@ export function datastoreKeyToCheckpoint(key: DatastoreKey): phase0.Checkpoint {
 export function checkpointToDatastoreKey(cp: phase0.Checkpoint): DatastoreKey {
   return ssz.phase0.Checkpoint.serialize(cp);
 }
+
+/**
+ * Get the latest safe checkpoint state the node can use to boot from
+ * - it should be the checkpoint state that's unique in its epoch
+ * - its last processed block slot should be at epoch boundary or last slot of previous epoch
+ * - state slot should be at epoch boundary
+ * - state slot should be equal to epoch * SLOTS_PER_EPOCH
+ *
+ * return the serialized data of Current Root Checkpoint State (CRCS) or Previous Root Checkpoint State (PRCS)
+ *
+ */
+export async function getLatestSafeDatastoreKey(
+  allKeys: DatastoreKey[],
+  readFn: (key: DatastoreKey) => Promise<Uint8Array | null>
+): Promise<Uint8Array | null> {
+  const checkpointsByEpoch = new MapDef<Epoch, DatastoreKey[]>(() => []);
+  for (const key of allKeys) {
+    const cp = datastoreKeyToCheckpoint(key);
+    checkpointsByEpoch.getOrDefault(cp.epoch).push(key);
+  }
+
+  const dataStoreKeyByEpoch: Map<Epoch, DatastoreKey> = new Map();
+  for (const [epoch, keys] of checkpointsByEpoch.entries()) {
+    // only consider epochs with a single checkpoint to avoid ambiguity from forks
+    if (keys.length === 1) {
+      dataStoreKeyByEpoch.set(epoch, keys[0]);
+    }
+  }
+
+  const epochsDesc = Array.from(dataStoreKeyByEpoch.keys()).sort((a, b) => b - a);
+  for (const epoch of epochsDesc) {
+    const datastoreKey = dataStoreKeyByEpoch.get(epoch);
+    if (datastoreKey == null) {
+      // should not happen
+      continue;
+    }
+
+    const stateBytes = await readFn(datastoreKey);
+    if (stateBytes == null) {
+      // should not happen
+      continue;
+    }
+
+    const lastProcessedSlot = getLastProcessedSlotFromBeaconStateSerialized(stateBytes);
+    if (lastProcessedSlot == null) {
+      // cannot extract last processed slot from serialized state, skip
+      continue;
+    }
+
+    const stateSlot = getSlotFromBeaconStateSerialized(stateBytes);
+    if (stateSlot == null) {
+      // cannot extract slot from serialized state, skip
+      continue;
+    }
+
+    if (lastProcessedSlot !== stateSlot && lastProcessedSlot !== stateSlot - 1) {
+      // not CRCS or PRCS, skip
+      continue;
+    }
+
+    if (stateSlot % SLOTS_PER_EPOCH !== 0) {
+      // not at epoch boundary, skip
+      continue;
+    }
+
+    if (stateSlot !== SLOTS_PER_EPOCH * epoch) {
+      // should not happen after above checks, but just to be safe
+      continue;
+    }
+
+    return stateBytes;
+  }
+
+  return null;
+}
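For a concrete feel of the CRCS/PRCS criteria documented above, the standalone sketch below restates them as a plain predicate. It hard-codes the mainnet value of SLOTS_PER_EPOCH (32) and the function name is illustrative, not part of the package.

// Illustrative restatement of the "safe checkpoint state" criteria (not package code).
// Assumes the mainnet preset where SLOTS_PER_EPOCH = 32.
const SLOTS_PER_EPOCH = 32;

function isSafeCheckpointState(epoch: number, stateSlot: number, lastProcessedSlot: number): boolean {
  // state slot must sit exactly on the epoch boundary: epoch * SLOTS_PER_EPOCH
  if (stateSlot !== epoch * SLOTS_PER_EPOCH) return false;
  // the last processed block is either at the boundary itself (CRCS)
  // or at the last slot of the previous epoch (PRCS)
  return lastProcessedSlot === stateSlot || lastProcessedSlot === stateSlot - 1;
}

console.log(isSafeCheckpointState(10, 320, 320)); // true  (CRCS: block at the epoch-10 boundary)
console.log(isSafeCheckpointState(10, 320, 319)); // true  (PRCS: block at the last slot of epoch 9)
console.log(isSafeCheckpointState(10, 320, 310)); // false (skipped slots right before the boundary)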
package/src/chain/stateCache/datastore/file.ts
CHANGED
@@ -2,6 +2,7 @@ import path from "node:path";
 import {phase0, ssz} from "@lodestar/types";
 import {fromHex, toHex} from "@lodestar/utils";
 import {ensureDir, readFile, readFileNames, removeFile, writeIfNotExist} from "../../../util/file.js";
+import {getLatestSafeDatastoreKey} from "./db.js";
 import {CPStateDatastore, DatastoreKey} from "./types.js";

 const CHECKPOINT_STATES_FOLDER = "checkpoint_states";
@@ -44,6 +45,13 @@ export class FileCPStateDatastore implements CPStateDatastore {
     return readFile(filePath);
   }

+  async readLatestSafe(): Promise<Uint8Array | null> {
+    const allKeys = await this.readKeys();
+    if (allKeys.length === 0) return null;
+
+    return getLatestSafeDatastoreKey(allKeys, this.read.bind(this));
+  }
+
   async readKeys(): Promise<DatastoreKey[]> {
     const fileNames = await readFileNames(this.folderPath);
     return fileNames
package/src/chain/stateCache/datastore/types.ts
CHANGED
@@ -8,6 +8,7 @@ export interface CPStateDatastore {
   write: (cpKey: phase0.Checkpoint, stateBytes: Uint8Array) => Promise<DatastoreKey>;
   remove: (key: DatastoreKey) => Promise<void>;
   read: (key: DatastoreKey) => Promise<Uint8Array | null>;
+  readLatestSafe: () => Promise<Uint8Array | null>;
   readKeys: () => Promise<DatastoreKey[]>;
   init?: () => Promise<void>;
 }
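As a rough caller-side sketch, the new readLatestSafe member lets any CPStateDatastore implementation (DB- or file-backed) hand back a bootable state without the caller re-implementing the selection logic. The helper name, fallback behavior, and import path below are illustrative assumptions, not the package's actual startup wiring.

import type {CPStateDatastore} from "./chain/stateCache/datastore/types.js";

// Hypothetical helper: prefer the latest safe persisted checkpoint state, otherwise
// signal the caller to fall back to another boot path (checkpoint sync, eth1 genesis, ...).
async function tryLoadBootStateBytes(datastore: CPStateDatastore): Promise<Uint8Array | null> {
  if (datastore.init) {
    await datastore.init();
  }
  // returns null when the datastore is empty or holds no unambiguous epoch-boundary state
  return datastore.readLatestSafe();
}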
package/src/chain/validation/dataColumnSidecar.ts
CHANGED
@@ -1,10 +1,14 @@
-import {ChainConfig} from "@lodestar/config";
+import {ChainConfig, ChainForkConfig} from "@lodestar/config";
 import {
   KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
   KZG_COMMITMENTS_SUBTREE_INDEX,
   NUMBER_OF_COLUMNS,
 } from "@lodestar/params";
-import {
+import {
+  computeEpochAtSlot,
+  computeStartSlotAtEpoch,
+  getBlockHeaderProposerSignatureSet,
+} from "@lodestar/state-transition";
 import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types";
 import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
@@ -29,7 +33,7 @@ export async function validateGossipDataColumnSidecar(
   const blockHeader = dataColumnSidecar.signedBlockHeader.message;

   // 1) [REJECT] The sidecar is valid as verified by verify_data_column_sidecar
-  verifyDataColumnSidecar(dataColumnSidecar);
+  verifyDataColumnSidecar(chain.config, dataColumnSidecar);

   // 2) [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
@@ -184,7 +188,7 @@ export async function validateGossipDataColumnSidecar(
  * SPEC FUNCTION
  * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar
  */
-function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void {
+function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: fulu.DataColumnSidecar): void {
   if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
@@ -201,6 +205,19 @@ function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): voi
     });
   }

+  const epoch = computeEpochAtSlot(dataColumnSidecar.signedBlockHeader.message.slot);
+  const maxBlobsPerBlock = config.getMaxBlobsPerBlock(epoch);
+
+  if (dataColumnSidecar.kzgCommitments.length > maxBlobsPerBlock) {
+    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
+      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
+      slot: dataColumnSidecar.signedBlockHeader.message.slot,
+      columnIdx: dataColumnSidecar.index,
+      count: dataColumnSidecar.kzgCommitments.length,
+      limit: maxBlobsPerBlock,
+    });
+  }
+
   if (
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgCommitments.length ||
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgProofs.length
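Condensed for reference, the structural checks in verifyDataColumnSidecar (index bound, the new epoch-aware commitment limit, and matching lengths) reduce to the boolean predicate sketched below. It takes plain numbers instead of SSZ types and is an illustration, not the package's error-throwing implementation.

// Illustrative predicate mirroring the structural checks shown in the diff above.
// NUMBER_OF_COLUMNS is 128 in the fulu (PeerDAS) spec; maxBlobsPerBlock is epoch-dependent
// and in the real code comes from config.getMaxBlobsPerBlock(epoch).
const NUMBER_OF_COLUMNS = 128;

function isStructurallyValidSidecar(
  sidecar: {index: number; columnLength: number; commitmentCount: number; proofCount: number},
  maxBlobsPerBlock: number
): boolean {
  if (sidecar.index >= NUMBER_OF_COLUMNS) return false; // INVALID_INDEX
  if (sidecar.commitmentCount > maxBlobsPerBlock) return false; // TOO_MANY_KZG_COMMITMENTS
  // cells, commitments and proofs must all be present in equal numbers
  return sidecar.columnLength === sidecar.commitmentCount && sidecar.columnLength === sidecar.proofCount;
}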
package/src/index.ts
CHANGED
@@ -3,6 +3,8 @@
 export type {RestApiServerMetrics, RestApiServerModules, RestApiServerOpts} from "./api/rest/base.js";
 export {RestApiServer} from "./api/rest/base.js";
 export {checkAndPersistAnchorState, initStateFromDb, initStateFromEth1} from "./chain/index.js";
+export {DbCPStateDatastore} from "./chain/stateCache/datastore/db.js";
+export {FileCPStateDatastore} from "./chain/stateCache/datastore/file.js";
 export {BeaconDb, type IBeaconDb} from "./db/index.js";
 export {Eth1Provider, type IEth1Provider} from "./eth1/index.js";
 // Export metrics utilities to de-duplicate validator metrics
package/src/metrics/metrics/lodestar.ts
CHANGED
@@ -1,5 +1,7 @@
 /** biome-ignore-all lint/suspicious/noTemplateCurlyInString: The metric templates requires to have `${}` in a normal string */
 import {NotReorgedReason} from "@lodestar/fork-choice";
+import {ArchiveStoreTask} from "../../chain/archiveStore/archiveStore.js";
+import {FrequencyStateArchiveStep} from "../../chain/archiveStore/strategies/frequencyStateArchiveStrategy.js";
 import {BlockInputSource} from "../../chain/blocks/blockInput/index.js";
 import {JobQueueItemType} from "../../chain/bls/index.js";
 import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js";
@@ -1420,6 +1422,21 @@ export function createLodestarMetrics(
      },
    },

+    processFinalizedCheckpoint: {
+      durationByTask: register.histogram<{source: ArchiveStoreTask}>({
+        name: "lodestar_process_finalized_checkpoint_seconds",
+        help: "Histogram of time to process finalized checkpoint",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["source"],
+      }),
+      frequencyStateArchive: register.histogram<{step: FrequencyStateArchiveStep}>({
+        name: "lodestar_process_finalized_checkpoint_frequency_state_archive_seconds",
+        help: "Histogram of FrequencyStateArchive duration by step",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["step"],
+      }),
+    },
+
    regenFnCallTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
      name: "lodestar_regen_fn_call_total",
      help: "Total number of calls for regen functions",
@@ -1861,7 +1878,7 @@ export function createLodestarMetrics(
    fetchKeys: register.histogram({
      name: "lodestar_prune_history_fetch_keys_time_seconds",
      help: "Time to fetch keys in seconds",
-      buckets: [0.001, 0.01, 0.1, 1],
+      buckets: [0.001, 0.01, 0.1, 0.3, 0.5, 1],
    }),

    pruneKeys: register.histogram({
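For illustration, the new finalized-checkpoint histogram follows the usual Prometheus timer pattern. The sketch below declares an equivalent metric directly against prom-client rather than Lodestar's RegistryMetricCreator wrapper, and the "frequency_state_archive" source label is only an example value.

import {Histogram, Registry} from "prom-client";

const registry = new Registry();

// Equivalent metric declared directly with prom-client for illustration purposes
const processFinalizedCheckpointSeconds = new Histogram({
  name: "lodestar_process_finalized_checkpoint_seconds",
  help: "Histogram of time to process finalized checkpoint",
  buckets: [0.1, 0.5, 1, 2, 4, 8],
  labelNames: ["source"],
  registers: [registry],
});

async function timeArchiveTask(source: string, task: () => Promise<void>): Promise<void> {
  // startTimer() returns a callback that records the elapsed seconds when invoked
  const endTimer = processFinalizedCheckpointSeconds.startTimer({source});
  try {
    await task();
  } finally {
    endTimer();
  }
}

// Example: observe a made-up archive step under the "frequency_state_archive" label
void timeArchiveTask("frequency_state_archive", async () => {
  await new Promise((resolve) => setTimeout(resolve, 250));
});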
package/src/network/core/networkCore.ts
CHANGED
@@ -553,7 +553,11 @@ export class NetworkCore implements INetworkCore {
     // On fork boundary transition
     if (epoch === nextBoundaryEpoch) {
       // updateEth2Field() MUST be called with clock epoch, onEpoch event is emitted in response to clock events
-      this.metadata.updateEth2Field(epoch);
+      const {forkDigest} = this.metadata.updateEth2Field(epoch);
+      // Update local status to reflect the new fork digest, otherwise we will disconnect peers that re-status us
+      // right after the fork transition due to incompatible forks as our fork digest is stale since we only
+      // update it once we import a new head or when emitting update status event.
+      this.statusCache.update({...this.statusCache.get(), forkDigest});
       this.reqResp.registerProtocolsAtBoundary(nextBoundary);
     }

package/src/network/core/networkCoreWorker.ts
CHANGED
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -11,7 +9,7 @@ import {RegistryMetricCreator, collectNodeJSMetrics} from "../../metrics/index.j
 import {AsyncIterableBridgeCaller, AsyncIterableBridgeHandler} from "../../util/asyncIterableToEvents.js";
 import {Clock} from "../../util/clock.js";
 import {peerIdToString} from "../../util/peerId.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {wireEventsOnWorkerThread} from "../../util/workerEvents.js";
 import {NetworkEventBus, NetworkEventData, networkEventDirection} from "../events.js";
 import {
@@ -157,10 +155,7 @@ const libp2pWorkerApi: NetworkWorkerApi = {
   dumpDiscv5KadValues: () => core.dumpDiscv5KadValues(),
   dumpMeshPeers: () => core.dumpMeshPeers(),
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `network_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.NETWORK, durationMs, dirpath);
   },
   writeDiscv5Profile: async (durationMs: number, dirpath: string) => {
     return core.writeDiscv5Profile(durationMs, dirpath);
package/src/network/discv5/worker.ts
CHANGED
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -14,7 +12,7 @@ import {Gauge} from "@lodestar/utils";
 import {RegistryMetricCreator} from "../../metrics/index.js";
 import {collectNodeJSMetrics} from "../../metrics/nodeJsMetrics.js";
 import {Clock} from "../../util/clock.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {Discv5WorkerApi, Discv5WorkerData} from "./types.js";
 import {ENRRelevance, enrRelevance} from "./utils.js";

@@ -108,10 +106,7 @@ const module: Discv5WorkerApi = {
     return (await metricsRegistry?.metrics()) ?? "";
   },
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `discv5_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.DISC5, durationMs, dirpath);
   },
   writeHeapSnapshot: async (prefix: string, dirpath: string) => {
     return writeHeapSnapshot(prefix, dirpath);
package/src/network/gossip/encoding.ts
CHANGED
@@ -1,6 +1,6 @@
 import {Message} from "@libp2p/interface";
-import {compress, uncompress} from "snappyjs";
 import xxhashFactory from "xxhash-wasm";
+import {compress, uncompress} from "#snappy";
 import {digest} from "@chainsafe/as-sha256";
 import {RPC} from "@chainsafe/libp2p-gossipsub/message";
 import {DataTransform} from "@chainsafe/libp2p-gossipsub/types";
package/src/network/metadata.ts
CHANGED
@@ -126,7 +126,7 @@ export class MetadataController {
    * 2. Network MUST call this method on fork transition.
    * Current Clock implementation ensures no race conditions, epoch is correct if re-fetched
    */
-  updateEth2Field(epoch: Epoch):
+  updateEth2Field(epoch: Epoch): phase0.ENRForkID {
     const config = this.networkConfig.config;
     const enrForkId = getENRForkID(config, epoch);
     const {forkDigest, nextForkVersion, nextForkEpoch} = enrForkId;
@@ -143,6 +143,8 @@ export class MetadataController {
       : ssz.ForkDigest.defaultValue();
     this.onSetValue(ENRKey.nfd, nextForkDigest);
     this.logger.debug("Updated nfd field in ENR", {nextForkDigest: toHex(nextForkDigest)});
+
+    return enrForkId;
   }
 }

package/src/network/options.ts
CHANGED
package/src/network/processor/gossipHandlers.ts
CHANGED
@@ -39,6 +39,7 @@ import {
   BlockError,
   BlockErrorCode,
   BlockGossipError,
+  DataColumnSidecarErrorCode,
   DataColumnSidecarGossipError,
   GossipAction,
   GossipActionError,
@@ -304,7 +305,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
        ...blockInput.getLogMeta(),
        index: dataColumnSidecar.index,
      });
-
+      throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+        code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+        columnIdx: dataColumnSidecar.index,
+        slot,
+      });
     }
   }

@@ -556,6 +561,16 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
        metrics?.dataColumns.elapsedTimeTillReceived.observe({receivedOrder: receivedColumns}, delaySec);
        break;
      }
+
+      if (!blockInput.hasAllData()) {
+        // immediately attempt fetch of data columns from execution engine
+        chain.getBlobsTracker.triggerGetBlobs(blockInput);
+        // if we've received at least half of the columns, trigger reconstruction of the rest
+        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
+          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
+        }
+      }
+
      if (!blockInput.hasBlockAndAllData()) {
        const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS);
        chain.logger.debug("Received gossip data column, waiting for full data availability", {
@@ -578,12 +593,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
          source: BlockInputSource.gossip,
        });
      });
-      // immediately attempt fetch of data columns from execution engine
-      chain.getBlobsTracker.triggerGetBlobs(blockInput);
-      // if we've received at least half of the columns, trigger reconstruction of the rest
-      if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
-        chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
-      }
     }
   },

package/src/network/reqresp/handlers/beaconBlocksByRange.ts
CHANGED
@@ -19,7 +19,9 @@ export async function* onBeaconBlocksByRange(

   const finalized = db.blockArchive;
   const unfinalized = db.block;
-
+  // in the case of initializing from a non-finalized state, we don't have the finalized block so this api does not work
+  // chain.forkChoice.getFinalizeBlock().slot
+  const finalizedSlot = chain.forkChoice.getFinalizedCheckpointSlot();

   // Finalized range of blocks
   if (startSlot <= finalizedSlot) {
package/src/node/nodejs.ts
CHANGED
@@ -53,6 +53,7 @@ export type BeaconNodeInitModules = {
   dataDir: string;
   peerStoreDir?: string;
   anchorState: BeaconStateAllForks;
+  isAnchorStateFinalized: boolean;
   wsCheckpoint?: phase0.Checkpoint;
   metricsRegistries?: Registry[];
 };
@@ -154,6 +155,7 @@ export class BeaconNode {
     dataDir,
     peerStoreDir,
     anchorState,
+    isAnchorStateFinalized,
     wsCheckpoint,
     metricsRegistries = [],
   }: BeaconNodeInitModules): Promise<T> {
@@ -217,6 +219,7 @@ export class BeaconNode {
      metrics,
      validatorMonitor,
      anchorState,
+      isAnchorStateFinalized,
      eth1: initializeEth1ForBlockProduction(opts.eth1, {
        config,
        db,
package/src/sync/range/range.ts
CHANGED
@@ -114,13 +114,14 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
    */
   addPeer(peerId: PeerIdStr, localStatus: Status, peerStatus: Status): void {
     // Compute if we should do a Finalized or Head sync with this peer
-    const {syncType, startEpoch, target} = getRangeSyncTarget(localStatus, peerStatus, this.chain
+    const {syncType, startEpoch, target} = getRangeSyncTarget(localStatus, peerStatus, this.chain);
     this.logger.debug("Sync peer joined", {
       peer: peerId,
       syncType,
       startEpoch,
       targetSlot: target.slot,
       targetRoot: toRootHex(target.root),
+      localHeadSlot: localStatus.headSlot,
       earliestAvailableSlot: (peerStatus as fulu.Status).earliestAvailableSlot ?? Infinity,
     });

package/src/sync/utils/remoteSyncType.ts
CHANGED
@@ -1,6 +1,7 @@
 import {IForkChoice} from "@lodestar/fork-choice";
 import {computeEpochAtSlot, computeStartSlotAtEpoch} from "@lodestar/state-transition";
 import {Slot, Status} from "@lodestar/types";
+import {IBeaconChain} from "../../chain/interface.ts";
 import {ChainTarget} from "../range/utils/index.js";

 /** The type of peer relative to our current state */
@@ -103,8 +104,11 @@ export function getRangeSyncType(local: Status, remote: Status, forkChoice: IFor
 export function getRangeSyncTarget(
   local: Status,
   remote: Status,
-
+  chain: IBeaconChain
 ): {syncType: RangeSyncType; startEpoch: Slot; target: ChainTarget} {
+  const forkChoice = chain.forkChoice;
+
+  // finalized sync
   if (remote.finalizedEpoch > local.finalizedEpoch && !forkChoice.hasBlock(remote.finalizedRoot)) {
     return {
       // If RangeSyncType.Finalized, the range of blocks fetchable from startEpoch and target must allow to switch
@@ -131,11 +135,26 @@ export function getRangeSyncTarget(
      },
    };
  }
+
+  // we don't want to sync from epoch < minEpoch
+  // if we boot from an unfinalized checkpoint state, we don't want to sync before anchorStateLatestBlockSlot
+  // if we boot from a finalized checkpoint state, anchorStateLatestBlockSlot is trusted and we also don't want to sync before it
+  const minEpoch = Math.max(remote.finalizedEpoch, computeEpochAtSlot(chain.anchorStateLatestBlockSlot));
+
+  // head sync
   return {
     syncType: RangeSyncType.Head,
-    // The new peer has the same finalized
-    // earlier finalized chain from reaching here
-
+    // The new peer has the same finalized `remote.finalizedEpoch == local.finalizedEpoch` since
+    // previous filters should prevent a peer with an earlier finalized chain from reaching here.
+    //
+    // By default and during stable network conditions, the head sync always starts from
+    // the finalized epoch (even though it's the head sync) because finalized epoch is < local head.
+    // This is to prevent the issue noted here https://github.com/ChainSafe/lodestar/pull/7509#discussion_r1984353063.
+    //
+    // During non-finality of the network, when starting from an unfinalized checkpoint state, we don't want
+    // to sync before anchorStateLatestBlockSlot as finalized epoch is too far away. Local head will also be
+    // the same to that value at startup, the head sync always starts from anchorStateLatestBlockSlot in this case.
+    startEpoch: Math.min(computeEpochAtSlot(local.headSlot), minEpoch),
     target: {
       slot: remote.headSlot,
       root: remote.headRoot,
package/src/util/blobs.ts
CHANGED
@@ -149,41 +149,85 @@ export async function dataColumnMatrixRecovery(
  * Reconstruct blobs from a set of data columns, at least 50%+ of all the columns
  * must be provided to allow to reconstruct the full data matrix
  */
-export async function reconstructBlobs(sidecars: fulu.DataColumnSidecars): Promise<deneb.Blobs> {
+export async function reconstructBlobs(sidecars: fulu.DataColumnSidecars, indices?: number[]): Promise<deneb.Blobs> {
   if (sidecars.length < NUMBER_OF_COLUMNS / 2) {
     throw Error(
       `Expected at least ${NUMBER_OF_COLUMNS / 2} data columns to reconstruct blobs, received ${sidecars.length}`
     );
   }
+  const blobCount = sidecars[0].column.length;

-
-
-
-    // Full columns, no need to recover
-    fullSidecars = sidecars;
-  } else {
-    const sidecarsByIndex = new Map<number, fulu.DataColumnSidecar>(sidecars.map((sc) => [sc.index, sc]));
-    const recoveredSidecars = await dataColumnMatrixRecovery(sidecarsByIndex);
-    if (recoveredSidecars === null) {
-      // Should not happen because we check the column count above
-      throw Error("Failed to reconstruct the full data matrix");
+  for (const index of indices ?? []) {
+    if (index < 0 || index >= blobCount) {
+      throw Error(`Invalid blob index ${index}, must be between 0 and ${blobCount - 1}`);
     }
-
+  }
+  const indicesToReconstruct = indices ?? Array.from({length: blobCount}, (_, i) => i);
+
+  const recoveredCells = await recoverBlobCells(sidecars, indicesToReconstruct);
+  if (recoveredCells === null) {
+    // Should not happen because we check the column count above
+    throw Error("Failed to recover cells to reconstruct blobs");
   }

-  const
-  const blobs: deneb.Blobs = new Array(blobCount);
+  const blobs: deneb.Blobs = new Array(indicesToReconstruct.length);

-
-
-
-
-
+  for (let i = 0; i < indicesToReconstruct.length; i++) {
+    const blobIndex = indicesToReconstruct[i];
+    const cells = recoveredCells.get(blobIndex);
+    if (!cells) {
+      throw Error(`Failed to get recovered cells for blob index ${blobIndex}`);
+    }
+    blobs[i] = cellsToBlob(cells);
   }

   return blobs;
 }

+/**
+ * Recover cells for specific blob indices from a set of data columns
+ */
+async function recoverBlobCells(
+  partialSidecars: fulu.DataColumnSidecar[],
+  blobIndices: number[]
+): Promise<Map<number, fulu.Cell[]> | null> {
+  const columnCount = partialSidecars.length;
+  if (columnCount < NUMBER_OF_COLUMNS / 2) {
+    // We don't have enough columns to recover
+    return null;
+  }
+
+  const recoveredCells = new Map<number, fulu.Cell[]>();
+  // Sort data columns by index in ascending order
+  const partialSidecarsSorted = partialSidecars.slice().sort((a, b) => a.index - b.index);
+
+  if (columnCount === NUMBER_OF_COLUMNS) {
+    // Full columns, no need to recover
+    for (const blobIndex of blobIndices) {
+      // 128 cells that make up one "extended blob" row
+      const cells = partialSidecarsSorted.map((col) => col.column[blobIndex]);
+      recoveredCells.set(blobIndex, cells);
+    }
+    return recoveredCells;
+  }
+
+  await Promise.all(
+    blobIndices.map(async (blobIndex) => {
+      const cellIndices: number[] = [];
+      const cells: fulu.Cell[] = [];
+      for (const dataColumn of partialSidecarsSorted) {
+        cellIndices.push(dataColumn.index);
+        cells.push(dataColumn.column[blobIndex]);
+      }
+      // Recover cells for this specific blob row
+      const recovered = await kzg.asyncRecoverCellsAndKzgProofs(cellIndices, cells);
+      recoveredCells.set(blobIndex, recovered.cells);
+    })
+  );
+
+  return recoveredCells;
+}
+
 /**
  * Concatenate the systematic half (columns 0‑63) of a row of cells into
  * the original 131072 byte blob. The parity half (64‑127) is ignored as