@typeberry/lib 0.2.0-e767e74 → 0.2.0-ef1ea0e
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.cjs +362 -386
- package/index.d.ts +1218 -1013
- package/index.js +362 -386
- package/package.json +1 -1
package/index.js
CHANGED
@@ -301,7 +301,7 @@ function resultToString(res) {
     if (res.isOk) {
         return `OK: ${typeof res.ok === "symbol" ? res.ok.toString() : res.ok}`;
     }
-    return `${res.details}\nError: ${maybeTaggedErrorToString(res.error)}`;
+    return `${res.details()}\nError: ${maybeTaggedErrorToString(res.error)}`;
 }
 /** An indication of two possible outcomes returned from a function. */
 const Result$1 = {
@@ -315,7 +315,7 @@ const Result$1 = {
         };
     },
     /** Create new [`Result`] with `Error` status. */
-    error: (error, details
+    error: (error, details) => {
        check `${error !== undefined} 'Error' type cannot be undefined.`;
        return {
            isOk: false,
@@ -428,7 +428,7 @@ function deepEqual(actual, expected, { context = [], errorsCollector, ignore = [
     }
     if (actual.isError && expected.isError) {
         deepEqual(actual.error, expected.error, { context: ctx.concat(["error"]), errorsCollector: errors, ignore });
-        deepEqual(actual.details, expected.details, {
+        deepEqual(actual.details(), expected.details(), {
             context: ctx.concat(["details"]),
             errorsCollector: errors,
             // display details when error does not match
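The three hunks above migrate `Result`'s `details` from an eagerly built string to a thunk invoked on demand (`res.details()`). A minimal stand-in sketch (not the library's actual implementation) of why this helps — the message is only constructed on the failure path, when someone actually reads it:

    // Minimal stand-in for the library's Result shape, assuming the new lazy `details`.
    const Result = {
        ok: (ok) => ({ isOk: true, isError: false, ok }),
        error: (error, details = () => "") => ({ isOk: false, isError: true, error, details }),
    };

    const res = Result.error("BadInput", () => `context: ${JSON.stringify({ step: "parse" })}`);
    if (res.isError) {
        // The template string above is evaluated only here, on the failure path.
        console.log(`${res.details()}\nError: ${res.error}`);
    }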
@@ -1119,8 +1119,8 @@ class Decoder {
     /**
      * Create a new [`Decoder`] instance given a raw array of bytes as a source.
      */
-    static fromBlob(source) {
-        return new Decoder(source);
+    static fromBlob(source, context) {
+        return new Decoder(source, undefined, context);
     }
     /**
      * Decode a single object from all of the source bytes.
@@ -1415,7 +1415,7 @@ class Decoder {
     ensureHasBytes(bytes) {
         check `${bytes >= 0} Negative number of bytes given.`;
         if (this.offset + bytes > this.source.length) {
-            throw new
+            throw new EndOfDataError(`Attempting to decode more data than there is left. Need ${bytes}, left: ${this.source.length - this.offset}.`);
         }
     }
 }
@@ -1429,6 +1429,8 @@ function decodeVariableLengthExtraBytes(firstByte) {
     }
     return 0;
 }
+class EndOfDataError extends Error {
+}
 
 /** Wrapper for `Decoder` that can skip bytes of fields in the data buffer instead of decoding them. */
 class Skipper {
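The two hunks above replace a generic throw with a dedicated `EndOfDataError`, so callers can tell "input was truncated" apart from other decoding failures. A self-contained sketch of the pattern (hypothetical decoder, not the library's API):

    class EndOfDataError extends Error {
    }

    // Hypothetical little-endian u32 decoder illustrating the dedicated error type.
    function decodeU32(bytes, offset) {
        if (offset + 4 > bytes.length) {
            throw new EndOfDataError(`Need 4, left: ${bytes.length - offset}.`);
        }
        return new DataView(bytes.buffer, bytes.byteOffset + offset, 4).getUint32(0, true);
    }

    try {
        decodeU32(new Uint8Array(2), 0);
    }
    catch (e) {
        if (e instanceof EndOfDataError) {
            console.log(`truncated input: ${e.message}`); // distinguishable from other bugs
        }
        else {
            throw e;
        }
    }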
@@ -2246,6 +2248,8 @@ var codec$1;
         return ret;
     };
     })();
+    /** Zero-size `void` value. */
+    codec.nothing = Descriptor.new("void", { bytes: 0, isExact: true }, (_e, _v) => { }, (_d) => { }, (_s) => { });
     /** Variable-length U32. */
     codec.varU32 = Descriptor.new("var_u32", { bytes: 4, isExact: false }, (e, v) => e.varU32(v), (d) => d.varU32(), (d) => d.varU32());
     /** Variable-length U64. */
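The new `codec.nothing` descriptor occupies zero bytes and its encode/decode/skip callbacks are all no-ops, which is handy as the payload codec for unit-like variants. A stand-in sketch of the descriptor shape (field names inferred from the `Descriptor.new` call above, not the library's real class):

    // Inferred shape only; the real Descriptor likely carries more machinery.
    const nothing = {
        name: "void",
        sizeHint: { bytes: 0, isExact: true },
        encode: (_encoder, _value) => { }, // writes nothing
        decode: (_decoder) => { },         // consumes nothing, yields undefined
        skip: (_skipper) => { },           // nothing to skip
    };
    console.log(nothing.sizeHint.bytes); // 0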
@@ -2445,6 +2449,9 @@ function forEachDescriptor(descriptors, f) {
             f(k, descriptors[k]);
         }
         catch (e) {
+            if (e instanceof EndOfDataError) {
+                throw new EndOfDataError(`${key}: ${e}`);
+            }
             throw new Error(`${key}: ${e}`);
         }
     }
@@ -2522,6 +2529,7 @@ var index$q = /*#__PURE__*/Object.freeze({
     Decoder: Decoder,
     Descriptor: Descriptor,
     Encoder: Encoder,
+    EndOfDataError: EndOfDataError,
     ObjectView: ObjectView,
     SequenceView: SequenceView,
     TYPICAL_DICTIONARY_LENGTH: TYPICAL_DICTIONARY_LENGTH,
@@ -4782,7 +4790,7 @@ class SortedArray {
             isEqual: false,
         };
     }
-    /** Create a new
+    /** Create a new SortedArray from two sorted collections. */
     static fromTwoSortedCollections(first, second) {
         check `${first.comparator === second.comparator} Cannot merge arrays if they do not use the same comparator`;
         const comparator = first.comparator;
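`fromTwoSortedCollections` merges two already-sorted inputs; the `check` enforces the precondition that both use the same comparator, since a single shared ordering is what makes a linear two-pointer merge valid. An illustrative merge (generic sketch, not the library's implementation):

    // Classic O(n + m) merge of two arrays sorted by the same comparator.
    function mergeSorted(a, b, cmp) {
        const out = [];
        let i = 0;
        let j = 0;
        while (i < a.length && j < b.length) {
            out.push(cmp(a[i], b[j]) <= 0 ? a[i++] : b[j++]);
        }
        return out.concat(a.slice(i), b.slice(j));
    }
    console.log(mergeSorted([1, 3, 5], [2, 4], (x, y) => x - y)); // [1, 2, 3, 4, 5]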
@@ -5139,31 +5147,6 @@ const fullChainSpec = new ChainSpec({
     maxLookupAnchorAge: tryAsU32(14_400),
 });
 
-/**
- * Configuration object for typeberry workers.
- */
-class WorkerConfig {
-    chainSpec;
-    dbPath;
-    omitSealVerification;
-    /**
-     * Since we loose prototypes when transferring the context,
-     * this function is re-initializing proper types.
-     *
-     * TODO [ToDr] instead of doing this hack, we might prefer to pass data
-     * between workers using JAM codec maybe?
-     */
-    static reInit(config) {
-        const { chainSpec, dbPath, omitSealVerification } = config;
-        return new WorkerConfig(new ChainSpec(chainSpec), dbPath, omitSealVerification);
-    }
-    constructor(chainSpec, dbPath, omitSealVerification = false) {
-        this.chainSpec = chainSpec;
-        this.dbPath = dbPath;
-        this.omitSealVerification = omitSealVerification;
-    }
-}
-
 /** Bootnode class represents a single contact point in the network */
 class Bootnode {
     id;
@@ -5197,7 +5180,6 @@ var index$m = /*#__PURE__*/Object.freeze({
     EST_EPOCH_LENGTH: EST_EPOCH_LENGTH,
     EST_VALIDATORS: EST_VALIDATORS,
     EST_VALIDATORS_SUPER_MAJORITY: EST_VALIDATORS_SUPER_MAJORITY,
-    WorkerConfig: WorkerConfig,
     fullChainSpec: fullChainSpec,
     tinyChainSpec: tinyChainSpec
 });
@@ -7308,9 +7290,7 @@ var chain_spec$1 = {
     id: "typeberry-default",
     bootnodes: [
         "e3r2oc62zwfj3crnuifuvsxvbtlzetk4o5qyhetkhagsc2fgl2oka@127.0.0.1:40000",
-        "
-        "en5ejs5b2tybkfh4ym5vpfh7nynby73xhtfzmazumtvcijpcsz6ma@127.0.0.1:12346",
-        "ekwmt37xecoq6a7otkm4ux5gfmm4uwbat4bg5m223shckhaaxdpqa@127.0.0.1:12347"
+        "eyonydqt7gj7bjdek62lwdeuxdzr5q7nmxa2p5zwwtoijgamdnkka@127.0.0.1:12345"
     ],
     genesis_header: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ee155ace9c40292074cb6aff8c9ccdd273c81648ff1149ef36bcea6ebb8a3e25bb30a42c1e62f0afda5f0a4e8a562f7a13a24cea00ee81917b86b89e801314aaff71c6c03ff88adb5ed52c9681de1629a54e702fc14729f6b50d2f0a76f185b34418fb8c85bb3985394a8c2756d3643457ce614546202a2f50b093d762499acedee6d555b82024f1ccf8a1e37e60fa60fd40b1958c4bb3006af78647950e1b91ad93247bd01307550ec7acd757ce6fb805fcf73db364063265b30a949e90d9339326edb21e5541717fde24ec085000b28709847b8aab1ac51f84e94b37ca1b66cab2b9ff25c2410fbe9b8a717abb298c716a03983c98ceb4def2087500b8e3410746846d17469fb2f95ef365efcab9f4e22fa1feb53111c995376be8019981ccf30aa5444688b3cab47697b37d5cac5707bb3289e986b19b17db437206931a8d151e5c8fe2b9d8a606966a79edd2f9e5db47e83947ce368ccba53bf6ba20a40b8b8c5d436f92ecf605421e873a99ec528761eb52a88a2f9a057b3b3003e6f32a2105650944fcd101621fd5bb3124c9fd191d114b7ad936c1d79d734f9f21392eab0084d01534b31c1dd87c81645fd762482a90027754041ca1b56133d0466c0600ffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
     genesis_state: {
@@ -7356,9 +7336,7 @@ var authorship = {
 var chain_spec = {
     id: "typeberry-dev",
     bootnodes: [
-        "
-        "en5ejs5b2tybkfh4ym5vpfh7nynby73xhtfzmazumtvcijpcsz6ma@127.0.0.1:12346",
-        "ekwmt37xecoq6a7otkm4ux5gfmm4uwbat4bg5m223shckhaaxdpqa@127.0.0.1:12347"
+        "eyonydqt7gj7bjdek62lwdeuxdzr5q7nmxa2p5zwwtoijgamdnkka@127.0.0.1:12345"
     ],
     genesis_header: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ee155ace9c40292074cb6aff8c9ccdd273c81648ff1149ef36bcea6ebb8a3e25bb30a42c1e62f0afda5f0a4e8a562f7a13a24cea00ee81917b86b89e801314aaff71c6c03ff88adb5ed52c9681de1629a54e702fc14729f6b50d2f0a76f185b34418fb8c85bb3985394a8c2756d3643457ce614546202a2f50b093d762499acedee6d555b82024f1ccf8a1e37e60fa60fd40b1958c4bb3006af78647950e1b91ad93247bd01307550ec7acd757ce6fb805fcf73db364063265b30a949e90d9339326edb21e5541717fde24ec085000b28709847b8aab1ac51f84e94b37ca1b66cab2b9ff25c2410fbe9b8a717abb298c716a03983c98ceb4def2087500b8e3410746846d17469fb2f95ef365efcab9f4e22fa1feb53111c995376be8019981ccf30aa5444688b3cab47697b37d5cac5707bb3289e986b19b17db437206931a8d151e5c8fe2b9d8a606966a79edd2f9e5db47e83947ce368ccba53bf6ba20a40b8b8c5d436f92ecf605421e873a99ec528761eb52a88a2f9a057b3b3003e6f32a2105650944fcd101621fd5bb3124c9fd191d114b7ad936c1d79d734f9f21392eab0084d01534b31c1dd87c81645fd762482a90027754041ca1b56133d0466c0600ffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
     genesis_state: {
@@ -7781,16 +7759,18 @@ class NodeConfiguration {
         version: "number",
         flavor: knownChainSpecFromJson,
         chain_spec: JipChainSpec.fromJson,
-        database_base_path: "string",
+        database_base_path: json.optional("string"),
         authorship: AuthorshipOptions.fromJson,
     }, NodeConfiguration.new);
     static new({ $schema, version, flavor, chain_spec, database_base_path, authorship }) {
         if (version !== 1) {
             throw new Error("Only version=1 config is supported.");
         }
-        return new NodeConfiguration($schema, version, flavor, chain_spec, database_base_path, authorship);
+        return new NodeConfiguration($schema, version, flavor, chain_spec, database_base_path ?? undefined, authorship);
     }
-    constructor($schema, version, flavor, chainSpec,
+    constructor($schema, version, flavor, chainSpec,
+    /** If database path is not provided, we load an in-memory db. */
+    databaseBasePath, authorship) {
         this.$schema = $schema;
         this.version = version;
         this.flavor = flavor;
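With `database_base_path` now declared via `json.optional(...)` and defaulted to `undefined`, a node configuration can omit the path entirely and, per the new constructor comment, fall back to an in-memory database. A hypothetical config object sketch (field values illustrative only, not a documented schema):

    // Hypothetical version=1 configuration; omitting database_base_path now selects
    // the in-memory database instead of failing JSON validation.
    const config = {
        version: 1,
        flavor: "tiny", // assumed flavor name
        chain_spec: {}, // elided here
        authorship: {}, // elided here
        // database_base_path deliberately omitted
    };
    console.log(config.database_base_path ?? "in-memory");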
@@ -7876,6 +7856,7 @@ class InMemoryBlocks {
     getExtrinsic(hash) {
         return this.extrinsicsByHeaderHash.get(hash) ?? null;
     }
+    async close() { }
 }
 
 /**
@@ -8069,6 +8050,8 @@ function accumulationOutputComparator(a, b) {
 const O = 8;
 /** `Q`: The number of items in the authorizations queue. */
 const Q = 80;
+/** `W_B`: The maximum size of the concatenated variable-size blobs, extrinsics and imported segments of a work-package, in octets */
+Compatibility.isGreaterOrEqual(GpVersion.V0_7_2) ? 13_791_360 : 13_794_305;
 /** `W_T`: The size of a transfer memo in octets. */
 const W_T = 128;
 /**
@@ -9059,31 +9042,29 @@ var UpdatePreimageKind;
  * 3. Update `LookupHistory` with given value.
  */
 class UpdatePreimage {
-    serviceId;
     action;
-    constructor(
-        this.serviceId = serviceId;
+    constructor(action) {
         this.action = action;
     }
     /** A preimage is provided. We should update the lookuphistory and add the preimage to db. */
-    static provide({
-        return new UpdatePreimage(
+    static provide({ preimage, slot }) {
+        return new UpdatePreimage({
             kind: UpdatePreimageKind.Provide,
             preimage,
             slot,
         });
     }
     /** The preimage should be removed completely from the database. */
-    static remove({
-        return new UpdatePreimage(
+    static remove({ hash, length }) {
+        return new UpdatePreimage({
             kind: UpdatePreimageKind.Remove,
             hash,
             length,
         });
     }
     /** Update the lookup history of some preimage or add a new one (request). */
-    static updateOrAdd({
-        return new UpdatePreimage(
+    static updateOrAdd({ lookupHistory }) {
+        return new UpdatePreimage({
             kind: UpdatePreimageKind.UpdateOrAdd,
             item: lookupHistory,
         });
@@ -9120,23 +9101,21 @@ var UpdateServiceKind;
 UpdateServiceKind[UpdateServiceKind["Create"] = 1] = "Create";
 })(UpdateServiceKind || (UpdateServiceKind = {}));
 /**
- * Update service info
+ * Update service info or create a new one.
  */
 class UpdateService {
-    serviceId;
     action;
-    constructor(
-        this.serviceId = serviceId;
+    constructor(action) {
         this.action = action;
     }
-    static update({
-        return new UpdateService(
+    static update({ serviceInfo }) {
+        return new UpdateService({
             kind: UpdateServiceKind.Update,
             account: serviceInfo,
         });
     }
-    static create({
-        return new UpdateService(
+    static create({ serviceInfo, lookupHistory, }) {
+        return new UpdateService({
             kind: UpdateServiceKind.Create,
             account: serviceInfo,
             lookupHistory,
@@ -9157,17 +9136,15 @@ var UpdateStorageKind;
  * Can either create/modify an entry or remove it.
  */
 class UpdateStorage {
-    serviceId;
     action;
-    constructor(
-        this.serviceId = serviceId;
+    constructor(action) {
         this.action = action;
     }
-    static set({
-        return new UpdateStorage(
+    static set({ storage }) {
+        return new UpdateStorage({ kind: UpdateStorageKind.Set, storage });
     }
-    static remove({
-        return new UpdateStorage(
+    static remove({ key }) {
+        return new UpdateStorage({ kind: UpdateStorageKind.Remove, key });
     }
     get key() {
         if (this.action.kind === UpdateStorageKind.Remove) {
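A recurring theme in the three hunks above: `UpdatePreimage`, `UpdateService` and `UpdateStorage` no longer carry their own `serviceId`; later hunks show callers grouping them in `Map`s keyed by service instead. A stand-in sketch of that grouping pattern (the factory here is a mock mirroring the new shape, not the library's class):

    // Mock factory mirroring the new serviceId-less shape.
    const UpdateStorage = {
        set: ({ storage }) => ({ action: { kind: "Set", storage } }),
    };

    // serviceId -> UpdateStorage[] — the Map-of-arrays layout used by later hunks.
    const storageUpdates = new Map();
    function recordSet(serviceId, storage) {
        const updates = storageUpdates.get(serviceId) ?? [];
        updates.push(UpdateStorage.set({ storage }));
        storageUpdates.set(serviceId, updates);
    }
    recordSet(5, { key: "k", value: "v" });
    console.log(storageUpdates.get(5).length); // 1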
@@ -9352,12 +9329,12 @@ class InMemoryState extends WithDebug {
      * Modify the state and apply a single state update.
      */
     applyUpdate(update) {
-        const {
+        const { removed, created: _, updated, preimages, storage, ...rest } = update;
         // just assign all other variables
         Object.assign(this, rest);
         // and update the services state
         let result;
-        result = this.updateServices(
+        result = this.updateServices(updated);
         if (result.isError) {
             return result;
         }
@@ -9369,7 +9346,7 @@ class InMemoryState extends WithDebug {
         if (result.isError) {
             return result;
         }
-        this.removeServices(
+        this.removeServices(removed);
         return Result$1.ok(OK);
     }
     removeServices(servicesRemoved) {
@@ -9378,89 +9355,102 @@ class InMemoryState extends WithDebug {
             this.services.delete(serviceId);
         }
     }
-    updateStorage(
-
-
-
-
-
-
-
-
-
-
-
-
-
+    updateStorage(storageUpdates) {
+        if (storageUpdates === undefined) {
+            return Result$1.ok(OK);
+        }
+        for (const [serviceId, updates] of storageUpdates.entries()) {
+            for (const update of updates) {
+                const { kind } = update.action;
+                const service = this.services.get(serviceId);
+                if (service === undefined) {
+                    return Result$1.error(UpdateError.NoService, () => `Attempting to update storage of non-existing service: ${serviceId}`);
+                }
+                if (kind === UpdateStorageKind.Set) {
+                    const { key, value } = update.action.storage;
+                    service.data.storage.set(key.toString(), StorageItem.create({ key, value }));
+                }
+                else if (kind === UpdateStorageKind.Remove) {
+                    const { key } = update.action;
+                    check `
                     ${service.data.storage.has(key.toString())}
-                    Attempting to remove non-existing storage item at ${serviceId}: ${action.key}
+                    Attempting to remove non-existing storage item at ${serviceId}: ${update.action.key}
                     `;
-
-
-
-
+                    service.data.storage.delete(key.toString());
+                }
+                else {
+                    assertNever(kind);
+                }
             }
         }
         return Result$1.ok(OK);
     }
-    updatePreimages(
-
+    updatePreimages(preimagesUpdates) {
+        if (preimagesUpdates === undefined) {
+            return Result$1.ok(OK);
+        }
+        for (const [serviceId, updates] of preimagesUpdates.entries()) {
            const service = this.services.get(serviceId);
            if (service === undefined) {
-                return Result$1.error(UpdateError.NoService, `Attempting to update preimage of non-existing service: ${serviceId}`);
+                return Result$1.error(UpdateError.NoService, () => `Attempting to update preimage of non-existing service: ${serviceId}`);
            }
-            const
-
-
-
-
-
-
-
-
-
-
-
-
-
+            for (const update of updates) {
+                const { kind } = update.action;
+                if (kind === UpdatePreimageKind.Provide) {
+                    const { preimage, slot } = update.action;
+                    if (service.data.preimages.has(preimage.hash)) {
+                        return Result$1.error(UpdateError.PreimageExists, () => `Overwriting existing preimage at ${serviceId}: ${preimage}`);
+                    }
+                    service.data.preimages.set(preimage.hash, preimage);
+                    if (slot !== null) {
+                        const lookupHistory = service.data.lookupHistory.get(preimage.hash);
+                        const length = tryAsU32(preimage.blob.length);
+                        const lookup = new LookupHistoryItem(preimage.hash, length, tryAsLookupHistorySlots([slot]));
+                        if (lookupHistory === undefined) {
+                            // no lookup history for that preimage at all (edge case, should be requested)
+                            service.data.lookupHistory.set(preimage.hash, [lookup]);
+                        }
+                        else {
+                            // insert or replace exiting entry
+                            const index = lookupHistory.map((x) => x.length).indexOf(length);
+                            lookupHistory.splice(index, index === -1 ? 0 : 1, lookup);
+                        }
                     }
-
-
-
-
+                }
+                else if (kind === UpdatePreimageKind.Remove) {
+                    const { hash, length } = update.action;
+                    service.data.preimages.delete(hash);
+                    const history = service.data.lookupHistory.get(hash) ?? [];
+                    const idx = history.map((x) => x.length).indexOf(length);
+                    if (idx !== -1) {
+                        history.splice(idx, 1);
                     }
                 }
-
-
-
-
-
-
-
-
+                else if (kind === UpdatePreimageKind.UpdateOrAdd) {
+                    const { item } = update.action;
+                    const history = service.data.lookupHistory.get(item.hash) ?? [];
+                    const existingIdx = history.map((x) => x.length).indexOf(item.length);
+                    const removeCount = existingIdx === -1 ? 0 : 1;
+                    history.splice(existingIdx, removeCount, item);
+                    service.data.lookupHistory.set(item.hash, history);
+                }
+                else {
+                    assertNever(kind);
                 }
-            }
-            else if (kind === UpdatePreimageKind.UpdateOrAdd) {
-                const { item } = action;
-                const history = service.data.lookupHistory.get(item.hash) ?? [];
-                const existingIdx = history.map((x) => x.length).indexOf(item.length);
-                const removeCount = existingIdx === -1 ? 0 : 1;
-                history.splice(existingIdx, removeCount, item);
-                service.data.lookupHistory.set(item.hash, history);
-            }
-            else {
-                assertNever(kind);
            }
        }
        return Result$1.ok(OK);
    }
    updateServices(servicesUpdates) {
-
-
+        if (servicesUpdates === undefined) {
+            return Result$1.ok(OK);
+        }
+        for (const [serviceId, update] of servicesUpdates.entries()) {
+            const { kind, account } = update.action;
            if (kind === UpdateServiceKind.Create) {
-                const { lookupHistory } = action;
+                const { lookupHistory } = update.action;
                if (this.services.has(serviceId)) {
-                    return Result$1.error(UpdateError.DuplicateService, `${serviceId} already exists!`);
+                    return Result$1.error(UpdateError.DuplicateService, () => `${serviceId} already exists!`);
                }
                this.services.set(serviceId, new InMemoryService(serviceId, {
                    info: account,
@@ -9472,7 +9462,7 @@ class InMemoryState extends WithDebug {
         else if (kind === UpdateServiceKind.Update) {
             const existingService = this.services.get(serviceId);
             if (existingService === undefined) {
-                return Result$1.error(UpdateError.NoService, `Cannot update ${serviceId} because it does not exist.`);
+                return Result$1.error(UpdateError.NoService, () => `Cannot update ${serviceId} because it does not exist.`);
             }
             existingService.data.info = account;
         }
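The rewritten update loops close each `kind` dispatch with `assertNever(kind)`. In the TypeScript source this makes the branching exhaustive at compile time; at runtime a common definition (assumed here — it is not shown in this diff) simply throws:

    // Assumed definition; in TypeScript the parameter is typed `never`, so a newly
    // added enum variant fails to compile until every switch handles it.
    function assertNever(value) {
        throw new Error(`Unexpected value: ${value}`);
    }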
@@ -10726,76 +10716,88 @@ function* serializeStateUpdate(spec, blake2b, update) {
     yield* serializeBasicKeys(spec, update);
     const encode = (codec, val) => Encoder.encodeObject(codec, val, spec);
     // then let's proceed with service updates
-    yield* serializeServiceUpdates(update.
+    yield* serializeServiceUpdates(update.updated, encode, blake2b);
     yield* serializePreimages(update.preimages, encode, blake2b);
     yield* serializeStorage(update.storage, blake2b);
-    yield* serializeRemovedServices(update.
+    yield* serializeRemovedServices(update.removed);
 }
 function* serializeRemovedServices(servicesRemoved) {
-
+    if (servicesRemoved === undefined) {
+        return;
+    }
+    for (const serviceId of servicesRemoved) {
         // TODO [ToDr] what about all data associated with a service?
         const codec = serialize.serviceData(serviceId);
         yield [StateEntryUpdateAction.Remove, codec.key, EMPTY_BLOB];
     }
 }
-function* serializeStorage(
-
-
-
-
-
-
-
-
-
-
-
+function* serializeStorage(storageUpdates, blake2b) {
+    if (storageUpdates === undefined) {
+        return;
+    }
+    for (const [serviceId, updates] of storageUpdates.entries()) {
+        for (const { action } of updates) {
+            switch (action.kind) {
+                case UpdateStorageKind.Set: {
+                    const key = action.storage.key;
+                    const codec = serialize.serviceStorage(blake2b, serviceId, key);
+                    yield [StateEntryUpdateAction.Insert, codec.key, action.storage.value];
+                    break;
+                }
+                case UpdateStorageKind.Remove: {
+                    const key = action.key;
+                    const codec = serialize.serviceStorage(blake2b, serviceId, key);
+                    yield [StateEntryUpdateAction.Remove, codec.key, EMPTY_BLOB];
+                    break;
+                }
             }
-            default:
-                assertNever(action);
         }
     }
 }
-function* serializePreimages(
-
-
-
-
-
-
-
-    const
-
-
-
-
-
+function* serializePreimages(preimagesUpdates, encode, blake2b) {
+    if (preimagesUpdates === undefined) {
+        return;
+    }
+    for (const [serviceId, updates] of preimagesUpdates.entries()) {
+        for (const { action } of updates) {
+            switch (action.kind) {
+                case UpdatePreimageKind.Provide: {
+                    const { hash, blob } = action.preimage;
+                    const codec = serialize.servicePreimages(blake2b, serviceId, hash);
+                    yield [StateEntryUpdateAction.Insert, codec.key, blob];
+                    if (action.slot !== null) {
+                        const codec2 = serialize.serviceLookupHistory(blake2b, serviceId, hash, tryAsU32(blob.length));
+                        yield [
+                            StateEntryUpdateAction.Insert,
+                            codec2.key,
+                            encode(codec2.Codec, tryAsLookupHistorySlots([action.slot])),
+                        ];
+                    }
+                    break;
+                }
+                case UpdatePreimageKind.UpdateOrAdd: {
+                    const { hash, length, slots } = action.item;
+                    const codec = serialize.serviceLookupHistory(blake2b, serviceId, hash, length);
+                    yield [StateEntryUpdateAction.Insert, codec.key, encode(codec.Codec, slots)];
+                    break;
+                }
+                case UpdatePreimageKind.Remove: {
+                    const { hash, length } = action;
+                    const codec = serialize.servicePreimages(blake2b, serviceId, hash);
+                    yield [StateEntryUpdateAction.Remove, codec.key, EMPTY_BLOB];
+                    const codec2 = serialize.serviceLookupHistory(blake2b, serviceId, hash, length);
+                    yield [StateEntryUpdateAction.Remove, codec2.key, EMPTY_BLOB];
+                    break;
                 }
-                break;
-            }
-            case UpdatePreimageKind.UpdateOrAdd: {
-                const { hash, length, slots } = action.item;
-                const codec = serialize.serviceLookupHistory(blake2b, serviceId, hash, length);
-                yield [StateEntryUpdateAction.Insert, codec.key, encode(codec.Codec, slots)];
-                break;
-            }
-            case UpdatePreimageKind.Remove: {
-                const { hash, length } = action;
-                const codec = serialize.servicePreimages(blake2b, serviceId, hash);
-                yield [StateEntryUpdateAction.Remove, codec.key, EMPTY_BLOB];
-                const codec2 = serialize.serviceLookupHistory(blake2b, serviceId, hash, length);
-                yield [StateEntryUpdateAction.Remove, codec2.key, EMPTY_BLOB];
-                break;
             }
-            default:
-                assertNever(action);
         }
     }
 }
 function* serializeServiceUpdates(servicesUpdates, encode, blake2b) {
-
+    if (servicesUpdates === undefined) {
+        return;
+    }
+    for (const [serviceId, { action }] of servicesUpdates.entries()) {
         // new service being created or updated
         const codec = serialize.serviceData(serviceId);
         yield [StateEntryUpdateAction.Insert, codec.key, encode(codec.Codec, action.account)];
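The serialization generators now yield `[action, key, value]` triples and bail out early when a section of the update is `undefined`. A hedged sketch of a consumer, using a local mock of the action enum (the real numeric values are not shown in this diff) and a hypothetical `store`:

    // Local mock of the action enum; values assumed for illustration only.
    const StateEntryUpdateAction = { Insert: 0, Remove: 1 };

    // Hypothetical consumer; `store` is any key/value container.
    function applyEntries(store, entries) {
        for (const [action, key, value] of entries) {
            if (action === StateEntryUpdateAction.Insert) {
                store.set(key, value);
            }
            else {
                store.delete(key);
            }
        }
    }
    const store = new Map();
    applyEntries(store, [[StateEntryUpdateAction.Insert, "k", "v"]]);
    console.log(store.get("k")); // "v"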
@@ -11064,31 +11066,35 @@ var LeafDbError;
  * Note that reading the actual values may require accessing the original database.
  */
 class LeafDb {
-
+    leafs;
     db;
     /**
      * Parse given blob containing concatenated leaf nodes into leaf db.
      */
     static fromLeavesBlob(blob, db) {
         if (blob.length % TRIE_NODE_BYTES !== 0) {
-            return Result$1.error(LeafDbError.InvalidLeafData, `${blob.length} is not a multiply of ${TRIE_NODE_BYTES}: ${blob}`);
+            return Result$1.error(LeafDbError.InvalidLeafData, () => `${blob.length} is not a multiply of ${TRIE_NODE_BYTES}: ${blob}`);
         }
         const leaves = SortedSet.fromArray(leafComparator, []);
         for (const nodeData of blob.chunks(TRIE_NODE_BYTES)) {
             const node = new TrieNode(nodeData.raw);
             if (node.getNodeType() === NodeType.Branch) {
-                return Result$1.error(LeafDbError.InvalidLeafData, `Branch node detected: ${nodeData}`);
+                return Result$1.error(LeafDbError.InvalidLeafData, () => `Branch node detected: ${nodeData}`);
             }
             leaves.insert(node.asLeafNode());
         }
         return Result$1.ok(new LeafDb(leaves, db));
     }
+    /** Create leaf db from sorted set of leaves. */
+    static fromLeaves(leaves, db) {
+        return new LeafDb(leaves, db);
+    }
     /** A mapping between an embedded value or db lookup key. */
     lookup;
-    constructor(
-        this.
+    constructor(leafs, db) {
+        this.leafs = leafs;
         this.db = db;
-        this.lookup = TruncatedHashDictionary.fromEntries(
+        this.lookup = TruncatedHashDictionary.fromEntries(leafs.array.map((leaf) => {
             const key = leaf.getKey().asOpaque();
             const value = leaf.hasEmbeddedValue()
                 ? {
@@ -11097,7 +11103,7 @@ class LeafDb {
                 }
                 : {
                     kind: LookupKind.DbKey,
-                    key: leaf.getValueHash()
+                    key: leaf.getValueHash(),
                 };
             return [key, value];
         }));
@@ -11117,7 +11123,7 @@ class LeafDb {
     }
     getStateRoot(blake2b) {
         const blake2bTrieHasher = getBlake2bTrieHasher(blake2b);
-        return InMemoryTrie.computeStateRoot(blake2bTrieHasher, this.
+        return InMemoryTrie.computeStateRoot(blake2bTrieHasher, this.leafs).asOpaque();
     }
     intoStateEntries() {
         const entries = [];
@@ -11142,144 +11148,89 @@ var LookupKind;
 LookupKind[LookupKind["DbKey"] = 1] = "DbKey";
 })(LookupKind || (LookupKind = {}));
 
-
-const
-
-const
-
-
+function updateLeafs(leafs, blake2b, data) {
+    const blake2bTrieHasher = getBlake2bTrieHasher(blake2b);
+    // We will collect all values that don't fit directly into leaf nodes.
+    const values = [];
+    for (const [action, key, value] of data) {
+        if (action === StateEntryUpdateAction.Insert) {
+            const leafNode = InMemoryTrie.constructLeaf(blake2bTrieHasher, key.asOpaque(), value);
+            leafs.replace(leafNode);
+            if (!leafNode.hasEmbeddedValue()) {
+                values.push([leafNode.getValueHash(), value]);
+            }
+        }
+        else if (action === StateEntryUpdateAction.Remove) {
+            const leafNode = InMemoryTrie.constructLeaf(blake2bTrieHasher, key.asOpaque(), BytesBlob.empty());
+            leafs.removeOne(leafNode);
+            // TODO [ToDr] Handle ref-counting values or updating some header-hash-based references.
+        }
+        else {
+            assertNever(action);
+        }
     }
-
-
+    return {
+        values,
+        leafs,
+    };
+}
+
+/** In-memory serialized-states db. */
+class InMemorySerializedStates {
+    spec;
+    blake2b;
+    db = HashDictionary.new();
+    valuesDb = HashDictionary.new();
+    constructor(spec, blake2b) {
+        this.spec = spec;
+        this.blake2b = blake2b;
     }
-
-
-
-
-
-
-        const data = Array.from(v.values());
-        data.sort((a, b) => compare(a, b).value);
-        e.varU32(tryAsU32(data.length));
-        for (const v of data) {
-            value.encode(e, v);
+    async insertInitialState(headerHash, entries) {
+        // convert state entries into leafdb
+        const { values, leafs } = updateLeafs(SortedSet.fromArray(leafComparator, []), this.blake2b, Array.from(entries, (x) => [StateEntryUpdateAction.Insert, x[0], x[1]]));
+        // insert values to the db.
+        for (const val of values) {
+            this.valuesDb.set(val[0], val[1]);
         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        this.db.set(headerHash, leafs);
+        return Result$1.ok(OK);
+    }
+    async getStateRoot(state) {
+        return state.backend.getStateRoot(this.blake2b);
+    }
+    async updateAndSetState(header, state, update) {
+        const blake2b = this.blake2b;
+        const updatedValues = serializeStateUpdate(this.spec, blake2b, update);
+        const { values, leafs } = updateLeafs(state.backend.leafs, blake2b, updatedValues);
+        // insert values to the db
+        // valuesdb can be shared between all states because it's just
+        // <valuehash> -> <value> mapping and existence is managed by trie leafs.
+        for (const val of values) {
+            this.valuesDb.set(val[0], val[1]);
+        }
+        // make sure to clone the leafs before writing, since the collection is re-used.
+        this.db.set(header, SortedSet.fromSortedArray(leafComparator, leafs.slice()));
+        return Result$1.ok(OK);
+    }
+    getState(header) {
+        const leafs = this.db.get(header);
+        if (leafs === undefined) {
+            return null;
         }
-
-
-
-
-
-        };
-
-
-
-        slots: readonlyArray(codec$1.sequenceVarLen(codec$1.u32.asOpaque())).convert(seeThrough, tryAsLookupHistorySlots),
-    }, "LookupHistoryItem", ({ hash, length, slots }) => new LookupHistoryItem(hash, length, slots));
-const lookupHistoryEntryCodec = codec$1.object({
-    key: codec$1.bytes(HASH_SIZE).asOpaque(),
-    data: codec$1.sequenceVarLen(lookupHistoryItemCodec),
-});
-const lookupHistoryCodec = codec$1
-    .sequenceVarLen(lookupHistoryEntryCodec)
-    .convert((dict) => {
-    const entries = [];
-    for (const [key, data] of dict) {
-        entries.push({
-            key,
-            data,
+        // now create a leafdb with shared values db.
+        const leafDb = LeafDb.fromLeaves(leafs, {
+            get: (key) => {
+                const val = this.valuesDb.get(key);
+                if (val === undefined) {
+                    throw new Error(`Missing value at key: ${key}`);
+                }
+                return val.raw;
+            },
         });
+        return SerializedState.new(this.spec, this.blake2b, leafDb);
     }
-
-    }
-    const dict = HashDictionary.new();
-    for (const { key, data } of items) {
-        const items = dict.get(key) ?? [];
-        items.push(...data);
-        dict.set(key, items);
-    }
-    return dict;
-});
-class ServiceWithCodec extends InMemoryService {
-    static Codec = codec$1.Class(ServiceWithCodec, {
-        serviceId: codec$1.u32.asOpaque(),
-        data: codec$1.object({
-            info: ServiceAccountInfo.Codec,
-            preimages: codecHashDictionary(PreimageItem.Codec, (x) => x.hash),
-            lookupHistory: lookupHistoryCodec,
-            storage: codecMap(StorageItem.Codec, (x) => x.key.toString()),
-        }),
-    });
-    constructor(id, data) {
-        super(id, data);
-    }
-    static create({ serviceId, data }) {
-        return new ServiceWithCodec(serviceId, data);
-    }
-}
-const inMemoryStateCodec = (spec) => codec$1.Class(class State extends InMemoryState {
-    static create(data) {
-        return InMemoryState.new(spec, data);
-    }
-}, {
-    // alpha
-    authPools: serialize.authPools.Codec,
-    // phi
-    authQueues: serialize.authQueues.Codec,
-    // beta
-    recentBlocks: serialize.recentBlocks.Codec,
-    // gamma_k
-    nextValidatorData: codecPerValidator(ValidatorData.Codec),
-    // gamma_z
-    epochRoot: codec$1.bytes(BANDERSNATCH_RING_ROOT_BYTES).asOpaque(),
-    // gamma_s
-    sealingKeySeries: SafroleSealingKeysData.Codec,
-    // gamma_a
-    ticketsAccumulator: readonlyArray(codec$1.sequenceVarLen(Ticket.Codec)).convert((x) => x, asKnownSize),
-    // psi
-    disputesRecords: serialize.disputesRecords.Codec,
-    // eta
-    entropy: serialize.entropy.Codec,
-    // iota
-    designatedValidatorData: serialize.designatedValidators.Codec,
-    // kappa
-    currentValidatorData: serialize.currentValidators.Codec,
-    // lambda
-    previousValidatorData: serialize.previousValidators.Codec,
-    // rho
-    availabilityAssignment: serialize.availabilityAssignment.Codec,
-    // tau
-    timeslot: serialize.timeslot.Codec,
-    // chi
-    privilegedServices: serialize.privilegedServices.Codec,
-    // pi
-    statistics: serialize.statistics.Codec,
-    // omega
-    accumulationQueue: serialize.accumulationQueue.Codec,
-    // xi
-    recentlyAccumulated: serialize.recentlyAccumulated.Codec,
-    // theta
-    accumulationOutputLog: serialize.accumulationOutputLog.Codec,
-    // delta
-    services: codec$1.dictionary(codec$1.u32.asOpaque(), ServiceWithCodec.Codec, {
-        sortKeys: (a, b) => a - b,
-    }),
-});
+    async close() { }
+}
 
 /** A potential error that occured during state update. */
 var StateUpdateError;
@@ -11292,13 +11243,15 @@ var StateUpdateError;
 class InMemoryStates {
     spec;
     db = HashDictionary.new();
+    blake2b;
     constructor(spec) {
         this.spec = spec;
+        this.blake2b = Blake2b.createHasher();
     }
     async updateAndSetState(headerHash, state, update) {
         const res = state.applyUpdate(update);
         if (res.isOk) {
-            return await this.
+            return await this.insertInitialState(headerHash, state);
         }
         switch (res.error) {
             case UpdateError.DuplicateService:
@@ -11310,31 +11263,34 @@ class InMemoryStates {
         }
     }
     async getStateRoot(state) {
-        const blake2b = await
+        const blake2b = await this.blake2b;
         return StateEntries.serializeInMemory(this.spec, blake2b, state).getRootHash(blake2b);
     }
     /** Insert a full state into the database. */
-    async
-        const
-        this.db.set(headerHash,
+    async insertInitialState(headerHash, state) {
+        const copy = InMemoryState.copyFrom(this.spec, state, state.intoServicesData());
+        this.db.set(headerHash, copy);
         return Result$1.ok(OK);
     }
     getState(headerHash) {
-        const
-        if (
+        const state = this.db.get(headerHash);
+        if (state === undefined) {
             return null;
         }
-        return
+        return InMemoryState.copyFrom(this.spec, state, state.intoServicesData());
     }
+    async close() { }
 }
 
 var index$d = /*#__PURE__*/Object.freeze({
     __proto__: null,
     InMemoryBlocks: InMemoryBlocks,
+    InMemorySerializedStates: InMemorySerializedStates,
     InMemoryStates: InMemoryStates,
     LeafDb: LeafDb,
     get LeafDbError () { return LeafDbError; },
-    get StateUpdateError () { return StateUpdateError; }
+    get StateUpdateError () { return StateUpdateError; },
+    updateLeafs: updateLeafs
 });
 
 /**
@@ -12392,6 +12348,14 @@ const NoMachineError = Symbol("Machine index not found.");
 const SegmentExportError = Symbol("Too many segments already exported.");
 
 const InsufficientFundsError = "insufficient funds";
+/** Deep clone of a map with array. */
+function deepCloneMapWithArray(map) {
+    const cloned = [];
+    for (const [k, v] of map.entries()) {
+        cloned.push([k, v.slice()]);
+    }
+    return new Map(cloned);
+}
 /**
  * State updates that currently accumulating service produced.
  *
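`deepCloneMapWithArray` copies the Map structure and each array value one level deep, which is exactly the boundary the checkpointing code below relies on. A quick demonstration (using the function as added above):

    function deepCloneMapWithArray(map) {
        const cloned = [];
        for (const [k, v] of map.entries()) {
            cloned.push([k, v.slice()]);
        }
        return new Map(cloned);
    }

    const original = new Map([[1, [{ n: 1 }]]]);
    const copy = deepCloneMapWithArray(original);
    copy.get(1).push({ n: 2 });
    console.log(original.get(1).length); // 1 — the arrays are independent…
    console.log(original.get(1)[0] === copy.get(1)[0]); // true — …but elements are shared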
@@ -12421,10 +12385,11 @@ class AccumulationStateUpdate {
     /** Create new empty state update. */
     static empty() {
         return new AccumulationStateUpdate({
-
-
-
-
+            created: [],
+            updated: new Map(),
+            removed: [],
+            preimages: new Map(),
+            storage: new Map(),
         }, []);
     }
     /** Create a state update with some existing, yet uncommited services updates. */
@@ -12436,10 +12401,13 @@ class AccumulationStateUpdate {
     /** Create a copy of another `StateUpdate`. Used by checkpoints. */
     static copyFrom(from) {
         const serviceUpdates = {
-
-
-
-
+            // shallow copy
+            created: [...from.services.created],
+            updated: new Map(from.services.updated),
+            removed: [...from.services.removed],
+            // deep copy
+            preimages: deepCloneMapWithArray(from.services.preimages),
+            storage: deepCloneMapWithArray(from.services.storage),
         };
         const transfers = [...from.transfers];
         const update = new AccumulationStateUpdate(serviceUpdates, transfers, new Map(from.yieldedRoots));
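The split in `copyFrom` between shallow and deep copies follows from mutation patterns elsewhere in this diff: `created`/`removed`/`updated` entries are replaced wholesale, while the `preimages`/`storage` arrays are pushed to and spliced in place, so a checkpoint sharing them would see later edits. A minimal demonstration of the aliasing a shallow Map copy would cause:

    const services = { preimages: new Map([[1, []]]) };
    const shallowCheckpoint = new Map(services.preimages); // copies entries, not arrays
    services.preimages.get(1).push("later update");
    console.log(shallowCheckpoint.get(1).length); // 1, not 0 — the checkpoint sees the
    // later push because the array is shared; hence deepCloneMapWithArray above.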
@@ -12487,9 +12455,9 @@ class PartiallyUpdatedState {
         if (destination === null) {
             return null;
         }
-        const
-        if (
-            return
+        const maybeUpdatedServiceInfo = this.stateUpdate.services.updated.get(destination);
+        if (maybeUpdatedServiceInfo !== undefined) {
+            return maybeUpdatedServiceInfo.action.account;
         }
         const maybeService = this.state.getService(destination);
         if (maybeService === null) {
@@ -12498,7 +12466,8 @@ class PartiallyUpdatedState {
         return maybeService.getInfo();
     }
     getStorage(serviceId, rawKey) {
-        const
+        const storages = this.stateUpdate.services.storage.get(serviceId) ?? [];
+        const item = storages.find((x) => x.key.isEqualTo(rawKey));
         if (item !== undefined) {
             return item.value;
         }
@@ -12513,10 +12482,11 @@ class PartiallyUpdatedState {
      * the existence in `preimages` map.
      */
     hasPreimage(serviceId, hash) {
-        const
+        const preimages = this.stateUpdate.services.preimages.get(serviceId) ?? [];
+        const providedPreimage = preimages.find(
         // we ignore the action here, since if there is <any> update on that
         // hash it means it has to exist, right?
-        (p) => p.
+        (p) => p.hash.isEqualTo(hash));
         if (providedPreimage !== undefined) {
             return true;
         }
@@ -12529,7 +12499,8 @@ class PartiallyUpdatedState {
     }
     getPreimage(serviceId, hash) {
         // TODO [ToDr] Should we verify availability here?
-        const
+        const preimages = this.stateUpdate.services.preimages.get(serviceId) ?? [];
+        const freshlyProvided = preimages.find((x) => x.hash.isEqualTo(hash));
         if (freshlyProvided !== undefined && freshlyProvided.action.kind === UpdatePreimageKind.Provide) {
             return freshlyProvided.action.preimage.blob;
         }
@@ -12538,10 +12509,11 @@ class PartiallyUpdatedState {
     }
     /** Get status of a preimage of current service taking into account any updates. */
     getLookupHistory(currentTimeslot, serviceId, hash, length) {
+        const preimages = this.stateUpdate.services.preimages.get(serviceId) ?? [];
         // TODO [ToDr] This is most likely wrong. We may have `provide` and `remove` within
         // the same state update. We should however switch to proper "updated state"
         // representation soon.
-        const updatedPreimage =
+        const updatedPreimage = preimages.findLast((update) => update.hash.isEqualTo(hash) && BigInt(update.length) === length);
         const stateFallback = () => {
             // fallback to state lookup
             const service = this.state.getService(serviceId);
@@ -12578,14 +12550,15 @@ class PartiallyUpdatedState {
     /* State update functions. */
     updateStorage(serviceId, key, value) {
         const update = value === null
-            ? UpdateStorage.remove({
+            ? UpdateStorage.remove({ key })
             : UpdateStorage.set({
-                serviceId,
                 storage: StorageItem.create({ key, value }),
             });
-        const
+        const storages = this.stateUpdate.services.storage.get(serviceId) ?? [];
+        const index = storages.findIndex((x) => x.key.isEqualTo(key));
         const count = index === -1 ? 0 : 1;
-
+        storages.splice(index, count, update);
+        this.stateUpdate.services.storage.set(serviceId, storages);
     }
     /**
      * Update a preimage.
@@ -12593,8 +12566,10 @@ class PartiallyUpdatedState {
      * Note we store all previous entries as well, since there might be a sequence of:
      * `provide` -> `remove` and both should update the end state somehow.
      */
-    updatePreimage(newUpdate) {
-        this.stateUpdate.services.preimages.
+    updatePreimage(serviceId, newUpdate) {
+        const updatePreimages = this.stateUpdate.services.preimages.get(serviceId) ?? [];
+        updatePreimages.push(newUpdate);
+        this.stateUpdate.services.preimages.set(serviceId, updatePreimages);
     }
     updateServiceStorageUtilisation(serviceId, items, bytes, serviceInfo) {
         check `${items >= 0} storageUtilisationCount has to be a positive number, got: ${items}`;
@@ -12603,11 +12578,11 @@ class PartiallyUpdatedState {
         const overflowBytes = !isU64(bytes);
         // TODO [ToDr] this is not specified in GP, but it seems sensible.
         if (overflowItems || overflowBytes) {
-            return Result$1.error(InsufficientFundsError);
+            return Result$1.error(InsufficientFundsError, () => `Storage utilisation overflow: items=${overflowItems}, bytes=${overflowBytes}`);
         }
         const thresholdBalance = ServiceAccountInfo.calculateThresholdBalance(items, bytes, serviceInfo.gratisStorage);
         if (serviceInfo.balance < thresholdBalance) {
-            return Result$1.error(InsufficientFundsError);
+            return Result$1.error(InsufficientFundsError, () => `Service balance (${serviceInfo.balance}) below threshold (${thresholdBalance})`);
         }
         // Update service info with new details.
         this.updateServiceInfo(serviceId, ServiceAccountInfo.create({
@@ -12618,22 +12593,25 @@ class PartiallyUpdatedState {
         return Result$1.ok(OK);
     }
     updateServiceInfo(serviceId, newInfo) {
-        const
-
-
-        if (existingItem?.action.kind === UpdateServiceKind.Create) {
-            this.stateUpdate.services.servicesUpdates.splice(idx, toRemove, UpdateService.create({
-                serviceId,
+        const existingUpdate = this.stateUpdate.services.updated.get(serviceId);
+        if (existingUpdate?.action.kind === UpdateServiceKind.Create) {
+            this.stateUpdate.services.updated.set(serviceId, UpdateService.create({
                 serviceInfo: newInfo,
-                lookupHistory:
+                lookupHistory: existingUpdate.action.lookupHistory,
             }));
             return;
         }
-        this.stateUpdate.services.
-            serviceId,
+        this.stateUpdate.services.updated.set(serviceId, UpdateService.update({
             serviceInfo: newInfo,
         }));
     }
+    createService(serviceId, newInfo, newLookupHistory) {
+        this.stateUpdate.services.created.push(serviceId);
+        this.stateUpdate.services.updated.set(serviceId, UpdateService.create({
+            serviceInfo: newInfo,
+            lookupHistory: newLookupHistory,
+        }));
+    }
     getPrivilegedServices() {
         if (this.stateUpdate.privilegedServices !== null) {
             return this.stateUpdate.privilegedServices;
@@ -14252,7 +14230,7 @@ class ReadablePage extends MemoryPage {
     loadInto(result, startIndex, length) {
         const endIndex = startIndex + length;
         if (endIndex > PAGE_SIZE$1) {
-            return Result$1.error(PageFault.fromMemoryIndex(this.start + PAGE_SIZE$1));
+            return Result$1.error(PageFault.fromMemoryIndex(this.start + PAGE_SIZE$1), () => `Page fault: read beyond page boundary at ${this.start + PAGE_SIZE$1}`);
         }
         const bytes = this.data.subarray(startIndex, endIndex);
         // we zero the bytes, since data might not yet be initialized at `endIndex`.
@@ -14261,7 +14239,7 @@ class ReadablePage extends MemoryPage {
         return Result$1.ok(OK);
     }
     storeFrom(_address, _data) {
-        return Result$1.error(PageFault.fromMemoryIndex(this.start, true));
+        return Result$1.error(PageFault.fromMemoryIndex(this.start, true), () => `Page fault: attempted to write to read-only page at ${this.start}`);
     }
     setData(pageIndex, data) {
         this.data.set(data, pageIndex);
@@ -14290,7 +14268,7 @@ class WriteablePage extends MemoryPage {
     loadInto(result, startIndex, length) {
         const endIndex = startIndex + length;
         if (endIndex > PAGE_SIZE$1) {
-            return Result$1.error(PageFault.fromMemoryIndex(this.start + PAGE_SIZE$1));
+            return Result$1.error(PageFault.fromMemoryIndex(this.start + PAGE_SIZE$1), () => `Page fault: read beyond page boundary at ${this.start + PAGE_SIZE$1}`);
         }
         const bytes = this.view.subarray(startIndex, endIndex);
         // we zero the bytes, since the view might not yet be initialized at `endIndex`.
@@ -14360,7 +14338,7 @@ class Memory {
         logger$3.insane `MEM[${address}] <- ${BytesBlob.blobFrom(bytes)}`;
         const pagesResult = this.getPages(address, bytes.length, AccessType.WRITE);
         if (pagesResult.isError) {
-            return Result$1.error(pagesResult.error);
+            return Result$1.error(pagesResult.error, pagesResult.details);
         }
         const pages = pagesResult.ok;
         let currentPosition = address;
@@ -14385,14 +14363,14 @@ class Memory {
         const pages = [];
         for (const pageNumber of pageRange) {
             if (pageNumber < RESERVED_NUMBER_OF_PAGES) {
-                return Result$1.error(PageFault.fromPageNumber(pageNumber, true));
+                return Result$1.error(PageFault.fromPageNumber(pageNumber, true), () => `Page fault: attempted to access reserved page ${pageNumber}`);
             }
             const page = this.memory.get(pageNumber);
             if (page === undefined) {
-                return Result$1.error(PageFault.fromPageNumber(pageNumber));
+                return Result$1.error(PageFault.fromPageNumber(pageNumber), () => `Page fault: page ${pageNumber} not allocated`);
             }
             if (accessType === AccessType.WRITE && !page.isWriteable()) {
-                return Result$1.error(PageFault.fromPageNumber(pageNumber, true));
+                return Result$1.error(PageFault.fromPageNumber(pageNumber, true), () => `Page fault: attempted to write to read-only page ${pageNumber}`);
             }
             pages.push(page);
         }
@@ -14410,7 +14388,7 @@ class Memory {
         }
         const pagesResult = this.getPages(startAddress, result.length, AccessType.READ);
         if (pagesResult.isError) {
-            return Result$1.error(pagesResult.error);
+            return Result$1.error(pagesResult.error, pagesResult.details);
         }
         const pages = pagesResult.ok;
         let currentPosition = startAddress;
@@ -16215,7 +16193,7 @@ class ProgramDecoder {
         }
         catch (e) {
             logger$2.error `Invalid program: ${e}`;
-            return Result$1.error(ProgramDecoderError.InvalidProgramError);
+            return Result$1.error(ProgramDecoderError.InvalidProgramError, () => `Program decoder error: ${e}`);
         }
     }
 }
@@ -16496,7 +16474,7 @@ class HostCallMemory {
             return Result$1.ok(OK);
         }
         if (address + tryAsU64(bytes.length) > MEMORY_SIZE) {
-            return Result$1.error(new OutOfBounds());
+            return Result$1.error(new OutOfBounds(), () => `Memory access out of bounds: address ${address} + length ${bytes.length} exceeds memory size`);
         }
         return this.memory.storeFrom(tryAsMemoryIndex(Number(address)), bytes);
     }
@@ -16505,13 +16483,10 @@ class HostCallMemory {
             return Result$1.ok(OK);
         }
         if (startAddress + tryAsU64(result.length) > MEMORY_SIZE) {
-            return Result$1.error(new OutOfBounds());
+            return Result$1.error(new OutOfBounds(), () => `Memory access out of bounds: address ${startAddress} + length ${result.length} exceeds memory size`);
         }
         return this.memory.loadInto(result, tryAsMemoryIndex(Number(startAddress)));
     }
-    getMemory() {
-        return this.memory;
-    }
 }
 
 class HostCallRegisters {
@@ -17599,32 +17574,33 @@ class Preimages {
         }
         if (prevPreimage.requester > currPreimage.requester ||
             currPreimage.blob.compare(prevPreimage.blob).isLessOrEqual()) {
-            return Result$1.error(PreimagesErrorCode.PreimagesNotSortedUnique);
+            return Result$1.error(PreimagesErrorCode.PreimagesNotSortedUnique, () => `Preimages not sorted/unique at index ${i}`);
         }
     }
     const { preimages, slot } = input;
-    const pendingChanges =
+    const pendingChanges = new Map();
     // select preimages for integration
     for (const preimage of preimages) {
         const { requester, blob } = preimage;
         const hash = this.blake2b.hashBytes(blob).asOpaque();
         const service = this.state.getService(requester);
         if (service === null) {
-            return Result$1.error(PreimagesErrorCode.AccountNotFound);
+            return Result$1.error(PreimagesErrorCode.AccountNotFound, () => `Service not found: ${requester}`);
         }
         const hasPreimage = service.hasPreimage(hash);
         const slots = service.getLookupHistory(hash, tryAsU32(blob.length));
         // https://graypaper.fluffylabs.dev/#/5f542d7/181800181900
         // https://graypaper.fluffylabs.dev/#/5f542d7/116f0011a500
         if (hasPreimage || slots === null || !LookupHistoryItem.isRequested(slots)) {
-            return Result$1.error(PreimagesErrorCode.PreimageUnneeded);
+            return Result$1.error(PreimagesErrorCode.PreimageUnneeded, () => `Preimage unneeded: requester=${requester}, hash=${hash}, hasPreimage=${hasPreimage}, isRequested=${slots !== null && LookupHistoryItem.isRequested(slots)}`);
         }
         // https://graypaper.fluffylabs.dev/#/5f542d7/18c00018f300
-        pendingChanges.
-
+        const updates = pendingChanges.get(requester) ?? [];
+        updates.push(UpdatePreimage.provide({
             preimage: PreimageItem.create({ hash, blob }),
             slot,
         }));
+        pendingChanges.set(requester, updates);
     }
     return Result$1.ok({
         preimages: pendingChanges,