@typeberry/lib 0.5.7 → 0.5.8
- package/package.json +3 -1
- package/packages/jam/block/block.d.ts +3 -3
- package/packages/jam/block/header.d.ts +6 -6
- package/packages/jam/block/test-helpers.d.ts +2 -2
- package/packages/jam/block/tickets.d.ts +5 -4
- package/packages/jam/block/tickets.d.ts.map +1 -1
- package/packages/jam/block/tickets.js +13 -6
- package/packages/jam/block-json/block.d.ts +2 -2
- package/packages/jam/block-json/block.js +1 -1
- package/packages/jam/block-json/common.d.ts +2 -1
- package/packages/jam/block-json/common.d.ts.map +1 -1
- package/packages/jam/block-json/common.js +2 -5
- package/packages/jam/block-json/extrinsic.js +1 -1
- package/packages/jam/block-json/header.d.ts +2 -1
- package/packages/jam/block-json/header.d.ts.map +1 -1
- package/packages/jam/block-json/header.js +34 -32
- package/packages/jam/block-json/tickets-extrinsic.d.ts +2 -1
- package/packages/jam/block-json/tickets-extrinsic.d.ts.map +1 -1
- package/packages/jam/block-json/tickets-extrinsic.js +7 -5
- package/packages/jam/fuzz-proto/v1/types.d.ts +1 -1
- package/packages/jam/jamnp-s/network.d.ts +2 -0
- package/packages/jam/jamnp-s/network.d.ts.map +1 -1
- package/packages/jam/jamnp-s/network.js +4 -0
- package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.d.ts +10 -3
- package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.d.ts.map +1 -1
- package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.js +12 -4
- package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.test.js +4 -3
- package/packages/jam/jamnp-s/protocol/up-0-block-announcement.d.ts +1 -1
- package/packages/jam/jamnp-s/tasks/ticket-distribution.d.ts +34 -0
- package/packages/jam/jamnp-s/tasks/ticket-distribution.d.ts.map +1 -0
- package/packages/jam/jamnp-s/tasks/ticket-distribution.js +115 -0
- package/packages/jam/jamnp-s/tasks/ticket-distribution.test.d.ts +2 -0
- package/packages/jam/jamnp-s/tasks/ticket-distribution.test.d.ts.map +1 -0
- package/packages/jam/jamnp-s/tasks/ticket-distribution.test.js +220 -0
- package/packages/jam/node/main.d.ts.map +1 -1
- package/packages/jam/node/main.js +56 -26
- package/packages/jam/node/reader.d.ts +2 -2
- package/packages/jam/node/workers.d.ts +32 -25
- package/packages/jam/node/workers.d.ts.map +1 -1
- package/packages/jam/node/workers.js +16 -7
- package/packages/jam/safrole/bandersnatch-vrf.d.ts +3 -2
- package/packages/jam/safrole/bandersnatch-vrf.d.ts.map +1 -1
- package/packages/jam/safrole/bandersnatch-vrf.js +2 -2
- package/packages/jam/safrole/bandersnatch-vrf.test.js +3 -2
- package/packages/jam/safrole/safrole.test.js +80 -72
- package/packages/jam/state/safrole-data.d.ts +1 -1
- package/packages/jam/state/test.utils.js +1 -1
- package/packages/jam/state-json/dump.js +2 -2
- package/packages/jam/state-json/safrole.d.ts +2 -2
- package/packages/jam/state-json/safrole.d.ts.map +1 -1
- package/packages/jam/state-json/safrole.js +8 -6
- package/packages/jam/state-merkleization/in-memory-state-codec.d.ts +1 -1
- package/packages/jam/state-vectors/index.d.ts +8 -7
- package/packages/jam/state-vectors/index.d.ts.map +1 -1
- package/packages/jam/state-vectors/index.js +6 -4
- package/packages/jam/transition/hasher.test.js +1 -1
- package/packages/workers/api-node/config.d.ts +5 -1
- package/packages/workers/api-node/config.d.ts.map +1 -1
- package/packages/workers/api-node/config.js +9 -3
- package/packages/workers/api-node/port.d.ts +8 -0
- package/packages/workers/api-node/port.d.ts.map +1 -1
- package/packages/workers/api-node/port.js +10 -0
- package/packages/workers/block-authorship/bootstrap-main.js +10 -3
- package/packages/workers/block-authorship/main.d.ts +2 -1
- package/packages/workers/block-authorship/main.d.ts.map +1 -1
- package/packages/workers/block-authorship/main.js +5 -4
- package/packages/workers/block-authorship/protocol.d.ts +4 -4
- package/packages/workers/block-authorship/ticket-generator.d.ts +3 -2
- package/packages/workers/block-authorship/ticket-generator.d.ts.map +1 -1
- package/packages/workers/block-authorship/ticket-generator.js +2 -3
- package/packages/workers/block-authorship/ticket-generator.test.js +11 -10
- package/packages/workers/comms-authorship-network/index.d.ts +3 -0
- package/packages/workers/comms-authorship-network/index.d.ts.map +1 -0
- package/packages/workers/comms-authorship-network/index.js +2 -0
- package/packages/workers/comms-authorship-network/protocol.d.ts +27 -0
- package/packages/workers/comms-authorship-network/protocol.d.ts.map +1 -0
- package/packages/workers/comms-authorship-network/protocol.js +24 -0
- package/packages/workers/comms-authorship-network/tickets-message.d.ts +18 -0
- package/packages/workers/comms-authorship-network/tickets-message.d.ts.map +1 -0
- package/packages/workers/comms-authorship-network/tickets-message.js +19 -0
- package/packages/workers/importer/protocol.d.ts +8 -8
- package/packages/workers/jam-network/bootstrap-main.js +10 -3
- package/packages/workers/jam-network/main.d.ts +2 -1
- package/packages/workers/jam-network/main.d.ts.map +1 -1
- package/packages/workers/jam-network/main.js +8 -1
- package/packages/workers/jam-network/protocol.d.ts +10 -10
package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.js
@@ -12,6 +12,10 @@ import { tryAsStreamKind } from "./stream.js";
  */
 export const STREAM_KIND_GENERATOR_TO_PROXY = tryAsStreamKind(131);
 export const STREAM_KIND_PROXY_TO_ALL = tryAsStreamKind(132);
+/**
+ * Network protocol message for distributing a single ticket.
+ * Used in CE-131/CE-132 streams.
+ */
 export class TicketDistributionRequest extends WithDebug {
     epochIndex;
     ticket;
@@ -30,14 +34,16 @@ export class TicketDistributionRequest extends WithDebug {
 }
 const logger = Logger.new(import.meta.filename, "protocol/ce-131-ce-132");
 export class ServerHandler {
+    chainSpec;
     kind;
     onTicketReceived;
-    constructor(kind, onTicketReceived) {
+    constructor(chainSpec, kind, onTicketReceived) {
+        this.chainSpec = chainSpec;
         this.kind = kind;
         this.onTicketReceived = onTicketReceived;
     }
     onStreamMessage(sender, message) {
-        const ticketDistribution = Decoder.decodeObject(TicketDistributionRequest.Codec, message);
+        const ticketDistribution = Decoder.decodeObject(TicketDistributionRequest.Codec, message, this.chainSpec);
         logger.log `[${sender.streamId}][ce-${this.kind}] Received ticket for epoch ${ticketDistribution.epochIndex}`;
         this.onTicketReceived(ticketDistribution.epochIndex, ticketDistribution.ticket);
         sender.close();
@@ -45,8 +51,10 @@ export class ServerHandler {
     onClose(_streamId) { }
 }
 export class ClientHandler {
+    chainSpec;
     kind;
-    constructor(kind) {
+    constructor(chainSpec, kind) {
+        this.chainSpec = chainSpec;
         this.kind = kind;
     }
     onStreamMessage(sender) {
@@ -56,7 +64,7 @@ export class ClientHandler {
     onClose(_streamId) { }
     sendTicket(sender, epochIndex, ticket) {
         const request = TicketDistributionRequest.create({ epochIndex, ticket });
-        sender.bufferAndSend(Encoder.encodeObject(TicketDistributionRequest.Codec, request));
+        sender.bufferAndSend(Encoder.encodeObject(TicketDistributionRequest.Codec, request, this.chainSpec));
         sender.close();
     }
 }

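This is a breaking change for anyone constructing these handlers directly: the chain spec is now the first constructor argument, and it is forwarded into `Encoder.encodeObject` / `Decoder.decodeObject` so the ticket codec can be interpreted against chain parameters. A minimal migration sketch, assuming `tinyChainSpec` as the spec (the updated test below does exactly this):

```ts
import { tinyChainSpec } from "#@typeberry/config";
import {
    ClientHandler,
    ServerHandler,
    STREAM_KIND_GENERATOR_TO_PROXY,
} from "./ce-131-ce-132-safrole-ticket-distribution.js";

// chainSpec comes first; the handlers thread it into the codec calls.
const server = new ServerHandler(tinyChainSpec, STREAM_KIND_GENERATOR_TO_PROXY, (epochIndex, ticket) => {
    // handle the received ticket
});
const client = new ClientHandler(tinyChainSpec, STREAM_KIND_GENERATOR_TO_PROXY);
```
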
package/packages/jam/jamnp-s/protocol/ce-131-ce-132-safrole-ticket-distribution.test.js
@@ -3,25 +3,26 @@ import { describe, it } from "node:test";
 import { tryAsEpoch } from "#@typeberry/block";
 import { SignedTicket, tryAsTicketAttempt } from "#@typeberry/block/tickets.js";
 import { Bytes } from "#@typeberry/bytes";
+import { tinyChainSpec } from "#@typeberry/config";
 import { BANDERSNATCH_PROOF_BYTES } from "#@typeberry/crypto";
 import { OK } from "#@typeberry/utils";
 import { ClientHandler, ServerHandler, STREAM_KIND_GENERATOR_TO_PROXY, } from "./ce-131-ce-132-safrole-ticket-distribution.js";
 import { testClientServer } from "./test-utils.js";
 const TEST_EPOCH = tryAsEpoch(1);
 const TEST_TICKET = SignedTicket.create({
-    attempt: tryAsTicketAttempt(0),
+    attempt: tryAsTicketAttempt(0, tinyChainSpec),
     signature: Bytes.zero(BANDERSNATCH_PROOF_BYTES).asOpaque(),
 });
 describe("CE 131 and CE 132: Safrole Ticket Distribution", () => {
     it("Client sends a ticket distribution request and the server receives it", async () => {
         const handlers = testClientServer();
         await new Promise((resolve) => {
-            handlers.server.registerHandlers(new ServerHandler(STREAM_KIND_GENERATOR_TO_PROXY, (epochIndex, ticket) => {
+            handlers.server.registerHandlers(new ServerHandler(tinyChainSpec, STREAM_KIND_GENERATOR_TO_PROXY, (epochIndex, ticket) => {
                 assert.strictEqual(epochIndex, TEST_EPOCH);
                 assert.deepStrictEqual(ticket, TEST_TICKET);
                 resolve(undefined);
             }));
-            handlers.client.registerHandlers(new ClientHandler(STREAM_KIND_GENERATOR_TO_PROXY));
+            handlers.client.registerHandlers(new ClientHandler(tinyChainSpec, STREAM_KIND_GENERATOR_TO_PROXY));
             handlers.client.withNewStream(STREAM_KIND_GENERATOR_TO_PROXY, (handler, sender) => {
                 handler.sendTicket(sender, TEST_EPOCH, TEST_TICKET);
                 return OK;

package/packages/jam/jamnp-s/protocol/up-0-block-announcement.d.ts
@@ -69,7 +69,7 @@ export declare class Announcement extends WithDebug {
     ticketsMarker: import("@typeberry/codec").Descriptor<import("@typeberry/block").TicketsMarker | null, import("@typeberry/codec").ViewOf<import("@typeberry/block").TicketsMarker, {
         tickets: import("@typeberry/codec").Descriptor<readonly import("@typeberry/block").Ticket[] & import("@typeberry/utils").WithOpaque<"EpochLength">, import("@typeberry/codec").SequenceView<import("@typeberry/block").Ticket, import("@typeberry/codec").ViewOf<import("@typeberry/block").Ticket, {
             id: import("@typeberry/codec").Descriptor<import("@typeberry/bytes").Bytes<32>, import("@typeberry/bytes").Bytes<32>>;
-            attempt: import("@typeberry/codec").Descriptor<number & import("@typeberry/numbers").WithBytesRepresentation<1> & import("@typeberry/utils").WithOpaque<"TicketAttempt[0|1|2]">, import("@typeberry/numbers").
+            attempt: import("@typeberry/codec").Descriptor<number & import("@typeberry/numbers").WithBytesRepresentation<1> & import("@typeberry/utils").WithOpaque<"TicketAttempt[0|1|2]">, import("@typeberry/numbers").U32>;
         }>>>;
     }> | null>;
     bandersnatchBlockAuthorIndex: import("@typeberry/codec").Descriptor<number & import("@typeberry/numbers").WithBytesRepresentation<2> & import("@typeberry/utils").WithOpaque<"ValidatorIndex[u16]">, import("@typeberry/bytes").Bytes<2>>;

package/packages/jam/jamnp-s/tasks/ticket-distribution.d.ts
@@ -0,0 +1,34 @@
+import type { Epoch } from "#@typeberry/block";
+import type { SignedTicket } from "#@typeberry/block/tickets.js";
+import type { ChainSpec } from "#@typeberry/config";
+import type { Connections } from "../peers.js";
+import type { StreamManager } from "../stream-manager.js";
+/**
+ * Manages distribution of Safrole tickets to connected peers.
+ *
+ * Uses CE-132 (proxy-to-all) for direct broadcast to all peers.
+ * Implements a maintain pattern similar to SyncTask: tickets are collected
+ * and periodically distributed to peers that haven't received them yet.
+ */
+export declare class TicketDistributionTask {
+    private readonly streamManager;
+    private readonly connections;
+    static start(streamManager: StreamManager, connections: Connections, chainSpec: ChainSpec): TicketDistributionTask;
+    /** Pending tickets waiting to be distributed to peers */
+    private pendingTickets;
+    /** Current epoch being tracked (cleared when epoch changes) */
+    private currentEpoch;
+    private constructor();
+    /**
+     * Should be called periodically to distribute pending tickets to connected peers.
+     */
+    maintainDistribution(): void;
+    /**
+     * Add a ticket to the pending queue for distribution.
+     * Clears pending tickets when epoch changes.
+     * Deduplicates tickets based on signature.
+     */
+    addTicket(epochIndex: Epoch, ticket: SignedTicket): void;
+    private onTicketReceived;
+}
+//# sourceMappingURL=ticket-distribution.d.ts.map

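The declaration above is the whole public surface: `start` wires the CE-131/CE-132 handlers, `addTicket` queues tickets, and `maintainDistribution` flushes the queue. A minimal usage sketch, mirroring the setup in ticket-distribution.test.js further down; `MockNetwork` is the in-package test double (a real node passes its networking stack), and the 1s interval is illustrative, not a value from the package:

```ts
import { tinyChainSpec } from "#@typeberry/config";
import { MockNetwork } from "#@typeberry/networking/testing.js";
import { Connections } from "../peers.js";
import { StreamManager } from "../stream-manager.js";
import { TicketDistributionTask } from "./ticket-distribution.js";

const network = new MockNetwork("example");
const streamManager = new StreamManager();
const connections = new Connections(network);

// `start` registers both the incoming (server) and outgoing (client)
// CE-132 handlers on the stream manager.
const task = TicketDistributionTask.start(streamManager, connections, tinyChainSpec);

// Locally produced tickets enter the same queue as tickets received
// from peers: task.addTicket(epoch, signedTicket);
// A periodic driver then distributes whatever is pending.
setInterval(() => task.maintainDistribution(), 1_000);
```
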
package/packages/jam/jamnp-s/tasks/ticket-distribution.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"ticket-distribution.d.ts","sourceRoot":"","sources":["../../../../../../packages/jam/jamnp-s/tasks/ticket-distribution.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,KAAK,EAAE,MAAM,kBAAkB,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,mBAAmB,CAAC;AAGnD,OAAO,KAAK,EAAW,WAAW,EAAE,MAAM,aAAa,CAAC;AAExD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,sBAAsB,CAAC;AAe1D;;;;;;GAMG;AACH,qBAAa,sBAAsB;IAuB/B,OAAO,CAAC,QAAQ,CAAC,aAAa;IAC9B,OAAO,CAAC,QAAQ,CAAC,WAAW;IAvB9B,MAAM,CAAC,KAAK,CAAC,aAAa,EAAE,aAAa,EAAE,WAAW,EAAE,WAAW,EAAE,SAAS,EAAE,SAAS;IAgBzF,yDAAyD;IACzD,OAAO,CAAC,cAAc,CAA0D;IAChF,+DAA+D;IAC/D,OAAO,CAAC,YAAY,CAAsB;IAE1C,OAAO;IAKP;;OAEG;IACH,oBAAoB;IAkDpB;;;;OAIG;IACH,SAAS,CAAC,UAAU,EAAE,KAAK,EAAE,MAAM,EAAE,YAAY;IAgCjD,OAAO,CAAC,gBAAgB;CAKzB"}

package/packages/jam/jamnp-s/tasks/ticket-distribution.js
@@ -0,0 +1,115 @@
+import { Logger } from "#@typeberry/logger";
+import { OK } from "#@typeberry/utils";
+import { ce131 } from "../protocol/index.js";
+const logger = Logger.new(import.meta.filename, "net:tickets");
+/** Aux data to track which tickets have been sent to each peer (using indices) */
+const TICKET_AUX = {
+    id: Symbol("tickets"),
+};
+/**
+ * Manages distribution of Safrole tickets to connected peers.
+ *
+ * Uses CE-132 (proxy-to-all) for direct broadcast to all peers.
+ * Implements a maintain pattern similar to SyncTask: tickets are collected
+ * and periodically distributed to peers that haven't received them yet.
+ */
+export class TicketDistributionTask {
+    streamManager;
+    connections;
+    static start(streamManager, connections, chainSpec) {
+        const task = new TicketDistributionTask(streamManager, connections);
+        // server mode: receive tickets from peers
+        streamManager.registerIncomingHandlers(new ce131.ServerHandler(chainSpec, ce131.STREAM_KIND_PROXY_TO_ALL, (epochIndex, ticket) => {
+            task.onTicketReceived(epochIndex, ticket);
+        }));
+        // client mode: send tickets to peers
+        streamManager.registerOutgoingHandlers(new ce131.ClientHandler(chainSpec, ce131.STREAM_KIND_PROXY_TO_ALL));
+        return task;
+    }
+    /** Pending tickets waiting to be distributed to peers */
+    pendingTickets = [];
+    /** Current epoch being tracked (cleared when epoch changes) */
+    currentEpoch = null;
+    constructor(streamManager, connections) {
+        this.streamManager = streamManager;
+        this.connections = connections;
+    }
+    /**
+     * Should be called periodically to distribute pending tickets to connected peers.
+     */
+    maintainDistribution() {
+        if (this.currentEpoch === null) {
+            return; // No tickets to distribute yet
+        }
+        /** `this` is mutable and TS can't narrow this.currentEpoch inside the callback closure */
+        const currentEpoch = this.currentEpoch;
+        // Iterate through all pending tickets
+        for (let ticketIdx = 0; ticketIdx < this.pendingTickets.length; ticketIdx++) {
+            const { epochIndex, ticket } = this.pendingTickets[ticketIdx];
+            // Try to send to each connected peer
+            for (const peerInfo of this.connections.getConnectedPeers()) {
+                this.connections.withAuxData(peerInfo.peerId, TICKET_AUX, (maybeAux) => {
+                    const shouldReset = maybeAux === undefined || maybeAux.epoch !== currentEpoch;
+                    const aux = shouldReset ? { epoch: currentEpoch, seen: new Set() } : maybeAux;
+                    if (peerInfo.peerRef === null) {
+                        return aux;
+                    }
+                    // Check if we already sent this ticket to this peer
+                    if (aux.seen.has(ticketIdx)) {
+                        return aux; // Already sent
+                    }
+                    // Send the ticket - only mark as sent after successful send
+                    try {
+                        this.streamManager.withNewStream(peerInfo.peerRef, ce131.STREAM_KIND_PROXY_TO_ALL, (handler, sender) => {
+                            logger.trace `[${peerInfo.peerId}] <-- Sending ticket for epoch ${epochIndex}`;
+                            handler.sendTicket(sender, epochIndex, ticket);
+                            return OK;
+                        });
+                        // Mark as sent only after successful send, so failed sends will be retried
+                        aux.seen.add(ticketIdx);
+                    }
+                    catch (e) {
+                        logger.warn `[${peerInfo.peerId}] Failed to send ticket for epoch ${epochIndex}: ${e}`;
+                    }
+                    return aux;
+                });
+            }
+        }
+    }
+    /**
+     * Add a ticket to the pending queue for distribution.
+     * Clears pending tickets when epoch changes.
+     * Deduplicates tickets based on signature.
+     */
+    addTicket(epochIndex, ticket) {
+        // Check if epoch changed - if so, clear old tickets
+        if (this.currentEpoch !== null && this.currentEpoch !== epochIndex) {
+            logger.log `[addTicket] Epoch changed from ${this.currentEpoch} to ${epochIndex}, clearing ${this.pendingTickets.length} old tickets`;
+            this.pendingTickets = [];
+            // Note: We don't need to clear aux data for all peers here.
+            // The aux data contains the epoch, so maintainDistribution will lazily
+            // reset it when it detects an epoch mismatch. This handles both connected
+            // and disconnected peers correctly.
+        }
+        this.currentEpoch = epochIndex;
+        /**
+         * Deduplicate: check if a ticket with the same signature already exists
+         *
+         * Here we are risking "poisoning" the local pendingTickets - i.e:
+         * 1. The adversary sees a signature and swaps the ticket attempt to something different.
+         * 2. This creates an invalid ticket, but prevents a valid ticket with the same signature from being included and distributed.
+         *
+         * TODO [MaSi]: The poisoning risk should be fixed during implementation of ticket validation.
+         */
+        const isDuplicate = this.pendingTickets.some((pending) => pending.epochIndex === epochIndex && pending.ticket.signature.isEqualTo(ticket.signature));
+        if (!isDuplicate) {
+            this.pendingTickets.push({ epochIndex, ticket });
+            logger.info `[addTicket] Added ticket for epoch ${epochIndex}, total: ${this.pendingTickets.length}`;
+        }
+    }
+    onTicketReceived(epochIndex, ticket) {
+        logger.trace `Received ticket for epoch ${epochIndex}, attempt ${ticket.attempt}`;
+        // Add to pending queue for potential re-distribution
+        this.addTicket(epochIndex, ticket);
+    }
+}

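The per-peer bookkeeping is the interesting part: each peer carries an aux record keyed by `TICKET_AUX`, and delivery is tracked by a ticket's index into `pendingTickets`, so clearing the queue on an epoch change implicitly invalidates old indices, and the epoch stored in the record lets `maintainDistribution` lazily reset stale peers. A typed sketch of that record, reconstructed from the compiled output above (names are a reading of the JS, not the package's source types):

```ts
import type { Epoch } from "#@typeberry/block";

// Shape of the aux data stored per peer via connections.withAuxData.
type TicketAux = {
    // Epoch the `seen` set refers to; a mismatch triggers a lazy reset.
    epoch: Epoch;
    // Indices into `pendingTickets` already delivered to this peer.
    // Marked only after a successful send, so a failed send is retried
    // on the next maintainDistribution pass.
    seen: Set<number>;
};
```
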
package/packages/jam/jamnp-s/tasks/ticket-distribution.test.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"ticket-distribution.test.d.ts","sourceRoot":"","sources":["../../../../../../packages/jam/jamnp-s/tasks/ticket-distribution.test.ts"],"names":[],"mappings":""}

package/packages/jam/jamnp-s/tasks/ticket-distribution.test.js
@@ -0,0 +1,220 @@
+import assert from "node:assert";
+import { describe, it } from "node:test";
+import { setTimeout } from "node:timers/promises";
+import { tryAsEpoch } from "#@typeberry/block";
+import { SignedTicket, tryAsTicketAttempt } from "#@typeberry/block/tickets.js";
+import { Bytes } from "#@typeberry/bytes";
+import { tinyChainSpec } from "#@typeberry/config";
+import { BANDERSNATCH_PROOF_BYTES } from "#@typeberry/crypto";
+import { Logger } from "#@typeberry/logger";
+import { createTestPeerPair, MockNetwork } from "#@typeberry/networking/testing.js";
+import { OK } from "#@typeberry/utils";
+import { Connections } from "../peers.js";
+import { StreamManager } from "../stream-manager.js";
+import { TicketDistributionTask } from "./ticket-distribution.js";
+const logger = Logger.new(import.meta.filename, "test:tickets");
+const TEST_EPOCH = tryAsEpoch(42);
+const OTHER_EPOCH = tryAsEpoch(43);
+function createTestTicket(attempt, signatureByte = 0) {
+    const signatureBytes = Bytes.zero(BANDERSNATCH_PROOF_BYTES);
+    // Make signature unique based on attempt and signatureByte
+    signatureBytes.raw[0] = attempt;
+    signatureBytes.raw[1] = signatureByte;
+    return SignedTicket.create({
+        attempt: tryAsTicketAttempt(attempt, tinyChainSpec),
+        signature: signatureBytes.asOpaque(),
+    });
+}
+describe("TicketDistributionTask", () => {
+    async function init(name) {
+        const network = new MockNetwork(name);
+        const streamManager = new StreamManager();
+        const connections = new Connections(network);
+        // Track received tickets for verification
+        const receivedTickets = [];
+        // Use real TicketDistributionTask
+        const ticketTask = TicketDistributionTask.start(streamManager, connections, tinyChainSpec);
+        // Intercept received tickets by wrapping onTicketReceived behavior
+        // The task already adds received tickets to pending queue via addTicket,
+        // so we can track them by checking the pending queue growth or by
+        // hooking into the CE-131 server handler directly
+        const originalAddTicket = ticketTask.addTicket.bind(ticketTask);
+        ticketTask.addTicket = (epochIndex, ticket) => {
+            receivedTickets.push({ epochIndex, ticket });
+            originalAddTicket(epochIndex, ticket);
+        };
+        // Setup peer listeners for incoming streams
+        network.peers.onPeerConnected((peer) => {
+            peer.addOnIncomingStream((stream) => {
+                streamManager.onIncomingStream(peer, stream);
+                return OK;
+            });
+            return OK;
+        });
+        let connectionIdx = 0;
+        const openConnection = (other) => {
+            const [self, peer] = createTestPeerPair(connectionIdx++, name, other.name);
+            network._peers.peerConnected(peer);
+            other.network._peers.peerConnected(self);
+            return [self, peer];
+        };
+        return {
+            name,
+            ticketTask,
+            network,
+            connections,
+            streamManager,
+            openConnection,
+            receivedTickets,
+        };
+    }
+    async function tick() {
+        logger.log `tick`;
+        await setTimeout(10);
+    }
+    it("should distribute ticket to all connected peers via maintainDistribution", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        const peer2 = await init("peer2");
+        self.openConnection(peer1);
+        self.openConnection(peer2);
+        await tick();
+        const ticket = createTestTicket(0);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // Both peers should have received the ticket
+        assert.strictEqual(peer1.receivedTickets.length, 1);
+        assert.strictEqual(peer1.receivedTickets[0].epochIndex, TEST_EPOCH);
+        assert.deepStrictEqual(peer1.receivedTickets[0].ticket, ticket);
+        assert.strictEqual(peer2.receivedTickets.length, 1);
+        assert.strictEqual(peer2.receivedTickets[0].epochIndex, TEST_EPOCH);
+        assert.deepStrictEqual(peer2.receivedTickets[0].ticket, ticket);
+    });
+    it("should receive tickets from peers", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        self.openConnection(peer1);
+        await tick();
+        const ticket = createTestTicket(1);
+        peer1.ticketTask.addTicket(TEST_EPOCH, ticket);
+        peer1.ticketTask.maintainDistribution();
+        await tick();
+        assert.strictEqual(self.receivedTickets.length, 1);
+        assert.strictEqual(self.receivedTickets[0].epochIndex, TEST_EPOCH);
+        assert.deepStrictEqual(self.receivedTickets[0].ticket, ticket);
+    });
+    it("should handle multiple tickets", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        self.openConnection(peer1);
+        await tick();
+        const ticket0 = createTestTicket(0);
+        const ticket1 = createTestTicket(1);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket0);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket1);
+        self.ticketTask.maintainDistribution();
+        await tick();
+        assert.strictEqual(peer1.receivedTickets.length, 2);
+        assert.deepStrictEqual(peer1.receivedTickets[0].ticket, ticket0);
+        assert.deepStrictEqual(peer1.receivedTickets[1].ticket, ticket1);
+    });
+    it("should handle no connected peers gracefully", async () => {
+        const self = await init("self");
+        const ticket = createTestTicket(0);
+        // Should not throw
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        self.ticketTask.maintainDistribution();
+        await tick();
+    });
+    it("should deduplicate tickets with same signature", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        self.openConnection(peer1);
+        await tick();
+        const ticket = createTestTicket(0);
+        // Add same ticket twice
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // Peer should only receive it once
+        assert.strictEqual(peer1.receivedTickets.length, 1);
+    });
+    it("should not send same ticket to same peer twice", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        self.openConnection(peer1);
+        await tick();
+        const ticket = createTestTicket(0);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        // Call maintainDistribution twice
+        self.ticketTask.maintainDistribution();
+        await tick();
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // Peer should only receive the ticket once (aux data tracks sent indices)
+        assert.strictEqual(peer1.receivedTickets.length, 1);
+    });
+    it("should clear tickets when epoch changes", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        self.openConnection(peer1);
+        await tick();
+        const ticket1 = createTestTicket(0);
+        const ticket2 = createTestTicket(1);
+        // Add ticket for first epoch
+        self.ticketTask.addTicket(TEST_EPOCH, ticket1);
+        // Change epoch - this should clear old tickets
+        self.ticketTask.addTicket(OTHER_EPOCH, ticket2);
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // Peer should only receive the second ticket (first was cleared on epoch change)
+        assert.strictEqual(peer1.receivedTickets.length, 1);
+        assert.strictEqual(peer1.receivedTickets[0].epochIndex, OTHER_EPOCH);
+        assert.deepStrictEqual(peer1.receivedTickets[0].ticket, ticket2);
+    });
+    it("should send new tickets to newly connected peers", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        const peer2 = await init("peer2");
+        // Connect peer1 first
+        self.openConnection(peer1);
+        await tick();
+        const ticket = createTestTicket(0);
+        self.ticketTask.addTicket(TEST_EPOCH, ticket);
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // Now connect peer2 after ticket was already distributed to peer1
+        self.openConnection(peer2);
+        await tick();
+        // Run maintainDistribution again - peer2 should get the ticket
+        self.ticketTask.maintainDistribution();
+        await tick();
+        assert.strictEqual(peer1.receivedTickets.length, 1);
+        assert.strictEqual(peer2.receivedTickets.length, 1);
+        assert.deepStrictEqual(peer2.receivedTickets[0].ticket, ticket);
+    });
+    it("should re-distribute received tickets to other peers", async () => {
+        const self = await init("self");
+        const peer1 = await init("peer1");
+        const peer2 = await init("peer2");
+        // Self connects to both peers
+        self.openConnection(peer1);
+        self.openConnection(peer2);
+        await tick();
+        // peer1 sends a ticket to self
+        const ticket = createTestTicket(0);
+        peer1.ticketTask.addTicket(TEST_EPOCH, ticket);
+        peer1.ticketTask.maintainDistribution();
+        await tick();
+        // Self receives the ticket (via onTicketReceived -> addTicket)
+        assert.strictEqual(self.receivedTickets.length, 1);
+        // Self should re-distribute to peer2 (and peer1, but peer1 already has it)
+        self.ticketTask.maintainDistribution();
+        await tick();
+        // peer2 should now have received the ticket from self
+        assert.strictEqual(peer2.receivedTickets.length, 1);
+        assert.deepStrictEqual(peer2.receivedTickets[0].ticket, ticket);
+    });
+});

package/packages/jam/node/main.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"main.d.ts","sourceRoot":"","sources":["../../../../../packages/jam/node/main.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,UAAU,EAAc,aAAa,EAAE,MAAM,kBAAkB,CAAC;
+{"version":3,"file":"main.d.ts","sourceRoot":"","sources":["../../../../../packages/jam/node/main.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,UAAU,EAAc,aAAa,EAAE,MAAM,kBAAkB,CAAC;AAEzF,OAAO,EAAE,KAAK,SAAS,EAAc,MAAM,mBAAmB,CAAC;AAe/D,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,gCAAgC,CAAC;AACnE,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,sBAAsB,CAAC;AACtD,OAAO,EAAkC,MAAM,EAAW,MAAM,kBAAkB,CAAC;AAKnF,OAAO,KAAK,EAAE,SAAS,EAAiB,MAAM,iBAAiB,CAAC;AAWhE,MAAM,MAAM,OAAO,GAAG;IACpB,SAAS,EAAE,SAAS,CAAC;IACrB,eAAe,CAAC,IAAI,EAAE,UAAU,GAAG,OAAO,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC;IAChE,WAAW,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAAC,MAAM,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC,CAAC;IACtE,oBAAoB,IAAI,OAAO,CAAC,aAAa,CAAC,CAAC;IAC/C,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;CACxB,CAAC;AAEF,wBAAsB,IAAI,CACxB,MAAM,EAAE,SAAS,EACjB,WAAW,EAAE,CAAC,CAAC,EAAE,MAAM,KAAK,MAAM,EAClC,SAAS,EAAE,SAAS,GAAG,IAAI,GAC1B,OAAO,CAAC,OAAO,CAAC,CAuKlB"}

package/packages/jam/node/main.js
@@ -1,4 +1,5 @@
 import { isMainThread } from "node:worker_threads";
+import { AUTHORSHIP_NETWORK_PORT } from "#@typeberry/comms-authorship-network";
 import { PvmBackend } from "#@typeberry/config";
 import { initWasm } from "#@typeberry/crypto";
 import { deriveBandersnatchSecretKey, deriveEd25519SecretKey, trivialSeed, } from "#@typeberry/crypto/key-derivation.js";
@@ -8,8 +9,8 @@ import { NetworkingConfig } from "#@typeberry/jam-network";
 import { Listener } from "#@typeberry/listener";
 import { tryAsU16, tryAsU32 } from "#@typeberry/numbers";
 import { CURRENT_SUITE, CURRENT_VERSION, Result, version } from "#@typeberry/utils";
-import { DirectWorkerConfig } from "#@typeberry/workers-api";
-import { InMemWorkerConfig, LmdbWorkerConfig } from "#@typeberry/workers-api-node";
+import { DirectPort, DirectWorkerConfig } from "#@typeberry/workers-api";
+import { InMemWorkerConfig, LmdbWorkerConfig, ThreadPort } from "#@typeberry/workers-api-node";
 import { getChainSpec, getDatabasePath, initializeDatabase, logger } from "./common.js";
 import { initializeExtensions } from "./extensions.js";
 import * as metrics from "./metrics.js";
@@ -36,8 +37,8 @@ export async function main(config, withRelPath, telemetry) {
         }),
     };
     const importerConfig = isInMemory
-        ? {
-        : {
+        ? { isInMemory, config: InMemWorkerConfig.new(importerParams) }
+        : { isInMemory, config: LmdbWorkerConfig.new(importerParams) };
     // Initialize the database with genesis state and block if there isn't one.
     logger.info `🛢️ Opening database at ${dbPath}`;
     const rootDb = importerConfig.config.openDatabase({ readonly: false });
@@ -50,7 +51,7 @@ export async function main(config, withRelPath, telemetry) {
     // Start block importer
     let importer;
     let closeImporter;
-    if (importerConfig.
+    if (importerConfig.isInMemory) {
         ({ importer, finish: closeImporter } = await startImporterDirect(DirectWorkerConfig.new({
             ...importerConfig.config,
             blocksDb: rootDb.getBlocksDb(),
@@ -91,9 +92,24 @@ export async function main(config, withRelPath, telemetry) {
             },
         ],
     };
-    const
-
-
+    const { networkingParams, authorshipParams } = isInMemory
+        ? (() => {
+            const [tx, rx] = DirectPort.pair();
+            return {
+                networkingParams: { isInMemory, rootDb, authorshipPort: tx },
+                authorshipParams: { isInMemory, rootDb, networkingPort: rx },
+            };
+        })()
+        : (() => {
+            const [tx, rx] = ThreadPort.pair(chainSpec);
+            return {
+                networkingParams: { isInMemory, rootDb, authorshipPort: tx },
+                authorshipParams: { isInMemory, rootDb, networkingPort: rx },
+            };
+        })();
+    // Networking initialization (before authorship so we can relay tickets)
+    const { closeNetwork } = await initNetwork(importer, networkingParams, baseConfig, genesisHeaderHash, config.network, bestHeader);
+    const { closeAuthorship } = await initAuthorship(importer, config.isAuthoring, config.isFastForward, authorshipParams, baseConfig, authorshipKeys);
     const api = {
         chainSpec,
         async importBlock(block) {
@@ -110,14 +126,14 @@ export async function main(config, withRelPath, telemetry) {
             return importer.sendGetBestStateRootHash();
         },
         async close() {
-            logger.log `[main] ☠️ Closing the importer`;
-            await closeImporter();
-            logger.log `[main] ☠️ Closing the extensions`;
-            closeExtensions();
             logger.log `[main] ☠️ Closing the authorship module`;
             await closeAuthorship();
             logger.log `[main] ☠️ Closing the networking module`;
             await closeNetwork();
+            logger.log `[main] ☠️ Closing the importer`;
+            await closeImporter();
+            logger.log `[main] ☠️ Closing the extensions`;
+            closeExtensions();
             logger.log `[main] 🛢️ Closing the database`;
             await rootDb.close();
             logger.log `[main] 📳 Closing telemetry`;
@@ -127,35 +143,48 @@ export async function main(config, withRelPath, telemetry) {
     };
     return api;
 }
-const initAuthorship = async (importer, isAuthoring, isFastForward,
+const initAuthorship = async (importer, isAuthoring, isFastForward, params, baseConfig, authorshipKeys) => {
     if (!isAuthoring) {
         logger.log `✍️ Authorship off: disabled`;
-        return
+        return {
+            closeAuthorship: () => {
+                params.networkingPort.close();
+                return Promise.resolve();
+            },
+            authorshipWorker: null,
+        };
     }
     logger.info `✍️ Starting block generator.`;
     const workerParams = { ...authorshipKeys, isFastForward };
-    const { generator, finish } = isInMemory
+    const { generator, worker, finish } = params.isInMemory
         ? await startBlockGenerator(DirectWorkerConfig.new({
             ...baseConfig,
-            blocksDb: rootDb.getBlocksDb(),
-            statesDb: rootDb.getStatesDb(),
+            blocksDb: params.rootDb.getBlocksDb(),
+            statesDb: params.rootDb.getStatesDb(),
             workerParams,
-        }))
+        }), params.networkingPort)
         : await spawnBlockGeneratorWorker(LmdbWorkerConfig.new({
            ...baseConfig,
            workerParams,
+            ports: new Map([[AUTHORSHIP_NETWORK_PORT, params.networkingPort]]),
        }));
    // relay blocks from generator to importer
    generator.setOnBlock(async (block) => {
        logger.log `✍️ Produced block at ${block.header.view().timeSlotIndex.materialize()}`;
        await importer.sendImportBlock(block);
    });
-    return finish;
+    return { closeAuthorship: finish, authorshipWorker: worker };
 };
-const initNetwork = async (importer,
+const initNetwork = async (importer, params, baseConfig, genesisHeaderHash, networkConfig, bestHeader) => {
     if (networkConfig === null) {
         logger.log `🛜 Networking off: no config`;
-        return
+        return {
+            closeNetwork: async () => {
+                params.authorshipPort.close();
+            },
+            networkApi: null,
+            networkWorker: null,
+        };
     }
     const { key, host, port, bootnodes } = networkConfig;
     const networkingConfig = NetworkingConfig.create({
@@ -165,16 +194,17 @@ const initNetwork = async (importer, rootDb, baseConfig, genesisHeaderHash, netw
         port: tryAsU16(port),
         bootnodes: bootnodes.map((node) => node.toString()),
     });
-    const { network, finish } = isInMemory
+    const { network, worker, finish } = params.isInMemory
        ? await startNetwork(DirectWorkerConfig.new({
            ...baseConfig,
-            blocksDb: rootDb.getBlocksDb(),
-            statesDb: rootDb.getStatesDb(),
+            blocksDb: params.rootDb.getBlocksDb(),
+            statesDb: params.rootDb.getStatesDb(),
            workerParams: networkingConfig,
-        }))
+        }), params.authorshipPort)
        : await spawnNetworkWorker(LmdbWorkerConfig.new({
            ...baseConfig,
            workerParams: networkingConfig,
+            ports: new Map([[AUTHORSHIP_NETWORK_PORT, params.authorshipPort]]),
        }));
    // relay blocks from networking to importer
    network.setOnBlocks(async (newBlocks) => {
@@ -186,5 +216,5 @@ const initNetwork = async (importer, rootDb, baseConfig, genesisHeaderHash, netw
     bestHeader.on((header) => {
         network.sendNewHeader(header);
     });
-    return finish;
+    return { closeNetwork: finish, networkApi: network, networkWorker: worker };
 };

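The core of the main.js change is a bidirectional port pair linking the networking worker and the block-authorship worker, so locally generated Safrole tickets can be relayed onto the CE-131/CE-132 streams: `DirectPort.pair()` when everything runs in one process, `ThreadPort.pair(chainSpec)` across worker threads. A sketch of that pairing, following the names in the diff and assuming both `pair()` calls return the same two-ended tuple (which end is "tx" vs "rx" mirrors the diff, not a documented contract):

```ts
import type { ChainSpec } from "#@typeberry/config";
import { DirectPort } from "#@typeberry/workers-api";
import { ThreadPort } from "#@typeberry/workers-api-node";

// The `tx` end is handed to the networking worker as `authorshipPort`,
// the `rx` end to the authorship worker as `networkingPort`; in the
// worker-thread case both are also registered under AUTHORSHIP_NETWORK_PORT
// in the worker config's `ports` map.
function makeAuthorshipNetworkPorts(isInMemory: boolean, chainSpec: ChainSpec) {
    const [tx, rx] = isInMemory ? DirectPort.pair() : ThreadPort.pair(chainSpec);
    return { authorshipPort: tx, networkingPort: rx };
}
```
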