@aztec/p2p 0.77.0-testnet-ignition.29 → 0.77.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/client/factory.js +2 -2
- package/dest/testbench/testbench.js +12 -92
- package/dest/testbench/worker_client_manager.d.ts +56 -0
- package/dest/testbench/worker_client_manager.d.ts.map +1 -0
- package/dest/testbench/worker_client_manager.js +266 -0
- package/package.json +10 -10
- package/src/client/factory.ts +2 -2
- package/src/testbench/testbench.ts +16 -107
- package/src/testbench/worker_client_manager.ts +318 -0
package/dest/client/factory.js
CHANGED
|
@@ -14,8 +14,8 @@ export const createP2PClient = async (clientType, _config, l2BlockSource, proofV
|
|
|
14
14
|
..._config
|
|
15
15
|
};
|
|
16
16
|
const logger = deps.logger ?? createLogger('p2p');
|
|
17
|
-
const store = deps.store ?? await createStore('p2p', config, createLogger('p2p:lmdb-v2'));
|
|
18
|
-
const archive = await createStore('p2p-archive', config, createLogger('p2p-archive:lmdb-v2'));
|
|
17
|
+
const store = deps.store ?? await createStore('p2p', 1, config, createLogger('p2p:lmdb-v2'));
|
|
18
|
+
const archive = await createStore('p2p-archive', 1, config, createLogger('p2p-archive:lmdb-v2'));
|
|
19
19
|
const mempools = {
|
|
20
20
|
txPool: deps.txPool ?? new AztecKVTxPool(store, archive, telemetry, config.archivedTxLimit),
|
|
21
21
|
attestationPool: clientType === P2PClientType.Full ? deps.attestationPool ?? new InMemoryAttestationPool(telemetry) : undefined
|
|
@@ -1,98 +1,13 @@
|
|
|
1
|
-
import { EthAddress } from '@aztec/foundation/eth-address';
|
|
2
1
|
import { createLogger } from '@aztec/foundation/log';
|
|
3
2
|
import { sleep } from '@aztec/foundation/sleep';
|
|
4
3
|
import { ClientIvcProof } from '@aztec/stdlib/proofs';
|
|
5
4
|
import { mockTx } from '@aztec/stdlib/testing';
|
|
6
|
-
import
|
|
5
|
+
import assert from 'assert';
|
|
7
6
|
import path from 'path';
|
|
8
7
|
import { fileURLToPath } from 'url';
|
|
9
|
-
import {
|
|
10
|
-
import { generatePeerIdPrivateKeys } from '../test-helpers/generate-peer-id-private-keys.js';
|
|
11
|
-
import { getPorts } from '../test-helpers/get-ports.js';
|
|
12
|
-
import { makeEnrs } from '../test-helpers/make-enrs.js';
|
|
8
|
+
import { WorkerClientManager, testChainConfig } from './worker_client_manager.js';
|
|
13
9
|
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
|
14
|
-
const workerPath = path.join(__dirname, '../../dest/testbench/p2p_client_testbench_worker.js');
|
|
15
10
|
const logger = createLogger('testbench');
|
|
16
|
-
let processes = [];
|
|
17
|
-
const testChainConfig = {
|
|
18
|
-
l1ChainId: 31337,
|
|
19
|
-
version: 1,
|
|
20
|
-
l1Contracts: {
|
|
21
|
-
rollupAddress: EthAddress.random()
|
|
22
|
-
}
|
|
23
|
-
};
|
|
24
|
-
/**
|
|
25
|
-
* Cleanup function to kill all child processes
|
|
26
|
-
*/ async function cleanup() {
|
|
27
|
-
logger.info('Cleaning up processes...');
|
|
28
|
-
await Promise.all(processes.map((proc)=>new Promise((resolve)=>{
|
|
29
|
-
proc.once('exit', ()=>resolve());
|
|
30
|
-
proc.send({
|
|
31
|
-
type: 'STOP'
|
|
32
|
-
});
|
|
33
|
-
})));
|
|
34
|
-
process.exit(0);
|
|
35
|
-
}
|
|
36
|
-
// Handle cleanup on process termination
|
|
37
|
-
process.on('SIGINT', ()=>void cleanup());
|
|
38
|
-
process.on('SIGTERM', ()=>void cleanup());
|
|
39
|
-
/**
|
|
40
|
-
* Creates a number of worker clients in separate processes
|
|
41
|
-
* All are configured to connect to each other and overrided with the test specific config
|
|
42
|
-
*
|
|
43
|
-
* @param numberOfClients - The number of clients to create
|
|
44
|
-
* @param p2pConfig - The P2P config to use for the clients
|
|
45
|
-
* @returns The ENRs of the created clients
|
|
46
|
-
*/ async function makeWorkerClients(numberOfClients, p2pConfig) {
|
|
47
|
-
const peerIdPrivateKeys = generatePeerIdPrivateKeys(numberOfClients);
|
|
48
|
-
const ports = await getPorts(numberOfClients);
|
|
49
|
-
const peerEnrs = await makeEnrs(peerIdPrivateKeys, ports, testChainConfig);
|
|
50
|
-
processes = [];
|
|
51
|
-
const readySignals = [];
|
|
52
|
-
for(let i = 0; i < numberOfClients; i++){
|
|
53
|
-
logger.info(`Creating client ${i}`);
|
|
54
|
-
const addr = `127.0.0.1:${ports[i]}`;
|
|
55
|
-
const listenAddr = `0.0.0.0:${ports[i]}`;
|
|
56
|
-
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
57
|
-
const otherNodes = peerEnrs.filter((_, ind)=>ind < Math.min(i, 10));
|
|
58
|
-
const config = {
|
|
59
|
-
...getP2PDefaultConfig(),
|
|
60
|
-
p2pEnabled: true,
|
|
61
|
-
peerIdPrivateKey: peerIdPrivateKeys[i],
|
|
62
|
-
tcpListenAddress: listenAddr,
|
|
63
|
-
udpListenAddress: listenAddr,
|
|
64
|
-
tcpAnnounceAddress: addr,
|
|
65
|
-
udpAnnounceAddress: addr,
|
|
66
|
-
bootstrapNodes: [
|
|
67
|
-
...otherNodes
|
|
68
|
-
],
|
|
69
|
-
...p2pConfig
|
|
70
|
-
};
|
|
71
|
-
const childProcess = fork(workerPath);
|
|
72
|
-
childProcess.send({
|
|
73
|
-
type: 'START',
|
|
74
|
-
config,
|
|
75
|
-
clientIndex: i
|
|
76
|
-
});
|
|
77
|
-
// Wait for ready signal
|
|
78
|
-
readySignals.push(new Promise((resolve, reject)=>{
|
|
79
|
-
childProcess.once('message', (msg)=>{
|
|
80
|
-
if (msg.type === 'READY') {
|
|
81
|
-
resolve(undefined);
|
|
82
|
-
}
|
|
83
|
-
if (msg.type === 'ERROR') {
|
|
84
|
-
reject(new Error(msg.error));
|
|
85
|
-
}
|
|
86
|
-
});
|
|
87
|
-
}));
|
|
88
|
-
processes.push(childProcess);
|
|
89
|
-
}
|
|
90
|
-
// Wait for peers to all connect with each other
|
|
91
|
-
await sleep(4000);
|
|
92
|
-
// Wait for all peers to be booted up
|
|
93
|
-
await Promise.all(readySignals);
|
|
94
|
-
return peerEnrs;
|
|
95
|
-
}
|
|
96
11
|
async function main() {
|
|
97
12
|
try {
|
|
98
13
|
// Read configuration file name from command line args
|
|
@@ -112,7 +27,8 @@ async function main() {
|
|
|
112
27
|
};
|
|
113
28
|
const numberOfClients = config.default.numberOfClients;
|
|
114
29
|
// Setup clients in separate processes
|
|
115
|
-
|
|
30
|
+
const workerClientManager = new WorkerClientManager(logger, testConfig);
|
|
31
|
+
await workerClientManager.makeWorkerClients(numberOfClients);
|
|
116
32
|
// wait a bit longer for all peers to be ready
|
|
117
33
|
await sleep(5000);
|
|
118
34
|
logger.info('Workers Ready');
|
|
@@ -120,7 +36,7 @@ async function main() {
|
|
|
120
36
|
const tx = await mockTx(1, {
|
|
121
37
|
clientIvcProof: ClientIvcProof.random()
|
|
122
38
|
});
|
|
123
|
-
processes[0].send({
|
|
39
|
+
workerClientManager.processes[0].send({
|
|
124
40
|
type: 'SEND_TX',
|
|
125
41
|
tx: tx.toBuffer()
|
|
126
42
|
});
|
|
@@ -128,14 +44,18 @@ async function main() {
|
|
|
128
44
|
// Give time for message propagation
|
|
129
45
|
await sleep(30000);
|
|
130
46
|
logger.info('Checking message propagation results');
|
|
131
|
-
|
|
47
|
+
// Check message propagation results
|
|
48
|
+
const numberOfClientsThatReceivedMessage = workerClientManager.numberOfClientsThatReceivedMessage();
|
|
49
|
+
logger.info(`Number of clients that received message: ${numberOfClientsThatReceivedMessage}`);
|
|
50
|
+
assert(numberOfClientsThatReceivedMessage === numberOfClients - 1);
|
|
51
|
+
logger.info('Test passed, cleaning up');
|
|
52
|
+
// cleanup
|
|
53
|
+
await workerClientManager.cleanup();
|
|
132
54
|
} catch (error) {
|
|
133
55
|
logger.error('Test failed with error:', error);
|
|
134
|
-
await cleanup();
|
|
135
56
|
process.exit(1);
|
|
136
57
|
}
|
|
137
58
|
}
|
|
138
59
|
main().catch((error)=>{
|
|
139
60
|
logger.error('Unhandled error:', error);
|
|
140
|
-
cleanup().catch(()=>process.exit(1));
|
|
141
61
|
});
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
/// <reference types="node" resolution-mode="require"/>
|
|
2
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
3
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
4
|
+
import { type ChildProcess } from 'child_process';
|
|
5
|
+
import { type P2PConfig } from '../config.js';
|
|
6
|
+
declare const testChainConfig: ChainConfig;
|
|
7
|
+
declare class WorkerClientManager {
|
|
8
|
+
processes: ChildProcess[];
|
|
9
|
+
peerIdPrivateKeys: string[];
|
|
10
|
+
peerEnrs: string[];
|
|
11
|
+
ports: number[];
|
|
12
|
+
private p2pConfig;
|
|
13
|
+
private logger;
|
|
14
|
+
private messageReceivedByClient;
|
|
15
|
+
constructor(logger: Logger, p2pConfig: Partial<P2PConfig>);
|
|
16
|
+
destroy(): void;
|
|
17
|
+
/**
|
|
18
|
+
* Creates address strings from a port
|
|
19
|
+
*/
|
|
20
|
+
private getAddresses;
|
|
21
|
+
/**
|
|
22
|
+
* Creates a client configuration object
|
|
23
|
+
*/
|
|
24
|
+
private createClientConfig;
|
|
25
|
+
/**
|
|
26
|
+
* Spawns a worker process and returns a promise that resolves when the worker is ready
|
|
27
|
+
*/
|
|
28
|
+
private spawnWorkerProcess;
|
|
29
|
+
/**
|
|
30
|
+
* Creates a number of worker clients in separate processes
|
|
31
|
+
* All are configured to connect to each other and overrided with the test specific config
|
|
32
|
+
*
|
|
33
|
+
* @param numberOfClients - The number of clients to create
|
|
34
|
+
* @returns The ENRs of the created clients
|
|
35
|
+
*/
|
|
36
|
+
makeWorkerClients(numberOfClients: number): Promise<string[]>;
|
|
37
|
+
purgeMessageReceivedByClient(): void;
|
|
38
|
+
numberOfClientsThatReceivedMessage(): number;
|
|
39
|
+
/**
|
|
40
|
+
* Changes the port for a specific client
|
|
41
|
+
*
|
|
42
|
+
* @param clientIndex - The index of the client to change port for
|
|
43
|
+
* @param newPort - The new port to use
|
|
44
|
+
*/
|
|
45
|
+
changePort(clientIndex: number, newPort: number): Promise<void>;
|
|
46
|
+
/**
|
|
47
|
+
* Terminate a single process with timeout and force kill if needed
|
|
48
|
+
*/
|
|
49
|
+
private terminateProcess;
|
|
50
|
+
/**
|
|
51
|
+
* Cleans up all worker processes with timeout and force kill if needed
|
|
52
|
+
*/
|
|
53
|
+
cleanup(): Promise<void>;
|
|
54
|
+
}
|
|
55
|
+
export { WorkerClientManager, testChainConfig };
|
|
56
|
+
//# sourceMappingURL=worker_client_manager.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"worker_client_manager.d.ts","sourceRoot":"","sources":["../../src/testbench/worker_client_manager.ts"],"names":[],"mappings":";AACA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AAEpD,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,sBAAsB,CAAC;AAExD,OAAO,EAAE,KAAK,YAAY,EAAQ,MAAM,eAAe,CAAC;AAIxD,OAAO,EAAE,KAAK,SAAS,EAAuB,MAAM,cAAc,CAAC;AAQnE,QAAA,MAAM,eAAe,EAAE,WAMtB,CAAC;AAEF,cAAM,mBAAmB;IAChB,SAAS,EAAE,YAAY,EAAE,CAAM;IAC/B,iBAAiB,EAAE,MAAM,EAAE,CAAM;IACjC,QAAQ,EAAE,MAAM,EAAE,CAAM;IACxB,KAAK,EAAE,MAAM,EAAE,CAAM;IAC5B,OAAO,CAAC,SAAS,CAAqB;IACtC,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,uBAAuB,CAAgB;gBAEnC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,OAAO,CAAC,SAAS,CAAC;IAKzD,OAAO;IAOP;;OAEG;IACH,OAAO,CAAC,YAAY;IAOpB;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAgB1B;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAoD1B;;;;;;OAMG;IACG,iBAAiB,CAAC,eAAe,EAAE,MAAM;IA2C/C,4BAA4B;IAI5B,kCAAkC;IAIlC;;;;;OAKG;IACG,UAAU,CAAC,WAAW,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM;IA4CrD;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAsCxB;;OAEG;IACG,OAAO;CAiCd;AAED,OAAO,EAAE,mBAAmB,EAAE,eAAe,EAAE,CAAC"}
|
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
import { EthAddress } from '@aztec/foundation/eth-address';
|
|
2
|
+
import { sleep } from '@aztec/foundation/sleep';
|
|
3
|
+
import { fork } from 'child_process';
|
|
4
|
+
import path from 'path';
|
|
5
|
+
import { fileURLToPath } from 'url';
|
|
6
|
+
import { getP2PDefaultConfig } from '../config.js';
|
|
7
|
+
import { generatePeerIdPrivateKeys } from '../test-helpers/generate-peer-id-private-keys.js';
|
|
8
|
+
import { getPorts } from '../test-helpers/get-ports.js';
|
|
9
|
+
import { makeEnr, makeEnrs } from '../test-helpers/make-enrs.js';
|
|
10
|
+
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
|
11
|
+
const workerPath = path.join(__dirname, '../../dest/testbench/p2p_client_testbench_worker.js');
|
|
12
|
+
const testChainConfig = {
|
|
13
|
+
l1ChainId: 31337,
|
|
14
|
+
version: 1,
|
|
15
|
+
l1Contracts: {
|
|
16
|
+
rollupAddress: EthAddress.random()
|
|
17
|
+
}
|
|
18
|
+
};
|
|
19
|
+
class WorkerClientManager {
|
|
20
|
+
processes = [];
|
|
21
|
+
peerIdPrivateKeys = [];
|
|
22
|
+
peerEnrs = [];
|
|
23
|
+
ports = [];
|
|
24
|
+
p2pConfig;
|
|
25
|
+
logger;
|
|
26
|
+
messageReceivedByClient = [];
|
|
27
|
+
constructor(logger, p2pConfig){
|
|
28
|
+
this.logger = logger;
|
|
29
|
+
this.p2pConfig = p2pConfig;
|
|
30
|
+
}
|
|
31
|
+
destroy() {
|
|
32
|
+
this.cleanup().catch((error)=>{
|
|
33
|
+
this.logger.error('Failed to cleanup worker client manager', error);
|
|
34
|
+
process.exit(1);
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
/**
|
|
38
|
+
* Creates address strings from a port
|
|
39
|
+
*/ getAddresses(port) {
|
|
40
|
+
return {
|
|
41
|
+
addr: `127.0.0.1:${port}`,
|
|
42
|
+
listenAddr: `0.0.0.0:${port}`
|
|
43
|
+
};
|
|
44
|
+
}
|
|
45
|
+
/**
|
|
46
|
+
* Creates a client configuration object
|
|
47
|
+
*/ createClientConfig(clientIndex, port, otherNodes) {
|
|
48
|
+
const { addr, listenAddr } = this.getAddresses(port);
|
|
49
|
+
return {
|
|
50
|
+
...getP2PDefaultConfig(),
|
|
51
|
+
p2pEnabled: true,
|
|
52
|
+
peerIdPrivateKey: this.peerIdPrivateKeys[clientIndex],
|
|
53
|
+
tcpListenAddress: listenAddr,
|
|
54
|
+
udpListenAddress: listenAddr,
|
|
55
|
+
tcpAnnounceAddress: addr,
|
|
56
|
+
udpAnnounceAddress: addr,
|
|
57
|
+
bootstrapNodes: [
|
|
58
|
+
...otherNodes
|
|
59
|
+
],
|
|
60
|
+
...this.p2pConfig
|
|
61
|
+
};
|
|
62
|
+
}
|
|
63
|
+
/**
|
|
64
|
+
* Spawns a worker process and returns a promise that resolves when the worker is ready
|
|
65
|
+
*/ spawnWorkerProcess(config, clientIndex) {
|
|
66
|
+
const childProcess = fork(workerPath);
|
|
67
|
+
childProcess.send({
|
|
68
|
+
type: 'START',
|
|
69
|
+
config,
|
|
70
|
+
clientIndex
|
|
71
|
+
});
|
|
72
|
+
// Handle unexpected child process exit
|
|
73
|
+
childProcess.on('exit', (code, signal)=>{
|
|
74
|
+
if (code !== 0) {
|
|
75
|
+
this.logger.warn(`Worker ${clientIndex} exited unexpectedly with code ${code} and signal ${signal}`);
|
|
76
|
+
}
|
|
77
|
+
});
|
|
78
|
+
childProcess.on('message', (msg)=>{
|
|
79
|
+
if (msg.type === 'GOSSIP_RECEIVED') {
|
|
80
|
+
this.messageReceivedByClient[clientIndex] = msg.count;
|
|
81
|
+
}
|
|
82
|
+
});
|
|
83
|
+
// Create ready signal promise
|
|
84
|
+
const readySignal = new Promise((resolve, reject)=>{
|
|
85
|
+
// Set a timeout to avoid hanging indefinitely
|
|
86
|
+
const timeout = setTimeout(()=>{
|
|
87
|
+
reject(new Error(`Timeout waiting for worker ${clientIndex} to be ready`));
|
|
88
|
+
}, 30000); // 30 second timeout
|
|
89
|
+
childProcess.once('message', (msg)=>{
|
|
90
|
+
clearTimeout(timeout);
|
|
91
|
+
if (msg.type === 'READY') {
|
|
92
|
+
resolve();
|
|
93
|
+
}
|
|
94
|
+
// For future use
|
|
95
|
+
if (msg.type === 'ERROR') {
|
|
96
|
+
reject(new Error(msg.error));
|
|
97
|
+
}
|
|
98
|
+
});
|
|
99
|
+
// Also resolve/reject if process exits before sending message
|
|
100
|
+
childProcess.once('exit', (code)=>{
|
|
101
|
+
clearTimeout(timeout);
|
|
102
|
+
if (code === 0) {
|
|
103
|
+
resolve();
|
|
104
|
+
} else {
|
|
105
|
+
reject(new Error(`Worker ${clientIndex} exited with code ${code} before becoming ready`));
|
|
106
|
+
}
|
|
107
|
+
});
|
|
108
|
+
});
|
|
109
|
+
return [
|
|
110
|
+
childProcess,
|
|
111
|
+
readySignal
|
|
112
|
+
];
|
|
113
|
+
}
|
|
114
|
+
/**
|
|
115
|
+
* Creates a number of worker clients in separate processes
|
|
116
|
+
* All are configured to connect to each other and overrided with the test specific config
|
|
117
|
+
*
|
|
118
|
+
* @param numberOfClients - The number of clients to create
|
|
119
|
+
* @returns The ENRs of the created clients
|
|
120
|
+
*/ async makeWorkerClients(numberOfClients) {
|
|
121
|
+
try {
|
|
122
|
+
this.messageReceivedByClient = new Array(numberOfClients).fill(0);
|
|
123
|
+
this.peerIdPrivateKeys = generatePeerIdPrivateKeys(numberOfClients);
|
|
124
|
+
this.ports = await getPorts(numberOfClients);
|
|
125
|
+
this.peerEnrs = await makeEnrs(this.peerIdPrivateKeys, this.ports, testChainConfig);
|
|
126
|
+
this.processes = [];
|
|
127
|
+
const readySignals = [];
|
|
128
|
+
for(let i = 0; i < numberOfClients; i++){
|
|
129
|
+
this.logger.info(`Creating client ${i}`);
|
|
130
|
+
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
131
|
+
const otherNodes = this.peerEnrs.filter((_, ind)=>ind < Math.min(i, 10));
|
|
132
|
+
const config = this.createClientConfig(i, this.ports[i], otherNodes);
|
|
133
|
+
const [childProcess, readySignal] = this.spawnWorkerProcess(config, i);
|
|
134
|
+
readySignals.push(readySignal);
|
|
135
|
+
this.processes.push(childProcess);
|
|
136
|
+
}
|
|
137
|
+
// Wait for peers to all connect with each other
|
|
138
|
+
await sleep(10000);
|
|
139
|
+
// Wait for all peers to be booted up with timeout
|
|
140
|
+
await Promise.race([
|
|
141
|
+
Promise.all(readySignals),
|
|
142
|
+
new Promise((_, reject)=>setTimeout(()=>reject(new Error('Timeout waiting for all workers to be ready')), 30000))
|
|
143
|
+
]);
|
|
144
|
+
return this.peerEnrs;
|
|
145
|
+
} catch (error) {
|
|
146
|
+
// Clean up any processes that were created if there's an error
|
|
147
|
+
this.logger.error('Error during makeWorkerClients:', error);
|
|
148
|
+
await this.cleanup();
|
|
149
|
+
throw error;
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
purgeMessageReceivedByClient() {
|
|
153
|
+
this.messageReceivedByClient = new Array(this.processes.length).fill(0);
|
|
154
|
+
}
|
|
155
|
+
numberOfClientsThatReceivedMessage() {
|
|
156
|
+
return this.messageReceivedByClient.filter((count)=>count > 0).length;
|
|
157
|
+
}
|
|
158
|
+
/**
|
|
159
|
+
* Changes the port for a specific client
|
|
160
|
+
*
|
|
161
|
+
* @param clientIndex - The index of the client to change port for
|
|
162
|
+
* @param newPort - The new port to use
|
|
163
|
+
*/ async changePort(clientIndex, newPort) {
|
|
164
|
+
try {
|
|
165
|
+
if (clientIndex < 0 || clientIndex >= this.processes.length) {
|
|
166
|
+
throw new Error(`Invalid client index: ${clientIndex}`);
|
|
167
|
+
}
|
|
168
|
+
this.processes[clientIndex].send({
|
|
169
|
+
type: 'STOP'
|
|
170
|
+
});
|
|
171
|
+
// Wait for the process to be ready with a timeout
|
|
172
|
+
await sleep(10000);
|
|
173
|
+
this.logger.info(`Changing port for client ${clientIndex} to ${newPort}`);
|
|
174
|
+
// Update the port in the ports array
|
|
175
|
+
this.ports[clientIndex] = newPort;
|
|
176
|
+
// Update the port in the peerEnrs array
|
|
177
|
+
this.peerEnrs[clientIndex] = await makeEnr(this.peerIdPrivateKeys[clientIndex], newPort, testChainConfig);
|
|
178
|
+
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
179
|
+
const otherNodes = this.peerEnrs.filter((_, ind)=>ind !== clientIndex && ind < Math.min(this.peerEnrs.length, 10));
|
|
180
|
+
const config = this.createClientConfig(clientIndex, newPort, otherNodes);
|
|
181
|
+
const [childProcess, readySignal] = this.spawnWorkerProcess(config, clientIndex);
|
|
182
|
+
this.processes[clientIndex] = childProcess;
|
|
183
|
+
// Wait for the process to be ready with a timeout
|
|
184
|
+
await Promise.race([
|
|
185
|
+
readySignal,
|
|
186
|
+
new Promise((_, reject)=>setTimeout(()=>reject(new Error(`Timeout waiting for client ${clientIndex} to be ready`)), 30000))
|
|
187
|
+
]);
|
|
188
|
+
} catch (error) {
|
|
189
|
+
this.logger.error(`Error during changePort for client ${clientIndex}:`, error);
|
|
190
|
+
// Only clean up the specific process that had an issue
|
|
191
|
+
await this.terminateProcess(this.processes[clientIndex], clientIndex);
|
|
192
|
+
throw error;
|
|
193
|
+
}
|
|
194
|
+
}
|
|
195
|
+
/**
|
|
196
|
+
* Terminate a single process with timeout and force kill if needed
|
|
197
|
+
*/ terminateProcess(process1, index) {
|
|
198
|
+
if (!process1 || process1.killed) {
|
|
199
|
+
return Promise.resolve();
|
|
200
|
+
}
|
|
201
|
+
return new Promise((resolve)=>{
|
|
202
|
+
// Set a timeout for the graceful exit
|
|
203
|
+
const forceKillTimeout = setTimeout(()=>{
|
|
204
|
+
this.logger.warn(`Process ${index} didn't exit gracefully, force killing...`);
|
|
205
|
+
try {
|
|
206
|
+
process1.kill('SIGKILL'); // Force kill
|
|
207
|
+
} catch (e) {
|
|
208
|
+
this.logger.error(`Error force killing process ${index}:`, e);
|
|
209
|
+
}
|
|
210
|
+
}, 10000); // 10 second timeout for graceful exit
|
|
211
|
+
// Listen for process exit
|
|
212
|
+
process1.once('exit', ()=>{
|
|
213
|
+
clearTimeout(forceKillTimeout);
|
|
214
|
+
resolve();
|
|
215
|
+
});
|
|
216
|
+
// Try to gracefully stop the process
|
|
217
|
+
try {
|
|
218
|
+
process1.send({
|
|
219
|
+
type: 'STOP'
|
|
220
|
+
});
|
|
221
|
+
} catch (e) {
|
|
222
|
+
// If sending the message fails, force kill immediately
|
|
223
|
+
clearTimeout(forceKillTimeout);
|
|
224
|
+
try {
|
|
225
|
+
process1.kill('SIGKILL');
|
|
226
|
+
} catch (killError) {
|
|
227
|
+
this.logger.error(`Error force killing process ${index}:`, killError);
|
|
228
|
+
}
|
|
229
|
+
resolve();
|
|
230
|
+
}
|
|
231
|
+
});
|
|
232
|
+
}
|
|
233
|
+
/**
|
|
234
|
+
* Cleans up all worker processes with timeout and force kill if needed
|
|
235
|
+
*/ async cleanup() {
|
|
236
|
+
this.logger.info(`Cleaning up ${this.processes.length} worker processes`);
|
|
237
|
+
// Create array of promises for each process termination
|
|
238
|
+
const terminationPromises = this.processes.map((process1, index)=>this.terminateProcess(process1, index));
|
|
239
|
+
// Wait for all processes to terminate with a timeout
|
|
240
|
+
try {
|
|
241
|
+
await Promise.race([
|
|
242
|
+
Promise.all(terminationPromises),
|
|
243
|
+
new Promise((resolve)=>{
|
|
244
|
+
setTimeout(()=>{
|
|
245
|
+
this.logger.warn('Some processes did not terminate in time, force killing all remaining...');
|
|
246
|
+
this.processes.forEach((p)=>{
|
|
247
|
+
try {
|
|
248
|
+
if (!p.killed) {
|
|
249
|
+
p.kill('SIGKILL');
|
|
250
|
+
}
|
|
251
|
+
} catch (e) {
|
|
252
|
+
// Ignore errors when force killing
|
|
253
|
+
}
|
|
254
|
+
});
|
|
255
|
+
resolve();
|
|
256
|
+
}, 30000); // 30 second timeout for all processes
|
|
257
|
+
})
|
|
258
|
+
]);
|
|
259
|
+
} catch (error) {
|
|
260
|
+
this.logger.error('Error during cleanup:', error);
|
|
261
|
+
}
|
|
262
|
+
this.processes = [];
|
|
263
|
+
this.logger.info('All worker processes cleaned up');
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
export { WorkerClientManager, testChainConfig };
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@aztec/p2p",
|
|
3
|
-
"version": "0.77.0
|
|
3
|
+
"version": "0.77.0",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"exports": {
|
|
6
6
|
".": "./dest/index.js",
|
|
@@ -65,14 +65,14 @@
|
|
|
65
65
|
]
|
|
66
66
|
},
|
|
67
67
|
"dependencies": {
|
|
68
|
-
"@aztec/constants": "0.77.0
|
|
69
|
-
"@aztec/epoch-cache": "0.77.0
|
|
70
|
-
"@aztec/foundation": "0.77.0
|
|
71
|
-
"@aztec/kv-store": "0.77.0
|
|
72
|
-
"@aztec/noir-protocol-circuits-types": "0.77.0
|
|
73
|
-
"@aztec/protocol-contracts": "0.77.0
|
|
74
|
-
"@aztec/stdlib": "0.77.0
|
|
75
|
-
"@aztec/telemetry-client": "0.77.0
|
|
68
|
+
"@aztec/constants": "0.77.0",
|
|
69
|
+
"@aztec/epoch-cache": "0.77.0",
|
|
70
|
+
"@aztec/foundation": "0.77.0",
|
|
71
|
+
"@aztec/kv-store": "0.77.0",
|
|
72
|
+
"@aztec/noir-protocol-circuits-types": "0.77.0",
|
|
73
|
+
"@aztec/protocol-contracts": "0.77.0",
|
|
74
|
+
"@aztec/stdlib": "0.77.0",
|
|
75
|
+
"@aztec/telemetry-client": "0.77.0",
|
|
76
76
|
"@chainsafe/discv5": "9.0.0",
|
|
77
77
|
"@chainsafe/enr": "3.0.0",
|
|
78
78
|
"@chainsafe/libp2p-gossipsub": "13.0.0",
|
|
@@ -101,7 +101,7 @@
|
|
|
101
101
|
"xxhash-wasm": "^1.1.0"
|
|
102
102
|
},
|
|
103
103
|
"devDependencies": {
|
|
104
|
-
"@aztec/archiver": "0.77.0
|
|
104
|
+
"@aztec/archiver": "0.77.0",
|
|
105
105
|
"@jest/globals": "^29.5.0",
|
|
106
106
|
"@types/jest": "^29.5.0",
|
|
107
107
|
"@types/node": "^18.14.6",
|
package/src/client/factory.ts
CHANGED
|
@@ -38,8 +38,8 @@ export const createP2PClient = async <T extends P2PClientType>(
|
|
|
38
38
|
) => {
|
|
39
39
|
let config = { ..._config };
|
|
40
40
|
const logger = deps.logger ?? createLogger('p2p');
|
|
41
|
-
const store = deps.store ?? (await createStore('p2p', config, createLogger('p2p:lmdb-v2')));
|
|
42
|
-
const archive = await createStore('p2p-archive', config, createLogger('p2p-archive:lmdb-v2'));
|
|
41
|
+
const store = deps.store ?? (await createStore('p2p', 1, config, createLogger('p2p:lmdb-v2')));
|
|
42
|
+
const archive = await createStore('p2p-archive', 1, config, createLogger('p2p-archive:lmdb-v2'));
|
|
43
43
|
|
|
44
44
|
const mempools: MemPools<T> = {
|
|
45
45
|
txPool: deps.txPool ?? new AztecKVTxPool(store, archive, telemetry, config.archivedTxLimit),
|
|
@@ -1,117 +1,17 @@
|
|
|
1
|
-
import { EthAddress } from '@aztec/foundation/eth-address';
|
|
2
1
|
import { createLogger } from '@aztec/foundation/log';
|
|
3
2
|
import { sleep } from '@aztec/foundation/sleep';
|
|
4
|
-
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
5
3
|
import { ClientIvcProof } from '@aztec/stdlib/proofs';
|
|
6
4
|
import { mockTx } from '@aztec/stdlib/testing';
|
|
7
5
|
|
|
8
|
-
import
|
|
6
|
+
import assert from 'assert';
|
|
9
7
|
import path from 'path';
|
|
10
8
|
import { fileURLToPath } from 'url';
|
|
11
9
|
|
|
12
|
-
import {
|
|
13
|
-
import { generatePeerIdPrivateKeys } from '../test-helpers/generate-peer-id-private-keys.js';
|
|
14
|
-
import { getPorts } from '../test-helpers/get-ports.js';
|
|
15
|
-
import { makeEnrs } from '../test-helpers/make-enrs.js';
|
|
10
|
+
import { WorkerClientManager, testChainConfig } from './worker_client_manager.js';
|
|
16
11
|
|
|
17
12
|
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
|
18
|
-
const workerPath = path.join(__dirname, '../../dest/testbench/p2p_client_testbench_worker.js');
|
|
19
13
|
const logger = createLogger('testbench');
|
|
20
14
|
|
|
21
|
-
let processes: ChildProcess[] = [];
|
|
22
|
-
|
|
23
|
-
const testChainConfig: ChainConfig = {
|
|
24
|
-
l1ChainId: 31337,
|
|
25
|
-
version: 1,
|
|
26
|
-
l1Contracts: {
|
|
27
|
-
rollupAddress: EthAddress.random(),
|
|
28
|
-
},
|
|
29
|
-
};
|
|
30
|
-
|
|
31
|
-
/**
|
|
32
|
-
* Cleanup function to kill all child processes
|
|
33
|
-
*/
|
|
34
|
-
async function cleanup() {
|
|
35
|
-
logger.info('Cleaning up processes...');
|
|
36
|
-
await Promise.all(
|
|
37
|
-
processes.map(
|
|
38
|
-
proc =>
|
|
39
|
-
new Promise<void>(resolve => {
|
|
40
|
-
proc.once('exit', () => resolve());
|
|
41
|
-
proc.send({ type: 'STOP' });
|
|
42
|
-
}),
|
|
43
|
-
),
|
|
44
|
-
);
|
|
45
|
-
process.exit(0);
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
// Handle cleanup on process termination
|
|
49
|
-
process.on('SIGINT', () => void cleanup());
|
|
50
|
-
process.on('SIGTERM', () => void cleanup());
|
|
51
|
-
|
|
52
|
-
/**
|
|
53
|
-
* Creates a number of worker clients in separate processes
|
|
54
|
-
* All are configured to connect to each other and overrided with the test specific config
|
|
55
|
-
*
|
|
56
|
-
* @param numberOfClients - The number of clients to create
|
|
57
|
-
* @param p2pConfig - The P2P config to use for the clients
|
|
58
|
-
* @returns The ENRs of the created clients
|
|
59
|
-
*/
|
|
60
|
-
async function makeWorkerClients(numberOfClients: number, p2pConfig: Partial<P2PConfig>) {
|
|
61
|
-
const peerIdPrivateKeys = generatePeerIdPrivateKeys(numberOfClients);
|
|
62
|
-
const ports = await getPorts(numberOfClients);
|
|
63
|
-
const peerEnrs = await makeEnrs(peerIdPrivateKeys, ports, testChainConfig);
|
|
64
|
-
|
|
65
|
-
processes = [];
|
|
66
|
-
const readySignals: Promise<void>[] = [];
|
|
67
|
-
for (let i = 0; i < numberOfClients; i++) {
|
|
68
|
-
logger.info(`Creating client ${i}`);
|
|
69
|
-
const addr = `127.0.0.1:${ports[i]}`;
|
|
70
|
-
const listenAddr = `0.0.0.0:${ports[i]}`;
|
|
71
|
-
|
|
72
|
-
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
73
|
-
const otherNodes = peerEnrs.filter((_, ind) => ind < Math.min(i, 10));
|
|
74
|
-
|
|
75
|
-
const config: P2PConfig & Partial<ChainConfig> = {
|
|
76
|
-
...getP2PDefaultConfig(),
|
|
77
|
-
p2pEnabled: true,
|
|
78
|
-
peerIdPrivateKey: peerIdPrivateKeys[i],
|
|
79
|
-
tcpListenAddress: listenAddr,
|
|
80
|
-
udpListenAddress: listenAddr,
|
|
81
|
-
tcpAnnounceAddress: addr,
|
|
82
|
-
udpAnnounceAddress: addr,
|
|
83
|
-
bootstrapNodes: [...otherNodes],
|
|
84
|
-
...p2pConfig,
|
|
85
|
-
};
|
|
86
|
-
|
|
87
|
-
const childProcess = fork(workerPath);
|
|
88
|
-
childProcess.send({ type: 'START', config, clientIndex: i });
|
|
89
|
-
|
|
90
|
-
// Wait for ready signal
|
|
91
|
-
readySignals.push(
|
|
92
|
-
new Promise((resolve, reject) => {
|
|
93
|
-
childProcess.once('message', (msg: any) => {
|
|
94
|
-
if (msg.type === 'READY') {
|
|
95
|
-
resolve(undefined);
|
|
96
|
-
}
|
|
97
|
-
if (msg.type === 'ERROR') {
|
|
98
|
-
reject(new Error(msg.error));
|
|
99
|
-
}
|
|
100
|
-
});
|
|
101
|
-
}),
|
|
102
|
-
);
|
|
103
|
-
|
|
104
|
-
processes.push(childProcess);
|
|
105
|
-
}
|
|
106
|
-
// Wait for peers to all connect with each other
|
|
107
|
-
await sleep(4000);
|
|
108
|
-
|
|
109
|
-
// Wait for all peers to be booted up
|
|
110
|
-
await Promise.all(readySignals);
|
|
111
|
-
|
|
112
|
-
return peerEnrs;
|
|
113
|
-
}
|
|
114
|
-
|
|
115
15
|
async function main() {
|
|
116
16
|
try {
|
|
117
17
|
// Read configuration file name from command line args
|
|
@@ -126,7 +26,8 @@ async function main() {
|
|
|
126
26
|
const numberOfClients = config.default.numberOfClients;
|
|
127
27
|
|
|
128
28
|
// Setup clients in separate processes
|
|
129
|
-
|
|
29
|
+
const workerClientManager = new WorkerClientManager(logger, testConfig);
|
|
30
|
+
await workerClientManager.makeWorkerClients(numberOfClients);
|
|
130
31
|
|
|
131
32
|
// wait a bit longer for all peers to be ready
|
|
132
33
|
await sleep(5000);
|
|
@@ -136,22 +37,30 @@ async function main() {
|
|
|
136
37
|
const tx = await mockTx(1, {
|
|
137
38
|
clientIvcProof: ClientIvcProof.random(),
|
|
138
39
|
});
|
|
139
|
-
|
|
40
|
+
|
|
41
|
+
workerClientManager.processes[0].send({ type: 'SEND_TX', tx: tx.toBuffer() });
|
|
140
42
|
logger.info('Transaction sent from client 0');
|
|
141
43
|
|
|
142
44
|
// Give time for message propagation
|
|
143
45
|
await sleep(30000);
|
|
144
46
|
logger.info('Checking message propagation results');
|
|
145
47
|
|
|
146
|
-
|
|
48
|
+
// Check message propagation results
|
|
49
|
+
const numberOfClientsThatReceivedMessage = workerClientManager.numberOfClientsThatReceivedMessage();
|
|
50
|
+
logger.info(`Number of clients that received message: ${numberOfClientsThatReceivedMessage}`);
|
|
51
|
+
|
|
52
|
+
assert(numberOfClientsThatReceivedMessage === numberOfClients - 1);
|
|
53
|
+
|
|
54
|
+
logger.info('Test passed, cleaning up');
|
|
55
|
+
|
|
56
|
+
// cleanup
|
|
57
|
+
await workerClientManager.cleanup();
|
|
147
58
|
} catch (error) {
|
|
148
59
|
logger.error('Test failed with error:', error);
|
|
149
|
-
await cleanup();
|
|
150
60
|
process.exit(1);
|
|
151
61
|
}
|
|
152
62
|
}
|
|
153
63
|
|
|
154
64
|
main().catch(error => {
|
|
155
65
|
logger.error('Unhandled error:', error);
|
|
156
|
-
cleanup().catch(() => process.exit(1));
|
|
157
66
|
});
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
import { EthAddress } from '@aztec/foundation/eth-address';
|
|
2
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
3
|
+
import { sleep } from '@aztec/foundation/sleep';
|
|
4
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
5
|
+
|
|
6
|
+
import { type ChildProcess, fork } from 'child_process';
|
|
7
|
+
import path from 'path';
|
|
8
|
+
import { fileURLToPath } from 'url';
|
|
9
|
+
|
|
10
|
+
import { type P2PConfig, getP2PDefaultConfig } from '../config.js';
|
|
11
|
+
import { generatePeerIdPrivateKeys } from '../test-helpers/generate-peer-id-private-keys.js';
|
|
12
|
+
import { getPorts } from '../test-helpers/get-ports.js';
|
|
13
|
+
import { makeEnr, makeEnrs } from '../test-helpers/make-enrs.js';
|
|
14
|
+
|
|
15
|
+
// ESM modules have no built-in __dirname; reconstruct it from import.meta.url.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
// Compiled worker entrypoint that is forked once per testbench client.
const workerPath = path.join(__dirname, '../../dest/testbench/p2p_client_testbench_worker.js');
|
|
17
|
+
|
|
18
|
+
// Minimal chain configuration shared by all testbench clients. It is passed to
// makeEnr/makeEnrs so every client derives ENRs for the same chain; the rollup
// address is randomized per run since the testbench never talks to a real L1.
const testChainConfig: ChainConfig = {
  l1ChainId: 31337,
  version: 1,
  l1Contracts: {
    rollupAddress: EthAddress.random(),
  },
};
|
|
25
|
+
|
|
26
|
+
class WorkerClientManager {
|
|
27
|
+
public processes: ChildProcess[] = [];
|
|
28
|
+
public peerIdPrivateKeys: string[] = [];
|
|
29
|
+
public peerEnrs: string[] = [];
|
|
30
|
+
public ports: number[] = [];
|
|
31
|
+
private p2pConfig: Partial<P2PConfig>;
|
|
32
|
+
private logger: Logger;
|
|
33
|
+
private messageReceivedByClient: number[] = [];
|
|
34
|
+
|
|
35
|
+
constructor(logger: Logger, p2pConfig: Partial<P2PConfig>) {
|
|
36
|
+
this.logger = logger;
|
|
37
|
+
this.p2pConfig = p2pConfig;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
destroy() {
|
|
41
|
+
this.cleanup().catch((error: Error) => {
|
|
42
|
+
this.logger.error('Failed to cleanup worker client manager', error);
|
|
43
|
+
process.exit(1);
|
|
44
|
+
});
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
/**
|
|
48
|
+
* Creates address strings from a port
|
|
49
|
+
*/
|
|
50
|
+
private getAddresses(port: number) {
|
|
51
|
+
return {
|
|
52
|
+
addr: `127.0.0.1:${port}`,
|
|
53
|
+
listenAddr: `0.0.0.0:${port}`,
|
|
54
|
+
};
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Creates a client configuration object
|
|
59
|
+
*/
|
|
60
|
+
private createClientConfig(clientIndex: number, port: number, otherNodes: string[]) {
|
|
61
|
+
const { addr, listenAddr } = this.getAddresses(port);
|
|
62
|
+
|
|
63
|
+
return {
|
|
64
|
+
...getP2PDefaultConfig(),
|
|
65
|
+
p2pEnabled: true,
|
|
66
|
+
peerIdPrivateKey: this.peerIdPrivateKeys[clientIndex],
|
|
67
|
+
tcpListenAddress: listenAddr,
|
|
68
|
+
udpListenAddress: listenAddr,
|
|
69
|
+
tcpAnnounceAddress: addr,
|
|
70
|
+
udpAnnounceAddress: addr,
|
|
71
|
+
bootstrapNodes: [...otherNodes],
|
|
72
|
+
...this.p2pConfig,
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
|
|
77
|
+
* Spawns a worker process and returns a promise that resolves when the worker is ready
|
|
78
|
+
*/
|
|
79
|
+
private spawnWorkerProcess(
|
|
80
|
+
config: P2PConfig & Partial<ChainConfig>,
|
|
81
|
+
clientIndex: number,
|
|
82
|
+
): [ChildProcess, Promise<void>] {
|
|
83
|
+
const childProcess = fork(workerPath);
|
|
84
|
+
childProcess.send({ type: 'START', config, clientIndex });
|
|
85
|
+
|
|
86
|
+
// Handle unexpected child process exit
|
|
87
|
+
childProcess.on('exit', (code, signal) => {
|
|
88
|
+
if (code !== 0) {
|
|
89
|
+
this.logger.warn(`Worker ${clientIndex} exited unexpectedly with code ${code} and signal ${signal}`);
|
|
90
|
+
}
|
|
91
|
+
});
|
|
92
|
+
|
|
93
|
+
childProcess.on('message', (msg: any) => {
|
|
94
|
+
if (msg.type === 'GOSSIP_RECEIVED') {
|
|
95
|
+
this.messageReceivedByClient[clientIndex] = msg.count;
|
|
96
|
+
}
|
|
97
|
+
});
|
|
98
|
+
|
|
99
|
+
// Create ready signal promise
|
|
100
|
+
const readySignal = new Promise<void>((resolve, reject) => {
|
|
101
|
+
// Set a timeout to avoid hanging indefinitely
|
|
102
|
+
const timeout = setTimeout(() => {
|
|
103
|
+
reject(new Error(`Timeout waiting for worker ${clientIndex} to be ready`));
|
|
104
|
+
}, 30000); // 30 second timeout
|
|
105
|
+
|
|
106
|
+
childProcess.once('message', (msg: any) => {
|
|
107
|
+
clearTimeout(timeout);
|
|
108
|
+
if (msg.type === 'READY') {
|
|
109
|
+
resolve();
|
|
110
|
+
}
|
|
111
|
+
// For future use
|
|
112
|
+
if (msg.type === 'ERROR') {
|
|
113
|
+
reject(new Error(msg.error));
|
|
114
|
+
}
|
|
115
|
+
});
|
|
116
|
+
|
|
117
|
+
// Also resolve/reject if process exits before sending message
|
|
118
|
+
childProcess.once('exit', code => {
|
|
119
|
+
clearTimeout(timeout);
|
|
120
|
+
if (code === 0) {
|
|
121
|
+
resolve();
|
|
122
|
+
} else {
|
|
123
|
+
reject(new Error(`Worker ${clientIndex} exited with code ${code} before becoming ready`));
|
|
124
|
+
}
|
|
125
|
+
});
|
|
126
|
+
});
|
|
127
|
+
|
|
128
|
+
return [childProcess, readySignal];
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
/**
|
|
132
|
+
* Creates a number of worker clients in separate processes
|
|
133
|
+
* All are configured to connect to each other and overrided with the test specific config
|
|
134
|
+
*
|
|
135
|
+
* @param numberOfClients - The number of clients to create
|
|
136
|
+
* @returns The ENRs of the created clients
|
|
137
|
+
*/
|
|
138
|
+
async makeWorkerClients(numberOfClients: number) {
|
|
139
|
+
try {
|
|
140
|
+
this.messageReceivedByClient = new Array(numberOfClients).fill(0);
|
|
141
|
+
this.peerIdPrivateKeys = generatePeerIdPrivateKeys(numberOfClients);
|
|
142
|
+
this.ports = await getPorts(numberOfClients);
|
|
143
|
+
this.peerEnrs = await makeEnrs(this.peerIdPrivateKeys, this.ports, testChainConfig);
|
|
144
|
+
|
|
145
|
+
this.processes = [];
|
|
146
|
+
const readySignals: Promise<void>[] = [];
|
|
147
|
+
|
|
148
|
+
for (let i = 0; i < numberOfClients; i++) {
|
|
149
|
+
this.logger.info(`Creating client ${i}`);
|
|
150
|
+
|
|
151
|
+
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
152
|
+
const otherNodes = this.peerEnrs.filter((_, ind) => ind < Math.min(i, 10));
|
|
153
|
+
|
|
154
|
+
const config = this.createClientConfig(i, this.ports[i], otherNodes);
|
|
155
|
+
const [childProcess, readySignal] = this.spawnWorkerProcess(config, i);
|
|
156
|
+
|
|
157
|
+
readySignals.push(readySignal);
|
|
158
|
+
this.processes.push(childProcess);
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// Wait for peers to all connect with each other
|
|
162
|
+
await sleep(10000);
|
|
163
|
+
|
|
164
|
+
// Wait for all peers to be booted up with timeout
|
|
165
|
+
await Promise.race([
|
|
166
|
+
Promise.all(readySignals),
|
|
167
|
+
new Promise((_, reject) =>
|
|
168
|
+
setTimeout(() => reject(new Error('Timeout waiting for all workers to be ready')), 30000),
|
|
169
|
+
),
|
|
170
|
+
]);
|
|
171
|
+
|
|
172
|
+
return this.peerEnrs;
|
|
173
|
+
} catch (error) {
|
|
174
|
+
// Clean up any processes that were created if there's an error
|
|
175
|
+
this.logger.error('Error during makeWorkerClients:', error);
|
|
176
|
+
await this.cleanup();
|
|
177
|
+
throw error;
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
purgeMessageReceivedByClient() {
|
|
182
|
+
this.messageReceivedByClient = new Array(this.processes.length).fill(0);
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
numberOfClientsThatReceivedMessage() {
|
|
186
|
+
return this.messageReceivedByClient.filter(count => count > 0).length;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
|
|
190
|
+
* Changes the port for a specific client
|
|
191
|
+
*
|
|
192
|
+
* @param clientIndex - The index of the client to change port for
|
|
193
|
+
* @param newPort - The new port to use
|
|
194
|
+
*/
|
|
195
|
+
async changePort(clientIndex: number, newPort: number) {
|
|
196
|
+
try {
|
|
197
|
+
if (clientIndex < 0 || clientIndex >= this.processes.length) {
|
|
198
|
+
throw new Error(`Invalid client index: ${clientIndex}`);
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
this.processes[clientIndex].send({ type: 'STOP' });
|
|
202
|
+
|
|
203
|
+
// Wait for the process to be ready with a timeout
|
|
204
|
+
await sleep(10000);
|
|
205
|
+
|
|
206
|
+
this.logger.info(`Changing port for client ${clientIndex} to ${newPort}`);
|
|
207
|
+
|
|
208
|
+
// Update the port in the ports array
|
|
209
|
+
this.ports[clientIndex] = newPort;
|
|
210
|
+
|
|
211
|
+
// Update the port in the peerEnrs array
|
|
212
|
+
this.peerEnrs[clientIndex] = await makeEnr(this.peerIdPrivateKeys[clientIndex], newPort, testChainConfig);
|
|
213
|
+
|
|
214
|
+
// Maximum seed with 10 other peers to allow peer discovery to connect them at a smoother rate
|
|
215
|
+
const otherNodes = this.peerEnrs.filter(
|
|
216
|
+
(_, ind) => ind !== clientIndex && ind < Math.min(this.peerEnrs.length, 10),
|
|
217
|
+
);
|
|
218
|
+
|
|
219
|
+
const config = this.createClientConfig(clientIndex, newPort, otherNodes);
|
|
220
|
+
const [childProcess, readySignal] = this.spawnWorkerProcess(config, clientIndex);
|
|
221
|
+
|
|
222
|
+
this.processes[clientIndex] = childProcess;
|
|
223
|
+
|
|
224
|
+
// Wait for the process to be ready with a timeout
|
|
225
|
+
await Promise.race([
|
|
226
|
+
readySignal,
|
|
227
|
+
new Promise((_, reject) =>
|
|
228
|
+
setTimeout(() => reject(new Error(`Timeout waiting for client ${clientIndex} to be ready`)), 30000),
|
|
229
|
+
),
|
|
230
|
+
]);
|
|
231
|
+
} catch (error) {
|
|
232
|
+
this.logger.error(`Error during changePort for client ${clientIndex}:`, error);
|
|
233
|
+
// Only clean up the specific process that had an issue
|
|
234
|
+
await this.terminateProcess(this.processes[clientIndex], clientIndex);
|
|
235
|
+
throw error;
|
|
236
|
+
}
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
/**
|
|
240
|
+
* Terminate a single process with timeout and force kill if needed
|
|
241
|
+
*/
|
|
242
|
+
private terminateProcess(process: ChildProcess, index: number): Promise<void> {
|
|
243
|
+
if (!process || process.killed) {
|
|
244
|
+
return Promise.resolve();
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
return new Promise<void>(resolve => {
|
|
248
|
+
// Set a timeout for the graceful exit
|
|
249
|
+
const forceKillTimeout = setTimeout(() => {
|
|
250
|
+
this.logger.warn(`Process ${index} didn't exit gracefully, force killing...`);
|
|
251
|
+
try {
|
|
252
|
+
process.kill('SIGKILL'); // Force kill
|
|
253
|
+
} catch (e) {
|
|
254
|
+
this.logger.error(`Error force killing process ${index}:`, e);
|
|
255
|
+
}
|
|
256
|
+
}, 10000); // 10 second timeout for graceful exit
|
|
257
|
+
|
|
258
|
+
// Listen for process exit
|
|
259
|
+
process.once('exit', () => {
|
|
260
|
+
clearTimeout(forceKillTimeout);
|
|
261
|
+
resolve();
|
|
262
|
+
});
|
|
263
|
+
|
|
264
|
+
// Try to gracefully stop the process
|
|
265
|
+
try {
|
|
266
|
+
process.send({ type: 'STOP' });
|
|
267
|
+
} catch (e) {
|
|
268
|
+
// If sending the message fails, force kill immediately
|
|
269
|
+
clearTimeout(forceKillTimeout);
|
|
270
|
+
try {
|
|
271
|
+
process.kill('SIGKILL');
|
|
272
|
+
} catch (killError) {
|
|
273
|
+
this.logger.error(`Error force killing process ${index}:`, killError);
|
|
274
|
+
}
|
|
275
|
+
resolve();
|
|
276
|
+
}
|
|
277
|
+
});
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
/**
|
|
281
|
+
* Cleans up all worker processes with timeout and force kill if needed
|
|
282
|
+
*/
|
|
283
|
+
async cleanup() {
|
|
284
|
+
this.logger.info(`Cleaning up ${this.processes.length} worker processes`);
|
|
285
|
+
|
|
286
|
+
// Create array of promises for each process termination
|
|
287
|
+
const terminationPromises = this.processes.map((process, index) => this.terminateProcess(process, index));
|
|
288
|
+
|
|
289
|
+
// Wait for all processes to terminate with a timeout
|
|
290
|
+
try {
|
|
291
|
+
await Promise.race([
|
|
292
|
+
Promise.all(terminationPromises),
|
|
293
|
+
new Promise<void>(resolve => {
|
|
294
|
+
setTimeout(() => {
|
|
295
|
+
this.logger.warn('Some processes did not terminate in time, force killing all remaining...');
|
|
296
|
+
this.processes.forEach(p => {
|
|
297
|
+
try {
|
|
298
|
+
if (!p.killed) {
|
|
299
|
+
p.kill('SIGKILL');
|
|
300
|
+
}
|
|
301
|
+
} catch (e) {
|
|
302
|
+
// Ignore errors when force killing
|
|
303
|
+
}
|
|
304
|
+
});
|
|
305
|
+
resolve();
|
|
306
|
+
}, 30000); // 30 second timeout for all processes
|
|
307
|
+
}),
|
|
308
|
+
]);
|
|
309
|
+
} catch (error) {
|
|
310
|
+
this.logger.error('Error during cleanup:', error);
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
this.processes = [];
|
|
314
|
+
this.logger.info('All worker processes cleaned up');
|
|
315
|
+
}
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
export { testChainConfig, WorkerClientManager };
|