@aztec/end-to-end 0.0.0-test.1 → 0.0.1-fake-c83136db25
This diff shows the contents of two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- package/dest/bench/client_flows/benchmark.d.ts +61 -0
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
- package/dest/bench/client_flows/benchmark.js +261 -0
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +73 -0
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
- package/dest/bench/client_flows/client_flows_benchmark.js +311 -0
- package/dest/bench/client_flows/config.d.ts +14 -0
- package/dest/bench/client_flows/config.d.ts.map +1 -0
- package/dest/bench/client_flows/config.js +106 -0
- package/dest/bench/client_flows/data_extractor.d.ts +2 -0
- package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
- package/dest/bench/client_flows/data_extractor.js +99 -0
- package/dest/bench/utils.d.ts +10 -36
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +26 -66
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +20 -12
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +18 -24
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +48 -69
- package/dest/e2e_deploy_contract/deploy_test.d.ts +14 -6
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +13 -19
- package/dest/e2e_epochs/epochs_test.d.ts +58 -17
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +224 -43
- package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
- package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
- package/dest/e2e_fees/bridging_race.notest.js +63 -0
- package/dest/e2e_fees/fees_test.d.ts +20 -9
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +98 -107
- package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
- package/dest/e2e_l1_publisher/write_json.js +57 -0
- package/dest/e2e_multi_validator/utils.d.ts +12 -0
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
- package/dest/e2e_multi_validator/utils.js +214 -0
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +9 -6
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +22 -19
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +135 -0
- package/dest/e2e_p2p/p2p_network.d.ts +69 -22
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +180 -129
- package/dest/e2e_p2p/shared.d.ts +41 -5
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +163 -19
- package/dest/e2e_token_contract/token_contract_test.d.ts +11 -5
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +50 -26
- package/dest/{e2e_prover → fixtures}/e2e_prover_test.d.ts +14 -9
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
- package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +95 -100
- package/dest/fixtures/fixtures.d.ts +5 -6
- package/dest/fixtures/fixtures.d.ts.map +1 -1
- package/dest/fixtures/fixtures.js +4 -3
- package/dest/fixtures/get_acvm_config.d.ts +1 -1
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_acvm_config.js +2 -14
- package/dest/fixtures/get_bb_config.d.ts +1 -1
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +10 -17
- package/dest/fixtures/l1_to_l2_messaging.d.ts +8 -5
- package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
- package/dest/fixtures/l1_to_l2_messaging.js +44 -18
- package/dest/fixtures/setup_l1_contracts.d.ts +3 -3
- package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
- package/dest/fixtures/setup_l1_contracts.js +4 -4
- package/dest/fixtures/setup_p2p_test.d.ts +14 -13
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +73 -21
- package/dest/fixtures/snapshot_manager.d.ts +15 -7
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +147 -121
- package/dest/fixtures/token_utils.d.ts +6 -3
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +23 -10
- package/dest/fixtures/utils.d.ts +76 -37
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +464 -368
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/quality_of_service/alert_checker.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +41 -25
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +104 -50
- package/dest/shared/gas_portal_test_harness.d.ts +32 -24
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +50 -29
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +5 -3
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +13 -11
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +138 -108
- package/dest/simulators/lending_simulator.d.ts +6 -6
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +13 -16
- package/dest/simulators/token_simulator.d.ts +5 -2
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +16 -13
- package/dest/spartan/setup_test_wallets.d.ts +23 -10
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +167 -58
- package/dest/spartan/utils.d.ts +100 -303
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +407 -130
- package/package.json +61 -56
- package/src/bench/client_flows/benchmark.ts +341 -0
- package/src/bench/client_flows/client_flows_benchmark.ts +402 -0
- package/src/bench/client_flows/config.ts +61 -0
- package/src/bench/client_flows/data_extractor.ts +111 -0
- package/src/bench/utils.ts +22 -76
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +63 -105
- package/src/e2e_deploy_contract/deploy_test.ts +23 -38
- package/src/e2e_epochs/epochs_test.ts +274 -54
- package/src/e2e_fees/bridging_race.notest.ts +80 -0
- package/src/e2e_fees/fees_test.ts +137 -136
- package/src/e2e_l1_publisher/write_json.ts +76 -0
- package/src/e2e_multi_validator/utils.ts +258 -0
- package/src/e2e_nested_contract/nested_contract_test.ts +27 -18
- package/src/e2e_p2p/inactivity_slash_test.ts +178 -0
- package/src/e2e_p2p/p2p_network.ts +272 -166
- package/src/e2e_p2p/shared.ts +244 -29
- package/src/e2e_token_contract/token_contract_test.ts +43 -39
- package/src/fixtures/dumps/epoch_proof_result.json +1 -1
- package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +101 -145
- package/src/fixtures/fixtures.ts +4 -3
- package/src/fixtures/get_acvm_config.ts +3 -11
- package/src/fixtures/get_bb_config.ts +18 -13
- package/src/fixtures/l1_to_l2_messaging.ts +53 -23
- package/src/fixtures/setup_l1_contracts.ts +6 -7
- package/src/fixtures/setup_p2p_test.ts +112 -38
- package/src/fixtures/snapshot_manager.ts +187 -139
- package/src/fixtures/token_utils.ts +29 -12
- package/src/fixtures/utils.ts +552 -425
- package/src/fixtures/web3signer.ts +63 -0
- package/src/guides/up_quick_start.sh +6 -14
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +108 -79
- package/src/shared/gas_portal_test_harness.ts +58 -49
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +173 -176
- package/src/simulators/lending_simulator.ts +12 -15
- package/src/simulators/token_simulator.ts +21 -13
- package/src/spartan/DEVELOP.md +121 -0
- package/src/spartan/setup_test_wallets.ts +215 -93
- package/src/spartan/utils.ts +458 -130
- package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
- package/dest/sample-dapp/connect.js +0 -12
- package/dest/sample-dapp/contracts.js +0 -10
- package/dest/sample-dapp/deploy.js +0 -35
- package/dest/sample-dapp/index.js +0 -98
- package/src/sample-dapp/connect.mjs +0 -16
- package/src/sample-dapp/contracts.mjs +0 -14
- package/src/sample-dapp/deploy.mjs +0 -40
- package/src/sample-dapp/index.mjs +0 -128
package/dest/spartan/utils.js
CHANGED
@@ -1,74 +1,80 @@
-import {
+import { createLogger } from '@aztec/aztec.js/log';
+import { makeBackoff, retry } from '@aztec/foundation/retry';
+import { schemas } from '@aztec/foundation/schemas';
+import { sleep } from '@aztec/foundation/sleep';
+import { createAztecNodeAdminClient, createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
 import { exec, execSync, spawn } from 'child_process';
 import path from 'path';
 import { promisify } from 'util';
+import { createPublicClient, fallback, http } from 'viem';
 import { z } from 'zod';
-import { AlertChecker } from '../quality_of_service/alert_checker.js';
 const execAsync = promisify(exec);
 const logger = createLogger('e2e:k8s-utils');
-const
-
-
-
-
-
-
-
-const k8sLocalConfigSchema = z.object({
-    ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
-    AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
-    AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
-    AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
-    INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
-    NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
-    CONTAINER_NODE_PORT: z.coerce.number().default(8080),
-    CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
-    CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
-    CONTAINER_PXE_PORT: z.coerce.number().default(8080),
-    CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
-    CONTAINER_METRICS_PORT: z.coerce.number().default(80),
-    GRAFANA_PASSWORD: z.string().optional(),
-    METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
-    SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
-    ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
-    L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
-    SEPOLIA_RUN: z.string().default('false'),
-    K8S: z.literal('local')
-});
-const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
-    K8S: z.literal('gcloud'),
-    CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
-    REGION: z.string().min(1, 'REGION env variable must be set')
+const testConfigSchema = z.object({
+    NAMESPACE: z.string().default('scenario'),
+    REAL_VERIFIER: schemas.Boolean.optional().default(true),
+    CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+    L1_RPC_URLS_JSON: z.string().optional(),
+    L1_ACCOUNT_MNEMONIC: z.string().optional(),
+    AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+    AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
 });
-const directConfigSchema = z.object({
-    PXE_URL: z.string().url('PXE_URL must be a valid URL'),
-    NODE_URL: z.string().url('NODE_URL must be a valid URL'),
-    ETHEREUM_HOSTS: ethereumHostsSchema,
-    K8S: z.literal('false')
-});
-const envSchema = z.discriminatedUnion('K8S', [
-    k8sLocalConfigSchema,
-    k8sGCloudConfigSchema,
-    directConfigSchema
-]);
-export function isK8sConfig(config) {
-    return config.K8S === 'local' || config.K8S === 'gcloud';
-}
-export function isGCloudConfig(config) {
-    return config.K8S === 'gcloud';
-}
 export function setupEnvironment(env) {
-    const config =
-
-        const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
-        execSync(command);
-    }
+    const config = testConfigSchema.parse(env);
+    logger.warn(`Loaded env config`, config);
     return config;
 }
+/**
+ * @param path - The path to the script, relative to the project root
+ * @param args - The arguments to pass to the script
+ * @param logger - The logger to use
+ * @returns The exit code of the script
+ */ function runScript(path, args, logger, env) {
+    const childProcess = spawn(path, args, {
+        stdio: [
+            'ignore',
+            'pipe',
+            'pipe'
+        ],
+        env: env ? {
+            ...process.env,
+            ...env
+        } : process.env
+    });
+    return new Promise((resolve, reject)=>{
+        childProcess.on('close', (code)=>resolve(code ?? 0));
+        childProcess.on('error', reject);
+        childProcess.stdout?.on('data', (data)=>{
+            logger.info(data.toString());
+        });
+        childProcess.stderr?.on('data', (data)=>{
+            logger.error(data.toString());
+        });
+    });
+}
+export function getAztecBin() {
+    return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
+}
+/**
+ * Runs the Aztec binary
+ * @param args - The arguments to pass to the Aztec binary
+ * @param logger - The logger to use
+ * @param env - Optional environment variables to set for the process
+ * @returns The exit code of the Aztec binary
+ */ export function runAztecBin(args, logger, env) {
+    return runScript('node', [
+        getAztecBin(),
+        ...args
+    ], logger, env);
+}
+export function runProjectScript(script, args, logger, env) {
+    const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+    return runScript(scriptPath, args, logger, env);
+}
 export async function startPortForward({ resource, namespace, containerPort, hostPort }) {
     const hostPortAsString = hostPort ? hostPort.toString() : '';
-    logger.
-    const
+    logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
+    const process1 = spawn('kubectl', [
         'port-forward',
         '-n',
         namespace,
@@ -85,61 +91,81 @@ export async function startPortForward({ resource, namespace, containerPort, hos
     });
     let isResolved = false;
     const connected = new Promise((resolve)=>{
-
+        process1.stdout?.on('data', (data)=>{
             const str = data.toString();
             if (!isResolved && str.includes('Forwarding from')) {
                 isResolved = true;
-                logger.
+                logger.debug(`Port forward for ${resource}: ${str}`);
                 const port = str.search(/:\d+/);
                 if (port === -1) {
                     throw new Error('Port not found in port forward output');
                 }
                 const portNumber = parseInt(str.slice(port + 1));
-                logger.
-                logger.info(`Port forward connected: ${portNumber}`);
+                logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
                 resolve(portNumber);
             } else {
                 logger.silent(str);
             }
         });
-
-        logger.
+        process1.stderr?.on('data', (data)=>{
+            logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
             // It's a strange thing:
             // If we don't pipe stderr, then the port forwarding does not work.
             // Log to silent because this doesn't actually report errors,
             // just extremely verbose debug logs.
            logger.silent(data.toString());
        });
-
+        process1.on('close', ()=>{
            if (!isResolved) {
                isResolved = true;
-                logger.warn(
+                logger.warn(`Port forward for ${resource} closed before connection established`);
                resolve(0);
            }
        });
-
-        logger.error(`Port forward error: ${error}`);
+        process1.on('error', (error)=>{
+            logger.error(`Port forward for ${resource} error: ${error}`);
            resolve(0);
        });
-
-        logger.
+        process1.on('exit', (code)=>{
+            logger.verbose(`Port forward for ${resource} exited with code ${code}`);
            resolve(0);
        });
    });
    const port = await connected;
    return {
-        process,
+        process: process1,
        port
    };
 }
+export function startPortForwardForRPC(namespace) {
+    return startPortForward({
+        resource: `services/${namespace}-rpc-aztec-node`,
+        namespace,
+        containerPort: 8080
+    });
+}
+export function startPortForwardForEthereum(namespace) {
+    return startPortForward({
+        resource: `services/${namespace}-eth-execution`,
+        namespace,
+        containerPort: 8545
+    });
+}
 export async function deleteResourceByName({ resource, namespace, name, force = false }) {
     const command = `kubectl delete ${resource} ${name} -n ${namespace} --ignore-not-found=true --wait=true ${force ? '--force' : ''}`;
     logger.info(`command: ${command}`);
     const { stdout } = await execAsync(command);
     return stdout;
 }
-export async function deleteResourceByLabel({ resource, namespace, label }) {
-
+export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
+    // Check if the resource type exists before attempting to delete
+    try {
+        await execAsync(`kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`);
+    } catch (error) {
+        logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+        return '';
+    }
+    const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${force ? '--force' : ''}`;
     logger.info(`command: ${command}`);
     const { stdout } = await execAsync(command);
     return stdout;
@@ -153,8 +179,12 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
 export function getChartDir(spartanDir, chartName) {
     return path.join(spartanDir.trim(), chartName);
 }
+function shellQuote(value) {
+    // Single-quote safe shell escaping: ' -> '\''
+    return `'${value.replace(/'/g, "'\\''")}'`;
+}
 function valuesToArgs(values) {
-    return Object.entries(values).map(([key, value])
+    return Object.entries(values).map(([key, value])=>typeof value === 'number' || typeof value === 'boolean' ? `--set ${key}=${value}` : `--set-string ${key}=${shellQuote(String(value))}`).join(' ');
 }
 function createHelmCommand({ instanceName, helmChartDir, namespace, valuesFile, timeout, values, reuseValues = false }) {
     const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -183,26 +213,30 @@ async function execHelmCommand(args) {
  * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
  * console.log(stdout);
  * ```
- */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, chaosMeshNamespace = 'chaos-mesh', timeout = '
+ */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, chaosMeshNamespace = 'chaos-mesh', timeout = '10m', clean = true, values = {}, logger }) {
     if (clean) {
         // uninstall the helm chart if it exists
         logger.info(`Uninstalling helm chart ${instanceName}`);
         await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-        // and delete the
-        const
-
-
-
-
-
-
-
-
-
-
+        // and delete the chaos-mesh resources created by this release
+        const deleteByLabel = async (resource)=>{
+            const args = {
+                resource,
+                namespace: chaosMeshNamespace,
+                label: `app.kubernetes.io/instance=${instanceName}`
+            };
+            logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+            await deleteResourceByLabel(args).catch((e)=>{
+                logger.error(`Error deleting ${resource}: ${e}`);
+                logger.info(`Force deleting ${resource}`);
+                return deleteResourceByLabel({
+                    ...args,
+                    force: true
+                });
             });
-    }
+        };
+        await deleteByLabel('podchaos');
+        await deleteByLabel('networkchaos');
     }
     return execHelmCommand({
         instanceName,
@@ -260,13 +294,14 @@ export function applyBootNodeFailure({ namespace, spartanDir, durationSeconds, l
         logger
     });
 }
-export function applyValidatorKill({ namespace, spartanDir, logger }) {
+export function applyValidatorKill({ namespace, spartanDir, logger, values }) {
     return installChaosMeshChart({
         instanceName: 'validator-kill',
         targetNamespace: namespace,
         valuesFile: 'validator-kill.yaml',
         helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-        logger
+        logger,
+        values
     });
 }
 export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger }) {
@@ -298,16 +333,189 @@ export async function restartBot(namespace, logger) {
     await deleteResourceByLabel({
         resource: 'pods',
         namespace,
-        label: 'app=bot'
+        label: 'app.kubernetes.io/name=bot'
     });
     await sleep(10 * 1000);
+    // Some bot images may take time to report Ready due to heavy boot-time proving.
+    // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
     await waitForResourceByLabel({
         resource: 'pods',
         namespace,
-        label: 'app=bot'
+        label: 'app.kubernetes.io/name=bot',
+        condition: 'PodReadyToStartContainers'
     });
     logger.info(`Bot restarted`);
 }
+/**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */ export async function installTransferBot({ namespace, spartanDir, logger, replicas = 1, txIntervalSeconds = 10, followChain = 'PENDING', mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk', mnemonicStartIndex, botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01', nodeUrl, timeout = '15m', reuseValues = true, aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12) }) {
+    const instanceName = `${namespace}-bot-transfers`;
+    const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+    const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+    logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+    const values = {
+        'bot.replicaCount': replicas,
+        'bot.txIntervalSeconds': txIntervalSeconds,
+        'bot.followChain': followChain,
+        'bot.botPrivateKey': botPrivateKey,
+        'bot.nodeUrl': resolvedNodeUrl,
+        'bot.mnemonic': mnemonic,
+        'bot.feePaymentMethod': 'fee_juice',
+        'aztec.slotDuration': aztecSlotDuration,
+        // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+        // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+        'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+        // Provide L1 execution RPC for bridging fee juice
+        'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+        // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+        'bot.node.env.BOT_L1_MNEMONIC': mnemonic
+    };
+    // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+    if (mnemonicStartIndex === undefined) {
+        values['bot.mnemonicStartIndex'] = 0;
+    }
+    // Also pass a funded private key directly if available
+    if (process.env.FUNDING_PRIVATE_KEY) {
+        values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+    }
+    // Align bot image with the running network image: prefer env var, else detect from a validator pod
+    let repositoryFromEnv;
+    let tagFromEnv;
+    const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+    if (aztecDockerImage && aztecDockerImage.includes(':')) {
+        const lastColon = aztecDockerImage.lastIndexOf(':');
+        repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+        tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+    }
+    let repository = repositoryFromEnv;
+    let tag = tagFromEnv;
+    if (!repository || !tag) {
+        try {
+            const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
+            const image = stdout.trim().replace(/^'|'$/g, '');
+            if (image && image.includes(':')) {
+                const lastColon = image.lastIndexOf(':');
+                repository = image.slice(0, lastColon);
+                tag = image.slice(lastColon + 1);
+            }
+        } catch (err) {
+            logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+        }
+    }
+    if (repository && tag) {
+        values['global.aztecImage.repository'] = repository;
+        values['global.aztecImage.tag'] = tag;
+    }
+    if (mnemonicStartIndex !== undefined) {
+        values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+    }
+    await execHelmCommand({
+        instanceName,
+        helmChartDir,
+        namespace,
+        valuesFile: undefined,
+        timeout,
+        values: values,
+        reuseValues
+    });
+    if (replicas > 0) {
+        await waitForResourceByLabel({
+            resource: 'pods',
+            namespace,
+            label: 'app.kubernetes.io/name=bot',
+            condition: 'PodReadyToStartContainers'
+        });
+    }
+}
+/**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */ export async function uninstallTransferBot(namespace, logger) {
+    const instanceName = `${namespace}-bot-transfers`;
+    logger.info(`Uninstalling transfer bot release ${instanceName}`);
+    await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+    // Ensure any leftover pods are removed
+    await deleteResourceByLabel({
+        resource: 'pods',
+        namespace,
+        label: 'app.kubernetes.io/name=bot'
+    }).catch(()=>undefined);
+}
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */ export async function setValidatorTxDrop({ namespace, enabled, probability, logger }) {
+    const drop = enabled ? 'true' : 'false';
+    const prob = String(probability);
+    const selectors = [
+        'app=validator',
+        'app.kubernetes.io/component=validator'
+    ];
+    let updated = false;
+    for (const selector of selectors){
+        try {
+            const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+            const names = list.stdout.split('\n').map((s)=>s.trim()).filter(Boolean);
+            if (names.length === 0) {
+                continue;
+            }
+            const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+            logger.info(`command: ${cmd}`);
+            await execAsync(cmd);
+            updated = true;
+        } catch (e) {
+            logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+        }
+    }
+    if (!updated) {
+        logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+        return;
+    }
+    // Restart validator pods to ensure env vars take effect and wait for readiness
+    await restartValidators(namespace, logger);
+}
+export async function restartValidators(namespace, logger) {
+    const selectors = [
+        'app=validator',
+        'app.kubernetes.io/component=validator'
+    ];
+    let any = false;
+    for (const selector of selectors){
+        try {
+            const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+            if (!stdout || stdout.trim().length === 0) {
+                continue;
+            }
+            any = true;
+            await deleteResourceByLabel({
+                resource: 'pods',
+                namespace,
+                label: selector
+            });
+        } catch (e) {
+            logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+        }
+    }
+    if (!any) {
+        logger.warn(`No validator pods found to restart in ${namespace}.`);
+        return;
+    }
+    // Wait for either label to be Ready
+    for (const selector of selectors){
+        try {
+            await waitForResourceByLabel({
+                resource: 'pods',
+                namespace,
                label: selector
+            });
+            return;
+        } catch {
+            // try next
+        }
+    }
+    logger.warn(`Validator pods did not report Ready; continuing.`);
+}
 export async function enableValidatorDynamicBootNode(instanceName, namespace, spartanDir, logger) {
     logger.info(`Enabling validator dynamic boot node`);
     await execHelmCommand({
@@ -323,55 +531,107 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
     });
     logger.info(`Validator dynamic boot node enabled`);
 }
-export async function runAlertCheck(config, alerts, logger) {
-    if (isK8sConfig(config)) {
-        const { process, port } = await startPortForward({
-            resource: `svc/metrics-grafana`,
-            namespace: 'metrics',
-            containerPort: config.CONTAINER_METRICS_PORT
-        });
-        const alertChecker = new AlertChecker(logger, {
-            grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
-            grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`
-        });
-        await alertChecker.runAlertCheck(alerts);
-        process.kill();
-    } else {
-        logger.info('Not running alert check in non-k8s environment');
-    }
-}
-export async function updateSequencerConfig(url, config) {
-    const node = createAztecNodeClient(url);
-    await node.setConfig(config);
-}
 export async function getSequencers(namespace) {
-    const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+    const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
     const { stdout } = await execAsync(command);
-
+    const sequencers = stdout.split(' ');
+    logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
+    return sequencers;
 }
-export
-
+export function updateSequencersConfig(env, config) {
+    return withSequencersAdmin(env, async (client)=>{
+        await client.setConfig(config);
+        return client.getConfig();
+    });
+}
+export function getSequencersConfig(env) {
+    return withSequencersAdmin(env, (client)=>client.getConfig());
+}
+export async function withSequencersAdmin(env, fn) {
+    const adminContainerPort = 8880;
+    const namespace = env.NAMESPACE;
     const sequencers = await getSequencers(namespace);
+    const results = [];
     for (const sequencer of sequencers){
-        const { process, port } = await startPortForward({
+        const { process: process1, port } = await startPortForward({
             resource: `pod/${sequencer}`,
             namespace,
-            containerPort
+            containerPort: adminContainerPort
        });
        const url = `http://localhost:${port}`;
-        await
-
+        await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward node admin port', makeBackoff([
+            1,
+            1,
+            2,
+            6
+        ]), logger, true);
+        const client = createAztecNodeAdminClient(url);
+        results.push(await fn(client));
+        process1.kill();
    }
+    return results;
 }
-
-
-
-
-
-
+/**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */ export async function getPublicViemClient(env, /** If set, will push the new process into it */ processes) {
+    const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+    if (CREATE_ETH_DEVNET) {
+        logger.info(`Creating port forward to eth execution node`);
+        const { process: process1, port } = await startPortForward({
+            resource: `svc/${NAMESPACE}-eth-execution`,
+            namespace: NAMESPACE,
+            containerPort: 8545
+        });
+        const url = `http://127.0.0.1:${port}`;
+        const client = createPublicClient({
+            transport: fallback([
+                http(url)
+            ])
        });
+        if (processes) {
+            processes.push(process1);
+        }
+        return {
+            url,
+            client,
+            process: process1
+        };
    } else {
-
+        logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+        if (!L1_RPC_URLS_JSON) {
+            throw new Error(`L1_RPC_URLS_JSON is not defined`);
+        }
+        const client = createPublicClient({
+            transport: fallback([
+                http(L1_RPC_URLS_JSON)
+            ])
+        });
+        return {
+            url: L1_RPC_URLS_JSON,
+            client
+        };
+    }
+}
+/** Queries an Aztec node for the L1 deployment addresses */ export async function getL1DeploymentAddresses(env) {
+    let forwardProcess;
+    try {
+        const [sequencer] = await getSequencers(env.NAMESPACE);
+        const { process: process1, port } = await startPortForward({
+            resource: `pod/${sequencer}`,
+            namespace: env.NAMESPACE,
+            containerPort: 8080
+        });
+        forwardProcess = process1;
+        const url = `http://127.0.0.1:${port}`;
+        const node = createAztecNodeClient(url);
+        return await retry(()=>node.getNodeInfo().then((i)=>i.l1ContractAddresses), 'get node info', makeBackoff([
+            1,
+            3,
+            6
+        ]), logger);
+    } finally{
+        forwardProcess?.kill();
    }
 }
 /**
@@ -443,3 +703,20 @@ export async function updateSequencersConfig(env, config) {
         label: 'app=pxe'
     });
 }
+/**
+ * Returns the absolute path to the git repository root
+ */ export function getGitProjectRoot() {
+    try {
+        const rootDir = execSync('git rev-parse --show-toplevel', {
+            encoding: 'utf-8',
+            stdio: [
+                'ignore',
+                'pipe',
+                'ignore'
+            ]
+        }).trim();
+        return rootDir;
+    } catch (error) {
+        throw new Error(`Failed to determine git project root: ${error}`);
+    }
+}