@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.03f7ef2
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/benchmark.d.ts +61 -0
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
- package/dest/bench/client_flows/benchmark.js +261 -0
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +80 -0
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
- package/dest/bench/client_flows/client_flows_benchmark.js +336 -0
- package/dest/bench/client_flows/config.d.ts +14 -0
- package/dest/bench/client_flows/config.d.ts.map +1 -0
- package/dest/bench/client_flows/config.js +106 -0
- package/dest/bench/client_flows/data_extractor.d.ts +2 -0
- package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
- package/dest/bench/client_flows/data_extractor.js +77 -0
- package/dest/bench/utils.d.ts +12 -38
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +26 -66
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +21 -13
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +20 -25
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +51 -70
- package/dest/e2e_deploy_contract/deploy_test.d.ts +16 -8
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +13 -19
- package/dest/e2e_epochs/epochs_test.d.ts +65 -22
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +233 -49
- package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
- package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
- package/dest/e2e_fees/bridging_race.notest.js +63 -0
- package/dest/e2e_fees/fees_test.d.ts +27 -12
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +106 -109
- package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
- package/dest/e2e_l1_publisher/write_json.js +58 -0
- package/dest/e2e_multi_validator/utils.d.ts +12 -0
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
- package/dest/e2e_multi_validator/utils.js +214 -0
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +10 -7
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +24 -20
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +136 -0
- package/dest/e2e_p2p/p2p_network.d.ts +276 -23
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +188 -133
- package/dest/e2e_p2p/shared.d.ts +43 -7
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +164 -19
- package/dest/e2e_token_contract/token_contract_test.d.ts +12 -6
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +50 -26
- package/dest/fixtures/e2e_prover_test.d.ts +61 -0
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
- package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +108 -112
- package/dest/fixtures/fixtures.d.ts +6 -8
- package/dest/fixtures/fixtures.d.ts.map +1 -1
- package/dest/fixtures/fixtures.js +5 -5
- package/dest/fixtures/get_acvm_config.d.ts +2 -2
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_acvm_config.js +3 -15
- package/dest/fixtures/get_bb_config.d.ts +2 -2
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +10 -17
- package/dest/fixtures/index.d.ts +1 -1
- package/dest/fixtures/l1_to_l2_messaging.d.ts +11 -7
- package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
- package/dest/fixtures/l1_to_l2_messaging.js +45 -19
- package/dest/fixtures/logging.d.ts +1 -1
- package/dest/fixtures/setup_p2p_test.d.ts +15 -14
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +82 -22
- package/dest/fixtures/snapshot_manager.d.ts +20 -14
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +154 -140
- package/dest/fixtures/token_utils.d.ts +10 -4
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +28 -12
- package/dest/fixtures/utils.d.ts +95 -54
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +456 -389
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
- package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
- package/dest/fixtures/with_telemetry_utils.js +2 -2
- package/dest/index.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts +2 -2
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +42 -35
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +104 -50
- package/dest/shared/gas_portal_test_harness.d.ts +29 -31
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +51 -30
- package/dest/shared/index.d.ts +1 -1
- package/dest/shared/jest_setup.d.ts +1 -1
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +6 -4
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +16 -13
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +149 -117
- package/dest/simulators/index.d.ts +1 -1
- package/dest/simulators/lending_simulator.d.ts +7 -11
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +16 -17
- package/dest/simulators/token_simulator.d.ts +6 -3
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +16 -13
- package/dest/spartan/setup_test_wallets.d.ts +26 -11
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +202 -58
- package/dest/spartan/tx_metrics.d.ts +39 -0
- package/dest/spartan/tx_metrics.d.ts.map +1 -0
- package/dest/spartan/tx_metrics.js +95 -0
- package/dest/spartan/utils.d.ts +129 -313
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +559 -151
- package/package.json +65 -58
- package/src/bench/client_flows/benchmark.ts +341 -0
- package/src/bench/client_flows/client_flows_benchmark.ts +450 -0
- package/src/bench/client_flows/config.ts +61 -0
- package/src/bench/client_flows/data_extractor.ts +89 -0
- package/src/bench/utils.ts +22 -76
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +70 -107
- package/src/e2e_deploy_contract/deploy_test.ts +24 -39
- package/src/e2e_epochs/epochs_test.ts +299 -65
- package/src/e2e_fees/bridging_race.notest.ts +80 -0
- package/src/e2e_fees/fees_test.ts +151 -141
- package/src/e2e_l1_publisher/write_json.ts +77 -0
- package/src/e2e_multi_validator/utils.ts +258 -0
- package/src/e2e_nested_contract/nested_contract_test.ts +29 -19
- package/src/e2e_p2p/inactivity_slash_test.ts +179 -0
- package/src/e2e_p2p/p2p_network.ts +274 -171
- package/src/e2e_p2p/shared.ts +251 -29
- package/src/e2e_token_contract/token_contract_test.ts +43 -39
- package/src/fixtures/dumps/epoch_proof_result.json +1 -1
- package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +113 -160
- package/src/fixtures/fixtures.ts +5 -7
- package/src/fixtures/get_acvm_config.ts +4 -12
- package/src/fixtures/get_bb_config.ts +18 -13
- package/src/fixtures/l1_to_l2_messaging.ts +56 -24
- package/src/fixtures/setup_p2p_test.ts +127 -39
- package/src/fixtures/snapshot_manager.ts +196 -162
- package/src/fixtures/token_utils.ts +32 -15
- package/src/fixtures/utils.ts +562 -475
- package/src/fixtures/web3signer.ts +63 -0
- package/src/fixtures/with_telemetry_utils.ts +2 -2
- package/src/guides/up_quick_start.sh +7 -15
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +112 -80
- package/src/shared/gas_portal_test_harness.ts +59 -50
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +187 -192
- package/src/simulators/lending_simulator.ts +15 -16
- package/src/simulators/token_simulator.ts +21 -13
- package/src/spartan/DEVELOP.md +128 -0
- package/src/spartan/setup_test_wallets.ts +252 -93
- package/src/spartan/tx_metrics.ts +130 -0
- package/src/spartan/utils.ts +641 -146
- package/dest/e2e_prover/e2e_prover_test.d.ts +0 -56
- package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
- package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
- package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
- package/dest/fixtures/setup_l1_contracts.js +0 -17
- package/dest/sample-dapp/connect.js +0 -12
- package/dest/sample-dapp/contracts.js +0 -10
- package/dest/sample-dapp/deploy.js +0 -35
- package/dest/sample-dapp/index.js +0 -98
- package/src/fixtures/setup_l1_contracts.ts +0 -27
- package/src/sample-dapp/connect.mjs +0 -16
- package/src/sample-dapp/contracts.mjs +0 -14
- package/src/sample-dapp/deploy.mjs +0 -40
- package/src/sample-dapp/index.mjs +0 -128
package/src/spartan/utils.ts
CHANGED
@@ -1,89 +1,90 @@
-import {
-import type { RollupCheatCodes } from '@aztec/aztec
+import { createLogger } from '@aztec/aztec.js/log';
+import type { RollupCheatCodes } from '@aztec/aztec/testing';
+import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+import type { ViemPublicClient } from '@aztec/ethereum/types';
+import type { CheckpointNumber } from '@aztec/foundation/branded-types';
 import type { Logger } from '@aztec/foundation/log';
-import
+import { promiseWithResolvers } from '@aztec/foundation/promise';
+import { makeBackoff, retry } from '@aztec/foundation/retry';
+import { schemas } from '@aztec/foundation/schemas';
+import { sleep } from '@aztec/foundation/sleep';
+import {
+  type AztecNodeAdmin,
+  type AztecNodeAdminConfig,
+  createAztecNodeAdminClient,
+  createAztecNodeClient,
+} from '@aztec/stdlib/interfaces/client';
 
 import { ChildProcess, exec, execSync, spawn } from 'child_process';
 import path from 'path';
 import { promisify } from 'util';
+import { createPublicClient, fallback, http } from 'viem';
 import { z } from 'zod';
 
-import { AlertChecker, type AlertConfig } from '../quality_of_service/alert_checker.js';
-
 const execAsync = promisify(exec);
 
 const logger = createLogger('e2e:k8s-utils');
 
-const
-
-
-
-
-
-
-
-
-  }),
-  'ETHEREUM_HOSTS must be a comma-separated list of valid URLs',
-);
-
-const k8sLocalConfigSchema = z.object({
-  ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
-  AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
-  AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
-  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
-  INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
-  NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
-  CONTAINER_NODE_PORT: z.coerce.number().default(8080),
-  CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
-  CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
-  CONTAINER_PXE_PORT: z.coerce.number().default(8080),
-  CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
-  CONTAINER_METRICS_PORT: z.coerce.number().default(80),
-  GRAFANA_PASSWORD: z.string().optional(),
-  METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
-  SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
-  ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
-  L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
-  SEPOLIA_RUN: z.string().default('false'),
-  K8S: z.literal('local'),
+const testConfigSchema = z.object({
+  NAMESPACE: z.string().default('scenario'),
+  REAL_VERIFIER: schemas.Boolean.optional().default(true),
+  CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+  L1_RPC_URLS_JSON: z.string().optional(),
+  L1_ACCOUNT_MNEMONIC: z.string().optional(),
+  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+  AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
+  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
 });
 
-
-  K8S: z.literal('gcloud'),
-  CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
-  REGION: z.string().min(1, 'REGION env variable must be set'),
-});
+export type TestConfig = z.infer<typeof testConfigSchema>;
 
-
-
-
-
-
-});
-
-const envSchema = z.discriminatedUnion('K8S', [k8sLocalConfigSchema, k8sGCloudConfigSchema, directConfigSchema]);
+export function setupEnvironment(env: unknown): TestConfig {
+  const config = testConfigSchema.parse(env);
+  logger.warn(`Loaded env config`, config);
+  return config;
+}
 
-
-
-
-
+/**
+ * @param path - The path to the script, relative to the project root
+ * @param args - The arguments to pass to the script
+ * @param logger - The logger to use
+ * @returns The exit code of the script
+ */
+function runScript(path: string, args: string[], logger: Logger, env?: Record<string, string>) {
+  const childProcess = spawn(path, args, {
+    stdio: ['ignore', 'pipe', 'pipe'],
+    env: env ? { ...process.env, ...env } : process.env,
+  });
+  return new Promise<number>((resolve, reject) => {
+    childProcess.on('close', (code: number | null) => resolve(code ?? 0));
+    childProcess.on('error', reject);
+    childProcess.stdout?.on('data', (data: Buffer) => {
+      logger.info(data.toString());
+    });
+    childProcess.stderr?.on('data', (data: Buffer) => {
+      logger.error(data.toString());
+    });
+  });
+}
 
-export function
-  return
+export function getAztecBin() {
+  return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
 }
 
-
-
+/**
+ * Runs the Aztec binary
+ * @param args - The arguments to pass to the Aztec binary
+ * @param logger - The logger to use
+ * @param env - Optional environment variables to set for the process
+ * @returns The exit code of the Aztec binary
+ */
+export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
+  return runScript('node', [getAztecBin(), ...args], logger, env);
 }
 
-export function
-  const
-
-  const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
-  execSync(command);
-  }
-  return config;
+export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
+  const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+  return runScript(scriptPath, args, logger, env);
 }
 
 export async function startPortForward({
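The old `K8S`-discriminated env schemas (local/gcloud/direct) give way to a single flat `testConfigSchema` with defaults, parsed once by the new `setupEnvironment` entrypoint. A minimal usage sketch, assuming a test imports from this module (the import path and printed fields are illustrative, not part of the diff):

```ts
import { setupEnvironment, type TestConfig } from './utils.js'; // path is an assumption

// Validate process.env once; unset variables fall back to schema defaults.
const config: TestConfig = setupEnvironment(process.env);

console.log(config.NAMESPACE); // 'scenario' unless NAMESPACE is set
console.log(config.AZTEC_EPOCH_DURATION); // 32 by default
```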
@@ -103,7 +104,7 @@ export async function startPortForward({
 }> {
   const hostPortAsString = hostPort ? hostPort.toString() : '';
 
-  logger.
+  logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
 
   const process = spawn(
     'kubectl',
@@ -121,21 +122,20 @@ export async function startPortForward({
       const str = data.toString() as string;
       if (!isResolved && str.includes('Forwarding from')) {
         isResolved = true;
-        logger.
+        logger.debug(`Port forward for ${resource}: ${str}`);
         const port = str.search(/:\d+/);
         if (port === -1) {
           throw new Error('Port not found in port forward output');
         }
         const portNumber = parseInt(str.slice(port + 1));
-        logger.
-        logger.info(`Port forward connected: ${portNumber}`);
+        logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
         resolve(portNumber);
       } else {
         logger.silent(str);
       }
     });
     process.stderr?.on('data', data => {
-      logger.
+      logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
       // It's a strange thing:
       // If we don't pipe stderr, then the port forwarding does not work.
       // Log to silent because this doesn't actually report errors,
|
|
|
145
145
|
process.on('close', () => {
|
|
146
146
|
if (!isResolved) {
|
|
147
147
|
isResolved = true;
|
|
148
|
-
logger.warn(
|
|
148
|
+
logger.warn(`Port forward for ${resource} closed before connection established`);
|
|
149
149
|
resolve(0);
|
|
150
150
|
}
|
|
151
151
|
});
|
|
152
152
|
process.on('error', error => {
|
|
153
|
-
logger.error(`Port forward error: ${error}`);
|
|
153
|
+
logger.error(`Port forward for ${resource} error: ${error}`);
|
|
154
154
|
resolve(0);
|
|
155
155
|
});
|
|
156
156
|
process.on('exit', code => {
|
|
157
|
-
logger.
|
|
157
|
+
logger.verbose(`Port forward for ${resource} exited with code ${code}`);
|
|
158
158
|
resolve(0);
|
|
159
159
|
});
|
|
160
160
|
});
|
|
@@ -164,6 +164,55 @@ export async function startPortForward({
   return { process, port };
 }
 
+export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
+  const { promise, resolve, reject } = promiseWithResolvers<string>();
+  const process = spawn(
+    'kubectl',
+    [
+      'get',
+      'service',
+      '-n',
+      namespace,
+      `${namespace}-${serviceName}`,
+      '--output',
+      "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
+    ],
+    {
+      stdio: 'pipe',
+    },
+  );
+
+  let ip = '';
+  process.stdout.on('data', data => {
+    ip += data;
+  });
+  process.on('error', err => {
+    reject(err);
+  });
+  process.on('exit', () => {
+    // kubectl prints JSON. Remove the quotes
+    resolve(ip.replace(/"|'/g, ''));
+  });
+
+  return promise;
+}
+
+export function startPortForwardForRPC(namespace: string, index = 0) {
+  return startPortForward({
+    resource: `pod/${namespace}-rpc-aztec-node-${index}`,
+    namespace,
+    containerPort: 8080,
+  });
+}
+
+export function startPortForwardForEthereum(namespace: string) {
+  return startPortForward({
+    resource: `services/${namespace}-eth-execution`,
+    namespace,
+    containerPort: 8545,
+  });
+}
+
 export async function deleteResourceByName({
   resource,
   namespace,
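`startPortForwardForRPC` and `startPortForwardForEthereum` simply pin the pod/service naming and port conventions of the spartan charts onto `startPortForward`. A hypothetical call site (the namespace value is illustrative):

```ts
// Forward the first RPC node and the eth execution service; the caller
// owns the kubectl child processes and must kill them when done.
const rpc = await startPortForwardForRPC('scenario');
const eth = await startPortForwardForEthereum('scenario');
try {
  console.log(`RPC on localhost:${rpc.port}, L1 on localhost:${eth.port}`);
} finally {
  rpc.process.kill();
  eth.process.kill();
}
```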
@@ -187,12 +236,25 @@ export async function deleteResourceByLabel({
   resource,
   namespace,
   label,
+  timeout = '5m',
+  force = false,
 }: {
   resource: string;
   namespace: string;
   label: string;
+  timeout?: string;
+  force?: boolean;
 }) {
-
+  try {
+    await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq "^${resource}(\\\\..+)?$"`);
+  } catch (error) {
+    logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+    return '';
+  }
+
+  const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
+    force ? '--force' : ''
+  }`;
   logger.info(`command: ${command}`);
   const { stdout } = await execAsync(command);
   return stdout;
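The new existence check makes deletions of CRD-backed kinds (such as the chaos-mesh resources handled later in this file) a no-op on clusters where the CRD is absent, while `timeout` and `force` give callers an escape hatch when finalizers hang. For example (argument values are illustrative):

```ts
await deleteResourceByLabel({
  resource: 'networkchaos',
  namespace: 'scenario',
  label: 'app.kubernetes.io/instance=network-shaping',
  timeout: '2m',
  force: true, // appends --force to the kubectl delete
});
```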
@@ -221,9 +283,18 @@ export function getChartDir(spartanDir: string, chartName: string) {
   return path.join(spartanDir.trim(), chartName);
 }
 
-function
+function shellQuote(value: string) {
+  // Single-quote safe shell escaping: ' -> '\''
+  return `'${value.replace(/'/g, "'\\''")}'`;
+}
+
+function valuesToArgs(values: Record<string, string | number | boolean>) {
   return Object.entries(values)
-    .map(([key, value]) =>
+    .map(([key, value]) =>
+      typeof value === 'number' || typeof value === 'boolean'
+        ? `--set ${key}=${value}`
+        : `--set-string ${key}=${shellQuote(String(value))}`,
+    )
     .join(' ');
 }
 
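With the widened value type, numbers and booleans are passed via `--set` (so Helm keeps their scalar types) while strings go through `--set-string` with single-quote shell escaping. Assuming the helpers behave as written above:

```ts
valuesToArgs({ 'bot.replicaCount': 2, 'bot.followChain': 'PENDING', 'bot.rbac.create': false });
// => "--set bot.replicaCount=2 --set-string bot.followChain='PENDING' --set bot.rbac.create=false"
```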
@@ -241,7 +312,7 @@ function createHelmCommand({
   namespace: string;
   valuesFile: string | undefined;
   timeout: string;
-  values: Record<string, string | number>;
+  values: Record<string, string | number | boolean>;
   reuseValues?: boolean;
 }) {
   const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -258,6 +329,61 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
   return stdout;
 }
 
+async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
+  try {
+    const { stdout } = await execAsync(
+      `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
+    );
+    const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
+    const row = parsed.find(r => r.name === instanceName);
+    return row?.status;
+  } catch {
+    return undefined;
+  }
+}
+
+async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
+  const labelSelector = `owner=helm,name=${instanceName}`;
+  const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+  logger.warn(`Force deleting Helm release record: ${cmd}`);
+  await execAsync(cmd).catch(() => undefined);
+}
+
+async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
+  try {
+    const status = await getHelmReleaseStatus(instanceName, namespace);
+    return status?.toLowerCase() === 'deployed';
+  } catch {
+    return false;
+  }
+}
+
+export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
+  // uninstall the helm chart if it exists
+  logger.info(`Uninstalling helm chart ${instanceName}`);
+  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+  // and delete the chaos-mesh resources created by this release
+  const deleteByLabel = async (resource: string) => {
+    const args = {
+      resource,
+      namespace: namespace,
+      label: `app.kubernetes.io/instance=${instanceName}`,
+    } as const;
+    logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+    await deleteResourceByLabel(args).catch(e => {
+      logger.error(`Error deleting ${resource}: ${e}`);
+      logger.info(`Force deleting ${resource}`);
+      return deleteResourceByLabel({ ...args, force: true });
+    });
+  };
+
+  await deleteByLabel('podchaos');
+  await deleteByLabel('networkchaos');
+  await deleteByLabel('podnetworkchaos');
+  await deleteByLabel('workflows');
+  await deleteByLabel('workflownodes');
+}
+
 /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
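Together these helpers let setup code recover from Helm releases stuck in non-deployed states before retrying an install. A teardown sketch under that assumption (release and namespace names are illustrative; `hasDeployedHelmRelease` is module-internal):

```ts
// Remove a chaos scenario release plus any chaos-mesh resources it left behind.
await uninstallChaosMesh('validator-kill', 'scenario', logger);

// Inside this module: only pass --reuse-values to helm when a deployed release exists.
const reuse = await hasDeployedHelmRelease('scenario-bot-transfers', 'scenario');
```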
@@ -280,8 +406,7 @@ export async function installChaosMeshChart({
   targetNamespace,
   valuesFile,
   helmChartDir,
-
-  timeout = '5m',
+  timeout = '10m',
   clean = true,
   values = {},
   logger,
@@ -297,27 +422,13 @@ export async function installChaosMeshChart({
   logger: Logger;
 }) {
   if (clean) {
-
-    logger.info(`Uninstalling helm chart ${instanceName}`);
-    await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-    // and delete the podchaos resource
-    const deleteArgs = {
-      resource: 'podchaos',
-      namespace: chaosMeshNamespace,
-      name: `${targetNamespace}-${instanceName}`,
-    };
-    logger.info(`Deleting podchaos resource`);
-    await deleteResourceByName(deleteArgs).catch(e => {
-      logger.error(`Error deleting podchaos resource: ${e}`);
-      logger.info(`Force deleting podchaos resource`);
-      return deleteResourceByName({ ...deleteArgs, force: true });
-    });
+    await uninstallChaosMesh(instanceName, targetNamespace, logger);
   }
 
   return execHelmCommand({
     instanceName,
     helmChartDir,
-    namespace:
+    namespace: targetNamespace,
     valuesFile,
     timeout,
     values: { ...values, 'global.targetNamespace': targetNamespace },
@@ -351,18 +462,22 @@ export function applyProverKill({
   namespace,
   spartanDir,
   logger,
+  values,
 }: {
   namespace: string;
   spartanDir: string;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
     instanceName: 'prover-kill',
     targetNamespace: namespace,
     valuesFile: 'prover-kill.yaml',
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+    chaosMeshNamespace: namespace,
     clean: true,
     logger,
+    values,
   });
 }
 
@@ -370,10 +485,12 @@ export function applyProverBrokerKill({
   namespace,
   spartanDir,
   logger,
+  values,
 }: {
   namespace: string;
   spartanDir: string;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
     instanceName: 'prover-broker-kill',
@@ -382,63 +499,76 @@ export function applyProverBrokerKill({
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
     clean: true,
     logger,
+    values,
   });
 }
 
 export function applyBootNodeFailure({
+  instanceName = 'boot-node-failure',
   namespace,
   spartanDir,
   durationSeconds,
   logger,
+  values,
 }: {
+  instanceName?: string;
   namespace: string;
   spartanDir: string;
   durationSeconds: number;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
-    instanceName
+    instanceName,
     targetNamespace: namespace,
     valuesFile: 'boot-node-failure.yaml',
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
     values: {
       'bootNodeFailure.duration': `${durationSeconds}s`,
+      ...(values ?? {}),
     },
     logger,
   });
 }
 
 export function applyValidatorKill({
+  instanceName = 'validator-kill',
   namespace,
   spartanDir,
   logger,
+  values,
 }: {
+  instanceName?: string;
   namespace: string;
   spartanDir: string;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
-    instanceName
+    instanceName,
     targetNamespace: namespace,
     valuesFile: 'validator-kill.yaml',
    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
     logger,
+    values,
   });
 }
 
 export function applyNetworkShaping({
+  instanceName = 'network-shaping',
   valuesFile,
   namespace,
   spartanDir,
   logger,
 }: {
+  instanceName?: string;
   valuesFile: string;
   namespace: string;
   spartanDir: string;
   logger: Logger;
 }) {
   return installChaosMeshChart({
-    instanceName
+    instanceName,
     targetNamespace: namespace,
     valuesFile,
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
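Since `instanceName` and extra `values` are now parameters, several instances of the same chaos scenario can coexist in one namespace. A hypothetical call (the `values` key is illustrative, not taken from the chart):

```ts
await applyValidatorKill({
  instanceName: 'validator-kill-2', // distinct Helm release, does not clobber the default
  namespace: 'scenario',
  spartanDir,
  logger,
  values: { 'validatorKill.percent': 50 }, // illustrative chart value
});
```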
@@ -446,35 +576,283 @@ export function applyNetworkShaping({
   });
 }
 
-export async function
+export async function awaitCheckpointNumber(
   rollupCheatCodes: RollupCheatCodes,
-
+  checkpointNumber: CheckpointNumber,
   timeoutSeconds: number,
   logger: Logger,
 ) {
-  logger.info(`Waiting for
+  logger.info(`Waiting for checkpoint ${checkpointNumber}`);
   let tips = await rollupCheatCodes.getTips();
   const endTime = Date.now() + timeoutSeconds * 1000;
-  while (tips.pending <
-    logger.info(`At
+  while (tips.pending < checkpointNumber && Date.now() < endTime) {
+    logger.info(`At checkpoint ${tips.pending}`);
     await sleep(1000);
     tips = await rollupCheatCodes.getTips();
   }
-  if (tips.pending <
-    throw new Error(`Timeout waiting for
+  if (tips.pending < checkpointNumber) {
+    throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
   } else {
-    logger.info(`Reached
+    logger.info(`Reached checkpoint ${tips.pending}`);
   }
 }
 
 export async function restartBot(namespace: string, logger: Logger) {
   logger.info(`Restarting bot`);
-  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
   await sleep(10 * 1000);
-
+  // Some bot images may take time to report Ready due to heavy boot-time proving.
+  // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
+  await waitForResourceByLabel({
+    resource: 'pods',
+    namespace,
+    label: 'app.kubernetes.io/name=bot',
+    condition: 'PodReadyToStartContainers',
+  });
   logger.info(`Bot restarted`);
 }
 
+/**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */
+export async function installTransferBot({
+  namespace,
+  spartanDir,
+  logger,
+  replicas = 1,
+  txIntervalSeconds = 10,
+  followChain = 'PENDING',
+  mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
+  mnemonicStartIndex,
+  botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
+  nodeUrl,
+  timeout = '15m',
+  reuseValues = true,
+  aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
+}: {
+  namespace: string;
+  spartanDir: string;
+  logger: Logger;
+  replicas?: number;
+  txIntervalSeconds?: number;
+  followChain?: string;
+  mnemonic?: string;
+  mnemonicStartIndex?: number | string;
+  botPrivateKey?: string;
+  nodeUrl?: string;
+  timeout?: string;
+  reuseValues?: boolean;
+  aztecSlotDuration?: number;
+}) {
+  const instanceName = `${namespace}-bot-transfers`;
+  const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+  const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+
+  logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+
+  const values: Record<string, string | number | boolean> = {
+    'bot.replicaCount': replicas,
+    'bot.txIntervalSeconds': txIntervalSeconds,
+    'bot.followChain': followChain,
+    'bot.botPrivateKey': botPrivateKey,
+    'bot.nodeUrl': resolvedNodeUrl,
+    'bot.mnemonic': mnemonic,
+    'bot.feePaymentMethod': 'fee_juice',
+    'aztec.slotDuration': aztecSlotDuration,
+    // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+    // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+    'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+    // Provide L1 execution RPC for bridging fee juice
+    'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+    // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+    'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+
+    // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+    // can be installed by users without cluster-scoped RBAC permissions.
+    'bot.rbac.create': false,
+    'bot.serviceAccount.create': false,
+    'bot.serviceAccount.name': 'default',
+  };
+  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+  if (mnemonicStartIndex === undefined) {
+    values['bot.mnemonicStartIndex'] = 0;
+  }
+  // Also pass a funded private key directly if available
+  if (process.env.FUNDING_PRIVATE_KEY) {
+    values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+  }
+  // Align bot image with the running network image: prefer env var, else detect from a validator pod
+  let repositoryFromEnv: string | undefined;
+  let tagFromEnv: string | undefined;
+  const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+  if (aztecDockerImage && aztecDockerImage.includes(':')) {
+    const lastColon = aztecDockerImage.lastIndexOf(':');
+    repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+    tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+  }
+
+  let repository = repositoryFromEnv;
+  let tag = tagFromEnv;
+  if (!repository || !tag) {
+    try {
+      const { stdout } = await execAsync(
+        `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+      );
+      const image = stdout.trim().replace(/^'|'$/g, '');
+      if (image && image.includes(':')) {
+        const lastColon = image.lastIndexOf(':');
+        repository = image.slice(0, lastColon);
+        tag = image.slice(lastColon + 1);
+      }
+    } catch (err) {
+      logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+    }
+  }
+  if (repository && tag) {
+    values['global.aztecImage.repository'] = repository;
+    values['global.aztecImage.tag'] = tag;
+  }
+  if (mnemonicStartIndex !== undefined) {
+    values['bot.mnemonicStartIndex'] =
+      typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+  }
+
+  // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+  // `helm upgrade --install` can error with "has no deployed releases".
+  // In that case, clear the release record and do a clean install.
+  const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+  if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+    logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+    await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
+      () => undefined,
+    );
+    // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+    const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+    if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+      await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+    }
+  }
+
+  // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+  // Only reuse values when we have a deployed release to reuse from.
+  const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
+
+  await execHelmCommand({
+    instanceName,
+    helmChartDir,
+    namespace,
+    valuesFile: undefined,
+    timeout,
+    values: values as unknown as Record<string, string | number | boolean>,
+    reuseValues: effectiveReuseValues,
+  });
+
+  if (replicas > 0) {
+    await waitForResourceByLabel({
+      resource: 'pods',
+      namespace,
+      label: 'app.kubernetes.io/name=bot',
+      condition: 'PodReadyToStartContainers',
+    });
+  }
+}
+
+/**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */
+export async function uninstallTransferBot(namespace: string, logger: Logger) {
+  const instanceName = `${namespace}-bot-transfers`;
+  logger.info(`Uninstalling transfer bot release ${instanceName}`);
+  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+  // Ensure any leftover pods are removed
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
+    () => undefined,
+  );
+}
+
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */
+export async function setValidatorTxDrop({
+  namespace,
+  enabled,
+  probability,
+  logger,
+}: {
+  namespace: string;
+  enabled: boolean;
+  probability: number;
+  logger: Logger;
+}) {
+  const drop = enabled ? 'true' : 'false';
+  const prob = String(probability);
+
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+  let updated = false;
+  for (const selector of selectors) {
+    try {
+      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      const names = list.stdout
+        .split('\n')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (names.length === 0) {
+        continue;
+      }
+      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+      logger.info(`command: ${cmd}`);
+      await execAsync(cmd);
+      updated = true;
+    } catch (e) {
+      logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!updated) {
+    logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+    return;
+  }
+
+  // Restart validator pods to ensure env vars take effect and wait for readiness
+  await restartValidators(namespace, logger);
+}
+
+export async function restartValidators(namespace: string, logger: Logger) {
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+  let any = false;
+  for (const selector of selectors) {
+    try {
+      const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      if (!stdout || stdout.trim().length === 0) {
+        continue;
+      }
+      any = true;
+      await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
+    } catch (e) {
+      logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!any) {
+    logger.warn(`No validator pods found to restart in ${namespace}.`);
+    return;
+  }
+
+  // Wait for either label to be Ready
+  for (const selector of selectors) {
+    try {
+      await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
+      return;
+    } catch {
+      // try next
+    }
+  }
+  logger.warn(`Validator pods did not report Ready; continuing.`);
+}
+
 export async function enableValidatorDynamicBootNode(
   instanceName: string,
   namespace: string,
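The transfer-bot install path is written to be idempotent: detect a stuck release, reinstall cleanly, and only reuse values when a deployed release exists. A sketch of the intended setup/teardown pairing in a test (argument values are illustrative):

```ts
await installTransferBot({ namespace: env.NAMESPACE, spartanDir, logger, replicas: 1, txIntervalSeconds: 5 });
try {
  // ... run assertions while the bot generates L2 traffic ...
} finally {
  await uninstallTransferBot(env.NAMESPACE, logger);
}
```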
@@ -497,64 +875,133 @@ export async function enableValidatorDynamicBootNode(
   logger.info(`Validator dynamic boot node enabled`);
 }
 
-export async function
-
-
-
-
-
-
-
-
-
-
-
-
-
+export async function getSequencers(namespace: string) {
+  const selectors = [
+    'app.kubernetes.io/name=validator',
+    'app.kubernetes.io/component=validator',
+    'app.kubernetes.io/component=sequencer-node',
+    'app=validator',
+  ];
+  for (const selector of selectors) {
+    try {
+      const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+      const { stdout } = await execAsync(command);
+      const sequencers = stdout
+        .split(' ')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (sequencers.length > 0) {
+        logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+        return sequencers;
+      }
+    } catch {
+      // try next selector
+    }
   }
+
+  // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+  throw new Error(
+    `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+  );
 }
 
-export
-
-
+export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+  return withSequencersAdmin(env, async client => {
+    await client.setConfig(config);
+    return client.getConfig();
+  });
 }
 
-export
-
-  const { stdout } = await execAsync(command);
-  return stdout.split(' ');
+export function getSequencersConfig(env: TestConfig) {
+  return withSequencersAdmin(env, client => client.getConfig());
 }
 
-export async function
-
-  namespace
-  config: Partial<SequencerConfig>;
-}) {
-  const { containerPort, namespace, config } = args;
+export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+  const adminContainerPort = 8880;
+  const namespace = env.NAMESPACE;
   const sequencers = await getSequencers(namespace);
+  const results = [];
+
   for (const sequencer of sequencers) {
     const { process, port } = await startPortForward({
       resource: `pod/${sequencer}`,
       namespace,
-      containerPort,
+      containerPort: adminContainerPort,
     });
 
     const url = `http://localhost:${port}`;
-    await
+    await retry(
+      () => fetch(`${url}/status`).then(res => res.status === 200),
+      'forward node admin port',
+      makeBackoff([1, 1, 2, 6]),
+      logger,
+      true,
+    );
+    const client = createAztecNodeAdminClient(url);
+    results.push(await fn(client));
     process.kill();
   }
+
+  return results;
 }
 
-
-
-
-
-
-
+/**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */
+export async function getPublicViemClient(
+  env: TestConfig,
+  /** If set, will push the new process into it */
+  processes?: ChildProcess[],
+): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
+  const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+  if (CREATE_ETH_DEVNET) {
+    logger.info(`Creating port forward to eth execution node`);
+    const { process, port } = await startPortForward({
+      resource: `svc/${NAMESPACE}-eth-execution`,
+      namespace: NAMESPACE,
+      containerPort: 8545,
     });
+    const url = `http://127.0.0.1:${port}`;
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
+    if (processes) {
+      processes.push(process);
+    }
+    return { url, client, process };
   } else {
-
+    logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+    if (!L1_RPC_URLS_JSON) {
+      throw new Error(`L1_RPC_URLS_JSON is not defined`);
+    }
+    const client: ViemPublicClient = createPublicClient({
+      transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
+    });
+    return { url: L1_RPC_URLS_JSON, client };
+  }
+}
+
+/** Queries an Aztec node for the L1 deployment addresses */
+export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
+  let forwardProcess: ChildProcess | undefined;
+  try {
+    const [sequencer] = await getSequencers(env.NAMESPACE);
+    const { process, port } = await startPortForward({
+      resource: `pod/${sequencer}`,
+      namespace: env.NAMESPACE,
+      containerPort: 8080,
+    });
+
+    forwardProcess = process;
+    const url = `http://127.0.0.1:${port}`;
+    const node = createAztecNodeClient(url);
+    return await retry(
+      () => node.getNodeInfo().then(i => i.l1ContractAddresses),
+      'get node info',
+      makeBackoff([1, 3, 6]),
+      logger,
+    );
+  } finally {
+    forwardProcess?.kill();
   }
 }
 
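`withSequencersAdmin` forwards the admin port (8880) of every sequencer pod, waits for `/status`, and maps the callback over each node. Typical uses per the wrappers above (the config field name is an illustrative assumption):

```ts
// Apply a config change to all sequencers, collecting each resulting config.
const configs = await updateSequencersConfig(env, { minTxsPerBlock: 1 });

// Or run a read-only query against every admin endpoint.
const current = await getSequencersConfig(env);
```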
@@ -580,3 +1027,51 @@ export async function rollAztecPods(namespace: string) {
   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
 }
+
+/**
+ * Returns the absolute path to the git repository root
+ */
+export function getGitProjectRoot(): string {
+  try {
+    const rootDir = execSync('git rev-parse --show-toplevel', {
+      encoding: 'utf-8',
+      stdio: ['ignore', 'pipe', 'ignore'],
+    }).trim();
+
+    return rootDir;
+  } catch (error) {
+    throw new Error(`Failed to determine git project root: ${error}`);
+  }
+}
+
+/** Returns a client to the RPC of the given sequencer (defaults to first) */
+export async function getNodeClient(
+  env: TestConfig,
+  index: number = 0,
+): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
+  const namespace = env.NAMESPACE;
+  const containerPort = 8080;
+  const sequencers = await getSequencers(namespace);
+  const sequencer = sequencers[index];
+  if (!sequencer) {
+    throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+  }
+
+  const { process, port } = await startPortForward({
+    resource: `pod/${sequencer}`,
+    namespace,
+    containerPort,
+  });
+
+  const url = `http://localhost:${port}`;
+  await retry(
+    () => fetch(`${url}/status`).then(res => res.status === 200),
+    'forward port',
+    makeBackoff([1, 1, 2, 6]),
+    logger,
+    true,
+  );
+
+  const client = createAztecNodeClient(url);
+  return { node: client, port, process };
+}