@aztec/end-to-end 3.0.0-canary.a9708bd → 3.0.0-devnet.20251212
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/benchmark.d.ts +4 -3
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/benchmark.js +2 -2
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +25 -14
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.js +111 -90
- package/dest/bench/client_flows/config.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.js +8 -30
- package/dest/bench/utils.d.ts +4 -13
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +10 -34
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +8 -8
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +42 -42
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +12 -9
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +35 -35
- package/dest/e2e_deploy_contract/deploy_test.d.ts +12 -6
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +9 -18
- package/dest/e2e_epochs/epochs_test.d.ts +19 -12
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +30 -22
- package/dest/e2e_fees/bridging_race.notest.d.ts +1 -1
- package/dest/e2e_fees/bridging_race.notest.js +14 -11
- package/dest/e2e_fees/fees_test.d.ts +10 -8
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +35 -38
- package/dest/e2e_l1_publisher/write_json.d.ts +4 -2
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
- package/dest/e2e_l1_publisher/write_json.js +9 -8
- package/dest/e2e_multi_validator/utils.d.ts +2 -2
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -1
- package/dest/e2e_multi_validator/utils.js +4 -10
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +7 -4
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +11 -12
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +139 -0
- package/dest/e2e_p2p/p2p_network.d.ts +238 -17
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +46 -19
- package/dest/e2e_p2p/shared.d.ts +16 -17
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +56 -55
- package/dest/e2e_token_contract/token_contract_test.d.ts +6 -5
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +14 -17
- package/dest/fixtures/e2e_prover_test.d.ts +12 -8
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
- package/dest/fixtures/e2e_prover_test.js +53 -58
- package/dest/fixtures/fixtures.d.ts +1 -1
- package/dest/fixtures/fixtures.d.ts.map +1 -1
- package/dest/fixtures/fixtures.js +1 -1
- package/dest/fixtures/get_acvm_config.d.ts +2 -2
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_acvm_config.js +1 -1
- package/dest/fixtures/get_bb_config.d.ts +2 -2
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +2 -2
- package/dest/fixtures/index.d.ts +1 -1
- package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
- package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
- package/dest/fixtures/l1_to_l2_messaging.js +2 -2
- package/dest/fixtures/logging.d.ts +1 -1
- package/dest/fixtures/setup_l1_contracts.d.ts +476 -5
- package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
- package/dest/fixtures/setup_l1_contracts.js +3 -3
- package/dest/fixtures/setup_p2p_test.d.ts +12 -11
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +50 -24
- package/dest/fixtures/snapshot_manager.d.ts +13 -10
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +66 -51
- package/dest/fixtures/token_utils.d.ts +10 -5
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +17 -18
- package/dest/fixtures/utils.d.ts +479 -35
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +106 -125
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
- package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
- package/dest/fixtures/with_telemetry_utils.js +2 -2
- package/dest/index.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts +2 -2
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +20 -23
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +14 -16
- package/dest/shared/gas_portal_test_harness.d.ts +10 -17
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +11 -8
- package/dest/shared/index.d.ts +1 -1
- package/dest/shared/jest_setup.d.ts +1 -1
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +6 -4
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +12 -8
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +44 -58
- package/dest/simulators/index.d.ts +1 -1
- package/dest/simulators/lending_simulator.d.ts +4 -7
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +8 -5
- package/dest/simulators/token_simulator.d.ts +4 -2
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +2 -2
- package/dest/spartan/setup_test_wallets.d.ts +22 -14
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +144 -86
- package/dest/spartan/tx_metrics.d.ts +39 -0
- package/dest/spartan/tx_metrics.d.ts.map +1 -0
- package/dest/spartan/tx_metrics.js +95 -0
- package/dest/spartan/utils.d.ts +92 -17
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +386 -63
- package/package.json +43 -40
- package/src/bench/client_flows/benchmark.ts +8 -8
- package/src/bench/client_flows/client_flows_benchmark.ts +141 -114
- package/src/bench/client_flows/data_extractor.ts +9 -31
- package/src/bench/utils.ts +9 -37
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +46 -63
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +38 -51
- package/src/e2e_deploy_contract/deploy_test.ts +18 -36
- package/src/e2e_epochs/epochs_test.ts +41 -35
- package/src/e2e_fees/bridging_race.notest.ts +16 -11
- package/src/e2e_fees/fees_test.ts +42 -47
- package/src/e2e_l1_publisher/write_json.ts +12 -9
- package/src/e2e_multi_validator/utils.ts +5 -11
- package/src/e2e_nested_contract/nested_contract_test.ts +15 -13
- package/src/e2e_p2p/inactivity_slash_test.ts +184 -0
- package/src/e2e_p2p/p2p_network.ts +124 -82
- package/src/e2e_p2p/shared.ts +66 -58
- package/src/e2e_token_contract/token_contract_test.ts +17 -17
- package/src/fixtures/e2e_prover_test.ts +60 -97
- package/src/fixtures/fixtures.ts +1 -2
- package/src/fixtures/get_acvm_config.ts +2 -2
- package/src/fixtures/get_bb_config.ts +3 -2
- package/src/fixtures/l1_to_l2_messaging.ts +4 -2
- package/src/fixtures/setup_l1_contracts.ts +5 -4
- package/src/fixtures/setup_p2p_test.ts +79 -32
- package/src/fixtures/snapshot_manager.ts +87 -82
- package/src/fixtures/token_utils.ts +16 -24
- package/src/fixtures/utils.ts +142 -172
- package/src/fixtures/web3signer.ts +63 -0
- package/src/fixtures/with_telemetry_utils.ts +2 -2
- package/src/guides/up_quick_start.sh +3 -11
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +23 -31
- package/src/shared/gas_portal_test_harness.ts +14 -21
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +77 -86
- package/src/simulators/lending_simulator.ts +9 -6
- package/src/simulators/token_simulator.ts +5 -2
- package/src/spartan/DEVELOP.md +15 -3
- package/src/spartan/setup_test_wallets.ts +171 -127
- package/src/spartan/tx_metrics.ts +130 -0
- package/src/spartan/utils.ts +463 -64
package/dest/spartan/utils.js
CHANGED
@@ -1,16 +1,24 @@
-import { createLogger
+import { createLogger } from '@aztec/aztec.js/log';
+import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { makeBackoff, retry } from '@aztec/foundation/retry';
 import { schemas } from '@aztec/foundation/schemas';
-import {
+import { sleep } from '@aztec/foundation/sleep';
+import { createAztecNodeAdminClient, createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
 import { exec, execSync, spawn } from 'child_process';
 import path from 'path';
 import { promisify } from 'util';
+import { createPublicClient, fallback, http } from 'viem';
 import { z } from 'zod';
 const execAsync = promisify(exec);
 const logger = createLogger('e2e:k8s-utils');
 const testConfigSchema = z.object({
     NAMESPACE: z.string().default('scenario'),
-    REAL_VERIFIER: schemas.Boolean.optional().default(true)
+    REAL_VERIFIER: schemas.Boolean.optional().default(true),
+    CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+    L1_RPC_URLS_JSON: z.string().optional(),
+    L1_ACCOUNT_MNEMONIC: z.string().optional(),
+    AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+    AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
 });
 export function setupEnvironment(env) {
     const config = testConfigSchema.parse(env);
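The config schema now covers the eth-devnet toggle, L1 RPC settings, and rollup timing. A minimal standalone sketch of how the zod schema behaves; `schemas.Boolean` from `@aztec/foundation/schemas` is replaced here with a hypothetical string-coercing stand-in:

```ts
import { z } from 'zod';

// Hypothetical stand-in for schemas.Boolean: accepts 'true'/'false' strings from process.env.
const BooleanFromString = z.preprocess(
  v => (v === undefined ? undefined : v === 'true' || v === true),
  z.boolean(),
);

const testConfigSchema = z.object({
  NAMESPACE: z.string().default('scenario'),
  REAL_VERIFIER: BooleanFromString.optional().default(true),
  CREATE_ETH_DEVNET: BooleanFromString.optional().default(false),
  L1_RPC_URLS_JSON: z.string().optional(),
  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
});

// Env vars arrive as strings; z.coerce.number() turns '36' into 36, and unset vars fall back to defaults.
const cfg = testConfigSchema.parse({ NAMESPACE: 'smoke', AZTEC_SLOT_DURATION: '36' });
// => { NAMESPACE: 'smoke', REAL_VERIFIER: true, CREATE_ETH_DEVNET: false,
//      AZTEC_SLOT_DURATION: 36, AZTEC_PROOF_SUBMISSION_WINDOW: 5 }
```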
@@ -66,7 +74,7 @@ export function runProjectScript(script, args, logger, env) {
 }
 export async function startPortForward({ resource, namespace, containerPort, hostPort }) {
     const hostPortAsString = hostPort ? hostPort.toString() : '';
-    logger.
+    logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
     const process1 = spawn('kubectl', [
         'port-forward',
         '-n',
@@ -88,20 +96,20 @@ export async function startPortForward({ resource, namespace, containerPort, hos
         const str = data.toString();
         if (!isResolved && str.includes('Forwarding from')) {
             isResolved = true;
-            logger.
+            logger.debug(`Port forward for ${resource}: ${str}`);
             const port = str.search(/:\d+/);
             if (port === -1) {
                 throw new Error('Port not found in port forward output');
             }
             const portNumber = parseInt(str.slice(port + 1));
-            logger.
+            logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
             resolve(portNumber);
         } else {
             logger.silent(str);
         }
     });
     process1.stderr?.on('data', (data)=>{
-        logger.
+        logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
         // It's a strange thing:
         // If we don't pipe stderr, then the port forwarding does not work.
         // Log to silent because this doesn't actually report errors,
@@ -111,16 +119,16 @@ export async function startPortForward({ resource, namespace, containerPort, hos
     process1.on('close', ()=>{
         if (!isResolved) {
             isResolved = true;
-            logger.warn(
+            logger.warn(`Port forward for ${resource} closed before connection established`);
             resolve(0);
         }
     });
     process1.on('error', (error)=>{
-        logger.error(`Port forward error: ${error}`);
+        logger.error(`Port forward for ${resource} error: ${error}`);
         resolve(0);
     });
     process1.on('exit', (code)=>{
-        logger.
+        logger.verbose(`Port forward for ${resource} exited with code ${code}`);
         resolve(0);
     });
     });
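`startPortForward` resolves with the spawned `kubectl` child process and the local port once `Forwarding from` appears on stdout. A hedged usage sketch, assuming the module's exports are in scope (resource and namespace are placeholders):

```ts
const { process: forward, port } = await startPortForward({
  resource: 'pod/scenario-rpc-aztec-node-0', // placeholder resource name
  namespace: 'scenario',
  containerPort: 8080, // omit hostPort so kubectl picks a free local port
});
try {
  const res = await fetch(`http://127.0.0.1:${port}/status`);
  console.log(`node status: ${res.status}`);
} finally {
  forward.kill(); // tear down the kubectl child process
}
```

Note that the promise resolves with port `0` rather than rejecting when the forward closes or errors, so callers should check for a truthy port before using it.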
@@ -130,9 +138,35 @@ export async function startPortForward({ resource, namespace, containerPort, hos
         port
     };
 }
-export function
+export function getExternalIP(namespace, serviceName) {
+    const { promise, resolve, reject } = promiseWithResolvers();
+    const process1 = spawn('kubectl', [
+        'get',
+        'service',
+        '-n',
+        namespace,
+        `${namespace}-${serviceName}`,
+        '--output',
+        "jsonpath='{.status.loadBalancer.ingress[0].ip}'"
+    ], {
+        stdio: 'pipe'
+    });
+    let ip = '';
+    process1.stdout.on('data', (data)=>{
+        ip += data;
+    });
+    process1.on('error', (err)=>{
+        reject(err);
+    });
+    process1.on('exit', ()=>{
+        // kubectl prints JSON. Remove the quotes
+        resolve(ip.replace(/"|'/g, ''));
+    });
+    return promise;
+}
+export function startPortForwardForRPC(namespace, index = 0) {
     return startPortForward({
-        resource: `
+        resource: `pod/${namespace}-rpc-aztec-node-${index}`,
         namespace,
         containerPort: 8080
     });
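`getExternalIP` accumulates `kubectl` stdout and settles a deferred promise on exit. A self-contained sketch of the same pattern; the local `promiseWithResolvers` mirrors what `@aztec/foundation/promise` appears to export (on Node 22+, `Promise.withResolvers()` is equivalent):

```ts
import { spawn } from 'child_process';

function promiseWithResolvers<T>() {
  let resolve!: (value: T) => void;
  let reject!: (reason?: unknown) => void;
  const promise = new Promise<T>((res, rej) => {
    resolve = res;
    reject = rej;
  });
  return { promise, resolve, reject };
}

// Collect a command's stdout without blocking the event loop.
function collectStdout(cmd: string, args: string[]): Promise<string> {
  const { promise, resolve, reject } = promiseWithResolvers<string>();
  const child = spawn(cmd, args, { stdio: 'pipe' });
  let out = '';
  child.stdout.on('data', (d: Buffer) => {
    out += d.toString();
  });
  child.on('error', reject);
  child.on('exit', () => resolve(out));
  return promise;
}
```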
@@ -151,6 +185,13 @@ export async function deleteResourceByName({ resource, namespace, name, force =
     return stdout;
 }
 export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
+    // Check if the resource type exists before attempting to delete
+    try {
+        await execAsync(`kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`);
+    } catch (error) {
+        logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+        return '';
+    }
     const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${force ? '--force' : ''}`;
     logger.info(`command: ${command}`);
     const { stdout } = await execAsync(command);
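The new guard in `deleteResourceByLabel` keeps `kubectl delete` from failing on clusters where a CRD such as `podchaos` was never installed. Isolated, the check looks like this (a sketch; `execAsync` rejects on any non-zero exit, which is how `grep -q` signals no match):

```ts
import { exec } from 'child_process';
import { promisify } from 'util';

const execAsync = promisify(exec);

// True iff the cluster's API knows the resource type (core group first, then any group).
async function resourceTypeExists(resource: string): Promise<boolean> {
  try {
    await execAsync(
      `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" ` +
        `|| kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
    );
    return true;
  } catch {
    return false;
  }
}
```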
@@ -165,8 +206,12 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
 export function getChartDir(spartanDir, chartName) {
     return path.join(spartanDir.trim(), chartName);
 }
+function shellQuote(value) {
+    // Single-quote safe shell escaping: ' -> '\''
+    return `'${value.replace(/'/g, "'\\''")}'`;
+}
 function valuesToArgs(values) {
-    return Object.entries(values).map(([key, value])
+    return Object.entries(values).map(([key, value])=>typeof value === 'number' || typeof value === 'boolean' ? `--set ${key}=${value}` : `--set-string ${key}=${shellQuote(String(value))}`).join(' ');
 }
 function createHelmCommand({ instanceName, helmChartDir, namespace, valuesFile, timeout, values, reuseValues = false }) {
     const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
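`shellQuote` plus the `--set-string` switch is what lets helm values carry spaces and quotes (notably the bot mnemonic) through the shell intact. Reproducing the two helpers to show the output they produce:

```ts
function shellQuote(value: string): string {
  // Single-quote safe shell escaping: ' -> '\''
  return `'${value.replace(/'/g, "'\\''")}'`;
}

function valuesToArgs(values: Record<string, string | number | boolean>): string {
  return Object.entries(values)
    .map(([key, value]) =>
      typeof value === 'number' || typeof value === 'boolean'
        ? `--set ${key}=${value}`
        : `--set-string ${key}=${shellQuote(String(value))}`,
    )
    .join(' ');
}

console.log(valuesToArgs({ 'bot.replicaCount': 2, 'bot.mnemonic': "it's a test" }));
// --set bot.replicaCount=2 --set-string bot.mnemonic='it'\''s a test'
```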
@@ -179,6 +224,33 @@ async function execHelmCommand(args) {
     const { stdout } = await execAsync(helmCommand);
     return stdout;
 }
+export async function uninstallChaosMesh(instanceName, namespace, logger) {
+    // uninstall the helm chart if it exists
+    logger.info(`Uninstalling helm chart ${instanceName}`);
+    await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+    // and delete the chaos-mesh resources created by this release
+    const deleteByLabel = async (resource)=>{
+        const args = {
+            resource,
+            namespace: namespace,
+            label: `app.kubernetes.io/instance=${instanceName}`
+        };
+        logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+        await deleteResourceByLabel(args).catch((e)=>{
+            logger.error(`Error deleting ${resource}: ${e}`);
+            logger.info(`Force deleting ${resource}`);
+            return deleteResourceByLabel({
+                ...args,
+                force: true
+            });
+        });
+    };
+    await deleteByLabel('podchaos');
+    await deleteByLabel('networkchaos');
+    await deleteByLabel('podnetworkchaos');
+    await deleteByLabel('workflows');
+    await deleteByLabel('workflownodes');
+}
 /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
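`uninstallChaosMesh` is now a standalone export, so tests can tear down a chaos experiment without reinstalling it. A short usage sketch mirroring the `installChaosMeshChart` doc example below (the release and namespace names are placeholders):

```ts
// Remove the helm release and any podchaos/networkchaos/workflow resources it left behind.
await uninstallChaosMesh('force-reorg', 'smoke', logger);
```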
@@ -195,31 +267,14 @@ async function execHelmCommand(args) {
  * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
  * console.log(stdout);
  * ```
- */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir,
+ */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, timeout = '10m', clean = true, values = {}, logger }) {
     if (clean) {
-
-        logger.info(`Uninstalling helm chart ${instanceName}`);
-        await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-        // and delete the podchaos resource
-        const deleteArgs = {
-            resource: 'podchaos',
-            namespace: chaosMeshNamespace,
-            label: `app.kubernetes.io/instance=${instanceName}`
-        };
-        logger.info(`Deleting podchaos resource`);
-        await deleteResourceByLabel(deleteArgs).catch((e)=>{
-            logger.error(`Error deleting podchaos resource: ${e}`);
-            logger.info(`Force deleting podchaos resource`);
-            return deleteResourceByLabel({
-                ...deleteArgs,
-                force: true
-            });
-        });
+        await uninstallChaosMesh(instanceName, targetNamespace, logger);
     }
     return execHelmCommand({
         instanceName,
         helmChartDir,
-        namespace:
+        namespace: targetNamespace,
         valuesFile,
         timeout,
         values: {
@@ -291,19 +346,19 @@ export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger
         logger
     });
 }
-export async function
-    logger.info(`Waiting for
+export async function awaitCheckpointNumber(rollupCheatCodes, checkpointNumber, timeoutSeconds, logger) {
+    logger.info(`Waiting for checkpoint ${checkpointNumber}`);
     let tips = await rollupCheatCodes.getTips();
     const endTime = Date.now() + timeoutSeconds * 1000;
-    while(tips.pending <
-        logger.info(`At
+    while(tips.pending < checkpointNumber && Date.now() < endTime){
+        logger.info(`At checkpoint ${tips.pending}`);
         await sleep(1000);
         tips = await rollupCheatCodes.getTips();
     }
-    if (tips.pending <
-        throw new Error(`Timeout waiting for
+    if (tips.pending < checkpointNumber) {
+        throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
     } else {
-        logger.info(`Reached
+        logger.info(`Reached checkpoint ${tips.pending}`);
     }
 }
 export async function restartBot(namespace, logger) {
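`awaitCheckpointNumber` polls the rollup cheat codes once a second until the pending tip reaches the target or the time budget runs out. A hedged usage sketch; the `rollupCheatCodes` handle is assumed to expose `getTips()` as in the loop above:

```ts
// Wait up to 5 minutes for the pending chain to reach checkpoint 10.
await awaitCheckpointNumber(rollupCheatCodes, 10, 5 * 60, logger);
// Throws `Timeout waiting for checkpoint 10, only reached N` if the budget elapses first.
```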
@@ -311,16 +366,189 @@ export async function restartBot(namespace, logger) {
     await deleteResourceByLabel({
         resource: 'pods',
         namespace,
-        label: 'app=bot'
+        label: 'app.kubernetes.io/name=bot'
     });
     await sleep(10 * 1000);
+    // Some bot images may take time to report Ready due to heavy boot-time proving.
+    // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
     await waitForResourceByLabel({
         resource: 'pods',
         namespace,
-        label: 'app=bot'
+        label: 'app.kubernetes.io/name=bot',
+        condition: 'PodReadyToStartContainers'
     });
     logger.info(`Bot restarted`);
 }
+/**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */ export async function installTransferBot({ namespace, spartanDir, logger, replicas = 1, txIntervalSeconds = 10, followChain = 'PENDING', mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk', mnemonicStartIndex, botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01', nodeUrl, timeout = '15m', reuseValues = true, aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12) }) {
+    const instanceName = `${namespace}-bot-transfers`;
+    const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+    const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+    logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+    const values = {
+        'bot.replicaCount': replicas,
+        'bot.txIntervalSeconds': txIntervalSeconds,
+        'bot.followChain': followChain,
+        'bot.botPrivateKey': botPrivateKey,
+        'bot.nodeUrl': resolvedNodeUrl,
+        'bot.mnemonic': mnemonic,
+        'bot.feePaymentMethod': 'fee_juice',
+        'aztec.slotDuration': aztecSlotDuration,
+        // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+        // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+        'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+        // Provide L1 execution RPC for bridging fee juice
+        'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+        // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+        'bot.node.env.BOT_L1_MNEMONIC': mnemonic
+    };
+    // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+    if (mnemonicStartIndex === undefined) {
+        values['bot.mnemonicStartIndex'] = 0;
+    }
+    // Also pass a funded private key directly if available
+    if (process.env.FUNDING_PRIVATE_KEY) {
+        values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+    }
+    // Align bot image with the running network image: prefer env var, else detect from a validator pod
+    let repositoryFromEnv;
+    let tagFromEnv;
+    const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+    if (aztecDockerImage && aztecDockerImage.includes(':')) {
+        const lastColon = aztecDockerImage.lastIndexOf(':');
+        repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+        tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+    }
+    let repository = repositoryFromEnv;
+    let tag = tagFromEnv;
+    if (!repository || !tag) {
+        try {
+            const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
+            const image = stdout.trim().replace(/^'|'$/g, '');
+            if (image && image.includes(':')) {
+                const lastColon = image.lastIndexOf(':');
+                repository = image.slice(0, lastColon);
+                tag = image.slice(lastColon + 1);
+            }
+        } catch (err) {
+            logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+        }
+    }
+    if (repository && tag) {
+        values['global.aztecImage.repository'] = repository;
+        values['global.aztecImage.tag'] = tag;
+    }
+    if (mnemonicStartIndex !== undefined) {
+        values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+    }
+    await execHelmCommand({
+        instanceName,
+        helmChartDir,
+        namespace,
+        valuesFile: undefined,
+        timeout,
+        values: values,
+        reuseValues
+    });
+    if (replicas > 0) {
+        await waitForResourceByLabel({
+            resource: 'pods',
+            namespace,
+            label: 'app.kubernetes.io/name=bot',
+            condition: 'PodReadyToStartContainers'
+        });
+    }
+}
+/**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */ export async function uninstallTransferBot(namespace, logger) {
+    const instanceName = `${namespace}-bot-transfers`;
+    logger.info(`Uninstalling transfer bot release ${instanceName}`);
+    await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+    // Ensure any leftover pods are removed
+    await deleteResourceByLabel({
+        resource: 'pods',
+        namespace,
+        label: 'app.kubernetes.io/name=bot'
+    }).catch(()=>undefined);
+}
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */ export async function setValidatorTxDrop({ namespace, enabled, probability, logger }) {
+    const drop = enabled ? 'true' : 'false';
+    const prob = String(probability);
+    const selectors = [
+        'app=validator',
+        'app.kubernetes.io/component=validator'
+    ];
+    let updated = false;
+    for (const selector of selectors){
+        try {
+            const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+            const names = list.stdout.split('\n').map((s)=>s.trim()).filter(Boolean);
+            if (names.length === 0) {
+                continue;
+            }
+            const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+            logger.info(`command: ${cmd}`);
+            await execAsync(cmd);
+            updated = true;
+        } catch (e) {
+            logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+        }
+    }
+    if (!updated) {
+        logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+        return;
+    }
+    // Restart validator pods to ensure env vars take effect and wait for readiness
+    await restartValidators(namespace, logger);
+}
+export async function restartValidators(namespace, logger) {
+    const selectors = [
+        'app=validator',
+        'app.kubernetes.io/component=validator'
+    ];
+    let any = false;
+    for (const selector of selectors){
+        try {
+            const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+            if (!stdout || stdout.trim().length === 0) {
+                continue;
+            }
+            any = true;
+            await deleteResourceByLabel({
+                resource: 'pods',
+                namespace,
+                label: selector
+            });
+        } catch (e) {
+            logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+        }
+    }
+    if (!any) {
+        logger.warn(`No validator pods found to restart in ${namespace}.`);
+        return;
+    }
+    // Wait for either label to be Ready
+    for (const selector of selectors){
+        try {
+            await waitForResourceByLabel({
+                resource: 'pods',
+                namespace,
+                label: selector
+            });
+            return;
+        } catch {
+        // try next
+        }
+    }
+    logger.warn(`Validator pods did not report Ready; continuing.`);
+}
 export async function enableValidatorDynamicBootNode(instanceName, namespace, spartanDir, logger) {
     logger.info(`Enabling validator dynamic boot node`);
     await execHelmCommand({
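`setValidatorTxDrop` tries both the legacy `app=validator` and the new `app.kubernetes.io/component=validator` selectors, sets the env vars on the StatefulSets, and bounces the pods via `restartValidators`. A sketch of how a chaos-style test might use it (the namespace is a placeholder):

```ts
// Drop roughly 30% of gossiped txs on every validator, run the scenario, then restore.
await setValidatorTxDrop({ namespace: 'smoke', enabled: true, probability: 0.3, logger });
// ... exercise the network under packet loss ...
await setValidatorTxDrop({ namespace: 'smoke', enabled: false, probability: 0, logger });
```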
@@ -336,40 +564,108 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
     });
     logger.info(`Validator dynamic boot node enabled`);
 }
-export async function updateSequencerConfig(url, config) {
-    const node = createAztecNodeAdminClient(url);
-    // Retry incase the port forward is not ready yet
-    await retry(()=>node.setConfig(config), 'Update sequencer config', makeBackoff([
-        1,
-        3,
-        6
-    ]), logger);
-}
 export async function getSequencers(namespace) {
-    const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+    const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
     const { stdout } = await execAsync(command);
-
+    const sequencers = stdout.split(' ');
+    logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
+    return sequencers;
 }
-
-
+export function updateSequencersConfig(env, config) {
+    return withSequencersAdmin(env, async (client)=>{
+        await client.setConfig(config);
+        return client.getConfig();
+    });
+}
+export function getSequencersConfig(env) {
+    return withSequencersAdmin(env, (client)=>client.getConfig());
+}
+export async function withSequencersAdmin(env, fn) {
+    const adminContainerPort = 8880;
+    const namespace = env.NAMESPACE;
     const sequencers = await getSequencers(namespace);
+    const results = [];
     for (const sequencer of sequencers){
         const { process: process1, port } = await startPortForward({
             resource: `pod/${sequencer}`,
             namespace,
-            containerPort
+            containerPort: adminContainerPort
         });
         const url = `http://localhost:${port}`;
-        await
+        await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward node admin port', makeBackoff([
+            1,
+            1,
+            2,
+            6
+        ]), logger, true);
+        const client = createAztecNodeAdminClient(url);
+        results.push(await fn(client));
         process1.kill();
     }
+    return results;
 }
-
-
-
-
-
-
+/**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */ export async function getPublicViemClient(env, /** If set, will push the new process into it */ processes) {
+    const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+    if (CREATE_ETH_DEVNET) {
+        logger.info(`Creating port forward to eth execution node`);
+        const { process: process1, port } = await startPortForward({
+            resource: `svc/${NAMESPACE}-eth-execution`,
+            namespace: NAMESPACE,
+            containerPort: 8545
+        });
+        const url = `http://127.0.0.1:${port}`;
+        const client = createPublicClient({
+            transport: fallback([
+                http(url)
+            ])
+        });
+        if (processes) {
+            processes.push(process1);
+        }
+        return {
+            url,
+            client,
+            process: process1
+        };
+    } else {
+        logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+        if (!L1_RPC_URLS_JSON) {
+            throw new Error(`L1_RPC_URLS_JSON is not defined`);
+        }
+        const client = createPublicClient({
+            transport: fallback([
+                http(L1_RPC_URLS_JSON)
+            ])
+        });
+        return {
+            url: L1_RPC_URLS_JSON,
+            client
+        };
+    }
+}
+/** Queries an Aztec node for the L1 deployment addresses */ export async function getL1DeploymentAddresses(env) {
+    let forwardProcess;
+    try {
+        const [sequencer] = await getSequencers(env.NAMESPACE);
+        const { process: process1, port } = await startPortForward({
+            resource: `pod/${sequencer}`,
+            namespace: env.NAMESPACE,
+            containerPort: 8080
+        });
+        forwardProcess = process1;
+        const url = `http://127.0.0.1:${port}`;
+        const node = createAztecNodeClient(url);
+        return await retry(()=>node.getNodeInfo().then((i)=>i.l1ContractAddresses), 'get node info', makeBackoff([
+            1,
+            3,
+            6
+        ]), logger);
+    } finally{
+        forwardProcess?.kill();
+    }
 }
 /**
  * Rolls the Aztec pods in the given namespace.
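`withSequencersAdmin` generalizes the removed `updateSequencerConfig`: it port-forwards each validator's admin port (8880), waits for `/status`, runs a callback against the admin client, and collects the results. A hedged sketch; the `minTxsPerBlock` key is an assumption about the sequencer config shape:

```ts
const env = setupEnvironment(process.env);

// Apply one config change to every sequencer and get back each node's resulting config.
const configs = await updateSequencersConfig(env, { minTxsPerBlock: 1 });
console.log(`updated ${configs.length} sequencers`);

// Or run an arbitrary read against each admin client:
const current = await withSequencersAdmin(env, client => client.getConfig());
```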
@@ -457,3 +753,30 @@ export async function updateSequencersConfig(env, config) {
         throw new Error(`Failed to determine git project root: ${error}`);
     }
 }
+/** Returns a client to the RPC of the given sequencer (defaults to first) */ export async function getNodeClient(env, index = 0) {
+    const namespace = env.NAMESPACE;
+    const containerPort = 8080;
+    const sequencers = await getSequencers(namespace);
+    const sequencer = sequencers[index];
+    if (!sequencer) {
+        throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+    }
+    const { process: process1, port } = await startPortForward({
+        resource: `pod/${sequencer}`,
+        namespace,
+        containerPort
+    });
+    const url = `http://localhost:${port}`;
+    await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward port', makeBackoff([
+        1,
+        1,
+        2,
+        6
+    ]), logger, true);
+    const client = createAztecNodeClient(url);
+    return {
+        node: client,
+        port,
+        process: process1
+    };
+}