@aztec/end-to-end 4.0.0-nightly.20250907 → 4.0.0-nightly.20260107
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/benchmark.d.ts +4 -3
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/benchmark.js +2 -2
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +26 -15
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.js +111 -90
- package/dest/bench/client_flows/config.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.js +10 -30
- package/dest/bench/utils.d.ts +3 -12
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +17 -37
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +8 -8
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +42 -42
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +13 -10
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +35 -35
- package/dest/e2e_deploy_contract/deploy_test.d.ts +12 -6
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +9 -18
- package/dest/e2e_epochs/epochs_test.d.ts +20 -12
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +36 -27
- package/dest/e2e_fees/bridging_race.notest.d.ts +1 -1
- package/dest/e2e_fees/bridging_race.notest.js +14 -11
- package/dest/e2e_fees/fees_test.d.ts +13 -9
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +38 -39
- package/dest/e2e_l1_publisher/write_json.d.ts +4 -2
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
- package/dest/e2e_l1_publisher/write_json.js +9 -8
- package/dest/e2e_multi_validator/utils.d.ts +2 -2
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -1
- package/dest/e2e_multi_validator/utils.js +4 -10
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +7 -4
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +11 -12
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +136 -0
- package/dest/e2e_p2p/p2p_network.d.ts +238 -18
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +50 -25
- package/dest/e2e_p2p/shared.d.ts +16 -17
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +57 -56
- package/dest/e2e_token_contract/token_contract_test.d.ts +6 -5
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +14 -17
- package/dest/fixtures/e2e_prover_test.d.ts +13 -11
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
- package/dest/fixtures/e2e_prover_test.js +57 -66
- package/dest/fixtures/fixtures.d.ts +2 -3
- package/dest/fixtures/fixtures.d.ts.map +1 -1
- package/dest/fixtures/fixtures.js +2 -3
- package/dest/fixtures/get_acvm_config.d.ts +2 -2
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_acvm_config.js +1 -1
- package/dest/fixtures/get_bb_config.d.ts +2 -2
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +2 -2
- package/dest/fixtures/index.d.ts +1 -1
- package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
- package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
- package/dest/fixtures/l1_to_l2_messaging.js +2 -2
- package/dest/fixtures/logging.d.ts +1 -1
- package/dest/fixtures/setup_p2p_test.d.ts +12 -11
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +50 -24
- package/dest/fixtures/snapshot_manager.d.ts +16 -15
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +84 -88
- package/dest/fixtures/token_utils.d.ts +10 -5
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +17 -18
- package/dest/fixtures/utils.d.ts +44 -47
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +128 -185
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
- package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
- package/dest/fixtures/with_telemetry_utils.js +2 -2
- package/dest/index.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts +2 -2
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +20 -23
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +14 -16
- package/dest/shared/gas_portal_test_harness.d.ts +10 -17
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +11 -8
- package/dest/shared/index.d.ts +1 -1
- package/dest/shared/jest_setup.d.ts +1 -1
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +6 -4
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +13 -9
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +44 -58
- package/dest/simulators/index.d.ts +1 -1
- package/dest/simulators/lending_simulator.d.ts +4 -7
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +8 -5
- package/dest/simulators/token_simulator.d.ts +4 -2
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +2 -2
- package/dest/spartan/setup_test_wallets.d.ts +22 -14
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +144 -86
- package/dest/spartan/tx_metrics.d.ts +39 -0
- package/dest/spartan/tx_metrics.d.ts.map +1 -0
- package/dest/spartan/tx_metrics.js +95 -0
- package/dest/spartan/utils.d.ts +101 -16
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +414 -52
- package/package.json +43 -40
- package/src/bench/client_flows/benchmark.ts +8 -8
- package/src/bench/client_flows/client_flows_benchmark.ts +143 -115
- package/src/bench/client_flows/data_extractor.ts +9 -31
- package/src/bench/utils.ts +15 -39
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +46 -63
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +46 -55
- package/src/e2e_deploy_contract/deploy_test.ts +18 -36
- package/src/e2e_epochs/epochs_test.ts +59 -42
- package/src/e2e_fees/bridging_race.notest.ts +16 -11
- package/src/e2e_fees/fees_test.ts +47 -51
- package/src/e2e_l1_publisher/write_json.ts +12 -9
- package/src/e2e_multi_validator/utils.ts +5 -11
- package/src/e2e_nested_contract/nested_contract_test.ts +15 -13
- package/src/e2e_p2p/inactivity_slash_test.ts +179 -0
- package/src/e2e_p2p/p2p_network.ts +125 -89
- package/src/e2e_p2p/shared.ts +69 -60
- package/src/e2e_token_contract/token_contract_test.ts +17 -17
- package/src/fixtures/e2e_prover_test.ts +65 -105
- package/src/fixtures/fixtures.ts +2 -5
- package/src/fixtures/get_acvm_config.ts +2 -2
- package/src/fixtures/get_bb_config.ts +3 -2
- package/src/fixtures/l1_to_l2_messaging.ts +4 -2
- package/src/fixtures/setup_p2p_test.ts +79 -32
- package/src/fixtures/snapshot_manager.ts +120 -131
- package/src/fixtures/token_utils.ts +16 -24
- package/src/fixtures/utils.ts +175 -269
- package/src/fixtures/web3signer.ts +63 -0
- package/src/fixtures/with_telemetry_utils.ts +2 -2
- package/src/guides/up_quick_start.sh +3 -11
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +23 -31
- package/src/shared/gas_portal_test_harness.ts +14 -21
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +80 -88
- package/src/simulators/lending_simulator.ts +9 -6
- package/src/simulators/token_simulator.ts +5 -2
- package/src/spartan/DEVELOP.md +15 -3
- package/src/spartan/setup_test_wallets.ts +171 -127
- package/src/spartan/tx_metrics.ts +130 -0
- package/src/spartan/utils.ts +543 -45
- package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
- package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
- package/dest/fixtures/setup_l1_contracts.js +0 -17
- package/src/fixtures/setup_l1_contracts.ts +0 -26
package/dest/spartan/utils.js
CHANGED
|
@@ -1,6 +1,8 @@
|
|
|
1
|
-
import { createLogger
|
|
1
|
+
import { createLogger } from '@aztec/aztec.js/log';
|
|
2
|
+
import { promiseWithResolvers } from '@aztec/foundation/promise';
|
|
2
3
|
import { makeBackoff, retry } from '@aztec/foundation/retry';
|
|
3
4
|
import { schemas } from '@aztec/foundation/schemas';
|
|
5
|
+
import { sleep } from '@aztec/foundation/sleep';
|
|
4
6
|
import { createAztecNodeAdminClient, createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
|
|
5
7
|
import { exec, execSync, spawn } from 'child_process';
|
|
6
8
|
import path from 'path';
|
|
@@ -13,7 +15,11 @@ const testConfigSchema = z.object({
|
|
|
13
15
|
NAMESPACE: z.string().default('scenario'),
|
|
14
16
|
REAL_VERIFIER: schemas.Boolean.optional().default(true),
|
|
15
17
|
CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
|
|
16
|
-
L1_RPC_URLS_JSON: z.string().optional()
|
|
18
|
+
L1_RPC_URLS_JSON: z.string().optional(),
|
|
19
|
+
L1_ACCOUNT_MNEMONIC: z.string().optional(),
|
|
20
|
+
AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
|
|
21
|
+
AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
|
|
22
|
+
AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
|
|
17
23
|
});
|
|
18
24
|
export function setupEnvironment(env) {
|
|
19
25
|
const config = testConfigSchema.parse(env);
|
|
@@ -133,9 +139,35 @@ export async function startPortForward({ resource, namespace, containerPort, hos
|
|
|
133
139
|
port
|
|
134
140
|
};
|
|
135
141
|
}
|
|
136
|
-
export function
|
|
142
|
+
export function getExternalIP(namespace, serviceName) {
|
|
143
|
+
const { promise, resolve, reject } = promiseWithResolvers();
|
|
144
|
+
const process1 = spawn('kubectl', [
|
|
145
|
+
'get',
|
|
146
|
+
'service',
|
|
147
|
+
'-n',
|
|
148
|
+
namespace,
|
|
149
|
+
`${namespace}-${serviceName}`,
|
|
150
|
+
'--output',
|
|
151
|
+
"jsonpath='{.status.loadBalancer.ingress[0].ip}'"
|
|
152
|
+
], {
|
|
153
|
+
stdio: 'pipe'
|
|
154
|
+
});
|
|
155
|
+
let ip = '';
|
|
156
|
+
process1.stdout.on('data', (data)=>{
|
|
157
|
+
ip += data;
|
|
158
|
+
});
|
|
159
|
+
process1.on('error', (err)=>{
|
|
160
|
+
reject(err);
|
|
161
|
+
});
|
|
162
|
+
process1.on('exit', ()=>{
|
|
163
|
+
// kubectl prints JSON. Remove the quotes
|
|
164
|
+
resolve(ip.replace(/"|'/g, ''));
|
|
165
|
+
});
|
|
166
|
+
return promise;
|
|
167
|
+
}
|
|
168
|
+
export function startPortForwardForRPC(namespace, index = 0) {
|
|
137
169
|
return startPortForward({
|
|
138
|
-
resource: `
|
|
170
|
+
resource: `pod/${namespace}-rpc-aztec-node-${index}`,
|
|
139
171
|
namespace,
|
|
140
172
|
containerPort: 8080
|
|
141
173
|
});
|
|
@@ -154,6 +186,15 @@ export async function deleteResourceByName({ resource, namespace, name, force =
|
|
|
154
186
|
return stdout;
|
|
155
187
|
}
|
|
156
188
|
export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
|
|
189
|
+
try {
|
|
190
|
+
// Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
|
|
191
|
+
const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
|
|
192
|
+
const regex = `(^|\\.)${escaped}(\\.|$)`;
|
|
193
|
+
await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
|
|
194
|
+
} catch (error) {
|
|
195
|
+
logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
|
|
196
|
+
return '';
|
|
197
|
+
}
|
|
157
198
|
const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${force ? '--force' : ''}`;
|
|
158
199
|
logger.info(`command: ${command}`);
|
|
159
200
|
const { stdout } = await execAsync(command);
|
|
@@ -165,11 +206,40 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
|
|
|
165
206
|
const { stdout } = await execAsync(command);
|
|
166
207
|
return stdout;
|
|
167
208
|
}
|
|
209
|
+
export async function waitForResourceByName({ resource, name, namespace, condition = 'Ready', timeout = '10m' }) {
|
|
210
|
+
const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
|
|
211
|
+
logger.info(`command: ${command}`);
|
|
212
|
+
const { stdout } = await execAsync(command);
|
|
213
|
+
return stdout;
|
|
214
|
+
}
|
|
215
|
+
export async function waitForResourcesByName({ resource, names, namespace, condition = 'Ready', timeout = '10m' }) {
|
|
216
|
+
if (!names.length) {
|
|
217
|
+
throw new Error(`No ${resource} names provided to waitForResourcesByName`);
|
|
218
|
+
}
|
|
219
|
+
// Wait all in parallel; if any fails, surface which one.
|
|
220
|
+
await Promise.all(names.map(async (name)=>{
|
|
221
|
+
try {
|
|
222
|
+
await waitForResourceByName({
|
|
223
|
+
resource,
|
|
224
|
+
name,
|
|
225
|
+
namespace,
|
|
226
|
+
condition,
|
|
227
|
+
timeout
|
|
228
|
+
});
|
|
229
|
+
} catch (err) {
|
|
230
|
+
throw new Error(`Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(err)}`);
|
|
231
|
+
}
|
|
232
|
+
}));
|
|
233
|
+
}
|
|
168
234
|
export function getChartDir(spartanDir, chartName) {
|
|
169
235
|
return path.join(spartanDir.trim(), chartName);
|
|
170
236
|
}
|
|
237
|
+
function shellQuote(value) {
|
|
238
|
+
// Single-quote safe shell escaping: ' -> '\''
|
|
239
|
+
return `'${value.replace(/'/g, "'\\''")}'`;
|
|
240
|
+
}
|
|
171
241
|
function valuesToArgs(values) {
|
|
172
|
-
return Object.entries(values).map(([key, value])
|
|
242
|
+
return Object.entries(values).map(([key, value])=>typeof value === 'number' || typeof value === 'boolean' ? `--set ${key}=${value}` : `--set-string ${key}=${shellQuote(String(value))}`).join(' ');
|
|
173
243
|
}
|
|
174
244
|
function createHelmCommand({ instanceName, helmChartDir, namespace, valuesFile, timeout, values, reuseValues = false }) {
|
|
175
245
|
const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
|
|
@@ -182,6 +252,57 @@ async function execHelmCommand(args) {
|
|
|
182
252
|
const { stdout } = await execAsync(helmCommand);
|
|
183
253
|
return stdout;
|
|
184
254
|
}
|
|
255
|
+
async function getHelmReleaseStatus(instanceName, namespace) {
|
|
256
|
+
try {
|
|
257
|
+
const { stdout } = await execAsync(`helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`);
|
|
258
|
+
const parsed = JSON.parse(stdout);
|
|
259
|
+
const row = parsed.find((r)=>r.name === instanceName);
|
|
260
|
+
return row?.status;
|
|
261
|
+
} catch {
|
|
262
|
+
return undefined;
|
|
263
|
+
}
|
|
264
|
+
}
|
|
265
|
+
async function forceDeleteHelmReleaseRecord(instanceName, namespace, logger) {
|
|
266
|
+
const labelSelector = `owner=helm,name=${instanceName}`;
|
|
267
|
+
const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
|
|
268
|
+
logger.warn(`Force deleting Helm release record: ${cmd}`);
|
|
269
|
+
await execAsync(cmd).catch(()=>undefined);
|
|
270
|
+
}
|
|
271
|
+
async function hasDeployedHelmRelease(instanceName, namespace) {
|
|
272
|
+
try {
|
|
273
|
+
const status = await getHelmReleaseStatus(instanceName, namespace);
|
|
274
|
+
return status?.toLowerCase() === 'deployed';
|
|
275
|
+
} catch {
|
|
276
|
+
return false;
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
export async function uninstallChaosMesh(instanceName, namespace, logger) {
|
|
280
|
+
// uninstall the helm chart if it exists
|
|
281
|
+
logger.info(`Uninstalling helm chart ${instanceName}`);
|
|
282
|
+
await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
|
|
283
|
+
// and delete the chaos-mesh resources created by this release
|
|
284
|
+
const deleteByLabel = async (resource)=>{
|
|
285
|
+
const args = {
|
|
286
|
+
resource,
|
|
287
|
+
namespace: namespace,
|
|
288
|
+
label: `app.kubernetes.io/instance=${instanceName}`
|
|
289
|
+
};
|
|
290
|
+
logger.info(`Deleting ${resource} resources for release ${instanceName}`);
|
|
291
|
+
await deleteResourceByLabel(args).catch((e)=>{
|
|
292
|
+
logger.error(`Error deleting ${resource}: ${e}`);
|
|
293
|
+
logger.info(`Force deleting ${resource}`);
|
|
294
|
+
return deleteResourceByLabel({
|
|
295
|
+
...args,
|
|
296
|
+
force: true
|
|
297
|
+
});
|
|
298
|
+
});
|
|
299
|
+
};
|
|
300
|
+
await deleteByLabel('podchaos');
|
|
301
|
+
await deleteByLabel('networkchaos');
|
|
302
|
+
await deleteByLabel('podnetworkchaos');
|
|
303
|
+
await deleteByLabel('workflows');
|
|
304
|
+
await deleteByLabel('workflownodes');
|
|
305
|
+
}
|
|
185
306
|
/**
|
|
186
307
|
* Installs a Helm chart with the given parameters.
|
|
187
308
|
* @param instanceName - The name of the Helm chart instance.
|
|
@@ -198,31 +319,14 @@ async function execHelmCommand(args) {
|
|
|
198
319
|
* const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
|
|
199
320
|
* console.log(stdout);
|
|
200
321
|
* ```
|
|
201
|
-
*/ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir,
|
|
322
|
+
*/ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, timeout = '10m', clean = true, values = {}, logger }) {
|
|
202
323
|
if (clean) {
|
|
203
|
-
|
|
204
|
-
logger.info(`Uninstalling helm chart ${instanceName}`);
|
|
205
|
-
await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
|
|
206
|
-
// and delete the podchaos resource
|
|
207
|
-
const deleteArgs = {
|
|
208
|
-
resource: 'podchaos',
|
|
209
|
-
namespace: chaosMeshNamespace,
|
|
210
|
-
label: `app.kubernetes.io/instance=${instanceName}`
|
|
211
|
-
};
|
|
212
|
-
logger.info(`Deleting podchaos resource`);
|
|
213
|
-
await deleteResourceByLabel(deleteArgs).catch((e)=>{
|
|
214
|
-
logger.error(`Error deleting podchaos resource: ${e}`);
|
|
215
|
-
logger.info(`Force deleting podchaos resource`);
|
|
216
|
-
return deleteResourceByLabel({
|
|
217
|
-
...deleteArgs,
|
|
218
|
-
force: true
|
|
219
|
-
});
|
|
220
|
-
});
|
|
324
|
+
await uninstallChaosMesh(instanceName, targetNamespace, logger);
|
|
221
325
|
}
|
|
222
326
|
return execHelmCommand({
|
|
223
327
|
instanceName,
|
|
224
328
|
helmChartDir,
|
|
225
|
-
namespace:
|
|
329
|
+
namespace: targetNamespace,
|
|
226
330
|
valuesFile,
|
|
227
331
|
timeout,
|
|
228
332
|
values: {
|
|
@@ -243,70 +347,85 @@ export function applyProverFailure({ namespace, spartanDir, durationSeconds, log
|
|
|
243
347
|
logger
|
|
244
348
|
});
|
|
245
349
|
}
|
|
246
|
-
export function
|
|
350
|
+
export function applyValidatorFailure({ namespace, spartanDir, logger, values, instanceName }) {
|
|
351
|
+
return installChaosMeshChart({
|
|
352
|
+
instanceName: instanceName ?? 'validator-failure',
|
|
353
|
+
targetNamespace: namespace,
|
|
354
|
+
valuesFile: 'validator-failure.yaml',
|
|
355
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
356
|
+
values,
|
|
357
|
+
logger
|
|
358
|
+
});
|
|
359
|
+
}
|
|
360
|
+
export function applyProverKill({ namespace, spartanDir, logger, values }) {
|
|
247
361
|
return installChaosMeshChart({
|
|
248
362
|
instanceName: 'prover-kill',
|
|
249
363
|
targetNamespace: namespace,
|
|
250
364
|
valuesFile: 'prover-kill.yaml',
|
|
251
365
|
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
366
|
+
chaosMeshNamespace: namespace,
|
|
252
367
|
clean: true,
|
|
253
|
-
logger
|
|
368
|
+
logger,
|
|
369
|
+
values
|
|
254
370
|
});
|
|
255
371
|
}
|
|
256
|
-
export function applyProverBrokerKill({ namespace, spartanDir, logger }) {
|
|
372
|
+
export function applyProverBrokerKill({ namespace, spartanDir, logger, values }) {
|
|
257
373
|
return installChaosMeshChart({
|
|
258
374
|
instanceName: 'prover-broker-kill',
|
|
259
375
|
targetNamespace: namespace,
|
|
260
376
|
valuesFile: 'prover-broker-kill.yaml',
|
|
261
377
|
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
262
378
|
clean: true,
|
|
263
|
-
logger
|
|
379
|
+
logger,
|
|
380
|
+
values
|
|
264
381
|
});
|
|
265
382
|
}
|
|
266
|
-
export function applyBootNodeFailure({ namespace, spartanDir, durationSeconds, logger }) {
|
|
383
|
+
export function applyBootNodeFailure({ instanceName = 'boot-node-failure', namespace, spartanDir, durationSeconds, logger, values }) {
|
|
267
384
|
return installChaosMeshChart({
|
|
268
|
-
instanceName
|
|
385
|
+
instanceName,
|
|
269
386
|
targetNamespace: namespace,
|
|
270
387
|
valuesFile: 'boot-node-failure.yaml',
|
|
271
388
|
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
272
389
|
values: {
|
|
273
|
-
'bootNodeFailure.duration': `${durationSeconds}s
|
|
390
|
+
'bootNodeFailure.duration': `${durationSeconds}s`,
|
|
391
|
+
...values ?? {}
|
|
274
392
|
},
|
|
275
393
|
logger
|
|
276
394
|
});
|
|
277
395
|
}
|
|
278
|
-
export function applyValidatorKill({ namespace, spartanDir, logger, values }) {
|
|
396
|
+
export function applyValidatorKill({ instanceName = 'validator-kill', namespace, spartanDir, logger, values, clean = true }) {
|
|
279
397
|
return installChaosMeshChart({
|
|
280
|
-
instanceName: 'validator-kill',
|
|
398
|
+
instanceName: instanceName ?? 'validator-kill',
|
|
281
399
|
targetNamespace: namespace,
|
|
282
400
|
valuesFile: 'validator-kill.yaml',
|
|
283
401
|
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
402
|
+
clean,
|
|
284
403
|
logger,
|
|
285
404
|
values
|
|
286
405
|
});
|
|
287
406
|
}
|
|
288
|
-
export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger }) {
|
|
407
|
+
export function applyNetworkShaping({ instanceName = 'network-shaping', valuesFile, namespace, spartanDir, logger }) {
|
|
289
408
|
return installChaosMeshChart({
|
|
290
|
-
instanceName
|
|
409
|
+
instanceName,
|
|
291
410
|
targetNamespace: namespace,
|
|
292
411
|
valuesFile,
|
|
293
412
|
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
294
413
|
logger
|
|
295
414
|
});
|
|
296
415
|
}
|
|
297
|
-
export async function
|
|
298
|
-
logger.info(`Waiting for
|
|
416
|
+
export async function awaitCheckpointNumber(rollupCheatCodes, checkpointNumber, timeoutSeconds, logger) {
|
|
417
|
+
logger.info(`Waiting for checkpoint ${checkpointNumber}`);
|
|
299
418
|
let tips = await rollupCheatCodes.getTips();
|
|
300
419
|
const endTime = Date.now() + timeoutSeconds * 1000;
|
|
301
|
-
while(tips.pending <
|
|
302
|
-
logger.info(`At
|
|
420
|
+
while(tips.pending < checkpointNumber && Date.now() < endTime){
|
|
421
|
+
logger.info(`At checkpoint ${tips.pending}`);
|
|
303
422
|
await sleep(1000);
|
|
304
423
|
tips = await rollupCheatCodes.getTips();
|
|
305
424
|
}
|
|
306
|
-
if (tips.pending <
|
|
307
|
-
throw new Error(`Timeout waiting for
|
|
425
|
+
if (tips.pending < checkpointNumber) {
|
|
426
|
+
throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
|
|
308
427
|
} else {
|
|
309
|
-
logger.info(`Reached
|
|
428
|
+
logger.info(`Reached checkpoint ${tips.pending}`);
|
|
310
429
|
}
|
|
311
430
|
}
|
|
312
431
|
export async function restartBot(namespace, logger) {
|
|
@@ -314,16 +433,212 @@ export async function restartBot(namespace, logger) {
|
|
|
314
433
|
await deleteResourceByLabel({
|
|
315
434
|
resource: 'pods',
|
|
316
435
|
namespace,
|
|
317
|
-
label: 'app=bot'
|
|
436
|
+
label: 'app.kubernetes.io/name=bot'
|
|
318
437
|
});
|
|
319
438
|
await sleep(10 * 1000);
|
|
439
|
+
// Some bot images may take time to report Ready due to heavy boot-time proving.
|
|
440
|
+
// Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
|
|
320
441
|
await waitForResourceByLabel({
|
|
321
442
|
resource: 'pods',
|
|
322
443
|
namespace,
|
|
323
|
-
label: 'app=bot'
|
|
444
|
+
label: 'app.kubernetes.io/name=bot',
|
|
445
|
+
condition: 'PodReadyToStartContainers'
|
|
324
446
|
});
|
|
325
447
|
logger.info(`Bot restarted`);
|
|
326
448
|
}
|
|
449
|
+
/**
|
|
450
|
+
* Installs or upgrades the transfer bot Helm release for the given namespace.
|
|
451
|
+
* Intended for test setup to enable L2 traffic generation only when needed.
|
|
452
|
+
*/ export async function installTransferBot({ namespace, spartanDir, logger, replicas = 1, txIntervalSeconds = 10, followChain = 'PENDING', mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk', mnemonicStartIndex, botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01', nodeUrl, timeout = '15m', reuseValues = true, aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12) }) {
|
|
453
|
+
const instanceName = `${namespace}-bot-transfers`;
|
|
454
|
+
const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
|
|
455
|
+
const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
|
|
456
|
+
logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
|
|
457
|
+
const values = {
|
|
458
|
+
'bot.replicaCount': replicas,
|
|
459
|
+
'bot.txIntervalSeconds': txIntervalSeconds,
|
|
460
|
+
'bot.followChain': followChain,
|
|
461
|
+
'bot.botPrivateKey': botPrivateKey,
|
|
462
|
+
'bot.nodeUrl': resolvedNodeUrl,
|
|
463
|
+
'bot.mnemonic': mnemonic,
|
|
464
|
+
'bot.feePaymentMethod': 'fee_juice',
|
|
465
|
+
'aztec.slotDuration': aztecSlotDuration,
|
|
466
|
+
// Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
|
|
467
|
+
// Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
|
|
468
|
+
'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
|
|
469
|
+
// Provide L1 execution RPC for bridging fee juice
|
|
470
|
+
'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
|
|
471
|
+
// Provide L1 mnemonic for bridging (falls back to labs mnemonic)
|
|
472
|
+
'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
|
|
473
|
+
// The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
|
|
474
|
+
// can be installed by users without cluster-scoped RBAC permissions.
|
|
475
|
+
'bot.rbac.create': false,
|
|
476
|
+
'bot.serviceAccount.create': false,
|
|
477
|
+
'bot.serviceAccount.name': 'default'
|
|
478
|
+
};
|
|
479
|
+
// Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
|
|
480
|
+
if (mnemonicStartIndex === undefined) {
|
|
481
|
+
values['bot.mnemonicStartIndex'] = 0;
|
|
482
|
+
}
|
|
483
|
+
// Also pass a funded private key directly if available
|
|
484
|
+
if (process.env.FUNDING_PRIVATE_KEY) {
|
|
485
|
+
values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
|
|
486
|
+
}
|
|
487
|
+
// Align bot image with the running network image: prefer env var, else detect from a validator pod
|
|
488
|
+
let repositoryFromEnv;
|
|
489
|
+
let tagFromEnv;
|
|
490
|
+
const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
|
|
491
|
+
if (aztecDockerImage && aztecDockerImage.includes(':')) {
|
|
492
|
+
const lastColon = aztecDockerImage.lastIndexOf(':');
|
|
493
|
+
repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
|
|
494
|
+
tagFromEnv = aztecDockerImage.slice(lastColon + 1);
|
|
495
|
+
}
|
|
496
|
+
let repository = repositoryFromEnv;
|
|
497
|
+
let tag = tagFromEnv;
|
|
498
|
+
if (!repository || !tag) {
|
|
499
|
+
try {
|
|
500
|
+
const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
|
|
501
|
+
const image = stdout.trim().replace(/^'|'$/g, '');
|
|
502
|
+
if (image && image.includes(':')) {
|
|
503
|
+
const lastColon = image.lastIndexOf(':');
|
|
504
|
+
repository = image.slice(0, lastColon);
|
|
505
|
+
tag = image.slice(lastColon + 1);
|
|
506
|
+
}
|
|
507
|
+
} catch (err) {
|
|
508
|
+
logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
|
|
509
|
+
}
|
|
510
|
+
}
|
|
511
|
+
if (repository && tag) {
|
|
512
|
+
values['global.aztecImage.repository'] = repository;
|
|
513
|
+
values['global.aztecImage.tag'] = tag;
|
|
514
|
+
}
|
|
515
|
+
if (mnemonicStartIndex !== undefined) {
|
|
516
|
+
values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
|
|
517
|
+
}
|
|
518
|
+
// If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
|
|
519
|
+
// `helm upgrade --install` can error with "has no deployed releases".
|
|
520
|
+
// In that case, clear the release record and do a clean install.
|
|
521
|
+
const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
|
|
522
|
+
if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
|
|
523
|
+
logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
|
|
524
|
+
await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(()=>undefined);
|
|
525
|
+
// If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
|
|
526
|
+
const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
|
|
527
|
+
if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
|
|
528
|
+
await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
|
|
529
|
+
}
|
|
530
|
+
}
|
|
531
|
+
// `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
|
|
532
|
+
// Only reuse values when we have a deployed release to reuse from.
|
|
533
|
+
const effectiveReuseValues = reuseValues && await hasDeployedHelmRelease(instanceName, namespace);
|
|
534
|
+
await execHelmCommand({
|
|
535
|
+
instanceName,
|
|
536
|
+
helmChartDir,
|
|
537
|
+
namespace,
|
|
538
|
+
valuesFile: undefined,
|
|
539
|
+
timeout,
|
|
540
|
+
values: values,
|
|
541
|
+
reuseValues: effectiveReuseValues
|
|
542
|
+
});
|
|
543
|
+
if (replicas > 0) {
|
|
544
|
+
await waitForResourceByLabel({
|
|
545
|
+
resource: 'pods',
|
|
546
|
+
namespace,
|
|
547
|
+
label: 'app.kubernetes.io/name=bot',
|
|
548
|
+
condition: 'PodReadyToStartContainers'
|
|
549
|
+
});
|
|
550
|
+
}
|
|
551
|
+
}
|
|
552
|
+
/**
 * Removes the transfer bot Helm release from `namespace`.
 * Used during test teardown so bot resources do not leak between runs.
 *
 * @param {string} namespace - Kubernetes namespace hosting the release.
 * @param logger - Logger used for progress output.
 */ export async function uninstallTransferBot(namespace, logger) {
    const instanceName = `${namespace}-bot-transfers`;
    logger.info(`Uninstalling transfer bot release ${instanceName}`);
    const helmCmd = `helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`;
    await execAsync(helmCmd);
    // Best-effort sweep: delete any pods the uninstall left behind.
    const podCleanup = deleteResourceByLabel({
        resource: 'pods',
        namespace,
        label: 'app.kubernetes.io/name=bot'
    });
    await podCleanup.catch(()=>undefined);
}
|
|
566
|
+
/**
 * Toggles probabilistic transaction dropping on validator StatefulSets and
 * restarts them so the change takes effect.
 * The toggle is surfaced to the nodes via the P2P_DROP_TX and
 * P2P_DROP_TX_CHANCE environment variables set through `kubectl set env`.
 *
 * @param {object} opts
 * @param {string} opts.namespace - Kubernetes namespace to operate in.
 * @param {boolean} opts.enabled - Whether tx dropping should be on.
 * @param opts.probability - Drop chance, stringified into the env var as-is.
 * @param opts.logger - Logger for progress and warnings.
 */ export async function setValidatorTxDrop({ namespace, enabled, probability, logger }) {
    const dropFlag = enabled ? 'true' : 'false';
    const dropChance = String(probability);
    // Deployments label validators inconsistently, so try each known selector.
    const selectors = [
        'app.kubernetes.io/name=validator',
        'app.kubernetes.io/component=validator',
        'app=validator'
    ];
    let touchedAny = false;
    for (const selector of selectors){
        try {
            const listing = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
            const matches = listing.stdout.split('\n').map((line)=>line.trim()).filter(Boolean);
            if (matches.length === 0) {
                // Nothing labelled this way; move on to the next selector.
                continue;
            }
            const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${dropFlag} P2P_DROP_TX_CHANCE=${dropChance}`;
            logger.info(`command: ${cmd}`);
            await execAsync(cmd);
            touchedAny = true;
        } catch (e) {
            // Best-effort: a failing selector must not abort the remaining ones.
            logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
        }
    }
    if (!touchedAny) {
        logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
        return;
    }
    // Restart validator pods so the new env vars take effect, and wait for readiness.
    await restartValidators(namespace, logger);
}
|
|
600
|
+
/**
 * Deletes all validator pods in `namespace` (letting Kubernetes recreate them)
 * and then waits for the replacements to report Ready.
 * Validator pods may carry any of several labels depending on how the network
 * was deployed, so every known selector is attempted for both phases.
 *
 * @param {string} namespace - Kubernetes namespace to operate in.
 * @param logger - Logger for warnings.
 */ export async function restartValidators(namespace, logger) {
    const selectors = [
        'app.kubernetes.io/name=validator',
        'app.kubernetes.io/component=validator',
        'app=validator'
    ];
    // Phase 1: delete pods under every selector that matches something.
    let deletedAny = false;
    for (const selector of selectors){
        try {
            const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
            const hasPods = Boolean(stdout) && stdout.trim().length > 0;
            if (!hasPods) {
                continue;
            }
            deletedAny = true;
            await deleteResourceByLabel({
                resource: 'pods',
                namespace,
                label: selector
            });
        } catch (e) {
            // Keep going: other selectors may still match restartable pods.
            logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
        }
    }
    if (!deletedAny) {
        logger.warn(`No validator pods found to restart in ${namespace}.`);
        return;
    }
    // Phase 2: succeed as soon as pods under any one selector become Ready.
    for (const selector of selectors){
        try {
            await waitForResourceByLabel({
                resource: 'pods',
                namespace,
                label: selector
            });
            // Ready under this selector; nothing more to do.
            return;
        } catch {
            // Not ready (or nothing matched) under this label; try the next.
        }
    }
    logger.warn(`Validator pods did not report Ready; continuing.`);
}
|
|
327
642
|
export async function enableValidatorDynamicBootNode(instanceName, namespace, spartanDir, logger) {
|
|
328
643
|
logger.info(`Enabling validator dynamic boot node`);
|
|
329
644
|
await execHelmCommand({
|
|
@@ -340,11 +655,27 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
|
|
|
340
655
|
logger.info(`Validator dynamic boot node enabled`);
|
|
341
656
|
}
|
|
342
657
|
/**
 * Lists the sequencer/validator pod names in the given namespace.
 * Tries each known label selector in turn and returns the first non-empty
 * match; throws if no selector yields any pods.
 *
 * NOTE(review): this function references `logger` without taking it as a
 * parameter — presumably a module-level logger; confirm it is in scope.
 *
 * @param {string} namespace - Kubernetes namespace to query.
 * @returns {Promise<string[]>} Names of the sequencer pods found.
 * @throws {Error} When no selector matches any pods.
 */ export async function getSequencers(namespace) {
    const selectors = [
        'app.kubernetes.io/name=validator',
        'app.kubernetes.io/component=validator',
        'app.kubernetes.io/component=sequencer-node',
        'app=validator'
    ];
    for (const selector of selectors){
        try {
            const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
            const { stdout } = await execAsync(command);
            // jsonpath emits space-separated pod names; drop empties.
            const podNames = stdout.split(' ').map((name)=>name.trim()).filter(Boolean);
            if (podNames.length > 0) {
                logger.verbose(`Found sequencer pods ${podNames.join(', ')} (selector=${selector})`);
                return podNames;
            }
        } catch {
            // This selector failed; fall through to the next one.
        }
    }
    // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
    throw new Error(`No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`);
}
|
|
349
680
|
export function updateSequencersConfig(env, config) {
|
|
350
681
|
return withSequencersAdmin(env, async (client)=>{
|
|
@@ -394,7 +725,9 @@ export async function withSequencersAdmin(env, fn) {
|
|
|
394
725
|
const url = `http://127.0.0.1:${port}`;
|
|
395
726
|
const client = createPublicClient({
|
|
396
727
|
transport: fallback([
|
|
397
|
-
http(url
|
|
728
|
+
http(url, {
|
|
729
|
+
batch: false
|
|
730
|
+
})
|
|
398
731
|
])
|
|
399
732
|
});
|
|
400
733
|
if (processes) {
|
|
@@ -412,7 +745,9 @@ export async function withSequencersAdmin(env, fn) {
|
|
|
412
745
|
}
|
|
413
746
|
const client = createPublicClient({
|
|
414
747
|
transport: fallback([
|
|
415
|
-
http(L1_RPC_URLS_JSON
|
|
748
|
+
http(L1_RPC_URLS_JSON, {
|
|
749
|
+
batch: false
|
|
750
|
+
})
|
|
416
751
|
])
|
|
417
752
|
});
|
|
418
753
|
return {
|
|
@@ -528,3 +863,30 @@ export async function withSequencersAdmin(env, fn) {
|
|
|
528
863
|
throw new Error(`Failed to determine git project root: ${error}`);
|
|
529
864
|
}
|
|
530
865
|
}
|
|
866
|
+
/**
 * Returns a client to the RPC of the given sequencer (defaults to first).
 * Port-forwards to the sequencer pod, waits for its `/status` endpoint to
 * answer 200, then wraps the forwarded URL in an Aztec node client.
 *
 * NOTE(review): references `logger` without taking it as a parameter —
 * presumably a module-level logger; confirm it is in scope.
 *
 * @param env - Test environment; only `env.NAMESPACE` is read here.
 * @param {number} [index=0] - Which pod from getSequencers to target.
 * @returns {Promise<{node: *, port: number, process: *}>} The node client, the
 *   local forwarded port, and the port-forward child process. The caller owns
 *   the process and must terminate it when finished.
 * @throws {Error} If no sequencer exists at `index`, or the status check
 *   never succeeds within the retry backoff.
 */ export async function getNodeClient(env, index = 0) {
    const namespace = env.NAMESPACE;
    const containerPort = 8080;
    const sequencers = await getSequencers(namespace);
    const sequencer = sequencers[index];
    if (!sequencer) {
        throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
    }
    const { process: process1, port } = await startPortForward({
        resource: `pod/${sequencer}`,
        namespace,
        containerPort
    });
    const url = `http://localhost:${port}`;
    try {
        // Poll the node's status endpoint until the forwarded port is actually serving.
        await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward port', makeBackoff([
            1,
            1,
            2,
            6
        ]), logger, true);
    } catch (err) {
        // Fix: don't leak the port-forward child process when the node never
        // becomes reachable — terminate it before propagating the failure.
        // Optional chaining keeps this safe if startPortForward ever returns
        // a process-less handle.
        process1?.kill?.();
        throw err;
    }
    const client = createAztecNodeClient(url);
    return {
        node: client,
        port,
        process: process1
    };
}
|