@aztec/end-to-end 3.0.0-canary.a9708bd → 3.0.0-devnet.20251212
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/benchmark.d.ts +4 -3
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/benchmark.js +2 -2
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +25 -14
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.js +111 -90
- package/dest/bench/client_flows/config.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.d.ts +1 -1
- package/dest/bench/client_flows/data_extractor.js +8 -30
- package/dest/bench/utils.d.ts +4 -13
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +10 -34
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +8 -8
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +42 -42
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +12 -9
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +35 -35
- package/dest/e2e_deploy_contract/deploy_test.d.ts +12 -6
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +9 -18
- package/dest/e2e_epochs/epochs_test.d.ts +19 -12
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +30 -22
- package/dest/e2e_fees/bridging_race.notest.d.ts +1 -1
- package/dest/e2e_fees/bridging_race.notest.js +14 -11
- package/dest/e2e_fees/fees_test.d.ts +10 -8
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +35 -38
- package/dest/e2e_l1_publisher/write_json.d.ts +4 -2
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
- package/dest/e2e_l1_publisher/write_json.js +9 -8
- package/dest/e2e_multi_validator/utils.d.ts +2 -2
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -1
- package/dest/e2e_multi_validator/utils.js +4 -10
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +7 -4
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +11 -12
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +139 -0
- package/dest/e2e_p2p/p2p_network.d.ts +238 -17
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +46 -19
- package/dest/e2e_p2p/shared.d.ts +16 -17
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +56 -55
- package/dest/e2e_token_contract/token_contract_test.d.ts +6 -5
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +14 -17
- package/dest/fixtures/e2e_prover_test.d.ts +12 -8
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
- package/dest/fixtures/e2e_prover_test.js +53 -58
- package/dest/fixtures/fixtures.d.ts +1 -1
- package/dest/fixtures/fixtures.d.ts.map +1 -1
- package/dest/fixtures/fixtures.js +1 -1
- package/dest/fixtures/get_acvm_config.d.ts +2 -2
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_acvm_config.js +1 -1
- package/dest/fixtures/get_bb_config.d.ts +2 -2
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +2 -2
- package/dest/fixtures/index.d.ts +1 -1
- package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
- package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
- package/dest/fixtures/l1_to_l2_messaging.js +2 -2
- package/dest/fixtures/logging.d.ts +1 -1
- package/dest/fixtures/setup_l1_contracts.d.ts +476 -5
- package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
- package/dest/fixtures/setup_l1_contracts.js +3 -3
- package/dest/fixtures/setup_p2p_test.d.ts +12 -11
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +50 -24
- package/dest/fixtures/snapshot_manager.d.ts +13 -10
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +66 -51
- package/dest/fixtures/token_utils.d.ts +10 -5
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +17 -18
- package/dest/fixtures/utils.d.ts +479 -35
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +106 -125
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
- package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
- package/dest/fixtures/with_telemetry_utils.js +2 -2
- package/dest/index.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts +2 -2
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +20 -23
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +14 -16
- package/dest/shared/gas_portal_test_harness.d.ts +10 -17
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +11 -8
- package/dest/shared/index.d.ts +1 -1
- package/dest/shared/jest_setup.d.ts +1 -1
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +6 -4
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +12 -8
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +44 -58
- package/dest/simulators/index.d.ts +1 -1
- package/dest/simulators/lending_simulator.d.ts +4 -7
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +8 -5
- package/dest/simulators/token_simulator.d.ts +4 -2
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +2 -2
- package/dest/spartan/setup_test_wallets.d.ts +22 -14
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +144 -86
- package/dest/spartan/tx_metrics.d.ts +39 -0
- package/dest/spartan/tx_metrics.d.ts.map +1 -0
- package/dest/spartan/tx_metrics.js +95 -0
- package/dest/spartan/utils.d.ts +92 -17
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +386 -63
- package/package.json +43 -40
- package/src/bench/client_flows/benchmark.ts +8 -8
- package/src/bench/client_flows/client_flows_benchmark.ts +141 -114
- package/src/bench/client_flows/data_extractor.ts +9 -31
- package/src/bench/utils.ts +9 -37
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +46 -63
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +38 -51
- package/src/e2e_deploy_contract/deploy_test.ts +18 -36
- package/src/e2e_epochs/epochs_test.ts +41 -35
- package/src/e2e_fees/bridging_race.notest.ts +16 -11
- package/src/e2e_fees/fees_test.ts +42 -47
- package/src/e2e_l1_publisher/write_json.ts +12 -9
- package/src/e2e_multi_validator/utils.ts +5 -11
- package/src/e2e_nested_contract/nested_contract_test.ts +15 -13
- package/src/e2e_p2p/inactivity_slash_test.ts +184 -0
- package/src/e2e_p2p/p2p_network.ts +124 -82
- package/src/e2e_p2p/shared.ts +66 -58
- package/src/e2e_token_contract/token_contract_test.ts +17 -17
- package/src/fixtures/e2e_prover_test.ts +60 -97
- package/src/fixtures/fixtures.ts +1 -2
- package/src/fixtures/get_acvm_config.ts +2 -2
- package/src/fixtures/get_bb_config.ts +3 -2
- package/src/fixtures/l1_to_l2_messaging.ts +4 -2
- package/src/fixtures/setup_l1_contracts.ts +5 -4
- package/src/fixtures/setup_p2p_test.ts +79 -32
- package/src/fixtures/snapshot_manager.ts +87 -82
- package/src/fixtures/token_utils.ts +16 -24
- package/src/fixtures/utils.ts +142 -172
- package/src/fixtures/web3signer.ts +63 -0
- package/src/fixtures/with_telemetry_utils.ts +2 -2
- package/src/guides/up_quick_start.sh +3 -11
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +23 -31
- package/src/shared/gas_portal_test_harness.ts +14 -21
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +77 -86
- package/src/simulators/lending_simulator.ts +9 -6
- package/src/simulators/token_simulator.ts +5 -2
- package/src/spartan/DEVELOP.md +15 -3
- package/src/spartan/setup_test_wallets.ts +171 -127
- package/src/spartan/tx_metrics.ts +130 -0
- package/src/spartan/utils.ts +463 -64
package/src/spartan/utils.ts
CHANGED
@@ -1,13 +1,24 @@
-import { createLogger
+import { createLogger } from '@aztec/aztec.js/log';
 import type { RollupCheatCodes } from '@aztec/aztec/testing';
+import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+import type { ViemPublicClient } from '@aztec/ethereum/types';
+import type { CheckpointNumber } from '@aztec/foundation/branded-types';
 import type { Logger } from '@aztec/foundation/log';
+import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { makeBackoff, retry } from '@aztec/foundation/retry';
 import { schemas } from '@aztec/foundation/schemas';
-import {
+import { sleep } from '@aztec/foundation/sleep';
+import {
+  type AztecNodeAdmin,
+  type AztecNodeAdminConfig,
+  createAztecNodeAdminClient,
+  createAztecNodeClient,
+} from '@aztec/stdlib/interfaces/client';
 
 import { ChildProcess, exec, execSync, spawn } from 'child_process';
 import path from 'path';
 import { promisify } from 'util';
+import { createPublicClient, fallback, http } from 'viem';
 import { z } from 'zod';
 
 const execAsync = promisify(exec);
@@ -17,6 +28,11 @@ const logger = createLogger('e2e:k8s-utils');
 const testConfigSchema = z.object({
   NAMESPACE: z.string().default('scenario'),
   REAL_VERIFIER: schemas.Boolean.optional().default(true),
+  CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+  L1_RPC_URLS_JSON: z.string().optional(),
+  L1_ACCOUNT_MNEMONIC: z.string().optional(),
+  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
 });
 
 export type TestConfig = z.infer<typeof testConfigSchema>;
@@ -87,7 +103,7 @@ export async function startPortForward({
 }> {
   const hostPortAsString = hostPort ? hostPort.toString() : '';
 
-  logger.
+  logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
 
   const process = spawn(
     'kubectl',
@@ -105,20 +121,20 @@ export async function startPortForward({
       const str = data.toString() as string;
       if (!isResolved && str.includes('Forwarding from')) {
         isResolved = true;
-        logger.
+        logger.debug(`Port forward for ${resource}: ${str}`);
         const port = str.search(/:\d+/);
         if (port === -1) {
           throw new Error('Port not found in port forward output');
         }
         const portNumber = parseInt(str.slice(port + 1));
-        logger.
+        logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
         resolve(portNumber);
       } else {
         logger.silent(str);
       }
     });
     process.stderr?.on('data', data => {
-      logger.
+      logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
       // It's a strange thing:
       // If we don't pipe stderr, then the port forwarding does not work.
       // Log to silent because this doesn't actually report errors,
@@ -128,16 +144,16 @@ export async function startPortForward({
     process.on('close', () => {
       if (!isResolved) {
         isResolved = true;
-        logger.warn(
+        logger.warn(`Port forward for ${resource} closed before connection established`);
         resolve(0);
       }
     });
     process.on('error', error => {
-      logger.error(`Port forward error: ${error}`);
+      logger.error(`Port forward for ${resource} error: ${error}`);
      resolve(0);
     });
     process.on('exit', code => {
-      logger.
+      logger.verbose(`Port forward for ${resource} exited with code ${code}`);
       resolve(0);
     });
@@ -147,9 +163,42 @@ export async function startPortForward({
   return { process, port };
 }
 
-export function
+export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
+  const { promise, resolve, reject } = promiseWithResolvers<string>();
+  const process = spawn(
+    'kubectl',
+    [
+      'get',
+      'service',
+      '-n',
+      namespace,
+      `${namespace}-${serviceName}`,
+      '--output',
+      "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
+    ],
+    {
+      stdio: 'pipe',
+    },
+  );
+
+  let ip = '';
+  process.stdout.on('data', data => {
+    ip += data;
+  });
+  process.on('error', err => {
+    reject(err);
+  });
+  process.on('exit', () => {
+    // kubectl prints JSON. Remove the quotes
+    resolve(ip.replace(/"|'/g, ''));
+  });
+
+  return promise;
+}
+
+export function startPortForwardForRPC(namespace: string, index = 0) {
   return startPortForward({
-    resource: `
+    resource: `pod/${namespace}-rpc-aztec-node-${index}`,
     namespace,
     containerPort: 8080,
   });
@@ -195,6 +244,16 @@ export async function deleteResourceByLabel({
   timeout?: string;
   force?: boolean;
 }) {
+  // Check if the resource type exists before attempting to delete
+  try {
+    await execAsync(
+      `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
+    );
+  } catch (error) {
+    logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+    return '';
+  }
+
   const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
     force ? '--force' : ''
   }`;
@@ -226,9 +285,18 @@ export function getChartDir(spartanDir: string, chartName: string) {
   return path.join(spartanDir.trim(), chartName);
 }
 
-function
+function shellQuote(value: string) {
+  // Single-quote safe shell escaping: ' -> '\''
+  return `'${value.replace(/'/g, "'\\''")}'`;
+}
+
+function valuesToArgs(values: Record<string, string | number | boolean>) {
   return Object.entries(values)
-    .map(([key, value]) =>
+    .map(([key, value]) =>
+      typeof value === 'number' || typeof value === 'boolean'
+        ? `--set ${key}=${value}`
+        : `--set-string ${key}=${shellQuote(String(value))}`,
+    )
     .join(' ');
 }
 
@@ -246,7 +314,7 @@ function createHelmCommand({
   namespace: string;
   valuesFile: string | undefined;
   timeout: string;
-  values: Record<string, string | number>;
+  values: Record<string, string | number | boolean>;
   reuseValues?: boolean;
 }) {
   const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -263,6 +331,32 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
   return stdout;
 }
 
+export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
+  // uninstall the helm chart if it exists
+  logger.info(`Uninstalling helm chart ${instanceName}`);
+  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+  // and delete the chaos-mesh resources created by this release
+  const deleteByLabel = async (resource: string) => {
+    const args = {
+      resource,
+      namespace: namespace,
+      label: `app.kubernetes.io/instance=${instanceName}`,
+    } as const;
+    logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+    await deleteResourceByLabel(args).catch(e => {
+      logger.error(`Error deleting ${resource}: ${e}`);
+      logger.info(`Force deleting ${resource}`);
+      return deleteResourceByLabel({ ...args, force: true });
+    });
+  };
+
+  await deleteByLabel('podchaos');
+  await deleteByLabel('networkchaos');
+  await deleteByLabel('podnetworkchaos');
+  await deleteByLabel('workflows');
+  await deleteByLabel('workflownodes');
+}
+
 /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
@@ -285,8 +379,7 @@ export async function installChaosMeshChart({
   targetNamespace,
   valuesFile,
   helmChartDir,
-
-  timeout = '5m',
+  timeout = '10m',
   clean = true,
   values = {},
   logger,
@@ -302,27 +395,13 @@ export async function installChaosMeshChart({
   logger: Logger;
 }) {
   if (clean) {
-
-    logger.info(`Uninstalling helm chart ${instanceName}`);
-    await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-    // and delete the podchaos resource
-    const deleteArgs = {
-      resource: 'podchaos',
-      namespace: chaosMeshNamespace,
-      label: `app.kubernetes.io/instance=${instanceName}`,
-    };
-    logger.info(`Deleting podchaos resource`);
-    await deleteResourceByLabel(deleteArgs).catch(e => {
-      logger.error(`Error deleting podchaos resource: ${e}`);
-      logger.info(`Force deleting podchaos resource`);
-      return deleteResourceByLabel({ ...deleteArgs, force: true });
-    });
+    await uninstallChaosMesh(instanceName, targetNamespace, logger);
   }
 
   return execHelmCommand({
     instanceName,
     helmChartDir,
-    namespace:
+    namespace: targetNamespace,
     valuesFile,
     timeout,
     values: { ...values, 'global.targetNamespace': targetNamespace },
@@ -454,35 +533,257 @@ export function applyNetworkShaping({
   });
 }
 
-export async function
+export async function awaitCheckpointNumber(
   rollupCheatCodes: RollupCheatCodes,
-
+  checkpointNumber: CheckpointNumber,
   timeoutSeconds: number,
   logger: Logger,
 ) {
-  logger.info(`Waiting for
+  logger.info(`Waiting for checkpoint ${checkpointNumber}`);
   let tips = await rollupCheatCodes.getTips();
   const endTime = Date.now() + timeoutSeconds * 1000;
-  while (tips.pending <
-    logger.info(`At
+  while (tips.pending < checkpointNumber && Date.now() < endTime) {
+    logger.info(`At checkpoint ${tips.pending}`);
     await sleep(1000);
     tips = await rollupCheatCodes.getTips();
   }
-  if (tips.pending <
-    throw new Error(`Timeout waiting for
+  if (tips.pending < checkpointNumber) {
+    throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
   } else {
-    logger.info(`Reached
+    logger.info(`Reached checkpoint ${tips.pending}`);
   }
 }
 
 export async function restartBot(namespace: string, logger: Logger) {
   logger.info(`Restarting bot`);
-  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
   await sleep(10 * 1000);
-
+  // Some bot images may take time to report Ready due to heavy boot-time proving.
+  // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
+  await waitForResourceByLabel({
+    resource: 'pods',
+    namespace,
+    label: 'app.kubernetes.io/name=bot',
+    condition: 'PodReadyToStartContainers',
+  });
   logger.info(`Bot restarted`);
 }
 
+/**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */
+export async function installTransferBot({
+  namespace,
+  spartanDir,
+  logger,
+  replicas = 1,
+  txIntervalSeconds = 10,
+  followChain = 'PENDING',
+  mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
+  mnemonicStartIndex,
+  botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
+  nodeUrl,
+  timeout = '15m',
+  reuseValues = true,
+  aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
+}: {
+  namespace: string;
+  spartanDir: string;
+  logger: Logger;
+  replicas?: number;
+  txIntervalSeconds?: number;
+  followChain?: string;
+  mnemonic?: string;
+  mnemonicStartIndex?: number | string;
+  botPrivateKey?: string;
+  nodeUrl?: string;
+  timeout?: string;
+  reuseValues?: boolean;
+  aztecSlotDuration?: number;
+}) {
+  const instanceName = `${namespace}-bot-transfers`;
+  const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+  const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+
+  logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+
+  const values: Record<string, string | number | boolean> = {
+    'bot.replicaCount': replicas,
+    'bot.txIntervalSeconds': txIntervalSeconds,
+    'bot.followChain': followChain,
+    'bot.botPrivateKey': botPrivateKey,
+    'bot.nodeUrl': resolvedNodeUrl,
+    'bot.mnemonic': mnemonic,
+    'bot.feePaymentMethod': 'fee_juice',
+    'aztec.slotDuration': aztecSlotDuration,
+    // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+    // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+    'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+    // Provide L1 execution RPC for bridging fee juice
+    'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+    // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+    'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+  };
+  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+  if (mnemonicStartIndex === undefined) {
+    values['bot.mnemonicStartIndex'] = 0;
+  }
+  // Also pass a funded private key directly if available
+  if (process.env.FUNDING_PRIVATE_KEY) {
+    values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+  }
+  // Align bot image with the running network image: prefer env var, else detect from a validator pod
+  let repositoryFromEnv: string | undefined;
+  let tagFromEnv: string | undefined;
+  const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+  if (aztecDockerImage && aztecDockerImage.includes(':')) {
+    const lastColon = aztecDockerImage.lastIndexOf(':');
+    repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+    tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+  }
+
+  let repository = repositoryFromEnv;
+  let tag = tagFromEnv;
+  if (!repository || !tag) {
+    try {
+      const { stdout } = await execAsync(
+        `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+      );
+      const image = stdout.trim().replace(/^'|'$/g, '');
+      if (image && image.includes(':')) {
+        const lastColon = image.lastIndexOf(':');
+        repository = image.slice(0, lastColon);
+        tag = image.slice(lastColon + 1);
+      }
+    } catch (err) {
+      logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+    }
+  }
+  if (repository && tag) {
+    values['global.aztecImage.repository'] = repository;
+    values['global.aztecImage.tag'] = tag;
+  }
+  if (mnemonicStartIndex !== undefined) {
+    values['bot.mnemonicStartIndex'] =
+      typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+  }
+
+  await execHelmCommand({
+    instanceName,
+    helmChartDir,
+    namespace,
+    valuesFile: undefined,
+    timeout,
+    values: values as unknown as Record<string, string | number | boolean>,
+    reuseValues,
+  });
+
+  if (replicas > 0) {
+    await waitForResourceByLabel({
+      resource: 'pods',
+      namespace,
+      label: 'app.kubernetes.io/name=bot',
+      condition: 'PodReadyToStartContainers',
+    });
+  }
+}
+
+/**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */
+export async function uninstallTransferBot(namespace: string, logger: Logger) {
+  const instanceName = `${namespace}-bot-transfers`;
+  logger.info(`Uninstalling transfer bot release ${instanceName}`);
+  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+  // Ensure any leftover pods are removed
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
+    () => undefined,
+  );
+}
+
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */
+export async function setValidatorTxDrop({
+  namespace,
+  enabled,
+  probability,
+  logger,
+}: {
+  namespace: string;
+  enabled: boolean;
+  probability: number;
+  logger: Logger;
+}) {
+  const drop = enabled ? 'true' : 'false';
+  const prob = String(probability);
+
+  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  let updated = false;
+  for (const selector of selectors) {
+    try {
+      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      const names = list.stdout
+        .split('\n')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (names.length === 0) {
+        continue;
+      }
+      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+      logger.info(`command: ${cmd}`);
+      await execAsync(cmd);
+      updated = true;
+    } catch (e) {
+      logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!updated) {
+    logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+    return;
+  }
+
+  // Restart validator pods to ensure env vars take effect and wait for readiness
+  await restartValidators(namespace, logger);
+}
+
+export async function restartValidators(namespace: string, logger: Logger) {
+  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  let any = false;
+  for (const selector of selectors) {
+    try {
+      const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      if (!stdout || stdout.trim().length === 0) {
+        continue;
+      }
+      any = true;
+      await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
+    } catch (e) {
+      logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!any) {
+    logger.warn(`No validator pods found to restart in ${namespace}.`);
+    return;
+  }
+
+  // Wait for either label to be Ready
+  for (const selector of selectors) {
+    try {
+      await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
+      return;
+    } catch {
+      // try next
+    }
+  }
+  logger.warn(`Validator pods did not report Ready; continuing.`);
+}
+
 export async function enableValidatorDynamicBootNode(
   instanceName: string,
   namespace: string,
@@ -505,44 +806,110 @@ export async function enableValidatorDynamicBootNode(
   logger.info(`Validator dynamic boot node enabled`);
 }
 
-export async function updateSequencerConfig(url: string, config: Partial<AztecNodeAdminConfig>) {
-  const node = createAztecNodeAdminClient(url);
-  // Retry incase the port forward is not ready yet
-  await retry(() => node.setConfig(config), 'Update sequencer config', makeBackoff([1, 3, 6]), logger);
-}
-
 export async function getSequencers(namespace: string) {
-  const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+  const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
   const { stdout } = await execAsync(command);
-
+  const sequencers = stdout.split(' ');
+  logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
+  return sequencers;
 }
 
-
-
-
-
-})
-
+export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+  return withSequencersAdmin(env, async client => {
+    await client.setConfig(config);
+    return client.getConfig();
+  });
+}
+
+export function getSequencersConfig(env: TestConfig) {
+  return withSequencersAdmin(env, client => client.getConfig());
+}
+
+export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+  const adminContainerPort = 8880;
+  const namespace = env.NAMESPACE;
   const sequencers = await getSequencers(namespace);
+  const results = [];
+
   for (const sequencer of sequencers) {
     const { process, port } = await startPortForward({
       resource: `pod/${sequencer}`,
       namespace,
-      containerPort,
+      containerPort: adminContainerPort,
     });
 
     const url = `http://localhost:${port}`;
-    await
+    await retry(
+      () => fetch(`${url}/status`).then(res => res.status === 200),
+      'forward node admin port',
+      makeBackoff([1, 1, 2, 6]),
+      logger,
+      true,
+    );
+    const client = createAztecNodeAdminClient(url);
+    results.push(await fn(client));
     process.kill();
   }
+
+  return results;
 }
 
-
-
-
-
-
-
+/**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */
+export async function getPublicViemClient(
+  env: TestConfig,
+  /** If set, will push the new process into it */
+  processes?: ChildProcess[],
+): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
+  const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+  if (CREATE_ETH_DEVNET) {
+    logger.info(`Creating port forward to eth execution node`);
+    const { process, port } = await startPortForward({
+      resource: `svc/${NAMESPACE}-eth-execution`,
+      namespace: NAMESPACE,
+      containerPort: 8545,
+    });
+    const url = `http://127.0.0.1:${port}`;
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+    if (processes) {
+      processes.push(process);
+    }
+    return { url, client, process };
+  } else {
+    logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+    if (!L1_RPC_URLS_JSON) {
+      throw new Error(`L1_RPC_URLS_JSON is not defined`);
+    }
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+    return { url: L1_RPC_URLS_JSON, client };
+  }
+}
+
+/** Queries an Aztec node for the L1 deployment addresses */
+export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
+  let forwardProcess: ChildProcess | undefined;
+  try {
+    const [sequencer] = await getSequencers(env.NAMESPACE);
+    const { process, port } = await startPortForward({
+      resource: `pod/${sequencer}`,
+      namespace: env.NAMESPACE,
+      containerPort: 8080,
+    });
+
+    forwardProcess = process;
+    const url = `http://127.0.0.1:${port}`;
+    const node = createAztecNodeClient(url);
+    return await retry(
+      () => node.getNodeInfo().then(i => i.l1ContractAddresses),
+      'get node info',
+      makeBackoff([1, 3, 6]),
+      logger,
+    );
+  } finally {
+    forwardProcess?.kill();
+  }
 }
 
 /**
@@ -583,3 +950,35 @@ export function getGitProjectRoot(): string {
     throw new Error(`Failed to determine git project root: ${error}`);
   }
 }
+
+/** Returns a client to the RPC of the given sequencer (defaults to first) */
+export async function getNodeClient(
+  env: TestConfig,
+  index: number = 0,
+): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
+  const namespace = env.NAMESPACE;
+  const containerPort = 8080;
+  const sequencers = await getSequencers(namespace);
+  const sequencer = sequencers[index];
+  if (!sequencer) {
+    throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+  }
+
+  const { process, port } = await startPortForward({
+    resource: `pod/${sequencer}`,
+    namespace,
+    containerPort,
+  });
+
+  const url = `http://localhost:${port}`;
+  await retry(
+    () => fetch(`${url}/status`).then(res => res.status === 200),
+    'forward port',
+    makeBackoff([1, 1, 2, 6]),
+    logger,
+    true,
+  );
+
+  const client = createAztecNodeClient(url);
+  return { node: client, port, process };
+}