@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.5476d83

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (174)
  1. package/dest/bench/client_flows/benchmark.d.ts +61 -0
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
  3. package/dest/bench/client_flows/benchmark.js +261 -0
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +80 -0
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
  6. package/dest/bench/client_flows/client_flows_benchmark.js +334 -0
  7. package/dest/bench/client_flows/config.d.ts +14 -0
  8. package/dest/bench/client_flows/config.d.ts.map +1 -0
  9. package/dest/bench/client_flows/config.js +106 -0
  10. package/dest/bench/client_flows/data_extractor.d.ts +2 -0
  11. package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
  12. package/dest/bench/client_flows/data_extractor.js +77 -0
  13. package/dest/bench/utils.d.ts +12 -38
  14. package/dest/bench/utils.d.ts.map +1 -1
  15. package/dest/bench/utils.js +26 -66
  16. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +21 -13
  17. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  18. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
  19. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +19 -25
  20. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  21. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +50 -70
  22. package/dest/e2e_deploy_contract/deploy_test.d.ts +16 -8
  23. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  24. package/dest/e2e_deploy_contract/deploy_test.js +13 -19
  25. package/dest/e2e_epochs/epochs_test.d.ts +59 -18
  26. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  27. package/dest/e2e_epochs/epochs_test.js +226 -44
  28. package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
  29. package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
  30. package/dest/e2e_fees/bridging_race.notest.js +63 -0
  31. package/dest/e2e_fees/fees_test.d.ts +21 -10
  32. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  33. package/dest/e2e_fees/fees_test.js +103 -109
  34. package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
  35. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  36. package/dest/e2e_l1_publisher/write_json.js +58 -0
  37. package/dest/e2e_multi_validator/utils.d.ts +12 -0
  38. package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
  39. package/dest/e2e_multi_validator/utils.js +214 -0
  40. package/dest/e2e_nested_contract/nested_contract_test.d.ts +10 -7
  41. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  42. package/dest/e2e_nested_contract/nested_contract_test.js +24 -20
  43. package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
  44. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
  45. package/dest/e2e_p2p/inactivity_slash_test.js +139 -0
  46. package/dest/e2e_p2p/p2p_network.d.ts +275 -23
  47. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  48. package/dest/e2e_p2p/p2p_network.js +184 -131
  49. package/dest/e2e_p2p/shared.d.ts +43 -7
  50. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  51. package/dest/e2e_p2p/shared.js +164 -19
  52. package/dest/e2e_token_contract/token_contract_test.d.ts +12 -6
  53. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  54. package/dest/e2e_token_contract/token_contract_test.js +50 -26
  55. package/dest/fixtures/e2e_prover_test.d.ts +63 -0
  56. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
  57. package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +104 -105
  58. package/dest/fixtures/fixtures.d.ts +6 -7
  59. package/dest/fixtures/fixtures.d.ts.map +1 -1
  60. package/dest/fixtures/fixtures.js +4 -3
  61. package/dest/fixtures/get_acvm_config.d.ts +2 -2
  62. package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
  63. package/dest/fixtures/get_acvm_config.js +2 -14
  64. package/dest/fixtures/get_bb_config.d.ts +2 -2
  65. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  66. package/dest/fixtures/get_bb_config.js +10 -17
  67. package/dest/fixtures/index.d.ts +1 -1
  68. package/dest/fixtures/l1_to_l2_messaging.d.ts +9 -6
  69. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  70. package/dest/fixtures/l1_to_l2_messaging.js +44 -18
  71. package/dest/fixtures/logging.d.ts +1 -1
  72. package/dest/fixtures/setup_l1_contracts.d.ts +476 -5
  73. package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
  74. package/dest/fixtures/setup_l1_contracts.js +4 -4
  75. package/dest/fixtures/setup_p2p_test.d.ts +15 -14
  76. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  77. package/dest/fixtures/setup_p2p_test.js +81 -21
  78. package/dest/fixtures/snapshot_manager.d.ts +17 -9
  79. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  80. package/dest/fixtures/snapshot_manager.js +147 -121
  81. package/dest/fixtures/token_utils.d.ts +10 -4
  82. package/dest/fixtures/token_utils.d.ts.map +1 -1
  83. package/dest/fixtures/token_utils.js +28 -12
  84. package/dest/fixtures/utils.d.ts +524 -40
  85. package/dest/fixtures/utils.d.ts.map +1 -1
  86. package/dest/fixtures/utils.js +464 -369
  87. package/dest/fixtures/web3signer.d.ts +5 -0
  88. package/dest/fixtures/web3signer.d.ts.map +1 -0
  89. package/dest/fixtures/web3signer.js +53 -0
  90. package/dest/fixtures/with_telemetry_utils.d.ts +1 -1
  91. package/dest/index.d.ts +1 -1
  92. package/dest/quality_of_service/alert_checker.d.ts +2 -2
  93. package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
  94. package/dest/shared/cross_chain_test_harness.d.ts +39 -34
  95. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  96. package/dest/shared/cross_chain_test_harness.js +104 -50
  97. package/dest/shared/gas_portal_test_harness.d.ts +29 -31
  98. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  99. package/dest/shared/gas_portal_test_harness.js +51 -30
  100. package/dest/shared/index.d.ts +1 -1
  101. package/dest/shared/jest_setup.d.ts +1 -1
  102. package/dest/shared/jest_setup.js +1 -1
  103. package/dest/shared/submit-transactions.d.ts +6 -4
  104. package/dest/shared/submit-transactions.d.ts.map +1 -1
  105. package/dest/shared/submit-transactions.js +8 -7
  106. package/dest/shared/uniswap_l1_l2.d.ts +14 -12
  107. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  108. package/dest/shared/uniswap_l1_l2.js +146 -116
  109. package/dest/simulators/index.d.ts +1 -1
  110. package/dest/simulators/lending_simulator.d.ts +7 -11
  111. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  112. package/dest/simulators/lending_simulator.js +15 -16
  113. package/dest/simulators/token_simulator.d.ts +6 -3
  114. package/dest/simulators/token_simulator.d.ts.map +1 -1
  115. package/dest/simulators/token_simulator.js +16 -13
  116. package/dest/spartan/setup_test_wallets.d.ts +26 -11
  117. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  118. package/dest/spartan/setup_test_wallets.js +201 -58
  119. package/dest/spartan/utils.d.ts +118 -313
  120. package/dest/spartan/utils.d.ts.map +1 -1
  121. package/dest/spartan/utils.js +472 -135
  122. package/package.json +65 -58
  123. package/src/bench/client_flows/benchmark.ts +341 -0
  124. package/src/bench/client_flows/client_flows_benchmark.ts +447 -0
  125. package/src/bench/client_flows/config.ts +61 -0
  126. package/src/bench/client_flows/data_extractor.ts +89 -0
  127. package/src/bench/utils.ts +22 -76
  128. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
  129. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +65 -106
  130. package/src/e2e_deploy_contract/deploy_test.ts +24 -39
  131. package/src/e2e_epochs/epochs_test.ts +276 -55
  132. package/src/e2e_fees/bridging_race.notest.ts +80 -0
  133. package/src/e2e_fees/fees_test.ts +142 -138
  134. package/src/e2e_l1_publisher/write_json.ts +77 -0
  135. package/src/e2e_multi_validator/utils.ts +258 -0
  136. package/src/e2e_nested_contract/nested_contract_test.ts +29 -19
  137. package/src/e2e_p2p/inactivity_slash_test.ts +182 -0
  138. package/src/e2e_p2p/p2p_network.ts +279 -169
  139. package/src/e2e_p2p/shared.ts +247 -29
  140. package/src/e2e_token_contract/token_contract_test.ts +43 -39
  141. package/src/fixtures/dumps/epoch_proof_result.json +1 -1
  142. package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +107 -152
  143. package/src/fixtures/fixtures.ts +4 -3
  144. package/src/fixtures/get_acvm_config.ts +3 -11
  145. package/src/fixtures/get_bb_config.ts +18 -13
  146. package/src/fixtures/l1_to_l2_messaging.ts +53 -23
  147. package/src/fixtures/setup_l1_contracts.ts +6 -7
  148. package/src/fixtures/setup_p2p_test.ts +126 -38
  149. package/src/fixtures/snapshot_manager.ts +187 -139
  150. package/src/fixtures/token_utils.ts +32 -15
  151. package/src/fixtures/utils.ts +580 -434
  152. package/src/fixtures/web3signer.ts +63 -0
  153. package/src/guides/up_quick_start.sh +7 -15
  154. package/src/quality_of_service/alert_checker.ts +1 -1
  155. package/src/shared/cross_chain_test_harness.ts +108 -79
  156. package/src/shared/gas_portal_test_harness.ts +59 -50
  157. package/src/shared/jest_setup.ts +1 -1
  158. package/src/shared/submit-transactions.ts +12 -8
  159. package/src/shared/uniswap_l1_l2.ts +181 -184
  160. package/src/simulators/lending_simulator.ts +14 -15
  161. package/src/simulators/token_simulator.ts +21 -13
  162. package/src/spartan/DEVELOP.md +121 -0
  163. package/src/spartan/setup_test_wallets.ts +251 -93
  164. package/src/spartan/utils.ts +536 -136
  165. package/dest/e2e_prover/e2e_prover_test.d.ts +0 -56
  166. package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
  167. package/dest/sample-dapp/connect.js +0 -12
  168. package/dest/sample-dapp/contracts.js +0 -10
  169. package/dest/sample-dapp/deploy.js +0 -35
  170. package/dest/sample-dapp/index.js +0 -98
  171. package/src/sample-dapp/connect.mjs +0 -16
  172. package/src/sample-dapp/contracts.mjs +0 -14
  173. package/src/sample-dapp/deploy.mjs +0 -40
  174. package/src/sample-dapp/index.mjs +0 -128
@@ -1,89 +1,87 @@
1
- import { createAztecNodeClient, createLogger, sleep } from '@aztec/aztec.js';
2
- import type { RollupCheatCodes } from '@aztec/aztec.js/ethereum';
1
+ import { createLogger } from '@aztec/aztec.js/log';
2
+ import type { RollupCheatCodes } from '@aztec/aztec/testing';
3
+ import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
3
4
  import type { Logger } from '@aztec/foundation/log';
4
- import type { SequencerConfig } from '@aztec/sequencer-client';
5
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
6
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
7
+ import { schemas } from '@aztec/foundation/schemas';
8
+ import { sleep } from '@aztec/foundation/sleep';
9
+ import {
10
+ type AztecNodeAdmin,
11
+ type AztecNodeAdminConfig,
12
+ createAztecNodeAdminClient,
13
+ createAztecNodeClient,
14
+ } from '@aztec/stdlib/interfaces/client';
5
15
 
6
16
  import { ChildProcess, exec, execSync, spawn } from 'child_process';
7
17
  import path from 'path';
8
18
  import { promisify } from 'util';
19
+ import { createPublicClient, fallback, http } from 'viem';
9
20
  import { z } from 'zod';
10
21
 
11
- import { AlertChecker, type AlertConfig } from '../quality_of_service/alert_checker.js';
12
-
13
22
  const execAsync = promisify(exec);
14
23
 
15
24
  const logger = createLogger('e2e:k8s-utils');
16
25
 
17
- const ethereumHostsSchema = z.string().refine(
18
- str =>
19
- str.split(',').every(url => {
20
- try {
21
- new URL(url.trim());
22
- return true;
23
- } catch {
24
- return false;
25
- }
26
- }),
27
- 'ETHEREUM_HOSTS must be a comma-separated list of valid URLs',
28
- );
29
-
30
- const k8sLocalConfigSchema = z.object({
31
- ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
32
- AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
33
- AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
34
- AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
35
- INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
36
- NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
37
- CONTAINER_NODE_PORT: z.coerce.number().default(8080),
38
- CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
39
- CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
40
- CONTAINER_PXE_PORT: z.coerce.number().default(8080),
41
- CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
42
- CONTAINER_METRICS_PORT: z.coerce.number().default(80),
43
- GRAFANA_PASSWORD: z.string().optional(),
44
- METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
45
- SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
46
- ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
47
- L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
48
- SEPOLIA_RUN: z.string().default('false'),
49
- K8S: z.literal('local'),
50
- });
51
-
52
- const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
53
- K8S: z.literal('gcloud'),
54
- CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
55
- REGION: z.string().min(1, 'REGION env variable must be set'),
26
+ const testConfigSchema = z.object({
27
+ NAMESPACE: z.string().default('scenario'),
28
+ REAL_VERIFIER: schemas.Boolean.optional().default(true),
29
+ CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
30
+ L1_RPC_URLS_JSON: z.string().optional(),
31
+ L1_ACCOUNT_MNEMONIC: z.string().optional(),
32
+ AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
33
+ AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
56
34
  });
57
35
 
58
- const directConfigSchema = z.object({
59
- PXE_URL: z.string().url('PXE_URL must be a valid URL'),
60
- NODE_URL: z.string().url('NODE_URL must be a valid URL'),
61
- ETHEREUM_HOSTS: ethereumHostsSchema,
62
- K8S: z.literal('false'),
63
- });
36
+ export type TestConfig = z.infer<typeof testConfigSchema>;
64
37
 
65
- const envSchema = z.discriminatedUnion('K8S', [k8sLocalConfigSchema, k8sGCloudConfigSchema, directConfigSchema]);
38
+ export function setupEnvironment(env: unknown): TestConfig {
39
+ const config = testConfigSchema.parse(env);
40
+ logger.warn(`Loaded env config`, config);
41
+ return config;
42
+ }
66
43
 
67
- export type K8sLocalConfig = z.infer<typeof k8sLocalConfigSchema>;
68
- export type K8sGCloudConfig = z.infer<typeof k8sGCloudConfigSchema>;
69
- export type DirectConfig = z.infer<typeof directConfigSchema>;
70
- export type EnvConfig = z.infer<typeof envSchema>;
44
+ /**
45
+ * @param path - The path to the script, relative to the project root
46
+ * @param args - The arguments to pass to the script
47
+ * @param logger - The logger to use
48
+ * @returns The exit code of the script
49
+ */
50
+ function runScript(path: string, args: string[], logger: Logger, env?: Record<string, string>) {
51
+ const childProcess = spawn(path, args, {
52
+ stdio: ['ignore', 'pipe', 'pipe'],
53
+ env: env ? { ...process.env, ...env } : process.env,
54
+ });
55
+ return new Promise<number>((resolve, reject) => {
56
+ childProcess.on('close', (code: number | null) => resolve(code ?? 0));
57
+ childProcess.on('error', reject);
58
+ childProcess.stdout?.on('data', (data: Buffer) => {
59
+ logger.info(data.toString());
60
+ });
61
+ childProcess.stderr?.on('data', (data: Buffer) => {
62
+ logger.error(data.toString());
63
+ });
64
+ });
65
+ }
71
66
 
72
- export function isK8sConfig(config: EnvConfig): config is K8sLocalConfig | K8sGCloudConfig {
73
- return config.K8S === 'local' || config.K8S === 'gcloud';
67
+ export function getAztecBin() {
68
+ return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
74
69
  }
75
70
 
76
- export function isGCloudConfig(config: EnvConfig): config is K8sGCloudConfig {
77
- return config.K8S === 'gcloud';
71
+ /**
72
+ * Runs the Aztec binary
73
+ * @param args - The arguments to pass to the Aztec binary
74
+ * @param logger - The logger to use
75
+ * @param env - Optional environment variables to set for the process
76
+ * @returns The exit code of the Aztec binary
77
+ */
78
+ export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
79
+ return runScript('node', [getAztecBin(), ...args], logger, env);
78
80
  }
79
81
 
80
- export function setupEnvironment(env: unknown): EnvConfig {
81
- const config = envSchema.parse(env);
82
- if (isGCloudConfig(config)) {
83
- const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
84
- execSync(command);
85
- }
86
- return config;
82
+ export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
83
+ const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
84
+ return runScript(scriptPath, args, logger, env);
87
85
  }
88
86
 
89
87
  export async function startPortForward({
@@ -103,7 +101,7 @@ export async function startPortForward({
103
101
  }> {
104
102
  const hostPortAsString = hostPort ? hostPort.toString() : '';
105
103
 
106
- logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
104
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
107
105
 
108
106
  const process = spawn(
109
107
  'kubectl',
@@ -121,21 +119,20 @@ export async function startPortForward({
121
119
  const str = data.toString() as string;
122
120
  if (!isResolved && str.includes('Forwarding from')) {
123
121
  isResolved = true;
124
- logger.info(str);
122
+ logger.debug(`Port forward for ${resource}: ${str}`);
125
123
  const port = str.search(/:\d+/);
126
124
  if (port === -1) {
127
125
  throw new Error('Port not found in port forward output');
128
126
  }
129
127
  const portNumber = parseInt(str.slice(port + 1));
130
- logger.info(`Port forward connected: ${portNumber}`);
131
- logger.info(`Port forward connected: ${portNumber}`);
128
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
132
129
  resolve(portNumber);
133
130
  } else {
134
131
  logger.silent(str);
135
132
  }
136
133
  });
137
134
  process.stderr?.on('data', data => {
138
- logger.info(data.toString());
135
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
139
136
  // It's a strange thing:
140
137
  // If we don't pipe stderr, then the port forwarding does not work.
141
138
  // Log to silent because this doesn't actually report errors,
@@ -145,16 +142,16 @@ export async function startPortForward({
145
142
  process.on('close', () => {
146
143
  if (!isResolved) {
147
144
  isResolved = true;
148
- logger.warn('Port forward closed before connection established');
145
+ logger.warn(`Port forward for ${resource} closed before connection established`);
149
146
  resolve(0);
150
147
  }
151
148
  });
152
149
  process.on('error', error => {
153
- logger.error(`Port forward error: ${error}`);
150
+ logger.error(`Port forward for ${resource} error: ${error}`);
154
151
  resolve(0);
155
152
  });
156
153
  process.on('exit', code => {
157
- logger.info(`Port forward exited with code ${code}`);
154
+ logger.verbose(`Port forward for ${resource} exited with code ${code}`);
158
155
  resolve(0);
159
156
  });
160
157
  });
@@ -164,6 +161,55 @@ export async function startPortForward({
164
161
  return { process, port };
165
162
  }
166
163
 
164
+ export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
165
+ const { promise, resolve, reject } = promiseWithResolvers<string>();
166
+ const process = spawn(
167
+ 'kubectl',
168
+ [
169
+ 'get',
170
+ 'service',
171
+ '-n',
172
+ namespace,
173
+ `${namespace}-${serviceName}`,
174
+ '--output',
175
+ "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
176
+ ],
177
+ {
178
+ stdio: 'pipe',
179
+ },
180
+ );
181
+
182
+ let ip = '';
183
+ process.stdout.on('data', data => {
184
+ ip += data;
185
+ });
186
+ process.on('error', err => {
187
+ reject(err);
188
+ });
189
+ process.on('exit', () => {
190
+ // kubectl prints JSON. Remove the quotes
191
+ resolve(ip.replace(/"|'/g, ''));
192
+ });
193
+
194
+ return promise;
195
+ }
196
+
197
+ export function startPortForwardForRPC(namespace: string, resourceType = 'services', index = 0) {
198
+ return startPortForward({
199
+ resource: `${resourceType}/${namespace}-rpc-aztec-node-${index}`,
200
+ namespace,
201
+ containerPort: 8080,
202
+ });
203
+ }
204
+
205
+ export function startPortForwardForEthereum(namespace: string) {
206
+ return startPortForward({
207
+ resource: `services/${namespace}-eth-execution`,
208
+ namespace,
209
+ containerPort: 8545,
210
+ });
211
+ }
212
+
167
213
  export async function deleteResourceByName({
168
214
  resource,
169
215
  namespace,
@@ -187,12 +233,28 @@ export async function deleteResourceByLabel({
187
233
  resource,
188
234
  namespace,
189
235
  label,
236
+ timeout = '5m',
237
+ force = false,
190
238
  }: {
191
239
  resource: string;
192
240
  namespace: string;
193
241
  label: string;
242
+ timeout?: string;
243
+ force?: boolean;
194
244
  }) {
195
- const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true`;
245
+ // Check if the resource type exists before attempting to delete
246
+ try {
247
+ await execAsync(
248
+ `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
249
+ );
250
+ } catch (error) {
251
+ logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
252
+ return '';
253
+ }
254
+
255
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
256
+ force ? '--force' : ''
257
+ }`;
196
258
  logger.info(`command: ${command}`);
197
259
  const { stdout } = await execAsync(command);
198
260
  return stdout;
@@ -221,9 +283,18 @@ export function getChartDir(spartanDir: string, chartName: string) {
221
283
  return path.join(spartanDir.trim(), chartName);
222
284
  }
223
285
 
224
- function valuesToArgs(values: Record<string, string | number>) {
286
+ function shellQuote(value: string) {
287
+ // Single-quote safe shell escaping: ' -> '\''
288
+ return `'${value.replace(/'/g, "'\\''")}'`;
289
+ }
290
+
291
+ function valuesToArgs(values: Record<string, string | number | boolean>) {
225
292
  return Object.entries(values)
226
- .map(([key, value]) => `--set ${key}=${value}`)
293
+ .map(([key, value]) =>
294
+ typeof value === 'number' || typeof value === 'boolean'
295
+ ? `--set ${key}=${value}`
296
+ : `--set-string ${key}=${shellQuote(String(value))}`,
297
+ )
227
298
  .join(' ');
228
299
  }
229
300
 
@@ -241,7 +312,7 @@ function createHelmCommand({
241
312
  namespace: string;
242
313
  valuesFile: string | undefined;
243
314
  timeout: string;
244
- values: Record<string, string | number>;
315
+ values: Record<string, string | number | boolean>;
245
316
  reuseValues?: boolean;
246
317
  }) {
247
318
  const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -258,6 +329,32 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
258
329
  return stdout;
259
330
  }
260
331
 
332
+ export async function cleanHelm(instanceName: string, namespace: string, logger: Logger) {
333
+ // uninstall the helm chart if it exists
334
+ logger.info(`Uninstalling helm chart ${instanceName}`);
335
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
336
+ // and delete the chaos-mesh resources created by this release
337
+ const deleteByLabel = async (resource: string) => {
338
+ const args = {
339
+ resource,
340
+ namespace: namespace,
341
+ label: `app.kubernetes.io/instance=${instanceName}`,
342
+ } as const;
343
+ logger.info(`Deleting ${resource} resources for release ${instanceName}`);
344
+ await deleteResourceByLabel(args).catch(e => {
345
+ logger.error(`Error deleting ${resource}: ${e}`);
346
+ logger.info(`Force deleting ${resource}`);
347
+ return deleteResourceByLabel({ ...args, force: true });
348
+ });
349
+ };
350
+
351
+ await deleteByLabel('podchaos');
352
+ await deleteByLabel('networkchaos');
353
+ await deleteByLabel('podnetworkchaos');
354
+ await deleteByLabel('workflows');
355
+ await deleteByLabel('workflownodes');
356
+ }
357
+
261
358
  /**
262
359
  * Installs a Helm chart with the given parameters.
263
360
  * @param instanceName - The name of the Helm chart instance.
@@ -280,8 +377,7 @@ export async function installChaosMeshChart({
280
377
  targetNamespace,
281
378
  valuesFile,
282
379
  helmChartDir,
283
- chaosMeshNamespace = 'chaos-mesh',
284
- timeout = '5m',
380
+ timeout = '10m',
285
381
  clean = true,
286
382
  values = {},
287
383
  logger,
@@ -297,27 +393,13 @@ export async function installChaosMeshChart({
297
393
  logger: Logger;
298
394
  }) {
299
395
  if (clean) {
300
- // uninstall the helm chart if it exists
301
- logger.info(`Uninstalling helm chart ${instanceName}`);
302
- await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
303
- // and delete the podchaos resource
304
- const deleteArgs = {
305
- resource: 'podchaos',
306
- namespace: chaosMeshNamespace,
307
- name: `${targetNamespace}-${instanceName}`,
308
- };
309
- logger.info(`Deleting podchaos resource`);
310
- await deleteResourceByName(deleteArgs).catch(e => {
311
- logger.error(`Error deleting podchaos resource: ${e}`);
312
- logger.info(`Force deleting podchaos resource`);
313
- return deleteResourceByName({ ...deleteArgs, force: true });
314
- });
396
+ await cleanHelm(instanceName, targetNamespace, logger);
315
397
  }
316
398
 
317
399
  return execHelmCommand({
318
400
  instanceName,
319
401
  helmChartDir,
320
- namespace: chaosMeshNamespace,
402
+ namespace: targetNamespace,
321
403
  valuesFile,
322
404
  timeout,
323
405
  values: { ...values, 'global.targetNamespace': targetNamespace },
@@ -412,10 +494,12 @@ export function applyValidatorKill({
412
494
  namespace,
413
495
  spartanDir,
414
496
  logger,
497
+ values,
415
498
  }: {
416
499
  namespace: string;
417
500
  spartanDir: string;
418
501
  logger: Logger;
502
+ values?: Record<string, string | number>;
419
503
  }) {
420
504
  return installChaosMeshChart({
421
505
  instanceName: 'validator-kill',
@@ -423,6 +507,7 @@ export function applyValidatorKill({
423
507
  valuesFile: 'validator-kill.yaml',
424
508
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
425
509
  logger,
510
+ values,
426
511
  });
427
512
  }
428
513
 
@@ -469,12 +554,234 @@ export async function awaitL2BlockNumber(
469
554
 
470
555
  export async function restartBot(namespace: string, logger: Logger) {
471
556
  logger.info(`Restarting bot`);
472
- await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
557
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
473
558
  await sleep(10 * 1000);
474
- await waitForResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
559
+ // Some bot images may take time to report Ready due to heavy boot-time proving.
560
+ // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
561
+ await waitForResourceByLabel({
562
+ resource: 'pods',
563
+ namespace,
564
+ label: 'app.kubernetes.io/name=bot',
565
+ condition: 'PodReadyToStartContainers',
566
+ });
475
567
  logger.info(`Bot restarted`);
476
568
  }
477
569
 
570
+ /**
571
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
572
+ * Intended for test setup to enable L2 traffic generation only when needed.
573
+ */
574
+ export async function installTransferBot({
575
+ namespace,
576
+ spartanDir,
577
+ logger,
578
+ replicas = 1,
579
+ txIntervalSeconds = 10,
580
+ followChain = 'PENDING',
581
+ mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
582
+ mnemonicStartIndex,
583
+ botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
584
+ nodeUrl,
585
+ timeout = '15m',
586
+ reuseValues = true,
587
+ aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
588
+ }: {
589
+ namespace: string;
590
+ spartanDir: string;
591
+ logger: Logger;
592
+ replicas?: number;
593
+ txIntervalSeconds?: number;
594
+ followChain?: string;
595
+ mnemonic?: string;
596
+ mnemonicStartIndex?: number | string;
597
+ botPrivateKey?: string;
598
+ nodeUrl?: string;
599
+ timeout?: string;
600
+ reuseValues?: boolean;
601
+ aztecSlotDuration?: number;
602
+ }) {
603
+ const instanceName = `${namespace}-bot-transfers`;
604
+ const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
605
+ const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
606
+
607
+ logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
608
+
609
+ const values: Record<string, string | number | boolean> = {
610
+ 'bot.replicaCount': replicas,
611
+ 'bot.txIntervalSeconds': txIntervalSeconds,
612
+ 'bot.followChain': followChain,
613
+ 'bot.botPrivateKey': botPrivateKey,
614
+ 'bot.nodeUrl': resolvedNodeUrl,
615
+ 'bot.mnemonic': mnemonic,
616
+ 'bot.feePaymentMethod': 'fee_juice',
617
+ 'aztec.slotDuration': aztecSlotDuration,
618
+ // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
619
+ // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
620
+ 'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
621
+ // Provide L1 execution RPC for bridging fee juice
622
+ 'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
623
+ // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
624
+ 'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
625
+ };
626
+ // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
627
+ if (mnemonicStartIndex === undefined) {
628
+ values['bot.mnemonicStartIndex'] = 0;
629
+ }
630
+ // Also pass a funded private key directly if available
631
+ if (process.env.FUNDING_PRIVATE_KEY) {
632
+ values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
633
+ }
634
+ // Align bot image with the running network image: prefer env var, else detect from a validator pod
635
+ let repositoryFromEnv: string | undefined;
636
+ let tagFromEnv: string | undefined;
637
+ const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
638
+ if (aztecDockerImage && aztecDockerImage.includes(':')) {
639
+ const lastColon = aztecDockerImage.lastIndexOf(':');
640
+ repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
641
+ tagFromEnv = aztecDockerImage.slice(lastColon + 1);
642
+ }
643
+
644
+ let repository = repositoryFromEnv;
645
+ let tag = tagFromEnv;
646
+ if (!repository || !tag) {
647
+ try {
648
+ const { stdout } = await execAsync(
649
+ `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
650
+ );
651
+ const image = stdout.trim().replace(/^'|'$/g, '');
652
+ if (image && image.includes(':')) {
653
+ const lastColon = image.lastIndexOf(':');
654
+ repository = image.slice(0, lastColon);
655
+ tag = image.slice(lastColon + 1);
656
+ }
657
+ } catch (err) {
658
+ logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
659
+ }
660
+ }
661
+ if (repository && tag) {
662
+ values['global.aztecImage.repository'] = repository;
663
+ values['global.aztecImage.tag'] = tag;
664
+ }
665
+ if (mnemonicStartIndex !== undefined) {
666
+ values['bot.mnemonicStartIndex'] =
667
+ typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
668
+ }
669
+
670
+ await execHelmCommand({
671
+ instanceName,
672
+ helmChartDir,
673
+ namespace,
674
+ valuesFile: undefined,
675
+ timeout,
676
+ values: values as unknown as Record<string, string | number | boolean>,
677
+ reuseValues,
678
+ });
679
+
680
+ if (replicas > 0) {
681
+ await waitForResourceByLabel({
682
+ resource: 'pods',
683
+ namespace,
684
+ label: 'app.kubernetes.io/name=bot',
685
+ condition: 'PodReadyToStartContainers',
686
+ });
687
+ }
688
+ }
689
+
690
+ /**
691
+ * Uninstalls the transfer bot Helm release from the given namespace.
692
+ * Intended for test teardown to clean up bot resources.
693
+ */
694
+ export async function uninstallTransferBot(namespace: string, logger: Logger) {
695
+ const instanceName = `${namespace}-bot-transfers`;
696
+ logger.info(`Uninstalling transfer bot release ${instanceName}`);
697
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
698
+ // Ensure any leftover pods are removed
699
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
700
+ () => undefined,
701
+ );
702
+ }
703
+
704
+ /**
705
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
706
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
707
+ */
708
+ export async function setValidatorTxDrop({
709
+ namespace,
710
+ enabled,
711
+ probability,
712
+ logger,
713
+ }: {
714
+ namespace: string;
715
+ enabled: boolean;
716
+ probability: number;
717
+ logger: Logger;
718
+ }) {
719
+ const drop = enabled ? 'true' : 'false';
720
+ const prob = String(probability);
721
+
722
+ const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
723
+ let updated = false;
724
+ for (const selector of selectors) {
725
+ try {
726
+ const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
727
+ const names = list.stdout
728
+ .split('\n')
729
+ .map(s => s.trim())
730
+ .filter(Boolean);
731
+ if (names.length === 0) {
732
+ continue;
733
+ }
734
+ const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
735
+ logger.info(`command: ${cmd}`);
736
+ await execAsync(cmd);
737
+ updated = true;
738
+ } catch (e) {
739
+ logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
740
+ }
741
+ }
742
+
743
+ if (!updated) {
744
+ logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
745
+ return;
746
+ }
747
+
748
+ // Restart validator pods to ensure env vars take effect and wait for readiness
749
+ await restartValidators(namespace, logger);
750
+ }
751
+
752
+ export async function restartValidators(namespace: string, logger: Logger) {
753
+ const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
754
+ let any = false;
755
+ for (const selector of selectors) {
756
+ try {
757
+ const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
758
+ if (!stdout || stdout.trim().length === 0) {
759
+ continue;
760
+ }
761
+ any = true;
762
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
763
+ } catch (e) {
764
+ logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
765
+ }
766
+ }
767
+
768
+ if (!any) {
769
+ logger.warn(`No validator pods found to restart in ${namespace}.`);
770
+ return;
771
+ }
772
+
773
+ // Wait for either label to be Ready
774
+ for (const selector of selectors) {
775
+ try {
776
+ await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
777
+ return;
778
+ } catch {
779
+ // try next
780
+ }
781
+ }
782
+ logger.warn(`Validator pods did not report Ready; continuing.`);
783
+ }
784
+
478
785
  export async function enableValidatorDynamicBootNode(
479
786
  instanceName: string,
480
787
  namespace: string,
@@ -497,64 +804,109 @@ export async function enableValidatorDynamicBootNode(
497
804
  logger.info(`Validator dynamic boot node enabled`);
498
805
  }
499
806
 
500
- export async function runAlertCheck(config: EnvConfig, alerts: AlertConfig[], logger: Logger) {
501
- if (isK8sConfig(config)) {
502
- const { process, port } = await startPortForward({
503
- resource: `svc/metrics-grafana`,
504
- namespace: 'metrics',
505
- containerPort: config.CONTAINER_METRICS_PORT,
506
- });
507
- const alertChecker = new AlertChecker(logger, {
508
- grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
509
- grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`,
510
- });
511
- await alertChecker.runAlertCheck(alerts);
512
- process.kill();
513
- } else {
514
- logger.info('Not running alert check in non-k8s environment');
515
- }
807
+ export async function getSequencers(namespace: string) {
808
+ const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
809
+ const { stdout } = await execAsync(command);
810
+ const sequencers = stdout.split(' ');
811
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
812
+ return sequencers;
516
813
  }
517
814
 
518
- export async function updateSequencerConfig(url: string, config: Partial<SequencerConfig>) {
519
- const node = createAztecNodeClient(url);
520
- await node.setConfig(config);
815
+ export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
816
+ return withSequencersAdmin(env, async client => {
817
+ await client.setConfig(config);
818
+ return client.getConfig();
819
+ });
521
820
  }
522
821
 
523
- export async function getSequencers(namespace: string) {
524
- const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
525
- const { stdout } = await execAsync(command);
526
- return stdout.split(' ');
822
+ export function getSequencersConfig(env: TestConfig) {
823
+ return withSequencersAdmin(env, client => client.getConfig());
527
824
  }
528
825
 
529
- export async function updateK8sSequencersConfig(args: {
530
- containerPort: number;
531
- namespace: string;
532
- config: Partial<SequencerConfig>;
533
- }) {
534
- const { containerPort, namespace, config } = args;
826
+ export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
827
+ const adminContainerPort = 8880;
828
+ const namespace = env.NAMESPACE;
535
829
  const sequencers = await getSequencers(namespace);
830
+ const results = [];
831
+
536
832
  for (const sequencer of sequencers) {
537
833
  const { process, port } = await startPortForward({
538
834
  resource: `pod/${sequencer}`,
539
835
  namespace,
540
- containerPort,
836
+ containerPort: adminContainerPort,
541
837
  });
542
838
 
543
839
  const url = `http://localhost:${port}`;
544
- await updateSequencerConfig(url, config);
840
+ await retry(
841
+ () => fetch(`${url}/status`).then(res => res.status === 200),
842
+ 'forward node admin port',
843
+ makeBackoff([1, 1, 2, 6]),
844
+ logger,
845
+ true,
846
+ );
847
+ const client = createAztecNodeAdminClient(url);
848
+ results.push(await fn(client));
545
849
  process.kill();
546
850
  }
851
+
852
+ return results;
547
853
  }
548
854
 
549
- export async function updateSequencersConfig(env: EnvConfig, config: Partial<SequencerConfig>) {
550
- if (isK8sConfig(env)) {
551
- await updateK8sSequencersConfig({
552
- containerPort: env.CONTAINER_NODE_PORT,
553
- namespace: env.NAMESPACE,
554
- config,
855
+ /**
856
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
857
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
858
+ */
859
+ export async function getPublicViemClient(
860
+ env: TestConfig,
861
+ /** If set, will push the new process into it */
862
+ processes?: ChildProcess[],
863
+ ): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
864
+ const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
865
+ if (CREATE_ETH_DEVNET) {
866
+ logger.info(`Creating port forward to eth execution node`);
867
+ const { process, port } = await startPortForward({
868
+ resource: `svc/${NAMESPACE}-eth-execution`,
869
+ namespace: NAMESPACE,
870
+ containerPort: 8545,
555
871
  });
872
+ const url = `http://127.0.0.1:${port}`;
873
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
874
+ if (processes) {
875
+ processes.push(process);
876
+ }
877
+ return { url, client, process };
556
878
  } else {
557
- await updateSequencerConfig(env.NODE_URL, config);
879
+ logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
880
+ if (!L1_RPC_URLS_JSON) {
881
+ throw new Error(`L1_RPC_URLS_JSON is not defined`);
882
+ }
883
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
884
+ return { url: L1_RPC_URLS_JSON, client };
885
+ }
886
+ }
887
+
888
+ /** Queries an Aztec node for the L1 deployment addresses */
889
+ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
890
+ let forwardProcess: ChildProcess | undefined;
891
+ try {
892
+ const [sequencer] = await getSequencers(env.NAMESPACE);
893
+ const { process, port } = await startPortForward({
894
+ resource: `pod/${sequencer}`,
895
+ namespace: env.NAMESPACE,
896
+ containerPort: 8080,
897
+ });
898
+
899
+ forwardProcess = process;
900
+ const url = `http://127.0.0.1:${port}`;
901
+ const node = createAztecNodeClient(url);
902
+ return await retry(
903
+ () => node.getNodeInfo().then(i => i.l1ContractAddresses),
904
+ 'get node info',
905
+ makeBackoff([1, 3, 6]),
906
+ logger,
907
+ );
908
+ } finally {
909
+ forwardProcess?.kill();
558
910
  }
559
911
  }
560
912
 
@@ -580,3 +932,51 @@ export async function rollAztecPods(namespace: string) {
580
932
  await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
581
933
  await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
582
934
  }
935
+
936
+ /**
937
+ * Returns the absolute path to the git repository root
938
+ */
939
+ export function getGitProjectRoot(): string {
940
+ try {
941
+ const rootDir = execSync('git rev-parse --show-toplevel', {
942
+ encoding: 'utf-8',
943
+ stdio: ['ignore', 'pipe', 'ignore'],
944
+ }).trim();
945
+
946
+ return rootDir;
947
+ } catch (error) {
948
+ throw new Error(`Failed to determine git project root: ${error}`);
949
+ }
950
+ }
951
+
952
+ /** Returns a client to the RPC of the given sequencer (defaults to first) */
953
+ export async function getNodeClient(
954
+ env: TestConfig,
955
+ index: number = 0,
956
+ ): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
957
+ const namespace = env.NAMESPACE;
958
+ const containerPort = 8080;
959
+ const sequencers = await getSequencers(namespace);
960
+ const sequencer = sequencers[index];
961
+ if (!sequencer) {
962
+ throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
963
+ }
964
+
965
+ const { process, port } = await startPortForward({
966
+ resource: `pod/${sequencer}`,
967
+ namespace,
968
+ containerPort,
969
+ });
970
+
971
+ const url = `http://localhost:${port}`;
972
+ await retry(
973
+ () => fetch(`${url}/status`).then(res => res.status === 200),
974
+ 'forward port',
975
+ makeBackoff([1, 1, 2, 6]),
976
+ logger,
977
+ true,
978
+ );
979
+
980
+ const client = createAztecNodeClient(url);
981
+ return { node: client, port, process };
982
+ }