@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.1142ef1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (186)
  1. package/dest/bench/client_flows/benchmark.d.ts +61 -0
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
  3. package/dest/bench/client_flows/benchmark.js +261 -0
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +80 -0
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
  6. package/dest/bench/client_flows/client_flows_benchmark.js +336 -0
  7. package/dest/bench/client_flows/config.d.ts +14 -0
  8. package/dest/bench/client_flows/config.d.ts.map +1 -0
  9. package/dest/bench/client_flows/config.js +106 -0
  10. package/dest/bench/client_flows/data_extractor.d.ts +2 -0
  11. package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
  12. package/dest/bench/client_flows/data_extractor.js +79 -0
  13. package/dest/bench/utils.d.ts +14 -40
  14. package/dest/bench/utils.d.ts.map +1 -1
  15. package/dest/bench/utils.js +37 -70
  16. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +21 -13
  17. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  18. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
  19. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +29 -28
  20. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  21. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +79 -82
  22. package/dest/e2e_deploy_contract/deploy_test.d.ts +16 -8
  23. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  24. package/dest/e2e_deploy_contract/deploy_test.js +13 -19
  25. package/dest/e2e_epochs/epochs_test.d.ts +65 -22
  26. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  27. package/dest/e2e_epochs/epochs_test.js +233 -49
  28. package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
  29. package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
  30. package/dest/e2e_fees/bridging_race.notest.js +63 -0
  31. package/dest/e2e_fees/fees_test.d.ts +27 -12
  32. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  33. package/dest/e2e_fees/fees_test.js +107 -110
  34. package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
  35. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  36. package/dest/e2e_l1_publisher/write_json.js +55 -0
  37. package/dest/e2e_multi_validator/utils.d.ts +12 -0
  38. package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
  39. package/dest/e2e_multi_validator/utils.js +214 -0
  40. package/dest/e2e_nested_contract/nested_contract_test.d.ts +10 -7
  41. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  42. package/dest/e2e_nested_contract/nested_contract_test.js +24 -20
  43. package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
  44. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
  45. package/dest/e2e_p2p/inactivity_slash_test.js +136 -0
  46. package/dest/e2e_p2p/p2p_network.d.ts +276 -23
  47. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  48. package/dest/e2e_p2p/p2p_network.js +188 -133
  49. package/dest/e2e_p2p/shared.d.ts +43 -7
  50. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  51. package/dest/e2e_p2p/shared.js +164 -19
  52. package/dest/e2e_token_contract/token_contract_test.d.ts +12 -6
  53. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  54. package/dest/e2e_token_contract/token_contract_test.js +50 -26
  55. package/dest/fixtures/e2e_prover_test.d.ts +61 -0
  56. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
  57. package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +108 -113
  58. package/dest/fixtures/fixtures.d.ts +6 -8
  59. package/dest/fixtures/fixtures.d.ts.map +1 -1
  60. package/dest/fixtures/fixtures.js +5 -5
  61. package/dest/fixtures/get_acvm_config.d.ts +2 -2
  62. package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
  63. package/dest/fixtures/get_acvm_config.js +3 -15
  64. package/dest/fixtures/get_bb_config.d.ts +2 -2
  65. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  66. package/dest/fixtures/get_bb_config.js +10 -17
  67. package/dest/fixtures/index.d.ts +1 -1
  68. package/dest/fixtures/l1_to_l2_messaging.d.ts +11 -7
  69. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  70. package/dest/fixtures/l1_to_l2_messaging.js +45 -19
  71. package/dest/fixtures/logging.d.ts +1 -1
  72. package/dest/fixtures/setup_p2p_test.d.ts +15 -14
  73. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  74. package/dest/fixtures/setup_p2p_test.js +82 -22
  75. package/dest/fixtures/snapshot_manager.d.ts +20 -14
  76. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  77. package/dest/fixtures/snapshot_manager.js +147 -138
  78. package/dest/fixtures/token_utils.d.ts +10 -4
  79. package/dest/fixtures/token_utils.d.ts.map +1 -1
  80. package/dest/fixtures/token_utils.js +28 -12
  81. package/dest/fixtures/utils.d.ts +92 -54
  82. package/dest/fixtures/utils.d.ts.map +1 -1
  83. package/dest/fixtures/utils.js +452 -389
  84. package/dest/fixtures/web3signer.d.ts +5 -0
  85. package/dest/fixtures/web3signer.d.ts.map +1 -0
  86. package/dest/fixtures/web3signer.js +53 -0
  87. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  88. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  89. package/dest/fixtures/with_telemetry_utils.js +2 -2
  90. package/dest/index.d.ts +1 -1
  91. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  92. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  93. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  94. package/dest/shared/cross_chain_test_harness.d.ts +42 -35
  95. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  96. package/dest/shared/cross_chain_test_harness.js +106 -52
  97. package/dest/shared/gas_portal_test_harness.d.ts +29 -31
  98. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  99. package/dest/shared/gas_portal_test_harness.js +51 -30
  100. package/dest/shared/index.d.ts +2 -2
  101. package/dest/shared/index.d.ts.map +1 -1
  102. package/dest/shared/jest_setup.d.ts +1 -1
  103. package/dest/shared/jest_setup.js +1 -1
  104. package/dest/shared/submit-transactions.d.ts +6 -4
  105. package/dest/shared/submit-transactions.d.ts.map +1 -1
  106. package/dest/shared/submit-transactions.js +8 -7
  107. package/dest/shared/uniswap_l1_l2.d.ts +3 -25
  108. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  109. package/dest/shared/uniswap_l1_l2.js +170 -120
  110. package/dest/simulators/index.d.ts +1 -1
  111. package/dest/simulators/lending_simulator.d.ts +7 -11
  112. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  113. package/dest/simulators/lending_simulator.js +16 -17
  114. package/dest/simulators/token_simulator.d.ts +6 -3
  115. package/dest/simulators/token_simulator.d.ts.map +1 -1
  116. package/dest/simulators/token_simulator.js +16 -13
  117. package/dest/spartan/setup_test_wallets.d.ts +27 -11
  118. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  119. package/dest/spartan/setup_test_wallets.js +202 -58
  120. package/dest/spartan/tx_metrics.d.ts +39 -0
  121. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  122. package/dest/spartan/tx_metrics.js +95 -0
  123. package/dest/spartan/utils.d.ts +151 -313
  124. package/dest/spartan/utils.d.ts.map +1 -1
  125. package/dest/spartan/utils.js +598 -151
  126. package/package.json +65 -58
  127. package/src/bench/client_flows/benchmark.ts +341 -0
  128. package/src/bench/client_flows/client_flows_benchmark.ts +450 -0
  129. package/src/bench/client_flows/config.ts +61 -0
  130. package/src/bench/client_flows/data_extractor.ts +89 -0
  131. package/src/bench/utils.ts +35 -81
  132. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
  133. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +103 -122
  134. package/src/e2e_deploy_contract/deploy_test.ts +24 -39
  135. package/src/e2e_epochs/epochs_test.ts +299 -65
  136. package/src/e2e_fees/bridging_race.notest.ts +80 -0
  137. package/src/e2e_fees/fees_test.ts +150 -142
  138. package/src/e2e_l1_publisher/write_json.ts +74 -0
  139. package/src/e2e_multi_validator/utils.ts +258 -0
  140. package/src/e2e_nested_contract/nested_contract_test.ts +29 -19
  141. package/src/e2e_p2p/inactivity_slash_test.ts +179 -0
  142. package/src/e2e_p2p/p2p_network.ts +274 -171
  143. package/src/e2e_p2p/shared.ts +252 -29
  144. package/src/e2e_token_contract/token_contract_test.ts +43 -39
  145. package/src/fixtures/dumps/epoch_proof_result.json +1 -1
  146. package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +112 -160
  147. package/src/fixtures/fixtures.ts +5 -7
  148. package/src/fixtures/get_acvm_config.ts +4 -12
  149. package/src/fixtures/get_bb_config.ts +18 -13
  150. package/src/fixtures/l1_to_l2_messaging.ts +56 -24
  151. package/src/fixtures/setup_p2p_test.ts +127 -39
  152. package/src/fixtures/snapshot_manager.ts +189 -160
  153. package/src/fixtures/token_utils.ts +32 -15
  154. package/src/fixtures/utils.ts +556 -475
  155. package/src/fixtures/web3signer.ts +63 -0
  156. package/src/fixtures/with_telemetry_utils.ts +2 -2
  157. package/src/guides/up_quick_start.sh +7 -15
  158. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +2 -2
  159. package/src/shared/cross_chain_test_harness.ts +113 -87
  160. package/src/shared/gas_portal_test_harness.ts +59 -50
  161. package/src/shared/index.ts +1 -1
  162. package/src/shared/jest_setup.ts +1 -1
  163. package/src/shared/submit-transactions.ts +12 -8
  164. package/src/shared/uniswap_l1_l2.ts +194 -211
  165. package/src/simulators/lending_simulator.ts +15 -16
  166. package/src/simulators/token_simulator.ts +21 -13
  167. package/src/spartan/DEVELOP.md +128 -0
  168. package/src/spartan/setup_test_wallets.ts +258 -93
  169. package/src/spartan/tx_metrics.ts +130 -0
  170. package/src/spartan/utils.ts +722 -146
  171. package/dest/e2e_prover/e2e_prover_test.d.ts +0 -56
  172. package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
  173. package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
  174. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  175. package/dest/fixtures/setup_l1_contracts.js +0 -17
  176. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  177. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  178. package/dest/sample-dapp/connect.js +0 -12
  179. package/dest/sample-dapp/contracts.js +0 -10
  180. package/dest/sample-dapp/deploy.js +0 -35
  181. package/dest/sample-dapp/index.js +0 -98
  182. package/src/fixtures/setup_l1_contracts.ts +0 -27
  183. package/src/sample-dapp/connect.mjs +0 -16
  184. package/src/sample-dapp/contracts.mjs +0 -14
  185. package/src/sample-dapp/deploy.mjs +0 -40
  186. package/src/sample-dapp/index.mjs +0 -128
@@ -1,89 +1,90 @@
- import { createAztecNodeClient, createLogger, sleep } from '@aztec/aztec.js';
- import type { RollupCheatCodes } from '@aztec/aztec.js/ethereum';
+ import { createLogger } from '@aztec/aztec.js/log';
+ import type { RollupCheatCodes } from '@aztec/aztec/testing';
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+ import type { ViemPublicClient } from '@aztec/ethereum/types';
+ import type { CheckpointNumber } from '@aztec/foundation/branded-types';
  import type { Logger } from '@aztec/foundation/log';
- import type { SequencerConfig } from '@aztec/sequencer-client';
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
+ import { schemas } from '@aztec/foundation/schemas';
+ import { sleep } from '@aztec/foundation/sleep';
+ import {
+   type AztecNodeAdmin,
+   type AztecNodeAdminConfig,
+   createAztecNodeAdminClient,
+   createAztecNodeClient,
+ } from '@aztec/stdlib/interfaces/client';
 
  import { ChildProcess, exec, execSync, spawn } from 'child_process';
  import path from 'path';
  import { promisify } from 'util';
+ import { createPublicClient, fallback, http } from 'viem';
  import { z } from 'zod';
 
- import { AlertChecker, type AlertConfig } from '../quality_of_service/alert_checker.js';
-
  const execAsync = promisify(exec);
 
  const logger = createLogger('e2e:k8s-utils');
 
- const ethereumHostsSchema = z.string().refine(
-   str =>
-     str.split(',').every(url => {
-       try {
-         new URL(url.trim());
-         return true;
-       } catch {
-         return false;
-       }
-     }),
-   'ETHEREUM_HOSTS must be a comma-separated list of valid URLs',
- );
-
- const k8sLocalConfigSchema = z.object({
-   ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
-   AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
-   AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
-   AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
-   INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
-   NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
-   CONTAINER_NODE_PORT: z.coerce.number().default(8080),
-   CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
-   CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
-   CONTAINER_PXE_PORT: z.coerce.number().default(8080),
-   CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
-   CONTAINER_METRICS_PORT: z.coerce.number().default(80),
-   GRAFANA_PASSWORD: z.string().optional(),
-   METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
-   SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
-   ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
-   L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
-   SEPOLIA_RUN: z.string().default('false'),
-   K8S: z.literal('local'),
- });
-
- const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
-   K8S: z.literal('gcloud'),
-   CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
-   REGION: z.string().min(1, 'REGION env variable must be set'),
+ const testConfigSchema = z.object({
+   NAMESPACE: z.string().default('scenario'),
+   REAL_VERIFIER: schemas.Boolean.optional().default(true),
+   CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+   L1_RPC_URLS_JSON: z.string().optional(),
+   L1_ACCOUNT_MNEMONIC: z.string().optional(),
+   AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+   AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
+   AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
  });
 
- const directConfigSchema = z.object({
-   PXE_URL: z.string().url('PXE_URL must be a valid URL'),
-   NODE_URL: z.string().url('NODE_URL must be a valid URL'),
-   ETHEREUM_HOSTS: ethereumHostsSchema,
-   K8S: z.literal('false'),
- });
+ export type TestConfig = z.infer<typeof testConfigSchema>;
 
- const envSchema = z.discriminatedUnion('K8S', [k8sLocalConfigSchema, k8sGCloudConfigSchema, directConfigSchema]);
+ export function setupEnvironment(env: unknown): TestConfig {
+   const config = testConfigSchema.parse(env);
+   logger.warn(`Loaded env config`, config);
+   return config;
+ }
 
- export type K8sLocalConfig = z.infer<typeof k8sLocalConfigSchema>;
- export type K8sGCloudConfig = z.infer<typeof k8sGCloudConfigSchema>;
- export type DirectConfig = z.infer<typeof directConfigSchema>;
- export type EnvConfig = z.infer<typeof envSchema>;
+ /**
+  * @param path - The path to the script, relative to the project root
+  * @param args - The arguments to pass to the script
+  * @param logger - The logger to use
+  * @returns The exit code of the script
+  */
+ function runScript(path: string, args: string[], logger: Logger, env?: Record<string, string>) {
+   const childProcess = spawn(path, args, {
+     stdio: ['ignore', 'pipe', 'pipe'],
+     env: env ? { ...process.env, ...env } : process.env,
+   });
+   return new Promise<number>((resolve, reject) => {
+     childProcess.on('close', (code: number | null) => resolve(code ?? 0));
+     childProcess.on('error', reject);
+     childProcess.stdout?.on('data', (data: Buffer) => {
+       logger.info(data.toString());
+     });
+     childProcess.stderr?.on('data', (data: Buffer) => {
+       logger.error(data.toString());
+     });
+   });
+ }
 
- export function isK8sConfig(config: EnvConfig): config is K8sLocalConfig | K8sGCloudConfig {
-   return config.K8S === 'local' || config.K8S === 'gcloud';
+ export function getAztecBin() {
+   return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
  }
 
- export function isGCloudConfig(config: EnvConfig): config is K8sGCloudConfig {
-   return config.K8S === 'gcloud';
+ /**
+  * Runs the Aztec binary
+  * @param args - The arguments to pass to the Aztec binary
+  * @param logger - The logger to use
+  * @param env - Optional environment variables to set for the process
+  * @returns The exit code of the Aztec binary
+  */
+ export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
+   return runScript('node', [getAztecBin(), ...args], logger, env);
  }
 
- export function setupEnvironment(env: unknown): EnvConfig {
-   const config = envSchema.parse(env);
-   if (isGCloudConfig(config)) {
-     const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
-     execSync(command);
-   }
-   return config;
+ export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
+   const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+   return runScript(scriptPath, args, logger, env);
  }
 
  export async function startPortForward({
@@ -103,7 +104,7 @@ export async function startPortForward({
  }> {
    const hostPortAsString = hostPort ? hostPort.toString() : '';
 
-   logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
+   logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
 
    const process = spawn(
      'kubectl',
@@ -121,21 +122,20 @@
        const str = data.toString() as string;
        if (!isResolved && str.includes('Forwarding from')) {
          isResolved = true;
-         logger.info(str);
+         logger.debug(`Port forward for ${resource}: ${str}`);
          const port = str.search(/:\d+/);
          if (port === -1) {
            throw new Error('Port not found in port forward output');
          }
          const portNumber = parseInt(str.slice(port + 1));
-         logger.info(`Port forward connected: ${portNumber}`);
-         logger.info(`Port forward connected: ${portNumber}`);
+         logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
          resolve(portNumber);
        } else {
          logger.silent(str);
        }
      });
      process.stderr?.on('data', data => {
-       logger.info(data.toString());
+       logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
        // It's a strange thing:
        // If we don't pipe stderr, then the port forwarding does not work.
        // Log to silent because this doesn't actually report errors,
@@ -145,16 +145,16 @@
      process.on('close', () => {
        if (!isResolved) {
          isResolved = true;
-         logger.warn('Port forward closed before connection established');
+         logger.warn(`Port forward for ${resource} closed before connection established`);
          resolve(0);
        }
      });
      process.on('error', error => {
-       logger.error(`Port forward error: ${error}`);
+       logger.error(`Port forward for ${resource} error: ${error}`);
        resolve(0);
      });
      process.on('exit', code => {
-       logger.info(`Port forward exited with code ${code}`);
+       logger.verbose(`Port forward for ${resource} exited with code ${code}`);
        resolve(0);
      });
    });
@@ -164,6 +164,55 @@
    return { process, port };
  }
 
+ export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
+   const { promise, resolve, reject } = promiseWithResolvers<string>();
+   const process = spawn(
+     'kubectl',
+     [
+       'get',
+       'service',
+       '-n',
+       namespace,
+       `${namespace}-${serviceName}`,
+       '--output',
+       "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
+     ],
+     {
+       stdio: 'pipe',
+     },
+   );
+
+   let ip = '';
+   process.stdout.on('data', data => {
+     ip += data;
+   });
+   process.on('error', err => {
+     reject(err);
+   });
+   process.on('exit', () => {
+     // kubectl prints JSON. Remove the quotes
+     resolve(ip.replace(/"|'/g, ''));
+   });
+
+   return promise;
+ }
+
+ export function startPortForwardForRPC(namespace: string, index = 0) {
+   return startPortForward({
+     resource: `pod/${namespace}-rpc-aztec-node-${index}`,
+     namespace,
+     containerPort: 8080,
+   });
+ }
+
+ export function startPortForwardForEthereum(namespace: string) {
+   return startPortForward({
+     resource: `services/${namespace}-eth-execution`,
+     namespace,
+     containerPort: 8545,
+   });
+ }
+
  export async function deleteResourceByName({
    resource,
    namespace,
@@ -187,12 +236,28 @@ export async function deleteResourceByLabel({
    resource,
    namespace,
    label,
+   timeout = '5m',
+   force = false,
  }: {
    resource: string;
    namespace: string;
    label: string;
+   timeout?: string;
+   force?: boolean;
  }) {
-   const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true`;
+   try {
+     // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+     const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+     const regex = `(^|\\.)${escaped}(\\.|$)`;
+     await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
+   } catch (error) {
+     logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+     return '';
+   }
+
+   const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
+     force ? '--force' : ''
+   }`;
    logger.info(`command: ${command}`);
    const { stdout } = await execAsync(command);
    return stdout;
@@ -217,13 +282,74 @@ export async function waitForResourceByLabel({
    return stdout;
  }
 
+ export async function waitForResourceByName({
+   resource,
+   name,
+   namespace,
+   condition = 'Ready',
+   timeout = '10m',
+ }: {
+   resource: string;
+   name: string;
+   namespace: string;
+   condition?: string;
+   timeout?: string;
+ }) {
+   const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+   logger.info(`command: ${command}`);
+   const { stdout } = await execAsync(command);
+   return stdout;
+ }
+
+ export async function waitForResourcesByName({
+   resource,
+   names,
+   namespace,
+   condition = 'Ready',
+   timeout = '10m',
+ }: {
+   resource: string;
+   names: string[];
+   namespace: string;
+   condition?: string;
+   timeout?: string;
+ }) {
+   if (!names.length) {
+     throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+   }
+
+   // Wait all in parallel; if any fails, surface which one.
+   await Promise.all(
+     names.map(async name => {
+       try {
+         await waitForResourceByName({ resource, name, namespace, condition, timeout });
+       } catch (err) {
+         throw new Error(
+           `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
+             err,
+           )}`,
+         );
+       }
+     }),
+   );
+ }
+
  export function getChartDir(spartanDir: string, chartName: string) {
    return path.join(spartanDir.trim(), chartName);
  }
 
- function valuesToArgs(values: Record<string, string | number>) {
+ function shellQuote(value: string) {
+   // Single-quote safe shell escaping: ' -> '\''
+   return `'${value.replace(/'/g, "'\\''")}'`;
+ }
+
+ function valuesToArgs(values: Record<string, string | number | boolean>) {
    return Object.entries(values)
-     .map(([key, value]) => `--set ${key}=${value}`)
+     .map(([key, value]) =>
+       typeof value === 'number' || typeof value === 'boolean'
+         ? `--set ${key}=${value}`
+         : `--set-string ${key}=${shellQuote(String(value))}`,
+     )
      .join(' ');
  }
 
@@ -241,7 +367,7 @@ function createHelmCommand({
    namespace: string;
    valuesFile: string | undefined;
    timeout: string;
-   values: Record<string, string | number>;
+   values: Record<string, string | number | boolean>;
    reuseValues?: boolean;
  }) {
    const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -258,6 +384,61 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
    return stdout;
  }
 
+ async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
+   try {
+     const { stdout } = await execAsync(
+       `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
+     );
+     const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
+     const row = parsed.find(r => r.name === instanceName);
+     return row?.status;
+   } catch {
+     return undefined;
+   }
+ }
+
+ async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
+   const labelSelector = `owner=helm,name=${instanceName}`;
+   const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+   logger.warn(`Force deleting Helm release record: ${cmd}`);
+   await execAsync(cmd).catch(() => undefined);
+ }
+
+ async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
+   try {
+     const status = await getHelmReleaseStatus(instanceName, namespace);
+     return status?.toLowerCase() === 'deployed';
+   } catch {
+     return false;
+   }
+ }
+
+ export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
+   // uninstall the helm chart if it exists
+   logger.info(`Uninstalling helm chart ${instanceName}`);
+   await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+   // and delete the chaos-mesh resources created by this release
+   const deleteByLabel = async (resource: string) => {
+     const args = {
+       resource,
+       namespace: namespace,
+       label: `app.kubernetes.io/instance=${instanceName}`,
+     } as const;
+     logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+     await deleteResourceByLabel(args).catch(e => {
+       logger.error(`Error deleting ${resource}: ${e}`);
+       logger.info(`Force deleting ${resource}`);
+       return deleteResourceByLabel({ ...args, force: true });
+     });
+   };
+
+   await deleteByLabel('podchaos');
+   await deleteByLabel('networkchaos');
+   await deleteByLabel('podnetworkchaos');
+   await deleteByLabel('workflows');
+   await deleteByLabel('workflownodes');
+ }
+
  /**
   * Installs a Helm chart with the given parameters.
   * @param instanceName - The name of the Helm chart instance.
@@ -280,8 +461,7 @@ export async function installChaosMeshChart({
    targetNamespace,
    valuesFile,
    helmChartDir,
-   chaosMeshNamespace = 'chaos-mesh',
-   timeout = '5m',
+   timeout = '10m',
    clean = true,
    values = {},
    logger,
@@ -297,27 +477,13 @@
    logger: Logger;
  }) {
    if (clean) {
-     // uninstall the helm chart if it exists
-     logger.info(`Uninstalling helm chart ${instanceName}`);
-     await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-     // and delete the podchaos resource
-     const deleteArgs = {
-       resource: 'podchaos',
-       namespace: chaosMeshNamespace,
-       name: `${targetNamespace}-${instanceName}`,
-     };
-     logger.info(`Deleting podchaos resource`);
-     await deleteResourceByName(deleteArgs).catch(e => {
-       logger.error(`Error deleting podchaos resource: ${e}`);
-       logger.info(`Force deleting podchaos resource`);
-       return deleteResourceByName({ ...deleteArgs, force: true });
-     });
+     await uninstallChaosMesh(instanceName, targetNamespace, logger);
    }
 
    return execHelmCommand({
      instanceName,
      helmChartDir,
-     namespace: chaosMeshNamespace,
+     namespace: targetNamespace,
      valuesFile,
      timeout,
      values: { ...values, 'global.targetNamespace': targetNamespace },
@@ -347,22 +513,49 @@ export function applyProverFailure({
    });
  }
 
+ export function applyValidatorFailure({
+   namespace,
+   spartanDir,
+   logger,
+   values,
+   instanceName,
+ }: {
+   namespace: string;
+   spartanDir: string;
+   logger: Logger;
+   values?: Record<string, string | number>;
+   instanceName?: string;
+ }) {
+   return installChaosMeshChart({
+     instanceName: instanceName ?? 'validator-failure',
+     targetNamespace: namespace,
+     valuesFile: 'validator-failure.yaml',
+     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     values,
+     logger,
+   });
+ }
+
  export function applyProverKill({
    namespace,
    spartanDir,
    logger,
+   values,
  }: {
    namespace: string;
    spartanDir: string;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
      instanceName: 'prover-kill',
      targetNamespace: namespace,
      valuesFile: 'prover-kill.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     chaosMeshNamespace: namespace,
      clean: true,
      logger,
+     values,
    });
  }
 
@@ -370,10 +563,12 @@ export function applyProverBrokerKill({
    namespace,
    spartanDir,
    logger,
+   values,
  }: {
    namespace: string;
    spartanDir: string;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
      instanceName: 'prover-broker-kill',
@@ -382,63 +577,79 @@
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
      clean: true,
      logger,
+     values,
    });
  }
 
  export function applyBootNodeFailure({
+   instanceName = 'boot-node-failure',
    namespace,
    spartanDir,
    durationSeconds,
    logger,
+   values,
  }: {
+   instanceName?: string;
    namespace: string;
    spartanDir: string;
    durationSeconds: number;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
-     instanceName: 'boot-node-failure',
+     instanceName,
      targetNamespace: namespace,
      valuesFile: 'boot-node-failure.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
      values: {
        'bootNodeFailure.duration': `${durationSeconds}s`,
+       ...(values ?? {}),
      },
      logger,
    });
  }
 
  export function applyValidatorKill({
+   instanceName = 'validator-kill',
    namespace,
    spartanDir,
    logger,
+   values,
+   clean = true,
  }: {
+   instanceName?: string;
    namespace: string;
    spartanDir: string;
    logger: Logger;
+   values?: Record<string, string | number>;
+   clean?: boolean;
  }) {
    return installChaosMeshChart({
-     instanceName: 'validator-kill',
+     instanceName: instanceName ?? 'validator-kill',
      targetNamespace: namespace,
      valuesFile: 'validator-kill.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     clean,
      logger,
+     values,
    });
  }
 
  export function applyNetworkShaping({
+   instanceName = 'network-shaping',
    valuesFile,
    namespace,
    spartanDir,
    logger,
  }: {
+   instanceName?: string;
    valuesFile: string;
    namespace: string;
    spartanDir: string;
    logger: Logger;
  }) {
    return installChaosMeshChart({
-     instanceName: 'network-shaping',
+     instanceName,
      targetNamespace: namespace,
      valuesFile,
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
@@ -446,35 +657,283 @@
    });
  }
 
- export async function awaitL2BlockNumber(
+ export async function awaitCheckpointNumber(
    rollupCheatCodes: RollupCheatCodes,
-   blockNumber: bigint,
+   checkpointNumber: CheckpointNumber,
    timeoutSeconds: number,
    logger: Logger,
  ) {
-   logger.info(`Waiting for L2 Block ${blockNumber}`);
+   logger.info(`Waiting for checkpoint ${checkpointNumber}`);
    let tips = await rollupCheatCodes.getTips();
    const endTime = Date.now() + timeoutSeconds * 1000;
-   while (tips.pending < blockNumber && Date.now() < endTime) {
-     logger.info(`At L2 Block ${tips.pending}`);
+   while (tips.pending < checkpointNumber && Date.now() < endTime) {
+     logger.info(`At checkpoint ${tips.pending}`);
      await sleep(1000);
      tips = await rollupCheatCodes.getTips();
    }
-   if (tips.pending < blockNumber) {
-     throw new Error(`Timeout waiting for L2 Block ${blockNumber}, only reached ${tips.pending}`);
+   if (tips.pending < checkpointNumber) {
+     throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
    } else {
-     logger.info(`Reached L2 Block ${tips.pending}`);
+     logger.info(`Reached checkpoint ${tips.pending}`);
    }
  }
 
  export async function restartBot(namespace: string, logger: Logger) {
    logger.info(`Restarting bot`);
-   await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
+   await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
    await sleep(10 * 1000);
-   await waitForResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
+   // Some bot images may take time to report Ready due to heavy boot-time proving.
+   // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
+   await waitForResourceByLabel({
+     resource: 'pods',
+     namespace,
+     label: 'app.kubernetes.io/name=bot',
+     condition: 'PodReadyToStartContainers',
+   });
    logger.info(`Bot restarted`);
  }
 
+ /**
+  * Installs or upgrades the transfer bot Helm release for the given namespace.
+  * Intended for test setup to enable L2 traffic generation only when needed.
+  */
+ export async function installTransferBot({
+   namespace,
+   spartanDir,
+   logger,
+   replicas = 1,
+   txIntervalSeconds = 10,
+   followChain = 'PENDING',
+   mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
+   mnemonicStartIndex,
+   botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
+   nodeUrl,
+   timeout = '15m',
+   reuseValues = true,
+   aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
+ }: {
+   namespace: string;
+   spartanDir: string;
+   logger: Logger;
+   replicas?: number;
+   txIntervalSeconds?: number;
+   followChain?: string;
+   mnemonic?: string;
+   mnemonicStartIndex?: number | string;
+   botPrivateKey?: string;
+   nodeUrl?: string;
+   timeout?: string;
+   reuseValues?: boolean;
+   aztecSlotDuration?: number;
+ }) {
+   const instanceName = `${namespace}-bot-transfers`;
+   const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+   const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+
+   logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+
+   const values: Record<string, string | number | boolean> = {
+     'bot.replicaCount': replicas,
+     'bot.txIntervalSeconds': txIntervalSeconds,
+     'bot.followChain': followChain,
+     'bot.botPrivateKey': botPrivateKey,
+     'bot.nodeUrl': resolvedNodeUrl,
+     'bot.mnemonic': mnemonic,
+     'bot.feePaymentMethod': 'fee_juice',
+     'aztec.slotDuration': aztecSlotDuration,
+     // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+     // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+     'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+     // Provide L1 execution RPC for bridging fee juice
+     'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+     // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+     'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+
+     // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+     // can be installed by users without cluster-scoped RBAC permissions.
+     'bot.rbac.create': false,
+     'bot.serviceAccount.create': false,
+     'bot.serviceAccount.name': 'default',
+   };
+   // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+   if (mnemonicStartIndex === undefined) {
+     values['bot.mnemonicStartIndex'] = 0;
+   }
+   // Also pass a funded private key directly if available
+   if (process.env.FUNDING_PRIVATE_KEY) {
+     values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+   }
+   // Align bot image with the running network image: prefer env var, else detect from a validator pod
+   let repositoryFromEnv: string | undefined;
+   let tagFromEnv: string | undefined;
+   const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+   if (aztecDockerImage && aztecDockerImage.includes(':')) {
+     const lastColon = aztecDockerImage.lastIndexOf(':');
+     repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+     tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+   }
+
+   let repository = repositoryFromEnv;
+   let tag = tagFromEnv;
+   if (!repository || !tag) {
+     try {
+       const { stdout } = await execAsync(
+         `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+       );
+       const image = stdout.trim().replace(/^'|'$/g, '');
+       if (image && image.includes(':')) {
+         const lastColon = image.lastIndexOf(':');
+         repository = image.slice(0, lastColon);
+         tag = image.slice(lastColon + 1);
+       }
+     } catch (err) {
+       logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+     }
+   }
+   if (repository && tag) {
+     values['global.aztecImage.repository'] = repository;
+     values['global.aztecImage.tag'] = tag;
+   }
+   if (mnemonicStartIndex !== undefined) {
+     values['bot.mnemonicStartIndex'] =
+       typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+   }
+
+   // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+   // `helm upgrade --install` can error with "has no deployed releases".
+   // In that case, clear the release record and do a clean install.
+   const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+   if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+     logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+     await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
+       () => undefined,
+     );
+     // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+     const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+     if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+       await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+     }
+   }
+
+   // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+   // Only reuse values when we have a deployed release to reuse from.
+   const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
+
+   await execHelmCommand({
+     instanceName,
+     helmChartDir,
+     namespace,
+     valuesFile: undefined,
+     timeout,
+     values: values as unknown as Record<string, string | number | boolean>,
+     reuseValues: effectiveReuseValues,
+   });
+
+   if (replicas > 0) {
+     await waitForResourceByLabel({
+       resource: 'pods',
+       namespace,
+       label: 'app.kubernetes.io/name=bot',
+       condition: 'PodReadyToStartContainers',
+     });
+   }
+ }
+
+ /**
+  * Uninstalls the transfer bot Helm release from the given namespace.
+  * Intended for test teardown to clean up bot resources.
+  */
+ export async function uninstallTransferBot(namespace: string, logger: Logger) {
+   const instanceName = `${namespace}-bot-transfers`;
+   logger.info(`Uninstalling transfer bot release ${instanceName}`);
+   await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+   // Ensure any leftover pods are removed
+   await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
+     () => undefined,
+   );
+ }
+
+ /**
+  * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+  * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+  */
+ export async function setValidatorTxDrop({
+   namespace,
+   enabled,
+   probability,
+   logger,
+ }: {
+   namespace: string;
+   enabled: boolean;
+   probability: number;
+   logger: Logger;
+ }) {
+   const drop = enabled ? 'true' : 'false';
+   const prob = String(probability);
+
+   const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+   let updated = false;
+   for (const selector of selectors) {
+     try {
+       const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+       const names = list.stdout
+         .split('\n')
+         .map(s => s.trim())
+         .filter(Boolean);
+       if (names.length === 0) {
+         continue;
+       }
+       const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+       logger.info(`command: ${cmd}`);
+       await execAsync(cmd);
+       updated = true;
+     } catch (e) {
+       logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+     }
+   }
+
+   if (!updated) {
+     logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+     return;
+   }
+
+   // Restart validator pods to ensure env vars take effect and wait for readiness
+   await restartValidators(namespace, logger);
+ }
+
+ export async function restartValidators(namespace: string, logger: Logger) {
+   const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+   let any = false;
+   for (const selector of selectors) {
+     try {
+       const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+       if (!stdout || stdout.trim().length === 0) {
+         continue;
+       }
+       any = true;
+       await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
+     } catch (e) {
+       logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+     }
+   }
+
+   if (!any) {
+     logger.warn(`No validator pods found to restart in ${namespace}.`);
+     return;
+   }
+
+   // Wait for either label to be Ready
+   for (const selector of selectors) {
+     try {
+       await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
+       return;
+     } catch {
+       // try next
+     }
+   }
+   logger.warn(`Validator pods did not report Ready; continuing.`);
+ }
+
  export async function enableValidatorDynamicBootNode(
    instanceName: string,
    namespace: string,
@@ -497,64 +956,133 @@ export async function enableValidatorDynamicBootNode(
    logger.info(`Validator dynamic boot node enabled`);
  }
 
- export async function runAlertCheck(config: EnvConfig, alerts: AlertConfig[], logger: Logger) {
-   if (isK8sConfig(config)) {
-     const { process, port } = await startPortForward({
-       resource: `svc/metrics-grafana`,
-       namespace: 'metrics',
-       containerPort: config.CONTAINER_METRICS_PORT,
-     });
-     const alertChecker = new AlertChecker(logger, {
-       grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
-       grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`,
-     });
-     await alertChecker.runAlertCheck(alerts);
-     process.kill();
-   } else {
-     logger.info('Not running alert check in non-k8s environment');
+ export async function getSequencers(namespace: string) {
+   const selectors = [
+     'app.kubernetes.io/name=validator',
+     'app.kubernetes.io/component=validator',
+     'app.kubernetes.io/component=sequencer-node',
+     'app=validator',
+   ];
+   for (const selector of selectors) {
+     try {
+       const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+       const { stdout } = await execAsync(command);
+       const sequencers = stdout
+         .split(' ')
+         .map(s => s.trim())
+         .filter(Boolean);
+       if (sequencers.length > 0) {
+         logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+         return sequencers;
+       }
+     } catch {
+       // try next selector
+     }
    }
+
+   // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+   throw new Error(
+     `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+   );
  }
 
- export async function updateSequencerConfig(url: string, config: Partial<SequencerConfig>) {
-   const node = createAztecNodeClient(url);
-   await node.setConfig(config);
+ export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+   return withSequencersAdmin(env, async client => {
+     await client.setConfig(config);
+     return client.getConfig();
+   });
  }
 
- export async function getSequencers(namespace: string) {
-   const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
-   const { stdout } = await execAsync(command);
-   return stdout.split(' ');
+ export function getSequencersConfig(env: TestConfig) {
+   return withSequencersAdmin(env, client => client.getConfig());
  }
 
- export async function updateK8sSequencersConfig(args: {
-   containerPort: number;
-   namespace: string;
-   config: Partial<SequencerConfig>;
- }) {
-   const { containerPort, namespace, config } = args;
+ export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+   const adminContainerPort = 8880;
+   const namespace = env.NAMESPACE;
    const sequencers = await getSequencers(namespace);
+   const results = [];
+
    for (const sequencer of sequencers) {
      const { process, port } = await startPortForward({
        resource: `pod/${sequencer}`,
        namespace,
-       containerPort,
+       containerPort: adminContainerPort,
      });
 
      const url = `http://localhost:${port}`;
-     await updateSequencerConfig(url, config);
+     await retry(
+       () => fetch(`${url}/status`).then(res => res.status === 200),
+       'forward node admin port',
+       makeBackoff([1, 1, 2, 6]),
+       logger,
+       true,
+     );
+     const client = createAztecNodeAdminClient(url);
+     results.push(await fn(client));
      process.kill();
    }
+
+   return results;
  }
 
- export async function updateSequencersConfig(env: EnvConfig, config: Partial<SequencerConfig>) {
-   if (isK8sConfig(env)) {
-     await updateK8sSequencersConfig({
-       containerPort: env.CONTAINER_NODE_PORT,
-       namespace: env.NAMESPACE,
-       config,
+ /**
+  * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+  * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+  */
+ export async function getPublicViemClient(
+   env: TestConfig,
+   /** If set, will push the new process into it */
+   processes?: ChildProcess[],
+ ): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
+   const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+   if (CREATE_ETH_DEVNET) {
+     logger.info(`Creating port forward to eth execution node`);
+     const { process, port } = await startPortForward({
+       resource: `svc/${NAMESPACE}-eth-execution`,
+       namespace: NAMESPACE,
+       containerPort: 8545,
      });
+     const url = `http://127.0.0.1:${port}`;
+     const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
+     if (processes) {
+       processes.push(process);
+     }
+     return { url, client, process };
    } else {
-     await updateSequencerConfig(env.NODE_URL, config);
+     logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+     if (!L1_RPC_URLS_JSON) {
+       throw new Error(`L1_RPC_URLS_JSON is not defined`);
+     }
+     const client: ViemPublicClient = createPublicClient({
+       transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
+     });
+     return { url: L1_RPC_URLS_JSON, client };
+   }
+ }
+
+ /** Queries an Aztec node for the L1 deployment addresses */
+ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
+   let forwardProcess: ChildProcess | undefined;
+   try {
+     const [sequencer] = await getSequencers(env.NAMESPACE);
+     const { process, port } = await startPortForward({
+       resource: `pod/${sequencer}`,
+       namespace: env.NAMESPACE,
+       containerPort: 8080,
+     });
+
+     forwardProcess = process;
+     const url = `http://127.0.0.1:${port}`;
+     const node = createAztecNodeClient(url);
+     return await retry(
+       () => node.getNodeInfo().then(i => i.l1ContractAddresses),
+       'get node info',
+       makeBackoff([1, 3, 6]),
+       logger,
+     );
+   } finally {
+     forwardProcess?.kill();
    }
  }
 
@@ -580,3 +1108,51 @@ export async function rollAztecPods(namespace: string) {
    await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
    await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
  }
+
+ /**
+  * Returns the absolute path to the git repository root
+  */
+ export function getGitProjectRoot(): string {
+   try {
+     const rootDir = execSync('git rev-parse --show-toplevel', {
+       encoding: 'utf-8',
+       stdio: ['ignore', 'pipe', 'ignore'],
+     }).trim();
+
+     return rootDir;
+   } catch (error) {
+     throw new Error(`Failed to determine git project root: ${error}`);
+   }
+ }
+
+ /** Returns a client to the RPC of the given sequencer (defaults to first) */
+ export async function getNodeClient(
+   env: TestConfig,
+   index: number = 0,
+ ): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
+   const namespace = env.NAMESPACE;
+   const containerPort = 8080;
+   const sequencers = await getSequencers(namespace);
+   const sequencer = sequencers[index];
+   if (!sequencer) {
+     throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+   }
+
+   const { process, port } = await startPortForward({
+     resource: `pod/${sequencer}`,
+     namespace,
+     containerPort,
+   });
+
+   const url = `http://localhost:${port}`;
+   await retry(
+     () => fetch(`${url}/status`).then(res => res.status === 200),
+     'forward port',
+     makeBackoff([1, 1, 2, 6]),
+     logger,
+     true,
+   );
+
+   const client = createAztecNodeClient(url);
+   return { node: client, port, process };
+ }
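
For tests migrating off the removed K8S discriminated-union config, a minimal sketch of the new TestConfig flow follows. The deep import path is an assumption based on the compiled file list above (dest/spartan/utils.js), and the config key passed through setConfig is illustrative only, not confirmed by this diff:

import { createLogger } from '@aztec/aztec.js/log';
// Assumed module path; adjust to however your test resolves the package build.
import { setupEnvironment, updateSequencersConfig } from '@aztec/end-to-end/dest/spartan/utils.js';

const logger = createLogger('e2e:example');
const env = setupEnvironment(process.env); // NAMESPACE defaults to 'scenario' per the schema above

// withSequencersAdmin port-forwards each sequencer's admin port (8880) in turn,
// so this applies the change to every sequencer pod getSequencers finds.
// minTxsPerBlock is an illustrative config key, not confirmed by this diff.
const configs = await updateSequencersConfig(env, { minTxsPerBlock: 1 });
logger.info(`Updated ${configs.length} sequencer(s)`);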
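The quoting change in valuesToArgs is easy to sanity-check in isolation. The sketch below copies the two helpers verbatim from the diff and shows that string values are routed through `--set-string` with single-quote escaping, while numbers and booleans stay as plain `--set` flags:

function shellQuote(value: string) {
  // Single-quote safe shell escaping: ' -> '\''
  return `'${value.replace(/'/g, "'\\''")}'`;
}

function valuesToArgs(values: Record<string, string | number | boolean>) {
  return Object.entries(values)
    .map(([key, value]) =>
      typeof value === 'number' || typeof value === 'boolean'
        ? `--set ${key}=${value}`
        : `--set-string ${key}=${shellQuote(String(value))}`,
    )
    .join(' ');
}

// '0xcafe01' stays a string instead of being coerced by Helm, and values with
// spaces (like the test mnemonic) survive shell word-splitting because they are single-quoted.
console.log(valuesToArgs({ 'bot.replicaCount': 1, 'bot.botPrivateKey': '0xcafe01' }));
// --set bot.replicaCount=1 --set-string bot.botPrivateKey='0xcafe01'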
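A sketch of the new getNodeClient helper, which combines getSequencers, startPortForward, and a /status retry, and hands back the kubectl port-forward child process so the caller can tear it down. Same assumed import path as above; the rollupAddress field is an assumption about the L1ContractAddresses type imported in the diff:

import { setupEnvironment, getNodeClient } from '@aztec/end-to-end/dest/spartan/utils.js'; // assumed path

const env = setupEnvironment(process.env);
const { node, port, process: forward } = await getNodeClient(env); // index defaults to 0
try {
  const info = await node.getNodeInfo();
  console.log(`node on :${port} reports L1 rollup at ${info.l1ContractAddresses.rollupAddress}`); // field name assumed
} finally {
  forward.kill(); // always clean up the kubectl port-forward child process
}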
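And a sketch of the new script runners. getAztecBin resolves the binary relative to the git root via getGitProjectRoot, so this only works from inside a monorepo checkout; the script path below is hypothetical:

import { createLogger } from '@aztec/aztec.js/log';
import { runAztecBin, runProjectScript } from '@aztec/end-to-end/dest/spartan/utils.js'; // assumed path

const logger = createLogger('e2e:scripts');

// Output streams through the logger; the resolved value is the child's exit code.
const code = await runAztecBin(['--help'], logger);
if (code !== 0) {
  throw new Error(`aztec binary exited with code ${code}`);
}

// Relative paths resolve against the git project root; absolute paths are used as-is.
await runProjectScript('spartan/scripts/example.sh', [], logger); // hypothetical script path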