@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.24de95ac

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166)
  1. package/dest/bench/client_flows/benchmark.d.ts +61 -0
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
  3. package/dest/bench/client_flows/benchmark.js +261 -0
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +73 -0
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
  6. package/dest/bench/client_flows/client_flows_benchmark.js +311 -0
  7. package/dest/bench/client_flows/config.d.ts +14 -0
  8. package/dest/bench/client_flows/config.d.ts.map +1 -0
  9. package/dest/bench/client_flows/config.js +106 -0
  10. package/dest/bench/client_flows/data_extractor.d.ts +2 -0
  11. package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
  12. package/dest/bench/client_flows/data_extractor.js +99 -0
  13. package/dest/bench/utils.d.ts +10 -36
  14. package/dest/bench/utils.d.ts.map +1 -1
  15. package/dest/bench/utils.js +26 -66
  16. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +20 -12
  17. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  18. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
  19. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +18 -24
  20. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  21. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +48 -69
  22. package/dest/e2e_deploy_contract/deploy_test.d.ts +14 -6
  23. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  24. package/dest/e2e_deploy_contract/deploy_test.js +13 -19
  25. package/dest/e2e_epochs/epochs_test.d.ts +58 -17
  26. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  27. package/dest/e2e_epochs/epochs_test.js +224 -43
  28. package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
  29. package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
  30. package/dest/e2e_fees/bridging_race.notest.js +63 -0
  31. package/dest/e2e_fees/fees_test.d.ts +20 -9
  32. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  33. package/dest/e2e_fees/fees_test.js +98 -107
  34. package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
  35. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  36. package/dest/e2e_l1_publisher/write_json.js +57 -0
  37. package/dest/e2e_multi_validator/utils.d.ts +12 -0
  38. package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
  39. package/dest/e2e_multi_validator/utils.js +214 -0
  40. package/dest/e2e_nested_contract/nested_contract_test.d.ts +9 -6
  41. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  42. package/dest/e2e_nested_contract/nested_contract_test.js +22 -19
  43. package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
  44. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
  45. package/dest/e2e_p2p/inactivity_slash_test.js +135 -0
  46. package/dest/e2e_p2p/p2p_network.d.ts +69 -22
  47. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  48. package/dest/e2e_p2p/p2p_network.js +180 -129
  49. package/dest/e2e_p2p/shared.d.ts +41 -5
  50. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  51. package/dest/e2e_p2p/shared.js +163 -19
  52. package/dest/e2e_token_contract/token_contract_test.d.ts +11 -5
  53. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  54. package/dest/e2e_token_contract/token_contract_test.js +50 -26
  55. package/dest/{e2e_prover → fixtures}/e2e_prover_test.d.ts +14 -9
  56. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
  57. package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +95 -100
  58. package/dest/fixtures/fixtures.d.ts +5 -6
  59. package/dest/fixtures/fixtures.d.ts.map +1 -1
  60. package/dest/fixtures/fixtures.js +4 -3
  61. package/dest/fixtures/get_acvm_config.d.ts +1 -1
  62. package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
  63. package/dest/fixtures/get_acvm_config.js +2 -14
  64. package/dest/fixtures/get_bb_config.d.ts +1 -1
  65. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  66. package/dest/fixtures/get_bb_config.js +10 -17
  67. package/dest/fixtures/l1_to_l2_messaging.d.ts +8 -5
  68. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  69. package/dest/fixtures/l1_to_l2_messaging.js +44 -18
  70. package/dest/fixtures/setup_l1_contracts.d.ts +3 -3
  71. package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
  72. package/dest/fixtures/setup_l1_contracts.js +4 -4
  73. package/dest/fixtures/setup_p2p_test.d.ts +14 -13
  74. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  75. package/dest/fixtures/setup_p2p_test.js +73 -21
  76. package/dest/fixtures/snapshot_manager.d.ts +15 -7
  77. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  78. package/dest/fixtures/snapshot_manager.js +147 -121
  79. package/dest/fixtures/token_utils.d.ts +6 -3
  80. package/dest/fixtures/token_utils.d.ts.map +1 -1
  81. package/dest/fixtures/token_utils.js +23 -10
  82. package/dest/fixtures/utils.d.ts +76 -37
  83. package/dest/fixtures/utils.d.ts.map +1 -1
  84. package/dest/fixtures/utils.js +464 -368
  85. package/dest/fixtures/web3signer.d.ts +5 -0
  86. package/dest/fixtures/web3signer.d.ts.map +1 -0
  87. package/dest/fixtures/web3signer.js +53 -0
  88. package/dest/quality_of_service/alert_checker.d.ts +1 -1
  89. package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
  90. package/dest/shared/cross_chain_test_harness.d.ts +41 -25
  91. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  92. package/dest/shared/cross_chain_test_harness.js +104 -50
  93. package/dest/shared/gas_portal_test_harness.d.ts +32 -24
  94. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  95. package/dest/shared/gas_portal_test_harness.js +50 -29
  96. package/dest/shared/jest_setup.js +1 -1
  97. package/dest/shared/submit-transactions.d.ts +5 -3
  98. package/dest/shared/submit-transactions.d.ts.map +1 -1
  99. package/dest/shared/submit-transactions.js +8 -7
  100. package/dest/shared/uniswap_l1_l2.d.ts +13 -11
  101. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  102. package/dest/shared/uniswap_l1_l2.js +138 -108
  103. package/dest/simulators/lending_simulator.d.ts +6 -6
  104. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  105. package/dest/simulators/lending_simulator.js +13 -16
  106. package/dest/simulators/token_simulator.d.ts +5 -2
  107. package/dest/simulators/token_simulator.d.ts.map +1 -1
  108. package/dest/simulators/token_simulator.js +16 -13
  109. package/dest/spartan/setup_test_wallets.d.ts +23 -10
  110. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  111. package/dest/spartan/setup_test_wallets.js +167 -58
  112. package/dest/spartan/utils.d.ts +106 -303
  113. package/dest/spartan/utils.d.ts.map +1 -1
  114. package/dest/spartan/utils.js +434 -130
  115. package/package.json +61 -56
  116. package/src/bench/client_flows/benchmark.ts +341 -0
  117. package/src/bench/client_flows/client_flows_benchmark.ts +402 -0
  118. package/src/bench/client_flows/config.ts +61 -0
  119. package/src/bench/client_flows/data_extractor.ts +111 -0
  120. package/src/bench/utils.ts +22 -76
  121. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
  122. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +63 -105
  123. package/src/e2e_deploy_contract/deploy_test.ts +23 -38
  124. package/src/e2e_epochs/epochs_test.ts +274 -54
  125. package/src/e2e_fees/bridging_race.notest.ts +80 -0
  126. package/src/e2e_fees/fees_test.ts +137 -136
  127. package/src/e2e_l1_publisher/write_json.ts +76 -0
  128. package/src/e2e_multi_validator/utils.ts +258 -0
  129. package/src/e2e_nested_contract/nested_contract_test.ts +27 -18
  130. package/src/e2e_p2p/inactivity_slash_test.ts +178 -0
  131. package/src/e2e_p2p/p2p_network.ts +272 -166
  132. package/src/e2e_p2p/shared.ts +244 -29
  133. package/src/e2e_token_contract/token_contract_test.ts +43 -39
  134. package/src/fixtures/dumps/epoch_proof_result.json +1 -1
  135. package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +101 -145
  136. package/src/fixtures/fixtures.ts +4 -3
  137. package/src/fixtures/get_acvm_config.ts +3 -11
  138. package/src/fixtures/get_bb_config.ts +18 -13
  139. package/src/fixtures/l1_to_l2_messaging.ts +53 -23
  140. package/src/fixtures/setup_l1_contracts.ts +6 -7
  141. package/src/fixtures/setup_p2p_test.ts +112 -38
  142. package/src/fixtures/snapshot_manager.ts +187 -139
  143. package/src/fixtures/token_utils.ts +29 -12
  144. package/src/fixtures/utils.ts +552 -425
  145. package/src/fixtures/web3signer.ts +63 -0
  146. package/src/guides/up_quick_start.sh +6 -14
  147. package/src/quality_of_service/alert_checker.ts +1 -1
  148. package/src/shared/cross_chain_test_harness.ts +108 -79
  149. package/src/shared/gas_portal_test_harness.ts +58 -49
  150. package/src/shared/jest_setup.ts +1 -1
  151. package/src/shared/submit-transactions.ts +12 -8
  152. package/src/shared/uniswap_l1_l2.ts +173 -176
  153. package/src/simulators/lending_simulator.ts +12 -15
  154. package/src/simulators/token_simulator.ts +21 -13
  155. package/src/spartan/DEVELOP.md +121 -0
  156. package/src/spartan/setup_test_wallets.ts +215 -93
  157. package/src/spartan/utils.ts +490 -130
  158. package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
  159. package/dest/sample-dapp/connect.js +0 -12
  160. package/dest/sample-dapp/contracts.js +0 -10
  161. package/dest/sample-dapp/deploy.js +0 -35
  162. package/dest/sample-dapp/index.js +0 -98
  163. package/src/sample-dapp/connect.mjs +0 -16
  164. package/src/sample-dapp/contracts.mjs +0 -14
  165. package/src/sample-dapp/deploy.mjs +0 -40
  166. package/src/sample-dapp/index.mjs +0 -128
@@ -1,89 +1,86 @@
1
- import { createAztecNodeClient, createLogger, sleep } from '@aztec/aztec.js';
2
- import type { RollupCheatCodes } from '@aztec/aztec.js/ethereum';
1
+ import { createLogger } from '@aztec/aztec.js/log';
2
+ import type { RollupCheatCodes } from '@aztec/aztec/testing';
3
+ import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
3
4
  import type { Logger } from '@aztec/foundation/log';
4
- import type { SequencerConfig } from '@aztec/sequencer-client';
5
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
6
+ import { schemas } from '@aztec/foundation/schemas';
7
+ import { sleep } from '@aztec/foundation/sleep';
8
+ import {
9
+ type AztecNodeAdmin,
10
+ type AztecNodeAdminConfig,
11
+ createAztecNodeAdminClient,
12
+ createAztecNodeClient,
13
+ } from '@aztec/stdlib/interfaces/client';
5
14
 
6
15
  import { ChildProcess, exec, execSync, spawn } from 'child_process';
7
16
  import path from 'path';
8
17
  import { promisify } from 'util';
18
+ import { createPublicClient, fallback, http } from 'viem';
9
19
  import { z } from 'zod';
10
20
 
11
- import { AlertChecker, type AlertConfig } from '../quality_of_service/alert_checker.js';
12
-
13
21
  const execAsync = promisify(exec);
14
22
 
15
23
  const logger = createLogger('e2e:k8s-utils');
16
24
 
17
- const ethereumHostsSchema = z.string().refine(
18
- str =>
19
- str.split(',').every(url => {
20
- try {
21
- new URL(url.trim());
22
- return true;
23
- } catch {
24
- return false;
25
- }
26
- }),
27
- 'ETHEREUM_HOSTS must be a comma-separated list of valid URLs',
28
- );
29
-
30
- const k8sLocalConfigSchema = z.object({
31
- ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
32
- AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
33
- AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
34
- AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
35
- INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
36
- NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
37
- CONTAINER_NODE_PORT: z.coerce.number().default(8080),
38
- CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
39
- CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
40
- CONTAINER_PXE_PORT: z.coerce.number().default(8080),
41
- CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
42
- CONTAINER_METRICS_PORT: z.coerce.number().default(80),
43
- GRAFANA_PASSWORD: z.string().optional(),
44
- METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
45
- SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
46
- ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
47
- L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
48
- SEPOLIA_RUN: z.string().default('false'),
49
- K8S: z.literal('local'),
25
+ const testConfigSchema = z.object({
26
+ NAMESPACE: z.string().default('scenario'),
27
+ REAL_VERIFIER: schemas.Boolean.optional().default(true),
28
+ CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
29
+ L1_RPC_URLS_JSON: z.string().optional(),
30
+ L1_ACCOUNT_MNEMONIC: z.string().optional(),
31
+ AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
32
+ AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
50
33
  });
51
34
 
52
- const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
53
- K8S: z.literal('gcloud'),
54
- CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
55
- REGION: z.string().min(1, 'REGION env variable must be set'),
56
- });
57
-
58
- const directConfigSchema = z.object({
59
- PXE_URL: z.string().url('PXE_URL must be a valid URL'),
60
- NODE_URL: z.string().url('NODE_URL must be a valid URL'),
61
- ETHEREUM_HOSTS: ethereumHostsSchema,
62
- K8S: z.literal('false'),
63
- });
35
+ export type TestConfig = z.infer<typeof testConfigSchema>;
64
36
 
65
- const envSchema = z.discriminatedUnion('K8S', [k8sLocalConfigSchema, k8sGCloudConfigSchema, directConfigSchema]);
37
+ export function setupEnvironment(env: unknown): TestConfig {
38
+ const config = testConfigSchema.parse(env);
39
+ logger.warn(`Loaded env config`, config);
40
+ return config;
41
+ }
66
42
 
67
- export type K8sLocalConfig = z.infer<typeof k8sLocalConfigSchema>;
68
- export type K8sGCloudConfig = z.infer<typeof k8sGCloudConfigSchema>;
69
- export type DirectConfig = z.infer<typeof directConfigSchema>;
70
- export type EnvConfig = z.infer<typeof envSchema>;
43
+ /**
44
+ * @param path - The path to the script, relative to the project root
45
+ * @param args - The arguments to pass to the script
46
+ * @param logger - The logger to use
47
+ * @returns The exit code of the script
48
+ */
49
+ function runScript(path: string, args: string[], logger: Logger, env?: Record<string, string>) {
50
+ const childProcess = spawn(path, args, {
51
+ stdio: ['ignore', 'pipe', 'pipe'],
52
+ env: env ? { ...process.env, ...env } : process.env,
53
+ });
54
+ return new Promise<number>((resolve, reject) => {
55
+ childProcess.on('close', (code: number | null) => resolve(code ?? 0));
56
+ childProcess.on('error', reject);
57
+ childProcess.stdout?.on('data', (data: Buffer) => {
58
+ logger.info(data.toString());
59
+ });
60
+ childProcess.stderr?.on('data', (data: Buffer) => {
61
+ logger.error(data.toString());
62
+ });
63
+ });
64
+ }
71
65
 
72
- export function isK8sConfig(config: EnvConfig): config is K8sLocalConfig | K8sGCloudConfig {
73
- return config.K8S === 'local' || config.K8S === 'gcloud';
66
+ export function getAztecBin() {
67
+ return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
74
68
  }
75
69
 
76
- export function isGCloudConfig(config: EnvConfig): config is K8sGCloudConfig {
77
- return config.K8S === 'gcloud';
70
+ /**
71
+ * Runs the Aztec binary
72
+ * @param args - The arguments to pass to the Aztec binary
73
+ * @param logger - The logger to use
74
+ * @param env - Optional environment variables to set for the process
75
+ * @returns The exit code of the Aztec binary
76
+ */
77
+ export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
78
+ return runScript('node', [getAztecBin(), ...args], logger, env);
78
79
  }
79
80
 
80
- export function setupEnvironment(env: unknown): EnvConfig {
81
- const config = envSchema.parse(env);
82
- if (isGCloudConfig(config)) {
83
- const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
84
- execSync(command);
85
- }
86
- return config;
81
+ export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
82
+ const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
83
+ return runScript(scriptPath, args, logger, env);
87
84
  }
88
85
 
89
86
  export async function startPortForward({
@@ -103,7 +100,7 @@ export async function startPortForward({
103
100
  }> {
104
101
  const hostPortAsString = hostPort ? hostPort.toString() : '';
105
102
 
106
- logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
103
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
107
104
 
108
105
  const process = spawn(
109
106
  'kubectl',
@@ -121,21 +118,20 @@ export async function startPortForward({
121
118
  const str = data.toString() as string;
122
119
  if (!isResolved && str.includes('Forwarding from')) {
123
120
  isResolved = true;
124
- logger.info(str);
121
+ logger.debug(`Port forward for ${resource}: ${str}`);
125
122
  const port = str.search(/:\d+/);
126
123
  if (port === -1) {
127
124
  throw new Error('Port not found in port forward output');
128
125
  }
129
126
  const portNumber = parseInt(str.slice(port + 1));
130
- logger.info(`Port forward connected: ${portNumber}`);
131
- logger.info(`Port forward connected: ${portNumber}`);
127
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
132
128
  resolve(portNumber);
133
129
  } else {
134
130
  logger.silent(str);
135
131
  }
136
132
  });
137
133
  process.stderr?.on('data', data => {
138
- logger.info(data.toString());
134
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
139
135
  // It's a strange thing:
140
136
  // If we don't pipe stderr, then the port forwarding does not work.
141
137
  // Log to silent because this doesn't actually report errors,
@@ -145,16 +141,16 @@ export async function startPortForward({
145
141
  process.on('close', () => {
146
142
  if (!isResolved) {
147
143
  isResolved = true;
148
- logger.warn('Port forward closed before connection established');
144
+ logger.warn(`Port forward for ${resource} closed before connection established`);
149
145
  resolve(0);
150
146
  }
151
147
  });
152
148
  process.on('error', error => {
153
- logger.error(`Port forward error: ${error}`);
149
+ logger.error(`Port forward for ${resource} error: ${error}`);
154
150
  resolve(0);
155
151
  });
156
152
  process.on('exit', code => {
157
- logger.info(`Port forward exited with code ${code}`);
153
+ logger.verbose(`Port forward for ${resource} exited with code ${code}`);
158
154
  resolve(0);
159
155
  });
160
156
  });
@@ -164,6 +160,22 @@ export async function startPortForward({
164
160
  return { process, port };
165
161
  }
166
162
 
163
+ export function startPortForwardForRPC(namespace: string) {
164
+ return startPortForward({
165
+ resource: `services/${namespace}-rpc-aztec-node`,
166
+ namespace,
167
+ containerPort: 8080,
168
+ });
169
+ }
170
+
171
+ export function startPortForwardForEthereum(namespace: string) {
172
+ return startPortForward({
173
+ resource: `services/${namespace}-eth-execution`,
174
+ namespace,
175
+ containerPort: 8545,
176
+ });
177
+ }
178
+
167
179
  export async function deleteResourceByName({
168
180
  resource,
169
181
  namespace,
@@ -187,12 +199,28 @@ export async function deleteResourceByLabel({
187
199
  resource,
188
200
  namespace,
189
201
  label,
202
+ timeout = '5m',
203
+ force = false,
190
204
  }: {
191
205
  resource: string;
192
206
  namespace: string;
193
207
  label: string;
208
+ timeout?: string;
209
+ force?: boolean;
194
210
  }) {
195
- const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true`;
211
+ // Check if the resource type exists before attempting to delete
212
+ try {
213
+ await execAsync(
214
+ `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
215
+ );
216
+ } catch (error) {
217
+ logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
218
+ return '';
219
+ }
220
+
221
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
222
+ force ? '--force' : ''
223
+ }`;
196
224
  logger.info(`command: ${command}`);
197
225
  const { stdout } = await execAsync(command);
198
226
  return stdout;
@@ -221,9 +249,18 @@ export function getChartDir(spartanDir: string, chartName: string) {
221
249
  return path.join(spartanDir.trim(), chartName);
222
250
  }
223
251
 
224
- function valuesToArgs(values: Record<string, string | number>) {
252
+ function shellQuote(value: string) {
253
+ // Single-quote safe shell escaping: ' -> '\''
254
+ return `'${value.replace(/'/g, "'\\''")}'`;
255
+ }
256
+
257
+ function valuesToArgs(values: Record<string, string | number | boolean>) {
225
258
  return Object.entries(values)
226
- .map(([key, value]) => `--set ${key}=${value}`)
259
+ .map(([key, value]) =>
260
+ typeof value === 'number' || typeof value === 'boolean'
261
+ ? `--set ${key}=${value}`
262
+ : `--set-string ${key}=${shellQuote(String(value))}`,
263
+ )
227
264
  .join(' ');
228
265
  }
229
266
 
@@ -241,7 +278,7 @@ function createHelmCommand({
241
278
  namespace: string;
242
279
  valuesFile: string | undefined;
243
280
  timeout: string;
244
- values: Record<string, string | number>;
281
+ values: Record<string, string | number | boolean>;
245
282
  reuseValues?: boolean;
246
283
  }) {
247
284
  const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -281,7 +318,7 @@ export async function installChaosMeshChart({
281
318
  valuesFile,
282
319
  helmChartDir,
283
320
  chaosMeshNamespace = 'chaos-mesh',
284
- timeout = '5m',
321
+ timeout = '10m',
285
322
  clean = true,
286
323
  values = {},
287
324
  logger,
@@ -300,18 +337,23 @@ export async function installChaosMeshChart({
300
337
  // uninstall the helm chart if it exists
301
338
  logger.info(`Uninstalling helm chart ${instanceName}`);
302
339
  await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
303
- // and delete the podchaos resource
304
- const deleteArgs = {
305
- resource: 'podchaos',
306
- namespace: chaosMeshNamespace,
307
- name: `${targetNamespace}-${instanceName}`,
340
+ // and delete the chaos-mesh resources created by this release
341
+ const deleteByLabel = async (resource: string) => {
342
+ const args = {
343
+ resource,
344
+ namespace: chaosMeshNamespace,
345
+ label: `app.kubernetes.io/instance=${instanceName}`,
346
+ } as const;
347
+ logger.info(`Deleting ${resource} resources for release ${instanceName}`);
348
+ await deleteResourceByLabel(args).catch(e => {
349
+ logger.error(`Error deleting ${resource}: ${e}`);
350
+ logger.info(`Force deleting ${resource}`);
351
+ return deleteResourceByLabel({ ...args, force: true });
352
+ });
308
353
  };
309
- logger.info(`Deleting podchaos resource`);
310
- await deleteResourceByName(deleteArgs).catch(e => {
311
- logger.error(`Error deleting podchaos resource: ${e}`);
312
- logger.info(`Force deleting podchaos resource`);
313
- return deleteResourceByName({ ...deleteArgs, force: true });
314
- });
354
+
355
+ await deleteByLabel('podchaos');
356
+ await deleteByLabel('networkchaos');
315
357
  }
316
358
 
317
359
  return execHelmCommand({
@@ -412,10 +454,12 @@ export function applyValidatorKill({
412
454
  namespace,
413
455
  spartanDir,
414
456
  logger,
457
+ values,
415
458
  }: {
416
459
  namespace: string;
417
460
  spartanDir: string;
418
461
  logger: Logger;
462
+ values?: Record<string, string | number>;
419
463
  }) {
420
464
  return installChaosMeshChart({
421
465
  instanceName: 'validator-kill',
@@ -423,6 +467,7 @@ export function applyValidatorKill({
423
467
  valuesFile: 'validator-kill.yaml',
424
468
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
425
469
  logger,
470
+ values,
426
471
  });
427
472
  }
428
473
 
@@ -469,12 +514,234 @@ export async function awaitL2BlockNumber(
469
514
 
470
515
  export async function restartBot(namespace: string, logger: Logger) {
471
516
  logger.info(`Restarting bot`);
472
- await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
517
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
473
518
  await sleep(10 * 1000);
474
- await waitForResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
519
+ // Some bot images may take time to report Ready due to heavy boot-time proving.
520
+ // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
521
+ await waitForResourceByLabel({
522
+ resource: 'pods',
523
+ namespace,
524
+ label: 'app.kubernetes.io/name=bot',
525
+ condition: 'PodReadyToStartContainers',
526
+ });
475
527
  logger.info(`Bot restarted`);
476
528
  }
477
529
 
530
+ /**
531
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
532
+ * Intended for test setup to enable L2 traffic generation only when needed.
533
+ */
534
+ export async function installTransferBot({
535
+ namespace,
536
+ spartanDir,
537
+ logger,
538
+ replicas = 1,
539
+ txIntervalSeconds = 10,
540
+ followChain = 'PENDING',
541
+ mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
542
+ mnemonicStartIndex,
543
+ botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
544
+ nodeUrl,
545
+ timeout = '15m',
546
+ reuseValues = true,
547
+ aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
548
+ }: {
549
+ namespace: string;
550
+ spartanDir: string;
551
+ logger: Logger;
552
+ replicas?: number;
553
+ txIntervalSeconds?: number;
554
+ followChain?: string;
555
+ mnemonic?: string;
556
+ mnemonicStartIndex?: number | string;
557
+ botPrivateKey?: string;
558
+ nodeUrl?: string;
559
+ timeout?: string;
560
+ reuseValues?: boolean;
561
+ aztecSlotDuration?: number;
562
+ }) {
563
+ const instanceName = `${namespace}-bot-transfers`;
564
+ const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
565
+ const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
566
+
567
+ logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
568
+
569
+ const values: Record<string, string | number | boolean> = {
570
+ 'bot.replicaCount': replicas,
571
+ 'bot.txIntervalSeconds': txIntervalSeconds,
572
+ 'bot.followChain': followChain,
573
+ 'bot.botPrivateKey': botPrivateKey,
574
+ 'bot.nodeUrl': resolvedNodeUrl,
575
+ 'bot.mnemonic': mnemonic,
576
+ 'bot.feePaymentMethod': 'fee_juice',
577
+ 'aztec.slotDuration': aztecSlotDuration,
578
+ // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
579
+ // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
580
+ 'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
581
+ // Provide L1 execution RPC for bridging fee juice
582
+ 'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
583
+ // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
584
+ 'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
585
+ };
586
+ // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
587
+ if (mnemonicStartIndex === undefined) {
588
+ values['bot.mnemonicStartIndex'] = 0;
589
+ }
590
+ // Also pass a funded private key directly if available
591
+ if (process.env.FUNDING_PRIVATE_KEY) {
592
+ values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
593
+ }
594
+ // Align bot image with the running network image: prefer env var, else detect from a validator pod
595
+ let repositoryFromEnv: string | undefined;
596
+ let tagFromEnv: string | undefined;
597
+ const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
598
+ if (aztecDockerImage && aztecDockerImage.includes(':')) {
599
+ const lastColon = aztecDockerImage.lastIndexOf(':');
600
+ repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
601
+ tagFromEnv = aztecDockerImage.slice(lastColon + 1);
602
+ }
603
+
604
+ let repository = repositoryFromEnv;
605
+ let tag = tagFromEnv;
606
+ if (!repository || !tag) {
607
+ try {
608
+ const { stdout } = await execAsync(
609
+ `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
610
+ );
611
+ const image = stdout.trim().replace(/^'|'$/g, '');
612
+ if (image && image.includes(':')) {
613
+ const lastColon = image.lastIndexOf(':');
614
+ repository = image.slice(0, lastColon);
615
+ tag = image.slice(lastColon + 1);
616
+ }
617
+ } catch (err) {
618
+ logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
619
+ }
620
+ }
621
+ if (repository && tag) {
622
+ values['global.aztecImage.repository'] = repository;
623
+ values['global.aztecImage.tag'] = tag;
624
+ }
625
+ if (mnemonicStartIndex !== undefined) {
626
+ values['bot.mnemonicStartIndex'] =
627
+ typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
628
+ }
629
+
630
+ await execHelmCommand({
631
+ instanceName,
632
+ helmChartDir,
633
+ namespace,
634
+ valuesFile: undefined,
635
+ timeout,
636
+ values: values as unknown as Record<string, string | number | boolean>,
637
+ reuseValues,
638
+ });
639
+
640
+ if (replicas > 0) {
641
+ await waitForResourceByLabel({
642
+ resource: 'pods',
643
+ namespace,
644
+ label: 'app.kubernetes.io/name=bot',
645
+ condition: 'PodReadyToStartContainers',
646
+ });
647
+ }
648
+ }
649
+
650
+ /**
651
+ * Uninstalls the transfer bot Helm release from the given namespace.
652
+ * Intended for test teardown to clean up bot resources.
653
+ */
654
+ export async function uninstallTransferBot(namespace: string, logger: Logger) {
655
+ const instanceName = `${namespace}-bot-transfers`;
656
+ logger.info(`Uninstalling transfer bot release ${instanceName}`);
657
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
658
+ // Ensure any leftover pods are removed
659
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
660
+ () => undefined,
661
+ );
662
+ }
663
+
664
+ /**
665
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
666
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
667
+ */
668
+ export async function setValidatorTxDrop({
669
+ namespace,
670
+ enabled,
671
+ probability,
672
+ logger,
673
+ }: {
674
+ namespace: string;
675
+ enabled: boolean;
676
+ probability: number;
677
+ logger: Logger;
678
+ }) {
679
+ const drop = enabled ? 'true' : 'false';
680
+ const prob = String(probability);
681
+
682
+ const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
683
+ let updated = false;
684
+ for (const selector of selectors) {
685
+ try {
686
+ const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
687
+ const names = list.stdout
688
+ .split('\n')
689
+ .map(s => s.trim())
690
+ .filter(Boolean);
691
+ if (names.length === 0) {
692
+ continue;
693
+ }
694
+ const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
695
+ logger.info(`command: ${cmd}`);
696
+ await execAsync(cmd);
697
+ updated = true;
698
+ } catch (e) {
699
+ logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
700
+ }
701
+ }
702
+
703
+ if (!updated) {
704
+ logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
705
+ return;
706
+ }
707
+
708
+ // Restart validator pods to ensure env vars take effect and wait for readiness
709
+ await restartValidators(namespace, logger);
710
+ }
711
+
712
+ export async function restartValidators(namespace: string, logger: Logger) {
713
+ const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
714
+ let any = false;
715
+ for (const selector of selectors) {
716
+ try {
717
+ const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
718
+ if (!stdout || stdout.trim().length === 0) {
719
+ continue;
720
+ }
721
+ any = true;
722
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
723
+ } catch (e) {
724
+ logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
725
+ }
726
+ }
727
+
728
+ if (!any) {
729
+ logger.warn(`No validator pods found to restart in ${namespace}.`);
730
+ return;
731
+ }
732
+
733
+ // Wait for either label to be Ready
734
+ for (const selector of selectors) {
735
+ try {
736
+ await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
737
+ return;
738
+ } catch {
739
+ // try next
740
+ }
741
+ }
742
+ logger.warn(`Validator pods did not report Ready; continuing.`);
743
+ }
744
+
478
745
  export async function enableValidatorDynamicBootNode(
479
746
  instanceName: string,
480
747
  namespace: string,
@@ -497,64 +764,109 @@ export async function enableValidatorDynamicBootNode(
497
764
  logger.info(`Validator dynamic boot node enabled`);
498
765
  }
499
766
 
500
- export async function runAlertCheck(config: EnvConfig, alerts: AlertConfig[], logger: Logger) {
501
- if (isK8sConfig(config)) {
502
- const { process, port } = await startPortForward({
503
- resource: `svc/metrics-grafana`,
504
- namespace: 'metrics',
505
- containerPort: config.CONTAINER_METRICS_PORT,
506
- });
507
- const alertChecker = new AlertChecker(logger, {
508
- grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
509
- grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`,
510
- });
511
- await alertChecker.runAlertCheck(alerts);
512
- process.kill();
513
- } else {
514
- logger.info('Not running alert check in non-k8s environment');
515
- }
767
+ export async function getSequencers(namespace: string) {
768
+ const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
769
+ const { stdout } = await execAsync(command);
770
+ const sequencers = stdout.split(' ');
771
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
772
+ return sequencers;
516
773
  }
517
774
 
518
- export async function updateSequencerConfig(url: string, config: Partial<SequencerConfig>) {
519
- const node = createAztecNodeClient(url);
520
- await node.setConfig(config);
775
+ export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
776
+ return withSequencersAdmin(env, async client => {
777
+ await client.setConfig(config);
778
+ return client.getConfig();
779
+ });
521
780
  }
522
781
 
523
- export async function getSequencers(namespace: string) {
524
- const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
525
- const { stdout } = await execAsync(command);
526
- return stdout.split(' ');
782
+ export function getSequencersConfig(env: TestConfig) {
783
+ return withSequencersAdmin(env, client => client.getConfig());
527
784
  }
528
785
 
529
- export async function updateK8sSequencersConfig(args: {
530
- containerPort: number;
531
- namespace: string;
532
- config: Partial<SequencerConfig>;
533
- }) {
534
- const { containerPort, namespace, config } = args;
786
+ export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
787
+ const adminContainerPort = 8880;
788
+ const namespace = env.NAMESPACE;
535
789
  const sequencers = await getSequencers(namespace);
790
+ const results = [];
791
+
536
792
  for (const sequencer of sequencers) {
537
793
  const { process, port } = await startPortForward({
538
794
  resource: `pod/${sequencer}`,
539
795
  namespace,
540
- containerPort,
796
+ containerPort: adminContainerPort,
541
797
  });
542
798
 
543
799
  const url = `http://localhost:${port}`;
544
- await updateSequencerConfig(url, config);
800
+ await retry(
801
+ () => fetch(`${url}/status`).then(res => res.status === 200),
802
+ 'forward node admin port',
803
+ makeBackoff([1, 1, 2, 6]),
804
+ logger,
805
+ true,
806
+ );
807
+ const client = createAztecNodeAdminClient(url);
808
+ results.push(await fn(client));
545
809
  process.kill();
546
810
  }
811
+
812
+ return results;
547
813
  }
548
814
 
549
- export async function updateSequencersConfig(env: EnvConfig, config: Partial<SequencerConfig>) {
550
- if (isK8sConfig(env)) {
551
- await updateK8sSequencersConfig({
552
- containerPort: env.CONTAINER_NODE_PORT,
553
- namespace: env.NAMESPACE,
554
- config,
815
+ /**
816
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
817
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
818
+ */
819
+ export async function getPublicViemClient(
820
+ env: TestConfig,
821
+ /** If set, will push the new process into it */
822
+ processes?: ChildProcess[],
823
+ ): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
824
+ const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
825
+ if (CREATE_ETH_DEVNET) {
826
+ logger.info(`Creating port forward to eth execution node`);
827
+ const { process, port } = await startPortForward({
828
+ resource: `svc/${NAMESPACE}-eth-execution`,
829
+ namespace: NAMESPACE,
830
+ containerPort: 8545,
555
831
  });
832
+ const url = `http://127.0.0.1:${port}`;
833
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
834
+ if (processes) {
835
+ processes.push(process);
836
+ }
837
+ return { url, client, process };
556
838
  } else {
557
- await updateSequencerConfig(env.NODE_URL, config);
839
+ logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
840
+ if (!L1_RPC_URLS_JSON) {
841
+ throw new Error(`L1_RPC_URLS_JSON is not defined`);
842
+ }
843
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
844
+ return { url: L1_RPC_URLS_JSON, client };
845
+ }
846
+ }
847
+
848
+ /** Queries an Aztec node for the L1 deployment addresses */
849
+ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
850
+ let forwardProcess: ChildProcess | undefined;
851
+ try {
852
+ const [sequencer] = await getSequencers(env.NAMESPACE);
853
+ const { process, port } = await startPortForward({
854
+ resource: `pod/${sequencer}`,
855
+ namespace: env.NAMESPACE,
856
+ containerPort: 8080,
857
+ });
858
+
859
+ forwardProcess = process;
860
+ const url = `http://127.0.0.1:${port}`;
861
+ const node = createAztecNodeClient(url);
862
+ return await retry(
863
+ () => node.getNodeInfo().then(i => i.l1ContractAddresses),
864
+ 'get node info',
865
+ makeBackoff([1, 3, 6]),
866
+ logger,
867
+ );
868
+ } finally {
869
+ forwardProcess?.kill();
558
870
  }
559
871
  }
560
872
 
@@ -580,3 +892,51 @@ export async function rollAztecPods(namespace: string) {
580
892
  await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
581
893
  await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
582
894
  }
895
+
896
+ /**
897
+ * Returns the absolute path to the git repository root
898
+ */
899
+ export function getGitProjectRoot(): string {
900
+ try {
901
+ const rootDir = execSync('git rev-parse --show-toplevel', {
902
+ encoding: 'utf-8',
903
+ stdio: ['ignore', 'pipe', 'ignore'],
904
+ }).trim();
905
+
906
+ return rootDir;
907
+ } catch (error) {
908
+ throw new Error(`Failed to determine git project root: ${error}`);
909
+ }
910
+ }
911
+
912
+ /** Returns a client to the RPC of the given sequencer (defaults to first) */
913
+ export async function getNodeClient(
914
+ env: TestConfig,
915
+ index: number = 0,
916
+ ): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
917
+ const namespace = env.NAMESPACE;
918
+ const containerPort = 8080;
919
+ const sequencers = await getSequencers(namespace);
920
+ const sequencer = sequencers[index];
921
+ if (!sequencer) {
922
+ throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
923
+ }
924
+
925
+ const { process, port } = await startPortForward({
926
+ resource: `pod/${sequencer}`,
927
+ namespace,
928
+ containerPort,
929
+ });
930
+
931
+ const url = `http://localhost:${port}`;
932
+ await retry(
933
+ () => fetch(`${url}/status`).then(res => res.status === 200),
934
+ 'forward port',
935
+ makeBackoff([1, 1, 2, 6]),
936
+ logger,
937
+ true,
938
+ );
939
+
940
+ const client = createAztecNodeClient(url);
941
+ return { node: client, port, process };
942
+ }