@aztec/end-to-end 0.0.1-commit.24de95ac → 0.0.1-commit.3469e52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. package/dest/bench/client_flows/benchmark.d.ts +3 -2
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
  3. package/dest/bench/client_flows/benchmark.js +21 -1
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +21 -15
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
  6. package/dest/bench/client_flows/client_flows_benchmark.js +116 -121
  7. package/dest/bench/client_flows/config.d.ts +1 -1
  8. package/dest/bench/client_flows/data_extractor.d.ts +1 -1
  9. package/dest/bench/client_flows/data_extractor.js +7 -27
  10. package/dest/bench/utils.d.ts +5 -5
  11. package/dest/bench/utils.d.ts.map +1 -1
  12. package/dest/bench/utils.js +18 -11
  13. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +6 -7
  14. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  15. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +98 -113
  16. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +19 -13
  17. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  18. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +91 -70
  19. package/dest/e2e_deploy_contract/deploy_test.d.ts +5 -4
  20. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  21. package/dest/e2e_deploy_contract/deploy_test.js +18 -13
  22. package/dest/e2e_epochs/epochs_test.d.ts +11 -9
  23. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  24. package/dest/e2e_epochs/epochs_test.js +19 -16
  25. package/dest/e2e_fees/bridging_race.notest.d.ts +1 -1
  26. package/dest/e2e_fees/bridging_race.notest.js +4 -6
  27. package/dest/e2e_fees/fees_test.d.ts +20 -16
  28. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  29. package/dest/e2e_fees/fees_test.js +127 -139
  30. package/dest/e2e_l1_publisher/write_json.d.ts +3 -3
  31. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
  32. package/dest/e2e_l1_publisher/write_json.js +23 -18
  33. package/dest/e2e_multi_validator/utils.d.ts +1 -1
  34. package/dest/e2e_multi_validator/utils.js +1 -1
  35. package/dest/e2e_nested_contract/nested_contract_test.d.ts +6 -9
  36. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  37. package/dest/e2e_nested_contract/nested_contract_test.js +32 -39
  38. package/dest/e2e_p2p/inactivity_slash_test.d.ts +3 -3
  39. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -1
  40. package/dest/e2e_p2p/inactivity_slash_test.js +7 -6
  41. package/dest/e2e_p2p/p2p_network.d.ts +225 -18
  42. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  43. package/dest/e2e_p2p/p2p_network.js +117 -110
  44. package/dest/e2e_p2p/shared.d.ts +6 -6
  45. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  46. package/dest/e2e_p2p/shared.js +6 -5
  47. package/dest/e2e_token_contract/token_contract_test.d.ts +16 -9
  48. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  49. package/dest/e2e_token_contract/token_contract_test.js +90 -92
  50. package/dest/fixtures/e2e_prover_test.d.ts +12 -18
  51. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  52. package/dest/fixtures/e2e_prover_test.js +98 -109
  53. package/dest/fixtures/fixtures.d.ts +2 -3
  54. package/dest/fixtures/fixtures.d.ts.map +1 -1
  55. package/dest/fixtures/fixtures.js +2 -3
  56. package/dest/fixtures/get_acvm_config.d.ts +1 -1
  57. package/dest/fixtures/get_acvm_config.js +1 -1
  58. package/dest/fixtures/get_bb_config.d.ts +1 -1
  59. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  60. package/dest/fixtures/index.d.ts +1 -1
  61. package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
  62. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  63. package/dest/fixtures/l1_to_l2_messaging.js +2 -2
  64. package/dest/fixtures/logging.d.ts +1 -1
  65. package/dest/fixtures/setup.d.ts +216 -0
  66. package/dest/fixtures/setup.d.ts.map +1 -0
  67. package/dest/fixtures/setup.js +684 -0
  68. package/dest/fixtures/setup_p2p_test.d.ts +4 -4
  69. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  70. package/dest/fixtures/setup_p2p_test.js +18 -10
  71. package/dest/fixtures/token_utils.d.ts +5 -2
  72. package/dest/fixtures/token_utils.d.ts.map +1 -1
  73. package/dest/fixtures/token_utils.js +7 -4
  74. package/dest/fixtures/utils.d.ts +5 -192
  75. package/dest/fixtures/utils.d.ts.map +1 -1
  76. package/dest/fixtures/utils.js +4 -648
  77. package/dest/fixtures/web3signer.d.ts +1 -1
  78. package/dest/fixtures/web3signer.js +1 -1
  79. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  80. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  81. package/dest/fixtures/with_telemetry_utils.js +2 -2
  82. package/dest/index.d.ts +1 -1
  83. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  84. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  85. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  86. package/dest/quality_of_service/prometheus_client.d.ts +38 -0
  87. package/dest/quality_of_service/prometheus_client.d.ts.map +1 -0
  88. package/dest/quality_of_service/prometheus_client.js +67 -0
  89. package/dest/shared/cross_chain_test_harness.d.ts +5 -3
  90. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  91. package/dest/shared/cross_chain_test_harness.js +3 -3
  92. package/dest/shared/gas_portal_test_harness.d.ts +2 -2
  93. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  94. package/dest/shared/gas_portal_test_harness.js +1 -1
  95. package/dest/shared/index.d.ts +2 -2
  96. package/dest/shared/index.d.ts.map +1 -1
  97. package/dest/shared/jest_setup.d.ts +1 -1
  98. package/dest/shared/submit-transactions.d.ts +1 -1
  99. package/dest/shared/submit-transactions.d.ts.map +1 -1
  100. package/dest/shared/uniswap_l1_l2.d.ts +3 -27
  101. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  102. package/dest/shared/uniswap_l1_l2.js +43 -23
  103. package/dest/simulators/index.d.ts +1 -1
  104. package/dest/simulators/lending_simulator.d.ts +2 -2
  105. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  106. package/dest/simulators/lending_simulator.js +5 -3
  107. package/dest/simulators/token_simulator.d.ts +1 -1
  108. package/dest/simulators/token_simulator.d.ts.map +1 -1
  109. package/dest/spartan/setup_test_wallets.d.ts +8 -5
  110. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  111. package/dest/spartan/setup_test_wallets.js +45 -10
  112. package/dest/spartan/tx_metrics.d.ts +52 -0
  113. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  114. package/dest/spartan/tx_metrics.js +248 -0
  115. package/dest/spartan/utils.d.ts +66 -24
  116. package/dest/spartan/utils.d.ts.map +1 -1
  117. package/dest/spartan/utils.js +326 -133
  118. package/package.json +43 -40
  119. package/src/bench/client_flows/benchmark.ts +24 -2
  120. package/src/bench/client_flows/client_flows_benchmark.ts +157 -162
  121. package/src/bench/client_flows/data_extractor.ts +6 -28
  122. package/src/bench/utils.ts +22 -14
  123. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +107 -142
  124. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +140 -124
  125. package/src/e2e_deploy_contract/deploy_test.ts +22 -15
  126. package/src/e2e_epochs/epochs_test.ts +39 -25
  127. package/src/e2e_fees/bridging_race.notest.ts +4 -7
  128. package/src/e2e_fees/fees_test.ts +180 -215
  129. package/src/e2e_l1_publisher/write_json.ts +26 -20
  130. package/src/e2e_multi_validator/utils.ts +1 -1
  131. package/src/e2e_nested_contract/nested_contract_test.ts +35 -55
  132. package/src/e2e_p2p/inactivity_slash_test.ts +10 -9
  133. package/src/e2e_p2p/p2p_network.ts +175 -180
  134. package/src/e2e_p2p/shared.ts +15 -7
  135. package/src/e2e_token_contract/token_contract_test.ts +105 -118
  136. package/src/fixtures/e2e_prover_test.ts +120 -153
  137. package/src/fixtures/fixtures.ts +2 -5
  138. package/src/fixtures/get_acvm_config.ts +1 -1
  139. package/src/fixtures/l1_to_l2_messaging.ts +4 -2
  140. package/src/fixtures/setup.ts +1010 -0
  141. package/src/fixtures/setup_p2p_test.ts +23 -9
  142. package/src/fixtures/token_utils.ts +4 -4
  143. package/src/fixtures/utils.ts +27 -947
  144. package/src/fixtures/web3signer.ts +1 -1
  145. package/src/fixtures/with_telemetry_utils.ts +2 -2
  146. package/src/guides/up_quick_start.sh +1 -1
  147. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +1 -1
  148. package/src/quality_of_service/prometheus_client.ts +113 -0
  149. package/src/shared/cross_chain_test_harness.ts +6 -9
  150. package/src/shared/gas_portal_test_harness.ts +2 -2
  151. package/src/shared/index.ts +1 -1
  152. package/src/shared/uniswap_l1_l2.ts +53 -67
  153. package/src/simulators/lending_simulator.ts +6 -4
  154. package/src/spartan/DEVELOP.md +7 -0
  155. package/src/spartan/setup_test_wallets.ts +56 -13
  156. package/src/spartan/tx_metrics.ts +231 -0
  157. package/src/spartan/utils.ts +379 -75
  158. package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
  159. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  160. package/dest/fixtures/setup_l1_contracts.js +0 -17
  161. package/dest/fixtures/snapshot_manager.d.ts +0 -95
  162. package/dest/fixtures/snapshot_manager.d.ts.map +0 -1
  163. package/dest/fixtures/snapshot_manager.js +0 -505
  164. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  165. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  166. package/src/fixtures/setup_l1_contracts.ts +0 -26
  167. package/src/fixtures/snapshot_manager.ts +0 -665
@@ -1,7 +1,10 @@
  import { createLogger } from '@aztec/aztec.js/log';
  import type { RollupCheatCodes } from '@aztec/aztec/testing';
- import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+ import type { ViemPublicClient } from '@aztec/ethereum/types';
+ import type { CheckpointNumber } from '@aztec/foundation/branded-types';
  import type { Logger } from '@aztec/foundation/log';
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
  import { makeBackoff, retry } from '@aztec/foundation/retry';
  import { schemas } from '@aztec/foundation/schemas';
  import { sleep } from '@aztec/foundation/sleep';
@@ -29,7 +32,9 @@ const testConfigSchema = z.object({
    L1_RPC_URLS_JSON: z.string().optional(),
    L1_ACCOUNT_MNEMONIC: z.string().optional(),
    AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+   AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
    AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
+   AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2),
  });

  export type TestConfig = z.infer<typeof testConfigSchema>;
@@ -113,7 +118,7 @@ export async function startPortForward({
    );

    let isResolved = false;
-   const connected = new Promise<number>(resolve => {
+   const connected = new Promise<number>((resolve, reject) => {
      process.stdout?.on('data', data => {
        const str = data.toString() as string;
        if (!isResolved && str.includes('Forwarding from')) {
@@ -121,7 +126,8 @@ export async function startPortForward({
          logger.debug(`Port forward for ${resource}: ${str}`);
          const port = str.search(/:\d+/);
          if (port === -1) {
-           throw new Error('Port not found in port forward output');
+           reject(new Error('Port not found in port forward output'));
+           return;
          }
          const portNumber = parseInt(str.slice(port + 1));
          logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
@@ -141,17 +147,26 @@ export async function startPortForward({
      process.on('close', () => {
        if (!isResolved) {
          isResolved = true;
-         logger.warn(`Port forward for ${resource} closed before connection established`);
-         resolve(0);
+         const msg = `Port forward for ${resource} closed before connection established`;
+         logger.warn(msg);
+         reject(new Error(msg));
        }
      });
      process.on('error', error => {
-       logger.error(`Port forward for ${resource} error: ${error}`);
-       resolve(0);
+       if (!isResolved) {
+         isResolved = true;
+         const msg = `Port forward for ${resource} error: ${error}`;
+         logger.error(msg);
+         reject(new Error(msg));
+       }
      });
      process.on('exit', code => {
-       logger.verbose(`Port forward for ${resource} exited with code ${code}`);
-       resolve(0);
+       if (!isResolved) {
+         isResolved = true;
+         const msg = `Port forward for ${resource} exited with code ${code}`;
+         logger.verbose(msg);
+         reject(new Error(msg));
+       }
      });
    });

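With the connection promise now rejecting on close/error/exit instead of resolving to port 0, callers can handle failures explicitly. A minimal sketch of a hypothetical caller (the relative import path is illustrative):

```ts
// Hypothetical caller of the updated startPortForwardForRPC export.
import { startPortForwardForRPC } from './utils.js';

async function connectToRpc(namespace: string) {
  try {
    // Resolves with the local port once kubectl prints "Forwarding from ...";
    // now rejects (rather than resolving 0) if the forward closes, errors, or exits early.
    const { process, port } = await startPortForwardForRPC(namespace);
    return { process, url: `http://127.0.0.1:${port}` };
  } catch (err) {
    // Failures surface here instead of as a bogus port 0.
    throw new Error(`Could not port-forward RPC for ${namespace}: ${err}`);
  }
}
```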
@@ -160,9 +175,50 @@ export async function startPortForward({
    return { process, port };
  }

- export function startPortForwardForRPC(namespace: string) {
+ export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
+   const { promise, resolve, reject } = promiseWithResolvers<string>();
+   const process = spawn(
+     'kubectl',
+     [
+       'get',
+       'service',
+       '-n',
+       namespace,
+       `${namespace}-${serviceName}`,
+       '--output',
+       "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
+     ],
+     {
+       stdio: 'pipe',
+     },
+   );
+
+   let ip = '';
+   process.stdout.on('data', data => {
+     ip += data;
+   });
+   process.on('error', err => {
+     reject(err);
+   });
+   process.on('exit', () => {
+     // kubectl prints JSON. Remove the quotes
+     resolve(ip.replace(/"|'/g, ''));
+   });
+
+   return promise;
+ }
+
+ export function startPortForwardForPrometeheus(namespace: string) {
    return startPortForward({
-     resource: `services/${namespace}-rpc-aztec-node`,
+     resource: `svc/${namespace}-prometheus-server`,
+     namespace,
+     containerPort: 80,
+   });
+ }
+
+ export function startPortForwardForRPC(namespace: string, index = 0) {
+   return startPortForward({
+     resource: `pod/${namespace}-rpc-aztec-node-${index}`,
      namespace,
      containerPort: 8080,
    });
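The new `getExternalIP` helper shells out to kubectl and resolves the LoadBalancer ingress IP of the `<namespace>-<serviceName>` service. A hypothetical call (namespace and service name illustrative):

```ts
// Equivalent to:
//   kubectl get service -n my-ns my-ns-rpc-aztec-node \
//     --output "jsonpath='{.status.loadBalancer.ingress[0].ip}'"
const ip = await getExternalIP('my-ns', 'rpc-aztec-node');
console.log(`external IP: ${ip}`); // quotes around the jsonpath output are stripped
```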
@@ -208,11 +264,11 @@ export async function deleteResourceByLabel({
    timeout?: string;
    force?: boolean;
  }) {
-   // Check if the resource type exists before attempting to delete
    try {
-     await execAsync(
-       `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
-     );
+     // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+     const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+     const regex = `(^|\\.)${escaped}(\\.|$)`;
+     await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
    } catch (error) {
      logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
      return '';
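The rewritten check escapes the resource name and matches it as a dot-delimited segment, so both plain and group-qualified resource names pass while partial names do not. A standalone sketch of the matching logic:

```ts
// Mirrors the escaping and pattern built above.
const resource = 'podchaos';
const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
const regex = new RegExp(`(^|\\.)${escaped}(\\.|$)`);

console.log(regex.test('podchaos')); // true: plain name
console.log(regex.test('podchaos.chaos-mesh.org')); // true: group-qualified name
console.log(regex.test('iopodchaos.chaos-mesh.org')); // false: no partial-name match
```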
@@ -245,6 +301,58 @@ export async function waitForResourceByLabel({
    return stdout;
  }

+ export async function waitForResourceByName({
+   resource,
+   name,
+   namespace,
+   condition = 'Ready',
+   timeout = '10m',
+ }: {
+   resource: string;
+   name: string;
+   namespace: string;
+   condition?: string;
+   timeout?: string;
+ }) {
+   const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+   logger.info(`command: ${command}`);
+   const { stdout } = await execAsync(command);
+   return stdout;
+ }
+
+ export async function waitForResourcesByName({
+   resource,
+   names,
+   namespace,
+   condition = 'Ready',
+   timeout = '10m',
+ }: {
+   resource: string;
+   names: string[];
+   namespace: string;
+   condition?: string;
+   timeout?: string;
+ }) {
+   if (!names.length) {
+     throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+   }
+
+   // Wait all in parallel; if any fails, surface which one.
+   await Promise.all(
+     names.map(async name => {
+       try {
+         await waitForResourceByName({ resource, name, namespace, condition, timeout });
+       } catch (err) {
+         throw new Error(
+           `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
+             err,
+           )}`,
+         );
+       }
+     }),
+   );
+ }
+
  export function getChartDir(spartanDir: string, chartName: string) {
    return path.join(spartanDir.trim(), chartName);
  }
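A hypothetical use of the new parallel waiter (pod names and namespace illustrative):

```ts
// Waits on both named pods concurrently; a failure names the offending pod
// instead of surfacing as an anonymous Promise.all rejection.
await waitForResourcesByName({
  resource: 'pods',
  names: ['my-ns-rpc-aztec-node-0', 'my-ns-rpc-aztec-node-1'],
  namespace: 'my-ns',
  condition: 'Ready',
  timeout: '5m',
});
```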
@@ -295,6 +403,61 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
    return stdout;
  }

+ async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
+   try {
+     const { stdout } = await execAsync(
+       `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
+     );
+     const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
+     const row = parsed.find(r => r.name === instanceName);
+     return row?.status;
+   } catch {
+     return undefined;
+   }
+ }
+
+ async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
+   const labelSelector = `owner=helm,name=${instanceName}`;
+   const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+   logger.warn(`Force deleting Helm release record: ${cmd}`);
+   await execAsync(cmd).catch(() => undefined);
+ }
+
+ async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
+   try {
+     const status = await getHelmReleaseStatus(instanceName, namespace);
+     return status?.toLowerCase() === 'deployed';
+   } catch {
+     return false;
+   }
+ }
+
+ export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
+   // uninstall the helm chart if it exists
+   logger.info(`Uninstalling helm chart ${instanceName}`);
+   await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+   // and delete the chaos-mesh resources created by this release
+   const deleteByLabel = async (resource: string) => {
+     const args = {
+       resource,
+       namespace: namespace,
+       label: `app.kubernetes.io/instance=${instanceName}`,
+     } as const;
+     logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+     await deleteResourceByLabel(args).catch(e => {
+       logger.error(`Error deleting ${resource}: ${e}`);
+       logger.info(`Force deleting ${resource}`);
+       return deleteResourceByLabel({ ...args, force: true });
+     });
+   };
+
+   await deleteByLabel('podchaos');
+   await deleteByLabel('networkchaos');
+   await deleteByLabel('podnetworkchaos');
+   await deleteByLabel('workflows');
+   await deleteByLabel('workflownodes');
+ }
+
  /**
   * Installs a Helm chart with the given parameters.
   * @param instanceName - The name of the Helm chart instance.
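`getHelmReleaseStatus` parses `helm list -o json`, whose rows carry the release name and status; later hunks gate the reinstall and `--reuse-values` behavior on that status. A sketch of how it is consumed inside the module (release name and namespace illustrative):

```ts
// `helm list -o json` emits rows like:
//   [{"name":"transfer-bot","namespace":"my-ns","status":"failed", ...}]
const status = await getHelmReleaseStatus('transfer-bot', 'my-ns');
if (status && status.toLowerCase() !== 'deployed') {
  // A 'failed' or 'pending-install' record makes `helm upgrade --install`
  // fail with "has no deployed releases", so callers uninstall and reinstall cleanly.
}
```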
@@ -317,7 +480,6 @@ export async function installChaosMeshChart({
    targetNamespace,
    valuesFile,
    helmChartDir,
-   chaosMeshNamespace = 'chaos-mesh',
    timeout = '10m',
    clean = true,
    values = {},
@@ -334,32 +496,13 @@ export async function installChaosMeshChart({
    logger: Logger;
  }) {
    if (clean) {
-     // uninstall the helm chart if it exists
-     logger.info(`Uninstalling helm chart ${instanceName}`);
-     await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-     // and delete the chaos-mesh resources created by this release
-     const deleteByLabel = async (resource: string) => {
-       const args = {
-         resource,
-         namespace: chaosMeshNamespace,
-         label: `app.kubernetes.io/instance=${instanceName}`,
-       } as const;
-       logger.info(`Deleting ${resource} resources for release ${instanceName}`);
-       await deleteResourceByLabel(args).catch(e => {
-         logger.error(`Error deleting ${resource}: ${e}`);
-         logger.info(`Force deleting ${resource}`);
-         return deleteResourceByLabel({ ...args, force: true });
-       });
-     };
-
-     await deleteByLabel('podchaos');
-     await deleteByLabel('networkchaos');
+     await uninstallChaosMesh(instanceName, targetNamespace, logger);
    }

    return execHelmCommand({
      instanceName,
      helmChartDir,
-     namespace: chaosMeshNamespace,
+     namespace: targetNamespace,
      valuesFile,
      timeout,
      values: { ...values, 'global.targetNamespace': targetNamespace },
@@ -389,22 +532,49 @@ export function applyProverFailure({
    });
  }

+ export function applyValidatorFailure({
+   namespace,
+   spartanDir,
+   logger,
+   values,
+   instanceName,
+ }: {
+   namespace: string;
+   spartanDir: string;
+   logger: Logger;
+   values?: Record<string, string | number>;
+   instanceName?: string;
+ }) {
+   return installChaosMeshChart({
+     instanceName: instanceName ?? 'validator-failure',
+     targetNamespace: namespace,
+     valuesFile: 'validator-failure.yaml',
+     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     values,
+     logger,
+   });
+ }
+
  export function applyProverKill({
    namespace,
    spartanDir,
    logger,
+   values,
  }: {
    namespace: string;
    spartanDir: string;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
      instanceName: 'prover-kill',
      targetNamespace: namespace,
      valuesFile: 'prover-kill.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     chaosMeshNamespace: namespace,
      clean: true,
      logger,
+     values,
    });
  }

@@ -412,10 +582,12 @@ export function applyProverBrokerKill({
    namespace,
    spartanDir,
    logger,
+   values,
  }: {
    namespace: string;
    spartanDir: string;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
      instanceName: 'prover-broker-kill',
@@ -424,66 +596,79 @@ export function applyProverBrokerKill({
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
      clean: true,
      logger,
+     values,
    });
  }

  export function applyBootNodeFailure({
+   instanceName = 'boot-node-failure',
    namespace,
    spartanDir,
    durationSeconds,
    logger,
+   values,
  }: {
+   instanceName?: string;
    namespace: string;
    spartanDir: string;
    durationSeconds: number;
    logger: Logger;
+   values?: Record<string, string | number>;
  }) {
    return installChaosMeshChart({
-     instanceName: 'boot-node-failure',
+     instanceName,
      targetNamespace: namespace,
      valuesFile: 'boot-node-failure.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
      values: {
        'bootNodeFailure.duration': `${durationSeconds}s`,
+       ...(values ?? {}),
      },
      logger,
    });
  }

  export function applyValidatorKill({
+   instanceName = 'validator-kill',
    namespace,
    spartanDir,
    logger,
    values,
+   clean = true,
  }: {
+   instanceName?: string;
    namespace: string;
    spartanDir: string;
    logger: Logger;
    values?: Record<string, string | number>;
+   clean?: boolean;
  }) {
    return installChaosMeshChart({
-     instanceName: 'validator-kill',
+     instanceName: instanceName ?? 'validator-kill',
      targetNamespace: namespace,
      valuesFile: 'validator-kill.yaml',
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+     clean,
      logger,
      values,
    });
  }

  export function applyNetworkShaping({
+   instanceName = 'network-shaping',
    valuesFile,
    namespace,
    spartanDir,
    logger,
  }: {
+   instanceName?: string;
    valuesFile: string;
    namespace: string;
    spartanDir: string;
    logger: Logger;
  }) {
    return installChaosMeshChart({
-     instanceName: 'network-shaping',
+     instanceName,
      targetNamespace: namespace,
      valuesFile,
      helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
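With `instanceName` now a parameter, two chaos experiments of the same kind can coexist in one namespace as separate Helm releases. A hypothetical example (namespace and path illustrative):

```ts
// Two independent validator-kill scenarios side by side; `clean: false` on the
// second avoids tearing down the first release's chaos resources.
await applyValidatorKill({
  instanceName: 'validator-kill-a',
  namespace: 'my-ns',
  spartanDir: '/repo/spartan',
  logger,
});
await applyValidatorKill({
  instanceName: 'validator-kill-b',
  namespace: 'my-ns',
  spartanDir: '/repo/spartan',
  clean: false,
  logger,
});
```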
@@ -491,24 +676,24 @@ export function applyNetworkShaping({
    });
  }

- export async function awaitL2BlockNumber(
+ export async function awaitCheckpointNumber(
    rollupCheatCodes: RollupCheatCodes,
-   blockNumber: bigint,
+   checkpointNumber: CheckpointNumber,
    timeoutSeconds: number,
    logger: Logger,
  ) {
-   logger.info(`Waiting for L2 Block ${blockNumber}`);
+   logger.info(`Waiting for checkpoint ${checkpointNumber}`);
    let tips = await rollupCheatCodes.getTips();
    const endTime = Date.now() + timeoutSeconds * 1000;
-   while (tips.pending < blockNumber && Date.now() < endTime) {
-     logger.info(`At L2 Block ${tips.pending}`);
+   while (tips.pending < checkpointNumber && Date.now() < endTime) {
+     logger.info(`At checkpoint ${tips.pending}`);
      await sleep(1000);
      tips = await rollupCheatCodes.getTips();
    }
-   if (tips.pending < blockNumber) {
-     throw new Error(`Timeout waiting for L2 Block ${blockNumber}, only reached ${tips.pending}`);
+   if (tips.pending < checkpointNumber) {
+     throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
    } else {
-     logger.info(`Reached L2 Block ${tips.pending}`);
+     logger.info(`Reached checkpoint ${tips.pending}`);
    }
  }

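The rename tracks the L2-block → checkpoint terminology change, and the branded `CheckpointNumber` type keeps raw bigints from being passed by accident. A hypothetical call site, given a `RollupCheatCodes` handle and a logger (the cast stands in for however the package constructs the branded type):

```ts
// Poll the rollup's pending tip until checkpoint 20, or time out after 10 minutes.
const target = 20 as unknown as CheckpointNumber; // construction is package-specific
await awaitCheckpointNumber(rollupCheatCodes, target, 10 * 60, logger);
```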
@@ -582,6 +767,12 @@ export async function installTransferBot({
      'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
      // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
      'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+
+     // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+     // can be installed by users without cluster-scoped RBAC permissions.
+     'bot.rbac.create': false,
+     'bot.serviceAccount.create': false,
+     'bot.serviceAccount.name': 'default',
    };
    // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
    if (mnemonicStartIndex === undefined) {
@@ -606,7 +797,7 @@ export async function installTransferBot({
    if (!repository || !tag) {
      try {
        const { stdout } = await execAsync(
-         `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+         `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
        );
        const image = stdout.trim().replace(/^'|'$/g, '');
        if (image && image.includes(':')) {
@@ -627,6 +818,26 @@ export async function installTransferBot({
        typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
    }

+   // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+   // `helm upgrade --install` can error with "has no deployed releases".
+   // In that case, clear the release record and do a clean install.
+   const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+   if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+     logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+     await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
+       () => undefined,
+     );
+     // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+     const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+     if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+       await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+     }
+   }
+
+   // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+   // Only reuse values when we have a deployed release to reuse from.
+   const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
+
    await execHelmCommand({
      instanceName,
      helmChartDir,
@@ -634,7 +845,7 @@ export async function installTransferBot({
      valuesFile: undefined,
      timeout,
      values: values as unknown as Record<string, string | number | boolean>,
-     reuseValues,
+     reuseValues: effectiveReuseValues,
    });

    if (replicas > 0) {
@@ -679,7 +890,7 @@ export async function setValidatorTxDrop({
    const drop = enabled ? 'true' : 'false';
    const prob = String(probability);

-   const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+   const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
    let updated = false;
    for (const selector of selectors) {
      try {
@@ -710,7 +921,7 @@ export async function setValidatorTxDrop({
  }

  export async function restartValidators(namespace: string, logger: Logger) {
-   const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+   const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
    let any = false;
    for (const selector of selectors) {
      try {
@@ -765,11 +976,33 @@ export async function enableValidatorDynamicBootNode(
  }

  export async function getSequencers(namespace: string) {
-   const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
-   const { stdout } = await execAsync(command);
-   const sequencers = stdout.split(' ');
-   logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
-   return sequencers;
+   const selectors = [
+     'app.kubernetes.io/name=validator',
+     'app.kubernetes.io/component=validator',
+     'app.kubernetes.io/component=sequencer-node',
+     'app=validator',
+   ];
+   for (const selector of selectors) {
+     try {
+       const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+       const { stdout } = await execAsync(command);
+       const sequencers = stdout
+         .split(' ')
+         .map(s => s.trim())
+         .filter(Boolean);
+       if (sequencers.length > 0) {
+         logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+         return sequencers;
+       }
+     } catch {
+       // try next selector
+     }
+   }
+
+   // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+   throw new Error(
+     `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+   );
  }

  export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
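The fail-fast comment points at a real JavaScript footgun: splitting an empty kubectl response yields `['']`, not `[]`. A standalone demonstration:

```ts
// Why the old code could return [''] and later try to port-forward `pod/`:
const stdout = '';
console.log(stdout.split(' ')); // [''] — one empty entry, not an empty array
console.log(
  stdout
    .split(' ')
    .map(s => s.trim())
    .filter(Boolean),
); // [] — the new trim-and-filter removes it
```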
@@ -830,7 +1063,7 @@ export async function getPublicViemClient(
      containerPort: 8545,
    });
    const url = `http://127.0.0.1:${port}`;
-   const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+   const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
    if (processes) {
      processes.push(process);
    }
@@ -840,7 +1073,9 @@ export async function getPublicViemClient(
    if (!L1_RPC_URLS_JSON) {
      throw new Error(`L1_RPC_URLS_JSON is not defined`);
    }
-   const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+   const client: ViemPublicClient = createPublicClient({
+     transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
+   });
    return { url: L1_RPC_URLS_JSON, client };
  }
}
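Both branches now pass `batch: false` to viem's HTTP transport, making it explicit that each JSON-RPC call goes out as its own HTTP request rather than a batched payload. A self-contained sketch (URL illustrative; the motivation stated here is an assumption):

```ts
import { createPublicClient, fallback, http } from 'viem';

// One HTTP request per JSON-RPC call; batched payloads can behave poorly through
// kubectl port-forwards and simple proxies, which is presumably why this is disabled.
const client = createPublicClient({
  transport: fallback([http('http://127.0.0.1:8545', { batch: false })]),
});
const blockNumber = await client.getBlockNumber();
```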
@@ -873,24 +1108,93 @@ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1Contr
  /**
   * Rolls the Aztec pods in the given namespace.
   * @param namespace - The namespace to roll the Aztec pods in.
-  * @dev - IMPORTANT: This function DOES NOT delete the underlying PVCs.
-  * This means that the pods will be restarted with the same persistent storage.
-  * This is useful for testing, but you should be aware of the implications.
+  * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
+  * This is required for rollup upgrades where the old state is incompatible with the new rollup.
+  * Defaults to false, which preserves the existing storage.
   */
- export async function rollAztecPods(namespace: string) {
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=boot-node' });
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-node' });
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-broker' });
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-agent' });
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
-   await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
+ export async function rollAztecPods(namespace: string, clearState: boolean = false) {
+   // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
+   const podComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'prover-agent', 'sequencer-node', 'rpc'];
+   const pvcComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
+   // StatefulSet components that need to be scaled down before PVC deletion
+   // Note: validators use 'sequencer-node' as component label, not 'validator'
+   const statefulSetComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
+
+   if (clearState) {
+     // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
+     // Otherwise PVC deletion will hang waiting for pods to terminate
+
+     // First, save original replica counts
+     const originalReplicas: Map<string, number> = new Map();
+     for (const component of statefulSetComponents) {
+       try {
+         const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o jsonpath='{.items[0].spec.replicas}'`;
+         const { stdout } = await execAsync(getCmd);
+         const replicas = parseInt(stdout.replace(/'/g, '').trim(), 10);
+         if (!isNaN(replicas) && replicas > 0) {
+           originalReplicas.set(component, replicas);
+         }
+       } catch {
+         // Component might not exist, continue
+       }
+     }
+
+     // Scale down to 0
+     for (const component of statefulSetComponents) {
+       try {
+         const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
+         logger.info(`command: ${scaleCmd}`);
+         await execAsync(scaleCmd);
+       } catch (e) {
+         // Component might not exist or might be a Deployment, continue
+         logger.verbose(`Scale down ${component} skipped: ${e}`);
+       }
+     }
+
+     // Wait for pods to terminate
+     await sleep(15 * 1000);
+
+     // Now delete PVCs (they should no longer be in use)
+     for (const component of pvcComponents) {
+       await deleteResourceByLabel({
+         resource: 'persistentvolumeclaims',
+         namespace: namespace,
+         label: `app.kubernetes.io/component=${component}`,
+       });
+     }
+
+     // Scale StatefulSets back up to original replica counts
+     for (const component of statefulSetComponents) {
+       const replicas = originalReplicas.get(component) ?? 1;
+       try {
+         const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=${replicas} --timeout=2m`;
+         logger.info(`command: ${scaleCmd}`);
+         await execAsync(scaleCmd);
+       } catch (e) {
+         logger.verbose(`Scale up ${component} skipped: ${e}`);
+       }
+     }
+   } else {
+     // Just delete pods (no state clearing)
+     for (const component of podComponents) {
+       await deleteResourceByLabel({
+         resource: 'pods',
+         namespace: namespace,
+         label: `app.kubernetes.io/component=${component}`,
+       });
+     }
+   }
+
    await sleep(10 * 1000);
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=boot-node' });
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-node' });
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-broker' });
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-agent' });
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
-   await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
+
+   // Wait for pods to come back
+   for (const component of podComponents) {
+     await waitForResourceByLabel({
+       resource: 'pods',
+       namespace: namespace,
+       label: `app.kubernetes.io/component=${component}`,
+     });
+   }
  }
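A hypothetical pair of call sites showing the new flag (namespace illustrative):

```ts
// Plain roll: restart every Aztec pod but keep its persistent volumes.
await rollAztecPods('my-ns');

// Rollup upgrade: scale StatefulSets down, delete PVCs, scale back up, wait for Ready.
await rollAztecPods('my-ns', true);
```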

  /**