@aztec/end-to-end 0.0.1-commit.d3ec352c → 0.0.1-commit.d431d1c

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. package/dest/bench/client_flows/benchmark.d.ts +3 -2
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
  3. package/dest/bench/client_flows/benchmark.js +21 -1
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +14 -15
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
  6. package/dest/bench/client_flows/client_flows_benchmark.js +110 -138
  7. package/dest/bench/client_flows/data_extractor.js +3 -1
  8. package/dest/bench/utils.d.ts +6 -6
  9. package/dest/bench/utils.d.ts.map +1 -1
  10. package/dest/bench/utils.js +18 -11
  11. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +6 -7
  12. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  13. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +98 -113
  14. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +19 -13
  15. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  16. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +91 -71
  17. package/dest/e2e_deploy_contract/deploy_test.d.ts +4 -3
  18. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  19. package/dest/e2e_deploy_contract/deploy_test.js +18 -13
  20. package/dest/e2e_epochs/epochs_test.d.ts +3 -2
  21. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  22. package/dest/e2e_epochs/epochs_test.js +13 -11
  23. package/dest/e2e_fees/bridging_race.notest.js +3 -5
  24. package/dest/e2e_fees/fees_test.d.ts +18 -15
  25. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  26. package/dest/e2e_fees/fees_test.js +126 -141
  27. package/dest/e2e_l1_publisher/write_json.d.ts +3 -3
  28. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
  29. package/dest/e2e_l1_publisher/write_json.js +19 -15
  30. package/dest/e2e_multi_validator/utils.js +1 -1
  31. package/dest/e2e_nested_contract/nested_contract_test.d.ts +6 -9
  32. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  33. package/dest/e2e_nested_contract/nested_contract_test.js +32 -40
  34. package/dest/e2e_p2p/inactivity_slash_test.d.ts +4 -4
  35. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -1
  36. package/dest/e2e_p2p/inactivity_slash_test.js +6 -9
  37. package/dest/e2e_p2p/p2p_network.d.ts +13 -11
  38. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  39. package/dest/e2e_p2p/p2p_network.js +116 -111
  40. package/dest/e2e_p2p/shared.d.ts +2 -2
  41. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  42. package/dest/e2e_p2p/shared.js +4 -4
  43. package/dest/e2e_token_contract/token_contract_test.d.ts +16 -9
  44. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  45. package/dest/e2e_token_contract/token_contract_test.js +90 -92
  46. package/dest/fixtures/e2e_prover_test.d.ts +10 -18
  47. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  48. package/dest/fixtures/e2e_prover_test.js +88 -103
  49. package/dest/fixtures/fixtures.d.ts +2 -3
  50. package/dest/fixtures/fixtures.d.ts.map +1 -1
  51. package/dest/fixtures/fixtures.js +1 -2
  52. package/dest/fixtures/get_acvm_config.js +1 -1
  53. package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
  54. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  55. package/dest/fixtures/l1_to_l2_messaging.js +2 -2
  56. package/dest/fixtures/setup.d.ts +216 -0
  57. package/dest/fixtures/setup.d.ts.map +1 -0
  58. package/dest/fixtures/setup.js +684 -0
  59. package/dest/fixtures/setup_p2p_test.js +3 -3
  60. package/dest/fixtures/utils.d.ts +5 -638
  61. package/dest/fixtures/utils.d.ts.map +1 -1
  62. package/dest/fixtures/utils.js +4 -647
  63. package/dest/fixtures/web3signer.js +1 -1
  64. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  65. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  66. package/dest/fixtures/with_telemetry_utils.js +2 -2
  67. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  68. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  69. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  70. package/dest/quality_of_service/prometheus_client.d.ts +38 -0
  71. package/dest/quality_of_service/prometheus_client.d.ts.map +1 -0
  72. package/dest/quality_of_service/prometheus_client.js +67 -0
  73. package/dest/shared/cross_chain_test_harness.d.ts +16 -4
  74. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  75. package/dest/shared/cross_chain_test_harness.js +3 -3
  76. package/dest/shared/gas_portal_test_harness.d.ts +12 -2
  77. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  78. package/dest/shared/index.d.ts +2 -2
  79. package/dest/shared/index.d.ts.map +1 -1
  80. package/dest/shared/uniswap_l1_l2.d.ts +3 -27
  81. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  82. package/dest/shared/uniswap_l1_l2.js +43 -23
  83. package/dest/simulators/lending_simulator.d.ts +6 -2
  84. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  85. package/dest/simulators/lending_simulator.js +1 -1
  86. package/dest/spartan/setup_test_wallets.d.ts +4 -3
  87. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  88. package/dest/spartan/setup_test_wallets.js +2 -1
  89. package/dest/spartan/tx_metrics.d.ts +52 -0
  90. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  91. package/dest/spartan/tx_metrics.js +248 -0
  92. package/dest/spartan/utils.d.ts +51 -12
  93. package/dest/spartan/utils.d.ts.map +1 -1
  94. package/dest/spartan/utils.js +262 -102
  95. package/package.json +40 -39
  96. package/src/bench/client_flows/benchmark.ts +24 -2
  97. package/src/bench/client_flows/client_flows_benchmark.ts +150 -200
  98. package/src/bench/client_flows/data_extractor.ts +1 -1
  99. package/src/bench/utils.ts +22 -14
  100. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +107 -142
  101. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +140 -125
  102. package/src/e2e_deploy_contract/deploy_test.ts +21 -14
  103. package/src/e2e_epochs/epochs_test.ts +26 -13
  104. package/src/e2e_fees/bridging_race.notest.ts +3 -6
  105. package/src/e2e_fees/fees_test.ts +177 -216
  106. package/src/e2e_l1_publisher/write_json.ts +22 -17
  107. package/src/e2e_multi_validator/utils.ts +1 -1
  108. package/src/e2e_nested_contract/nested_contract_test.ts +35 -56
  109. package/src/e2e_p2p/inactivity_slash_test.ts +9 -12
  110. package/src/e2e_p2p/p2p_network.ts +174 -183
  111. package/src/e2e_p2p/shared.ts +11 -6
  112. package/src/e2e_token_contract/token_contract_test.ts +105 -118
  113. package/src/fixtures/e2e_prover_test.ts +112 -144
  114. package/src/fixtures/fixtures.ts +1 -3
  115. package/src/fixtures/get_acvm_config.ts +1 -1
  116. package/src/fixtures/l1_to_l2_messaging.ts +4 -2
  117. package/src/fixtures/setup.ts +1010 -0
  118. package/src/fixtures/setup_p2p_test.ts +3 -3
  119. package/src/fixtures/utils.ts +27 -966
  120. package/src/fixtures/web3signer.ts +1 -1
  121. package/src/fixtures/with_telemetry_utils.ts +2 -2
  122. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +1 -1
  123. package/src/quality_of_service/prometheus_client.ts +113 -0
  124. package/src/shared/cross_chain_test_harness.ts +6 -10
  125. package/src/shared/gas_portal_test_harness.ts +1 -1
  126. package/src/shared/index.ts +1 -1
  127. package/src/shared/uniswap_l1_l2.ts +53 -67
  128. package/src/simulators/lending_simulator.ts +2 -2
  129. package/src/spartan/setup_test_wallets.ts +9 -2
  130. package/src/spartan/tx_metrics.ts +231 -0
  131. package/src/spartan/utils.ts +308 -45
  132. package/dest/fixtures/setup_l1_contracts.d.ts +0 -477
  133. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  134. package/dest/fixtures/setup_l1_contracts.js +0 -17
  135. package/dest/fixtures/snapshot_manager.d.ts +0 -95
  136. package/dest/fixtures/snapshot_manager.d.ts.map +0 -1
  137. package/dest/fixtures/snapshot_manager.js +0 -505
  138. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  139. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  140. package/src/fixtures/setup_l1_contracts.ts +0 -26
  141. package/src/fixtures/snapshot_manager.ts +0 -665
@@ -1,6 +1,7 @@
  import { createLogger } from '@aztec/aztec.js/log';
  import type { RollupCheatCodes } from '@aztec/aztec/testing';
- import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+ import type { ViemPublicClient } from '@aztec/ethereum/types';
  import type { CheckpointNumber } from '@aztec/foundation/branded-types';
  import type { Logger } from '@aztec/foundation/log';
  import { promiseWithResolvers } from '@aztec/foundation/promise';
@@ -31,7 +32,9 @@ const testConfigSchema = z.object({
  L1_RPC_URLS_JSON: z.string().optional(),
  L1_ACCOUNT_MNEMONIC: z.string().optional(),
  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+ AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
+ AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2),
  });

  export type TestConfig = z.infer<typeof testConfigSchema>;
@@ -115,7 +118,7 @@ export async function startPortForward({
  );

  let isResolved = false;
- const connected = new Promise<number>(resolve => {
+ const connected = new Promise<number>((resolve, reject) => {
  process.stdout?.on('data', data => {
  const str = data.toString() as string;
  if (!isResolved && str.includes('Forwarding from')) {
@@ -123,7 +126,8 @@ export async function startPortForward({
  logger.debug(`Port forward for ${resource}: ${str}`);
  const port = str.search(/:\d+/);
  if (port === -1) {
- throw new Error('Port not found in port forward output');
+ reject(new Error('Port not found in port forward output'));
+ return;
  }
  const portNumber = parseInt(str.slice(port + 1));
  logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
@@ -143,17 +147,26 @@ export async function startPortForward({
  process.on('close', () => {
  if (!isResolved) {
  isResolved = true;
- logger.warn(`Port forward for ${resource} closed before connection established`);
- resolve(0);
+ const msg = `Port forward for ${resource} closed before connection established`;
+ logger.warn(msg);
+ reject(new Error(msg));
  }
  });
  process.on('error', error => {
- logger.error(`Port forward for ${resource} error: ${error}`);
- resolve(0);
+ if (!isResolved) {
+ isResolved = true;
+ const msg = `Port forward for ${resource} error: ${error}`;
+ logger.error(msg);
+ reject(new Error(msg));
+ }
  });
  process.on('exit', code => {
- logger.verbose(`Port forward for ${resource} exited with code ${code}`);
- resolve(0);
+ if (!isResolved) {
+ isResolved = true;
+ const msg = `Port forward for ${resource} exited with code ${code}`;
+ logger.verbose(msg);
+ reject(new Error(msg));
+ }
  });
  });

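With this change, startPortForward surfaces failures to its caller instead of silently resolving to port 0. A minimal sketch of what a call site can now do; the resolved { port } shape is inferred from surrounding usage in this file, not confirmed by the hunk, and the resource is illustrative:

    // Hypothetical caller: the promise rejects if kubectl closes, errors,
    // or exits before the forward is established.
    try {
      const { port } = await startPortForward({
        resource: 'svc/my-namespace-eth-execution',
        namespace: 'my-namespace',
        containerPort: 8545,
      });
      logger.info(`Forwarded locally on port ${port}`);
    } catch (err) {
      // Previously unreachable: the old code resolved with 0 on failure.
      logger.error(`Port forward failed: ${err}`);
    }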
@@ -195,6 +208,14 @@ export function getExternalIP(namespace: string, serviceName: string): Promise<s
  return promise;
  }

+ export function startPortForwardForPrometeheus(namespace: string) {
+ return startPortForward({
+ resource: `svc/${namespace}-prometheus-server`,
+ namespace,
+ containerPort: 80,
+ });
+ }
+
  export function startPortForwardForRPC(namespace: string, index = 0) {
  return startPortForward({
  resource: `pod/${namespace}-rpc-aztec-node-${index}`,
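Together with the new quality_of_service/prometheus_client.ts (file 123 above), this helper exposes the in-cluster Prometheus server to spartan tests. A hedged usage sketch that hits the standard Prometheus HTTP query API directly; the resolved port shape is assumed from startPortForward's usage elsewhere in this file:

    // Forward the Prometheus service locally and run an instant query.
    const { port } = await startPortForwardForPrometeheus('my-namespace');
    const res = await fetch(`http://127.0.0.1:${port}/api/v1/query?query=up`);
    const body = await res.json(); // { status: 'success', data: { result: [...] } }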
@@ -243,11 +264,11 @@ export async function deleteResourceByLabel({
  timeout?: string;
  force?: boolean;
  }) {
- // Check if the resource type exists before attempting to delete
  try {
- await execAsync(
- `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
- );
+ // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+ const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+ const regex = `(^|\\.)${escaped}(\\.|$)`;
+ await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
  } catch (error) {
  logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
  return '';
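The rewritten existence check matches kubectl api-resources output whether names are plain or group-qualified. A quick standalone illustration of the escaping and the pattern it yields (inputs hypothetical):

    // For resource = 'podchaos' the pattern becomes (^|\.)podchaos(\.|$).
    const resource = 'podchaos';
    const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
    const regex = new RegExp(`(^|\\.)${escaped}(\\.|$)`);
    regex.test('podchaos'); // true
    regex.test('podchaos.chaos-mesh.org'); // true
    regex.test('mypodchaos'); // false: not at a name or segment boundary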
@@ -280,6 +301,58 @@ export async function waitForResourceByLabel({
  return stdout;
  }

+ export async function waitForResourceByName({
+ resource,
+ name,
+ namespace,
+ condition = 'Ready',
+ timeout = '10m',
+ }: {
+ resource: string;
+ name: string;
+ namespace: string;
+ condition?: string;
+ timeout?: string;
+ }) {
+ const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+ logger.info(`command: ${command}`);
+ const { stdout } = await execAsync(command);
+ return stdout;
+ }
+
+ export async function waitForResourcesByName({
+ resource,
+ names,
+ namespace,
+ condition = 'Ready',
+ timeout = '10m',
+ }: {
+ resource: string;
+ names: string[];
+ namespace: string;
+ condition?: string;
+ timeout?: string;
+ }) {
+ if (!names.length) {
+ throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+ }
+
+ // Wait all in parallel; if any fails, surface which one.
+ await Promise.all(
+ names.map(async name => {
+ try {
+ await waitForResourceByName({ resource, name, namespace, condition, timeout });
+ } catch (err) {
+ throw new Error(
+ `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
+ err,
+ )}`,
+ );
+ }
+ }),
+ );
+ }
+
  export function getChartDir(spartanDir: string, chartName: string) {
  return path.join(spartanDir.trim(), chartName);
  }
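A sketch of how the new name-based waiters might be called from a spartan test; the pod names and namespace are hypothetical:

    // Waits on all pods in parallel; a failure names the offending pod,
    // its condition, timeout, and namespace.
    await waitForResourcesByName({
      resource: 'pods',
      names: ['my-namespace-validator-0', 'my-namespace-validator-1'],
      namespace: 'my-namespace',
      condition: 'Ready',
      timeout: '5m',
    });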
@@ -330,7 +403,36 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
  return stdout;
  }

- export async function cleanHelm(instanceName: string, namespace: string, logger: Logger) {
+ async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
+ try {
+ const { stdout } = await execAsync(
+ `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
+ );
+ const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
+ const row = parsed.find(r => r.name === instanceName);
+ return row?.status;
+ } catch {
+ return undefined;
+ }
+ }
+
+ async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
+ const labelSelector = `owner=helm,name=${instanceName}`;
+ const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+ logger.warn(`Force deleting Helm release record: ${cmd}`);
+ await execAsync(cmd).catch(() => undefined);
+ }
+
+ async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
+ try {
+ const status = await getHelmReleaseStatus(instanceName, namespace);
+ return status?.toLowerCase() === 'deployed';
+ } catch {
+ return false;
+ }
+ }
+
+ export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
  // uninstall the helm chart if it exists
  logger.info(`Uninstalling helm chart ${instanceName}`);
  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
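getHelmReleaseStatus parses helm list --output json, which emits one row per release; only the name and status fields are read. A sketch of the recovery these module-private helpers enable, as used further down in installTransferBot (release name, namespace, and JSON values illustrative):

    // helm list --all --filter '^transfer-bot$' --output json returns rows like:
    //   [{ "name": "transfer-bot", "namespace": "smoke", "status": "failed", ... }]
    const status = await getHelmReleaseStatus('transfer-bot', 'smoke');
    if (status?.toLowerCase() === 'uninstalling') {
      // Deletes the owner=helm,name=transfer-bot release secret so a clean install can proceed.
      await forceDeleteHelmReleaseRecord('transfer-bot', 'smoke', logger);
    }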
@@ -394,7 +496,7 @@ export async function installChaosMeshChart({
  logger: Logger;
  }) {
  if (clean) {
- await cleanHelm(instanceName, targetNamespace, logger);
+ await uninstallChaosMesh(instanceName, targetNamespace, logger);
  }

  return execHelmCommand({
@@ -430,22 +532,49 @@ export function applyProverFailure({
  });
  }

+ export function applyValidatorFailure({
+ namespace,
+ spartanDir,
+ logger,
+ values,
+ instanceName,
+ }: {
+ namespace: string;
+ spartanDir: string;
+ logger: Logger;
+ values?: Record<string, string | number>;
+ instanceName?: string;
+ }) {
+ return installChaosMeshChart({
+ instanceName: instanceName ?? 'validator-failure',
+ targetNamespace: namespace,
+ valuesFile: 'validator-failure.yaml',
+ helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ values,
+ logger,
+ });
+ }
+
  export function applyProverKill({
  namespace,
  spartanDir,
  logger,
+ values,
  }: {
  namespace: string;
  spartanDir: string;
  logger: Logger;
+ values?: Record<string, string | number>;
  }) {
  return installChaosMeshChart({
  instanceName: 'prover-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ chaosMeshNamespace: namespace,
  clean: true,
  logger,
+ values,
  });
  }

@@ -453,10 +582,12 @@ export function applyProverBrokerKill({
  namespace,
  spartanDir,
  logger,
+ values,
  }: {
  namespace: string;
  spartanDir: string;
  logger: Logger;
+ values?: Record<string, string | number>;
  }) {
  return installChaosMeshChart({
  instanceName: 'prover-broker-kill',
@@ -465,66 +596,79 @@
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  clean: true,
  logger,
+ values,
  });
  }

  export function applyBootNodeFailure({
+ instanceName = 'boot-node-failure',
  namespace,
  spartanDir,
  durationSeconds,
  logger,
+ values,
  }: {
+ instanceName?: string;
  namespace: string;
  spartanDir: string;
  durationSeconds: number;
  logger: Logger;
+ values?: Record<string, string | number>;
  }) {
  return installChaosMeshChart({
- instanceName: 'boot-node-failure',
+ instanceName,
  targetNamespace: namespace,
  valuesFile: 'boot-node-failure.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  values: {
  'bootNodeFailure.duration': `${durationSeconds}s`,
+ ...(values ?? {}),
  },
  logger,
  });
  }

  export function applyValidatorKill({
+ instanceName = 'validator-kill',
  namespace,
  spartanDir,
  logger,
  values,
+ clean = true,
  }: {
+ instanceName?: string;
  namespace: string;
  spartanDir: string;
  logger: Logger;
  values?: Record<string, string | number>;
+ clean?: boolean;
  }) {
  return installChaosMeshChart({
- instanceName: 'validator-kill',
+ instanceName: instanceName ?? 'validator-kill',
  targetNamespace: namespace,
  valuesFile: 'validator-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ clean,
  logger,
  values,
  });
  }

  export function applyNetworkShaping({
+ instanceName = 'network-shaping',
  valuesFile,
  namespace,
  spartanDir,
  logger,
  }: {
+ instanceName?: string;
  valuesFile: string;
  namespace: string;
  spartanDir: string;
  logger: Logger;
  }) {
  return installChaosMeshChart({
- instanceName: 'network-shaping',
+ instanceName,
  targetNamespace: namespace,
  valuesFile,
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
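Each chaos-scenario wrapper now accepts an optional instanceName (and, where shown, values and clean overrides), so a test can run several experiments of the same kind side by side under distinct Helm release names. A hedged example, assuming spartanDir and logger are in scope; the namespace and the chart value key are illustrative:

    // Two concurrent validator-kill experiments in one namespace.
    await applyValidatorKill({
      instanceName: 'validator-kill-a',
      namespace: 'smoke',
      spartanDir,
      logger,
      values: { 'validatorKill.duration': '60s' }, // hypothetical chart value
    });
    await applyValidatorKill({
      instanceName: 'validator-kill-b',
      namespace: 'smoke',
      spartanDir,
      logger,
      clean: false, // skip the pre-install uninstall for this release
    });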
@@ -623,6 +767,12 @@ export async function installTransferBot({
  'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
  // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
  'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+
+ // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+ // can be installed by users without cluster-scoped RBAC permissions.
+ 'bot.rbac.create': false,
+ 'bot.serviceAccount.create': false,
+ 'bot.serviceAccount.name': 'default',
  };
  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
  if (mnemonicStartIndex === undefined) {
@@ -647,7 +797,7 @@ export async function installTransferBot({
  if (!repository || !tag) {
  try {
  const { stdout } = await execAsync(
- `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+ `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
  );
  const image = stdout.trim().replace(/^'|'$/g, '');
  if (image && image.includes(':')) {
@@ -668,6 +818,26 @@ export async function installTransferBot({
  typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
  }

+ // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+ // `helm upgrade --install` can error with "has no deployed releases".
+ // In that case, clear the release record and do a clean install.
+ const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+ logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
+ () => undefined,
+ );
+ // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+ const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+ await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+ }
+ }
+
+ // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+ // Only reuse values when we have a deployed release to reuse from.
+ const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
+
  await execHelmCommand({
  instanceName,
  helmChartDir,
@@ -675,7 +845,7 @@ export async function installTransferBot({
  valuesFile: undefined,
  timeout,
  values: values as unknown as Record<string, string | number | boolean>,
- reuseValues,
+ reuseValues: effectiveReuseValues,
  });

  if (replicas > 0) {
@@ -720,7 +890,7 @@ export async function setValidatorTxDrop({
  const drop = enabled ? 'true' : 'false';
  const prob = String(probability);

- const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+ const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
  let updated = false;
  for (const selector of selectors) {
  try {
@@ -751,7 +921,7 @@ export async function setValidatorTxDrop({
  }
  }
  export async function restartValidators(namespace: string, logger: Logger) {
- const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+ const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
  let any = false;
  for (const selector of selectors) {
  try {
@@ -806,11 +976,33 @@ export async function enableValidatorDynamicBootNode(
  }

  export async function getSequencers(namespace: string) {
- const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
- const { stdout } = await execAsync(command);
- const sequencers = stdout.split(' ');
- logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
- return sequencers;
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app.kubernetes.io/component=sequencer-node',
+ 'app=validator',
+ ];
+ for (const selector of selectors) {
+ try {
+ const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+ const { stdout } = await execAsync(command);
+ const sequencers = stdout
+ .split(' ')
+ .map(s => s.trim())
+ .filter(Boolean);
+ if (sequencers.length > 0) {
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+ return sequencers;
+ }
+ } catch {
+ // try next selector
+ }
+ }
+
+ // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+ throw new Error(
+ `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+ );
  }

  export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
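getSequencers now walks a list of label selectors and throws when none match, rather than returning [''] and letting callers try to port-forward `pod/`. Call sites are unchanged; a brief sketch with an illustrative namespace:

    // Resolves pod names via the first selector that matches anything,
    // e.g. ['smoke-validator-0', 'smoke-validator-1']; throws otherwise.
    const sequencers = await getSequencers('smoke');
    for (const pod of sequencers) {
      logger.info(`sequencer pod: ${pod}`);
    }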
@@ -871,7 +1063,7 @@ export async function getPublicViemClient(
  containerPort: 8545,
  });
  const url = `http://127.0.0.1:${port}`;
- const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
  if (processes) {
  processes.push(process);
  }
@@ -881,7 +1073,9 @@ export async function getPublicViemClient(
  if (!L1_RPC_URLS_JSON) {
  throw new Error(`L1_RPC_URLS_JSON is not defined`);
  }
- const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+ const client: ViemPublicClient = createPublicClient({
+ transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
+ });
  return { url: L1_RPC_URLS_JSON, client };
  }
  }
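Both client paths now pass { batch: false } to viem's http transport, a real option on that API, explicitly disabling JSON-RPC request batching so each call goes out as its own HTTP request. A minimal standalone equivalent:

    import { createPublicClient, fallback, http } from 'viem';

    // Same transport configuration as above, outside the test fixture.
    const client = createPublicClient({
      transport: fallback([http('http://127.0.0.1:8545', { batch: false })]),
    });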
@@ -914,24 +1108,93 @@ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1Contr
  /**
  * Rolls the Aztec pods in the given namespace.
  * @param namespace - The namespace to roll the Aztec pods in.
- * @dev - IMPORTANT: This function DOES NOT delete the underlying PVCs.
- * This means that the pods will be restarted with the same persistent storage.
- * This is useful for testing, but you should be aware of the implications.
+ * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
+ * This is required for rollup upgrades where the old state is incompatible with the new rollup.
+ * Defaults to false, which preserves the existing storage.
  */
- export async function rollAztecPods(namespace: string) {
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=boot-node' });
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-node' });
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-broker' });
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-agent' });
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
- await deleteResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
+ export async function rollAztecPods(namespace: string, clearState: boolean = false) {
+ // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
+ const podComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'prover-agent', 'sequencer-node', 'rpc'];
+ const pvcComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
+ // StatefulSet components that need to be scaled down before PVC deletion
+ // Note: validators use 'sequencer-node' as component label, not 'validator'
+ const statefulSetComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
+
+ if (clearState) {
+ // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
+ // Otherwise PVC deletion will hang waiting for pods to terminate
+
+ // First, save original replica counts
+ const originalReplicas: Map<string, number> = new Map();
+ for (const component of statefulSetComponents) {
+ try {
+ const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o jsonpath='{.items[0].spec.replicas}'`;
+ const { stdout } = await execAsync(getCmd);
+ const replicas = parseInt(stdout.replace(/'/g, '').trim(), 10);
+ if (!isNaN(replicas) && replicas > 0) {
+ originalReplicas.set(component, replicas);
+ }
+ } catch {
+ // Component might not exist, continue
+ }
+ }
+
+ // Scale down to 0
+ for (const component of statefulSetComponents) {
+ try {
+ const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
+ logger.info(`command: ${scaleCmd}`);
+ await execAsync(scaleCmd);
+ } catch (e) {
+ // Component might not exist or might be a Deployment, continue
+ logger.verbose(`Scale down ${component} skipped: ${e}`);
+ }
+ }
+
+ // Wait for pods to terminate
+ await sleep(15 * 1000);
+
+ // Now delete PVCs (they should no longer be in use)
+ for (const component of pvcComponents) {
+ await deleteResourceByLabel({
+ resource: 'persistentvolumeclaims',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`,
+ });
+ }
+
+ // Scale StatefulSets back up to original replica counts
+ for (const component of statefulSetComponents) {
+ const replicas = originalReplicas.get(component) ?? 1;
+ try {
+ const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=${replicas} --timeout=2m`;
+ logger.info(`command: ${scaleCmd}`);
+ await execAsync(scaleCmd);
+ } catch (e) {
+ logger.verbose(`Scale up ${component} skipped: ${e}`);
+ }
+ }
+ } else {
+ // Just delete pods (no state clearing)
+ for (const component of podComponents) {
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`,
+ });
+ }
+ }
+
  await sleep(10 * 1000);
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=boot-node' });
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-node' });
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-broker' });
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=prover-agent' });
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=validator' });
- await waitForResourceByLabel({ resource: 'pods', namespace: namespace, label: 'app=pxe' });
+
+ // Wait for pods to come back
+ for (const component of podComponents) {
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`,
+ });
+ }
  }

  /**
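The reworked rollAztecPods gains a clearState flag while keeping the old behaviour as the default. A brief usage sketch; the namespace is illustrative:

    // Roll pods but keep persistent state (matches the old behaviour):
    await rollAztecPods('smoke');

    // Roll pods and wipe PVCs, e.g. across a rollup upgrade where the old
    // state is incompatible with the new rollup:
    await rollAztecPods('smoke', true);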