@aztec/end-to-end 3.0.0-rc.5 → 4.0.0-nightly.20260107

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dest/bench/client_flows/data_extractor.js +3 -1
  2. package/dest/bench/utils.d.ts +2 -2
  3. package/dest/bench/utils.d.ts.map +1 -1
  4. package/dest/bench/utils.js +10 -6
  5. package/dest/e2e_epochs/epochs_test.d.ts +2 -1
  6. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  7. package/dest/e2e_epochs/epochs_test.js +8 -5
  8. package/dest/e2e_fees/fees_test.d.ts +3 -1
  9. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  10. package/dest/e2e_fees/fees_test.js +4 -2
  11. package/dest/e2e_p2p/p2p_network.js +1 -1
  12. package/dest/e2e_p2p/shared.d.ts +1 -1
  13. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  14. package/dest/e2e_p2p/shared.js +4 -4
  15. package/dest/fixtures/e2e_prover_test.d.ts +1 -3
  16. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  17. package/dest/fixtures/e2e_prover_test.js +6 -9
  18. package/dest/fixtures/fixtures.d.ts +2 -3
  19. package/dest/fixtures/fixtures.d.ts.map +1 -1
  20. package/dest/fixtures/fixtures.js +1 -2
  21. package/dest/fixtures/snapshot_manager.d.ts +1 -3
  22. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  23. package/dest/fixtures/snapshot_manager.js +5 -32
  24. package/dest/fixtures/utils.d.ts +7 -4
  25. package/dest/fixtures/utils.d.ts.map +1 -1
  26. package/dest/fixtures/utils.js +16 -24
  27. package/dest/spartan/utils.d.ts +37 -6
  28. package/dest/spartan/utils.d.ts.map +1 -1
  29. package/dest/spartan/utils.js +137 -27
  30. package/package.json +38 -38
  31. package/src/bench/client_flows/data_extractor.ts +1 -1
  32. package/src/bench/utils.ts +11 -7
  33. package/src/e2e_epochs/epochs_test.ts +20 -7
  34. package/src/e2e_fees/fees_test.ts +5 -4
  35. package/src/e2e_p2p/p2p_network.ts +1 -1
  36. package/src/e2e_p2p/shared.ts +6 -5
  37. package/src/fixtures/e2e_prover_test.ts +4 -7
  38. package/src/fixtures/fixtures.ts +1 -3
  39. package/src/fixtures/snapshot_manager.ts +7 -41
  40. package/src/fixtures/utils.ts +15 -28
  41. package/src/spartan/utils.ts +192 -18
package/src/spartan/utils.ts

@@ -32,6 +32,7 @@ const testConfigSchema = z.object({
   L1_RPC_URLS_JSON: z.string().optional(),
   L1_ACCOUNT_MNEMONIC: z.string().optional(),
   AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+  AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
   AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
 });
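For reference, `z.coerce.number().optional().default(32)` coerces string environment values to numbers and falls back to the default when the variable is unset. A minimal sketch of that behavior, abbreviated to the newly added key:

    import { z } from 'zod';

    // Abbreviated to the new key; coercion parses string env values, the default covers unset ones.
    const schema = z.object({
      AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
    });

    schema.parse({});                             // => { AZTEC_EPOCH_DURATION: 32 }
    schema.parse({ AZTEC_EPOCH_DURATION: '48' }); // => { AZTEC_EPOCH_DURATION: 48 }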
@@ -244,11 +245,11 @@ export async function deleteResourceByLabel({
   timeout?: string;
   force?: boolean;
 }) {
-  // Check if the resource type exists before attempting to delete
   try {
-    await execAsync(
-      `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
-    );
+    // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+    const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+    const regex = `(^|\\.)${escaped}(\\.|$)`;
+    await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
   } catch (error) {
     logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
     return '';
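`kubectl api-resources -o name` prints group-qualified names such as `podchaos.chaos-mesh.org`, so the old exact match `^${resource}$` only succeeded when the caller passed the name exactly as kubectl prints it; the new escaped `grep -E` pattern accepts the short or the group-qualified form. A small illustration of the same matching logic in TypeScript, using illustrative resource names:

    // Same pattern the shell `grep -Eq` receives, exercised against illustrative names.
    const resource = 'podchaos';
    const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
    const pattern = new RegExp(`(^|\\.)${escaped}(\\.|$)`);

    ['pods', 'podchaos', 'podchaos.chaos-mesh.org'].filter(name => pattern.test(name));
    // => ['podchaos', 'podchaos.chaos-mesh.org']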
@@ -281,6 +282,58 @@ export async function waitForResourceByLabel({
   return stdout;
 }
 
+export async function waitForResourceByName({
+  resource,
+  name,
+  namespace,
+  condition = 'Ready',
+  timeout = '10m',
+}: {
+  resource: string;
+  name: string;
+  namespace: string;
+  condition?: string;
+  timeout?: string;
+}) {
+  const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+  logger.info(`command: ${command}`);
+  const { stdout } = await execAsync(command);
+  return stdout;
+}
+
+export async function waitForResourcesByName({
+  resource,
+  names,
+  namespace,
+  condition = 'Ready',
+  timeout = '10m',
+}: {
+  resource: string;
+  names: string[];
+  namespace: string;
+  condition?: string;
+  timeout?: string;
+}) {
+  if (!names.length) {
+    throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+  }
+
+  // Wait all in parallel; if any fails, surface which one.
+  await Promise.all(
+    names.map(async name => {
+      try {
+        await waitForResourceByName({ resource, name, namespace, condition, timeout });
+      } catch (err) {
+        throw new Error(
+          `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
+            err,
+          )}`,
+        );
+      }
+    }),
+  );
+}
+
 export function getChartDir(spartanDir: string, chartName: string) {
   return path.join(spartanDir.trim(), chartName);
 }
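A hedged usage sketch of the new batch helper; the pod names and namespace below are illustrative:

    // Hypothetical call: wait in parallel until each named pod reports the Ready condition.
    await waitForResourcesByName({
      resource: 'pod',
      names: ['validator-0', 'validator-1'],
      namespace: 'smoke-test',
      condition: 'Ready',
      timeout: '5m',
    });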
@@ -331,6 +384,35 @@ async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
   return stdout;
 }
 
+async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
+  try {
+    const { stdout } = await execAsync(
+      `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
+    );
+    const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
+    const row = parsed.find(r => r.name === instanceName);
+    return row?.status;
+  } catch {
+    return undefined;
+  }
+}
+
+async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
+  const labelSelector = `owner=helm,name=${instanceName}`;
+  const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+  logger.warn(`Force deleting Helm release record: ${cmd}`);
+  await execAsync(cmd).catch(() => undefined);
+}
+
+async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
+  try {
+    const status = await getHelmReleaseStatus(instanceName, namespace);
+    return status?.toLowerCase() === 'deployed';
+  } catch {
+    return false;
+  }
+}
+
 export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
   // uninstall the helm chart if it exists
   logger.info(`Uninstalling helm chart ${instanceName}`);
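`helm list --output json` emits an array of release records; the helper above only reads the `name` and `status` fields. An abbreviated, illustrative row and the lookup it feeds:

    // Illustrative `helm list --output json` row (fields abbreviated); only `name` and `status` are used.
    const stdout = '[{"name":"transfer-bot","namespace":"smoke-test","status":"failed"}]';
    const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
    parsed.find(r => r.name === 'transfer-bot')?.status; // => 'failed'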
@@ -431,22 +513,49 @@ export function applyProverFailure({
   });
 }
 
+export function applyValidatorFailure({
+  namespace,
+  spartanDir,
+  logger,
+  values,
+  instanceName,
+}: {
+  namespace: string;
+  spartanDir: string;
+  logger: Logger;
+  values?: Record<string, string | number>;
+  instanceName?: string;
+}) {
+  return installChaosMeshChart({
+    instanceName: instanceName ?? 'validator-failure',
+    targetNamespace: namespace,
+    valuesFile: 'validator-failure.yaml',
+    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+    values,
+    logger,
+  });
+}
+
 export function applyProverKill({
   namespace,
   spartanDir,
   logger,
+  values,
 }: {
   namespace: string;
   spartanDir: string;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
     instanceName: 'prover-kill',
     targetNamespace: namespace,
     valuesFile: 'prover-kill.yaml',
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+    chaosMeshNamespace: namespace,
     clean: true,
     logger,
+    values,
   });
 }
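A hedged usage sketch of the new `applyValidatorFailure` helper, mirroring the call shape of the other chaos-scenario helpers; the namespace, directory, and override key below are illustrative (the real override keys live in the chart's `validator-failure.yaml` values file):

    // Hypothetical invocation; the 'validatorFailure.duration' key is illustrative.
    await applyValidatorFailure({
      namespace: 'smoke-test',
      spartanDir: '/workspace/spartan',
      logger,
      values: { 'validatorFailure.duration': '120s' },
    });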
@@ -454,10 +563,12 @@ export function applyProverBrokerKill({
   namespace,
   spartanDir,
   logger,
+  values,
 }: {
   namespace: string;
   spartanDir: string;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
     instanceName: 'prover-broker-kill',
@@ -466,66 +577,79 @@ export function applyProverBrokerKill({
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
     clean: true,
     logger,
+    values,
   });
 }
 
 export function applyBootNodeFailure({
+  instanceName = 'boot-node-failure',
   namespace,
   spartanDir,
   durationSeconds,
   logger,
+  values,
 }: {
+  instanceName?: string;
   namespace: string;
   spartanDir: string;
   durationSeconds: number;
   logger: Logger;
+  values?: Record<string, string | number>;
 }) {
   return installChaosMeshChart({
-    instanceName: 'boot-node-failure',
+    instanceName,
     targetNamespace: namespace,
     valuesFile: 'boot-node-failure.yaml',
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
     values: {
       'bootNodeFailure.duration': `${durationSeconds}s`,
+      ...(values ?? {}),
     },
     logger,
   });
 }
 
 export function applyValidatorKill({
+  instanceName = 'validator-kill',
   namespace,
   spartanDir,
   logger,
   values,
+  clean = true,
 }: {
+  instanceName?: string;
   namespace: string;
   spartanDir: string;
   logger: Logger;
   values?: Record<string, string | number>;
+  clean?: boolean;
 }) {
   return installChaosMeshChart({
-    instanceName: 'validator-kill',
+    instanceName: instanceName ?? 'validator-kill',
     targetNamespace: namespace,
     valuesFile: 'validator-kill.yaml',
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+    clean,
     logger,
     values,
   });
 }
 
 export function applyNetworkShaping({
+  instanceName = 'network-shaping',
   valuesFile,
   namespace,
   spartanDir,
   logger,
 }: {
+  instanceName?: string;
   valuesFile: string;
   namespace: string;
   spartanDir: string;
   logger: Logger;
 }) {
   return installChaosMeshChart({
-    instanceName: 'network-shaping',
+    instanceName,
     targetNamespace: namespace,
     valuesFile,
     helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
@@ -624,6 +748,12 @@ export async function installTransferBot({
     'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
     // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
     'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+
+    // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+    // can be installed by users without cluster-scoped RBAC permissions.
+    'bot.rbac.create': false,
+    'bot.serviceAccount.create': false,
+    'bot.serviceAccount.name': 'default',
   };
   // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
   if (mnemonicStartIndex === undefined) {
@@ -648,7 +778,7 @@ export async function installTransferBot({
   if (!repository || !tag) {
     try {
       const { stdout } = await execAsync(
-        `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+        `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
       );
       const image = stdout.trim().replace(/^'|'$/g, '');
       if (image && image.includes(':')) {
669
799
  typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
670
800
  }
671
801
 
802
+ // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
803
+ // `helm upgrade --install` can error with "has no deployed releases".
804
+ // In that case, clear the release record and do a clean install.
805
+ const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
806
+ if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
807
+ logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
808
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
809
+ () => undefined,
810
+ );
811
+ // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
812
+ const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
813
+ if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
814
+ await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
815
+ }
816
+ }
817
+
818
+ // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
819
+ // Only reuse values when we have a deployed release to reuse from.
820
+ const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
821
+
672
822
  await execHelmCommand({
673
823
  instanceName,
674
824
  helmChartDir,
@@ -676,7 +826,7 @@ export async function installTransferBot({
     valuesFile: undefined,
     timeout,
     values: values as unknown as Record<string, string | number | boolean>,
-    reuseValues,
+    reuseValues: effectiveReuseValues,
   });
 
   if (replicas > 0) {
@@ -721,7 +871,7 @@ export async function setValidatorTxDrop({
   const drop = enabled ? 'true' : 'false';
   const prob = String(probability);
 
-  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
   let updated = false;
   for (const selector of selectors) {
     try {
@@ -752,7 +902,7 @@ export async function setValidatorTxDrop({
 }
 
 export async function restartValidators(namespace: string, logger: Logger) {
-  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
   let any = false;
   for (const selector of selectors) {
     try {
@@ -807,11 +957,33 @@ export async function enableValidatorDynamicBootNode(
 }
 
 export async function getSequencers(namespace: string) {
-  const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
-  const { stdout } = await execAsync(command);
-  const sequencers = stdout.split(' ');
-  logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
-  return sequencers;
+  const selectors = [
+    'app.kubernetes.io/name=validator',
+    'app.kubernetes.io/component=validator',
+    'app.kubernetes.io/component=sequencer-node',
+    'app=validator',
+  ];
+  for (const selector of selectors) {
+    try {
+      const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+      const { stdout } = await execAsync(command);
+      const sequencers = stdout
+        .split(' ')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (sequencers.length > 0) {
+        logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+        return sequencers;
+      }
+    } catch {
+      // try next selector
+    }
+  }
+
+  // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+  throw new Error(
+    `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+  );
 }
 
 export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
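The rewrite also fixes the failure mode the new comment alludes to: when no pods matched, the old `stdout.split(' ')` on an empty string returned `['']` rather than `[]`, so callers later tried to port-forward `pod/`. A two-line illustration:

    // Splitting an empty string never yields an empty array, hence the old [''] result.
    ''.split(' ');                                    // => ['']
    ''.split(' ').map(s => s.trim()).filter(Boolean); // => []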
@@ -872,7 +1044,7 @@ export async function getPublicViemClient(
       containerPort: 8545,
     });
     const url = `http://127.0.0.1:${port}`;
-    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
     if (processes) {
       processes.push(process);
     }
@@ -882,7 +1054,9 @@ export async function getPublicViemClient(
     if (!L1_RPC_URLS_JSON) {
       throw new Error(`L1_RPC_URLS_JSON is not defined`);
     }
-    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+    const client: ViemPublicClient = createPublicClient({
+      transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
+    });
     return { url: L1_RPC_URLS_JSON, client };
   }
 }