@aztec/end-to-end 2.0.0-nightly.20250903 → 2.0.0-rc.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dest/bench/client_flows/client_flows_benchmark.d.ts +1 -1
  2. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
  3. package/dest/bench/client_flows/client_flows_benchmark.js +6 -7
  4. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  5. package/dest/e2e_epochs/epochs_test.js +2 -2
  6. package/dest/e2e_fees/bridging_race.notest.js +3 -5
  7. package/dest/e2e_fees/fees_test.d.ts +1 -1
  8. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  9. package/dest/e2e_fees/fees_test.js +4 -5
  10. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  11. package/dest/fixtures/e2e_prover_test.d.ts +1 -1
  12. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  13. package/dest/fixtures/e2e_prover_test.js +10 -10
  14. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  15. package/dest/fixtures/snapshot_manager.js +1 -21
  16. package/dest/fixtures/utils.d.ts.map +1 -1
  17. package/dest/fixtures/utils.js +1 -3
  18. package/dest/shared/gas_portal_test_harness.d.ts +5 -5
  19. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  20. package/dest/shared/gas_portal_test_harness.js +6 -16
  21. package/dest/spartan/utils.d.ts +35 -22
  22. package/dest/spartan/utils.d.ts.map +1 -1
  23. package/dest/spartan/utils.js +117 -46
  24. package/package.json +36 -36
  25. package/src/bench/client_flows/client_flows_benchmark.ts +9 -17
  26. package/src/e2e_epochs/epochs_test.ts +7 -2
  27. package/src/e2e_fees/bridging_race.notest.ts +6 -5
  28. package/src/e2e_fees/fees_test.ts +7 -9
  29. package/src/fixtures/e2e_prover_test.ts +7 -6
  30. package/src/fixtures/snapshot_manager.ts +1 -28
  31. package/src/fixtures/utils.ts +1 -3
  32. package/src/shared/gas_portal_test_harness.ts +8 -17
  33. package/src/spartan/DEVELOP.md +116 -0
  34. package/src/spartan/utils.ts +126 -48
  35. package/dest/integration_l1_publisher/write_json.d.ts.map +0 -1
  36. /package/dest/{integration_l1_publisher → e2e_l1_publisher}/write_json.d.ts +0 -0
  37. /package/dest/{integration_l1_publisher → e2e_l1_publisher}/write_json.js +0 -0
  38. /package/src/{integration_l1_publisher → e2e_l1_publisher}/write_json.ts +0 -0
package/src/spartan/DEVELOP.md (new file)
@@ -0,0 +1,116 @@
+ The flow is as follows:
+
+ 1. Install/start KIND locally
+ 2. Bootstrap (to build an aztec image)
+ 3. Load image into kind
+ 4. Deploy networks
+ 5. Run tests in `yarn-project/end-to-end/src/spartan`
+
+ # Setup KIND
+
+ KIND is a kubernetes cluster that runs locally out of docker containers.
+
+ You can just
+
+ ```bash
+ spartan/bootstrap.sh kind
+ ```
+
+ You only need to do that once. If you do it again, it will destroy the cluster and recreate it (which you almost never need to do).
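If you are unsure whether a cluster already exists before re-running it, the standard `kind` CLI can tell you (a minimal sketch, not a command from the file above; the cluster name is usually `kind` unless the bootstrap script chose another):

```bash
# List existing kind clusters; an entry here means bootstrap already ran
kind get clusters
```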
+
+ Now you’ll likely want some visibility into your cluster. You can
+
+ ```bash
+ spartan/scripts/create_k8s_dashboard.sh
+ ```
+
+ And after ~30 seconds or so you can
+
+ ```bash
+ spartan/scripts/forward_k8s_dashboard.sh
+ ```
+
+ That will run a port forward to your port `8443`. If you’re running in a remote environment (e.g. the mainframe), you’ll need to subsequently forward that back to your local machine. Cursor/VSCode have built-in port forwarding (cmd/ctrl shift P, “forward”).
+
+ Open the forwarded page, and copy/paste the token that was generated when you forwarded the dashboard.
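If the editor forwarding is not an option, a plain SSH local forward does the same job (a sketch assuming direct SSH access to the remote box; `user@remote-host` is a placeholder, not a name from the file above):

```bash
# Forward the remote dashboard port 8443 to localhost:8443 on your laptop
ssh -N -L 8443:localhost:8443 user@remote-host
```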
+
+ # Build an aztecprotocol:aztec image
+
+ ```bash
+ ./bootstrap.sh
+ export AZTEC_DOCKER_IMAGE="aztecprotocol/aztec:$(docker images "aztecprotocol/aztec" --format json | \
+ jq -r 'select(.Tag != "latest") | .Tag' | \
+ head -1)"
+ kind load docker-image $AZTEC_DOCKER_IMAGE
+ ```
+
+ If you just changed typescript, you can (after the initial bootstrap)
+
+ ```bash
+ ./yarn-project/bootstrap.sh
+ ./release-image/bootstrap.sh
+ export AZTEC_DOCKER_IMAGE="aztecprotocol/aztec:$(docker images "aztecprotocol/aztec" --format json | \
+ jq -r 'select(.Tag != "latest") | .Tag' | \
+ head -1)"
+ kind load docker-image $AZTEC_DOCKER_IMAGE
+ ```
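To double-check that the image actually made it into the cluster, you can list the images known to the kind node's container runtime (a sketch assuming the default node container name `kind-control-plane`; `crictl` ships inside kind node images):

```bash
# Grep the node's image list for the tag exported above
docker exec kind-control-plane crictl images | grep aztecprotocol/aztec
```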
+
+ The export is important there. The `AZTEC_DOCKER_IMAGE` env var is used as both:
+
+ - the container that runs the rollup contract deployment
+ - the containers for the aztec infrastructure (validators, provers, etc)
+
+ # Deploy stuff
+
+ ```bash
+ ./spartan/bootstrap.sh network_deploy scenario.local.env
+ ```
+
+ That will take 1-3 minutes. But at the end you should have everything you need.
+
+ You can (`k` is just an alias over `kubectl`)
+
+ ```bash
+ ❯ k get pods -n scenario
+ NAME                                            READY   STATUS      RESTARTS   AGE
+ deploy-rollup-contracts-2025-08-31-1511-w2dlb   0/1     Completed   0          2m34s
+ scenario-eth-beacon-0                           1/1     Running     0          39m
+ scenario-eth-execution-0                        1/1     Running     0          39m
+ scenario-eth-validator-0                        1/1     Running     0          39m
+ scenario-p2p-bootstrap-node-5cbf9658b9-6vd9b    1/1     Running     0          20m
+ scenario-prover-agent-59bd96899d-46k5s          1/1     Running     0          116s
+ scenario-prover-agent-59bd96899d-vzvkd          1/1     Running     0          116s
+ scenario-prover-broker-0                        1/1     Running     0          116s
+ scenario-prover-node-0                          1/1     Running     0          116s
+ scenario-rpc-aztec-node-0                       1/1     Running     0          116s
+ scenario-validator-0                            1/1     Running     0          116s
+ scenario-validator-1                            1/1     Running     0          116s
+ scenario-validator-2                            1/1     Running     0          116s
+ scenario-validator-3                            1/1     Running     0          116s
+ ```
+
+ For example, you can forward back the ethereum node with
+
+ ```bash
+ k port-forward -n scenario services/eth-devnet-eth-execution 8545:8545
+ ```
+
+ And then do whatever you like with it.
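For instance, a raw JSON-RPC call against the forwarded port confirms the node is reachable (a minimal sketch; any standard Ethereum JSON-RPC method works):

```bash
# Ask the forwarded execution node for its latest block number
curl -s -X POST http://localhost:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```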
+
+ # Run tests
+
+ With the cluster running, you can now easily run tests.
+
+ ```bash
+ # run one
+ ./spartan/bootstrap.sh single_test scenario.local.env spartan/smoke.test.ts
+
+ # run all (serially)
+ ./spartan/bootstrap.sh network_tests scenario.local.env
+ ```
+
+ Right now, I recommend running the smoke test first, always, as it waits for the committee to exist.
+
+ # Teardown
+
+ You can just `k delete namespace scenario`. That will destroy everything the deployment created in that namespace of your kind cluster. To destroy the associated terraform state that was stored locally, just `./spartan/terraform/purge_local_state.sh`.
package/src/spartan/utils.ts
@@ -1,38 +1,38 @@
  import { createLogger, sleep } from '@aztec/aztec.js';
  import type { RollupCheatCodes } from '@aztec/aztec/testing';
+ import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
  import type { Logger } from '@aztec/foundation/log';
  import { makeBackoff, retry } from '@aztec/foundation/retry';
- import { type AztecNodeAdminConfig, createAztecNodeAdminClient } from '@aztec/stdlib/interfaces/client';
+ import { schemas } from '@aztec/foundation/schemas';
+ import {
+ type AztecNodeAdmin,
+ type AztecNodeAdminConfig,
+ createAztecNodeAdminClient,
+ createAztecNodeClient,
+ } from '@aztec/stdlib/interfaces/client';

  import { ChildProcess, exec, execSync, spawn } from 'child_process';
  import path from 'path';
  import { promisify } from 'util';
+ import { createPublicClient, fallback, http } from 'viem';
  import { z } from 'zod';

- export const RPC_SERVICE_NAME = 'services/aztec-infra-rpc-aztec-node';
-
  const execAsync = promisify(exec);

  const logger = createLogger('e2e:k8s-utils');

  const testConfigSchema = z.object({
- NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
- L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
- K8S_CLUSTER: z.string().min(1, 'K8S_CLUSTER env variable must be set'),
- REGION: z.string().optional(),
- PROJECT_ID: z.string().optional(),
- AZTEC_REAL_PROOFS: z.coerce.boolean().default(false),
+ NAMESPACE: z.string().default('scenario'),
+ REAL_VERIFIER: schemas.Boolean.optional().default(true),
+ CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+ L1_RPC_URLS_JSON: z.string().optional(),
  });

  export type TestConfig = z.infer<typeof testConfigSchema>;

  export function setupEnvironment(env: unknown): TestConfig {
  const config = testConfigSchema.parse(env);
-
- if (config.K8S_CLUSTER !== 'kind') {
- const command = `gcloud container clusters get-credentials ${config.K8S_CLUSTER} --region=${config.REGION} --project=${config.PROJECT_ID}`;
- execSync(command);
- }
+ logger.warn(`Loaded env config`, config);
  return config;
  }

@@ -96,7 +96,7 @@ export async function startPortForward({
  }> {
  const hostPortAsString = hostPort ? hostPort.toString() : '';

- logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);

  const process = spawn(
  'kubectl',
@@ -114,20 +114,20 @@ export async function startPortForward({
  const str = data.toString() as string;
  if (!isResolved && str.includes('Forwarding from')) {
  isResolved = true;
- logger.info(str);
+ logger.debug(`Port forward for ${resource}: ${str}`);
  const port = str.search(/:\d+/);
  if (port === -1) {
  throw new Error('Port not found in port forward output');
  }
  const portNumber = parseInt(str.slice(port + 1));
- logger.info(`Port forward connected: ${portNumber}`);
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
  resolve(portNumber);
  } else {
  logger.silent(str);
  }
  });
  process.stderr?.on('data', data => {
- logger.info(data.toString());
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
  // It's a strange thing:
  // If we don't pipe stderr, then the port forwarding does not work.
  // Log to silent because this doesn't actually report errors,
@@ -137,16 +137,16 @@ export async function startPortForward({
  process.on('close', () => {
  if (!isResolved) {
  isResolved = true;
- logger.warn('Port forward closed before connection established');
+ logger.warn(`Port forward for ${resource} closed before connection established`);
  resolve(0);
  }
  });
  process.on('error', error => {
- logger.error(`Port forward error: ${error}`);
+ logger.error(`Port forward for ${resource} error: ${error}`);
  resolve(0);
  });
  process.on('exit', code => {
- logger.info(`Port forward exited with code ${code}`);
+ logger.verbose(`Port forward for ${resource} exited with code ${code}`);
  resolve(0);
  });
  });
@@ -158,12 +158,20 @@ export async function startPortForward({

  export function startPortForwardForRPC(namespace: string) {
  return startPortForward({
- resource: RPC_SERVICE_NAME,
+ resource: `services/${namespace}-rpc-aztec-node`,
  namespace,
  containerPort: 8080,
  });
  }

+ export function startPortForwardForEthereum(namespace: string) {
+ return startPortForward({
+ resource: `services/${namespace}-eth-execution`,
+ namespace,
+ containerPort: 8545,
+ });
+ }
+
  export async function deleteResourceByName({
  resource,
  namespace,
@@ -188,13 +196,17 @@ export async function deleteResourceByLabel({
  namespace,
  label,
  timeout = '5m',
+ force = false,
  }: {
  resource: string;
  namespace: string;
  label: string;
  timeout?: string;
+ force?: boolean;
  }) {
- const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout}`;
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
+ force ? '--force' : ''
+ }`;
  logger.info(`command: ${command}`);
  const { stdout } = await execAsync(command);
  return stdout;
@@ -306,13 +318,13 @@ export async function installChaosMeshChart({
  const deleteArgs = {
  resource: 'podchaos',
  namespace: chaosMeshNamespace,
- name: `${targetNamespace}-${instanceName}`,
+ label: `app.kubernetes.io/instance=${instanceName}`,
  };
  logger.info(`Deleting podchaos resource`);
- await deleteResourceByName(deleteArgs).catch(e => {
+ await deleteResourceByLabel(deleteArgs).catch(e => {
  logger.error(`Error deleting podchaos resource: ${e}`);
  logger.info(`Force deleting podchaos resource`);
- return deleteResourceByName({ ...deleteArgs, force: true });
+ return deleteResourceByLabel({ ...deleteArgs, force: true });
  });
  }

@@ -502,44 +514,110 @@ export async function enableValidatorDynamicBootNode(
  logger.info(`Validator dynamic boot node enabled`);
  }

- export async function updateSequencerConfig(url: string, config: Partial<AztecNodeAdminConfig>) {
- const node = createAztecNodeAdminClient(url);
- // Retry incase the port forward is not ready yet
- await retry(() => node.setConfig(config), 'Update sequencer config', makeBackoff([1, 3, 6]), logger);
- }
-
  export async function getSequencers(namespace: string) {
- const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+ const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
  const { stdout } = await execAsync(command);
- return stdout.split(' ');
+ const sequencers = stdout.split(' ');
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
+ return sequencers;
  }

- async function updateK8sSequencersConfig(args: {
- containerPort: number;
- namespace: string;
- config: Partial<AztecNodeAdminConfig>;
- }) {
- const { containerPort, namespace, config } = args;
+ export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+ return withSequencersAdmin(env, async client => {
+ await client.setConfig(config);
+ return client.getConfig();
+ });
+ }
+
+ export function getSequencersConfig(env: TestConfig) {
+ return withSequencersAdmin(env, client => client.getConfig());
+ }
+
+ export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+ const adminContainerPort = 8880;
+ const namespace = env.NAMESPACE;
  const sequencers = await getSequencers(namespace);
+ const results = [];
+
  for (const sequencer of sequencers) {
  const { process, port } = await startPortForward({
  resource: `pod/${sequencer}`,
  namespace,
- containerPort,
+ containerPort: adminContainerPort,
  });

  const url = `http://localhost:${port}`;
- await updateSequencerConfig(url, config);
+ await retry(
+ () => fetch(`${url}/status`).then(res => res.status === 200),
+ 'forward node admin port',
+ makeBackoff([1, 1, 2, 6]),
+ logger,
+ true,
+ );
+ const client = createAztecNodeAdminClient(url);
+ results.push(await fn(client));
  process.kill();
  }
+
+ return results;
  }

- export async function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
- await updateK8sSequencersConfig({
- containerPort: 8880,
- namespace: env.NAMESPACE,
- config,
- });
+ /**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */
+ export async function getPublicViemClient(
+ env: TestConfig,
+ /** If set, will push the new process into it */
+ processes?: ChildProcess[],
+ ): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
+ const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+ if (CREATE_ETH_DEVNET) {
+ logger.info(`Creating port forward to eth execution node`);
+ const { process, port } = await startPortForward({
+ resource: `svc/${NAMESPACE}-eth-execution`,
+ namespace: NAMESPACE,
+ containerPort: 8545,
+ });
+ const url = `http://127.0.0.1:${port}`;
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+ if (processes) {
+ processes.push(process);
+ }
+ return { url, client, process };
+ } else {
+ logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+ if (!L1_RPC_URLS_JSON) {
+ throw new Error(`L1_RPC_URLS_JSON is not defined`);
+ }
+ const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+ return { url: L1_RPC_URLS_JSON, client };
+ }
+ }
+
+ /** Queries an Aztec node for the L1 deployment addresses */
+ export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
+ let forwardProcess: ChildProcess | undefined;
+ try {
+ const [sequencer] = await getSequencers(env.NAMESPACE);
+ const { process, port } = await startPortForward({
+ resource: `pod/${sequencer}`,
+ namespace: env.NAMESPACE,
+ containerPort: 8080,
+ });
+
+ forwardProcess = process;
+ const url = `http://127.0.0.1:${port}`;
+ const node = createAztecNodeClient(url);
+ return await retry(
+ () => node.getNodeInfo().then(i => i.l1ContractAddresses),
+ 'get node info',
+ makeBackoff([1, 3, 6]),
+ logger,
+ );
+ } finally {
+ forwardProcess?.kill();
+ }
  }

  /**
package/dest/integration_l1_publisher/write_json.d.ts.map (deleted)
@@ -1 +0,0 @@
- {"version":3,"file":"write_json.d.ts","sourceRoot":"","sources":["../../src/integration_l1_publisher/write_json.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,EAAE,EAAE,KAAK,OAAO,EAAE,MAAM,iBAAiB,CAAC;AACjE,OAAO,EAAE,WAAW,EAAE,IAAI,EAAE,MAAM,iBAAiB,CAAC;AAOpD;;;GAGG;AACH,wBAAsB,SAAS,CAC7B,QAAQ,EAAE,MAAM,EAChB,KAAK,EAAE,OAAO,EACd,aAAa,EAAE,EAAE,EAAE,EACnB,KAAK,EAAE,IAAI,EAAE,EACb,WAAW,EAAE,WAAW,EACxB,gBAAgB,EAAE,YAAY,EAC9B,eAAe,EAAE,KAAK,MAAM,EAAE,GAC7B,OAAO,CAAC,IAAI,CAAC,CAqDf"}