@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.03f7ef2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. package/dest/bench/client_flows/benchmark.d.ts +61 -0
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
  3. package/dest/bench/client_flows/benchmark.js +261 -0
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +80 -0
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
  6. package/dest/bench/client_flows/client_flows_benchmark.js +336 -0
  7. package/dest/bench/client_flows/config.d.ts +14 -0
  8. package/dest/bench/client_flows/config.d.ts.map +1 -0
  9. package/dest/bench/client_flows/config.js +106 -0
  10. package/dest/bench/client_flows/data_extractor.d.ts +2 -0
  11. package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
  12. package/dest/bench/client_flows/data_extractor.js +77 -0
  13. package/dest/bench/utils.d.ts +12 -38
  14. package/dest/bench/utils.d.ts.map +1 -1
  15. package/dest/bench/utils.js +26 -66
  16. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +21 -13
  17. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  18. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
  19. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +20 -25
  20. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  21. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +51 -70
  22. package/dest/e2e_deploy_contract/deploy_test.d.ts +16 -8
  23. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  24. package/dest/e2e_deploy_contract/deploy_test.js +13 -19
  25. package/dest/e2e_epochs/epochs_test.d.ts +65 -22
  26. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  27. package/dest/e2e_epochs/epochs_test.js +233 -49
  28. package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
  29. package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
  30. package/dest/e2e_fees/bridging_race.notest.js +63 -0
  31. package/dest/e2e_fees/fees_test.d.ts +27 -12
  32. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  33. package/dest/e2e_fees/fees_test.js +106 -109
  34. package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
  35. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  36. package/dest/e2e_l1_publisher/write_json.js +58 -0
  37. package/dest/e2e_multi_validator/utils.d.ts +12 -0
  38. package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
  39. package/dest/e2e_multi_validator/utils.js +214 -0
  40. package/dest/e2e_nested_contract/nested_contract_test.d.ts +10 -7
  41. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  42. package/dest/e2e_nested_contract/nested_contract_test.js +24 -20
  43. package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
  44. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
  45. package/dest/e2e_p2p/inactivity_slash_test.js +136 -0
  46. package/dest/e2e_p2p/p2p_network.d.ts +276 -23
  47. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  48. package/dest/e2e_p2p/p2p_network.js +188 -133
  49. package/dest/e2e_p2p/shared.d.ts +43 -7
  50. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  51. package/dest/e2e_p2p/shared.js +164 -19
  52. package/dest/e2e_token_contract/token_contract_test.d.ts +12 -6
  53. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  54. package/dest/e2e_token_contract/token_contract_test.js +50 -26
  55. package/dest/fixtures/e2e_prover_test.d.ts +61 -0
  56. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
  57. package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +108 -112
  58. package/dest/fixtures/fixtures.d.ts +6 -8
  59. package/dest/fixtures/fixtures.d.ts.map +1 -1
  60. package/dest/fixtures/fixtures.js +5 -5
  61. package/dest/fixtures/get_acvm_config.d.ts +2 -2
  62. package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
  63. package/dest/fixtures/get_acvm_config.js +3 -15
  64. package/dest/fixtures/get_bb_config.d.ts +2 -2
  65. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  66. package/dest/fixtures/get_bb_config.js +10 -17
  67. package/dest/fixtures/index.d.ts +1 -1
  68. package/dest/fixtures/l1_to_l2_messaging.d.ts +11 -7
  69. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  70. package/dest/fixtures/l1_to_l2_messaging.js +45 -19
  71. package/dest/fixtures/logging.d.ts +1 -1
  72. package/dest/fixtures/setup_p2p_test.d.ts +15 -14
  73. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  74. package/dest/fixtures/setup_p2p_test.js +82 -22
  75. package/dest/fixtures/snapshot_manager.d.ts +20 -14
  76. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  77. package/dest/fixtures/snapshot_manager.js +154 -140
  78. package/dest/fixtures/token_utils.d.ts +10 -4
  79. package/dest/fixtures/token_utils.d.ts.map +1 -1
  80. package/dest/fixtures/token_utils.js +28 -12
  81. package/dest/fixtures/utils.d.ts +95 -54
  82. package/dest/fixtures/utils.d.ts.map +1 -1
  83. package/dest/fixtures/utils.js +456 -389
  84. package/dest/fixtures/web3signer.d.ts +5 -0
  85. package/dest/fixtures/web3signer.d.ts.map +1 -0
  86. package/dest/fixtures/web3signer.js +53 -0
  87. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  88. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  89. package/dest/fixtures/with_telemetry_utils.js +2 -2
  90. package/dest/index.d.ts +1 -1
  91. package/dest/quality_of_service/alert_checker.d.ts +2 -2
  92. package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
  93. package/dest/shared/cross_chain_test_harness.d.ts +42 -35
  94. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  95. package/dest/shared/cross_chain_test_harness.js +104 -50
  96. package/dest/shared/gas_portal_test_harness.d.ts +29 -31
  97. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  98. package/dest/shared/gas_portal_test_harness.js +51 -30
  99. package/dest/shared/index.d.ts +1 -1
  100. package/dest/shared/jest_setup.d.ts +1 -1
  101. package/dest/shared/jest_setup.js +1 -1
  102. package/dest/shared/submit-transactions.d.ts +6 -4
  103. package/dest/shared/submit-transactions.d.ts.map +1 -1
  104. package/dest/shared/submit-transactions.js +8 -7
  105. package/dest/shared/uniswap_l1_l2.d.ts +16 -13
  106. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  107. package/dest/shared/uniswap_l1_l2.js +149 -117
  108. package/dest/simulators/index.d.ts +1 -1
  109. package/dest/simulators/lending_simulator.d.ts +7 -11
  110. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  111. package/dest/simulators/lending_simulator.js +16 -17
  112. package/dest/simulators/token_simulator.d.ts +6 -3
  113. package/dest/simulators/token_simulator.d.ts.map +1 -1
  114. package/dest/simulators/token_simulator.js +16 -13
  115. package/dest/spartan/setup_test_wallets.d.ts +26 -11
  116. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  117. package/dest/spartan/setup_test_wallets.js +202 -58
  118. package/dest/spartan/tx_metrics.d.ts +39 -0
  119. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  120. package/dest/spartan/tx_metrics.js +95 -0
  121. package/dest/spartan/utils.d.ts +129 -313
  122. package/dest/spartan/utils.d.ts.map +1 -1
  123. package/dest/spartan/utils.js +559 -151
  124. package/package.json +65 -58
  125. package/src/bench/client_flows/benchmark.ts +341 -0
  126. package/src/bench/client_flows/client_flows_benchmark.ts +450 -0
  127. package/src/bench/client_flows/config.ts +61 -0
  128. package/src/bench/client_flows/data_extractor.ts +89 -0
  129. package/src/bench/utils.ts +22 -76
  130. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
  131. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +70 -107
  132. package/src/e2e_deploy_contract/deploy_test.ts +24 -39
  133. package/src/e2e_epochs/epochs_test.ts +299 -65
  134. package/src/e2e_fees/bridging_race.notest.ts +80 -0
  135. package/src/e2e_fees/fees_test.ts +151 -141
  136. package/src/e2e_l1_publisher/write_json.ts +77 -0
  137. package/src/e2e_multi_validator/utils.ts +258 -0
  138. package/src/e2e_nested_contract/nested_contract_test.ts +29 -19
  139. package/src/e2e_p2p/inactivity_slash_test.ts +179 -0
  140. package/src/e2e_p2p/p2p_network.ts +274 -171
  141. package/src/e2e_p2p/shared.ts +251 -29
  142. package/src/e2e_token_contract/token_contract_test.ts +43 -39
  143. package/src/fixtures/dumps/epoch_proof_result.json +1 -1
  144. package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +113 -160
  145. package/src/fixtures/fixtures.ts +5 -7
  146. package/src/fixtures/get_acvm_config.ts +4 -12
  147. package/src/fixtures/get_bb_config.ts +18 -13
  148. package/src/fixtures/l1_to_l2_messaging.ts +56 -24
  149. package/src/fixtures/setup_p2p_test.ts +127 -39
  150. package/src/fixtures/snapshot_manager.ts +196 -162
  151. package/src/fixtures/token_utils.ts +32 -15
  152. package/src/fixtures/utils.ts +562 -475
  153. package/src/fixtures/web3signer.ts +63 -0
  154. package/src/fixtures/with_telemetry_utils.ts +2 -2
  155. package/src/guides/up_quick_start.sh +7 -15
  156. package/src/quality_of_service/alert_checker.ts +1 -1
  157. package/src/shared/cross_chain_test_harness.ts +112 -80
  158. package/src/shared/gas_portal_test_harness.ts +59 -50
  159. package/src/shared/jest_setup.ts +1 -1
  160. package/src/shared/submit-transactions.ts +12 -8
  161. package/src/shared/uniswap_l1_l2.ts +187 -192
  162. package/src/simulators/lending_simulator.ts +15 -16
  163. package/src/simulators/token_simulator.ts +21 -13
  164. package/src/spartan/DEVELOP.md +128 -0
  165. package/src/spartan/setup_test_wallets.ts +252 -93
  166. package/src/spartan/tx_metrics.ts +130 -0
  167. package/src/spartan/utils.ts +641 -146
  168. package/dest/e2e_prover/e2e_prover_test.d.ts +0 -56
  169. package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
  170. package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
  171. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  172. package/dest/fixtures/setup_l1_contracts.js +0 -17
  173. package/dest/sample-dapp/connect.js +0 -12
  174. package/dest/sample-dapp/contracts.js +0 -10
  175. package/dest/sample-dapp/deploy.js +0 -35
  176. package/dest/sample-dapp/index.js +0 -98
  177. package/src/fixtures/setup_l1_contracts.ts +0 -27
  178. package/src/sample-dapp/connect.mjs +0 -16
  179. package/src/sample-dapp/contracts.mjs +0 -14
  180. package/src/sample-dapp/deploy.mjs +0 -40
  181. package/src/sample-dapp/index.mjs +0 -128
@@ -1,74 +1,82 @@
- import { createAztecNodeClient, createLogger, sleep } from '@aztec/aztec.js';
+ import { createLogger } from '@aztec/aztec.js/log';
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
+ import { schemas } from '@aztec/foundation/schemas';
+ import { sleep } from '@aztec/foundation/sleep';
+ import { createAztecNodeAdminClient, createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
  import { exec, execSync, spawn } from 'child_process';
  import path from 'path';
  import { promisify } from 'util';
+ import { createPublicClient, fallback, http } from 'viem';
  import { z } from 'zod';
- import { AlertChecker } from '../quality_of_service/alert_checker.js';
  const execAsync = promisify(exec);
  const logger = createLogger('e2e:k8s-utils');
- const ethereumHostsSchema = z.string().refine((str)=>str.split(',').every((url)=>{
- try {
- new URL(url.trim());
- return true;
- } catch {
- return false;
- }
- }), 'ETHEREUM_HOSTS must be a comma-separated list of valid URLs');
- const k8sLocalConfigSchema = z.object({
- ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
- AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
- AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
- AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
- INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
- NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
- CONTAINER_NODE_PORT: z.coerce.number().default(8080),
- CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
- CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
- CONTAINER_PXE_PORT: z.coerce.number().default(8080),
- CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
- CONTAINER_METRICS_PORT: z.coerce.number().default(80),
- GRAFANA_PASSWORD: z.string().optional(),
- METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
- SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
- ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
- L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
- SEPOLIA_RUN: z.string().default('false'),
- K8S: z.literal('local')
- });
- const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
- K8S: z.literal('gcloud'),
- CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
- REGION: z.string().min(1, 'REGION env variable must be set')
- });
- const directConfigSchema = z.object({
- PXE_URL: z.string().url('PXE_URL must be a valid URL'),
- NODE_URL: z.string().url('NODE_URL must be a valid URL'),
- ETHEREUM_HOSTS: ethereumHostsSchema,
- K8S: z.literal('false')
+ const testConfigSchema = z.object({
+ NAMESPACE: z.string().default('scenario'),
+ REAL_VERIFIER: schemas.Boolean.optional().default(true),
+ CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+ L1_RPC_URLS_JSON: z.string().optional(),
+ L1_ACCOUNT_MNEMONIC: z.string().optional(),
+ AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+ AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
+ AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
  });
- const envSchema = z.discriminatedUnion('K8S', [
- k8sLocalConfigSchema,
- k8sGCloudConfigSchema,
- directConfigSchema
- ]);
- export function isK8sConfig(config) {
- return config.K8S === 'local' || config.K8S === 'gcloud';
- }
- export function isGCloudConfig(config) {
- return config.K8S === 'gcloud';
- }
  export function setupEnvironment(env) {
- const config = envSchema.parse(env);
- if (isGCloudConfig(config)) {
- const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
- execSync(command);
- }
+ const config = testConfigSchema.parse(env);
+ logger.warn(`Loaded env config`, config);
  return config;
  }
+ /**
+ * @param path - The path to the script, relative to the project root
+ * @param args - The arguments to pass to the script
+ * @param logger - The logger to use
+ * @returns The exit code of the script
+ */ function runScript(path, args, logger, env) {
+ const childProcess = spawn(path, args, {
+ stdio: [
+ 'ignore',
+ 'pipe',
+ 'pipe'
+ ],
+ env: env ? {
+ ...process.env,
+ ...env
+ } : process.env
+ });
+ return new Promise((resolve, reject)=>{
+ childProcess.on('close', (code)=>resolve(code ?? 0));
+ childProcess.on('error', reject);
+ childProcess.stdout?.on('data', (data)=>{
+ logger.info(data.toString());
+ });
+ childProcess.stderr?.on('data', (data)=>{
+ logger.error(data.toString());
+ });
+ });
+ }
+ export function getAztecBin() {
+ return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
+ }
+ /**
+ * Runs the Aztec binary
+ * @param args - The arguments to pass to the Aztec binary
+ * @param logger - The logger to use
+ * @param env - Optional environment variables to set for the process
+ * @returns The exit code of the Aztec binary
+ */ export function runAztecBin(args, logger, env) {
+ return runScript('node', [
+ getAztecBin(),
+ ...args
+ ], logger, env);
+ }
+ export function runProjectScript(script, args, logger, env) {
+ const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+ return runScript(scriptPath, args, logger, env);
+ }
  export async function startPortForward({ resource, namespace, containerPort, hostPort }) {
  const hostPortAsString = hostPort ? hostPort.toString() : '';
- logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
- const process = spawn('kubectl', [
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
+ const process1 = spawn('kubectl', [
  'port-forward',
  '-n',
  namespace,
@@ -85,61 +93,106 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  });
  let isResolved = false;
  const connected = new Promise((resolve)=>{
- process.stdout?.on('data', (data)=>{
+ process1.stdout?.on('data', (data)=>{
  const str = data.toString();
  if (!isResolved && str.includes('Forwarding from')) {
  isResolved = true;
- logger.info(str);
+ logger.debug(`Port forward for ${resource}: ${str}`);
  const port = str.search(/:\d+/);
  if (port === -1) {
  throw new Error('Port not found in port forward output');
  }
  const portNumber = parseInt(str.slice(port + 1));
- logger.info(`Port forward connected: ${portNumber}`);
- logger.info(`Port forward connected: ${portNumber}`);
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
  resolve(portNumber);
  } else {
  logger.silent(str);
  }
  });
- process.stderr?.on('data', (data)=>{
- logger.info(data.toString());
+ process1.stderr?.on('data', (data)=>{
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
  // It's a strange thing:
  // If we don't pipe stderr, then the port forwarding does not work.
  // Log to silent because this doesn't actually report errors,
  // just extremely verbose debug logs.
  logger.silent(data.toString());
  });
- process.on('close', ()=>{
+ process1.on('close', ()=>{
  if (!isResolved) {
  isResolved = true;
- logger.warn('Port forward closed before connection established');
+ logger.warn(`Port forward for ${resource} closed before connection established`);
  resolve(0);
  }
  });
- process.on('error', (error)=>{
- logger.error(`Port forward error: ${error}`);
+ process1.on('error', (error)=>{
+ logger.error(`Port forward for ${resource} error: ${error}`);
  resolve(0);
  });
- process.on('exit', (code)=>{
- logger.info(`Port forward exited with code ${code}`);
+ process1.on('exit', (code)=>{
+ logger.verbose(`Port forward for ${resource} exited with code ${code}`);
  resolve(0);
  });
  });
  const port = await connected;
  return {
- process,
+ process: process1,
  port
  };
  }
+ export function getExternalIP(namespace, serviceName) {
+ const { promise, resolve, reject } = promiseWithResolvers();
+ const process1 = spawn('kubectl', [
+ 'get',
+ 'service',
+ '-n',
+ namespace,
+ `${namespace}-${serviceName}`,
+ '--output',
+ "jsonpath='{.status.loadBalancer.ingress[0].ip}'"
+ ], {
+ stdio: 'pipe'
+ });
+ let ip = '';
+ process1.stdout.on('data', (data)=>{
+ ip += data;
+ });
+ process1.on('error', (err)=>{
+ reject(err);
+ });
+ process1.on('exit', ()=>{
+ // kubectl prints JSON. Remove the quotes
+ resolve(ip.replace(/"|'/g, ''));
+ });
+ return promise;
+ }
+ export function startPortForwardForRPC(namespace, index = 0) {
+ return startPortForward({
+ resource: `pod/${namespace}-rpc-aztec-node-${index}`,
+ namespace,
+ containerPort: 8080
+ });
+ }
+ export function startPortForwardForEthereum(namespace) {
+ return startPortForward({
+ resource: `services/${namespace}-eth-execution`,
+ namespace,
+ containerPort: 8545
+ });
+ }
  export async function deleteResourceByName({ resource, namespace, name, force = false }) {
  const command = `kubectl delete ${resource} ${name} -n ${namespace} --ignore-not-found=true --wait=true ${force ? '--force' : ''}`;
  logger.info(`command: ${command}`);
  const { stdout } = await execAsync(command);
  return stdout;
  }
- export async function deleteResourceByLabel({ resource, namespace, label }) {
- const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true`;
+ export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
+ try {
+ await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq "^${resource}(\\\\..+)?$"`);
+ } catch (error) {
+ logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+ return '';
+ }
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${force ? '--force' : ''}`;
  logger.info(`command: ${command}`);
  const { stdout } = await execAsync(command);
  return stdout;
@@ -153,8 +206,12 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
  export function getChartDir(spartanDir, chartName) {
  return path.join(spartanDir.trim(), chartName);
  }
+ function shellQuote(value) {
+ // Single-quote safe shell escaping: ' -> '\''
+ return `'${value.replace(/'/g, "'\\''")}'`;
+ }
  function valuesToArgs(values) {
- return Object.entries(values).map(([key, value])=>`--set ${key}=${value}`).join(' ');
+ return Object.entries(values).map(([key, value])=>typeof value === 'number' || typeof value === 'boolean' ? `--set ${key}=${value}` : `--set-string ${key}=${shellQuote(String(value))}`).join(' ');
  }
  function createHelmCommand({ instanceName, helmChartDir, namespace, valuesFile, timeout, values, reuseValues = false }) {
  const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -167,6 +224,57 @@ async function execHelmCommand(args) {
  const { stdout } = await execAsync(helmCommand);
  return stdout;
  }
+ async function getHelmReleaseStatus(instanceName, namespace) {
+ try {
+ const { stdout } = await execAsync(`helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`);
+ const parsed = JSON.parse(stdout);
+ const row = parsed.find((r)=>r.name === instanceName);
+ return row?.status;
+ } catch {
+ return undefined;
+ }
+ }
+ async function forceDeleteHelmReleaseRecord(instanceName, namespace, logger) {
+ const labelSelector = `owner=helm,name=${instanceName}`;
+ const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+ logger.warn(`Force deleting Helm release record: ${cmd}`);
+ await execAsync(cmd).catch(()=>undefined);
+ }
+ async function hasDeployedHelmRelease(instanceName, namespace) {
+ try {
+ const status = await getHelmReleaseStatus(instanceName, namespace);
+ return status?.toLowerCase() === 'deployed';
+ } catch {
+ return false;
+ }
+ }
+ export async function uninstallChaosMesh(instanceName, namespace, logger) {
+ // uninstall the helm chart if it exists
+ logger.info(`Uninstalling helm chart ${instanceName}`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+ // and delete the chaos-mesh resources created by this release
+ const deleteByLabel = async (resource)=>{
+ const args = {
+ resource,
+ namespace: namespace,
+ label: `app.kubernetes.io/instance=${instanceName}`
+ };
+ logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+ await deleteResourceByLabel(args).catch((e)=>{
+ logger.error(`Error deleting ${resource}: ${e}`);
+ logger.info(`Force deleting ${resource}`);
+ return deleteResourceByLabel({
+ ...args,
+ force: true
+ });
+ });
+ };
+ await deleteByLabel('podchaos');
+ await deleteByLabel('networkchaos');
+ await deleteByLabel('podnetworkchaos');
+ await deleteByLabel('workflows');
+ await deleteByLabel('workflownodes');
+ }
  /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
@@ -183,31 +291,14 @@ async function execHelmCommand(args) {
  * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
  * console.log(stdout);
  * ```
- */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, chaosMeshNamespace = 'chaos-mesh', timeout = '5m', clean = true, values = {}, logger }) {
+ */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, timeout = '10m', clean = true, values = {}, logger }) {
  if (clean) {
- // uninstall the helm chart if it exists
- logger.info(`Uninstalling helm chart ${instanceName}`);
- await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
- // and delete the podchaos resource
- const deleteArgs = {
- resource: 'podchaos',
- namespace: chaosMeshNamespace,
- name: `${targetNamespace}-${instanceName}`
- };
- logger.info(`Deleting podchaos resource`);
- await deleteResourceByName(deleteArgs).catch((e)=>{
- logger.error(`Error deleting podchaos resource: ${e}`);
- logger.info(`Force deleting podchaos resource`);
- return deleteResourceByName({
- ...deleteArgs,
- force: true
- });
- });
+ await uninstallChaosMesh(instanceName, targetNamespace, logger);
  }
  return execHelmCommand({
  instanceName,
  helmChartDir,
- namespace: chaosMeshNamespace,
+ namespace: targetNamespace,
  valuesFile,
  timeout,
  values: {
@@ -228,69 +319,74 @@ export function applyProverFailure({ namespace, spartanDir, durationSeconds, log
  logger
  });
  }
- export function applyProverKill({ namespace, spartanDir, logger }) {
+ export function applyProverKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ chaosMeshNamespace: namespace,
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyProverBrokerKill({ namespace, spartanDir, logger }) {
+ export function applyProverBrokerKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-broker-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-broker-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyBootNodeFailure({ namespace, spartanDir, durationSeconds, logger }) {
+ export function applyBootNodeFailure({ instanceName = 'boot-node-failure', namespace, spartanDir, durationSeconds, logger, values }) {
  return installChaosMeshChart({
- instanceName: 'boot-node-failure',
+ instanceName,
  targetNamespace: namespace,
  valuesFile: 'boot-node-failure.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  values: {
- 'bootNodeFailure.duration': `${durationSeconds}s`
+ 'bootNodeFailure.duration': `${durationSeconds}s`,
+ ...values ?? {}
  },
  logger
  });
  }
- export function applyValidatorKill({ namespace, spartanDir, logger }) {
+ export function applyValidatorKill({ instanceName = 'validator-kill', namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
- instanceName: 'validator-kill',
+ instanceName,
  targetNamespace: namespace,
  valuesFile: 'validator-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
- logger
+ logger,
+ values
  });
  }
- export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger }) {
+ export function applyNetworkShaping({ instanceName = 'network-shaping', valuesFile, namespace, spartanDir, logger }) {
  return installChaosMeshChart({
- instanceName: 'network-shaping',
+ instanceName,
  targetNamespace: namespace,
  valuesFile,
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  logger
  });
  }
- export async function awaitL2BlockNumber(rollupCheatCodes, blockNumber, timeoutSeconds, logger) {
- logger.info(`Waiting for L2 Block ${blockNumber}`);
+ export async function awaitCheckpointNumber(rollupCheatCodes, checkpointNumber, timeoutSeconds, logger) {
+ logger.info(`Waiting for checkpoint ${checkpointNumber}`);
  let tips = await rollupCheatCodes.getTips();
  const endTime = Date.now() + timeoutSeconds * 1000;
- while(tips.pending < blockNumber && Date.now() < endTime){
- logger.info(`At L2 Block ${tips.pending}`);
+ while(tips.pending < checkpointNumber && Date.now() < endTime){
+ logger.info(`At checkpoint ${tips.pending}`);
  await sleep(1000);
  tips = await rollupCheatCodes.getTips();
  }
- if (tips.pending < blockNumber) {
- throw new Error(`Timeout waiting for L2 Block ${blockNumber}, only reached ${tips.pending}`);
+ if (tips.pending < checkpointNumber) {
+ throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
  } else {
- logger.info(`Reached L2 Block ${tips.pending}`);
+ logger.info(`Reached checkpoint ${tips.pending}`);
  }
  }
  export async function restartBot(namespace, logger) {
@@ -298,16 +394,212 @@ export async function restartBot(namespace, logger) {
  await deleteResourceByLabel({
  resource: 'pods',
  namespace,
- label: 'app=bot'
+ label: 'app.kubernetes.io/name=bot'
  });
  await sleep(10 * 1000);
+ // Some bot images may take time to report Ready due to heavy boot-time proving.
+ // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
  await waitForResourceByLabel({
  resource: 'pods',
  namespace,
- label: 'app=bot'
+ label: 'app.kubernetes.io/name=bot',
+ condition: 'PodReadyToStartContainers'
  });
  logger.info(`Bot restarted`);
  }
+ /**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */ export async function installTransferBot({ namespace, spartanDir, logger, replicas = 1, txIntervalSeconds = 10, followChain = 'PENDING', mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk', mnemonicStartIndex, botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01', nodeUrl, timeout = '15m', reuseValues = true, aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12) }) {
+ const instanceName = `${namespace}-bot-transfers`;
+ const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+ const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+ logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+ const values = {
+ 'bot.replicaCount': replicas,
+ 'bot.txIntervalSeconds': txIntervalSeconds,
+ 'bot.followChain': followChain,
+ 'bot.botPrivateKey': botPrivateKey,
+ 'bot.nodeUrl': resolvedNodeUrl,
+ 'bot.mnemonic': mnemonic,
+ 'bot.feePaymentMethod': 'fee_juice',
+ 'aztec.slotDuration': aztecSlotDuration,
+ // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+ // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+ 'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+ // Provide L1 execution RPC for bridging fee juice
+ 'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+ // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+ 'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+ // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+ // can be installed by users without cluster-scoped RBAC permissions.
+ 'bot.rbac.create': false,
+ 'bot.serviceAccount.create': false,
+ 'bot.serviceAccount.name': 'default'
+ };
+ // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+ if (mnemonicStartIndex === undefined) {
+ values['bot.mnemonicStartIndex'] = 0;
+ }
+ // Also pass a funded private key directly if available
+ if (process.env.FUNDING_PRIVATE_KEY) {
+ values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+ }
+ // Align bot image with the running network image: prefer env var, else detect from a validator pod
+ let repositoryFromEnv;
+ let tagFromEnv;
+ const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+ if (aztecDockerImage && aztecDockerImage.includes(':')) {
+ const lastColon = aztecDockerImage.lastIndexOf(':');
+ repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+ tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+ }
+ let repository = repositoryFromEnv;
+ let tag = tagFromEnv;
+ if (!repository || !tag) {
+ try {
+ const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
+ const image = stdout.trim().replace(/^'|'$/g, '');
+ if (image && image.includes(':')) {
+ const lastColon = image.lastIndexOf(':');
+ repository = image.slice(0, lastColon);
+ tag = image.slice(lastColon + 1);
+ }
+ } catch (err) {
+ logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+ }
+ }
+ if (repository && tag) {
+ values['global.aztecImage.repository'] = repository;
+ values['global.aztecImage.tag'] = tag;
+ }
+ if (mnemonicStartIndex !== undefined) {
+ values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+ }
+ // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+ // `helm upgrade --install` can error with "has no deployed releases".
+ // In that case, clear the release record and do a clean install.
+ const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+ logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(()=>undefined);
+ // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+ const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+ await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+ }
+ }
+ // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+ // Only reuse values when we have a deployed release to reuse from.
+ const effectiveReuseValues = reuseValues && await hasDeployedHelmRelease(instanceName, namespace);
+ await execHelmCommand({
+ instanceName,
+ helmChartDir,
+ namespace,
+ valuesFile: undefined,
+ timeout,
+ values: values,
+ reuseValues: effectiveReuseValues
+ });
+ if (replicas > 0) {
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: 'app.kubernetes.io/name=bot',
+ condition: 'PodReadyToStartContainers'
+ });
+ }
+ }
+ /**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */ export async function uninstallTransferBot(namespace, logger) {
+ const instanceName = `${namespace}-bot-transfers`;
+ logger.info(`Uninstalling transfer bot release ${instanceName}`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+ // Ensure any leftover pods are removed
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: 'app.kubernetes.io/name=bot'
+ }).catch(()=>undefined);
+ }
+ /**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */ export async function setValidatorTxDrop({ namespace, enabled, probability, logger }) {
+ const drop = enabled ? 'true' : 'false';
+ const prob = String(probability);
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
+ ];
+ let updated = false;
+ for (const selector of selectors){
+ try {
+ const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+ const names = list.stdout.split('\n').map((s)=>s.trim()).filter(Boolean);
+ if (names.length === 0) {
+ continue;
+ }
+ const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+ logger.info(`command: ${cmd}`);
+ await execAsync(cmd);
+ updated = true;
+ } catch (e) {
+ logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+ }
+ }
+ if (!updated) {
+ logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+ return;
+ }
+ // Restart validator pods to ensure env vars take effect and wait for readiness
+ await restartValidators(namespace, logger);
+ }
+ export async function restartValidators(namespace, logger) {
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
+ ];
+ let any = false;
+ for (const selector of selectors){
+ try {
+ const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+ if (!stdout || stdout.trim().length === 0) {
+ continue;
+ }
+ any = true;
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: selector
+ });
+ } catch (e) {
+ logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+ }
+ }
+ if (!any) {
+ logger.warn(`No validator pods found to restart in ${namespace}.`);
+ return;
+ }
+ // Wait for either label to be Ready
+ for (const selector of selectors){
+ try {
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: selector
+ });
+ return;
+ } catch {
+ // try next
+ }
+ }
+ logger.warn(`Validator pods did not report Ready; continuing.`);
+ }
  export async function enableValidatorDynamicBootNode(instanceName, namespace, spartanDir, logger) {
  logger.info(`Enabling validator dynamic boot node`);
  await execHelmCommand({
@@ -323,55 +615,127 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
  });
  logger.info(`Validator dynamic boot node enabled`);
  }
- export async function runAlertCheck(config, alerts, logger) {
- if (isK8sConfig(config)) {
- const { process, port } = await startPortForward({
- resource: `svc/metrics-grafana`,
- namespace: 'metrics',
- containerPort: config.CONTAINER_METRICS_PORT
- });
- const alertChecker = new AlertChecker(logger, {
- grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
- grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`
- });
- await alertChecker.runAlertCheck(alerts);
- process.kill();
- } else {
- logger.info('Not running alert check in non-k8s environment');
+ export async function getSequencers(namespace) {
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app.kubernetes.io/component=sequencer-node',
+ 'app=validator'
+ ];
+ for (const selector of selectors){
+ try {
+ const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+ const { stdout } = await execAsync(command);
+ const sequencers = stdout.split(' ').map((s)=>s.trim()).filter(Boolean);
+ if (sequencers.length > 0) {
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+ return sequencers;
+ }
+ } catch {
+ // try next selector
+ }
  }
+ // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+ throw new Error(`No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`);
  }
- export async function updateSequencerConfig(url, config) {
- const node = createAztecNodeClient(url);
- await node.setConfig(config);
+ export function updateSequencersConfig(env, config) {
+ return withSequencersAdmin(env, async (client)=>{
+ await client.setConfig(config);
+ return client.getConfig();
+ });
  }
- export async function getSequencers(namespace) {
- const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
- const { stdout } = await execAsync(command);
- return stdout.split(' ');
+ export function getSequencersConfig(env) {
+ return withSequencersAdmin(env, (client)=>client.getConfig());
  }
- export async function updateK8sSequencersConfig(args) {
- const { containerPort, namespace, config } = args;
+ export async function withSequencersAdmin(env, fn) {
+ const adminContainerPort = 8880;
+ const namespace = env.NAMESPACE;
  const sequencers = await getSequencers(namespace);
+ const results = [];
  for (const sequencer of sequencers){
- const { process, port } = await startPortForward({
+ const { process: process1, port } = await startPortForward({
  resource: `pod/${sequencer}`,
  namespace,
- containerPort
+ containerPort: adminContainerPort
  });
  const url = `http://localhost:${port}`;
- await updateSequencerConfig(url, config);
- process.kill();
+ await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward node admin port', makeBackoff([
+ 1,
+ 1,
+ 2,
+ 6
+ ]), logger, true);
+ const client = createAztecNodeAdminClient(url);
+ results.push(await fn(client));
+ process1.kill();
  }
+ return results;
  }
- export async function updateSequencersConfig(env, config) {
- if (isK8sConfig(env)) {
- await updateK8sSequencersConfig({
- containerPort: env.CONTAINER_NODE_PORT,
- namespace: env.NAMESPACE,
- config
+ /**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */ export async function getPublicViemClient(env, /** If set, will push the new process into it */ processes) {
+ const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+ if (CREATE_ETH_DEVNET) {
+ logger.info(`Creating port forward to eth execution node`);
+ const { process: process1, port } = await startPortForward({
+ resource: `svc/${NAMESPACE}-eth-execution`,
+ namespace: NAMESPACE,
+ containerPort: 8545
  });
+ const url = `http://127.0.0.1:${port}`;
+ const client = createPublicClient({
+ transport: fallback([
+ http(url, {
+ batch: false
+ })
+ ])
+ });
+ if (processes) {
+ processes.push(process1);
+ }
+ return {
+ url,
+ client,
+ process: process1
+ };
  } else {
- await updateSequencerConfig(env.NODE_URL, config);
+ logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+ if (!L1_RPC_URLS_JSON) {
+ throw new Error(`L1_RPC_URLS_JSON is not defined`);
+ }
+ const client = createPublicClient({
+ transport: fallback([
+ http(L1_RPC_URLS_JSON, {
+ batch: false
+ })
+ ])
+ });
+ return {
+ url: L1_RPC_URLS_JSON,
+ client
+ };
+ }
+ }
+ /** Queries an Aztec node for the L1 deployment addresses */ export async function getL1DeploymentAddresses(env) {
+ let forwardProcess;
+ try {
+ const [sequencer] = await getSequencers(env.NAMESPACE);
+ const { process: process1, port } = await startPortForward({
+ resource: `pod/${sequencer}`,
+ namespace: env.NAMESPACE,
+ containerPort: 8080
+ });
+ forwardProcess = process1;
+ const url = `http://127.0.0.1:${port}`;
+ const node = createAztecNodeClient(url);
+ return await retry(()=>node.getNodeInfo().then((i)=>i.l1ContractAddresses), 'get node info', makeBackoff([
+ 1,
+ 3,
+ 6
+ ]), logger);
+ } finally{
+ forwardProcess?.kill();
  }
  }
  /**
@@ -443,3 +807,47 @@ export async function updateSequencersConfig(env, config) {
  label: 'app=pxe'
  });
  }
+ /**
+ * Returns the absolute path to the git repository root
+ */ export function getGitProjectRoot() {
+ try {
+ const rootDir = execSync('git rev-parse --show-toplevel', {
+ encoding: 'utf-8',
+ stdio: [
+ 'ignore',
+ 'pipe',
+ 'ignore'
+ ]
+ }).trim();
+ return rootDir;
+ } catch (error) {
+ throw new Error(`Failed to determine git project root: ${error}`);
+ }
+ }
+ /** Returns a client to the RPC of the given sequencer (defaults to first) */ export async function getNodeClient(env, index = 0) {
+ const namespace = env.NAMESPACE;
+ const containerPort = 8080;
+ const sequencers = await getSequencers(namespace);
+ const sequencer = sequencers[index];
+ if (!sequencer) {
+ throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+ }
+ const { process: process1, port } = await startPortForward({
+ resource: `pod/${sequencer}`,
+ namespace,
+ containerPort
+ });
+ const url = `http://localhost:${port}`;
+ await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward port', makeBackoff([
+ 1,
+ 1,
+ 2,
+ 6
+ ]), logger, true);
+ const client = createAztecNodeClient(url);
+ return {
+ node: client,
+ port,
+ process: process1
+ };
+ }
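
For orientation, the following is a minimal sketch (not part of the package) of how the reworked spartan helpers in this diff might be driven from a scenario test. It assumes only the exports shown above (setupEnvironment, getNodeClient, updateSequencersConfig) and a kubectl context already pointing at the target cluster; the minTxsPerBlock key is an illustrative sequencer config option.

import { createLogger } from '@aztec/aztec.js/log';

import { getNodeClient, setupEnvironment, updateSequencersConfig } from './utils.js';

const logger = createLogger('e2e:spartan-example');

// Parse NAMESPACE, CREATE_ETH_DEVNET, AZTEC_SLOT_DURATION, etc. using the testConfigSchema defaults.
const env = setupEnvironment(process.env);

// Port-forward the first sequencer pod (container port 8080) and wait for /status to respond.
const { node, process: portForward } = await getNodeClient(env, 0);
logger.info(`Connected to node at L2 block ${await node.getBlockNumber()}`);

// Push a config change to every sequencer through the admin port (8880) and read it back.
await updateSequencersConfig(env, { minTxsPerBlock: 1 }); // illustrative config key

portForward.kill();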