@aztec/end-to-end 0.0.0-test.1 → 0.0.1-commit.1142ef1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (186)
  1. package/dest/bench/client_flows/benchmark.d.ts +61 -0
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -0
  3. package/dest/bench/client_flows/benchmark.js +261 -0
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +80 -0
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -0
  6. package/dest/bench/client_flows/client_flows_benchmark.js +336 -0
  7. package/dest/bench/client_flows/config.d.ts +14 -0
  8. package/dest/bench/client_flows/config.d.ts.map +1 -0
  9. package/dest/bench/client_flows/config.js +106 -0
  10. package/dest/bench/client_flows/data_extractor.d.ts +2 -0
  11. package/dest/bench/client_flows/data_extractor.d.ts.map +1 -0
  12. package/dest/bench/client_flows/data_extractor.js +79 -0
  13. package/dest/bench/utils.d.ts +14 -40
  14. package/dest/bench/utils.d.ts.map +1 -1
  15. package/dest/bench/utils.js +37 -70
  16. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +21 -13
  17. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  18. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +85 -57
  19. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +29 -28
  20. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  21. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +79 -82
  22. package/dest/e2e_deploy_contract/deploy_test.d.ts +16 -8
  23. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  24. package/dest/e2e_deploy_contract/deploy_test.js +13 -19
  25. package/dest/e2e_epochs/epochs_test.d.ts +65 -22
  26. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  27. package/dest/e2e_epochs/epochs_test.js +233 -49
  28. package/dest/e2e_fees/bridging_race.notest.d.ts +2 -0
  29. package/dest/e2e_fees/bridging_race.notest.d.ts.map +1 -0
  30. package/dest/e2e_fees/bridging_race.notest.js +63 -0
  31. package/dest/e2e_fees/fees_test.d.ts +27 -12
  32. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  33. package/dest/e2e_fees/fees_test.js +107 -110
  34. package/dest/e2e_l1_publisher/write_json.d.ts +10 -0
  35. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -0
  36. package/dest/e2e_l1_publisher/write_json.js +55 -0
  37. package/dest/e2e_multi_validator/utils.d.ts +12 -0
  38. package/dest/e2e_multi_validator/utils.d.ts.map +1 -0
  39. package/dest/e2e_multi_validator/utils.js +214 -0
  40. package/dest/e2e_nested_contract/nested_contract_test.d.ts +10 -7
  41. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  42. package/dest/e2e_nested_contract/nested_contract_test.js +24 -20
  43. package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
  44. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
  45. package/dest/e2e_p2p/inactivity_slash_test.js +136 -0
  46. package/dest/e2e_p2p/p2p_network.d.ts +276 -23
  47. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  48. package/dest/e2e_p2p/p2p_network.js +188 -133
  49. package/dest/e2e_p2p/shared.d.ts +43 -7
  50. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  51. package/dest/e2e_p2p/shared.js +164 -19
  52. package/dest/e2e_token_contract/token_contract_test.d.ts +12 -6
  53. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  54. package/dest/e2e_token_contract/token_contract_test.js +50 -26
  55. package/dest/fixtures/e2e_prover_test.d.ts +61 -0
  56. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -0
  57. package/dest/{e2e_prover → fixtures}/e2e_prover_test.js +108 -113
  58. package/dest/fixtures/fixtures.d.ts +6 -8
  59. package/dest/fixtures/fixtures.d.ts.map +1 -1
  60. package/dest/fixtures/fixtures.js +5 -5
  61. package/dest/fixtures/get_acvm_config.d.ts +2 -2
  62. package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
  63. package/dest/fixtures/get_acvm_config.js +3 -15
  64. package/dest/fixtures/get_bb_config.d.ts +2 -2
  65. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  66. package/dest/fixtures/get_bb_config.js +10 -17
  67. package/dest/fixtures/index.d.ts +1 -1
  68. package/dest/fixtures/l1_to_l2_messaging.d.ts +11 -7
  69. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  70. package/dest/fixtures/l1_to_l2_messaging.js +45 -19
  71. package/dest/fixtures/logging.d.ts +1 -1
  72. package/dest/fixtures/setup_p2p_test.d.ts +15 -14
  73. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  74. package/dest/fixtures/setup_p2p_test.js +82 -22
  75. package/dest/fixtures/snapshot_manager.d.ts +20 -14
  76. package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
  77. package/dest/fixtures/snapshot_manager.js +147 -138
  78. package/dest/fixtures/token_utils.d.ts +10 -4
  79. package/dest/fixtures/token_utils.d.ts.map +1 -1
  80. package/dest/fixtures/token_utils.js +28 -12
  81. package/dest/fixtures/utils.d.ts +92 -54
  82. package/dest/fixtures/utils.d.ts.map +1 -1
  83. package/dest/fixtures/utils.js +452 -389
  84. package/dest/fixtures/web3signer.d.ts +5 -0
  85. package/dest/fixtures/web3signer.d.ts.map +1 -0
  86. package/dest/fixtures/web3signer.js +53 -0
  87. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  88. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  89. package/dest/fixtures/with_telemetry_utils.js +2 -2
  90. package/dest/index.d.ts +1 -1
  91. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  92. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  93. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  94. package/dest/shared/cross_chain_test_harness.d.ts +42 -35
  95. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  96. package/dest/shared/cross_chain_test_harness.js +106 -52
  97. package/dest/shared/gas_portal_test_harness.d.ts +29 -31
  98. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  99. package/dest/shared/gas_portal_test_harness.js +51 -30
  100. package/dest/shared/index.d.ts +2 -2
  101. package/dest/shared/index.d.ts.map +1 -1
  102. package/dest/shared/jest_setup.d.ts +1 -1
  103. package/dest/shared/jest_setup.js +1 -1
  104. package/dest/shared/submit-transactions.d.ts +6 -4
  105. package/dest/shared/submit-transactions.d.ts.map +1 -1
  106. package/dest/shared/submit-transactions.js +8 -7
  107. package/dest/shared/uniswap_l1_l2.d.ts +3 -25
  108. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  109. package/dest/shared/uniswap_l1_l2.js +170 -120
  110. package/dest/simulators/index.d.ts +1 -1
  111. package/dest/simulators/lending_simulator.d.ts +7 -11
  112. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  113. package/dest/simulators/lending_simulator.js +16 -17
  114. package/dest/simulators/token_simulator.d.ts +6 -3
  115. package/dest/simulators/token_simulator.d.ts.map +1 -1
  116. package/dest/simulators/token_simulator.js +16 -13
  117. package/dest/spartan/setup_test_wallets.d.ts +27 -11
  118. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  119. package/dest/spartan/setup_test_wallets.js +202 -58
  120. package/dest/spartan/tx_metrics.d.ts +39 -0
  121. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  122. package/dest/spartan/tx_metrics.js +95 -0
  123. package/dest/spartan/utils.d.ts +151 -313
  124. package/dest/spartan/utils.d.ts.map +1 -1
  125. package/dest/spartan/utils.js +598 -151
  126. package/package.json +65 -58
  127. package/src/bench/client_flows/benchmark.ts +341 -0
  128. package/src/bench/client_flows/client_flows_benchmark.ts +450 -0
  129. package/src/bench/client_flows/config.ts +61 -0
  130. package/src/bench/client_flows/data_extractor.ts +89 -0
  131. package/src/bench/utils.ts +35 -81
  132. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +80 -77
  133. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +103 -122
  134. package/src/e2e_deploy_contract/deploy_test.ts +24 -39
  135. package/src/e2e_epochs/epochs_test.ts +299 -65
  136. package/src/e2e_fees/bridging_race.notest.ts +80 -0
  137. package/src/e2e_fees/fees_test.ts +150 -142
  138. package/src/e2e_l1_publisher/write_json.ts +74 -0
  139. package/src/e2e_multi_validator/utils.ts +258 -0
  140. package/src/e2e_nested_contract/nested_contract_test.ts +29 -19
  141. package/src/e2e_p2p/inactivity_slash_test.ts +179 -0
  142. package/src/e2e_p2p/p2p_network.ts +274 -171
  143. package/src/e2e_p2p/shared.ts +252 -29
  144. package/src/e2e_token_contract/token_contract_test.ts +43 -39
  145. package/src/fixtures/dumps/epoch_proof_result.json +1 -1
  146. package/src/{e2e_prover → fixtures}/e2e_prover_test.ts +112 -160
  147. package/src/fixtures/fixtures.ts +5 -7
  148. package/src/fixtures/get_acvm_config.ts +4 -12
  149. package/src/fixtures/get_bb_config.ts +18 -13
  150. package/src/fixtures/l1_to_l2_messaging.ts +56 -24
  151. package/src/fixtures/setup_p2p_test.ts +127 -39
  152. package/src/fixtures/snapshot_manager.ts +189 -160
  153. package/src/fixtures/token_utils.ts +32 -15
  154. package/src/fixtures/utils.ts +556 -475
  155. package/src/fixtures/web3signer.ts +63 -0
  156. package/src/fixtures/with_telemetry_utils.ts +2 -2
  157. package/src/guides/up_quick_start.sh +7 -15
  158. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +2 -2
  159. package/src/shared/cross_chain_test_harness.ts +113 -87
  160. package/src/shared/gas_portal_test_harness.ts +59 -50
  161. package/src/shared/index.ts +1 -1
  162. package/src/shared/jest_setup.ts +1 -1
  163. package/src/shared/submit-transactions.ts +12 -8
  164. package/src/shared/uniswap_l1_l2.ts +194 -211
  165. package/src/simulators/lending_simulator.ts +15 -16
  166. package/src/simulators/token_simulator.ts +21 -13
  167. package/src/spartan/DEVELOP.md +128 -0
  168. package/src/spartan/setup_test_wallets.ts +258 -93
  169. package/src/spartan/tx_metrics.ts +130 -0
  170. package/src/spartan/utils.ts +722 -146
  171. package/dest/e2e_prover/e2e_prover_test.d.ts +0 -56
  172. package/dest/e2e_prover/e2e_prover_test.d.ts.map +0 -1
  173. package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
  174. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  175. package/dest/fixtures/setup_l1_contracts.js +0 -17
  176. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  177. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  178. package/dest/sample-dapp/connect.js +0 -12
  179. package/dest/sample-dapp/contracts.js +0 -10
  180. package/dest/sample-dapp/deploy.js +0 -35
  181. package/dest/sample-dapp/index.js +0 -98
  182. package/src/fixtures/setup_l1_contracts.ts +0 -27
  183. package/src/sample-dapp/connect.mjs +0 -16
  184. package/src/sample-dapp/contracts.mjs +0 -14
  185. package/src/sample-dapp/deploy.mjs +0 -40
  186. package/src/sample-dapp/index.mjs +0 -128
package/dest/spartan/utils.js
@@ -1,74 +1,82 @@
- import { createAztecNodeClient, createLogger, sleep } from '@aztec/aztec.js';
+ import { createLogger } from '@aztec/aztec.js/log';
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
+ import { schemas } from '@aztec/foundation/schemas';
+ import { sleep } from '@aztec/foundation/sleep';
+ import { createAztecNodeAdminClient, createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
  import { exec, execSync, spawn } from 'child_process';
  import path from 'path';
  import { promisify } from 'util';
+ import { createPublicClient, fallback, http } from 'viem';
  import { z } from 'zod';
- import { AlertChecker } from '../quality_of_service/alert_checker.js';
  const execAsync = promisify(exec);
  const logger = createLogger('e2e:k8s-utils');
- const ethereumHostsSchema = z.string().refine((str)=>str.split(',').every((url)=>{
- try {
- new URL(url.trim());
- return true;
- } catch {
- return false;
- }
- }), 'ETHEREUM_HOSTS must be a comma-separated list of valid URLs');
- const k8sLocalConfigSchema = z.object({
- ETHEREUM_SLOT_DURATION: z.coerce.number().min(1, 'ETHEREUM_SLOT_DURATION env variable must be set'),
- AZTEC_SLOT_DURATION: z.coerce.number().min(1, 'AZTEC_SLOT_DURATION env variable must be set'),
- AZTEC_EPOCH_DURATION: z.coerce.number().min(1, 'AZTEC_EPOCH_DURATION env variable must be set'),
- AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().min(1, 'AZTEC_PROOF_SUBMISSION_WINDOW env variable must be set'),
- INSTANCE_NAME: z.string().min(1, 'INSTANCE_NAME env variable must be set'),
- NAMESPACE: z.string().min(1, 'NAMESPACE env variable must be set'),
- CONTAINER_NODE_PORT: z.coerce.number().default(8080),
- CONTAINER_SEQUENCER_PORT: z.coerce.number().default(8080),
- CONTAINER_PROVER_NODE_PORT: z.coerce.number().default(8080),
- CONTAINER_PXE_PORT: z.coerce.number().default(8080),
- CONTAINER_ETHEREUM_PORT: z.coerce.number().default(8545),
- CONTAINER_METRICS_PORT: z.coerce.number().default(80),
- GRAFANA_PASSWORD: z.string().optional(),
- METRICS_API_PATH: z.string().default('/api/datasources/proxy/uid/spartan-metrics-prometheus/api/v1'),
- SPARTAN_DIR: z.string().min(1, 'SPARTAN_DIR env variable must be set'),
- ETHEREUM_HOSTS: ethereumHostsSchema.optional(),
- L1_ACCOUNT_MNEMONIC: z.string().default('test test test test test test test test test test test junk'),
- SEPOLIA_RUN: z.string().default('false'),
- K8S: z.literal('local')
- });
- const k8sGCloudConfigSchema = k8sLocalConfigSchema.extend({
- K8S: z.literal('gcloud'),
- CLUSTER_NAME: z.string().min(1, 'CLUSTER_NAME env variable must be set'),
- REGION: z.string().min(1, 'REGION env variable must be set')
- });
- const directConfigSchema = z.object({
- PXE_URL: z.string().url('PXE_URL must be a valid URL'),
- NODE_URL: z.string().url('NODE_URL must be a valid URL'),
- ETHEREUM_HOSTS: ethereumHostsSchema,
- K8S: z.literal('false')
+ const testConfigSchema = z.object({
+ NAMESPACE: z.string().default('scenario'),
+ REAL_VERIFIER: schemas.Boolean.optional().default(true),
+ CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+ L1_RPC_URLS_JSON: z.string().optional(),
+ L1_ACCOUNT_MNEMONIC: z.string().optional(),
+ AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+ AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
+ AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
  });
- const envSchema = z.discriminatedUnion('K8S', [
- k8sLocalConfigSchema,
- k8sGCloudConfigSchema,
- directConfigSchema
- ]);
- export function isK8sConfig(config) {
- return config.K8S === 'local' || config.K8S === 'gcloud';
- }
- export function isGCloudConfig(config) {
- return config.K8S === 'gcloud';
- }
  export function setupEnvironment(env) {
- const config = envSchema.parse(env);
- if (isGCloudConfig(config)) {
- const command = `gcloud container clusters get-credentials ${config.CLUSTER_NAME} --region=${config.REGION}`;
- execSync(command);
- }
+ const config = testConfigSchema.parse(env);
+ logger.warn(`Loaded env config`, config);
  return config;
  }
+ /**
+ * @param path - The path to the script, relative to the project root
+ * @param args - The arguments to pass to the script
+ * @param logger - The logger to use
+ * @returns The exit code of the script
+ */ function runScript(path, args, logger, env) {
+ const childProcess = spawn(path, args, {
+ stdio: [
+ 'ignore',
+ 'pipe',
+ 'pipe'
+ ],
+ env: env ? {
+ ...process.env,
+ ...env
+ } : process.env
+ });
+ return new Promise((resolve, reject)=>{
+ childProcess.on('close', (code)=>resolve(code ?? 0));
+ childProcess.on('error', reject);
+ childProcess.stdout?.on('data', (data)=>{
+ logger.info(data.toString());
+ });
+ childProcess.stderr?.on('data', (data)=>{
+ logger.error(data.toString());
+ });
+ });
+ }
+ export function getAztecBin() {
+ return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
+ }
+ /**
+ * Runs the Aztec binary
+ * @param args - The arguments to pass to the Aztec binary
+ * @param logger - The logger to use
+ * @param env - Optional environment variables to set for the process
+ * @returns The exit code of the Aztec binary
+ */ export function runAztecBin(args, logger, env) {
+ return runScript('node', [
+ getAztecBin(),
+ ...args
+ ], logger, env);
+ }
+ export function runProjectScript(script, args, logger, env) {
+ const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+ return runScript(scriptPath, args, logger, env);
+ }
  export async function startPortForward({ resource, namespace, containerPort, hostPort }) {
  const hostPortAsString = hostPort ? hostPort.toString() : '';
- logger.info(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
- const process = spawn('kubectl', [
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
+ const process1 = spawn('kubectl', [
  'port-forward',
  '-n',
  namespace,
@@ -85,61 +93,109 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  });
  let isResolved = false;
  const connected = new Promise((resolve)=>{
- process.stdout?.on('data', (data)=>{
+ process1.stdout?.on('data', (data)=>{
  const str = data.toString();
  if (!isResolved && str.includes('Forwarding from')) {
  isResolved = true;
- logger.info(str);
+ logger.debug(`Port forward for ${resource}: ${str}`);
  const port = str.search(/:\d+/);
  if (port === -1) {
  throw new Error('Port not found in port forward output');
  }
  const portNumber = parseInt(str.slice(port + 1));
- logger.info(`Port forward connected: ${portNumber}`);
- logger.info(`Port forward connected: ${portNumber}`);
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
  resolve(portNumber);
  } else {
  logger.silent(str);
  }
  });
- process.stderr?.on('data', (data)=>{
- logger.info(data.toString());
+ process1.stderr?.on('data', (data)=>{
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
  // It's a strange thing:
  // If we don't pipe stderr, then the port forwarding does not work.
  // Log to silent because this doesn't actually report errors,
  // just extremely verbose debug logs.
  logger.silent(data.toString());
  });
- process.on('close', ()=>{
+ process1.on('close', ()=>{
  if (!isResolved) {
  isResolved = true;
- logger.warn('Port forward closed before connection established');
+ logger.warn(`Port forward for ${resource} closed before connection established`);
  resolve(0);
  }
  });
- process.on('error', (error)=>{
- logger.error(`Port forward error: ${error}`);
+ process1.on('error', (error)=>{
+ logger.error(`Port forward for ${resource} error: ${error}`);
  resolve(0);
  });
- process.on('exit', (code)=>{
- logger.info(`Port forward exited with code ${code}`);
+ process1.on('exit', (code)=>{
+ logger.verbose(`Port forward for ${resource} exited with code ${code}`);
  resolve(0);
  });
  });
  const port = await connected;
  return {
- process,
+ process: process1,
  port
  };
  }
+ export function getExternalIP(namespace, serviceName) {
+ const { promise, resolve, reject } = promiseWithResolvers();
+ const process1 = spawn('kubectl', [
+ 'get',
+ 'service',
+ '-n',
+ namespace,
+ `${namespace}-${serviceName}`,
+ '--output',
+ "jsonpath='{.status.loadBalancer.ingress[0].ip}'"
+ ], {
+ stdio: 'pipe'
+ });
+ let ip = '';
+ process1.stdout.on('data', (data)=>{
+ ip += data;
+ });
+ process1.on('error', (err)=>{
+ reject(err);
+ });
+ process1.on('exit', ()=>{
+ // kubectl prints JSON. Remove the quotes
+ resolve(ip.replace(/"|'/g, ''));
+ });
+ return promise;
+ }
+ export function startPortForwardForRPC(namespace, index = 0) {
+ return startPortForward({
+ resource: `pod/${namespace}-rpc-aztec-node-${index}`,
+ namespace,
+ containerPort: 8080
+ });
+ }
+ export function startPortForwardForEthereum(namespace) {
+ return startPortForward({
+ resource: `services/${namespace}-eth-execution`,
+ namespace,
+ containerPort: 8545
+ });
+ }
  export async function deleteResourceByName({ resource, namespace, name, force = false }) {
  const command = `kubectl delete ${resource} ${name} -n ${namespace} --ignore-not-found=true --wait=true ${force ? '--force' : ''}`;
  logger.info(`command: ${command}`);
  const { stdout } = await execAsync(command);
  return stdout;
  }
- export async function deleteResourceByLabel({ resource, namespace, label }) {
- const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true`;
+ export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
+ try {
+ // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+ const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+ const regex = `(^|\\.)${escaped}(\\.|$)`;
+ await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
+ } catch (error) {
+ logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+ return '';
+ }
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${force ? '--force' : ''}`;
  logger.info(`command: ${command}`);
  const { stdout } = await execAsync(command);
  return stdout;
@@ -150,11 +206,40 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
  const { stdout } = await execAsync(command);
  return stdout;
  }
+ export async function waitForResourceByName({ resource, name, namespace, condition = 'Ready', timeout = '10m' }) {
+ const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+ logger.info(`command: ${command}`);
+ const { stdout } = await execAsync(command);
+ return stdout;
+ }
+ export async function waitForResourcesByName({ resource, names, namespace, condition = 'Ready', timeout = '10m' }) {
+ if (!names.length) {
+ throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+ }
+ // Wait all in parallel; if any fails, surface which one.
+ await Promise.all(names.map(async (name)=>{
+ try {
+ await waitForResourceByName({
+ resource,
+ name,
+ namespace,
+ condition,
+ timeout
+ });
+ } catch (err) {
+ throw new Error(`Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(err)}`);
+ }
+ }));
+ }
  export function getChartDir(spartanDir, chartName) {
  return path.join(spartanDir.trim(), chartName);
  }
+ function shellQuote(value) {
+ // Single-quote safe shell escaping: ' -> '\''
+ return `'${value.replace(/'/g, "'\\''")}'`;
+ }
  function valuesToArgs(values) {
- return Object.entries(values).map(([key, value])=>`--set ${key}=${value}`).join(' ');
+ return Object.entries(values).map(([key, value])=>typeof value === 'number' || typeof value === 'boolean' ? `--set ${key}=${value}` : `--set-string ${key}=${shellQuote(String(value))}`).join(' ');
  }
  function createHelmCommand({ instanceName, helmChartDir, namespace, valuesFile, timeout, values, reuseValues = false }) {
  const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -167,6 +252,57 @@ async function execHelmCommand(args) {
  const { stdout } = await execAsync(helmCommand);
  return stdout;
  }
+ async function getHelmReleaseStatus(instanceName, namespace) {
+ try {
+ const { stdout } = await execAsync(`helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`);
+ const parsed = JSON.parse(stdout);
+ const row = parsed.find((r)=>r.name === instanceName);
+ return row?.status;
+ } catch {
+ return undefined;
+ }
+ }
+ async function forceDeleteHelmReleaseRecord(instanceName, namespace, logger) {
+ const labelSelector = `owner=helm,name=${instanceName}`;
+ const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+ logger.warn(`Force deleting Helm release record: ${cmd}`);
+ await execAsync(cmd).catch(()=>undefined);
+ }
+ async function hasDeployedHelmRelease(instanceName, namespace) {
+ try {
+ const status = await getHelmReleaseStatus(instanceName, namespace);
+ return status?.toLowerCase() === 'deployed';
+ } catch {
+ return false;
+ }
+ }
+ export async function uninstallChaosMesh(instanceName, namespace, logger) {
+ // uninstall the helm chart if it exists
+ logger.info(`Uninstalling helm chart ${instanceName}`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+ // and delete the chaos-mesh resources created by this release
+ const deleteByLabel = async (resource)=>{
+ const args = {
+ resource,
+ namespace: namespace,
+ label: `app.kubernetes.io/instance=${instanceName}`
+ };
+ logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+ await deleteResourceByLabel(args).catch((e)=>{
+ logger.error(`Error deleting ${resource}: ${e}`);
+ logger.info(`Force deleting ${resource}`);
+ return deleteResourceByLabel({
+ ...args,
+ force: true
+ });
+ });
+ };
+ await deleteByLabel('podchaos');
+ await deleteByLabel('networkchaos');
+ await deleteByLabel('podnetworkchaos');
+ await deleteByLabel('workflows');
+ await deleteByLabel('workflownodes');
+ }
  /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
@@ -183,31 +319,14 @@ async function execHelmCommand(args) {
  * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
  * console.log(stdout);
  * ```
- */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, chaosMeshNamespace = 'chaos-mesh', timeout = '5m', clean = true, values = {}, logger }) {
+ */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, timeout = '10m', clean = true, values = {}, logger }) {
  if (clean) {
- // uninstall the helm chart if it exists
- logger.info(`Uninstalling helm chart ${instanceName}`);
- await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
- // and delete the podchaos resource
- const deleteArgs = {
- resource: 'podchaos',
- namespace: chaosMeshNamespace,
- name: `${targetNamespace}-${instanceName}`
- };
- logger.info(`Deleting podchaos resource`);
- await deleteResourceByName(deleteArgs).catch((e)=>{
- logger.error(`Error deleting podchaos resource: ${e}`);
- logger.info(`Force deleting podchaos resource`);
- return deleteResourceByName({
- ...deleteArgs,
- force: true
- });
- });
+ await uninstallChaosMesh(instanceName, targetNamespace, logger);
  }
  return execHelmCommand({
  instanceName,
  helmChartDir,
- namespace: chaosMeshNamespace,
+ namespace: targetNamespace,
  valuesFile,
  timeout,
  values: {
@@ -228,69 +347,85 @@ export function applyProverFailure({ namespace, spartanDir, durationSeconds, log
  logger
  });
  }
- export function applyProverKill({ namespace, spartanDir, logger }) {
+ export function applyValidatorFailure({ namespace, spartanDir, logger, values, instanceName }) {
+ return installChaosMeshChart({
+ instanceName: instanceName ?? 'validator-failure',
+ targetNamespace: namespace,
+ valuesFile: 'validator-failure.yaml',
+ helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ values,
+ logger
+ });
+ }
+ export function applyProverKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ chaosMeshNamespace: namespace,
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyProverBrokerKill({ namespace, spartanDir, logger }) {
+ export function applyProverBrokerKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-broker-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-broker-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyBootNodeFailure({ namespace, spartanDir, durationSeconds, logger }) {
+ export function applyBootNodeFailure({ instanceName = 'boot-node-failure', namespace, spartanDir, durationSeconds, logger, values }) {
  return installChaosMeshChart({
- instanceName: 'boot-node-failure',
+ instanceName,
  targetNamespace: namespace,
  valuesFile: 'boot-node-failure.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  values: {
- 'bootNodeFailure.duration': `${durationSeconds}s`
+ 'bootNodeFailure.duration': `${durationSeconds}s`,
+ ...values ?? {}
  },
  logger
  });
  }
- export function applyValidatorKill({ namespace, spartanDir, logger }) {
+ export function applyValidatorKill({ instanceName = 'validator-kill', namespace, spartanDir, logger, values, clean = true }) {
  return installChaosMeshChart({
- instanceName: 'validator-kill',
+ instanceName: instanceName ?? 'validator-kill',
  targetNamespace: namespace,
  valuesFile: 'validator-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
- logger
+ clean,
+ logger,
+ values
  });
  }
- export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger }) {
+ export function applyNetworkShaping({ instanceName = 'network-shaping', valuesFile, namespace, spartanDir, logger }) {
  return installChaosMeshChart({
- instanceName: 'network-shaping',
+ instanceName,
  targetNamespace: namespace,
  valuesFile,
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  logger
  });
  }
- export async function awaitL2BlockNumber(rollupCheatCodes, blockNumber, timeoutSeconds, logger) {
- logger.info(`Waiting for L2 Block ${blockNumber}`);
+ export async function awaitCheckpointNumber(rollupCheatCodes, checkpointNumber, timeoutSeconds, logger) {
+ logger.info(`Waiting for checkpoint ${checkpointNumber}`);
  let tips = await rollupCheatCodes.getTips();
  const endTime = Date.now() + timeoutSeconds * 1000;
- while(tips.pending < blockNumber && Date.now() < endTime){
- logger.info(`At L2 Block ${tips.pending}`);
+ while(tips.pending < checkpointNumber && Date.now() < endTime){
+ logger.info(`At checkpoint ${tips.pending}`);
  await sleep(1000);
  tips = await rollupCheatCodes.getTips();
  }
- if (tips.pending < blockNumber) {
- throw new Error(`Timeout waiting for L2 Block ${blockNumber}, only reached ${tips.pending}`);
+ if (tips.pending < checkpointNumber) {
+ throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
  } else {
- logger.info(`Reached L2 Block ${tips.pending}`);
+ logger.info(`Reached checkpoint ${tips.pending}`);
  }
  }
  export async function restartBot(namespace, logger) {
@@ -298,16 +433,212 @@ export async function restartBot(namespace, logger) {
  await deleteResourceByLabel({
  resource: 'pods',
  namespace,
- label: 'app=bot'
+ label: 'app.kubernetes.io/name=bot'
  });
  await sleep(10 * 1000);
+ // Some bot images may take time to report Ready due to heavy boot-time proving.
+ // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
  await waitForResourceByLabel({
  resource: 'pods',
  namespace,
- label: 'app=bot'
+ label: 'app.kubernetes.io/name=bot',
+ condition: 'PodReadyToStartContainers'
  });
  logger.info(`Bot restarted`);
  }
+ /**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */ export async function installTransferBot({ namespace, spartanDir, logger, replicas = 1, txIntervalSeconds = 10, followChain = 'PENDING', mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk', mnemonicStartIndex, botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01', nodeUrl, timeout = '15m', reuseValues = true, aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12) }) {
+ const instanceName = `${namespace}-bot-transfers`;
+ const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+ const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+ logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+ const values = {
+ 'bot.replicaCount': replicas,
+ 'bot.txIntervalSeconds': txIntervalSeconds,
+ 'bot.followChain': followChain,
+ 'bot.botPrivateKey': botPrivateKey,
+ 'bot.nodeUrl': resolvedNodeUrl,
+ 'bot.mnemonic': mnemonic,
+ 'bot.feePaymentMethod': 'fee_juice',
+ 'aztec.slotDuration': aztecSlotDuration,
+ // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+ // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+ 'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+ // Provide L1 execution RPC for bridging fee juice
+ 'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+ // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+ 'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+ // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+ // can be installed by users without cluster-scoped RBAC permissions.
+ 'bot.rbac.create': false,
+ 'bot.serviceAccount.create': false,
+ 'bot.serviceAccount.name': 'default'
+ };
+ // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+ if (mnemonicStartIndex === undefined) {
+ values['bot.mnemonicStartIndex'] = 0;
+ }
+ // Also pass a funded private key directly if available
+ if (process.env.FUNDING_PRIVATE_KEY) {
+ values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+ }
+ // Align bot image with the running network image: prefer env var, else detect from a validator pod
+ let repositoryFromEnv;
+ let tagFromEnv;
+ const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+ if (aztecDockerImage && aztecDockerImage.includes(':')) {
+ const lastColon = aztecDockerImage.lastIndexOf(':');
+ repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+ tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+ }
+ let repository = repositoryFromEnv;
+ let tag = tagFromEnv;
+ if (!repository || !tag) {
+ try {
+ const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
+ const image = stdout.trim().replace(/^'|'$/g, '');
+ if (image && image.includes(':')) {
+ const lastColon = image.lastIndexOf(':');
+ repository = image.slice(0, lastColon);
+ tag = image.slice(lastColon + 1);
+ }
+ } catch (err) {
+ logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+ }
+ }
+ if (repository && tag) {
+ values['global.aztecImage.repository'] = repository;
+ values['global.aztecImage.tag'] = tag;
+ }
+ if (mnemonicStartIndex !== undefined) {
+ values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+ }
+ // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+ // `helm upgrade --install` can error with "has no deployed releases".
+ // In that case, clear the release record and do a clean install.
+ const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+ logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(()=>undefined);
+ // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+ const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+ await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+ }
+ }
+ // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+ // Only reuse values when we have a deployed release to reuse from.
+ const effectiveReuseValues = reuseValues && await hasDeployedHelmRelease(instanceName, namespace);
+ await execHelmCommand({
+ instanceName,
+ helmChartDir,
+ namespace,
+ valuesFile: undefined,
+ timeout,
+ values: values,
+ reuseValues: effectiveReuseValues
+ });
+ if (replicas > 0) {
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: 'app.kubernetes.io/name=bot',
+ condition: 'PodReadyToStartContainers'
+ });
+ }
+ }
+ /**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */ export async function uninstallTransferBot(namespace, logger) {
+ const instanceName = `${namespace}-bot-transfers`;
+ logger.info(`Uninstalling transfer bot release ${instanceName}`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+ // Ensure any leftover pods are removed
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: 'app.kubernetes.io/name=bot'
+ }).catch(()=>undefined);
+ }
+ /**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */ export async function setValidatorTxDrop({ namespace, enabled, probability, logger }) {
+ const drop = enabled ? 'true' : 'false';
+ const prob = String(probability);
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
+ ];
+ let updated = false;
+ for (const selector of selectors){
+ try {
+ const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+ const names = list.stdout.split('\n').map((s)=>s.trim()).filter(Boolean);
+ if (names.length === 0) {
+ continue;
+ }
+ const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+ logger.info(`command: ${cmd}`);
+ await execAsync(cmd);
+ updated = true;
+ } catch (e) {
+ logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+ }
+ }
+ if (!updated) {
+ logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+ return;
+ }
+ // Restart validator pods to ensure env vars take effect and wait for readiness
+ await restartValidators(namespace, logger);
+ }
+ export async function restartValidators(namespace, logger) {
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
+ ];
+ let any = false;
+ for (const selector of selectors){
+ try {
+ const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+ if (!stdout || stdout.trim().length === 0) {
+ continue;
+ }
+ any = true;
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: selector
+ });
+ } catch (e) {
+ logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+ }
+ }
+ if (!any) {
+ logger.warn(`No validator pods found to restart in ${namespace}.`);
+ return;
+ }
+ // Wait for either label to be Ready
+ for (const selector of selectors){
+ try {
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace,
+ label: selector
+ });
+ return;
+ } catch {
+ // try next
+ }
+ }
+ logger.warn(`Validator pods did not report Ready; continuing.`);
+ }
  export async function enableValidatorDynamicBootNode(instanceName, namespace, spartanDir, logger) {
  logger.info(`Enabling validator dynamic boot node`);
  await execHelmCommand({
@@ -323,55 +654,127 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
  });
  logger.info(`Validator dynamic boot node enabled`);
  }
- export async function runAlertCheck(config, alerts, logger) {
- if (isK8sConfig(config)) {
- const { process, port } = await startPortForward({
- resource: `svc/metrics-grafana`,
- namespace: 'metrics',
- containerPort: config.CONTAINER_METRICS_PORT
- });
- const alertChecker = new AlertChecker(logger, {
- grafanaEndpoint: `http://localhost:${port}${config.METRICS_API_PATH}`,
- grafanaCredentials: `admin:${config.GRAFANA_PASSWORD}`
- });
- await alertChecker.runAlertCheck(alerts);
- process.kill();
- } else {
- logger.info('Not running alert check in non-k8s environment');
+ export async function getSequencers(namespace) {
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app.kubernetes.io/component=sequencer-node',
+ 'app=validator'
+ ];
+ for (const selector of selectors){
+ try {
+ const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+ const { stdout } = await execAsync(command);
+ const sequencers = stdout.split(' ').map((s)=>s.trim()).filter(Boolean);
+ if (sequencers.length > 0) {
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+ return sequencers;
+ }
+ } catch {
+ // try next selector
+ }
  }
+ // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+ throw new Error(`No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`);
  }
- export async function updateSequencerConfig(url, config) {
- const node = createAztecNodeClient(url);
- await node.setConfig(config);
+ export function updateSequencersConfig(env, config) {
+ return withSequencersAdmin(env, async (client)=>{
+ await client.setConfig(config);
+ return client.getConfig();
+ });
  }
- export async function getSequencers(namespace) {
- const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
- const { stdout } = await execAsync(command);
- return stdout.split(' ');
+ export function getSequencersConfig(env) {
+ return withSequencersAdmin(env, (client)=>client.getConfig());
  }
- export async function updateK8sSequencersConfig(args) {
- const { containerPort, namespace, config } = args;
+ export async function withSequencersAdmin(env, fn) {
+ const adminContainerPort = 8880;
+ const namespace = env.NAMESPACE;
  const sequencers = await getSequencers(namespace);
+ const results = [];
  for (const sequencer of sequencers){
- const { process, port } = await startPortForward({
+ const { process: process1, port } = await startPortForward({
  resource: `pod/${sequencer}`,
  namespace,
- containerPort
+ containerPort: adminContainerPort
  });
  const url = `http://localhost:${port}`;
- await updateSequencerConfig(url, config);
- process.kill();
+ await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward node admin port', makeBackoff([
+ 1,
+ 1,
+ 2,
+ 6
+ ]), logger, true);
+ const client = createAztecNodeAdminClient(url);
+ results.push(await fn(client));
+ process1.kill();
  }
+ return results;
  }
- export async function updateSequencersConfig(env, config) {
- if (isK8sConfig(env)) {
- await updateK8sSequencersConfig({
- containerPort: env.CONTAINER_NODE_PORT,
- namespace: env.NAMESPACE,
- config
+ /**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */ export async function getPublicViemClient(env, /** If set, will push the new process into it */ processes) {
+ const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+ if (CREATE_ETH_DEVNET) {
+ logger.info(`Creating port forward to eth execution node`);
+ const { process: process1, port } = await startPortForward({
+ resource: `svc/${NAMESPACE}-eth-execution`,
+ namespace: NAMESPACE,
+ containerPort: 8545
  });
+ const url = `http://127.0.0.1:${port}`;
+ const client = createPublicClient({
+ transport: fallback([
+ http(url, {
+ batch: false
+ })
+ ])
+ });
+ if (processes) {
+ processes.push(process1);
+ }
+ return {
+ url,
+ client,
+ process: process1
+ };
  } else {
- await updateSequencerConfig(env.NODE_URL, config);
+ logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+ if (!L1_RPC_URLS_JSON) {
+ throw new Error(`L1_RPC_URLS_JSON is not defined`);
+ }
+ const client = createPublicClient({
+ transport: fallback([
+ http(L1_RPC_URLS_JSON, {
+ batch: false
+ })
+ ])
+ });
+ return {
+ url: L1_RPC_URLS_JSON,
+ client
+ };
+ }
+ }
+ /** Queries an Aztec node for the L1 deployment addresses */ export async function getL1DeploymentAddresses(env) {
+ let forwardProcess;
+ try {
+ const [sequencer] = await getSequencers(env.NAMESPACE);
+ const { process: process1, port } = await startPortForward({
+ resource: `pod/${sequencer}`,
+ namespace: env.NAMESPACE,
+ containerPort: 8080
+ });
+ forwardProcess = process1;
+ const url = `http://127.0.0.1:${port}`;
+ const node = createAztecNodeClient(url);
+ return await retry(()=>node.getNodeInfo().then((i)=>i.l1ContractAddresses), 'get node info', makeBackoff([
+ 1,
+ 3,
+ 6
+ ]), logger);
+ } finally{
+ forwardProcess?.kill();
  }
  }
  /**
@@ -443,3 +846,47 @@ export async function updateSequencersConfig(env, config) {
  label: 'app=pxe'
  });
  }
+ /**
+ * Returns the absolute path to the git repository root
+ */ export function getGitProjectRoot() {
+ try {
+ const rootDir = execSync('git rev-parse --show-toplevel', {
+ encoding: 'utf-8',
+ stdio: [
+ 'ignore',
+ 'pipe',
+ 'ignore'
+ ]
+ }).trim();
+ return rootDir;
+ } catch (error) {
+ throw new Error(`Failed to determine git project root: ${error}`);
+ }
+ }
+ /** Returns a client to the RPC of the given sequencer (defaults to first) */ export async function getNodeClient(env, index = 0) {
+ const namespace = env.NAMESPACE;
+ const containerPort = 8080;
+ const sequencers = await getSequencers(namespace);
+ const sequencer = sequencers[index];
+ if (!sequencer) {
+ throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
+ }
+ const { process: process1, port } = await startPortForward({
+ resource: `pod/${sequencer}`,
+ namespace,
+ containerPort
+ });
+ const url = `http://localhost:${port}`;
+ await retry(()=>fetch(`${url}/status`).then((res)=>res.status === 200), 'forward port', makeBackoff([
+ 1,
+ 1,
+ 2,
+ 6
+ ]), logger, true);
+ const client = createAztecNodeClient(url);
+ return {
+ node: client,
+ port,
+ process: process1
+ };
+ }
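
For orientation, here is a minimal sketch of how a test might drive the reworked `spartan/utils` helpers surfaced in this diff (`setupEnvironment`, `getNodeClient`, `updateSequencersConfig`). It is an illustration only, not code from the package: the deep-import specifier and the `minTxsPerBlock` config field are assumptions.

```ts
// Hypothetical deep import into the published package layout (dest/spartan/utils.js).
import { setupEnvironment, getNodeClient, updateSequencersConfig } from '@aztec/end-to-end/dest/spartan/utils.js';

async function main() {
  // Parse config from the environment. Every field now has a default
  // (NAMESPACE -> 'scenario', AZTEC_SLOT_DURATION -> 24, ...), so no K8S discriminator is needed.
  const env = setupEnvironment(process.env);

  // Port-forward to the first sequencer pod (RPC port 8080) and build a node client.
  const { node, process: forward } = await getNodeClient(env);
  const { l1ContractAddresses } = await node.getNodeInfo();
  console.log('L1 contracts:', l1ContractAddresses);

  // Push a config change to every sequencer via the admin port (8880);
  // the field name below is an assumption for illustration.
  await updateSequencersConfig(env, { minTxsPerBlock: 1 });

  forward.kill();
}

void main();
```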