@aztec/end-to-end 0.0.1-commit.24de95ac → 0.0.1-commit.3469e52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167)
  1. package/dest/bench/client_flows/benchmark.d.ts +3 -2
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
  3. package/dest/bench/client_flows/benchmark.js +21 -1
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +21 -15
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
  6. package/dest/bench/client_flows/client_flows_benchmark.js +116 -121
  7. package/dest/bench/client_flows/config.d.ts +1 -1
  8. package/dest/bench/client_flows/data_extractor.d.ts +1 -1
  9. package/dest/bench/client_flows/data_extractor.js +7 -27
  10. package/dest/bench/utils.d.ts +5 -5
  11. package/dest/bench/utils.d.ts.map +1 -1
  12. package/dest/bench/utils.js +18 -11
  13. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +6 -7
  14. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  15. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +98 -113
  16. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +19 -13
  17. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  18. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +91 -70
  19. package/dest/e2e_deploy_contract/deploy_test.d.ts +5 -4
  20. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  21. package/dest/e2e_deploy_contract/deploy_test.js +18 -13
  22. package/dest/e2e_epochs/epochs_test.d.ts +11 -9
  23. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  24. package/dest/e2e_epochs/epochs_test.js +19 -16
  25. package/dest/e2e_fees/bridging_race.notest.d.ts +1 -1
  26. package/dest/e2e_fees/bridging_race.notest.js +4 -6
  27. package/dest/e2e_fees/fees_test.d.ts +20 -16
  28. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  29. package/dest/e2e_fees/fees_test.js +127 -139
  30. package/dest/e2e_l1_publisher/write_json.d.ts +3 -3
  31. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
  32. package/dest/e2e_l1_publisher/write_json.js +23 -18
  33. package/dest/e2e_multi_validator/utils.d.ts +1 -1
  34. package/dest/e2e_multi_validator/utils.js +1 -1
  35. package/dest/e2e_nested_contract/nested_contract_test.d.ts +6 -9
  36. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  37. package/dest/e2e_nested_contract/nested_contract_test.js +32 -39
  38. package/dest/e2e_p2p/inactivity_slash_test.d.ts +3 -3
  39. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -1
  40. package/dest/e2e_p2p/inactivity_slash_test.js +7 -6
  41. package/dest/e2e_p2p/p2p_network.d.ts +225 -18
  42. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  43. package/dest/e2e_p2p/p2p_network.js +117 -110
  44. package/dest/e2e_p2p/shared.d.ts +6 -6
  45. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  46. package/dest/e2e_p2p/shared.js +6 -5
  47. package/dest/e2e_token_contract/token_contract_test.d.ts +16 -9
  48. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  49. package/dest/e2e_token_contract/token_contract_test.js +90 -92
  50. package/dest/fixtures/e2e_prover_test.d.ts +12 -18
  51. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  52. package/dest/fixtures/e2e_prover_test.js +98 -109
  53. package/dest/fixtures/fixtures.d.ts +2 -3
  54. package/dest/fixtures/fixtures.d.ts.map +1 -1
  55. package/dest/fixtures/fixtures.js +2 -3
  56. package/dest/fixtures/get_acvm_config.d.ts +1 -1
  57. package/dest/fixtures/get_acvm_config.js +1 -1
  58. package/dest/fixtures/get_bb_config.d.ts +1 -1
  59. package/dest/fixtures/get_bb_config.d.ts.map +1 -1
  60. package/dest/fixtures/index.d.ts +1 -1
  61. package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
  62. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  63. package/dest/fixtures/l1_to_l2_messaging.js +2 -2
  64. package/dest/fixtures/logging.d.ts +1 -1
  65. package/dest/fixtures/setup.d.ts +216 -0
  66. package/dest/fixtures/setup.d.ts.map +1 -0
  67. package/dest/fixtures/setup.js +684 -0
  68. package/dest/fixtures/setup_p2p_test.d.ts +4 -4
  69. package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
  70. package/dest/fixtures/setup_p2p_test.js +18 -10
  71. package/dest/fixtures/token_utils.d.ts +5 -2
  72. package/dest/fixtures/token_utils.d.ts.map +1 -1
  73. package/dest/fixtures/token_utils.js +7 -4
  74. package/dest/fixtures/utils.d.ts +5 -192
  75. package/dest/fixtures/utils.d.ts.map +1 -1
  76. package/dest/fixtures/utils.js +4 -648
  77. package/dest/fixtures/web3signer.d.ts +1 -1
  78. package/dest/fixtures/web3signer.js +1 -1
  79. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  80. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  81. package/dest/fixtures/with_telemetry_utils.js +2 -2
  82. package/dest/index.d.ts +1 -1
  83. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  84. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  85. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  86. package/dest/quality_of_service/prometheus_client.d.ts +38 -0
  87. package/dest/quality_of_service/prometheus_client.d.ts.map +1 -0
  88. package/dest/quality_of_service/prometheus_client.js +67 -0
  89. package/dest/shared/cross_chain_test_harness.d.ts +5 -3
  90. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  91. package/dest/shared/cross_chain_test_harness.js +3 -3
  92. package/dest/shared/gas_portal_test_harness.d.ts +2 -2
  93. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  94. package/dest/shared/gas_portal_test_harness.js +1 -1
  95. package/dest/shared/index.d.ts +2 -2
  96. package/dest/shared/index.d.ts.map +1 -1
  97. package/dest/shared/jest_setup.d.ts +1 -1
  98. package/dest/shared/submit-transactions.d.ts +1 -1
  99. package/dest/shared/submit-transactions.d.ts.map +1 -1
  100. package/dest/shared/uniswap_l1_l2.d.ts +3 -27
  101. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  102. package/dest/shared/uniswap_l1_l2.js +43 -23
  103. package/dest/simulators/index.d.ts +1 -1
  104. package/dest/simulators/lending_simulator.d.ts +2 -2
  105. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  106. package/dest/simulators/lending_simulator.js +5 -3
  107. package/dest/simulators/token_simulator.d.ts +1 -1
  108. package/dest/simulators/token_simulator.d.ts.map +1 -1
  109. package/dest/spartan/setup_test_wallets.d.ts +8 -5
  110. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  111. package/dest/spartan/setup_test_wallets.js +45 -10
  112. package/dest/spartan/tx_metrics.d.ts +52 -0
  113. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  114. package/dest/spartan/tx_metrics.js +248 -0
  115. package/dest/spartan/utils.d.ts +66 -24
  116. package/dest/spartan/utils.d.ts.map +1 -1
  117. package/dest/spartan/utils.js +326 -133
  118. package/package.json +43 -40
  119. package/src/bench/client_flows/benchmark.ts +24 -2
  120. package/src/bench/client_flows/client_flows_benchmark.ts +157 -162
  121. package/src/bench/client_flows/data_extractor.ts +6 -28
  122. package/src/bench/utils.ts +22 -14
  123. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +107 -142
  124. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +140 -124
  125. package/src/e2e_deploy_contract/deploy_test.ts +22 -15
  126. package/src/e2e_epochs/epochs_test.ts +39 -25
  127. package/src/e2e_fees/bridging_race.notest.ts +4 -7
  128. package/src/e2e_fees/fees_test.ts +180 -215
  129. package/src/e2e_l1_publisher/write_json.ts +26 -20
  130. package/src/e2e_multi_validator/utils.ts +1 -1
  131. package/src/e2e_nested_contract/nested_contract_test.ts +35 -55
  132. package/src/e2e_p2p/inactivity_slash_test.ts +10 -9
  133. package/src/e2e_p2p/p2p_network.ts +175 -180
  134. package/src/e2e_p2p/shared.ts +15 -7
  135. package/src/e2e_token_contract/token_contract_test.ts +105 -118
  136. package/src/fixtures/e2e_prover_test.ts +120 -153
  137. package/src/fixtures/fixtures.ts +2 -5
  138. package/src/fixtures/get_acvm_config.ts +1 -1
  139. package/src/fixtures/l1_to_l2_messaging.ts +4 -2
  140. package/src/fixtures/setup.ts +1010 -0
  141. package/src/fixtures/setup_p2p_test.ts +23 -9
  142. package/src/fixtures/token_utils.ts +4 -4
  143. package/src/fixtures/utils.ts +27 -947
  144. package/src/fixtures/web3signer.ts +1 -1
  145. package/src/fixtures/with_telemetry_utils.ts +2 -2
  146. package/src/guides/up_quick_start.sh +1 -1
  147. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +1 -1
  148. package/src/quality_of_service/prometheus_client.ts +113 -0
  149. package/src/shared/cross_chain_test_harness.ts +6 -9
  150. package/src/shared/gas_portal_test_harness.ts +2 -2
  151. package/src/shared/index.ts +1 -1
  152. package/src/shared/uniswap_l1_l2.ts +53 -67
  153. package/src/simulators/lending_simulator.ts +6 -4
  154. package/src/spartan/DEVELOP.md +7 -0
  155. package/src/spartan/setup_test_wallets.ts +56 -13
  156. package/src/spartan/tx_metrics.ts +231 -0
  157. package/src/spartan/utils.ts +379 -75
  158. package/dest/fixtures/setup_l1_contracts.d.ts +0 -6
  159. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  160. package/dest/fixtures/setup_l1_contracts.js +0 -17
  161. package/dest/fixtures/snapshot_manager.d.ts +0 -95
  162. package/dest/fixtures/snapshot_manager.d.ts.map +0 -1
  163. package/dest/fixtures/snapshot_manager.js +0 -505
  164. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  165. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  166. package/src/fixtures/setup_l1_contracts.ts +0 -26
  167. package/src/fixtures/snapshot_manager.ts +0 -665
@@ -1,4 +1,5 @@
  import { createLogger } from '@aztec/aztec.js/log';
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
  import { makeBackoff, retry } from '@aztec/foundation/retry';
  import { schemas } from '@aztec/foundation/schemas';
  import { sleep } from '@aztec/foundation/sleep';
@@ -17,7 +18,9 @@ const testConfigSchema = z.object({
  L1_RPC_URLS_JSON: z.string().optional(),
  L1_ACCOUNT_MNEMONIC: z.string().optional(),
  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
- AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5)
+ AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
+ AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
+ AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2)
  });
  export function setupEnvironment(env) {
  const config = testConfigSchema.parse(env);
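The new schema fields follow the same zod pattern as the existing ones: string env vars are coerced to numbers, with defaults applied when unset. A minimal standalone sketch of that behavior (the env value shown is hypothetical):

```ts
import { z } from 'zod';

// Sketch of the coerce-with-default pattern used by testConfigSchema above.
const schema = z.object({
  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
  AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
  AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2),
});

// '36' (a string, as in process.env) is coerced to 36; unset keys get defaults:
const config = schema.parse({ AZTEC_SLOT_DURATION: '36' });
// => { AZTEC_SLOT_DURATION: 36, AZTEC_EPOCH_DURATION: 32,
//      AZTEC_PROOF_SUBMISSION_WINDOW: 5, AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: 2 }
```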
@@ -90,7 +93,7 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  ]
  });
  let isResolved = false;
- const connected = new Promise((resolve)=>{
+ const connected = new Promise((resolve, reject)=>{
  process1.stdout?.on('data', (data)=>{
  const str = data.toString();
  if (!isResolved && str.includes('Forwarding from')) {
@@ -98,7 +101,8 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  logger.debug(`Port forward for ${resource}: ${str}`);
  const port = str.search(/:\d+/);
  if (port === -1) {
- throw new Error('Port not found in port forward output');
+ reject(new Error('Port not found in port forward output'));
+ return;
  }
  const portNumber = parseInt(str.slice(port + 1));
  logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
@@ -118,17 +122,26 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  process1.on('close', ()=>{
  if (!isResolved) {
  isResolved = true;
- logger.warn(`Port forward for ${resource} closed before connection established`);
- resolve(0);
+ const msg = `Port forward for ${resource} closed before connection established`;
+ logger.warn(msg);
+ reject(new Error(msg));
  }
  });
  process1.on('error', (error)=>{
- logger.error(`Port forward for ${resource} error: ${error}`);
- resolve(0);
+ if (!isResolved) {
+ isResolved = true;
+ const msg = `Port forward for ${resource} error: ${error}`;
+ logger.error(msg);
+ reject(new Error(msg));
+ }
  });
  process1.on('exit', (code)=>{
- logger.verbose(`Port forward for ${resource} exited with code ${code}`);
- resolve(0);
+ if (!isResolved) {
+ isResolved = true;
+ const msg = `Port forward for ${resource} exited with code ${code}`;
+ logger.verbose(msg);
+ reject(new Error(msg));
+ }
  });
  });
  const port = await connected;
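With this change the `connected` promise rejects on early close, spawn error, or premature exit instead of resolving with port 0, so `startPortForward` callers now see a thrown error rather than a bogus port. A hedged sketch of a caller adapted to the new contract (the retry loop is illustrative, not part of the package):

```ts
// Hypothetical caller: the port-forward promise can now reject, so retry a few
// times before propagating the failure to the test.
async function forwardRpcWithRetry(namespace: string, attempts = 3): Promise<number> {
  let lastError: unknown;
  for (let i = 0; i < attempts; i++) {
    try {
      const { port } = await startPortForwardForRPC(namespace);
      return port; // previously a failed forward "succeeded" with port 0
    } catch (err) {
      lastError = err;
    }
  }
  throw lastError;
}
```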
@@ -137,9 +150,42 @@ export async function startPortForward({ resource, namespace, containerPort, hos
  port
  };
  }
- export function startPortForwardForRPC(namespace) {
+ export function getExternalIP(namespace, serviceName) {
+ const { promise, resolve, reject } = promiseWithResolvers();
+ const process1 = spawn('kubectl', [
+ 'get',
+ 'service',
+ '-n',
+ namespace,
+ `${namespace}-${serviceName}`,
+ '--output',
+ "jsonpath='{.status.loadBalancer.ingress[0].ip}'"
+ ], {
+ stdio: 'pipe'
+ });
+ let ip = '';
+ process1.stdout.on('data', (data)=>{
+ ip += data;
+ });
+ process1.on('error', (err)=>{
+ reject(err);
+ });
+ process1.on('exit', ()=>{
+ // kubectl prints JSON. Remove the quotes
+ resolve(ip.replace(/"|'/g, ''));
+ });
+ return promise;
+ }
+ export function startPortForwardForPrometeheus(namespace) {
  return startPortForward({
- resource: `services/${namespace}-rpc-aztec-node`,
+ resource: `svc/${namespace}-prometheus-server`,
+ namespace,
+ containerPort: 80
+ });
+ }
+ export function startPortForwardForRPC(namespace, index = 0) {
+ return startPortForward({
+ resource: `pod/${namespace}-rpc-aztec-node-${index}`,
  namespace,
  containerPort: 8080
  });
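`getExternalIP` resolves a service's LoadBalancer IP via `kubectl get service`, while the reworked helpers port-forward to the Prometheus service and to individual RPC pods (the new `index` parameter selects a replica). A hedged usage sketch, assuming these helpers are imported from this module:

```ts
// Hypothetical test setup against a deployment in namespace 'smoke'.
const namespace = 'smoke';

// External LoadBalancer IP of the RPC service (empty string if none is assigned).
const ip = await getExternalIP(namespace, 'rpc-aztec-node');

// Port-forward to the second RPC pod; the result includes the local port
// (other fields of the startPortForward result are elided here).
const { port } = await startPortForwardForRPC(namespace, 1);
const rpcUrl = `http://127.0.0.1:${port}`;

// Prometheus is exposed the same way, on container port 80.
const { port: promPort } = await startPortForwardForPrometeheus(namespace);
```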
@@ -158,9 +204,11 @@ export async function deleteResourceByName({ resource, namespace, name, force =
  return stdout;
  }
  export async function deleteResourceByLabel({ resource, namespace, label, timeout = '5m', force = false }) {
- // Check if the resource type exists before attempting to delete
  try {
- await execAsync(`kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`);
+ // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
+ const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
+ const regex = `(^|\\.)${escaped}(\\.|$)`;
+ await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
  } catch (error) {
  logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
  return '';
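The old check grepped for an exact name, which missed group-qualified resources such as Chaos Mesh CRDs. The new pattern matches the resource name as a dot-delimited segment; a quick sketch of what it accepts and rejects:

```ts
// Reproduction of the pattern built above, for resource = 'podchaos'.
const escaped = 'podchaos'.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
const regex = new RegExp(`(^|\\.)${escaped}(\\.|$)`);

regex.test('podchaos');                // true: plain name
regex.test('podchaos.chaos-mesh.org'); // true: group-qualified name
regex.test('pods');                    // false: no prefix false-positives
regex.test('mypodchaos');              // false: must start a name segment
```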
@@ -176,6 +224,31 @@ export async function waitForResourceByLabel({ resource, label, namespace, condi
  const { stdout } = await execAsync(command);
  return stdout;
  }
+ export async function waitForResourceByName({ resource, name, namespace, condition = 'Ready', timeout = '10m' }) {
+ const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
+ logger.info(`command: ${command}`);
+ const { stdout } = await execAsync(command);
+ return stdout;
+ }
+ export async function waitForResourcesByName({ resource, names, namespace, condition = 'Ready', timeout = '10m' }) {
+ if (!names.length) {
+ throw new Error(`No ${resource} names provided to waitForResourcesByName`);
+ }
+ // Wait all in parallel; if any fails, surface which one.
+ await Promise.all(names.map(async (name)=>{
+ try {
+ await waitForResourceByName({
+ resource,
+ name,
+ namespace,
+ condition,
+ timeout
+ });
+ } catch (err) {
+ throw new Error(`Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(err)}`);
+ }
+ }));
+ }
  export function getChartDir(spartanDir, chartName) {
  return path.join(spartanDir.trim(), chartName);
  }
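`waitForResourcesByName` complements the existing label-based wait when pod names are known up front, and its error names the pod that failed. A hedged usage sketch (the names and namespace are hypothetical):

```ts
// Hypothetical: block until a known set of validator pods is Ready, and get a
// descriptive error naming the first pod that times out.
await waitForResourcesByName({
  resource: 'pods',
  names: ['smoke-validator-0', 'smoke-validator-1', 'smoke-validator-2'],
  namespace: 'smoke',
  condition: 'Ready',
  timeout: '5m',
});
```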
@@ -197,6 +270,57 @@ async function execHelmCommand(args) {
  const { stdout } = await execAsync(helmCommand);
  return stdout;
  }
+ async function getHelmReleaseStatus(instanceName, namespace) {
+ try {
+ const { stdout } = await execAsync(`helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`);
+ const parsed = JSON.parse(stdout);
+ const row = parsed.find((r)=>r.name === instanceName);
+ return row?.status;
+ } catch {
+ return undefined;
+ }
+ }
+ async function forceDeleteHelmReleaseRecord(instanceName, namespace, logger) {
+ const labelSelector = `owner=helm,name=${instanceName}`;
+ const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
+ logger.warn(`Force deleting Helm release record: ${cmd}`);
+ await execAsync(cmd).catch(()=>undefined);
+ }
+ async function hasDeployedHelmRelease(instanceName, namespace) {
+ try {
+ const status = await getHelmReleaseStatus(instanceName, namespace);
+ return status?.toLowerCase() === 'deployed';
+ } catch {
+ return false;
+ }
+ }
+ export async function uninstallChaosMesh(instanceName, namespace, logger) {
+ // uninstall the helm chart if it exists
+ logger.info(`Uninstalling helm chart ${instanceName}`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+ // and delete the chaos-mesh resources created by this release
+ const deleteByLabel = async (resource)=>{
+ const args = {
+ resource,
+ namespace: namespace,
+ label: `app.kubernetes.io/instance=${instanceName}`
+ };
+ logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+ await deleteResourceByLabel(args).catch((e)=>{
+ logger.error(`Error deleting ${resource}: ${e}`);
+ logger.info(`Force deleting ${resource}`);
+ return deleteResourceByLabel({
+ ...args,
+ force: true
+ });
+ });
+ };
+ await deleteByLabel('podchaos');
+ await deleteByLabel('networkchaos');
+ await deleteByLabel('podnetworkchaos');
+ await deleteByLabel('workflows');
+ await deleteByLabel('workflownodes');
+ }
  /**
  * Installs a Helm chart with the given parameters.
  * @param instanceName - The name of the Helm chart instance.
@@ -213,35 +337,14 @@ async function execHelmCommand(args) {
  * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
  * console.log(stdout);
  * ```
- */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, chaosMeshNamespace = 'chaos-mesh', timeout = '10m', clean = true, values = {}, logger }) {
+ */ export async function installChaosMeshChart({ instanceName, targetNamespace, valuesFile, helmChartDir, timeout = '10m', clean = true, values = {}, logger }) {
  if (clean) {
- // uninstall the helm chart if it exists
- logger.info(`Uninstalling helm chart ${instanceName}`);
- await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
- // and delete the chaos-mesh resources created by this release
- const deleteByLabel = async (resource)=>{
- const args = {
- resource,
- namespace: chaosMeshNamespace,
- label: `app.kubernetes.io/instance=${instanceName}`
- };
- logger.info(`Deleting ${resource} resources for release ${instanceName}`);
- await deleteResourceByLabel(args).catch((e)=>{
- logger.error(`Error deleting ${resource}: ${e}`);
- logger.info(`Force deleting ${resource}`);
- return deleteResourceByLabel({
- ...args,
- force: true
- });
- });
- };
- await deleteByLabel('podchaos');
- await deleteByLabel('networkchaos');
+ await uninstallChaosMesh(instanceName, targetNamespace, logger);
  }
  return execHelmCommand({
  instanceName,
  helmChartDir,
- namespace: chaosMeshNamespace,
+ namespace: targetNamespace,
  valuesFile,
  timeout,
  values: {
@@ -262,70 +365,85 @@ export function applyProverFailure({ namespace, spartanDir, durationSeconds, log
  logger
  });
  }
- export function applyProverKill({ namespace, spartanDir, logger }) {
+ export function applyValidatorFailure({ namespace, spartanDir, logger, values, instanceName }) {
+ return installChaosMeshChart({
+ instanceName: instanceName ?? 'validator-failure',
+ targetNamespace: namespace,
+ valuesFile: 'validator-failure.yaml',
+ helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ values,
+ logger
+ });
+ }
+ export function applyProverKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ chaosMeshNamespace: namespace,
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyProverBrokerKill({ namespace, spartanDir, logger }) {
+ export function applyProverBrokerKill({ namespace, spartanDir, logger, values }) {
  return installChaosMeshChart({
  instanceName: 'prover-broker-kill',
  targetNamespace: namespace,
  valuesFile: 'prover-broker-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  clean: true,
- logger
+ logger,
+ values
  });
  }
- export function applyBootNodeFailure({ namespace, spartanDir, durationSeconds, logger }) {
+ export function applyBootNodeFailure({ instanceName = 'boot-node-failure', namespace, spartanDir, durationSeconds, logger, values }) {
  return installChaosMeshChart({
- instanceName: 'boot-node-failure',
+ instanceName,
  targetNamespace: namespace,
  valuesFile: 'boot-node-failure.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  values: {
- 'bootNodeFailure.duration': `${durationSeconds}s`
+ 'bootNodeFailure.duration': `${durationSeconds}s`,
+ ...values ?? {}
  },
  logger
  });
  }
- export function applyValidatorKill({ namespace, spartanDir, logger, values }) {
+ export function applyValidatorKill({ instanceName = 'validator-kill', namespace, spartanDir, logger, values, clean = true }) {
  return installChaosMeshChart({
- instanceName: 'validator-kill',
+ instanceName: instanceName ?? 'validator-kill',
  targetNamespace: namespace,
  valuesFile: 'validator-kill.yaml',
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
+ clean,
  logger,
  values
  });
  }
- export function applyNetworkShaping({ valuesFile, namespace, spartanDir, logger }) {
+ export function applyNetworkShaping({ instanceName = 'network-shaping', valuesFile, namespace, spartanDir, logger }) {
  return installChaosMeshChart({
- instanceName: 'network-shaping',
+ instanceName,
  targetNamespace: namespace,
  valuesFile,
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
  logger
  });
  }
- export async function awaitL2BlockNumber(rollupCheatCodes, blockNumber, timeoutSeconds, logger) {
- logger.info(`Waiting for L2 Block ${blockNumber}`);
+ export async function awaitCheckpointNumber(rollupCheatCodes, checkpointNumber, timeoutSeconds, logger) {
+ logger.info(`Waiting for checkpoint ${checkpointNumber}`);
  let tips = await rollupCheatCodes.getTips();
  const endTime = Date.now() + timeoutSeconds * 1000;
- while(tips.pending < blockNumber && Date.now() < endTime){
- logger.info(`At L2 Block ${tips.pending}`);
+ while(tips.pending < checkpointNumber && Date.now() < endTime){
+ logger.info(`At checkpoint ${tips.pending}`);
  await sleep(1000);
  tips = await rollupCheatCodes.getTips();
  }
- if (tips.pending < blockNumber) {
- throw new Error(`Timeout waiting for L2 Block ${blockNumber}, only reached ${tips.pending}`);
+ if (tips.pending < checkpointNumber) {
+ throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
  } else {
- logger.info(`Reached L2 Block ${tips.pending}`);
+ logger.info(`Reached checkpoint ${tips.pending}`);
  }
  }
  export async function restartBot(namespace, logger) {
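The chaos helpers now all install into the target namespace via `installChaosMeshChart`, and `awaitCheckpointNumber` replaces `awaitL2BlockNumber` with the same polling loop. A hedged end-to-end sketch (the fixture objects `rollupCheatCodes`, `spartanDir`, and `logger` come from the test setup, and the exact numeric type of the tips is assumed here to be bigint):

```ts
// Hypothetical chaos run: kill validators, check liveness, then clean up.
const namespace = 'smoke';
const { pending } = await rollupCheatCodes.getTips();

await applyValidatorKill({ namespace, spartanDir, logger, values: {} });

// The chain should still advance two checkpoints within five minutes.
await awaitCheckpointNumber(rollupCheatCodes, pending + 2n, 300, logger);

// Remove the release and any podchaos/networkchaos/workflow CRs it created.
await uninstallChaosMesh('validator-kill', namespace, logger);
```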
@@ -369,7 +487,12 @@ export async function restartBot(namespace, logger) {
  // Provide L1 execution RPC for bridging fee juice
  'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
  // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
- 'bot.node.env.BOT_L1_MNEMONIC': mnemonic
+ 'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+ // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
+ // can be installed by users without cluster-scoped RBAC permissions.
+ 'bot.rbac.create': false,
+ 'bot.serviceAccount.create': false,
+ 'bot.serviceAccount.name': 'default'
  };
  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
  if (mnemonicStartIndex === undefined) {
@@ -392,7 +515,7 @@ export async function restartBot(namespace, logger) {
  let tag = tagFromEnv;
  if (!repository || !tag) {
  try {
- const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
+ const { stdout } = await execAsync(`kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`);
  const image = stdout.trim().replace(/^'|'$/g, '');
  if (image && image.includes(':')) {
  const lastColon = image.lastIndexOf(':');
@@ -410,6 +533,22 @@ export async function restartBot(namespace, logger) {
  if (mnemonicStartIndex !== undefined) {
  values['bot.mnemonicStartIndex'] = typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
  }
+ // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
+ // `helm upgrade --install` can error with "has no deployed releases".
+ // In that case, clear the release record and do a clean install.
+ const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
+ logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
+ await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(()=>undefined);
+ // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
+ const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
+ if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
+ await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
+ }
+ }
+ // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
+ // Only reuse values when we have a deployed release to reuse from.
+ const effectiveReuseValues = reuseValues && await hasDeployedHelmRelease(instanceName, namespace);
  await execHelmCommand({
  instanceName,
  helmChartDir,
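The recovery path above keys off the release status reported by `helm list --all`. A compact sketch of the decision it implements (the release name and namespace are hypothetical; both helpers are module-private, so this assumes code in the same file):

```ts
// 'deployed'           -> safe to upgrade, optionally with --reuse-values
// 'failed' and similar -> uninstall, then clean install
// 'uninstalling'       -> stuck record; delete the Helm release Secret directly
const status = (await getHelmReleaseStatus('transfer-bot', 'smoke'))?.toLowerCase();
const canReuseValues = status === 'deployed'; // what hasDeployedHelmRelease checks
if (status === 'uninstalling') {
  await forceDeleteHelmReleaseRecord('transfer-bot', 'smoke', logger);
}
```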
@@ -417,7 +556,7 @@ export async function restartBot(namespace, logger) {
  valuesFile: undefined,
  timeout,
  values: values,
- reuseValues
+ reuseValues: effectiveReuseValues
  });
  if (replicas > 0) {
  await waitForResourceByLabel({
@@ -449,8 +588,9 @@ export async function restartBot(namespace, logger) {
  const drop = enabled ? 'true' : 'false';
  const prob = String(probability);
  const selectors = [
- 'app=validator',
- 'app.kubernetes.io/component=validator'
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
  ];
  let updated = false;
  for (const selector of selectors){
@@ -477,8 +617,9 @@ export async function restartBot(namespace, logger) {
  }
  export async function restartValidators(namespace, logger) {
  const selectors = [
- 'app=validator',
- 'app.kubernetes.io/component=validator'
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app=validator'
  ];
  let any = false;
  for (const selector of selectors){
@@ -532,11 +673,27 @@ export async function enableValidatorDynamicBootNode(instanceName, namespace, sp
  logger.info(`Validator dynamic boot node enabled`);
  }
  export async function getSequencers(namespace) {
- const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
- const { stdout } = await execAsync(command);
- const sequencers = stdout.split(' ');
- logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
- return sequencers;
+ const selectors = [
+ 'app.kubernetes.io/name=validator',
+ 'app.kubernetes.io/component=validator',
+ 'app.kubernetes.io/component=sequencer-node',
+ 'app=validator'
+ ];
+ for (const selector of selectors){
+ try {
+ const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+ const { stdout } = await execAsync(command);
+ const sequencers = stdout.split(' ').map((s)=>s.trim()).filter(Boolean);
+ if (sequencers.length > 0) {
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+ return sequencers;
+ }
+ } catch {
+ // try next selector
+ }
+ }
+ // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+ throw new Error(`No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`);
  }
  export function updateSequencersConfig(env, config) {
  return withSequencersAdmin(env, async (client)=>{
@@ -586,7 +743,9 @@ export async function withSequencersAdmin(env, fn) {
  const url = `http://127.0.0.1:${port}`;
  const client = createPublicClient({
  transport: fallback([
- http(url)
+ http(url, {
+ batch: false
+ })
  ])
  });
  if (processes) {
@@ -604,7 +763,9 @@ export async function withSequencersAdmin(env, fn) {
  }
  const client = createPublicClient({
  transport: fallback([
- http(L1_RPC_URLS_JSON)
+ http(L1_RPC_URLS_JSON, {
+ batch: false
+ })
  ])
  });
  return {
@@ -637,71 +798,103 @@ export async function withSequencersAdmin(env, fn) {
  /**
  * Rolls the Aztec pods in the given namespace.
  * @param namespace - The namespace to roll the Aztec pods in.
- * @dev - IMPORTANT: This function DOES NOT delete the underlying PVCs.
- * This means that the pods will be restarted with the same persistent storage.
- * This is useful for testing, but you should be aware of the implications.
- */ export async function rollAztecPods(namespace) {
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=boot-node'
- });
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-node'
- });
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-broker'
- });
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-agent'
- });
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=validator'
- });
- await deleteResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=pxe'
- });
+ * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
+ * This is required for rollup upgrades where the old state is incompatible with the new rollup.
+ * Defaults to false, which preserves the existing storage.
+ */ export async function rollAztecPods(namespace, clearState = false) {
+ // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
+ const podComponents = [
+ 'p2p-bootstrap',
+ 'prover-node',
+ 'prover-broker',
+ 'prover-agent',
+ 'sequencer-node',
+ 'rpc'
+ ];
+ const pvcComponents = [
+ 'p2p-bootstrap',
+ 'prover-node',
+ 'prover-broker',
+ 'sequencer-node',
+ 'rpc'
+ ];
+ // StatefulSet components that need to be scaled down before PVC deletion
+ // Note: validators use 'sequencer-node' as component label, not 'validator'
+ const statefulSetComponents = [
+ 'p2p-bootstrap',
+ 'prover-node',
+ 'prover-broker',
+ 'sequencer-node',
+ 'rpc'
+ ];
+ if (clearState) {
+ // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
+ // Otherwise PVC deletion will hang waiting for pods to terminate
+ // First, save original replica counts
+ const originalReplicas = new Map();
+ for (const component of statefulSetComponents){
+ try {
+ const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o jsonpath='{.items[0].spec.replicas}'`;
+ const { stdout } = await execAsync(getCmd);
+ const replicas = parseInt(stdout.replace(/'/g, '').trim(), 10);
+ if (!isNaN(replicas) && replicas > 0) {
+ originalReplicas.set(component, replicas);
+ }
+ } catch {
+ // Component might not exist, continue
+ }
+ }
+ // Scale down to 0
+ for (const component of statefulSetComponents){
+ try {
+ const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
+ logger.info(`command: ${scaleCmd}`);
+ await execAsync(scaleCmd);
+ } catch (e) {
+ // Component might not exist or might be a Deployment, continue
+ logger.verbose(`Scale down ${component} skipped: ${e}`);
+ }
+ }
+ // Wait for pods to terminate
+ await sleep(15 * 1000);
+ // Now delete PVCs (they should no longer be in use)
+ for (const component of pvcComponents){
+ await deleteResourceByLabel({
+ resource: 'persistentvolumeclaims',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`
+ });
+ }
+ // Scale StatefulSets back up to original replica counts
+ for (const component of statefulSetComponents){
+ const replicas = originalReplicas.get(component) ?? 1;
+ try {
+ const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=${replicas} --timeout=2m`;
+ logger.info(`command: ${scaleCmd}`);
+ await execAsync(scaleCmd);
+ } catch (e) {
+ logger.verbose(`Scale up ${component} skipped: ${e}`);
+ }
+ }
+ } else {
+ // Just delete pods (no state clearing)
+ for (const component of podComponents){
+ await deleteResourceByLabel({
+ resource: 'pods',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`
+ });
+ }
+ }
  await sleep(10 * 1000);
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=boot-node'
- });
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-node'
- });
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-broker'
- });
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=prover-agent'
- });
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=validator'
- });
- await waitForResourceByLabel({
- resource: 'pods',
- namespace: namespace,
- label: 'app=pxe'
- });
+ // Wait for pods to come back
+ for (const component of podComponents){
+ await waitForResourceByLabel({
+ resource: 'pods',
+ namespace: namespace,
+ label: `app.kubernetes.io/component=${component}`
+ });
+ }
  }
  /**
  * Returns the absolute path to the git repository root
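`rollAztecPods` now addresses every pod type through its `app.kubernetes.io/component` label and optionally wipes PVCs. A usage sketch of both modes (the namespace is hypothetical):

```ts
// Plain restart: delete pods but keep persistent volumes (the previous behavior).
await rollAztecPods('smoke');

// Full reset, e.g. after a rollup redeploy: scale StatefulSets to zero,
// delete the PVCs, scale back up, then wait for pods to become Ready.
await rollAztecPods('smoke', true);
```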