@aztec/end-to-end 0.0.1-commit.d3ec352c → 0.0.1-commit.e6bd8901

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/dest/bench/client_flows/benchmark.d.ts +3 -2
  2. package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
  3. package/dest/bench/client_flows/benchmark.js +21 -1
  4. package/dest/bench/client_flows/client_flows_benchmark.d.ts +14 -15
  5. package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
  6. package/dest/bench/client_flows/client_flows_benchmark.js +123 -136
  7. package/dest/bench/client_flows/data_extractor.js +3 -1
  8. package/dest/bench/utils.d.ts +9 -8
  9. package/dest/bench/utils.d.ts.map +1 -1
  10. package/dest/bench/utils.js +27 -18
  11. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +6 -7
  12. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
  13. package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +96 -112
  14. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +19 -13
  15. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
  16. package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +91 -71
  17. package/dest/e2e_deploy_contract/deploy_test.d.ts +4 -3
  18. package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
  19. package/dest/e2e_deploy_contract/deploy_test.js +18 -13
  20. package/dest/e2e_epochs/epochs_test.d.ts +3 -2
  21. package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
  22. package/dest/e2e_epochs/epochs_test.js +16 -11
  23. package/dest/e2e_fees/bridging_race.notest.js +4 -6
  24. package/dest/e2e_fees/fees_test.d.ts +18 -15
  25. package/dest/e2e_fees/fees_test.d.ts.map +1 -1
  26. package/dest/e2e_fees/fees_test.js +126 -141
  27. package/dest/e2e_l1_publisher/write_json.d.ts +2 -2
  28. package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
  29. package/dest/e2e_l1_publisher/write_json.js +19 -15
  30. package/dest/e2e_multi_validator/utils.js +1 -1
  31. package/dest/e2e_nested_contract/nested_contract_test.d.ts +6 -9
  32. package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
  33. package/dest/e2e_nested_contract/nested_contract_test.js +31 -39
  34. package/dest/e2e_p2p/inactivity_slash_test.d.ts +4 -4
  35. package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -1
  36. package/dest/e2e_p2p/inactivity_slash_test.js +6 -9
  37. package/dest/e2e_p2p/p2p_network.d.ts +13 -11
  38. package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
  39. package/dest/e2e_p2p/p2p_network.js +120 -111
  40. package/dest/e2e_p2p/shared.d.ts +7 -7
  41. package/dest/e2e_p2p/shared.d.ts.map +1 -1
  42. package/dest/e2e_p2p/shared.js +11 -18
  43. package/dest/e2e_token_contract/token_contract_test.d.ts +16 -9
  44. package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
  45. package/dest/e2e_token_contract/token_contract_test.js +89 -91
  46. package/dest/fixtures/e2e_prover_test.d.ts +10 -18
  47. package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
  48. package/dest/fixtures/e2e_prover_test.js +90 -102
  49. package/dest/fixtures/fixtures.d.ts +2 -3
  50. package/dest/fixtures/fixtures.d.ts.map +1 -1
  51. package/dest/fixtures/fixtures.js +1 -2
  52. package/dest/fixtures/get_acvm_config.js +1 -1
  53. package/dest/fixtures/l1_to_l2_messaging.d.ts +4 -3
  54. package/dest/fixtures/l1_to_l2_messaging.d.ts.map +1 -1
  55. package/dest/fixtures/l1_to_l2_messaging.js +2 -2
  56. package/dest/fixtures/setup.d.ts +218 -0
  57. package/dest/fixtures/setup.d.ts.map +1 -0
  58. package/dest/fixtures/setup.js +690 -0
  59. package/dest/fixtures/setup_p2p_test.js +3 -3
  60. package/dest/fixtures/token_utils.d.ts +1 -1
  61. package/dest/fixtures/token_utils.d.ts.map +1 -1
  62. package/dest/fixtures/token_utils.js +7 -4
  63. package/dest/fixtures/utils.d.ts +5 -638
  64. package/dest/fixtures/utils.d.ts.map +1 -1
  65. package/dest/fixtures/utils.js +4 -647
  66. package/dest/fixtures/web3signer.js +1 -1
  67. package/dest/fixtures/with_telemetry_utils.d.ts +2 -2
  68. package/dest/fixtures/with_telemetry_utils.d.ts.map +1 -1
  69. package/dest/fixtures/with_telemetry_utils.js +2 -2
  70. package/dest/quality_of_service/grafana_client.d.ts +41 -0
  71. package/dest/quality_of_service/grafana_client.d.ts.map +1 -0
  72. package/dest/quality_of_service/{alert_checker.js → grafana_client.js} +1 -1
  73. package/dest/quality_of_service/prometheus_client.d.ts +38 -0
  74. package/dest/quality_of_service/prometheus_client.d.ts.map +1 -0
  75. package/dest/quality_of_service/prometheus_client.js +67 -0
  76. package/dest/shared/cross_chain_test_harness.d.ts +18 -7
  77. package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
  78. package/dest/shared/cross_chain_test_harness.js +14 -14
  79. package/dest/shared/gas_portal_test_harness.d.ts +12 -2
  80. package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
  81. package/dest/shared/gas_portal_test_harness.js +1 -1
  82. package/dest/shared/index.d.ts +2 -2
  83. package/dest/shared/index.d.ts.map +1 -1
  84. package/dest/shared/submit-transactions.d.ts +3 -3
  85. package/dest/shared/submit-transactions.d.ts.map +1 -1
  86. package/dest/shared/submit-transactions.js +9 -11
  87. package/dest/shared/uniswap_l1_l2.d.ts +3 -27
  88. package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
  89. package/dest/shared/uniswap_l1_l2.js +55 -35
  90. package/dest/simulators/lending_simulator.d.ts +6 -2
  91. package/dest/simulators/lending_simulator.d.ts.map +1 -1
  92. package/dest/simulators/lending_simulator.js +2 -2
  93. package/dest/spartan/setup_test_wallets.d.ts +4 -3
  94. package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
  95. package/dest/spartan/setup_test_wallets.js +63 -35
  96. package/dest/spartan/tx_metrics.d.ts +54 -0
  97. package/dest/spartan/tx_metrics.d.ts.map +1 -0
  98. package/dest/spartan/tx_metrics.js +272 -0
  99. package/dest/spartan/utils/bot.d.ts +27 -0
  100. package/dest/spartan/utils/bot.d.ts.map +1 -0
  101. package/dest/spartan/utils/bot.js +141 -0
  102. package/dest/spartan/utils/chaos.d.ts +79 -0
  103. package/dest/spartan/utils/chaos.d.ts.map +1 -0
  104. package/dest/spartan/utils/chaos.js +142 -0
  105. package/dest/spartan/utils/clients.d.ts +39 -0
  106. package/dest/spartan/utils/clients.d.ts.map +1 -0
  107. package/dest/spartan/utils/clients.js +90 -0
  108. package/dest/spartan/utils/config.d.ts +36 -0
  109. package/dest/spartan/utils/config.d.ts.map +1 -0
  110. package/dest/spartan/utils/config.js +20 -0
  111. package/dest/spartan/utils/health.d.ts +63 -0
  112. package/dest/spartan/utils/health.d.ts.map +1 -0
  113. package/dest/spartan/utils/health.js +202 -0
  114. package/dest/spartan/utils/helm.d.ts +15 -0
  115. package/dest/spartan/utils/helm.d.ts.map +1 -0
  116. package/dest/spartan/utils/helm.js +47 -0
  117. package/dest/spartan/utils/index.d.ts +9 -0
  118. package/dest/spartan/utils/index.d.ts.map +1 -0
  119. package/dest/spartan/utils/index.js +18 -0
  120. package/dest/spartan/utils/k8s.d.ts +98 -0
  121. package/dest/spartan/utils/k8s.d.ts.map +1 -0
  122. package/dest/spartan/utils/k8s.js +257 -0
  123. package/dest/spartan/utils/nodes.d.ts +31 -0
  124. package/dest/spartan/utils/nodes.d.ts.map +1 -0
  125. package/dest/spartan/utils/nodes.js +290 -0
  126. package/dest/spartan/utils/scripts.d.ts +16 -0
  127. package/dest/spartan/utils/scripts.d.ts.map +1 -0
  128. package/dest/spartan/utils/scripts.js +66 -0
  129. package/dest/spartan/utils.d.ts +2 -221
  130. package/dest/spartan/utils.d.ts.map +1 -1
  131. package/dest/spartan/utils.js +1 -782
  132. package/package.json +40 -39
  133. package/src/bench/client_flows/benchmark.ts +24 -2
  134. package/src/bench/client_flows/client_flows_benchmark.ts +145 -208
  135. package/src/bench/client_flows/data_extractor.ts +1 -1
  136. package/src/bench/utils.ts +30 -20
  137. package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +104 -142
  138. package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +140 -125
  139. package/src/e2e_deploy_contract/deploy_test.ts +21 -14
  140. package/src/e2e_epochs/epochs_test.ts +54 -36
  141. package/src/e2e_fees/bridging_race.notest.ts +4 -10
  142. package/src/e2e_fees/fees_test.ts +177 -220
  143. package/src/e2e_l1_publisher/write_json.ts +21 -16
  144. package/src/e2e_multi_validator/utils.ts +1 -1
  145. package/src/e2e_nested_contract/nested_contract_test.ts +33 -56
  146. package/src/e2e_p2p/inactivity_slash_test.ts +9 -12
  147. package/src/e2e_p2p/p2p_network.ts +180 -183
  148. package/src/e2e_p2p/shared.ts +21 -26
  149. package/src/e2e_token_contract/token_contract_test.ts +103 -118
  150. package/src/fixtures/e2e_prover_test.ts +103 -140
  151. package/src/fixtures/fixtures.ts +1 -3
  152. package/src/fixtures/get_acvm_config.ts +1 -1
  153. package/src/fixtures/l1_to_l2_messaging.ts +4 -2
  154. package/src/fixtures/setup.ts +1017 -0
  155. package/src/fixtures/setup_p2p_test.ts +3 -3
  156. package/src/fixtures/token_utils.ts +6 -5
  157. package/src/fixtures/utils.ts +27 -966
  158. package/src/fixtures/web3signer.ts +1 -1
  159. package/src/fixtures/with_telemetry_utils.ts +2 -2
  160. package/src/quality_of_service/{alert_checker.ts → grafana_client.ts} +1 -1
  161. package/src/quality_of_service/prometheus_client.ts +113 -0
  162. package/src/shared/cross_chain_test_harness.ts +19 -37
  163. package/src/shared/gas_portal_test_harness.ts +2 -2
  164. package/src/shared/index.ts +1 -1
  165. package/src/shared/submit-transactions.ts +9 -15
  166. package/src/shared/uniswap_l1_l2.ts +65 -86
  167. package/src/simulators/lending_simulator.ts +3 -3
  168. package/src/spartan/setup_test_wallets.ts +81 -26
  169. package/src/spartan/tx_metrics.ts +250 -0
  170. package/src/spartan/utils/bot.ts +185 -0
  171. package/src/spartan/utils/chaos.ts +253 -0
  172. package/src/spartan/utils/clients.ts +100 -0
  173. package/src/spartan/utils/config.ts +26 -0
  174. package/src/spartan/utils/health.ts +255 -0
  175. package/src/spartan/utils/helm.ts +84 -0
  176. package/src/spartan/utils/index.ts +62 -0
  177. package/src/spartan/utils/k8s.ts +375 -0
  178. package/src/spartan/utils/nodes.ts +323 -0
  179. package/src/spartan/utils/scripts.ts +63 -0
  180. package/src/spartan/utils.ts +1 -983
  181. package/dest/fixtures/setup_l1_contracts.d.ts +0 -477
  182. package/dest/fixtures/setup_l1_contracts.d.ts.map +0 -1
  183. package/dest/fixtures/setup_l1_contracts.js +0 -17
  184. package/dest/fixtures/snapshot_manager.d.ts +0 -95
  185. package/dest/fixtures/snapshot_manager.d.ts.map +0 -1
  186. package/dest/fixtures/snapshot_manager.js +0 -505
  187. package/dest/quality_of_service/alert_checker.d.ts +0 -41
  188. package/dest/quality_of_service/alert_checker.d.ts.map +0 -1
  189. package/src/fixtures/setup_l1_contracts.ts +0 -26
  190. package/src/fixtures/snapshot_manager.ts +0 -665
@@ -0,0 +1,375 @@
1
+ import { createLogger } from '@aztec/aztec.js/log';
2
+ import { promiseWithResolvers } from '@aztec/foundation/promise';
3
+ import { retryUntil } from '@aztec/foundation/retry';
4
+
5
+ import { type ChildProcess, exec, spawn } from 'child_process';
6
+ import path from 'path';
7
+ import { promisify } from 'util';
8
+
9
+ const execAsync = promisify(exec);
10
+
11
+ const logger = createLogger('e2e:k8s-utils');
12
+
13
/**
 * Represents an endpoint to reach a K8s service.
 * May be a LoadBalancer external IP or a port-forward.
 */
export interface ServiceEndpoint {
  // Base URL for the service, e.g. `http://1.2.3.4:8080` (external IP)
  // or `http://127.0.0.1:<port>` (port-forward).
  url: string;
  // Present only when the endpoint is backed by a `kubectl port-forward`
  // child process; callers should kill it when done to release the tunnel.
  process?: ChildProcess;
}
21
+
22
+ export async function startPortForward({
23
+ resource,
24
+ namespace,
25
+ containerPort,
26
+ hostPort,
27
+ }: {
28
+ resource: string;
29
+ namespace: string;
30
+ containerPort: number;
31
+ // If not provided, the port will be chosen automatically
32
+ hostPort?: number;
33
+ }): Promise<{
34
+ process: ChildProcess;
35
+ port: number;
36
+ }> {
37
+ const hostPortAsString = hostPort ? hostPort.toString() : '';
38
+
39
+ logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
40
+
41
+ const process = spawn(
42
+ 'kubectl',
43
+ ['port-forward', '-n', namespace, resource, `${hostPortAsString}:${containerPort}`],
44
+ {
45
+ detached: true,
46
+ windowsHide: true,
47
+ stdio: ['ignore', 'pipe', 'pipe'],
48
+ },
49
+ );
50
+
51
+ let isResolved = false;
52
+ const connected = new Promise<number>((resolve, reject) => {
53
+ process.stdout?.on('data', data => {
54
+ const str = data.toString() as string;
55
+ if (!isResolved && str.includes('Forwarding from')) {
56
+ isResolved = true;
57
+ logger.debug(`Port forward for ${resource}: ${str}`);
58
+ const port = str.search(/:\d+/);
59
+ if (port === -1) {
60
+ reject(new Error('Port not found in port forward output'));
61
+ return;
62
+ }
63
+ const portNumber = parseInt(str.slice(port + 1));
64
+ logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
65
+ resolve(portNumber);
66
+ } else {
67
+ logger.silent(str);
68
+ }
69
+ });
70
+ process.stderr?.on('data', data => {
71
+ logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
72
+ // It's a strange thing:
73
+ // If we don't pipe stderr, then the port forwarding does not work.
74
+ // Log to silent because this doesn't actually report errors,
75
+ // just extremely verbose debug logs.
76
+ logger.silent(data.toString());
77
+ });
78
+ process.on('close', () => {
79
+ if (!isResolved) {
80
+ isResolved = true;
81
+ const msg = `Port forward for ${resource} closed before connection established`;
82
+ logger.warn(msg);
83
+ reject(new Error(msg));
84
+ }
85
+ });
86
+ process.on('error', error => {
87
+ if (!isResolved) {
88
+ isResolved = true;
89
+ const msg = `Port forward for ${resource} error: ${error}`;
90
+ logger.error(msg);
91
+ reject(new Error(msg));
92
+ }
93
+ });
94
+ process.on('exit', code => {
95
+ if (!isResolved) {
96
+ isResolved = true;
97
+ const msg = `Port forward for ${resource} exited with code ${code}`;
98
+ logger.verbose(msg);
99
+ reject(new Error(msg));
100
+ }
101
+ });
102
+ });
103
+
104
+ const port = await connected;
105
+
106
+ return { process, port };
107
+ }
108
+
109
+ export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
110
+ const { promise, resolve, reject } = promiseWithResolvers<string>();
111
+ const process = spawn(
112
+ 'kubectl',
113
+ [
114
+ 'get',
115
+ 'service',
116
+ '-n',
117
+ namespace,
118
+ `${namespace}-${serviceName}`,
119
+ '--output',
120
+ "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
121
+ ],
122
+ {
123
+ stdio: 'pipe',
124
+ },
125
+ );
126
+
127
+ let ip = '';
128
+ process.stdout.on('data', data => {
129
+ ip += data;
130
+ });
131
+ process.on('error', err => {
132
+ reject(err);
133
+ });
134
+ process.on('exit', () => {
135
+ // kubectl prints JSON. Remove the quotes
136
+ resolve(ip.replace(/"|'/g, ''));
137
+ });
138
+
139
+ return promise;
140
+ }
141
+
142
/**
 * Gets an endpoint for a K8s service.
 * By default, tries to get the external IP first and falls back to port-forward if unavailable.
 *
 * @param opts.namespace - K8s namespace
 * @param opts.serviceName - Service name suffix (e.g., 'rpc-aztec-node', 'eth-execution')
 * @param opts.containerPort - Port the service exposes
 * @param opts.forcePortForward - If true, skip external IP check and always use port-forward
 * @returns The endpoint URL, plus the kubectl child process when port-forwarding
 *          (caller should kill it when done).
 */
export async function getServiceEndpoint(opts: {
  namespace: string;
  serviceName: string;
  containerPort: number;
  forcePortForward?: boolean;
}): Promise<ServiceEndpoint> {
  const { namespace, serviceName, containerPort, forcePortForward } = opts;

  if (!forcePortForward) {
    try {
      const ip = await retryUntil(
        async () => {
          try {
            const ip = await getExternalIP(namespace, serviceName);
            // Filter out the placeholder values kubectl emits while the LB IP is pending.
            if (ip && ip !== '' && ip !== '<pending>' && ip !== 'null') {
              return ip;
            }
          } catch (err) {
            logger.verbose(`Failed to get external IP for ${serviceName}: ${err}`);
          }
          // Returning undefined tells retryUntil to keep polling.
          return undefined;
        },
        `external IP for ${serviceName}`,
        30,
        5,
      );
      logger.info(`Using external IP for ${serviceName}: ${ip}:${containerPort}`);
      return { url: `http://${ip}:${containerPort}` };
    } catch {
      // NOTE(review): the message says "after 5min" but retryUntil is called with (30, 5);
      // confirm those args' units (timeout/interval seconds) — the wording may be stale.
      logger.warn(`External IP not available for ${serviceName} after 5min, using port-forward`);
    }
  }

  // Fallback to port-forward
  const resource = `svc/${namespace}-${serviceName}`;

  const { process, port } = await startPortForward({
    resource,
    namespace,
    containerPort,
  });

  return { url: `http://127.0.0.1:${port}`, process };
}
195
+
196
+ /**
197
+ * Gets an endpoint for the RPC node service.
198
+ * Tries external IP first, falls back to port-forward.
199
+ *
200
+ * @param namespace - K8s namespace
201
+ * @param usePortForward - If true, skip external IP and use port-forward directly
202
+ */
203
+ export async function getRPCEndpoint(namespace: string, forcePortForward?: boolean): Promise<ServiceEndpoint> {
204
+ return await getServiceEndpoint({
205
+ namespace,
206
+ serviceName: 'rpc-aztec-node',
207
+ containerPort: 8080,
208
+ forcePortForward,
209
+ });
210
+ }
211
+
212
+ /**
213
+ * Gets an endpoint for the Ethereum execution service.
214
+ * Tries external IP first, falls back to port-forward.
215
+ *
216
+ * @param namespace - K8s namespace
217
+ * @param usePortForward - If true, skip external IP and use port-forward directly
218
+ */
219
+ export async function getEthereumEndpoint(namespace: string, forcePortForward?: boolean): Promise<ServiceEndpoint> {
220
+ return await getServiceEndpoint({
221
+ namespace,
222
+ serviceName: 'eth-execution',
223
+ containerPort: 8545,
224
+ forcePortForward,
225
+ });
226
+ }
227
+
228
+ export function startPortForwardForPrometeheus(namespace: string) {
229
+ return startPortForward({
230
+ resource: `svc/${namespace}-prometheus-server`,
231
+ namespace,
232
+ containerPort: 80,
233
+ });
234
+ }
235
+
236
+ export function startPortForwardForRPC(namespace: string, index = 0) {
237
+ return startPortForward({
238
+ resource: `pod/${namespace}-rpc-aztec-node-${index}`,
239
+ namespace,
240
+ containerPort: 8080,
241
+ });
242
+ }
243
+
244
+ export function startPortForwardForEthereum(namespace: string) {
245
+ return startPortForward({
246
+ resource: `services/${namespace}-eth-execution`,
247
+ namespace,
248
+ containerPort: 8545,
249
+ });
250
+ }
251
+
252
+ export async function deleteResourceByName({
253
+ resource,
254
+ namespace,
255
+ name,
256
+ force = false,
257
+ }: {
258
+ resource: string;
259
+ namespace: string;
260
+ name: string;
261
+ force?: boolean;
262
+ }) {
263
+ const command = `kubectl delete ${resource} ${name} -n ${namespace} --ignore-not-found=true --wait=true ${
264
+ force ? '--force' : ''
265
+ }`;
266
+ logger.info(`command: ${command}`);
267
+ const { stdout } = await execAsync(command);
268
+ return stdout;
269
+ }
270
+
271
+ export async function deleteResourceByLabel({
272
+ resource,
273
+ namespace,
274
+ label,
275
+ timeout = '5m',
276
+ force = false,
277
+ }: {
278
+ resource: string;
279
+ namespace: string;
280
+ label: string;
281
+ timeout?: string;
282
+ force?: boolean;
283
+ }) {
284
+ try {
285
+ // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
286
+ const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
287
+ const regex = `(^|\\.)${escaped}(\\.|$)`;
288
+ await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
289
+ } catch (error) {
290
+ logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
291
+ return '';
292
+ }
293
+
294
+ const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
295
+ force ? '--force' : ''
296
+ }`;
297
+ logger.info(`command: ${command}`);
298
+ const { stdout } = await execAsync(command);
299
+ return stdout;
300
+ }
301
+
302
+ export async function waitForResourceByLabel({
303
+ resource,
304
+ label,
305
+ namespace,
306
+ condition = 'Ready',
307
+ timeout = '10m',
308
+ }: {
309
+ resource: string;
310
+ label: string;
311
+ namespace: string;
312
+ condition?: string;
313
+ timeout?: string;
314
+ }) {
315
+ const command = `kubectl wait ${resource} -l ${label} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
316
+ logger.info(`command: ${command}`);
317
+ const { stdout } = await execAsync(command);
318
+ return stdout;
319
+ }
320
+
321
+ export async function waitForResourceByName({
322
+ resource,
323
+ name,
324
+ namespace,
325
+ condition = 'Ready',
326
+ timeout = '10m',
327
+ }: {
328
+ resource: string;
329
+ name: string;
330
+ namespace: string;
331
+ condition?: string;
332
+ timeout?: string;
333
+ }) {
334
+ const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
335
+ logger.info(`command: ${command}`);
336
+ const { stdout } = await execAsync(command);
337
+ return stdout;
338
+ }
339
+
340
+ export async function waitForResourcesByName({
341
+ resource,
342
+ names,
343
+ namespace,
344
+ condition = 'Ready',
345
+ timeout = '10m',
346
+ }: {
347
+ resource: string;
348
+ names: string[];
349
+ namespace: string;
350
+ condition?: string;
351
+ timeout?: string;
352
+ }) {
353
+ if (!names.length) {
354
+ throw new Error(`No ${resource} names provided to waitForResourcesByName`);
355
+ }
356
+
357
+ // Wait all in parallel; if any fails, surface which one.
358
+ await Promise.all(
359
+ names.map(async name => {
360
+ try {
361
+ await waitForResourceByName({ resource, name, namespace, condition, timeout });
362
+ } catch (err) {
363
+ throw new Error(
364
+ `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
365
+ err,
366
+ )}`,
367
+ );
368
+ }
369
+ }),
370
+ );
371
+ }
372
+
373
+ export function getChartDir(spartanDir: string, chartName: string) {
374
+ return path.join(spartanDir.trim(), chartName);
375
+ }
@@ -0,0 +1,323 @@
1
+ import { createLogger } from '@aztec/aztec.js/log';
2
+ import type { RollupCheatCodes } from '@aztec/aztec/testing';
3
+ import type { CheckpointNumber } from '@aztec/foundation/branded-types';
4
+ import type { Logger } from '@aztec/foundation/log';
5
+ import { makeBackoff, retry } from '@aztec/foundation/retry';
6
+ import { sleep } from '@aztec/foundation/sleep';
7
+ import {
8
+ type AztecNodeAdmin,
9
+ type AztecNodeAdminConfig,
10
+ createAztecNodeAdminClient,
11
+ } from '@aztec/stdlib/interfaces/client';
12
+
13
+ import { exec } from 'child_process';
14
+ import { promisify } from 'util';
15
+
16
+ import type { TestConfig } from './config.js';
17
+ import { execHelmCommand } from './helm.js';
18
+ import { deleteResourceByLabel, getChartDir, startPortForward, waitForResourceByLabel } from './k8s.js';
19
+
20
+ const execAsync = promisify(exec);
21
+
22
+ const logger = createLogger('e2e:k8s-utils');
23
+
24
+ export async function awaitCheckpointNumber(
25
+ rollupCheatCodes: RollupCheatCodes,
26
+ checkpointNumber: CheckpointNumber,
27
+ timeoutSeconds: number,
28
+ log: Logger,
29
+ ) {
30
+ log.info(`Waiting for checkpoint ${checkpointNumber}`);
31
+ let tips = await rollupCheatCodes.getTips();
32
+ const endTime = Date.now() + timeoutSeconds * 1000;
33
+ while (tips.pending < checkpointNumber && Date.now() < endTime) {
34
+ log.info(`At checkpoint ${tips.pending}`);
35
+ await sleep(1000);
36
+ tips = await rollupCheatCodes.getTips();
37
+ }
38
+ if (tips.pending < checkpointNumber) {
39
+ throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
40
+ } else {
41
+ log.info(`Reached checkpoint ${tips.pending}`);
42
+ }
43
+ }
44
+
45
+ export async function getSequencers(namespace: string) {
46
+ const selectors = [
47
+ 'app.kubernetes.io/name=validator',
48
+ 'app.kubernetes.io/component=validator',
49
+ 'app.kubernetes.io/component=sequencer-node',
50
+ 'app=validator',
51
+ ];
52
+ for (const selector of selectors) {
53
+ try {
54
+ const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
55
+ const { stdout } = await execAsync(command);
56
+ const sequencers = stdout
57
+ .split(' ')
58
+ .map(s => s.trim())
59
+ .filter(Boolean);
60
+ if (sequencers.length > 0) {
61
+ logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
62
+ return sequencers;
63
+ }
64
+ } catch {
65
+ // try next selector
66
+ }
67
+ }
68
+
69
+ // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
70
+ throw new Error(
71
+ `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
72
+ );
73
+ }
74
+
75
+ export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
76
+ return withSequencersAdmin(env, async client => {
77
+ await client.setConfig(config);
78
+ return client.getConfig();
79
+ });
80
+ }
81
+
82
+ export function getSequencersConfig(env: TestConfig) {
83
+ return withSequencersAdmin(env, client => client.getConfig());
84
+ }
85
+
86
+ export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
87
+ const adminContainerPort = 8880;
88
+ const namespace = env.NAMESPACE;
89
+ const sequencers = await getSequencers(namespace);
90
+ const results = [];
91
+
92
+ for (const sequencer of sequencers) {
93
+ // Wrap port-forward + fetch in a retry to handle flaky port-forwards
94
+ const result = await retry(
95
+ async () => {
96
+ const { process, port } = await startPortForward({
97
+ resource: `pod/${sequencer}`,
98
+ namespace,
99
+ containerPort: adminContainerPort,
100
+ });
101
+
102
+ try {
103
+ const url = `http://localhost:${port}`;
104
+ // Quick health check before using the connection
105
+ const statusRes = await fetch(`${url}/status`);
106
+ if (statusRes.status !== 200) {
107
+ throw new Error(`Admin endpoint returned status ${statusRes.status}`);
108
+ }
109
+ const client = createAztecNodeAdminClient(url);
110
+ return { result: await fn(client), process };
111
+ } catch (err) {
112
+ // Kill the port-forward before retrying
113
+ process.kill();
114
+ throw err;
115
+ }
116
+ },
117
+ 'connect to node admin',
118
+ makeBackoff([1, 2, 4, 8]),
119
+ logger,
120
+ true,
121
+ );
122
+
123
+ results.push(result.result);
124
+ result.process.kill();
125
+ }
126
+
127
+ return results;
128
+ }
129
+
130
/**
 * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
 * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
 *
 * @param namespace - K8s namespace holding the validator StatefulSets.
 * @param enabled - Whether tx dropping should be turned on.
 * @param probability - Drop chance, passed through as P2P_DROP_TX_CHANCE.
 * @param logger - Logger for progress and warnings.
 */
export async function setValidatorTxDrop({
  namespace,
  enabled,
  probability,
  logger: log,
}: {
  namespace: string;
  enabled: boolean;
  probability: number;
  logger: Logger;
}) {
  const drop = enabled ? 'true' : 'false';
  const prob = String(probability);

  // Deployments have used different validator labels over time; try each in turn.
  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
  let updated = false;
  for (const selector of selectors) {
    try {
      // `| cat` defeats any kubectl paging/tty behavior when listing.
      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
      const names = list.stdout
        .split('\n')
        .map(s => s.trim())
        .filter(Boolean);
      if (names.length === 0) {
        continue;
      }
      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
      log.info(`command: ${cmd}`);
      await execAsync(cmd);
      updated = true;
    } catch (e) {
      // Best-effort per selector: log and keep trying the others.
      log.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
    }
  }

  if (!updated) {
    log.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
    return;
  }

  // Restart validator pods to ensure env vars take effect and wait for readiness
  await restartValidators(namespace, log);
}
+
178
+ export async function restartValidators(namespace: string, log: Logger) {
179
+ const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
180
+ let any = false;
181
+ for (const selector of selectors) {
182
+ try {
183
+ const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
184
+ if (!stdout || stdout.trim().length === 0) {
185
+ continue;
186
+ }
187
+ any = true;
188
+ await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
189
+ } catch (e) {
190
+ log.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
191
+ }
192
+ }
193
+
194
+ if (!any) {
195
+ log.warn(`No validator pods found to restart in ${namespace}.`);
196
+ return;
197
+ }
198
+
199
+ // Wait for either label to be Ready
200
+ for (const selector of selectors) {
201
+ try {
202
+ await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
203
+ return;
204
+ } catch {
205
+ // try next
206
+ }
207
+ }
208
+ log.warn(`Validator pods did not report Ready; continuing.`);
209
+ }
210
+
211
+ export async function enableValidatorDynamicBootNode(
212
+ instanceName: string,
213
+ namespace: string,
214
+ spartanDir: string,
215
+ log: Logger,
216
+ ) {
217
+ log.info(`Enabling validator dynamic boot node`);
218
+ await execHelmCommand({
219
+ instanceName,
220
+ namespace,
221
+ helmChartDir: getChartDir(spartanDir, 'aztec-network'),
222
+ values: {
223
+ 'validator.dynamicBootNode': 'true',
224
+ },
225
+ valuesFile: undefined,
226
+ timeout: '15m',
227
+ reuseValues: true,
228
+ });
229
+
230
+ log.info(`Validator dynamic boot node enabled`);
231
+ }
232
+
233
/**
 * Rolls the Aztec pods in the given namespace.
 * @param namespace - The namespace to roll the Aztec pods in.
 * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
 * This is required for rollup upgrades where the old state is incompatible with the new rollup.
 * Defaults to false, which preserves the existing storage.
 */
export async function rollAztecPods(namespace: string, clearState: boolean = false) {
  // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
  const podComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'prover-agent', 'sequencer-node', 'rpc'];
  const pvcComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
  // StatefulSet components that need to be scaled down before PVC deletion
  // Note: validators use 'sequencer-node' as component label, not 'validator'
  const statefulSetComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];

  if (clearState) {
    // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
    // Otherwise PVC deletion will hang waiting for pods to terminate

    // First, save original replica counts
    // NOTE(review): only the FIRST matching StatefulSet's replica count is read
    // (`.items[0]`); if a selector matches several with differing replicas, the
    // scale-up below uses that single value (or the fallback of 1) — confirm intended.
    const originalReplicas: Map<string, number> = new Map();
    for (const component of statefulSetComponents) {
      try {
        const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o jsonpath='{.items[0].spec.replicas}'`;
        const { stdout } = await execAsync(getCmd);
        const replicas = parseInt(stdout.replace(/'/g, '').trim(), 10);
        if (!isNaN(replicas) && replicas > 0) {
          originalReplicas.set(component, replicas);
        }
      } catch {
        // Component might not exist, continue
      }
    }

    // Scale down to 0
    for (const component of statefulSetComponents) {
      try {
        const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
        logger.info(`command: ${scaleCmd}`);
        await execAsync(scaleCmd);
      } catch (e) {
        // Component might not exist or might be a Deployment, continue
        logger.verbose(`Scale down ${component} skipped: ${e}`);
      }
    }

    // Wait for pods to terminate
    await sleep(15 * 1000);

    // Now delete PVCs (they should no longer be in use)
    for (const component of pvcComponents) {
      await deleteResourceByLabel({
        resource: 'persistentvolumeclaims',
        namespace: namespace,
        label: `app.kubernetes.io/component=${component}`,
      });
    }

    // Scale StatefulSets back up to original replica counts
    for (const component of statefulSetComponents) {
      // Fall back to 1 replica when the original count could not be read above.
      const replicas = originalReplicas.get(component) ?? 1;
      try {
        const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=${replicas} --timeout=2m`;
        logger.info(`command: ${scaleCmd}`);
        await execAsync(scaleCmd);
      } catch (e) {
        logger.verbose(`Scale up ${component} skipped: ${e}`);
      }
    }
  } else {
    // Just delete pods (no state clearing)
    for (const component of podComponents) {
      await deleteResourceByLabel({
        resource: 'pods',
        namespace: namespace,
        label: `app.kubernetes.io/component=${component}`,
      });
    }
  }

  // Grace period before polling for readiness so replacement pods get scheduled.
  await sleep(10 * 1000);

  // Wait for pods to come back
  for (const component of podComponents) {
    await waitForResourceByLabel({
      resource: 'pods',
      namespace: namespace,
      label: `app.kubernetes.io/component=${component}`,
    });
  }
}