@aztec/end-to-end 4.0.0-nightly.20260120 → 4.0.0-nightly.20260122
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/shared/cross_chain_test_harness.d.ts +3 -4
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/submit-transactions.d.ts +1 -1
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +1 -4
- package/dest/spartan/tx_metrics.d.ts +11 -1
- package/dest/spartan/tx_metrics.d.ts.map +1 -1
- package/dest/spartan/tx_metrics.js +133 -4
- package/dest/spartan/utils/bot.d.ts +27 -0
- package/dest/spartan/utils/bot.d.ts.map +1 -0
- package/dest/spartan/utils/bot.js +141 -0
- package/dest/spartan/utils/chaos.d.ts +79 -0
- package/dest/spartan/utils/chaos.d.ts.map +1 -0
- package/dest/spartan/utils/chaos.js +142 -0
- package/dest/spartan/utils/clients.d.ts +25 -0
- package/dest/spartan/utils/clients.d.ts.map +1 -0
- package/dest/spartan/utils/clients.js +101 -0
- package/dest/spartan/utils/config.d.ts +36 -0
- package/dest/spartan/utils/config.d.ts.map +1 -0
- package/dest/spartan/utils/config.js +20 -0
- package/dest/spartan/utils/health.d.ts +63 -0
- package/dest/spartan/utils/health.d.ts.map +1 -0
- package/dest/spartan/utils/health.js +202 -0
- package/dest/spartan/utils/helm.d.ts +15 -0
- package/dest/spartan/utils/helm.d.ts.map +1 -0
- package/dest/spartan/utils/helm.js +47 -0
- package/dest/spartan/utils/index.d.ts +9 -0
- package/dest/spartan/utils/index.d.ts.map +1 -0
- package/dest/spartan/utils/index.js +18 -0
- package/dest/spartan/utils/k8s.d.ts +59 -0
- package/dest/spartan/utils/k8s.d.ts.map +1 -0
- package/dest/spartan/utils/k8s.js +185 -0
- package/dest/spartan/utils/nodes.d.ts +31 -0
- package/dest/spartan/utils/nodes.d.ts.map +1 -0
- package/dest/spartan/utils/nodes.js +273 -0
- package/dest/spartan/utils/scripts.d.ts +16 -0
- package/dest/spartan/utils/scripts.d.ts.map +1 -0
- package/dest/spartan/utils/scripts.js +66 -0
- package/dest/spartan/utils.d.ts +2 -260
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +1 -942
- package/package.json +39 -39
- package/src/shared/cross_chain_test_harness.ts +2 -3
- package/src/shared/submit-transactions.ts +1 -6
- package/src/spartan/tx_metrics.ts +82 -4
- package/src/spartan/utils/bot.ts +185 -0
- package/src/spartan/utils/chaos.ts +253 -0
- package/src/spartan/utils/clients.ts +106 -0
- package/src/spartan/utils/config.ts +26 -0
- package/src/spartan/utils/health.ts +256 -0
- package/src/spartan/utils/helm.ts +84 -0
- package/src/spartan/utils/index.ts +58 -0
- package/src/spartan/utils/k8s.ts +279 -0
- package/src/spartan/utils/nodes.ts +308 -0
- package/src/spartan/utils/scripts.ts +63 -0
- package/src/spartan/utils.ts +1 -1246
package/src/spartan/utils.ts
CHANGED
@@ -1,1246 +1 @@
-
-import type { RollupCheatCodes } from '@aztec/aztec/testing';
-import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
-import type { ViemPublicClient } from '@aztec/ethereum/types';
-import type { CheckpointNumber } from '@aztec/foundation/branded-types';
-import type { Logger } from '@aztec/foundation/log';
-import { promiseWithResolvers } from '@aztec/foundation/promise';
-import { makeBackoff, retry } from '@aztec/foundation/retry';
-import { schemas } from '@aztec/foundation/schemas';
-import { sleep } from '@aztec/foundation/sleep';
-import {
-  type AztecNodeAdmin,
-  type AztecNodeAdminConfig,
-  createAztecNodeAdminClient,
-  createAztecNodeClient,
-} from '@aztec/stdlib/interfaces/client';
-
-import { ChildProcess, exec, execSync, spawn } from 'child_process';
-import path from 'path';
-import { promisify } from 'util';
-import { createPublicClient, fallback, http } from 'viem';
-import { z } from 'zod';
-
-const execAsync = promisify(exec);
-
-const logger = createLogger('e2e:k8s-utils');
-
-const testConfigSchema = z.object({
-  NAMESPACE: z.string().default('scenario'),
-  REAL_VERIFIER: schemas.Boolean.optional().default(true),
-  CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
-  L1_RPC_URLS_JSON: z.string().optional(),
-  L1_ACCOUNT_MNEMONIC: z.string().optional(),
-  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
-  AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
-  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
-  AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2),
-});
-
-export type TestConfig = z.infer<typeof testConfigSchema>;
-
-export function setupEnvironment(env: unknown): TestConfig {
-  const config = testConfigSchema.parse(env);
-  logger.warn(`Loaded env config`, config);
-  return config;
-}
-
-/**
- * @param path - The path to the script, relative to the project root
- * @param args - The arguments to pass to the script
- * @param logger - The logger to use
- * @returns The exit code of the script
- */
-function runScript(path: string, args: string[], logger: Logger, env?: Record<string, string>) {
-  const childProcess = spawn(path, args, {
-    stdio: ['ignore', 'pipe', 'pipe'],
-    env: env ? { ...process.env, ...env } : process.env,
-  });
-  return new Promise<number>((resolve, reject) => {
-    childProcess.on('close', (code: number | null) => resolve(code ?? 0));
-    childProcess.on('error', reject);
-    childProcess.stdout?.on('data', (data: Buffer) => {
-      logger.info(data.toString());
-    });
-    childProcess.stderr?.on('data', (data: Buffer) => {
-      logger.error(data.toString());
-    });
-  });
-}
-
-export function getAztecBin() {
-  return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
-}
-
-/**
- * Runs the Aztec binary
- * @param args - The arguments to pass to the Aztec binary
- * @param logger - The logger to use
- * @param env - Optional environment variables to set for the process
- * @returns The exit code of the Aztec binary
- */
-export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
-  return runScript('node', [getAztecBin(), ...args], logger, env);
-}
-
-export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
-  const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
-  return runScript(scriptPath, args, logger, env);
-}
-
-export async function startPortForward({
-  resource,
-  namespace,
-  containerPort,
-  hostPort,
-}: {
-  resource: string;
-  namespace: string;
-  containerPort: number;
-  // If not provided, the port will be chosen automatically
-  hostPort?: number;
-}): Promise<{
-  process: ChildProcess;
-  port: number;
-}> {
-  const hostPortAsString = hostPort ? hostPort.toString() : '';
-
-  logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
-
-  const process = spawn(
-    'kubectl',
-    ['port-forward', '-n', namespace, resource, `${hostPortAsString}:${containerPort}`],
-    {
-      detached: true,
-      windowsHide: true,
-      stdio: ['ignore', 'pipe', 'pipe'],
-    },
-  );
-
-  let isResolved = false;
-  const connected = new Promise<number>((resolve, reject) => {
-    process.stdout?.on('data', data => {
-      const str = data.toString() as string;
-      if (!isResolved && str.includes('Forwarding from')) {
-        isResolved = true;
-        logger.debug(`Port forward for ${resource}: ${str}`);
-        const port = str.search(/:\d+/);
-        if (port === -1) {
-          reject(new Error('Port not found in port forward output'));
-          return;
-        }
-        const portNumber = parseInt(str.slice(port + 1));
-        logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
-        resolve(portNumber);
-      } else {
-        logger.silent(str);
-      }
-    });
-    process.stderr?.on('data', data => {
-      logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
-      // It's a strange thing:
-      // If we don't pipe stderr, then the port forwarding does not work.
-      // Log to silent because this doesn't actually report errors,
-      // just extremely verbose debug logs.
-      logger.silent(data.toString());
-    });
-    process.on('close', () => {
-      if (!isResolved) {
-        isResolved = true;
-        const msg = `Port forward for ${resource} closed before connection established`;
-        logger.warn(msg);
-        reject(new Error(msg));
-      }
-    });
-    process.on('error', error => {
-      if (!isResolved) {
-        isResolved = true;
-        const msg = `Port forward for ${resource} error: ${error}`;
-        logger.error(msg);
-        reject(new Error(msg));
-      }
-    });
-    process.on('exit', code => {
-      if (!isResolved) {
-        isResolved = true;
-        const msg = `Port forward for ${resource} exited with code ${code}`;
-        logger.verbose(msg);
-        reject(new Error(msg));
-      }
-    });
-  });
-
-  const port = await connected;
-
-  return { process, port };
-}
-
-export function getExternalIP(namespace: string, serviceName: string): Promise<string> {
-  const { promise, resolve, reject } = promiseWithResolvers<string>();
-  const process = spawn(
-    'kubectl',
-    [
-      'get',
-      'service',
-      '-n',
-      namespace,
-      `${namespace}-${serviceName}`,
-      '--output',
-      "jsonpath='{.status.loadBalancer.ingress[0].ip}'",
-    ],
-    {
-      stdio: 'pipe',
-    },
-  );
-
-  let ip = '';
-  process.stdout.on('data', data => {
-    ip += data;
-  });
-  process.on('error', err => {
-    reject(err);
-  });
-  process.on('exit', () => {
-    // kubectl prints JSON. Remove the quotes
-    resolve(ip.replace(/"|'/g, ''));
-  });
-
-  return promise;
-}
-
-export function startPortForwardForPrometeheus(namespace: string) {
-  return startPortForward({
-    resource: `svc/${namespace}-prometheus-server`,
-    namespace,
-    containerPort: 80,
-  });
-}
-
-export function startPortForwardForRPC(namespace: string, index = 0) {
-  return startPortForward({
-    resource: `pod/${namespace}-rpc-aztec-node-${index}`,
-    namespace,
-    containerPort: 8080,
-  });
-}
-
-export function startPortForwardForEthereum(namespace: string) {
-  return startPortForward({
-    resource: `services/${namespace}-eth-execution`,
-    namespace,
-    containerPort: 8545,
-  });
-}
-
-export async function deleteResourceByName({
-  resource,
-  namespace,
-  name,
-  force = false,
-}: {
-  resource: string;
-  namespace: string;
-  name: string;
-  force?: boolean;
-}) {
-  const command = `kubectl delete ${resource} ${name} -n ${namespace} --ignore-not-found=true --wait=true ${
-    force ? '--force' : ''
-  }`;
-  logger.info(`command: ${command}`);
-  const { stdout } = await execAsync(command);
-  return stdout;
-}
-
-export async function deleteResourceByLabel({
-  resource,
-  namespace,
-  label,
-  timeout = '5m',
-  force = false,
-}: {
-  resource: string;
-  namespace: string;
-  label: string;
-  timeout?: string;
-  force?: boolean;
-}) {
-  try {
-    // Match both plain and group-qualified names (e.g., "podchaos" or "podchaos.chaos-mesh.org")
-    const escaped = resource.replace(/[-/\\^$*+?.()|[\]{}]/g, '\\$&');
-    const regex = `(^|\\.)${escaped}(\\.|$)`;
-    await execAsync(`kubectl api-resources --no-headers -o name | grep -Eq '${regex}'`);
-  } catch (error) {
-    logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
-    return '';
-  }
-
-  const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
-    force ? '--force' : ''
-  }`;
-  logger.info(`command: ${command}`);
-  const { stdout } = await execAsync(command);
-  return stdout;
-}
-
-export async function waitForResourceByLabel({
-  resource,
-  label,
-  namespace,
-  condition = 'Ready',
-  timeout = '10m',
-}: {
-  resource: string;
-  label: string;
-  namespace: string;
-  condition?: string;
-  timeout?: string;
-}) {
-  const command = `kubectl wait ${resource} -l ${label} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
-  logger.info(`command: ${command}`);
-  const { stdout } = await execAsync(command);
-  return stdout;
-}
-
-export async function waitForResourceByName({
-  resource,
-  name,
-  namespace,
-  condition = 'Ready',
-  timeout = '10m',
-}: {
-  resource: string;
-  name: string;
-  namespace: string;
-  condition?: string;
-  timeout?: string;
-}) {
-  const command = `kubectl wait ${resource}/${name} --for=condition=${condition} -n ${namespace} --timeout=${timeout}`;
-  logger.info(`command: ${command}`);
-  const { stdout } = await execAsync(command);
-  return stdout;
-}
-
-export async function waitForResourcesByName({
-  resource,
-  names,
-  namespace,
-  condition = 'Ready',
-  timeout = '10m',
-}: {
-  resource: string;
-  names: string[];
-  namespace: string;
-  condition?: string;
-  timeout?: string;
-}) {
-  if (!names.length) {
-    throw new Error(`No ${resource} names provided to waitForResourcesByName`);
-  }
-
-  // Wait all in parallel; if any fails, surface which one.
-  await Promise.all(
-    names.map(async name => {
-      try {
-        await waitForResourceByName({ resource, name, namespace, condition, timeout });
-      } catch (err) {
-        throw new Error(
-          `Failed waiting for ${resource}/${name} condition=${condition} timeout=${timeout} namespace=${namespace}: ${String(
-            err,
-          )}`,
-        );
-      }
-    }),
-  );
-}
-
-export function getChartDir(spartanDir: string, chartName: string) {
-  return path.join(spartanDir.trim(), chartName);
-}
-
-function shellQuote(value: string) {
-  // Single-quote safe shell escaping: ' -> '\''
-  return `'${value.replace(/'/g, "'\\''")}'`;
-}
-
-function valuesToArgs(values: Record<string, string | number | boolean>) {
-  return Object.entries(values)
-    .map(([key, value]) =>
-      typeof value === 'number' || typeof value === 'boolean'
-        ? `--set ${key}=${value}`
-        : `--set-string ${key}=${shellQuote(String(value))}`,
-    )
-    .join(' ');
-}
-
-function createHelmCommand({
-  instanceName,
-  helmChartDir,
-  namespace,
-  valuesFile,
-  timeout,
-  values,
-  reuseValues = false,
-}: {
-  instanceName: string;
-  helmChartDir: string;
-  namespace: string;
-  valuesFile: string | undefined;
-  timeout: string;
-  values: Record<string, string | number | boolean>;
-  reuseValues?: boolean;
-}) {
-  const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
-  const reuseValuesArgs = reuseValues ? '--reuse-values' : '';
-  return `helm upgrade --install ${instanceName} ${helmChartDir} --namespace ${namespace} ${valuesFileArgs} ${reuseValuesArgs} --wait --timeout=${timeout} ${valuesToArgs(
-    values,
-  )}`;
-}
-
-async function execHelmCommand(args: Parameters<typeof createHelmCommand>[0]) {
-  const helmCommand = createHelmCommand(args);
-  logger.info(`helm command: ${helmCommand}`);
-  const { stdout } = await execAsync(helmCommand);
-  return stdout;
-}
-
-async function getHelmReleaseStatus(instanceName: string, namespace: string): Promise<string | undefined> {
-  try {
-    const { stdout } = await execAsync(
-      `helm list --namespace ${namespace} --all --filter '^${instanceName}$' --output json | cat`,
-    );
-    const parsed = JSON.parse(stdout) as Array<{ name?: string; status?: string }>;
-    const row = parsed.find(r => r.name === instanceName);
-    return row?.status;
-  } catch {
-    return undefined;
-  }
-}
-
-async function forceDeleteHelmReleaseRecord(instanceName: string, namespace: string, logger: Logger) {
-  const labelSelector = `owner=helm,name=${instanceName}`;
-  const cmd = `kubectl delete secret -n ${namespace} -l ${labelSelector} --ignore-not-found=true`;
-  logger.warn(`Force deleting Helm release record: ${cmd}`);
-  await execAsync(cmd).catch(() => undefined);
-}
-
-async function hasDeployedHelmRelease(instanceName: string, namespace: string): Promise<boolean> {
-  try {
-    const status = await getHelmReleaseStatus(instanceName, namespace);
-    return status?.toLowerCase() === 'deployed';
-  } catch {
-    return false;
-  }
-}
-
-export async function uninstallChaosMesh(instanceName: string, namespace: string, logger: Logger) {
-  // uninstall the helm chart if it exists
-  logger.info(`Uninstalling helm chart ${instanceName}`);
-  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
-  // and delete the chaos-mesh resources created by this release
-  const deleteByLabel = async (resource: string) => {
-    const args = {
-      resource,
-      namespace: namespace,
-      label: `app.kubernetes.io/instance=${instanceName}`,
-    } as const;
-    logger.info(`Deleting ${resource} resources for release ${instanceName}`);
-    await deleteResourceByLabel(args).catch(e => {
-      logger.error(`Error deleting ${resource}: ${e}`);
-      logger.info(`Force deleting ${resource}`);
-      return deleteResourceByLabel({ ...args, force: true });
-    });
-  };
-
-  await deleteByLabel('podchaos');
-  await deleteByLabel('networkchaos');
-  await deleteByLabel('podnetworkchaos');
-  await deleteByLabel('workflows');
-  await deleteByLabel('workflownodes');
-}
-
-/**
- * Installs a Helm chart with the given parameters.
- * @param instanceName - The name of the Helm chart instance.
- * @param targetNamespace - The namespace with the resources to be affected by the Helm chart.
- * @param valuesFile - The values file to use for the Helm chart.
- * @param chaosMeshNamespace - The namespace to install the Helm chart in.
- * @param timeout - The timeout for the Helm command.
- * @param clean - Whether to clean up the Helm chart before installing it.
- * @returns The stdout of the Helm command.
- * @throws If the Helm command fails.
- *
- * Example usage:
- * ```typescript
- * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
- * console.log(stdout);
- * ```
- */
-export async function installChaosMeshChart({
-  instanceName,
-  targetNamespace,
-  valuesFile,
-  helmChartDir,
-  timeout = '10m',
-  clean = true,
-  values = {},
-  logger,
-}: {
-  instanceName: string;
-  targetNamespace: string;
-  valuesFile: string;
-  helmChartDir: string;
-  chaosMeshNamespace?: string;
-  timeout?: string;
-  clean?: boolean;
-  values?: Record<string, string | number>;
-  logger: Logger;
-}) {
-  if (clean) {
-    await uninstallChaosMesh(instanceName, targetNamespace, logger);
-  }
-
-  return execHelmCommand({
-    instanceName,
-    helmChartDir,
-    namespace: targetNamespace,
-    valuesFile,
-    timeout,
-    values: { ...values, 'global.targetNamespace': targetNamespace },
-  });
-}
-
-export function applyProverFailure({
-  namespace,
-  spartanDir,
-  durationSeconds,
-  logger,
-}: {
-  namespace: string;
-  spartanDir: string;
-  durationSeconds: number;
-  logger: Logger;
-}) {
-  return installChaosMeshChart({
-    instanceName: 'prover-failure',
-    targetNamespace: namespace,
-    valuesFile: 'prover-failure.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    values: {
-      'proverFailure.duration': `${durationSeconds}s`,
-    },
-    logger,
-  });
-}
-
-export function applyValidatorFailure({
-  namespace,
-  spartanDir,
-  logger,
-  values,
-  instanceName,
-}: {
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-  values?: Record<string, string | number>;
-  instanceName?: string;
-}) {
-  return installChaosMeshChart({
-    instanceName: instanceName ?? 'validator-failure',
-    targetNamespace: namespace,
-    valuesFile: 'validator-failure.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    values,
-    logger,
-  });
-}
-
-export function applyProverKill({
-  namespace,
-  spartanDir,
-  logger,
-  values,
-}: {
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-  values?: Record<string, string | number>;
-}) {
-  return installChaosMeshChart({
-    instanceName: 'prover-kill',
-    targetNamespace: namespace,
-    valuesFile: 'prover-kill.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    chaosMeshNamespace: namespace,
-    clean: true,
-    logger,
-    values,
-  });
-}
-
-export function applyProverBrokerKill({
-  namespace,
-  spartanDir,
-  logger,
-  values,
-}: {
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-  values?: Record<string, string | number>;
-}) {
-  return installChaosMeshChart({
-    instanceName: 'prover-broker-kill',
-    targetNamespace: namespace,
-    valuesFile: 'prover-broker-kill.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    clean: true,
-    logger,
-    values,
-  });
-}
-
-export function applyBootNodeFailure({
-  instanceName = 'boot-node-failure',
-  namespace,
-  spartanDir,
-  durationSeconds,
-  logger,
-  values,
-}: {
-  instanceName?: string;
-  namespace: string;
-  spartanDir: string;
-  durationSeconds: number;
-  logger: Logger;
-  values?: Record<string, string | number>;
-}) {
-  return installChaosMeshChart({
-    instanceName,
-    targetNamespace: namespace,
-    valuesFile: 'boot-node-failure.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    values: {
-      'bootNodeFailure.duration': `${durationSeconds}s`,
-      ...(values ?? {}),
-    },
-    logger,
-  });
-}
-
-export function applyValidatorKill({
-  instanceName = 'validator-kill',
-  namespace,
-  spartanDir,
-  logger,
-  values,
-  clean = true,
-}: {
-  instanceName?: string;
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-  values?: Record<string, string | number>;
-  clean?: boolean;
-}) {
-  return installChaosMeshChart({
-    instanceName: instanceName ?? 'validator-kill',
-    targetNamespace: namespace,
-    valuesFile: 'validator-kill.yaml',
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    clean,
-    logger,
-    values,
-  });
-}
-
-export function applyNetworkShaping({
-  instanceName = 'network-shaping',
-  valuesFile,
-  namespace,
-  spartanDir,
-  logger,
-}: {
-  instanceName?: string;
-  valuesFile: string;
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-}) {
-  return installChaosMeshChart({
-    instanceName,
-    targetNamespace: namespace,
-    valuesFile,
-    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
-    logger,
-  });
-}
-
-export async function awaitCheckpointNumber(
-  rollupCheatCodes: RollupCheatCodes,
-  checkpointNumber: CheckpointNumber,
-  timeoutSeconds: number,
-  logger: Logger,
-) {
-  logger.info(`Waiting for checkpoint ${checkpointNumber}`);
-  let tips = await rollupCheatCodes.getTips();
-  const endTime = Date.now() + timeoutSeconds * 1000;
-  while (tips.pending < checkpointNumber && Date.now() < endTime) {
-    logger.info(`At checkpoint ${tips.pending}`);
-    await sleep(1000);
-    tips = await rollupCheatCodes.getTips();
-  }
-  if (tips.pending < checkpointNumber) {
-    throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
-  } else {
-    logger.info(`Reached checkpoint ${tips.pending}`);
-  }
-}
-
-export async function restartBot(namespace: string, logger: Logger) {
-  logger.info(`Restarting bot`);
-  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
-  await sleep(10 * 1000);
-  // Some bot images may take time to report Ready due to heavy boot-time proving.
-  // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
-  await waitForResourceByLabel({
-    resource: 'pods',
-    namespace,
-    label: 'app.kubernetes.io/name=bot',
-    condition: 'PodReadyToStartContainers',
-  });
-  logger.info(`Bot restarted`);
-}
-
-/**
- * Installs or upgrades the transfer bot Helm release for the given namespace.
- * Intended for test setup to enable L2 traffic generation only when needed.
- */
-export async function installTransferBot({
-  namespace,
-  spartanDir,
-  logger,
-  replicas = 1,
-  txIntervalSeconds = 10,
-  followChain = 'PENDING',
-  mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
-  mnemonicStartIndex,
-  botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
-  nodeUrl,
-  timeout = '15m',
-  reuseValues = true,
-  aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
-}: {
-  namespace: string;
-  spartanDir: string;
-  logger: Logger;
-  replicas?: number;
-  txIntervalSeconds?: number;
-  followChain?: string;
-  mnemonic?: string;
-  mnemonicStartIndex?: number | string;
-  botPrivateKey?: string;
-  nodeUrl?: string;
-  timeout?: string;
-  reuseValues?: boolean;
-  aztecSlotDuration?: number;
-}) {
-  const instanceName = `${namespace}-bot-transfers`;
-  const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
-  const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
-
-  logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
-
-  const values: Record<string, string | number | boolean> = {
-    'bot.replicaCount': replicas,
-    'bot.txIntervalSeconds': txIntervalSeconds,
-    'bot.followChain': followChain,
-    'bot.botPrivateKey': botPrivateKey,
-    'bot.nodeUrl': resolvedNodeUrl,
-    'bot.mnemonic': mnemonic,
-    'bot.feePaymentMethod': 'fee_juice',
-    'aztec.slotDuration': aztecSlotDuration,
-    // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
-    // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
-    'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
-    // Provide L1 execution RPC for bridging fee juice
-    'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
-    // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
-    'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
-
-    // The bot does not need Kubernetes API access. Disable RBAC + ServiceAccount creation so the chart
-    // can be installed by users without cluster-scoped RBAC permissions.
-    'bot.rbac.create': false,
-    'bot.serviceAccount.create': false,
-    'bot.serviceAccount.name': 'default',
-  };
-  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
-  if (mnemonicStartIndex === undefined) {
-    values['bot.mnemonicStartIndex'] = 0;
-  }
-  // Also pass a funded private key directly if available
-  if (process.env.FUNDING_PRIVATE_KEY) {
-    values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
-  }
-  // Align bot image with the running network image: prefer env var, else detect from a validator pod
-  let repositoryFromEnv: string | undefined;
-  let tagFromEnv: string | undefined;
-  const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
-  if (aztecDockerImage && aztecDockerImage.includes(':')) {
-    const lastColon = aztecDockerImage.lastIndexOf(':');
-    repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
-    tagFromEnv = aztecDockerImage.slice(lastColon + 1);
-  }
-
-  let repository = repositoryFromEnv;
-  let tag = tagFromEnv;
-  if (!repository || !tag) {
-    try {
-      const { stdout } = await execAsync(
-        `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
-      );
-      const image = stdout.trim().replace(/^'|'$/g, '');
-      if (image && image.includes(':')) {
-        const lastColon = image.lastIndexOf(':');
-        repository = image.slice(0, lastColon);
-        tag = image.slice(lastColon + 1);
-      }
-    } catch (err) {
-      logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
-    }
-  }
-  if (repository && tag) {
-    values['global.aztecImage.repository'] = repository;
-    values['global.aztecImage.tag'] = tag;
-  }
-  if (mnemonicStartIndex !== undefined) {
-    values['bot.mnemonicStartIndex'] =
-      typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
-  }
-
-  // If a previous install attempt left the release in a non-deployed state (e.g. FAILED),
-  // `helm upgrade --install` can error with "has no deployed releases".
-  // In that case, clear the release record and do a clean install.
-  const existingStatus = await getHelmReleaseStatus(instanceName, namespace);
-  if (existingStatus && existingStatus.toLowerCase() !== 'deployed') {
-    logger.warn(`Transfer bot release ${instanceName} is in status '${existingStatus}'. Reinstalling cleanly.`);
-    await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`).catch(
-      () => undefined,
-    );
-    // If helm left the release in `uninstalling`, force-delete the record so we can reinstall.
-    const afterUninstallStatus = await getHelmReleaseStatus(instanceName, namespace);
-    if (afterUninstallStatus?.toLowerCase() === 'uninstalling') {
-      await forceDeleteHelmReleaseRecord(instanceName, namespace, logger);
-    }
-  }
-
-  // `--reuse-values` fails if the release has never successfully deployed (e.g. first install, or a previous failed install).
-  // Only reuse values when we have a deployed release to reuse from.
-  const effectiveReuseValues = reuseValues && (await hasDeployedHelmRelease(instanceName, namespace));
-
-  await execHelmCommand({
-    instanceName,
-    helmChartDir,
-    namespace,
-    valuesFile: undefined,
-    timeout,
-    values: values as unknown as Record<string, string | number | boolean>,
-    reuseValues: effectiveReuseValues,
-  });
-
-  if (replicas > 0) {
-    await waitForResourceByLabel({
-      resource: 'pods',
-      namespace,
-      label: 'app.kubernetes.io/name=bot',
-      condition: 'PodReadyToStartContainers',
-    });
-  }
-}
-
-/**
- * Uninstalls the transfer bot Helm release from the given namespace.
- * Intended for test teardown to clean up bot resources.
- */
-export async function uninstallTransferBot(namespace: string, logger: Logger) {
-  const instanceName = `${namespace}-bot-transfers`;
-  logger.info(`Uninstalling transfer bot release ${instanceName}`);
-  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
-  // Ensure any leftover pods are removed
-  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
-    () => undefined,
-  );
-}
-
-/**
- * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
- * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
- */
-export async function setValidatorTxDrop({
-  namespace,
-  enabled,
-  probability,
-  logger,
-}: {
-  namespace: string;
-  enabled: boolean;
-  probability: number;
-  logger: Logger;
-}) {
-  const drop = enabled ? 'true' : 'false';
-  const prob = String(probability);
-
-  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
-  let updated = false;
-  for (const selector of selectors) {
-    try {
-      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
-      const names = list.stdout
-        .split('\n')
-        .map(s => s.trim())
-        .filter(Boolean);
-      if (names.length === 0) {
-        continue;
-      }
-      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
-      logger.info(`command: ${cmd}`);
-      await execAsync(cmd);
-      updated = true;
-    } catch (e) {
-      logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
-    }
-  }
-
-  if (!updated) {
-    logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
-    return;
-  }
-
-  // Restart validator pods to ensure env vars take effect and wait for readiness
-  await restartValidators(namespace, logger);
-}
-
-export async function restartValidators(namespace: string, logger: Logger) {
-  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
-  let any = false;
-  for (const selector of selectors) {
-    try {
-      const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
-      if (!stdout || stdout.trim().length === 0) {
-        continue;
-      }
-      any = true;
-      await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
-    } catch (e) {
-      logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
-    }
-  }
-
-  if (!any) {
-    logger.warn(`No validator pods found to restart in ${namespace}.`);
-    return;
-  }
-
-  // Wait for either label to be Ready
-  for (const selector of selectors) {
-    try {
-      await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
-      return;
-    } catch {
-      // try next
-    }
-  }
-  logger.warn(`Validator pods did not report Ready; continuing.`);
-}
-
-export async function enableValidatorDynamicBootNode(
-  instanceName: string,
-  namespace: string,
-  spartanDir: string,
-  logger: Logger,
-) {
-  logger.info(`Enabling validator dynamic boot node`);
-  await execHelmCommand({
-    instanceName,
-    namespace,
-    helmChartDir: getChartDir(spartanDir, 'aztec-network'),
-    values: {
-      'validator.dynamicBootNode': 'true',
-    },
-    valuesFile: undefined,
-    timeout: '15m',
-    reuseValues: true,
-  });
-
-  logger.info(`Validator dynamic boot node enabled`);
-}
-
-export async function getSequencers(namespace: string) {
-  const selectors = [
-    'app.kubernetes.io/name=validator',
-    'app.kubernetes.io/component=validator',
-    'app.kubernetes.io/component=sequencer-node',
-    'app=validator',
-  ];
-  for (const selector of selectors) {
-    try {
-      const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
-      const { stdout } = await execAsync(command);
-      const sequencers = stdout
-        .split(' ')
-        .map(s => s.trim())
-        .filter(Boolean);
-      if (sequencers.length > 0) {
-        logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
-        return sequencers;
-      }
-    } catch {
-      // try next selector
-    }
-  }
-
-  // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
-  throw new Error(
-    `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
-  );
-}
-
-export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
-  return withSequencersAdmin(env, async client => {
-    await client.setConfig(config);
-    return client.getConfig();
-  });
-}
-
-export function getSequencersConfig(env: TestConfig) {
-  return withSequencersAdmin(env, client => client.getConfig());
-}
-
-export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
-  const adminContainerPort = 8880;
-  const namespace = env.NAMESPACE;
-  const sequencers = await getSequencers(namespace);
-  const results = [];
-
-  for (const sequencer of sequencers) {
-    const { process, port } = await startPortForward({
-      resource: `pod/${sequencer}`,
-      namespace,
-      containerPort: adminContainerPort,
-    });
-
-    const url = `http://localhost:${port}`;
-    await retry(
-      () => fetch(`${url}/status`).then(res => res.status === 200),
-      'forward node admin port',
-      makeBackoff([1, 1, 2, 6]),
-      logger,
-      true,
-    );
-    const client = createAztecNodeAdminClient(url);
-    results.push(await fn(client));
-    process.kill();
-  }
-
-  return results;
-}
-
-/**
- * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
- * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
- */
-export async function getPublicViemClient(
-  env: TestConfig,
-  /** If set, will push the new process into it */
-  processes?: ChildProcess[],
-): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
-  const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
-  if (CREATE_ETH_DEVNET) {
-    logger.info(`Creating port forward to eth execution node`);
-    const { process, port } = await startPortForward({
-      resource: `svc/${NAMESPACE}-eth-execution`,
-      namespace: NAMESPACE,
-      containerPort: 8545,
-    });
-    const url = `http://127.0.0.1:${port}`;
-    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
-    if (processes) {
-      processes.push(process);
-    }
-    return { url, client, process };
-  } else {
-    logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
-    if (!L1_RPC_URLS_JSON) {
-      throw new Error(`L1_RPC_URLS_JSON is not defined`);
-    }
-    const client: ViemPublicClient = createPublicClient({
-      transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
-    });
-    return { url: L1_RPC_URLS_JSON, client };
-  }
-}
-
-/** Queries an Aztec node for the L1 deployment addresses */
-export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
-  let forwardProcess: ChildProcess | undefined;
-  try {
-    const [sequencer] = await getSequencers(env.NAMESPACE);
-    const { process, port } = await startPortForward({
-      resource: `pod/${sequencer}`,
-      namespace: env.NAMESPACE,
-      containerPort: 8080,
-    });
-
-    forwardProcess = process;
-    const url = `http://127.0.0.1:${port}`;
-    const node = createAztecNodeClient(url);
-    return await retry(
-      () => node.getNodeInfo().then(i => i.l1ContractAddresses),
-      'get node info',
-      makeBackoff([1, 3, 6]),
-      logger,
-    );
-  } finally {
-    forwardProcess?.kill();
-  }
-}
-
-/**
- * Rolls the Aztec pods in the given namespace.
- * @param namespace - The namespace to roll the Aztec pods in.
- * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
- * This is required for rollup upgrades where the old state is incompatible with the new rollup.
- * Defaults to false, which preserves the existing storage.
- */
-export async function rollAztecPods(namespace: string, clearState: boolean = false) {
-  // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
-  const podComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'prover-agent', 'sequencer-node', 'rpc'];
-  const pvcComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
-  // StatefulSet components that need to be scaled down before PVC deletion
-  // Note: validators use 'sequencer-node' as component label, not 'validator'
-  const statefulSetComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc'];
-
-  if (clearState) {
-    // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
-    // Otherwise PVC deletion will hang waiting for pods to terminate
-
-    // First, save original replica counts
-    const originalReplicas: Map<string, number> = new Map();
-    for (const component of statefulSetComponents) {
-      try {
-        const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o jsonpath='{.items[0].spec.replicas}'`;
-        const { stdout } = await execAsync(getCmd);
-        const replicas = parseInt(stdout.replace(/'/g, '').trim(), 10);
-        if (!isNaN(replicas) && replicas > 0) {
-          originalReplicas.set(component, replicas);
-        }
-      } catch {
-        // Component might not exist, continue
-      }
-    }
-
-    // Scale down to 0
-    for (const component of statefulSetComponents) {
-      try {
-        const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
-        logger.info(`command: ${scaleCmd}`);
-        await execAsync(scaleCmd);
-      } catch (e) {
-        // Component might not exist or might be a Deployment, continue
-        logger.verbose(`Scale down ${component} skipped: ${e}`);
-      }
-    }
-
-    // Wait for pods to terminate
-    await sleep(15 * 1000);
-
-    // Now delete PVCs (they should no longer be in use)
-    for (const component of pvcComponents) {
-      await deleteResourceByLabel({
-        resource: 'persistentvolumeclaims',
-        namespace: namespace,
-        label: `app.kubernetes.io/component=${component}`,
-      });
-    }
-
-    // Scale StatefulSets back up to original replica counts
-    for (const component of statefulSetComponents) {
-      const replicas = originalReplicas.get(component) ?? 1;
-      try {
-        const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=${replicas} --timeout=2m`;
-        logger.info(`command: ${scaleCmd}`);
-        await execAsync(scaleCmd);
-      } catch (e) {
-        logger.verbose(`Scale up ${component} skipped: ${e}`);
-      }
-    }
-  } else {
-    // Just delete pods (no state clearing)
-    for (const component of podComponents) {
-      await deleteResourceByLabel({
-        resource: 'pods',
-        namespace: namespace,
-        label: `app.kubernetes.io/component=${component}`,
-      });
-    }
-  }
-
-  await sleep(10 * 1000);
-
-  // Wait for pods to come back
-  for (const component of podComponents) {
-    await waitForResourceByLabel({
-      resource: 'pods',
-      namespace: namespace,
-      label: `app.kubernetes.io/component=${component}`,
-    });
-  }
-}
-
-/**
- * Returns the absolute path to the git repository root
- */
-export function getGitProjectRoot(): string {
-  try {
-    const rootDir = execSync('git rev-parse --show-toplevel', {
-      encoding: 'utf-8',
-      stdio: ['ignore', 'pipe', 'ignore'],
-    }).trim();
-
-    return rootDir;
-  } catch (error) {
-    throw new Error(`Failed to determine git project root: ${error}`);
-  }
-}
-
-/** Returns a client to the RPC of the given sequencer (defaults to first) */
-export async function getNodeClient(
-  env: TestConfig,
-  index: number = 0,
-): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
-  const namespace = env.NAMESPACE;
-  const containerPort = 8080;
-  const sequencers = await getSequencers(namespace);
-  const sequencer = sequencers[index];
-  if (!sequencer) {
-    throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
-  }
-
-  const { process, port } = await startPortForward({
-    resource: `pod/${sequencer}`,
-    namespace,
-    containerPort,
-  });
-
-  const url = `http://localhost:${port}`;
-  await retry(
-    () => fetch(`${url}/status`).then(res => res.status === 200),
-    'forward port',
-    makeBackoff([1, 1, 2, 6]),
-    logger,
-    true,
-  );
-
-  const client = createAztecNodeClient(url);
-  return { node: client, port, process };
-}
+export * from './utils/index.js';