@aztec/end-to-end 4.0.0-nightly.20260120 → 4.0.0-nightly.20260122
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/shared/cross_chain_test_harness.d.ts +3 -4
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/submit-transactions.d.ts +1 -1
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +1 -4
- package/dest/spartan/tx_metrics.d.ts +11 -1
- package/dest/spartan/tx_metrics.d.ts.map +1 -1
- package/dest/spartan/tx_metrics.js +133 -4
- package/dest/spartan/utils/bot.d.ts +27 -0
- package/dest/spartan/utils/bot.d.ts.map +1 -0
- package/dest/spartan/utils/bot.js +141 -0
- package/dest/spartan/utils/chaos.d.ts +79 -0
- package/dest/spartan/utils/chaos.d.ts.map +1 -0
- package/dest/spartan/utils/chaos.js +142 -0
- package/dest/spartan/utils/clients.d.ts +25 -0
- package/dest/spartan/utils/clients.d.ts.map +1 -0
- package/dest/spartan/utils/clients.js +101 -0
- package/dest/spartan/utils/config.d.ts +36 -0
- package/dest/spartan/utils/config.d.ts.map +1 -0
- package/dest/spartan/utils/config.js +20 -0
- package/dest/spartan/utils/health.d.ts +63 -0
- package/dest/spartan/utils/health.d.ts.map +1 -0
- package/dest/spartan/utils/health.js +202 -0
- package/dest/spartan/utils/helm.d.ts +15 -0
- package/dest/spartan/utils/helm.d.ts.map +1 -0
- package/dest/spartan/utils/helm.js +47 -0
- package/dest/spartan/utils/index.d.ts +9 -0
- package/dest/spartan/utils/index.d.ts.map +1 -0
- package/dest/spartan/utils/index.js +18 -0
- package/dest/spartan/utils/k8s.d.ts +59 -0
- package/dest/spartan/utils/k8s.d.ts.map +1 -0
- package/dest/spartan/utils/k8s.js +185 -0
- package/dest/spartan/utils/nodes.d.ts +31 -0
- package/dest/spartan/utils/nodes.d.ts.map +1 -0
- package/dest/spartan/utils/nodes.js +273 -0
- package/dest/spartan/utils/scripts.d.ts +16 -0
- package/dest/spartan/utils/scripts.d.ts.map +1 -0
- package/dest/spartan/utils/scripts.js +66 -0
- package/dest/spartan/utils.d.ts +2 -260
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +1 -942
- package/package.json +39 -39
- package/src/shared/cross_chain_test_harness.ts +2 -3
- package/src/shared/submit-transactions.ts +1 -6
- package/src/spartan/tx_metrics.ts +82 -4
- package/src/spartan/utils/bot.ts +185 -0
- package/src/spartan/utils/chaos.ts +253 -0
- package/src/spartan/utils/clients.ts +106 -0
- package/src/spartan/utils/config.ts +26 -0
- package/src/spartan/utils/health.ts +256 -0
- package/src/spartan/utils/helm.ts +84 -0
- package/src/spartan/utils/index.ts +58 -0
- package/src/spartan/utils/k8s.ts +279 -0
- package/src/spartan/utils/nodes.ts +308 -0
- package/src/spartan/utils/scripts.ts +63 -0
- package/src/spartan/utils.ts +1 -1246
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
2
|
+
|
|
3
|
+
import { exec } from 'child_process';
|
|
4
|
+
import { promisify } from 'util';
|
|
5
|
+
|
|
6
|
+
import { execHelmCommand } from './helm.js';
|
|
7
|
+
import { deleteResourceByLabel, getChartDir } from './k8s.js';
|
|
8
|
+
|
|
9
|
+
// Promisified child_process.exec, used below to shell out to `helm`.
const execAsync = promisify(exec);
|
|
10
|
+
|
|
11
|
+
export async function uninstallChaosMesh(instanceName: string, namespace: string, log: Logger) {
|
|
12
|
+
// uninstall the helm chart if it exists
|
|
13
|
+
log.info(`Uninstalling helm chart ${instanceName}`);
|
|
14
|
+
await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
|
|
15
|
+
// and delete the chaos-mesh resources created by this release
|
|
16
|
+
const deleteByLabel = async (resource: string) => {
|
|
17
|
+
const args = {
|
|
18
|
+
resource,
|
|
19
|
+
namespace: namespace,
|
|
20
|
+
label: `app.kubernetes.io/instance=${instanceName}`,
|
|
21
|
+
} as const;
|
|
22
|
+
log.info(`Deleting ${resource} resources for release ${instanceName}`);
|
|
23
|
+
await deleteResourceByLabel(args).catch(e => {
|
|
24
|
+
log.error(`Error deleting ${resource}: ${e}`);
|
|
25
|
+
log.info(`Force deleting ${resource}`);
|
|
26
|
+
return deleteResourceByLabel({ ...args, force: true });
|
|
27
|
+
});
|
|
28
|
+
};
|
|
29
|
+
|
|
30
|
+
await deleteByLabel('podchaos');
|
|
31
|
+
await deleteByLabel('networkchaos');
|
|
32
|
+
await deleteByLabel('podnetworkchaos');
|
|
33
|
+
await deleteByLabel('workflows');
|
|
34
|
+
await deleteByLabel('workflownodes');
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
/**
 * Installs a Helm chart with the given parameters.
 * @param instanceName - The name of the Helm chart instance.
 * @param targetNamespace - The namespace with the resources to be affected by the Helm chart.
 * @param valuesFile - The values file to use for the Helm chart.
 * @param helmChartDir - Directory containing the chart to install.
 * @param chaosMeshNamespace - The namespace to install the Helm chart in.
 * @param timeout - The timeout for the Helm command.
 * @param clean - Whether to clean up the Helm chart before installing it.
 * @param values - Extra `--set` style overrides merged into the chart values.
 * @param logger - Logger for progress reporting.
 * @returns The stdout of the Helm command.
 * @throws If the Helm command fails.
 *
 * NOTE(review): `chaosMeshNamespace` is declared in the options type and documented
 * above, but it is never destructured or forwarded — the chart is always installed
 * with `namespace: targetNamespace`. Confirm whether it should be wired through to
 * `execHelmCommand` or removed from the type.
 *
 * Example usage:
 * ```typescript
 * const stdout = await installChaosMeshChart({ instanceName: 'force-reorg', targetNamespace: 'smoke', valuesFile: 'prover-failure.yaml'});
 * console.log(stdout);
 * ```
 */
export async function installChaosMeshChart({
  instanceName,
  targetNamespace,
  valuesFile,
  helmChartDir,
  timeout = '10m',
  clean = true,
  values = {},
  logger: log,
}: {
  instanceName: string;
  targetNamespace: string;
  valuesFile: string;
  helmChartDir: string;
  chaosMeshNamespace?: string;
  timeout?: string;
  clean?: boolean;
  values?: Record<string, string | number>;
  logger: Logger;
}) {
  if (clean) {
    // Remove any previous release plus leftover chaos resources before reinstalling.
    await uninstallChaosMesh(instanceName, targetNamespace, log);
  }

  return execHelmCommand({
    instanceName,
    helmChartDir,
    namespace: targetNamespace,
    valuesFile,
    timeout,
    // Always point the chart at the namespace under test; caller-supplied values
    // cannot override 'global.targetNamespace' because it is spread last.
    values: { ...values, 'global.targetNamespace': targetNamespace },
  });
}
|
|
87
|
+
|
|
88
|
+
export function applyProverFailure({
|
|
89
|
+
namespace,
|
|
90
|
+
spartanDir,
|
|
91
|
+
durationSeconds,
|
|
92
|
+
logger: log,
|
|
93
|
+
}: {
|
|
94
|
+
namespace: string;
|
|
95
|
+
spartanDir: string;
|
|
96
|
+
durationSeconds: number;
|
|
97
|
+
logger: Logger;
|
|
98
|
+
}) {
|
|
99
|
+
return installChaosMeshChart({
|
|
100
|
+
instanceName: 'prover-failure',
|
|
101
|
+
targetNamespace: namespace,
|
|
102
|
+
valuesFile: 'prover-failure.yaml',
|
|
103
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
104
|
+
values: {
|
|
105
|
+
'proverFailure.duration': `${durationSeconds}s`,
|
|
106
|
+
},
|
|
107
|
+
logger: log,
|
|
108
|
+
});
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
export function applyValidatorFailure({
|
|
112
|
+
namespace,
|
|
113
|
+
spartanDir,
|
|
114
|
+
logger: log,
|
|
115
|
+
values,
|
|
116
|
+
instanceName,
|
|
117
|
+
}: {
|
|
118
|
+
namespace: string;
|
|
119
|
+
spartanDir: string;
|
|
120
|
+
logger: Logger;
|
|
121
|
+
values?: Record<string, string | number>;
|
|
122
|
+
instanceName?: string;
|
|
123
|
+
}) {
|
|
124
|
+
return installChaosMeshChart({
|
|
125
|
+
instanceName: instanceName ?? 'validator-failure',
|
|
126
|
+
targetNamespace: namespace,
|
|
127
|
+
valuesFile: 'validator-failure.yaml',
|
|
128
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
129
|
+
values,
|
|
130
|
+
logger: log,
|
|
131
|
+
});
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
/**
 * Installs the prover-kill chaos scenario against the given namespace.
 * @param values - Optional extra chart value overrides.
 * @returns The stdout of the underlying helm install.
 */
export function applyProverKill({
  namespace,
  spartanDir,
  logger: log,
  values,
}: {
  namespace: string;
  spartanDir: string;
  logger: Logger;
  values?: Record<string, string | number>;
}) {
  return installChaosMeshChart({
    instanceName: 'prover-kill',
    targetNamespace: namespace,
    valuesFile: 'prover-kill.yaml',
    helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
    // NOTE(review): installChaosMeshChart accepts `chaosMeshNamespace` in its options
    // type but never reads it, so this argument currently has no effect — confirm intent.
    chaosMeshNamespace: namespace,
    // Explicit, although `clean` already defaults to true in installChaosMeshChart.
    clean: true,
    logger: log,
    values,
  });
}
|
|
156
|
+
|
|
157
|
+
export function applyProverBrokerKill({
|
|
158
|
+
namespace,
|
|
159
|
+
spartanDir,
|
|
160
|
+
logger: log,
|
|
161
|
+
values,
|
|
162
|
+
}: {
|
|
163
|
+
namespace: string;
|
|
164
|
+
spartanDir: string;
|
|
165
|
+
logger: Logger;
|
|
166
|
+
values?: Record<string, string | number>;
|
|
167
|
+
}) {
|
|
168
|
+
return installChaosMeshChart({
|
|
169
|
+
instanceName: 'prover-broker-kill',
|
|
170
|
+
targetNamespace: namespace,
|
|
171
|
+
valuesFile: 'prover-broker-kill.yaml',
|
|
172
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
173
|
+
clean: true,
|
|
174
|
+
logger: log,
|
|
175
|
+
values,
|
|
176
|
+
});
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
export function applyBootNodeFailure({
|
|
180
|
+
instanceName = 'boot-node-failure',
|
|
181
|
+
namespace,
|
|
182
|
+
spartanDir,
|
|
183
|
+
durationSeconds,
|
|
184
|
+
logger: log,
|
|
185
|
+
values,
|
|
186
|
+
}: {
|
|
187
|
+
instanceName?: string;
|
|
188
|
+
namespace: string;
|
|
189
|
+
spartanDir: string;
|
|
190
|
+
durationSeconds: number;
|
|
191
|
+
logger: Logger;
|
|
192
|
+
values?: Record<string, string | number>;
|
|
193
|
+
}) {
|
|
194
|
+
return installChaosMeshChart({
|
|
195
|
+
instanceName,
|
|
196
|
+
targetNamespace: namespace,
|
|
197
|
+
valuesFile: 'boot-node-failure.yaml',
|
|
198
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
199
|
+
values: {
|
|
200
|
+
'bootNodeFailure.duration': `${durationSeconds}s`,
|
|
201
|
+
...(values ?? {}),
|
|
202
|
+
},
|
|
203
|
+
logger: log,
|
|
204
|
+
});
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
export function applyValidatorKill({
|
|
208
|
+
instanceName = 'validator-kill',
|
|
209
|
+
namespace,
|
|
210
|
+
spartanDir,
|
|
211
|
+
logger: log,
|
|
212
|
+
values,
|
|
213
|
+
clean = true,
|
|
214
|
+
}: {
|
|
215
|
+
instanceName?: string;
|
|
216
|
+
namespace: string;
|
|
217
|
+
spartanDir: string;
|
|
218
|
+
logger: Logger;
|
|
219
|
+
values?: Record<string, string | number>;
|
|
220
|
+
clean?: boolean;
|
|
221
|
+
}) {
|
|
222
|
+
return installChaosMeshChart({
|
|
223
|
+
instanceName: instanceName ?? 'validator-kill',
|
|
224
|
+
targetNamespace: namespace,
|
|
225
|
+
valuesFile: 'validator-kill.yaml',
|
|
226
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
227
|
+
clean,
|
|
228
|
+
logger: log,
|
|
229
|
+
values,
|
|
230
|
+
});
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
export function applyNetworkShaping({
|
|
234
|
+
instanceName = 'network-shaping',
|
|
235
|
+
valuesFile,
|
|
236
|
+
namespace,
|
|
237
|
+
spartanDir,
|
|
238
|
+
logger: log,
|
|
239
|
+
}: {
|
|
240
|
+
instanceName?: string;
|
|
241
|
+
valuesFile: string;
|
|
242
|
+
namespace: string;
|
|
243
|
+
spartanDir: string;
|
|
244
|
+
logger: Logger;
|
|
245
|
+
}) {
|
|
246
|
+
return installChaosMeshChart({
|
|
247
|
+
instanceName,
|
|
248
|
+
targetNamespace: namespace,
|
|
249
|
+
valuesFile,
|
|
250
|
+
helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'),
|
|
251
|
+
logger: log,
|
|
252
|
+
});
|
|
253
|
+
}
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import { createLogger } from '@aztec/aztec.js/log';
|
|
2
|
+
import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
|
|
3
|
+
import type { ViemPublicClient } from '@aztec/ethereum/types';
|
|
4
|
+
import { makeBackoff, retry } from '@aztec/foundation/retry';
|
|
5
|
+
import { createAztecNodeClient } from '@aztec/stdlib/interfaces/client';
|
|
6
|
+
|
|
7
|
+
import type { ChildProcess } from 'child_process';
|
|
8
|
+
import { createPublicClient, fallback, http } from 'viem';
|
|
9
|
+
|
|
10
|
+
import type { TestConfig } from './config.js';
|
|
11
|
+
import { startPortForward } from './k8s.js';
|
|
12
|
+
import { getSequencers } from './nodes.js';
|
|
13
|
+
|
|
14
|
+
// Module-level logger shared by the client helpers in this file.
const logger = createLogger('e2e:k8s-utils');
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
|
|
18
|
+
* it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
|
|
19
|
+
*/
|
|
20
|
+
export async function getPublicViemClient(
|
|
21
|
+
env: TestConfig,
|
|
22
|
+
/** If set, will push the new process into it */
|
|
23
|
+
processes?: ChildProcess[],
|
|
24
|
+
): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
|
|
25
|
+
const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
|
|
26
|
+
if (CREATE_ETH_DEVNET) {
|
|
27
|
+
logger.info(`Creating port forward to eth execution node`);
|
|
28
|
+
const { process, port } = await startPortForward({
|
|
29
|
+
resource: `svc/${NAMESPACE}-eth-execution`,
|
|
30
|
+
namespace: NAMESPACE,
|
|
31
|
+
containerPort: 8545,
|
|
32
|
+
});
|
|
33
|
+
const url = `http://127.0.0.1:${port}`;
|
|
34
|
+
const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url, { batch: false })]) });
|
|
35
|
+
if (processes) {
|
|
36
|
+
processes.push(process);
|
|
37
|
+
}
|
|
38
|
+
return { url, client, process };
|
|
39
|
+
} else {
|
|
40
|
+
logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
|
|
41
|
+
if (!L1_RPC_URLS_JSON) {
|
|
42
|
+
throw new Error(`L1_RPC_URLS_JSON is not defined`);
|
|
43
|
+
}
|
|
44
|
+
const client: ViemPublicClient = createPublicClient({
|
|
45
|
+
transport: fallback([http(L1_RPC_URLS_JSON, { batch: false })]),
|
|
46
|
+
});
|
|
47
|
+
return { url: L1_RPC_URLS_JSON, client };
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
/** Queries an Aztec node for the L1 deployment addresses */
|
|
52
|
+
export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
|
|
53
|
+
let forwardProcess: ChildProcess | undefined;
|
|
54
|
+
try {
|
|
55
|
+
const [sequencer] = await getSequencers(env.NAMESPACE);
|
|
56
|
+
const { process, port } = await startPortForward({
|
|
57
|
+
resource: `pod/${sequencer}`,
|
|
58
|
+
namespace: env.NAMESPACE,
|
|
59
|
+
containerPort: 8080,
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
forwardProcess = process;
|
|
63
|
+
const url = `http://127.0.0.1:${port}`;
|
|
64
|
+
const node = createAztecNodeClient(url);
|
|
65
|
+
return await retry(
|
|
66
|
+
() => node.getNodeInfo().then(i => i.l1ContractAddresses),
|
|
67
|
+
'get node info',
|
|
68
|
+
makeBackoff([1, 3, 6]),
|
|
69
|
+
logger,
|
|
70
|
+
);
|
|
71
|
+
} finally {
|
|
72
|
+
forwardProcess?.kill();
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/** Returns a client to the RPC of the given sequencer (defaults to first) */
|
|
77
|
+
export async function getNodeClient(
|
|
78
|
+
env: TestConfig,
|
|
79
|
+
index: number = 0,
|
|
80
|
+
): Promise<{ node: ReturnType<typeof createAztecNodeClient>; port: number; process: ChildProcess }> {
|
|
81
|
+
const namespace = env.NAMESPACE;
|
|
82
|
+
const containerPort = 8080;
|
|
83
|
+
const sequencers = await getSequencers(namespace);
|
|
84
|
+
const sequencer = sequencers[index];
|
|
85
|
+
if (!sequencer) {
|
|
86
|
+
throw new Error(`No sequencer found at index ${index} in namespace ${namespace}`);
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
const { process, port } = await startPortForward({
|
|
90
|
+
resource: `pod/${sequencer}`,
|
|
91
|
+
namespace,
|
|
92
|
+
containerPort,
|
|
93
|
+
});
|
|
94
|
+
|
|
95
|
+
const url = `http://localhost:${port}`;
|
|
96
|
+
await retry(
|
|
97
|
+
() => fetch(`${url}/status`).then(res => res.status === 200),
|
|
98
|
+
'forward port',
|
|
99
|
+
makeBackoff([1, 1, 2, 6]),
|
|
100
|
+
logger,
|
|
101
|
+
true,
|
|
102
|
+
);
|
|
103
|
+
|
|
104
|
+
const client = createAztecNodeClient(url);
|
|
105
|
+
return { node: client, port, process };
|
|
106
|
+
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import { createLogger } from '@aztec/aztec.js/log';
|
|
2
|
+
import { schemas } from '@aztec/foundation/schemas';
|
|
3
|
+
|
|
4
|
+
import { z } from 'zod';
|
|
5
|
+
|
|
6
|
+
const logger = createLogger('e2e:k8s-utils');
|
|
7
|
+
|
|
8
|
+
const testConfigSchema = z.object({
|
|
9
|
+
NAMESPACE: z.string().default('scenario'),
|
|
10
|
+
REAL_VERIFIER: schemas.Boolean.optional().default(true),
|
|
11
|
+
CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
|
|
12
|
+
L1_RPC_URLS_JSON: z.string().optional(),
|
|
13
|
+
L1_ACCOUNT_MNEMONIC: z.string().optional(),
|
|
14
|
+
AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
|
|
15
|
+
AZTEC_EPOCH_DURATION: z.coerce.number().optional().default(32),
|
|
16
|
+
AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
|
|
17
|
+
AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET: z.coerce.number().optional().default(2),
|
|
18
|
+
});
|
|
19
|
+
|
|
20
|
+
export type TestConfig = z.infer<typeof testConfigSchema>;
|
|
21
|
+
|
|
22
|
+
export function setupEnvironment(env: unknown): TestConfig {
|
|
23
|
+
const config = testConfigSchema.parse(env);
|
|
24
|
+
logger.warn(`Loaded env config`, config);
|
|
25
|
+
return config;
|
|
26
|
+
}
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
import { createAztecNodeClient } from '@aztec/aztec.js/node';
|
|
2
|
+
import { createEthereumChain } from '@aztec/ethereum/chain';
|
|
3
|
+
import { RollupContract } from '@aztec/ethereum/contracts';
|
|
4
|
+
import type { ViemPublicClient } from '@aztec/ethereum/types';
|
|
5
|
+
import { CheckpointNumber } from '@aztec/foundation/branded-types';
|
|
6
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
7
|
+
|
|
8
|
+
import type { ChildProcess } from 'child_process';
|
|
9
|
+
import { createPublicClient, fallback, http } from 'viem';
|
|
10
|
+
|
|
11
|
+
import { startPortForwardForEthereum, startPortForwardForRPC } from './k8s.js';
|
|
12
|
+
|
|
13
|
+
/**
 * Snapshot of chain state captured during setup for comparison in teardown.
 */
export interface ChainHealthSnapshot {
  // L2 block number observed at setup time.
  blockNumber: number;
  // Checkpoint number read from the rollup contract at setup time.
  checkpointNumber: CheckpointNumber;
  // Wall-clock time (ms since epoch) when the snapshot was taken.
  timestamp: number;
}
|
|
21
|
+
|
|
22
|
+
/**
 * Pre-flight and post-flight health checks for the Aztec network.
 *
 * Use in beforeAll/afterAll to validate the chain is healthy before tests run
 * and verify it continued progressing during the test.
 *
 * Port-forwards to the sequencer RPC and the eth execution node are opened
 * temporarily for each call and always torn down in a `finally` block.
 *
 * @example
 * ```typescript
 * const health = new ChainHealth(config.NAMESPACE, logger);
 *
 * beforeAll(async () => {
 *   await health.setup();
 * });
 *
 * afterAll(async () => {
 *   await health.teardown();
 * });
 * ```
 */
export class ChainHealth {
  // Kubernetes namespace the network under test runs in.
  private namespace: string;
  private logger: Logger;
  // Set by setup(), consumed and cleared by teardown().
  private snapshot?: ChainHealthSnapshot;

  constructor(namespace: string, logger: Logger) {
    this.namespace = namespace;
    this.logger = logger;
  }

  /**
   * Pre-flight health check. Validates chain is in a testable state and captures
   * initial state for comparison in teardown.
   *
   * Checks performed:
   * - Node is reachable and returns valid info
   * - ENR exists
   * - L1 is accessible
   * - At least 1 L2 block has been mined
   * - Committee exists
   * - At least 1 checkpoint has been reached
   *
   * @throws Error if any health check fails
   */
  async setup(): Promise<void> {
    // Collect every port-forward we spawn so the finally block can kill them all.
    const processes: ChildProcess[] = [];

    try {
      // Establish temporary connections
      const { process: rpcProcess, port: rpcPort } = await startPortForwardForRPC(this.namespace);
      processes.push(rpcProcess);

      const { process: ethProcess, port: ethPort } = await startPortForwardForEthereum(this.namespace);
      processes.push(ethProcess);

      const nodeUrl = `http://127.0.0.1:${rpcPort}`;
      const ethereumUrl = `http://127.0.0.1:${ethPort}`;

      // Create clients
      const node = createAztecNodeClient(nodeUrl);

      // Check 1: Node is reachable
      let nodeInfo;
      try {
        nodeInfo = await node.getNodeInfo();
      } catch (err) {
        throw new Error(`Health check failed: Node is not reachable at ${nodeUrl}. Error: ${err}`);
      }

      if (!nodeInfo) {
        throw new Error('Health check failed: Node returned empty info');
      }

      // Check 2: ENR exists (P2P identity)
      // A textual ENR record always begins with the 'enr:-' prefix.
      if (!nodeInfo.enr || !nodeInfo.enr.startsWith('enr:-')) {
        throw new Error(`Health check failed: Invalid or missing ENR. Got: ${nodeInfo.enr}`);
      }

      // Check 3: L1 is accessible
      const chain = createEthereumChain([ethereumUrl], nodeInfo.l1ChainId);
      const ethereumClient: ViemPublicClient = createPublicClient({
        chain: chain.chainInfo,
        transport: fallback([http(ethereumUrl, { batch: false })]),
      });

      try {
        await ethereumClient.getBlockNumber();
      } catch (err) {
        throw new Error(`Health check failed: L1 is not accessible at ${ethereumUrl}. Error: ${err}`);
      }

      // Check 4: At least 1 L2 block mined
      let l2BlockNumber;
      try {
        l2BlockNumber = await node.getBlockNumber();
      } catch (err) {
        throw new Error(`Health check failed: Could not get L2 block number. Error: ${err}`);
      }

      if (l2BlockNumber < 1) {
        throw new Error(`Health check failed: No L2 blocks mined yet. Block number: ${l2BlockNumber}`);
      }

      // Check 5: Committee exists
      const rollup = new RollupContract(ethereumClient, nodeInfo.l1ContractAddresses.rollupAddress);

      let committee;
      try {
        committee = await rollup.getCurrentEpochCommittee();
      } catch (err) {
        throw new Error(`Health check failed: Could not get committee. Error: ${err}`);
      }

      if (!committee || committee.length === 0) {
        throw new Error('Health check failed: No committee exists. Validators may not be registered yet.');
      }

      // Check 6: At least 1 checkpoint reached
      let checkpointNumber;
      try {
        checkpointNumber = await rollup.getCheckpointNumber();
      } catch (err) {
        throw new Error(`Health check failed: Could not get checkpoint number. Error: ${err}`);
      }

      if (checkpointNumber < CheckpointNumber(1)) {
        throw new Error(
          `Health check failed: No checkpoint reached yet. Checkpoint number: ${checkpointNumber}. ` +
            'The proving pipeline may not have completed a proof yet.',
        );
      }

      // Capture snapshot for teardown comparison
      this.snapshot = {
        blockNumber: l2BlockNumber,
        checkpointNumber,
        timestamp: Date.now(),
      };

      this.logger.info('Pre-flight health check passed');
    } finally {
      processes.forEach(p => p.kill());
    }
  }

  /**
   * Post-flight health check. Verifies the chain continued progressing during the test.
   *
   * For tests that ran longer than the threshold, checks:
   * - Block number increased since setup
   * - Checkpoint number increased since setup
   *
   * For shorter tests, skips the check.
   *
   * The stored snapshot is cleared on every exit path (via `finally`), so a second
   * teardown() without a fresh setup() only logs a warning.
   *
   * @throws Error if chain did not progress
   */
  async teardown(): Promise<void> {
    if (!this.snapshot) {
      this.logger.warn('Teardown called without setup - skipping chain progress check');
      return;
    }

    const processes: ChildProcess[] = [];
    // Minimum test duration to check chain progression
    const PROGRESS_CHECK_THRESHOLD_SECONDS = 120;

    try {
      const elapsedSeconds = Math.round((Date.now() - this.snapshot.timestamp) / 1000);

      // Skip progress check for short tests
      // (early return still clears the snapshot in the finally block below)
      if (elapsedSeconds <= PROGRESS_CHECK_THRESHOLD_SECONDS) {
        this.logger.info('Post-flight health check passed (skipped progress check - test too short)');
        return;
      }

      const { process: rpcProcess, port: rpcPort } = await startPortForwardForRPC(this.namespace);
      processes.push(rpcProcess);

      const { process: ethProcess, port: ethPort } = await startPortForwardForEthereum(this.namespace);
      processes.push(ethProcess);

      const nodeUrl = `http://127.0.0.1:${rpcPort}`;
      const ethereumUrl = `http://127.0.0.1:${ethPort}`;
      const node = createAztecNodeClient(nodeUrl);

      // Check that block number increased
      let currentBlockNumber;
      try {
        currentBlockNumber = await node.getBlockNumber();
      } catch (err) {
        throw new Error(`Teardown health check failed: Could not get block number. Error: ${err}`);
      }

      if (currentBlockNumber <= this.snapshot.blockNumber) {
        throw new Error(
          `Chain did not progress during test. ` +
            `Block number at setup: ${this.snapshot.blockNumber}, ` +
            `Block number at teardown: ${currentBlockNumber}, ` +
            `Elapsed time: ${elapsedSeconds}s. ` +
            `The chain may have stalled during the test.`,
        );
      }

      // Check that checkpoint number increased
      const nodeInfo = await node.getNodeInfo();
      const chain = createEthereumChain([ethereumUrl], nodeInfo.l1ChainId);
      const ethereumClient: ViemPublicClient = createPublicClient({
        chain: chain.chainInfo,
        transport: fallback([http(ethereumUrl, { batch: false })]),
      });

      const rollup = new RollupContract(ethereumClient, nodeInfo.l1ContractAddresses.rollupAddress);
      let currentCheckpoint;
      try {
        currentCheckpoint = await rollup.getCheckpointNumber();
      } catch (err) {
        throw new Error(`Teardown health check failed: Could not get checkpoint number. Error: ${err}`);
      }

      if (currentCheckpoint <= this.snapshot.checkpointNumber) {
        throw new Error(
          `Proving pipeline did not progress during test. ` +
            `Checkpoint at setup: ${this.snapshot.checkpointNumber}, ` +
            `Checkpoint at teardown: ${currentCheckpoint}, ` +
            `Elapsed time: ${elapsedSeconds}s. ` +
            `The proving pipeline may have stalled during the test.`,
        );
      }

      this.logger.info('Post-flight health check passed');
    } finally {
      processes.forEach(p => p.kill());
      this.snapshot = undefined;
    }
  }
}
|