@aztec/end-to-end 0.0.1-commit.d431d1c → 0.0.1-commit.e310a4c8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.js +28 -13
- package/dest/bench/utils.d.ts +5 -4
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +9 -7
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +7 -8
- package/dest/e2e_epochs/epochs_test.d.ts +7 -1
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +29 -7
- package/dest/e2e_fees/bridging_race.notest.js +1 -1
- package/dest/e2e_fees/fees_test.d.ts +1 -1
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +6 -6
- package/dest/e2e_l1_publisher/write_json.d.ts +4 -3
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
- package/dest/e2e_l1_publisher/write_json.js +1 -7
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +2 -2
- package/dest/e2e_p2p/p2p_network.d.ts +1 -1
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +6 -2
- package/dest/e2e_p2p/reqresp/utils.d.ts +22 -0
- package/dest/e2e_p2p/reqresp/utils.d.ts.map +1 -0
- package/dest/e2e_p2p/reqresp/utils.js +153 -0
- package/dest/e2e_p2p/shared.d.ts +6 -6
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +9 -16
- package/dest/e2e_token_contract/token_contract_test.d.ts +1 -1
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +3 -3
- package/dest/fixtures/e2e_prover_test.d.ts +1 -1
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
- package/dest/fixtures/e2e_prover_test.js +8 -5
- package/dest/fixtures/setup.d.ts +5 -3
- package/dest/fixtures/setup.d.ts.map +1 -1
- package/dest/fixtures/setup.js +31 -20
- package/dest/fixtures/setup_p2p_test.d.ts +4 -5
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +24 -19
- package/dest/fixtures/token_utils.d.ts +1 -1
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +7 -4
- package/dest/shared/cross_chain_test_harness.d.ts +3 -4
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +11 -11
- package/dest/shared/gas_portal_test_harness.js +1 -1
- package/dest/shared/submit-transactions.d.ts +3 -3
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +9 -11
- package/dest/shared/uniswap_l1_l2.d.ts +1 -1
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +12 -12
- package/dest/simulators/lending_simulator.js +2 -2
- package/dest/spartan/setup_test_wallets.d.ts +1 -1
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +61 -34
- package/dest/spartan/tx_metrics.d.ts +38 -2
- package/dest/spartan/tx_metrics.d.ts.map +1 -1
- package/dest/spartan/tx_metrics.js +178 -4
- package/dest/spartan/utils/bot.d.ts +27 -0
- package/dest/spartan/utils/bot.d.ts.map +1 -0
- package/dest/spartan/utils/bot.js +141 -0
- package/dest/spartan/utils/chaos.d.ts +79 -0
- package/dest/spartan/utils/chaos.d.ts.map +1 -0
- package/dest/spartan/utils/chaos.js +142 -0
- package/dest/spartan/utils/clients.d.ts +39 -0
- package/dest/spartan/utils/clients.d.ts.map +1 -0
- package/dest/spartan/utils/clients.js +90 -0
- package/dest/spartan/utils/config.d.ts +36 -0
- package/dest/spartan/utils/config.d.ts.map +1 -0
- package/dest/spartan/utils/config.js +20 -0
- package/dest/spartan/utils/health.d.ts +63 -0
- package/dest/spartan/utils/health.d.ts.map +1 -0
- package/dest/spartan/utils/health.js +202 -0
- package/dest/spartan/utils/helm.d.ts +15 -0
- package/dest/spartan/utils/helm.d.ts.map +1 -0
- package/dest/spartan/utils/helm.js +47 -0
- package/dest/spartan/utils/index.d.ts +9 -0
- package/dest/spartan/utils/index.d.ts.map +1 -0
- package/dest/spartan/utils/index.js +18 -0
- package/dest/spartan/utils/k8s.d.ts +126 -0
- package/dest/spartan/utils/k8s.d.ts.map +1 -0
- package/dest/spartan/utils/k8s.js +375 -0
- package/dest/spartan/utils/nodes.d.ts +41 -0
- package/dest/spartan/utils/nodes.d.ts.map +1 -0
- package/dest/spartan/utils/nodes.js +461 -0
- package/dest/spartan/utils/scripts.d.ts +16 -0
- package/dest/spartan/utils/scripts.d.ts.map +1 -0
- package/dest/spartan/utils/scripts.js +66 -0
- package/dest/spartan/utils.d.ts +2 -260
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +1 -942
- package/package.json +39 -39
- package/src/bench/client_flows/client_flows_benchmark.ts +11 -24
- package/src/bench/utils.ts +9 -7
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +9 -12
- package/src/e2e_epochs/epochs_test.ts +58 -32
- package/src/e2e_fees/bridging_race.notest.ts +1 -4
- package/src/e2e_fees/fees_test.ts +10 -14
- package/src/e2e_l1_publisher/write_json.ts +3 -8
- package/src/e2e_nested_contract/nested_contract_test.ts +2 -4
- package/src/e2e_p2p/p2p_network.ts +15 -9
- package/src/e2e_p2p/reqresp/utils.ts +207 -0
- package/src/e2e_p2p/shared.ts +20 -22
- package/src/e2e_token_contract/token_contract_test.ts +3 -5
- package/src/fixtures/e2e_prover_test.ts +4 -9
- package/src/fixtures/setup.ts +33 -26
- package/src/fixtures/setup_p2p_test.ts +15 -20
- package/src/fixtures/token_utils.ts +6 -5
- package/src/shared/cross_chain_test_harness.ts +13 -27
- package/src/shared/gas_portal_test_harness.ts +1 -1
- package/src/shared/submit-transactions.ts +9 -15
- package/src/shared/uniswap_l1_l2.ts +12 -19
- package/src/simulators/lending_simulator.ts +2 -2
- package/src/spartan/setup_test_wallets.ts +72 -24
- package/src/spartan/tx_metrics.ts +152 -7
- package/src/spartan/utils/bot.ts +185 -0
- package/src/spartan/utils/chaos.ts +253 -0
- package/src/spartan/utils/clients.ts +100 -0
- package/src/spartan/utils/config.ts +26 -0
- package/src/spartan/utils/health.ts +255 -0
- package/src/spartan/utils/helm.ts +84 -0
- package/src/spartan/utils/index.ts +64 -0
- package/src/spartan/utils/k8s.ts +527 -0
- package/src/spartan/utils/nodes.ts +538 -0
- package/src/spartan/utils/scripts.ts +63 -0
- package/src/spartan/utils.ts +1 -1246

package/src/spartan/utils/nodes.ts (new file)
@@ -0,0 +1,538 @@
+import { createLogger } from '@aztec/aztec.js/log';
+import { createAztecNodeClient } from '@aztec/aztec.js/node';
+import type { RollupCheatCodes } from '@aztec/aztec/testing';
+import type { CheckpointNumber } from '@aztec/foundation/branded-types';
+import type { Logger } from '@aztec/foundation/log';
+import { makeBackoff, retry, retryUntil } from '@aztec/foundation/retry';
+import { sleep } from '@aztec/foundation/sleep';
+import {
+  type AztecNodeAdmin,
+  type AztecNodeAdminConfig,
+  createAztecNodeAdminClient,
+} from '@aztec/stdlib/interfaces/client';
+
+import { exec } from 'child_process';
+import { promisify } from 'util';
+
+import type { TestConfig } from './config.js';
+import { execHelmCommand } from './helm.js';
+import {
+  deleteResourceByLabel,
+  getChartDir,
+  startPortForward,
+  waitForResourceByLabel,
+  waitForResourceByName,
+  waitForStatefulSetsReady,
+} from './k8s.js';
+
+const execAsync = promisify(exec);
+
+const logger = createLogger('e2e:k8s-utils');
+
+export async function awaitCheckpointNumber(
+  rollupCheatCodes: RollupCheatCodes,
+  checkpointNumber: CheckpointNumber,
+  timeoutSeconds: number,
+  log: Logger,
+) {
+  log.info(`Waiting for checkpoint ${checkpointNumber}`);
+  let tips = await rollupCheatCodes.getTips();
+  const endTime = Date.now() + timeoutSeconds * 1000;
+  while (tips.pending < checkpointNumber && Date.now() < endTime) {
+    log.info(`At checkpoint ${tips.pending}`);
+    await sleep(1000);
+    tips = await rollupCheatCodes.getTips();
+  }
+  if (tips.pending < checkpointNumber) {
+    throw new Error(`Timeout waiting for checkpoint ${checkpointNumber}, only reached ${tips.pending}`);
+  } else {
+    log.info(`Reached checkpoint ${tips.pending}`);
+  }
+}
+
+/**
+ * Waits until the proven block number increases.
+ *
+ * @param rpcUrl - URL of an Aztec RPC node to query
+ * @param log - Logger instance
+ * @param timeoutSeconds - Maximum time to wait
+ * @param pollIntervalSeconds - How often to check
+ */
+export async function waitForProvenToAdvance(
+  rpcUrl: string,
+  log: Logger,
+  timeoutSeconds: number = 300,
+  pollIntervalSeconds: number = 12, // slot duration
+): Promise<void> {
+  const node = createAztecNodeClient(rpcUrl);
+
+  log.info('Waiting for proven block to advance (indicating epoch proof just submitted)...');
+
+  // Get current proven block number
+  let initialProvenBlock: number;
+  try {
+    const tips = await node.getL2Tips();
+    initialProvenBlock = Number(tips.proven.block.number);
+    log.info(`Current proven block: ${initialProvenBlock}. Waiting for it to increase...`);
+  } catch (err) {
+    log.warn(`Error getting initial tips: ${err}. Will poll until successful.`);
+    initialProvenBlock = 0;
+  }
+
+  await retryUntil(
+    async () => {
+      try {
+        const tips = await node.getL2Tips();
+        const currentProvenBlock = Number(tips.proven.block.number);
+        const proposedBlock = Number(tips.proposed.number);
+
+        log.verbose(
+          `Chain state: proposed=${proposedBlock}, proven=${currentProvenBlock} (waiting for > ${initialProvenBlock})`,
+        );
+
+        if (currentProvenBlock > initialProvenBlock) {
+          log.info(`Proven block advanced from ${initialProvenBlock} to ${currentProvenBlock}.`);
+          return true;
+        }
+
+        return false;
+      } catch (err) {
+        log.verbose(`Error checking tips: ${err}`);
+        return false;
+      }
+    },
+    'proven block to advance',
+    timeoutSeconds,
+    pollIntervalSeconds,
+  );
+}
+
+export async function getSequencers(namespace: string) {
+  const selectors = [
+    'app.kubernetes.io/name=validator',
+    'app.kubernetes.io/component=validator',
+    'app.kubernetes.io/component=sequencer-node',
+    'app=validator',
+  ];
+  for (const selector of selectors) {
+    try {
+      const command = `kubectl get pods -l ${selector} -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+      const { stdout } = await execAsync(command);
+      const sequencers = stdout
+        .split(' ')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (sequencers.length > 0) {
+        logger.verbose(`Found sequencer pods ${sequencers.join(', ')} (selector=${selector})`);
+        return sequencers;
+      }
+    } catch {
+      // try next selector
+    }
+  }
+
+  // Fail fast instead of returning [''] which leads to attempts to port-forward `pod/`.
+  throw new Error(
+    `No sequencer/validator pods found in namespace ${namespace}. Tried selectors: ${selectors.join(', ')}`,
+  );
+}
+
+export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+  return withSequencersAdmin(env, async client => {
+    await client.setConfig(config);
+    return client.getConfig();
+  });
+}
+
+export function getSequencersConfig(env: TestConfig) {
+  return withSequencersAdmin(env, client => client.getConfig());
+}
+
+export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+  const adminContainerPort = 8880;
+  const namespace = env.NAMESPACE;
+  const sequencers = await getSequencers(namespace);
+  const results = [];
+
+  for (const sequencer of sequencers) {
+    // Ensure pod is Ready before attempting port-forward.
+    await waitForResourceByName({ resource: 'pods', name: sequencer, namespace });
+    // Wrap port-forward + fetch in a retry to handle flaky port-forwards
+    const result = await retry(
+      async () => {
+        const { process, port } = await startPortForward({
+          resource: `pod/${sequencer}`,
+          namespace,
+          containerPort: adminContainerPort,
+        });
+
+        try {
+          const url = `http://localhost:${port}`;
+          // Quick health check before using the connection
+          const statusRes = await fetch(`${url}/status`);
+          if (statusRes.status !== 200) {
+            throw new Error(`Admin endpoint returned status ${statusRes.status}`);
+          }
+          const client = createAztecNodeAdminClient(url);
+          return { result: await fn(client), process };
+        } catch (err) {
+          // Kill the port-forward before retrying
+          process.kill();
+          throw err;
+        }
+      },
+      'connect to node admin',
+      makeBackoff([1, 2, 4, 8]),
+      logger,
+      true,
+    );
+
+    results.push(result.result);
+    result.process.kill();
+  }
+
+  return results;
+}
+
+async function getAztecImageForMigrations(namespace: string): Promise<string> {
+  const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+  if (aztecDockerImage) {
+    return aztecDockerImage;
+  }
+
+  const { stdout } = await execAsync(
+    `kubectl get pods -l app.kubernetes.io/name=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+  );
+  const image = stdout.trim().replace(/^'|'$/g, '');
+  if (!image) {
+    throw new Error(`Could not detect aztec image from validator pod in namespace ${namespace}`);
+  }
+  return image;
+}
+
+async function getHaDbConnectionUrl(namespace: string): Promise<string> {
+  const secretName = `${namespace}-validator-ha-db-postgres`;
+  const { stdout } = await execAsync(`kubectl get secret ${secretName} -n ${namespace} -o json`);
+  const secret = JSON.parse(stdout);
+  const data = secret?.data ?? {};
+  const decode = (value?: string) => (value ? Buffer.from(value, 'base64').toString('utf8') : '');
+  const user = decode(data.POSTGRES_USER);
+  const password = decode(data.POSTGRES_PASSWORD);
+  const database = decode(data.POSTGRES_DB);
+  if (!user || !password || !database) {
+    throw new Error(`Missing HA DB credentials in secret ${secretName}`);
+  }
+  const host = `${namespace}-validator-ha-db-postgres.${namespace}.svc.cluster.local`;
+  return `postgresql://${encodeURIComponent(user)}:${encodeURIComponent(password)}@${host}:5432/${database}`;
+}
+
+export async function initHADb(namespace: string): Promise<void> {
+  const databaseUrl = await getHaDbConnectionUrl(namespace);
+  const image = await getAztecImageForMigrations(namespace);
+  const jobName = `${namespace}-validator-ha-db-migrate`;
+  await execAsync(`kubectl delete pod ${jobName} -n ${namespace} --ignore-not-found=true`).catch(() => undefined);
+
+  const migrateCmd = [
+    `kubectl run ${jobName} -n ${namespace}`,
+    '--rm -i',
+    '--restart=Never',
+    `--image=${image}`,
+    `--env=DATABASE_URL=${databaseUrl}`,
+    '--command -- node --no-warnings /usr/src/yarn-project/aztec/dest/bin/index.js migrate-ha-db up',
+  ].join(' ');
+  const migrateCmdForLog = migrateCmd.replace(/--env=DATABASE_URL=\S+/, '--env=DATABASE_URL=<redacted>');
+
+  await retry(
+    async () => {
+      logger.info(`command: ${migrateCmdForLog}`);
+      await execAsync(migrateCmd);
+    },
+    'run HA DB migrations',
+    makeBackoff([1, 2, 4, 8, 16]),
+    logger,
+    true,
+  );
+}
+
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */
+export async function setValidatorTxDrop({
+  namespace,
+  enabled,
+  probability,
+  logger: log,
+}: {
+  namespace: string;
+  enabled: boolean;
+  probability: number;
+  logger: Logger;
+}) {
+  const drop = enabled ? 'true' : 'false';
+  const prob = String(probability);
+
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+  let updated = false;
+  for (const selector of selectors) {
+    try {
+      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      const names = list.stdout
+        .split('\n')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (names.length === 0) {
+        continue;
+      }
+      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+      log.info(`command: ${cmd}`);
+      await execAsync(cmd);
+      updated = true;
+    } catch (e) {
+      log.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!updated) {
+    log.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+    return;
+  }
+
+  // Restart validator pods to ensure env vars take effect and wait for readiness
+  await restartValidators(namespace, log);
+}
+
+export async function restartValidators(namespace: string, log: Logger) {
+  const selectors = ['app.kubernetes.io/name=validator', 'app.kubernetes.io/component=validator', 'app=validator'];
+  let any = false;
+  for (const selector of selectors) {
+    try {
+      const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      if (!stdout || stdout.trim().length === 0) {
+        continue;
+      }
+      any = true;
+      await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
+    } catch (e) {
+      log.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!any) {
+    log.warn(`No validator pods found to restart in ${namespace}.`);
+    return;
+  }
+
+  // Wait for either label to be Ready
+  for (const selector of selectors) {
+    try {
+      await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
+      return;
+    } catch {
+      // try next
+    }
+  }
+  log.warn(`Validator pods did not report Ready; continuing.`);
+}
+
+export async function enableValidatorDynamicBootNode(
+  instanceName: string,
+  namespace: string,
+  spartanDir: string,
+  log: Logger,
+) {
+  log.info(`Enabling validator dynamic boot node`);
+  await execHelmCommand({
+    instanceName,
+    namespace,
+    helmChartDir: getChartDir(spartanDir, 'aztec-network'),
+    values: {
+      'validator.dynamicBootNode': 'true',
+    },
+    valuesFile: undefined,
+    timeout: '15m',
+    reuseValues: true,
+  });
+
+  log.info(`Validator dynamic boot node enabled`);
+}
+
+/**
+ * Rolls the Aztec pods in the given namespace.
+ * @param namespace - The namespace to roll the Aztec pods in.
+ * @param clearState - If true, also deletes the underlying PVCs to clear persistent storage.
+ * This is required for rollup upgrades where the old state is incompatible with the new rollup.
+ * Defaults to false, which preserves the existing storage.
+ */
+export async function rollAztecPods(namespace: string, clearState: boolean = false) {
+  // Pod components use 'validator', but StatefulSets and PVCs use 'sequencer-node' for validators
+  const podComponents = [
+    'p2p-bootstrap',
+    'prover-node',
+    'prover-broker',
+    'prover-agent',
+    'sequencer-node',
+    'rpc',
+    'validator-ha-db',
+  ];
+  const pvcComponents = ['p2p-bootstrap', 'prover-node', 'prover-broker', 'sequencer-node', 'rpc', 'validator-ha-db'];
+  // StatefulSet components that need to be scaled down before PVC deletion
+  // Note: validators use 'sequencer-node' as component label, not 'validator'
+  const statefulSetComponents = [
+    'p2p-bootstrap',
+    'prover-node',
+    'prover-broker',
+    'sequencer-node',
+    'rpc',
+    'validator-ha-db',
+  ];
+
+  if (clearState) {
+    // To delete PVCs, we must first scale down StatefulSets so pods release the volumes
+    // Otherwise PVC deletion will hang waiting for pods to terminate
+
+    // Save original replica counts for all StatefulSets
+    const originalReplicas: Map<string, number> = new Map();
+    for (const component of statefulSetComponents) {
+      try {
+        // Get all StatefulSets that match the component label
+        const getCmd = `kubectl get statefulset -l app.kubernetes.io/component=${component} -n ${namespace} -o json`;
+        const { stdout } = await execAsync(getCmd);
+        const result = JSON.parse(stdout);
+        for (const sts of result.items || []) {
+          const name = sts.metadata.name;
+          const replicas = sts.spec.replicas ?? 1;
+          if (replicas > 0) {
+            originalReplicas.set(name, replicas);
+            logger.debug(`Saved replica count for StatefulSet ${name}: ${replicas}`);
+          }
+        }
+      } catch {
+        // Component might not exist, continue
+      }
+    }
+
+    // Scale down to 0
+    for (const component of statefulSetComponents) {
+      try {
+        const scaleCmd = `kubectl scale statefulset -l app.kubernetes.io/component=${component} -n ${namespace} --replicas=0 --timeout=2m`;
+        logger.info(`command: ${scaleCmd}`);
+        await execAsync(scaleCmd);
+      } catch (e) {
+        // Component might not exist or might be a Deployment, continue
+        logger.verbose(`Scale down ${component} skipped: ${e}`);
+      }
+    }
+
+    // Wait for all pods to fully terminate before deleting PVCs.
+    // terminationGracePeriodSeconds default is 30s.
+    logger.info('Waiting for pods to fully terminate before deleting PVCs...');
+    for (const component of statefulSetComponents) {
+      try {
+        // Wait for all pods with this component label to be deleted
+        const waitCmd = `kubectl wait pods -l app.kubernetes.io/component=${component} --for=delete -n ${namespace} --timeout=2m`;
+        logger.info(`command: ${waitCmd}`);
+        await execAsync(waitCmd);
+      } catch (e) {
+        logger.verbose(`Wait for pod deletion ${component} skipped: ${e}`);
+      }
+    }
+    // Extra buffer to ensure PVC protection finalizers are cleared
+    await sleep(5 * 1000);
+
+    // Now delete PVCs (they should no longer be in use)
+    for (const component of pvcComponents) {
+      try {
+        await deleteResourceByLabel({
+          resource: 'persistentvolumeclaims',
+          namespace: namespace,
+          label: `app.kubernetes.io/component=${component}`,
+        });
+      } catch (e) {
+        logger.warn(`Failed to delete PVCs for ${component}: ${e}`);
+      }
+    }
+
+    // Verify PVCs are deleted
+    for (const component of pvcComponents) {
+      try {
+        const waitCmd = `kubectl wait pvc -l app.kubernetes.io/component=${component} --for=delete -n ${namespace} --timeout=2m`;
+        logger.info(`command: ${waitCmd}`);
+        await execAsync(waitCmd);
+      } catch (e) {
+        logger.verbose(`Wait for PVC deletion ${component} skipped: ${e}`);
+      }
+    }
+
+    const haDbStatefulSets = [...originalReplicas.entries()].filter(([name]) => name.includes('validator-ha-db'));
+    const otherStatefulSets = [...originalReplicas.entries()].filter(([name]) => !name.includes('validator-ha-db'));
+
+    // Bring up HA DB first so we can run migrations before validators start
+    for (const [stsName, replicas] of haDbStatefulSets) {
+      try {
+        const scaleCmd = `kubectl scale statefulset ${stsName} -n ${namespace} --replicas=${replicas} --timeout=2m`;
+        logger.info(`command: ${scaleCmd}`);
+        await execAsync(scaleCmd);
+      } catch (e) {
+        logger.verbose(`Scale up ${stsName} skipped: ${e}`);
+      }
+    }
+
+    if (haDbStatefulSets.length > 0) {
+      try {
+        await waitForStatefulSetsReady({
+          namespace,
+          label: 'app.kubernetes.io/component=validator-ha-db',
+          timeoutSeconds: 600,
+        });
+        await initHADb(namespace);
+      } catch (e) {
+        logger.warn(`HA DB migration step skipped or failed: ${e}`);
+      }
+    }
+
+    // Scale remaining StatefulSets back up to original replica counts (by name, not label)
+    for (const [stsName, replicas] of otherStatefulSets) {
+      try {
+        const scaleCmd = `kubectl scale statefulset ${stsName} -n ${namespace} --replicas=${replicas} --timeout=2m`;
+        logger.info(`command: ${scaleCmd}`);
+        await execAsync(scaleCmd);
+      } catch (e) {
+        logger.verbose(`Scale up ${stsName} skipped: ${e}`);
+      }
+    }
+  } else {
+    // Just delete pods (no state clearing)
+    for (const component of podComponents) {
+      await deleteResourceByLabel({
+        resource: 'pods',
+        namespace: namespace,
+        label: `app.kubernetes.io/component=${component}`,
+      });
+    }
+  }
+
+  await sleep(10 * 1000);
+
+  // Wait for StatefulSets to have all replicas ready.
+  for (const component of statefulSetComponents) {
+    try {
+      await waitForStatefulSetsReady({
+        namespace,
+        label: `app.kubernetes.io/component=${component}`,
+        timeoutSeconds: 600, // 10 minutes
+      });
+    } catch (e) {
+      logger.warn(`StatefulSet component ${component} may not be fully ready: ${e}`);
+    }
+  }
+
+  const nonStatefulSetComponents = podComponents.filter(c => !statefulSetComponents.includes(c));
+  for (const component of nonStatefulSetComponents) {
+    await waitForResourceByLabel({
+      resource: 'pods',
+      namespace: namespace,
+      label: `app.kubernetes.io/component=${component}`,
+    });
+  }
+}
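
For orientation, a minimal sketch of how these nodes.ts helpers might compose in a spartan test. The namespace, RPC URL, and the minTxsPerBlock config key below are illustrative assumptions, not values taken from this diff; the admin port (8880) and function signatures come from the code above.

import { createLogger } from '@aztec/aztec.js/log';

import type { TestConfig } from './config.js';
import { rollAztecPods, updateSequencersConfig, waitForProvenToAdvance } from './nodes.js';

const logger = createLogger('e2e:example');
// Hypothetical test environment; real runs derive this from process.env via config.js.
const env = { NAMESPACE: 'smoke-test' } as TestConfig;

// Push a config change to every sequencer via the port-forwarded admin API (container port 8880).
// 'minTxsPerBlock' is an assumed AztecNodeAdminConfig key used here only for illustration.
await updateSequencersConfig(env, { minTxsPerBlock: 1 });

// Roll all Aztec pods and wipe their PVCs, e.g. around a rollup upgrade.
await rollAztecPods(env.NAMESPACE, true);

// Block until an epoch proof lands, polling the RPC node once per 12s slot for up to 5 minutes.
await waitForProvenToAdvance('http://localhost:8080', logger, 300, 12);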
package/src/spartan/utils/scripts.ts (new file)
@@ -0,0 +1,63 @@
+import type { Logger } from '@aztec/foundation/log';
+
+import { execSync, spawn } from 'child_process';
+import path from 'path';
+
+/**
+ * @param scriptPath - The path to the script, relative to the project root
+ * @param args - The arguments to pass to the script
+ * @param logger - The logger to use
+ * @returns The exit code of the script
+ */
+function runScript(scriptPath: string, args: string[], logger: Logger, env?: Record<string, string>) {
+  const childProcess = spawn(scriptPath, args, {
+    stdio: ['ignore', 'pipe', 'pipe'],
+    env: env ? { ...process.env, ...env } : process.env,
+  });
+  return new Promise<number>((resolve, reject) => {
+    childProcess.on('close', (code: number | null) => resolve(code ?? 0));
+    childProcess.on('error', reject);
+    childProcess.stdout?.on('data', (data: Buffer) => {
+      logger.info(data.toString());
+    });
+    childProcess.stderr?.on('data', (data: Buffer) => {
+      logger.error(data.toString());
+    });
+  });
+}
+
+/**
+ * Returns the absolute path to the git repository root
+ */
+export function getGitProjectRoot(): string {
+  try {
+    const rootDir = execSync('git rev-parse --show-toplevel', {
+      encoding: 'utf-8',
+      stdio: ['ignore', 'pipe', 'ignore'],
+    }).trim();
+
+    return rootDir;
+  } catch (error) {
+    throw new Error(`Failed to determine git project root: ${error}`);
+  }
+}
+
+export function getAztecBin() {
+  return path.join(getGitProjectRoot(), 'yarn-project/aztec/dest/bin/index.js');
+}
+
+/**
+ * Runs the Aztec binary
+ * @param args - The arguments to pass to the Aztec binary
+ * @param logger - The logger to use
+ * @param env - Optional environment variables to set for the process
+ * @returns The exit code of the Aztec binary
+ */
+export function runAztecBin(args: string[], logger: Logger, env?: Record<string, string>) {
+  return runScript('node', [getAztecBin(), ...args], logger, env);
+}
+
+export function runProjectScript(script: string, args: string[], logger: Logger, env?: Record<string, string>) {
+  const scriptPath = script.startsWith('/') ? script : path.join(getGitProjectRoot(), script);
+  return runScript(scriptPath, args, logger, env);
+}
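
And a short usage sketch for the scripts.ts helpers above. The CLI argument and script path are placeholders chosen for illustration; only the helper signatures and the resolved bin path come from the diff.

import { createLogger } from '@aztec/foundation/log';

import { runAztecBin, runProjectScript } from './scripts.js';

const logger = createLogger('e2e:scripts-example');

// Run the locally built CLI at <git root>/yarn-project/aztec/dest/bin/index.js,
// streaming its stdout/stderr into the logger and resolving with the exit code.
const exitCode = await runAztecBin(['--help'], logger, { LOG_LEVEL: 'verbose' });
if (exitCode !== 0) {
  throw new Error(`aztec CLI exited with code ${exitCode}`);
}

// Run a repo script by path relative to the git root (absolute paths pass through unchanged).
// 'spartan/bootstrap.sh' is a hypothetical path used only as an example.
await runProjectScript('spartan/bootstrap.sh', [], logger);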