@aztec/end-to-end 3.0.0-canary.a9708bd → 3.0.0-devnet.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bench/client_flows/benchmark.d.ts +3 -2
- package/dest/bench/client_flows/benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.d.ts +16 -12
- package/dest/bench/client_flows/client_flows_benchmark.d.ts.map +1 -1
- package/dest/bench/client_flows/client_flows_benchmark.js +54 -58
- package/dest/bench/utils.d.ts +2 -11
- package/dest/bench/utils.d.ts.map +1 -1
- package/dest/bench/utils.js +10 -34
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts +7 -7
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.d.ts.map +1 -1
- package/dest/e2e_blacklist_token_contract/blacklist_token_contract_test.js +42 -42
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts +10 -8
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.d.ts.map +1 -1
- package/dest/e2e_cross_chain_messaging/cross_chain_messaging_test.js +31 -33
- package/dest/e2e_deploy_contract/deploy_test.d.ts +10 -4
- package/dest/e2e_deploy_contract/deploy_test.d.ts.map +1 -1
- package/dest/e2e_deploy_contract/deploy_test.js +9 -18
- package/dest/e2e_epochs/epochs_test.d.ts +9 -3
- package/dest/e2e_epochs/epochs_test.d.ts.map +1 -1
- package/dest/e2e_epochs/epochs_test.js +19 -13
- package/dest/e2e_fees/bridging_race.notest.js +12 -9
- package/dest/e2e_fees/fees_test.d.ts +5 -5
- package/dest/e2e_fees/fees_test.d.ts.map +1 -1
- package/dest/e2e_fees/fees_test.js +26 -33
- package/dest/e2e_l1_publisher/write_json.d.ts +3 -1
- package/dest/e2e_l1_publisher/write_json.d.ts.map +1 -1
- package/dest/e2e_l1_publisher/write_json.js +5 -5
- package/dest/e2e_multi_validator/utils.d.ts +1 -1
- package/dest/e2e_multi_validator/utils.d.ts.map +1 -1
- package/dest/e2e_multi_validator/utils.js +3 -9
- package/dest/e2e_nested_contract/nested_contract_test.d.ts +6 -3
- package/dest/e2e_nested_contract/nested_contract_test.d.ts.map +1 -1
- package/dest/e2e_nested_contract/nested_contract_test.js +7 -9
- package/dest/e2e_p2p/inactivity_slash_test.d.ts +31 -0
- package/dest/e2e_p2p/inactivity_slash_test.d.ts.map +1 -0
- package/dest/e2e_p2p/inactivity_slash_test.js +135 -0
- package/dest/e2e_p2p/p2p_network.d.ts +22 -8
- package/dest/e2e_p2p/p2p_network.d.ts.map +1 -1
- package/dest/e2e_p2p/p2p_network.js +36 -15
- package/dest/e2e_p2p/shared.d.ts +12 -13
- package/dest/e2e_p2p/shared.d.ts.map +1 -1
- package/dest/e2e_p2p/shared.js +54 -54
- package/dest/e2e_token_contract/token_contract_test.d.ts +5 -4
- package/dest/e2e_token_contract/token_contract_test.d.ts.map +1 -1
- package/dest/e2e_token_contract/token_contract_test.js +14 -17
- package/dest/fixtures/e2e_prover_test.d.ts +8 -6
- package/dest/fixtures/e2e_prover_test.d.ts.map +1 -1
- package/dest/fixtures/e2e_prover_test.js +42 -51
- package/dest/fixtures/get_acvm_config.d.ts +1 -1
- package/dest/fixtures/get_acvm_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.d.ts +1 -1
- package/dest/fixtures/get_bb_config.d.ts.map +1 -1
- package/dest/fixtures/get_bb_config.js +2 -2
- package/dest/fixtures/setup_l1_contracts.d.ts +1 -1
- package/dest/fixtures/setup_l1_contracts.d.ts.map +1 -1
- package/dest/fixtures/setup_l1_contracts.js +2 -2
- package/dest/fixtures/setup_p2p_test.d.ts +10 -9
- package/dest/fixtures/setup_p2p_test.d.ts.map +1 -1
- package/dest/fixtures/setup_p2p_test.js +38 -20
- package/dest/fixtures/snapshot_manager.d.ts +10 -7
- package/dest/fixtures/snapshot_manager.d.ts.map +1 -1
- package/dest/fixtures/snapshot_manager.js +60 -47
- package/dest/fixtures/token_utils.d.ts +6 -4
- package/dest/fixtures/token_utils.d.ts.map +1 -1
- package/dest/fixtures/token_utils.js +11 -15
- package/dest/fixtures/utils.d.ts +26 -28
- package/dest/fixtures/utils.d.ts.map +1 -1
- package/dest/fixtures/utils.js +83 -109
- package/dest/fixtures/web3signer.d.ts +5 -0
- package/dest/fixtures/web3signer.d.ts.map +1 -0
- package/dest/fixtures/web3signer.js +53 -0
- package/dest/quality_of_service/alert_checker.d.ts +1 -1
- package/dest/quality_of_service/alert_checker.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.d.ts +16 -10
- package/dest/shared/cross_chain_test_harness.d.ts.map +1 -1
- package/dest/shared/cross_chain_test_harness.js +13 -15
- package/dest/shared/gas_portal_test_harness.d.ts +9 -6
- package/dest/shared/gas_portal_test_harness.d.ts.map +1 -1
- package/dest/shared/gas_portal_test_harness.js +10 -7
- package/dest/shared/jest_setup.js +1 -1
- package/dest/shared/submit-transactions.d.ts +5 -3
- package/dest/shared/submit-transactions.d.ts.map +1 -1
- package/dest/shared/submit-transactions.js +8 -7
- package/dest/shared/uniswap_l1_l2.d.ts +9 -6
- package/dest/shared/uniswap_l1_l2.d.ts.map +1 -1
- package/dest/shared/uniswap_l1_l2.js +29 -45
- package/dest/simulators/lending_simulator.d.ts +2 -1
- package/dest/simulators/lending_simulator.d.ts.map +1 -1
- package/dest/simulators/lending_simulator.js +3 -2
- package/dest/simulators/token_simulator.d.ts +3 -1
- package/dest/simulators/token_simulator.d.ts.map +1 -1
- package/dest/simulators/token_simulator.js +2 -2
- package/dest/spartan/setup_test_wallets.d.ts +19 -13
- package/dest/spartan/setup_test_wallets.d.ts.map +1 -1
- package/dest/spartan/setup_test_wallets.js +108 -85
- package/dest/spartan/utils.d.ts +68 -3
- package/dest/spartan/utils.d.ts.map +1 -1
- package/dest/spartan/utils.js +312 -49
- package/package.json +39 -38
- package/src/bench/client_flows/benchmark.ts +6 -6
- package/src/bench/client_flows/client_flows_benchmark.ts +62 -82
- package/src/bench/client_flows/data_extractor.ts +1 -1
- package/src/bench/utils.ts +9 -37
- package/src/e2e_blacklist_token_contract/blacklist_token_contract_test.ts +46 -63
- package/src/e2e_cross_chain_messaging/cross_chain_messaging_test.ts +33 -47
- package/src/e2e_deploy_contract/deploy_test.ts +17 -35
- package/src/e2e_epochs/epochs_test.ts +22 -19
- package/src/e2e_fees/bridging_race.notest.ts +14 -9
- package/src/e2e_fees/fees_test.ts +29 -40
- package/src/e2e_l1_publisher/write_json.ts +8 -6
- package/src/e2e_multi_validator/utils.ts +4 -10
- package/src/e2e_nested_contract/nested_contract_test.ts +11 -10
- package/src/e2e_p2p/inactivity_slash_test.ts +178 -0
- package/src/e2e_p2p/p2p_network.ts +110 -71
- package/src/e2e_p2p/shared.ts +57 -56
- package/src/e2e_token_contract/token_contract_test.ts +17 -17
- package/src/fixtures/e2e_prover_test.ts +52 -88
- package/src/fixtures/get_acvm_config.ts +1 -1
- package/src/fixtures/get_bb_config.ts +3 -2
- package/src/fixtures/setup_l1_contracts.ts +3 -3
- package/src/fixtures/setup_p2p_test.ts +60 -27
- package/src/fixtures/snapshot_manager.ts +80 -72
- package/src/fixtures/token_utils.ts +13 -21
- package/src/fixtures/utils.ts +95 -145
- package/src/fixtures/web3signer.ts +63 -0
- package/src/guides/up_quick_start.sh +2 -10
- package/src/quality_of_service/alert_checker.ts +1 -1
- package/src/shared/cross_chain_test_harness.ts +18 -29
- package/src/shared/gas_portal_test_harness.ts +12 -19
- package/src/shared/jest_setup.ts +1 -1
- package/src/shared/submit-transactions.ts +12 -8
- package/src/shared/uniswap_l1_l2.ts +61 -67
- package/src/simulators/lending_simulator.ts +3 -2
- package/src/simulators/token_simulator.ts +5 -2
- package/src/spartan/DEVELOP.md +8 -3
- package/src/spartan/setup_test_wallets.ts +133 -126
- package/src/spartan/utils.ts +373 -48
package/src/spartan/utils.ts
CHANGED
@@ -1,13 +1,21 @@
-import { createLogger
+import { createLogger } from '@aztec/aztec.js/log';
 import type { RollupCheatCodes } from '@aztec/aztec/testing';
+import type { L1ContractAddresses, ViemPublicClient } from '@aztec/ethereum';
 import type { Logger } from '@aztec/foundation/log';
 import { makeBackoff, retry } from '@aztec/foundation/retry';
 import { schemas } from '@aztec/foundation/schemas';
-import {
+import { sleep } from '@aztec/foundation/sleep';
+import {
+  type AztecNodeAdmin,
+  type AztecNodeAdminConfig,
+  createAztecNodeAdminClient,
+  createAztecNodeClient,
+} from '@aztec/stdlib/interfaces/client';
 
 import { ChildProcess, exec, execSync, spawn } from 'child_process';
 import path from 'path';
 import { promisify } from 'util';
+import { createPublicClient, fallback, http } from 'viem';
 import { z } from 'zod';
 
 const execAsync = promisify(exec);
@@ -17,6 +25,11 @@ const logger = createLogger('e2e:k8s-utils');
 const testConfigSchema = z.object({
   NAMESPACE: z.string().default('scenario'),
   REAL_VERIFIER: schemas.Boolean.optional().default(true),
+  CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false),
+  L1_RPC_URLS_JSON: z.string().optional(),
+  L1_ACCOUNT_MNEMONIC: z.string().optional(),
+  AZTEC_SLOT_DURATION: z.coerce.number().optional().default(24),
+  AZTEC_PROOF_SUBMISSION_WINDOW: z.coerce.number().optional().default(5),
 });
 
 export type TestConfig = z.infer<typeof testConfigSchema>;
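The new schema keys are read straight from the environment; zod supplies the defaults shown above and `z.coerce.number()` accepts numeric strings. A minimal consumption sketch (assumes the schema, which is module-private in the diff, is reachable from the test):

// Sketch only: parse the environment against the extended schema.
// z.object() ignores unknown keys in process.env and applies the defaults.
const env = testConfigSchema.parse(process.env);
if (!env.CREATE_ETH_DEVNET && !env.L1_RPC_URLS_JSON) {
  throw new Error('L1_RPC_URLS_JSON must be set when CREATE_ETH_DEVNET is false');
}
logger.info(`Slot duration: ${env.AZTEC_SLOT_DURATION}s, namespace: ${env.NAMESPACE}`);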
@@ -87,7 +100,7 @@ export async function startPortForward({
 }> {
   const hostPortAsString = hostPort ? hostPort.toString() : '';
 
-  logger.
+  logger.debug(`kubectl port-forward -n ${namespace} ${resource} ${hostPortAsString}:${containerPort}`);
 
   const process = spawn(
     'kubectl',
@@ -105,20 +118,20 @@ export async function startPortForward({
       const str = data.toString() as string;
       if (!isResolved && str.includes('Forwarding from')) {
         isResolved = true;
-        logger.
+        logger.debug(`Port forward for ${resource}: ${str}`);
         const port = str.search(/:\d+/);
         if (port === -1) {
           throw new Error('Port not found in port forward output');
         }
         const portNumber = parseInt(str.slice(port + 1));
-        logger.
+        logger.verbose(`Port forwarded for ${resource} at ${portNumber}:${containerPort}`);
         resolve(portNumber);
       } else {
         logger.silent(str);
       }
     });
     process.stderr?.on('data', data => {
-      logger.
+      logger.verbose(`Port forward for ${resource}: ${data.toString()}`);
       // It's a strange thing:
       // If we don't pipe stderr, then the port forwarding does not work.
       // Log to silent because this doesn't actually report errors,
@@ -128,16 +141,16 @@ export async function startPortForward({
     process.on('close', () => {
       if (!isResolved) {
         isResolved = true;
-        logger.warn(
+        logger.warn(`Port forward for ${resource} closed before connection established`);
         resolve(0);
       }
     });
     process.on('error', error => {
-      logger.error(`Port forward error: ${error}`);
+      logger.error(`Port forward for ${resource} error: ${error}`);
       resolve(0);
     });
     process.on('exit', code => {
-      logger.
+      logger.verbose(`Port forward for ${resource} exited with code ${code}`);
       resolve(0);
     });
   });
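The three hunks above only retune `startPortForward` logging: the kubectl command echo drops to debug, stdout/stderr chatter moves to verbose, and every message now names the forwarded resource. A hypothetical call site, for reference (the service name is illustrative, not from this diff):

// Forward a cluster service locally, then clean up the kubectl child process.
const { process: forward, port } = await startPortForward({
  resource: 'svc/scenario-rpc-aztec-node', // assumed service name
  namespace: 'scenario',
  containerPort: 8080,
});
try {
  const res = await fetch(`http://127.0.0.1:${port}/status`);
  logger.info(`Node status: ${res.status}`);
} finally {
  forward.kill();
}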
@@ -195,6 +208,16 @@ export async function deleteResourceByLabel({
   timeout?: string;
   force?: boolean;
 }) {
+  // Check if the resource type exists before attempting to delete
+  try {
+    await execAsync(
+      `kubectl api-resources --api-group="" --no-headers -o name | grep -q "^${resource}$" || kubectl api-resources --no-headers -o name | grep -q "^${resource}$"`,
+    );
+  } catch (error) {
+    logger.warn(`Resource type '${resource}' not found in cluster, skipping deletion ${error}`);
+    return '';
+  }
+
   const command = `kubectl delete ${resource} -l ${label} -n ${namespace} --ignore-not-found=true --wait=true --timeout=${timeout} ${
     force ? '--force' : ''
   }`;
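The new guard probes `kubectl api-resources` for the resource type before deleting, so teardown no longer fails on clusters that lack a CRD. For example (the label value is illustrative), this now logs a warning and resolves to '' on a cluster where the chaos-mesh CRDs were never installed:

// Best-effort cleanup: a missing 'podchaos' resource type is skipped, not fatal.
await deleteResourceByLabel({
  resource: 'podchaos',
  namespace: 'chaos-mesh',
  label: 'app.kubernetes.io/instance=scenario-chaos', // assumed release label
});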
@@ -226,9 +249,18 @@ export function getChartDir(spartanDir: string, chartName: string) {
   return path.join(spartanDir.trim(), chartName);
 }
 
-function
+function shellQuote(value: string) {
+  // Single-quote safe shell escaping: ' -> '\''
+  return `'${value.replace(/'/g, "'\\''")}'`;
+}
+
+function valuesToArgs(values: Record<string, string | number | boolean>) {
   return Object.entries(values)
-    .map(([key, value]) =>
+    .map(([key, value]) =>
+      typeof value === 'number' || typeof value === 'boolean'
+        ? `--set ${key}=${value}`
+        : `--set-string ${key}=${shellQuote(String(value))}`,
+    )
     .join(' ');
 }
 
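`shellQuote` wraps string values in single quotes and escapes embedded quotes as '\'' so the shell passes them to helm intact, and `valuesToArgs` now routes strings through `--set-string` (preventing Helm from coercing values such as image tags into numbers) while numbers and booleans keep plain `--set`. A worked example of the new builder:

valuesToArgs({ 'bot.replicaCount': 2, 'bot.followChain': 'PENDING', 'bot.enabled': true });
// => "--set bot.replicaCount=2 --set-string bot.followChain='PENDING' --set bot.enabled=true"
shellQuote("it's"); // => 'it'\''s'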
@@ -246,7 +278,7 @@ function createHelmCommand({
   namespace: string;
   valuesFile: string | undefined;
   timeout: string;
-  values: Record<string, string | number>;
+  values: Record<string, string | number | boolean>;
   reuseValues?: boolean;
 }) {
   const valuesFileArgs = valuesFile ? `--values ${helmChartDir}/values/${valuesFile}` : '';
@@ -286,7 +318,7 @@ export async function installChaosMeshChart({
   valuesFile,
   helmChartDir,
   chaosMeshNamespace = 'chaos-mesh',
-  timeout = '
+  timeout = '10m',
   clean = true,
   values = {},
   logger,
@@ -305,18 +337,23 @@ export async function installChaosMeshChart({
     // uninstall the helm chart if it exists
     logger.info(`Uninstalling helm chart ${instanceName}`);
     await execAsync(`helm uninstall ${instanceName} --namespace ${chaosMeshNamespace} --wait --ignore-not-found`);
-    // and delete the
-    const
-
-
-
+    // and delete the chaos-mesh resources created by this release
+    const deleteByLabel = async (resource: string) => {
+      const args = {
+        resource,
+        namespace: chaosMeshNamespace,
+        label: `app.kubernetes.io/instance=${instanceName}`,
+      } as const;
+      logger.info(`Deleting ${resource} resources for release ${instanceName}`);
+      await deleteResourceByLabel(args).catch(e => {
+        logger.error(`Error deleting ${resource}: ${e}`);
+        logger.info(`Force deleting ${resource}`);
+        return deleteResourceByLabel({ ...args, force: true });
+      });
     };
-
-    await
-
-    logger.info(`Force deleting podchaos resource`);
-    return deleteResourceByLabel({ ...deleteArgs, force: true });
-    });
+
+    await deleteByLabel('podchaos');
+    await deleteByLabel('networkchaos');
   }
 
   return execHelmCommand({
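The rewritten cleanup factors the delete-with-forced-retry pattern into a local `deleteByLabel` helper and now removes both podchaos and networkchaos resources for the release, where previously only podchaos was force-deleted. A hypothetical invocation, with parameter names taken from the destructuring above and the release name and chart location assumed for illustration:

// clean=true uninstalls the release, then deletes its podchaos and
// networkchaos resources (force-deleting on failure) before reinstalling.
await installChaosMeshChart({
  instanceName: 'scenario-chaos', // assumed release name
  helmChartDir: getChartDir(spartanDir, 'aztec-chaos-scenarios'), // assumed chart dir
  valuesFile: undefined,
  clean: true,
  values: {},
  logger,
});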
@@ -477,12 +514,234 @@ export async function awaitL2BlockNumber(
 
 export async function restartBot(namespace: string, logger: Logger) {
   logger.info(`Restarting bot`);
-  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app=bot' });
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' });
   await sleep(10 * 1000);
-
+  // Some bot images may take time to report Ready due to heavy boot-time proving.
+  // Waiting for PodReadyToStartContainers ensures the pod is scheduled and starting without blocking on full readiness.
+  await waitForResourceByLabel({
+    resource: 'pods',
+    namespace,
+    label: 'app.kubernetes.io/name=bot',
+    condition: 'PodReadyToStartContainers',
+  });
   logger.info(`Bot restarted`);
 }
 
+/**
+ * Installs or upgrades the transfer bot Helm release for the given namespace.
+ * Intended for test setup to enable L2 traffic generation only when needed.
+ */
+export async function installTransferBot({
+  namespace,
+  spartanDir,
+  logger,
+  replicas = 1,
+  txIntervalSeconds = 10,
+  followChain = 'PENDING',
+  mnemonic = process.env.LABS_INFRA_MNEMONIC ?? 'test test test test test test test test test test test junk',
+  mnemonicStartIndex,
+  botPrivateKey = process.env.BOT_TRANSFERS_L2_PRIVATE_KEY ?? '0xcafe01',
+  nodeUrl,
+  timeout = '15m',
+  reuseValues = true,
+  aztecSlotDuration = Number(process.env.AZTEC_SLOT_DURATION ?? 12),
+}: {
+  namespace: string;
+  spartanDir: string;
+  logger: Logger;
+  replicas?: number;
+  txIntervalSeconds?: number;
+  followChain?: string;
+  mnemonic?: string;
+  mnemonicStartIndex?: number | string;
+  botPrivateKey?: string;
+  nodeUrl?: string;
+  timeout?: string;
+  reuseValues?: boolean;
+  aztecSlotDuration?: number;
+}) {
+  const instanceName = `${namespace}-bot-transfers`;
+  const helmChartDir = getChartDir(spartanDir, 'aztec-bot');
+  const resolvedNodeUrl = nodeUrl ?? `http://${namespace}-rpc-aztec-node.${namespace}.svc.cluster.local:8080`;
+
+  logger.info(`Installing/upgrading transfer bot: replicas=${replicas}, followChain=${followChain}`);
+
+  const values: Record<string, string | number | boolean> = {
+    'bot.replicaCount': replicas,
+    'bot.txIntervalSeconds': txIntervalSeconds,
+    'bot.followChain': followChain,
+    'bot.botPrivateKey': botPrivateKey,
+    'bot.nodeUrl': resolvedNodeUrl,
+    'bot.mnemonic': mnemonic,
+    'bot.feePaymentMethod': 'fee_juice',
+    'aztec.slotDuration': aztecSlotDuration,
+    // Ensure bot can reach its own PXE started in-process (default rpc.port is 8080)
+    // Note: since aztec-bot depends on aztec-node with alias `bot`, env vars go under `bot.node.env`.
+    'bot.node.env.BOT_PXE_URL': 'http://127.0.0.1:8080',
+    // Provide L1 execution RPC for bridging fee juice
+    'bot.node.env.ETHEREUM_HOSTS': `http://${namespace}-eth-execution.${namespace}.svc.cluster.local:8545`,
+    // Provide L1 mnemonic for bridging (falls back to labs mnemonic)
+    'bot.node.env.BOT_L1_MNEMONIC': mnemonic,
+  };
+  // Ensure we derive a funded L1 key (index 0 is funded on anvil default mnemonic)
+  if (mnemonicStartIndex === undefined) {
+    values['bot.mnemonicStartIndex'] = 0;
+  }
+  // Also pass a funded private key directly if available
+  if (process.env.FUNDING_PRIVATE_KEY) {
+    values['bot.node.env.BOT_L1_PRIVATE_KEY'] = process.env.FUNDING_PRIVATE_KEY;
+  }
+  // Align bot image with the running network image: prefer env var, else detect from a validator pod
+  let repositoryFromEnv: string | undefined;
+  let tagFromEnv: string | undefined;
+  const aztecDockerImage = process.env.AZTEC_DOCKER_IMAGE;
+  if (aztecDockerImage && aztecDockerImage.includes(':')) {
+    const lastColon = aztecDockerImage.lastIndexOf(':');
+    repositoryFromEnv = aztecDockerImage.slice(0, lastColon);
+    tagFromEnv = aztecDockerImage.slice(lastColon + 1);
+  }
+
+  let repository = repositoryFromEnv;
+  let tag = tagFromEnv;
+  if (!repository || !tag) {
+    try {
+      const { stdout } = await execAsync(
+        `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[0].spec.containers[?(@.name=="aztec")].image}' | cat`,
+      );
+      const image = stdout.trim().replace(/^'|'$/g, '');
+      if (image && image.includes(':')) {
+        const lastColon = image.lastIndexOf(':');
+        repository = image.slice(0, lastColon);
+        tag = image.slice(lastColon + 1);
+      }
+    } catch (err) {
+      logger.warn(`Could not detect aztec image from validator pod: ${String(err)}`);
+    }
+  }
+  if (repository && tag) {
+    values['global.aztecImage.repository'] = repository;
+    values['global.aztecImage.tag'] = tag;
+  }
+  if (mnemonicStartIndex !== undefined) {
+    values['bot.mnemonicStartIndex'] =
+      typeof mnemonicStartIndex === 'string' ? mnemonicStartIndex : Number(mnemonicStartIndex);
+  }
+
+  await execHelmCommand({
+    instanceName,
+    helmChartDir,
+    namespace,
+    valuesFile: undefined,
+    timeout,
+    values: values as unknown as Record<string, string | number | boolean>,
+    reuseValues,
+  });
+
+  if (replicas > 0) {
+    await waitForResourceByLabel({
+      resource: 'pods',
+      namespace,
+      label: 'app.kubernetes.io/name=bot',
+      condition: 'PodReadyToStartContainers',
+    });
+  }
+}
+
+/**
+ * Uninstalls the transfer bot Helm release from the given namespace.
+ * Intended for test teardown to clean up bot resources.
+ */
+export async function uninstallTransferBot(namespace: string, logger: Logger) {
+  const instanceName = `${namespace}-bot-transfers`;
+  logger.info(`Uninstalling transfer bot release ${instanceName}`);
+  await execAsync(`helm uninstall ${instanceName} --namespace ${namespace} --wait --ignore-not-found`);
+  // Ensure any leftover pods are removed
+  await deleteResourceByLabel({ resource: 'pods', namespace, label: 'app.kubernetes.io/name=bot' }).catch(
+    () => undefined,
+  );
+}
+
+/**
+ * Enables or disables probabilistic transaction dropping on validators and waits for rollout.
+ * Wired to env vars P2P_DROP_TX and P2P_DROP_TX_CHANCE via Helm values.
+ */
+export async function setValidatorTxDrop({
+  namespace,
+  enabled,
+  probability,
+  logger,
+}: {
+  namespace: string;
+  enabled: boolean;
+  probability: number;
+  logger: Logger;
+}) {
+  const drop = enabled ? 'true' : 'false';
+  const prob = String(probability);
+
+  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  let updated = false;
+  for (const selector of selectors) {
+    try {
+      const list = await execAsync(`kubectl get statefulset -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      const names = list.stdout
+        .split('\n')
+        .map(s => s.trim())
+        .filter(Boolean);
+      if (names.length === 0) {
+        continue;
+      }
+      const cmd = `kubectl set env statefulset -l ${selector} -n ${namespace} P2P_DROP_TX=${drop} P2P_DROP_TX_CHANCE=${prob}`;
+      logger.info(`command: ${cmd}`);
+      await execAsync(cmd);
+      updated = true;
+    } catch (e) {
+      logger.warn(`Failed to update validators with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!updated) {
+    logger.warn(`No validator StatefulSets found in ${namespace}. Skipping tx drop toggle.`);
+    return;
+  }
+
+  // Restart validator pods to ensure env vars take effect and wait for readiness
+  await restartValidators(namespace, logger);
+}
+
+export async function restartValidators(namespace: string, logger: Logger) {
+  const selectors = ['app=validator', 'app.kubernetes.io/component=validator'];
+  let any = false;
+  for (const selector of selectors) {
+    try {
+      const { stdout } = await execAsync(`kubectl get pods -l ${selector} -n ${namespace} --no-headers -o name | cat`);
+      if (!stdout || stdout.trim().length === 0) {
+        continue;
+      }
+      any = true;
+      await deleteResourceByLabel({ resource: 'pods', namespace, label: selector });
+    } catch (e) {
+      logger.warn(`Error restarting validator pods with selector ${selector}: ${String(e)}`);
+    }
+  }
+
+  if (!any) {
+    logger.warn(`No validator pods found to restart in ${namespace}.`);
+    return;
+  }
+
+  // Wait for either label to be Ready
+  for (const selector of selectors) {
+    try {
+      await waitForResourceByLabel({ resource: 'pods', namespace, label: selector });
+      return;
+    } catch {
+      // try next
+    }
+  }
+  logger.warn(`Validator pods did not report Ready; continuing.`);
+}
+
 export async function enableValidatorDynamicBootNode(
   instanceName: string,
   namespace: string,
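This hunk adds four helpers: `installTransferBot` / `uninstallTransferBot` manage an aztec-bot Helm release for on-demand L2 traffic, and `setValidatorTxDrop` / `restartValidators` toggle probabilistic tx dropping (P2P_DROP_TX / P2P_DROP_TX_CHANCE) across validator StatefulSets under either label scheme. A hypothetical test flow (namespace and spartanDir are illustrative, not from this diff):

// Spin up traffic, degrade gossip, assert, then restore and tear down.
const namespace = 'scenario';
await installTransferBot({ namespace, spartanDir, logger, replicas: 1, txIntervalSeconds: 10 });
await setValidatorTxDrop({ namespace, enabled: true, probability: 0.5, logger });
// ... run assertions against the degraded network ...
await setValidatorTxDrop({ namespace, enabled: false, probability: 0, logger });
await uninstallTransferBot(namespace, logger);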
@@ -505,44 +764,110 @@ export async function enableValidatorDynamicBootNode(
   logger.info(`Validator dynamic boot node enabled`);
 }
 
-export async function updateSequencerConfig(url: string, config: Partial<AztecNodeAdminConfig>) {
-  const node = createAztecNodeAdminClient(url);
-  // Retry incase the port forward is not ready yet
-  await retry(() => node.setConfig(config), 'Update sequencer config', makeBackoff([1, 3, 6]), logger);
-}
-
 export async function getSequencers(namespace: string) {
-  const command = `kubectl get pods -l app=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
+  const command = `kubectl get pods -l app.kubernetes.io/component=validator -n ${namespace} -o jsonpath='{.items[*].metadata.name}'`;
   const { stdout } = await execAsync(command);
-
+  const sequencers = stdout.split(' ');
+  logger.verbose(`Found sequencer pods ${sequencers.join(', ')}`);
+  return sequencers;
 }
 
-
-
-
-
-})
-
+export function updateSequencersConfig(env: TestConfig, config: Partial<AztecNodeAdminConfig>) {
+  return withSequencersAdmin(env, async client => {
+    await client.setConfig(config);
+    return client.getConfig();
+  });
+}
+
+export function getSequencersConfig(env: TestConfig) {
+  return withSequencersAdmin(env, client => client.getConfig());
+}
+
+export async function withSequencersAdmin<T>(env: TestConfig, fn: (node: AztecNodeAdmin) => Promise<T>): Promise<T[]> {
+  const adminContainerPort = 8880;
+  const namespace = env.NAMESPACE;
   const sequencers = await getSequencers(namespace);
+  const results = [];
+
   for (const sequencer of sequencers) {
     const { process, port } = await startPortForward({
       resource: `pod/${sequencer}`,
       namespace,
-      containerPort,
+      containerPort: adminContainerPort,
     });
 
     const url = `http://localhost:${port}`;
-    await
+    await retry(
+      () => fetch(`${url}/status`).then(res => res.status === 200),
+      'forward node admin port',
+      makeBackoff([1, 1, 2, 6]),
+      logger,
+      true,
+    );
+    const client = createAztecNodeAdminClient(url);
+    results.push(await fn(client));
     process.kill();
   }
+
+  return results;
 }
 
-
-
-
-
-
-
+/**
+ * Returns a public viem client to the eth execution node. If it was part of a local eth devnet,
+ * it first port-forwards the service and points to it. Otherwise, just uses the external RPC url.
+ */
+export async function getPublicViemClient(
+  env: TestConfig,
+  /** If set, will push the new process into it */
+  processes?: ChildProcess[],
+): Promise<{ url: string; client: ViemPublicClient; process?: ChildProcess }> {
+  const { NAMESPACE, CREATE_ETH_DEVNET, L1_RPC_URLS_JSON } = env;
+  if (CREATE_ETH_DEVNET) {
+    logger.info(`Creating port forward to eth execution node`);
+    const { process, port } = await startPortForward({
+      resource: `svc/${NAMESPACE}-eth-execution`,
+      namespace: NAMESPACE,
+      containerPort: 8545,
+    });
+    const url = `http://127.0.0.1:${port}`;
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(url)]) });
+    if (processes) {
+      processes.push(process);
+    }
+    return { url, client, process };
+  } else {
+    logger.info(`Connecting to the eth execution node at ${L1_RPC_URLS_JSON}`);
+    if (!L1_RPC_URLS_JSON) {
+      throw new Error(`L1_RPC_URLS_JSON is not defined`);
+    }
+    const client: ViemPublicClient = createPublicClient({ transport: fallback([http(L1_RPC_URLS_JSON)]) });
+    return { url: L1_RPC_URLS_JSON, client };
+  }
+}
+
+/** Queries an Aztec node for the L1 deployment addresses */
+export async function getL1DeploymentAddresses(env: TestConfig): Promise<L1ContractAddresses> {
+  let forwardProcess: ChildProcess | undefined;
+  try {
+    const [sequencer] = await getSequencers(env.NAMESPACE);
+    const { process, port } = await startPortForward({
+      resource: `pod/${sequencer}`,
+      namespace: env.NAMESPACE,
+      containerPort: 8080,
+    });
+
+    forwardProcess = process;
+    const url = `http://127.0.0.1:${port}`;
+    const node = createAztecNodeClient(url);
+    return await retry(
+      () => node.getNodeInfo().then(i => i.l1ContractAddresses),
+      'get node info',
+      makeBackoff([1, 3, 6]),
+      logger,
+    );
+  } finally {
+    forwardProcess?.kill();
+  }
 }
 
 /**
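The sequencer admin flow is now batched: `withSequencersAdmin` port-forwards each sequencer's admin port (8880), polls `/status` until the forward is live, runs the callback against every node, and collects the results; `getPublicViemClient` chooses between a port-forward and the external RPC URL based on CREATE_ETH_DEVNET. A hypothetical end-to-end usage (assumes the schema parse shown earlier):

// Read config from all sequencers, then query L1 through the selected client.
const env: TestConfig = testConfigSchema.parse(process.env);
const configs = await getSequencersConfig(env);
logger.info(`Fetched config from ${configs.length} sequencers`);

const processes: ChildProcess[] = [];
const { client } = await getPublicViemClient(env, processes);
logger.info(`L1 block number: ${await client.getBlockNumber()}`);
processes.forEach(p => p.kill());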
|