@aztec/archiver 0.65.2 → 0.66.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/archiver/archiver.d.ts +18 -22
- package/dest/archiver/archiver.d.ts.map +1 -1
- package/dest/archiver/archiver.js +143 -99
- package/dest/archiver/archiver_store.d.ts +7 -8
- package/dest/archiver/archiver_store.d.ts.map +1 -1
- package/dest/archiver/archiver_store_test_suite.d.ts.map +1 -1
- package/dest/archiver/archiver_store_test_suite.js +126 -150
- package/dest/archiver/config.d.ts +6 -12
- package/dest/archiver/config.d.ts.map +1 -1
- package/dest/archiver/config.js +6 -1
- package/dest/archiver/data_retrieval.d.ts +2 -3
- package/dest/archiver/data_retrieval.d.ts.map +1 -1
- package/dest/archiver/data_retrieval.js +14 -15
- package/dest/archiver/instrumentation.d.ts +2 -7
- package/dest/archiver/instrumentation.d.ts.map +1 -1
- package/dest/archiver/instrumentation.js +3 -6
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts +7 -8
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/kv_archiver_store.js +7 -8
- package/dest/archiver/kv_archiver_store/log_store.d.ts +7 -8
- package/dest/archiver/kv_archiver_store/log_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/log_store.js +55 -95
- package/dest/archiver/memory_archiver_store/memory_archiver_store.d.ts +8 -10
- package/dest/archiver/memory_archiver_store/memory_archiver_store.d.ts.map +1 -1
- package/dest/archiver/memory_archiver_store/memory_archiver_store.js +50 -57
- package/dest/index.d.ts +2 -2
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +3 -42
- package/dest/test/mock_l2_block_source.d.ts.map +1 -1
- package/dest/test/mock_l2_block_source.js +2 -2
- package/package.json +11 -13
- package/src/archiver/archiver.ts +199 -191
- package/src/archiver/archiver_store.ts +6 -13
- package/src/archiver/archiver_store_test_suite.ts +160 -186
- package/src/archiver/config.ts +12 -12
- package/src/archiver/data_retrieval.ts +12 -17
- package/src/archiver/instrumentation.ts +3 -5
- package/src/archiver/kv_archiver_store/kv_archiver_store.ts +7 -14
- package/src/archiver/kv_archiver_store/log_store.ts +68 -118
- package/src/archiver/memory_archiver_store/memory_archiver_store.ts +51 -65
- package/src/index.ts +5 -59
- package/src/test/mock_l2_block_source.ts +1 -2
- package/dest/archiver/epoch_helpers.d.ts +0 -20
- package/dest/archiver/epoch_helpers.d.ts.map +0 -1
- package/dest/archiver/epoch_helpers.js +0 -34
- package/src/archiver/epoch_helpers.ts +0 -54
package/src/archiver/archiver_store.ts
CHANGED
@@ -1,12 +1,9 @@
 import {
-  type FromLogType,
   type GetUnencryptedLogsResponse,
   type InBlock,
   type InboxLeaf,
   type L2Block,
-  type L2BlockL2Logs,
   type LogFilter,
-  type LogType,
   type TxEffect,
   type TxHash,
   type TxReceipt,
@@ -18,6 +15,7 @@ import {
   type ExecutablePrivateFunctionWithMembershipProof,
   type Fr,
   type Header,
+  type PrivateLog,
   type UnconstrainedFunctionWithMembershipProof,
 } from '@aztec/circuits.js';
 import { type ContractArtifact, type FunctionSelector } from '@aztec/foundation/abi';
@@ -142,17 +140,12 @@ export interface ArchiverDataStore {
   getTotalL1ToL2MessageCount(): Promise<bigint>;
 
   /**
-   *
-   * @param from -
-   * @param limit - The number of
-   * @
-   * @returns The requested logs.
+   * Retrieves all private logs from up to `limit` blocks, starting from the block number `from`.
+   * @param from - The block number from which to begin retrieving logs.
+   * @param limit - The maximum number of blocks to retrieve logs from.
+   * @returns An array of private logs from the specified range of blocks.
    */
-  getLogs<TLogType extends LogType>(
-    from: number,
-    limit: number,
-    logType: TLogType,
-  ): Promise<L2BlockL2Logs<FromLogType<TLogType>>[]>;
+  getPrivateLogs(from: number, limit: number): Promise<PrivateLog[]>;
 
   /**
   * Gets all logs that match any of the received tags (i.e. logs with their first field equal to a tag).
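
The interface change above replaces the type-parameterized getLogs with a dedicated private-log query. A minimal consumption sketch, assuming any ArchiverDataStore implementation; the helper function, relative import path, and block range are illustrative, not from the package:

    // Sketch: consuming the new interface method (hypothetical helper).
    import { type PrivateLog } from '@aztec/circuits.js';
    import { type ArchiverDataStore } from './archiver_store.js'; // assumed path

    async function collectPrivateLogs(store: ArchiverDataStore): Promise<PrivateLog[]> {
      // Private logs from up to 100 blocks, starting at block 1. There is no
      // log-type parameter any more: the result is a flat PrivateLog[] rather
      // than the old L2BlockL2Logs<FromLogType<TLogType>>[].
      return store.getPrivateLogs(1, 100);
    }
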
package/src/archiver/archiver_store_test_suite.ts
CHANGED
@@ -1,4 +1,14 @@
-import {
+import {
+  InboxLeaf,
+  L2Block,
+  LogId,
+  TxEffect,
+  TxHash,
+  UnencryptedFunctionL2Logs,
+  UnencryptedL2Log,
+  UnencryptedTxL2Logs,
+  wrapInBlock,
+} from '@aztec/circuit-types';
 import '@aztec/circuit-types/jest';
 import {
   AztecAddress,
@@ -8,6 +18,8 @@ import {
   INITIAL_L2_BLOCK_NUM,
   L1_TO_L2_MSG_SUBTREE_HEIGHT,
   MAX_NULLIFIERS_PER_TX,
+  PRIVATE_LOG_SIZE_IN_FIELDS,
+  PrivateLog,
   SerializableContractInstance,
   computePublicBytecodeCommitment,
 } from '@aztec/circuits.js';
@@ -16,7 +28,6 @@ import {
   makeExecutablePrivateFunctionWithMembershipProof,
   makeUnconstrainedFunctionWithMembershipProof,
 } from '@aztec/circuits.js/testing';
-import { toBufferBE } from '@aztec/foundation/bigint-buffer';
 import { times } from '@aztec/foundation/collection';
 import { randomBytes, randomInt } from '@aztec/foundation/crypto';
 
@@ -155,55 +166,41 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch
   });
 
   describe('addLogs', () => {
-    it('adds
+    it('adds private & unencrypted logs', async () => {
       const block = blocks[0].data;
       await expect(store.addLogs([block])).resolves.toEqual(true);
     });
   });
 
   describe('deleteLogs', () => {
-    it('deletes
+    it('deletes private & unencrypted logs', async () => {
       const block = blocks[0].data;
       await store.addBlocks([blocks[0]]);
       await expect(store.addLogs([block])).resolves.toEqual(true);
 
-      expect((await store.
-
-
+      expect((await store.getPrivateLogs(1, 1)).length).toEqual(
+        block.body.txEffects.map(txEffect => txEffect.privateLogs).flat().length,
+      );
+      expect((await store.getUnencryptedLogs({ fromBlock: 1 })).logs.length).toEqual(
+        block.body.unencryptedLogs.getTotalLogCount(),
+      );
 
       // This one is a pain for memory as we would never want to just delete memory in the middle.
       await store.deleteLogs([block]);
 
-      expect((await store.
-      expect((await store.
-      expect((await store.getLogs(1, 1, LogType.UNENCRYPTED))[0]).toEqual(undefined);
+      expect((await store.getPrivateLogs(1, 1)).length).toEqual(0);
+      expect((await store.getUnencryptedLogs({ fromBlock: 1 })).logs.length).toEqual(0);
     });
   });
 
-  describe
-
-
-
-
-    beforeEach(async () => {
-      await store.addBlocks(blocks);
-      await store.addLogs(blocks.map(b => b.data));
-    });
+  describe('getPrivateLogs', () => {
+    it('gets added private logs', async () => {
+      const block = blocks[0].data;
+      await store.addBlocks([blocks[0]]);
+      await store.addLogs([block]);
 
-
-
-        switch (logType) {
-          case LogType.ENCRYPTED:
-            return block.data.body.encryptedLogs;
-          case LogType.NOTEENCRYPTED:
-            return block.data.body.noteEncryptedLogs;
-          case LogType.UNENCRYPTED:
-          default:
-            return block.data.body.unencryptedLogs;
-        }
-      });
-      const actualLogs = await store.getLogs(from, limit, logType);
-      expect(actualLogs[0].txLogs[0]).toEqual(expectedLogs[0].txLogs[0]);
+      const privateLogs = await store.getPrivateLogs(1, 1);
+      expect(privateLogs).toEqual(block.body.txEffects.map(txEffect => txEffect.privateLogs).flat());
     });
   });
 
@@ -373,178 +370,155 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch
   });
 
   describe('getLogsByTags', () => {
-    const
-    const
-    const
-    const
-    const numUnencryptedLogsPerFn = 1;
-    const numBlocks = 10;
-    let blocks: L1Published<L2Block>[];
-    let encryptedLogTags: { [i: number]: { [j: number]: Buffer[] } } = {};
-    let unencryptedLogTags: { [i: number]: { [j: number]: Buffer[] } } = {};
-
-    beforeEach(async () => {
-      blocks = times(numBlocks, (index: number) => ({
-        data: L2Block.random(
-          index + 1,
-          txsPerBlock,
-          numPrivateFunctionCalls,
-          numPublicFunctionCalls,
-          numEncryptedLogsPerFn,
-          numUnencryptedLogsPerFn,
-        ),
-        l1: { blockNumber: BigInt(index), blockHash: `0x${index}`, timestamp: BigInt(index) },
-      }));
-      // Last block has the note encrypted log tags of the first tx copied from the previous block
-      blocks[numBlocks - 1].data.body.noteEncryptedLogs.txLogs[0].functionLogs.forEach((fnLogs, fnIndex) => {
-        fnLogs.logs.forEach((log, logIndex) => {
-          const previousLogData =
-            blocks[numBlocks - 2].data.body.noteEncryptedLogs.txLogs[0].functionLogs[fnIndex].logs[logIndex].data;
-          previousLogData.copy(log.data, 0, 0, 32);
-        });
-      });
-      // Last block has invalid tags in the second tx
-      const tooBig = toBufferBE(Fr.MODULUS, 32);
-      blocks[numBlocks - 1].data.body.noteEncryptedLogs.txLogs[1].functionLogs.forEach(fnLogs => {
-        fnLogs.logs.forEach(log => {
-          tooBig.copy(log.data, 0, 0, 32);
-        });
-      });
+    const numBlocks = 3;
+    const numTxsPerBlock = 4;
+    const numPrivateLogsPerTx = 3;
+    const numUnencryptedLogsPerTx = 2;
 
-
-      await store.addLogs(blocks.map(b => b.data));
+    let blocks: L1Published<L2Block>[];
 
-
-
-      blocks.forEach((b, blockIndex) => {
-        if (!encryptedLogTags[blockIndex]) {
-          encryptedLogTags[blockIndex] = {};
-        }
-        if (!unencryptedLogTags[blockIndex]) {
-          unencryptedLogTags[blockIndex] = {};
-        }
-        b.data.body.noteEncryptedLogs.txLogs.forEach((txLogs, txIndex) => {
-          if (!encryptedLogTags[blockIndex][txIndex]) {
-            encryptedLogTags[blockIndex][txIndex] = [];
-          }
-          encryptedLogTags[blockIndex][txIndex].push(...txLogs.unrollLogs().map(log => log.data.subarray(0, 32)));
-        });
-        b.data.body.unencryptedLogs.txLogs.forEach((txLogs, txIndex) => {
-          if (!unencryptedLogTags[blockIndex][txIndex]) {
-            unencryptedLogTags[blockIndex][txIndex] = [];
-          }
-          unencryptedLogTags[blockIndex][txIndex].push(...txLogs.unrollLogs().map(log => log.data.subarray(0, 32)));
-        });
-      });
-    });
+    const makeTag = (blockNumber: number, txIndex: number, logIndex: number, isPublic = false) =>
+      new Fr((blockNumber * 100 + txIndex * 10 + logIndex) * (isPublic ? 123 : 1));
 
-
-
-      const targetBlockIndex = randomInt(numBlocks - 2);
-      const targetTxIndex = randomInt(txsPerBlock);
+    const makePrivateLog = (tag: Fr) =>
+      PrivateLog.fromFields([tag, ...times(PRIVATE_LOG_SIZE_IN_FIELDS - 1, i => new Fr(tag.toNumber() + i))]);
 
-
-
-      );
+    const makePublicLog = (tag: Fr) =>
+      Buffer.concat([tag.toBuffer(), ...times(tag.toNumber() % 60, i => new Fr(tag.toNumber() + i).toBuffer())]);
 
-
-
-
-
-        expect(logsByTag).toHaveLength(1);
-        const [scopedLog] = logsByTag;
-        expect(scopedLog.txHash).toEqual(blocks[targetBlockIndex].data.body.txEffects[targetTxIndex].txHash);
-        expect(scopedLog.logData).toEqual(
-          blocks[targetBlockIndex].data.body.noteEncryptedLogs.txLogs[targetTxIndex].unrollLogs()[logIndex].data,
-        );
+    const mockPrivateLogs = (blockNumber: number, txIndex: number) => {
+      return times(numPrivateLogsPerTx, (logIndex: number) => {
+        const tag = makeTag(blockNumber, txIndex, logIndex);
+        return makePrivateLog(tag);
       });
-      }
-
-    // TODO: Allow this test when #9835 is fixed and tags can be correctly decoded
-    it.skip('is possible to batch request all logs (encrypted and unencrypted) of a tx via tags', async () => {
-      // get random tx from any block that's not the last one
-      const targetBlockIndex = randomInt(numBlocks - 2);
-      const targetTxIndex = randomInt(txsPerBlock);
-
-      const logsByTags = await store.getLogsByTags(
-        encryptedLogTags[targetBlockIndex][targetTxIndex]
-          .concat(unencryptedLogTags[targetBlockIndex][targetTxIndex])
-          .map(buffer => new Fr(buffer)),
-      );
+    };
 
-
-
-
-
-
-      const unencryptedLogsByTags = logsByTags.slice(numPrivateFunctionCalls * numEncryptedLogsPerFn);
-      encryptedLogsByTags.forEach((logsByTag, logIndex) => {
-        expect(logsByTag).toHaveLength(1);
-        const [scopedLog] = logsByTag;
-        expect(scopedLog.txHash).toEqual(blocks[targetBlockIndex].data.body.txEffects[targetTxIndex].txHash);
-        expect(scopedLog.logData).toEqual(
-          blocks[targetBlockIndex].data.body.noteEncryptedLogs.txLogs[targetTxIndex].unrollLogs()[logIndex].data,
-        );
+    const mockUnencryptedLogs = (blockNumber: number, txIndex: number) => {
+      const logs = times(numUnencryptedLogsPerTx, (logIndex: number) => {
+        const tag = makeTag(blockNumber, txIndex, logIndex, /* isPublic */ true);
+        const log = makePublicLog(tag);
+        return new UnencryptedL2Log(AztecAddress.fromNumber(txIndex), log);
       });
-
-
-
-
-
-
+      return new UnencryptedTxL2Logs([new UnencryptedFunctionL2Logs(logs)]);
+    };
+
+    const mockBlockWithLogs = (blockNumber: number): L1Published<L2Block> => {
+      const block = L2Block.random(blockNumber);
+      block.header.globalVariables.blockNumber = new Fr(blockNumber);
+
+      block.body.txEffects = times(numTxsPerBlock, (txIndex: number) => {
+        const txEffect = TxEffect.random();
+        txEffect.privateLogs = mockPrivateLogs(blockNumber, txIndex);
+        txEffect.unencryptedLogs = mockUnencryptedLogs(blockNumber, txIndex);
+        return txEffect;
       });
-    });
 
-
-
-
-
-
+      return {
+        data: block,
+        l1: { blockNumber: BigInt(blockNumber), blockHash: `0x${blockNumber}`, timestamp: BigInt(blockNumber) },
+      };
+    };
 
-
-
+    beforeEach(async () => {
+      blocks = times(numBlocks, (index: number) => mockBlockWithLogs(index));
 
-
+      await store.addBlocks(blocks);
+      await store.addLogs(blocks.map(b => b.data));
     });
 
-    it('is possible to batch request logs
-
-
+    it('is possible to batch request private logs via tags', async () => {
+      const tags = [makeTag(1, 1, 2), makeTag(0, 2, 0)];
+
+      const logsByTags = await store.getLogsByTags(tags);
+
+      expect(logsByTags).toEqual([
+        [
+          expect.objectContaining({
+            blockNumber: 1,
+            logData: makePrivateLog(tags[0]).toBuffer(),
+            isFromPublic: false,
+          }),
+        ],
+        [
+          expect.objectContaining({
+            blockNumber: 0,
+            logData: makePrivateLog(tags[1]).toBuffer(),
+            isFromPublic: false,
+          }),
+        ],
+      ]);
+    });
 
-
-
+    // TODO: Allow this test when #9835 is fixed and tags can be correctly decoded
+    it.skip('is possible to batch request all logs (private and unencrypted) via tags', async () => {
+      // Tag(0, 0, 0) is shared with the first private log and the first unencrypted log.
+      const tags = [makeTag(0, 0, 0)];
+
+      const logsByTags = await store.getLogsByTags(tags);
+
+      expect(logsByTags).toEqual([
+        [
+          expect.objectContaining({
+            blockNumber: 0,
+            logData: makePrivateLog(tags[0]).toBuffer(),
+            isFromPublic: false,
+          }),
+          expect.objectContaining({
+            blockNumber: 0,
+            logData: makePublicLog(tags[0]),
+            isFromPublic: true,
+          }),
+        ],
+      ]);
+    });
 
-
-
-
-
-
+    it('is possible to batch request logs that have the same tag but different content', async () => {
+      const tags = [makeTag(1, 2, 1)];
+
+      // Create a block containing logs that have the same tag as the blocks before.
+      const newBlockNumber = numBlocks;
+      const newBlock = mockBlockWithLogs(newBlockNumber);
+      const newLog = newBlock.data.body.txEffects[1].privateLogs[1];
+      newLog.fields[0] = tags[0];
+      newBlock.data.body.txEffects[1].privateLogs[1] = newLog;
+      await store.addBlocks([newBlock]);
+      await store.addLogs([newBlock.data]);
+
+      const logsByTags = await store.getLogsByTags(tags);
+
+      expect(logsByTags).toEqual([
+        [
+          expect.objectContaining({
+            blockNumber: 1,
+            logData: makePrivateLog(tags[0]).toBuffer(),
+            isFromPublic: false,
+          }),
+          expect.objectContaining({
+            blockNumber: newBlockNumber,
+            logData: newLog.toBuffer(),
+            isFromPublic: false,
+          }),
+        ],
+      ]);
     });
 
     it('is possible to request logs for non-existing tags and determine their position', async () => {
-
-
-      const
-
-
-
-
+      const tags = [makeTag(99, 88, 77), makeTag(1, 1, 1)];
+
+      const logsByTags = await store.getLogsByTags(tags);
+
+      expect(logsByTags).toEqual([
+        [
+          // No logs for the first tag.
+        ],
+        [
+          expect.objectContaining({
+            blockNumber: 1,
+            logData: makePrivateLog(tags[1]).toBuffer(),
+            isFromPublic: false,
+          }),
+        ],
       ]);
-
-      const expectedResponseSize = numPrivateFunctionCalls * numEncryptedLogsPerFn;
-      expect(logsByTags.length).toEqual(expectedResponseSize);
-
-      const [emptyLogsByTag, ...populatedLogsByTags] = logsByTags;
-      expect(emptyLogsByTag).toHaveLength(0);
-
-      populatedLogsByTags.forEach((logsByTag, logIndex) => {
-        expect(logsByTag).toHaveLength(1);
-        const [scopedLog] = logsByTag;
-        expect(scopedLog.txHash).toEqual(blocks[targetBlockIndex].data.body.txEffects[targetTxIndex].txHash);
-        expect(scopedLog.logData).toEqual(
-          blocks[targetBlockIndex].data.body.noteEncryptedLogs.txLogs[targetTxIndex].unrollLogs()[logIndex + 1].data,
-        );
-      });
     });
   });
 
@@ -557,7 +531,7 @@ export function describeArchiverDataStore(testName: string, getStore: () => Arch
 
     beforeEach(async () => {
       blocks = times(numBlocks, (index: number) => ({
-        data: L2Block.random(index + 1, txsPerBlock,
+        data: L2Block.random(index + 1, txsPerBlock, numPublicFunctionCalls, numUnencryptedLogs),
         l1: { blockNumber: BigInt(index), blockHash: `0x${index}`, timestamp: BigInt(index) },
      }));
 
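
The rewritten getLogsByTags fixtures replace random log data with the deterministic makeTag encoding, so each test can assert exact matches. A quick worked sketch of that encoding, using the formula from the hunk above (the standalone helper is illustrative):

    // tag = (blockNumber * 100 + txIndex * 10 + logIndex) * (isPublic ? 123 : 1)
    const tagValue = (blockNumber: number, txIndex: number, logIndex: number, isPublic = false) =>
      (blockNumber * 100 + txIndex * 10 + logIndex) * (isPublic ? 123 : 1);

    tagValue(1, 1, 2);       // 112 -> a unique private-log tag (block 1, tx 1, log 2)
    tagValue(0, 2, 0);       // 20
    tagValue(0, 0, 0);       // 0
    tagValue(0, 0, 0, true); // 0 -> collides with the private tag above, which is why
                             // the skipped test notes that Tag(0, 0, 0) is shared by
                             // the first private log and the first unencrypted log.
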
package/src/archiver/config.ts
CHANGED
@@ -18,24 +18,19 @@ import { type ConfigMappingsType, getConfigFromMappings, numberConfigHelper } fr
  * The archiver configuration.
  */
 export type ArchiverConfig = {
-  /**
-   * URL for an archiver service. If set, will return an archiver client as opposed to starting a new one.
-   */
+  /** URL for an archiver service. If set, will return an archiver client as opposed to starting a new one. */
   archiverUrl?: string;
 
-  /**
-   * The polling interval in ms for retrieving new L2 blocks and encrypted logs.
-   */
+  /** The polling interval in ms for retrieving new L2 blocks and encrypted logs. */
   archiverPollingIntervalMS?: number;
 
-  /**
-
-
+  /** The number of L2 blocks the archiver will attempt to download at a time. */
+  archiverBatchSize?: number;
+
+  /** The polling interval viem uses in ms */
   viemPollingIntervalMS?: number;
 
-  /**
-   * The deployed L1 contract addresses
-   */
+  /** The deployed L1 contract addresses */
   l1Contracts: L1ContractAddresses;
 
   /** The max number of logs that can be obtained in 1 "getUnencryptedLogs" call. */
@@ -54,6 +49,11 @@ export const archiverConfigMappings: ConfigMappingsType<ArchiverConfig> = {
     description: 'The polling interval in ms for retrieving new L2 blocks and encrypted logs.',
     ...numberConfigHelper(1_000),
   },
+  archiverBatchSize: {
+    env: 'ARCHIVER_BATCH_SIZE',
+    description: 'The number of L2 blocks the archiver will attempt to download at a time.',
+    ...numberConfigHelper(100),
+  },
   maxLogs: {
     env: 'ARCHIVER_MAX_LOGS',
     description: 'The max number of logs that can be obtained in 1 "getUnencryptedLogs" call.',
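
Like the other options, the new batch-size knob is resolved through archiverConfigMappings (via getConfigFromMappings, per the hunk header above). A minimal sketch of the equivalent env lookup; the direct process.env read is an illustration of the mapping's semantics, not how the package resolves config:

    // Sketch: what the archiverBatchSize mapping resolves to.
    // numberConfigHelper(100) supplies the default when ARCHIVER_BATCH_SIZE is unset.
    const archiverBatchSize: number = process.env.ARCHIVER_BATCH_SIZE
      ? Number(process.env.ARCHIVER_BATCH_SIZE)
      : 100;
    // e.g. ARCHIVER_BATCH_SIZE=50 caps each download round at 50 L2 blocks.
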
package/src/archiver/data_retrieval.ts
CHANGED
@@ -1,5 +1,6 @@
 import { Body, InboxLeaf, L2Block } from '@aztec/circuit-types';
 import { AppendOnlyTreeSnapshot, Fr, Header, Proof } from '@aztec/circuits.js';
+import { asyncPool } from '@aztec/foundation/async-pool';
 import { type EthAddress } from '@aztec/foundation/eth-address';
 import { type ViemSignature } from '@aztec/foundation/eth-signature';
 import { type DebugLogger, createDebugLogger } from '@aztec/foundation/log';
@@ -25,16 +26,14 @@ import { type L1Published, type L1PublishedData } from './structs/published.js';
  * Fetches new L2 blocks.
  * @param publicClient - The viem public client to use for transaction retrieval.
  * @param rollupAddress - The address of the rollup contract.
- * @param blockUntilSynced - If true, blocks until the archiver has fully synced.
  * @param searchStartBlock - The block number to use for starting the search.
  * @param searchEndBlock - The highest block number that we should search up to.
  * @param expectedNextL2BlockNum - The next L2 block number that we expect to find.
  * @returns An array of block; as well as the next eth block to search from.
  */
-export async function retrieveBlockFromRollup(
+export async function retrieveBlocksFromRollup(
   rollup: GetContractReturnType<typeof RollupAbi, PublicClient<HttpTransport, Chain>>,
   publicClient: PublicClient,
-  blockUntilSynced: boolean,
   searchStartBlock: bigint,
   searchEndBlock: bigint,
   logger: DebugLogger = createDebugLogger('aztec:archiver'),
@@ -58,13 +57,13 @@ export async function retrieveBlockFromRollup(
 
     const lastLog = l2BlockProposedLogs[l2BlockProposedLogs.length - 1];
     logger.debug(
-      `Got L2 block processed logs for ${l2BlockProposedLogs[0].blockNumber}-${lastLog.blockNumber} between ${searchStartBlock}-${searchEndBlock}
+      `Got ${l2BlockProposedLogs.length} L2 block processed logs for L2 blocks ${l2BlockProposedLogs[0].args.blockNumber}-${lastLog.args.blockNumber} between L1 blocks ${searchStartBlock}-${searchEndBlock}`,
     );
 
     const newBlocks = await processL2BlockProposedLogs(rollup, publicClient, l2BlockProposedLogs, logger);
    retrievedBlocks.push(...newBlocks);
     searchStartBlock = lastLog.blockNumber! + 1n;
-  } while (
+  } while (searchStartBlock <= searchEndBlock);
   return retrievedBlocks;
 }
 
@@ -82,14 +81,13 @@ export async function processL2BlockProposedLogs(
   logger: DebugLogger,
 ): Promise<L1Published<L2Block>[]> {
   const retrievedBlocks: L1Published<L2Block>[] = [];
-
+  await asyncPool(10, logs, async log => {
     const l2BlockNumber = log.args.blockNumber!;
     const archive = log.args.archive!;
     const archiveFromChain = await rollup.read.archiveAt([l2BlockNumber]);
 
     // The value from the event and contract will match only if the block is in the chain.
     if (archive === archiveFromChain) {
-      // TODO: Fetch blocks from calldata in parallel
       const block = await getBlockFromRollupTx(publicClient, log.transactionHash!, l2BlockNumber);
 
       const l1: L1PublishedData = {
@@ -100,11 +98,12 @@ export async function processL2BlockProposedLogs(
 
       retrievedBlocks.push({ data: block, l1 });
     } else {
-      logger.warn(
-
-
+      logger.warn(`Ignoring L2 block ${l2BlockNumber} due to archive root mismatch`, {
+        actual: archive,
+        expected: archiveFromChain,
+      });
     }
-  }
+  });
 
   return retrievedBlocks;
 }
@@ -129,10 +128,7 @@ async function getBlockFromRollupTx(
   l2BlockNum: bigint,
 ): Promise<L2Block> {
   const { input: data } = await publicClient.getTransaction({ hash: txHash });
-  const { functionName, args } = decodeFunctionData({
-    abi: RollupAbi,
-    data,
-  });
+  const { functionName, args } = decodeFunctionData({ abi: RollupAbi, data });
 
   const allowedMethods = ['propose', 'proposeAndClaim'];
 
@@ -184,7 +180,6 @@ async function getBlockFromRollupTx(
  */
 export async function retrieveL1ToL2Messages(
   inbox: GetContractReturnType<typeof InboxAbi, PublicClient<HttpTransport, Chain>>,
-  blockUntilSynced: boolean,
   searchStartBlock: bigint,
   searchEndBlock: bigint,
 ): Promise<DataRetrieval<InboxLeaf>> {
@@ -213,7 +208,7 @@ export async function retrieveL1ToL2Messages(
 
   // handles the case when there are no new messages:
   searchStartBlock = (messageSentLogs.findLast(msgLog => !!msgLog)?.blockNumber || searchStartBlock) + 1n;
-  } while (
+  } while (searchStartBlock <= searchEndBlock);
   return { lastProcessedL1BlockNumber: searchStartBlock - 1n, retrievedData: retrievedL1ToL2Messages };
 }
 
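
The old serial TODO ("Fetch blocks from calldata in parallel") is resolved by running the per-log work through asyncPool with a concurrency of 10, as shown above. A self-contained sketch of that bounded-concurrency pattern; this is illustrative only, not the actual @aztec/foundation/async-pool source:

    // Sketch: run `fn` over `items` with at most `limit` in flight at once.
    async function asyncPoolSketch<T, R>(
      limit: number,
      items: T[],
      fn: (item: T) => Promise<R>,
    ): Promise<R[]> {
      const results: R[] = new Array(items.length);
      let next = 0;
      // Spawn `limit` workers; each repeatedly claims the next unprocessed index.
      // Claiming is race-free because it happens synchronously between awaits.
      const workers = Array.from({ length: Math.min(limit, items.length) }, async () => {
        while (next < items.length) {
          const i = next++;
          results[i] = await fn(items[i]);
        }
      });
      await Promise.all(workers);
      return results;
    }

Used the way the diff uses it, asyncPoolSketch(10, logs, async log => { ... }) fetches up to ten rollup transactions concurrently instead of one at a time.
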
package/src/archiver/instrumentation.ts
CHANGED
@@ -5,6 +5,7 @@ import {
   type Gauge,
   type Histogram,
   LmdbMetrics,
+  type LmdbStatsCallback,
   Metrics,
   type TelemetryClient,
   type UpDownCounter,
@@ -23,7 +24,7 @@ export class ArchiverInstrumentation {
 
   private log = createDebugLogger('aztec:archiver:instrumentation');
 
-  constructor(private telemetry: TelemetryClient) {
+  constructor(private telemetry: TelemetryClient, lmdbStats?: LmdbStatsCallback) {
     const meter = telemetry.getMeter('Archiver');
     this.blockHeight = meter.createGauge(Metrics.ARCHIVER_BLOCK_HEIGHT, {
       description: 'The height of the latest block processed by the archiver',
@@ -72,13 +73,10 @@ export class ArchiverInstrumentation {
         name: Metrics.ARCHIVER_DB_NUM_ITEMS,
         description: 'Num items in the archiver database',
       },
+      lmdbStats,
    );
  }
 
-  public recordDBMetrics(metrics: { mappingSize: number; numItems: number; actualSize: number }) {
-    this.dbMetrics.recordDBMetrics(metrics);
-  }
-
   public isEnabled(): boolean {
     return this.telemetry.isEnabled();
   }
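
The public recordDBMetrics push API is removed; instead the constructor threads an optional lmdbStats callback into LmdbMetrics, so database stats are pulled when metrics are collected rather than pushed by callers. A generic sketch of that pull pattern; the types and metric names below are hypothetical, not the real LmdbStatsCallback signature:

    // Sketch: pull-style gauge wiring, analogous to passing lmdbStats into LmdbMetrics.
    // DbStats mirrors the shape of the removed recordDBMetrics argument.
    type DbStats = { mappingSize: number; numItems: number; actualSize: number };

    class DbMetricsSketch {
      constructor(private getStats?: () => DbStats) {}

      // Invoked by the metrics system on each collection, replacing the old
      // recordDBMetrics(...) method that callers had to invoke themselves.
      observe(record: (name: string, value: number) => void) {
        if (!this.getStats) {
          return;
        }
        const stats = this.getStats();
        record('db.mapping_size', stats.mappingSize); // hypothetical metric names
        record('db.num_items', stats.numItems);
        record('db.actual_size', stats.actualSize);
      }
    }
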
|