@aztec/stdlib 2.1.0-rc.1 → 2.1.0-rc.11
This diff compares the publicly released contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/dest/block/attestation_info.d.ts +30 -0
- package/dest/block/attestation_info.d.ts.map +1 -0
- package/dest/block/attestation_info.js +39 -0
- package/dest/block/index.d.ts +1 -0
- package/dest/block/index.d.ts.map +1 -1
- package/dest/block/index.js +1 -0
- package/dest/block/proposal/attestations_and_signers.js +1 -1
- package/dest/block/published_l2_block.d.ts +0 -2
- package/dest/block/published_l2_block.d.ts.map +1 -1
- package/dest/block/published_l2_block.js +0 -6
- package/dest/epoch-helpers/index.d.ts +2 -0
- package/dest/epoch-helpers/index.d.ts.map +1 -1
- package/dest/epoch-helpers/index.js +3 -0
- package/dest/file-store/factory.d.ts.map +1 -1
- package/dest/file-store/factory.js +18 -0
- package/dest/file-store/interface.d.ts +8 -2
- package/dest/file-store/interface.d.ts.map +1 -1
- package/dest/file-store/s3.d.ts +26 -0
- package/dest/file-store/s3.d.ts.map +1 -0
- package/dest/file-store/s3.js +252 -0
- package/dest/interfaces/aztec-node-admin.d.ts +3 -0
- package/dest/interfaces/aztec-node-admin.d.ts.map +1 -1
- package/dest/interfaces/configs.d.ts +5 -0
- package/dest/interfaces/configs.d.ts.map +1 -1
- package/dest/interfaces/configs.js +2 -1
- package/dest/interfaces/p2p.d.ts +2 -0
- package/dest/interfaces/p2p.d.ts.map +1 -1
- package/dest/interfaces/p2p.js +2 -1
- package/dest/l1-contracts/slash_factory.d.ts +1 -1
- package/dest/l1-contracts/slash_factory.d.ts.map +1 -1
- package/dest/l1-contracts/slash_factory.js +1 -1
- package/dest/p2p/block_attestation.d.ts +47 -1
- package/dest/p2p/block_attestation.d.ts.map +1 -1
- package/dest/p2p/block_attestation.js +43 -9
- package/dest/p2p/consensus_payload.js +1 -1
- package/dest/p2p/gossipable.d.ts +2 -4
- package/dest/p2p/gossipable.d.ts.map +1 -1
- package/dest/p2p/gossipable.js +5 -14
- package/dest/snapshots/download.d.ts.map +1 -1
- package/dest/snapshots/download.js +58 -2
- package/dest/snapshots/upload.d.ts.map +1 -1
- package/dest/snapshots/upload.js +1 -0
- package/dest/tests/mocks.d.ts +3 -1
- package/dest/tests/mocks.d.ts.map +1 -1
- package/dest/tests/mocks.js +31 -9
- package/package.json +10 -9
- package/src/block/attestation_info.ts +62 -0
- package/src/block/index.ts +1 -0
- package/src/block/proposal/attestations_and_signers.ts +1 -1
- package/src/block/published_l2_block.ts +0 -11
- package/src/epoch-helpers/index.ts +8 -0
- package/src/file-store/factory.ts +15 -0
- package/src/file-store/interface.ts +8 -2
- package/src/file-store/s3.ts +254 -0
- package/src/interfaces/configs.ts +3 -0
- package/src/interfaces/p2p.ts +4 -0
- package/src/l1-contracts/slash_factory.ts +1 -1
- package/src/p2p/block_attestation.ts +57 -6
- package/src/p2p/consensus_payload.ts +1 -1
- package/src/p2p/gossipable.ts +6 -16
- package/src/snapshots/download.ts +66 -2
- package/src/snapshots/upload.ts +1 -0
- package/src/tests/mocks.ts +51 -12

package/src/file-store/factory.ts
CHANGED

@@ -4,9 +4,11 @@ import { GoogleCloudFileStore } from './gcs.js';
 import { HttpFileStore } from './http.js';
 import type { FileStore, ReadOnlyFileStore } from './interface.js';
 import { LocalFileStore } from './local.js';
+import { S3FileStore } from './s3.js';

 const supportedExamples = [
   `gs://bucket-name/path/to/store`,
+  `s3://bucket-name/path/to/store`,
   `file:///absolute/local/path/to/store`,
   `https://host/path`,
 ];

@@ -39,6 +41,19 @@ export async function createFileStore(
     } catch {
       throw new Error(`Invalid google cloud store definition: '${config}'.`);
     }
+  } else if (config.startsWith('s3://')) {
+    try {
+      const url = new URL(config);
+      const bucket = url.host;
+      const path = url.pathname.replace(/^\/+/, '');
+      const endpoint = url.searchParams.get('endpoint');
+      const publicBaseUrl = url.searchParams.get('publicBaseUrl') ?? undefined;
+      logger.info(`Creating S3 file store at ${bucket} ${path}`);
+      const store = new S3FileStore(bucket, path, { endpoint: endpoint ?? undefined, publicBaseUrl });
+      return store;
+    } catch {
+      throw new Error(`Invalid S3 store definition: '${config}'.`);
+    }
   } else {
     throw new Error(`Unknown file store config: '${config}'. Supported values are ${supportedExamples.join(', ')}.`);
   }
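
For reference, a minimal sketch of the new config string format, assuming `createFileStore` is exported from the `@aztec/stdlib/file-store` entrypoint; bucket names and hostnames below are placeholders:

```ts
import { createFileStore } from '@aztec/stdlib/file-store';

// Plain AWS S3: the region resolves from AWS_REGION / AWS_DEFAULT_REGION, else 'us-east-1'.
const aws = await createFileStore('s3://my-bucket/snapshots');

// S3-compatible endpoint (e.g. Cloudflare R2): endpoint and publicBaseUrl ride along as query params.
const r2 = await createFileStore(
  's3://my-bucket/snapshots?endpoint=https://ACCOUNT_ID.r2.cloudflarestorage.com&publicBaseUrl=https://files.example.com',
);
```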

package/src/file-store/interface.ts
CHANGED

@@ -12,8 +12,14 @@ export type FileStoreSaveOptions = { public?: boolean; metadata?: Record<string,

 /** Simple file store. */
 export interface FileStore extends ReadOnlyFileStore {
-  /**
+  /**
+   * Saves contents to the given path. Returns an URI that can be used later to `read` the file.
+   * Default: `compress` is false unless explicitly set.
+   */
   save(path: string, data: Buffer, opts?: FileStoreSaveOptions): Promise<string>;
-  /**
+  /**
+   * Uploads contents from a local file. Returns an URI that can be used later to `read` the file.
+   * Default: `compress` is true unless explicitly set to false.
+   */
   upload(destPath: string, srcPath: string, opts?: FileStoreSaveOptions): Promise<string>;
 }
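
A short sketch of the asymmetric `compress` defaults documented above, assuming both types are exported from the file-store entrypoint and `store` is any `FileStore` implementation (paths are placeholders):

```ts
import type { FileStore } from '@aztec/stdlib/file-store';

declare const store: FileStore;

await store.save('index.json', Buffer.from('{}')); // compress defaults to false: bytes stored as-is
await store.upload('data/archive.db', '/tmp/archive.db'); // compress defaults to true: gzipped first
await store.upload('data/archive.db', '/tmp/archive.db', { compress: false }); // explicit opt-out
```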

package/src/file-store/s3.ts
ADDED

@@ -0,0 +1,254 @@
+import { type Logger, createLogger } from '@aztec/foundation/log';
+
+import {
+  GetObjectCommand,
+  type GetObjectCommandOutput,
+  HeadObjectCommand,
+  PutObjectCommand,
+  S3Client,
+} from '@aws-sdk/client-s3';
+import { createReadStream, createWriteStream } from 'fs';
+import { mkdir, mkdtemp, stat, unlink } from 'fs/promises';
+import { tmpdir } from 'os';
+import { basename, dirname, join } from 'path';
+import { Readable } from 'stream';
+import { finished } from 'stream/promises';
+import { createGzip } from 'zlib';
+
+import type { FileStore, FileStoreSaveOptions } from './interface.js';
+
+function normalizeBasePath(path: string): string {
+  return path?.replace(/^\/+|\/+$/g, '') ?? '';
+}
+
+export class S3FileStore implements FileStore {
+  private readonly s3: S3Client;
+  private readonly region: string;
+  private readonly endpoint?: string;
+  private readonly publicBaseUrl?: string;
+
+  constructor(
+    private readonly bucketName: string,
+    private readonly basePath: string,
+    opts: { endpoint?: string; publicBaseUrl?: string },
+    private readonly log: Logger = createLogger('stdlib:s3-file-store'),
+  ) {
+    this.endpoint = opts.endpoint;
+    this.region = this.endpoint ? 'auto' : (process.env.AWS_REGION ?? process.env.AWS_DEFAULT_REGION ?? 'us-east-1');
+    this.publicBaseUrl = opts.publicBaseUrl;
+
+    const clientOptions: any = {};
+    if (this.endpoint) {
+      clientOptions.region = 'auto';
+      clientOptions.endpoint = this.endpoint;
+      clientOptions.forcePathStyle = true;
+    } else {
+      clientOptions.region = this.region;
+    }
+    this.s3 = new S3Client(clientOptions);
+  }
+
+  public async save(path: string, data: Buffer, opts: FileStoreSaveOptions = {}): Promise<string> {
+    const key = this.getFullPath(path);
+    const shouldCompress = !!opts.compress;
+
+    const body = shouldCompress ? (await import('zlib')).gzipSync(data) : data;
+    const contentLength = body.length;
+    const contentType = this.detectContentType(key, shouldCompress);
+    const put = new PutObjectCommand({
+      Bucket: this.bucketName,
+      Key: key,
+      Body: body,
+      ContentType: contentType,
+      CacheControl: opts.metadata?.['Cache-control'],
+      Metadata: this.extractUserMetadata(opts.metadata),
+      ContentLength: contentLength,
+    });
+    await this.s3.send(put);
+    return this.buildReturnedUrl(key, !!opts.public);
+  }
+
+  public async upload(destPath: string, srcPath: string, opts: FileStoreSaveOptions = {}): Promise<string> {
+    const key = this.getFullPath(destPath);
+    const shouldCompress = opts.compress !== false; // default true like GCS impl
+
+    await mkdir(dirname(srcPath), { recursive: true }).catch(() => undefined);
+    let contentLength: number | undefined;
+    let bodyPath = srcPath;
+
+    // We don't set Content-Encoding and we avoid SigV4 streaming (aws-chunked).
+    // With AWS SigV4 streaming uploads (Content-Encoding: aws-chunked[,gzip]), servers require
+    // x-amz-decoded-content-length (the size of the decoded payload) and an exact Content-Length
+    // that includes chunk metadata. For on-the-fly compression, providing
+    // those values without buffering or a pre-pass is impractical. Instead, we pre-gzip to a temp file
+    // to know ContentLength up-front and upload the gzipped bytes as-is, omitting Content-Encoding.
+    // Reference: AWS SigV4 streaming (chunked upload) requirements —
+    // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+    if (shouldCompress) {
+      // Pre-gzip to a temp file so we know the exact length for R2/S3 headers
+      const tmpDir = await mkdtemp(join(tmpdir(), 's3-upload-'));
+      const gzPath = join(tmpDir, `${basename(srcPath)}.gz`);
+      const source = createReadStream(srcPath);
+      const gz = createGzip();
+      const out = createWriteStream(gzPath);
+      try {
+        await finished(source.pipe(gz).pipe(out));
+        const st = await stat(gzPath);
+        contentLength = st.size;
+        bodyPath = gzPath;
+      } catch (err) {
+        // Ensure temp file is removed on failure
+        await unlink(gzPath).catch(() => undefined);
+        throw err;
+      }
+    } else {
+      const st = await stat(srcPath);
+      contentLength = st.size;
+      bodyPath = srcPath;
+    }
+
+    const bodyStream = createReadStream(bodyPath);
+    const contentType = this.detectContentType(key, shouldCompress);
+    try {
+      const put = new PutObjectCommand({
+        Bucket: this.bucketName,
+        Key: key,
+        Body: bodyStream as any,
+        ContentType: contentType,
+        CacheControl: opts.metadata?.['Cache-control'],
+        Metadata: this.extractUserMetadata(opts.metadata),
+        // Explicitly set ContentLength so R2 can compute x-amz-decoded-content-length correctly
+        ContentLength: contentLength,
+      } as any);
+      await this.s3.send(put);
+    } finally {
+      if (shouldCompress && bodyPath !== srcPath) {
+        await unlink(bodyPath).catch(() => undefined);
+      }
+    }
+    return this.buildReturnedUrl(key, !!opts.public);
+  }
+
+  public async read(pathOrUrlStr: string): Promise<Buffer> {
+    const { bucket, key } = this.getBucketAndKey(pathOrUrlStr);
+    const out: GetObjectCommandOutput = await this.s3.send(new GetObjectCommand({ Bucket: bucket, Key: key }));
+    const stream = out.Body as Readable;
+    const chunks: Buffer[] = [];
+    for await (const chunk of stream) {
+      chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
+    }
+    return Buffer.concat(chunks);
+  }
+
+  public async download(pathOrUrlStr: string, destPath: string): Promise<void> {
+    const { bucket, key } = this.getBucketAndKey(pathOrUrlStr);
+    const out: GetObjectCommandOutput = await this.s3.send(new GetObjectCommand({ Bucket: bucket, Key: key }));
+    await mkdir(dirname(destPath), { recursive: true });
+    const write = createWriteStream(destPath);
+    await finished((out.Body as Readable).pipe(write));
+  }
+
+  public async exists(pathOrUrlStr: string): Promise<boolean> {
+    try {
+      const { bucket, key } = this.getBucketAndKey(pathOrUrlStr);
+      await this.s3.send(new HeadObjectCommand({ Bucket: bucket, Key: key }));
+      return true;
+    } catch (err: any) {
+      const code = err?.$metadata?.httpStatusCode ?? err?.name ?? err?.Code;
+      if (code === 404 || code === 'NotFound' || code === 'NoSuchKey') {
+        return false;
+      }
+      this.log.warn(`Error checking existence for ${pathOrUrlStr}: ${err?.message ?? String(err)}`);
+      return false;
+    }
+  }
+
+  private extractUserMetadata(meta?: Record<string, string>): Record<string, string> | undefined {
+    if (!meta) {
+      return undefined;
+    }
+    const { ['Cache-control']: _ignored, ...rest } = meta;
+    return Object.keys(rest).length ? rest : undefined;
+  }
+
+  private detectContentType(key: string, isCompressed: boolean | undefined): string | undefined {
+    // Basic content type inference
+    const lower = key.toLowerCase();
+    if (lower.endsWith('.json') || lower.endsWith('.json.gz')) {
+      return 'application/json';
+    }
+    if (lower.endsWith('.txt') || lower.endsWith('.log') || lower.endsWith('.csv') || lower.endsWith('.md')) {
+      return 'text/plain; charset=utf-8';
+    }
+    if (lower.endsWith('.db') || lower.endsWith('.sqlite') || lower.endsWith('.bin')) {
+      return 'application/octet-stream';
+    }
+    if (lower.endsWith('.wasm') || lower.endsWith('.wasm.gz')) {
+      return 'application/wasm';
+    }
+    // If compressed, prefer octet-stream unless known
+    if (isCompressed) {
+      return 'application/octet-stream';
+    }
+    return undefined;
+  }
+
+  private buildReturnedUrl(key: string, makePublic: boolean): string {
+    if (!makePublic) {
+      return `s3://${this.bucketName}/${key}`;
+    }
+
+    if (this.publicBaseUrl) {
+      const base = this.publicBaseUrl.replace(/\/$/, '');
+      // key already includes basePath via getFullPath, so do not prefix basePath again
+      const full = key.replace(/^\/+/, '');
+      return `${base}/${full}`;
+    }
+
+    // Try to synthesize a URL from endpoint if available (works for public R2 buckets)
+    if (this.endpoint) {
+      try {
+        const url = new URL(this.endpoint);
+        return `https://${this.bucketName}.${url.host}/${key}`;
+      } catch {
+        // fallthrough
+      }
+    }
+
+    // Fallback to AWS style URL if region looks valid
+    return `https://${this.bucketName}.s3.${this.region}.amazonaws.com/${key}`;
+  }
+
+  private getBucketAndKey(pathOrUrlStr: string): { bucket: string; key: string } {
+    if (URL.canParse(pathOrUrlStr)) {
+      const url = new URL(pathOrUrlStr);
+      if (url.protocol === 's3:') {
+        return { bucket: url.host, key: url.pathname.replace(/^\/+/, '') };
+      }
+      // For https URLs, try to infer virtual-hosted or path-style
+      if (url.protocol === 'https:' || url.protocol === 'http:') {
+        // If the URL matches the configured publicBaseUrl host, map back to our bucket and key
+        if (this.publicBaseUrl && url.host === new URL(this.publicBaseUrl).host) {
+          return { bucket: this.bucketName, key: url.pathname.replace(/^\/+/, '') };
+        }
+        const hostParts = url.host.split('.');
+        if (hostParts.length > 3 && (hostParts[1] === 's3' || hostParts[hostParts.length - 2] === 'r2')) {
+          // virtual hosted
+          return { bucket: hostParts[0], key: url.pathname.replace(/^\/+/, '') };
+        } else if (this.endpoint && url.host === new URL(this.endpoint).host) {
+          // path-style at custom endpoint
+          const [bucket, ...rest] = url.pathname.replace(/^\/+/, '').split('/');
+          return { bucket, key: rest.join('/') };
+        }
+      }
+    }
+    // Treat as path
+    return { bucket: this.bucketName, key: this.getFullPath(pathOrUrlStr) };
+  }
+
+  private getFullPath(path: string): string {
+    const base = normalizeBasePath(this.basePath);
+    const rel = path.replace(/^\/+/, '');
+    return base ? join(base, rel) : rel;
+  }
+}
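
A usage sketch based on the constructor and URL-building logic above (bucket, base path, and hostnames are placeholders; omit `endpoint` for plain AWS S3):

```ts
const store = new S3FileStore('my-bucket', 'snapshots', {
  endpoint: 'https://ACCOUNT_ID.r2.cloudflarestorage.com',
  publicBaseUrl: 'https://files.example.com',
});

// public: true yields a publicBaseUrl-based URL; otherwise an s3:// URI is returned.
const uri = await store.save('meta/index.json', Buffer.from('{}'), { public: true });
// -> https://files.example.com/snapshots/meta/index.json

// read() resolves s3:// URIs, publicBaseUrl URLs, virtual-hosted or path-style URLs, and bare paths.
const bytes = await store.read(uri);
```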
package/src/interfaces/configs.ts
CHANGED

@@ -52,6 +52,8 @@ export interface SequencerConfig {
   skipCollectingAttestations?: boolean;
   /** Do not invalidate the previous block if invalid when we are the proposer (for testing only) */
   skipInvalidateBlockAsProposer?: boolean;
+  /** Inject a fake attestation (for testing only) */
+  injectFakeAttestation?: boolean;
 }

 export const SequencerConfigSchema = z.object({

@@ -75,4 +77,5 @@ export const SequencerConfigSchema = z.object({
   skipCollectingAttestations: z.boolean().optional(),
   secondsBeforeInvalidatingBlockAsCommitteeMember: z.number(),
   secondsBeforeInvalidatingBlockAsNonCommitteeMember: z.number(),
+  injectFakeAttestation: z.boolean().optional(),
 }) satisfies ZodFor<SequencerConfig>;
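
The new flag is optional and, like its neighbors, test-only; a sketch of setting it (other required fields omitted):

```ts
const overrides: Partial<SequencerConfig> = {
  injectFakeAttestation: true, // test-only: have the sequencer inject a fake attestation
};
```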
package/src/interfaces/p2p.ts
CHANGED

@@ -57,6 +57,9 @@ export interface P2PApiWithAttestations extends P2PApiWithoutAttestations {
    * @returns BlockAttestations
    */
   getAttestationsForSlot(slot: bigint, proposalId?: string): Promise<BlockAttestation[]>;
+
+  /** Deletes a given attestation manually from the p2p client attestation pool. */
+  deleteAttestation(attestation: BlockAttestation): Promise<void>;
 }

 export interface P2PClient extends P2PApiWithAttestations {

@@ -85,4 +88,5 @@ export const P2PApiSchema: ApiSchemaFor<P2PApi> = {
   getPendingTxCount: z.function().returns(schemas.Integer),
   getEncodedEnr: z.function().returns(z.string().optional()),
   getPeers: z.function().args(optional(z.boolean())).returns(z.array(PeerInfoSchema)),
+  deleteAttestation: z.function().args(BlockAttestation.schema).returns(z.void()),
 };
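
A sketch of one plausible use of the new method against a `P2PClient`; the client, slot, proposalId, and eviction rule here are assumptions for illustration:

```ts
const attestations = await client.getAttestationsForSlot(slot, proposalId);
for (const attestation of attestations) {
  // Manually evict attestations whose attester cannot be recovered from the signature.
  if (attestation.tryGetSender() === undefined) {
    await client.deleteAttestation(attestation);
  }
}
```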
package/src/l1-contracts/slash_factory.ts
CHANGED

@@ -4,7 +4,7 @@ import { EthAddress } from '@aztec/foundation/eth-address';
 import { createLogger } from '@aztec/foundation/log';
 import { SlashFactoryAbi } from '@aztec/l1-artifacts/SlashFactoryAbi';

-import { type GetContractReturnType, type Hex, type Log, encodeFunctionData, getContract } from 'viem';
+import { type GetContractReturnType, type Hex, type Log, encodeFunctionData, getContract } from '@spalladino/viem';

 import type { L1RollupConstants } from '../epoch-helpers/index.js';
 import {
package/src/p2p/block_attestation.ts
CHANGED

@@ -30,6 +30,7 @@ export class BlockAttestation extends Gossipable {
   static override p2pTopic = TopicType.block_attestation;

   private sender: EthAddress | undefined;
+  private proposer: EthAddress | undefined;

   constructor(
     /** The block number of the attestation. */

@@ -40,6 +41,9 @@ export class BlockAttestation extends Gossipable {

     /** The signature of the block attester */
     public readonly signature: Signature,
+
+    /** The signature from the block proposer */
+    public readonly proposerSignature: Signature,
   ) {
     super();
   }

@@ -50,8 +54,9 @@ export class BlockAttestation extends Gossipable {
       blockNumber: schemas.UInt32,
       payload: ConsensusPayload.schema,
       signature: Signature.schema,
+      proposerSignature: Signature.schema,
     })
-    .transform(obj => new BlockAttestation(obj.blockNumber, obj.payload, obj.signature));
+    .transform(obj => new BlockAttestation(obj.blockNumber, obj.payload, obj.signature, obj.proposerSignature));
  }

  override generateP2PMessageIdentifier(): Promise<Buffer32> {

@@ -81,28 +86,74 @@ export class BlockAttestation extends Gossipable {
     return this.sender;
   }

+  /**
+   * Tries to get the sender of the attestation
+   * @returns The sender of the attestation or undefined if it fails during recovery
+   */
+  tryGetSender(): EthAddress | undefined {
+    try {
+      return this.getSender();
+    } catch {
+      return undefined;
+    }
+  }
+
+  /**
+   * Lazily evaluate and cache the proposer of the block
+   * @returns The proposer of the block
+   */
+  getProposer(): EthAddress {
+    if (!this.proposer) {
+      // Recover the proposer from the proposal signature
+      const hashed = getHashedSignaturePayloadEthSignedMessage(this.payload, SignatureDomainSeparator.blockProposal);
+      // Cache the proposer for later use
+      this.proposer = recoverAddress(hashed, this.proposerSignature);
+    }
+
+    return this.proposer;
+  }
+
   getPayload(): Buffer {
     return this.payload.getPayloadToSign(SignatureDomainSeparator.blockAttestation);
   }

   toBuffer(): Buffer {
-    return serializeToBuffer([this.blockNumber, this.payload, this.signature]);
+    return serializeToBuffer([this.blockNumber, this.payload, this.signature, this.proposerSignature]);
   }

   static fromBuffer(buf: Buffer | BufferReader): BlockAttestation {
     const reader = BufferReader.asReader(buf);
-    return new BlockAttestation(
+    return new BlockAttestation(
+      reader.readNumber(),
+      reader.readObject(ConsensusPayload),
+      reader.readObject(Signature),
+      reader.readObject(Signature),
+    );
   }

   static empty(): BlockAttestation {
-    return new BlockAttestation(0, ConsensusPayload.empty(), Signature.empty());
+    return new BlockAttestation(0, ConsensusPayload.empty(), Signature.empty(), Signature.empty());
   }

   static random(): BlockAttestation {
-    return new BlockAttestation(
+    return new BlockAttestation(
+      Math.floor(Math.random() * 1000) + 1,
+      ConsensusPayload.random(),
+      Signature.random(),
+      Signature.random(),
+    );
   }

   getSize(): number {
-    return 4 /* blockNumber */ + this.payload.getSize() + this.signature.getSize();
+    return 4 /* blockNumber */ + this.payload.getSize() + this.signature.getSize() + this.proposerSignature.getSize();
+  }
+
+  toInspect() {
+    return {
+      blockNumber: this.blockNumber,
+      payload: this.payload.toInspect(),
+      signature: this.signature.toString(),
+      proposerSignature: this.proposerSignature.toString(),
+    };
   }
 }
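
Since `toBuffer`/`fromBuffer` now carry a fourth field, attestations serialized by rc.1 nodes do not deserialize under rc.11 and vice versa. A round-trip sketch under the new layout:

```ts
const att = BlockAttestation.random();
const copy = BlockAttestation.fromBuffer(att.toBuffer());

// Both addresses are recovered from their respective signatures and cached on first call.
const attester = copy.getSender();
const proposer = copy.getProposer();
```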
package/src/p2p/consensus_payload.ts
CHANGED

@@ -4,7 +4,7 @@ import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize';
 import { hexToBuffer } from '@aztec/foundation/string';
 import type { FieldsOf } from '@aztec/foundation/types';

-import { encodeAbiParameters, parseAbiParameters } from 'viem';
+import { encodeAbiParameters, parseAbiParameters } from '@spalladino/viem';
 import { z } from 'zod';

 import type { L2Block } from '../block/l2_block.js';
package/src/p2p/gossipable.ts
CHANGED

@@ -1,33 +1,23 @@
 import { Buffer32 } from '@aztec/foundation/buffer';
-import { BufferReader,
+import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize';

 import type { TopicType } from './topic_type.js';

 export class P2PMessage {
-  constructor(
-    public readonly publishTime: Date,
-    public readonly id: Buffer32,
-    public readonly payload: Buffer,
-  ) {}
+  constructor(public readonly payload: Buffer) {}

-  static
-    return new P2PMessage(
+  static fromGossipable(message: Gossipable): P2PMessage {
+    return new P2PMessage(message.toBuffer());
   }

   static fromMessageData(messageData: Buffer): P2PMessage {
     const reader = new BufferReader(messageData);
-    const publishTime = reader.readUInt64();
-    const id = Buffer32.fromBuffer(reader);
     const payload = reader.readBuffer();
-    return new P2PMessage(
+    return new P2PMessage(payload);
   }

   toMessageData(): Buffer {
-    return serializeToBuffer([
-      bigintToUInt64BE(BigInt(this.publishTime.getTime())),
-      this.id,
-      serializeToBuffer(this.payload.length, this.payload),
-    ]);
+    return serializeToBuffer([serializeToBuffer(this.payload.length, this.payload)]);
   }
 }
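
With `publishTime` and `id` dropped, a `P2PMessage` reduces to a length-prefixed payload; a round-trip sketch where `attestation` stands in for any `Gossipable`:

```ts
const msg = P2PMessage.fromGossipable(attestation); // wraps attestation.toBuffer()
const wire = msg.toMessageData(); // length prefix + payload bytes
const back = P2PMessage.fromMessageData(wire);
// back.payload.equals(attestation.toBuffer()) === true
```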
package/src/snapshots/download.ts
CHANGED

@@ -2,6 +2,12 @@ import { fromEntries, getEntries, maxBy } from '@aztec/foundation/collection';
 import { jsonParseWithSchema } from '@aztec/foundation/json-rpc';
 import type { ReadOnlyFileStore } from '@aztec/stdlib/file-store';

+import { createReadStream, createWriteStream } from 'fs';
+import fs from 'fs/promises';
+import pathMod from 'path';
+import { pipeline } from 'stream/promises';
+import { createGunzip, gunzipSync } from 'zlib';
+
 import {
   SnapshotDataKeys,
   type SnapshotDataUrls,

@@ -20,7 +26,8 @@ export async function getSnapshotIndex(
   try {
     if (await store.exists(snapshotIndexPath)) {
       const snapshotIndexData = await store.read(snapshotIndexPath);
-
+      const buf = maybeGunzip(snapshotIndexData);
+      return jsonParseWithSchema(buf.toString('utf-8'), SnapshotsIndexSchema);
     } else {
       return undefined;
     }

@@ -50,10 +57,67 @@ export function makeSnapshotPaths(baseDir: string): SnapshotDataUrls {
   return fromEntries(SnapshotDataKeys.map(key => [key, `${baseDir}/${key}.db`]));
 }

+function isGzipMagic(data: Buffer): boolean {
+  return data.length >= 2 && data[0] === 0x1f && data[1] === 0x8b;
+}
+
+function maybeGunzip(data: Buffer): Buffer {
+  const magicNumberIndicatesGzip = isGzipMagic(data);
+
+  if (magicNumberIndicatesGzip) {
+    try {
+      const out = gunzipSync(data);
+      return out;
+    } catch (err) {
+      throw new Error(`Decompression of gzipped data failed: ${(err as Error).message}`);
+    }
+  }
+  return data;
+}
+
+async function detectGzip(localFilePathToPeek: string): Promise<boolean> {
+  // Peek the actual bytes we downloaded.
+  try {
+    const fd = await fs.open(localFilePathToPeek, 'r');
+    try {
+      const header = Buffer.alloc(2);
+      const { bytesRead } = await fd.read(header, 0, 2, 0);
+      return bytesRead >= 2 && isGzipMagic(header);
+    } finally {
+      await fd.close();
+    }
+  } catch {
+    return false;
+  }
+}
+
 export async function downloadSnapshot(
   snapshot: Pick<SnapshotMetadata, 'dataUrls'>,
   localPaths: Record<SnapshotDataKeys, string>,
   store: ReadOnlyFileStore,
 ): Promise<void> {
-  await Promise.all(
+  await Promise.all(
+    getEntries(localPaths).map(async ([key, path]) => {
+      await fs.mkdir(pathMod.dirname(path), { recursive: true });
+
+      const tmpPath = `${path}.download`;
+      try {
+        const url = snapshot.dataUrls[key];
+        await store.download(url, tmpPath);
+
+        const isGzip = await detectGzip(tmpPath);
+
+        const read = createReadStream(tmpPath);
+        const write = createWriteStream(path);
+        if (isGzip) {
+          const gunzip = createGunzip();
+          await pipeline(read, gunzip, write);
+        } else {
+          await pipeline(read, write);
+        }
+      } finally {
+        await fs.unlink(tmpPath).catch(() => undefined);
+      }
+    }),
+  );
 }
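
The sniffing relies on the two-byte gzip magic `0x1f 0x8b`, so a store may serve either raw or gzipped data transparently; a sketch of the (module-internal) `maybeGunzip` behavior shown above:

```ts
import { gzipSync } from 'zlib';

const raw = Buffer.from('{"snapshots":[]}');
maybeGunzip(raw).equals(raw); // true: no gzip magic, passed through untouched
maybeGunzip(gzipSync(raw)).equals(raw); // true: magic detected, gunzipSync applied
```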
package/src/snapshots/upload.ts
CHANGED

@@ -48,6 +48,7 @@ export async function uploadSnapshotToIndex(

   await store.save(getSnapshotIndexPath(metadata), Buffer.from(jsonStringify(snapshotsIndex, true)), {
     public: true, // Make the index publicly accessible
+    compress: false, // Ensure index.json is not gzipped
     metadata: { ['Cache-control']: 'no-store' }, // Do not cache object versions
   });
   return newSnapshotMetadata;