@aztec/blob-client 3.0.0-nightly.20251223
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +62 -0
- package/dest/archive/blobscan_archive_client.d.ts +147 -0
- package/dest/archive/blobscan_archive_client.d.ts.map +1 -0
- package/dest/archive/blobscan_archive_client.js +141 -0
- package/dest/archive/config.d.ts +7 -0
- package/dest/archive/config.d.ts.map +1 -0
- package/dest/archive/config.js +11 -0
- package/dest/archive/factory.d.ts +4 -0
- package/dest/archive/factory.d.ts.map +1 -0
- package/dest/archive/factory.js +7 -0
- package/dest/archive/index.d.ts +3 -0
- package/dest/archive/index.d.ts.map +1 -0
- package/dest/archive/index.js +2 -0
- package/dest/archive/instrumentation.d.ts +11 -0
- package/dest/archive/instrumentation.d.ts.map +1 -0
- package/dest/archive/instrumentation.js +33 -0
- package/dest/archive/interface.d.ts +13 -0
- package/dest/archive/interface.d.ts.map +1 -0
- package/dest/archive/interface.js +1 -0
- package/dest/blobstore/blob_store_test_suite.d.ts +3 -0
- package/dest/blobstore/blob_store_test_suite.d.ts.map +1 -0
- package/dest/blobstore/blob_store_test_suite.js +164 -0
- package/dest/blobstore/index.d.ts +3 -0
- package/dest/blobstore/index.d.ts.map +1 -0
- package/dest/blobstore/index.js +2 -0
- package/dest/blobstore/interface.d.ts +12 -0
- package/dest/blobstore/interface.d.ts.map +1 -0
- package/dest/blobstore/interface.js +1 -0
- package/dest/blobstore/memory_blob_store.d.ts +8 -0
- package/dest/blobstore/memory_blob_store.d.ts.map +1 -0
- package/dest/blobstore/memory_blob_store.js +24 -0
- package/dest/client/bin/index.d.ts +3 -0
- package/dest/client/bin/index.d.ts.map +1 -0
- package/dest/client/bin/index.js +30 -0
- package/dest/client/config.d.ts +50 -0
- package/dest/client/config.d.ts.map +1 -0
- package/dest/client/config.js +55 -0
- package/dest/client/factory.d.ts +39 -0
- package/dest/client/factory.d.ts.map +1 -0
- package/dest/client/factory.js +53 -0
- package/dest/client/http.d.ts +63 -0
- package/dest/client/http.d.ts.map +1 -0
- package/dest/client/http.js +536 -0
- package/dest/client/index.d.ts +6 -0
- package/dest/client/index.d.ts.map +1 -0
- package/dest/client/index.js +5 -0
- package/dest/client/interface.d.ts +23 -0
- package/dest/client/interface.d.ts.map +1 -0
- package/dest/client/interface.js +1 -0
- package/dest/client/local.d.ts +12 -0
- package/dest/client/local.d.ts.map +1 -0
- package/dest/client/local.js +18 -0
- package/dest/client/tests.d.ts +11 -0
- package/dest/client/tests.d.ts.map +1 -0
- package/dest/client/tests.js +65 -0
- package/dest/encoding/index.d.ts +15 -0
- package/dest/encoding/index.d.ts.map +1 -0
- package/dest/encoding/index.js +19 -0
- package/dest/filestore/factory.d.ts +50 -0
- package/dest/filestore/factory.d.ts.map +1 -0
- package/dest/filestore/factory.js +67 -0
- package/dest/filestore/filestore_blob_client.d.ts +56 -0
- package/dest/filestore/filestore_blob_client.d.ts.map +1 -0
- package/dest/filestore/filestore_blob_client.js +99 -0
- package/dest/filestore/index.d.ts +3 -0
- package/dest/filestore/index.d.ts.map +1 -0
- package/dest/filestore/index.js +2 -0
- package/dest/types/api.d.ts +65 -0
- package/dest/types/api.d.ts.map +1 -0
- package/dest/types/api.js +22 -0
- package/dest/types/blob_with_index.d.ts +25 -0
- package/dest/types/blob_with_index.d.ts.map +1 -0
- package/dest/types/blob_with_index.js +43 -0
- package/dest/types/index.d.ts +2 -0
- package/dest/types/index.d.ts.map +1 -0
- package/dest/types/index.js +1 -0
- package/package.json +95 -0
- package/src/archive/blobscan_archive_client.ts +178 -0
- package/src/archive/config.ts +14 -0
- package/src/archive/factory.ts +11 -0
- package/src/archive/fixtures/blobscan_get_blob_data.json +1 -0
- package/src/archive/fixtures/blobscan_get_block.json +56 -0
- package/src/archive/index.ts +2 -0
- package/src/archive/instrumentation.ts +41 -0
- package/src/archive/interface.ts +9 -0
- package/src/blobstore/blob_store_test_suite.ts +137 -0
- package/src/blobstore/index.ts +2 -0
- package/src/blobstore/interface.ts +12 -0
- package/src/blobstore/memory_blob_store.ts +31 -0
- package/src/client/bin/index.ts +35 -0
- package/src/client/config.ts +117 -0
- package/src/client/factory.ts +88 -0
- package/src/client/http.ts +620 -0
- package/src/client/index.ts +5 -0
- package/src/client/interface.ts +30 -0
- package/src/client/local.ts +32 -0
- package/src/client/tests.ts +78 -0
- package/src/encoding/index.ts +21 -0
- package/src/filestore/factory.ts +145 -0
- package/src/filestore/filestore_blob_client.ts +129 -0
- package/src/filestore/index.ts +2 -0
- package/src/types/api.ts +50 -0
- package/src/types/blob_with_index.ts +48 -0
- package/src/types/index.ts +1 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import { Attributes, Metrics, type TelemetryClient, type UpDownCounter, ValueType } from '@aztec/telemetry-client';
|
|
2
|
+
|
|
3
|
+
export class BlobArchiveClientInstrumentation {
|
|
4
|
+
private blockRequestCounter: UpDownCounter;
|
|
5
|
+
private blobRequestCounter: UpDownCounter;
|
|
6
|
+
private retrievedBlobs: UpDownCounter;
|
|
7
|
+
|
|
8
|
+
constructor(
|
|
9
|
+
client: TelemetryClient,
|
|
10
|
+
private httpHost: string,
|
|
11
|
+
name: string,
|
|
12
|
+
) {
|
|
13
|
+
const meter = client.getMeter(name);
|
|
14
|
+
this.blockRequestCounter = meter.createUpDownCounter(Metrics.BLOB_SINK_ARCHIVE_BLOCK_REQUEST_COUNT, {
|
|
15
|
+
description: 'Number of requests made to retrieve blocks from the blob archive',
|
|
16
|
+
valueType: ValueType.INT,
|
|
17
|
+
});
|
|
18
|
+
|
|
19
|
+
this.blobRequestCounter = meter.createUpDownCounter(Metrics.BLOB_SINK_ARCHIVE_BLOB_REQUEST_COUNT, {
|
|
20
|
+
description: 'Number of requests made to retrieve blobs from the blob archive',
|
|
21
|
+
valueType: ValueType.INT,
|
|
22
|
+
});
|
|
23
|
+
|
|
24
|
+
this.retrievedBlobs = meter.createUpDownCounter(Metrics.BLOB_SINK_ARCHIVE_BLOB_COUNT, {
|
|
25
|
+
description: 'Number of blobs retrieved from the blob archive',
|
|
26
|
+
valueType: ValueType.INT,
|
|
27
|
+
});
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
incRequest(type: 'blocks' | 'blobs', status: number) {
|
|
31
|
+
const counter = type === 'blocks' ? this.blockRequestCounter : this.blobRequestCounter;
|
|
32
|
+
counter.add(1, {
|
|
33
|
+
[Attributes.HTTP_RESPONSE_STATUS_CODE]: status,
|
|
34
|
+
[Attributes.HTTP_REQUEST_HOST]: this.httpHost,
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
incRetrievedBlobs(count: number) {
|
|
39
|
+
this.retrievedBlobs.add(count);
|
|
40
|
+
}
|
|
41
|
+
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { BlobJson } from '@aztec/blob-lib/types';
|
|
2
|
+
|
|
3
|
+
/** Interface to a blob archiving service (e.g. the Blobscan client in this package). */
export interface BlobArchiveClient {
  /** Returns the raw blob bytes for the given blob id, or `undefined` when unavailable. */
  getBlobData(id: string): Promise<Buffer | undefined>;
  /** Returns the blobs posted in the given L1 block, or `undefined` when unavailable. */
  getBlobsFromBlock(blockId: string): Promise<BlobJson[] | undefined>;
  /** Returns the latest L1 block known to the archive service. */
  getLatestBlock(): Promise<{ hash: string; number: number; slot: number }>;
  /** Returns the base URL of the archive service this client talks to. */
  getBaseUrl(): string;
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import { Blob } from '@aztec/blob-lib';
|
|
2
|
+
import { Fr } from '@aztec/foundation/curves/bn254';
|
|
3
|
+
|
|
4
|
+
import { BlobWithIndex } from '../types/index.js';
|
|
5
|
+
import type { BlobStore } from './interface.js';
|
|
6
|
+
|
|
7
|
+
/**
 * Shared conformance test suite for `BlobStore` implementations. Call from a
 * `describe` block, passing a factory that builds a fresh store.
 *
 * Covers: single/multi blob round-trips by versioned blob hash, missing
 * hashes, index preservation, subset retrieval, duplicate hashes in one
 * request, and overwrite-on-same-hash semantics.
 *
 * Note: several assertions assume `getBlobsByHashes` returns blobs in the
 * order the hashes were requested.
 */
export function describeBlobStore(getBlobStore: () => Promise<BlobStore>) {
  let blobStore: BlobStore;

  // Build a fresh store before each test so cases do not share state.
  beforeEach(async () => {
    blobStore = await getBlobStore();
  });

  it('should store and retrieve a blob by hash', async () => {
    // Create a test blob with random fields
    const testFields = [Fr.random(), Fr.random(), Fr.random()];
    const blob = Blob.fromFields(testFields);
    const blobWithIndex = new BlobWithIndex(blob, 0);
    const blobHash = blob.getEthVersionedBlobHash();

    // Store the blob
    await blobStore.addBlobs([blobWithIndex]);

    // Retrieve the blob by hash
    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash]);

    // Verify the blob was retrieved and matches
    expect(retrievedBlobs.length).toBe(1);
    expect(retrievedBlobs[0].blob).toEqual(blob);
  });

  it('should handle multiple blobs stored and retrieved by their hashes', async () => {
    // Create two different blobs
    const blob1 = Blob.fromFields([Fr.random(), Fr.random()]);
    const blob2 = Blob.fromFields([Fr.random(), Fr.random(), Fr.random()]);
    const blobWithIndex1 = new BlobWithIndex(blob1, 0);
    const blobWithIndex2 = new BlobWithIndex(blob2, 1);

    const blobHash1 = blob1.getEthVersionedBlobHash();
    const blobHash2 = blob2.getEthVersionedBlobHash();

    // Store both blobs
    await blobStore.addBlobs([blobWithIndex1, blobWithIndex2]);

    // Retrieve and verify both blobs (results expected in request order)
    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash1, blobHash2]);

    expect(retrievedBlobs.length).toBe(2);
    expect(retrievedBlobs[0].blob).toEqual(blob1);
    expect(retrievedBlobs[1].blob).toEqual(blob2);
  });

  it('should return empty array for non-existent blob hash', async () => {
    // Create a random hash that doesn't exist
    const nonExistentHash = Buffer.alloc(32);
    nonExistentHash.fill(0xff);

    // Missing hashes are skipped rather than raising an error.
    const retrievedBlobs = await blobStore.getBlobsByHashes([nonExistentHash]);
    expect(retrievedBlobs).toEqual([]);
  });

  it('should handle storing blobs with different indices', async () => {
    // Create blobs with different indices
    const blob1 = Blob.fromFields([Fr.random()]);
    const blob2 = Blob.fromFields([Fr.random()]);
    const blobWithIndex1 = new BlobWithIndex(blob1, 0);
    const blobWithIndex2 = new BlobWithIndex(blob2, 1);

    await blobStore.addBlobs([blobWithIndex1, blobWithIndex2]);

    const blobHash1 = blob1.getEthVersionedBlobHash();
    const blobHash2 = blob2.getEthVersionedBlobHash();

    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash1, blobHash2]);

    // The index stored alongside each blob must round-trip unchanged.
    expect(retrievedBlobs[0].index).toBe(0);
    expect(retrievedBlobs[1].index).toBe(1);
  });

  it('should handle retrieving subset of stored blobs', async () => {
    // Store multiple blobs
    const blob1 = Blob.fromFields([Fr.random()]);
    const blob2 = Blob.fromFields([Fr.random()]);
    const blob3 = Blob.fromFields([Fr.random()]);

    await blobStore.addBlobs([new BlobWithIndex(blob1, 0), new BlobWithIndex(blob2, 1), new BlobWithIndex(blob3, 2)]);

    // Retrieve only some of them
    const blobHash1 = blob1.getEthVersionedBlobHash();
    const blobHash3 = blob3.getEthVersionedBlobHash();

    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash1, blobHash3]);

    expect(retrievedBlobs.length).toBe(2);
    expect(retrievedBlobs[0].blob).toEqual(blob1);
    expect(retrievedBlobs[1].blob).toEqual(blob3);
  });

  it('should handle duplicate blob hashes in request', async () => {
    const blob = Blob.fromFields([Fr.random()]);
    const blobWithIndex = new BlobWithIndex(blob, 0);
    const blobHash = blob.getEthVersionedBlobHash();

    await blobStore.addBlobs([blobWithIndex]);

    // Request the same blob hash multiple times
    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash, blobHash]);

    // Implementation may return duplicates or deduplicate - both are valid
    expect(retrievedBlobs.length).toBeGreaterThanOrEqual(1);
    expect(retrievedBlobs[0].blob).toEqual(blob);
  });

  it('should overwrite blob when storing with same hash', async () => {
    // Create two blobs that will have the same hash (same content)
    const fields = [Fr.random(), Fr.random()];
    const blob1 = Blob.fromFields(fields);
    const blob2 = Blob.fromFields(fields);

    // Store with different indices
    const blobWithIndex1 = new BlobWithIndex(blob1, 0);
    const blobWithIndex2 = new BlobWithIndex(blob2, 5);

    const blobHash = blob1.getEthVersionedBlobHash();

    // Store first blob
    await blobStore.addBlobs([blobWithIndex1]);

    // Overwrite with second blob (same hash, different index)
    await blobStore.addBlobs([blobWithIndex2]);

    // Retrieve and verify it's the second blob (with index 5)
    const retrievedBlobs = await blobStore.getBlobsByHashes([blobHash]);
    expect(retrievedBlobs.length).toBe(1);
    expect(retrievedBlobs[0].index).toBe(5);
  });
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { BlobWithIndex } from '../types/index.js';
|
|
2
|
+
|
|
3
|
+
/** A store of blobs keyed by their Eth versioned blob hash. */
export interface BlobStore {
  /**
   * Get blobs by their hashes. Hashes with no stored blob are skipped, so the
   * result may be shorter than the input (see MemoryBlobStore and the shared
   * test suite in this package).
   */
  getBlobsByHashes: (blobHashes: Buffer[]) => Promise<BlobWithIndex[]>;
  /**
   * Add blobs to the store, indexed by their hashes. Storing a blob whose
   * hash already exists replaces the previous entry.
   */
  addBlobs: (blobs: BlobWithIndex[]) => Promise<void>;
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { bufferToHex } from '@aztec/foundation/string';
|
|
2
|
+
|
|
3
|
+
import { BlobWithIndex } from '../types/index.js';
|
|
4
|
+
import type { BlobStore } from './interface.js';
|
|
5
|
+
|
|
6
|
+
export class MemoryBlobStore implements BlobStore {
|
|
7
|
+
private blobs: Map<string, Buffer> = new Map();
|
|
8
|
+
|
|
9
|
+
public getBlobsByHashes(blobHashes: Buffer[]): Promise<BlobWithIndex[]> {
|
|
10
|
+
const results: BlobWithIndex[] = [];
|
|
11
|
+
|
|
12
|
+
for (const blobHash of blobHashes) {
|
|
13
|
+
const key = bufferToHex(blobHash);
|
|
14
|
+
const blobBuffer = this.blobs.get(key);
|
|
15
|
+
if (blobBuffer) {
|
|
16
|
+
results.push(BlobWithIndex.fromBuffer(blobBuffer));
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
return Promise.resolve(results);
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
public addBlobs(blobs: BlobWithIndex[]): Promise<void> {
|
|
24
|
+
for (const blob of blobs) {
|
|
25
|
+
const blobHash = blob.blob.getEthVersionedBlobHash();
|
|
26
|
+
const key = bufferToHex(blobHash);
|
|
27
|
+
this.blobs.set(key, blob.toBuffer());
|
|
28
|
+
}
|
|
29
|
+
return Promise.resolve();
|
|
30
|
+
}
|
|
31
|
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/* eslint-disable no-console */
|
|
3
|
+
import { createLogger } from '@aztec/foundation/log';
|
|
4
|
+
import { bufferToHex, hexToBuffer } from '@aztec/foundation/string';
|
|
5
|
+
|
|
6
|
+
import { getBlobClientConfigFromEnv } from '../config.js';
|
|
7
|
+
import { createBlobClient } from '../factory.js';
|
|
8
|
+
|
|
9
|
+
async function main() {
|
|
10
|
+
const logger = createLogger('blob-client');
|
|
11
|
+
const blockHash = process.argv[2];
|
|
12
|
+
if (!blockHash) {
|
|
13
|
+
logger.error('Please provide a block hash as an argument.');
|
|
14
|
+
process.exit(1);
|
|
15
|
+
}
|
|
16
|
+
const blobHashes = process.argv.slice(3).map(hexToBuffer);
|
|
17
|
+
logger.info(`Fetching blobs for block hash ${blockHash}`);
|
|
18
|
+
if (blobHashes.length > 0) {
|
|
19
|
+
logger.info(`Filtering by blob hashes ${blobHashes.map(bufferToHex).join(', ')}`);
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
const blobClient = createBlobClient(getBlobClientConfigFromEnv());
|
|
23
|
+
const blobs = await blobClient.getBlobSidecar(blockHash, blobHashes);
|
|
24
|
+
logger.info(`Got ${blobs.length} blobs`);
|
|
25
|
+
for (const blob of blobs) {
|
|
26
|
+
console.log(blob.toJSON());
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
// Example usage:
|
|
31
|
+
// $ L1_CHAIN_ID=11155111 LOG_LEVEL=trace yarn blob-client 0x7d81980a40426c40544f0f729ada953be406730b877b5865d6cdc35cc8f9c84e 0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014
|
|
32
|
+
main().catch(err => {
|
|
33
|
+
console.error(err);
|
|
34
|
+
process.exit(1);
|
|
35
|
+
});
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import {
|
|
2
|
+
type ConfigMappingsType,
|
|
3
|
+
SecretValue,
|
|
4
|
+
booleanConfigHelper,
|
|
5
|
+
getConfigFromMappings,
|
|
6
|
+
} from '@aztec/foundation/config';
|
|
7
|
+
|
|
8
|
+
import { type BlobArchiveApiConfig, blobArchiveApiConfigMappings } from '../archive/config.js';
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* The configuration for the blob client
|
|
12
|
+
*/
|
|
13
|
+
/**
 * The configuration for the blob client. Values are typically populated from
 * environment variables via the mapping defined alongside this interface.
 */
export interface BlobClientConfig extends BlobArchiveApiConfig {
  /**
   * List of URLs for L1 RPC Execution clients
   */
  l1RpcUrls?: string[];

  /**
   * List of URLs of the Ethereum consensus nodes that services will connect to (comma separated)
   */
  l1ConsensusHostUrls?: string[];

  /**
   * List of API keys for the corresponding L1 consensus client URLs. Added at the end of the URL as "?key=<api-key>" unless a header is defined
   */
  l1ConsensusHostApiKeys?: SecretValue<string>[];

  /**
   * List of header names for the corresponding L1 consensus client API keys, if needed. Added as "<api-key-header>: <api-key>"
   */
  l1ConsensusHostApiKeyHeaders?: string[];

  /**
   * The map size to be provided to LMDB for each blob sink DB, optional, will inherit from the general dataStoreMapSizeKb if not specified
   */
  blobSinkMapSizeKb?: number;

  /**
   * Whether to allow having no blob sources configured during startup
   */
  blobAllowEmptySources?: boolean;

  /**
   * URLs for reading blobs from filestore (s3://, gs://, file://, https://). Tried in order until blobs are found.
   */
  blobFileStoreUrls?: string[];

  /**
   * URL for uploading blobs to filestore (s3://, gs://, file://)
   */
  blobFileStoreUploadUrl?: string;
}
|
|
54
|
+
|
|
55
|
+
/**
 * Environment-variable mappings used to populate a BlobClientConfig via
 * getConfigFromMappings. Archive API settings are merged in from
 * blobArchiveApiConfigMappings.
 */
export const blobClientConfigMapping: ConfigMappingsType<BlobClientConfig> = {
  l1RpcUrls: {
    env: 'ETHEREUM_HOSTS',
    description: 'List of URLs for L1 RPC Execution clients',
    // NOTE(review): empty entries are not filtered here (unlike blobFileStoreUrls
    // below), so an empty env var parses to [''] — confirm this is intended.
    parseEnv: (val: string) => val.split(',').map(url => url.trim()),
  },
  l1ConsensusHostUrls: {
    env: 'L1_CONSENSUS_HOST_URLS',
    description: 'List of URLs of the Ethereum consensus nodes that services will connect to (comma separated)',
    // Trailing slashes are stripped so paths can be appended uniformly.
    parseEnv: (val: string) => val.split(',').map(url => url.trim().replace(/\/$/, '')),
  },
  l1ConsensusHostApiKeys: {
    env: 'L1_CONSENSUS_HOST_API_KEYS',
    description:
      'List of API keys for the corresponding L1 consensus clients, if needed. Added to the end of the corresponding URL as "?key=<api-key>" unless a header is defined',
    // Keys are wrapped in SecretValue (presumably to keep them out of logged
    // config — see @aztec/foundation/config).
    parseEnv: (val: string) => val.split(',').map(key => new SecretValue(key.trim())),
  },
  l1ConsensusHostApiKeyHeaders: {
    env: 'L1_CONSENSUS_HOST_API_KEY_HEADERS',
    description:
      'List of header names for the corresponding L1 consensus client API keys, if needed. Added to the corresponding request as "<api-key-header>: <api-key>"',
    parseEnv: (val: string) => val.split(',').map(url => url.trim()),
  },
  blobSinkMapSizeKb: {
    env: 'BLOB_SINK_MAP_SIZE_KB',
    description: 'The maximum possible size of the blob sink DB in KB. Overwrites the general dataStoreMapSizeKb.',
    parseEnv: (val: string | undefined) => (val ? +val : undefined),
  },
  blobAllowEmptySources: {
    env: 'BLOB_ALLOW_EMPTY_SOURCES',
    description: 'Whether to allow having no blob sources configured during startup',
    // booleanConfigHelper supplies the parser and a default of false.
    ...booleanConfigHelper(false),
  },
  blobFileStoreUrls: {
    env: 'BLOB_FILE_STORE_URLS',
    description: 'URLs for filestore blob archive, comma-separated. Tried in order until blobs are found.',
    parseEnv: (val: string) =>
      val
        .split(',')
        .map(url => url.trim())
        .filter(url => url.length > 0),
  },
  blobFileStoreUploadUrl: {
    env: 'BLOB_FILE_STORE_UPLOAD_URL',
    description: 'URL for uploading blobs to filestore (s3://, gs://, file://)',
  },
  ...blobArchiveApiConfigMappings,
};
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Returns the blob client configuration from the environment variables.
|
|
106
|
+
* @returns The blob client configuration.
|
|
107
|
+
*/
|
|
108
|
+
export function getBlobClientConfigFromEnv(): BlobClientConfig {
|
|
109
|
+
return getConfigFromMappings<BlobClientConfig>(blobClientConfigMapping);
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
/**
|
|
113
|
+
* Returns whether the given blob client config has any remote sources defined.
|
|
114
|
+
*/
|
|
115
|
+
export function hasRemoteBlobSources(config: BlobClientConfig = {}): boolean {
|
|
116
|
+
return !!(config.l1ConsensusHostUrls?.length || config.archiveApiUrl || config.blobFileStoreUrls?.length);
|
|
117
|
+
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { type Logger, createLogger } from '@aztec/foundation/log';
|
|
2
|
+
|
|
3
|
+
import { MemoryBlobStore } from '../blobstore/memory_blob_store.js';
|
|
4
|
+
import {
|
|
5
|
+
type BlobFileStoreMetadata,
|
|
6
|
+
createReadOnlyFileStoreBlobClients,
|
|
7
|
+
createWritableFileStoreBlobClient,
|
|
8
|
+
} from '../filestore/factory.js';
|
|
9
|
+
import type { FileStoreBlobClient } from '../filestore/filestore_blob_client.js';
|
|
10
|
+
import { type BlobClientConfig, hasRemoteBlobSources } from './config.js';
|
|
11
|
+
import { HttpBlobClient } from './http.js';
|
|
12
|
+
import type { BlobClientInterface } from './interface.js';
|
|
13
|
+
import { LocalBlobClient } from './local.js';
|
|
14
|
+
|
|
15
|
+
/** Optional dependencies injected into createBlobClient. */
export interface CreateBlobClientDeps {
  /** Logger to use; a default 'blob-client' logger is created if omitted. */
  logger?: Logger;
  /** FileStore clients for reading blobs */
  fileStoreClients?: FileStoreBlobClient[];
  /** FileStore client for uploading blobs */
  fileStoreUploadClient?: FileStoreBlobClient;
}
|
|
22
|
+
|
|
23
|
+
export function createBlobClient(config?: BlobClientConfig, deps?: CreateBlobClientDeps): BlobClientInterface {
|
|
24
|
+
const log = deps?.logger ?? createLogger('blob-client');
|
|
25
|
+
if (!hasRemoteBlobSources(config)) {
|
|
26
|
+
log.info(`Creating local blob client.`);
|
|
27
|
+
const blobStore = new MemoryBlobStore();
|
|
28
|
+
return new LocalBlobClient(blobStore);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
log.info(`Creating blob client.`, {
|
|
32
|
+
l1ConsensusHostUrls: config?.l1ConsensusHostUrls,
|
|
33
|
+
archiveApiUrl: config?.archiveApiUrl,
|
|
34
|
+
fileStoreCount: deps?.fileStoreClients?.length ?? 0,
|
|
35
|
+
hasFileStoreUpload: !!deps?.fileStoreUploadClient,
|
|
36
|
+
});
|
|
37
|
+
return new HttpBlobClient(config, {
|
|
38
|
+
logger: log,
|
|
39
|
+
fileStoreClients: deps?.fileStoreClients,
|
|
40
|
+
fileStoreUploadClient: deps?.fileStoreUploadClient,
|
|
41
|
+
});
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Configuration required for creating a blob client with file stores.
|
|
46
|
+
* Extends BlobClientConfig with the metadata needed to construct file store paths.
|
|
47
|
+
*/
|
|
48
|
+
/**
 * Configuration required for creating a blob client with file stores.
 * Extends BlobClientConfig with the metadata needed to construct file store paths.
 */
export interface BlobClientWithFileStoresConfig extends BlobClientConfig {
  /** L1 chain id, used as part of the file store path. */
  l1ChainId: number;
  /** Rollup version, used as part of the file store path. */
  rollupVersion: number;
  /** L1 contract addresses; only the rollup address (stringified) is used here. */
  l1Contracts: { rollupAddress: { toString(): string } };
}
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Creates a BlobClient with FileStore clients for reading and uploading blobs.
|
|
56
|
+
* This is a convenience function that handles the common pattern of:
|
|
57
|
+
* 1. Building BlobFileStoreMetadata from config
|
|
58
|
+
* 2. Creating read-only FileStore clients
|
|
59
|
+
* 3. Creating a writable FileStore client for uploads
|
|
60
|
+
* 4. Creating the BlobClient with these dependencies
|
|
61
|
+
*
|
|
62
|
+
* @param config - Configuration containing blob client settings and chain metadata
|
|
63
|
+
* @param logger - Optional logger for the blob client
|
|
64
|
+
* @returns A BlobClientInterface configured with file store support
|
|
65
|
+
*/
|
|
66
|
+
export async function createBlobClientWithFileStores(
|
|
67
|
+
config: BlobClientWithFileStoresConfig,
|
|
68
|
+
logger?: Logger,
|
|
69
|
+
): Promise<BlobClientInterface> {
|
|
70
|
+
const log = logger ?? createLogger('blob-client');
|
|
71
|
+
|
|
72
|
+
const fileStoreMetadata: BlobFileStoreMetadata = {
|
|
73
|
+
l1ChainId: config.l1ChainId,
|
|
74
|
+
rollupVersion: config.rollupVersion,
|
|
75
|
+
rollupAddress: config.l1Contracts.rollupAddress.toString(),
|
|
76
|
+
};
|
|
77
|
+
|
|
78
|
+
const [fileStoreClients, fileStoreUploadClient] = await Promise.all([
|
|
79
|
+
createReadOnlyFileStoreBlobClients(config.blobFileStoreUrls, fileStoreMetadata, log),
|
|
80
|
+
createWritableFileStoreBlobClient(config.blobFileStoreUploadUrl, fileStoreMetadata, log),
|
|
81
|
+
]);
|
|
82
|
+
|
|
83
|
+
return createBlobClient(config, {
|
|
84
|
+
logger: log,
|
|
85
|
+
fileStoreClients,
|
|
86
|
+
fileStoreUploadClient,
|
|
87
|
+
});
|
|
88
|
+
}
|