@aztec/blob-client 3.0.0-nightly.20251223
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +62 -0
- package/dest/archive/blobscan_archive_client.d.ts +147 -0
- package/dest/archive/blobscan_archive_client.d.ts.map +1 -0
- package/dest/archive/blobscan_archive_client.js +141 -0
- package/dest/archive/config.d.ts +7 -0
- package/dest/archive/config.d.ts.map +1 -0
- package/dest/archive/config.js +11 -0
- package/dest/archive/factory.d.ts +4 -0
- package/dest/archive/factory.d.ts.map +1 -0
- package/dest/archive/factory.js +7 -0
- package/dest/archive/index.d.ts +3 -0
- package/dest/archive/index.d.ts.map +1 -0
- package/dest/archive/index.js +2 -0
- package/dest/archive/instrumentation.d.ts +11 -0
- package/dest/archive/instrumentation.d.ts.map +1 -0
- package/dest/archive/instrumentation.js +33 -0
- package/dest/archive/interface.d.ts +13 -0
- package/dest/archive/interface.d.ts.map +1 -0
- package/dest/archive/interface.js +1 -0
- package/dest/blobstore/blob_store_test_suite.d.ts +3 -0
- package/dest/blobstore/blob_store_test_suite.d.ts.map +1 -0
- package/dest/blobstore/blob_store_test_suite.js +164 -0
- package/dest/blobstore/index.d.ts +3 -0
- package/dest/blobstore/index.d.ts.map +1 -0
- package/dest/blobstore/index.js +2 -0
- package/dest/blobstore/interface.d.ts +12 -0
- package/dest/blobstore/interface.d.ts.map +1 -0
- package/dest/blobstore/interface.js +1 -0
- package/dest/blobstore/memory_blob_store.d.ts +8 -0
- package/dest/blobstore/memory_blob_store.d.ts.map +1 -0
- package/dest/blobstore/memory_blob_store.js +24 -0
- package/dest/client/bin/index.d.ts +3 -0
- package/dest/client/bin/index.d.ts.map +1 -0
- package/dest/client/bin/index.js +30 -0
- package/dest/client/config.d.ts +50 -0
- package/dest/client/config.d.ts.map +1 -0
- package/dest/client/config.js +55 -0
- package/dest/client/factory.d.ts +39 -0
- package/dest/client/factory.d.ts.map +1 -0
- package/dest/client/factory.js +53 -0
- package/dest/client/http.d.ts +63 -0
- package/dest/client/http.d.ts.map +1 -0
- package/dest/client/http.js +536 -0
- package/dest/client/index.d.ts +6 -0
- package/dest/client/index.d.ts.map +1 -0
- package/dest/client/index.js +5 -0
- package/dest/client/interface.d.ts +23 -0
- package/dest/client/interface.d.ts.map +1 -0
- package/dest/client/interface.js +1 -0
- package/dest/client/local.d.ts +12 -0
- package/dest/client/local.d.ts.map +1 -0
- package/dest/client/local.js +18 -0
- package/dest/client/tests.d.ts +11 -0
- package/dest/client/tests.d.ts.map +1 -0
- package/dest/client/tests.js +65 -0
- package/dest/encoding/index.d.ts +15 -0
- package/dest/encoding/index.d.ts.map +1 -0
- package/dest/encoding/index.js +19 -0
- package/dest/filestore/factory.d.ts +50 -0
- package/dest/filestore/factory.d.ts.map +1 -0
- package/dest/filestore/factory.js +67 -0
- package/dest/filestore/filestore_blob_client.d.ts +56 -0
- package/dest/filestore/filestore_blob_client.d.ts.map +1 -0
- package/dest/filestore/filestore_blob_client.js +99 -0
- package/dest/filestore/index.d.ts +3 -0
- package/dest/filestore/index.d.ts.map +1 -0
- package/dest/filestore/index.js +2 -0
- package/dest/types/api.d.ts +65 -0
- package/dest/types/api.d.ts.map +1 -0
- package/dest/types/api.js +22 -0
- package/dest/types/blob_with_index.d.ts +25 -0
- package/dest/types/blob_with_index.d.ts.map +1 -0
- package/dest/types/blob_with_index.js +43 -0
- package/dest/types/index.d.ts +2 -0
- package/dest/types/index.d.ts.map +1 -0
- package/dest/types/index.js +1 -0
- package/package.json +95 -0
- package/src/archive/blobscan_archive_client.ts +178 -0
- package/src/archive/config.ts +14 -0
- package/src/archive/factory.ts +11 -0
- package/src/archive/fixtures/blobscan_get_blob_data.json +1 -0
- package/src/archive/fixtures/blobscan_get_block.json +56 -0
- package/src/archive/index.ts +2 -0
- package/src/archive/instrumentation.ts +41 -0
- package/src/archive/interface.ts +9 -0
- package/src/blobstore/blob_store_test_suite.ts +137 -0
- package/src/blobstore/index.ts +2 -0
- package/src/blobstore/interface.ts +12 -0
- package/src/blobstore/memory_blob_store.ts +31 -0
- package/src/client/bin/index.ts +35 -0
- package/src/client/config.ts +117 -0
- package/src/client/factory.ts +88 -0
- package/src/client/http.ts +620 -0
- package/src/client/index.ts +5 -0
- package/src/client/interface.ts +30 -0
- package/src/client/local.ts +32 -0
- package/src/client/tests.ts +78 -0
- package/src/encoding/index.ts +21 -0
- package/src/filestore/factory.ts +145 -0
- package/src/filestore/filestore_blob_client.ts +129 -0
- package/src/filestore/index.ts +2 -0
- package/src/types/api.ts +50 -0
- package/src/types/blob_with_index.ts +48 -0
- package/src/types/index.ts +1 -0

package/dest/client/http.js
@@ -0,0 +1,536 @@
import { Blob, computeEthVersionedBlobHash } from '@aztec/blob-lib';
import { shuffle } from '@aztec/foundation/array';
import { createLogger } from '@aztec/foundation/log';
import { makeBackoff, retry } from '@aztec/foundation/retry';
import { bufferToHex, hexToBuffer } from '@aztec/foundation/string';
import { createPublicClient, fallback, http } from 'viem';
import { createBlobArchiveClient } from '../archive/factory.js';
import { BlobWithIndex } from '../types/blob_with_index.js';
import { getBlobClientConfigFromEnv } from './config.js';
export class HttpBlobClient {
    opts;
    log;
    config;
    archiveClient;
    fetch;
    fileStoreClients;
    fileStoreUploadClient;
    disabled;
    constructor(config, opts = {}){
        this.opts = opts;
        this.disabled = false;
        this.config = config ?? getBlobClientConfigFromEnv();
        this.archiveClient = opts.archiveClient ?? createBlobArchiveClient(this.config);
        this.log = opts.logger ?? createLogger('blob-client:client');
        this.fileStoreClients = opts.fileStoreClients ?? [];
        this.fileStoreUploadClient = opts.fileStoreUploadClient;
        if (this.fileStoreUploadClient && !opts.onBlobsFetched) {
            this.opts.onBlobsFetched = (blobs)=>{
                this.uploadBlobsToFileStore(blobs);
            };
        }
        this.fetch = async (...args)=>{
            return await retry(()=>fetch(...args), // eslint-disable-next-line @typescript-eslint/no-base-to-string
            `Fetching ${args[0]}`, makeBackoff([
                1,
                1,
                3
            ]), this.log, /*failSilently=*/ true);
        };
    }
    /**
     * Upload fetched blobs to filestore (fire-and-forget).
     * Called automatically when blobs are fetched from any source.
     */ uploadBlobsToFileStore(blobs) {
        if (!this.fileStoreUploadClient) {
            return;
        }
        void this.fileStoreUploadClient.saveBlobs(blobs, true).catch((err)=>{
            this.log.warn(`Failed to upload ${blobs.length} blobs to filestore`, err);
        });
    }
    /**
     * Disables or enables blob storage operations.
     * When disabled, getBlobSidecar returns empty arrays and sendBlobsToFilestore returns false.
     * Useful for testing scenarios where blob storage failure needs to be simulated.
     * @param value - True to disable blob storage, false to enable
     */ setDisabled(value) {
        this.disabled = value;
        this.log.info(`Blob storage ${value ? 'disabled' : 'enabled'}`);
    }
    async testSources() {
        const { l1ConsensusHostUrls } = this.config;
        const archiveUrl = this.archiveClient?.getBaseUrl();
        this.log.info(`Testing configured blob sources`, {
            l1ConsensusHostUrls,
            archiveUrl
        });
        let successfulSourceCount = 0;
        if (l1ConsensusHostUrls && l1ConsensusHostUrls.length > 0) {
            for(let l1ConsensusHostIndex = 0; l1ConsensusHostIndex < l1ConsensusHostUrls.length; l1ConsensusHostIndex++){
                const l1ConsensusHostUrl = l1ConsensusHostUrls[l1ConsensusHostIndex];
                try {
                    const { url, ...options } = getBeaconNodeFetchOptions(`${l1ConsensusHostUrl}/eth/v1/beacon/headers`, this.config, l1ConsensusHostIndex);
                    const res = await this.fetch(url, options);
                    if (res.ok) {
                        this.log.info(`L1 consensus host is reachable`, {
                            l1ConsensusHostUrl
                        });
                        successfulSourceCount++;
                    } else {
                        this.log.error(`Failure reaching L1 consensus host: ${res.statusText} (${res.status})`, {
                            l1ConsensusHostUrl
                        });
                    }
                } catch (err) {
                    this.log.error(`Error reaching L1 consensus host`, err, {
                        l1ConsensusHostUrl
                    });
                }
            }
        } else {
            this.log.warn('No L1 consensus host urls configured');
        }
        if (this.archiveClient) {
            try {
                const latest = await this.archiveClient.getLatestBlock();
                this.log.info(`Archive client is reachable and synced to L1 block ${latest.number}`, {
                    latest,
                    archiveUrl
                });
                successfulSourceCount++;
            } catch (err) {
                this.log.error(`Error reaching archive client`, err, {
                    archiveUrl
                });
            }
        } else {
            this.log.warn('No archive client configured');
        }
        if (this.fileStoreClients.length > 0) {
            for (const fileStoreClient of this.fileStoreClients){
                try {
                    const accessible = await fileStoreClient.testConnection();
                    if (accessible) {
                        this.log.info(`FileStore is reachable`, {
                            url: fileStoreClient.getBaseUrl()
                        });
                        successfulSourceCount++;
                    } else {
                        this.log.warn(`FileStore is not accessible`, {
                            url: fileStoreClient.getBaseUrl()
                        });
                    }
                } catch (err) {
                    this.log.error(`Error reaching filestore`, err, {
                        url: fileStoreClient.getBaseUrl()
                    });
                }
            }
        }
        if (successfulSourceCount === 0) {
            if (this.config.blobAllowEmptySources) {
                this.log.warn('No blob sources are reachable');
            } else {
                throw new Error('No blob sources are reachable');
            }
        }
    }
    async sendBlobsToFilestore(blobs) {
        if (this.disabled) {
            this.log.warn('Blob storage is disabled, not uploading blobs');
            return false;
        }
        if (!this.fileStoreUploadClient) {
            this.log.verbose('No filestore upload configured');
            return false;
        }
        this.log.verbose(`Uploading ${blobs.length} blobs to filestore`);
        try {
            await this.fileStoreUploadClient.saveBlobs(blobs, true);
            return true;
        } catch (err) {
            this.log.error('Failed to upload blobs to filestore', err);
            return false;
        }
    }
    /**
     * Get the blob sidecar
     *
     * If requesting from the blob client, we send the blobkHash
     * If requesting from the beacon node, we send the slot number
     *
     * Source ordering depends on sync state:
     * - Historical sync: blob client → FileStore → L1 consensus → Archive
     * - Near tip sync: blob client → FileStore → L1 consensus → FileStore (with retries) → Archive (eg blobscan)
     *
     * @param blockHash - The block hash
     * @param blobHashes - The blob hashes to fetch
     * @param indices - The indices of the blobs to get
     * @param opts - Options including isHistoricalSync flag
     * @returns The blobs
     */ async getBlobSidecar(blockHash, blobHashes, indices, opts) {
        if (this.disabled) {
            this.log.warn('Blob storage is disabled, returning empty blob sidecar');
            return [];
        }
        const isHistoricalSync = opts?.isHistoricalSync ?? false;
        // Accumulate blobs across sources, preserving order and handling duplicates
        // resultBlobs[i] will contain the blob for blobHashes[i], or undefined if not yet found
        const resultBlobs = new Array(blobHashes.length).fill(undefined);
        // Helper to get missing blob hashes that we still need to fetch
        const getMissingBlobHashes = ()=>blobHashes.map((bh, i)=>resultBlobs[i] === undefined ? bh : undefined).filter((bh)=>bh !== undefined);
        // Return the result, ignoring any undefined ones
        const getFilledBlobs = ()=>resultBlobs.filter((b)=>b !== undefined);
        // Helper to fill in results from fetched blobs
        const fillResults = (fetchedBlobs)=>{
            const blobs = processFetchedBlobs(fetchedBlobs, blobHashes, this.log);
            // Fill in any missing positions with matching blobs
            for(let i = 0; i < blobHashes.length; i++){
                if (resultBlobs[i] === undefined) {
                    resultBlobs[i] = blobs[i];
                }
            }
            return getFilledBlobs();
        };
        // Fire callback when returning blobs (fire-and-forget)
        const returnWithCallback = (blobs)=>{
            if (blobs.length > 0 && this.opts.onBlobsFetched) {
                void Promise.resolve().then(()=>this.opts.onBlobsFetched(blobs));
            }
            return blobs;
        };
        const { l1ConsensusHostUrls } = this.config;
        const ctx = {
            blockHash,
            blobHashes: blobHashes.map(bufferToHex),
            indices
        };
        // Try filestore (quick, no retries) - useful for both historical and near-tip sync
        if (this.fileStoreClients.length > 0 && getMissingBlobHashes().length > 0) {
            await this.tryFileStores(getMissingBlobHashes, fillResults, ctx);
            if (getMissingBlobHashes().length === 0) {
                return returnWithCallback(getFilledBlobs());
            }
        }
        const missingAfterSink = getMissingBlobHashes();
        if (missingAfterSink.length > 0 && l1ConsensusHostUrls && l1ConsensusHostUrls.length > 0) {
            // The beacon api can query by slot number, so we get that first
            const consensusCtx = {
                l1ConsensusHostUrls,
                ...ctx
            };
            this.log.trace(`Attempting to get slot number for block hash`, consensusCtx);
            const slotNumber = await this.getSlotNumber(blockHash);
            this.log.debug(`Got slot number ${slotNumber} from consensus host for querying blobs`, consensusCtx);
            if (slotNumber) {
                let l1ConsensusHostUrl;
                for(let l1ConsensusHostIndex = 0; l1ConsensusHostIndex < l1ConsensusHostUrls.length; l1ConsensusHostIndex++){
                    const missingHashes = getMissingBlobHashes();
                    if (missingHashes.length === 0) {
                        break;
                    }
                    l1ConsensusHostUrl = l1ConsensusHostUrls[l1ConsensusHostIndex];
                    this.log.trace(`Attempting to get ${missingHashes.length} blobs from consensus host`, {
                        slotNumber,
                        l1ConsensusHostUrl,
                        ...ctx
                    });
                    const blobs = await this.getBlobsFromHost(l1ConsensusHostUrl, slotNumber, indices, l1ConsensusHostIndex);
                    const result = fillResults(blobs);
                    this.log.debug(`Got ${blobs.length} blobs from consensus host (total: ${result.length}/${blobHashes.length})`, {
                        slotNumber,
                        l1ConsensusHostUrl,
                        ...ctx
                    });
                    if (result.length === blobHashes.length) {
                        return returnWithCallback(result);
                    }
                }
            }
        }
        // For near-tip sync, retry filestores with backoff (eventual consistency)
        // This handles the case where blobs are still being uploaded by other validators
        if (!isHistoricalSync && this.fileStoreClients.length > 0 && getMissingBlobHashes().length > 0) {
            try {
                await retry(async ()=>{
                    await this.tryFileStores(getMissingBlobHashes, fillResults, ctx);
                    if (getMissingBlobHashes().length > 0) {
                        throw new Error('Still missing blobs from filestores');
                    }
                }, 'filestore blob retrieval', makeBackoff([
                    1,
                    1,
                    2
                ]), this.log, true);
                return returnWithCallback(getFilledBlobs());
            } catch {
                // Exhausted retries, continue to archive fallback
            }
        }
        const missingAfterConsensus = getMissingBlobHashes();
        if (missingAfterConsensus.length > 0 && this.archiveClient) {
            const archiveCtx = {
                archiveUrl: this.archiveClient.getBaseUrl(),
                ...ctx
            };
            this.log.trace(`Attempting to get ${missingAfterConsensus.length} blobs from archive`, archiveCtx);
            const allBlobs = await this.archiveClient.getBlobsFromBlock(blockHash);
            if (!allBlobs) {
                this.log.debug('No blobs found from archive client', archiveCtx);
            } else {
                this.log.trace(`Got ${allBlobs.length} blobs from archive client before filtering`, archiveCtx);
                const result = fillResults(allBlobs);
                this.log.debug(`Got ${allBlobs.length} blobs from archive client (total: ${result.length}/${blobHashes.length})`, archiveCtx);
                if (result.length === blobHashes.length) {
                    return returnWithCallback(result);
                }
            }
        }
        const result = getFilledBlobs();
        if (result.length < blobHashes.length) {
            this.log.warn(`Failed to fetch all blobs for ${blockHash} from all blob sources (got ${result.length}/${blobHashes.length})`, {
                l1ConsensusHostUrls,
                archiveUrl: this.archiveClient?.getBaseUrl(),
                fileStoreUrls: this.fileStoreClients.map((c)=>c.getBaseUrl())
            });
        }
        return returnWithCallback(result);
    }
    /**
     * Try all filestores once (shuffled for load distribution).
     * @param getMissingBlobHashes - Function to get remaining blob hashes to fetch
     * @param fillResults - Callback to fill in results
     * @param ctx - Logging context
     */ async tryFileStores(getMissingBlobHashes, fillResults, ctx) {
        // Shuffle clients for load distribution
        const shuffledClients = [
            ...this.fileStoreClients
        ];
        shuffle(shuffledClients);
        for (const client of shuffledClients){
            const blobHashes = getMissingBlobHashes();
            if (blobHashes.length === 0) {
                return; // All blobs found, no need to try more filestores
            }
            try {
                const blobHashStrings = blobHashes.map((h)=>`0x${h.toString('hex')}`);
                this.log.trace(`Attempting to get ${blobHashStrings.length} blobs from filestore`, {
                    url: client.getBaseUrl(),
                    ...ctx
                });
                const blobs = await client.getBlobsByHashes(blobHashStrings);
                if (blobs.length > 0) {
                    const result = fillResults(blobs);
                    this.log.debug(`Got ${blobs.length} blobs from filestore (total: ${result.length}/${ctx.blobHashes.length})`, {
                        url: client.getBaseUrl(),
                        ...ctx
                    });
                }
            } catch (err) {
                this.log.warn(`Failed to fetch from filestore: ${err}`, {
                    url: client.getBaseUrl()
                });
            }
        }
    }
    async getBlobSidecarFrom(hostUrl, blockHashOrSlot, blobHashes = [], indices = [], l1ConsensusHostIndex) {
        const blobs = await this.getBlobsFromHost(hostUrl, blockHashOrSlot, indices, l1ConsensusHostIndex);
        return processFetchedBlobs(blobs, blobHashes, this.log).filter((b)=>b !== undefined);
    }
    async getBlobsFromHost(hostUrl, blockHashOrSlot, indices = [], l1ConsensusHostIndex) {
        try {
            let res = await this.fetchBlobSidecars(hostUrl, blockHashOrSlot, indices, l1ConsensusHostIndex);
            if (res.ok) {
                return parseBlobJsonsFromResponse(await res.json(), this.log);
            }
            if (res.status === 404 && typeof blockHashOrSlot === 'number') {
                const latestSlot = await this.getLatestSlotNumber(hostUrl, l1ConsensusHostIndex);
                this.log.debug(`Requested L1 slot ${blockHashOrSlot} not found, trying out slots up to ${latestSlot}`, {
                    hostUrl,
                    status: res.status,
                    statusText: res.statusText
                });
                let maxRetries = 10;
                let currentSlot = blockHashOrSlot + 1;
                while(res.status === 404 && maxRetries > 0 && latestSlot !== undefined && currentSlot <= latestSlot){
                    this.log.debug(`Trying slot ${currentSlot} for blob indices ${indices.join(', ')}`);
                    res = await this.fetchBlobSidecars(hostUrl, currentSlot, indices, l1ConsensusHostIndex);
                    if (res.ok) {
                        return parseBlobJsonsFromResponse(await res.json(), this.log);
                    }
                    currentSlot++;
                    maxRetries--;
                }
            }
            this.log.warn(`Unable to get blob sidecar for ${blockHashOrSlot}: ${res.statusText} (${res.status})`, {
                status: res.status,
                statusText: res.statusText,
                body: await res.text().catch(()=>'Failed to read response body')
            });
            return [];
        } catch (error) {
            this.log.warn(`Error getting blob sidecar from ${hostUrl}: ${error.message ?? error}`);
            return [];
        }
    }
    fetchBlobSidecars(hostUrl, blockHashOrSlot, indices, l1ConsensusHostIndex) {
        let baseUrl = `${hostUrl}/eth/v1/beacon/blob_sidecars/${blockHashOrSlot}`;
        if (indices.length > 0) {
            baseUrl += `?indices=${indices.join(',')}`;
        }
        const { url, ...options } = getBeaconNodeFetchOptions(baseUrl, this.config, l1ConsensusHostIndex);
        this.log.debug(`Fetching blob sidecar for ${blockHashOrSlot}`, {
            url,
            ...options
        });
        return this.fetch(url, options);
    }
    async getLatestSlotNumber(hostUrl, l1ConsensusHostIndex) {
        try {
            const baseUrl = `${hostUrl}/eth/v1/beacon/headers/head`;
            const { url, ...options } = getBeaconNodeFetchOptions(baseUrl, this.config, l1ConsensusHostIndex);
            this.log.debug(`Fetching latest slot number`, {
                url,
                ...options
            });
            const res = await this.fetch(url, options);
            if (res.ok) {
                const body = await res.json();
                const slot = parseInt(body.data.header.message.slot);
                if (Number.isNaN(slot)) {
                    this.log.error(`Failed to parse slot number from response from ${hostUrl}`, {
                        body
                    });
                    return undefined;
                }
                return slot;
            }
        } catch (err) {
            this.log.error(`Error getting latest slot number from ${hostUrl}`, err);
            return undefined;
        }
    }
    /**
     * Get the slot number from the consensus host
     * As of eip-4788, the parentBeaconBlockRoot is included in the execution layer.
     * This allows us to query the consensus layer for the slot number of the parent block, which we will then use
     * to request blobs from the consensus layer.
     *
     * If this returns undefined, it means that we are not connected to a real consensus host, and we should
     * query blobs with the blockHash.
     *
     * If this returns a number, then we should query blobs with the slot number
     *
     * @param blockHash - The block hash
     * @returns The slot number
     */ async getSlotNumber(blockHash) {
        const { l1ConsensusHostUrls, l1RpcUrls } = this.config;
        if (!l1ConsensusHostUrls || l1ConsensusHostUrls.length === 0) {
            this.log.debug('No consensus host url configured');
            return undefined;
        }
        if (!l1RpcUrls || l1RpcUrls.length === 0) {
            this.log.debug('No execution host url configured');
            return undefined;
        }
        // Ping execution node to get the parentBeaconBlockRoot for this block
        let parentBeaconBlockRoot;
        const client = createPublicClient({
            transport: fallback(l1RpcUrls.map((url)=>http(url, {
                batch: false
            })))
        });
        try {
            const res = await client.request({
                method: 'eth_getBlockByHash',
                params: [
                    blockHash,
                    /*tx flag*/ false
                ]
            });
            if (res.parentBeaconBlockRoot) {
                parentBeaconBlockRoot = res.parentBeaconBlockRoot;
            }
        } catch (err) {
            this.log.error(`Error getting parent beacon block root`, err);
        }
        if (!parentBeaconBlockRoot) {
            this.log.error(`No parent beacon block root found for block ${blockHash}`);
            return undefined;
        }
        // Query beacon chain to get the slot number for that block root
        let l1ConsensusHostUrl;
        for(let l1ConsensusHostIndex = 0; l1ConsensusHostIndex < l1ConsensusHostUrls.length; l1ConsensusHostIndex++){
            l1ConsensusHostUrl = l1ConsensusHostUrls[l1ConsensusHostIndex];
            try {
                const { url, ...options } = getBeaconNodeFetchOptions(`${l1ConsensusHostUrl}/eth/v1/beacon/headers/${parentBeaconBlockRoot}`, this.config, l1ConsensusHostIndex);
                const res = await this.fetch(url, options);
                if (res.ok) {
                    const body = await res.json();
                    // Add one to get the slot number of the original block hash
                    return Number(body.data.header.message.slot) + 1;
                }
            } catch (err) {
                this.log.error(`Error getting slot number`, err);
            }
        }
        return undefined;
    }
}
function parseBlobJsonsFromResponse(response, logger) {
    try {
        const blobs = response.data.map(parseBlobJson);
        return blobs;
    } catch (err) {
        logger.error(`Error parsing blob json from response`, err);
        return [];
    }
}
// Blobs will be in this form when requested from the blob client, or from the beacon chain via `getBlobSidecars`:
// https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getBlobSidecars
// Here we attempt to parse the response data to Buffer, and check the lengths (via Blob's constructor), to avoid
// throwing an error down the line when calling BlobWithIndex.fromJson().
function parseBlobJson(data) {
    const blobBuffer = Buffer.from(data.blob.slice(2), 'hex');
    const commitmentBuffer = Buffer.from(data.kzg_commitment.slice(2), 'hex');
    const blob = new Blob(blobBuffer, commitmentBuffer);
    return blob.toJson(parseInt(data.index));
}
// Returns an array that maps each blob hash to the corresponding blob with index, or undefined if the blob is not found
// or the data does not match the commitment.
function processFetchedBlobs(blobs, blobHashes, logger) {
    const requestedBlobHashes = new Set(blobHashes.map(bufferToHex));
    const hashToBlob = new Map();
    for (const blob of blobs){
        const hashHex = bufferToHex(computeEthVersionedBlobHash(hexToBuffer(blob.kzg_commitment)));
        if (!requestedBlobHashes.has(hashHex) || hashToBlob.has(hashHex)) {
            continue;
        }
        try {
            const blobWithIndex = BlobWithIndex.fromJson(blob);
            hashToBlob.set(hashHex, blobWithIndex);
        } catch (err) {
            // If the above throws, it's likely that the blob commitment does not match the hash of the blob data.
            logger.error(`Error converting blob from json`, err);
        }
    }
    return blobHashes.map((h)=>hashToBlob.get(bufferToHex(h)));
}
function getBeaconNodeFetchOptions(url, config, l1ConsensusHostIndex) {
    const { l1ConsensusHostApiKeys, l1ConsensusHostApiKeyHeaders } = config;
    const l1ConsensusHostApiKey = l1ConsensusHostIndex !== undefined && l1ConsensusHostApiKeys && l1ConsensusHostApiKeys[l1ConsensusHostIndex];
    const l1ConsensusHostApiKeyHeader = l1ConsensusHostIndex !== undefined && l1ConsensusHostApiKeyHeaders && l1ConsensusHostApiKeyHeaders[l1ConsensusHostIndex];
    let formattedUrl = url;
    if (l1ConsensusHostApiKey && l1ConsensusHostApiKey.getValue() !== '' && !l1ConsensusHostApiKeyHeader) {
        formattedUrl += `${formattedUrl.includes('?') ? '&' : '?'}key=${l1ConsensusHostApiKey.getValue()}`;
    }
    return {
        url: formattedUrl,
        ...l1ConsensusHostApiKey && l1ConsensusHostApiKeyHeader && {
            headers: {
                [l1ConsensusHostApiKeyHeader]: l1ConsensusHostApiKey.getValue()
            }
        }
    };
}
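
For orientation, the sketch below shows how the compiled HttpBlobClient above could be wired up. It uses only names visible in this diff (the constructor, testSources, getBlobSidecar, and the config fields the class reads); the root import path, the placeholder URLs, and the loose cast are assumptions rather than documented API.

import { HttpBlobClient } from '@aztec/blob-client'; // assumed root re-export of dest/client/http.js

async function fetchBlockBlobs(blockHash: string, blobHashes: Buffer[]) {
    // Only the config fields read by the compiled class are set here; the full
    // config type lives in client/config.ts (not shown), hence the loose cast.
    const client = new HttpBlobClient({
        l1ConsensusHostUrls: ['https://beacon.node.example'], // hypothetical endpoint
        l1RpcUrls: ['https://execution.node.example'],        // hypothetical endpoint
        blobAllowEmptySources: true,
    } as any);

    // Logs whether each configured source (consensus hosts, archive, filestores) is reachable.
    await client.testSources();

    // Near-tip ordering: filestores once, then consensus hosts by slot, then filestores with retries, then the archive.
    return client.getBlobSidecar(blockHash, blobHashes, [], { isHistoricalSync: false });
}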

package/dest/client/index.d.ts
@@ -0,0 +1,6 @@
export * from './http.js';
export * from './local.js';
export * from './interface.js';
export * from './factory.js';
export * from './config.js';
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiaW5kZXguZC50cyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9jbGllbnQvaW5kZXgudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IkFBQUEsY0FBYyxXQUFXLENBQUM7QUFDMUIsY0FBYyxZQUFZLENBQUM7QUFDM0IsY0FBYyxnQkFBZ0IsQ0FBQztBQUMvQixjQUFjLGNBQWMsQ0FBQztBQUM3QixjQUFjLGFBQWEsQ0FBQyJ9

package/dest/client/index.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/client/index.ts"],"names":[],"mappings":"AAAA,cAAc,WAAW,CAAC;AAC1B,cAAc,YAAY,CAAC;AAC3B,cAAc,gBAAgB,CAAC;AAC/B,cAAc,cAAc,CAAC;AAC7B,cAAc,aAAa,CAAC"}

package/dest/client/interface.d.ts
@@ -0,0 +1,23 @@
import type { Blob } from '@aztec/blob-lib';
import type { BlobWithIndex } from '../types/blob_with_index.js';
/**
 * Options for getBlobSidecar method.
 */
export interface GetBlobSidecarOptions {
    /**
     * True if the archiver is catching up (historical sync), false if near tip.
     * This affects source ordering:
     * - Historical: FileStore first (data should exist), then L1 consensus, then archive (eg. blobscan)
     * - Near tip: FileStore first with no retries (data should exist), L1 consensus second (freshest data), then FileStore with retries, then archive (eg. blobscan)
     */
    isHistoricalSync?: boolean;
}
export interface BlobClientInterface {
    /** Sends the given blobs to the filestore, to be indexed by blob hash. */
    sendBlobsToFilestore(blobs: Blob[]): Promise<boolean>;
    /** Fetches the given blob sidecars by block, hash, and indices. */
    getBlobSidecar(blockId: string, blobHashes?: Buffer[], indices?: number[], opts?: GetBlobSidecarOptions): Promise<BlobWithIndex[]>;
    /** Tests all configured blob sources and logs whether they are reachable or not. */
    testSources(): Promise<void>;
}
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiaW50ZXJmYWNlLmQudHMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi8uLi9zcmMvY2xpZW50L2ludGVyZmFjZS50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQSxPQUFPLEtBQUssRUFBRSxJQUFJLEVBQUUsTUFBTSxpQkFBaUIsQ0FBQztBQUU1QyxPQUFPLEtBQUssRUFBRSxhQUFhLEVBQUUsTUFBTSw2QkFBNkIsQ0FBQztBQUVqRTs7R0FFRztBQUNILE1BQU0sV0FBVyxxQkFBcUI7SUFDcEM7Ozs7O09BS0c7SUFDSCxnQkFBZ0IsQ0FBQyxFQUFFLE9BQU8sQ0FBQztDQUM1QjtBQUVELE1BQU0sV0FBVyxtQkFBbUI7SUFDbEMsMEVBQTBFO0lBQzFFLG9CQUFvQixDQUFDLEtBQUssRUFBRSxJQUFJLEVBQUUsR0FBRyxPQUFPLENBQUMsT0FBTyxDQUFDLENBQUM7SUFDdEQsbUVBQW1FO0lBQ25FLGNBQWMsQ0FDWixPQUFPLEVBQUUsTUFBTSxFQUNmLFVBQVUsQ0FBQyxFQUFFLE1BQU0sRUFBRSxFQUNyQixPQUFPLENBQUMsRUFBRSxNQUFNLEVBQUUsRUFDbEIsSUFBSSxDQUFDLEVBQUUscUJBQXFCLEdBQzNCLE9BQU8sQ0FBQyxhQUFhLEVBQUUsQ0FBQyxDQUFDO0lBQzVCLG9GQUFvRjtJQUNwRixXQUFXLElBQUksT0FBTyxDQUFDLElBQUksQ0FBQyxDQUFDO0NBQzlCIn0=
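
Both clients in this package implement the contract above. Purely as an illustration of its shape (and assuming the interface and BlobWithIndex types are re-exported from the package root), a do-nothing implementation would look like this:

import type { Blob } from '@aztec/blob-lib';
import type { BlobClientInterface, BlobWithIndex, GetBlobSidecarOptions } from '@aztec/blob-client'; // assumed re-exports

// A stub that satisfies BlobClientInterface without persisting or fetching anything.
class NoopBlobClient implements BlobClientInterface {
    sendBlobsToFilestore(_blobs: Blob[]): Promise<boolean> {
        return Promise.resolve(false); // nothing is stored
    }
    getBlobSidecar(
        _blockId: string,
        _blobHashes?: Buffer[],
        _indices?: number[],
        _opts?: GetBlobSidecarOptions,
    ): Promise<BlobWithIndex[]> {
        return Promise.resolve([]); // no sources, so no blobs
    }
    testSources(): Promise<void> {
        return Promise.resolve();
    }
}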

package/dest/client/interface.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"interface.d.ts","sourceRoot":"","sources":["../../src/client/interface.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,iBAAiB,CAAC;AAE5C,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,6BAA6B,CAAC;AAEjE;;GAEG;AACH,MAAM,WAAW,qBAAqB;IACpC;;;;;OAKG;IACH,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B;AAED,MAAM,WAAW,mBAAmB;IAClC,0EAA0E;IAC1E,oBAAoB,CAAC,KAAK,EAAE,IAAI,EAAE,GAAG,OAAO,CAAC,OAAO,CAAC,CAAC;IACtD,mEAAmE;IACnE,cAAc,CACZ,OAAO,EAAE,MAAM,EACf,UAAU,CAAC,EAAE,MAAM,EAAE,EACrB,OAAO,CAAC,EAAE,MAAM,EAAE,EAClB,IAAI,CAAC,EAAE,qBAAqB,GAC3B,OAAO,CAAC,aAAa,EAAE,CAAC,CAAC;IAC5B,oFAAoF;IACpF,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;CAC9B"}

package/dest/client/interface.js
@@ -0,0 +1 @@
export { };

package/dest/client/local.d.ts
@@ -0,0 +1,12 @@
import type { Blob } from '@aztec/blob-lib';
import type { BlobStore } from '../blobstore/index.js';
import { BlobWithIndex } from '../types/blob_with_index.js';
import type { BlobClientInterface, GetBlobSidecarOptions } from './interface.js';
export declare class LocalBlobClient implements BlobClientInterface {
    private readonly blobStore;
    constructor(blobStore: BlobStore);
    testSources(): Promise<void>;
    sendBlobsToFilestore(blobs: Blob[]): Promise<boolean>;
    getBlobSidecar(_blockId: string, blobHashes: Buffer[], _indices?: number[], _opts?: GetBlobSidecarOptions): Promise<BlobWithIndex[]>;
}
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibG9jYWwuZC50cyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9jbGllbnQvbG9jYWwudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IkFBQUEsT0FBTyxLQUFLLEVBQUUsSUFBSSxFQUFFLE1BQU0saUJBQWlCLENBQUM7QUFFNUMsT0FBTyxLQUFLLEVBQUUsU0FBUyxFQUFFLE1BQU0sdUJBQXVCLENBQUM7QUFDdkQsT0FBTyxFQUFFLGFBQWEsRUFBRSxNQUFNLDZCQUE2QixDQUFDO0FBQzVELE9BQU8sS0FBSyxFQUFFLG1CQUFtQixFQUFFLHFCQUFxQixFQUFFLE1BQU0sZ0JBQWdCLENBQUM7QUFFakYscUJBQWEsZUFBZ0IsWUFBVyxtQkFBbUI7SUFDekQsT0FBTyxDQUFDLFFBQVEsQ0FBQyxTQUFTLENBQVk7SUFFdEMsWUFBWSxTQUFTLEVBQUUsU0FBUyxFQUUvQjtJQUVNLFdBQVcsSUFBSSxPQUFPLENBQUMsSUFBSSxDQUFDLENBRWxDO0lBRVksb0JBQW9CLENBQUMsS0FBSyxFQUFFLElBQUksRUFBRSxHQUFHLE9BQU8sQ0FBQyxPQUFPLENBQUMsQ0FJakU7SUFFTSxjQUFjLENBQ25CLFFBQVEsRUFBRSxNQUFNLEVBQ2hCLFVBQVUsRUFBRSxNQUFNLEVBQUUsRUFDcEIsUUFBUSxDQUFDLEVBQUUsTUFBTSxFQUFFLEVBQ25CLEtBQUssQ0FBQyxFQUFFLHFCQUFxQixHQUM1QixPQUFPLENBQUMsYUFBYSxFQUFFLENBQUMsQ0FFMUI7Q0FDRiJ9

package/dest/client/local.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"local.d.ts","sourceRoot":"","sources":["../../src/client/local.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,iBAAiB,CAAC;AAE5C,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,uBAAuB,CAAC;AACvD,OAAO,EAAE,aAAa,EAAE,MAAM,6BAA6B,CAAC;AAC5D,OAAO,KAAK,EAAE,mBAAmB,EAAE,qBAAqB,EAAE,MAAM,gBAAgB,CAAC;AAEjF,qBAAa,eAAgB,YAAW,mBAAmB;IACzD,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAY;IAEtC,YAAY,SAAS,EAAE,SAAS,EAE/B;IAEM,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC,CAElC;IAEY,oBAAoB,CAAC,KAAK,EAAE,IAAI,EAAE,GAAG,OAAO,CAAC,OAAO,CAAC,CAIjE;IAEM,cAAc,CACnB,QAAQ,EAAE,MAAM,EAChB,UAAU,EAAE,MAAM,EAAE,EACpB,QAAQ,CAAC,EAAE,MAAM,EAAE,EACnB,KAAK,CAAC,EAAE,qBAAqB,GAC5B,OAAO,CAAC,aAAa,EAAE,CAAC,CAE1B;CACF"}

package/dest/client/local.js
@@ -0,0 +1,18 @@
import { BlobWithIndex } from '../types/blob_with_index.js';
export class LocalBlobClient {
    blobStore;
    constructor(blobStore){
        this.blobStore = blobStore;
    }
    testSources() {
        return Promise.resolve();
    }
    async sendBlobsToFilestore(blobs) {
        const blobsWithIndex = blobs.map((blob, index)=>new BlobWithIndex(blob, index));
        await this.blobStore.addBlobs(blobsWithIndex);
        return true;
    }
    getBlobSidecar(_blockId, blobHashes, _indices, _opts) {
        return this.blobStore.getBlobsByHashes(blobHashes);
    }
}
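
In contrast to the HTTP client, LocalBlobClient serves everything from an injected BlobStore. A small sketch, assuming the in-memory store listed under blobstore/ takes no constructor arguments and that both classes are re-exported from the package root:

import type { Blob } from '@aztec/blob-lib';
import { LocalBlobClient, MemoryBlobStore } from '@aztec/blob-client'; // assumed re-exports

// Store some blobs, then read them back by versioned blob hash; no network is involved.
async function roundTrip(blobs: Blob[], versionedHashes: Buffer[]) {
    const client = new LocalBlobClient(new MemoryBlobStore()); // assumed zero-argument constructor
    await client.sendBlobsToFilestore(blobs); // wraps each Blob as BlobWithIndex and stores it
    return client.getBlobSidecar('0x00', versionedHashes); // block id and indices are ignored
}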

package/dest/client/tests.d.ts
@@ -0,0 +1,11 @@
import type { BlobClientInterface } from './interface.js';
/**
 * Shared test suite for blob clients
 * @param createClient - Function that creates a client instance for testing
 * @param cleanup - Optional cleanup function to run after each test
 */
export declare function runBlobClientTests(createClient: () => Promise<{
    client: BlobClientInterface;
    cleanup: () => Promise<void>;
}>): void;
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoidGVzdHMuZC50cyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9jbGllbnQvdGVzdHMudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IkFBSUEsT0FBTyxLQUFLLEVBQUUsbUJBQW1CLEVBQUUsTUFBTSxnQkFBZ0IsQ0FBQztBQUUxRDs7OztHQUlHO0FBQ0gsd0JBQWdCLGtCQUFrQixDQUNoQyxZQUFZLEVBQUUsTUFBTSxPQUFPLENBQUM7SUFBRSxNQUFNLEVBQUUsbUJBQW1CLENBQUM7SUFBQyxPQUFPLEVBQUUsTUFBTSxPQUFPLENBQUMsSUFBSSxDQUFDLENBQUE7Q0FBRSxDQUFDLFFBaUUzRiJ9
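
The declaration above shows what downstream test files have to supply: a factory that resolves to a client plus a cleanup callback. A hedged sketch wiring the shared suite to the in-memory implementation; the import paths are assumptions, since tests.js is not among the modules re-exported by the client/index.d.ts shown earlier:

import { runBlobClientTests } from '@aztec/blob-client/dest/client/tests.js'; // assumed deep-import path
import { LocalBlobClient, MemoryBlobStore } from '@aztec/blob-client';        // assumed root re-exports

// Registers the shared blob-client test cases against LocalBlobClient; the suite decides
// which cases run, callers only provide construction and teardown.
runBlobClientTests(async () => {
    const client = new LocalBlobClient(new MemoryBlobStore()); // assumed zero-argument constructor
    return { client, cleanup: () => Promise.resolve() };
});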

package/dest/client/tests.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"tests.d.ts","sourceRoot":"","sources":["../../src/client/tests.ts"],"names":[],"mappings":"AAIA,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,gBAAgB,CAAC;AAE1D;;;;GAIG;AACH,wBAAgB,kBAAkB,CAChC,YAAY,EAAE,MAAM,OAAO,CAAC;IAAE,MAAM,EAAE,mBAAmB,CAAC;IAAC,OAAO,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,CAAA;CAAE,CAAC,QAiE3F"}