querysub 0.355.0 → 0.357.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/.cursorrules +8 -0
  2. package/bin/movelogs.js +4 -0
  3. package/package.json +12 -6
  4. package/scripts/postinstall.js +23 -0
  5. package/src/-a-archives/archiveCache.ts +10 -12
  6. package/src/-a-archives/archives.ts +29 -0
  7. package/src/-a-archives/archivesBackBlaze.ts +60 -12
  8. package/src/-a-archives/archivesDisk.ts +27 -8
  9. package/src/-a-archives/archivesLimitedCache.ts +21 -0
  10. package/src/-a-archives/archivesMemoryCache.ts +350 -0
  11. package/src/-a-archives/archivesPrivateFileSystem.ts +22 -0
  12. package/src/-g-core-values/NodeCapabilities.ts +3 -0
  13. package/src/0-path-value-core/auditLogs.ts +5 -1
  14. package/src/0-path-value-core/pathValueCore.ts +7 -7
  15. package/src/4-dom/qreact.tsx +1 -0
  16. package/src/4-querysub/Querysub.ts +1 -5
  17. package/src/config.ts +5 -0
  18. package/src/diagnostics/MachineThreadInfo.tsx +235 -0
  19. package/src/diagnostics/NodeViewer.tsx +3 -2
  20. package/src/diagnostics/logs/FastArchiveAppendable.ts +79 -42
  21. package/src/diagnostics/logs/FastArchiveController.ts +102 -63
  22. package/src/diagnostics/logs/FastArchiveViewer.tsx +36 -8
  23. package/src/diagnostics/logs/IndexedLogs/BufferIndex.ts +461 -0
  24. package/src/diagnostics/logs/IndexedLogs/BufferIndexCPP.cpp +327 -0
  25. package/src/diagnostics/logs/IndexedLogs/BufferIndexCPP.d.ts +18 -0
  26. package/src/diagnostics/logs/IndexedLogs/BufferIndexCPP.js +1 -0
  27. package/src/diagnostics/logs/IndexedLogs/BufferIndexHelpers.ts +140 -0
  28. package/src/diagnostics/logs/IndexedLogs/BufferIndexLogsOptimizationConstants.ts +22 -0
  29. package/src/diagnostics/logs/IndexedLogs/BufferIndexWAT.wat +1145 -0
  30. package/src/diagnostics/logs/IndexedLogs/BufferIndexWAT.wat.d.ts +178 -0
  31. package/src/diagnostics/logs/IndexedLogs/BufferListStreamer.ts +206 -0
  32. package/src/diagnostics/logs/IndexedLogs/BufferUnitIndex.ts +719 -0
  33. package/src/diagnostics/logs/IndexedLogs/BufferUnitSet.ts +146 -0
  34. package/src/diagnostics/logs/IndexedLogs/FilePathSelector.tsx +408 -0
  35. package/src/diagnostics/logs/IndexedLogs/FindProgressTracker.ts +45 -0
  36. package/src/diagnostics/logs/IndexedLogs/IndexedLogs.ts +598 -0
  37. package/src/diagnostics/logs/IndexedLogs/LogStreamer.ts +47 -0
  38. package/src/diagnostics/logs/IndexedLogs/LogViewer3.tsx +702 -0
  39. package/src/diagnostics/logs/IndexedLogs/TimeFileTree.ts +236 -0
  40. package/src/diagnostics/logs/IndexedLogs/binding.gyp +23 -0
  41. package/src/diagnostics/logs/IndexedLogs/moveIndexLogsToPublic.ts +221 -0
  42. package/src/diagnostics/logs/IndexedLogs/moveLogsEntry.ts +10 -0
  43. package/src/diagnostics/logs/LogViewer2.tsx +120 -55
  44. package/src/diagnostics/logs/TimeRangeSelector.tsx +5 -2
  45. package/src/diagnostics/logs/diskLogger.ts +32 -48
  46. package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +3 -2
  47. package/src/diagnostics/logs/errorNotifications/errorDigests.tsx +1 -0
  48. package/src/diagnostics/logs/lifeCycleAnalysis/LifeCyclePages.tsx +150 -0
  49. package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +133 -0
  50. package/src/diagnostics/logs/lifeCycleAnalysis/test.ts +180 -0
  51. package/src/diagnostics/logs/lifeCycleAnalysis/test.wat +106 -0
  52. package/src/diagnostics/logs/lifeCycleAnalysis/test.wat.d.ts +2 -0
  53. package/src/diagnostics/logs/lifeCycleAnalysis/testHoist.ts +5 -0
  54. package/src/diagnostics/logs/logViewerExtractField.ts +2 -3
  55. package/src/diagnostics/managementPages.tsx +11 -1
  56. package/src/diagnostics/trackResources.ts +1 -1
  57. package/src/misc/lz4_wasm_nodejs.d.ts +34 -0
  58. package/src/misc/lz4_wasm_nodejs.js +178 -0
  59. package/src/misc/lz4_wasm_nodejs_bg.js +94 -0
  60. package/src/misc/lz4_wasm_nodejs_bg.wasm +0 -0
  61. package/src/misc/lz4_wasm_nodejs_bg.wasm.d.ts +15 -0
  62. package/src/storage/CompressedStream.ts +13 -0
  63. package/src/storage/LZ4.ts +32 -0
  64. package/src/storage/ZSTD.ts +10 -0
  65. package/src/wat/watCompiler.ts +1716 -0
  66. package/src/wat/watGrammar.pegjs +93 -0
  67. package/src/wat/watHandler.ts +179 -0
  68. package/src/wat/watInstructions.txt +707 -0
  69. package/src/zip.ts +3 -89
  70. package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +0 -125
@@ -0,0 +1,461 @@
1
+ // NOTE: I think even if the index is larger than the file size itself, it's still worth it. Because we'll be able to do a binary search. At a system level, it probably isn't worth it, especially at a network level. However, we don't pay the network cost on our main thread, so for our application I think it is worth it (and it's too much effort to special case poor compression cases that will never happen anyways...)
2
+
3
+ import { MaybePromise } from "socket-function/src/types";
4
+ import { LZ4 } from "../../../storage/LZ4";
5
+ import { ZSTD } from "../../../storage/ZSTD";
6
+ import { binarySearchIndex, sort } from "socket-function/src/misc";
7
+ import { CompressedStream } from "../../../storage/CompressedStream";
8
+ import { cacheArgsEqual, cacheLimited, cacheWeak, lazy } from "socket-function/src/caching";
9
+ import { measureBlock, measureFnc, measureWrap } from "socket-function/src/profiling/measure";
10
+ import { formatNumber, formatTime } from "socket-function/src/formatting/format";
11
+ import { magenta, yellow } from "socket-function/src/formatting/logColors";
12
+ import { Unit, getAllUnits, Reader, createMatchesPattern, createOffsetReader, splitOnWildcard, SearchParams } from "./BufferIndexHelpers";
13
+ import { UnitSet } from "./BufferUnitSet";
14
+ import { BufferUnitIndex } from "./BufferUnitIndex";
15
+ import { BufferListStreamer } from "./BufferListStreamer";
16
+ import { STREAMING_BLOCK_THRESHOLD } from "./BufferIndexLogsOptimizationConstants";
17
+
18
+
19
+
20
+
21
+ // Magic numbers for different streamer types
22
+ const DATA_STREAMER_MAGIC = 0xB4B8F0F1;
23
+ const INDEX_STREAMER_MAGIC = 0xB4B8F0F2;
24
+ const BLOCK_STREAMER_MAGIC = 0xB4B8F0F3;
25
+
26
+
27
+ // Create instances for the different streamer types
28
+ const dataStreamerType = new BufferListStreamer(DATA_STREAMER_MAGIC, true); // Include last block for data
29
+ const indexStreamerType = new BufferListStreamer(INDEX_STREAMER_MAGIC, true); // Include last block for index
30
+ const blockStreamerType = new BufferListStreamer(BLOCK_STREAMER_MAGIC, true); // Include last block for inner blocks
31
+
32
+
33
+
34
+ const STREAM_TYPE = 0xC9;
35
+ const BULK_TYPE = 0xD5;
36
+ // Encode a type byte followed by an int32 header size and arbitrary header content.
37
+ // The caller's data follows immediately after.
38
+ // Format: type (1) | headerSize as int32 (4) | headerContent (headerSize bytes)
39
+ function encodeTypeHeader(type: number, headerContent: Buffer): Buffer {
40
+ let header = Buffer.alloc(1 + 4 + headerContent.length);
41
+ header.writeUInt8(type, 0);
42
+ header.writeInt32LE(headerContent.length, 1);
43
+ headerContent.copy(header, 5);
44
+ return header;
45
+ }
46
+ // Strips the type header written by encodeTypeHeader and returns the type,
47
+ // the raw header content, and the remaining data after the header.
48
+ // Returns undefined if the header is corrupted/incomplete.
49
+ function decodeTypeHeader(data: Buffer): { type: number, headerContent: Buffer, data: Buffer } | undefined {
50
+ if (data.length < 5) {
51
+ return undefined;
52
+ }
53
+ let type = data[0];
54
+ let headerSize = data.readInt32LE(1);
55
+ if (headerSize < 0 || 5 + headerSize > data.length) {
56
+ return undefined;
57
+ }
58
+ let headerContent = data.slice(5, 5 + headerSize);
59
+ return { type, headerContent, data: data.slice(5 + headerSize) };
60
+ }
61
+
62
+
63
+
64
+ export class BufferIndex {
65
+ /*
66
+ Data structure is always:
67
+ compressed(Buffer[])[]
68
+ However sometimes we used BufferListStreamer, and sometimes BufferList
69
+ Index structure is sometimes:
70
+ UnitRefList[], and sometimes just UnitRefList
71
+ */
72
+ public static createStreamer(): {
73
+ add(buffer: Buffer[]): {
74
+ data: Buffer;
75
+ // NOTE: We will cache in memory index values we want to write and write them all at once so the index is efficient. It's fine if the program terminates before we write our final index values. The reading code will handle this partial index and correctly recreate it.
76
+ index?: Buffer;
77
+ };
78
+ close(): {
79
+ data: Buffer;
80
+ index: Buffer;
81
+ };
82
+ } {
83
+ let dataStreamer = dataStreamerType.createStreamer();
84
+ let indexStreamer = indexStreamerType.createStreamer();
85
+ let currentBlockStreamer = blockStreamerType.createStreamer();
86
+
87
+ let compressedStream = new CompressedStream();
88
+ let currentBlockSize = 0;
89
+ let currentBlock: Buffer[] = [];
90
+ let isFirst = true;
91
+ let isFirstIndex = true;
92
+
93
+ let finishIndex = (indexParts: Buffer[]) => {
94
+ if (isFirstIndex) {
95
+ isFirstIndex = false;
96
+ indexParts.push(encodeTypeHeader(STREAM_TYPE, Buffer.alloc(0)));
97
+ }
98
+
99
+ indexParts.push(indexStreamer.add(UnitSet.encode([currentBlock])));
100
+ indexParts.push(indexStreamer.finishBlock());
101
+ };
102
+
103
+
104
+ return {
105
+ close: () => {
106
+ let indexParts: Buffer[] = [];
107
+ finishIndex(indexParts);
108
+ return {
109
+ data: dataStreamer.finishBlock(),
110
+ index: Buffer.concat(indexParts),
111
+ };
112
+ },
113
+ add: measureWrap((buffers: Buffer[]) => {
114
+ const outputParts: Buffer[] = [];
115
+ const indexParts: Buffer[] = [];
116
+
117
+ // Write the STREAM_TYPE header once so decodeAll (and find) can identify the format.
118
+ if (isFirst) {
119
+ isFirst = false;
120
+ outputParts.push(encodeTypeHeader(STREAM_TYPE, Buffer.alloc(0)));
121
+ }
122
+
123
+ for (const buffer of buffers) {
124
+ outputParts.push(dataStreamer.add(compressedStream.append(currentBlockStreamer.add(buffer))));
125
+ outputParts.push(dataStreamer.add(compressedStream.append(currentBlockStreamer.finishBlock())));
126
+
127
+ currentBlock.push(buffer);
128
+ currentBlockSize += buffer.length;
129
+
130
+ if (currentBlockSize >= STREAMING_BLOCK_THRESHOLD) {
131
+ outputParts.push(dataStreamer.finishBlock());
132
+ currentBlockStreamer = blockStreamerType.createStreamer();
133
+ compressedStream = new CompressedStream();
134
+
135
+ finishIndex(indexParts);
136
+
137
+ currentBlock = [];
138
+ currentBlockSize = 0;
139
+ }
140
+ }
141
+
142
+ return {
143
+ data: Buffer.concat(outputParts),
144
+ index: indexParts.length > 0 && Buffer.concat(indexParts) || undefined,
145
+ };
146
+ }, `BufferIndex|stream add`),
147
+ };
148
+ }
149
+
150
+ public static async decodeAll(dataIn: Buffer): Promise<Buffer[]> {
151
+ return (await this.decodeAllBlocked(dataIn)).flat();
152
+ }
153
+ @measureFnc
154
+ public static async decodeAllBlocked(dataIn: Buffer): Promise<Buffer[][]> {
155
+ let type = dataIn[0];
156
+ if (type === STREAM_TYPE) {
157
+ let decoded = decodeTypeHeader(dataIn);
158
+ if (!decoded) return [];
159
+ let { data } = decoded;
160
+ let rawBlocks = await dataStreamerType.getAllBlocks(data);
161
+ let blocks = rawBlocks.map(comp => {
162
+ try {
163
+ return CompressedStream.decode(comp);
164
+ } catch (e) {
165
+ return Buffer.alloc(0);
166
+ }
167
+ });
168
+ let allBuffers = await Promise.all(blocks.map(decomp => blockStreamerType.getAllBlocks(decomp)));
169
+ return allBuffers;
170
+ } else if (type === BULK_TYPE) {
171
+ return [await BufferUnitIndex.decodeAll(dataIn)];
172
+ } else {
173
+ // Unknown type, return empty
174
+ return [];
175
+ }
176
+ }
177
+
178
+ // IMPORTANT! The input data values should be sorted from newest to oldest.
179
+ @measureFnc
180
+ public static encodeAll(config: {
181
+ data: Buffer[];
182
+ }): {
183
+ data: Buffer;
184
+ index: Buffer;
185
+ } {
186
+ let obj = BufferUnitIndex.encode(config.data, {
187
+ type: BULK_TYPE,
188
+ });
189
+ return {
190
+ data: obj.blocks,
191
+ index: obj.index,
192
+ };
193
+ }
194
+
195
+ // Rebuild index completely from data reader
196
+ private static async rebuildIndexFromData(dataReader: Reader): Promise<Buffer> {
197
+ let data = await dataReader.read(0, await dataReader.getLength());
198
+ let dataBlocks = await BufferIndex.decodeAllBlocked(data);
199
+
200
+ // Build complete index from scratch
201
+ let indexStreamer = indexStreamerType.createStreamer();
202
+ let parts: Buffer[] = [encodeTypeHeader(STREAM_TYPE, Buffer.alloc(0))];
203
+
204
+ for (let dataBlock of dataBlocks) {
205
+ let rawIndex = UnitSet.encode([dataBlock]);
206
+ parts.push(indexStreamer.add(rawIndex));
207
+ parts.push(indexStreamer.finishBlock());
208
+ }
209
+
210
+ return Buffer.concat(parts);
211
+ }
212
+
213
+ public static async fixPartialIndex(config: {
214
+ index: Buffer;
215
+ dataReader: Reader;
216
+ }): Promise<Buffer> {
217
+ let type = config.index[0];
218
+ // Only streaming indexes can be partial — bulk indexes are written all at once.
219
+ if (type !== STREAM_TYPE) return config.index;
220
+
221
+ let { index, dataReader } = config;
222
+
223
+ let decoded = decodeTypeHeader(index);
224
+ if (!decoded) {
225
+ // Index header is corrupted, regenerate complete index from data
226
+ return await BufferIndex.rebuildIndexFromData(dataReader);
227
+ }
228
+ try {
229
+ let { headerContent, data: indexData } = decoded;
230
+
231
+ // Skip the header in the data reader
232
+ let headerBuf = await dataReader.read(0, 5);
233
+ if (headerBuf.length < 5) {
234
+ // Data reader header is corrupted, return empty index
235
+ return encodeTypeHeader(STREAM_TYPE, Buffer.alloc(0));
236
+ }
237
+ let headerSize = headerBuf.readInt32LE(1);
238
+ let totalHeaderSize = 1 + 4 + headerSize;
239
+ let dataWithoutHeaderReader = createOffsetReader(dataReader, totalHeaderSize);
240
+
241
+ // Use efficient block counting
242
+ let indexCount = await indexStreamerType.getBlockCount({
243
+ getLength: () => indexData.length,
244
+ read: (offset: number, length: number) => Promise.resolve(indexData.slice(offset, offset + length)),
245
+ });
246
+ let dataBlockCount = await dataStreamerType.getBlockCount(dataWithoutHeaderReader);
247
+
248
+ // We'll never really hit the case when the index is complete, but... if it is, then we should take advantage of this.
249
+ if (indexCount >= dataBlockCount) return config.index;
250
+
251
+ // Check if the index is clean (not corrupted)
252
+ let indexIsClean = await indexStreamerType.isClean({
253
+ getLength: () => indexData.length,
254
+ read: (offset: number, length: number) => Promise.resolve(indexData.slice(offset, offset + length)),
255
+ });
256
+
257
+ // This should be the most common case. Usually, we should have fully written the last index, but not have written the next index.
258
+ if (indexIsClean && indexCount === dataBlockCount - 1) {
259
+ // We can efficiently append just the missing blocks
260
+ let parts: Buffer[] = [config.index];
261
+
262
+ let blockCount = await dataStreamerType.getBlockCount(dataWithoutHeaderReader);
263
+
264
+ let blocks = await dataStreamerType.getBlockRange({
265
+ reader: dataWithoutHeaderReader,
266
+ startIndex: indexCount,
267
+ endIndex: blockCount,
268
+ });
269
+
270
+ let indexStreamer = indexStreamerType.createStreamer();
271
+ // Continue from where the index left off
272
+ for (let block of blocks) {
273
+ try {
274
+ let decompressedBlock = CompressedStream.decode(block);
275
+ let blockBuffers = await blockStreamerType.getAllBlocks(decompressedBlock);
276
+ let rawIndex = UnitSet.encode([blockBuffers]);
277
+ parts.push(indexStreamer.add(rawIndex));
278
+ parts.push(indexStreamer.finishBlock());
279
+ } catch (e) {
280
+ // Skip corrupted data block
281
+ continue;
282
+ }
283
+ }
284
+
285
+ return Buffer.concat(parts);
286
+ }
287
+ } catch (e) {
288
+ console.error(`Error fixing partial index. This SHOULDN'T error, but... we will just rebuild from the data and it should work: ${e}`);
289
+ }
290
+ return await BufferIndex.rebuildIndexFromData(dataReader);
291
+ }
292
+
293
+ @measureFnc
294
+ public static async find(config: {
295
+ index: Buffer;
296
+ dataReader: Reader;
297
+
298
+ params: SearchParams;
299
+
300
+ keepIterating: () => boolean;
301
+ onResult: (match: Buffer) => void;
302
+ }): Promise<{
303
+ blocksChecked: number;
304
+ blocksCheckedCompressedSize: number;
305
+ blocksCheckedDecompressedSize: number;
306
+
307
+ blocksWithErrors: string[];
308
+
309
+
310
+ isPending: boolean;
311
+ indexCount: number;
312
+ indexSize: number;
313
+ totalBlockCount: number;
314
+
315
+ blockSearchTime: number;
316
+ }> {
317
+ let { index, dataReader, params } = config;
318
+ let emptyResult = { blocksChecked: 0, blocksCheckedCompressedSize: 0, blocksCheckedDecompressedSize: 0, blocksWithErrors: [], isPending: false, indexCount: 0, totalBlockCount: 0, blockSearchTime: 0, indexSize: 0 };
319
+
320
+ // Handle empty or too small index buffer - rebuild from data
321
+ if (index.length === 0) {
322
+ index = await BufferIndex.rebuildIndexFromData(dataReader);
323
+ if (index.length === 0) {
324
+ return emptyResult;
325
+ }
326
+ }
327
+
328
+ let candidateCount = 0;
329
+
330
+ // Create the pattern matcher once with pre-calculated segments
331
+ const matchesPattern = createMatchesPattern(params.findBuffer, !!params.disableWildCards);
332
+
333
+ // Compute search units once — shared by both index types
334
+ let allSearchUnits = new Set<Unit>();
335
+ {
336
+ let segments = params.disableWildCards && [params.findBuffer] || splitOnWildcard(params.findBuffer).filter(s => s.length > 0);
337
+ for (let seg of segments) {
338
+ if (seg.length < 4) continue;
339
+ for (let ref of getAllUnits({ buffer: seg, bufferIndex: 0, block: 0 })) {
340
+ allSearchUnits.add(ref.unit);
341
+ }
342
+ }
343
+ if (allSearchUnits.size === 0) {
344
+ // Search pattern too short to use index, return empty results
345
+ return emptyResult;
346
+ }
347
+ }
348
+
349
+ let type = index[0];
350
+
351
+ if (type === STREAM_TYPE) {
352
+
353
+ // Fix partial index before processing
354
+ index = await BufferIndex.fixPartialIndex({ index, dataReader });
355
+
356
+ // NOTE: Ironically, the stream type is actually the least efficient to read. Because there's no central index, it means that even if we do try to lazily read it, every single read call would likely have to scan through most of the file to find that specific block. However, this is fine. The stream type is mostly just used for pending files, which shouldn't be that large. And we still do only read the blocks when we have at least one match
357
+
358
+ let decoded = decodeTypeHeader(index);
359
+ if (!decoded) {
360
+ // Index is too corrupted, return empty results
361
+ return emptyResult;
362
+ }
363
+ let { data: rawIndexData } = decoded;
364
+ let indexEntries = await indexStreamerType.getAllBlocks(rawIndexData);
365
+
366
+ const getDataBlocks = lazy(async (): Promise<Buffer[]> => {
367
+ // NOTE: While this is somewhat inefficient, the fact that all these blocks are individually compressed makes this reasonably fast.
368
+ let length = await dataReader.getLength();
369
+ let dataIn = await dataReader.read(0, length);
370
+ let decoded = decodeTypeHeader(dataIn);
371
+ if (!decoded) return [];
372
+ let { data } = decoded;
373
+ return await dataStreamerType.getAllBlocks(data);
374
+ });
375
+
376
+ let matchCount = 0;
377
+ let blocksChecked = 0;
378
+ let blocksCheckedCompressedSize = 0;
379
+ let blocksCheckedDecompressedSize = 0;
380
+ let blockSearchTime = 0;
381
+ let blocksWithErrors: string[] = [];
382
+
383
+ // Iterate newest-first so the caller gets the most recent matches first.
384
+ for (let i = indexEntries.length - 1; i >= 0; i--) {
385
+ if (matchCount >= params.limit || !config.keepIterating()) break;
386
+ const blockIndex = i;
387
+
388
+ // Each index entry is a UnitSet
389
+ let blockIndexData = indexEntries[i];
390
+
391
+ // Check if this block contains all search units
392
+ let hasAllUnits = true;
393
+ for (let unit of allSearchUnits) {
394
+ if (!UnitSet.has(blockIndexData, unit)) {
395
+ hasAllUnits = false;
396
+ break;
397
+ }
398
+ }
399
+
400
+ if (!hasAllUnits) continue;
401
+
402
+ const dataBlocks = await getDataBlocks();
403
+
404
+ let blockSearchTimeStart = Date.now();
405
+ // Load and scan this block
406
+ try {
407
+ let blockCompressed = dataBlocks[blockIndex];
408
+ if (!blockCompressed) {
409
+ throw new Error(`Not enough blocks in data, have ${dataBlocks.length}, expected ${blockIndex + 1}`);
410
+ }
411
+ blocksCheckedCompressedSize += blockCompressed.length;
412
+ let blockData = CompressedStream.decode(blockCompressed);
413
+ blocksCheckedDecompressedSize += blockData.length;
414
+ let buffers = await blockStreamerType.getAllBlocks(blockData);
415
+ blocksChecked++;
416
+
417
+ // Scan all buffers in this block
418
+ for (let bufferIndex = buffers.length - 1; bufferIndex >= 0; bufferIndex--) {
419
+ if (matchCount >= params.limit || !config.keepIterating()) break;
420
+
421
+ candidateCount++;
422
+ let buffer = buffers[bufferIndex];
423
+ if (matchesPattern(buffer)) {
424
+ config.onResult(buffer);
425
+ matchCount++;
426
+ }
427
+ }
428
+ } catch (e: any) {
429
+ // Skip corrupted block
430
+ blocksWithErrors.push(`(for block ${blockIndex + 1} / ${dataBlocks.length}) ${String(e?.stack || e)}`);
431
+ console.warn(`Error decompressing block for search: ${e.stack || e}`);
432
+ continue;
433
+ }
434
+ blockSearchTime += Date.now() - blockSearchTimeStart;
435
+ }
436
+ let indexSize = indexEntries.map(x => x.length).reduce((a, b) => a + b, 0);
437
+ return { blocksChecked, blocksCheckedCompressedSize, blocksCheckedDecompressedSize, blocksWithErrors, isPending: true, indexCount: indexEntries.length, indexSize, totalBlockCount: indexEntries.length, blockSearchTime };
438
+ } else if (type === BULK_TYPE) {
439
+ let results = await BufferUnitIndex.find({
440
+ params,
441
+ index,
442
+ reader: dataReader,
443
+ keepIterating: config.keepIterating,
444
+ onResult: config.onResult,
445
+ });
446
+ return {
447
+ blocksChecked: results.blocksChecked,
448
+ blocksCheckedCompressedSize: results.blocksCheckedCompressedSize,
449
+ blocksCheckedDecompressedSize: results.blocksCheckedDecompressedSize,
450
+ totalBlockCount: results.totalBlockCount,
451
+ blockSearchTime: results.blockSearchTime,
452
+ blocksWithErrors: results.blocksWithErrors,
453
+ isPending: false,
454
+ indexCount: 1,
455
+ indexSize: index.length,
456
+ };
457
+ } else {
458
+ throw new Error(`Unknown type in index file: ${type}`);
459
+ }
460
+ }
461
+ }