@soulcraft/brainy 3.8.2 → 3.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
```diff
@@ -1303,5 +1303,73 @@ export class OPFSStorage extends BaseStorage {
             nextCursor
         };
     }
+    /**
+     * Initialize counts from OPFS storage
+     */
+    async initializeCounts() {
+        try {
+            // Try to load existing counts from counts.json
+            const systemDir = await this.rootDir.getDirectoryHandle('system', { create: true });
+            const countsFile = await systemDir.getFileHandle('counts.json');
+            const file = await countsFile.getFile();
+            const data = await file.text();
+            const counts = JSON.parse(data);
+            // Restore counts from OPFS
+            this.entityCounts = new Map(Object.entries(counts.entityCounts || {}));
+            this.verbCounts = new Map(Object.entries(counts.verbCounts || {}));
+            this.totalNounCount = counts.totalNounCount || 0;
+            this.totalVerbCount = counts.totalVerbCount || 0;
+        }
+        catch (error) {
+            // If counts don't exist, initialize by scanning (one-time operation)
+            await this.initializeCountsFromScan();
+        }
+    }
+    /**
+     * Initialize counts by scanning OPFS (fallback for missing counts file)
+     */
+    async initializeCountsFromScan() {
+        try {
+            // Count nouns
+            let nounCount = 0;
+            for await (const [,] of this.nounsDir.entries()) {
+                nounCount++;
+            }
+            this.totalNounCount = nounCount;
+            // Count verbs
+            let verbCount = 0;
+            for await (const [,] of this.verbsDir.entries()) {
+                verbCount++;
+            }
+            this.totalVerbCount = verbCount;
+            // Save initial counts
+            await this.persistCounts();
+        }
+        catch (error) {
+            console.error('Error initializing counts from OPFS scan:', error);
+        }
+    }
+    /**
+     * Persist counts to OPFS storage
+     */
+    async persistCounts() {
+        try {
+            const systemDir = await this.rootDir.getDirectoryHandle('system', { create: true });
+            const countsFile = await systemDir.getFileHandle('counts.json', { create: true });
+            const writable = await countsFile.createWritable();
+            const counts = {
+                entityCounts: Object.fromEntries(this.entityCounts),
+                verbCounts: Object.fromEntries(this.verbCounts),
+                totalNounCount: this.totalNounCount,
+                totalVerbCount: this.totalVerbCount,
+                lastUpdated: new Date().toISOString()
+            };
+            await writable.write(JSON.stringify(counts));
+            await writable.close();
+        }
+        catch (error) {
+            console.error('Error persisting counts to OPFS:', error);
+        }
+    }
 }
 //# sourceMappingURL=opfsStorage.js.map
```
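
A note on the shape of `counts.json`: both storage adapters round-trip their `Map` fields through plain JSON objects. A minimal standalone sketch of that serialization (the payload fields come from the diff above; the helper functions are illustrative):

```typescript
// Illustrative round-trip of the counts payload persisted above.
// Maps serialize as plain objects via Object.fromEntries / Object.entries.
interface CountsPayload {
  entityCounts: Record<string, number>
  verbCounts: Record<string, number>
  totalNounCount: number
  totalVerbCount: number
  lastUpdated: string
}

function serializeCounts(
  entityCounts: Map<string, number>,
  verbCounts: Map<string, number>,
  totalNounCount: number,
  totalVerbCount: number
): string {
  const payload: CountsPayload = {
    entityCounts: Object.fromEntries(entityCounts),
    verbCounts: Object.fromEntries(verbCounts),
    totalNounCount,
    totalVerbCount,
    lastUpdated: new Date().toISOString()
  }
  return JSON.stringify(payload)
}

function deserializeCounts(data: string) {
  const counts = JSON.parse(data) as Partial<CountsPayload>
  return {
    entityCounts: new Map(Object.entries(counts.entityCounts ?? {})),
    verbCounts: new Map(Object.entries(counts.verbCounts ?? {})),
    totalNounCount: counts.totalNounCount ?? 0,
    totalVerbCount: counts.totalVerbCount ?? 0
  }
}
```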
```diff
@@ -72,6 +72,10 @@ export declare class S3CompatibleStorage extends BaseStorage {
     private backpressure;
     private nounWriteBuffer;
     private verbWriteBuffer;
+    private coordinator?;
+    private shardManager?;
+    private cacheSync?;
+    private readWriteSeparation?;
     private requestCoalescer;
     private highVolumeMode;
     private lastVolumeCheck;
@@ -106,6 +110,24 @@ export declare class S3CompatibleStorage extends BaseStorage {
      * Initialize the storage adapter
      */
     init(): Promise<void>;
+    /**
+     * Set distributed components for multi-node coordination
+     * Zero-config: Automatically optimizes based on components provided
+     */
+    setDistributedComponents(components: {
+        coordinator?: any;
+        shardManager?: any;
+        cacheSync?: any;
+        readWriteSeparation?: any;
+    }): void;
+    /**
+     * Get the S3 key for a noun, using sharding if available
+     */
+    private getNounKey;
+    /**
+     * Get the S3 key for a verb, using sharding if available
+     */
+    private getVerbKey;
     /**
      * Override base class method to detect S3-specific throttling errors
      */
@@ -495,4 +517,16 @@ export declare class S3CompatibleStorage extends BaseStorage {
         hasMore: boolean;
         nextCursor?: string;
     }>;
+    /**
+     * Initialize counts from S3 storage
+     */
+    protected initializeCounts(): Promise<void>;
+    /**
+     * Initialize counts by scanning S3 (fallback for missing counts file)
+     */
+    private initializeCountsFromScan;
+    /**
+     * Persist counts to S3 storage
+     */
+    protected persistCounts(): Promise<void>;
 }
```
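
Based on the declarations above, wiring the distributed components looks roughly like the sketch below. The component objects are stubs, since the diff types them as `any` and their concrete classes are not shown; the stub fields (`nodeId`, `config.shardCount`, `config.replicationFactor`) are the ones the implementation reads.

```typescript
// Hypothetical wiring sketch - the concrete component classes are not part
// of this diff, so inline stubs stand in for them.
declare const storage: S3CompatibleStorage

storage.setDistributedComponents({
  coordinator: { nodeId: 'node-1' },            // logged as the active node id
  shardManager: { config: { shardCount: 64 } }, // enables the shards/<id>/ key layout
  cacheSync: {},                                // enables cache synchronization
  readWriteSeparation: { config: { replicationFactor: 3 } }
})
```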
```diff
@@ -240,6 +240,49 @@ export class S3CompatibleStorage extends BaseStorage {
             throw new Error(`Failed to initialize ${this.serviceType} storage: ${error}`);
         }
     }
+    /**
+     * Set distributed components for multi-node coordination
+     * Zero-config: Automatically optimizes based on components provided
+     */
+    setDistributedComponents(components) {
+        this.coordinator = components.coordinator;
+        this.shardManager = components.shardManager;
+        this.cacheSync = components.cacheSync;
+        this.readWriteSeparation = components.readWriteSeparation;
+        // Auto-configure based on what's available
+        if (this.shardManager) {
+            console.log(`🎯 S3 Storage: Sharding enabled with ${this.shardManager.config?.shardCount || 64} shards`);
+        }
+        if (this.coordinator) {
+            console.log(`🤝 S3 Storage: Distributed coordination active (node: ${this.coordinator.nodeId})`);
+        }
+        if (this.cacheSync) {
+            console.log('🔄 S3 Storage: Cache synchronization enabled');
+        }
+        if (this.readWriteSeparation) {
+            console.log(`📖 S3 Storage: Read/write separation with ${this.readWriteSeparation.config?.replicationFactor || 3}x replication`);
+        }
+    }
+    /**
+     * Get the S3 key for a noun, using sharding if available
+     */
+    getNounKey(id) {
+        if (this.shardManager) {
+            const shardId = this.shardManager.getShardForKey(id);
+            return `shards/${shardId}/${this.nounPrefix}${id}.json`;
+        }
+        return `${this.nounPrefix}${id}.json`;
+    }
+    /**
+     * Get the S3 key for a verb, using sharding if available
+     */
+    getVerbKey(id) {
+        if (this.shardManager) {
+            const shardId = this.shardManager.getShardForKey(id);
+            return `shards/${shardId}/${this.verbPrefix}${id}.json`;
+        }
+        return `${this.verbPrefix}${id}.json`;
+    }
     /**
      * Override base class method to detect S3-specific throttling errors
      */
@@ -668,7 +711,8 @@ export class S3CompatibleStorage extends BaseStorage {
         };
         // Import the PutObjectCommand only when needed
         const { PutObjectCommand } = await import('@aws-sdk/client-s3');
-        const key = `${this.nounPrefix}${node.id}.json`;
+        // Use sharding if available
+        const key = this.getNounKey(node.id);
         const body = JSON.stringify(serializableNode, null, 2);
         this.logger.trace(`Saving to key: ${key}`);
         // Save the node to S3-compatible storage
@@ -1013,10 +1057,10 @@ export class S3CompatibleStorage extends BaseStorage {
         };
         // Import the PutObjectCommand only when needed
         const { PutObjectCommand } = await import('@aws-sdk/client-s3');
-        // Save the edge to S3-compatible storage
+        // Save the edge to S3-compatible storage using sharding if available
         await this.s3Client.send(new PutObjectCommand({
             Bucket: this.bucketName,
-            Key: `${this.verbPrefix}${edge.id}.json`,
+            Key: this.getVerbKey(edge.id),
             Body: JSON.stringify(serializableEdge, null, 2),
             ContentType: 'application/json'
         }));
@@ -2660,5 +2704,87 @@ export class S3CompatibleStorage extends BaseStorage {
             nextCursor: result.nextCursor
         };
     }
+    /**
+     * Initialize counts from S3 storage
+     */
+    async initializeCounts() {
+        const countsKey = `${this.systemPrefix}counts.json`;
+        try {
+            const { GetObjectCommand } = await import('@aws-sdk/client-s3');
+            // Try to load existing counts
+            const response = await this.s3Client.send(new GetObjectCommand({
+                Bucket: this.bucketName,
+                Key: countsKey
+            }));
+            if (response.Body) {
+                const data = await response.Body.transformToString();
+                const counts = JSON.parse(data);
+                // Restore counts from S3
+                this.entityCounts = new Map(Object.entries(counts.entityCounts || {}));
+                this.verbCounts = new Map(Object.entries(counts.verbCounts || {}));
+                this.totalNounCount = counts.totalNounCount || 0;
+                this.totalVerbCount = counts.totalVerbCount || 0;
+            }
+        }
+        catch (error) {
+            if (error.name !== 'NoSuchKey') {
+                console.error('Error loading counts from S3:', error);
+            }
+            // If counts don't exist, initialize by scanning (one-time operation)
+            await this.initializeCountsFromScan();
+        }
+    }
+    /**
+     * Initialize counts by scanning S3 (fallback for missing counts file)
+     */
+    async initializeCountsFromScan() {
+        // This is expensive but only happens once for legacy data
+        // In production, counts are maintained incrementally
+        try {
+            const { ListObjectsV2Command } = await import('@aws-sdk/client-s3');
+            // Count nouns
+            const nounResponse = await this.s3Client.send(new ListObjectsV2Command({
+                Bucket: this.bucketName,
+                Prefix: this.nounPrefix
+            }));
+            this.totalNounCount = nounResponse.Contents?.filter((obj) => obj.Key?.endsWith('.json')).length || 0;
+            // Count verbs
+            const verbResponse = await this.s3Client.send(new ListObjectsV2Command({
+                Bucket: this.bucketName,
+                Prefix: this.verbPrefix
+            }));
+            this.totalVerbCount = verbResponse.Contents?.filter((obj) => obj.Key?.endsWith('.json')).length || 0;
+            // Save initial counts
+            await this.persistCounts();
+        }
+        catch (error) {
+            console.error('Error initializing counts from S3 scan:', error);
+        }
+    }
+    /**
+     * Persist counts to S3 storage
+     */
+    async persistCounts() {
+        const countsKey = `${this.systemPrefix}counts.json`;
+        try {
+            const { PutObjectCommand } = await import('@aws-sdk/client-s3');
+            const counts = {
+                entityCounts: Object.fromEntries(this.entityCounts),
+                verbCounts: Object.fromEntries(this.verbCounts),
+                totalNounCount: this.totalNounCount,
+                totalVerbCount: this.totalVerbCount,
+                lastUpdated: new Date().toISOString()
+            };
+            await this.s3Client.send(new PutObjectCommand({
+                Bucket: this.bucketName,
+                Key: countsKey,
+                Body: JSON.stringify(counts),
+                ContentType: 'application/json'
+            }));
+        }
+        catch (error) {
+            console.error('Error persisting counts to S3:', error);
+        }
+    }
 }
 //# sourceMappingURL=s3CompatibleStorage.js.map
```
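
The practical effect of the new key scheme is easiest to see in isolation. A sketch under stated assumptions: the real `getShardForKey` is not shown in this diff, so a stable string hash stands in for it, and `'nouns/'` is a placeholder for `this.nounPrefix`.

```typescript
// Minimal stand-in for the shard manager: a stable hash of the id,
// mod shardCount. The real getShardForKey is not part of this diff.
function getShardForKey(id: string, shardCount = 64): number {
  let h = 0
  for (let i = 0; i < id.length; i++) {
    h = (h * 31 + id.charCodeAt(i)) >>> 0
  }
  return h % shardCount
}

function nounKey(id: string, nounPrefix = 'nouns/', sharded = true): string {
  return sharded
    ? `shards/${getShardForKey(id)}/${nounPrefix}${id}.json` // distributed layout
    : `${nounPrefix}${id}.json`                              // legacy flat layout
}

console.log(nounKey('abc123'))                   // e.g. shards/17/nouns/abc123.json
console.log(nounKey('abc123', 'nouns/', false))  // nouns/abc123.json
```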
```diff
@@ -337,14 +337,15 @@ export class BaseStorage extends BaseStorageAdapter {
         }
         // Check if the adapter has a paginated method for getting nouns
         if (typeof this.getNounsWithPagination === 'function') {
-            // Use the adapter's paginated method
+            // Use the adapter's paginated method - pass offset directly to adapter
             const result = await this.getNounsWithPagination({
                 limit,
+                offset, // Let the adapter handle offset for O(1) operation
                 cursor,
                 filter: options?.filter
             });
-            // Apply offset if needed (some adapters might not support offset)
-            const items = result.items.slice(offset);
+            // Don't slice here - the adapter should handle offset efficiently
+            const items = result.items;
             // CRITICAL SAFETY CHECK: Prevent infinite loops
             // If we have no items but hasMore is true, force hasMore to false
             // This prevents pagination bugs from causing infinite loops
```
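
The old code fetched a page of `limit` items and then dropped the first `offset` locally, so callers could receive short pages and the adapter's fetch work was wasted; the new code forwards `offset` to the adapter instead. A toy illustration of the short-page problem the slice caused:

```typescript
// Toy illustration: a fake adapter page of 10 items, with offset 4.
const page = Array.from({ length: 10 }, (_, i) => `noun-${i}`)
const offset = 4

// Old behavior: slice locally after fetching - only 6 items survive,
// even though the caller asked for a page of 10.
const sliced = page.slice(offset)
console.log(sliced.length) // 6

// New behavior: { limit, offset } goes to the adapter, which returns a
// full page already starting at the offset, so nothing is dropped here.
```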
```diff
@@ -6,7 +6,6 @@ import { HNSWNoun, Vector } from '../coreTypes.js';
 declare enum CompressionType {
     NONE = "none",
     GZIP = "gzip",
-    BROTLI = "brotli",
     QUANTIZATION = "quantization",
     HYBRID = "hybrid"
 }
@@ -74,14 +73,6 @@ export declare class ReadOnlyOptimizations {
      * GZIP decompression
      */
     private gzipDecompress;
-    /**
-     * Brotli compression (placeholder - similar to GZIP)
-     */
-    private brotliCompress;
-    /**
-     * Brotli decompression (placeholder)
-     */
-    private brotliDecompress;
     /**
      * Create prebuilt index segments for faster loading
      */
@@ -7,9 +7,9 @@ var CompressionType;
 (function (CompressionType) {
     CompressionType["NONE"] = "none";
     CompressionType["GZIP"] = "gzip";
-    CompressionType["BROTLI"] = "brotli";
     CompressionType["QUANTIZATION"] = "quantization";
     CompressionType["HYBRID"] = "hybrid";
+    // BROTLI removed - was not actually implemented
 })(CompressionType || (CompressionType = {}));
 // Vector quantization methods
 var QuantizationType;
@@ -67,10 +67,7 @@ export class ReadOnlyOptimizations {
                 const gzipBuffer = new Float32Array(vector).buffer;
                 compressedData = await this.gzipCompress(gzipBuffer.slice(0));
                 break;
-            case CompressionType.BROTLI:
-                const brotliBuffer = new Float32Array(vector).buffer;
-                compressedData = await this.brotliCompress(brotliBuffer.slice(0));
-                break;
+            // Brotli removed - was not implemented
             case CompressionType.HYBRID:
                 // First quantize, then compress
                 const quantized = await this.quantizeVector(vector, segmentId);
@@ -99,9 +96,7 @@ export class ReadOnlyOptimizations {
             case CompressionType.GZIP:
                 const gzipDecompressed = await this.gzipDecompress(compressedData);
                 return Array.from(new Float32Array(gzipDecompressed));
-            case CompressionType.BROTLI:
-                const brotliDecompressed = await this.brotliDecompress(compressedData);
-                return Array.from(new Float32Array(brotliDecompressed));
+            // Brotli removed - was not implemented
             case CompressionType.HYBRID:
                 const gzipStage = await this.gzipDecompress(compressedData);
                 return this.dequantizeVector(gzipStage, segmentId, originalDimension);
@@ -219,21 +214,7 @@ export class ReadOnlyOptimizations {
             return compressedData;
         }
     }
-    /**
-     * Brotli compression (placeholder - similar to GZIP)
-     */
-    async brotliCompress(data) {
-        // Would implement Brotli compression here
-        console.warn('Brotli compression not implemented, falling back to GZIP');
-        return this.gzipCompress(data);
-    }
-    /**
-     * Brotli decompression (placeholder)
-     */
-    async brotliDecompress(compressedData) {
-        console.warn('Brotli decompression not implemented, falling back to GZIP');
-        return this.gzipDecompress(compressedData);
-    }
+    // Brotli methods removed - were not implemented
     /**
      * Create prebuilt index segments for faster loading
      */
@@ -277,8 +258,7 @@ export class ReadOnlyOptimizations {
         switch (this.config.compression.metadataCompression) {
             case CompressionType.GZIP:
                 return this.gzipCompress(data.buffer.slice(0));
-            case CompressionType.BROTLI:
-                return this.brotliCompress(data.buffer.slice(0));
+            // Brotli removed - was not implemented
             default:
                 return data.buffer.slice(0);
         }
@@ -329,9 +309,7 @@ export class ReadOnlyOptimizations {
             case CompressionType.GZIP:
                 decompressed = await this.gzipDecompress(compressedData);
                 break;
-            case CompressionType.BROTLI:
-                decompressed = await this.brotliDecompress(compressedData);
-                break;
+            // Brotli removed - was not implemented
             default:
                 decompressed = compressedData;
                 break;
```
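
With Brotli gone, GZIP is the only byte-level codec left. The library's own `gzipCompress`/`gzipDecompress` bodies are not part of this diff; below is a minimal equivalent sketch using the web-standard `CompressionStream` API (global in modern browsers and Node 18+), mirroring how the GZIP case above round-trips a vector.

```typescript
// Illustrative gzip round-trip for a vector. The library's internal
// gzipCompress/gzipDecompress implementations are not shown in this diff.
async function gzipCompress(data: ArrayBuffer): Promise<ArrayBuffer> {
  const stream = new Blob([data]).stream().pipeThrough(new CompressionStream('gzip'))
  return await new Response(stream).arrayBuffer()
}

async function gzipDecompress(data: ArrayBuffer): Promise<ArrayBuffer> {
  const stream = new Blob([data]).stream().pipeThrough(new DecompressionStream('gzip'))
  return await new Response(stream).arrayBuffer()
}

// Round-trip demo matching the GZIP case in compressVector above.
async function demo(): Promise<number[]> {
  const vector = [0.1, 0.2, 0.3]
  const compressed = await gzipCompress(new Float32Array(vector).buffer.slice(0))
  return Array.from(new Float32Array(await gzipDecompress(compressed)))
}
```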
```diff
@@ -269,10 +269,25 @@ export interface BrainyConfig {
         ttl?: number;
     };
     augmentations?: Record<string, any>;
+    distributed?: {
+        enabled: boolean;
+        nodeId?: string;
+        nodes?: string[];
+        coordinatorUrl?: string;
+        shardCount?: number;
+        replicationFactor?: number;
+        consensus?: 'raft' | 'none';
+        transport?: 'tcp' | 'http' | 'udp';
+    };
     warmup?: boolean;
     realtime?: boolean;
     multiTenancy?: boolean;
     telemetry?: boolean;
+    disableAutoRebuild?: boolean;
+    disableMetrics?: boolean;
+    disableAutoOptimize?: boolean;
+    batchWrites?: boolean;
+    maxConcurrentOperations?: number;
     verbose?: boolean;
     silent?: boolean;
 }
```
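
Taken together with `setDistributedComponents` above, the new `BrainyConfig` surface might be exercised like this (field names come from the interface; all values are illustrative):

```typescript
// Illustrative BrainyConfig fragment exercising the new fields.
const config = {
  distributed: {
    enabled: true,
    nodeId: 'node-1',                          // illustrative value
    nodes: ['10.0.0.1:7000', '10.0.0.2:7000'], // illustrative addresses
    shardCount: 64,
    replicationFactor: 3,
    consensus: 'raft' as const,
    transport: 'tcp' as const
  },
  // New top-level tuning flags from this release
  disableAutoRebuild: true,
  disableMetrics: false,
  disableAutoOptimize: false,
  batchWrites: true,
  maxConcurrentOperations: 32
}
```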
```diff
@@ -3,8 +3,6 @@
  * Complete rewrite to eliminate TensorFlow.js and use ONNX-based models
  */
 import { isBrowser } from './environment.js';
-import { join } from 'node:path';
-import { existsSync } from 'node:fs';
 // @ts-ignore - Transformers.js is now the primary embedding library
 import { pipeline, env } from '@huggingface/transformers';
 // CRITICAL: Disable ONNX memory arena to prevent 4-8GB allocation
@@ -281,15 +279,23 @@ export class TransformerEmbedding {
         }
         try {
             // For Q8 models, we need to explicitly specify the model file
-            if (actualType === 'q8') {
-                // Check if quantized model exists
-                const modelPath = join(cacheDir, this.options.model, 'onnx', 'model_quantized.onnx');
-                if (existsSync(modelPath)) {
-                    this.logger('log', '✅ Q8 model found locally');
+            if (actualType === 'q8' && !isBrowser()) {
+                try {
+                    // Check if quantized model exists (Node.js only)
+                    const { join } = await import('node:path');
+                    const { existsSync } = await import('node:fs');
+                    const modelPath = join(cacheDir, this.options.model, 'onnx', 'model_quantized.onnx');
+                    if (existsSync(modelPath)) {
+                        this.logger('log', '✅ Q8 model found locally');
+                    }
+                    else {
+                        this.logger('warn', '⚠️ Q8 model not found');
+                        actualType = 'q8'; // Always Q8
+                    }
                 }
-                else {
-                    this.logger('warn', '⚠️ Q8 model not found');
-                    actualType = 'q8'; // Always Q8
+                catch (error) {
+                    // Skip model path check in browser or if imports fail
+                    this.logger('log', '🌐 Skipping local model check in browser environment');
                 }
             }
             this.extractor = await pipeline('feature-extraction', this.options.model, pipelineOptions);
```
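
The pattern behind this change, static `node:path`/`node:fs` imports replaced by dynamic imports behind an environment check, is what keeps browser bundles from pulling in Node built-ins. A distilled sketch of the pattern (the path segments follow the diff; the wrapper function itself is illustrative):

```typescript
// Distilled browser-safe loading pattern: Node-only modules are imported
// lazily, and only outside the browser.
declare function isBrowser(): boolean

async function q8ModelExistsLocally(cacheDir: string, model: string): Promise<boolean> {
  if (isBrowser()) return false // never statically depend on node:fs / node:path
  try {
    const { join } = await import('node:path')
    const { existsSync } = await import('node:fs')
    return existsSync(join(cacheDir, model, 'onnx', 'model_quantized.onnx'))
  } catch {
    return false // import failed (e.g. stubbed out by a bundler); treat as not found
  }
}
```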
```diff
@@ -68,6 +68,11 @@ export declare class MetadataIndexManager {
     private totalEntitiesByType;
     private unifiedCache;
     constructor(storage: StorageAdapter, config?: MetadataIndexConfig);
+    /**
+     * Lazy load entity counts from storage statistics (O(1) operation)
+     * This avoids rebuilding the entire index on startup
+     */
+    private lazyLoadCounts;
     /**
      * Get index key for field and value
      */
@@ -45,6 +45,30 @@ export class MetadataIndexManager {
         });
         // Get global unified cache for coordinated memory management
         this.unifiedCache = getGlobalCache();
+        // Lazy load counts from storage statistics on first access
+        this.lazyLoadCounts();
+    }
+    /**
+     * Lazy load entity counts from storage statistics (O(1) operation)
+     * This avoids rebuilding the entire index on startup
+     */
+    async lazyLoadCounts() {
+        try {
+            // Get statistics from storage (should be O(1) with our FileSystemStorage improvements)
+            const stats = await this.storage.getStatistics();
+            if (stats && stats.nounCount) {
+                // Populate entity counts from storage statistics
+                for (const [type, count] of Object.entries(stats.nounCount)) {
+                    if (typeof count === 'number' && count > 0) {
+                        this.totalEntitiesByType.set(type, count);
+                    }
+                }
+            }
+        }
+        catch (error) {
+            // Silently fail - counts will be populated as entities are added
+            // This maintains zero-configuration principle
+        }
     }
     /**
      * Get index key for field and value
```
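
Note that the constructor calls `lazyLoadCounts()` without awaiting it, so startup stays O(1) and any failure is swallowed. A standalone sketch of the same seeding logic (the `getStatistics` result shape is inferred from the `stats.nounCount` loop above):

```typescript
// Sketch of seeding per-type entity counts from storage statistics.
// The stats shape is inferred from the Object.entries(stats.nounCount) loop above.
interface StorageStats { nounCount?: Record<string, number> }

async function seedCounts(
  getStatistics: () => Promise<StorageStats>,
  totalEntitiesByType: Map<string, number>
): Promise<void> {
  try {
    const stats = await getStatistics()
    for (const [type, count] of Object.entries(stats?.nounCount ?? {})) {
      if (typeof count === 'number' && count > 0) {
        totalEntitiesByType.set(type, count) // seed without scanning entities
      }
    }
  } catch {
    // Swallow errors: counts fill in incrementally as entities are added.
  }
}
```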
```diff
@@ -0,0 +1,53 @@
+/**
+ * Universal Mutex Implementation for Thread-Safe Operations
+ * Provides consistent locking across all storage adapters
+ * Critical for preventing race conditions in count operations
+ */
+export interface MutexInterface {
+    acquire(key: string, timeout?: number): Promise<() => void>;
+    runExclusive<T>(key: string, fn: () => Promise<T>, timeout?: number): Promise<T>;
+    isLocked(key: string): boolean;
+}
+/**
+ * In-memory mutex for single-process scenarios
+ * Used by MemoryStorage and as fallback for other adapters
+ */
+export declare class InMemoryMutex implements MutexInterface {
+    private locks;
+    acquire(key: string, timeout?: number): Promise<() => void>;
+    private release;
+    runExclusive<T>(key: string, fn: () => Promise<T>, timeout?: number): Promise<T>;
+    isLocked(key: string): boolean;
+}
+/**
+ * File-based mutex for multi-process scenarios (Node.js)
+ * Uses atomic file operations to prevent TOCTOU races
+ */
+export declare class FileMutex implements MutexInterface {
+    private fs;
+    private path;
+    private lockDir;
+    private processLocks;
+    private lockTimers;
+    constructor(lockDir: string);
+    acquire(key: string, timeout?: number): Promise<() => void>;
+    private release;
+    runExclusive<T>(key: string, fn: () => Promise<T>, timeout?: number): Promise<T>;
+    isLocked(key: string): boolean;
+    /**
+     * Clean up all locks held by this process
+     */
+    cleanup(): Promise<void>;
+}
+/**
+ * Factory to create appropriate mutex for the environment
+ */
+export declare function createMutex(options?: {
+    type?: 'memory' | 'file';
+    lockDir?: string;
+}): MutexInterface;
+export declare function getGlobalMutex(): MutexInterface;
+/**
+ * Cleanup function for graceful shutdown
+ */
+export declare function cleanupMutexes(): Promise<void>;
```
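
From the declarations alone, the intended usage is to wrap read-modify-write count updates in `runExclusive`. A usage sketch against `MutexInterface` (the counter and lock key are hypothetical; the timeout unit is assumed to be milliseconds):

```typescript
// Usage sketch against MutexInterface. getGlobalMutex is declared in the
// new module above, so no import path is assumed here.
declare function getGlobalMutex(): MutexInterface

let totalNounCount = 0 // hypothetical shared counter

async function incrementNounCount(): Promise<number> {
  // runExclusive serializes the read-modify-write so concurrent callers
  // cannot interleave and lose increments.
  return getGlobalMutex().runExclusive('noun-count', async () => {
    totalNounCount += 1
    return totalNounCount
  }, 5000) // optional timeout (unit assumed to be ms)
}
```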