@soulcraft/brainy 4.9.1 → 4.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1332,44 +1332,69 @@ export class AzureBlobStorage extends BaseStorage {
   */
  async saveHNSWData(nounId, hnswData) {
  await this.ensureInitialized();
- try {
- // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
- const shard = getShardIdFromUuid(nounId);
- const key = `entities/nouns/hnsw/${shard}/${nounId}.json`;
- const blockBlobClient = this.containerClient.getBlockBlobClient(key);
+ // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
+ // Previous implementation overwrote the entire file, destroying vector data
+ // Now we READ the existing node, UPDATE only connections/level, then WRITE back the complete node
+ // CRITICAL FIX (v4.10.1): Optimistic locking with ETags to prevent race conditions
+ // Uses Azure Blob ETags with ifMatch preconditions - retries with exponential backoff on conflicts
+ // Prevents data corruption when multiple entities connect to same neighbor simultaneously
+ const shard = getShardIdFromUuid(nounId);
+ const key = `entities/nouns/hnsw/${shard}/${nounId}.json`;
+ const blockBlobClient = this.containerClient.getBlockBlobClient(key);
+ const maxRetries = 5;
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
  try {
- // Read existing node data
- const downloadResponse = await blockBlobClient.download(0);
- const existingData = await this.streamToBuffer(downloadResponse.readableStreamBody);
- const existingNode = JSON.parse(existingData.toString());
+ // Get current ETag and data
+ let currentETag;
+ let existingNode = {};
+ try {
+ const downloadResponse = await blockBlobClient.download(0);
+ const existingData = await this.streamToBuffer(downloadResponse.readableStreamBody);
+ existingNode = JSON.parse(existingData.toString());
+ currentETag = downloadResponse.etag;
+ }
+ catch (error) {
+ // File doesn't exist yet - will create new
+ if (error.statusCode !== 404 && error.code !== 'BlobNotFound') {
+ throw error;
+ }
+ }
  // Preserve id and vector, update only HNSW graph metadata
  const updatedNode = {
- ...existingNode,
+ ...existingNode, // Preserve all existing fields (id, vector, etc.)
  level: hnswData.level,
  connections: hnswData.connections
  };
  const content = JSON.stringify(updatedNode, null, 2);
+ // ATOMIC WRITE: Use ETag precondition
+ // If currentETag exists, only write if ETag matches (no concurrent modification)
+ // If no ETag, only write if blob doesn't exist (ifNoneMatch: *)
  await blockBlobClient.upload(content, content.length, {
- blobHTTPHeaders: { blobContentType: 'application/json' }
+ blobHTTPHeaders: { blobContentType: 'application/json' },
+ conditions: currentETag
+ ? { ifMatch: currentETag }
+ : { ifNoneMatch: '*' } // Only create if doesn't exist
  });
+ // Success! Exit retry loop
+ return;
  }
  catch (error) {
- // If node doesn't exist yet, create it with just HNSW data
- if (error.statusCode === 404 || error.code === 'BlobNotFound') {
- const content = JSON.stringify(hnswData, null, 2);
- await blockBlobClient.upload(content, content.length, {
- blobHTTPHeaders: { blobContentType: 'application/json' }
- });
- }
- else {
- throw error;
+ // Precondition failed - concurrent modification detected
+ if (error.statusCode === 412 || error.code === 'ConditionNotMet') {
+ if (attempt === maxRetries - 1) {
+ this.logger.error(`Max retries (${maxRetries}) exceeded for ${nounId} - concurrent modification conflict`);
+ throw new Error(`Failed to save HNSW data for ${nounId}: max retries exceeded due to concurrent modifications`);
+ }
+ // Exponential backoff: 50ms, 100ms, 200ms, 400ms, 800ms
+ const backoffMs = 50 * Math.pow(2, attempt);
+ await new Promise(resolve => setTimeout(resolve, backoffMs));
+ continue;
  }
+ // Other error - rethrow
+ this.logger.error(`Failed to save HNSW data for ${nounId}:`, error);
+ throw new Error(`Failed to save HNSW data for ${nounId}: ${error}`);
  }
  }
- catch (error) {
- this.logger.error(`Failed to save HNSW data for ${nounId}:`, error);
- throw new Error(`Failed to save HNSW data for ${nounId}: ${error}`);
- }
  }
  /**
  * Get HNSW graph data for a noun
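The retry loop above follows a general optimistic-concurrency shape: read a version token, merge, write conditionally, back off on conflict. A minimal standalone sketch of that shape, with hypothetical read()/writeIf() helpers standing in for the blob client calls (not part of the package):

// Sketch of optimistic locking, assuming two hypothetical storage primitives:
// read() resolves to { data, version } (or null when the object is missing) and
// writeIf() performs a conditional write that rejects with ConflictError when the
// version token (ETag / generation) no longer matches what was read.
class ConflictError extends Error {}

interface Versioned<T> { data: T; version: string }

async function updateWithRetry<T>(
  read: () => Promise<Versioned<T> | null>,
  writeIf: (next: T, expectedVersion: string | null) => Promise<void>,
  merge: (current: T | null) => T,
  maxRetries = 5
): Promise<void> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    const current = await read();                            // null => create case
    const next = merge(current ? current.data : null);       // preserve existing fields
    try {
      await writeIf(next, current ? current.version : null); // conditional write
      return;                                                // nobody wrote in between
    } catch (error) {
      if (!(error instanceof ConflictError)) throw error;    // only conflicts are retried
      await new Promise(r => setTimeout(r, 50 * 2 ** attempt)); // 50, 100, 200, 400, 800 ms
    }
  }
  throw new Error('max retries exceeded due to concurrent modifications');
}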
@@ -1394,20 +1419,54 @@ export class AzureBlobStorage extends BaseStorage {
  }
  /**
  * Save HNSW system data (entry point, max level)
+ *
+ * CRITICAL FIX (v4.10.1): Optimistic locking with ETags to prevent race conditions
  */
  async saveHNSWSystem(systemData) {
  await this.ensureInitialized();
- try {
- const key = `${this.systemPrefix}hnsw-system.json`;
- const blockBlobClient = this.containerClient.getBlockBlobClient(key);
- const content = JSON.stringify(systemData, null, 2);
- await blockBlobClient.upload(content, content.length, {
- blobHTTPHeaders: { blobContentType: 'application/json' }
- });
- }
- catch (error) {
- this.logger.error('Failed to save HNSW system data:', error);
- throw new Error(`Failed to save HNSW system data: ${error}`);
+ const key = `${this.systemPrefix}hnsw-system.json`;
+ const blockBlobClient = this.containerClient.getBlockBlobClient(key);
+ const maxRetries = 5;
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
+ try {
+ // Get current ETag
+ let currentETag;
+ try {
+ const properties = await blockBlobClient.getProperties();
+ currentETag = properties.etag;
+ }
+ catch (error) {
+ // File doesn't exist yet
+ if (error.statusCode !== 404 && error.code !== 'BlobNotFound') {
+ throw error;
+ }
+ }
+ const content = JSON.stringify(systemData, null, 2);
+ // ATOMIC WRITE: Use ETag precondition
+ await blockBlobClient.upload(content, content.length, {
+ blobHTTPHeaders: { blobContentType: 'application/json' },
+ conditions: currentETag
+ ? { ifMatch: currentETag }
+ : { ifNoneMatch: '*' }
+ });
+ // Success!
+ return;
+ }
+ catch (error) {
+ // Precondition failed - concurrent modification
+ if (error.statusCode === 412 || error.code === 'ConditionNotMet') {
+ if (attempt === maxRetries - 1) {
+ this.logger.error(`Max retries (${maxRetries}) exceeded for HNSW system data`);
+ throw new Error('Failed to save HNSW system data: max retries exceeded due to concurrent modifications');
+ }
+ const backoffMs = 50 * Math.pow(2, attempt);
+ await new Promise(resolve => setTimeout(resolve, backoffMs));
+ continue;
+ }
+ // Other error - rethrow
+ this.logger.error('Failed to save HNSW system data:', error);
+ throw new Error(`Failed to save HNSW system data: ${error}`);
+ }
  }
  }
  /**
@@ -391,6 +391,8 @@ export declare class FileSystemStorage extends BaseStorage {
  } | null>;
  /**
  * Save HNSW system data (entry point, max level)
+ *
+ * CRITICAL FIX (v4.10.1): Atomic write to prevent race conditions during concurrent updates
  */
  saveHNSWSystem(systemData: {
  entryPointId: string | null;
@@ -2177,30 +2177,48 @@ export class FileSystemStorage extends BaseStorage {
  // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
  // Previous implementation overwrote the entire file, destroying vector data
  // Now we READ the existing node, UPDATE only connections/level, then WRITE back the complete node
+ // CRITICAL FIX (v4.10.1): Atomic write to prevent race conditions during concurrent HNSW updates
+ // Uses temp file + atomic rename strategy (POSIX guarantees rename() atomicity)
+ // Prevents data corruption when multiple entities connect to same neighbor simultaneously
  const filePath = this.getNodePath(nounId);
+ const tempPath = `${filePath}.tmp.${Date.now()}.${Math.random().toString(36).substring(2)}`;
  try {
- // Read existing node data
- const existingData = await fs.promises.readFile(filePath, 'utf-8');
- const existingNode = JSON.parse(existingData);
+ // Read existing node data (if exists)
+ let existingNode = {};
+ try {
+ const existingData = await fs.promises.readFile(filePath, 'utf-8');
+ existingNode = JSON.parse(existingData);
+ }
+ catch (error) {
+ // File doesn't exist yet - will create new
+ if (error.code !== 'ENOENT') {
+ throw error;
+ }
+ }
  // Preserve id and vector, update only HNSW graph metadata
  const updatedNode = {
  ...existingNode, // Preserve all existing fields (id, vector, etc.)
  level: hnswData.level,
  connections: hnswData.connections
  };
- // Write back the COMPLETE node with updated HNSW data
- await fs.promises.writeFile(filePath, JSON.stringify(updatedNode, null, 2));
+ // ATOMIC WRITE SEQUENCE:
+ // 1. Write to temp file
+ await this.ensureDirectoryExists(path.dirname(tempPath));
+ await fs.promises.writeFile(tempPath, JSON.stringify(updatedNode, null, 2));
+ // 2. Atomic rename temp → final (POSIX atomicity guarantee)
+ // This operation is guaranteed atomic by POSIX - either succeeds completely or fails
+ // Multiple concurrent renames will serialize at the kernel level
+ await fs.promises.rename(tempPath, filePath);
  }
  catch (error) {
- // If node doesn't exist yet, create it with just HNSW data
- // This should only happen during initial node creation
- if (error.code === 'ENOENT') {
- await this.ensureDirectoryExists(path.dirname(filePath));
- await fs.promises.writeFile(filePath, JSON.stringify(hnswData, null, 2));
+ // Clean up temp file on any error
+ try {
+ await fs.promises.unlink(tempPath);
  }
- else {
- throw error;
+ catch (cleanupError) {
+ // Ignore cleanup errors - temp file may not exist
  }
+ throw error;
  }
  }
  /**
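The filesystem changes above lean on the write-to-temp-then-rename idiom. A minimal standalone sketch of that idiom as a helper (writeJsonAtomic is a hypothetical name, not part of the package); the atomicity guarantee assumes the temp file and the target live on the same filesystem:

import * as fs from 'node:fs/promises';
import * as path from 'node:path';

// Write JSON so readers never observe a half-written file: write to a unique
// temp path in the same directory, then rename over the final path.
// rename() replaces the target atomically on POSIX filesystems.
async function writeJsonAtomic(filePath: string, value: unknown): Promise<void> {
  const tempPath = `${filePath}.tmp.${Date.now()}.${Math.random().toString(36).slice(2)}`;
  await fs.mkdir(path.dirname(filePath), { recursive: true });
  try {
    await fs.writeFile(tempPath, JSON.stringify(value, null, 2));
    await fs.rename(tempPath, filePath); // readers see the old file or the new one, never a mix
  } catch (error) {
    await fs.unlink(tempPath).catch(() => {}); // best-effort cleanup; temp file may not exist
    throw error;
  }
}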
@@ -2223,11 +2241,30 @@ export class FileSystemStorage extends BaseStorage {
  }
  /**
  * Save HNSW system data (entry point, max level)
+ *
+ * CRITICAL FIX (v4.10.1): Atomic write to prevent race conditions during concurrent updates
  */
  async saveHNSWSystem(systemData) {
  await this.ensureInitialized();
  const filePath = path.join(this.systemDir, 'hnsw-system.json');
- await fs.promises.writeFile(filePath, JSON.stringify(systemData, null, 2));
+ const tempPath = `${filePath}.tmp.${Date.now()}.${Math.random().toString(36).substring(2)}`;
+ try {
+ // Write to temp file
+ await this.ensureDirectoryExists(path.dirname(tempPath));
+ await fs.promises.writeFile(tempPath, JSON.stringify(systemData, null, 2));
+ // Atomic rename temp → final (POSIX atomicity guarantee)
+ await fs.promises.rename(tempPath, filePath);
+ }
+ catch (error) {
+ // Clean up temp file on any error
+ try {
+ await fs.promises.unlink(tempPath);
+ }
+ catch (cleanupError) {
+ // Ignore cleanup errors
+ }
+ throw error;
+ }
  }
  /**
  * Get HNSW system data
@@ -381,6 +381,8 @@ export declare class GcsStorage extends BaseStorage {
  /**
  * Save HNSW system data (entry point, max level)
  * Storage path: system/hnsw-system.json
+ *
+ * CRITICAL FIX (v4.10.1): Optimistic locking with generation numbers to prevent race conditions
  */
  saveHNSWSystem(systemData: {
  entryPointId: string | null;
@@ -1487,47 +1487,72 @@ export class GcsStorage extends BaseStorage {
  */
  async saveHNSWData(nounId, hnswData) {
  await this.ensureInitialized();
- try {
- // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
- // Previous implementation overwrote the entire file, destroying vector data
- // Now we READ the existing node, UPDATE only connections/level, then WRITE back the complete node
- const shard = getShardIdFromUuid(nounId);
- const key = `entities/nouns/hnsw/${shard}/${nounId}.json`;
- const file = this.bucket.file(key);
+ // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
+ // Previous implementation overwrote the entire file, destroying vector data
+ // Now we READ the existing node, UPDATE only connections/level, then WRITE back the complete node
+ // CRITICAL FIX (v4.10.1): Optimistic locking with generation numbers to prevent race conditions
+ // Uses GCS generation preconditions - retries with exponential backoff on conflicts
+ // Prevents data corruption when multiple entities connect to same neighbor simultaneously
+ const shard = getShardIdFromUuid(nounId);
+ const key = `entities/nouns/hnsw/${shard}/${nounId}.json`;
+ const file = this.bucket.file(key);
+ const maxRetries = 5;
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
  try {
- // Read existing node data
- const [existingData] = await file.download();
- const existingNode = JSON.parse(existingData.toString());
+ // Get current generation and data
+ let currentGeneration;
+ let existingNode = {};
+ try {
+ // Download file and get metadata in parallel
+ const [data, metadata] = await Promise.all([
+ file.download(),
+ file.getMetadata()
+ ]);
+ existingNode = JSON.parse(data[0].toString('utf-8'));
+ currentGeneration = metadata[0].generation?.toString();
+ }
+ catch (error) {
+ // File doesn't exist yet - will create new
+ if (error.code !== 404) {
+ throw error;
+ }
+ }
  // Preserve id and vector, update only HNSW graph metadata
  const updatedNode = {
  ...existingNode, // Preserve all existing fields (id, vector, etc.)
  level: hnswData.level,
  connections: hnswData.connections
  };
- // Write back the COMPLETE node with updated HNSW data
+ // ATOMIC WRITE: Use generation precondition
+ // If currentGeneration exists, only write if generation matches (no concurrent modification)
+ // If no generation, only write if file doesn't exist (ifGenerationMatch: 0)
  await file.save(JSON.stringify(updatedNode, null, 2), {
  contentType: 'application/json',
- resumable: false
+ resumable: false,
+ preconditionOpts: currentGeneration
+ ? { ifGenerationMatch: currentGeneration }
+ : { ifGenerationMatch: '0' } // Only create if doesn't exist
  });
+ // Success! Exit retry loop
+ return;
  }
  catch (error) {
- // If node doesn't exist yet, create it with just HNSW data
- // This should only happen during initial node creation
- if (error.code === 404) {
- await file.save(JSON.stringify(hnswData, null, 2), {
- contentType: 'application/json',
- resumable: false
- });
- }
- else {
- throw error;
+ // Precondition failed (412) - concurrent modification detected
+ if (error.code === 412) {
+ if (attempt === maxRetries - 1) {
+ this.logger.error(`Max retries (${maxRetries}) exceeded for ${nounId} - concurrent modification conflict`);
+ throw new Error(`Failed to save HNSW data for ${nounId}: max retries exceeded due to concurrent modifications`);
+ }
+ // Exponential backoff: 50ms, 100ms, 200ms, 400ms, 800ms
+ const backoffMs = 50 * Math.pow(2, attempt);
+ await new Promise(resolve => setTimeout(resolve, backoffMs));
+ continue;
  }
+ // Other error - rethrow
+ this.logger.error(`Failed to save HNSW data for ${nounId}:`, error);
+ throw new Error(`Failed to save HNSW data for ${nounId}: ${error}`);
  }
  }
- catch (error) {
- this.logger.error(`Failed to save HNSW data for ${nounId}:`, error);
- throw new Error(`Failed to save HNSW data for ${nounId}: ${error}`);
- }
  }
  /**
  * Get HNSW graph data for a noun
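For context, the same preconditionOpts mechanism the hunk above relies on also expresses "create only if absent". A minimal sketch against the @google-cloud/storage API; the bucket name and the payload shape are placeholders, not package code:

import { Storage } from '@google-cloud/storage';

// Create the system file only if it does not exist yet.
// ifGenerationMatch: 0 asks GCS to reject the write with HTTP 412
// if a live generation of the object already exists.
async function createHnswSystemIfAbsent(): Promise<void> {
  const file = new Storage().bucket('example-bucket').file('system/hnsw-system.json');
  await file.save(JSON.stringify({ entryPointId: null }, null, 2), {
    contentType: 'application/json',
    resumable: false,
    preconditionOpts: { ifGenerationMatch: 0 }
  });
}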
@@ -1553,20 +1578,54 @@ export class GcsStorage extends BaseStorage {
  /**
  * Save HNSW system data (entry point, max level)
  * Storage path: system/hnsw-system.json
+ *
+ * CRITICAL FIX (v4.10.1): Optimistic locking with generation numbers to prevent race conditions
  */
  async saveHNSWSystem(systemData) {
  await this.ensureInitialized();
- try {
- const key = `${this.systemPrefix}hnsw-system.json`;
- const file = this.bucket.file(key);
- await file.save(JSON.stringify(systemData, null, 2), {
- contentType: 'application/json',
- resumable: false
- });
- }
- catch (error) {
- this.logger.error('Failed to save HNSW system data:', error);
- throw new Error(`Failed to save HNSW system data: ${error}`);
+ const key = `${this.systemPrefix}hnsw-system.json`;
+ const file = this.bucket.file(key);
+ const maxRetries = 5;
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
+ try {
+ // Get current generation
+ let currentGeneration;
+ try {
+ const [metadata] = await file.getMetadata();
+ currentGeneration = metadata.generation?.toString();
+ }
+ catch (error) {
+ // File doesn't exist yet
+ if (error.code !== 404) {
+ throw error;
+ }
+ }
+ // ATOMIC WRITE: Use generation precondition
+ await file.save(JSON.stringify(systemData, null, 2), {
+ contentType: 'application/json',
+ resumable: false,
+ preconditionOpts: currentGeneration
+ ? { ifGenerationMatch: currentGeneration }
+ : { ifGenerationMatch: '0' }
+ });
+ // Success!
+ return;
+ }
+ catch (error) {
+ // Precondition failed - concurrent modification
+ if (error.code === 412) {
+ if (attempt === maxRetries - 1) {
+ this.logger.error(`Max retries (${maxRetries}) exceeded for HNSW system data`);
+ throw new Error('Failed to save HNSW system data: max retries exceeded due to concurrent modifications');
+ }
+ const backoffMs = 50 * Math.pow(2, attempt);
+ await new Promise(resolve => setTimeout(resolve, backoffMs));
+ continue;
+ }
+ // Other error - rethrow
+ this.logger.error('Failed to save HNSW system data:', error);
+ throw new Error(`Failed to save HNSW system data: ${error}`);
+ }
  }
  }
  /**
@@ -192,8 +192,13 @@ export declare class MemoryStorage extends BaseStorage {
  * Get vector for a noun
  */
  getNounVector(id: string): Promise<number[] | null>;
+ private hnswLocks;
  /**
  * Save HNSW graph data for a noun
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions during concurrent HNSW updates
+ * Even in-memory operations can race due to async/await interleaving
+ * Prevents data corruption when multiple entities connect to same neighbor simultaneously
  */
  saveHNSWData(nounId: string, hnswData: {
  level: number;
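To make the async-interleaving hazard described in the comment above concrete, here is a small standalone sketch (not package code) in which two unserialized read-modify-write calls on the same record both read before either writes, so one update is silently lost:

type Node = { connections: string[] };
const store = new Map<string, Node>([['n1', { connections: [] }]]);

// Unserialized read–modify–write: the await between the read and the write is the race window.
async function addConnection(id: string, neighbor: string): Promise<void> {
  const current = store.get(id) ?? { connections: [] };               // read
  await new Promise(r => setTimeout(r, 0));                           // yield, e.g. an I/O hop
  store.set(id, { connections: [...current.connections, neighbor] }); // write a stale merge
}

async function main(): Promise<void> {
  await Promise.all([addConnection('n1', 'a'), addConnection('n1', 'b')]);
  // Both calls read the empty node before either wrote, so one edge is dropped:
  console.log(store.get('n1')); // logs { connections: ['b'] } - the 'a' edge was overwritten
}
main();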
@@ -208,6 +213,8 @@ export declare class MemoryStorage extends BaseStorage {
  } | null>;
  /**
  * Save HNSW system data (entry point, max level)
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions
  */
  saveHNSWSystem(systemData: {
  entryPointId: string | null;
@@ -28,6 +28,9 @@ export class MemoryStorage extends BaseStorage {
  this.statistics = null;
  // Unified object store for primitive operations (replaces metadata, nounMetadata, verbMetadata)
  this.objectStore = new Map();
+ // CRITICAL FIX (v4.10.1): Mutex locks for HNSW concurrency control
+ // Even in-memory operations need serialization to prevent async race conditions
+ this.hnswLocks = new Map();
  }
  /**
  * Initialize the storage adapter
@@ -668,13 +671,42 @@ export class MemoryStorage extends BaseStorage {
  }
  /**
  * Save HNSW graph data for a noun
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions during concurrent HNSW updates
+ * Even in-memory operations can race due to async/await interleaving
+ * Prevents data corruption when multiple entities connect to same neighbor simultaneously
  */
  async saveHNSWData(nounId, hnswData) {
- // For memory storage, HNSW data is already in the noun object
- // This method is a no-op since saveNoun already stores the full graph
- // But we store it separately for consistency with other adapters
  const path = `hnsw/${nounId}.json`;
- await this.writeObjectToPath(path, hnswData);
+ // MUTEX LOCK: Wait for any pending operations on this entity
+ while (this.hnswLocks.has(path)) {
+ await this.hnswLocks.get(path);
+ }
+ // Acquire lock by creating a promise that we'll resolve when done
+ let releaseLock;
+ const lockPromise = new Promise(resolve => { releaseLock = resolve; });
+ this.hnswLocks.set(path, lockPromise);
+ try {
+ // Read existing data (if exists)
+ let existingNode = {};
+ const existing = this.objectStore.get(path);
+ if (existing) {
+ existingNode = existing;
+ }
+ // Preserve id and vector, update only HNSW graph metadata
+ const updatedNode = {
+ ...existingNode, // Preserve all existing fields
+ level: hnswData.level,
+ connections: hnswData.connections
+ };
+ // Write atomically (in-memory, but now serialized by mutex)
+ this.objectStore.set(path, JSON.parse(JSON.stringify(updatedNode)));
+ }
+ finally {
+ // Release lock
+ this.hnswLocks.delete(path);
+ releaseLock();
+ }
  }
  /**
  * Get HNSW graph data for a noun
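The memory and OPFS adapters inline the same wait-then-acquire sequence around a Map of pending promises. A minimal sketch of that sequence factored into a reusable per-key helper (withLock is a hypothetical name, not part of the package):

// Per-key async mutex: later callers wait on the promise left by the current
// holder, then install their own before entering the critical section.
const locks = new Map<string, Promise<void>>();

async function withLock<T>(key: string, fn: () => Promise<T>): Promise<T> {
  while (locks.has(key)) {
    await locks.get(key);           // wait for the current holder to finish
  }
  let release!: () => void;
  locks.set(key, new Promise<void>(resolve => { release = resolve; }));
  try {
    return await fn();              // read-modify-write runs alone for this key
  } finally {
    locks.delete(key);
    release();                      // wake any waiters queued behind us
  }
}

// e.g. await withLock(`hnsw/${nounId}`, async () => { /* read, merge, write */ });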
@@ -686,10 +718,28 @@ export class MemoryStorage extends BaseStorage {
  }
  /**
  * Save HNSW system data (entry point, max level)
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions
  */
  async saveHNSWSystem(systemData) {
  const path = 'system/hnsw-system.json';
- await this.writeObjectToPath(path, systemData);
+ // MUTEX LOCK: Wait for any pending operations
+ while (this.hnswLocks.has(path)) {
+ await this.hnswLocks.get(path);
+ }
+ // Acquire lock
+ let releaseLock;
+ const lockPromise = new Promise(resolve => { releaseLock = resolve; });
+ this.hnswLocks.set(path, lockPromise);
+ try {
+ // Write atomically (serialized by mutex)
+ this.objectStore.set(path, JSON.parse(JSON.stringify(systemData)));
+ }
+ finally {
+ // Release lock
+ this.hnswLocks.delete(path);
+ releaseLock();
+ }
  }
  /**
  * Get HNSW system data
@@ -306,9 +306,14 @@ export declare class OPFSStorage extends BaseStorage {
  * Get a noun's vector for HNSW rebuild
  */
  getNounVector(id: string): Promise<number[] | null>;
+ private hnswLocks;
  /**
  * Save HNSW graph data for a noun
  * Storage path: nouns/hnsw/{shard}/{id}.json
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions during concurrent HNSW updates
+ * Browser is single-threaded but async operations can interleave - mutex prevents this
+ * Prevents data corruption when multiple entities connect to same neighbor simultaneously
  */
  saveHNSWData(nounId: string, hnswData: {
  level: number;
@@ -325,6 +330,8 @@ export declare class OPFSStorage extends BaseStorage {
  /**
  * Save HNSW system data (entry point, max level)
  * Storage path: index/hnsw-system.json
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions
  */
  saveHNSWSystem(systemData: {
  entryPointId: string | null;
@@ -42,6 +42,9 @@ export class OPFSStorage extends BaseStorage {
  this.quotaCriticalThreshold = 0.95; // Critical at 95% usage
  this.lastQuotaCheck = 0;
  this.quotaCheckInterval = 60000; // Check every 60 seconds
+ // CRITICAL FIX (v4.10.1): Mutex locks for HNSW concurrency control
+ // Browser environments are single-threaded but async operations can still interleave
+ this.hnswLocks = new Map();
  // Check if OPFS is available
  this.isAvailable =
  typeof navigator !== 'undefined' &&
@@ -1708,9 +1711,22 @@ export class OPFSStorage extends BaseStorage {
  /**
  * Save HNSW graph data for a noun
  * Storage path: nouns/hnsw/{shard}/{id}.json
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions during concurrent HNSW updates
+ * Browser is single-threaded but async operations can interleave - mutex prevents this
+ * Prevents data corruption when multiple entities connect to same neighbor simultaneously
  */
  async saveHNSWData(nounId, hnswData) {
  await this.ensureInitialized();
+ const lockKey = `hnsw/${nounId}`;
+ // MUTEX LOCK: Wait for any pending operations on this entity
+ while (this.hnswLocks.has(lockKey)) {
+ await this.hnswLocks.get(lockKey);
+ }
+ // Acquire lock
+ let releaseLock;
+ const lockPromise = new Promise(resolve => { releaseLock = resolve; });
+ this.hnswLocks.set(lockKey, lockPromise);
  try {
  // CRITICAL FIX (v4.7.3): Must preserve existing node data (id, vector) when updating HNSW metadata
  const hnswDir = await this.nounsDir.getDirectoryHandle('hnsw', { create: true });
@@ -1743,6 +1759,11 @@ export class OPFSStorage extends BaseStorage {
  console.error(`Failed to save HNSW data for ${nounId}:`, error);
  throw new Error(`Failed to save HNSW data for ${nounId}: ${error}`);
  }
+ finally {
+ // Release lock
+ this.hnswLocks.delete(lockKey);
+ releaseLock();
+ }
  }
  /**
  * Get HNSW graph data for a noun
@@ -1774,9 +1795,20 @@ export class OPFSStorage extends BaseStorage {
  /**
  * Save HNSW system data (entry point, max level)
  * Storage path: index/hnsw-system.json
+ *
+ * CRITICAL FIX (v4.10.1): Mutex locking to prevent race conditions
  */
  async saveHNSWSystem(systemData) {
  await this.ensureInitialized();
+ const lockKey = 'system/hnsw-system';
+ // MUTEX LOCK: Wait for any pending operations
+ while (this.hnswLocks.has(lockKey)) {
+ await this.hnswLocks.get(lockKey);
+ }
+ // Acquire lock
+ let releaseLock;
+ const lockPromise = new Promise(resolve => { releaseLock = resolve; });
+ this.hnswLocks.set(lockKey, lockPromise);
  try {
  // Create or get the file in the index directory
  const fileHandle = await this.indexDir.getFileHandle('hnsw-system.json', { create: true });
@@ -1789,6 +1821,11 @@ export class OPFSStorage extends BaseStorage {
  console.error('Failed to save HNSW system data:', error);
  throw new Error(`Failed to save HNSW system data: ${error}`);
  }
+ finally {
+ // Release lock
+ this.hnswLocks.delete(lockKey);
+ releaseLock();
+ }
  }
  /**
  * Get HNSW system data (entry point, max level)