@soulcraft/brainy 3.15.0 → 3.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/storage/adapters/fileSystemStorage.d.ts +15 -0
- package/dist/storage/adapters/fileSystemStorage.js +374 -28
- package/dist/vfs/VFSHealthCheck.d.ts +78 -0
- package/dist/vfs/VFSHealthCheck.js +299 -0
- package/dist/vfs/VirtualFileSystem.js +60 -4
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -2,6 +2,24 @@
|
|
|
2
2
|
|
|
3
3
|
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
|
|
4
4
|
|
|
5
|
+
## [3.17.0](https://github.com/soulcraftlabs/brainy/compare/v3.16.0...v3.17.0) (2025-09-27)
|
|
6
|
+
|
|
7
|
+
## [3.15.0](https://github.com/soulcraftlabs/brainy/compare/v3.14.2...v3.15.0) (2025-09-26)
|
|
8
|
+
|
|
9
|
+
### Bug Fixes
|
|
10
|
+
|
|
11
|
+
* **vfs**: Ensure Contains relationships are maintained when updating files
|
|
12
|
+
* **vfs**: Fix root directory metadata handling to prevent "Not a directory" errors
|
|
13
|
+
* **vfs**: Add entity metadata compatibility layer for proper VFS operations
|
|
14
|
+
* **vfs**: Fix resolvePath() to return entity IDs instead of path strings
|
|
15
|
+
* **vfs**: Improve error handling in ensureDirectory() method
|
|
16
|
+
|
|
17
|
+
### Features
|
|
18
|
+
|
|
19
|
+
* **vfs**: Add comprehensive tests for Contains relationship integrity
|
|
20
|
+
* **vfs**: Ensure all VFS entities use standard Brainy NounType and VerbType enums
|
|
21
|
+
* **vfs**: Add metadata validation and repair for existing entities
|
|
22
|
+
|
|
5
23
|
## [3.0.1](https://github.com/soulcraftlabs/brainy/compare/v2.14.3...v3.0.1) (2025-09-15)
|
|
6
24
|
|
|
7
25
|
**Brainy 3.0 Production Release** - World's first Triple Intelligence™ database unifying vector, graph, and document search
|
|
@@ -288,6 +288,21 @@ export declare class FileSystemStorage extends BaseStorage {
|
|
|
288
288
|
* Consistent across all entity types
|
|
289
289
|
*/
|
|
290
290
|
private getShardedPath;
|
|
291
|
+
/**
|
|
292
|
+
* Get all JSON files from a sharded directory structure
|
|
293
|
+
* Properly traverses sharded subdirectories based on current sharding depth
|
|
294
|
+
*/
|
|
295
|
+
private getAllShardedFiles;
|
|
296
|
+
/**
|
|
297
|
+
* Production-scale streaming pagination for very large datasets
|
|
298
|
+
* Avoids loading all filenames into memory
|
|
299
|
+
*/
|
|
300
|
+
private getVerbsWithPaginationStreaming;
|
|
301
|
+
/**
|
|
302
|
+
* Stream through sharded files without loading all names into memory
|
|
303
|
+
* Production-scale implementation for millions of files
|
|
304
|
+
*/
|
|
305
|
+
private streamShardedFiles;
|
|
291
306
|
/**
|
|
292
307
|
* Check if a file exists (handles both sharded and non-sharded)
|
|
293
308
|
*/
|
|
@@ -39,7 +39,7 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
39
39
|
super();
|
|
40
40
|
// Intelligent sharding configuration
|
|
41
41
|
this.shardingDepth = 2; // 0=flat, 1=ab/, 2=ab/cd/
|
|
42
|
-
this.SHARDING_THRESHOLD =
|
|
42
|
+
this.SHARDING_THRESHOLD = 100; // Enable deep sharding at 100 files for optimal performance
|
|
43
43
|
this.useDualWrite = true; // Write to both locations during migration
|
|
44
44
|
this.activeLocks = new Set();
|
|
45
45
|
this.lockTimers = new Map(); // Track timers for cleanup
|
|
@@ -182,7 +182,8 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
182
182
|
id: parsedNode.id,
|
|
183
183
|
vector: parsedNode.vector,
|
|
184
184
|
connections,
|
|
185
|
-
level: parsedNode.level || 0
|
|
185
|
+
level: parsedNode.level || 0,
|
|
186
|
+
metadata: parsedNode.metadata
|
|
186
187
|
};
|
|
187
188
|
}
|
|
188
189
|
catch (error) {
|
|
@@ -303,6 +304,8 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
303
304
|
*/
|
|
304
305
|
async saveEdge(edge) {
|
|
305
306
|
await this.ensureInitialized();
|
|
307
|
+
// Check if this is a new edge to update counts
|
|
308
|
+
const isNew = !(await this.fileExists(this.getVerbPath(edge.id)));
|
|
306
309
|
// Convert connections Map to a serializable format
|
|
307
310
|
const serializableEdge = {
|
|
308
311
|
...edge,
|
|
@@ -311,6 +314,14 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
311
314
|
const filePath = this.getVerbPath(edge.id);
|
|
312
315
|
await this.ensureDirectoryExists(path.dirname(filePath));
|
|
313
316
|
await fs.promises.writeFile(filePath, JSON.stringify(serializableEdge, null, 2));
|
|
317
|
+
// Update verb count for new edges (production-scale optimizations)
|
|
318
|
+
if (isNew) {
|
|
319
|
+
this.totalVerbCount++;
|
|
320
|
+
// Persist counts periodically (every 10 operations for efficiency)
|
|
321
|
+
if (this.totalVerbCount % 10 === 0) {
|
|
322
|
+
this.persistCounts(); // Async persist, don't await
|
|
323
|
+
}
|
|
324
|
+
}
|
|
314
325
|
}
|
|
315
326
|
/**
|
|
316
327
|
* Get an edge from storage
|
|
@@ -502,8 +513,22 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
502
513
|
*/
|
|
503
514
|
async saveVerbMetadata_internal(id, metadata) {
|
|
504
515
|
await this.ensureInitialized();
|
|
516
|
+
console.log(`[DEBUG] Saving verb metadata for ${id} to: ${this.verbMetadataDir}`);
|
|
505
517
|
const filePath = path.join(this.verbMetadataDir, `${id}.json`);
|
|
506
|
-
|
|
518
|
+
console.log(`[DEBUG] Full file path: ${filePath}`);
|
|
519
|
+
try {
|
|
520
|
+
await this.ensureDirectoryExists(path.dirname(filePath));
|
|
521
|
+
console.log(`[DEBUG] Directory ensured: ${path.dirname(filePath)}`);
|
|
522
|
+
await fs.promises.writeFile(filePath, JSON.stringify(metadata, null, 2));
|
|
523
|
+
console.log(`[DEBUG] File written successfully: ${filePath}`);
|
|
524
|
+
// Verify the file was actually written
|
|
525
|
+
const exists = await fs.promises.access(filePath).then(() => true).catch(() => false);
|
|
526
|
+
console.log(`[DEBUG] File exists after write: ${exists}`);
|
|
527
|
+
}
|
|
528
|
+
catch (error) {
|
|
529
|
+
console.error(`[DEBUG] Error saving verb metadata:`, error);
|
|
530
|
+
throw error;
|
|
531
|
+
}
|
|
507
532
|
}
|
|
508
533
|
/**
|
|
509
534
|
* Get verb metadata from storage
|
|
@@ -531,9 +556,8 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
531
556
|
const limit = options.limit || 100;
|
|
532
557
|
const cursor = options.cursor;
|
|
533
558
|
try {
|
|
534
|
-
// Get all noun files
|
|
535
|
-
const
|
|
536
|
-
const nounFiles = files.filter((f) => f.endsWith('.json'));
|
|
559
|
+
// Get all noun files (handles sharding properly)
|
|
560
|
+
const nounFiles = await this.getAllShardedFiles(this.nounsDir);
|
|
537
561
|
// Sort for consistent pagination
|
|
538
562
|
nounFiles.sort();
|
|
539
563
|
// Find starting position - prioritize offset for O(1) operation
|
|
@@ -562,7 +586,8 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
562
586
|
// Second pass: load the current page
|
|
563
587
|
for (const file of pageFiles) {
|
|
564
588
|
try {
|
|
565
|
-
const
|
|
589
|
+
const id = file.replace('.json', '');
|
|
590
|
+
const data = await fs.promises.readFile(this.getNodePath(id), 'utf-8');
|
|
566
591
|
const noun = JSON.parse(data);
|
|
567
592
|
// Apply filter if provided
|
|
568
593
|
if (options.filter) {
|
|
@@ -872,29 +897,30 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
872
897
|
const limit = options.limit || 100;
|
|
873
898
|
const startIndex = options.cursor ? parseInt(options.cursor, 10) : 0;
|
|
874
899
|
try {
|
|
875
|
-
//
|
|
876
|
-
|
|
877
|
-
//
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
// Calculate pagination
|
|
883
|
-
const totalCount = verbFiles.length;
|
|
900
|
+
// Production-scale optimization: Use persisted count for total instead of scanning
|
|
901
|
+
const totalCount = this.totalVerbCount || 0;
|
|
902
|
+
// For large datasets, warn about performance
|
|
903
|
+
if (totalCount > 1000000) {
|
|
904
|
+
console.warn(`Very large verb dataset detected (${totalCount} verbs). Performance may be degraded. Consider database storage for optimal performance.`);
|
|
905
|
+
}
|
|
906
|
+
// Calculate pagination bounds
|
|
884
907
|
const endIndex = Math.min(startIndex + limit, totalCount);
|
|
885
908
|
const hasMore = endIndex < totalCount;
|
|
886
|
-
//
|
|
887
|
-
if (totalCount >
|
|
888
|
-
|
|
909
|
+
// For production-scale datasets, use streaming approach
|
|
910
|
+
if (totalCount > 50000) {
|
|
911
|
+
return await this.getVerbsWithPaginationStreaming(options, startIndex, limit);
|
|
889
912
|
}
|
|
913
|
+
// For smaller datasets, use the current approach (with optimizations)
|
|
914
|
+
const verbFiles = await this.getAllShardedFiles(this.verbsDir);
|
|
915
|
+
verbFiles.sort(); // This is still acceptable for <50k files
|
|
890
916
|
// Load the requested page of verbs
|
|
891
917
|
const verbs = [];
|
|
892
918
|
for (let i = startIndex; i < endIndex; i++) {
|
|
893
919
|
const file = verbFiles[i];
|
|
894
920
|
const id = file.replace('.json', '');
|
|
895
921
|
try {
|
|
896
|
-
// Read the verb data (HNSWVerb stored as edge)
|
|
897
|
-
const filePath =
|
|
922
|
+
// Read the verb data (HNSWVerb stored as edge) - use sharded path
|
|
923
|
+
const filePath = this.getVerbPath(id);
|
|
898
924
|
const data = await fs.promises.readFile(filePath, 'utf-8');
|
|
899
925
|
const edge = JSON.parse(data);
|
|
900
926
|
// Get metadata which contains the actual verb information
|
|
@@ -1336,20 +1362,19 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
1336
1362
|
*/
|
|
1337
1363
|
async initializeCountsFromDisk() {
|
|
1338
1364
|
try {
|
|
1339
|
-
// Count nouns
|
|
1340
|
-
const
|
|
1341
|
-
const validNounFiles = nounFiles.filter((f) => f.endsWith('.json'));
|
|
1365
|
+
// Count nouns (handles sharding properly)
|
|
1366
|
+
const validNounFiles = await this.getAllShardedFiles(this.nounsDir);
|
|
1342
1367
|
this.totalNounCount = validNounFiles.length;
|
|
1343
|
-
// Count verbs
|
|
1344
|
-
const
|
|
1345
|
-
const validVerbFiles = verbFiles.filter((f) => f.endsWith('.json'));
|
|
1368
|
+
// Count verbs (handles sharding properly)
|
|
1369
|
+
const validVerbFiles = await this.getAllShardedFiles(this.verbsDir);
|
|
1346
1370
|
this.totalVerbCount = validVerbFiles.length;
|
|
1347
1371
|
// Sample some files to get type distribution (don't read all)
|
|
1348
1372
|
const sampleSize = Math.min(100, validNounFiles.length);
|
|
1349
1373
|
for (let i = 0; i < sampleSize; i++) {
|
|
1350
1374
|
try {
|
|
1351
1375
|
const file = validNounFiles[i];
|
|
1352
|
-
const
|
|
1376
|
+
const id = file.replace('.json', '');
|
|
1377
|
+
const data = await fs.promises.readFile(this.getNodePath(id), 'utf-8');
|
|
1353
1378
|
const noun = JSON.parse(data);
|
|
1354
1379
|
const type = noun.metadata?.type || noun.metadata?.nounType || 'default';
|
|
1355
1380
|
this.entityCounts.set(type, (this.entityCounts.get(type) || 0) + 1);
|
|
@@ -1449,6 +1474,327 @@ export class FileSystemStorage extends BaseStorage {
|
|
|
1449
1474
|
return path.join(baseDir, shard1Deep, shard2Deep, `${id}.json`);
|
|
1450
1475
|
}
|
|
1451
1476
|
}
|
|
1477
|
+
/**
|
|
1478
|
+
* Get all JSON files from a sharded directory structure
|
|
1479
|
+
* Properly traverses sharded subdirectories based on current sharding depth
|
|
1480
|
+
*/
|
|
1481
|
+
async getAllShardedFiles(baseDir) {
|
|
1482
|
+
const allFiles = [];
|
|
1483
|
+
const depth = this.cachedShardingDepth ?? this.getOptimalShardingDepth();
|
|
1484
|
+
try {
|
|
1485
|
+
switch (depth) {
|
|
1486
|
+
case 0:
|
|
1487
|
+
// Flat structure: read directly from baseDir
|
|
1488
|
+
const flatFiles = await fs.promises.readdir(baseDir);
|
|
1489
|
+
for (const file of flatFiles) {
|
|
1490
|
+
if (file.endsWith('.json')) {
|
|
1491
|
+
allFiles.push(file);
|
|
1492
|
+
}
|
|
1493
|
+
}
|
|
1494
|
+
break;
|
|
1495
|
+
case 1:
|
|
1496
|
+
// Single-level sharding: baseDir/ab/
|
|
1497
|
+
try {
|
|
1498
|
+
const shardDirs = await fs.promises.readdir(baseDir);
|
|
1499
|
+
for (const shardDir of shardDirs) {
|
|
1500
|
+
const shardPath = path.join(baseDir, shardDir);
|
|
1501
|
+
try {
|
|
1502
|
+
const stat = await fs.promises.stat(shardPath);
|
|
1503
|
+
if (stat.isDirectory()) {
|
|
1504
|
+
const shardFiles = await fs.promises.readdir(shardPath);
|
|
1505
|
+
for (const file of shardFiles) {
|
|
1506
|
+
if (file.endsWith('.json')) {
|
|
1507
|
+
allFiles.push(file);
|
|
1508
|
+
}
|
|
1509
|
+
}
|
|
1510
|
+
}
|
|
1511
|
+
}
|
|
1512
|
+
catch (shardError) {
|
|
1513
|
+
// Skip inaccessible shard directories
|
|
1514
|
+
continue;
|
|
1515
|
+
}
|
|
1516
|
+
}
|
|
1517
|
+
}
|
|
1518
|
+
catch (baseError) {
|
|
1519
|
+
// If baseDir doesn't exist, return empty array
|
|
1520
|
+
if (baseError.code === 'ENOENT') {
|
|
1521
|
+
return [];
|
|
1522
|
+
}
|
|
1523
|
+
throw baseError;
|
|
1524
|
+
}
|
|
1525
|
+
break;
|
|
1526
|
+
case 2:
|
|
1527
|
+
default:
|
|
1528
|
+
// Deep sharding: baseDir/ab/cd/
|
|
1529
|
+
try {
|
|
1530
|
+
const level1Dirs = await fs.promises.readdir(baseDir);
|
|
1531
|
+
for (const level1Dir of level1Dirs) {
|
|
1532
|
+
const level1Path = path.join(baseDir, level1Dir);
|
|
1533
|
+
try {
|
|
1534
|
+
const level1Stat = await fs.promises.stat(level1Path);
|
|
1535
|
+
if (level1Stat.isDirectory()) {
|
|
1536
|
+
const level2Dirs = await fs.promises.readdir(level1Path);
|
|
1537
|
+
for (const level2Dir of level2Dirs) {
|
|
1538
|
+
const level2Path = path.join(level1Path, level2Dir);
|
|
1539
|
+
try {
|
|
1540
|
+
const level2Stat = await fs.promises.stat(level2Path);
|
|
1541
|
+
if (level2Stat.isDirectory()) {
|
|
1542
|
+
const shardFiles = await fs.promises.readdir(level2Path);
|
|
1543
|
+
for (const file of shardFiles) {
|
|
1544
|
+
if (file.endsWith('.json')) {
|
|
1545
|
+
allFiles.push(file);
|
|
1546
|
+
}
|
|
1547
|
+
}
|
|
1548
|
+
}
|
|
1549
|
+
}
|
|
1550
|
+
catch (level2Error) {
|
|
1551
|
+
// Skip inaccessible level2 directories
|
|
1552
|
+
continue;
|
|
1553
|
+
}
|
|
1554
|
+
}
|
|
1555
|
+
}
|
|
1556
|
+
}
|
|
1557
|
+
catch (level1Error) {
|
|
1558
|
+
// Skip inaccessible level1 directories
|
|
1559
|
+
continue;
|
|
1560
|
+
}
|
|
1561
|
+
}
|
|
1562
|
+
}
|
|
1563
|
+
catch (baseError) {
|
|
1564
|
+
// If baseDir doesn't exist, return empty array
|
|
1565
|
+
if (baseError.code === 'ENOENT') {
|
|
1566
|
+
return [];
|
|
1567
|
+
}
|
|
1568
|
+
throw baseError;
|
|
1569
|
+
}
|
|
1570
|
+
break;
|
|
1571
|
+
}
|
|
1572
|
+
// Sort for consistent ordering
|
|
1573
|
+
allFiles.sort();
|
|
1574
|
+
return allFiles;
|
|
1575
|
+
}
|
|
1576
|
+
catch (error) {
|
|
1577
|
+
if (error.code === 'ENOENT') {
|
|
1578
|
+
// Directory doesn't exist yet
|
|
1579
|
+
return [];
|
|
1580
|
+
}
|
|
1581
|
+
throw error;
|
|
1582
|
+
}
|
|
1583
|
+
}
|
|
1584
|
+
/**
|
|
1585
|
+
* Production-scale streaming pagination for very large datasets
|
|
1586
|
+
* Avoids loading all filenames into memory
|
|
1587
|
+
*/
|
|
1588
|
+
async getVerbsWithPaginationStreaming(options, startIndex, limit) {
|
|
1589
|
+
const verbs = [];
|
|
1590
|
+
const totalCount = this.totalVerbCount || 0;
|
|
1591
|
+
let processedCount = 0;
|
|
1592
|
+
let skippedCount = 0;
|
|
1593
|
+
let resultCount = 0;
|
|
1594
|
+
const depth = this.cachedShardingDepth ?? this.getOptimalShardingDepth();
|
|
1595
|
+
try {
|
|
1596
|
+
// Stream through sharded directories efficiently
|
|
1597
|
+
const hasMore = await this.streamShardedFiles(this.verbsDir, depth, async (filename, filePath) => {
|
|
1598
|
+
// Skip files until we reach start index
|
|
1599
|
+
if (skippedCount < startIndex) {
|
|
1600
|
+
skippedCount++;
|
|
1601
|
+
return true; // continue
|
|
1602
|
+
}
|
|
1603
|
+
// Stop if we have enough results
|
|
1604
|
+
if (resultCount >= limit) {
|
|
1605
|
+
return false; // stop streaming
|
|
1606
|
+
}
|
|
1607
|
+
try {
|
|
1608
|
+
const id = filename.replace('.json', '');
|
|
1609
|
+
// Read verb data and metadata
|
|
1610
|
+
const data = await fs.promises.readFile(filePath, 'utf-8');
|
|
1611
|
+
const edge = JSON.parse(data);
|
|
1612
|
+
const metadata = await this.getVerbMetadata(id);
|
|
1613
|
+
if (!metadata) {
|
|
1614
|
+
processedCount++;
|
|
1615
|
+
return true; // continue, skip this verb
|
|
1616
|
+
}
|
|
1617
|
+
// Reconstruct GraphVerb
|
|
1618
|
+
const verb = {
|
|
1619
|
+
id: edge.id,
|
|
1620
|
+
vector: edge.vector,
|
|
1621
|
+
connections: edge.connections || new Map(),
|
|
1622
|
+
sourceId: metadata.sourceId || metadata.source,
|
|
1623
|
+
targetId: metadata.targetId || metadata.target,
|
|
1624
|
+
source: metadata.source || metadata.sourceId,
|
|
1625
|
+
target: metadata.target || metadata.targetId,
|
|
1626
|
+
verb: metadata.verb || metadata.type,
|
|
1627
|
+
type: metadata.type || metadata.verb,
|
|
1628
|
+
weight: metadata.weight,
|
|
1629
|
+
metadata: metadata.metadata || metadata,
|
|
1630
|
+
data: metadata.data,
|
|
1631
|
+
createdAt: metadata.createdAt,
|
|
1632
|
+
updatedAt: metadata.updatedAt,
|
|
1633
|
+
createdBy: metadata.createdBy,
|
|
1634
|
+
embedding: metadata.embedding || edge.vector
|
|
1635
|
+
};
|
|
1636
|
+
// Apply filters
|
|
1637
|
+
if (options.filter) {
|
|
1638
|
+
const filter = options.filter;
|
|
1639
|
+
if (filter.verbType) {
|
|
1640
|
+
const types = Array.isArray(filter.verbType) ? filter.verbType : [filter.verbType];
|
|
1641
|
+
const verbType = verb.type || verb.verb;
|
|
1642
|
+
if (verbType && !types.includes(verbType))
|
|
1643
|
+
return true; // continue
|
|
1644
|
+
}
|
|
1645
|
+
if (filter.sourceId) {
|
|
1646
|
+
const sources = Array.isArray(filter.sourceId) ? filter.sourceId : [filter.sourceId];
|
|
1647
|
+
const sourceId = verb.sourceId || verb.source;
|
|
1648
|
+
if (!sourceId || !sources.includes(sourceId))
|
|
1649
|
+
return true; // continue
|
|
1650
|
+
}
|
|
1651
|
+
if (filter.targetId) {
|
|
1652
|
+
const targets = Array.isArray(filter.targetId) ? filter.targetId : [filter.targetId];
|
|
1653
|
+
const targetId = verb.targetId || verb.target;
|
|
1654
|
+
if (!targetId || !targets.includes(targetId))
|
|
1655
|
+
return true; // continue
|
|
1656
|
+
}
|
|
1657
|
+
}
|
|
1658
|
+
verbs.push(verb);
|
|
1659
|
+
resultCount++;
|
|
1660
|
+
processedCount++;
|
|
1661
|
+
return true; // continue
|
|
1662
|
+
}
|
|
1663
|
+
catch (error) {
|
|
1664
|
+
console.warn(`Failed to read verb from ${filePath}:`, error);
|
|
1665
|
+
processedCount++;
|
|
1666
|
+
return true; // continue
|
|
1667
|
+
}
|
|
1668
|
+
});
|
|
1669
|
+
const finalHasMore = (startIndex + resultCount) < totalCount;
|
|
1670
|
+
return {
|
|
1671
|
+
items: verbs,
|
|
1672
|
+
totalCount,
|
|
1673
|
+
hasMore: finalHasMore,
|
|
1674
|
+
nextCursor: finalHasMore ? String(startIndex + resultCount) : undefined
|
|
1675
|
+
};
|
|
1676
|
+
}
|
|
1677
|
+
catch (error) {
|
|
1678
|
+
if (error.code === 'ENOENT') {
|
|
1679
|
+
return {
|
|
1680
|
+
items: [],
|
|
1681
|
+
totalCount: 0,
|
|
1682
|
+
hasMore: false
|
|
1683
|
+
};
|
|
1684
|
+
}
|
|
1685
|
+
throw error;
|
|
1686
|
+
}
|
|
1687
|
+
}
|
|
1688
|
+
/**
|
|
1689
|
+
* Stream through sharded files without loading all names into memory
|
|
1690
|
+
* Production-scale implementation for millions of files
|
|
1691
|
+
*/
|
|
1692
|
+
async streamShardedFiles(baseDir, depth, processor) {
|
|
1693
|
+
let hasMore = true;
|
|
1694
|
+
switch (depth) {
|
|
1695
|
+
case 0:
|
|
1696
|
+
// Flat structure
|
|
1697
|
+
try {
|
|
1698
|
+
const files = await fs.promises.readdir(baseDir);
|
|
1699
|
+
const sortedFiles = files.filter((f) => f.endsWith('.json')).sort();
|
|
1700
|
+
for (const file of sortedFiles) {
|
|
1701
|
+
const shouldContinue = await processor(file, path.join(baseDir, file));
|
|
1702
|
+
if (!shouldContinue) {
|
|
1703
|
+
hasMore = false;
|
|
1704
|
+
break;
|
|
1705
|
+
}
|
|
1706
|
+
}
|
|
1707
|
+
}
|
|
1708
|
+
catch (error) {
|
|
1709
|
+
if (error.code === 'ENOENT')
|
|
1710
|
+
hasMore = false;
|
|
1711
|
+
}
|
|
1712
|
+
break;
|
|
1713
|
+
case 1:
|
|
1714
|
+
// Single-level sharding: ab/
|
|
1715
|
+
try {
|
|
1716
|
+
const shardDirs = await fs.promises.readdir(baseDir);
|
|
1717
|
+
const sortedShardDirs = shardDirs.sort();
|
|
1718
|
+
for (const shardDir of sortedShardDirs) {
|
|
1719
|
+
const shardPath = path.join(baseDir, shardDir);
|
|
1720
|
+
try {
|
|
1721
|
+
const stat = await fs.promises.stat(shardPath);
|
|
1722
|
+
if (stat.isDirectory()) {
|
|
1723
|
+
const files = await fs.promises.readdir(shardPath);
|
|
1724
|
+
const sortedFiles = files.filter((f) => f.endsWith('.json')).sort();
|
|
1725
|
+
for (const file of sortedFiles) {
|
|
1726
|
+
const shouldContinue = await processor(file, path.join(shardPath, file));
|
|
1727
|
+
if (!shouldContinue) {
|
|
1728
|
+
hasMore = false;
|
|
1729
|
+
break;
|
|
1730
|
+
}
|
|
1731
|
+
}
|
|
1732
|
+
if (!hasMore)
|
|
1733
|
+
break;
|
|
1734
|
+
}
|
|
1735
|
+
}
|
|
1736
|
+
catch (shardError) {
|
|
1737
|
+
continue; // Skip inaccessible shard directories
|
|
1738
|
+
}
|
|
1739
|
+
}
|
|
1740
|
+
}
|
|
1741
|
+
catch (error) {
|
|
1742
|
+
if (error.code === 'ENOENT')
|
|
1743
|
+
hasMore = false;
|
|
1744
|
+
}
|
|
1745
|
+
break;
|
|
1746
|
+
case 2:
|
|
1747
|
+
default:
|
|
1748
|
+
// Deep sharding: ab/cd/
|
|
1749
|
+
try {
|
|
1750
|
+
const level1Dirs = await fs.promises.readdir(baseDir);
|
|
1751
|
+
const sortedLevel1Dirs = level1Dirs.sort();
|
|
1752
|
+
for (const level1Dir of sortedLevel1Dirs) {
|
|
1753
|
+
const level1Path = path.join(baseDir, level1Dir);
|
|
1754
|
+
try {
|
|
1755
|
+
const level1Stat = await fs.promises.stat(level1Path);
|
|
1756
|
+
if (level1Stat.isDirectory()) {
|
|
1757
|
+
const level2Dirs = await fs.promises.readdir(level1Path);
|
|
1758
|
+
const sortedLevel2Dirs = level2Dirs.sort();
|
|
1759
|
+
for (const level2Dir of sortedLevel2Dirs) {
|
|
1760
|
+
const level2Path = path.join(level1Path, level2Dir);
|
|
1761
|
+
try {
|
|
1762
|
+
const level2Stat = await fs.promises.stat(level2Path);
|
|
1763
|
+
if (level2Stat.isDirectory()) {
|
|
1764
|
+
const files = await fs.promises.readdir(level2Path);
|
|
1765
|
+
const sortedFiles = files.filter((f) => f.endsWith('.json')).sort();
|
|
1766
|
+
for (const file of sortedFiles) {
|
|
1767
|
+
const shouldContinue = await processor(file, path.join(level2Path, file));
|
|
1768
|
+
if (!shouldContinue) {
|
|
1769
|
+
hasMore = false;
|
|
1770
|
+
break;
|
|
1771
|
+
}
|
|
1772
|
+
}
|
|
1773
|
+
if (!hasMore)
|
|
1774
|
+
break;
|
|
1775
|
+
}
|
|
1776
|
+
}
|
|
1777
|
+
catch (level2Error) {
|
|
1778
|
+
continue; // Skip inaccessible level2 directories
|
|
1779
|
+
}
|
|
1780
|
+
}
|
|
1781
|
+
if (!hasMore)
|
|
1782
|
+
break;
|
|
1783
|
+
}
|
|
1784
|
+
}
|
|
1785
|
+
catch (level1Error) {
|
|
1786
|
+
continue; // Skip inaccessible level1 directories
|
|
1787
|
+
}
|
|
1788
|
+
}
|
|
1789
|
+
}
|
|
1790
|
+
catch (error) {
|
|
1791
|
+
if (error.code === 'ENOENT')
|
|
1792
|
+
hasMore = false;
|
|
1793
|
+
}
|
|
1794
|
+
break;
|
|
1795
|
+
}
|
|
1796
|
+
return hasMore;
|
|
1797
|
+
}
|
|
1452
1798
|
/**
|
|
1453
1799
|
* Check if a file exists (handles both sharded and non-sharded)
|
|
1454
1800
|
*/
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import { Brainy } from '../brainy.js';
|
|
2
|
+
import { VirtualFileSystem } from './VirtualFileSystem.js';
|
|
3
|
+
export interface VFSHealthReport {
|
|
4
|
+
healthy: boolean;
|
|
5
|
+
rootDirectory: {
|
|
6
|
+
exists: boolean;
|
|
7
|
+
isDirectory: boolean;
|
|
8
|
+
accessible: boolean;
|
|
9
|
+
};
|
|
10
|
+
containsRelationships: {
|
|
11
|
+
intact: boolean;
|
|
12
|
+
orphanedFiles: string[];
|
|
13
|
+
missingRelationships: number;
|
|
14
|
+
};
|
|
15
|
+
metadataIntegrity: {
|
|
16
|
+
valid: boolean;
|
|
17
|
+
malformedEntities: string[];
|
|
18
|
+
};
|
|
19
|
+
performance: {
|
|
20
|
+
readLatency: number;
|
|
21
|
+
writeLatency: number;
|
|
22
|
+
cacheHitRate: number;
|
|
23
|
+
};
|
|
24
|
+
recommendations: string[];
|
|
25
|
+
}
|
|
26
|
+
/**
|
|
27
|
+
* VFS Health Check and Recovery System
|
|
28
|
+
*
|
|
29
|
+
* Provides automated health checks, diagnostics, and recovery
|
|
30
|
+
* procedures for the Virtual File System.
|
|
31
|
+
*/
|
|
32
|
+
export declare class VFSHealthCheck {
|
|
33
|
+
private vfs;
|
|
34
|
+
private brain;
|
|
35
|
+
private logger;
|
|
36
|
+
constructor(vfs: VirtualFileSystem, brain: Brainy);
|
|
37
|
+
/**
|
|
38
|
+
* Perform comprehensive health check
|
|
39
|
+
*/
|
|
40
|
+
checkHealth(): Promise<VFSHealthReport>;
|
|
41
|
+
/**
|
|
42
|
+
* Check root directory health
|
|
43
|
+
*/
|
|
44
|
+
private checkRootDirectory;
|
|
45
|
+
/**
|
|
46
|
+
* Check Contains relationships integrity
|
|
47
|
+
*/
|
|
48
|
+
private checkContainsRelationships;
|
|
49
|
+
/**
|
|
50
|
+
* Check metadata integrity
|
|
51
|
+
*/
|
|
52
|
+
private checkMetadataIntegrity;
|
|
53
|
+
/**
|
|
54
|
+
* Check performance metrics
|
|
55
|
+
*/
|
|
56
|
+
private checkPerformance;
|
|
57
|
+
/**
|
|
58
|
+
* Generate recommendations based on health report
|
|
59
|
+
*/
|
|
60
|
+
private generateRecommendations;
|
|
61
|
+
/**
|
|
62
|
+
* Attempt to recover VFS to healthy state
|
|
63
|
+
*/
|
|
64
|
+
recover(): Promise<void>;
|
|
65
|
+
/**
|
|
66
|
+
* Recover root directory
|
|
67
|
+
*/
|
|
68
|
+
private recoverRootDirectory;
|
|
69
|
+
/**
|
|
70
|
+
* Repair orphaned files by creating missing Contains relationships
|
|
71
|
+
*/
|
|
72
|
+
private repairOrphanedFiles;
|
|
73
|
+
/**
|
|
74
|
+
* Fix malformed metadata
|
|
75
|
+
*/
|
|
76
|
+
private fixMalformedMetadata;
|
|
77
|
+
private getParentPath;
|
|
78
|
+
}
|
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
import { VFSError, VFSErrorCode } from './types.js';
|
|
2
|
+
import { Logger } from '../utils/logger.js';
|
|
3
|
+
/**
|
|
4
|
+
* VFS Health Check and Recovery System
|
|
5
|
+
*
|
|
6
|
+
* Provides automated health checks, diagnostics, and recovery
|
|
7
|
+
* procedures for the Virtual File System.
|
|
8
|
+
*/
|
|
9
|
+
export class VFSHealthCheck {
|
|
10
|
+
constructor(vfs, brain) {
|
|
11
|
+
this.vfs = vfs;
|
|
12
|
+
this.brain = brain;
|
|
13
|
+
this.logger = new Logger('VFSHealthCheck');
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Perform comprehensive health check
|
|
17
|
+
*/
|
|
18
|
+
async checkHealth() {
|
|
19
|
+
const report = {
|
|
20
|
+
healthy: true,
|
|
21
|
+
rootDirectory: {
|
|
22
|
+
exists: false,
|
|
23
|
+
isDirectory: false,
|
|
24
|
+
accessible: false
|
|
25
|
+
},
|
|
26
|
+
containsRelationships: {
|
|
27
|
+
intact: true,
|
|
28
|
+
orphanedFiles: [],
|
|
29
|
+
missingRelationships: 0
|
|
30
|
+
},
|
|
31
|
+
metadataIntegrity: {
|
|
32
|
+
valid: true,
|
|
33
|
+
malformedEntities: []
|
|
34
|
+
},
|
|
35
|
+
performance: {
|
|
36
|
+
readLatency: 0,
|
|
37
|
+
writeLatency: 0,
|
|
38
|
+
cacheHitRate: 0
|
|
39
|
+
},
|
|
40
|
+
recommendations: []
|
|
41
|
+
};
|
|
42
|
+
// Check root directory
|
|
43
|
+
await this.checkRootDirectory(report);
|
|
44
|
+
// Check Contains relationships
|
|
45
|
+
await this.checkContainsRelationships(report);
|
|
46
|
+
// Check metadata integrity
|
|
47
|
+
await this.checkMetadataIntegrity(report);
|
|
48
|
+
// Check performance metrics
|
|
49
|
+
await this.checkPerformance(report);
|
|
50
|
+
// Generate recommendations
|
|
51
|
+
this.generateRecommendations(report);
|
|
52
|
+
report.healthy =
|
|
53
|
+
report.rootDirectory.exists &&
|
|
54
|
+
report.rootDirectory.isDirectory &&
|
|
55
|
+
report.rootDirectory.accessible &&
|
|
56
|
+
report.containsRelationships.intact &&
|
|
57
|
+
report.metadataIntegrity.valid;
|
|
58
|
+
return report;
|
|
59
|
+
}
|
|
60
|
+
/**
|
|
61
|
+
* Check root directory health
|
|
62
|
+
*/
|
|
63
|
+
async checkRootDirectory(report) {
|
|
64
|
+
try {
|
|
65
|
+
// Check if root exists
|
|
66
|
+
const rootId = await this.vfs.resolvePath('/');
|
|
67
|
+
report.rootDirectory.exists = !!rootId;
|
|
68
|
+
if (rootId) {
|
|
69
|
+
// Get root entity
|
|
70
|
+
const rootEntity = await this.brain.get(rootId);
|
|
71
|
+
// Check if it's a directory
|
|
72
|
+
report.rootDirectory.isDirectory =
|
|
73
|
+
rootEntity?.metadata?.vfsType === 'directory' ||
|
|
74
|
+
rootEntity?.vfsType === 'directory';
|
|
75
|
+
// Check if it's accessible
|
|
76
|
+
try {
|
|
77
|
+
await this.vfs.readdir('/');
|
|
78
|
+
report.rootDirectory.accessible = true;
|
|
79
|
+
}
|
|
80
|
+
catch {
|
|
81
|
+
report.rootDirectory.accessible = false;
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
catch (error) {
|
|
86
|
+
this.logger.error('Root directory check failed:', error);
|
|
87
|
+
report.rootDirectory.exists = false;
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
/**
|
|
91
|
+
* Check Contains relationships integrity
|
|
92
|
+
*/
|
|
93
|
+
async checkContainsRelationships(report) {
|
|
94
|
+
try {
|
|
95
|
+
// Get all file entities
|
|
96
|
+
const files = await this.brain.find({
|
|
97
|
+
metadata: { vfsType: 'file' },
|
|
98
|
+
limit: 1000
|
|
99
|
+
});
|
|
100
|
+
for (const file of files) {
|
|
101
|
+
// Check if file has a Contains relationship with its parent
|
|
102
|
+
const parentPath = this.getParentPath(file.metadata.path);
|
|
103
|
+
if (parentPath) {
|
|
104
|
+
try {
|
|
105
|
+
const parentId = await this.vfs.resolvePath(parentPath);
|
|
106
|
+
const relations = await this.brain.getRelations({
|
|
107
|
+
from: parentId,
|
|
108
|
+
to: file.id,
|
|
109
|
+
type: 'Contains'
|
|
110
|
+
});
|
|
111
|
+
if (relations.length === 0) {
|
|
112
|
+
report.containsRelationships.orphanedFiles.push(file.metadata.path);
|
|
113
|
+
report.containsRelationships.missingRelationships++;
|
|
114
|
+
report.containsRelationships.intact = false;
|
|
115
|
+
}
|
|
116
|
+
}
|
|
117
|
+
catch {
|
|
118
|
+
// Parent doesn't exist
|
|
119
|
+
report.containsRelationships.orphanedFiles.push(file.metadata.path);
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
catch (error) {
|
|
125
|
+
this.logger.error('Contains relationships check failed:', error);
|
|
126
|
+
report.containsRelationships.intact = false;
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
/**
|
|
130
|
+
* Check metadata integrity
|
|
131
|
+
*/
|
|
132
|
+
async checkMetadataIntegrity(report) {
|
|
133
|
+
try {
|
|
134
|
+
// Get all VFS entities
|
|
135
|
+
const entities = await this.brain.find({
|
|
136
|
+
metadata: { path: { $exists: true } },
|
|
137
|
+
limit: 1000
|
|
138
|
+
});
|
|
139
|
+
for (const entity of entities) {
|
|
140
|
+
// Check required metadata fields
|
|
141
|
+
if (!entity.metadata?.vfsType ||
|
|
142
|
+
!entity.metadata?.path ||
|
|
143
|
+
entity.metadata.vfsType !== 'file' && entity.metadata.vfsType !== 'directory') {
|
|
144
|
+
report.metadataIntegrity.malformedEntities.push(entity.id);
|
|
145
|
+
report.metadataIntegrity.valid = false;
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
catch (error) {
|
|
150
|
+
this.logger.error('Metadata integrity check failed:', error);
|
|
151
|
+
report.metadataIntegrity.valid = false;
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
/**
|
|
155
|
+
* Check performance metrics
|
|
156
|
+
*/
|
|
157
|
+
async checkPerformance(report) {
|
|
158
|
+
try {
|
|
159
|
+
// Test read performance
|
|
160
|
+
const readStart = Date.now();
|
|
161
|
+
await this.vfs.readdir('/');
|
|
162
|
+
report.performance.readLatency = Date.now() - readStart;
|
|
163
|
+
// Test write performance
|
|
164
|
+
const writeStart = Date.now();
|
|
165
|
+
const testPath = '/.health-check-' + Date.now();
|
|
166
|
+
await this.vfs.writeFile(testPath, 'test');
|
|
167
|
+
report.performance.writeLatency = Date.now() - writeStart;
|
|
168
|
+
// Clean up test file
|
|
169
|
+
await this.vfs.unlink(testPath);
|
|
170
|
+
// Calculate cache hit rate (if available)
|
|
171
|
+
const stats = await this.brain.getStatistics();
|
|
172
|
+
if (stats?.cache) {
|
|
173
|
+
report.performance.cacheHitRate = stats.cache.hitRate || 0;
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
catch (error) {
|
|
177
|
+
this.logger.error('Performance check failed:', error);
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
/**
|
|
181
|
+
* Generate recommendations based on health report
|
|
182
|
+
*/
|
|
183
|
+
generateRecommendations(report) {
|
|
184
|
+
if (!report.rootDirectory.exists || !report.rootDirectory.isDirectory) {
|
|
185
|
+
report.recommendations.push('Run VFS recovery to rebuild root directory');
|
|
186
|
+
}
|
|
187
|
+
if (report.containsRelationships.orphanedFiles.length > 0) {
|
|
188
|
+
report.recommendations.push(`Repair ${report.containsRelationships.orphanedFiles.length} orphaned files`);
|
|
189
|
+
}
|
|
190
|
+
if (report.metadataIntegrity.malformedEntities.length > 0) {
|
|
191
|
+
report.recommendations.push(`Fix metadata for ${report.metadataIntegrity.malformedEntities.length} malformed entities`);
|
|
192
|
+
}
|
|
193
|
+
if (report.performance.readLatency > 100) {
|
|
194
|
+
report.recommendations.push('Consider enabling caching or optimizing indexes');
|
|
195
|
+
}
|
|
196
|
+
if (report.performance.cacheHitRate < 0.5) {
|
|
197
|
+
report.recommendations.push('Increase cache size or TTL for better performance');
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
/**
 * Attempt to recover VFS to healthy state.
 *
 * Runs the three repair passes in order: root directory, orphaned
 * files, then malformed metadata. If root-directory recovery fails it
 * throws (recoverRootDirectory rethrows as VFSError), aborting the
 * remaining passes; the later passes handle per-item failures
 * internally and do not throw.
 */
async recover() {
    this.logger.info('Starting VFS recovery...');
    // 1. Ensure root directory exists and is properly configured
    await this.recoverRootDirectory();
    // 2. Repair orphaned files
    await this.repairOrphanedFiles();
    // 3. Fix malformed metadata
    await this.fixMalformedMetadata();
    this.logger.info('VFS recovery completed');
}
|
|
213
|
+
/**
|
|
214
|
+
* Recover root directory
|
|
215
|
+
*/
|
|
216
|
+
async recoverRootDirectory() {
|
|
217
|
+
try {
|
|
218
|
+
// Force recreation of root directory with proper metadata
|
|
219
|
+
const rootId = await this.vfs['initializeRoot']();
|
|
220
|
+
// Verify root is now accessible
|
|
221
|
+
await this.vfs.readdir('/');
|
|
222
|
+
this.logger.info('Root directory recovered successfully');
|
|
223
|
+
}
|
|
224
|
+
catch (error) {
|
|
225
|
+
this.logger.error('Failed to recover root directory:', error);
|
|
226
|
+
throw new VFSError(VFSErrorCode.EIO, 'Failed to recover root directory', '/', 'recover');
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
/**
|
|
230
|
+
* Repair orphaned files by creating missing Contains relationships
|
|
231
|
+
*/
|
|
232
|
+
async repairOrphanedFiles() {
|
|
233
|
+
const report = await this.checkHealth();
|
|
234
|
+
for (const filePath of report.containsRelationships.orphanedFiles) {
|
|
235
|
+
try {
|
|
236
|
+
const fileId = await this.vfs.resolvePath(filePath);
|
|
237
|
+
const parentPath = this.getParentPath(filePath);
|
|
238
|
+
if (parentPath) {
|
|
239
|
+
// Ensure parent directory exists
|
|
240
|
+
await this.vfs.mkdir(parentPath, { recursive: true });
|
|
241
|
+
const parentId = await this.vfs.resolvePath(parentPath);
|
|
242
|
+
// Create missing Contains relationship
|
|
243
|
+
await this.brain.relate({
|
|
244
|
+
from: parentId,
|
|
245
|
+
to: fileId,
|
|
246
|
+
type: 'Contains'
|
|
247
|
+
});
|
|
248
|
+
this.logger.info(`Repaired orphaned file: ${filePath}`);
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
catch (error) {
|
|
252
|
+
this.logger.error(`Failed to repair orphaned file ${filePath}:`, error);
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
}
|
|
256
|
+
/**
|
|
257
|
+
* Fix malformed metadata
|
|
258
|
+
*/
|
|
259
|
+
async fixMalformedMetadata() {
|
|
260
|
+
const report = await this.checkHealth();
|
|
261
|
+
for (const entityId of report.metadataIntegrity.malformedEntities) {
|
|
262
|
+
try {
|
|
263
|
+
const entity = await this.brain.get(entityId);
|
|
264
|
+
if (entity) {
|
|
265
|
+
// Reconstruct metadata
|
|
266
|
+
const fixedMetadata = {
|
|
267
|
+
path: entity.metadata?.path || entity.path || '/',
|
|
268
|
+
name: entity.metadata?.name || entity.name || '',
|
|
269
|
+
vfsType: entity.metadata?.vfsType || entity.vfsType || 'file',
|
|
270
|
+
size: entity.metadata?.size || entity.size || 0,
|
|
271
|
+
permissions: entity.metadata?.permissions || 0o644,
|
|
272
|
+
owner: entity.metadata?.owner || 'user',
|
|
273
|
+
group: entity.metadata?.group || 'users',
|
|
274
|
+
accessed: entity.metadata?.accessed || Date.now(),
|
|
275
|
+
modified: entity.metadata?.modified || Date.now(),
|
|
276
|
+
...entity.metadata
|
|
277
|
+
};
|
|
278
|
+
// Update entity with fixed metadata
|
|
279
|
+
await this.brain.update({
|
|
280
|
+
id: entityId,
|
|
281
|
+
metadata: fixedMetadata
|
|
282
|
+
});
|
|
283
|
+
this.logger.info(`Fixed metadata for entity: ${entityId}`);
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
catch (error) {
|
|
287
|
+
this.logger.error(`Failed to fix metadata for entity ${entityId}:`, error);
|
|
288
|
+
}
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
getParentPath(path) {
|
|
292
|
+
const normalized = path.replace(/\/+/g, '/').replace(/\/$/, '');
|
|
293
|
+
const lastSlash = normalized.lastIndexOf('/');
|
|
294
|
+
if (lastSlash <= 0)
|
|
295
|
+
return '/';
|
|
296
|
+
return normalized.substring(0, lastSlash);
|
|
297
|
+
}
|
|
298
|
+
}
|
|
299
|
+
//# sourceMappingURL=VFSHealthCheck.js.map
|
|
@@ -62,16 +62,37 @@ export class VirtualFileSystem {
|
|
|
62
62
|
* Create or find the root directory entity
|
|
63
63
|
*/
|
|
64
64
|
async initializeRoot() {
|
|
65
|
-
// Check if root already exists
|
|
65
|
+
// Check if root already exists - search using where clause
|
|
66
66
|
const existing = await this.brain.find({
|
|
67
67
|
where: {
|
|
68
|
-
path: '/',
|
|
69
|
-
vfsType: 'directory'
|
|
68
|
+
'metadata.path': '/',
|
|
69
|
+
'metadata.vfsType': 'directory'
|
|
70
70
|
},
|
|
71
71
|
limit: 1
|
|
72
72
|
});
|
|
73
73
|
if (existing.length > 0) {
|
|
74
|
-
|
|
74
|
+
const rootEntity = existing[0];
|
|
75
|
+
// Ensure the root entity has proper metadata structure
|
|
76
|
+
const entityMetadata = rootEntity.metadata || rootEntity;
|
|
77
|
+
if (!entityMetadata.vfsType) {
|
|
78
|
+
// Update the root entity with proper metadata
|
|
79
|
+
await this.brain.update({
|
|
80
|
+
id: rootEntity.id,
|
|
81
|
+
metadata: {
|
|
82
|
+
path: '/',
|
|
83
|
+
name: '',
|
|
84
|
+
vfsType: 'directory',
|
|
85
|
+
size: 0,
|
|
86
|
+
permissions: 0o755,
|
|
87
|
+
owner: 'root',
|
|
88
|
+
group: 'root',
|
|
89
|
+
accessed: Date.now(),
|
|
90
|
+
modified: Date.now(),
|
|
91
|
+
...entityMetadata // Preserve any existing metadata
|
|
92
|
+
}
|
|
93
|
+
});
|
|
94
|
+
}
|
|
95
|
+
return rootEntity.id;
|
|
75
96
|
}
|
|
76
97
|
// Create root directory
|
|
77
98
|
const root = await this.brain.add({
|
|
@@ -764,6 +785,41 @@ export class VirtualFileSystem {
|
|
|
764
785
|
if (!entity) {
|
|
765
786
|
throw new VFSError(VFSErrorCode.ENOENT, `Entity not found: ${id}`);
|
|
766
787
|
}
|
|
788
|
+
// Ensure entity has proper VFS metadata structure
|
|
789
|
+
// Handle both nested and flat metadata structures for compatibility
|
|
790
|
+
if (!entity.metadata || !entity.metadata.vfsType) {
|
|
791
|
+
// Check if metadata is at top level (legacy structure)
|
|
792
|
+
const anyEntity = entity;
|
|
793
|
+
if (anyEntity.vfsType || anyEntity.path) {
|
|
794
|
+
entity.metadata = {
|
|
795
|
+
path: anyEntity.path || '/',
|
|
796
|
+
name: anyEntity.name || '',
|
|
797
|
+
vfsType: anyEntity.vfsType || (anyEntity.path === '/' ? 'directory' : 'file'),
|
|
798
|
+
size: anyEntity.size || 0,
|
|
799
|
+
permissions: anyEntity.permissions || (anyEntity.vfsType === 'directory' ? 0o755 : 0o644),
|
|
800
|
+
owner: anyEntity.owner || 'user',
|
|
801
|
+
group: anyEntity.group || 'users',
|
|
802
|
+
accessed: anyEntity.accessed || Date.now(),
|
|
803
|
+
modified: anyEntity.modified || Date.now(),
|
|
804
|
+
...entity.metadata // Preserve any existing nested metadata
|
|
805
|
+
};
|
|
806
|
+
}
|
|
807
|
+
else if (entity.id === this.rootEntityId) {
|
|
808
|
+
// Special case: ensure root directory always has proper metadata
|
|
809
|
+
entity.metadata = {
|
|
810
|
+
path: '/',
|
|
811
|
+
name: '',
|
|
812
|
+
vfsType: 'directory',
|
|
813
|
+
size: 0,
|
|
814
|
+
permissions: 0o755,
|
|
815
|
+
owner: 'root',
|
|
816
|
+
group: 'root',
|
|
817
|
+
accessed: Date.now(),
|
|
818
|
+
modified: Date.now(),
|
|
819
|
+
...entity.metadata
|
|
820
|
+
};
|
|
821
|
+
}
|
|
822
|
+
}
|
|
767
823
|
return entity;
|
|
768
824
|
}
|
|
769
825
|
getParentPath(path) {
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@soulcraft/brainy",
|
|
3
|
-
"version": "3.
|
|
3
|
+
"version": "3.17.0",
|
|
4
4
|
"description": "Universal Knowledge Protocol™ - World's first Triple Intelligence database unifying vector, graph, and document search in one API. 31 nouns × 40 verbs for infinite expressiveness.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"module": "dist/index.js",
|