@soulcraft/brainy 2.15.0 → 3.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/README.md +249 -152
- package/dist/api/ConfigAPI.d.ts +67 -0
- package/dist/api/ConfigAPI.js +166 -0
- package/dist/api/DataAPI.d.ts +123 -0
- package/dist/api/DataAPI.js +391 -0
- package/dist/api/SecurityAPI.d.ts +50 -0
- package/dist/api/SecurityAPI.js +139 -0
- package/dist/api/UniversalImportAPI.d.ts +134 -0
- package/dist/api/UniversalImportAPI.js +615 -0
- package/dist/augmentationManager.js +12 -7
- package/dist/augmentationPipeline.d.ts +0 -61
- package/dist/augmentationPipeline.js +0 -87
- package/dist/augmentationRegistry.d.ts +1 -1
- package/dist/augmentationRegistry.js +1 -1
- package/dist/augmentations/apiServerAugmentation.d.ts +27 -1
- package/dist/augmentations/apiServerAugmentation.js +288 -7
- package/dist/augmentations/auditLogAugmentation.d.ts +109 -0
- package/dist/augmentations/auditLogAugmentation.js +358 -0
- package/dist/augmentations/batchProcessingAugmentation.d.ts +3 -2
- package/dist/augmentations/batchProcessingAugmentation.js +123 -22
- package/dist/augmentations/brainyAugmentation.d.ts +87 -8
- package/dist/augmentations/brainyAugmentation.js +159 -2
- package/dist/augmentations/cacheAugmentation.d.ts +6 -5
- package/dist/augmentations/cacheAugmentation.js +113 -17
- package/dist/augmentations/conduitAugmentations.d.ts +2 -2
- package/dist/augmentations/conduitAugmentations.js +2 -2
- package/dist/augmentations/configResolver.d.ts +122 -0
- package/dist/augmentations/configResolver.js +440 -0
- package/dist/augmentations/connectionPoolAugmentation.d.ts +3 -1
- package/dist/augmentations/connectionPoolAugmentation.js +37 -12
- package/dist/augmentations/defaultAugmentations.d.ts +9 -11
- package/dist/augmentations/defaultAugmentations.js +4 -11
- package/dist/augmentations/discovery/catalogDiscovery.d.ts +142 -0
- package/dist/augmentations/discovery/catalogDiscovery.js +249 -0
- package/dist/augmentations/discovery/localDiscovery.d.ts +84 -0
- package/dist/augmentations/discovery/localDiscovery.js +246 -0
- package/dist/augmentations/discovery/runtimeLoader.d.ts +97 -0
- package/dist/augmentations/discovery/runtimeLoader.js +337 -0
- package/dist/augmentations/discovery.d.ts +152 -0
- package/dist/augmentations/discovery.js +441 -0
- package/dist/augmentations/display/intelligentComputation.d.ts +1 -1
- package/dist/augmentations/display/intelligentComputation.js +4 -4
- package/dist/augmentations/entityRegistryAugmentation.d.ts +3 -1
- package/dist/augmentations/entityRegistryAugmentation.js +5 -1
- package/dist/augmentations/indexAugmentation.d.ts +3 -3
- package/dist/augmentations/indexAugmentation.js +2 -2
- package/dist/augmentations/intelligentVerbScoringAugmentation.d.ts +22 -6
- package/dist/augmentations/intelligentVerbScoringAugmentation.js +106 -23
- package/dist/augmentations/manifest.d.ts +176 -0
- package/dist/augmentations/manifest.js +8 -0
- package/dist/augmentations/marketplace/AugmentationMarketplace.d.ts +168 -0
- package/dist/augmentations/marketplace/AugmentationMarketplace.js +329 -0
- package/dist/augmentations/marketplace/cli.d.ts +47 -0
- package/dist/augmentations/marketplace/cli.js +265 -0
- package/dist/augmentations/metricsAugmentation.d.ts +3 -3
- package/dist/augmentations/metricsAugmentation.js +2 -2
- package/dist/augmentations/monitoringAugmentation.d.ts +3 -3
- package/dist/augmentations/monitoringAugmentation.js +2 -2
- package/dist/augmentations/neuralImport.d.ts +1 -1
- package/dist/augmentations/rateLimitAugmentation.d.ts +82 -0
- package/dist/augmentations/rateLimitAugmentation.js +321 -0
- package/dist/augmentations/requestDeduplicatorAugmentation.d.ts +2 -2
- package/dist/augmentations/requestDeduplicatorAugmentation.js +1 -1
- package/dist/augmentations/storageAugmentation.d.ts +1 -1
- package/dist/augmentations/storageAugmentation.js +2 -2
- package/dist/augmentations/storageAugmentations.d.ts +37 -8
- package/dist/augmentations/storageAugmentations.js +204 -15
- package/dist/augmentations/synapseAugmentation.d.ts +1 -1
- package/dist/augmentations/synapseAugmentation.js +35 -16
- package/dist/augmentations/typeMatching/intelligentTypeMatcher.d.ts +39 -59
- package/dist/augmentations/typeMatching/intelligentTypeMatcher.js +103 -389
- package/dist/augmentations/universalDisplayAugmentation.d.ts +2 -2
- package/dist/augmentations/universalDisplayAugmentation.js +2 -2
- package/dist/brainy-unified.d.ts +106 -0
- package/dist/brainy-unified.js +327 -0
- package/dist/brainy.d.ts +273 -0
- package/dist/brainy.js +1181 -0
- package/dist/brainyData.d.ts +29 -72
- package/dist/brainyData.js +350 -304
- package/dist/brainyDataV3.d.ts +186 -0
- package/dist/brainyDataV3.js +337 -0
- package/dist/browserFramework.d.ts +6 -6
- package/dist/browserFramework.js +11 -8
- package/dist/browserFramework.minimal.d.ts +5 -5
- package/dist/browserFramework.minimal.js +11 -8
- package/dist/config/index.d.ts +2 -2
- package/dist/config/index.js +3 -3
- package/dist/config/modelAutoConfig.d.ts +6 -7
- package/dist/config/modelAutoConfig.js +17 -76
- package/dist/cortex/backupRestore.d.ts +2 -2
- package/dist/cortex/backupRestore.js +85 -27
- package/dist/cortex/healthCheck.d.ts +2 -2
- package/dist/cortex/neuralImport.d.ts +2 -2
- package/dist/cortex/neuralImport.js +18 -13
- package/dist/cortex/performanceMonitor.d.ts +2 -2
- package/dist/critical/model-guardian.d.ts +4 -0
- package/dist/critical/model-guardian.js +31 -11
- package/dist/demo.d.ts +4 -4
- package/dist/demo.js +7 -7
- package/dist/distributed/cacheSync.d.ts +112 -0
- package/dist/distributed/cacheSync.js +265 -0
- package/dist/distributed/coordinator.d.ts +193 -0
- package/dist/distributed/coordinator.js +548 -0
- package/dist/distributed/httpTransport.d.ts +120 -0
- package/dist/distributed/httpTransport.js +446 -0
- package/dist/distributed/index.d.ts +8 -0
- package/dist/distributed/index.js +5 -0
- package/dist/distributed/networkTransport.d.ts +132 -0
- package/dist/distributed/networkTransport.js +633 -0
- package/dist/distributed/queryPlanner.d.ts +104 -0
- package/dist/distributed/queryPlanner.js +327 -0
- package/dist/distributed/readWriteSeparation.d.ts +134 -0
- package/dist/distributed/readWriteSeparation.js +350 -0
- package/dist/distributed/shardManager.d.ts +114 -0
- package/dist/distributed/shardManager.js +357 -0
- package/dist/distributed/shardMigration.d.ts +110 -0
- package/dist/distributed/shardMigration.js +289 -0
- package/dist/distributed/storageDiscovery.d.ts +160 -0
- package/dist/distributed/storageDiscovery.js +551 -0
- package/dist/embeddings/EmbeddingManager.d.ts +0 -4
- package/dist/embeddings/EmbeddingManager.js +21 -26
- package/dist/errors/brainyError.d.ts +5 -1
- package/dist/errors/brainyError.js +12 -0
- package/dist/examples/basicUsage.js +3 -3
- package/dist/graph/graphAdjacencyIndex.d.ts +96 -0
- package/dist/graph/graphAdjacencyIndex.js +288 -0
- package/dist/graph/pathfinding.js +4 -2
- package/dist/hnsw/scaledHNSWSystem.js +11 -2
- package/dist/importManager.js +6 -3
- package/dist/index.d.ts +12 -21
- package/dist/index.js +14 -22
- package/dist/mcp/brainyMCPAdapter.d.ts +4 -4
- package/dist/mcp/brainyMCPAdapter.js +5 -5
- package/dist/mcp/brainyMCPService.d.ts +3 -3
- package/dist/mcp/brainyMCPService.js +3 -11
- package/dist/mcp/mcpAugmentationToolset.js +20 -30
- package/dist/neural/embeddedPatterns.d.ts +1 -1
- package/dist/neural/embeddedPatterns.js +2 -2
- package/dist/neural/entityExtractor.d.ts +65 -0
- package/dist/neural/entityExtractor.js +316 -0
- package/dist/neural/improvedNeuralAPI.js +90 -79
- package/dist/neural/naturalLanguageProcessor.d.ts +155 -10
- package/dist/neural/naturalLanguageProcessor.js +941 -66
- package/dist/neural/naturalLanguageProcessorStatic.d.ts +2 -2
- package/dist/neural/naturalLanguageProcessorStatic.js +3 -3
- package/dist/neural/neuralAPI.js +8 -2
- package/dist/neural/patternLibrary.d.ts +57 -3
- package/dist/neural/patternLibrary.js +348 -13
- package/dist/neural/staticPatternMatcher.d.ts +2 -2
- package/dist/neural/staticPatternMatcher.js +2 -2
- package/dist/shared/default-augmentations.d.ts +3 -3
- package/dist/shared/default-augmentations.js +5 -5
- package/dist/storage/adapters/fileSystemStorage.d.ts +4 -0
- package/dist/storage/adapters/fileSystemStorage.js +54 -1
- package/dist/storage/adapters/memoryStorage.js +13 -8
- package/dist/storage/backwardCompatibility.d.ts +10 -78
- package/dist/storage/backwardCompatibility.js +17 -132
- package/dist/storage/baseStorage.d.ts +6 -0
- package/dist/storage/baseStorage.js +17 -0
- package/dist/storage/cacheManager.js +2 -2
- package/dist/storage/readOnlyOptimizations.js +8 -3
- package/dist/streaming/pipeline.d.ts +154 -0
- package/dist/streaming/pipeline.js +551 -0
- package/dist/triple/TripleIntelligence.d.ts +25 -110
- package/dist/triple/TripleIntelligence.js +4 -574
- package/dist/triple/TripleIntelligenceSystem.d.ts +159 -0
- package/dist/triple/TripleIntelligenceSystem.js +519 -0
- package/dist/types/apiTypes.d.ts +278 -0
- package/dist/types/apiTypes.js +33 -0
- package/dist/types/brainy.types.d.ts +308 -0
- package/dist/types/brainy.types.js +8 -0
- package/dist/types/brainyDataInterface.d.ts +3 -3
- package/dist/types/brainyDataInterface.js +2 -2
- package/dist/types/graphTypes.js +2 -2
- package/dist/utils/cacheAutoConfig.d.ts +3 -3
- package/dist/utils/embedding.js +8 -14
- package/dist/utils/enhancedLogger.d.ts +104 -0
- package/dist/utils/enhancedLogger.js +232 -0
- package/dist/utils/index.d.ts +1 -1
- package/dist/utils/index.js +1 -1
- package/dist/utils/intelligentTypeMapper.d.ts +60 -0
- package/dist/utils/intelligentTypeMapper.js +349 -0
- package/dist/utils/metadataIndex.d.ts +118 -1
- package/dist/utils/metadataIndex.js +539 -16
- package/dist/utils/paramValidation.d.ts +39 -0
- package/dist/utils/paramValidation.js +192 -0
- package/dist/utils/rateLimiter.d.ts +160 -0
- package/dist/utils/rateLimiter.js +271 -0
- package/dist/utils/statistics.d.ts +4 -4
- package/dist/utils/statistics.js +3 -3
- package/dist/utils/structuredLogger.d.ts +146 -0
- package/dist/utils/structuredLogger.js +394 -0
- package/dist/utils/textEncoding.js +2 -1
- package/dist/utils/typeValidation.d.ts +34 -0
- package/dist/utils/typeValidation.js +247 -0
- package/package.json +14 -6
- package/scripts/download-models.cjs +6 -15
- package/dist/augmentations/walAugmentation.d.ts +0 -111
- package/dist/augmentations/walAugmentation.js +0 -519
- package/dist/chat/BrainyChat.d.ts +0 -121
- package/dist/chat/BrainyChat.js +0 -396
- package/dist/chat/ChatCLI.d.ts +0 -61
- package/dist/chat/ChatCLI.js +0 -351
|
@@ -0,0 +1,551 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Storage-based Discovery for Zero-Config Distributed Brainy
|
|
3
|
+
* Uses shared storage (S3/GCS/R2) as coordination point
|
|
4
|
+
* REAL PRODUCTION CODE - No mocks, no stubs!
|
|
5
|
+
*/
|
|
6
|
+
import { EventEmitter } from 'events';
|
|
7
|
+
import * as os from 'os';
|
|
8
|
+
export class StorageDiscovery extends EventEmitter {
|
|
9
|
+
constructor(storage, nodeId) {
|
|
10
|
+
super();
|
|
11
|
+
this.clusterConfig = null;
|
|
12
|
+
this.heartbeatInterval = null;
|
|
13
|
+
this.discoveryInterval = null;
|
|
14
|
+
this.endpoint = '';
|
|
15
|
+
this.isRunning = false;
|
|
16
|
+
this.HEARTBEAT_INTERVAL = 5000; // 5 seconds
|
|
17
|
+
this.DISCOVERY_INTERVAL = 2000; // 2 seconds
|
|
18
|
+
this.NODE_TIMEOUT = 30000; // 30 seconds until node considered dead
|
|
19
|
+
this.CLUSTER_PATH = '_cluster';
|
|
20
|
+
this.storage = storage;
|
|
21
|
+
this.nodeId = nodeId || this.generateNodeId();
|
|
22
|
+
// Initialize node info with REAL system data
|
|
23
|
+
this.nodeInfo = {
|
|
24
|
+
id: this.nodeId,
|
|
25
|
+
endpoint: '', // Will be set when HTTP server starts
|
|
26
|
+
hostname: os.hostname(),
|
|
27
|
+
started: Date.now(),
|
|
28
|
+
lastSeen: Date.now(),
|
|
29
|
+
role: 'candidate',
|
|
30
|
+
shards: [],
|
|
31
|
+
capacity: {
|
|
32
|
+
cpu: os.cpus().length,
|
|
33
|
+
memory: Math.floor(os.totalmem() / 1024 / 1024), // MB
|
|
34
|
+
storage: 0 // Will be updated based on actual usage
|
|
35
|
+
},
|
|
36
|
+
stats: {
|
|
37
|
+
nouns: 0,
|
|
38
|
+
verbs: 0,
|
|
39
|
+
queries: 0,
|
|
40
|
+
latency: 0
|
|
41
|
+
}
|
|
42
|
+
};
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Start discovery and registration
|
|
46
|
+
*/
|
|
47
|
+
async start(httpPort) {
|
|
48
|
+
if (this.isRunning)
|
|
49
|
+
return this.clusterConfig;
|
|
50
|
+
this.isRunning = true;
|
|
51
|
+
// Set our endpoint
|
|
52
|
+
this.endpoint = await this.detectEndpoint(httpPort);
|
|
53
|
+
this.nodeInfo.endpoint = this.endpoint;
|
|
54
|
+
// Try to load existing cluster config
|
|
55
|
+
this.clusterConfig = await this.loadClusterConfig();
|
|
56
|
+
if (!this.clusterConfig) {
|
|
57
|
+
// We're the first node - initialize cluster
|
|
58
|
+
await this.initializeCluster();
|
|
59
|
+
}
|
|
60
|
+
else {
|
|
61
|
+
// Join existing cluster
|
|
62
|
+
await this.joinCluster();
|
|
63
|
+
}
|
|
64
|
+
// Start heartbeat to keep our node alive
|
|
65
|
+
this.startHeartbeat();
|
|
66
|
+
// Start discovery to find other nodes
|
|
67
|
+
this.startDiscovery();
|
|
68
|
+
this.emit('started', this.nodeInfo);
|
|
69
|
+
return this.clusterConfig;
|
|
70
|
+
}
|
|
71
|
+
/**
|
|
72
|
+
* Stop discovery and unregister
|
|
73
|
+
*/
|
|
74
|
+
async stop() {
|
|
75
|
+
if (!this.isRunning)
|
|
76
|
+
return;
|
|
77
|
+
this.isRunning = false;
|
|
78
|
+
// Stop intervals
|
|
79
|
+
if (this.heartbeatInterval) {
|
|
80
|
+
clearInterval(this.heartbeatInterval);
|
|
81
|
+
this.heartbeatInterval = null;
|
|
82
|
+
}
|
|
83
|
+
if (this.discoveryInterval) {
|
|
84
|
+
clearInterval(this.discoveryInterval);
|
|
85
|
+
this.discoveryInterval = null;
|
|
86
|
+
}
|
|
87
|
+
// Remove ourselves from cluster
|
|
88
|
+
await this.leaveCluster();
|
|
89
|
+
this.emit('stopped');
|
|
90
|
+
}
|
|
91
|
+
/**
|
|
92
|
+
* Initialize a new cluster (we're the first node)
|
|
93
|
+
*/
|
|
94
|
+
async initializeCluster() {
|
|
95
|
+
console.log(`[${this.nodeId}] Initializing new cluster as first node`);
|
|
96
|
+
this.nodeInfo.role = 'primary';
|
|
97
|
+
this.clusterConfig = {
|
|
98
|
+
version: 1,
|
|
99
|
+
created: Date.now(),
|
|
100
|
+
updated: Date.now(),
|
|
101
|
+
leader: this.nodeId,
|
|
102
|
+
nodes: {
|
|
103
|
+
[this.nodeId]: this.nodeInfo
|
|
104
|
+
},
|
|
105
|
+
shards: {
|
|
106
|
+
count: 64, // Default shard count
|
|
107
|
+
assignments: {}
|
|
108
|
+
},
|
|
109
|
+
settings: {
|
|
110
|
+
replicationFactor: 3,
|
|
111
|
+
shardCount: 64,
|
|
112
|
+
autoRebalance: true,
|
|
113
|
+
minNodes: 1,
|
|
114
|
+
maxNodesPerShard: 5
|
|
115
|
+
}
|
|
116
|
+
};
|
|
117
|
+
// Assign all shards to ourselves initially
|
|
118
|
+
for (let i = 0; i < this.clusterConfig.shards.count; i++) {
|
|
119
|
+
const shardId = `shard-${i.toString().padStart(3, '0')}`;
|
|
120
|
+
this.clusterConfig.shards.assignments[shardId] = [this.nodeId];
|
|
121
|
+
this.nodeInfo.shards.push(shardId);
|
|
122
|
+
}
|
|
123
|
+
// Save cluster config
|
|
124
|
+
await this.saveClusterConfig();
|
|
125
|
+
// Register ourselves
|
|
126
|
+
await this.registerNode();
|
|
127
|
+
this.emit('clusterInitialized', this.clusterConfig);
|
|
128
|
+
}
|
|
129
|
+
/**
|
|
130
|
+
* Join an existing cluster
|
|
131
|
+
*/
|
|
132
|
+
async joinCluster() {
|
|
133
|
+
console.log(`[${this.nodeId}] Joining existing cluster`);
|
|
134
|
+
if (!this.clusterConfig)
|
|
135
|
+
throw new Error('No cluster config');
|
|
136
|
+
// Add ourselves to the cluster
|
|
137
|
+
this.clusterConfig.nodes[this.nodeId] = this.nodeInfo;
|
|
138
|
+
// Determine our role based on cluster state
|
|
139
|
+
const nodeCount = Object.keys(this.clusterConfig.nodes).length;
|
|
140
|
+
if (!this.clusterConfig.leader || !this.clusterConfig.nodes[this.clusterConfig.leader]) {
|
|
141
|
+
// No leader or leader is gone - trigger election
|
|
142
|
+
await this.triggerLeaderElection();
|
|
143
|
+
}
|
|
144
|
+
else {
|
|
145
|
+
// Become replica
|
|
146
|
+
this.nodeInfo.role = 'replica';
|
|
147
|
+
}
|
|
148
|
+
// Register ourselves
|
|
149
|
+
await this.registerNode();
|
|
150
|
+
// Request shard assignment if auto-rebalance is enabled
|
|
151
|
+
if (this.clusterConfig.settings.autoRebalance) {
|
|
152
|
+
await this.requestShardAssignment();
|
|
153
|
+
}
|
|
154
|
+
this.emit('clusterJoined', this.clusterConfig);
|
|
155
|
+
}
|
|
156
|
+
/**
|
|
157
|
+
* Leave cluster cleanly
|
|
158
|
+
*/
|
|
159
|
+
async leaveCluster() {
|
|
160
|
+
if (!this.clusterConfig)
|
|
161
|
+
return;
|
|
162
|
+
console.log(`[${this.nodeId}] Leaving cluster`);
|
|
163
|
+
// Remove ourselves from node registry
|
|
164
|
+
try {
|
|
165
|
+
// Mark as deleted rather than actually deleting
|
|
166
|
+
const deadNode = { ...this.nodeInfo, lastSeen: 0, status: 'inactive' };
|
|
167
|
+
await this.storage.saveMetadata(`${this.CLUSTER_PATH}/nodes/${this.nodeId}.json`, deadNode);
|
|
168
|
+
}
|
|
169
|
+
catch (err) {
|
|
170
|
+
// Ignore errors during shutdown
|
|
171
|
+
}
|
|
172
|
+
// If we're the leader, trigger new election
|
|
173
|
+
if (this.clusterConfig.leader === this.nodeId) {
|
|
174
|
+
this.clusterConfig.leader = null;
|
|
175
|
+
await this.saveClusterConfig();
|
|
176
|
+
}
|
|
177
|
+
this.emit('clusterLeft');
|
|
178
|
+
}
|
|
179
|
+
/**
|
|
180
|
+
* Register node in storage
|
|
181
|
+
*/
|
|
182
|
+
async registerNode() {
|
|
183
|
+
const path = `${this.CLUSTER_PATH}/nodes/${this.nodeId}.json`;
|
|
184
|
+
await this.storage.saveMetadata(path, this.nodeInfo);
|
|
185
|
+
// Also update registry
|
|
186
|
+
await this.updateNodeRegistry(this.nodeId);
|
|
187
|
+
}
|
|
188
|
+
/**
|
|
189
|
+
* Heartbeat to keep node alive
|
|
190
|
+
*/
|
|
191
|
+
startHeartbeat() {
|
|
192
|
+
this.heartbeatInterval = setInterval(async () => {
|
|
193
|
+
try {
|
|
194
|
+
this.nodeInfo.lastSeen = Date.now();
|
|
195
|
+
await this.registerNode();
|
|
196
|
+
// Also update cluster config if we're the leader
|
|
197
|
+
if (this.clusterConfig && this.clusterConfig.leader === this.nodeId) {
|
|
198
|
+
await this.saveClusterConfig();
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
catch (err) {
|
|
202
|
+
console.error(`[${this.nodeId}] Heartbeat failed:`, err);
|
|
203
|
+
}
|
|
204
|
+
}, this.HEARTBEAT_INTERVAL);
|
|
205
|
+
}
|
|
206
|
+
/**
|
|
207
|
+
* Discover other nodes and monitor health
|
|
208
|
+
*/
|
|
209
|
+
startDiscovery() {
|
|
210
|
+
this.discoveryInterval = setInterval(async () => {
|
|
211
|
+
try {
|
|
212
|
+
await this.discoverNodes();
|
|
213
|
+
await this.checkNodeHealth();
|
|
214
|
+
// Check if we need to rebalance
|
|
215
|
+
if (this.shouldRebalance()) {
|
|
216
|
+
await this.triggerRebalance();
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
catch (err) {
|
|
220
|
+
console.error(`[${this.nodeId}] Discovery failed:`, err);
|
|
221
|
+
}
|
|
222
|
+
}, this.DISCOVERY_INTERVAL);
|
|
223
|
+
}
|
|
224
|
+
/**
|
|
225
|
+
* Discover nodes from storage
|
|
226
|
+
*/
|
|
227
|
+
async discoverNodes() {
|
|
228
|
+
try {
|
|
229
|
+
// Since we can't list arbitrary paths, we'll use a registry approach
|
|
230
|
+
// Each node registers in a central registry file
|
|
231
|
+
const registry = await this.loadNodeRegistry();
|
|
232
|
+
const now = Date.now();
|
|
233
|
+
let updated = false;
|
|
234
|
+
for (const nodeId of registry) {
|
|
235
|
+
if (nodeId === this.nodeId)
|
|
236
|
+
continue;
|
|
237
|
+
try {
|
|
238
|
+
const nodeInfo = await this.storage.getMetadata(`${this.CLUSTER_PATH}/nodes/${nodeId}.json`);
|
|
239
|
+
// Check if node is alive
|
|
240
|
+
if (now - nodeInfo.lastSeen < this.NODE_TIMEOUT) {
|
|
241
|
+
if (!this.clusterConfig.nodes[nodeId]) {
|
|
242
|
+
// New node discovered!
|
|
243
|
+
console.log(`[${this.nodeId}] Discovered new node: ${nodeId}`);
|
|
244
|
+
this.clusterConfig.nodes[nodeId] = nodeInfo;
|
|
245
|
+
updated = true;
|
|
246
|
+
this.emit('nodeDiscovered', nodeInfo);
|
|
247
|
+
}
|
|
248
|
+
else {
|
|
249
|
+
// Update existing node info
|
|
250
|
+
this.clusterConfig.nodes[nodeId] = nodeInfo;
|
|
251
|
+
}
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
catch (err) {
|
|
255
|
+
// Node file might be corrupted or deleted
|
|
256
|
+
console.warn(`[${this.nodeId}] Failed to read node ${nodeId}:`, err);
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
if (updated) {
|
|
260
|
+
this.clusterConfig.version++;
|
|
261
|
+
this.clusterConfig.updated = Date.now();
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
catch (err) {
|
|
265
|
+
// Storage might be unavailable
|
|
266
|
+
console.error(`[${this.nodeId}] Failed to discover nodes:`, err);
|
|
267
|
+
}
|
|
268
|
+
}
|
|
269
|
+
/**
|
|
270
|
+
* Load node registry from storage
|
|
271
|
+
*/
|
|
272
|
+
async loadNodeRegistry() {
|
|
273
|
+
try {
|
|
274
|
+
const registry = await this.storage.getMetadata(`${this.CLUSTER_PATH}/registry.json`);
|
|
275
|
+
return registry?.nodes || [];
|
|
276
|
+
}
|
|
277
|
+
catch (err) {
|
|
278
|
+
return [];
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
/**
|
|
282
|
+
* Update node registry in storage
|
|
283
|
+
*/
|
|
284
|
+
async updateNodeRegistry(add, remove) {
|
|
285
|
+
try {
|
|
286
|
+
let registry = await this.loadNodeRegistry();
|
|
287
|
+
if (add && !registry.includes(add)) {
|
|
288
|
+
registry.push(add);
|
|
289
|
+
}
|
|
290
|
+
if (remove) {
|
|
291
|
+
registry = registry.filter(id => id !== remove);
|
|
292
|
+
}
|
|
293
|
+
await this.storage.saveMetadata(`${this.CLUSTER_PATH}/registry.json`, {
|
|
294
|
+
nodes: registry,
|
|
295
|
+
updated: Date.now()
|
|
296
|
+
});
|
|
297
|
+
}
|
|
298
|
+
catch (err) {
|
|
299
|
+
console.error(`[${this.nodeId}] Failed to update registry:`, err);
|
|
300
|
+
}
|
|
301
|
+
}
|
|
302
|
+
/**
|
|
303
|
+
* Check health of known nodes
|
|
304
|
+
*/
|
|
305
|
+
async checkNodeHealth() {
|
|
306
|
+
if (!this.clusterConfig)
|
|
307
|
+
return;
|
|
308
|
+
const now = Date.now();
|
|
309
|
+
const deadNodes = [];
|
|
310
|
+
for (const [nodeId, nodeInfo] of Object.entries(this.clusterConfig.nodes)) {
|
|
311
|
+
if (nodeId === this.nodeId)
|
|
312
|
+
continue;
|
|
313
|
+
if (now - nodeInfo.lastSeen > this.NODE_TIMEOUT) {
|
|
314
|
+
console.log(`[${this.nodeId}] Node ${nodeId} is dead (last seen ${now - nodeInfo.lastSeen}ms ago)`);
|
|
315
|
+
deadNodes.push(nodeId);
|
|
316
|
+
}
|
|
317
|
+
}
|
|
318
|
+
// Remove dead nodes
|
|
319
|
+
for (const nodeId of deadNodes) {
|
|
320
|
+
delete this.clusterConfig.nodes[nodeId];
|
|
321
|
+
this.emit('nodeLost', nodeId);
|
|
322
|
+
// If dead node was leader, trigger election
|
|
323
|
+
if (this.clusterConfig.leader === nodeId) {
|
|
324
|
+
await this.triggerLeaderElection();
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
if (deadNodes.length > 0) {
|
|
328
|
+
// Trigger rebalance to reassign shards from dead nodes
|
|
329
|
+
await this.triggerRebalance();
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
/**
|
|
333
|
+
* Load cluster configuration from storage
|
|
334
|
+
*/
|
|
335
|
+
async loadClusterConfig() {
|
|
336
|
+
try {
|
|
337
|
+
const config = await this.storage.getMetadata(`${this.CLUSTER_PATH}/config.json`);
|
|
338
|
+
return config;
|
|
339
|
+
}
|
|
340
|
+
catch (err) {
|
|
341
|
+
// No cluster config exists yet
|
|
342
|
+
return null;
|
|
343
|
+
}
|
|
344
|
+
}
|
|
345
|
+
/**
|
|
346
|
+
* Save cluster configuration to storage
|
|
347
|
+
*/
|
|
348
|
+
async saveClusterConfig() {
|
|
349
|
+
if (!this.clusterConfig)
|
|
350
|
+
return;
|
|
351
|
+
await this.storage.saveMetadata(`${this.CLUSTER_PATH}/config.json`, this.clusterConfig);
|
|
352
|
+
}
|
|
353
|
+
/**
|
|
354
|
+
* Trigger leader election (simplified - not full Raft)
|
|
355
|
+
*/
|
|
356
|
+
async triggerLeaderElection() {
|
|
357
|
+
console.log(`[${this.nodeId}] Triggering leader election`);
|
|
358
|
+
// Simple election: node with lowest ID wins
|
|
359
|
+
// In production, use proper Raft consensus
|
|
360
|
+
const activeNodes = Object.entries(this.clusterConfig.nodes)
|
|
361
|
+
.filter(([_, info]) => Date.now() - info.lastSeen < this.NODE_TIMEOUT)
|
|
362
|
+
.sort(([a], [b]) => a.localeCompare(b));
|
|
363
|
+
if (activeNodes.length > 0) {
|
|
364
|
+
const [leaderId, leaderInfo] = activeNodes[0];
|
|
365
|
+
this.clusterConfig.leader = leaderId;
|
|
366
|
+
if (leaderId === this.nodeId) {
|
|
367
|
+
console.log(`[${this.nodeId}] Became leader`);
|
|
368
|
+
this.nodeInfo.role = 'primary';
|
|
369
|
+
this.emit('becameLeader');
|
|
370
|
+
}
|
|
371
|
+
else {
|
|
372
|
+
console.log(`[${this.nodeId}] Node ${leaderId} is the new leader`);
|
|
373
|
+
this.nodeInfo.role = 'replica';
|
|
374
|
+
this.emit('leaderElected', leaderId);
|
|
375
|
+
}
|
|
376
|
+
await this.saveClusterConfig();
|
|
377
|
+
}
|
|
378
|
+
}
|
|
379
|
+
/**
|
|
380
|
+
* Request shard assignment for this node
|
|
381
|
+
*/
|
|
382
|
+
async requestShardAssignment() {
|
|
383
|
+
if (!this.clusterConfig)
|
|
384
|
+
return;
|
|
385
|
+
// Calculate how many shards each node should have
|
|
386
|
+
const nodeCount = Object.keys(this.clusterConfig.nodes).length;
|
|
387
|
+
const shardsPerNode = Math.ceil(this.clusterConfig.shards.count / nodeCount);
|
|
388
|
+
// Find shards that need assignment
|
|
389
|
+
const unassignedShards = [];
|
|
390
|
+
for (let i = 0; i < this.clusterConfig.shards.count; i++) {
|
|
391
|
+
const shardId = `shard-${i.toString().padStart(3, '0')}`;
|
|
392
|
+
if (!this.clusterConfig.shards.assignments[shardId] ||
|
|
393
|
+
this.clusterConfig.shards.assignments[shardId].length === 0) {
|
|
394
|
+
unassignedShards.push(shardId);
|
|
395
|
+
}
|
|
396
|
+
}
|
|
397
|
+
// Assign some shards to ourselves
|
|
398
|
+
const ourShare = unassignedShards.slice(0, shardsPerNode);
|
|
399
|
+
for (const shardId of ourShare) {
|
|
400
|
+
this.clusterConfig.shards.assignments[shardId] = [this.nodeId];
|
|
401
|
+
this.nodeInfo.shards.push(shardId);
|
|
402
|
+
}
|
|
403
|
+
if (ourShare.length > 0) {
|
|
404
|
+
console.log(`[${this.nodeId}] Assigned ${ourShare.length} shards`);
|
|
405
|
+
await this.saveClusterConfig();
|
|
406
|
+
}
|
|
407
|
+
}
|
|
408
|
+
/**
|
|
409
|
+
* Check if rebalancing is needed
|
|
410
|
+
*/
|
|
411
|
+
shouldRebalance() {
|
|
412
|
+
if (!this.clusterConfig || !this.clusterConfig.settings.autoRebalance) {
|
|
413
|
+
return false;
|
|
414
|
+
}
|
|
415
|
+
// Check if shards are evenly distributed
|
|
416
|
+
const nodeCount = Object.keys(this.clusterConfig.nodes).length;
|
|
417
|
+
if (nodeCount <= 1)
|
|
418
|
+
return false;
|
|
419
|
+
const targetShardsPerNode = Math.ceil(this.clusterConfig.shards.count / nodeCount);
|
|
420
|
+
const variance = 2; // Allow some variance
|
|
421
|
+
for (const nodeInfo of Object.values(this.clusterConfig.nodes)) {
|
|
422
|
+
const shardCount = nodeInfo.shards.length;
|
|
423
|
+
if (Math.abs(shardCount - targetShardsPerNode) > variance) {
|
|
424
|
+
return true;
|
|
425
|
+
}
|
|
426
|
+
}
|
|
427
|
+
return false;
|
|
428
|
+
}
|
|
429
|
+
/**
|
|
430
|
+
* Trigger shard rebalancing
|
|
431
|
+
*/
|
|
432
|
+
async triggerRebalance() {
|
|
433
|
+
// Only leader can trigger rebalance
|
|
434
|
+
if (this.clusterConfig?.leader !== this.nodeId)
|
|
435
|
+
return;
|
|
436
|
+
console.log(`[${this.nodeId}] Triggering shard rebalance`);
|
|
437
|
+
// This will be implemented with actual data migration
|
|
438
|
+
// For now, just redistribute shard assignments
|
|
439
|
+
await this.redistributeShards();
|
|
440
|
+
this.emit('rebalanceTriggered');
|
|
441
|
+
}
|
|
442
|
+
/**
|
|
443
|
+
* Redistribute shards among active nodes
|
|
444
|
+
*/
|
|
445
|
+
async redistributeShards() {
|
|
446
|
+
if (!this.clusterConfig)
|
|
447
|
+
return;
|
|
448
|
+
const activeNodes = Object.keys(this.clusterConfig.nodes)
|
|
449
|
+
.filter(id => Date.now() - this.clusterConfig.nodes[id].lastSeen < this.NODE_TIMEOUT);
|
|
450
|
+
if (activeNodes.length === 0)
|
|
451
|
+
return;
|
|
452
|
+
const shardsPerNode = Math.ceil(this.clusterConfig.shards.count / activeNodes.length);
|
|
453
|
+
const newAssignments = {};
|
|
454
|
+
// Clear current shard assignments from nodes
|
|
455
|
+
for (const nodeInfo of Object.values(this.clusterConfig.nodes)) {
|
|
456
|
+
nodeInfo.shards = [];
|
|
457
|
+
}
|
|
458
|
+
// Redistribute shards
|
|
459
|
+
let nodeIndex = 0;
|
|
460
|
+
for (let i = 0; i < this.clusterConfig.shards.count; i++) {
|
|
461
|
+
const shardId = `shard-${i.toString().padStart(3, '0')}`;
|
|
462
|
+
const primaryNode = activeNodes[nodeIndex % activeNodes.length];
|
|
463
|
+
// Assign primary
|
|
464
|
+
newAssignments[shardId] = [primaryNode];
|
|
465
|
+
this.clusterConfig.nodes[primaryNode].shards.push(shardId);
|
|
466
|
+
// Assign replicas
|
|
467
|
+
const replicas = [];
|
|
468
|
+
for (let r = 1; r < Math.min(this.clusterConfig.settings.replicationFactor, activeNodes.length); r++) {
|
|
469
|
+
const replicaNode = activeNodes[(nodeIndex + r) % activeNodes.length];
|
|
470
|
+
if (replicaNode !== primaryNode) {
|
|
471
|
+
replicas.push(replicaNode);
|
|
472
|
+
}
|
|
473
|
+
}
|
|
474
|
+
if (replicas.length > 0) {
|
|
475
|
+
newAssignments[shardId].push(...replicas);
|
|
476
|
+
}
|
|
477
|
+
nodeIndex++;
|
|
478
|
+
}
|
|
479
|
+
this.clusterConfig.shards.assignments = newAssignments;
|
|
480
|
+
this.clusterConfig.version++;
|
|
481
|
+
this.clusterConfig.updated = Date.now();
|
|
482
|
+
await this.saveClusterConfig();
|
|
483
|
+
console.log(`[${this.nodeId}] Rebalanced ${this.clusterConfig.shards.count} shards across ${activeNodes.length} nodes`);
|
|
484
|
+
}
|
|
485
|
+
/**
|
|
486
|
+
* Detect our public endpoint
|
|
487
|
+
*/
|
|
488
|
+
async detectEndpoint(port) {
|
|
489
|
+
// Try to detect public IP
|
|
490
|
+
const interfaces = os.networkInterfaces();
|
|
491
|
+
let ip = '127.0.0.1';
|
|
492
|
+
// Find first non-internal IPv4 address
|
|
493
|
+
for (const iface of Object.values(interfaces)) {
|
|
494
|
+
if (!iface)
|
|
495
|
+
continue;
|
|
496
|
+
for (const addr of iface) {
|
|
497
|
+
if (addr.family === 'IPv4' && !addr.internal) {
|
|
498
|
+
ip = addr.address;
|
|
499
|
+
break;
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
}
|
|
503
|
+
// In cloud environments, might need to detect public IP differently
|
|
504
|
+
if (process.env.PUBLIC_IP) {
|
|
505
|
+
ip = process.env.PUBLIC_IP;
|
|
506
|
+
}
|
|
507
|
+
else if (process.env.KUBERNETES_SERVICE_HOST) {
|
|
508
|
+
// In Kubernetes, use pod IP
|
|
509
|
+
ip = process.env.POD_IP || ip;
|
|
510
|
+
}
|
|
511
|
+
return `http://${ip}:${port}`;
|
|
512
|
+
}
|
|
513
|
+
/**
|
|
514
|
+
* Generate unique node ID
|
|
515
|
+
*/
|
|
516
|
+
generateNodeId() {
|
|
517
|
+
const hostname = os.hostname();
|
|
518
|
+
const pid = process.pid;
|
|
519
|
+
const random = Math.random().toString(36).substring(2, 8);
|
|
520
|
+
return `${hostname}-${pid}-${random}`;
|
|
521
|
+
}
|
|
522
|
+
/**
|
|
523
|
+
* Get current cluster configuration
|
|
524
|
+
*/
|
|
525
|
+
getClusterConfig() {
|
|
526
|
+
return this.clusterConfig;
|
|
527
|
+
}
|
|
528
|
+
/**
|
|
529
|
+
* Get active nodes
|
|
530
|
+
*/
|
|
531
|
+
getActiveNodes() {
|
|
532
|
+
if (!this.clusterConfig)
|
|
533
|
+
return [];
|
|
534
|
+
const now = Date.now();
|
|
535
|
+
return Object.values(this.clusterConfig.nodes)
|
|
536
|
+
.filter(node => now - node.lastSeen < this.NODE_TIMEOUT);
|
|
537
|
+
}
|
|
538
|
+
/**
|
|
539
|
+
* Get shards assigned to this node
|
|
540
|
+
*/
|
|
541
|
+
getMyShards() {
|
|
542
|
+
return this.nodeInfo.shards;
|
|
543
|
+
}
|
|
544
|
+
/**
|
|
545
|
+
* Update node statistics
|
|
546
|
+
*/
|
|
547
|
+
updateStats(stats) {
|
|
548
|
+
Object.assign(this.nodeInfo.stats, stats);
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
//# sourceMappingURL=storageDiscovery.js.map
|
|
@@ -61,10 +61,6 @@ export declare class EmbeddingManager {
|
|
|
61
61
|
* Get embedding function for compatibility
|
|
62
62
|
*/
|
|
63
63
|
getEmbeddingFunction(): EmbeddingFunction;
|
|
64
|
-
/**
|
|
65
|
-
* Determine model precision
|
|
66
|
-
*/
|
|
67
|
-
private determinePrecision;
|
|
68
64
|
/**
|
|
69
65
|
* Get models directory path
|
|
70
66
|
*/
|
|
@@ -32,9 +32,9 @@ export class EmbeddingManager {
|
|
|
32
32
|
this.initTime = null;
|
|
33
33
|
this.embedCount = 0;
|
|
34
34
|
this.locked = false;
|
|
35
|
-
//
|
|
36
|
-
this.precision =
|
|
37
|
-
console.log(`🎯 EmbeddingManager: Using
|
|
35
|
+
// Always use Q8 for optimal size/performance (99% accuracy, 75% smaller)
|
|
36
|
+
this.precision = 'q8';
|
|
37
|
+
console.log(`🎯 EmbeddingManager: Using Q8 precision`);
|
|
38
38
|
}
|
|
39
39
|
/**
|
|
40
40
|
* Get the singleton instance
|
|
@@ -50,7 +50,14 @@ export class EmbeddingManager {
|
|
|
50
50
|
*/
|
|
51
51
|
async init() {
|
|
52
52
|
// In unit test mode, skip real model initialization
|
|
53
|
-
|
|
53
|
+
const isTestMode = process.env.BRAINY_UNIT_TEST === 'true' || globalThis.__BRAINY_UNIT_TEST__;
|
|
54
|
+
if (isTestMode) {
|
|
55
|
+
// Production safeguard: Warn if mock mode is active but NODE_ENV is production
|
|
56
|
+
if (process.env.NODE_ENV === 'production') {
|
|
57
|
+
throw new Error('CRITICAL: Mock embeddings detected in production environment! ' +
|
|
58
|
+
'BRAINY_UNIT_TEST or __BRAINY_UNIT_TEST__ is set while NODE_ENV=production. ' +
|
|
59
|
+
'This is a security risk. Remove test flags before deploying to production.');
|
|
60
|
+
}
|
|
54
61
|
if (!this.initialized) {
|
|
55
62
|
this.initialized = true;
|
|
56
63
|
this.initTime = 1; // Mock init time
|
|
@@ -98,9 +105,9 @@ export class EmbeddingManager {
|
|
|
98
105
|
const pipelineOptions = {
|
|
99
106
|
cache_dir: modelsPath,
|
|
100
107
|
local_files_only: false,
|
|
101
|
-
//
|
|
102
|
-
dtype:
|
|
103
|
-
quantized:
|
|
108
|
+
// Always use Q8 precision
|
|
109
|
+
dtype: 'q8',
|
|
110
|
+
quantized: true,
|
|
104
111
|
// Memory optimizations
|
|
105
112
|
session_options: {
|
|
106
113
|
enableCpuMemArena: false,
|
|
@@ -119,7 +126,7 @@ export class EmbeddingManager {
|
|
|
119
126
|
// Log success
|
|
120
127
|
const memoryMB = this.getMemoryUsage();
|
|
121
128
|
console.log(`✅ Model loaded in ${this.initTime}ms`);
|
|
122
|
-
console.log(`📊 Precision:
|
|
129
|
+
console.log(`📊 Precision: Q8 | Memory: ${memoryMB}MB`);
|
|
123
130
|
console.log(`🔒 Configuration locked`);
|
|
124
131
|
}
|
|
125
132
|
catch (error) {
|
|
@@ -133,7 +140,12 @@ export class EmbeddingManager {
|
|
|
133
140
|
*/
|
|
134
141
|
async embed(text) {
|
|
135
142
|
// Check for unit test environment - use mocks to prevent ONNX conflicts
|
|
136
|
-
|
|
143
|
+
const isTestMode = process.env.BRAINY_UNIT_TEST === 'true' || globalThis.__BRAINY_UNIT_TEST__;
|
|
144
|
+
if (isTestMode) {
|
|
145
|
+
// Production safeguard
|
|
146
|
+
if (process.env.NODE_ENV === 'production') {
|
|
147
|
+
throw new Error('CRITICAL: Mock embeddings in production!');
|
|
148
|
+
}
|
|
137
149
|
return this.getMockEmbedding(text);
|
|
138
150
|
}
|
|
139
151
|
// Ensure initialized
|
|
@@ -192,23 +204,6 @@ export class EmbeddingManager {
|
|
|
192
204
|
return await this.embed(data);
|
|
193
205
|
};
|
|
194
206
|
}
|
|
195
|
-
/**
|
|
196
|
-
* Determine model precision
|
|
197
|
-
*/
|
|
198
|
-
determinePrecision() {
|
|
199
|
-
// Check environment variable overrides
|
|
200
|
-
if (process.env.BRAINY_MODEL_PRECISION === 'fp32') {
|
|
201
|
-
return 'fp32';
|
|
202
|
-
}
|
|
203
|
-
if (process.env.BRAINY_MODEL_PRECISION === 'q8') {
|
|
204
|
-
return 'q8';
|
|
205
|
-
}
|
|
206
|
-
if (process.env.BRAINY_FORCE_FP32 === 'true') {
|
|
207
|
-
return 'fp32';
|
|
208
|
-
}
|
|
209
|
-
// Default to Q8 - optimal for most use cases
|
|
210
|
-
return 'q8';
|
|
211
|
-
}
|
|
212
207
|
/**
|
|
213
208
|
* Get models directory path
|
|
214
209
|
*/
|
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
* Custom error types for Brainy operations
|
|
3
3
|
* Provides better error classification and handling
|
|
4
4
|
*/
|
|
5
|
-
export type BrainyErrorType = 'TIMEOUT' | 'NETWORK' | 'STORAGE' | 'NOT_FOUND' | 'RETRY_EXHAUSTED';
|
|
5
|
+
export type BrainyErrorType = 'TIMEOUT' | 'NETWORK' | 'STORAGE' | 'NOT_FOUND' | 'RETRY_EXHAUSTED' | 'VALIDATION';
|
|
6
6
|
/**
|
|
7
7
|
* Custom error class for Brainy operations
|
|
8
8
|
* Provides error type classification and retry information
|
|
@@ -34,6 +34,10 @@ export declare class BrainyError extends Error {
|
|
|
34
34
|
* Create a retry exhausted error
|
|
35
35
|
*/
|
|
36
36
|
static retryExhausted(operation: string, maxRetries: number, lastError?: Error): BrainyError;
|
|
37
|
+
/**
|
|
38
|
+
* Create a validation error
|
|
39
|
+
*/
|
|
40
|
+
static validation(parameter: string, constraint: string, value?: any): BrainyError;
|
|
37
41
|
/**
|
|
38
42
|
* Check if an error is retryable
|
|
39
43
|
*/
|
|
@@ -50,6 +50,12 @@ export class BrainyError extends Error {
|
|
|
50
50
|
static retryExhausted(operation, maxRetries, lastError) {
|
|
51
51
|
return new BrainyError(`Operation '${operation}' failed after ${maxRetries} retry attempts`, 'RETRY_EXHAUSTED', false, lastError, maxRetries, maxRetries);
|
|
52
52
|
}
|
|
53
|
+
/**
|
|
54
|
+
* Create a validation error
|
|
55
|
+
*/
|
|
56
|
+
static validation(parameter, constraint, value) {
|
|
57
|
+
return new BrainyError(`Invalid ${parameter}: ${constraint}`, 'VALIDATION', false);
|
|
58
|
+
}
|
|
53
59
|
/**
|
|
54
60
|
* Check if an error is retryable
|
|
55
61
|
*/
|
|
@@ -106,6 +112,12 @@ export class BrainyError extends Error {
|
|
|
106
112
|
message.includes('does not exist')) {
|
|
107
113
|
return BrainyError.notFound(operation || 'resource');
|
|
108
114
|
}
|
|
115
|
+
if (message.includes('invalid') ||
|
|
116
|
+
message.includes('validation') ||
|
|
117
|
+
message.includes('cannot be null') ||
|
|
118
|
+
message.includes('must be')) {
|
|
119
|
+
return new BrainyError(error.message, 'VALIDATION', false, error);
|
|
120
|
+
}
|
|
109
121
|
// Default to storage error for unclassified errors
|
|
110
122
|
return BrainyError.storage(error.message, error);
|
|
111
123
|
}
|