@soulcraft/brainy 0.60.0 → 0.61.0

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
package/README.md CHANGED
@@ -7,9 +7,10 @@
7
7
  [![Node.js](https://img.shields.io/badge/node-%3E%3D24.4.1-brightgreen.svg)](https://nodejs.org/)
8
8
  [![TypeScript](https://img.shields.io/badge/TypeScript-5.4.5-blue.svg)](https://www.typescriptlang.org/)
9
9
 
10
- # BRAINY: The Brain in a Jar Database™
10
+ # BRAINY: Multi-Dimensional AI Database™
11
11
 
12
- **The world's only Vector + Graph + AI database and realtime data platform**
12
+ **The world's first Multi-Dimensional AI Database**
13
+ *Vector similarity • Graph relationships • Metadata facets • AI context*
13
14
 
14
15
  *Zero-to-Smart™ technology that thinks so you don't have to*
15
16
 
@@ -48,13 +49,13 @@ const results = await brainy.search("AI language models", 5, {
48
49
  Pinecone ($$$) + Neo4j ($$$) + Elasticsearch ($$$) + Sync Hell = 😱
49
50
  ```
50
51
 
51
- ### ✅ The Brainy Way: One Smart Brain
52
+ ### ✅ The Brainy Way: One Multi-Dimensional Brain
52
53
 
53
54
  ```
54
- Vector Search + Graph Relations + Metadata Filtering + AI Intelligence = 🧠✨
55
+ Multi-Dimensional AI Database = Vector + Graph + Facets + AI = 🧠✨
55
56
  ```
56
57
 
57
- **Your data gets a brain upgrade. No assembly required.**
58
+ **Your data gets a multi-dimensional brain upgrade. No assembly required.**
58
59
 
59
60
  ## ⚡ QUICK & EASY: From Zero to Smart in 60 Seconds
60
61
 
@@ -458,6 +459,7 @@ brainy augment trial notion # Start 14-day free trial
458
459
 
459
460
  ### Advanced Topics
460
461
 
462
+ - [**🏗️ Storage & Retrieval Architecture**](docs/technical/STORAGE_AND_RETRIEVAL_ARCHITECTURE.md) - Multi-dimensional database internals
461
463
  - [**Brainy CLI**](docs/brainy-cli.md) - Command-line superpowers
462
464
  - [**Brainy Chat**](BRAINY-CHAT.md) - Conversational AI interface
463
465
  - [**Cortex AI**](CORTEX.md) - Intelligence augmentation
package/bin/brainy.js CHANGED
@@ -13,6 +13,7 @@ import chalk from 'chalk'
13
13
  import { readFileSync } from 'fs'
14
14
  import { dirname, join } from 'path'
15
15
  import { fileURLToPath } from 'url'
16
+ import { createInterface } from 'readline'
16
17
 
17
18
  // Use native fetch (available in Node.js 18+)
18
19
 
@@ -57,7 +58,7 @@ const wrapInteractive = (fn) => {
57
58
 
58
59
  program
59
60
  .name('brainy')
60
- .description('🧠 Brainy - Vector + Graph Database with AI Coordination')
61
+ .description('🧠 Brainy - Multi-Dimensional AI Database')
61
62
  .version(packageJson.version)
62
63
 
63
64
  // ========================================
@@ -66,7 +67,7 @@ program
66
67
 
67
68
  program
68
69
  .command('init')
69
- .description('🚀 Initialize Brainy in your project')
70
+ .description('Initialize Brainy in your project')
70
71
  .option('-s, --storage <type>', 'Storage type (filesystem, s3, r2, gcs, memory)')
71
72
  .option('-e, --encryption', 'Enable encryption for secrets')
72
73
  .action(wrapAction(async (options) => {
@@ -75,8 +76,8 @@ program
75
76
 
76
77
  program
77
78
  .command('add [data]')
78
- .description('📊 Add data to Brainy')
79
- .option('-m, --metadata <json>', 'Metadata as JSON')
79
+ .description('Add data across multiple dimensions (vector, graph, facets)')
80
+ .option('-m, --metadata <json>', 'Metadata facets as JSON')
80
81
  .option('-i, --id <id>', 'Custom ID')
81
82
  .action(wrapAction(async (data, options) => {
82
83
  let metadata = {}
@@ -96,11 +97,11 @@ program
96
97
 
97
98
  program
98
99
  .command('search <query>')
99
- .description('🔍 Search your database')
100
+ .description('Multi-dimensional search across vector, graph, and facets')
100
101
  .option('-l, --limit <number>', 'Number of results', '10')
101
- .option('-f, --filter <json>', 'MongoDB-style metadata filters')
102
- .option('-v, --verbs <types>', 'Graph verb types to traverse (comma-separated)')
103
- .option('-d, --depth <number>', 'Graph traversal depth', '1')
102
+ .option('-f, --filter <json>', 'Filter by metadata facets')
103
+ .option('-v, --verbs <types>', 'Include related data (comma-separated)')
104
+ .option('-d, --depth <number>', 'Relationship depth', '1')
104
105
  .action(wrapAction(async (query, options) => {
105
106
  const searchOptions = { limit: parseInt(options.limit) }
106
107
 
@@ -123,7 +124,7 @@ program
123
124
 
124
125
  program
125
126
  .command('chat [question]')
126
- .description('💬 Chat with your data (interactive mode if no question)')
127
+ .description('AI-powered chat with multi-dimensional context')
127
128
  .option('-l, --llm <model>', 'LLM model to use')
128
129
  .action(wrapInteractive(async (question, options) => {
129
130
  await cortex.chat(question)
@@ -131,7 +132,7 @@ program
131
132
 
132
133
  program
133
134
  .command('stats')
134
- .description('📊 Show database statistics')
135
+ .description('Show database statistics and insights')
135
136
  .option('-d, --detailed', 'Show detailed statistics')
136
137
  .action(wrapAction(async (options) => {
137
138
  await cortex.stats(options.detailed)
@@ -139,7 +140,7 @@ program
139
140
 
140
141
  program
141
142
  .command('health')
142
- .description('🔋 Check system health')
143
+ .description('Check system health')
143
144
  .option('--auto-fix', 'Automatically apply safe repairs')
144
145
  .action(wrapAction(async (options) => {
145
146
  await cortex.health(options)
@@ -147,21 +148,21 @@ program
147
148
 
148
149
  program
149
150
  .command('find')
150
- .description('🔍 Interactive advanced search')
151
+ .description('Advanced intelligent search (interactive)')
151
152
  .action(wrapInteractive(async () => {
152
153
  await cortex.advancedSearch()
153
154
  }))
154
155
 
155
156
  program
156
157
  .command('explore [nodeId]')
157
- .description('🗺️ Interactively explore graph connections')
158
+ .description('Explore data relationships interactively')
158
159
  .action(wrapInteractive(async (nodeId) => {
159
160
  await cortex.explore(nodeId)
160
161
  }))
161
162
 
162
163
  program
163
164
  .command('backup')
164
- .description('💾 Create database backup')
165
+ .description('Create database backup')
165
166
  .option('-c, --compress', 'Compress backup')
166
167
  .option('-o, --output <file>', 'Output file')
167
168
  .action(wrapAction(async (options) => {
@@ -170,7 +171,7 @@ program
170
171
 
171
172
  program
172
173
  .command('restore <file>')
173
- .description('♻️ Restore from backup')
174
+ .description('Restore from backup')
174
175
  .action(wrapInteractive(async (file) => {
175
176
  await cortex.restore(file)
176
177
  }))
@@ -181,35 +182,30 @@ program
181
182
 
182
183
  program
183
184
  .command('connect')
184
- .description('🔗 Connect me to your Brain Cloud so I remember everything')
185
+ .description('Connect to Brain Cloud for AI memory')
185
186
  .action(wrapInteractive(async () => {
186
- console.log(chalk.cyan('\n🧠 Setting Up AI Memory...'))
187
- console.log(chalk.gray('━'.repeat(50)))
187
+ console.log(chalk.cyan('\n🧠 Brain Cloud Setup'))
188
+ console.log(chalk.gray('━'.repeat(40)))
188
189
 
189
190
  try {
190
191
  // Detect customer ID
191
192
  const customerId = await detectCustomerId()
192
193
 
193
194
  if (customerId) {
194
- console.log(chalk.green(`✅ Found your Brain Cloud: ${customerId}`))
195
- console.log('\n🔧 I can set up AI memory so I remember our conversations:')
196
- console.log(chalk.yellow(' • Update Claude configuration'))
195
+ console.log(chalk.green(`✅ Found Brain Cloud: ${customerId}`))
196
+ console.log('\n🔧 Setting up AI memory:')
197
+ console.log(chalk.yellow(' • Update configuration'))
197
198
  console.log(chalk.yellow(' • Add memory instructions'))
198
199
  console.log(chalk.yellow(' • Enable cross-session memory'))
199
200
 
200
- // For now, auto-proceed (in a real CLI environment, user could be prompted)
201
- console.log(chalk.cyan('\n🚀 Setting up AI memory...'))
202
- const proceed = true
203
-
204
- if (proceed) {
205
- await setupBrainCloudMemory(customerId)
206
- console.log(chalk.green('\n🎉 AI Memory Connected!'))
207
- console.log(chalk.cyan('Restart Claude Code and I\'ll remember everything!'))
208
- }
201
+ console.log(chalk.cyan('\n🚀 Configuring...'))
202
+ await setupBrainCloudMemory(customerId)
203
+ console.log(chalk.green('\n✅ AI memory connected!'))
204
+ console.log(chalk.cyan('Restart Claude Code to activate memory.'))
209
205
  } else {
210
- console.log(chalk.yellow('🤔 No Brain Cloud found. Let me help you set one up:'))
211
- console.log('\n1. Visit: ' + chalk.cyan('https://app.soulcraftlabs.com'))
212
- console.log('2. Sign up for Brain Cloud ($19/month)')
206
+ console.log(chalk.yellow('No Brain Cloud found. Setting up:'))
207
+ console.log('\n1. Visit: ' + chalk.cyan('https://soulcraft.com'))
208
+ console.log('2. Sign up for Brain Cloud')
213
209
  console.log('3. Run ' + chalk.green('brainy connect') + ' again')
214
210
  }
215
211
  } catch (error) {
@@ -219,7 +215,7 @@ program
219
215
 
220
216
  program
221
217
  .command('cloud [action]')
222
- .description('☁️ Connect to Brain Cloud - AI memory that never forgets')
218
+ .description('Manage Brain Cloud connection')
223
219
  .option('--connect <id>', 'Connect to existing Brain Cloud instance')
224
220
  .option('--export <id>', 'Export all data from Brain Cloud instance')
225
221
  .option('--status <id>', 'Check status of Brain Cloud instance')
@@ -363,7 +359,7 @@ program
363
359
 
364
360
  program
365
361
  .command('install <augmentation>')
366
- .description('📦 Install augmentation')
362
+ .description('Install augmentation')
367
363
  .option('-m, --mode <type>', 'Installation mode (free|premium)', 'free')
368
364
  .option('-c, --config <json>', 'Configuration as JSON')
369
365
  .action(wrapAction(async (augmentation, options) => {
@@ -386,7 +382,7 @@ program
386
382
 
387
383
  program
388
384
  .command('run <augmentation>')
389
- .description('Run augmentation')
385
+ .description('Run augmentation')
390
386
  .option('-c, --config <json>', 'Runtime configuration as JSON')
391
387
  .action(wrapAction(async (augmentation, options) => {
392
388
  if (augmentation === 'brain-jar') {
@@ -400,7 +396,7 @@ program
400
396
 
401
397
  program
402
398
  .command('status [augmentation]')
403
- .description('📊 Show augmentation status')
399
+ .description('Show augmentation status')
404
400
  .action(wrapAction(async (augmentation) => {
405
401
  if (augmentation === 'brain-jar') {
406
402
  await cortex.brainJarStatus()
@@ -415,7 +411,7 @@ program
415
411
 
416
412
  program
417
413
  .command('stop [augmentation]')
418
- .description('⏹️ Stop augmentation')
414
+ .description('Stop augmentation')
419
415
  .action(wrapAction(async (augmentation) => {
420
416
  if (augmentation === 'brain-jar') {
421
417
  await cortex.brainJarStop()
@@ -426,11 +422,11 @@ program
426
422
 
427
423
  program
428
424
  .command('list')
429
- .description('📋 List installed augmentations')
425
+ .description('List installed augmentations')
430
426
  .option('-a, --available', 'Show available augmentations')
431
427
  .action(wrapAction(async (options) => {
432
428
  if (options.available) {
433
- console.log(chalk.cyan('🧩 Available Augmentations:'))
429
+ console.log(chalk.cyan('Available Augmentations:'))
434
430
  console.log(' • brain-jar - AI coordination and collaboration')
435
431
  console.log(' • encryption - Data encryption and security')
436
432
  console.log(' • neural-import - AI-powered data analysis')
@@ -442,30 +438,17 @@ program
442
438
  }
443
439
  }))
444
440
 
445
- // ========================================
446
- // BRAIN CLOUD SUPER COMMAND (New!)
447
- // ========================================
448
-
449
- program
450
- .command('cloud')
451
- .description('☁️ Setup Brain Cloud - AI coordination across all devices')
452
- .option('-m, --mode <type>', 'Setup mode (free|premium)', 'interactive')
453
- .option('-k, --key <key>', 'License key for premium features')
454
- .option('-s, --skip-install', 'Skip Brain Jar installation')
455
- .action(wrapInteractive(async (options) => {
456
- await cortex.setupBrainCloud(options)
457
- }))
458
441
 
459
442
  // ========================================
460
443
  // BRAIN JAR SPECIFIC COMMANDS (Rich UX)
461
444
  // ========================================
462
445
 
463
446
  const brainJar = program.command('brain-jar')
464
- .description('🧠🫙 AI coordination and collaboration')
447
+ .description('AI coordination and collaboration')
465
448
 
466
449
  brainJar
467
450
  .command('install')
468
- .description('📦 Install Brain Jar coordination')
451
+ .description('Install Brain Jar coordination')
469
452
  .option('-m, --mode <type>', 'Installation mode (free|premium)', 'free')
470
453
  .action(wrapAction(async (options) => {
471
454
  await cortex.brainJarInstall(options.mode)
@@ -473,7 +456,7 @@ brainJar
473
456
 
474
457
  brainJar
475
458
  .command('start')
476
- .description('🚀 Start Brain Jar coordination')
459
+ .description('Start Brain Jar coordination')
477
460
  .option('-s, --server <url>', 'Custom server URL')
478
461
  .option('-n, --name <name>', 'Agent name')
479
462
  .option('-r, --role <role>', 'Agent role')
@@ -483,7 +466,7 @@ brainJar
483
466
 
484
467
  brainJar
485
468
  .command('dashboard')
486
- .description('📊 Open Brain Jar dashboard')
469
+ .description('Open Brain Jar dashboard')
487
470
  .option('-o, --open', 'Auto-open in browser', true)
488
471
  .action(wrapAction(async (options) => {
489
472
  await cortex.brainJarDashboard(options.open)
@@ -491,28 +474,28 @@ brainJar
491
474
 
492
475
  brainJar
493
476
  .command('status')
494
- .description('🔍 Show Brain Jar status')
477
+ .description('Show Brain Jar status')
495
478
  .action(wrapAction(async () => {
496
479
  await cortex.brainJarStatus()
497
480
  }))
498
481
 
499
482
  brainJar
500
483
  .command('agents')
501
- .description('👥 List connected agents')
484
+ .description('List connected agents')
502
485
  .action(wrapAction(async () => {
503
486
  await cortex.brainJarAgents()
504
487
  }))
505
488
 
506
489
  brainJar
507
490
  .command('message <text>')
508
- .description('📨 Send message to coordination channel')
491
+ .description('Send message to coordination channel')
509
492
  .action(wrapAction(async (text) => {
510
493
  await cortex.brainJarMessage(text)
511
494
  }))
512
495
 
513
496
  brainJar
514
497
  .command('search <query>')
515
- .description('🔍 Search coordination history')
498
+ .description('Search coordination history')
516
499
  .option('-l, --limit <number>', 'Number of results', '10')
517
500
  .action(wrapAction(async (query, options) => {
518
501
  await cortex.brainJarSearch(query, parseInt(options.limit))
@@ -523,7 +506,7 @@ brainJar
523
506
  // ========================================
524
507
 
525
508
  const config = program.command('config')
526
- .description('⚙️ Manage configuration')
509
+ .description('Manage configuration')
527
510
 
528
511
  config
529
512
  .command('set <key> <value>')
@@ -557,11 +540,11 @@ config
557
540
  // ========================================
558
541
 
559
542
  const cortexCmd = program.command('cortex')
560
- .description('🔧 Legacy Cortex commands (deprecated - use direct commands)')
543
+ .description('Legacy Cortex commands (deprecated - use direct commands)')
561
544
 
562
545
  cortexCmd
563
546
  .command('chat [question]')
564
- .description('💬 Chat with your data')
547
+ .description('Chat with your data')
565
548
  .action(wrapInteractive(async (question) => {
566
549
  console.log(chalk.yellow('⚠️ Deprecated: Use "brainy chat" instead'))
567
550
  await cortex.chat(question)
@@ -569,7 +552,7 @@ cortexCmd
569
552
 
570
553
  cortexCmd
571
554
  .command('add [data]')
572
- .description('📊 Add data')
555
+ .description('Add data')
573
556
  .action(wrapAction(async (data) => {
574
557
  console.log(chalk.yellow('⚠️ Deprecated: Use "brainy add" instead'))
575
558
  await cortex.add(data, {})
@@ -581,7 +564,7 @@ cortexCmd
581
564
 
582
565
  program
583
566
  .command('shell')
584
- .description('🐚 Interactive Brainy shell')
567
+ .description('Interactive Brainy shell')
585
568
  .action(wrapInteractive(async () => {
586
569
  console.log(chalk.cyan('🧠 Brainy Interactive Shell'))
587
570
  console.log(chalk.dim('Type "help" for commands, "exit" to quit\n'))
@@ -596,21 +579,24 @@ program.parse(process.argv)
596
579
 
597
580
  // Show help if no command
598
581
  if (!process.argv.slice(2).length) {
599
- console.log(chalk.cyan('🧠☁️ Brainy - AI Coordination Service'))
600
- console.log('')
601
- console.log(chalk.bold('One-Command Setup:'))
602
- console.log(chalk.green(' brainy cloud # Setup Brain Cloud (recommended!)'))
603
- console.log('')
582
+ console.log(chalk.cyan('🧠 Brainy - Multi-Dimensional AI Database'))
583
+ console.log(chalk.gray('Vector similarity, graph relationships, metadata facets, and AI context.\n'))
584
+
604
585
  console.log(chalk.bold('Quick Start:'))
605
586
  console.log(' brainy init # Initialize project')
606
- console.log(' brainy add "some data" # Add data')
607
- console.log(' brainy search "query" # Search data')
608
- console.log(' brainy chat # Chat with data')
587
+ console.log(' brainy add "some data" # Add multi-dimensional data')
588
+ console.log(' brainy search "query" # Search across all dimensions')
589
+ console.log(' brainy chat # AI chat with full context')
590
+ console.log('')
591
+ console.log(chalk.bold('AI Memory:'))
592
+ console.log(chalk.green(' brainy connect # Connect to Brain Cloud'))
593
+ console.log(' brainy cloud --status <id> # Check cloud status')
609
594
  console.log('')
610
595
  console.log(chalk.bold('AI Coordination:'))
611
- console.log(' brainy install brain-jar # Install AI coordination')
596
+ console.log(' brainy install brain-jar # Install coordination')
612
597
  console.log(' brainy brain-jar start # Start coordination')
613
- console.log(' brainy brain-jar dashboard # View dashboard')
598
+ console.log('')
599
+ console.log(chalk.dim('Learn more: https://soulcraft.com'))
614
600
  console.log('')
615
601
  program.outputHelp()
616
602
  }
@@ -129,31 +129,41 @@ class BaseMemoryAugmentation {
129
129
  error: 'Query must be a vector (array of numbers) for vector search'
130
130
  };
131
131
  }
132
- // Get all nodes from storage
133
- const nodes = await this.storage.getAllNouns();
134
- // Calculate distances and prepare results
135
- const results = [];
136
- for (const node of nodes) {
137
- // Skip nodes that don't have a vector
138
- if (!node.vector || !Array.isArray(node.vector)) {
139
- continue;
140
- }
141
- // Get metadata for the node
142
- const metadata = await this.storage.getMetadata(node.id);
143
- // Calculate distance between query vector and node vector
144
- const distance = cosineDistance(queryVector, node.vector);
145
- // Convert distance to similarity score (1 - distance for cosine)
146
- // This way higher scores are better (more similar)
147
- const score = 1 - distance;
148
- results.push({
149
- id: node.id,
150
- score,
151
- data: metadata
132
+ // Process nodes in batches to avoid loading everything into memory
133
+ const allResults = [];
134
+ let hasMore = true;
135
+ let cursor;
136
+ while (hasMore) {
137
+ // Get a batch of nodes
138
+ const batchResult = await this.storage.getNouns({
139
+ pagination: { limit: 100, cursor }
152
140
  });
141
+ // Process this batch
142
+ for (const noun of batchResult.items) {
143
+ // Skip nodes that don't have a vector
144
+ if (!noun.vector || !Array.isArray(noun.vector)) {
145
+ continue;
146
+ }
147
+ // Get metadata for the node
148
+ const metadata = await this.storage.getMetadata(noun.id);
149
+ // Calculate distance between query vector and node vector
150
+ const distance = cosineDistance(queryVector, noun.vector);
151
+ // Convert distance to similarity score (1 - distance for cosine)
152
+ // This way higher scores are better (more similar)
153
+ const score = 1 - distance;
154
+ allResults.push({
155
+ id: noun.id,
156
+ score,
157
+ data: metadata
158
+ });
159
+ }
160
+ // Update pagination state
161
+ hasMore = batchResult.hasMore;
162
+ cursor = batchResult.nextCursor;
153
163
  }
154
164
  // Sort results by score (descending) and take top k
155
- results.sort((a, b) => b.score - a.score);
156
- const topResults = results.slice(0, k);
165
+ allResults.sort((a, b) => b.score - a.score);
166
+ const topResults = allResults.slice(0, k);
157
167
  return {
158
168
  success: true,
159
169
  data: topResults
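
The rewritten vector search above replaces the removed `getAllNouns()` call with a cursor-driven batch loop, so memory use is bounded by the batch size rather than the dataset. A minimal TypeScript sketch of that pattern follows; the `getNouns` options and the `items`/`hasMore`/`nextCursor` result shape are taken from the hunk, while the trimmed `Storage` interface and the `cosineDistance` helper here are illustrative stand-ins, not the package's actual types.

```typescript
interface Noun { id: string; vector?: number[] }
interface NounPage { items: Noun[]; hasMore: boolean; nextCursor?: string }
interface Storage {
  getNouns(opts: { pagination: { limit: number; cursor?: string } }): Promise<NounPage>
  getMetadata(id: string): Promise<unknown>
}

// Simplified cosine distance; the package ships its own implementation.
function cosineDistance(a: number[], b: number[]): number {
  let dot = 0, na = 0, nb = 0
  for (let i = 0; i < a.length; i++) { dot += a[i] * b[i]; na += a[i] ** 2; nb += b[i] ** 2 }
  return 1 - dot / (Math.sqrt(na) * Math.sqrt(nb) || 1)
}

// Score nouns batch by batch; only one page of nouns is fetched at a time.
async function batchedVectorSearch(storage: Storage, query: number[], k: number) {
  const results: { id: string; score: number; data: unknown }[] = []
  let cursor: string | undefined
  let hasMore = true
  while (hasMore) {
    const batch = await storage.getNouns({ pagination: { limit: 100, cursor } })
    for (const noun of batch.items) {
      if (!noun.vector) continue
      results.push({
        id: noun.id,
        score: 1 - cosineDistance(query, noun.vector), // higher score = more similar
        data: await storage.getMetadata(noun.id)
      })
    }
    hasMore = batch.hasMore
    cursor = batch.nextCursor
  }
  return results.sort((a, b) => b.score - a.score).slice(0, k)
}
```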
@@ -736,11 +736,6 @@ export declare class BrainyData<T = any> implements BrainyDataInterface<T> {
736
736
  * @returns Promise<Array<VectorDocument<T> | null>> Array of documents (null for missing IDs)
737
737
  */
738
738
  getBatch(ids: string[]): Promise<Array<VectorDocument<T> | null>>;
739
- /**
740
- * Get all nouns in the database
741
- * @returns Array of vector documents
742
- */
743
- getAllNouns(): Promise<VectorDocument<T>[]>;
744
739
  /**
745
740
  * Get nouns with pagination and filtering
746
741
  * @param options Pagination and filtering options
@@ -830,10 +825,25 @@ export declare class BrainyData<T = any> implements BrainyDataInterface<T> {
830
825
  */
831
826
  getVerb(id: string): Promise<GraphVerb | null>;
832
827
  /**
833
- * Get all verbs
834
- * @returns Array of all verbs
828
+ * Internal performance optimization: intelligently load verbs when beneficial
829
+ * @internal - Used by search, indexing, and caching optimizations
830
+ */
831
+ private _optimizedLoadAllVerbs;
832
+ /**
833
+ * Internal performance optimization: intelligently load nouns when beneficial
834
+ * @internal - Used by search, indexing, and caching optimizations
835
+ */
836
+ private _optimizedLoadAllNouns;
837
+ /**
838
+ * Intelligent decision making for when to preload all data
839
+ * @internal
840
+ */
841
+ private _shouldPreloadAllData;
842
+ /**
843
+ * Estimate if dataset size is reasonable for in-memory loading
844
+ * @internal
835
845
  */
836
- getAllVerbs(): Promise<GraphVerb[]>;
846
+ private _isDatasetSizeReasonable;
837
847
  /**
838
848
  * Get verbs with pagination and filtering
839
849
  * @param options Pagination and filtering options
@@ -2146,26 +2146,8 @@ export class BrainyData {
2146
2146
  }
2147
2147
  return results;
2148
2148
  }
2149
- /**
2150
- * Get all nouns in the database
2151
- * @returns Array of vector documents
2152
- */
2153
- async getAllNouns() {
2154
- await this.ensureInitialized();
2155
- try {
2156
- // Use getNouns with no pagination to get all nouns
2157
- const result = await this.getNouns({
2158
- pagination: {
2159
- limit: Number.MAX_SAFE_INTEGER // Request all nouns
2160
- }
2161
- });
2162
- return result.items;
2163
- }
2164
- catch (error) {
2165
- console.error('Failed to get all nouns:', error);
2166
- throw new Error(`Failed to get all nouns: ${error}`);
2167
- }
2168
- }
2149
+ // getAllNouns() method removed - use getNouns() with pagination instead
2150
+ // This method was dangerous and could cause expensive scans and memory issues
2169
2151
  /**
2170
2152
  * Get nouns with pagination and filtering
2171
2153
  * @param options Pagination and filtering options
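
Callers that consumed the removed `getAllNouns()` now need to page through `getNouns()` themselves. One hedged way to wrap that, assuming the `{ pagination: { limit, cursor } }` options and `{ items, hasMore, nextCursor }` result shape used elsewhere in this diff (the helper name is illustrative):

```typescript
interface Page<T> { items: T[]; hasMore: boolean; nextCursor?: string }
interface NounSource<T> {
  getNouns(opts: { pagination: { limit: number; cursor?: string } }): Promise<Page<T>>
}

// Stream nouns page by page instead of materializing the whole store at once.
async function* iterateNouns<T>(db: NounSource<T>, pageSize = 500): AsyncGenerator<T> {
  let cursor: string | undefined
  do {
    const page = await db.getNouns({ pagination: { limit: pageSize, cursor } })
    yield* page.items
    cursor = page.hasMore ? page.nextCursor : undefined
  } while (cursor)
}

// Usage: for await (const noun of iterateNouns(db)) { /* process one noun */ }
```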
@@ -2725,7 +2707,7 @@ export class BrainyData {
2725
2707
  const scores = await this.intelligentVerbScoring.computeVerbScores(sourceId, targetId, verbType, options.weight, options.metadata);
2726
2708
  finalWeight = scores.weight;
2727
2709
  finalConfidence = scores.confidence;
2728
- scoringReasoning = scores.reasoning;
2710
+ scoringReasoning = scores.reasoning || [];
2729
2711
  if (this.loggingConfig?.verbose && scoringReasoning.length > 0) {
2730
2712
  console.log(`Intelligent verb scoring for ${sourceId}-${verbType}-${targetId}:`, scoringReasoning);
2731
2713
  }
@@ -2748,8 +2730,8 @@ export class BrainyData {
2748
2730
  type: verbType, // Set the type property to match the verb type
2749
2731
  weight: finalWeight,
2750
2732
  confidence: finalConfidence, // Add confidence to metadata
2751
- intelligentScoring: scoringReasoning.length > 0 ? {
2752
- reasoning: scoringReasoning,
2733
+ intelligentScoring: this.intelligentVerbScoring?.enabled ? {
2734
+ reasoning: scoringReasoning.length > 0 ? scoringReasoning : [`Final weight ${finalWeight}`, `Base confidence ${finalConfidence || 0.5}`],
2753
2735
  computedAt: new Date().toISOString()
2754
2736
  } : undefined,
2755
2737
  createdAt: timestamp,
@@ -2851,7 +2833,12 @@ export class BrainyData {
2851
2833
  updatedAt: metadata.updatedAt,
2852
2834
  createdBy: metadata.createdBy,
2853
2835
  data: metadata.data,
2854
- metadata: metadata.data // Alias for backward compatibility
2836
+ metadata: {
2837
+ ...metadata.data,
2838
+ weight: metadata.weight,
2839
+ confidence: metadata.confidence,
2840
+ ...(metadata.intelligentScoring && { intelligentScoring: metadata.intelligentScoring })
2841
+ } // Complete metadata including intelligent scoring when available
2855
2842
  };
2856
2843
  return graphVerb;
2857
2844
  }
@@ -2861,47 +2848,94 @@ export class BrainyData {
2861
2848
  }
2862
2849
  }
2863
2850
  /**
2864
- * Get all verbs
2865
- * @returns Array of all verbs
2851
+ * Internal performance optimization: intelligently load verbs when beneficial
2852
+ * @internal - Used by search, indexing, and caching optimizations
2866
2853
  */
2867
- async getAllVerbs() {
2868
- await this.ensureInitialized();
2869
- try {
2870
- // Get all lightweight verbs from storage
2871
- const hnswVerbs = await this.storage.getAllVerbs();
2872
- // Convert each HNSWVerb to GraphVerb by loading metadata
2873
- const graphVerbs = [];
2874
- for (const hnswVerb of hnswVerbs) {
2875
- const metadata = await this.storage.getVerbMetadata(hnswVerb.id);
2876
- if (metadata) {
2877
- const graphVerb = {
2878
- id: hnswVerb.id,
2879
- vector: hnswVerb.vector,
2880
- sourceId: metadata.sourceId,
2881
- targetId: metadata.targetId,
2882
- source: metadata.source,
2883
- target: metadata.target,
2884
- verb: metadata.verb,
2885
- type: metadata.type,
2886
- weight: metadata.weight,
2887
- createdAt: metadata.createdAt,
2888
- updatedAt: metadata.updatedAt,
2889
- createdBy: metadata.createdBy,
2890
- data: metadata.data,
2891
- metadata: metadata.data // Alias for backward compatibility
2892
- };
2893
- graphVerbs.push(graphVerb);
2894
- }
2895
- else {
2896
- console.warn(`Verb ${hnswVerb.id} found but no metadata - skipping`);
2897
- }
2854
+ async _optimizedLoadAllVerbs() {
2855
+ // Only load all if it's safe and beneficial
2856
+ if (await this._shouldPreloadAllData()) {
2857
+ const result = await this.getVerbs({
2858
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
2859
+ });
2860
+ return result.items;
2861
+ }
2862
+ // Fall back to on-demand loading
2863
+ return [];
2864
+ }
2865
+ /**
2866
+ * Internal performance optimization: intelligently load nouns when beneficial
2867
+ * @internal - Used by search, indexing, and caching optimizations
2868
+ */
2869
+ async _optimizedLoadAllNouns() {
2870
+ // Only load all if it's safe and beneficial
2871
+ if (await this._shouldPreloadAllData()) {
2872
+ const result = await this.getNouns({
2873
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
2874
+ });
2875
+ return result.items;
2876
+ }
2877
+ // Fall back to on-demand loading
2878
+ return [];
2879
+ }
2880
+ /**
2881
+ * Intelligent decision making for when to preload all data
2882
+ * @internal
2883
+ */
2884
+ async _shouldPreloadAllData() {
2885
+ // Smart heuristics for performance optimization
2886
+ // 1. Read-only mode is ideal for preloading
2887
+ if (this.readOnly) {
2888
+ return await this._isDatasetSizeReasonable();
2889
+ }
2890
+ // 2. Check available memory (Node.js)
2891
+ if (typeof process !== 'undefined' && process.memoryUsage) {
2892
+ const memUsage = process.memoryUsage();
2893
+ const availableMemory = memUsage.heapTotal - memUsage.heapUsed;
2894
+ const memoryMB = availableMemory / (1024 * 1024);
2895
+ // Only preload if we have substantial free memory (>500MB)
2896
+ if (memoryMB < 500) {
2897
+ console.debug('Performance optimization: Skipping preload due to low memory');
2898
+ return false;
2898
2899
  }
2899
- return graphVerbs;
2900
2900
  }
2901
- catch (error) {
2902
- console.error('Failed to get all verbs:', error);
2903
- throw new Error(`Failed to get all verbs: ${error}`);
2901
+ // 3. Consider frozen/immutable mode
2902
+ if (this.frozen) {
2903
+ return await this._isDatasetSizeReasonable();
2904
2904
  }
2905
+ // 4. For frequent search operations, preloading can be beneficial
2906
+ // TODO: Track search frequency and decide based on access patterns
2907
+ return false; // Conservative default for write-heavy workloads
2908
+ }
2909
+ /**
2910
+ * Estimate if dataset size is reasonable for in-memory loading
2911
+ * @internal
2912
+ */
2913
+ async _isDatasetSizeReasonable() {
2914
+ // Implement basic size estimation
2915
+ // Check if we have recent statistics
2916
+ const stats = await this.getStatistics();
2917
+ if (stats) {
2918
+ const totalEntities = Object.values(stats.nounCount || {}).reduce((a, b) => a + b, 0) +
2919
+ Object.values(stats.verbCount || {}).reduce((a, b) => a + b, 0);
2920
+ // Conservative thresholds
2921
+ if (totalEntities > 100000) {
2922
+ console.debug('Performance optimization: Dataset too large for preloading');
2923
+ return false;
2924
+ }
2925
+ if (totalEntities < 10000) {
2926
+ console.debug('Performance optimization: Small dataset - safe to preload');
2927
+ return true;
2928
+ }
2929
+ }
2930
+ // Medium datasets - check memory pressure
2931
+ if (typeof process !== 'undefined' && process.memoryUsage) {
2932
+ const memUsage = process.memoryUsage();
2933
+ const heapUsedPercent = (memUsage.heapUsed / memUsage.heapTotal) * 100;
2934
+ // Only preload if heap usage is low
2935
+ return heapUsedPercent < 50;
2936
+ }
2937
+ // Default: conservative approach
2938
+ return false;
2905
2939
  }
2906
2940
  /**
2907
2941
  * Get verbs with pagination and filtering
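
The new `_shouldPreloadAllData()` / `_isDatasetSizeReasonable()` pair gates preloading on store mode, free heap, and entity counts. The sketch below condenses those signals into standalone functions using the thresholds from the hunk (about 500 MB free heap, 10k/100k entity bounds, 50% heap usage); it is a simplified composite for illustration, not a line-for-line port, and the statistics shape is assumed.

```typescript
interface Stats { nounCount?: Record<string, number>; verbCount?: Record<string, number> }

function freeHeapMB(): number {
  const { heapTotal, heapUsed } = process.memoryUsage()
  return (heapTotal - heapUsed) / (1024 * 1024)
}

// Preload only for read-only/frozen stores with enough headroom; write-heavy
// workloads fall back to on-demand loading (the diff's conservative default).
function shouldPreload(readOnly: boolean, frozen: boolean, stats?: Stats): boolean {
  if (!readOnly && !frozen) return false
  if (freeHeapMB() < 500) return false // skip preloading under memory pressure
  return isDatasetSizeReasonable(stats)
}

function isDatasetSizeReasonable(stats?: Stats): boolean {
  const total =
    Object.values(stats?.nounCount ?? {}).reduce((a, b) => a + b, 0) +
    Object.values(stats?.verbCount ?? {}).reduce((a, b) => a + b, 0)
  if (total > 100_000) return false            // too large for in-memory loading
  if (total > 0 && total < 10_000) return true // small datasets are safe to preload
  const { heapUsed, heapTotal } = process.memoryUsage()
  return (heapUsed / heapTotal) * 100 < 50     // medium datasets: require low heap usage
}
```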
@@ -3679,19 +3713,40 @@ export class BrainyData {
3679
3713
  }
3680
3714
  // First use the HNSW index to find similar vectors efficiently
3681
3715
  const searchResults = await this.index.search(queryVector, k * 2);
3682
- // Get all verbs for filtering
3683
- const allVerbs = await this.getAllVerbs();
3684
- // Create a map of verb IDs for faster lookup
3685
- const verbMap = new Map();
3686
- for (const verb of allVerbs) {
3687
- verbMap.set(verb.id, verb);
3716
+ // Intelligent verb loading: preload all if beneficial, otherwise on-demand
3717
+ let verbMap = null;
3718
+ let usePreloadedVerbs = false;
3719
+ // Try to intelligently preload verbs for performance
3720
+ const preloadedVerbs = await this._optimizedLoadAllVerbs();
3721
+ if (preloadedVerbs.length > 0) {
3722
+ verbMap = new Map();
3723
+ for (const verb of preloadedVerbs) {
3724
+ verbMap.set(verb.id, verb);
3725
+ }
3726
+ usePreloadedVerbs = true;
3727
+ console.debug(`Performance optimization: Preloaded ${preloadedVerbs.length} verbs for fast lookup`);
3688
3728
  }
3729
+ // Fallback: on-demand verb loading function
3730
+ const getVerbById = async (verbId) => {
3731
+ if (usePreloadedVerbs && verbMap) {
3732
+ return verbMap.get(verbId) || null;
3733
+ }
3734
+ try {
3735
+ const verb = await this.getVerb(verbId);
3736
+ return verb;
3737
+ }
3738
+ catch (error) {
3739
+ console.warn(`Failed to load verb ${verbId}:`, error);
3740
+ return null;
3741
+ }
3742
+ };
3689
3743
  // Filter search results to only include verbs
3690
3744
  const verbResults = [];
3745
+ // Process search results and load verbs on-demand
3691
3746
  for (const result of searchResults) {
3692
3747
  // Search results are [id, distance] tuples
3693
3748
  const [id, distance] = result;
3694
- const verb = verbMap.get(id);
3749
+ const verb = await getVerbById(id);
3695
3750
  if (verb) {
3696
3751
  // If verb types are specified, check if this verb matches
3697
3752
  if (options.verbTypes && options.verbTypes.length > 0) {
@@ -3721,8 +3776,11 @@ export class BrainyData {
3721
3776
  }
3722
3777
  }
3723
3778
  else {
3724
- // Use all verbs
3725
- verbs = allVerbs;
3779
+ // Get all verbs with pagination
3780
+ const allVerbsResult = await this.getVerbs({
3781
+ pagination: { limit: 10000 }
3782
+ });
3783
+ verbs = allVerbsResult.items;
3726
3784
  }
3727
3785
  // Calculate similarity for each verb not already in results
3728
3786
  const existingIds = new Set(verbResults.map((v) => v.id));
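
Verb lookups during search now take one of two paths: a preloaded `Map` when `_optimizedLoadAllVerbs()` returned data, or per-id `getVerb()` calls otherwise. The same pattern in isolation, with the loaders passed in (the function names here are illustrative, not package API):

```typescript
interface VerbLike { id: string }

// Build a lookup that prefers a preloaded map and falls back to per-id fetches.
function makeVerbLookup<V extends VerbLike>(
  preloaded: V[],
  fetchVerb: (id: string) => Promise<V | null>
): (id: string) => Promise<V | null> {
  const map = new Map<string, V>()
  for (const verb of preloaded) map.set(verb.id, verb)
  return async (id) => {
    if (map.size > 0) return map.get(id) ?? null
    try {
      return await fetchVerb(id) // on-demand path when nothing was preloaded
    } catch (err) {
      console.warn(`Failed to load verb ${id}:`, err)
      return null
    }
  }
}
```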
@@ -4248,10 +4306,18 @@ export class BrainyData {
4248
4306
  async backup() {
4249
4307
  await this.ensureInitialized();
4250
4308
  try {
4251
- // Get all nouns
4252
- const nouns = await this.getAllNouns();
4253
- // Get all verbs
4254
- const verbs = await this.getAllVerbs();
4309
+ // Use intelligent loading for backup - this is a legitimate use case for full export
4310
+ console.log('Creating backup - loading all data...');
4311
+ // For backup, we legitimately need all data, so use large pagination
4312
+ const nounsResult = await this.getNouns({
4313
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
4314
+ });
4315
+ const nouns = nounsResult.items;
4316
+ const verbsResult = await this.getVerbs({
4317
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
4318
+ });
4319
+ const verbs = verbsResult.items;
4320
+ console.log(`Backup: Loaded ${nouns.length} nouns and ${verbs.length} verbs`);
4255
4321
  // Get all noun types
4256
4322
  const nounTypes = Object.values(NounType);
4257
4323
  // Get all verb types
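
`backup()` is the one caller that still intentionally loads everything, now via the paginated getters with an effectively unbounded limit. A short sketch of the same call pattern, with the result shape assumed to match the rest of the diff:

```typescript
interface ExportPage<T> { items: T[] }
interface ExportSource {
  getNouns(opts: { pagination: { limit: number } }): Promise<ExportPage<unknown>>
  getVerbs(opts: { pagination: { limit: number } }): Promise<ExportPage<unknown>>
}

// A full export is a legitimate full scan: request everything in one page.
async function exportAll(db: ExportSource) {
  const nouns = (await db.getNouns({ pagination: { limit: Number.MAX_SAFE_INTEGER } })).items
  const verbs = (await db.getVerbs({ pagination: { limit: Number.MAX_SAFE_INTEGER } })).items
  console.log(`Backup: loaded ${nouns.length} nouns and ${verbs.length} verbs`)
  return { nouns, verbs }
}
```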
@@ -512,16 +512,4 @@ export interface StorageAdapter {
512
512
  * @returns Promise that resolves to an array of changes
513
513
  */
514
514
  getChangesSince?(timestamp: number, limit?: number): Promise<any[]>;
515
- /**
516
- * Get all nouns from storage
517
- * @returns Promise that resolves to an array of all nouns
518
- * @deprecated This method loads all data into memory and may cause performance issues. Use getNouns() with pagination instead.
519
- */
520
- getAllNouns(): Promise<HNSWNoun[]>;
521
- /**
522
- * Get all verbs from storage
523
- * @returns Promise that resolves to an array of all HNSWVerbs
524
- * @deprecated This method loads all data into memory and may cause performance issues. Use getVerbs() with pagination instead.
525
- */
526
- getAllVerbs(): Promise<HNSWVerb[]>;
527
515
  }
@@ -29,18 +29,6 @@ export declare abstract class BaseStorageAdapter implements StorageAdapter {
29
29
  quota: number | null;
30
30
  details?: Record<string, any>;
31
31
  }>;
32
- /**
33
- * Get all nouns from storage
34
- * @returns Promise that resolves to an array of all nouns
35
- * @deprecated This method loads all data into memory and may cause performance issues. Use getNouns() with pagination instead.
36
- */
37
- abstract getAllNouns(): Promise<any[]>;
38
- /**
39
- * Get all verbs from storage
40
- * @returns Promise that resolves to an array of all HNSWVerbs
41
- * @deprecated This method loads all data into memory and may cause performance issues. Use getVerbs() with pagination instead.
42
- */
43
- abstract getAllVerbs(): Promise<any[]>;
44
32
  /**
45
33
  * Get nouns with pagination and filtering
46
34
  * @param options Pagination and filtering options
@@ -146,7 +146,7 @@ export class OPFSStorage extends BaseStorage {
146
146
  connections: this.mapToObject(noun.connections, (set) => Array.from(set))
147
147
  };
148
148
  // Create or get the file for this noun
149
- const fileHandle = await this.nounsDir.getFileHandle(noun.id, {
149
+ const fileHandle = await this.nounsDir.getFileHandle(`${noun.id}.json`, {
150
150
  create: true
151
151
  });
152
152
  // Write the noun data to the file
@@ -166,7 +166,7 @@ export class OPFSStorage extends BaseStorage {
166
166
  await this.ensureInitialized();
167
167
  try {
168
168
  // Get the file handle for this noun
169
- const fileHandle = await this.nounsDir.getFileHandle(id);
169
+ const fileHandle = await this.nounsDir.getFileHandle(`${id}.json`);
170
170
  // Read the noun data from the file
171
171
  const file = await fileHandle.getFile();
172
172
  const text = await file.text();
@@ -253,7 +253,7 @@ export class OPFSStorage extends BaseStorage {
253
253
  async deleteNode(id) {
254
254
  await this.ensureInitialized();
255
255
  try {
256
- await this.nounsDir.removeEntry(id);
256
+ await this.nounsDir.removeEntry(`${id}.json`);
257
257
  }
258
258
  catch (error) {
259
259
  // Ignore NotFoundError, which means the file doesn't exist
@@ -281,7 +281,7 @@ export class OPFSStorage extends BaseStorage {
281
281
  connections: this.mapToObject(edge.connections, (set) => Array.from(set))
282
282
  };
283
283
  // Create or get the file for this verb
284
- const fileHandle = await this.verbsDir.getFileHandle(edge.id, {
284
+ const fileHandle = await this.verbsDir.getFileHandle(`${edge.id}.json`, {
285
285
  create: true
286
286
  });
287
287
  // Write the verb data to the file
@@ -307,7 +307,7 @@ export class OPFSStorage extends BaseStorage {
307
307
  await this.ensureInitialized();
308
308
  try {
309
309
  // Get the file handle for this edge
310
- const fileHandle = await this.verbsDir.getFileHandle(id);
310
+ const fileHandle = await this.verbsDir.getFileHandle(`${id}.json`);
311
311
  // Read the edge data from the file
312
312
  const file = await fileHandle.getFile();
313
313
  const text = await file.text();
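
The OPFS adapter now appends `.json` to every file it creates, reads, and deletes. A standalone browser-side sketch of the same read/write pattern against the Origin Private File System (standard OPFS APIs; directory names are illustrative):

```typescript
// Write and read a record under OPFS, mirroring the `${id}.json` naming.
async function writeRecord(dirName: string, id: string, data: unknown): Promise<void> {
  const root = await navigator.storage.getDirectory()
  const dir = await root.getDirectoryHandle(dirName, { create: true })
  const fileHandle = await dir.getFileHandle(`${id}.json`, { create: true })
  const writable = await fileHandle.createWritable()
  await writable.write(JSON.stringify(data))
  await writable.close()
}

async function readRecord<T>(dirName: string, id: string): Promise<T> {
  const root = await navigator.storage.getDirectory()
  const dir = await root.getDirectoryHandle(dirName)
  const fileHandle = await dir.getFileHandle(`${id}.json`)
  const file = await fileHandle.getFile()
  return JSON.parse(await file.text()) as T
}
```

One consequence worth noting: files written by 0.60.0 under the un-suffixed names would not be found by the suffixed reads, so existing OPFS stores may need migration unless that is handled outside this diff.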
@@ -389,10 +389,12 @@ export class OPFSStorage extends BaseStorage {
389
389
  * Get verbs by source (internal implementation)
390
390
  */
391
391
  async getVerbsBySource_internal(sourceId) {
392
- // This method is deprecated and would require loading metadata for each edge
393
- // For now, return empty array since this is not efficiently implementable with new storage pattern
394
- console.warn('getVerbsBySource_internal is deprecated and not efficiently supported in new storage pattern');
395
- return [];
392
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
393
+ const result = await this.getVerbsWithPagination({
394
+ filter: { sourceId: [sourceId] },
395
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
396
+ });
397
+ return result.items;
396
398
  }
397
399
  /**
398
400
  * Get edges by source
@@ -407,10 +409,12 @@ export class OPFSStorage extends BaseStorage {
407
409
  * Get verbs by target (internal implementation)
408
410
  */
409
411
  async getVerbsByTarget_internal(targetId) {
410
- // This method is deprecated and would require loading metadata for each edge
411
- // For now, return empty array since this is not efficiently implementable with new storage pattern
412
- console.warn('getVerbsByTarget_internal is deprecated and not efficiently supported in new storage pattern');
413
- return [];
412
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
413
+ const result = await this.getVerbsWithPagination({
414
+ filter: { targetId: [targetId] },
415
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
416
+ });
417
+ return result.items;
414
418
  }
415
419
  /**
416
420
  * Get edges by target
@@ -425,10 +429,12 @@ export class OPFSStorage extends BaseStorage {
425
429
  * Get verbs by type (internal implementation)
426
430
  */
427
431
  async getVerbsByType_internal(type) {
428
- // This method is deprecated and would require loading metadata for each edge
429
- // For now, return empty array since this is not efficiently implementable with new storage pattern
430
- console.warn('getVerbsByType_internal is deprecated and not efficiently supported in new storage pattern');
431
- return [];
432
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
433
+ const result = await this.getVerbsWithPagination({
434
+ filter: { verbType: [type] },
435
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
436
+ });
437
+ return result.items;
432
438
  }
433
439
  /**
434
440
  * Get edges by type
@@ -451,7 +457,7 @@ export class OPFSStorage extends BaseStorage {
451
457
  async deleteEdge(id) {
452
458
  await this.ensureInitialized();
453
459
  try {
454
- await this.verbsDir.removeEntry(id);
460
+ await this.verbsDir.removeEntry(`${id}.json`);
455
461
  }
456
462
  catch (error) {
457
463
  // Ignore NotFoundError, which means the file doesn't exist
@@ -468,7 +474,7 @@ export class OPFSStorage extends BaseStorage {
468
474
  await this.ensureInitialized();
469
475
  try {
470
476
  // Create or get the file for this metadata
471
- const fileHandle = await this.metadataDir.getFileHandle(id, {
477
+ const fileHandle = await this.metadataDir.getFileHandle(`${id}.json`, {
472
478
  create: true
473
479
  });
474
480
  // Write the metadata to the file
@@ -488,7 +494,7 @@ export class OPFSStorage extends BaseStorage {
488
494
  await this.ensureInitialized();
489
495
  try {
490
496
  // Get the file handle for this metadata
491
- const fileHandle = await this.metadataDir.getFileHandle(id);
497
+ const fileHandle = await this.metadataDir.getFileHandle(`${id}.json`);
492
498
  // Read the metadata from the file
493
499
  const file = await fileHandle.getFile();
494
500
  const text = await file.text();
@@ -305,26 +305,14 @@ export declare class S3CompatibleStorage extends BaseStorage {
305
305
  * Get verbs by source (internal implementation)
306
306
  */
307
307
  protected getVerbsBySource_internal(sourceId: string): Promise<GraphVerb[]>;
308
- /**
309
- * Get edges by source
310
- */
311
- protected getEdgesBySource(sourceId: string): Promise<GraphVerb[]>;
312
308
  /**
313
309
  * Get verbs by target (internal implementation)
314
310
  */
315
311
  protected getVerbsByTarget_internal(targetId: string): Promise<GraphVerb[]>;
316
- /**
317
- * Get edges by target
318
- */
319
- protected getEdgesByTarget(targetId: string): Promise<GraphVerb[]>;
320
312
  /**
321
313
  * Get verbs by type (internal implementation)
322
314
  */
323
315
  protected getVerbsByType_internal(type: string): Promise<GraphVerb[]>;
324
- /**
325
- * Get edges by type
326
- */
327
- protected getEdgesByType(type: string): Promise<GraphVerb[]>;
328
316
  /**
329
317
  * Delete a verb from storage (internal implementation)
330
318
  */
@@ -381,15 +381,23 @@ export class S3CompatibleStorage extends BaseStorage {
381
381
  // Get metrics
382
382
  const backpressureStatus = this.backpressure.getStatus();
383
383
  const socketMetrics = this.socketManager.getMetrics();
384
- // EXTREMELY aggressive detection - activate on ANY load
385
- const shouldEnableHighVolume = this.forceHighVolumeMode || // Environment override
386
- backpressureStatus.queueLength >= threshold || // Configurable threshold (>= 0 by default!)
387
- socketMetrics.pendingRequests >= threshold || // Socket pressure
388
- this.pendingOperations >= threshold || // Any pending ops
389
- socketMetrics.socketUtilization >= 0.01 || // Even 1% socket usage
390
- (socketMetrics.requestsPerSecond >= 1) || // Any request rate
391
- (this.consecutiveErrors >= 0) || // Always true - any system activity
392
- true; // FORCE ENABLE for emergency debugging
384
+ // Reasonable high-volume detection - only activate under real load
385
+ const isTestEnvironment = process.env.NODE_ENV === 'test';
386
+ const explicitlyDisabled = process.env.BRAINY_FORCE_BUFFERING === 'false';
387
+ // Use reasonable thresholds instead of emergency aggressive ones
388
+ const reasonableThreshold = Math.max(threshold, 10); // At least 10 pending operations
389
+ const highSocketUtilization = 0.8; // 80% socket utilization
390
+ const highRequestRate = 50; // 50 requests per second
391
+ const significantErrors = 5; // 5 consecutive errors
392
+ const shouldEnableHighVolume = !isTestEnvironment && // Disable in test environment
393
+ !explicitlyDisabled && // Allow explicit disabling
394
+ (this.forceHighVolumeMode || // Environment override
395
+ backpressureStatus.queueLength >= reasonableThreshold || // High queue backlog
396
+ socketMetrics.pendingRequests >= reasonableThreshold || // Many pending requests
397
+ this.pendingOperations >= reasonableThreshold || // Many pending ops
398
+ socketMetrics.socketUtilization >= highSocketUtilization || // High socket pressure
399
+ (socketMetrics.requestsPerSecond >= highRequestRate) || // High request rate
400
+ (this.consecutiveErrors >= significantErrors)); // Significant error pattern
393
401
  if (shouldEnableHighVolume && !this.highVolumeMode) {
394
402
  this.highVolumeMode = true;
395
403
  this.logger.warn(`🚨 HIGH-VOLUME MODE ACTIVATED 🚨`);
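
High-volume detection drops the "always on" logic in favor of explicit thresholds (at least 10 pending operations, 80% socket utilization, 50 requests/second, 5 consecutive errors), with `NODE_ENV=test` and `BRAINY_FORCE_BUFFERING=false` as opt-outs. A condensed sketch of that predicate; the metrics object is a stand-in for the adapter's real backpressure and socket metrics:

```typescript
interface LoadMetrics {
  queueLength: number
  pendingRequests: number
  pendingOperations: number
  socketUtilization: number   // 0..1
  requestsPerSecond: number
  consecutiveErrors: number
}

// Only enter high-volume mode under real load, never in tests,
// and never when buffering is explicitly disabled.
function shouldEnableHighVolume(m: LoadMetrics, threshold: number, force = false): boolean {
  if (process.env.NODE_ENV === 'test') return false
  if (process.env.BRAINY_FORCE_BUFFERING === 'false') return false
  const minPending = Math.max(threshold, 10)
  return (
    force ||
    m.queueLength >= minPending ||
    m.pendingRequests >= minPending ||
    m.pendingOperations >= minPending ||
    m.socketUtilization >= 0.8 ||
    m.requestsPerSecond >= 50 ||
    m.consecutiveErrors >= 5
  )
}
```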
@@ -1278,8 +1286,42 @@ export class S3CompatibleStorage extends BaseStorage {
1278
1286
  graphVerbs.push(graphVerb);
1279
1287
  }
1280
1288
  }
1289
+ // Apply filtering at GraphVerb level since HNSWVerb filtering is not supported
1290
+ let filteredGraphVerbs = graphVerbs;
1291
+ if (options.filter) {
1292
+ filteredGraphVerbs = graphVerbs.filter((graphVerb) => {
1293
+ // Filter by sourceId
1294
+ if (options.filter.sourceId) {
1295
+ const sourceIds = Array.isArray(options.filter.sourceId)
1296
+ ? options.filter.sourceId
1297
+ : [options.filter.sourceId];
1298
+ if (!sourceIds.includes(graphVerb.sourceId)) {
1299
+ return false;
1300
+ }
1301
+ }
1302
+ // Filter by targetId
1303
+ if (options.filter.targetId) {
1304
+ const targetIds = Array.isArray(options.filter.targetId)
1305
+ ? options.filter.targetId
1306
+ : [options.filter.targetId];
1307
+ if (!targetIds.includes(graphVerb.targetId)) {
1308
+ return false;
1309
+ }
1310
+ }
1311
+ // Filter by verbType (maps to type field)
1312
+ if (options.filter.verbType) {
1313
+ const verbTypes = Array.isArray(options.filter.verbType)
1314
+ ? options.filter.verbType
1315
+ : [options.filter.verbType];
1316
+ if (graphVerb.type && !verbTypes.includes(graphVerb.type)) {
1317
+ return false;
1318
+ }
1319
+ }
1320
+ return true;
1321
+ });
1322
+ }
1281
1323
  return {
1282
- items: graphVerbs,
1324
+ items: filteredGraphVerbs,
1283
1325
  hasMore: result.hasMore,
1284
1326
  nextCursor: result.nextCursor
1285
1327
  };
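
With filtering now applied after the HNSWVerb-to-GraphVerb conversion, the by-source/target/type helpers become thin wrappers over the paginated call. The filter predicate the hunk adds, extracted as a standalone function for clarity (field names `sourceId`, `targetId`, and `verbType` come from the diff; the `GraphVerb` shape is trimmed):

```typescript
interface GraphVerbLite { sourceId: string; targetId: string; type?: string }
interface VerbFilter {
  sourceId?: string | string[]
  targetId?: string | string[]
  verbType?: string | string[]
}

const toArray = (v: string | string[]): string[] => (Array.isArray(v) ? v : [v])

// Same matching rules as the new in-memory filter: every provided field must match,
// and verbs without a type pass a verbType filter (as in the diff).
function matchesVerbFilter(verb: GraphVerbLite, filter: VerbFilter): boolean {
  if (filter.sourceId && !toArray(filter.sourceId).includes(verb.sourceId)) return false
  if (filter.targetId && !toArray(filter.targetId).includes(verb.targetId)) return false
  if (filter.verbType && verb.type && !toArray(filter.verbType).includes(verb.type)) return false
  return true
}

// e.g. verbs.filter(v => matchesVerbFilter(v, { sourceId: ['user-123'] }))
```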
@@ -1288,46 +1330,34 @@ export class S3CompatibleStorage extends BaseStorage {
1288
1330
  * Get verbs by source (internal implementation)
1289
1331
  */
1290
1332
  async getVerbsBySource_internal(sourceId) {
1291
- return this.getEdgesBySource(sourceId);
1292
- }
1293
- /**
1294
- * Get edges by source
1295
- */
1296
- async getEdgesBySource(sourceId) {
1297
- // This method is deprecated and would require loading metadata for each edge
1298
- // For now, return empty array since this is not efficiently implementable with new storage pattern
1299
- this.logger.trace('getEdgesBySource is deprecated and not efficiently supported in new storage pattern');
1300
- return [];
1333
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
1334
+ const result = await this.getVerbsWithPagination({
1335
+ filter: { sourceId: [sourceId] },
1336
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
1337
+ });
1338
+ return result.items;
1301
1339
  }
1302
1340
  /**
1303
1341
  * Get verbs by target (internal implementation)
1304
1342
  */
1305
1343
  async getVerbsByTarget_internal(targetId) {
1306
- return this.getEdgesByTarget(targetId);
1307
- }
1308
- /**
1309
- * Get edges by target
1310
- */
1311
- async getEdgesByTarget(targetId) {
1312
- // This method is deprecated and would require loading metadata for each edge
1313
- // For now, return empty array since this is not efficiently implementable with new storage pattern
1314
- this.logger.trace('getEdgesByTarget is deprecated and not efficiently supported in new storage pattern');
1315
- return [];
1344
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
1345
+ const result = await this.getVerbsWithPagination({
1346
+ filter: { targetId: [targetId] },
1347
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
1348
+ });
1349
+ return result.items;
1316
1350
  }
1317
1351
  /**
1318
1352
  * Get verbs by type (internal implementation)
1319
1353
  */
1320
1354
  async getVerbsByType_internal(type) {
1321
- return this.getEdgesByType(type);
1322
- }
1323
- /**
1324
- * Get edges by type
1325
- */
1326
- async getEdgesByType(type) {
1327
- // This method is deprecated and would require loading metadata for each edge
1328
- // For now, return empty array since this is not efficiently implementable with new storage pattern
1329
- this.logger.trace('getEdgesByType is deprecated and not efficiently supported in new storage pattern');
1330
- return [];
1355
+ // Use the paginated approach to properly handle HNSWVerb to GraphVerb conversion
1356
+ const result = await this.getVerbsWithPagination({
1357
+ filter: { verbType: [type] },
1358
+ limit: Number.MAX_SAFE_INTEGER // Get all matching results
1359
+ });
1360
+ return result.items;
1331
1361
  }
1332
1362
  /**
1333
1363
  * Delete a verb from storage (internal implementation)
@@ -72,11 +72,10 @@ export declare abstract class BaseStorage extends BaseStorageAdapter {
72
72
  */
73
73
  protected convertHNSWVerbToGraphVerb(hnswVerb: HNSWVerb): Promise<GraphVerb | null>;
74
74
  /**
75
- * Get all verbs from storage
76
- * @returns Promise that resolves to an array of all HNSWVerbs
77
- * @deprecated This method loads all data into memory and may cause performance issues. Use getVerbs() with pagination instead.
75
+ * Internal method for loading all verbs - used by performance optimizations
76
+ * @internal - Do not use directly, use getVerbs() with pagination instead
78
77
  */
79
- getAllVerbs(): Promise<HNSWVerb[]>;
78
+ protected _loadAllVerbsForOptimization(): Promise<HNSWVerb[]>;
80
79
  /**
81
80
  * Get verbs by source
82
81
  */
@@ -90,11 +89,10 @@ export declare abstract class BaseStorage extends BaseStorageAdapter {
90
89
  */
91
90
  getVerbsByType(type: string): Promise<GraphVerb[]>;
92
91
  /**
93
- * Get all nouns from storage
94
- * @returns Promise that resolves to an array of all nouns
95
- * @deprecated This method loads all data into memory and may cause performance issues. Use getNouns() with pagination instead.
92
+ * Internal method for loading all nouns - used by performance optimizations
93
+ * @internal - Do not use directly, use getNouns() with pagination instead
96
94
  */
97
- getAllNouns(): Promise<HNSWNoun[]>;
95
+ protected _loadAllNounsForOptimization(): Promise<HNSWNoun[]>;
98
96
  /**
99
97
  * Get nouns with pagination and filtering
100
98
  * @param options Pagination and filtering options
@@ -181,27 +181,22 @@ export class BaseStorage extends BaseStorageAdapter {
181
181
  }
182
182
  }
183
183
  /**
184
- * Get all verbs from storage
185
- * @returns Promise that resolves to an array of all HNSWVerbs
186
- * @deprecated This method loads all data into memory and may cause performance issues. Use getVerbs() with pagination instead.
184
+ * Internal method for loading all verbs - used by performance optimizations
185
+ * @internal - Do not use directly, use getVerbs() with pagination instead
187
186
  */
188
- async getAllVerbs() {
187
+ async _loadAllVerbsForOptimization() {
189
188
  await this.ensureInitialized();
190
- console.warn('getAllVerbs() is deprecated and may cause memory issues with large datasets. Consider using getVerbs() with pagination instead.');
191
- // Get all verbs using the paginated method with a very large limit
189
+ // Only use this for internal optimizations when safe
192
190
  const result = await this.getVerbs({
193
- pagination: {
194
- limit: Number.MAX_SAFE_INTEGER
195
- }
191
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
196
192
  });
197
- // Convert GraphVerbs back to HNSWVerbs since that's what this method should return
193
+ // Convert GraphVerbs back to HNSWVerbs for internal use
198
194
  const hnswVerbs = [];
199
195
  for (const graphVerb of result.items) {
200
- // Create an HNSWVerb from the GraphVerb (reverse conversion)
201
196
  const hnswVerb = {
202
197
  id: graphVerb.id,
203
198
  vector: graphVerb.vector,
204
- connections: new Map() // HNSWVerbs need connections, but GraphVerbs don't have them
199
+ connections: new Map()
205
200
  };
206
201
  hnswVerbs.push(hnswVerb);
207
202
  }
@@ -241,17 +236,14 @@ export class BaseStorage extends BaseStorageAdapter {
241
236
  return result.items;
242
237
  }
243
238
  /**
244
- * Get all nouns from storage
245
- * @returns Promise that resolves to an array of all nouns
246
- * @deprecated This method loads all data into memory and may cause performance issues. Use getNouns() with pagination instead.
239
+ * Internal method for loading all nouns - used by performance optimizations
240
+ * @internal - Do not use directly, use getNouns() with pagination instead
247
241
  */
248
- async getAllNouns() {
242
+ async _loadAllNounsForOptimization() {
249
243
  await this.ensureInitialized();
250
- console.warn('getAllNouns() is deprecated and may cause memory issues with large datasets. Consider using getNouns() with pagination instead.');
244
+ // Only use this for internal optimizations when safe
251
245
  const result = await this.getNouns({
252
- pagination: {
253
- limit: Number.MAX_SAFE_INTEGER
254
- }
246
+ pagination: { limit: Number.MAX_SAFE_INTEGER }
255
247
  });
256
248
  return result.items;
257
249
  }
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@soulcraft/brainy",
3
- "version": "0.60.0",
4
- "description": "A vector graph database using HNSW indexing with Origin Private File System storage",
3
+ "version": "0.61.0",
4
+ "description": "Multi-Dimensional AI Database - Vector similarity, graph relationships, metadata facets with HNSW indexing and OPFS storage",
5
5
  "main": "dist/index.js",
6
6
  "module": "dist/index.js",
7
7
  "types": "dist/index.d.ts",