@soulcraft/brainy 2.0.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -9,18 +9,24 @@
  [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
  [![TypeScript](https://img.shields.io/badge/%3C%2F%3E-TypeScript-%230074c1.svg)](https://www.typescriptlang.org/)

- **🧠 Brainy 2.0 - Zero-Configuration AI Database with Triple Intelligence™**
+ **🧠 Brainy 2.0 - The Universal Knowledge Protocol™**

- The industry's first truly zero-configuration AI database that combines vector similarity, metadata filtering, and graph relationships with O(log n) performance. Production-ready with 3ms search latency, 220 pre-computed NLP patterns, and only 24MB memory footprint.
+ **World's first Triple Intelligence™ database**—unifying vector similarity, graph relationships, and document filtering in one magical API. Model ANY data from ANY domain using 31 standardized noun types × 40 verb types.
+
+ **Why Brainy Leads**: We're the first to solve the impossible—combining three different database paradigms (vector, graph, document) into one unified query interface. This breakthrough enables us to be the Universal Knowledge Protocol where all tools, augmentations, and AI models speak the same language.
+
+ **Build once, integrate everywhere.** O(log n) performance, 3ms search latency, 24MB memory footprint.

  ## 🎉 What's New in 2.0

- - **Triple Intelligence™**: Unified Vector + Metadata + Graph queries in one API
+ - **World's First Triple Intelligence™**: Unified vector + graph + document in ONE query
+ - **Universal Knowledge Protocol**: 31 nouns × 40 verbs standardize all knowledge
+ - **Infinite Expressiveness**: Model ANY data with unlimited metadata
  - **API Consolidation**: 15+ methods → 2 clean APIs (`search()` and `find()`)
  - **Natural Language**: Ask questions in plain English
  - **Zero Configuration**: Works instantly, no setup required
  - **O(log n) Performance**: Binary search on sorted indices
- - **220+ NLP Patterns**: Pre-computed for instant understanding
+ - **Perfect Interoperability**: All tools and AI models speak the same language
  - **Universal Compatibility**: Node.js, Browser, Edge, Workers

  ## ⚡ Quick Start
@@ -35,29 +41,53 @@ import { BrainyData } from 'brainy'
  const brain = new BrainyData()
  await brain.init()

- // Add data with automatic embedding
- await brain.addNoun("JavaScript is a programming language", {
+ // Add entities (nouns) with automatic embedding
+ const jsId = await brain.addNoun("JavaScript is a programming language", {
  type: "language",
- year: 1995
+ year: 1995,
+ paradigm: "multi-paradigm"
  })

- // Natural language search
- const results = await brain.find("programming languages from the 90s")
+ const nodeId = await brain.addNoun("Node.js runtime environment", {
+ type: "runtime",
+ year: 2009,
+ platform: "server-side"
+ })

- // Vector similarity with metadata filtering
- const filtered = await brain.search("JavaScript", {
- metadata: { type: "language" },
- limit: 5
+ // Create relationships (verbs) between entities
+ await brain.addVerb(nodeId, jsId, "executes", {
+ since: 2009,
+ performance: "high"
+ })
+
+ // Natural language search with graph relationships
+ const results = await brain.find("programming languages used by server runtimes")
+
+ // Triple Intelligence: vector + metadata + relationships
+ const filtered = await brain.find({
+ like: "JavaScript", // Vector similarity
+ where: { type: "language" }, // Metadata filtering
+ connected: { from: nodeId, depth: 1 } // Graph relationships
  })
  ```
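The example above combines all three clauses at once. They also appear to compose independently; a minimal sketch reusing only the option names shown in the example, assuming each clause is optional (not verified against the full API):

```javascript
// Vector similarity only (a sketch; assumes `like` can stand alone)
const semantic = await brain.find({ like: "JavaScript" })

// Vector similarity narrowed by metadata, without graph traversal
const typedOnly = await brain.find({
  like: "JavaScript",
  where: { type: "language" }
})
```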

  ## 🚀 Key Features

- ### Triple Intelligence Engine
- Combines three search paradigms in one unified API:
+ ### World's First Triple Intelligence Engine
+ **The breakthrough that enables the Universal Knowledge Protocol:**
  - **Vector Search**: Semantic similarity with HNSW indexing
- - **Metadata Filtering**: O(log n) field lookups with binary search
- - **Graph Relationships**: Navigate connected knowledge
+ - **Graph Relationships**: Navigate connected knowledge like Neo4j
+ - **Document Filtering**: MongoDB-style queries with O(log n) performance
+ - **Unified in ONE API**: No separate queries, no complex joins
+ - **First to solve this**: Others do vector OR graph OR document—we do ALL
+
+ ### Universal Knowledge Protocol with Infinite Expressiveness
+ **Enabled by Triple Intelligence, standardized for everyone:**
+ - **24 Noun Types × 40 Verb Types**: 960 base combinations
+ - **∞ Expressiveness**: Unlimited metadata = model ANY data
+ - **One Language**: All tools, augmentations, AI models speak the same types
+ - **Perfect Interoperability**: Move data between any Brainy instance
+ - **No Schema Lock-in**: Evolve without migrations
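
The taxonomy itself is not listed in this README, so the type strings below are illustrative only; a minimal sketch of modeling a new domain with the `addNoun`/`addVerb` calls documented here, where standardized types carry the structure and free-form metadata carries the domain detail:

```javascript
// Entities: a standardized noun type plus unlimited domain metadata
// (type strings are hypothetical, not taken from the published taxonomy)
const doctor = await brain.addNoun("Dr. Smith", {
  type: "person",
  specialty: "cardiology"
})
const clinic = await brain.addNoun("Riverside Clinic", {
  type: "organization",
  region: "north"
})

// Relationship: a standardized verb type plus metadata on the edge itself
await brain.addVerb(doctor, clinic, "worksFor", { since: 2021 })
```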
 
  ### Natural Language Understanding
  ```javascript
@@ -108,17 +138,26 @@ const results = await brain.find({

  ### CRUD Operations
  ```javascript
- // Create
+ // Create entities (nouns)
  const id = await brain.addNoun(data, metadata)

+ // Create relationships (verbs)
+ const verbId = await brain.addVerb(sourceId, targetId, "relationType", {
+ strength: 0.9,
+ bidirectional: false
+ })
+
  // Read
  const item = await brain.getNoun(id)
+ const verb = await brain.getVerb(verbId)

  // Update
  await brain.updateNoun(id, newData, newMetadata)
+ await brain.updateVerb(verbId, newMetadata)

  // Delete
  await brain.deleteNoun(id)
+ await brain.deleteVerb(verbId)

  // Bulk operations
  await brain.import(arrayOfData)
@@ -127,16 +166,35 @@ const exported = await brain.export({ format: 'json' })

  ## 🎯 Use Cases

- ### Knowledge Management
+ ### Knowledge Management with Relationships
  ```javascript
- // Store and search documentation
- await brain.addNoun(documentContent, {
+ // Store documentation with rich relationships
+ const apiGuide = await brain.addNoun("REST API Guide", {
  title: "API Guide",
  category: "documentation",
  version: "2.0"
  })

- const docs = await brain.find("API documentation for version 2")
+ const author = await brain.addNoun("Jane Developer", {
+ type: "person",
+ role: "tech-lead"
+ })
+
+ const project = await brain.addNoun("E-commerce Platform", {
+ type: "project",
+ status: "active"
+ })
+
+ // Create knowledge graph
+ await brain.addVerb(author, apiGuide, "authored", {
+ date: "2024-03-15"
+ })
+ await brain.addVerb(apiGuide, project, "documents", {
+ coverage: "complete"
+ })
+
+ // Query the knowledge graph naturally
+ const docs = await brain.find("documentation authored by tech leads for active projects")
  ```
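The same graph can also be queried with the structured `find()` form from the Quick Start; a sketch assuming the `where` and `connected` options shown there, and that `connected.type` filters by verb type as in the AI Memory example later in this README:

```javascript
// Structured counterpart of the natural-language query above (illustrative)
const authoredDocs = await brain.find({
  where: { category: "documentation" },          // metadata filter
  connected: { from: author, type: "authored" }  // follow the "authored" edges
})
```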

  ### Semantic Search
@@ -148,17 +206,35 @@ const similar = await brain.search(existingContent, {
  })
  ```

- ### AI Memory Layer
+ ### AI Memory Layer with Context
  ```javascript
- // Store conversation context
- await brain.addNoun(userMessage, {
- userId: "123",
+ // Store conversation with relationships
+ const userId = await brain.addNoun("User 123", {
+ type: "user",
+ tier: "premium"
+ })
+
+ const messageId = await brain.addNoun(userMessage, {
+ type: "message",
  timestamp: Date.now(),
  session: "abc"
  })

- // Retrieve relevant context
- const context = await brain.find(`previous conversations with user 123`)
+ const topicId = await brain.addNoun("Product Support", {
+ type: "topic",
+ category: "support"
+ })
+
+ // Link conversation elements
+ await brain.addVerb(userId, messageId, "sent")
+ await brain.addVerb(messageId, topicId, "about")
+
+ // Retrieve context with relationships
+ const context = await brain.find({
+ where: { type: "message" },
+ connected: { from: userId, type: "sent" },
+ like: "previous product issues"
+ })
  ```

  ## 💾 Storage Options
@@ -272,6 +348,42 @@ Key changes:

  We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.

+ ## 🧠 The Universal Knowledge Protocol Explained
+
+ ### How We Achieved The Impossible
+
+ **Triple Intelligence™** makes us the **world's first** to unify three database paradigms:
+ 1. **Vector databases** (Pinecone, Weaviate) - semantic similarity
+ 2. **Graph databases** (Neo4j, ArangoDB) - relationships
+ 3. **Document databases** (MongoDB, Elasticsearch) - metadata filtering
+
+ **One API to rule them all.** Others make you choose. We unified them.
+
+ ### The Math of Infinite Expressiveness
+
+ ```
+ 24 Nouns × 40 Verbs × ∞ Metadata × Triple Intelligence = Universal Protocol
+ ```
+
+ - **960 base combinations** from standardized types
+ - **∞ domain specificity** via unlimited metadata
+ - **∞ relationship depth** via graph traversal
+ - **= Model ANYTHING**: From quantum physics to social networks
+
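Taken literally, the base of that formula is just the cross product of the standardized types, with metadata supplying the open-ended part; a trivial restatement (the example pairing is illustrative):

```javascript
// 24 noun types × 40 verb types = 960 base (noun, verb) pairs
const baseCombinations = 24 * 40
console.log(baseCombinations) // 960

// Each pair becomes domain-specific through free-form metadata,
// e.g. a person-type noun linked to an organization-type noun
// with { since: 2021 } attached to the edge
```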
+ ### Why This Changes Everything
+
+ **Like HTTP for the web, Brainy for knowledge:**
+ - All augmentations compose perfectly - same noun-verb language
+ - All AI models share knowledge - GPT, Claude, Llama all understand
+ - All tools integrate seamlessly - no translation layers
+ - All data flows freely - perfect portability
+
+ **The Vision**: One protocol. All knowledge. Every tool. Any AI.
+
+ **Proven across industries**: Healthcare, Finance, Manufacturing, Education, Legal, Retail, Government, and beyond.
+
+ [→ See the Mathematical Proof & Full Taxonomy](docs/architecture/noun-verb-taxonomy.md)
+
  ## 📖 Documentation

  - [Getting Started Guide](docs/guides/getting-started.md)
@@ -279,6 +391,7 @@ We welcome contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
  - [Architecture Overview](docs/architecture/overview.md)
  - [Natural Language Guide](docs/guides/natural-language.md)
  - [Triple Intelligence](docs/architecture/triple-intelligence.md)
+ - [Noun-Verb Taxonomy](docs/architecture/noun-verb-taxonomy.md)

  ## 🏢 Enterprise & Cloud

@@ -59,6 +59,7 @@ export declare class NeuralImportAugmentation extends BaseAugmentation {
  readonly priority = 80;
  private config;
  private analysisCache;
+ private typeMatcher;
  constructor(config?: Partial<NeuralImportConfig>);
  protected onInitialize(): Promise<void>;
  protected onShutdown(): Promise<void>;
@@ -79,15 +80,23 @@ export declare class NeuralImportAugmentation extends BaseAugmentation {
  */
  private parseRawData;
  /**
- * Parse CSV data
+ * Parse CSV data - handles quoted values, escaped quotes, and edge cases
  */
  private parseCSV;
+ /**
+ * Parse YAML data
+ */
+ private parseYAML;
+ /**
+ * Parse a YAML value (handle strings, numbers, booleans, null)
+ */
+ private parseYAMLValue;
  /**
  * Perform neural analysis on parsed data
  */
  private performNeuralAnalysis;
  /**
- * Infer noun type from object structure
+ * Infer noun type from object structure using intelligent type matching
  */
  private inferNounType;
  /**
@@ -95,7 +104,7 @@ export declare class NeuralImportAugmentation extends BaseAugmentation {
  */
  private detectRelationships;
  /**
- * Infer verb type from field name
+ * Infer verb type from field name using intelligent type matching
  */
  private inferVerbType;
  /**
@@ -8,6 +8,8 @@
  */
  import { BaseAugmentation } from './brainyAugmentation.js';
  import * as path from '../universal/path.js';
+ import { getTypeMatcher } from './typeMatching/intelligentTypeMatcher.js';
+ import { prodLog } from '../utils/logger.js';
  /**
  * Neural Import Augmentation - Unified Implementation
  * Processes data with AI before storage operations
@@ -20,6 +22,7 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  this.operations = ['add', 'addNoun', 'addVerb', 'all']; // Use 'all' to catch batch operations
  this.priority = 80; // High priority for data processing
  this.analysisCache = new Map();
+ this.typeMatcher = null;
  this.config = {
  confidenceThreshold: 0.7,
  enableWeights: true,
@@ -29,7 +32,13 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  };
  }
  async onInitialize() {
- this.log('🧠 Neural Import augmentation initialized');
+ try {
+ this.typeMatcher = await getTypeMatcher();
+ this.log('🧠 Neural Import augmentation initialized with intelligent type matching');
+ }
+ catch (error) {
+ this.log('⚠️ Failed to initialize type matcher, falling back to heuristics', 'warn');
+ }
  }
  async onShutdown() {
  this.analysisCache.clear();
@@ -128,13 +137,7 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  return this.parseCSV(content);
  case 'yaml':
  case 'yml':
- // For now, basic YAML support - in full implementation would use yaml parser
- try {
- return JSON.parse(content); // Placeholder
- }
- catch {
- return [{ text: content }];
- }
+ return this.parseYAML(content);
  case 'txt':
  case 'text':
  // Split text into sentences/paragraphs for analysis
@@ -145,24 +148,174 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  }
  }
  /**
- * Parse CSV data
+ * Parse CSV data - handles quoted values, escaped quotes, and edge cases
  */
  parseCSV(content) {
- const lines = content.split('\n').filter(line => line.trim());
+ const lines = content.split('\n');
  if (lines.length === 0)
  return [];
- const headers = lines[0].split(',').map(h => h.trim());
+ // Parse a CSV line handling quotes
+ const parseLine = (line) => {
+ const result = [];
+ let current = '';
+ let inQuotes = false;
+ let i = 0;
+ while (i < line.length) {
+ const char = line[i];
+ const nextChar = line[i + 1];
+ if (char === '"') {
+ if (inQuotes && nextChar === '"') {
+ // Escaped quote
+ current += '"';
+ i += 2;
+ }
+ else {
+ // Toggle quote mode
+ inQuotes = !inQuotes;
+ i++;
+ }
+ }
+ else if (char === ',' && !inQuotes) {
+ // Field separator
+ result.push(current.trim());
+ current = '';
+ i++;
+ }
+ else {
+ current += char;
+ i++;
+ }
+ }
+ // Add last field
+ result.push(current.trim());
+ return result;
+ };
+ // Parse headers
+ const headers = parseLine(lines[0]);
  const data = [];
+ // Parse data rows
  for (let i = 1; i < lines.length; i++) {
- const values = lines[i].split(',').map(v => v.trim());
+ const line = lines[i].trim();
+ if (!line)
+ continue; // Skip empty lines
+ const values = parseLine(line);
  const row = {};
  headers.forEach((header, index) => {
- row[header] = values[index] || '';
+ const value = values[index] || '';
+ // Try to parse numbers
+ const num = Number(value);
+ row[header] = !isNaN(num) && value !== '' ? num : value;
  });
  data.push(row);
  }
  return data;
  }
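For reference, the field-splitting rules used by the new `parseLine` helper above, restated as a standalone sketch with a sample row (illustrative only; the private method above is the actual implementation, and it additionally coerces numeric fields per row):

```javascript
// Standalone restatement of the quote-aware splitting rules shown above
function splitCsvLine(line) {
  const fields = []
  let current = ''
  let inQuotes = false
  for (let i = 0; i < line.length; i++) {
    const char = line[i]
    if (char === '"') {
      if (inQuotes && line[i + 1] === '"') { current += '"'; i++ } // escaped quote
      else { inQuotes = !inQuotes }                                // toggle quote mode
    } else if (char === ',' && !inQuotes) {
      fields.push(current.trim())                                  // field separator
      current = ''
    } else {
      current += char
    }
  }
  fields.push(current.trim())
  return fields
}

// Quoted fields may contain commas and doubled quotes:
console.log(splitCsvLine('id,"Smith, Jane","said ""hi""",42'))
// -> [ 'id', 'Smith, Jane', 'said "hi"', '42' ]
```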
+ /**
+ * Parse YAML data
+ */
+ parseYAML(content) {
+ try {
+ // Simple YAML parser for basic structures
+ // For full YAML support, we'd use js-yaml library
+ const lines = content.split('\n');
+ const result = [];
+ let currentObject = null;
+ let currentIndent = 0;
+ for (const line of lines) {
+ const trimmed = line.trim();
+ if (!trimmed || trimmed.startsWith('#'))
+ continue; // Skip empty lines and comments
+ // Calculate indentation
+ const indent = line.length - line.trimStart().length;
+ // Check for array item
+ if (trimmed.startsWith('- ')) {
+ const value = trimmed.substring(2).trim();
+ if (indent === 0) {
+ // Top-level array item
+ if (value.includes(':')) {
+ // Object in array
+ currentObject = {};
+ result.push(currentObject);
+ const [key, val] = value.split(':').map(s => s.trim());
+ currentObject[key] = this.parseYAMLValue(val);
+ }
+ else {
+ result.push(this.parseYAMLValue(value));
+ }
+ }
+ else if (currentObject) {
+ // Nested array
+ const lastKey = Object.keys(currentObject).pop();
+ if (lastKey) {
+ if (!Array.isArray(currentObject[lastKey])) {
+ currentObject[lastKey] = [];
+ }
+ currentObject[lastKey].push(this.parseYAMLValue(value));
+ }
+ }
+ }
+ else if (trimmed.includes(':')) {
+ // Key-value pair
+ const colonIndex = trimmed.indexOf(':');
+ const key = trimmed.substring(0, colonIndex).trim();
+ const value = trimmed.substring(colonIndex + 1).trim();
+ if (indent === 0) {
+ // Top-level object
+ if (!currentObject) {
+ currentObject = {};
+ result.push(currentObject);
+ }
+ currentObject[key] = this.parseYAMLValue(value);
+ currentIndent = 0;
+ }
+ else if (currentObject) {
+ // Nested object
+ if (indent > currentIndent && !value) {
+ // Start of nested object
+ const lastKey = Object.keys(currentObject).pop();
+ if (lastKey) {
+ currentObject[lastKey] = { [key]: '' };
+ }
+ }
+ else {
+ currentObject[key] = this.parseYAMLValue(value);
+ }
+ currentIndent = indent;
+ }
+ }
+ }
+ // If we built a single object and not an array, wrap it
+ if (result.length === 0 && currentObject) {
+ result.push(currentObject);
+ }
+ return result.length > 0 ? result : [{ text: content }];
+ }
+ catch (error) {
+ prodLog.warn('YAML parsing failed, treating as text:', error);
+ return [{ text: content }];
+ }
+ }
+ /**
+ * Parse a YAML value (handle strings, numbers, booleans, null)
+ */
+ parseYAMLValue(value) {
+ if (!value || value === '~' || value === 'null')
+ return null;
+ if (value === 'true')
+ return true;
+ if (value === 'false')
+ return false;
+ // Remove quotes if present
+ if ((value.startsWith('"') && value.endsWith('"')) ||
+ (value.startsWith("'") && value.endsWith("'"))) {
+ return value.slice(1, -1);
+ }
+ // Try to parse as number
+ const num = Number(value);
+ if (!isNaN(num) && value !== '')
+ return num;
+ return value;
+ }
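The scalar coercion rules of `parseYAMLValue` above, restated as a standalone sketch with sample inputs (illustrative only; the private method is the actual implementation):

```javascript
// Standalone restatement of the coercion order: null markers, booleans,
// quoted strings, numbers, then plain strings
function coerceYamlScalar(value) {
  if (!value || value === '~' || value === 'null') return null
  if (value === 'true') return true
  if (value === 'false') return false
  if ((value.startsWith('"') && value.endsWith('"')) ||
      (value.startsWith("'") && value.endsWith("'"))) return value.slice(1, -1)
  const num = Number(value)
  return !isNaN(num) && value !== '' ? num : value
}

console.log(coerceYamlScalar('~'))              // null
console.log(coerceYamlScalar('false'))          // false
console.log(coerceYamlScalar('"2009"'))         // '2009' (quoted, stays a string)
console.log(coerceYamlScalar('2009'))           // 2009 (number)
console.log(coerceYamlScalar('multi-paradigm')) // 'multi-paradigm'
```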
  /**
  * Perform neural analysis on parsed data
  */
@@ -177,14 +330,14 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  const entityId = item.id || item.name || item.title || `entity_${Date.now()}_${Math.random()}`;
  detectedEntities.push({
  originalData: item,
- nounType: this.inferNounType(item),
+ nounType: await this.inferNounType(item),
  confidence: 0.85,
  suggestedId: String(entityId),
  reasoning: 'Detected from structured data',
  alternativeTypes: []
  });
  // Detect relationships from references
- this.detectRelationships(item, entityId, detectedRelationships);
+ await this.detectRelationships(item, entityId, detectedRelationships);
  }
  }
  // Generate insights
@@ -216,36 +369,31 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  };
  }
  /**
- * Infer noun type from object structure
+ * Infer noun type from object structure using intelligent type matching
  */
- inferNounType(obj) {
- // Simple heuristics for type detection
- if (obj.email || obj.username)
- return 'Person';
- if (obj.title && obj.content)
- return 'Document';
- if (obj.price || obj.product)
- return 'Product';
- if (obj.date || obj.timestamp)
- return 'Event';
- if (obj.url || obj.link)
- return 'Resource';
- if (obj.lat || obj.longitude)
- return 'Location';
- // Default fallback
- return 'Entity';
+ async inferNounType(obj) {
+ if (!this.typeMatcher) {
+ // Initialize type matcher if not available
+ this.typeMatcher = await getTypeMatcher();
+ }
+ const result = await this.typeMatcher.matchNounType(obj);
+ // Log if confidence is low for debugging
+ if (result.confidence < 0.5) {
+ this.log(`Low confidence (${result.confidence.toFixed(2)}) for noun type: ${result.type}`, 'warn');
+ }
+ return result.type;
  }
  /**
  * Detect relationships from object references
  */
- detectRelationships(obj, sourceId, relationships) {
+ async detectRelationships(obj, sourceId, relationships) {
  // Look for reference patterns
  for (const [key, value] of Object.entries(obj)) {
  if (key.endsWith('Id') || key.endsWith('_id') || key === 'parentId' || key === 'userId') {
  relationships.push({
  sourceId,
  targetId: String(value),
- verbType: this.inferVerbType(key),
+ verbType: await this.inferVerbType(key, obj, { id: value }),
  confidence: 0.75,
  weight: 1,
  reasoning: `Reference detected in field: ${key}`,
@@ -259,7 +407,7 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  relationships.push({
  sourceId,
  targetId: String(targetId),
- verbType: this.inferVerbType(key),
+ verbType: await this.inferVerbType(key, obj, { id: targetId }),
  confidence: 0.7,
  weight: 1,
  reasoning: `Array reference in field: ${key}`,
@@ -271,27 +419,19 @@ export class NeuralImportAugmentation extends BaseAugmentation {
  }
  }
  /**
- * Infer verb type from field name
+ * Infer verb type from field name using intelligent type matching
  */
- inferVerbType(fieldName) {
- const normalized = fieldName.toLowerCase();
- if (normalized.includes('parent'))
- return 'childOf';
- if (normalized.includes('user'))
- return 'belongsTo';
- if (normalized.includes('author'))
- return 'authoredBy';
- if (normalized.includes('owner'))
- return 'ownedBy';
- if (normalized.includes('creator'))
- return 'createdBy';
- if (normalized.includes('member'))
- return 'memberOf';
- if (normalized.includes('tag'))
- return 'taggedWith';
- if (normalized.includes('category'))
- return 'categorizedAs';
- return 'relatedTo';
+ async inferVerbType(fieldName, sourceObj, targetObj) {
+ if (!this.typeMatcher) {
+ // Initialize type matcher if not available
+ this.typeMatcher = await getTypeMatcher();
+ }
+ const result = await this.typeMatcher.matchVerbType(sourceObj, targetObj, fieldName);
+ // Log if confidence is low for debugging
+ if (result.confidence < 0.5) {
+ this.log(`Low confidence (${result.confidence.toFixed(2)}) for verb type: ${result.type}`, 'warn');
+ }
+ return result.type;
  }
  /**
  * Group entities by type