@soulcraft/brainy 0.63.0 → 1.0.0-rc.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
brainyChat.d.ts (file removed)
@@ -1,42 +0,0 @@
- /**
-  * Brainy Chat - Talk to Your Data
-  *
-  * Simple, powerful conversational AI for your Brainy database.
-  * Works with zero configuration, optionally enhanced with LLM.
-  */
- import { BrainyData } from '../brainyData.js';
- export interface ChatOptions {
-     /** Optional LLM model name or provider:model format */
-     llm?: string;
-     /** Include source references in responses */
-     sources?: boolean;
-     /** API key for LLM provider (if needed) */
-     apiKey?: string;
- }
- export declare class BrainyChat {
-     private brainy;
-     private llmProvider?;
-     private options;
-     private history;
-     constructor(brainy: BrainyData, options?: ChatOptions);
-     /**
-      * Initialize LLM provider based on model string
-      */
-     private initializeLLM;
-     /**
-      * Ask a question - works with or without LLM
-      */
-     ask(question: string): Promise<string>;
-     /**
-      * Generate response using LLM
-      */
-     private generateWithLLM;
-     /**
-      * Generate response with smart templates (no LLM needed)
-      */
-     private generateWithTemplate;
-     /**
-      * Interactive chat mode (Node.js only)
-      */
-     chat(): Promise<void>;
- }
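
For orientation, here is a minimal usage sketch of the BrainyChat API removed above. The constructor and method signatures come straight from the declaration; the import path and sample data are assumptions.

    // Hypothetical usage of the removed API (sketch only).
    // Assumes BrainyData and BrainyChat were exported from the package root.
    import { BrainyData, BrainyChat } from '@soulcraft/brainy';

    const brainy = new BrainyData();
    await brainy.init();
    await brainy.add('TypeScript is a typed superset of JavaScript'); // sample data (assumed)

    // Without an `llm` option, answers come from the built-in template engine;
    // `sources: true` appends the top result IDs to each answer.
    const chat = new BrainyChat(brainy, { sources: true });
    const answer: string = await chat.ask('What is TypeScript?');
    console.log(answer);
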
brainyChat.js (file removed)
@@ -1,340 +0,0 @@
- /**
-  * Brainy Chat - Talk to Your Data
-  *
-  * Simple, powerful conversational AI for your Brainy database.
-  * Works with zero configuration, optionally enhanced with LLM.
-  */
- export class BrainyChat {
-     constructor(brainy, options = {}) {
-         this.history = [];
-         this.brainy = brainy;
-         this.options = options;
-         // Load LLM if specified
-         if (options.llm) {
-             this.initializeLLM(options.llm, options.apiKey);
-         }
-     }
-     /**
-      * Initialize LLM provider based on model string
-      */
-     async initializeLLM(model, apiKey) {
-         // Parse provider from model string (e.g., "claude-3-5-sonnet", "gpt-4", "Xenova/LaMini")
-         if (model.startsWith('claude') || model.includes('anthropic')) {
-             this.llmProvider = new ClaudeLLMProvider(model, apiKey);
-         }
-         else if (model.startsWith('gpt') || model.includes('openai')) {
-             this.llmProvider = new OpenAILLMProvider(model, apiKey);
-         }
-         else if (model.includes('/')) {
-             // Hugging Face model format
-             this.llmProvider = new HuggingFaceLLMProvider(model);
-         }
-         else {
-             console.warn(`Unknown LLM model: ${model}, falling back to templates`);
-         }
-     }
-     /**
-      * Ask a question - works with or without LLM
-      */
-     async ask(question) {
-         // Find relevant context using vector search
-         const searchResults = await this.brainy.search(question, 10);
-         // Generate response
-         let answer;
-         if (this.llmProvider) {
-             answer = await this.generateWithLLM(question, searchResults);
-         }
-         else {
-             answer = this.generateWithTemplate(question, searchResults);
-         }
-         // Add sources if requested
-         if (this.options.sources && searchResults.length > 0) {
-             const sources = searchResults
-                 .slice(0, 3)
-                 .map(r => r.id)
-                 .join(', ');
-             answer += `\n[Sources: ${sources}]`;
-         }
-         // Track history (keep last 10 exchanges)
-         this.history.push({ question, answer });
-         if (this.history.length > 10) {
-             this.history = this.history.slice(-10);
-         }
-         return answer;
-     }
-     /**
-      * Generate response using LLM
-      */
-     async generateWithLLM(question, context) {
-         if (!this.llmProvider) {
-             return this.generateWithTemplate(question, context);
-         }
-         // Build context from search results
-         const contextData = context.map(item => ({
-             id: item.id,
-             score: item.score,
-             metadata: item.metadata || {}
-         }));
-         // Include conversation history for context
-         const historyContext = this.history.slice(-3).map(h => `Q: ${h.question}\nA: ${h.answer}`).join('\n\n');
-         try {
-             const response = await this.llmProvider.generate(question, {
-                 searchResults: contextData,
-                 history: historyContext
-             });
-             return response;
-         }
-         catch (error) {
-             console.warn('LLM generation failed, using template:', error);
-             return this.generateWithTemplate(question, context);
-         }
-     }
-     /**
-      * Generate response with smart templates (no LLM needed)
-      */
-     generateWithTemplate(question, context) {
-         if (context.length === 0) {
-             return "I couldn't find relevant information to answer that question.";
-         }
-         const q = question.toLowerCase();
-         // Quantitative questions
-         if (q.includes('how many') || q.includes('count')) {
-             const count = context.length;
-             const items = context.slice(0, 3).map(c => c.id).join(', ');
-             return `I found ${count} relevant items. The top matches are: ${items}.`;
-         }
-         // Comparison questions
-         if (q.includes('compare') || q.includes('difference') || q.includes('vs')) {
-             if (context.length < 2) {
-                 return "I need at least two items to make a comparison.";
-             }
-             const first = context[0];
-             const second = context[1];
-             return `Comparing "${first.id}" (${(first.score * 100).toFixed(0)}% relevance) with "${second.id}" (${(second.score * 100).toFixed(0)}% relevance). Both are related to your query but ${first.id} shows stronger similarity.`;
-         }
-         // List questions
-         if (q.includes('list') || q.includes('what are') || q.includes('show me')) {
-             const items = context.slice(0, 5).map((c, i) => `${i + 1}. ${c.id}${c.metadata?.description ? ': ' + c.metadata.description : ''}`).join('\n');
-             return `Here are the top results:\n${items}`;
-         }
-         // Analysis questions
-         if (q.includes('analyze') || q.includes('explain') || q.includes('why')) {
-             const top = context[0];
-             const metadata = top.metadata || {};
-             const details = Object.entries(metadata)
-                 .slice(0, 3)
-                 .map(([k, v]) => `${k}: ${JSON.stringify(v)}`)
-                 .join(', ');
-             return `Based on my analysis of "${top.id}" (${(top.score * 100).toFixed(0)}% relevant): ${details || 'This item matches your query based on semantic similarity.'}`;
-         }
-         // Trend/pattern questions
-         if (q.includes('trend') || q.includes('pattern')) {
-             const items = context.slice(0, 3).map(c => c.id);
-             return `I identified patterns across ${context.length} related items. Key examples include: ${items.join(', ')}. These show common characteristics related to "${question}".`;
-         }
-         // Yes/No questions
-         if (q.startsWith('is') || q.startsWith('are') || q.startsWith('does') || q.startsWith('do')) {
-             const confidence = context[0].score;
-             if (confidence > 0.8) {
-                 return `Yes, based on "${context[0].id}" with ${(confidence * 100).toFixed(0)}% confidence.`;
-             }
-             else if (confidence > 0.5) {
-                 return `Possibly. I found "${context[0].id}" with ${(confidence * 100).toFixed(0)}% relevance to your question.`;
-             }
-             else {
-                 return `I'm not certain. The closest match is "${context[0].id}" but with only ${(confidence * 100).toFixed(0)}% relevance.`;
-             }
-         }
-         // Default response - provide the most relevant information
-         const top = context[0];
-         const metadata = top.metadata ?
-             Object.entries(top.metadata)
-                 .slice(0, 3)
-                 .map(([k, v]) => `${k}: ${JSON.stringify(v)}`)
-                 .join(', ') :
-             'no additional details';
-         return `Based on "${top.id}" (${(top.score * 100).toFixed(0)}% relevant): ${metadata}`;
-     }
-     /**
-      * Interactive chat mode (Node.js only)
-      */
-     async chat() {
-         // Check if we're in Node.js
-         if (typeof process === 'undefined' || !process.stdin) {
-             console.log('Interactive chat is only available in Node.js environment');
-             return;
-         }
-         const readline = await import('readline');
-         const rl = readline.createInterface({
-             input: process.stdin,
-             output: process.stdout,
-             prompt: 'You> '
-         });
-         console.log('\n🧠 Brainy Chat - Interactive Mode');
-         console.log('Type your questions or "exit" to quit\n');
-         rl.prompt();
-         rl.on('line', async (line) => {
-             const input = line.trim();
-             if (input.toLowerCase() === 'exit' || input.toLowerCase() === 'quit') {
-                 console.log('\nGoodbye! šŸ‘‹');
-                 rl.close();
-                 return;
-             }
-             if (input) {
-                 try {
-                     const answer = await this.ask(input);
-                     console.log(`\nšŸ¤– ${answer}\n`);
-                 }
-                 catch (error) {
-                     console.log(`\nāŒ Error: ${error instanceof Error ? error.message : String(error)}\n`);
-                 }
-             }
-             rl.prompt();
-         });
-         rl.on('close', () => {
-             process.exit(0);
-         });
-     }
- }
- /**
-  * Claude LLM Provider
-  */
- class ClaudeLLMProvider {
-     constructor(model, apiKey) {
-         this.model = model.includes('claude') ? model : `claude-3-5-sonnet-20241022`;
-         this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
-     }
-     async generate(prompt, context) {
-         if (!this.apiKey) {
-             throw new Error('Claude API key required. Set ANTHROPIC_API_KEY or pass apiKey option.');
-         }
-         const systemPrompt = `You are a helpful AI assistant with access to a vector database.
- Answer questions based on the provided context from semantic search results.
- Be concise and accurate. If the context doesn't contain relevant information, say so.`;
-         const userPrompt = `Context from database search:
- ${JSON.stringify(context.searchResults, null, 2)}
-
- Recent conversation:
- ${context.history || 'No previous conversation'}
-
- Question: ${prompt}
-
- Please provide a helpful answer based on the context above.`;
-         try {
-             const response = await fetch('https://api.anthropic.com/v1/messages', {
-                 method: 'POST',
-                 headers: {
-                     'Content-Type': 'application/json',
-                     'x-api-key': this.apiKey,
-                     'anthropic-version': '2023-06-01'
-                 },
-                 body: JSON.stringify({
-                     model: this.model,
-                     max_tokens: 1024,
-                     messages: [
-                         { role: 'user', content: userPrompt }
-                     ],
-                     system: systemPrompt
-                 })
-             });
-             if (!response.ok) {
-                 throw new Error(`Claude API error: ${response.status}`);
-             }
-             const data = await response.json();
-             return data.content[0].text;
-         }
-         catch (error) {
-             throw new Error(`Failed to generate with Claude: ${error instanceof Error ? error.message : String(error)}`);
-         }
-     }
- }
- /**
-  * OpenAI LLM Provider
-  */
- class OpenAILLMProvider {
-     constructor(model, apiKey) {
-         this.model = model.includes('gpt') ? model : 'gpt-4o-mini';
-         this.apiKey = apiKey || process.env.OPENAI_API_KEY;
-     }
-     async generate(prompt, context) {
-         if (!this.apiKey) {
-             throw new Error('OpenAI API key required. Set OPENAI_API_KEY or pass apiKey option.');
-         }
-         const systemPrompt = `You are a helpful AI assistant with access to a vector database.
- Answer questions based on the provided context from semantic search results.`;
-         const userPrompt = `Context: ${JSON.stringify(context.searchResults)}
- History: ${context.history || 'None'}
- Question: ${prompt}`;
-         try {
-             const response = await fetch('https://api.openai.com/v1/chat/completions', {
-                 method: 'POST',
-                 headers: {
-                     'Content-Type': 'application/json',
-                     'Authorization': `Bearer ${this.apiKey}`
-                 },
-                 body: JSON.stringify({
-                     model: this.model,
-                     messages: [
-                         { role: 'system', content: systemPrompt },
-                         { role: 'user', content: userPrompt }
-                     ],
-                     max_tokens: 500,
-                     temperature: 0.7
-                 })
-             });
-             if (!response.ok) {
-                 throw new Error(`OpenAI API error: ${response.status}`);
-             }
-             const data = await response.json();
-             return data.choices[0].message.content;
-         }
-         catch (error) {
-             throw new Error(`Failed to generate with OpenAI: ${error instanceof Error ? error.message : String(error)}`);
-         }
-     }
- }
- /**
-  * Hugging Face Local LLM Provider
-  */
- class HuggingFaceLLMProvider {
-     constructor(model) {
-         this.model = model;
-         this.initializePipeline();
-     }
-     async initializePipeline() {
-         try {
-             // Lazy load transformers.js - this is optional and may not be installed
-             // @ts-ignore - Optional dependency
-             const transformersModule = await import('@huggingface/transformers').catch(() => null);
-             if (transformersModule) {
-                 const { pipeline } = transformersModule;
-                 this.pipeline = await pipeline('text2text-generation', this.model);
-             }
-             else {
-                 console.warn(`Transformers.js not installed. Install with: npm install @huggingface/transformers`);
-             }
-         }
-         catch (error) {
-             console.warn(`Failed to load Hugging Face model ${this.model}:`, error);
-         }
-     }
-     async generate(prompt, context) {
-         if (!this.pipeline) {
-             throw new Error('Hugging Face model not loaded');
-         }
-         const input = `Answer based on context: ${JSON.stringify(context.searchResults).slice(0, 500)}
- Question: ${prompt}
- Answer:`;
-         try {
-             const result = await this.pipeline(input, {
-                 max_new_tokens: 150,
-                 temperature: 0.7
-             });
-             return result[0].generated_text.trim();
-         }
-         catch (error) {
-             throw new Error(`Failed to generate with Hugging Face: ${error instanceof Error ? error.message : String(error)}`);
-         }
-     }
- }
- //# sourceMappingURL=brainyChat.js.map
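
The provider routing in initializeLLM above keys entirely off the shape of the model string. A short sketch of the four outcomes (the model names are illustrative; the routing rules are taken from the code):

    // How the `llm` option selects a provider in the removed implementation:
    new BrainyChat(brainy, { llm: 'claude-3-5-sonnet-20241022', apiKey }); // 'claude*' -> ClaudeLLMProvider (Anthropic API)
    new BrainyChat(brainy, { llm: 'gpt-4o-mini' });                        // 'gpt*' -> OpenAILLMProvider (reads OPENAI_API_KEY)
    new BrainyChat(brainy, { llm: 'Xenova/LaMini' });                      // 'org/model' -> local HuggingFaceLLMProvider via transformers.js
    new BrainyChat(brainy, { llm: 'mystery-model' });                      // anything else warns and falls back to templates

Note that initializeLLM is async but not awaited in the constructor, so an ask() issued immediately after construction can race the provider load and silently take the template path.
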
cliWrapper.d.ts (file removed)
@@ -1,32 +0,0 @@
- /**
-  * CLI Wrapper for Cortex
-  *
-  * Provides CLI-specific functionality that wraps the core Cortex orchestrator
-  * Following our philosophy: "Simple for beginners, powerful for experts"
-  */
- export declare class CortexCLI {
-     private brainy;
-     private cortex;
-     private neuralImport?;
-     constructor();
-     /**
-      * Initialize the brain and cortex
-      */
-     init(options?: any): Promise<boolean>;
-     /**
-      * Add data - with Neural Import processing by default
-      */
-     add(data?: string, metadata?: any): Promise<void>;
-     /**
-      * Search with helpful display
-      */
-     search(query?: string, options?: any): Promise<void>;
-     /**
-      * Show statistics
-      */
-     stats(detailed?: boolean): Promise<void>;
-     /**
-      * Interactive chat interface
-      */
-     chat(initialMessage?: string): Promise<void>;
- }
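
A matching sketch for the CortexCLI surface declared above, again assuming the class was importable from the package root; the sample strings are made up.

    // Hypothetical driver for the removed CLI wrapper (sketch only).
    import { CortexCLI } from '@soulcraft/brainy'; // export path assumed

    const cli = new CortexCLI();
    await cli.init();                                        // initializes BrainyData and registers Neural Import
    await cli.add('Grace Hopper wrote the first compiler');  // plain text is added directly; a file path triggers Neural Import
    await cli.search('compiler', { limit: 3 });              // prints scored results
    await cli.stats(true);                                   // detailed breakdown by entity/relationship type
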
cliWrapper.js (file removed)
@@ -1,209 +0,0 @@
- /**
-  * CLI Wrapper for Cortex
-  *
-  * Provides CLI-specific functionality that wraps the core Cortex orchestrator
-  * Following our philosophy: "Simple for beginners, powerful for experts"
-  */
- import { BrainyData } from '../brainyData.js';
- import { Cortex } from '../cortex.js';
- import { NeuralImport } from './neuralImport.js';
- // @ts-ignore
- import chalk from 'chalk';
- // @ts-ignore
- import ora from 'ora';
- // @ts-ignore
- import prompts from 'prompts';
- export class CortexCLI {
-     constructor() {
-         this.brainy = new BrainyData();
-         this.cortex = new Cortex();
-     }
-     /**
-      * Initialize the brain and cortex
-      */
-     async init(options = {}) {
-         const spinner = ora('Initializing brain...').start();
-         try {
-             // Initialize BrainyData
-             await this.brainy.init();
-             // Set up Neural Import as default augmentation
-             this.neuralImport = new NeuralImport(this.brainy);
-             this.cortex.register(this.neuralImport);
-             spinner.succeed('Brain initialized!');
-             return true;
-         }
-         catch (error) {
-             spinner.fail('Initialization failed');
-             throw error;
-         }
-     }
-     /**
-      * Add data - with Neural Import processing by default
-      */
-     async add(data, metadata) {
-         // If no data provided, go interactive
-         if (!data) {
-             const input = await prompts([
-                 {
-                     type: 'text',
-                     name: 'data',
-                     message: 'What would you like to add?',
-                     validate: (value) => value.length > 0 || 'Please enter something'
-                 }
-             ]);
-             if (!input.data)
-                 return;
-             data = input.data;
-         }
-         // Check if it's a file path
-         const fs = await import('fs');
-         if (fs.existsSync(data)) {
-             // Use Neural Import for files
-             if (this.neuralImport) {
-                 console.log(chalk.cyan('🧠 Using Neural Import to understand your data...'));
-                 const result = await this.neuralImport.neuralImport(data, {
-                     autoApply: true,
-                     previewOnly: false
-                 });
-                 console.log(chalk.green(`āœ… Imported ${result.detectedEntities.length} entities`));
-                 console.log(chalk.gray(`   ${result.detectedRelationships.length} relationships detected`));
-                 console.log(chalk.gray(`   Confidence: ${(result.confidence * 100).toFixed(1)}%`));
-                 if (result.insights.length > 0) {
-                     console.log(chalk.cyan('\nšŸ’” Insights:'));
-                     result.insights.forEach(insight => {
-                         console.log(chalk.gray(`   • ${insight.description}`));
-                     });
-                 }
-             }
-         }
-         else {
-             // Plain text - add directly
-             const id = await this.brainy.add(data, metadata);
-             console.log(chalk.green('āœ… Added!'));
-             console.log(chalk.gray(`   ID: ${id}`));
-         }
-     }
-     /**
-      * Search with helpful display
-      */
-     async search(query, options = {}) {
-         // Interactive mode if no query
-         if (!query) {
-             const input = await prompts({
-                 type: 'text',
-                 name: 'query',
-                 message: 'What are you looking for?',
-                 validate: (value) => value.length > 0 || 'Please enter a search term'
-             });
-             if (!input.query)
-                 return;
-             query = input.query;
-         }
-         const results = await this.brainy.search(query, options.limit || 5);
-         if (results.length === 0) {
-             console.log(chalk.yellow('No results found'));
-             console.log(chalk.gray('Try different keywords or add more data'));
-         }
-         else {
-             console.log(chalk.cyan(`\nFound ${results.length} results:\n`));
-             results.forEach((result, i) => {
-                 const content = result.content || result.text || result.data;
-                 console.log(chalk.white(`${i + 1}. ${content}`));
-                 console.log(chalk.gray(`   Score: ${(result.score * 100).toFixed(1)}%`));
-                 if (result.metadata) {
-                     const keys = Object.keys(result.metadata).slice(0, 3);
-                     if (keys.length > 0) {
-                         const preview = keys.map(k => `${k}: ${result.metadata[k]}`).join(', ');
-                         console.log(chalk.gray(`   ${preview}`));
-                     }
-                 }
-                 console.log();
-             });
-         }
-     }
-     /**
-      * Show statistics
-      */
-     async stats(detailed = false) {
-         const stats = await this.brainy.getStats();
-         console.log(chalk.cyan('\n🧠 Brain Statistics\n'));
-         console.log(chalk.white('Capacity:'));
-         console.log(`  Memories: ${stats.totalNouns || 0}`);
-         console.log(`  Connections: ${stats.totalVerbs || 0}`);
-         if (detailed) {
-             console.log(chalk.white('\nBreakdown:'));
-             if (stats.nounTypes) {
-                 console.log('  Entity Types:');
-                 Object.entries(stats.nounTypes).forEach(([type, count]) => {
-                     console.log(`    ${type}: ${count}`);
-                 });
-             }
-             if (stats.verbTypes) {
-                 console.log('  Relationship Types:');
-                 Object.entries(stats.verbTypes).forEach(([type, count]) => {
-                     console.log(`    ${type}: ${count}`);
-                 });
-             }
-         }
-         console.log(chalk.white('\nAugmentations:'));
-         const augmentations = this.cortex.getAugmentationsByType('sense');
-         console.log(`  Active: ${augmentations.length}`);
-         if (augmentations.length > 0) {
-             augmentations.forEach(aug => {
-                 console.log(`    • ${aug.name || 'unnamed'}`);
-             });
-         }
-     }
-     /**
-      * Interactive chat interface
-      */
-     async chat(initialMessage) {
-         console.log(chalk.cyan('\n🧠 Brain Chat'));
-         console.log(chalk.gray('I can help you explore your data. Type "exit" to quit.\n'));
-         // Note: Full chat would integrate with BrainyChat
-         // For now, we use search as a simple implementation
-         const readline = (await import('readline')).createInterface({
-             input: process.stdin,
-             output: process.stdout,
-             prompt: chalk.cyan('You: ')
-         });
-         if (initialMessage) {
-             console.log(chalk.cyan('You: ') + initialMessage);
-             const results = await this.brainy.search(initialMessage, 3);
-             if (results.length > 0) {
-                 console.log(chalk.green('Brain: ') + 'Based on what I know:');
-                 results.forEach(r => {
-                     console.log(chalk.gray(`  • ${r.content || r.text}`));
-                 });
-             }
-             else {
-                 console.log(chalk.green('Brain: ') + "I don't have information about that yet.");
-             }
-             console.log();
-         }
-         readline.prompt();
-         readline.on('line', async (line) => {
-             const input = line.trim();
-             if (input.toLowerCase() === 'exit') {
-                 console.log(chalk.gray('\nGoodbye! šŸ‘‹'));
-                 readline.close();
-                 process.exit(0);
-             }
-             if (input) {
-                 const results = await this.brainy.search(input, 3);
-                 if (results.length > 0) {
-                     console.log(chalk.green('Brain: ') + 'I found:');
-                     results.forEach(r => {
-                         console.log(chalk.gray(`  • ${r.content || r.text}`));
-                     });
-                 }
-                 else {
-                     console.log(chalk.green('Brain: ') + "I don't know about that yet.");
-                 }
-                 console.log();
-             }
-             readline.prompt();
-         });
-     }
- }
- //# sourceMappingURL=cliWrapper.js.map