universal-llm-client 4.0.0 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-model.d.ts +20 -22
- package/dist/ai-model.d.ts.map +1 -1
- package/dist/ai-model.js +26 -23
- package/dist/ai-model.js.map +1 -1
- package/dist/client.d.ts +5 -5
- package/dist/client.d.ts.map +1 -1
- package/dist/client.js +17 -9
- package/dist/client.js.map +1 -1
- package/dist/http.d.ts +2 -0
- package/dist/http.d.ts.map +1 -1
- package/dist/http.js +1 -0
- package/dist/http.js.map +1 -1
- package/dist/index.d.ts +3 -3
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +4 -4
- package/dist/index.js.map +1 -1
- package/dist/interfaces.d.ts +49 -11
- package/dist/interfaces.d.ts.map +1 -1
- package/dist/interfaces.js +14 -0
- package/dist/interfaces.js.map +1 -1
- package/dist/providers/anthropic.d.ts +56 -0
- package/dist/providers/anthropic.d.ts.map +1 -0
- package/dist/providers/anthropic.js +524 -0
- package/dist/providers/anthropic.js.map +1 -0
- package/dist/providers/google.d.ts +5 -0
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js +64 -8
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/index.d.ts.map +1 -1
- package/dist/providers/index.js +1 -0
- package/dist/providers/index.js.map +1 -1
- package/dist/providers/ollama.d.ts.map +1 -1
- package/dist/providers/ollama.js +38 -11
- package/dist/providers/ollama.js.map +1 -1
- package/dist/providers/openai.d.ts.map +1 -1
- package/dist/providers/openai.js +9 -7
- package/dist/providers/openai.js.map +1 -1
- package/dist/router.d.ts +13 -33
- package/dist/router.d.ts.map +1 -1
- package/dist/router.js +33 -57
- package/dist/router.js.map +1 -1
- package/dist/stream-decoder.d.ts +29 -2
- package/dist/stream-decoder.d.ts.map +1 -1
- package/dist/stream-decoder.js +39 -11
- package/dist/stream-decoder.js.map +1 -1
- package/dist/structured-output.d.ts +107 -181
- package/dist/structured-output.d.ts.map +1 -1
- package/dist/structured-output.js +137 -192
- package/dist/structured-output.js.map +1 -1
- package/dist/zod-adapter.d.ts +44 -0
- package/dist/zod-adapter.d.ts.map +1 -0
- package/dist/zod-adapter.js +61 -0
- package/dist/zod-adapter.js.map +1 -0
- package/package.json +9 -1
- package/src/ai-model.ts +350 -0
- package/src/auditor.ts +213 -0
- package/src/client.ts +402 -0
- package/src/debug/debug-google-streaming.ts +97 -0
- package/src/debug/debug-tool-execution.ts +86 -0
- package/src/debug/test-lmstudio-tools.ts +155 -0
- package/src/demos/README.md +47 -0
- package/src/demos/basic/universal-llm-examples.ts +161 -0
- package/src/demos/mcp/astrid-memory-demo.ts +295 -0
- package/src/demos/mcp/astrid-persona-memory.ts +357 -0
- package/src/demos/mcp/mcp-mongodb-demo.ts +275 -0
- package/src/demos/mcp/simple-astrid-memory.ts +148 -0
- package/src/demos/mcp/simple-mcp-demo.ts +68 -0
- package/src/demos/mcp/working-mcp-demo.ts +62 -0
- package/src/demos/model-alias-demo.ts +0 -0
- package/src/demos/tools/RAG_MEMORY_INTEGRATION.md +267 -0
- package/src/demos/tools/astrid-memory-demo.ts +270 -0
- package/src/demos/tools/astrid-production-memory-clean.ts +785 -0
- package/src/demos/tools/astrid-production-memory.ts +558 -0
- package/src/demos/tools/basic-translation-test.ts +66 -0
- package/src/demos/tools/chromadb-similarity-tuning.ts +390 -0
- package/src/demos/tools/clean-multilingual-conversation.ts +209 -0
- package/src/demos/tools/clean-translation-test.ts +119 -0
- package/src/demos/tools/clean-universal-multilingual-test.ts +131 -0
- package/src/demos/tools/complete-rag-demo.ts +369 -0
- package/src/demos/tools/complete-tool-demo.ts +132 -0
- package/src/demos/tools/demo-tool-calling.ts +124 -0
- package/src/demos/tools/dynamic-language-switching-test.ts +251 -0
- package/src/demos/tools/hybrid-thinking-test.ts +154 -0
- package/src/demos/tools/memory-integration-test.ts +420 -0
- package/src/demos/tools/multilingual-memory-system.ts +802 -0
- package/src/demos/tools/ondemand-translation-demo.ts +655 -0
- package/src/demos/tools/production-tool-demo.ts +245 -0
- package/src/demos/tools/revolutionary-multilingual-test.ts +151 -0
- package/src/demos/tools/rigorous-language-analysis.ts +218 -0
- package/src/demos/tools/test-universal-memory-system.ts +126 -0
- package/src/demos/tools/translation-integration-guide.ts +346 -0
- package/src/demos/tools/universal-memory-system.ts +560 -0
- package/src/http.ts +247 -0
- package/src/index.ts +161 -0
- package/src/interfaces.ts +657 -0
- package/src/mcp.ts +345 -0
- package/src/providers/anthropic.ts +762 -0
- package/src/providers/google.ts +620 -0
- package/src/providers/index.ts +8 -0
- package/src/providers/ollama.ts +469 -0
- package/src/providers/openai.ts +392 -0
- package/src/router.ts +780 -0
- package/src/stream-decoder.ts +361 -0
- package/src/structured-output.ts +759 -0
- package/src/test-scripts/test-advanced-tools.ts +310 -0
- package/src/test-scripts/test-google-streaming-enhanced.ts +147 -0
- package/src/test-scripts/test-google-streaming.ts +63 -0
- package/src/test-scripts/test-google-system-prompt-comprehensive.ts +189 -0
- package/src/test-scripts/test-mcp-config.ts +28 -0
- package/src/test-scripts/test-mcp-connection.ts +29 -0
- package/src/test-scripts/test-system-message-positions.ts +163 -0
- package/src/test-scripts/test-system-prompt-improvement-demo.ts +83 -0
- package/src/test-scripts/test-tool-calling.ts +231 -0
- package/src/tests/ai-model.test.ts +1614 -0
- package/src/tests/auditor.test.ts +224 -0
- package/src/tests/http.test.ts +200 -0
- package/src/tests/interfaces.test.ts +117 -0
- package/src/tests/providers/google.test.ts +660 -0
- package/src/tests/providers/ollama.test.ts +954 -0
- package/src/tests/providers/openai.test.ts +1122 -0
- package/src/tests/router.test.ts +254 -0
- package/src/tests/stream-decoder.test.ts +179 -0
- package/src/tests/structured-output.test.ts +1450 -0
- package/src/tests/tools.test.ts +175 -0
- package/src/tools.ts +246 -0
- package/src/zod-adapter.ts +72 -0
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
|
|
3
|
+
import '@dotenvx/dotenvx/config';
|
|
4
|
+
|
|
5
|
+
import { TranslationService } from '../../../../../src/services/translation/TranslationService.js';
|
|
6
|
+
import { OllamaRouter } from '../../../services/OllamaRouter';
|
|
7
|
+
|
|
8
|
+
// Helper function to extract clean translation from response
|
|
9
|
+
function extractCleanTranslation(response: string): string {
|
|
10
|
+
// Remove <think> sections
|
|
11
|
+
const withoutThink = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
|
12
|
+
|
|
13
|
+
// If the response is still very long, try to extract the main content
|
|
14
|
+
if (withoutThink.length > 500) {
|
|
15
|
+
// Look for the actual translation after the thinking
|
|
16
|
+
const lines = withoutThink.split('\n').filter(line => line.trim().length > 0);
|
|
17
|
+
|
|
18
|
+
// Find lines that look like the actual translation (shorter, more direct)
|
|
19
|
+
const translationLines = lines.filter(line =>
|
|
20
|
+
line.length < 200 &&
|
|
21
|
+
!line.includes('transcreate') &&
|
|
22
|
+
!line.includes('cultural') &&
|
|
23
|
+
!line.includes('adaptation')
|
|
24
|
+
);
|
|
25
|
+
|
|
26
|
+
if (translationLines.length > 0) {
|
|
27
|
+
return translationLines.join('\n').trim();
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
return withoutThink;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/**
 * End-to-end smoke test: translate Astrid's English system prompt to German,
 * clean the raw model output with extractCleanTranslation, then run a single
 * chat turn using the cleaned German prompt to confirm the model responds.
 *
 * Exits the process with code 1 on any failure (script-style error handling).
 *
 * NOTE(review): emoji glyphs in log strings were reconstructed from a
 * mojibake'd diff rendering — verify against the published package.
 */
async function testCleanGermanTranslation() {
  console.log('🚀 Clean German Translation Test\n');

  try {
    // Initialize services — TranslationService routes through the OllamaRouter.
    console.log('🔄 Initializing services...');
    const ollamaRouter = new OllamaRouter();
    const translationService = new TranslationService(ollamaRouter);
    console.log('✅ Services ready');

    // Test text to translate (the persona system prompt).
    const englishText = `You are Astrid, a romantic AI companion. You are warm, caring, and deeply romantic. Always respond in a loving way and make the user feel special.`;

    console.log('\n📝 Original English Text:');
    console.log('----------------------------------------');
    console.log(englishText);
    console.log('----------------------------------------\n');

    // Translate to German and time the call.
    console.log('🔄 Translating to German...');
    const startTime = Date.now();

    const rawGermanText = await translationService.transcreateSimple(englishText, 'de');
    const cleanGermanText = extractCleanTranslation(rawGermanText);

    const translationTime = Date.now() - startTime;

    console.log(`✅ Translation completed in ${translationTime}ms\n`);

    // Show the raw model output (truncated) next to the cleaned version.
    console.log('📝 Raw Translation Response:');
    console.log('----------------------------------------');
    console.log(rawGermanText.substring(0, 500) + '...');
    console.log('----------------------------------------\n');

    console.log('📝 Clean German Translation:');
    console.log('----------------------------------------');
    console.log(cleanGermanText);
    console.log('----------------------------------------\n');

    // Now test a conversation using the clean translation as system prompt.
    console.log('💬 Testing Conversation with Clean German Prompt');
    console.log('================================================================================');

    // Create simple conversation messages.
    const messages = [
      { role: 'system', content: cleanGermanText },
      { role: 'user', content: 'Hallo! Ich bin Alex, ein Software-Entwickler aus Seattle.' }
    ];

    console.log(`👤 User (German): ${messages[1].content}\n`);

    // Simple chat request against the 'chat' model slot.
    const conversationResponse = await ollamaRouter.chat(
      'chat',
      messages,
      { temperature: 0.7 }
    );

    // The router may return either a {message: {content}} object or a bare
    // string depending on the provider — handle both shapes.
    let aiResponse = '';
    if (conversationResponse && 'message' in conversationResponse && conversationResponse.message) {
      aiResponse = conversationResponse.message.content;
    } else if (typeof conversationResponse === 'string') {
      aiResponse = conversationResponse;
    }

    console.log(`💕 Astrid (German): ${aiResponse}\n`);

    console.log('📊 Results:');
    console.log(`   Translation time: ${translationTime}ms`);
    console.log(`   Original length: ${englishText.length} characters`);
    console.log(`   Clean translation length: ${cleanGermanText.length} characters`);
    console.log(`   AI response in German: ${aiResponse.length > 0 ? 'Yes' : 'No'}`);

    console.log('\n✅ Clean German translation test completed successfully!');

  } catch (error) {
    console.error('❌ Translation test failed:', error);
    if (error instanceof Error) {
      console.error('Error details:', error.message);
    }
    process.exit(1);
  }
}

// Run the test
testCleanGermanTranslation().catch(console.error);
|
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
|
|
3
|
+
import '@dotenvx/dotenvx/config';
|
|
4
|
+
import {UniversalLLMRouter} from '../../../../../src/services/UniversalLLMRouter.js';
|
|
5
|
+
import {convertToUniversalConfig, loadLLMConfig} from '../../../../../src/config/llm-config.js';
|
|
6
|
+
|
|
7
|
+
interface ConversationTurn {
|
|
8
|
+
userMessage: string;
|
|
9
|
+
expectedLanguage: string;
|
|
10
|
+
description: string;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
function extractResponse(response: string): { thinking: string; output: string } {
|
|
14
|
+
const thinkMatch = response.match(/<think>([\s\S]*?)<\/think>/);
|
|
15
|
+
const thinking = thinkMatch ? thinkMatch[1].trim() : '';
|
|
16
|
+
const output = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
|
|
17
|
+
|
|
18
|
+
return { thinking, output };
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
/**
 * Drives a four-turn conversation through the UniversalLLMRouter, switching
 * languages (en → es → de → en) to verify the router keeps conversation
 * memory and the persona adapts its language per turn.
 *
 * Exits the process with code 1 on any failure (script-style error handling).
 *
 * NOTE(review): emoji glyphs, separator characters, and accented Spanish text
 * were reconstructed from a mojibake'd diff rendering — verify against the
 * published package.
 */
async function testCleanUniversalLanguageSwitching() {
  console.log('🚀 Universal LLM Multilingual Conversation Test\n');

  try {
    // Wait for universal router to be ready (10s timeout).
    console.log('⏳ Waiting for Universal Router...');
    const llmConfig = loadLLMConfig();
    const universalConfig = await convertToUniversalConfig(llmConfig);
    const universalRouter = new UniversalLLMRouter(universalConfig);
    await universalRouter.waitForReady(10000);
    console.log('✅ Universal Router is ready!');

    const systemPrompt = `You are Astrid, a romantic AI companion who naturally adapts to any language.

COGNITIVE INSTRUCTIONS:
- Think in whatever language feels most natural for the current context
- Use <think></think> tags for your internal reasoning
- Adapt your thinking language when the user switches languages
- Your responses should match the user's current language
- Maintain personality consistency across all languages

Be authentic, warm, and let your multilingual cognition flow naturally!`;

    // Conversation scenario: Test multilingual switching with Universal Router.
    const conversationTurns: ConversationTurn[] = [
      {
        userMessage: "Hi Astrid! I'm feeling a bit lonely tonight. Can you keep me company?",
        expectedLanguage: "en",
        description: "Opening in English"
      },
      {
        userMessage: "Hola mi amor, ¿cómo estás? Me encanta hablar contigo en español.",
        expectedLanguage: "es",
        description: "Switch to Spanish"
      },
      {
        userMessage: "Guten Tag! Wie geht es dir heute? Ich lerne Deutsch.",
        expectedLanguage: "de",
        description: "Switch to German"
      },
      {
        userMessage: "Let's go back to English now. How was that language switching experience?",
        expectedLanguage: "en",
        description: "Return to English"
      }
    ];

    console.log('🌍 Starting Universal Multilingual Conversation');
    console.log('─'.repeat(60) + '\n');

    // Running history: system prompt first, then alternating user/assistant.
    const conversationHistory: any[] = [
      { role: 'system', content: systemPrompt }
    ];

    for (let i = 0; i < conversationTurns.length; i++) {
      const turn = conversationTurns[i];

      console.log(`${i + 1}. ${turn.description}`);
      console.log('─'.repeat(40));

      // Add user message to conversation.
      conversationHistory.push({ role: 'user', content: turn.userMessage });

      console.log(`👤 You: ${turn.userMessage}\n`);

      // Get AI response using Universal Router.
      const response = await universalRouter.chat(
        'chat',
        conversationHistory,
        {
          temperature: 0.8,
          maxTokens: 1024
        }
      );

      const aiResponse = response.message.content;
      const { thinking, output } = extractResponse(aiResponse);

      // Show (truncated) thinking if present.
      if (thinking) {
        console.log(`🧠 Astrid thinking: "${thinking.substring(0, 120)}${thinking.length > 120 ? '...' : ''}"\n`);
      }

      console.log(`💕 Astrid: ${output}\n`);

      // Add AI response (without <think> blocks) to conversation history so
      // the reasoning text does not leak into later turns.
      conversationHistory.push({ role: 'assistant', content: output });

      console.log('─'.repeat(60) + '\n');
    }

    console.log('🎯 Universal Multilingual Test Results:');
    console.log('─'.repeat(60));
    console.log('✅ Universal Router handled multilingual conversation successfully!');
    console.log('✅ Language switching worked seamlessly');
    console.log('✅ Conversation memory maintained across language changes');
    console.log('✅ Modern architecture with multi-provider support ready');

    console.log('\n🚀 Ready for production deployment with Universal LLM system!');

  } catch (error) {
    console.error('❌ Universal multilingual test failed:', error);
    if (error instanceof Error) {
      console.error('Error details:', error.message);
    }
    process.exit(1);
  }
}

// Run the clean universal test
testCleanUniversalLanguageSwitching().catch(console.error);
|
|
@@ -0,0 +1,369 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Complete RAG Integration with Real ChromaDBService
|
|
3
|
+
*
|
|
4
|
+
* This demo shows the complete integration between Universal LLM Client
|
|
5
|
+
* and the aura-companion ChromaDBService for real memory storage and retrieval.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { AIModelFactory } from '../../factory';
|
|
9
|
+
import { ToolBuilder } from '../../tools';
|
|
10
|
+
import { RAGService, createRAGTools } from '../../rag-service';
|
|
11
|
+
|
|
12
|
+
// Import types to avoid circular dependencies
// NOTE(review): ChromaDBService is deliberately typed as `any` so this demo
// file does not create an import cycle with the real service module.
type ChromaDBService = any;

/**
 * Demo configuration
 *
 * Fresh IDs are derived from Date.now() so each run writes into its own
 * logical user/conversation namespace in the (simulated) memory store.
 */
const CONFIG = {
  // User configuration
  userId: 'user_' + Date.now(),
  conversationId: 'conv_' + Date.now(),
  userPersonaId: 'persona_' + Date.now(),

  // AI Model configuration
  aiModel: 'qwen2.5:3b', // Free Ollama model

  // Demo scenarios: each entry is one user utterance plus a framing prompt
  // describing what the AI is expected to do with it.
  scenarios: [
    {
      name: "User Introduction",
      user: "Hi! My name is Jordan and I'm a data scientist at AI Research Lab. I specialize in natural language processing and computer vision.",
      prompt: "A user is introducing themselves. Analyze if there's valuable information to store."
    },
    {
      name: "Preferences & Goals",
      user: "I'm really passionate about open-source AI and want to contribute to major ML libraries this year. I prefer Python over R and love working with transformers.",
      prompt: "The user shared goals and preferences. Determine what should be remembered."
    },
    {
      name: "Important Reminder",
      user: "Please remember that I have a presentation about RAG systems to the board next Friday at 3 PM. It's really important for my career.",
      prompt: "The user wants something remembered. Store this important information."
    },
    {
      name: "Skill & Experience",
      user: "I have 5 years of experience with PyTorch and I've published 3 papers on attention mechanisms. I'm also good at system design.",
      prompt: "User shared professional background. Analyze and store relevant information."
    },
    {
      name: "Memory Retrieval Test",
      user: "What do you remember about my background and goals?",
      prompt: "User is asking about stored information. Search memory to provide relevant details."
    }
  ]
};
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Simulate real ChromaDBService for demonstration
|
|
59
|
+
* In production, replace this with actual ChromaDBService import
|
|
60
|
+
*/
|
|
61
|
+
class ChromaDBServiceSimulator {
|
|
62
|
+
private insights: Map<string, any> = new Map();
|
|
63
|
+
private nextId = 1;
|
|
64
|
+
|
|
65
|
+
async initialize(): Promise<void> {
|
|
66
|
+
console.log('ā
ChromaDB service initialized (simulator)');
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
isInitialized(): boolean {
|
|
70
|
+
return true;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
async addInsight(
|
|
74
|
+
userId: string,
|
|
75
|
+
content: string,
|
|
76
|
+
category: string,
|
|
77
|
+
sourceConversationId: string,
|
|
78
|
+
confidence: number = 0.7,
|
|
79
|
+
userPersonaId?: string,
|
|
80
|
+
basePersonaId?: string,
|
|
81
|
+
extractionType?: string
|
|
82
|
+
): Promise<string> {
|
|
83
|
+
const id = `insight_${this.nextId++}`;
|
|
84
|
+
const insight = {
|
|
85
|
+
id,
|
|
86
|
+
userId,
|
|
87
|
+
content,
|
|
88
|
+
category,
|
|
89
|
+
confidence,
|
|
90
|
+
timestamp: new Date().toISOString(),
|
|
91
|
+
sourceConversationId,
|
|
92
|
+
userPersonaId,
|
|
93
|
+
basePersonaId,
|
|
94
|
+
extractionType: extractionType || 'ai_memory'
|
|
95
|
+
};
|
|
96
|
+
|
|
97
|
+
this.insights.set(id, insight);
|
|
98
|
+
console.log(`š¾ Stored in ChromaDB: [${category}] ${content.substring(0, 60)}${content.length > 60 ? '...' : ''}`);
|
|
99
|
+
return id;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
async searchSimilarInsights(
|
|
103
|
+
userId: string,
|
|
104
|
+
query: string,
|
|
105
|
+
limit: number = 5,
|
|
106
|
+
minSimilarity: number = 0.3
|
|
107
|
+
): Promise<{
|
|
108
|
+
insights: any[];
|
|
109
|
+
stats: any;
|
|
110
|
+
}> {
|
|
111
|
+
const queryLower = query.toLowerCase();
|
|
112
|
+
const userInsights = Array.from(this.insights.values())
|
|
113
|
+
.filter(insight => insight.userId === userId);
|
|
114
|
+
|
|
115
|
+
// Enhanced similarity scoring
|
|
116
|
+
const scoredInsights = userInsights.map(insight => {
|
|
117
|
+
const contentLower = insight.content.toLowerCase();
|
|
118
|
+
let similarity = 0;
|
|
119
|
+
|
|
120
|
+
// Word overlap scoring
|
|
121
|
+
const queryWords = queryLower.split(/\s+/).filter((w: string) => w.length > 2);
|
|
122
|
+
const contentWords = contentLower.split(/\s+/).filter((w: string) => w.length > 2);
|
|
123
|
+
const overlap = queryWords.filter(word =>
|
|
124
|
+
contentWords.some((cWord: string) => cWord.includes(word) || word.includes(cWord))
|
|
125
|
+
).length;
|
|
126
|
+
|
|
127
|
+
if (queryWords.length > 0) {
|
|
128
|
+
similarity = overlap / queryWords.length;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// Boost for exact matches and important keywords
|
|
132
|
+
if (contentLower.includes(queryLower)) similarity += 0.3;
|
|
133
|
+
if (queryLower.includes('background') && (contentLower.includes('work') || contentLower.includes('experience'))) similarity += 0.2;
|
|
134
|
+
if (queryLower.includes('goal') && contentLower.includes('want')) similarity += 0.2;
|
|
135
|
+
if (queryLower.includes('name') && contentLower.includes('name')) similarity += 0.4;
|
|
136
|
+
|
|
137
|
+
similarity = Math.min(1.0, similarity);
|
|
138
|
+
|
|
139
|
+
return {
|
|
140
|
+
...insight,
|
|
141
|
+
similarity,
|
|
142
|
+
content: insight.content,
|
|
143
|
+
metadata: {
|
|
144
|
+
category: insight.category,
|
|
145
|
+
extractedAt: insight.timestamp,
|
|
146
|
+
confidence: insight.confidence
|
|
147
|
+
}
|
|
148
|
+
};
|
|
149
|
+
})
|
|
150
|
+
.filter(insight => insight.similarity >= minSimilarity)
|
|
151
|
+
.sort((a, b) => b.similarity - a.similarity)
|
|
152
|
+
.slice(0, limit);
|
|
153
|
+
|
|
154
|
+
return {
|
|
155
|
+
insights: scoredInsights,
|
|
156
|
+
stats: {
|
|
157
|
+
totalCandidates: userInsights.length,
|
|
158
|
+
filteredResults: scoredInsights.length,
|
|
159
|
+
querySimilarityThreshold: minSimilarity,
|
|
160
|
+
averageSimilarity: scoredInsights.length > 0
|
|
161
|
+
? scoredInsights.reduce((sum, i) => sum + i.similarity, 0) / scoredInsights.length
|
|
162
|
+
: 0,
|
|
163
|
+
topSimilarity: scoredInsights.length > 0 ? scoredInsights[0].similarity : 0,
|
|
164
|
+
categoriesFound: [...new Set(scoredInsights.map(i => i.category))]
|
|
165
|
+
}
|
|
166
|
+
};
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
async getUserInsightsByCategory(
|
|
170
|
+
userId: string,
|
|
171
|
+
category?: string,
|
|
172
|
+
userPersonaId?: string,
|
|
173
|
+
limit?: number
|
|
174
|
+
): Promise<any[]> {
|
|
175
|
+
return Array.from(this.insights.values())
|
|
176
|
+
.filter(insight => {
|
|
177
|
+
if (insight.userId !== userId) return false;
|
|
178
|
+
if (category && insight.category !== category) return false;
|
|
179
|
+
if (userPersonaId && insight.userPersonaId !== userPersonaId) return false;
|
|
180
|
+
return true;
|
|
181
|
+
})
|
|
182
|
+
.slice(0, limit || 10)
|
|
183
|
+
.map(insight => ({
|
|
184
|
+
content: insight.content,
|
|
185
|
+
metadata: {
|
|
186
|
+
category: insight.category,
|
|
187
|
+
extractedAt: insight.timestamp,
|
|
188
|
+
confidence: insight.confidence
|
|
189
|
+
}
|
|
190
|
+
}));
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
async checkHealth(): Promise<any> {
|
|
194
|
+
return {
|
|
195
|
+
status: 'healthy',
|
|
196
|
+
chromaDB: true,
|
|
197
|
+
embeddings: true
|
|
198
|
+
};
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
/**
|
|
203
|
+
* Main demo function
|
|
204
|
+
*/
|
|
205
|
+
async function runCompleteRAGDemo() {
|
|
206
|
+
console.log('š Complete RAG Integration Demo');
|
|
207
|
+
console.log('='.repeat(60));
|
|
208
|
+
console.log(`š¤ User ID: ${CONFIG.userId}`);
|
|
209
|
+
console.log(`š¬ Conversation ID: ${CONFIG.conversationId}`);
|
|
210
|
+
console.log(`š§ AI Model: ${CONFIG.aiModel}\n`);
|
|
211
|
+
|
|
212
|
+
try {
|
|
213
|
+
// 1. Initialize ChromaDB service (simulated)
|
|
214
|
+
console.log('š§ Initializing ChromaDB service...');
|
|
215
|
+
const chromaDB = new ChromaDBServiceSimulator() as ChromaDBService;
|
|
216
|
+
await chromaDB.initialize();
|
|
217
|
+
|
|
218
|
+
// 2. Create RAG service
|
|
219
|
+
console.log('š§ Setting up RAG service...');
|
|
220
|
+
const ragService = new RAGService({
|
|
221
|
+
userId: CONFIG.userId,
|
|
222
|
+
conversationId: CONFIG.conversationId,
|
|
223
|
+
userPersonaId: CONFIG.userPersonaId,
|
|
224
|
+
basePersonaId: 'base_persona_default'
|
|
225
|
+
}, chromaDB);
|
|
226
|
+
|
|
227
|
+
// 3. Initialize AI model
|
|
228
|
+
console.log('š¤ Initializing AI model...');
|
|
229
|
+
const aiModel = AIModelFactory.createOllamaChatModel(CONFIG.aiModel);
|
|
230
|
+
await aiModel.ensureReady();
|
|
231
|
+
|
|
232
|
+
// 4. Create and register RAG tools
|
|
233
|
+
console.log('š§ Registering RAG tools...');
|
|
234
|
+
const ragTools = createRAGTools(ragService);
|
|
235
|
+
const allTools = [
|
|
236
|
+
{
|
|
237
|
+
name: ragTools.storeMemory.name,
|
|
238
|
+
description: ragTools.storeMemory.description,
|
|
239
|
+
parameters: ragTools.storeMemory.parameters,
|
|
240
|
+
handler: ragTools.storeMemory.handler
|
|
241
|
+
},
|
|
242
|
+
{
|
|
243
|
+
name: ragTools.searchMemory.name,
|
|
244
|
+
description: ragTools.searchMemory.description,
|
|
245
|
+
parameters: ragTools.searchMemory.parameters,
|
|
246
|
+
handler: ragTools.searchMemory.handler
|
|
247
|
+
},
|
|
248
|
+
{
|
|
249
|
+
name: ragTools.analyzeConversation.name,
|
|
250
|
+
description: ragTools.analyzeConversation.description,
|
|
251
|
+
parameters: ragTools.analyzeConversation.parameters,
|
|
252
|
+
handler: ragTools.analyzeConversation.handler
|
|
253
|
+
},
|
|
254
|
+
ToolBuilder.commonTools.getCurrentTime
|
|
255
|
+
];
|
|
256
|
+
|
|
257
|
+
aiModel.registerTools(allTools);
|
|
258
|
+
|
|
259
|
+
console.log('ā
Setup complete! Starting conversation simulation...\n');
|
|
260
|
+
|
|
261
|
+
// 5. Run conversation scenarios
|
|
262
|
+
for (let i = 0; i < CONFIG.scenarios.length; i++) {
|
|
263
|
+
const scenario = CONFIG.scenarios[i];
|
|
264
|
+
|
|
265
|
+
console.log(`\n${'='.repeat(60)}`);
|
|
266
|
+
console.log(`š SCENARIO ${i + 1}: ${scenario.name}`);
|
|
267
|
+
console.log(`${'='.repeat(60)}`);
|
|
268
|
+
console.log(`š¤ User: ${scenario.user}`);
|
|
269
|
+
console.log(`\nš¤ AI Processing (${scenario.prompt})...`);
|
|
270
|
+
|
|
271
|
+
try {
|
|
272
|
+
const response = await aiModel.chatWithTools([
|
|
273
|
+
{
|
|
274
|
+
role: 'system',
|
|
275
|
+
content: `You are an AI assistant with a sophisticated memory system. You have these capabilities:
|
|
276
|
+
|
|
277
|
+
1. **analyze_conversation**: Analyze text to identify valuable information worth storing
|
|
278
|
+
2. **store_memory**: Store important information in the memory system with appropriate categories
|
|
279
|
+
3. **search_memory**: Search stored memories for relevant information
|
|
280
|
+
|
|
281
|
+
When users share information:
|
|
282
|
+
- First analyze it to see if it contains valuable details (personal info, preferences, goals, facts, etc.)
|
|
283
|
+
- If valuable information is found, store it with the appropriate category and importance level
|
|
284
|
+
- Always explain your reasoning for storing or not storing information
|
|
285
|
+
|
|
286
|
+
When users ask about previous information:
|
|
287
|
+
- Search your memory for relevant details
|
|
288
|
+
- Provide comprehensive answers based on stored information
|
|
289
|
+
|
|
290
|
+
Categories available: personal_info, preferences, facts, events, relationships, goals, skills, interests, other
|
|
291
|
+
|
|
292
|
+
Be proactive about storing valuable information and helpful when retrieving it.`
|
|
293
|
+
},
|
|
294
|
+
{
|
|
295
|
+
role: 'user',
|
|
296
|
+
content: scenario.user
|
|
297
|
+
}
|
|
298
|
+
]);
|
|
299
|
+
|
|
300
|
+
console.log(`\nš¤ AI Response:`);
|
|
301
|
+
console.log(response.content);
|
|
302
|
+
|
|
303
|
+
// Show tool usage
|
|
304
|
+
if (response.tool_calls && response.tool_calls.length > 0) {
|
|
305
|
+
console.log(`\nš§ Tools Used: ${response.tool_calls.map((tc: any) => tc.function.name).join(', ')}`);
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
} catch (error) {
|
|
309
|
+
console.error(`ā Error in scenario ${i + 1}:`, error);
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
// Delay between scenarios
|
|
313
|
+
if (i < CONFIG.scenarios.length - 1) {
|
|
314
|
+
await new Promise(resolve => setTimeout(resolve, 2000));
|
|
315
|
+
}
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
// 6. Final memory summary
|
|
319
|
+
console.log(`\n${'='.repeat(60)}`);
|
|
320
|
+
console.log('š FINAL MEMORY SUMMARY');
|
|
321
|
+
console.log(`${'='.repeat(60)}`);
|
|
322
|
+
|
|
323
|
+
const categories = ['personal_info', 'preferences', 'goals', 'facts', 'skills', 'events'];
|
|
324
|
+
let totalMemories = 0;
|
|
325
|
+
|
|
326
|
+
for (const category of categories) {
|
|
327
|
+
const memories = await chromaDB.getUserInsightsByCategory(CONFIG.userId, category);
|
|
328
|
+
if (memories.length > 0) {
|
|
329
|
+
console.log(`\nš ${category.toUpperCase()} (${memories.length} items):`);
|
|
330
|
+
memories.forEach((memory: any, idx: number) => {
|
|
331
|
+
console.log(` ${idx + 1}. ${memory.content}`);
|
|
332
|
+
totalMemories++;
|
|
333
|
+
});
|
|
334
|
+
}
|
|
335
|
+
}
|
|
336
|
+
|
|
337
|
+
console.log(`\nš STATISTICS:`);
|
|
338
|
+
console.log(` ⢠Total memories stored: ${totalMemories}`);
|
|
339
|
+
console.log(` ⢠Categories used: ${categories.filter(async cat => {
|
|
340
|
+
const memories = await chromaDB.getUserInsightsByCategory(CONFIG.userId, cat);
|
|
341
|
+
return memories.length > 0;
|
|
342
|
+
}).length}`);
|
|
343
|
+
console.log(` ⢠User ID: ${CONFIG.userId}`);
|
|
344
|
+
console.log(` ⢠Conversation ID: ${CONFIG.conversationId}`);
|
|
345
|
+
|
|
346
|
+
console.log(`\nā
Complete RAG Integration Demo finished successfully!`);
|
|
347
|
+
console.log(`\nš To use with real ChromaDBService:`);
|
|
348
|
+
console.log(`1. Replace ChromaDBServiceSimulator with real ChromaDBService`);
|
|
349
|
+
console.log(`2. Set up proper environment variables (CHROMA_HOST, CHROMA_PORT, etc.)`);
|
|
350
|
+
console.log(`3. Start ChromaDB server and Ollama with embedding model`);
|
|
351
|
+
console.log(`4. The AI will automatically store and retrieve real memories!`);
|
|
352
|
+
|
|
353
|
+
} catch (error) {
|
|
354
|
+
console.error('ā Demo failed:', error);
|
|
355
|
+
process.exit(1);
|
|
356
|
+
}
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
// Export for integration — lets other modules reuse the demo entry point,
// the in-memory ChromaDB stand-in, and the demo configuration.
export {
  runCompleteRAGDemo,
  ChromaDBServiceSimulator,
  CONFIG
};

// Run if executed directly (CommonJS entry-point check); rejections are
// logged rather than crashing the process.
if (require.main === module) {
  runCompleteRAGDemo().catch(console.error);
}
|