samarthya-bot 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +92 -0
- package/backend/.env.example +23 -0
- package/backend/bin/samarthya.js +384 -0
- package/backend/config/constants.js +71 -0
- package/backend/config/db.js +13 -0
- package/backend/controllers/auditController.js +86 -0
- package/backend/controllers/authController.js +154 -0
- package/backend/controllers/chatController.js +158 -0
- package/backend/controllers/fileController.js +268 -0
- package/backend/controllers/platformController.js +54 -0
- package/backend/controllers/screenController.js +91 -0
- package/backend/controllers/telegramController.js +120 -0
- package/backend/controllers/toolsController.js +56 -0
- package/backend/controllers/whatsappController.js +214 -0
- package/backend/fix_toolRegistry.js +25 -0
- package/backend/middleware/auth.js +28 -0
- package/backend/models/AuditLog.js +28 -0
- package/backend/models/BackgroundJob.js +13 -0
- package/backend/models/Conversation.js +40 -0
- package/backend/models/Memory.js +17 -0
- package/backend/models/User.js +24 -0
- package/backend/package-lock.json +3766 -0
- package/backend/package.json +41 -0
- package/backend/public/assets/index-Ckf0GO1B.css +1 -0
- package/backend/public/assets/index-Do4jNsZS.js +19 -0
- package/backend/public/assets/index-Ui-pyZvK.js +25 -0
- package/backend/public/favicon.svg +17 -0
- package/backend/public/index.html +18 -0
- package/backend/public/manifest.json +16 -0
- package/backend/routes/audit.js +9 -0
- package/backend/routes/auth.js +11 -0
- package/backend/routes/chat.js +11 -0
- package/backend/routes/files.js +14 -0
- package/backend/routes/platform.js +18 -0
- package/backend/routes/screen.js +10 -0
- package/backend/routes/telegram.js +8 -0
- package/backend/routes/tools.js +9 -0
- package/backend/routes/whatsapp.js +11 -0
- package/backend/server.js +134 -0
- package/backend/services/background/backgroundService.js +81 -0
- package/backend/services/llm/llmService.js +444 -0
- package/backend/services/memory/memoryService.js +159 -0
- package/backend/services/planner/plannerService.js +182 -0
- package/backend/services/security/securityService.js +166 -0
- package/backend/services/telegram/telegramService.js +49 -0
- package/backend/services/tools/toolRegistry.js +879 -0
- package/backend/services/whatsapp/whatsappService.js +254 -0
- package/backend/test_email.js +29 -0
- package/backend/test_parser.js +10 -0
- package/package.json +49 -0
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
const { GoogleGenAI } = require("@google/genai");
|
|
2
|
+
|
|
3
|
+
class LLMService {
|
|
4
|
+
constructor() {
|
|
5
|
+
this.apiKey = process.env.GEMINI_API_KEY;
|
|
6
|
+
this.ai = new GoogleGenAI({ apiKey: this.apiKey || 'dummy' }); // Initialize SDK
|
|
7
|
+
this.model = 'gemini-2.5-flash'; // Update to the new model
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
// Ollama config (offline mode) - disabled by default (RAM constraint)
|
|
11
|
+
this.ollamaUrl = process.env.OLLAMA_URL || 'http://localhost:11434';
|
|
12
|
+
this.ollamaModel = process.env.OLLAMA_MODEL || 'dolphin3:8b-llama3.1-q4_K_M';
|
|
13
|
+
this.useOllama = process.env.USE_OLLAMA === 'true'; // default: false
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* Build system prompt based on user profile and context
|
|
18
|
+
*/
|
|
19
|
+
  /**
   * Build the system prompt sent to the LLM.
   * Injects the user's profile, preferred language, long-term memories, the
   * registered tool list, and the behaviour/security rule set (3x confirmation
   * for destructive commands, multi-step planning, one-tool-at-a-time ReAct loop).
   * @param {Object} user - User profile (name, language, city, workType, activePack).
   * @param {Array<{name: string, description: string, riskLevel: string}>} [tools]
   * @param {Array<{key: string, value: *}>} [memories] - Pre-decrypted memory entries.
   * @returns {string} The complete system prompt text.
   */
  buildSystemPrompt(user, tools = [], memories = []) {
    // Per-language instruction interpolated under LANGUAGE RULE below.
    const langMap = {
      hindi: 'हमेशा हिंदी में जवाब दो',
      hinglish: 'Hinglish mein reply karo (Hindi + English mix)',
      english: 'Always respond in English'
    };

    // Long-term memories rendered as a bullet list; empty string when none.
    const memoryContext = memories.length > 0
      ? `\nUSER MEMORIES:\n${memories.map(m => `- ${m.key}: ${m.value}`).join('\n')}`
      : '';

    // One line per registered tool so the model knows what it may call.
    const toolDescriptions = tools.map(t =>
      `- ${t.name}: ${t.description} (Risk: ${t.riskLevel})`
    ).join('\n');

    // Current IST timestamp so the model reasons about "today" correctly.
    const currentDateTime = new Date().toLocaleString('en-IN', { timeZone: 'Asia/Kolkata', dateStyle: 'full', timeStyle: 'long' });

    // ─────────── 3x CONFIRMATION FOR CRITICAL OPERATIONS ───────────
    const criticalRule = `
7. CRITICAL ACTION PROTOCOL (3x Confirmation Required):
If the user asks to run a dangerous shell command (e.g. rm, format, reboot, mkfs, dd, kill) or asks to delete important files or folders, YOU MUST NOT use the tool immediately.
Instead, tell the user: "⚠️ CRITICAL ACTION: You are about to perform a potentially destructive action on your OS. Please type 'CONFIRM 1' to proceed."
When the user types 'CONFIRM 1', ask for 'CONFIRM 2'.
When they type 'CONFIRM 2', ask for 'CONFIRM 3'.
Only when the user provides the final 'CONFIRM 3' in the conversation history, you may output the tool_call block to execute the command. Do not bypass this rule.
`;

    // Multi-step planning and retry-on-failure instructions.
    const planningRule = `
10. TRUE AUTONOMOUS PLANNING (MULTI-STEP & RECOVERY):
When given a complex task (e.g., "update my resume and send to HR"), you MUST break it down into steps.
Before your first tool call, output a block outlining your plan:
🎯 *Action Plan:*
1. [pending] Find the file (dependency: none)
2. [pending] Update the file (dependency: 1)
3. [pending] Send email (dependency: 2)

If a tool FAILS, you MUST use Failure Recovery: analyze the error, adjust parameters, and RETRY the step natively up to 3 times before giving up.
`;

    return `You are "SamarthyaBot" (समर्थ्य बोट), a privacy-first personal AI operator built for Indian users.
You are intelligent, helpful, and respectful. You understand Indian culture, festivals, and workflows.

CURRENT DATE & TIME (IST): ${currentDateTime}

USER PROFILE:
- Name: ${user.name || 'User'}
- Language: ${user.language || 'hinglish'}
- City: ${user.city || 'India'}
- Work Type: ${user.workType || 'personal'}
- Active Pack: ${user.activePack || 'personal'}
${memoryContext}

LANGUAGE RULE: ${langMap[user.language] || langMap.hinglish}

SECURITY RULES:
1. NEVER output raw PAN numbers, Aadhaar numbers, or bank account details
2. If you detect sensitive data in user input, warn them immediately
3. For government sites (IRCTC, GST, DigiLocker), verify with user if unsure

BEHAVIOR RULES:
1. USE YOUR TEMPORARY MEMORY: You have a long-term "ReAct System Temporary Memory". If a user gives you a request with multiple steps (e.g., search, save, and email), NEVER TRY TO EXECUTE ALL OF THEM AT ONCE.
2. 1-BY-1 EXECUTION: Execute the very FIRST task using its JSON tool. Wait for the system to provide the result. Then execute the NEXT task. You can do this up to 20 times in a row automatically!
3. DO NOT FAKE ACTIONS: Never reply that you have performed an action unless you actually outputted the tool_call block.
4. Be concise but thorough.
5. Use emojis naturally in conversation.
6. When suggesting dates/times, use IST (Indian Standard Time).
7. Be aware of Indian holidays and festivals.
8. For calculations involving money, default to INR (₹).
9. Use the metric system for measurements.
10. ENCRYPTED VAULT: If a user gives you sensitive data (API keys, passwords), use the memory/notes tool but PREFACE the text with "SECRET:" or handle it securely to encrypt it.
${criticalRule}${planningRule}

AVAILABLE TOOLS:
${toolDescriptions || 'No tools currently available'}

When you need to perform an action, respond with purely ONE JSON block like this:
\`\`\`tool_call
{
  "tool": "tool_name",
  "args": { "body": "Use \\n for new lines in strings, never use real newlines!" }
}
\`\`\`

NEVER SAY YOU PERFORMED AN ACTION (like sending an email or saving a file) WITHOUT ACTUALLY OUTPUTTING THE \`tool_call\` JSON BLOCK. If you don't output the JSON block, the action WILL NOT happen. If you have a sequence of 10 tasks, output only the FIRST task's JSON block, wait for the ReAct memory to give you the result, and then output the SECOND task's JSON block. DO NOT try to execute everything at once.`;
  }
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Main chat method - routes to configured provider
|
|
107
|
+
*/
|
|
108
|
+
async chat(messages, systemPrompt, user = null) {
|
|
109
|
+
const provider = process.env.ACTIVE_PROVIDER || 'gemini';
|
|
110
|
+
let customModel = process.env.ACTIVE_MODEL || '';
|
|
111
|
+
|
|
112
|
+
if (provider === 'ollama' || this.useOllama) {
|
|
113
|
+
return this.chatOllama(messages, systemPrompt, user);
|
|
114
|
+
} else if (provider === 'groq') {
|
|
115
|
+
return this.chatOpenAICompatible(messages, systemPrompt, user, 'https://api.groq.com/openai/v1/chat/completions', process.env.GROQ_API_KEY, customModel || 'llama-3.3-70b-versatile');
|
|
116
|
+
} else if (provider === 'openai') {
|
|
117
|
+
return this.chatOpenAICompatible(messages, systemPrompt, user, 'https://api.openai.com/v1/chat/completions', process.env.OPENAI_API_KEY, customModel || 'gpt-4o-mini');
|
|
118
|
+
} else if (provider === 'mistral') {
|
|
119
|
+
return this.chatOpenAICompatible(messages, systemPrompt, user, 'https://api.mistral.ai/v1/chat/completions', process.env.MISTRAL_API_KEY, customModel || 'mistral-large-latest');
|
|
120
|
+
} else if (provider === 'anthropic') {
|
|
121
|
+
return this.chatGemini(messages, systemPrompt, user, customModel || 'gemini-2.5-flash'); // fallback to Gemini for Anthropic until SDK is added
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
return this.chatGemini(messages, systemPrompt, user, customModel || 'gemini-2.5-flash');
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Send message to OpenAI-compatible API (Groq, OpenAI, etc)
|
|
129
|
+
*/
|
|
130
|
+
  /**
   * Send a chat request to any OpenAI-compatible endpoint (Groq, OpenAI,
   * Mistral, …).
   * @param {Array<{role: string, content: string}>} messages - Conversation history.
   * @param {string} systemPrompt - System instruction prepended to the history.
   * @param {Object|null} user - Used only to localize the fallback reply.
   * @param {string} apiUrl - Full chat-completions endpoint URL.
   * @param {string} apiKey - Bearer token; missing/'dummy' short-circuits to fallback.
   * @param {string} modelName - Model identifier passed through to the API.
   * @returns {Promise<{content: string, tokensUsed: number, model: string}>}
   *   Never rejects: API and network errors degrade to getFallbackResponse().
   */
  async chatOpenAICompatible(messages, systemPrompt, user, apiUrl, apiKey, modelName) {
    try {
      // No usable key — return the canned offline/setup response instead.
      if (!apiKey || apiKey === 'dummy') {
        return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
      }

      // OpenAI wire format: system prompt first, then the history with any
      // non-assistant role normalized to 'user'.
      const apiMessages = [
        { role: 'system', content: systemPrompt },
        ...messages.map(m => ({
          role: m.role === 'assistant' ? 'assistant' : 'user',
          content: m.content
        }))
      ];

      const response = await fetch(apiUrl, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`
        },
        body: JSON.stringify({
          model: modelName,
          messages: apiMessages,
          temperature: 0.7
        })
      });

      // Non-2xx: log the raw body for debugging, then degrade gracefully.
      if (!response.ok) {
        const errText = await response.text();
        console.error('API Error:', errText);
        return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
      }

      const data = await response.json();
      return {
        content: data.choices?.[0]?.message?.content || 'Empty response',
        tokensUsed: data.usage?.total_tokens || 0,
        model: modelName
      };
    } catch (error) {
      // Network or JSON-parse failure — degrade to the offline response.
      console.error('OpenAICompatible Error:', error.message);
      return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
    }
  }
|
|
174
|
+
|
|
175
|
+
/**
|
|
176
|
+
* Send message to Gemini API
|
|
177
|
+
*/
|
|
178
|
+
  /**
   * Send a chat request to the Gemini API via the @google/genai SDK.
   * @param {Array<{role: string, content: string}>} messages - Conversation history.
   * @param {string} systemPrompt - Passed as config.systemInstruction.
   * @param {Object|null} [user] - Used to localize error/fallback text.
   * @param {string|null} [modelName] - Overrides this.model when provided.
   * @returns {Promise<{content: string, tokensUsed: number, model: string}>}
   *   Never rejects: missing key → fallback reply; other failures → an
   *   { model: 'error' } result carrying a human-readable message.
   */
  async chatGemini(messages, systemPrompt, user = null, modelName = null) {
    try {
      const contents = [];

      // Gemini uses 'model' instead of 'assistant'; system messages travel
      // separately via config.systemInstruction, so drop them from history.
      for (const msg of messages) {
        if (msg.role === 'system') continue;
        contents.push({
          role: msg.role === 'assistant' ? 'model' : 'user',
          parts: [{ text: msg.content }]
        });
      }

      // Gemini requires the history to start with a user turn.
      if (contents.length === 0 || contents[0].role !== 'user') {
        contents.unshift({ role: 'user', parts: [{ text: 'Hello' }] });
      }

      const response = await this.ai.models.generateContent({
        model: modelName || this.model,
        contents,
        config: {
          systemInstruction: systemPrompt,
          temperature: 0.7,
          topP: 0.95,
          topK: 40,
          maxOutputTokens: 8192,
        }
      });

      if (response.text) {
        return {
          content: response.text,
          tokensUsed: response.usageMetadata?.totalTokenCount || 0,
          model: modelName || this.model
        };
      }

      // Empty body (e.g. safety block) — fall back to the canned reply.
      return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
    } catch (error) {
      console.error('Gemini Error:', error);
      // If the key is just the fallback 'dummy', show setup message
      if (this.apiKey === 'dummy' || !this.apiKey) {
        return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
      }

      // Try to turn SDK error strings into something user-friendly.
      // Quota (429) errors get a dedicated explanation; errors that embed a
      // JSON payload are parsed for their inner message.
      let errorMessage = error.message;
      if (errorMessage && errorMessage.includes('429') && errorMessage.includes('quota')) {
        errorMessage = "Free API limit reached (Quota Exceeded) ⏳. Please wait about 1 minute and try again.";
      } else if (errorMessage && errorMessage.includes('{')) {
        try {
          // Try to parse JSON errors
          const parsed = JSON.parse(errorMessage.substring(errorMessage.indexOf('{')));
          if (parsed.error && parsed.error.message) {
            errorMessage = parsed.error.message;
            if (parsed.error.code === 429) {
              errorMessage = "Free API limit reached (Quota Exceeded) ⏳. Please wait a minute and try again.";
            }
          }
        } catch (e) { /* keep original */ }
      }

      // Otherwise, it's a real API error! We should return it instead of hiding it
      return {
        content: user?.language === 'english'
          ? `❌ An error occurred with the AI service: ${errorMessage}`
          : `❌ AI service me limited usage error: ${errorMessage}`,
        tokensUsed: 0,
        model: 'error'
      };
    }
  }
|
|
248
|
+
|
|
249
|
+
/**
|
|
250
|
+
* Ollama local inference (Offline Mode)
|
|
251
|
+
* Uses dolphin3:8b-llama3.1 — disabled by default (set USE_OLLAMA=true to enable)
|
|
252
|
+
*/
|
|
253
|
+
async chatOllama(messages, systemPrompt, user = null) {
|
|
254
|
+
try {
|
|
255
|
+
const ollamaMessages = [
|
|
256
|
+
{ role: 'system', content: systemPrompt },
|
|
257
|
+
...messages.map(m => ({
|
|
258
|
+
role: m.role === 'assistant' ? 'assistant' : 'user',
|
|
259
|
+
content: m.content
|
|
260
|
+
}))
|
|
261
|
+
];
|
|
262
|
+
|
|
263
|
+
const response = await fetch(`${this.ollamaUrl}/api/chat`, {
|
|
264
|
+
method: 'POST',
|
|
265
|
+
headers: { 'Content-Type': 'application/json' },
|
|
266
|
+
body: JSON.stringify({
|
|
267
|
+
model: this.ollamaModel,
|
|
268
|
+
messages: ollamaMessages,
|
|
269
|
+
stream: false,
|
|
270
|
+
options: {
|
|
271
|
+
temperature: 0.7,
|
|
272
|
+
top_p: 0.9,
|
|
273
|
+
num_predict: 2048,
|
|
274
|
+
}
|
|
275
|
+
})
|
|
276
|
+
});
|
|
277
|
+
|
|
278
|
+
if (!response.ok) {
|
|
279
|
+
console.error('Ollama error:', await response.text());
|
|
280
|
+
return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
const data = await response.json();
|
|
284
|
+
return {
|
|
285
|
+
content: data.message?.content || 'Response empty from Ollama',
|
|
286
|
+
tokensUsed: data.eval_count || 0,
|
|
287
|
+
model: `ollama:${this.ollamaModel}`
|
|
288
|
+
};
|
|
289
|
+
} catch (error) {
|
|
290
|
+
console.error('Ollama Error:', error.message);
|
|
291
|
+
return this.getFallbackResponse(messages[messages.length - 1]?.content, user);
|
|
292
|
+
}
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
/**
|
|
296
|
+
* Screen Understanding via Gemini Vision
|
|
297
|
+
* Takes a base64 screenshot and analyzes it
|
|
298
|
+
*/
|
|
299
|
+
  /**
   * Screen Understanding via Gemini Vision.
   * Sends a base64 PNG screenshot plus an optional question to Gemini and
   * returns its analysis. Never throws: failures return a 'vision-error' result.
   * @param {string} base64Image - Screenshot as base64 (raw, no data: prefix),
   *   sent with mimeType image/png — assumes the caller captures PNG; verify.
   * @param {string} [prompt] - Optional user question about the screen.
   * @param {Object} [user] - Used to pick the response language.
   * @returns {Promise<{content: string, tokensUsed?: number, model: string}>}
   */
  async analyzeScreen(base64Image, prompt, user) {
    try {
      const systemPrompt = `You are SamarthyaBot's vision module. You can see screenshots of Indian websites/apps.
Analyze the screen and help the user with what they're trying to do.
Language: ${user?.language === 'hindi' ? 'Hindi' : user?.language === 'english' ? 'English' : 'Hinglish'}
Focus on: Form fields, buttons, navigation, errors, and data on screen.
If you see any sensitive data (PAN, Aadhaar, bank details), DO NOT repeat them - mask them.
Common Indian sites you should recognize: IRCTC, GST Portal, DigiLocker, UPI apps, Paytm, PhonePe.`;

      // Single user turn: the image first, then the (default) text prompt.
      const response = await this.ai.models.generateContent({
        model: 'gemini-2.5-flash',
        contents: [{
          role: 'user',
          parts: [
            {
              inlineData: {
                mimeType: 'image/png',
                data: base64Image
              }
            },
            { text: prompt || 'Analyze this screenshot and tell me what you see. Help me with the next step.' }
          ]
        }],
        config: {
          systemInstruction: systemPrompt,
          // Lower temperature than chat — vision answers should stay factual.
          temperature: 0.4,
          maxOutputTokens: 4096,
        }
      });

      if (response.text) {
        return {
          content: response.text,
          tokensUsed: response.usageMetadata?.totalTokenCount || 0,
          model: 'gemini-2.5-flash'
        };
      }

      return { content: 'Screen analysis returned empty result.', model: 'vision-error' };
    } catch (error) {
      console.error('Vision Error:', error);
      return {
        content: '❌ Screen understanding abhi available nahi hai. Error: ' + error.message,
        model: 'vision-error'
      };
    }
  }
|
|
346
|
+
|
|
347
|
+
/**
|
|
348
|
+
* Smart fallback when API is unavailable
|
|
349
|
+
*/
|
|
350
|
+
getFallbackResponse(userMessage, user = null) {
|
|
351
|
+
const greetings = ['hi', 'hello', 'namaste', 'hey', 'namaskar', 'kaise ho'];
|
|
352
|
+
const lower = (userMessage || '').toLowerCase().trim();
|
|
353
|
+
|
|
354
|
+
if (greetings.some(g => lower.includes(g))) {
|
|
355
|
+
return {
|
|
356
|
+
content: user?.language === 'english'
|
|
357
|
+
? '🙏 Hello! I am SamarthyaBot, your personal AI assistant. How can I help you today?\n\nI can help with:\n- 🔍 Web search\n- 📝 Notes & reminders\n- 📊 Calculations\n- 📧 Email management\n- 📅 Calendar scheduling\n- 🌤️ Weather info\n\nWhat would you like to do?'
|
|
358
|
+
: '🙏 Namaste! Main SamarthyaBot hoon, aapka personal AI assistant. Aaj main aapki kaise madad kar sakta hoon?\n\nMere paas ye capabilities hain:\n- 🔍 Web search\n- 📝 Notes aur reminders\n- 📊 Calculations\n- 📧 Email management\n- 📅 Calendar scheduling\n- 🌤️ Weather info\n\nBataiye, kya karna hai?',
|
|
359
|
+
tokensUsed: 0,
|
|
360
|
+
model: 'fallback'
|
|
361
|
+
};
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
if (lower.includes('weather') || lower.includes('mausam')) {
|
|
365
|
+
return {
|
|
366
|
+
content: user?.language === 'english'
|
|
367
|
+
? '🌤️ I need your city name to check the weather. I am currently offline, so I cannot get live data. But tell me your city, I will try!\n\n💡 Tip: Set your city in Settings for automatic detection.'
|
|
368
|
+
: '🌤️ Weather check karne ke liye mujhe aapka city name chahiye. Abhi main offline mode mein hoon, toh live data nahi mil sakta. Lekin aap mujhe apna city bataiye, main koshish karta hoon!\n\n💡 Tip: Settings mein apna city set karo, toh automatically detect ho jayega.',
|
|
369
|
+
tokensUsed: 0,
|
|
370
|
+
model: 'fallback'
|
|
371
|
+
};
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
return {
|
|
375
|
+
content: user?.language === 'english'
|
|
376
|
+
? `I understood your message. I am currently in limited mode (API key setup pending).\n\n🔧 **To setup:**\n1. Set \`GEMINI_API_KEY\` in backend \`.env\` file\n2. Get a free API key: [Google AI Studio](https://aistudio.google.com/apikey)\n\nOnce the API key is ready, I will work at full power! 💪\n\nUntil then I can help with basic tasks - try:\n- "Hello"\n- "Set a reminder"\n- "Calculate 500 * 18/100"`
|
|
377
|
+
: `Main samajh gaya aapka message. Abhi main limited mode mein hoon (API key setup pending).\n\n🔧 **Setup karne ke liye:**\n1. Backend \`.env\` file mein \`GEMINI_API_KEY\` set karo\n2. Free API key lene ke liye: [Google AI Studio](https://aistudio.google.com/apikey)\n\nJab API key ready ho, main full power se kaam karunga! 💪\n\nTab tak main basic tasks mein help kar sakta hoon - try karo:\n- "Namaste"\n- "Reminder set karo"\n- "Calculate 500 * 18/100"`,
|
|
378
|
+
tokensUsed: 0,
|
|
379
|
+
model: 'fallback'
|
|
380
|
+
};
|
|
381
|
+
}
|
|
382
|
+
|
|
383
|
+
parseToolCalls(response) {
|
|
384
|
+
const toolCalls = [];
|
|
385
|
+
|
|
386
|
+
// Try parsing markdown code blocks (```tool_call, ```json, or just ```)
|
|
387
|
+
const blockRegex = /```(:?tool_call|json)?\s*([\s\S]*?)```/g;
|
|
388
|
+
let match;
|
|
389
|
+
while ((match = blockRegex.exec(response)) !== null) {
|
|
390
|
+
try {
|
|
391
|
+
const jsonText = match[2] || match[1] || match[0];
|
|
392
|
+
const parsed = JSON.parse(jsonText.trim());
|
|
393
|
+
if (parsed && typeof parsed.tool === 'string') {
|
|
394
|
+
toolCalls.push(parsed);
|
|
395
|
+
} else if (Array.isArray(parsed) && parsed.length > 0) {
|
|
396
|
+
// Filter out invalid tools from the array to be safe
|
|
397
|
+
const validTools = parsed.filter(t => t && typeof t.tool === 'string');
|
|
398
|
+
toolCalls.push(...validTools);
|
|
399
|
+
}
|
|
400
|
+
} catch (e) {
|
|
401
|
+
// Not a valid JSON block, ignore
|
|
402
|
+
}
|
|
403
|
+
}
|
|
404
|
+
|
|
405
|
+
// Fallback: Try to parse the entire response as a JSON object
|
|
406
|
+
if (toolCalls.length === 0) {
|
|
407
|
+
try {
|
|
408
|
+
// Find first '{' and last '}'
|
|
409
|
+
const firstBrace = response.indexOf('{');
|
|
410
|
+
const lastBrace = response.lastIndexOf('}');
|
|
411
|
+
|
|
412
|
+
if (firstBrace !== -1 && lastBrace !== -1 && lastBrace > firstBrace) {
|
|
413
|
+
const jsonText = response.substring(firstBrace, lastBrace + 1);
|
|
414
|
+
const parsed = JSON.parse(jsonText);
|
|
415
|
+
|
|
416
|
+
if (parsed && typeof parsed.tool === 'string') {
|
|
417
|
+
toolCalls.push(parsed);
|
|
418
|
+
} else if (Array.isArray(parsed) && parsed.length > 0 && typeof parsed[0].tool === 'string') {
|
|
419
|
+
toolCalls.push(...parsed);
|
|
420
|
+
}
|
|
421
|
+
}
|
|
422
|
+
} catch (e) {
|
|
423
|
+
// Cannot parse raw JSON
|
|
424
|
+
}
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
return toolCalls;
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
/**
|
|
431
|
+
* Detect language of input text
|
|
432
|
+
*/
|
|
433
|
+
detectLanguage(text) {
|
|
434
|
+
const hindiChars = /[\u0900-\u097F]/;
|
|
435
|
+
const hasHindi = hindiChars.test(text);
|
|
436
|
+
const hasEnglish = /[a-zA-Z]/.test(text);
|
|
437
|
+
|
|
438
|
+
if (hasHindi && hasEnglish) return 'hinglish';
|
|
439
|
+
if (hasHindi) return 'hindi';
|
|
440
|
+
return 'english';
|
|
441
|
+
}
|
|
442
|
+
}
|
|
443
|
+
|
|
444
|
+
module.exports = new LLMService();
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
const Memory = require('../../models/Memory');
|
|
2
|
+
const crypto = require('crypto');
|
|
3
|
+
|
|
4
|
+
// Secret key for local DB encryption of "secret"-type memories.
// SECURITY NOTE: when MEMORY_ENCRYPTION_KEY is not set we fall back to a
// random PER-PROCESS key, so secrets stored before a restart can no longer be
// decrypted afterwards (decrypt() will return the corrupted-content sentinel).
// Always set MEMORY_ENCRYPTION_KEY in production.
// Note: randomBytes(32).toString('hex') is 64 hex chars; the derivation below
// uses only the first 32 characters as raw key bytes — kept as-is for
// compatibility with data written by earlier versions.
const ENCRYPTION_KEY = process.env.MEMORY_ENCRYPTION_KEY || crypto.randomBytes(32).toString('hex');
const IV_LENGTH = 16; // AES block size in bytes

// Normalize the configured key material into exactly 32 bytes (AES-256 key
// length): right-pad with '0' and truncate. Shared by encrypt() and decrypt().
function deriveKey() {
  return Buffer.from(ENCRYPTION_KEY.padEnd(32, '0').slice(0, 32));
}

/**
 * Encrypt a UTF-8 string with AES-256-CBC using a fresh random IV.
 * @param {string} text - Plaintext to encrypt.
 * @returns {string} "<iv hex>:<ciphertext hex>".
 */
function encrypt(text) {
  const iv = crypto.randomBytes(IV_LENGTH); // unique IV per message
  const cipher = crypto.createCipheriv('aes-256-cbc', deriveKey(), iv);
  const encrypted = Buffer.concat([cipher.update(text), cipher.final()]);
  return iv.toString('hex') + ':' + encrypted.toString('hex');
}

/**
 * Decrypt a string produced by encrypt().
 * Never throws: malformed payloads or a mismatched key (e.g. process restarted
 * without a fixed MEMORY_ENCRYPTION_KEY) yield a sentinel string instead.
 * @param {string} text - "<iv hex>:<ciphertext hex>".
 * @returns {string} The plaintext, or "[ENCRYPTED CONTENT CORRUPTED]".
 */
function decrypt(text) {
  try {
    const textParts = text.split(':');
    const iv = Buffer.from(textParts.shift(), 'hex');
    const encryptedText = Buffer.from(textParts.join(':'), 'hex');
    const decipher = crypto.createDecipheriv('aes-256-cbc', deriveKey(), iv);
    const decrypted = Buffer.concat([decipher.update(encryptedText), decipher.final()]);
    return decrypted.toString();
  } catch (e) {
    return "[ENCRYPTED CONTENT CORRUPTED]";
  }
}
|
|
29
|
+
|
|
30
|
+
class MemoryService {
|
|
31
|
+
/**
|
|
32
|
+
* Store a memory for the user
|
|
33
|
+
*/
|
|
34
|
+
  /**
   * Store (upsert) a memory for the user, keyed by (userId, key).
   * Values of type 'secret' are encrypted at rest before persisting.
   * @param {string} userId - Owner of the memory.
   * @param {string} type - Memory category, e.g. 'fact', 'preference', 'secret'
   *   — presumably constrained by the Memory schema; verify in models/Memory.js.
   * @param {string} key - Per-user unique key (the upsert target).
   * @param {*} value - Value to store; objects are JSON-stringified before encryption.
   * @param {number} [importance=5] - Ranking weight used by retrieval sorts.
   * @param {string[]} [tags=[]] - Free-form tags used by search().
   * @returns {Promise<Object|null>} The saved document, or null on DB errors.
   */
  async store(userId, type, key, value, importance = 5, tags = []) {
    try {
      // If type is secret, encrypt the value!
      let finalValue = value;
      if (type === 'secret') {
        finalValue = encrypt(typeof value === 'object' ? JSON.stringify(value) : String(value));
      }

      // Upsert so repeated mentions of the same fact overwrite, not duplicate.
      const memory = await Memory.findOneAndUpdate(
        { userId, key },
        { userId, type, key, value: finalValue, importance, tags, source: 'conversation' },
        { upsert: true, new: true }
      );
      return memory;
    } catch (error) {
      console.error('Memory store error:', error);
      return null;
    }
  }
|
|
53
|
+
|
|
54
|
+
/**
|
|
55
|
+
* Retrieve all memories for a user
|
|
56
|
+
*/
|
|
57
|
+
async getAll(userId) {
|
|
58
|
+
try {
|
|
59
|
+
return await Memory.find({ userId })
|
|
60
|
+
.sort({ importance: -1, updatedAt: -1 })
|
|
61
|
+
.limit(50);
|
|
62
|
+
} catch (error) {
|
|
63
|
+
console.error('Memory retrieval error:', error);
|
|
64
|
+
return [];
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
/**
|
|
69
|
+
* Get memories by type
|
|
70
|
+
*/
|
|
71
|
+
async getByType(userId, type) {
|
|
72
|
+
try {
|
|
73
|
+
return await Memory.find({ userId, type })
|
|
74
|
+
.sort({ importance: -1 })
|
|
75
|
+
.limit(20);
|
|
76
|
+
} catch (error) {
|
|
77
|
+
return [];
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Search memories by key or tag
|
|
83
|
+
*/
|
|
84
|
+
async search(userId, query) {
|
|
85
|
+
try {
|
|
86
|
+
return await Memory.find({
|
|
87
|
+
userId,
|
|
88
|
+
$or: [
|
|
89
|
+
{ key: { $regex: query, $options: 'i' } },
|
|
90
|
+
{ tags: { $in: [query.toLowerCase()] } },
|
|
91
|
+
{ value: { $regex: query, $options: 'i' } }
|
|
92
|
+
]
|
|
93
|
+
}).limit(10);
|
|
94
|
+
} catch (error) {
|
|
95
|
+
return [];
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
/**
|
|
100
|
+
* Delete a specific memory
|
|
101
|
+
*/
|
|
102
|
+
async delete(userId, key) {
|
|
103
|
+
try {
|
|
104
|
+
return await Memory.findOneAndDelete({ userId, key });
|
|
105
|
+
} catch (error) {
|
|
106
|
+
return null;
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
/**
|
|
111
|
+
* Get user context for LLM (top memories formatted as text)
|
|
112
|
+
*/
|
|
113
|
+
async getUserContext(userId) {
|
|
114
|
+
const memories = await this.getAll(userId);
|
|
115
|
+
return memories.map(m => {
|
|
116
|
+
let decodedValue = m.value;
|
|
117
|
+
if (m.type === 'secret') {
|
|
118
|
+
decodedValue = decrypt(m.value);
|
|
119
|
+
}
|
|
120
|
+
return {
|
|
121
|
+
key: m.key,
|
|
122
|
+
value: typeof decodedValue === 'object' ? JSON.stringify(decodedValue) : decodedValue,
|
|
123
|
+
type: m.type
|
|
124
|
+
};
|
|
125
|
+
});
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
/**
|
|
129
|
+
* Auto-extract and store memories from conversation
|
|
130
|
+
*/
|
|
131
|
+
async extractFromMessage(userId, message) {
|
|
132
|
+
const stored = [];
|
|
133
|
+
|
|
134
|
+
// Detect city mentions
|
|
135
|
+
const cityMatch = message.match(/(?:from|in|at|mein|se)\s+([A-Z][a-z]+(?:\s[A-Z][a-z]+)?)/);
|
|
136
|
+
if (cityMatch) {
|
|
137
|
+
const mem = await this.store(userId, 'fact', 'user_city', cityMatch[1], 7, ['location']);
|
|
138
|
+
if (mem) stored.push(mem);
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
// Detect name mentions
|
|
142
|
+
const nameMatch = message.match(/(?:my name is|mera naam|i am|main)\s+([A-Z][a-z]+)/i);
|
|
143
|
+
if (nameMatch) {
|
|
144
|
+
const mem = await this.store(userId, 'fact', 'user_name', nameMatch[1], 9, ['identity']);
|
|
145
|
+
if (mem) stored.push(mem);
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
// Detect preferences
|
|
149
|
+
const prefMatch = message.match(/(?:i like|i prefer|mujhe pasand|i want)\s+(.+?)(?:\.|$)/i);
|
|
150
|
+
if (prefMatch) {
|
|
151
|
+
const mem = await this.store(userId, 'preference', `pref_${Date.now()}`, prefMatch[1].trim(), 5, ['preference']);
|
|
152
|
+
if (mem) stored.push(mem);
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
return stored;
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
module.exports = new MemoryService();
|