@meechi-ai/core 1.0.0

Files changed (116)
  1. package/LICENSE +624 -0
  2. package/README.md +59 -0
  3. package/dist/components/CalendarView.d.ts +3 -0
  4. package/dist/components/CalendarView.js +72 -0
  5. package/dist/components/ChatInterface.d.ts +6 -0
  6. package/dist/components/ChatInterface.js +105 -0
  7. package/dist/components/FileExplorer.d.ts +9 -0
  8. package/dist/components/FileExplorer.js +757 -0
  9. package/dist/components/Icon.d.ts +9 -0
  10. package/dist/components/Icon.js +44 -0
  11. package/dist/components/SourceEditor.d.ts +13 -0
  12. package/dist/components/SourceEditor.js +50 -0
  13. package/dist/components/ThemeProvider.d.ts +5 -0
  14. package/dist/components/ThemeProvider.js +105 -0
  15. package/dist/components/ThemeSwitcher.d.ts +1 -0
  16. package/dist/components/ThemeSwitcher.js +16 -0
  17. package/dist/components/voice/VoiceInputArea.d.ts +14 -0
  18. package/dist/components/voice/VoiceInputArea.js +190 -0
  19. package/dist/components/voice/VoiceOverlay.d.ts +7 -0
  20. package/dist/components/voice/VoiceOverlay.js +71 -0
  21. package/dist/hooks/useMeechi.d.ts +16 -0
  22. package/dist/hooks/useMeechi.js +461 -0
  23. package/dist/hooks/useSync.d.ts +8 -0
  24. package/dist/hooks/useSync.js +87 -0
  25. package/dist/index.d.ts +14 -0
  26. package/dist/index.js +22 -0
  27. package/dist/lib/ai/embeddings.d.ts +15 -0
  28. package/dist/lib/ai/embeddings.js +128 -0
  29. package/dist/lib/ai/gpu-lock.d.ts +19 -0
  30. package/dist/lib/ai/gpu-lock.js +43 -0
  31. package/dist/lib/ai/llm.worker.d.ts +1 -0
  32. package/dist/lib/ai/llm.worker.js +7 -0
  33. package/dist/lib/ai/local-llm.d.ts +30 -0
  34. package/dist/lib/ai/local-llm.js +211 -0
  35. package/dist/lib/ai/manager.d.ts +20 -0
  36. package/dist/lib/ai/manager.js +51 -0
  37. package/dist/lib/ai/parsing.d.ts +12 -0
  38. package/dist/lib/ai/parsing.js +56 -0
  39. package/dist/lib/ai/prompts.d.ts +2 -0
  40. package/dist/lib/ai/prompts.js +2 -0
  41. package/dist/lib/ai/providers/gemini.d.ts +6 -0
  42. package/dist/lib/ai/providers/gemini.js +88 -0
  43. package/dist/lib/ai/providers/groq.d.ts +6 -0
  44. package/dist/lib/ai/providers/groq.js +42 -0
  45. package/dist/lib/ai/registry.d.ts +29 -0
  46. package/dist/lib/ai/registry.js +52 -0
  47. package/dist/lib/ai/tools.d.ts +2 -0
  48. package/dist/lib/ai/tools.js +106 -0
  49. package/dist/lib/ai/types.d.ts +22 -0
  50. package/dist/lib/ai/types.js +1 -0
  51. package/dist/lib/ai/worker.d.ts +1 -0
  52. package/dist/lib/ai/worker.js +60 -0
  53. package/dist/lib/audio/input.d.ts +13 -0
  54. package/dist/lib/audio/input.js +121 -0
  55. package/dist/lib/audio/stt.d.ts +13 -0
  56. package/dist/lib/audio/stt.js +119 -0
  57. package/dist/lib/audio/tts.d.ts +12 -0
  58. package/dist/lib/audio/tts.js +128 -0
  59. package/dist/lib/audio/vad.d.ts +18 -0
  60. package/dist/lib/audio/vad.js +117 -0
  61. package/dist/lib/colors.d.ts +16 -0
  62. package/dist/lib/colors.js +67 -0
  63. package/dist/lib/extensions.d.ts +35 -0
  64. package/dist/lib/extensions.js +24 -0
  65. package/dist/lib/hooks/use-voice-loop.d.ts +13 -0
  66. package/dist/lib/hooks/use-voice-loop.js +313 -0
  67. package/dist/lib/mcp/McpClient.d.ts +19 -0
  68. package/dist/lib/mcp/McpClient.js +42 -0
  69. package/dist/lib/mcp/McpRegistry.d.ts +47 -0
  70. package/dist/lib/mcp/McpRegistry.js +117 -0
  71. package/dist/lib/mcp/native/GroqVoiceNative.d.ts +21 -0
  72. package/dist/lib/mcp/native/GroqVoiceNative.js +29 -0
  73. package/dist/lib/mcp/native/LocalSyncNative.d.ts +19 -0
  74. package/dist/lib/mcp/native/LocalSyncNative.js +26 -0
  75. package/dist/lib/mcp/native/LocalVoiceNative.d.ts +19 -0
  76. package/dist/lib/mcp/native/LocalVoiceNative.js +27 -0
  77. package/dist/lib/mcp/native/MeechiNativeCore.d.ts +25 -0
  78. package/dist/lib/mcp/native/MeechiNativeCore.js +209 -0
  79. package/dist/lib/mcp/native/index.d.ts +10 -0
  80. package/dist/lib/mcp/native/index.js +10 -0
  81. package/dist/lib/mcp/types.d.ts +35 -0
  82. package/dist/lib/mcp/types.js +1 -0
  83. package/dist/lib/pdf.d.ts +10 -0
  84. package/dist/lib/pdf.js +142 -0
  85. package/dist/lib/settings.d.ts +48 -0
  86. package/dist/lib/settings.js +87 -0
  87. package/dist/lib/storage/db.d.ts +57 -0
  88. package/dist/lib/storage/db.js +45 -0
  89. package/dist/lib/storage/local.d.ts +28 -0
  90. package/dist/lib/storage/local.js +534 -0
  91. package/dist/lib/storage/migrate.d.ts +3 -0
  92. package/dist/lib/storage/migrate.js +122 -0
  93. package/dist/lib/storage/types.d.ts +66 -0
  94. package/dist/lib/storage/types.js +1 -0
  95. package/dist/lib/sync/client-drive.d.ts +9 -0
  96. package/dist/lib/sync/client-drive.js +69 -0
  97. package/dist/lib/sync/engine.d.ts +18 -0
  98. package/dist/lib/sync/engine.js +517 -0
  99. package/dist/lib/sync/google-drive.d.ts +52 -0
  100. package/dist/lib/sync/google-drive.js +183 -0
  101. package/dist/lib/sync/merge.d.ts +1 -0
  102. package/dist/lib/sync/merge.js +68 -0
  103. package/dist/lib/yjs/YjsProvider.d.ts +11 -0
  104. package/dist/lib/yjs/YjsProvider.js +33 -0
  105. package/dist/lib/yjs/graph.d.ts +11 -0
  106. package/dist/lib/yjs/graph.js +7 -0
  107. package/dist/lib/yjs/hooks.d.ts +7 -0
  108. package/dist/lib/yjs/hooks.js +37 -0
  109. package/dist/lib/yjs/store.d.ts +4 -0
  110. package/dist/lib/yjs/store.js +19 -0
  111. package/dist/lib/yjs/syncGraph.d.ts +1 -0
  112. package/dist/lib/yjs/syncGraph.js +38 -0
  113. package/dist/providers/theme-provider.d.ts +3 -0
  114. package/dist/providers/theme-provider.js +18 -0
  115. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  116. package/package.json +69 -0
package/dist/hooks/useMeechi.js ADDED
@@ -0,0 +1,461 @@
+ 'use client';
+ import { useState, useEffect, useCallback } from 'react';
+ import { localLlmService } from '../lib/ai/local-llm';
+ import { aiManager } from "../lib/ai/manager";
+ import { settingsManager } from '../lib/settings';
+ import { SYSTEM_PROMPT } from '../lib/ai/prompts';
+ import { mcpClient } from '../lib/mcp/McpClient';
+ import { AVAILABLE_MODELS } from '../lib/ai/registry';
+ import { parseToolCalls } from '../lib/ai/parsing';
+ export function useMeechi() {
+ const [isLowPowerDevice, setIsLowPowerDevice] = useState(true);
+ const [localAIStatus, setLocalAIStatus] = useState("");
+ const [downloadProgress, setDownloadProgress] = useState(null);
+ const [loadedModel, setLoadedModel] = useState(null);
+ const [rateLimitCooldown, setRateLimitCooldown] = useState(null);
+ const [activeMemories, setActiveMemories] = useState([]);
+ const [mode, _setMode] = useState('chat'); // Default to simple chat
+ const [isReady, setIsReady] = useState(false);
+ // Initialization Logic
+ useEffect(() => {
+ const init = async () => {
+ var _a, _b;
+ const config = await settingsManager.getConfig();
+ // Check Rate Limit Persisted
+ const persisted = localStorage.getItem('meechi_rate_limit_cooldown');
+ if (persisted) {
+ const ts = parseInt(persisted);
+ if (ts > Date.now())
+ setRateLimitCooldown(ts);
+ }
+ // Load Active Memories from Registry (Generic)
+ const { mcpRegistry } = await import('../lib/mcp/McpClient');
+ // Core allows ANY agent to be loaded, not just specific ones.
+ const memories = mcpRegistry.getMarketplace()
+ .filter(s => s.isActive)
+ .map(s => s.id);
+ setActiveMemories(memories);
+ // Mode Persistence Logic
+ const lastMode = localStorage.getItem('meechi_last_mode');
+ if (lastMode && (memories.includes(lastMode) || lastMode === 'chat')) {
+ _setMode(lastMode);
+ }
+ else {
+ _setMode('chat'); // Default to Simple Chat
+ }
+ // CHECK CLOUD PRIORITY (Mirroring chat logic)
+ // If activeProvider is Cloud and has Key, we skip auto-initializing Local 1B.
+ // This prevents the "Waking up 1B..." status and saves huge resources.
+ if (config.activeProviderId && config.activeProviderId !== 'local' && config.activeProviderId !== 'browser') {
+ const provider = config.providers.find(p => p.id === config.activeProviderId);
+ // Check Key in Config OR Environment
+ const hasKey = ((provider === null || provider === void 0 ? void 0 : provider.apiKey) && provider.apiKey.length > 0) ||
+ (config.activeProviderId === 'openai' && process.env.NEXT_PUBLIC_OPENAI_KEY_EXISTS) ||
+ (config.activeProviderId === 'groq' && process.env.NEXT_PUBLIC_GROQ_API_KEY); // Check env for Groq too if strict
+ if (hasKey) {
+ console.log(`[useMeechi] Cloud Configured (${config.activeProviderId}). Skipping Local Init.`);
+ setLocalAIStatus(`Cloud Ready (${config.activeProviderId})`);
+ setIsReady(true);
+ return;
+ }
+ }
+ if (!config.localAI.enabled)
+ return;
+ // Hardware Detection
+ try {
+ let gpuInfo = {};
+ if ('gpu' in navigator) {
+ const adapter = await navigator.gpu.requestAdapter();
+ if (adapter)
+ gpuInfo = await ((_b = (_a = adapter).requestAdapterInfo) === null || _b === void 0 ? void 0 : _b.call(_a)) || {};
+ }
+ // Heuristic: Apple or RTX 30/40 series -> High Power
+ const isHighPower = gpuInfo.vendor === 'apple' ||
+ /RTX (3090|4080|4090|A6000)/i.test(gpuInfo.renderer || "");
+ setIsLowPowerDevice(!isHighPower);
+ // Model Selection Logic via Registry
+ // Default: 1B for Low Power, 8B for High Power
+ const defaultLow = AVAILABLE_MODELS.local.find(m => m.low_power && m.family === 'llama').id;
+ // const defaultHigh = AVAILABLE_MODELS.find(m => !m.low_power && m.family === 'llama')!.id;
+ let modelId = defaultLow;
+ const configModel = config.localAI.model;
+ if (!configModel || configModel === 'Auto') {
+ // FORCE 1B Default (User Request: "Make the 1B the default")
+ // We ignore high power detection for stability.
+ modelId = defaultLow;
+ }
+ else {
+ // Check if the configModel exists in registry, otherwise fallback
+ const exists = AVAILABLE_MODELS.local.find(m => m.id === configModel);
+ modelId = exists ? exists.id : configModel;
+ }
+ setLoadedModel(modelId);
+ // Initialize WebLLM
+ const currentId = localLlmService.getModelId();
+ const needsInit = !localLlmService.isInitialized() || (currentId !== modelId);
+ if (needsInit) {
+ if (currentId && currentId !== modelId) {
+ const status = `Switching to ${modelId.includes('8B') ? '8B' : '1B'}...`;
+ setLocalAIStatus(status);
+ // Trigger blocking UI
+ setDownloadProgress({ percentage: 0, text: status });
+ }
+ else {
+ const status = `Waking up ${modelId.includes('8B') ? '8B' : '1B'}...`;
+ setLocalAIStatus(status);
+ // Trigger blocking UI
+ setDownloadProgress({ percentage: 0, text: status });
+ }
+ await localLlmService.initialize(modelId, (p) => {
+ if (p.includes("Fetching") || p.includes("Loading")) {
+ const match = p.match(/(\d+)%/);
+ if (match) {
+ setDownloadProgress({ percentage: parseInt(match[1]), text: p });
+ setLocalAIStatus(`Deep Thinking... (${match[1]}%)`);
+ }
+ }
+ });
+ setIsReady(true);
+ setLocalAIStatus("");
+ setDownloadProgress(null);
+ }
+ else {
+ // ALREADY INITIALIZED
+ console.log("[useMeechi] Local AI already initialized.");
+ setIsReady(true);
+ setLocalAIStatus("");
+ }
+ }
+ catch (e) {
+ console.error("Failed to init Local AI", e);
+ setLocalAIStatus("Hibernating (Init Failed)");
+ }
+ };
+ init();
+ }, []);
+ /**
+ * UNIFIED CHAT FUNCTION
+ * Handles Local -> Cloud fallback transparently.
+ * Executes MCP tools automatically.
+ */
+ const chat = useCallback(async (userMsg, history, context, onUpdate, onToolStart, onToolResult) => {
+ // 1. SIMPLE CHAT FALLBACK (Canvas / No Agents)
+ // If the mode is 'chat' and the agent isn't active, we do a regular chat without persona.
+ const isAgenticChat = activeMemories.includes(mode);
+ const isSimpleChat = mode === 'chat';
+ if (!isAgenticChat && !isSimpleChat) {
+ // If trying to use an inactive agent, fallback to simple chat
+ _setMode('chat');
+ }
+ const { mcpRegistry } = await import("../lib/mcp/McpClient");
+ const config = await settingsManager.getConfig();
+ // Determine if we should use Local AI or Cloud AI based on Active Provider
+ // If activeProviderId is valid and NOT 'local' (and has a key?), prioritize it.
+ // We assume 'local' or 'browser' means Local WebLLM.
+ let useLocal = config.localAI.enabled;
+ if (config.activeProviderId && config.activeProviderId !== 'local' && config.activeProviderId !== 'browser') {
+ // If user explicitly selected a Cloud Provider (e.g. Groq, OpenAI),
+ // we disable local usage for this turn.
+ // But we should verify if the Cloud Provider is actually configured (Key exists?)
+ const provider = config.providers.find(p => p.id === config.activeProviderId);
+ const hasKey = (provider === null || provider === void 0 ? void 0 : provider.apiKey) || (config.activeProviderId === 'openai' && process.env.NEXT_PUBLIC_OPENAI_KEY_EXISTS);
+ if (hasKey) {
+ useLocal = false;
+ console.log(`[useMeechi] Using Cloud Provider: ${config.activeProviderId}`);
+ }
+ else {
+ console.warn(`[useMeechi] Cloud Provider ${config.activeProviderId} selected but NO KEY found. Falling back to Local.`);
+ }
+ }
+ let finalContent = "";
+ let userContentToUse = userMsg; // Default to raw user message
+ // 2. PREPARE PERSONA BASED ON MODE & MEMORIES
+ let modePrompt = SYSTEM_PROMPT;
+ if (activeMemories.includes(mode)) {
+ modePrompt = (await mcpRegistry.getAgentInstructions(mode)) || SYSTEM_PROMPT;
+ }
+ // Combine with background memories (if any agentic memories are active)
+ const backgroundMemories = mcpRegistry.getCombinedInstructions();
+ let systemMsg = `${modePrompt}\n\n### ACTIVE MEMORY BACKGROUND\n${backgroundMemories}`;
+ let temp = mode === 'research' ? 0.3 : 0.7;
+ if (mode === 'research') {
+ // Truncate Context to prevent OOM
+ const MAX_CONTEXT_CHARS = 5000;
+ const contextStr = typeof context === 'string' ? context : String(context);
+ const safeContext = contextStr.length > MAX_CONTEXT_CHARS
+ ? contextStr.substring(0, MAX_CONTEXT_CHARS) + "\n...(truncated)"
+ : contextStr;
+ // NUKE CONTEXT CITATIONS
+ const cleanContext = safeContext.replace(/[\(\[]\s*[A-Z][a-zA-Z\s&.]*,\s*\d{4}[a-z]?\s*[\)\]]/g, '');
+ // CRITICAL FIX: Inject Context into the USER message for 1B focus.
+ userContentToUse = `### CONTEXT (TRUSTED USER DATA - READ CAREFULLY)\n${cleanContext}\n\n### INSTRUCTION\nUsing the Trusted Data above, answer the user's question or summarize the content. The data is accurate. Do not refuse.\n\nIMPORTANT: Do NOT include a list of References or Sources at the end.\n\n### USER QUESTION\n${userMsg}`;
+ }
+ else {
+ // CASUAL CHAT MODE
+ const safeChatContext = context;
+ if (safeChatContext && safeChatContext.length > 50) {
+ // Narrative Context Injection
+ const contextBlock = `
+ \n=== RELEVANT MEMORY & FILES ===
+ ${safeChatContext}
+ ===============================
+ (System Note: The above is context from your memory. Use it to answer the user naturally.)
+ `;
+ systemMsg += contextBlock;
+ }
+ }
+ // 3. LOCAL AI ATTEMPT
+ if (useLocal) {
+ // Guard: If Local AI is enabled but not ready, stop.
+ if (!isReady) {
+ // Check if actually initialized but state missed it (Race condition fix)
+ if (localLlmService.isInitialized()) {
+ console.log("[useMeechi] State desync detected. Setting Ready.");
+ setIsReady(true);
+ }
+ else {
+ onUpdate("\n\n*Meechi is warming up... (Please wait for 'Ready' status)*");
+ return;
+ }
+ }
+ try {
+ // Ensure initialized (Double check)
+ if (!localLlmService.isInitialized()) {
+ await localLlmService.initialize(config.localAI.model);
+ }
+ // Filter out system tool reports so AI doesn't mimic them
+ // This prevents the "hallucination" where AI just prints the result text
+ // ALSO FILTER ERROR MESSAGES so AI doesn't repeat them
+ // NEW: FILTER "REFUSALS". If the AI previously said "I don't have info", hide it so it doesn't repeat that pattern.
+ const cleanHistory = history.map(m => {
+ // Sanitize 'Michio:' prefixes from old logs to prevent hallucination
+ let content = m.content;
+ if (m.role === 'assistant' || m.role === 'michio') {
+ content = content.replace(/^(Michio|Meechi):\s*/i, '').trim();
+ }
+ return { role: m.role, content };
+ }).filter(m => !m.content.startsWith('> **Tool') &&
+ !m.content.startsWith('**Error**') &&
+ !m.content.startsWith('Error:') &&
+ !m.content.includes("I don't have any information about your previous activities") &&
+ !m.content.includes("context to draw upon") &&
+ // Anti-Hallucination Filters (Log Style)
+ !m.content.includes("**Topic Summary**") &&
+ !m.content.includes("**Files and Topics**") &&
+ !m.content.includes("**Tools Used**") &&
+ !m.content.includes("**Summary of Recent Activity**"));
+ const messages = [
+ { role: 'system', content: systemMsg },
+ ...cleanHistory,
+ { role: 'user', content: userContentToUse }
+ ];
+ await localLlmService.chat(messages, (chunk) => {
+ finalContent += chunk;
+ onUpdate(chunk);
+ }, {
+ temperature: temp,
+ // STOP TOKENS: Physically stop the model from generating references.
+ // We use the positive termination token "---END---" as the primary stop.
+ // We also include aggressive partial matches for References to catch them if the model ignores the end token.
+ stop: mode === 'research' ? [
+ "---END---",
+ "Reference:", "References:", "Source:", "Sources:", "Bibliography:",
+ "**Reference", "**Source", "### Reference", "### Source"
+ ] : undefined
+ });
+ // FINAL SANITIZATION BEFORE TOOLS/HISTORY
+ finalContent = finalContent.replace(/^((Michio|Meechi|Echo|Assistant):\s*)+/i, '').trim();
+ console.log(`[Raw AI Output (${mode})]:`, finalContent);
+ // Post-Processing: Check for Tools (Using centralized parser)
+ // Tools are technically allowed in both modes, but usually Research uses them more.
+ const tools = parseToolCalls(finalContent);
+ for (const tool of tools) {
+ if (onToolStart)
+ onToolStart(tool.name);
+ // CHECK FOR PARSE ERROR
+ if (tool.error) {
+ if (onToolResult) {
+ onToolResult(`\n> **Tool Error (${tool.name})**: Invalid JSON arguments. Please retry using strict JSON.`);
+ }
+ continue;
+ }
+ // EXECUTE VIA MCP SERVER
+ try {
+ const result = await mcpClient.executeTool(tool.name, tool.args);
+ // Add result to LOCAL history for the follow-up generation
+ const resStr = `\n> **Tool (${tool.name})**: ${result.summary || result.message || JSON.stringify(result)}`;
+ messages.push({ role: 'user', content: resStr });
+ if (onToolResult) {
+ onToolResult(resStr);
+ }
+ }
+ catch (toolErr) {
+ console.warn(`[Meechi] Tool Execution Failed: ${tool.name}`, toolErr);
+ const errStr = `\n> **Tool Error**: Failed to execute '${tool.name}'. Reason: ${toolErr.message || "Unknown error"}`;
+ messages.push({ role: 'user', content: errStr });
+ if (onToolResult)
+ onToolResult(errStr);
+ }
+ }
+ // RECURSIVE FOLLOW-UP: Generate confirmation message ONLY if tools were used
+ if (tools.length > 0) {
+ // Re-assemble messages for follow-up
+ const followUpMessages = [
+ ...messages.slice(0, messages.length - tools.length), // Original context (System + History + User)
+ { role: 'assistant', content: finalContent }, // The tool call it just made
+ ...messages.slice(messages.length - tools.length) // The tool results we pushed in the loop
+ ];
+ await localLlmService.chat(followUpMessages, (chunk) => {
+ // This will OVERWRITE the <function> output in the UI, which is exactly what we want
+ // (hiding the tool call implementation detail)
+ onUpdate(chunk);
+ }, { temperature: temp });
+ }
+ return; // Success, exit.
+ }
+ catch (e) {
+ console.warn("Local AI Failed.", e);
+ // CRITICAL FAIL-SAFE:
+ const activeId = config.activeProviderId || 'groq';
+ const activeProvider = config.providers.find(p => p.id === activeId);
+ // Strict check: Ensure apiKey is a non-empty string
+ const rawKey = activeProvider === null || activeProvider === void 0 ? void 0 : activeProvider.apiKey;
+ const hasCloudKey = (rawKey && rawKey.trim().length > 0) || (activeId === 'openai' && !!process.env.NEXT_PUBLIC_OPENAI_KEY_EXISTS);
+ const errorMessage = (e === null || e === void 0 ? void 0 : e.message) || "Unknown error";
+ console.log("[Meechi Fallback Debug]", {
+ error: errorMessage,
+ activeId,
+ hasCloudKey,
+ rawKeyLength: rawKey === null || rawKey === void 0 ? void 0 : rawKey.length
+ });
+ // If it was a GPU Crash, handle it specifically
+ if (errorMessage === 'GPU_CRASH' || errorMessage.includes('Device was lost') || errorMessage.includes('ContextWindowSizeExceededError')) {
+ if (errorMessage.includes('ContextWindowSizeExceededError')) {
+ onUpdate(`\n\n**System Alert**: Context too large for this model (Try clearing chat or shorter docs).`);
+ return;
+ }
+ setLocalAIStatus("GPU Crashed (Reload Required)");
+ onUpdate(`\n\n**System Alert**: Local AI GPU Driver Crashed.\n- Please reload to reset.`);
+ return; // STOP. Do not fallback.
+ }
+ // If regular error but NO Cloud Key, STOP.
+ if (!hasCloudKey) {
+ setLocalAIStatus("Error (No Cloud Fallback)");
+ onUpdate(`\n\n**Error**: Local AI failed. Cloud fallback skipped (No Key).\n\n**Reason**: ${e.message}`);
+ return; // STOP.
+ }
+ // Otherwise, fall through to Cloud
+ console.log("Attempting Cloud Fallback (Key Found)...");
+ }
+ }
+ // 2. CLOUD AI ATTEMPT
+ try {
+ if (rateLimitCooldown && Date.now() < rateLimitCooldown) {
+ throw new Error(`Rate limit active until ${new Date(rateLimitCooldown).toLocaleTimeString()}`);
+ }
+ // STATIC DESKTOP MODE: Direct Client-Side Call
+ const isStatic = process.env.NEXT_PUBLIC_IS_STATIC === 'true';
+ if (isStatic) {
+ console.log("[Meechi] Static Mode: Calling AI Client-Side...");
+ const result = await aiManager.chat(userMsg, systemMsg, // Context is already embedded in system/user msg by now
+ history, config, [] // Tools (TODO: Support client-side tools if needed)
+ );
+ // Emulate Stream (roughly) or just dump content
+ // AIManager returns full completion currently, not stream.
+ // We'll just dump it all at once for now or chunk it?
+ // The UI expects incremental updates if possible, but one big update is fine.
+ onUpdate(result.content);
+ return;
+ }
+ // WEB/PWA MODE: Server API Call
+ const { mcpClient } = await import("../lib/mcp/McpClient");
+ const dynamicTools = await mcpClient.getTools();
+ const res = await fetch("/api/chat", {
+ method: "POST",
+ body: JSON.stringify({
+ message: userMsg,
+ history,
+ context,
+ config,
+ tools: dynamicTools // Explicitly pass the resolved tools
+ }),
+ headers: { "Content-Type": "application/json" }
+ });
+ if (!res.ok) {
+ const errText = await res.text();
+ // Check if it's the known rate limit
+ if (res.status === 429) {
+ const retryAfter = 60 * 1000; // Default 1m
+ const cooldown = Date.now() + retryAfter;
+ setRateLimitCooldown(cooldown);
+ localStorage.setItem('meechi_rate_limit_cooldown', cooldown.toString());
+ }
+ // Parse if JSON
+ let errMsg = errText;
+ try {
+ const json = JSON.parse(errText);
+ if (json.message)
+ errMsg = json.message;
+ }
+ catch (_a) {
+ // If it's HTML, it's likely a 500/404 page dump
+ if (errText.trim().startsWith("<!DOCTYPE html") || errText.includes("<html")) {
+ errMsg = "Server Error (HTML Response - Check Logs)";
+ }
+ }
+ console.error("[useMeechi] Server Error Details:", errMsg);
+ throw new Error(`Server Error (${res.status}): ${errMsg.length > 500 ? errMsg.substring(0, 500) + "..." : errMsg}`);
+ }
+ const data = await res.json();
+ // If Text Response
+ if (data.response) {
+ onUpdate(data.response);
+ }
+ // If Tool Calls (Cloud Format)
+ if (data.tool_calls) {
+ for (const call of data.tool_calls) {
+ const name = call.function.name;
+ const args = JSON.parse(call.function.arguments);
+ if (onToolStart)
+ onToolStart(name);
+ // EXECUTE VIA MCP SERVER
+ const result = await mcpClient.executeTool(name, args);
+ if (onToolResult) {
+ const resStr = `\n> **Tool (${name})**: ${result.summary || result.message || JSON.stringify(result)}`;
+ onToolResult(resStr);
+ }
+ }
+ }
+ }
+ catch (e) {
+ // Handle GPU Crash specifically
+ if (e.message === 'GPU_CRASH' || e.message.includes('Device was lost')) {
+ setLocalAIStatus("GPU Crashed (Reloading...)");
+ // Optional: Auto-switch to lighter model?
+ // For now, just let the user know they need to reload or it will retry next time.
+ onUpdate(`\n\n**System Alert**: The GPU driver crashed. Please refresh the page to restore AI functionality.`);
+ }
+ else {
+ onUpdate(`\n\n**Error**: ${e.message}`);
+ }
+ }
+ }, [rateLimitCooldown, mode, isLowPowerDevice, loadedModel]);
+ return {
+ isReady,
+ localAIStatus,
+ downloadProgress,
+ chat,
+ isLowPowerDevice,
+ loadedModel,
+ activeMemories,
+ mode,
+ setMode: (m) => {
+ _setMode(m);
+ localStorage.setItem('meechi_last_mode', m);
+ },
+ stop: async () => {
+ console.log("[Meechi] User requested STOP.");
+ await localLlmService.interrupt();
+ }
+ };
+ }
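
For orientation, here is a minimal consumer sketch of the hook added above. It assumes a React app importing from the published package; the component, state names, and the empty history/context arguments are illustrative, not part of the package.

```jsx
import { useState } from 'react';
import { useMeechi } from '@meechi-ai/core';

function ChatBox() { // hypothetical component; only the hook API comes from the package
  const { isReady, localAIStatus, downloadProgress, chat, stop } = useMeechi();
  const [reply, setReply] = useState('');

  const send = async (text) => {
    setReply('');
    await chat(
      text,
      [],  // history: prior turns as { role, content } objects
      '',  // context: RAG text; in 'research' mode it is truncated and injected into the user message
      (chunk) => setReply(prev => prev + chunk),   // onUpdate: streamed chunks locally, one shot from cloud
      (name) => console.log('tool start:', name),  // onToolStart
      (res) => setReply(prev => prev + res)        // onToolResult
    );
  };

  if (!isReady) return <p>{downloadProgress?.text || localAIStatus || 'Loading...'}</p>;
  return (
    <div>
      <button onClick={() => send('Hello Meechi')}>Send</button>
      <button onClick={stop}>Stop</button>
      <pre>{reply}</pre>
    </div>
  );
}
```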
package/dist/hooks/useSync.d.ts ADDED
@@ -0,0 +1,8 @@
+ import { StorageProvider } from '../lib/storage/types';
+ export declare function useSync(storage: StorageProvider, session: any, updateSession: () => Promise<any>, currentDate: string): {
+ isSyncing: boolean;
+ syncNow: () => Promise<void>;
+ syncError: string | null;
+ syncMessage: string;
+ syncLogs: string[];
+ };
package/dist/hooks/useSync.js ADDED
@@ -0,0 +1,87 @@
+ import { useState, useEffect, useCallback, useRef } from 'react';
+ import { SyncEngine } from '../lib/sync/engine';
+ import { GoogleDriveClient } from '../lib/sync/google-drive';
+ export function useSync(storage, session, updateSession, currentDate) {
+ const [syncLogs, setSyncLogs] = useState([]);
+ const [isSyncing, setIsSyncing] = useState(false);
+ const [syncError, setSyncError] = useState(null);
+ const [syncMessage, setSyncMessage] = useState('');
+ const engineRef = useRef(null);
+ const addLog = useCallback((msg) => {
+ setSyncLogs(prev => {
+ const next = [...prev, msg];
+ return next.slice(-50); // Keep last 50
+ });
+ setSyncMessage(msg);
+ }, []);
+ // Initialize Engine when session is available
+ useEffect(() => {
+ if (session === null || session === void 0 ? void 0 : session.accessToken) {
+ const client = new GoogleDriveClient(session.accessToken);
+ engineRef.current = new SyncEngine(client, storage);
+ // Register with storage for manual triggering
+ if ('setSyncEngine' in storage) {
+ storage.setSyncEngine(engineRef.current);
+ }
+ }
+ else {
+ engineRef.current = null;
+ if ('setSyncEngine' in storage) {
+ storage.setSyncEngine(null);
+ }
+ }
+ }, [session === null || session === void 0 ? void 0 : session.accessToken, storage]);
+ const syncNow = useCallback(async () => {
+ if (!engineRef.current)
+ return;
+ if (isSyncing)
+ return;
+ setIsSyncing(true);
+ setSyncError(null);
+ addLog('Starting Sync...');
+ try {
+ await engineRef.current.sync((msg) => addLog(msg));
+ setSyncError(null); // Clear error on success
+ }
+ catch (e) {
+ console.error("Sync Error", e);
+ const errMsg = e.message || "Sync Failed";
+ setSyncError(errMsg);
+ addLog(`Error: ${errMsg}`);
+ // Handle Token Expiry
+ if (errMsg.includes("Unauthorized") || errMsg.includes("Token expired")) {
+ addLog("Refreshing Session...");
+ await updateSession(); // Trigger NextAuth rotation
+ }
+ }
+ finally {
+ setIsSyncing(false);
+ addLog("Finished.");
+ }
+ }, [isSyncing, session, addLog, updateSession]);
+ // Auto-sync on load and periodically
+ useEffect(() => {
+ if (!(session === null || session === void 0 ? void 0 : session.accessToken))
+ return;
+ // 1. Initial Sync (once per session/mount)
+ // We use a timeout to let the app settle
+ const timer = setTimeout(() => {
+ // Only sync if not already syncing (though syncNow checks this too)
+ if (!engineRef.current)
+ return;
+ syncNow();
+ }, 1000);
+ // 2. Periodic Sync
+ const interval = setInterval(() => {
+ syncNow();
+ }, 120000);
+ return () => {
+ clearTimeout(timer);
+ clearInterval(interval);
+ };
+ // CRITICAL DEBT: syncNow in dep array causes infinite loop if syncNow updates state that triggers re-render
+ // We really only want this to run when SESSION starts.
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [session === null || session === void 0 ? void 0 : session.accessToken]);
+ return { isSyncing, syncNow, syncError, syncMessage, syncLogs };
+ }
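
A sketch of wiring this hook up, assuming a NextAuth session (the code above calls updateSession to rotate tokens) and a storage object implementing the package's StorageProvider; the component and variable names are illustrative.

```jsx
import { useSession } from 'next-auth/react';
import { useSync } from '@meechi-ai/core';

function SyncButton({ storage }) { // storage: a StorageProvider, ideally exposing setSyncEngine
  const { data: session, update } = useSession();
  const today = new Date().toISOString().slice(0, 10);
  const { isSyncing, syncNow, syncError, syncMessage } = useSync(storage, session, update, today);

  return (
    <div>
      <button onClick={syncNow} disabled={isSyncing}>
        {isSyncing ? 'Syncing...' : 'Sync now'}
      </button>
      <span>{syncError ?? syncMessage}</span>
    </div>
  );
}
```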
package/dist/index.d.ts ADDED
@@ -0,0 +1,14 @@
+ export * from './lib/mcp/McpClient';
+ export * from './lib/mcp/McpRegistry';
+ export * from './lib/mcp/types';
+ export * from './lib/mcp/native';
+ export * from './lib/storage/db';
+ export * from './lib/storage/local';
+ export * from './lib/yjs/store';
+ export * from './lib/yjs/syncGraph';
+ export * from './lib/ai/embeddings';
+ export * from './lib/ai/prompts';
+ export * from './lib/ai/registry';
+ export * from './hooks/useMeechi';
+ export * from './lib/settings';
+ export * from './lib/extensions';
package/dist/index.js ADDED
@@ -0,0 +1,22 @@
+ // MCP Core
+ export * from './lib/mcp/McpClient';
+ export * from './lib/mcp/McpRegistry';
+ export * from './lib/mcp/types';
+ export * from './lib/mcp/native';
+ // Storage & Sync
+ export * from './lib/storage/db';
+ export * from './lib/storage/local';
+ export * from './lib/yjs/store';
+ export * from './lib/yjs/syncGraph';
+ // AI Logic & Prompts
+ export * from './lib/ai/embeddings';
+ export * from './lib/ai/prompts';
+ export * from './lib/ai/registry'; // Needed?
+ // Hooks
+ export * from './hooks/useMeechi';
+ // Settings & Extensions
+ export * from './lib/settings';
+ export * from './lib/extensions';
+ // Note: UI Components (SourceEditor, FileExplorer, etc.) are excluded from the
+ // Core library build because they depend on application-specific CSS.
+ // Consumers should import these directly from their own app.
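
In line with the closing note above, a consumer would split its imports roughly like this (the app-local path is illustrative):

```js
// Core logic comes from the package barrel...
import { useMeechi, settingsManager, generateEmbedding } from '@meechi-ai/core';
// ...while UI components live in (and are imported from) the consuming app itself.
import { SourceEditor } from './components/SourceEditor'; // app-local path, not the package
```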
package/dist/lib/ai/embeddings.d.ts ADDED
@@ -0,0 +1,15 @@
+ /**
+ * Generates an embedding for a string of text.
+ * Now offloads to a Web Worker to ensure stability and UI responsiveness.
+ * PROTECTED BY GPU LOCK to prevent LLM/RAG collisions.
+ */
+ export declare function generateEmbedding(text: string): Promise<number[]>;
+ /**
+ * Splits text into semantic chunks.
+ * Standard Recursive Character splitting logic.
+ */
+ export declare function chunkText(text: string, maxChunkSize?: number, overlap?: number): string[];
+ /**
+ * Calculates cosine similarity between two vectors.
+ */
+ export declare function cosineSimilarity(v1: number[], v2: number[]): number;
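
Taken together, these three declarations describe a small retrieval pipeline: split, embed, then rank by cosine similarity, dot(v1, v2) / (|v1| |v2|). A minimal sketch assuming the exports behave as declared; the chunk-size and overlap values are illustrative, and embeddings are awaited one at a time since generateEmbedding is serialized behind the GPU lock anyway.

```js
import { generateEmbedding, chunkText, cosineSimilarity } from '@meechi-ai/core';

// Rank the chunks of a document against a query, highest similarity first.
async function rankChunks(doc, query) {
  const chunks = chunkText(doc, 512, 64); // illustrative maxChunkSize / overlap
  const queryVec = await generateEmbedding(query);
  const scored = [];
  for (const chunk of chunks) {
    const vec = await generateEmbedding(chunk); // worker-offloaded, GPU-locked
    scored.push({ chunk, score: cosineSimilarity(queryVec, vec) });
  }
  return scored.sort((a, b) => b.score - a.score);
}
```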