@meechi-ai/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/LICENSE +624 -0
  2. package/README.md +59 -0
  3. package/dist/components/CalendarView.d.ts +3 -0
  4. package/dist/components/CalendarView.js +72 -0
  5. package/dist/components/ChatInterface.d.ts +6 -0
  6. package/dist/components/ChatInterface.js +105 -0
  7. package/dist/components/FileExplorer.d.ts +9 -0
  8. package/dist/components/FileExplorer.js +757 -0
  9. package/dist/components/Icon.d.ts +9 -0
  10. package/dist/components/Icon.js +44 -0
  11. package/dist/components/SourceEditor.d.ts +13 -0
  12. package/dist/components/SourceEditor.js +50 -0
  13. package/dist/components/ThemeProvider.d.ts +5 -0
  14. package/dist/components/ThemeProvider.js +105 -0
  15. package/dist/components/ThemeSwitcher.d.ts +1 -0
  16. package/dist/components/ThemeSwitcher.js +16 -0
  17. package/dist/components/voice/VoiceInputArea.d.ts +14 -0
  18. package/dist/components/voice/VoiceInputArea.js +190 -0
  19. package/dist/components/voice/VoiceOverlay.d.ts +7 -0
  20. package/dist/components/voice/VoiceOverlay.js +71 -0
  21. package/dist/hooks/useMeechi.d.ts +16 -0
  22. package/dist/hooks/useMeechi.js +461 -0
  23. package/dist/hooks/useSync.d.ts +8 -0
  24. package/dist/hooks/useSync.js +87 -0
  25. package/dist/index.d.ts +14 -0
  26. package/dist/index.js +22 -0
  27. package/dist/lib/ai/embeddings.d.ts +15 -0
  28. package/dist/lib/ai/embeddings.js +128 -0
  29. package/dist/lib/ai/gpu-lock.d.ts +19 -0
  30. package/dist/lib/ai/gpu-lock.js +43 -0
  31. package/dist/lib/ai/llm.worker.d.ts +1 -0
  32. package/dist/lib/ai/llm.worker.js +7 -0
  33. package/dist/lib/ai/local-llm.d.ts +30 -0
  34. package/dist/lib/ai/local-llm.js +211 -0
  35. package/dist/lib/ai/manager.d.ts +20 -0
  36. package/dist/lib/ai/manager.js +51 -0
  37. package/dist/lib/ai/parsing.d.ts +12 -0
  38. package/dist/lib/ai/parsing.js +56 -0
  39. package/dist/lib/ai/prompts.d.ts +2 -0
  40. package/dist/lib/ai/prompts.js +2 -0
  41. package/dist/lib/ai/providers/gemini.d.ts +6 -0
  42. package/dist/lib/ai/providers/gemini.js +88 -0
  43. package/dist/lib/ai/providers/groq.d.ts +6 -0
  44. package/dist/lib/ai/providers/groq.js +42 -0
  45. package/dist/lib/ai/registry.d.ts +29 -0
  46. package/dist/lib/ai/registry.js +52 -0
  47. package/dist/lib/ai/tools.d.ts +2 -0
  48. package/dist/lib/ai/tools.js +106 -0
  49. package/dist/lib/ai/types.d.ts +22 -0
  50. package/dist/lib/ai/types.js +1 -0
  51. package/dist/lib/ai/worker.d.ts +1 -0
  52. package/dist/lib/ai/worker.js +60 -0
  53. package/dist/lib/audio/input.d.ts +13 -0
  54. package/dist/lib/audio/input.js +121 -0
  55. package/dist/lib/audio/stt.d.ts +13 -0
  56. package/dist/lib/audio/stt.js +119 -0
  57. package/dist/lib/audio/tts.d.ts +12 -0
  58. package/dist/lib/audio/tts.js +128 -0
  59. package/dist/lib/audio/vad.d.ts +18 -0
  60. package/dist/lib/audio/vad.js +117 -0
  61. package/dist/lib/colors.d.ts +16 -0
  62. package/dist/lib/colors.js +67 -0
  63. package/dist/lib/extensions.d.ts +35 -0
  64. package/dist/lib/extensions.js +24 -0
  65. package/dist/lib/hooks/use-voice-loop.d.ts +13 -0
  66. package/dist/lib/hooks/use-voice-loop.js +313 -0
  67. package/dist/lib/mcp/McpClient.d.ts +19 -0
  68. package/dist/lib/mcp/McpClient.js +42 -0
  69. package/dist/lib/mcp/McpRegistry.d.ts +47 -0
  70. package/dist/lib/mcp/McpRegistry.js +117 -0
  71. package/dist/lib/mcp/native/GroqVoiceNative.d.ts +21 -0
  72. package/dist/lib/mcp/native/GroqVoiceNative.js +29 -0
  73. package/dist/lib/mcp/native/LocalSyncNative.d.ts +19 -0
  74. package/dist/lib/mcp/native/LocalSyncNative.js +26 -0
  75. package/dist/lib/mcp/native/LocalVoiceNative.d.ts +19 -0
  76. package/dist/lib/mcp/native/LocalVoiceNative.js +27 -0
  77. package/dist/lib/mcp/native/MeechiNativeCore.d.ts +25 -0
  78. package/dist/lib/mcp/native/MeechiNativeCore.js +209 -0
  79. package/dist/lib/mcp/native/index.d.ts +10 -0
  80. package/dist/lib/mcp/native/index.js +10 -0
  81. package/dist/lib/mcp/types.d.ts +35 -0
  82. package/dist/lib/mcp/types.js +1 -0
  83. package/dist/lib/pdf.d.ts +10 -0
  84. package/dist/lib/pdf.js +142 -0
  85. package/dist/lib/settings.d.ts +48 -0
  86. package/dist/lib/settings.js +87 -0
  87. package/dist/lib/storage/db.d.ts +57 -0
  88. package/dist/lib/storage/db.js +45 -0
  89. package/dist/lib/storage/local.d.ts +28 -0
  90. package/dist/lib/storage/local.js +534 -0
  91. package/dist/lib/storage/migrate.d.ts +3 -0
  92. package/dist/lib/storage/migrate.js +122 -0
  93. package/dist/lib/storage/types.d.ts +66 -0
  94. package/dist/lib/storage/types.js +1 -0
  95. package/dist/lib/sync/client-drive.d.ts +9 -0
  96. package/dist/lib/sync/client-drive.js +69 -0
  97. package/dist/lib/sync/engine.d.ts +18 -0
  98. package/dist/lib/sync/engine.js +517 -0
  99. package/dist/lib/sync/google-drive.d.ts +52 -0
  100. package/dist/lib/sync/google-drive.js +183 -0
  101. package/dist/lib/sync/merge.d.ts +1 -0
  102. package/dist/lib/sync/merge.js +68 -0
  103. package/dist/lib/yjs/YjsProvider.d.ts +11 -0
  104. package/dist/lib/yjs/YjsProvider.js +33 -0
  105. package/dist/lib/yjs/graph.d.ts +11 -0
  106. package/dist/lib/yjs/graph.js +7 -0
  107. package/dist/lib/yjs/hooks.d.ts +7 -0
  108. package/dist/lib/yjs/hooks.js +37 -0
  109. package/dist/lib/yjs/store.d.ts +4 -0
  110. package/dist/lib/yjs/store.js +19 -0
  111. package/dist/lib/yjs/syncGraph.d.ts +1 -0
  112. package/dist/lib/yjs/syncGraph.js +38 -0
  113. package/dist/providers/theme-provider.d.ts +3 -0
  114. package/dist/providers/theme-provider.js +18 -0
  115. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  116. package/package.json +69 -0
package/dist/lib/ai/embeddings.js
@@ -0,0 +1,128 @@
+ // Web Worker Management for RAG
+ // Isolating Transformers.js in a worker prevents environment crashes in Next.js/Turbopack
+ import { gpuLock } from './gpu-lock';
+ let worker = null;
+ let terminateTimer = null;
+ const pendingRequests = new Map();
+ function terminateWorker() {
+     if (worker) {
+         console.log("[RAG] Auto-terminating worker to save resources...");
+         worker.terminate();
+         worker = null;
+     }
+ }
+ function getWorker() {
+     if (typeof window === 'undefined')
+         return null;
+     // Reset timer on access
+     if (terminateTimer)
+         clearTimeout(terminateTimer);
+     terminateTimer = setTimeout(terminateWorker, 30000); // 30s idle timeout
+     if (worker)
+         return worker;
+     try {
+         console.log("[RAG] Initializing AI Web Worker...");
+         // Next.js standard way to load workers
+         worker = new Worker(new URL('./worker.ts', import.meta.url));
+         worker.onmessage = (event) => {
+             const { id, embedding, error } = event.data;
+             const handler = pendingRequests.get(id);
+             if (!handler)
+                 return;
+             pendingRequests.delete(id);
+             if (error) {
+                 handler.reject(new Error(error));
+             }
+             else {
+                 handler.resolve(embedding);
+             }
+         };
+         worker.onerror = (err) => {
+             console.error("[RAG] Worker Error:", err);
+             // If worker crashes, clear it so next retry spawns new one
+             worker = null;
+         };
+         return worker;
+     }
+     catch (e) {
+         console.error("[RAG] Failed to initialize worker:", e);
+         return null;
+     }
+ }
+ /**
+  * Generates an embedding for a string of text.
+  * Now offloads to a Web Worker to ensure stability and UI responsiveness.
+  * PROTECTED BY GPU LOCK to prevent LLM/RAG collisions.
+  */
+ export async function generateEmbedding(text) {
+     // 1. Acquire Lock (Waits if Chat is active)
+     await gpuLock.acquire('RAG');
+     try {
+         const aiWorker = getWorker();
+         if (!aiWorker) {
+             throw new Error("RAG Worker not available (SSR or Init Failure)");
+         }
+         const id = Math.random().toString(36).substring(7);
+         return await new Promise((resolve, reject) => {
+             pendingRequests.set(id, { resolve, reject });
+             aiWorker.postMessage({ id, text });
+             // Timeout just in case
+             setTimeout(() => {
+                 if (pendingRequests.has(id)) {
+                     pendingRequests.delete(id);
+                     reject(new Error("Embedding generation timed out"));
+                 }
+             }, 30000);
+         });
+     }
+     finally {
+         // 2. Release Lock (Immediately allow Chat to resume)
+         gpuLock.release();
+     }
+ }
+ /**
+  * Splits text into semantic chunks.
+  * Standard Recursive Character splitting logic.
+  */
+ export function chunkText(text, maxChunkSize = 1000, overlap = 200) {
+     if (!text)
+         return [];
+     // Split into paragraphs first
+     const paragraphs = text.split(/\n\s*\n/);
+     const chunks = [];
+     let currentChunk = "";
+     for (const para of paragraphs) {
+         if ((currentChunk.length + para.length) <= maxChunkSize) {
+             currentChunk += (currentChunk ? "\n\n" : "") + para;
+         }
+         else {
+             if (currentChunk)
+                 chunks.push(currentChunk);
+             currentChunk = para;
+             // If a single paragraph is still too big, hard cut it
+             if (currentChunk.length > maxChunkSize) {
+                 // ... (simplified cut for now)
+                 const sub = currentChunk.substring(0, maxChunkSize);
+                 chunks.push(sub);
+                 currentChunk = currentChunk.substring(maxChunkSize - overlap);
+             }
+         }
+     }
+     if (currentChunk)
+         chunks.push(currentChunk);
+     return chunks;
+ }
+ /**
+  * Calculates cosine similarity between two vectors.
+  */
+ export function cosineSimilarity(v1, v2) {
+     let dotProduct = 0;
+     let norm1 = 0;
+     let norm2 = 0;
+     for (let i = 0; i < v1.length; i++) {
+         dotProduct += v1[i] * v2[i];
+         norm1 += v1[i] * v1[i];
+         norm2 += v2[i] * v2[i];
+     }
+     return dotProduct / (Math.sqrt(norm1) * Math.sqrt(norm2));
+ }
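
For reference, a minimal retrieval sketch built on these exports. It assumes generateEmbedding resolves to a plain number[] (the worker's embedding payload) and that it runs in the browser, since getWorker returns null under SSR; the chunk parameters match the defaults above.

import { generateEmbedding, chunkText, cosineSimilarity } from './embeddings';

// Rank document chunks against a query; a sketch, not part of the package.
export async function rankChunks(document: string, query: string) {
    const chunks = chunkText(document, 1000, 200);
    const queryVec = await generateEmbedding(query);
    const scored: { chunk: string; score: number }[] = [];
    // Sequential on purpose: each call queues behind the shared gpuLock,
    // and each request carries a 30s timeout.
    for (const chunk of chunks) {
        const vec = await generateEmbedding(chunk);
        scored.push({ chunk, score: cosineSimilarity(queryVec, vec) });
    }
    return scored.sort((a, b) => b.score - a.score); // highest similarity first
}
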
package/dist/lib/ai/gpu-lock.d.ts
@@ -0,0 +1,19 @@
+ /**
+  * A simple Mutex to ensure only one heavy GPU task runs at a time.
+  * Prevents "Context Lost" errors when WebLLM and TensorFlow.js try to
+  * allocate VRAM simultaneously.
+  */
+ export declare class GPUResourceLock {
+     private locked;
+     private queue;
+     /**
+      * Request access to the GPU. Returns a promise that resolves when access is granted.
+      * @param debugName - Name of the requester for logging
+      */
+     acquire(debugName: string): Promise<void>;
+     /**
+      * Release the GPU lock, allowing the next task in queue to proceed.
+      */
+     release(): void;
+ }
+ export declare const gpuLock: GPUResourceLock;
package/dist/lib/ai/gpu-lock.js
@@ -0,0 +1,43 @@
+ /**
+  * A simple Mutex to ensure only one heavy GPU task runs at a time.
+  * Prevents "Context Lost" errors when WebLLM and TensorFlow.js try to
+  * allocate VRAM simultaneously.
+  */
+ export class GPUResourceLock {
+     constructor() {
+         this.locked = false;
+         this.queue = [];
+     }
+     /**
+      * Request access to the GPU. Returns a promise that resolves when access is granted.
+      * @param debugName - Name of the requester for logging
+      */
+     async acquire(debugName) {
+         if (!this.locked) {
+             this.locked = true;
+             // console.log(`[GPU Lock] Acquired by ${debugName}`);
+             return Promise.resolve();
+         }
+         // console.log(`[GPU Lock] ${debugName} waiting for lock...`);
+         return new Promise((resolve) => {
+             this.queue.push(() => {
+                 this.locked = true;
+                 // console.log(`[GPU Lock] Acquired by ${debugName} (after wait)`);
+                 resolve();
+             });
+         });
+     }
+     /**
+      * Release the GPU lock, allowing the next task in queue to proceed.
+      */
+     release() {
+         this.locked = false;
+         // console.log(`[GPU Lock] Released`);
+         const next = this.queue.shift();
+         if (next) {
+             next();
+         }
+     }
+ }
+ // Global Singleton
+ export const gpuLock = new GPUResourceLock();
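
The acquire/release pair must always stay balanced, or every queued waiter stalls forever: release is never called automatically. Both call sites in this package (embeddings.js and local-llm.js) wrap the critical section in try/finally; a generic helper capturing that pattern might look like this (a sketch, not part of the package):

import { gpuLock } from './gpu-lock';

// Run any GPU-heavy task with exclusive access; releasing in `finally`
// guarantees queued waiters are woken even if the task throws.
export async function runExclusive<T>(debugName: string, task: () => Promise<T>): Promise<T> {
    await gpuLock.acquire(debugName);
    try {
        return await task();
    }
    finally {
        gpuLock.release();
    }
}
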
package/dist/lib/ai/llm.worker.d.ts
@@ -0,0 +1 @@
+ export {};
package/dist/lib/ai/llm.worker.js
@@ -0,0 +1,7 @@
+ import { WebWorkerMLCEngineHandler } from "@mlc-ai/web-llm";
+ // This is the worker script that will run the LLM in a background thread.
+ // It prevents the UI from freezing during generation.
+ const handler = new WebWorkerMLCEngineHandler();
+ self.onmessage = (msg) => {
+     handler.onmessage(msg);
+ };
package/dist/lib/ai/local-llm.d.ts
@@ -0,0 +1,30 @@
+ import { AIChatMessage, AITool } from "./types";
+ export declare class WebLLMService {
+     private engine;
+     private loading;
+     private currentModelId;
+     private initPromise;
+     private progressListeners;
+     private worker;
+     /**
+      * Connect to or Initialize the Engine via Web Worker
+      */
+     initialize(modelId: string, progressCallback?: (text: string) => void, config?: {
+         context_window?: number;
+     }): Promise<void>;
+     isInitialized(): boolean;
+     chat(messages: AIChatMessage[], onUpdate: (chunk: string) => void, options?: {
+         tools?: AITool[];
+         temperature?: number;
+         top_p?: number;
+         stop?: string[];
+     }): Promise<string>;
+     isLoading(): boolean;
+     getModelId(): string | null;
+     interrupt(): Promise<void>;
+     /**
+      * Completely unload the engine and terminate the worker to free memory.
+      */
+     unload(): Promise<void>;
+ }
+ export declare const localLlmService: WebLLMService;
package/dist/lib/ai/local-llm.js
@@ -0,0 +1,211 @@
+ var __asyncValues = (this && this.__asyncValues) || function (o) {
+     if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+     var m = o[Symbol.asyncIterator], i;
+     return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+     function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+     function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+ };
+ import { CreateWebWorkerMLCEngine } from "@mlc-ai/web-llm";
+ import { getModelConfig } from "./registry";
+ import { gpuLock } from "./gpu-lock";
+ export class WebLLMService {
+     constructor() {
+         this.engine = null;
+         this.loading = false;
+         this.currentModelId = null;
+         this.initPromise = null;
+         this.progressListeners = [];
+         this.worker = null;
+     }
+     /**
+      * Connect to or Initialize the Engine via Web Worker
+      */
+     async initialize(modelId, progressCallback, config = {}) {
+         if (this.engine && this.currentModelId === modelId) {
+             return;
+         }
+         // Check for Secure Context / Cache API (Required for WebLLM)
+         if (typeof window !== 'undefined' && !window.caches) {
+             throw new Error("Secure Context Required: The Cache API is missing. Please use HTTPS or 'localhost', or enable 'Insecure origins treated as secure' in chrome://flags.");
+         }
+         if (progressCallback) {
+             this.progressListeners.push(progressCallback);
+         }
+         // If initialization is in progress, return the existing promise
+         if (this.initPromise) {
+             return this.initPromise;
+         }
+         this.loading = true;
+         // Create a new initialization promise
+         this.initPromise = (async () => {
+             var _a, _b;
+             try {
+                 console.log("[Meechi] Initializing via Dedicated Web Worker...");
+                 // Ensure previous engine is unloaded to free GPU memory
+                 if (this.engine) {
+                     try {
+                         console.log("[Meechi] Unloading previous engine...");
+                         await this.engine.unload();
+                         // Terminate old worker to fully free memory
+                         if (this.worker) {
+                             this.worker.terminate();
+                             this.worker = null;
+                         }
+                     }
+                     catch (e) {
+                         console.warn("[Meechi] Failed to clean unload:", e);
+                     }
+                     this.engine = null;
+                 }
+                 // Get Model Config
+                 const modelConfig = getModelConfig(modelId);
+                 // PERFORMANCE TUNING: Mobile Detection
+                 // Mobile GPUs (iOS/Android) often crash with default 4096 context.
+                 // We aggressively cap it to 2048 or lower to ensure stability and speed.
+                 const isMobile = typeof navigator !== 'undefined' && /iPhone|iPad|iPod|Android/i.test(navigator.userAgent);
+                 const defaultContext = (modelConfig === null || modelConfig === void 0 ? void 0 : modelConfig.context_window) || 4096;
+                 // If mobile, cap at 2048 (or the requested config, if lower).
+                 // If desktop, use the robust default.
+                 let safeContext = config.context_window || defaultContext;
+                 if (isMobile && safeContext > 2048) {
+                     console.log("[Meechi] Mobile detected. Capping context window to 2048 for stability.");
+                     safeContext = 2048;
+                 }
+                 // Create new Worker
+                 this.worker = new Worker(new URL('./llm.worker.ts', import.meta.url), { type: 'module' });
+                 this.engine = await CreateWebWorkerMLCEngine(this.worker, modelId, {
+                     initProgressCallback: (progress) => {
+                         this.progressListeners.forEach(cb => cb(progress.text));
+                     },
+                 }, {
+                     context_window_size: safeContext,
+                 });
+                 this.currentModelId = modelId;
+             }
+             catch (error) {
+                 console.error("Failed to initialize WebLLM:", error);
+                 // FORCE RESET on error
+                 if (this.engine) {
+                     try {
+                         await this.engine.unload();
+                     }
+                     catch (_c) { }
+                     this.engine = null;
+                 }
+                 if (this.worker) {
+                     this.worker.terminate();
+                     this.worker = null;
+                 }
+                 this.currentModelId = null;
+                 // Check for GPU Context Lost
+                 if (((_a = error.message) === null || _a === void 0 ? void 0 : _a.includes("Context lost")) || ((_b = error.message) === null || _b === void 0 ? void 0 : _b.includes("valid external Instance"))) {
+                     console.warn("GPU Context Lost detected during init. Clearing state...");
+                 }
+                 throw error;
+             }
+             finally {
+                 this.loading = false;
+                 this.initPromise = null; // Clear promise so next attempt works
+                 this.progressListeners = []; // Clear listeners
+             }
+         })();
+         return this.initPromise;
+     }
+     isInitialized() {
+         return !!this.engine;
+     }
+     async chat(messages, onUpdate, options = {}) {
+         var _a, e_1, _b, _c;
+         var _d, _e, _f;
+         if (!this.engine) {
+             throw new Error("Local Engine not initialized");
+         }
+         // ACQUIRE GPU LOCK
+         // This stops RAG from trying to embed while we are generating tokens
+         await gpuLock.acquire('Chat');
+         let fullResponse = "";
+         try {
+             const completion = await this.engine.chat.completions.create({
+                 messages: messages,
+                 stream: true,
+                 temperature: (_d = options.temperature) !== null && _d !== void 0 ? _d : 0.7,
+                 top_p: (_e = options.top_p) !== null && _e !== void 0 ? _e : 0.9,
+                 stop: options.stop,
+                 frequency_penalty: 0.1, // Slight penalty to prevent infinite loops ("The user has asked...")
+             });
+             try {
+                 for (var _g = true, completion_1 = __asyncValues(completion), completion_1_1; completion_1_1 = await completion_1.next(), _a = completion_1_1.done, !_a; _g = true) {
+                     _c = completion_1_1.value;
+                     _g = false;
+                     const chunk = _c;
+                     const delta = ((_f = chunk.choices[0]) === null || _f === void 0 ? void 0 : _f.delta.content) || "";
+                     if (delta) {
+                         fullResponse += delta;
+                         onUpdate(delta);
+                     }
+                 }
+             }
+             catch (e_1_1) { e_1 = { error: e_1_1 }; }
+             finally {
+                 try {
+                     if (!_g && !_a && (_b = completion_1.return)) await _b.call(completion_1);
+                 }
+                 finally { if (e_1) throw e_1.error; }
+             }
+         }
+         catch (e) {
+             console.error("WebLLM Chat Error:", e);
+             // Detect GPU Context Loss or Device Loss
+             const errMsg = e.message || "";
+             if (errMsg.includes("Context lost") || errMsg.includes("Device was lost") || errMsg.includes("Instance reference no longer exists") || errMsg.includes("dropped in popErrorScope") || errMsg.includes("already been disposed")) {
+                 console.warn("GPU Crash detected. Resetting WebLLM engine state...");
+                 this.engine = null;
+                 this.currentModelId = null;
+                 if (this.worker) {
+                     this.worker.terminate();
+                     this.worker = null;
+                 }
+                 // Propagate a specific error for the UI to handle (e.g., suggest reload)
+                 throw new Error("GPU_CRASH");
+             }
+             throw e;
+         }
+         finally {
+             // RELEASE GPU LOCK
+             gpuLock.release();
+         }
+         return fullResponse;
+     }
+     isLoading() {
+         return this.loading;
+     }
+     getModelId() {
+         return this.currentModelId;
+     }
+     async interrupt() {
+         if (this.engine) {
+             await this.engine.interruptGenerate();
+         }
+     }
+     /**
+      * Completely unload the engine and terminate the worker to free memory.
+      */
+     async unload() {
+         if (this.engine) {
+             try {
+                 console.log("[Meechi] Unloading engine explicitly...");
+                 await this.engine.unload();
+             }
+             catch (e) {
+                 console.warn("[Meechi] Error unloading engine:", e);
+             }
+             this.engine = null;
+         }
+         if (this.worker) {
+             this.worker.terminate();
+             this.worker = null;
+         }
+         this.currentModelId = null;
+     }
+ }
+ export const localLlmService = new WebLLMService();
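
End to end, the service is driven in three steps: initialize (downloads and compiles the model, streaming progress text), chat (streams deltas through onUpdate and resolves with the full string), and optionally unload. A hedged usage sketch; the model ID is illustrative and must be one the registry/WebLLM knows, and the message shape ({ role, content }) is inferred from manager.js:

import { localLlmService } from './local-llm';

export async function askLocally(prompt: string, onToken: (t: string) => void) {
    // Illustrative model ID; substitute any ID known to getModelConfig/WebLLM.
    await localLlmService.initialize(
        'Llama-3.2-1B-Instruct-q4f16_1-MLC',
        (status) => console.log('[load]', status),
    );
    try {
        return await localLlmService.chat(
            [{ role: 'user', content: prompt }],
            onToken, // receives each streamed delta
            { temperature: 0.7 },
        );
    }
    catch (e) {
        if (e instanceof Error && e.message === 'GPU_CRASH') {
            // Engine state was already reset by chat(); suggest a reload/retry in the UI.
        }
        throw e;
    }
}
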
package/dist/lib/ai/manager.d.ts
@@ -0,0 +1,20 @@
+ import { AIChatMessage, AITool, AICompletion, AIProvider } from "./types";
+ export interface AIConfig {
+     activeProviderId: string;
+     providers: {
+         id: string;
+         apiKey?: string;
+         model?: string;
+     }[];
+     identity?: {
+         name: string;
+         tone: string;
+     };
+ }
+ export declare class AIManager {
+     private providers;
+     constructor();
+     registerProvider(provider: AIProvider): void;
+     chat(userMessage: string, systemContext: string, history: AIChatMessage[], config: AIConfig, tools?: AITool[]): Promise<AICompletion>;
+ }
+ export declare const aiManager: AIManager;
package/dist/lib/ai/manager.js
@@ -0,0 +1,51 @@
+ import { SYSTEM_PROMPT } from "./prompts";
+ import { GroqProvider } from "./providers/groq";
+ import { GeminiProvider } from "./providers/gemini";
+ export class AIManager {
+     constructor() {
+         this.providers = new Map();
+         // Register default providers
+         this.registerProvider(new GroqProvider());
+         this.registerProvider(new GeminiProvider());
+     }
+     registerProvider(provider) {
+         this.providers.set(provider.id, provider);
+     }
+     async chat(userMessage, systemContext, history, config, tools) {
+         // 1. Determine Primary Provider
+         const primaryId = config.activeProviderId || 'groq';
+         let providerConfig = config.providers.find(p => p.id === primaryId);
+         let provider = this.providers.get(primaryId);
+         // CHECK API KEY for Cloud Providers
+         if (primaryId === 'openai' && !(providerConfig === null || providerConfig === void 0 ? void 0 : providerConfig.apiKey) && !process.env.OPENAI_API_KEY) {
+             console.warn("[Cloud] No OpenAI Key found.");
+             return {
+                 content: "I am unable to connect to the cloud because no API Key is configured. Please check your settings or restart the Local AI.",
+                 usage: { total_tokens: 0 }
+             };
+         }
+         if (!provider) {
+             console.warn(`Provider ${primaryId} not found. Falling back to Groq.`);
+             provider = this.providers.get('groq');
+         }
+         // 2. Construct Messages
+         // Inject Identity/Tone into System Prompt
+         const systemPrompt = `
+ ${SYSTEM_PROMPT}
+ 
+ Context:
+ ${systemContext}
+ `.trim();
+         const messages = [
+             { role: 'system', content: systemPrompt },
+             ...history,
+             { role: 'user', content: userMessage }
+         ];
+         // 3. Attempt Chat
+         // We throw errors to let the client handle fallback (e.g. Local AI)
+         return await provider.chat((providerConfig === null || providerConfig === void 0 ? void 0 : providerConfig.model) || "", messages, tools, providerConfig === null || providerConfig === void 0 ? void 0 : providerConfig.apiKey);
+     }
+ }
+ export const aiManager = new AIManager();
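
Putting the manager together: callers pass the user message, a context string (appended under "Context:" in the system prompt), prior history, and an AIConfig selecting the provider. A hedged sketch; the API key and model name are placeholders:

import { aiManager, AIConfig } from './manager';

const config: AIConfig = {
    activeProviderId: 'groq',
    providers: [
        { id: 'groq', apiKey: 'gsk_************', model: 'llama-3.1-8b-instant' }, // placeholders
    ],
};

export async function ask(userMessage: string) {
    const completion = await aiManager.chat(
        userMessage,
        'Active note: groceries.md', // systemContext
        [],                          // no prior history
        config,
    );
    return completion.content;
}
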
package/dist/lib/ai/parsing.d.ts
@@ -0,0 +1,12 @@
+ export interface ToolCall {
+     name: string;
+     args: any;
+     raw: string;
+     error?: string;
+ }
+ /**
+  * Parses tool calls from AI response text.
+  * Supports XML format: <function="name">args</function>
+  * Robust to common LLM formatting errors.
+  */
+ export declare function parseToolCalls(content: string): ToolCall[];
package/dist/lib/ai/parsing.js
@@ -0,0 +1,56 @@
+ /**
+  * Parses tool calls from AI response text.
+  * Supports XML format: <function="name">args</function>
+  * Robust to common LLM formatting errors.
+  */
+ export function parseToolCalls(content) {
+     const tools = [];
+     // Regex for <function="name">{args}</function> or <function name='name'>{args}</function>
+     // Improved to be more permissive with whitespace and attributes
+     const toolRegex = /<function(?:=\s*["']?([^"'>]+)["']?|\s+name=["']?([^"'>]+)["']?)\s*>([\s\S]*?)<\/function>/gi;
+     let match;
+     while ((match = toolRegex.exec(content)) !== null) {
+         // match[1] or match[2] is the name
+         const name = (match[1] || match[2]).replace(/["']/g, "").trim();
+         const argsStr = match[3].trim();
+         try {
+             // Attempt 1: Direct JSON parse
+             const args = JSON.parse(argsStr);
+             tools.push({ name, args, raw: match[0] });
+         }
+         catch (e) {
+             // Attempt 2: Sanitize Common JSON Errors
+             try {
+                 // 1. Handle Newlines in strings (LLMs often put literal newlines in JSON strings)
+                 // 2. Handle Markdown code blocks if they wrapped the JSON inside the XML (rare but happens)
+                 let cleaned = argsStr
+                     .replace(/^```json\s*/i, '')
+                     .replace(/\s*```$/, '');
+                 // Sanitize newlines inside double quotes
+                 cleaned = cleaned.replace(/"((?:[^"\\]|\\.)*)"/g, (m) => {
+                     return m.replace(/\n/g, "\\n").replace(/\r/g, "");
+                 });
+                 const args = JSON.parse(cleaned);
+                 tools.push({ name, args, raw: match[0] });
+             }
+             catch (e2) {
+                 // Attempt 3: Aggressive Fixes (Last resort)
+                 try {
+                     let fixed = argsStr
+                         .replace(/'/g, '"') // Helper for single quotes
+                         .replace(/,\s*}/g, '}') // Trailing commas
+                         .replace(/([{,]\s*)([a-zA-Z0-9_]+)\s*:/g, '$1"$2":'); // Unquoted keys
+                     // Re-sanitize strings
+                     fixed = fixed.replace(/"((?:[^"\\]|\\.)*)"/g, (m) => m.replace(/\n/g, "\\n"));
+                     const args = JSON.parse(fixed);
+                     tools.push({ name, args, raw: match[0] });
+                 }
+                 catch (e3) {
+                     console.error(`Failed to parse args for tool ${name}`, argsStr);
+                     tools.push({ name, args: {}, error: "Invalid JSON arguments", raw: match[0] });
+                 }
+             }
+         }
+     }
+     return tools;
+ }
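
A worked example of the three-stage repair: the payload below uses single quotes and a trailing comma, so attempts 1 and 2 fail and the aggressive fixes in attempt 3 produce valid JSON. A sketch, not part of the package:

import { parseToolCalls } from './parsing';

// Single-quoted keys/values plus a trailing comma: repaired by attempt 3.
const response = `Saving that for you. <function="create_note">{'title': 'Groceries', 'body': 'milk, eggs',}</function>`;

const calls = parseToolCalls(response);
// -> [{ name: 'create_note', args: { title: 'Groceries', body: 'milk, eggs' }, raw: '<function="create_note">...' }]
for (const call of calls) {
    if (call.error) {
        continue; // args could not be repaired; the raw text is still available
    }
    console.log(call.name, call.args);
}
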
package/dist/lib/ai/prompts.d.ts
@@ -0,0 +1,2 @@
+ export declare const SYSTEM_PROMPT = "You are Meechi, an intelligent AI assistant living on the user's computer. You are helpful, harmless, and honest.";
+ export declare const RESEARCH_SYSTEM_PROMPT = "You are a Research Assistant. Analyze the provided context and answer questions based on the data.";
package/dist/lib/ai/prompts.js
@@ -0,0 +1,2 @@
+ export const SYSTEM_PROMPT = "You are Meechi, an intelligent AI assistant living on the user's computer. You are helpful, harmless, and honest.";
+ export const RESEARCH_SYSTEM_PROMPT = "You are a Research Assistant. Analyze the provided context and answer questions based on the data.";
package/dist/lib/ai/providers/gemini.d.ts
@@ -0,0 +1,6 @@
+ import { AIProvider, AIChatMessage, AITool, AICompletion } from "../types";
+ export declare class GeminiProvider implements AIProvider {
+     id: string;
+     name: string;
+     chat(model: string, messages: AIChatMessage[], tools?: AITool[], apiKey?: string): Promise<AICompletion>;
+ }
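
The provider declaration above shows the whole integration surface: an id, a display name, and one chat method. Adding a provider is therefore just a class plus aiManager.registerProvider. A hedged sketch assuming a file alongside the built-in providers; the completion shape ({ content, usage }) is inferred from manager.js and the echo logic is purely illustrative:

import { AIProvider, AIChatMessage, AITool, AICompletion } from '../types';
import { aiManager } from '../manager';

// Debug provider that echoes the last message back; not part of the package.
export class EchoProvider implements AIProvider {
    id = 'echo';
    name = 'Echo (debug)';
    async chat(_model: string, messages: AIChatMessage[], _tools?: AITool[], _apiKey?: string): Promise<AICompletion> {
        const last = messages[messages.length - 1];
        return { content: `echo: ${last?.content ?? ''}`, usage: { total_tokens: 0 } };
    }
}

aiManager.registerProvider(new EchoProvider());
// Then select it via config: { activeProviderId: 'echo', providers: [{ id: 'echo' }] }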