@meechi-ai/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/LICENSE +624 -0
  2. package/README.md +59 -0
  3. package/dist/components/CalendarView.d.ts +3 -0
  4. package/dist/components/CalendarView.js +72 -0
  5. package/dist/components/ChatInterface.d.ts +6 -0
  6. package/dist/components/ChatInterface.js +105 -0
  7. package/dist/components/FileExplorer.d.ts +9 -0
  8. package/dist/components/FileExplorer.js +757 -0
  9. package/dist/components/Icon.d.ts +9 -0
  10. package/dist/components/Icon.js +44 -0
  11. package/dist/components/SourceEditor.d.ts +13 -0
  12. package/dist/components/SourceEditor.js +50 -0
  13. package/dist/components/ThemeProvider.d.ts +5 -0
  14. package/dist/components/ThemeProvider.js +105 -0
  15. package/dist/components/ThemeSwitcher.d.ts +1 -0
  16. package/dist/components/ThemeSwitcher.js +16 -0
  17. package/dist/components/voice/VoiceInputArea.d.ts +14 -0
  18. package/dist/components/voice/VoiceInputArea.js +190 -0
  19. package/dist/components/voice/VoiceOverlay.d.ts +7 -0
  20. package/dist/components/voice/VoiceOverlay.js +71 -0
  21. package/dist/hooks/useMeechi.d.ts +16 -0
  22. package/dist/hooks/useMeechi.js +461 -0
  23. package/dist/hooks/useSync.d.ts +8 -0
  24. package/dist/hooks/useSync.js +87 -0
  25. package/dist/index.d.ts +14 -0
  26. package/dist/index.js +22 -0
  27. package/dist/lib/ai/embeddings.d.ts +15 -0
  28. package/dist/lib/ai/embeddings.js +128 -0
  29. package/dist/lib/ai/gpu-lock.d.ts +19 -0
  30. package/dist/lib/ai/gpu-lock.js +43 -0
  31. package/dist/lib/ai/llm.worker.d.ts +1 -0
  32. package/dist/lib/ai/llm.worker.js +7 -0
  33. package/dist/lib/ai/local-llm.d.ts +30 -0
  34. package/dist/lib/ai/local-llm.js +211 -0
  35. package/dist/lib/ai/manager.d.ts +20 -0
  36. package/dist/lib/ai/manager.js +51 -0
  37. package/dist/lib/ai/parsing.d.ts +12 -0
  38. package/dist/lib/ai/parsing.js +56 -0
  39. package/dist/lib/ai/prompts.d.ts +2 -0
  40. package/dist/lib/ai/prompts.js +2 -0
  41. package/dist/lib/ai/providers/gemini.d.ts +6 -0
  42. package/dist/lib/ai/providers/gemini.js +88 -0
  43. package/dist/lib/ai/providers/groq.d.ts +6 -0
  44. package/dist/lib/ai/providers/groq.js +42 -0
  45. package/dist/lib/ai/registry.d.ts +29 -0
  46. package/dist/lib/ai/registry.js +52 -0
  47. package/dist/lib/ai/tools.d.ts +2 -0
  48. package/dist/lib/ai/tools.js +106 -0
  49. package/dist/lib/ai/types.d.ts +22 -0
  50. package/dist/lib/ai/types.js +1 -0
  51. package/dist/lib/ai/worker.d.ts +1 -0
  52. package/dist/lib/ai/worker.js +60 -0
  53. package/dist/lib/audio/input.d.ts +13 -0
  54. package/dist/lib/audio/input.js +121 -0
  55. package/dist/lib/audio/stt.d.ts +13 -0
  56. package/dist/lib/audio/stt.js +119 -0
  57. package/dist/lib/audio/tts.d.ts +12 -0
  58. package/dist/lib/audio/tts.js +128 -0
  59. package/dist/lib/audio/vad.d.ts +18 -0
  60. package/dist/lib/audio/vad.js +117 -0
  61. package/dist/lib/colors.d.ts +16 -0
  62. package/dist/lib/colors.js +67 -0
  63. package/dist/lib/extensions.d.ts +35 -0
  64. package/dist/lib/extensions.js +24 -0
  65. package/dist/lib/hooks/use-voice-loop.d.ts +13 -0
  66. package/dist/lib/hooks/use-voice-loop.js +313 -0
  67. package/dist/lib/mcp/McpClient.d.ts +19 -0
  68. package/dist/lib/mcp/McpClient.js +42 -0
  69. package/dist/lib/mcp/McpRegistry.d.ts +47 -0
  70. package/dist/lib/mcp/McpRegistry.js +117 -0
  71. package/dist/lib/mcp/native/GroqVoiceNative.d.ts +21 -0
  72. package/dist/lib/mcp/native/GroqVoiceNative.js +29 -0
  73. package/dist/lib/mcp/native/LocalSyncNative.d.ts +19 -0
  74. package/dist/lib/mcp/native/LocalSyncNative.js +26 -0
  75. package/dist/lib/mcp/native/LocalVoiceNative.d.ts +19 -0
  76. package/dist/lib/mcp/native/LocalVoiceNative.js +27 -0
  77. package/dist/lib/mcp/native/MeechiNativeCore.d.ts +25 -0
  78. package/dist/lib/mcp/native/MeechiNativeCore.js +209 -0
  79. package/dist/lib/mcp/native/index.d.ts +10 -0
  80. package/dist/lib/mcp/native/index.js +10 -0
  81. package/dist/lib/mcp/types.d.ts +35 -0
  82. package/dist/lib/mcp/types.js +1 -0
  83. package/dist/lib/pdf.d.ts +10 -0
  84. package/dist/lib/pdf.js +142 -0
  85. package/dist/lib/settings.d.ts +48 -0
  86. package/dist/lib/settings.js +87 -0
  87. package/dist/lib/storage/db.d.ts +57 -0
  88. package/dist/lib/storage/db.js +45 -0
  89. package/dist/lib/storage/local.d.ts +28 -0
  90. package/dist/lib/storage/local.js +534 -0
  91. package/dist/lib/storage/migrate.d.ts +3 -0
  92. package/dist/lib/storage/migrate.js +122 -0
  93. package/dist/lib/storage/types.d.ts +66 -0
  94. package/dist/lib/storage/types.js +1 -0
  95. package/dist/lib/sync/client-drive.d.ts +9 -0
  96. package/dist/lib/sync/client-drive.js +69 -0
  97. package/dist/lib/sync/engine.d.ts +18 -0
  98. package/dist/lib/sync/engine.js +517 -0
  99. package/dist/lib/sync/google-drive.d.ts +52 -0
  100. package/dist/lib/sync/google-drive.js +183 -0
  101. package/dist/lib/sync/merge.d.ts +1 -0
  102. package/dist/lib/sync/merge.js +68 -0
  103. package/dist/lib/yjs/YjsProvider.d.ts +11 -0
  104. package/dist/lib/yjs/YjsProvider.js +33 -0
  105. package/dist/lib/yjs/graph.d.ts +11 -0
  106. package/dist/lib/yjs/graph.js +7 -0
  107. package/dist/lib/yjs/hooks.d.ts +7 -0
  108. package/dist/lib/yjs/hooks.js +37 -0
  109. package/dist/lib/yjs/store.d.ts +4 -0
  110. package/dist/lib/yjs/store.js +19 -0
  111. package/dist/lib/yjs/syncGraph.d.ts +1 -0
  112. package/dist/lib/yjs/syncGraph.js +38 -0
  113. package/dist/providers/theme-provider.d.ts +3 -0
  114. package/dist/providers/theme-provider.js +18 -0
  115. package/dist/tsconfig.lib.tsbuildinfo +1 -0
  116. package/package.json +69 -0
package/dist/lib/ai/providers/gemini.js
@@ -0,0 +1,88 @@
+ import { GoogleGenerativeAI } from "@google/generative-ai";
+ export class GeminiProvider {
+     constructor() {
+         this.id = "gemini";
+         this.name = "Google Gemini";
+     }
+     async chat(model, messages, tools, apiKey) {
+         if (!apiKey) {
+             apiKey = process.env.GEMINI_API_KEY;
+         }
+         if (!apiKey) {
+             throw new Error("Gemini API Key is missing. Please set it in Settings or .env");
+         }
+         const genAI = new GoogleGenerativeAI(apiKey);
+         const geminiModel = genAI.getGenerativeModel({
+             model: model || "gemini-flash-latest"
+         });
+         // Convert messages to Gemini's format.
+         // Note: Gemini requires the system prompt to be passed separately from the chat history.
+         let systemInstruction = "";
+         const history = [];
+         for (const msg of messages) {
+             if (msg.role === 'system') {
+                 systemInstruction += msg.content + "\n";
+             }
+             else if (msg.role === 'user') {
+                 history.push({ role: 'user', parts: [{ text: msg.content }] });
+             }
+             else if (msg.role === 'assistant') {
+                 history.push({ role: 'model', parts: [{ text: msg.content }] });
+             }
+         }
+         // Generation config
+         const generationConfig = {
+             temperature: 0.7,
+             topK: 1,
+             topP: 1,
+             maxOutputTokens: 2048,
+         };
+         // Map tools to Gemini's functionDeclarations format
+         const geminiTools = tools?.map(t => ({
+             functionDeclarations: [{
+                 name: t.function.name,
+                 description: t.function.description,
+                 parameters: t.function.parameters
+             }]
+         }));
+         try {
+             const chatSession = geminiModel.startChat({
+                 history: history.slice(0, -1), // All but the last message
+                 generationConfig,
+                 systemInstruction: systemInstruction ? { role: 'system', parts: [{ text: systemInstruction }] } : undefined,
+                 tools: geminiTools
+             });
+             const lastMsg = history[history.length - 1];
+             const result = await chatSession.sendMessage(lastMsg.parts[0].text);
+             const response = await result.response;
+             // Check for function calls
+             const functionCalls = response.functionCalls();
+             if (functionCalls && functionCalls.length > 0) {
+                 return {
+                     content: "",
+                     tool_calls: functionCalls.map((fc) => ({
+                         function: {
+                             name: fc.name,
+                             arguments: JSON.stringify(fc.args)
+                         }
+                     })),
+                     usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 }
+                 };
+             }
+             const text = response.text();
+             return {
+                 content: text,
+                 usage: {
+                     // Usage is stubbed for now; Gemini does not always return a usage object in this standard shape
+                     prompt_tokens: 0,
+                     completion_tokens: 0,
+                     total_tokens: 0
+                 }
+             };
+         }
+         catch (error) {
+             console.error("Gemini Provider Error:", error);
+             throw new Error(`Gemini Error: ${error.message}`);
+         }
+     }
+ }
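
For orientation, the provider above can be driven directly; a minimal sketch, assuming GeminiProvider is re-exported from the package root (the actual export surface is defined in dist/index.js, which is not expanded here):

    import { GeminiProvider } from "@meechi-ai/core"; // assumed export path

    const provider = new GeminiProvider();
    // System messages are folded into Gemini's systemInstruction; 'assistant'
    // messages are mapped to Gemini's 'model' role (see the loop above).
    const completion = await provider.chat(
        "gemini-1.5-flash",
        [
            { role: "system", content: "You are a concise note-taking assistant." },
            { role: "user", content: "Summarize this note in one sentence." },
        ],
        undefined, // no tools: plain text completion
        process.env.GEMINI_API_KEY
    );
    console.log(completion.content);
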
package/dist/lib/ai/providers/groq.d.ts
@@ -0,0 +1,6 @@
+ import { AIProvider, AIChatMessage, AITool, AICompletion } from "../types";
+ export declare class GroqProvider implements AIProvider {
+     id: string;
+     name: string;
+     chat(model: string, messages: AIChatMessage[], tools?: AITool[], apiKey?: string): Promise<AICompletion>;
+ }
package/dist/lib/ai/providers/groq.js
@@ -0,0 +1,42 @@
+ import Groq from "groq-sdk";
+ export class GroqProvider {
+     constructor() {
+         this.id = "groq";
+         this.name = "Groq";
+     }
+     async chat(model, messages, tools, apiKey) {
+         if (!apiKey) {
+             // Fallback to Env if not provided explicitly
+             apiKey = process.env.GROQ_API_KEY || process.env.NEXT_PUBLIC_GROQ_API_KEY;
+         }
+         if (!apiKey) {
+             throw new Error("Groq API Key is missing. Please set it in Settings or .env");
+         }
+         const groq = new Groq({ apiKey });
+         console.log(`[Groq] Sending tools: ${tools?.map(t => t.function.name).join(', ') || 'None'}`);
+         try {
+             const chatCompletion = await groq.chat.completions.create({
+                 messages: messages,
+                 model: model || "llama-3.3-70b-versatile",
+                 temperature: 0.7,
+                 max_tokens: 1024,
+                 top_p: 1,
+                 stream: false,
+                 stop: null,
+                 tools: tools,
+                 tool_choice: tools && tools.length > 0 ? "auto" : "none"
+             });
+             const choice = chatCompletion.choices[0];
+             const message = choice?.message;
+             return {
+                 content: message?.content || "",
+                 tool_calls: message?.tool_calls,
+                 usage: chatCompletion.usage
+             };
+         }
+         catch (error) {
+             console.error("Groq Provider Error:", error);
+             throw new Error(`Groq Error: ${error.message}`);
+         }
+     }
+ }
package/dist/lib/ai/registry.d.ts
@@ -0,0 +1,29 @@
+ export interface ModelConfig {
+     id: string;
+     name: string;
+     family: 'llama' | 'gemma' | 'phi' | 'generic';
+     vram_required_mb: number;
+     low_power: boolean;
+     context_window: number;
+ }
+ interface LocalModelConfig extends ModelConfig {
+     id: string;
+     name: string;
+     family: 'llama' | 'gemma' | 'phi' | 'generic';
+     vram_required_mb: number;
+     low_power: boolean;
+     context_window: number;
+ }
+ interface CloudModelConfig {
+     id: string;
+     name: string;
+     context_window: number;
+ }
+ export declare const AVAILABLE_MODELS: {
+     local: LocalModelConfig[];
+     groq: CloudModelConfig[];
+     gemini: CloudModelConfig[];
+ };
+ export declare function getModelConfig(modelId: string): LocalModelConfig | undefined;
+ export declare function getSystemPromptForModel(modelId: string): string;
+ export {};
package/dist/lib/ai/registry.js
@@ -0,0 +1,52 @@
+ import { SYSTEM_PROMPT } from './prompts';
+ export const AVAILABLE_MODELS = {
+     local: [
+         {
+             id: 'Llama-3.2-1B-Instruct-q4f16_1-MLC',
+             name: 'Llama 3.2 1B (Fastest)',
+             family: 'llama',
+             vram_required_mb: 1500,
+             low_power: true,
+             context_window: 4096
+         },
+         {
+             id: 'Llama-3.2-3B-Instruct-q4f16_1-MLC',
+             name: 'Llama 3.2 3B (Balanced)',
+             family: 'llama',
+             vram_required_mb: 3000,
+             low_power: false,
+             context_window: 8192
+         },
+         {
+             id: 'TinyLlama-1.1B-Chat-v1.0-q4f16_1-MLC',
+             name: 'TinyLlama 1.1B',
+             family: 'llama',
+             vram_required_mb: 1000,
+             low_power: true,
+             context_window: 2048
+         }
+     ],
+     groq: [
+         { id: 'llama-3.3-70b-versatile', name: 'Llama 3.3 70B (Versatile)', context_window: 32768 },
+         { id: 'llama-3.1-8b-instant', name: 'Llama 3.1 8B (Instant)', context_window: 32768 },
+         { id: 'mixtral-8x7b-32768', name: 'Mixtral 8x7B', context_window: 32768 },
+         { id: 'gemma-2-9b-it', name: 'Gemma 2 9B', context_window: 8192 }
+     ],
+     gemini: [
+         { id: 'gemini-1.5-flash', name: 'Gemini 1.5 Flash (Fast)', context_window: 1000000 },
+         { id: 'gemini-1.5-pro', name: 'Gemini 1.5 Pro (Powerful)', context_window: 2000000 },
+         { id: 'gemini-1.0-pro', name: 'Gemini 1.0 Pro', context_window: 32000 }
+     ]
+ };
+ export function getModelConfig(modelId) {
+     return AVAILABLE_MODELS.local.find(m => m.id === modelId);
+ }
+ // Factory for getting the right System Prompt based on Model Family
+ export function getSystemPromptForModel(modelId) {
+     const config = getModelConfig(modelId);
+     if (!config)
+         return SYSTEM_PROMPT; // Default
+     // We can specialize prompts here if needed in the future
+     // For now, Llama family works well with the default XML prompt
+     return SYSTEM_PROMPT;
+ }
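
The registry above is plain data, so model selection can be expressed as a filter over it; a minimal sketch (the VRAM budget is an illustrative number, not something the package computes here):

    import { AVAILABLE_MODELS } from "./registry";

    // Pick the largest local model that fits the available VRAM.
    const budgetMb = 2000; // hypothetical device budget
    const candidate = AVAILABLE_MODELS.local
        .filter(m => m.vram_required_mb <= budgetMb)
        .sort((a, b) => b.vram_required_mb - a.vram_required_mb)[0];
    console.log(candidate?.id); // 'Llama-3.2-1B-Instruct-q4f16_1-MLC' (1500 MB fits, 3000 MB does not)
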
package/dist/lib/ai/tools.d.ts
@@ -0,0 +1,2 @@
+ import { AITool } from "./types";
+ export declare const TOOLS: AITool[];
package/dist/lib/ai/tools.js
@@ -0,0 +1,106 @@
+ export const TOOLS = [
+     {
+         type: "function",
+         "function": {
+             name: "update_file",
+             description: "Update the content of an existing file. ONLY use this when the user explicitly asks to edit, modify, or append to a note. DO NOT use this spontaneously.",
+             parameters: {
+                 type: "object",
+                 properties: {
+                     filePath: {
+                         type: "string",
+                         description: "The path of the file to update.",
+                     },
+                     newContent: {
+                         type: "string",
+                         description: "The FULL new content of the file.",
+                     },
+                 },
+                 required: ["filePath", "newContent"],
+             },
+         },
+     },
+     {
+         type: "function",
+         "function": {
+             name: "create_file",
+             description: "Create a new file. ONLY use this when the user explicitly provides content to save or asks to create a specific document. Never create files spontaneously for 'testing' or 'logging'.",
+             parameters: {
+                 type: "object",
+                 properties: {
+                     filePath: {
+                         type: "string",
+                         description: "The path for the new file.",
+                     },
+                     content: {
+                         type: "string",
+                         description: "The content of the new file.",
+                     },
+                 },
+                 required: ["filePath", "content"],
+             },
+         },
+     },
+     {
+         type: "function",
+         "function": {
+             name: "update_user_settings",
+             description: "Update the user's Name or Tone. ONLY use this when the user explicitly asks to change their name or how the AI sounds. NEVER use this to 'guess' or 'set' a name based on general talk.",
+             parameters: {
+                 type: "object",
+                 properties: {
+                     name: {
+                         type: "string",
+                         description: "The user's preferred name."
+                     },
+                     tone: {
+                         type: "string",
+                         description: "The preferred tone for the AI."
+                     }
+                 },
+             },
+         },
+     },
+     {
+         type: "function",
+         "function": {
+             name: "move_file",
+             description: "Move or rename a file. Use this to organize files into folders (Topics) or rename them.",
+             parameters: {
+                 type: "object",
+                 properties: {
+                     sourcePath: {
+                         type: "string",
+                         description: "The current path of the file (e.g. 'temp/myfile.pdf')."
+                     },
+                     destinationPath: {
+                         type: "string",
+                         description: "The new path for the file (e.g. 'misc/Work/myfile.pdf')."
+                     }
+                 },
+                 required: ["sourcePath", "destinationPath"]
+             }
+         }
+     },
+     {
+         type: "function",
+         "function": {
+             name: "fetch_url",
+             description: "Fetch content from a URL to save as a source. Use this when the user shares a link.",
+             parameters: {
+                 type: "object",
+                 properties: {
+                     url: {
+                         type: "string",
+                         description: "The URL to fetch."
+                     },
+                     destinationPath: {
+                         type: "string",
+                         description: "The path to save the source to (e.g. 'misc/Research/source.md')."
+                     }
+                 },
+                 required: ["url", "destinationPath"]
+             }
+         }
+     }
+ ];
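
Both providers return tool_calls whose function.arguments is a JSON string (see the Gemini mapping above), so the host app must parse and dispatch them. A hedged sketch; the NoteStore handlers are hypothetical and not part of this package:

    type ToolCall = { function: { name: string; arguments: string } };

    // Hypothetical host-app handlers for the file tools declared above.
    interface NoteStore {
        write(path: string, content: string): Promise<void>;
        move(from: string, to: string): Promise<void>;
    }

    async function dispatchToolCall(call: ToolCall, notes: NoteStore): Promise<void> {
        const args = JSON.parse(call.function.arguments);
        switch (call.function.name) {
            case "update_file": return notes.write(args.filePath, args.newContent);
            case "create_file": return notes.write(args.filePath, args.content);
            case "move_file": return notes.move(args.sourcePath, args.destinationPath);
            default: throw new Error(`Unhandled tool: ${call.function.name}`);
        }
    }
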
package/dist/lib/ai/types.d.ts
@@ -0,0 +1,22 @@
+ export interface AIChatMessage {
+     role: 'system' | 'user' | 'assistant';
+     content: string;
+ }
+ export interface AITool {
+     type: "function";
+     function: {
+         name: string;
+         description: string;
+         parameters: any;
+     };
+ }
+ export interface AICompletion {
+     content: string;
+     tool_calls?: any[];
+     usage?: any;
+ }
+ export interface AIProvider {
+     id: string;
+     name: string;
+     chat(model: string, messages: AIChatMessage[], tools?: AITool[], apiKey?: string): Promise<AICompletion>;
+ }
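
Because AIProvider is a plain structural interface, additional backends can be added by conforming to it; a minimal sketch (the EchoProvider name and behavior are illustrative only, not part of the package):

    import { AIProvider, AIChatMessage, AITool, AICompletion } from "./types";

    // Trivial provider that echoes the last message back; useful for wiring tests.
    class EchoProvider implements AIProvider {
        id = "echo";
        name = "Echo (debug)";
        async chat(_model: string, messages: AIChatMessage[], _tools?: AITool[], _apiKey?: string): Promise<AICompletion> {
            const last = messages[messages.length - 1];
            return { content: last ? last.content : "" };
        }
    }
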
package/dist/lib/ai/types.js
@@ -0,0 +1 @@
+ export {};
package/dist/lib/ai/worker.d.ts
@@ -0,0 +1 @@
+ export {};
package/dist/lib/ai/worker.js
@@ -0,0 +1,60 @@
+ // Global storage for the pipeline
+ let embedder = null;
+ const EMBEDDING_MODEL = 'Xenova/all-MiniLM-L6-v2';
+ // Helper to load the library from the static vendor file
+ async function loadLibrary() {
+     // @ts-ignore
+     if (self.transformers)
+         return self.transformers;
+     console.log("[RAG Worker] Loading Transformers.js from static vendor...");
+     // Import from public/vendor/transformers.js (served by the main app)
+     // @ts-ignore
+     const mod = await import(/* webpackIgnore: true */ '/vendor/transformers.js');
+     return mod;
+ }
+ // Initialize the model
+ async function init() {
+     if (embedder)
+         return embedder;
+     try {
+         const { pipeline, env } = await loadLibrary();
+         // Configure environment
+         env.allowLocalModels = false; // Fetch model weights from CDN (or cache)
+         env.useBrowserCache = true; // Cache weights in the browser (offline support after first load)
+         console.log(`[RAG Worker] Loading Model: ${EMBEDDING_MODEL}...`);
+         // Load the feature-extraction pipeline
+         embedder = await pipeline('feature-extraction', EMBEDDING_MODEL, {
+             quantized: true, // Use the quantized model for speed/size (matches USE performance)
+         });
+         console.log("[RAG Worker] Model loaded successfully!");
+         return embedder;
+     }
+     catch (err) {
+         console.error("[RAG Worker] Initialization Failed:", err);
+         throw err;
+     }
+ }
+ self.onmessage = async (event) => {
+     const { id, text } = event.data;
+     if (!text) {
+         self.postMessage({ id, error: "No text provided" });
+         return;
+     }
+     try {
+         const pipe = await init();
+         // Convert text to an embedding.
+         // The output is a Tensor { data: Float32Array, dims: [...] }
+         const output = await pipe(text, {
+             pooling: 'mean', // Mean pooling is standard for sentence embeddings
+             normalize: true, // Normalize vectors for cosine similarity
+         });
+         // Extract the vector as a plain array
+         const embedding = Array.from(output.data);
+         self.postMessage({ id, embedding });
+     }
+     catch (err) {
+         console.error("[RAG Worker] Error:", err);
+         self.postMessage({ id, error: err.message });
+     }
+ };
+ export {};
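
The worker's protocol is { id, text } in and { id, embedding } or { id, error } out; a sketch of a promise wrapper on the main thread (the worker URL and bundling are assumptions):

    // Assumed worker location; the real app may bundle it differently.
    const worker = new Worker(new URL("./worker.js", import.meta.url), { type: "module" });

    function embed(text: string): Promise<number[]> {
        return new Promise((resolve, reject) => {
            const id = crypto.randomUUID();
            const onMessage = (e: MessageEvent) => {
                if (e.data.id !== id) return; // reply belongs to a different request
                worker.removeEventListener("message", onMessage);
                e.data.error ? reject(new Error(e.data.error)) : resolve(e.data.embedding);
            };
            worker.addEventListener("message", onMessage);
            worker.postMessage({ id, text });
        });
    }

    // Since the worker normalizes vectors, cosine similarity reduces to a dot product.
    const dot = (a: number[], b: number[]) => a.reduce((s, v, i) => s + v * b[i], 0);
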
package/dist/lib/audio/input.d.ts
@@ -0,0 +1,13 @@
+ export declare class AudioInputService {
+     private audioContext;
+     private stream;
+     private processor;
+     private source;
+     private analyser;
+     private readonly SAMPLE_RATE;
+     constructor();
+     getAnalyser(): AnalyserNode | null;
+     start(onAudioData: (data: Float32Array) => void): Promise<void>;
+     stop(): void;
+     private downsampleBuffer;
+ }
package/dist/lib/audio/input.js
@@ -0,0 +1,121 @@
+ export class AudioInputService {
+     constructor() {
+         this.audioContext = null;
+         this.stream = null;
+         this.processor = null; // Fallback or AudioWorklet
+         this.source = null;
+         this.analyser = null;
+         // Whisper typically expects 16kHz
+         this.SAMPLE_RATE = 16000;
+     }
+     getAnalyser() {
+         return this.analyser;
+     }
+     async start(onAudioData) {
+         if (this.stream)
+             return;
+         try {
+             this.stream = await navigator.mediaDevices.getUserMedia({
+                 audio: {
+                     channelCount: 1,
+                     echoCancellation: true,
+                     noiseSuppression: true,
+                     autoGainControl: true,
+                     sampleRate: this.SAMPLE_RATE
+                 }
+             });
+             this.audioContext = new AudioContext({ sampleRate: this.SAMPLE_RATE });
+             console.log(`[AudioInput] AudioContext created. Requested: ${this.SAMPLE_RATE}, Actual: ${this.audioContext.sampleRate}`);
+             await this.audioContext.resume(); // Ensure the context is running
+             this.source = this.audioContext.createMediaStreamSource(this.stream);
+             this.analyser = this.audioContext.createAnalyser();
+             this.analyser.fftSize = 256;
+             this.processor = this.audioContext.createScriptProcessor(4096, 1, 1);
+             // Mute the monitoring path to prevent feedback (microphone -> speaker loop)
+             const muteNode = this.audioContext.createGain();
+             muteNode.gain.value = 0;
+             this.processor.connect(muteNode);
+             muteNode.connect(this.audioContext.destination);
+             let buffer = new Float32Array(0);
+             const TARGET_CHUNK_SIZE = 512;
+             this.processor.onaudioprocess = (e) => {
+                 let inputData = e.inputBuffer.getChannelData(0);
+                 // Software gain (1.5x) - reduced from 3x/5x to fix clipping/distortion
+                 const GAIN_FACTOR = 1.5;
+                 const amplified = new Float32Array(inputData.length);
+                 for (let i = 0; i < inputData.length; i++) {
+                     amplified[i] = inputData[i] * GAIN_FACTOR;
+                 }
+                 inputData = amplified;
+                 // Downsample on sample-rate mismatch (e.g. 48k -> 16k)
+                 if (this.audioContext && this.audioContext.sampleRate !== this.SAMPLE_RATE) {
+                     inputData = this.downsampleBuffer(inputData, this.audioContext.sampleRate, this.SAMPLE_RATE);
+                 }
+                 // Append to the rolling buffer
+                 const newBuffer = new Float32Array(buffer.length + inputData.length);
+                 newBuffer.set(buffer, 0);
+                 newBuffer.set(inputData, buffer.length);
+                 buffer = newBuffer;
+                 while (buffer.length >= TARGET_CHUNK_SIZE) {
+                     const chunk = buffer.slice(0, TARGET_CHUNK_SIZE);
+                     buffer = buffer.slice(TARGET_CHUNK_SIZE);
+                     onAudioData(chunk);
+                 }
+             };
+             // Connect graph: Source -> Analyser -> Processor -> Destination
+             this.source.connect(this.analyser);
+             this.analyser.connect(this.processor);
+             this.processor.connect(this.audioContext.destination);
+             console.log("[AudioInput] Engine started successfully");
+         }
+         catch (err) {
+             console.error("AudioInputService Error:", err);
+             throw err;
+         }
+     }
+     stop() {
+         if (this.stream) {
+             this.stream.getTracks().forEach(track => track.stop());
+             this.stream = null;
+         }
+         if (this.processor) {
+             this.processor.disconnect();
+             this.processor = null;
+         }
+         if (this.source) {
+             this.source.disconnect();
+             this.source = null;
+         }
+         if (this.audioContext) {
+             this.audioContext.close();
+             this.audioContext = null;
+         }
+     }
+     // Helper: simple averaging downsampler (averages source samples per output sample)
+     downsampleBuffer(buffer, sampleRate, outSampleRate) {
+         if (outSampleRate === sampleRate)
+             return buffer;
+         if (outSampleRate > sampleRate) {
+             console.warn("[AudioInput] Upsampling not supported, returning original");
+             return buffer;
+         }
+         const sampleRateRatio = sampleRate / outSampleRate;
+         const newLength = Math.round(buffer.length / sampleRateRatio);
+         const result = new Float32Array(newLength);
+         let offsetResult = 0;
+         let offsetBuffer = 0;
+         while (offsetResult < result.length) {
+             const nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
+             // Average the values in the range
+             let accum = 0, count = 0;
+             for (let i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
+                 accum += buffer[i];
+                 count++;
+             }
+             result[offsetResult] = count > 0 ? accum / count : 0;
+             offsetResult++;
+             offsetBuffer = nextOffsetBuffer;
+         }
+         return result;
+     }
+ }
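
With SAMPLE_RATE at 16000 and TARGET_CHUNK_SIZE at 512, each callback delivers 512 / 16000 = 32 ms of audio. A minimal consumer sketch; the VAD hand-off is hypothetical (the actual consumer lives elsewhere in the package, see lib/audio/vad):

    declare const vad: { process(chunk: Float32Array): void }; // hypothetical consumer

    const input = new AudioInputService();
    await input.start((chunk: Float32Array) => {
        // chunk.length === 512, i.e. 32 ms at 16 kHz
        vad.process(chunk);
    });
    // ... later, release the microphone and audio graph:
    input.stop();
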
package/dist/lib/audio/stt.d.ts
@@ -0,0 +1,13 @@
+ type TranscriberPipeline = (audio: Float32Array | Float64Array, options?: any) => Promise<{
+     text: string;
+ }>;
+ export declare class TranscriberService {
+     private static instance;
+     private static modelId;
+     private static cloudKey;
+     static setCloudProvider(apiKey: string | null): void;
+     static getInstance(): Promise<TranscriberPipeline>;
+     static transcribe(audio: Float32Array): Promise<string>;
+     private static encodeWAV;
+ }
+ export {};
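
Based on the statics declared above, a usage sketch; which cloud backend setCloudProvider routes to is not visible from this declaration, and recordedAudio is a placeholder:

    declare const recordedAudio: Float32Array; // e.g. chunks concatenated from AudioInputService

    // With a key set the service presumably uses its cloud transcriber;
    // with null it presumably falls back to the local pipeline (getInstance).
    TranscriberService.setCloudProvider(process.env.GROQ_API_KEY ?? null);
    const text = await TranscriberService.transcribe(recordedAudio);
    console.log(text);
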