@tryhamster/gerbil 1.0.0-rc.1 → 1.0.0-rc.10
This diff compares the publicly available contents of these package versions as published to their public registries. It is provided for informational purposes only.
- package/dist/browser/{index.d.mts → index.d.ts} +354 -3
- package/dist/browser/index.d.ts.map +1 -0
- package/dist/browser/{index.mjs → index.js} +119 -8
- package/dist/browser/index.js.map +1 -0
- package/dist/{chrome-backend-Y9F7W5VQ.mjs → chrome-backend-CORwaIyC.mjs} +1 -1
- package/dist/{chrome-backend-Y9F7W5VQ.mjs.map → chrome-backend-CORwaIyC.mjs.map} +1 -1
- package/dist/{chrome-backend-JEPeM2YE.mjs → chrome-backend-DIKYoWj-.mjs} +1 -1
- package/dist/cli.mjs +14 -15
- package/dist/cli.mjs.map +1 -1
- package/dist/frameworks/express.d.mts +1 -1
- package/dist/frameworks/express.mjs +3 -4
- package/dist/frameworks/express.mjs.map +1 -1
- package/dist/frameworks/fastify.d.mts +1 -1
- package/dist/frameworks/fastify.mjs +2 -3
- package/dist/frameworks/fastify.mjs.map +1 -1
- package/dist/frameworks/hono.d.mts +1 -1
- package/dist/frameworks/hono.mjs +2 -3
- package/dist/frameworks/hono.mjs.map +1 -1
- package/dist/frameworks/next.d.mts +2 -2
- package/dist/frameworks/next.mjs +2 -3
- package/dist/frameworks/next.mjs.map +1 -1
- package/dist/frameworks/react.d.mts +1 -1
- package/dist/frameworks/trpc.d.mts +1 -1
- package/dist/frameworks/trpc.mjs +2 -3
- package/dist/frameworks/trpc.mjs.map +1 -1
- package/dist/gerbil-DJGqq7BX.mjs +4 -0
- package/dist/{gerbil-yoSpRHgv.mjs → gerbil-DoDGHe6Z.mjs} +187 -19
- package/dist/gerbil-DoDGHe6Z.mjs.map +1 -0
- package/dist/{gerbil-POAz8peb.d.mts → gerbil-qOTe1nl2.d.mts} +2 -2
- package/dist/{gerbil-POAz8peb.d.mts.map → gerbil-qOTe1nl2.d.mts.map} +1 -1
- package/dist/index.d.mts +19 -3
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +6 -7
- package/dist/index.mjs.map +1 -1
- package/dist/integrations/ai-sdk.d.mts +1 -1
- package/dist/integrations/ai-sdk.mjs +4 -5
- package/dist/integrations/ai-sdk.mjs.map +1 -1
- package/dist/integrations/langchain.d.mts +1 -1
- package/dist/integrations/langchain.mjs +2 -3
- package/dist/integrations/langchain.mjs.map +1 -1
- package/dist/integrations/llamaindex.d.mts +1 -1
- package/dist/integrations/llamaindex.mjs +2 -3
- package/dist/integrations/llamaindex.mjs.map +1 -1
- package/dist/integrations/mcp-client.mjs +2 -2
- package/dist/integrations/mcp.d.mts +2 -2
- package/dist/integrations/mcp.mjs +5 -6
- package/dist/kokoro-BNTb6egA.mjs +20210 -0
- package/dist/kokoro-BNTb6egA.mjs.map +1 -0
- package/dist/kokoro-CMOGDSgT.js +20212 -0
- package/dist/kokoro-CMOGDSgT.js.map +1 -0
- package/dist/{mcp-Bitg4sjX.mjs → mcp-kzDDWIoS.mjs} +3 -3
- package/dist/{mcp-Bitg4sjX.mjs.map → mcp-kzDDWIoS.mjs.map} +1 -1
- package/dist/{one-liner-B1rmFto6.mjs → one-liner-DxnNs_JK.mjs} +2 -2
- package/dist/{one-liner-B1rmFto6.mjs.map → one-liner-DxnNs_JK.mjs.map} +1 -1
- package/dist/repl-DGUw4fCc.mjs +9 -0
- package/dist/skills/index.d.mts +24 -24
- package/dist/skills/index.d.mts.map +1 -1
- package/dist/skills/index.mjs +4 -5
- package/dist/{skills-5DxAV-rn.mjs → skills-DulrOPeP.mjs} +12 -12
- package/dist/skills-DulrOPeP.mjs.map +1 -0
- package/dist/stt-1WIefHwc.mjs +3 -0
- package/dist/{stt-Bv_dum-R.mjs → stt-CG_7KB_0.mjs} +3 -2
- package/dist/stt-CG_7KB_0.mjs.map +1 -0
- package/dist/stt-Dne6SENv.js +434 -0
- package/dist/stt-Dne6SENv.js.map +1 -0
- package/dist/{tools-IYPrqoek.mjs → tools-Bi1P7Xoy.mjs} +2 -2
- package/dist/{tools-IYPrqoek.mjs.map → tools-Bi1P7Xoy.mjs.map} +1 -1
- package/dist/transformers.web-DiD1gTwk.js +44695 -0
- package/dist/transformers.web-DiD1gTwk.js.map +1 -0
- package/dist/transformers.web-u34VxRFM.js +3 -0
- package/dist/{tts-5yWeP_I0.mjs → tts-B1pZMlDv.mjs} +1 -1
- package/dist/tts-C2FzKuSx.js +725 -0
- package/dist/tts-C2FzKuSx.js.map +1 -0
- package/dist/{tts-DG6denWG.mjs → tts-CyHhcLtN.mjs} +6 -4
- package/dist/tts-CyHhcLtN.mjs.map +1 -0
- package/dist/{types-s6Py2_DL.d.mts → types-CiTc7ez3.d.mts} +1 -1
- package/dist/{types-s6Py2_DL.d.mts.map → types-CiTc7ez3.d.mts.map} +1 -1
- package/dist/{utils-CkB4Roi6.mjs → utils-CZBZ8dgR.mjs} +1 -1
- package/dist/{utils-CkB4Roi6.mjs.map → utils-CZBZ8dgR.mjs.map} +1 -1
- package/package.json +6 -6
- package/dist/browser/index.d.mts.map +0 -1
- package/dist/browser/index.mjs.map +0 -1
- package/dist/gerbil-DeQlX_Mt.mjs +0 -5
- package/dist/gerbil-yoSpRHgv.mjs.map +0 -1
- package/dist/models-BAtL8qsA.mjs +0 -171
- package/dist/models-BAtL8qsA.mjs.map +0 -1
- package/dist/models-CE0fBq0U.d.mts +0 -22
- package/dist/models-CE0fBq0U.d.mts.map +0 -1
- package/dist/repl-D20JO260.mjs +0 -10
- package/dist/skills-5DxAV-rn.mjs.map +0 -1
- package/dist/stt-Bv_dum-R.mjs.map +0 -1
- package/dist/stt-KzSoNvwI.mjs +0 -3
- package/dist/tts-DG6denWG.mjs.map +0 -1
- /package/dist/{auto-update-DsWBBnEk.mjs → auto-update-S9s5-g0C.mjs} +0 -0
- /package/dist/{chunk-Ct1HF2bE.mjs → chunk-CkXuGtQK.mjs} +0 -0
- /package/dist/{microphone-D-6y9aiE.mjs → microphone-DaMZFRuR.mjs} +0 -0
package/dist/browser/index.mjs.map (removed)
@@ -1 +0,0 @@
- {"version":3,"file":"index.mjs","names":[…],"sources":["../../src/browser/index.ts"],"sourcesContent":[…]} (one-line minified source map; its sourcesContent embeds the full src/browser/index.ts browser entry, covering createGerbilWorker, the useChat, useCompletion, and useSpeech React hooks, and the playAudio and createAudioPlayer audio utilities; remainder of the map omitted)
Input Hook (STT)\n// ============================================\n\n/**\n * Progress info for STT loading\n */\nexport type STTProgress = {\n  status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n  message?: string;\n  progress?: number;\n  file?: string;\n};\n\n/**\n * Options for useVoiceInput hook\n */\nexport type UseVoiceInputOptions = {\n  /** STT model ID (default: whisper-tiny.en) */\n  model?: string;\n  /** Auto-load model on mount (default: false) */\n  autoLoad?: boolean;\n  /** Callback when model is ready */\n  onReady?: () => void;\n  /** Callback when transcription completes (or for each chunk in streaming mode) */\n  onTranscript?: (text: string) => void;\n  /** Callback on error */\n  onError?: (error: string) => void;\n  /** Callback during loading */\n  onProgress?: (progress: STTProgress) => void;\n  /** Enable streaming transcription - transcribes audio in chunks as you speak */\n  streaming?: boolean;\n  /** Chunk duration in ms for streaming mode (default: 1500 = 1.5 seconds) */\n  chunkDuration?: number;\n  /** Callback for each streaming chunk with partial transcript */\n  onChunk?: (text: string, chunkIndex: number) => void;\n};\n\n/**\n * Return type for useVoiceInput hook\n */\nexport type UseVoiceInputReturn = {\n  /** Start recording audio */\n  startRecording: () => Promise<void>;\n  /** Stop recording and transcribe */\n  stopRecording: () => Promise<string>;\n  /** Cancel recording without transcribing */\n  cancelRecording: () => void;\n  /** Transcribe raw audio data (Float32Array at 16kHz) */\n  transcribe: (audio: Float32Array) => Promise<string>;\n  /** Whether currently recording */\n  isRecording: boolean;\n  /** Whether transcribing */\n  isTranscribing: boolean;\n  /** Whether model is loading */\n  isLoading: boolean;\n  /** Whether model is ready */\n  isReady: boolean;\n  /** Latest transcription result (full transcript in streaming mode) */\n  transcript: string;\n  /** Current streaming chunk being transcribed (streaming mode only) */\n  streamingChunk: string;\n  /** Number of chunks transcribed so far (streaming mode only) */\n  chunkCount: number;\n  /** Loading progress */\n  loadingProgress: STTProgress | null;\n  /** Error message */\n  error: string | null;\n  /** Manually load the model */\n  load: () => void;\n};\n\n/**\n * React hook for voice input with browser microphone\n *\n * Uses MediaRecorder to capture audio and Whisper for transcription.\n * Supports both one-shot and streaming transcription modes.\n *\n * @example Basic usage (one-shot)\n * ```tsx\n * function VoiceInput() {\n *   const { startRecording, stopRecording, isRecording, transcript } = useVoiceInput({\n *     onTranscript: (text) => console.log(\"User said:\", text),\n *   });\n *\n *   return (\n *     <button onClick={isRecording ? stopRecording : startRecording}>\n *       {isRecording ? \"Stop\" : \"Record\"}\n *     </button>\n *   );\n * }\n * ```\n *\n * @example Streaming transcription (real-time)\n * ```tsx\n * function LiveTranscription() {\n *   const { startRecording, stopRecording, isRecording, transcript, streamingChunk } = useVoiceInput({\n *     streaming: true, // Enable streaming mode\n *     chunkDuration: 1500, // Transcribe every 1.5 seconds (default)\n *     onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),\n *   });\n *\n *   return (\n *     <div>\n *       <button onClick={isRecording ? stopRecording : startRecording}>\n *         {isRecording ? 
\"Stop\" : \"Start Live Transcription\"}\n * </button>\n * <p>Current chunk: {streamingChunk}</p>\n * <p>Full transcript: {transcript}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceInput(options: UseVoiceInputOptions = {}): UseVoiceInputReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceInput requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"whisper-tiny.en\",\n autoLoad = false,\n onReady,\n onTranscript,\n onError,\n onProgress,\n streaming = false,\n chunkDuration = 1500, // Transcribe every 1.5 seconds for near real-time\n onChunk,\n } = options;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<STTProgress | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [isRecording, setIsRecording] = useState<boolean>(false);\n const [isTranscribing, setIsTranscribing] = useState<boolean>(false);\n const [transcript, setTranscript] = useState<string>(\"\");\n const [streamingChunk, setStreamingChunk] = useState<string>(\"\");\n const [chunkCount, setChunkCount] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const sttRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const mountedRef = useRef<boolean>(true);\n const streamingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);\n const pendingChunksRef = useRef<Blob[]>([]);\n const fullTranscriptRef = useRef<string>(\"\");\n\n // Load the STT model\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModel = async () => {\n try {\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading STT model...\" });\n onProgress?.({ status: \"loading\", message: \"Loading STT model...\" });\n\n // Dynamic import to avoid bundling when not used\n const { WhisperSTT } = await import(\"../core/stt.js\");\n\n if (cancelled || !mountedRef.current) return;\n\n const stt = new WhisperSTT(model);\n await stt.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n const progress: STTProgress = {\n status: p.progress !== undefined ? 
\"downloading\" : \"loading\",\n message: p.status,\n progress: p.progress,\n file: p.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n },\n });\n\n if (cancelled || !mountedRef.current) {\n stt.dispose();\n return;\n }\n\n sttRef.current = stt;\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load STT model\";\n setError(errMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n }\n };\n\n loadModel();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, model, onReady, onError, onProgress]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n if (sttRef.current) {\n sttRef.current.dispose();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n };\n }, []);\n\n // Manual load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert audio blob to Float32Array at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n\n // Get mono channel\n const channelData = audioBuffer.getChannelData(0);\n\n // Resample if needed\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Transcribe audio\n const transcribe = useCallback(\n async (audio: Float32Array): Promise<string> => {\n if (!sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n throw new Error(\"STT model not loaded. 
Loading now, please try again.\");\n }\n throw new Error(\"STT model not loaded\");\n }\n\n setIsTranscribing(true);\n try {\n const result = await sttRef.current.transcribe(audio);\n let text = result.text.trim();\n // Filter out Whisper artifacts\n if (text === \"[BLANK_AUDIO]\" || text === \"(blank audio)\" || text === \"[BLANK AUDIO]\") {\n text = \"\";\n }\n setTranscript(text);\n onTranscript?.(text);\n return text;\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n },\n [shouldLoad, onTranscript],\n );\n\n // Track how many samples we've processed for streaming\n const processedSamplesRef = useRef<number>(0);\n\n // Transcribe a chunk of audio (for streaming mode)\n // Uses audioChunksRef (all chunks) to ensure valid WebM container\n const transcribeChunk = useCallback(\n async (chunkIdx: number): Promise<string> => {\n if (!sttRef.current || audioChunksRef.current.length === 0) return \"\";\n\n try {\n // Create blob from ALL chunks (needed for valid WebM header)\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n const audioData = await blobToFloat32(audioBlob);\n\n // Calculate new samples since last transcription\n const newSamplesStart = processedSamplesRef.current;\n const totalSamples = audioData.length;\n\n // Skip if no new audio (< 0.5 seconds = 8000 samples at 16kHz)\n if (totalSamples - newSamplesStart < 8000) return \"\";\n\n // Extract only the new portion of audio\n const newAudio = audioData.slice(newSamplesStart);\n\n // Update processed count\n processedSamplesRef.current = totalSamples;\n\n const result = await sttRef.current.transcribe(newAudio);\n let text = result.text.trim();\n\n // Filter out Whisper artifacts\n if (text === \"[BLANK_AUDIO]\" || text === \"(blank audio)\" || text === \"[BLANK AUDIO]\") {\n text = \"\";\n }\n\n if (text && mountedRef.current) {\n setStreamingChunk(text);\n onChunk?.(text, chunkIdx);\n }\n\n return text;\n } catch {\n return \"\";\n }\n },\n [blobToFloat32, onChunk],\n );\n\n // Start recording\n const startRecording = useCallback(async () => {\n if (isRecording) return;\n\n try {\n // For streaming mode, ensure STT model is loaded first\n if (streaming && !sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for model to load\n setIsLoading(true);\n const { WhisperSTT } = await import(\"../core/stt.js\");\n const stt = new WhisperSTT(model);\n await stt.load({\n onProgress: (p: any) => {\n if (mountedRef.current) {\n const progress: STTProgress = {\n status:\n p.status === \"downloading\"\n ? \"downloading\"\n : p.status === \"ready\"\n ? 
\"ready\"\n : \"loading\",\n message: p.status,\n progress: p.progress,\n file: p.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n }\n },\n });\n if (!mountedRef.current) {\n stt.dispose();\n return;\n }\n sttRef.current = stt;\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n }\n\n // Request microphone permission\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: {\n sampleRate: 16000,\n channelCount: 1,\n echoCancellation: true,\n noiseSuppression: true,\n },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n fullTranscriptRef.current = \"\";\n processedSamplesRef.current = 0;\n setTranscript(\"\");\n setStreamingChunk(\"\");\n setChunkCount(0);\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n if (streaming) {\n pendingChunksRef.current.push(event.data);\n }\n }\n };\n\n mediaRecorder.start(100); // Collect data every 100ms\n setIsRecording(true);\n setError(null);\n\n // If streaming mode, set up recursive transcription loop\n if (streaming && sttRef.current) {\n let chunkIdx = 0;\n let shouldContinue = true;\n\n // Use recursive setTimeout instead of setInterval to avoid timing issues\n // with heavy WebGPU/WASM operations\n const processNextChunk = async () => {\n if (!shouldContinue || !mountedRef.current) {\n return;\n }\n\n const numPending = pendingChunksRef.current.length;\n\n // Check if we have new audio to process\n if (numPending > 0) {\n // Clear pending counter (we'll process via audioChunksRef which has all data)\n pendingChunksRef.current = [];\n\n try {\n setIsTranscribing(true);\n const chunkText = await transcribeChunk(chunkIdx);\n\n if (chunkText && mountedRef.current) {\n chunkIdx++;\n setChunkCount(chunkIdx);\n\n // Append to full transcript using functional update\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
\" \" : \"\") + chunkText;\n fullTranscriptRef.current = newTranscript;\n onTranscript?.(newTranscript);\n return newTranscript;\n });\n }\n } catch (e) {\n console.error(\"[useVoiceInput] Chunk transcription error:\", e);\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n // Schedule next check if still running\n if (shouldContinue && mountedRef.current) {\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n }\n };\n\n // Start the loop\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n\n // Store a way to stop the loop\n (streamingIntervalRef as any)._stop = () => {\n shouldContinue = false;\n };\n }\n } catch (e: any) {\n const errMsg = e.message || \"Failed to start recording\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [\n isRecording,\n streaming,\n shouldLoad,\n model,\n chunkDuration,\n transcribeChunk,\n onTranscript,\n onError,\n onProgress,\n onReady,\n ]);\n\n // Stop recording and transcribe\n const stopRecording = useCallback(async (): Promise<string> => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n return new Promise((resolve, reject) => {\n if (!mediaRecorderRef.current || !isRecording) {\n reject(new Error(\"Not recording\"));\n return;\n }\n\n const mediaRecorder = mediaRecorderRef.current;\n\n mediaRecorder.onstop = async () => {\n // Stop all tracks\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n setIsRecording(false);\n\n // In streaming mode, process any remaining chunks and return full transcript\n if (streaming) {\n // Process any remaining audio\n if (audioChunksRef.current.length > 0 && processedSamplesRef.current > 0) {\n setIsTranscribing(true);\n pendingChunksRef.current = [];\n\n try {\n const finalChunkText = await transcribeChunk(chunkCount);\n if (finalChunkText && mountedRef.current) {\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
\" \" : \"\") + finalChunkText;\n fullTranscriptRef.current = newTranscript;\n return newTranscript;\n });\n }\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n const finalText = fullTranscriptRef.current;\n onTranscript?.(finalText);\n resolve(finalText);\n return;\n }\n\n // Non-streaming mode: transcribe entire recording\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // Ensure model is loaded\n if (!sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for model to load\n await new Promise<void>((res, rej) => {\n const checkReady = setInterval(() => {\n if (sttRef.current) {\n clearInterval(checkReady);\n res();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n rej(new Error(\"Timeout waiting for STT model\"));\n }, 30000);\n });\n }\n\n // Convert blob to Float32Array\n const audioData = await blobToFloat32(audioBlob);\n\n // Transcribe\n const text = await transcribe(audioData);\n resolve(text);\n } catch (e: any) {\n const errMsg = e.message || \"Transcription failed\";\n setError(errMsg);\n onError?.(errMsg);\n reject(e);\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n isRecording,\n streaming,\n chunkCount,\n shouldLoad,\n blobToFloat32,\n transcribe,\n transcribeChunk,\n onTranscript,\n onError,\n ]);\n\n // Cancel recording\n const cancelRecording = useCallback(() => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n if (mediaRecorderRef.current && isRecording) {\n mediaRecorderRef.current.stop();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n processedSamplesRef.current = 0;\n setIsRecording(false);\n }, [isRecording]);\n\n return {\n startRecording,\n stopRecording,\n cancelRecording,\n transcribe,\n isRecording,\n isTranscribing,\n isLoading,\n isReady,\n transcript,\n streamingChunk,\n chunkCount,\n loadingProgress,\n error,\n load,\n };\n}\n\n// ============================================\n// Voice Chat Hook (STT + LLM + TTS)\n// ============================================\n\n/**\n * Options for useVoiceChat hook\n */\nexport type UseVoiceChatOptions = {\n /** LLM model ID (default: qwen3-0.6b) */\n llmModel?: string;\n /** STT model ID (default: whisper-tiny.en) */\n sttModel?: string;\n /** TTS model ID (default: kokoro-82m, also supports supertonic-66m) */\n ttsModel?: TTSModelId;\n /** System prompt for LLM */\n system?: string;\n /** Enable thinking mode (default: false) */\n thinking?: boolean;\n /** TTS voice ID (default: model's default voice) */\n voice?: string;\n /** TTS speech speed (default: 1.0) */\n speed?: number;\n /** Auto-load all models on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when user speaks */\n onUserSpeak?: (text: string) => void;\n /** Callback when assistant responds */\n onAssistantSpeak?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/**\n * Message in voice chat\n */\nexport type VoiceChatMessage = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n audioUrl?: string;\n};\n\n/**\n * Return type for useVoiceChat hook\n */\nexport type UseVoiceChatReturn = {\n /** 
Messages in the conversation */\n messages: VoiceChatMessage[];\n /** Start recording user speech */\n startListening: () => Promise<void>;\n /** Stop recording and process (STT → LLM → TTS) */\n stopListening: () => Promise<void>;\n /** Cancel current operation */\n cancel: () => void;\n /** Clear conversation history */\n clear: () => void;\n /** Whether recording user speech */\n isListening: boolean;\n /** Whether processing (STT/LLM/TTS) */\n isProcessing: boolean;\n /** Whether assistant is speaking */\n isSpeaking: boolean;\n /** Current stage: idle, listening, transcribing, thinking, speaking */\n stage: \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\";\n /** Whether all models are loaded */\n isReady: boolean;\n /** Whether loading models */\n isLoading: boolean;\n /** Loading progress message */\n loadingMessage: string;\n /** Error message */\n error: string | null;\n /** Manually load all models */\n load: () => void;\n};\n\n/**\n * React hook for voice conversation with STT + LLM + TTS\n *\n * Complete voice-to-voice conversation loop:\n * 1. User presses button to speak\n * 2. Speech is transcribed (Whisper)\n * 3. LLM generates response\n * 4. Response is spoken aloud (Kokoro or Supertonic TTS)\n *\n * @example\n * ```tsx\n * function VoiceChat() {\n * const {\n * messages,\n * startListening,\n * stopListening,\n * isListening,\n * isSpeaking,\n * stage,\n * } = useVoiceChat({\n * system: \"You are a helpful voice assistant.\",\n * voice: \"af_bella\",\n * // Or use Supertonic for faster synthesis:\n * // ttsModel: \"supertonic-66m\",\n * // voice: \"F1\",\n * });\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <button\n * onMouseDown={startListening}\n * onMouseUp={stopListening}\n * >\n * {stage === \"idle\" ? \"🎤 Hold to Speak\" : stage}\n * </button>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceChat(options: UseVoiceChatOptions = {}): UseVoiceChatReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n // Get TTS model config for default voice\n const ttsModelId = options.ttsModel || \"kokoro-82m\";\n const ttsConfig = TTS_MODELS[ttsModelId];\n\n const {\n llmModel = \"qwen3-0.6b\",\n sttModel = \"whisper-tiny.en\",\n system = \"You are a helpful voice assistant. 
Keep responses brief and conversational.\",\n thinking = false,\n voice = ttsConfig.defaultVoice,\n speed = 1.0,\n autoLoad = false,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<VoiceChatMessage[]>([]);\n const [stage, setStage] = useState<\n \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\"\n >(\"idle\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingMessage, setLoadingMessage] = useState<string>(\"\");\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n // Refs for models and audio\n const llmWorkerRef = useRef<any>(null);\n const sttRef = useRef<any>(null);\n const ttsRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const cancelledRef = useRef<boolean>(false);\n\n // Computed states\n const isListening = stage === \"listening\";\n const isProcessing = stage === \"transcribing\" || stage === \"thinking\";\n const isSpeaking = stage === \"speaking\";\n\n // Load all models\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModels = async () => {\n try {\n setIsLoading(true);\n setError(null);\n\n // Load STT\n setLoadingMessage(\"Loading speech recognition (Whisper)...\");\n const { WhisperSTT } = await import(\"../core/stt.js\");\n if (cancelled || !mountedRef.current) return;\n\n const stt = new WhisperSTT(sttModel);\n await stt.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.status || \"Loading STT...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n stt.dispose();\n return;\n }\n sttRef.current = stt;\n\n // Load LLM worker\n setLoadingMessage(\"Loading language model...\");\n const worker = await createGerbilWorker({\n modelId: llmModel,\n onProgress: (p) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.message || \"Loading LLM...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n worker.terminate();\n return;\n }\n llmWorkerRef.current = worker;\n\n // Load TTS (Kokoro or Supertonic based on ttsModel option)\n const isSupertonic = ttsModelId === \"supertonic-66m\";\n setLoadingMessage(`Loading text-to-speech (${isSupertonic ? 
\"Supertonic\" : \"Kokoro\"})...`);\n\n const { createTTS } = await import(\"../core/tts.js\");\n if (cancelled || !mountedRef.current) return;\n\n const tts = createTTS(ttsModelId);\n await tts.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.status || \"Loading TTS...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n await tts.dispose();\n return;\n }\n ttsRef.current = tts;\n\n setIsReady(true);\n setIsLoading(false);\n setLoadingMessage(\"Ready!\");\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load models\";\n setError(errMsg);\n setIsLoading(false);\n onError?.(errMsg);\n }\n };\n\n loadModels();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, llmModel, sttModel, ttsModelId, onError]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n llmWorkerRef.current?.terminate();\n sttRef.current?.dispose();\n ttsRef.current?.dispose();\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n audioContextRef.current?.close();\n };\n }, []);\n\n // Load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert blob to Float32 at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n const channelData = audioBuffer.getChannelData(0);\n\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Play audio through Web Audio API\n const playAudioBuffer = useCallback(\n async (audio: Float32Array, sampleRate: number): Promise<void> => {\n return new Promise((resolve) => {\n if (!audioContextRef.current) {\n audioContextRef.current = new AudioContext();\n }\n const ctx = audioContextRef.current;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n source.onended = () => {\n if (mountedRef.current) {\n resolve();\n }\n };\n source.start();\n sourceNodeRef.current = source;\n });\n },\n [],\n );\n\n // Start listening\n const startListening = useCallback(async () => {\n if (stage !== \"idle\") return;\n\n // Trigger load if not ready\n if (!isReady && !isLoading) {\n setShouldLoad(true);\n return;\n }\n\n cancelledRef.current = false;\n\n try {\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n\n const mediaRecorder = new 
MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n }\n };\n\n mediaRecorder.start(100);\n setStage(\"listening\");\n setError(null);\n } catch (e: any) {\n const errMsg = e.message || \"Failed to access microphone\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [stage, isReady, isLoading, onError]);\n\n // Stop listening and process\n const stopListening = useCallback(async () => {\n if (stage !== \"listening\") return;\n\n const mediaRecorder = mediaRecorderRef.current;\n if (!mediaRecorder) return;\n\n return new Promise<void>((resolve) => {\n mediaRecorder.onstop = async () => {\n // Stop mic\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // STT\n setStage(\"transcribing\");\n const audioData = await blobToFloat32(audioBlob);\n const sttResult = await sttRef.current.transcribe(audioData);\n let userText = sttResult.text.trim();\n\n // Filter out Whisper artifacts\n if (\n userText === \"[BLANK_AUDIO]\" ||\n userText === \"(blank audio)\" ||\n userText === \"[BLANK AUDIO]\"\n ) {\n userText = \"\";\n }\n\n if (cancelledRef.current || !userText) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add user message\n const userMsgId = `user-${Date.now()}`;\n setMessages((m) => [...m, { id: userMsgId, role: \"user\", content: userText }]);\n onUserSpeak?.(userText);\n\n // LLM\n setStage(\"thinking\");\n\n // Build conversation history\n const history = messages.map((m) => ({\n role: m.role as \"user\" | \"assistant\",\n content: m.content,\n }));\n history.push({ role: \"user\", content: userText });\n\n let responseText = \"\";\n let thinkingText = \"\";\n\n await llmWorkerRef.current.generate(userText, {\n system,\n thinking,\n history,\n onToken: (token: WorkerToken) => {\n if (cancelledRef.current) return;\n if (token.state === \"thinking\") {\n thinkingText += token.text;\n } else {\n responseText += token.text;\n }\n },\n });\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add assistant message\n const assistantMsgId = `assistant-${Date.now()}`;\n setMessages((m) => [\n ...m,\n {\n id: assistantMsgId,\n role: \"assistant\",\n content: responseText,\n thinking: thinkingText || undefined,\n },\n ]);\n onAssistantSpeak?.(responseText);\n\n // TTS\n if (responseText.trim()) {\n setStage(\"speaking\");\n const ttsResult = await ttsRef.current.speak(responseText, { voice, speed });\n\n if (!cancelledRef.current) {\n await playAudioBuffer(ttsResult.audio, ttsResult.sampleRate);\n }\n }\n\n setStage(\"idle\");\n resolve();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Processing failed\";\n setError(errMsg);\n setStage(\"idle\");\n onError?.(errMsg);\n resolve();\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n stage,\n messages,\n system,\n thinking,\n voice,\n speed,\n blobToFloat32,\n playAudioBuffer,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n ]);\n\n // Cancel\n const cancel = useCallback(() => {\n cancelledRef.current = true;\n\n if (mediaRecorderRef.current && stage === \"listening\") {\n mediaRecorderRef.current.stop();\n }\n\n if (streamRef.current) {\n for (const track of 
streamRef.current.getTracks()) {\n        track.stop();\n      }\n      streamRef.current = null;\n    }\n\n    if (sourceNodeRef.current) {\n      try {\n        sourceNodeRef.current.stop();\n      } catch {}\n    }\n\n    audioChunksRef.current = [];\n    setStage(\"idle\");\n  }, [stage]);\n\n  // Clear messages\n  const clear = useCallback(() => {\n    setMessages([]);\n  }, []);\n\n  return {\n    messages,\n    startListening,\n    stopListening,\n    cancel,\n    clear,\n    isListening,\n    isProcessing,\n    isSpeaking,\n    stage,\n    isReady,\n    isLoading,\n    loadingMessage,\n    error,\n    load,\n  };\n}\n\n// ============================================\n// Utilities\n// ============================================\n\n/**\n * Check if WebGPU is supported\n */\nexport function isWebGPUSupported(): boolean {\n  if (typeof navigator === \"undefined\") {\n    return false;\n  }\n  return \"gpu\" in navigator;\n}\n\n/**\n * Get WebGPU adapter info\n */\nexport async function getWebGPUInfo(): Promise<{\n  supported: boolean;\n  adapter?: string;\n  device?: string;\n} | null> {\n  if (!isWebGPUSupported()) {\n    return { supported: false };\n  }\n\n  try {\n    const adapter = await (navigator as any).gpu.requestAdapter();\n    if (!adapter) {\n      return { supported: false };\n    }\n\n    const info = await adapter.requestAdapterInfo();\n    return {\n      supported: true,\n      adapter: info.vendor,\n      device: info.device,\n    };\n  } catch {\n    return { supported: false };\n  }\n}\n\nexport default {\n  isWebGPUSupported,\n  getWebGPUInfo,\n  createGerbilWorker,\n  playAudio,\n  createAudioPlayer,\n};\n"]
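Unlike the hooks, the trailing WebGPU helpers (`isWebGPUSupported`, `getWebGPUInfo`) ship without a JSDoc `@example`. A minimal feature-gating sketch, assuming both are re-exported from the `@tryhamster/gerbil/browser` entry point used by the module's other examples; `pickDevice` is a hypothetical helper, not a package export:

```ts
import { isWebGPUSupported, getWebGPUInfo } from "@tryhamster/gerbil/browser";

// Decide which backend to request before loading any model weights.
async function pickDevice(): Promise<"webgpu" | "wasm"> {
  // Cheap synchronous probe: only checks that navigator.gpu exists.
  if (!isWebGPUSupported()) return "wasm";

  // The async probe actually requests an adapter, which can still fail
  // on browsers that expose navigator.gpu but have the GPU disabled.
  const info = await getWebGPUInfo();
  if (info?.supported) {
    console.log(`WebGPU adapter: ${info.adapter ?? "unknown"}`);
    return "webgpu";
  }
  return "wasm";
}
```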
,aAAa,CAAC;;GAG3C,aAAa;AACX,WAAO,YAAY,EAAE,MAAM,SAAS,CAAC;;GAGvC,iBAAiB;AACf,WAAO,WAAW;AAClB,QAAI,gBAAgB,UAAU;;GAGhC,eAAe;GAChB;GACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuHJ,SAAgB,QAAQ,UAA0B,EAAE,EAAiB;CAEnE,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,+DAA+D;CAGjF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,kBAAkB,EAAE,EACpB,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAAoB,gBAAgB;CACpE,MAAM,CAAC,OAAO,YAAY,SAAiB,GAAG;CAC9C,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,iBAAiB,sBAAsB,SAAiB,GAAG;CAClE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAC/D,MAAM,CAAC,gBAAgB,qBAAqB,SAAmB,EAAE,CAAC;CAElE,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,eAAe,OAAe,EAAE;CACtC,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,qBAAoB,MAAc,IAAI,MAAM,KAAK;;GAGrD,kBAAkB;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;;GAExB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;AAGvB,iBAAgB;AACd,MAAI,CAAC,gBAAgB,iBAAiB;AACpC,gBAAa,SAAoB;AAE/B,QADgB,KAAK,GAAG,GAAG,EACd,SAAS,YACpB,QAAO,KAAK,KAAK,GAAY,MAC3B,MAAM,KAAK,SAAS,IAChB;KAAE,GAAG;KAAG,SAAS;KAAiB,UAAU,YAAY;KAAW,GACnE,EACL;AAEH,WAAO;KACP;AACF,sBAAmB,GAAG;AACtB,eAAY,GAAG;;IAEhB;EAAC;EAAc;EAAiB;EAAS,CAAC;CAG7C,MAAM,oBAAoB,OAAsB,KAAK;CACrD,MAAM,mBAAmB,OAAiB,EAAE,CAAC;CAG7C,MAAM,cAAc,aAAa,aAAqB;AACpD,qBAAmB,SAAmB,CAAC,GAAG,MAAM,SAAS,CAAC;IACzD,EAAE,CAAC;CAEN,MAAM,cAAc,aAAa,UAAkB;AACjD,qBAAmB,SAAmB,KAAK,QAAQ,GAAW,MAAc,MAAM,MAAM,CAAC;IACxF,EAAE,CAAC;CAEN,MAAM,cAAc,kBAAkB;AACpC,oBAAkB,EAAE,CAAC;IACpB,EAAE,CAAC;CAGN,MAAM,wBAAwB,aAC3B,MAAc,WAAqB;AAClC,MAAI,CAAC,KAAK,MAAM,IAAI,aAClB;AAGF,eAAa,WAAW;EACxB,MAAMC,cAAuB;GAC3B,IAAI,OAAO,aAAa;GACxB,MAAM;GACN,SAAS,KAAK,MAAM;GACpB,QAAQ,OAAO,SAAS,IAAI,SAAS;GACtC;AAED,eAAa,WAAW;EACxB,MAAMC,mBAA4B;GAChC,IAAI,OAAO,aAAa;GACxB,MAAM;GACN,SAAS;GACV;AAED,eAAa,SAAoB;GAAC,GAAG;GAAM;GAAa;GAAiB,CAAC;AAC1E,qBAAmB,GAAG;AACtB,cAAY,GAAG;AAGf,MAAI,CAAC,UAAU,SAAS;AACtB,qBAAkB,UAAU,KAAK,MAAM;AACvC,oBAAiB,UAAU;AAC3B,SAAM;AACN;;AAGF,kBAAgB,KAAK;AACrB,YAAU,QAAQ,SAAS,KAAK,MAAM,EAAE;GACtC;GACA,UAAU;GACV,WAAW,OAAO,SAAS,IAAI,KAAK,IAAI,WAAW,KAAK,GAAG;GAC3D;GACA,QAAQ,OAAO,SAAS,IAAI,SAAS;GACtC,CAAC;IAEJ;EAAC;EAAc;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACrE;CAED,MAAM,eAAe,aAClB,MAAwC;AACvC,KAAG,kBAAkB;AAErB,MAAI,CAAC,MAAM,MAAM,IAAI,aACnB;AAIF,wBAAsB,OAAO,eAAe;AAC5C,WAAS,GAAG;AACZ,oBAAkB,EAAE,CAAC;IAEvB;EAAC;EAAO;EAAc;EAAgB;EAAsB,CAC7D;CAGD,MAAM,iBAAiB,aACpB,MAAc,WAAqB;AAClC,wBAAsB,MAAM,OAAO;IAErC,CAAC,sBAAsB,CACxB;AAGD,iBAAgB;AACd,MAAI,WAAW,kBAAkB,WAAW,UAAU,SAAS;GAC7D,MAAM,iBAAiB,kBAAkB;GACzC,MAAM,gBAAgB,iBAAiB;AACvC,qBAAkB,UAAU;AAC5B,oBAAiB,UAAU,EAAE;AAC7B,mB
AAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,gBAAgB;IACzC;IACA,UAAU;IACV,WAAW,cAAc,SAAS,IAAI,KAAK,IAAI,WAAW,KAAK,GAAG;IAClE;IACA,QAAQ,cAAc,SAAS,IAAI,gBAAgB;IACpD,CAAC;;IAEH;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;CAE7D,MAAM,OAAO,kBAAkB;AAC7B,YAAU,SAAS,WAAW;AAC9B,kBAAgB,MAAM;IACrB,EAAE,CAAC;CAEN,MAAM,QAAQ,kBAAkB;AAC9B,YAAU,SAAS,OAAO;AAC1B,cAAY,EAAE,CAAC;AACf,qBAAmB,GAAG;AACtB,cAAY,GAAG;AACf,oBAAkB,EAAE,CAAC;IACpB,EAAE,CAAC;AAUN,QAAO;EACL,UARsB,SAAS,KAAK,GAAY,MAAc;AAC9D,OAAI,MAAM,SAAS,SAAS,KAAK,EAAE,SAAS,eAAe,aACzD,QAAO;IAAE,GAAG;IAAG,SAAS;IAAiB,UAAU,YAAY;IAAW;AAE5E,UAAO;IACP;EAIA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;AA4EH,SAAgB,cAAc,UAAgC,EAAE,EAAuB;CACrF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,qEAAqE;CAGvF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,YAAY,iBAAiB,SAAiB,GAAG;CACxD,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,aAAa,OAAwC,KAAK;CAChE,MAAM,YAAY,OAAsC,KAAK;CAC7D,MAAM,mBAAmB,OAAsB,KAAK;CACpD,MAAM,mBAAmB,OAA6B,OAAU;CAChE,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAEf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,gBAAe,MAAc,IAAI,MAAM,KAAK;;GAGhD,aAAa,WAAW;AACtB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;AACtB,eAAW,UAAU,OAAO,KAAK;AACjC,eAAW,UAAU;;GAEvB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;CAEvB,MAAM,WAAW,aACd,QAAgB,oBAAuD;AACtE,SAAO,IAAI,SAAS,SAAS,WAAW;AACtC,iBAAc,GAAG;AACjB,eAAY,GAAG;AACf,cAAW,UAAU;AACrB,aAAU,UAAU;AAGpB,OAAI,CAAC,UAAU,SAAS;AACtB,qBAAiB,UAAU;AAC3B,qBAAiB,UAAU,iBAAiB;AAC5C,UAAM;AACN;;AAGF,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,QAAQ;IACjC;IACA,UAAU;IACV;IACA;IACA,QAAQ,iBAAiB;IAC1B,CAAC;IACF;IAEJ;EAAC;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACvD;AAGD,iBAAgB;AACd,MAAI,WAAW,iBAAiB,WAAW,UAAU,SAAS;GAC5D,MAAM,gBAAgB,iBAAiB;GACvC,MAAM,gBAAgB,iBAAiB;AACvC,oBAAiB,UAAU;AAC3B,oBAAiB,UAAU;AAC3B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,eAAe;IACxC;IACA,UAAU;IACV;IACA;IACA,QAAQ;IACT,CAAC;;IAEH;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;AAO7D,QAAO;EACL;EACA;EACA;EACA;EACA;EACA;EACA,MAZW,kBAAkB;AAC7B,aAAU,SAAS,WAAW;AAC9B,mBAAgB,MAAM;KACrB,EAAE,CAAC;EAUJ;EACA;EACA;EACA;EACD;;;AA6BH,MAAMC,wBAA4C;CAChD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CA
CD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EAAE,IAAI;EAAY,MAAM;EAAS,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAgB;CAClG;;AAGD,MAAMC,4BAAgD;CACpD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACF;;AAGD,MAAMC,aAGF;CACF,cAAc;EACZ,MAAM;EACN,cAAc;EACd,YAAY;EACZ,QAAQ;EACT;CACD,kBAAkB;EAChB,MAAM;EACN,cAAc;EACd,YAAY;EACZ,QAAQ;EACT;CACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA0FD,SAAgB,UAAU,UAA4B,EAAE,EAAmB;CACzE,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,iEAAiE;CAGnF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,OAAO,UAAU,cACjB,OAAO,eAAe,GACtB,WAAW,OACX,SACA,SACA,SACA,UACE;CAGJ,MAAM,cAAc,WAAW;CAC/B,MAAM,eAAe,QAAQ,SAAS,YAAY;CAElD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAA6B,KAAK;CAChF,MAAM,CAAC,YAAY,iBAAiB,SAAkB,MAAM;CAC5D,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAC/D,MAAM,CAAC,cAAc,mBAAmB,SAAiB,aAAa;CACtE,MAAM,CAAC,cAAc,mBAAmB,SAAiB,aAAa;CAEtE,MAAM,SAAS,OAAY,KAAK;CAChC,MAAM,qBAAqB,uBAAkC,IAAI,KAAK,CAAC;CACvE,MAAM,kBAAkB,OAA4B,KAAK;CACzD,MAAM,gBAAgB,OAAqC,KAAK;CAChE,MAAM,aAAa,OAAgB,KAAK;CACxC,MAAM,aAAa,OAAmB,QAAQ;CAG9C,MAAM,aAAa,kBAAsC;AACvD,SAAO,YAAY;IAClB,CAAC,YAAY,OAAO,CAAC;CAGxB,MAAM,OAAO,kBAAkB;AAC7B,MAAI,OAAO,WAAW,UAAW;AACjC,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WAAY;AAEjB,aAAW,UAAU;AACrB,aAAW,UAAU;EAErB,MAAM,UAAU,YAAY;AAC1B,OAAI;IACF,MAAM,eAAe,YAAY;IACjC,MAAM,SAAS,WAAW;AAE1B,uBAAmB;KACjB,QAAQ;KACR,SAAS,WAAW,eAAe,eAAe,SAAS;KAC5D,CAAC;AAEF,QAAI,cAAc;KAEhB,MAAM,EAAE,aAAa,MAAM,OAAO;KAElC,MAAM,MAAM,MAAM,SAAS,kBAAkB,OAAO,MAAM;MACxD,QAAQ;MACR,oBAAoB,aAAkB;AACpC,WAAI,CAAC,WAAW,QAAS;AACzB,WAAI,SAAS,WAAW,cAAc,SAAS,KAC7C,oBAAmB;QACjB,QAAQ;QACR,MAAM,SAAS;QACf,UAAU,KAAK,MAAM,SAAS,YAAY,EAAE;QAC7C,CAAC;;MAGP,CAAC;AAEF,SAAI,CAAC,WAAW,QAAS;KAGzB,MAAM,YAAY,0BAA0B,OAAO,KAAK;KACxD,MAAM,gCAAgB,IAAI,KAA2B;AAGrD,WAAM,QAAQ,IACZ,OAAO,OAAO,IAAI,OAAO,UAAU;AACjC,UAAI;OACF,MAAM,WAAW,MAAM,MAAM,GAAG,YAAY,MAAM,GAAG,MAAM;AAC3D,WAAI,SAAS,IAAI;QACf,MAAM,SAAS,MAAM,SAAS,aAAa;AAC3C,sBAAc,IAAI,MAAM,IAAI,IAAI,aAAa,OAAO,CAAC;;eAEhD,GAAG;AACV,eAAQ,KAAK,sCAAsC,MAAM,GAAG,IAAI,EAAE;;OAEpE,CACH;AAED,SAAI,CAAC,WAAW,QAAS;AAGzB,
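`useVoiceInput` and `useVoiceChat` each carry an identical inline `blobToFloat32` helper that linearly interpolates decoded audio down to the 16 kHz Whisper expects. The same technique as a standalone sketch; `resampleTo16k` is a hypothetical name, not a package export:

```ts
// Linear-interpolation resampling, as done inside blobToFloat32: each
// output sample maps to a fractional source position and blends the two
// nearest input samples.
function resampleTo16k(input: Float32Array, inputRate: number): Float32Array {
  if (inputRate === 16000) return new Float32Array(input);
  const ratio = 16000 / inputRate;
  const out = new Float32Array(Math.round(input.length * ratio));
  for (let i = 0; i < out.length; i++) {
    const srcIndex = i / ratio;
    const floor = Math.floor(srcIndex);
    const ceil = Math.min(floor + 1, input.length - 1);
    const t = srcIndex - floor;
    out[i] = input[floor] * (1 - t) + input[ceil] * t;
  }
  return out;
}
```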
AM;AACjD,2BAAkB,UAAU;AAC5B,gBAAO;UACP;iBAEI;AACR,YAAI,WAAW,QACb,mBAAkB,MAAM;;;MAK9B,MAAM,YAAY,kBAAkB;AACpC,qBAAe,UAAU;AACzB,cAAQ,UAAU;AAClB;;KAIF,MAAM,YAAY,IAAI,KAAK,eAAe,SAAS,EAAE,MAAM,cAAc,CAAC;AAE1E,SAAI;AAEF,UAAI,CAAC,OAAO,SAAS;AACnB,WAAI,CAAC,WACH,eAAc,KAAK;AAGrB,aAAM,IAAI,SAAe,KAAK,QAAQ;QACpC,MAAM,aAAa,kBAAkB;AACnC,aAAI,OAAO,SAAS;AAClB,wBAAc,WAAW;AACzB,eAAK;;WAEN,IAAI;AACP,yBAAiB;AACf,uBAAc,WAAW;AACzB,6BAAI,IAAI,MAAM,gCAAgC,CAAC;WAC9C,IAAM;SACT;;AAQJ,cADa,MAAM,WAHD,MAAM,cAAc,UAAU,CAGR,CAC3B;cACNA,GAAQ;MACf,MAAM,SAAS,EAAE,WAAW;AAC5B,eAAS,OAAO;AAChB,gBAAU,OAAO;AACjB,aAAO,EAAE;;;AAIb,kBAAc,MAAM;KACpB;KACD;GACD;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,CAAC;EA+BA,iBA5BsB,kBAAkB;AAExC,OAAK,qBAA6B,MAChC,CAAC,qBAA6B,OAAO;AAEvC,OAAI,qBAAqB,SAAS;AAChC,iBAAa,qBAAqB,QAAQ;AAC1C,yBAAqB,UAAU;;AAGjC,OAAI,iBAAiB,WAAW,YAC9B,kBAAiB,QAAQ,MAAM;AAEjC,OAAI,UAAU,SAAS;AACrB,SAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,cAAU,UAAU;;AAEtB,kBAAe,UAAU,EAAE;AAC3B,oBAAiB,UAAU,EAAE;AAC7B,uBAAoB,UAAU;AAC9B,kBAAe,MAAM;KACpB,CAAC,YAAY,CAAC;EAMf;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2HH,SAAgB,aAAa,UAA+B,EAAE,EAAsB;CAClF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,oEAAoE;CAGtF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAQrD,MAAM,aAAa,QAAQ,YAAY;CACvC,MAAM,YAAY,WAAW;CAE7B,MAAM,EACJ,WAAW,cACX,WAAW,mBACX,SAAS,+EACT,WAAW,OACX,QAAQ,UAAU,cAClB,QAAQ,GACR,WAAW,OACX,aACA,kBACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAA6B,EAAE,CAAC;CAChE,MAAM,CAAC,OAAO,YAAY,SAExB,OAAO;CACT,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,gBAAgB,qBAAqB,SAAiB,GAAG;CAChE,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAG/D,MAAM,eAAe,OAAY,KAAK;CACtC,MAAM,SAAS,OAAY,KAAK;CAChC,MAAM,SAAS,OAAY,KAAK;CAChC,MAAM,mBAAmB,OAA6B,KAAK;CAC3D,MAAM,iBAAiB,OAAe,EAAE,CAAC;CACzC,MAAM,YAAY,OAA2B,KAAK;CAClD,MAAM,kBAAkB,OAA4B,KAAK;CACzD,MAAM,gBAAgB,OAAqC,KAAK;CAChE,MAAM,aAAa,OAAgB,KAAK;CACxC,MAAM,eAAe,OAAgB,MAAM;CAG3C,MAAM,cAAc,UAAU;CAC9B,MAAM,eAAe,UAAU,kBAAkB,UAAU;CAC3D,MAAM,aAAa,UAAU;AAG7B,iBAAgB;AACd,MAAI,CAAC,cAAc,QAAS;EAE5B,IAAI,YAAY;EAEhB,MAAM,aAAa,YAAY;AAC7B,OAAI;AACF,iBAAa,KAAK;AAClB,aAAS,KAAK;AAGd,sBAAkB,0CAA0C;IAC5D,MAAM,EAAE,eAAe,MAAM,OAAO;AACpC,QAAI,aAAa,CAAC,WAAW,QAAS;IAEtC,MAAM,MAAM,IAAI,WAAW,SAAS;AACpC,UAAM,IAAI,KAAK,EACb,aAAa,MAAW;AACtB,SAAI,CAAC,WAAW,QAAS;AACzB,uBAAkB,EAAE,UAAU,iBAAiB;OAElD,CAAC;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,SAAI,SAAS;AACb;;AAEF,WAAO,UAAU;AAGjB,sBAAkB,4BAA4B;IAC9C,MAAM,SAAS,MAAM,mBAAmB;KACtC,SAAS;KACT,aAAa,MAAM;AACjB,UAAI,CAAC,WAAW,QAAS;AACzB,wBAAkB,EAAE,WAAW,iBAAiB;;KAEnD,CAAC;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,YAAO,WAAW;AAClB;;AAEF,iBAAa,UAAU;AAIvB,sBAAkB,2BADG,eAAe,mBACwB,eAAe,SAAS,MAAM;IAE1F,MAAM,EAAE,cAAc,MAAM,OAAO;AACnC,QAAI,aAAa,CAAC,WAAW,QAAS;IAEtC,MAAM,MAAM,UAAU,WAAW;AACjC,UAAM,IAAI,KAAK,EACb,aAAa,MAAW;AACtB,SAAI,CAAC,WAAW,QAAS;AACzB,uBAAkB,EAAE,UAAU,iBAAiB;OAElD,CAAC;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,WAAM,IAAI,SAAS;AACnB;;AAEF,WAAO,UAAU;AAEjB,eAAW,KAAK;AAChB,iBAAa,MAAM;AACnB,sBAAkB,SAAS;YACpBA,GAAQ;AACf,QAAI,CAAC,WAAW,QAAS;IACzB,MAAM,SAAS,EAAE,WAAW;AAC5B,aAAS,OAAO;AAChB,iBAAa,MAAM;AACnB,cAAU,OAAO;;;AAIrB,cAAY;AAEZ,eAAa;AACX,eAAY;;IAEb;EAAC;EAAY;EAAS;EAAU;EAAU;EAAY;EAAQ,CAAC;AAGlE,iBAAgB;AACd,aAAW,UAAU;AACrB,eAAa;AACX,cAAW,UAAU;AACrB,gBAAa,SAAS,WAAW;AACjC,UAAO,SAAS,SAAS;AACzB,UAAO,SAAS,SAAS;AACzB,OAAI,UAAU,QACZ,MAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAGhB,mBAAgB,SAAS,OAAO;;IAEjC,EAAE,CAAC;CAGN,MAAM,OAAO,kBAAkB;AAC7B,MAAI,CAAC,cAAc,CAAC,WAAW,CAAC,UAC9B,eAAc,KAAK;IAEpB;EAAC;EAAY;EAAS;EAAU,CAAC;CAGpC
,MAAM,gBAAgB,YAAY,OAAO,SAAsC;EAC7E,MAAM,eAAe,IAAI,aAAa,EAAE,YAAY,MAAO,CAAC;EAC5D,MAAM,cAAc,MAAM,KAAK,aAAa;EAC5C,MAAM,cAAc,MAAM,aAAa,gBAAgB,YAAY;EACnE,MAAM,cAAc,YAAY,eAAe,EAAE;AAEjD,MAAI,YAAY,eAAe,MAAO;GACpC,MAAM,QAAQ,OAAQ,YAAY;GAClC,MAAM,YAAY,KAAK,MAAM,YAAY,SAAS,MAAM;GACxD,MAAM,YAAY,IAAI,aAAa,UAAU;AAC7C,QAAK,IAAI,IAAI,GAAG,IAAI,WAAW,KAAK;IAClC,MAAM,WAAW,IAAI;IACrB,MAAM,QAAQ,KAAK,MAAM,SAAS;IAClC,MAAM,OAAO,KAAK,IAAI,QAAQ,GAAG,YAAY,SAAS,EAAE;IACxD,MAAM,IAAI,WAAW;AACrB,cAAU,KAAK,YAAY,UAAU,IAAI,KAAK,YAAY,QAAQ;;AAEpE,gBAAa,OAAO;AACpB,UAAO;;AAGT,eAAa,OAAO;AACpB,SAAO,IAAI,aAAa,YAAY;IACnC,EAAE,CAAC;CAGN,MAAM,kBAAkB,YACtB,OAAO,OAAqB,eAAsC;AAChE,SAAO,IAAI,SAAS,YAAY;AAC9B,OAAI,CAAC,gBAAgB,QACnB,iBAAgB,UAAU,IAAI,cAAc;GAE9C,MAAM,MAAM,gBAAgB;GAE5B,MAAM,SAAS,IAAI,aAAa,GAAG,MAAM,QAAQ,WAAW;GAC5D,MAAM,cAAc,IAAI,aAAa,MAAM;AAC3C,UAAO,cAAc,aAAa,EAAE;GAEpC,MAAM,SAAS,IAAI,oBAAoB;AACvC,UAAO,SAAS;AAChB,UAAO,QAAQ,IAAI,YAAY;AAC/B,UAAO,gBAAgB;AACrB,QAAI,WAAW,QACb,UAAS;;AAGb,UAAO,OAAO;AACd,iBAAc,UAAU;IACxB;IAEJ,EAAE,CACH;AA+MD,QAAO;EACL;EACA,gBA9MqB,YAAY,YAAY;AAC7C,OAAI,UAAU,OAAQ;AAGtB,OAAI,CAAC,WAAW,CAAC,WAAW;AAC1B,kBAAc,KAAK;AACnB;;AAGF,gBAAa,UAAU;AAEvB,OAAI;IACF,MAAM,SAAS,MAAM,UAAU,aAAa,aAAa,EACvD,OAAO;KAAE,YAAY;KAAO,cAAc;KAAG,kBAAkB;KAAM,EACtE,CAAC;AAEF,cAAU,UAAU;AACpB,mBAAe,UAAU,EAAE;IAE3B,MAAM,gBAAgB,IAAI,cAAc,OAAO;AAC/C,qBAAiB,UAAU;AAE3B,kBAAc,mBAAmB,UAAU;AACzC,SAAI,MAAM,KAAK,OAAO,EACpB,gBAAe,QAAQ,KAAK,MAAM,KAAK;;AAI3C,kBAAc,MAAM,IAAI;AACxB,aAAS,YAAY;AACrB,aAAS,KAAK;YACPA,GAAQ;IACf,MAAM,SAAS,EAAE,WAAW;AAC5B,aAAS,OAAO;AAChB,cAAU,OAAO;;KAElB;GAAC;GAAO;GAAS;GAAW;GAAQ,CAAC;EA2KtC,eAxKoB,YAAY,YAAY;AAC5C,OAAI,UAAU,YAAa;GAE3B,MAAM,gBAAgB,iBAAiB;AACvC,OAAI,CAAC,cAAe;AAEpB,UAAO,IAAI,SAAe,YAAY;AACpC,kBAAc,SAAS,YAAY;AAEjC,SAAI,UAAU,SAAS;AACrB,WAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,gBAAU,UAAU;;AAGtB,SAAI,aAAa,SAAS;AACxB,eAAS,OAAO;AAChB,eAAS;AACT;;KAGF,MAAM,YAAY,IAAI,KAAK,eAAe,SAAS,EAAE,MAAM,cAAc,CAAC;AAE1E,SAAI;AAEF,eAAS,eAAe;MACxB,MAAM,YAAY,MAAM,cAAc,UAAU;MAEhD,IAAI,YADc,MAAM,OAAO,QAAQ,WAAW,UAAU,EACnC,KAAK,MAAM;AAGpC,UACE,aAAa,mBACb,aAAa,mBACb,aAAa,gBAEb,YAAW;AAGb,UAAI,aAAa,WAAW,CAAC,UAAU;AACrC,gBAAS,OAAO;AAChB,gBAAS;AACT;;MAIF,MAAM,YAAY,QAAQ,KAAK,KAAK;AACpC,mBAAa,MAAM,CAAC,GAAG,GAAG;OAAE,IAAI;OAAW,MAAM;OAAQ,SAAS;OAAU,CAAC,CAAC;AAC9E,oBAAc,SAAS;AAGvB,eAAS,WAAW;MAGpB,MAAM,UAAU,SAAS,KAAK,OAAO;OACnC,MAAM,EAAE;OACR,SAAS,EAAE;OACZ,EAAE;AACH,cAAQ,KAAK;OAAE,MAAM;OAAQ,SAAS;OAAU,CAAC;MAEjD,IAAI,eAAe;MACnB,IAAI,eAAe;AAEnB,YAAM,aAAa,QAAQ,SAAS,UAAU;OAC5C;OACA;OACA;OACA,UAAU,UAAuB;AAC/B,YAAI,aAAa,QAAS;AAC1B,YAAI,MAAM,UAAU,WAClB,iBAAgB,MAAM;YAEtB,iBAAgB,MAAM;;OAG3B,CAAC;AAEF,UAAI,aAAa,SAAS;AACxB,gBAAS,OAAO;AAChB,gBAAS;AACT;;MAIF,MAAM,iBAAiB,aAAa,KAAK,KAAK;AAC9C,mBAAa,MAAM,CACjB,GAAG,GACH;OACE,IAAI;OACJ,MAAM;OACN,SAAS;OACT,UAAU,gBAAgB;OAC3B,CACF,CAAC;AACF,yBAAmB,aAAa;AAGhC,UAAI,aAAa,MAAM,EAAE;AACvB,gBAAS,WAAW;OACpB,MAAM,YAAY,MAAM,OAAO,QAAQ,MAAM,cAAc;QAAE;QAAO;QAAO,CAAC;AAE5E,WAAI,CAAC,aAAa,QAChB,OAAM,gBAAgB,UAAU,OAAO,UAAU,WAAW;;AAIhE,eAAS,OAAO;AAChB,eAAS;cACFA,GAAQ;AACf,UAAI,CAAC,WAAW,QAAS;MACzB,MAAM,SAAS,EAAE,WAAW;AAC5B,eAAS,OAAO;AAChB,eAAS,OAAO;AAChB,gBAAU,OAAO;AACjB,eAAS;;;AAIb,kBAAc,MAAM;KACpB;KACD;GACD;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,CAAC;EAoCA,QAjCa,kBAAkB;AAC/B,gBAAa,UAAU;AAEvB,OAAI,iBAAiB,WAAW,UAAU,YACxC,kBAAiB,QAAQ,MAAM;AAGjC,OAAI,UAAU,SAAS;AACrB,SAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,cAAU,UAAU;;AAGtB,OAAI,cAAc,QAChB,KAAI;AACF,kBAAc,QAAQ,MAAM;WACtB;AAGV,kBAAe,UAAU,EAAE;AAC3B,YAAS,OAAO;KACf,CAAC,MAAM,CAAC;EAYT,OATY,kBAAkB;AAC9B,eAAY,EAAE,CAAC;KACd,EAAE,CAAC;EAQJ;EACA;EACA;EACA;EACA;EA
CA;EACA;EACA;EACA;EACD;;;;;AAUH,SAAgB,oBAA6B;AAC3C,KAAI,OAAO,cAAc,YACvB,QAAO;AAET,QAAO,SAAS;;;;;AAMlB,eAAsB,gBAIZ;AACR,KAAI,CAAC,mBAAmB,CACtB,QAAO,EAAE,WAAW,OAAO;AAG7B,KAAI;EACF,MAAM,UAAU,MAAO,UAAkB,IAAI,gBAAgB;AAC7D,MAAI,CAAC,QACH,QAAO,EAAE,WAAW,OAAO;EAG7B,MAAM,OAAO,MAAM,QAAQ,oBAAoB;AAC/C,SAAO;GACL,WAAW;GACX,SAAS,KAAK;GACd,QAAQ,KAAK;GACd;SACK;AACN,SAAO,EAAE,WAAW,OAAO;;;AAI/B,sBAAe;CACb;CACA;CACA;CACA;CACA;CACD"}
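For orientation: the source maps in this diff inline their TypeScript sources (the deleted map below embeds `src/core/cache.ts` and `src/core/gerbil.ts`). The following is a minimal usage sketch assembled from the `@example` blocks in those embedded doc comments — the root import path is an assumption; the `Gerbil` class, method names, and option fields are taken from the embedded sources:

```ts
// Usage sketch only — the import path assumes the package root re-exports
// the Gerbil class from src/core/gerbil.ts (shown in the source maps).
import { Gerbil } from "@tryhamster/gerbil";

const g = new Gerbil();

// "qwen3-0.6b" is the documented default model id.
await g.loadModel("qwen3-0.6b");

// cache: true opts into the LRU + TTL response cache from src/core/cache.ts.
const result = await g.generate("Hello!", { maxTokens: 64, cache: true });
console.log(result.text, `${result.tokensPerSecond.toFixed(1)} tok/s`);

// speak() lazily loads the TTS backend; audio is Float32Array PCM.
const speech = await g.speak("Hello world", { voice: "af_bella" });
console.log(speech.sampleRate, speech.duration);

await g.dispose();
await Gerbil.shutdown(); // closes the shared Chrome WebGPU backend (Node.js)
```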
package/dist/gerbil-DeQlX_Mt.mjs
DELETED
@@ -1 +0,0 @@
-
{"version":3,"file":"gerbil-yoSpRHgv.mjs","names":["globalCache: ResponseCache | null","pipeline","rawPipeline","KOKORO_VOICES_DEFAULT: VoiceInfo[]","isBrowser","tfDevice: \"webgpu\" | \"wasm\" | \"cpu\"","chromeErr: any","result","result: GenerateResult","tokenQueue: string[]","resolveNext: ((value: string | null) => void) | null","content: Array<{ type: string; text?: string }>","text","results: EmbedResult[]","messages: Array<{ role: string; content: string }>"],"sources":["../src/core/cache.ts","../src/core/gerbil.ts"],"sourcesContent":["/**\n * Response Cache for Gerbil\n *\n * LRU cache with TTL expiration for caching inference responses.\n * Enables instant responses for repeated prompts.\n */\n\nimport type { GenerateResult } from \"./types.js\";\n\n// ============================================\n// Types\n// ============================================\n\ntype CacheEntry = {\n result: GenerateResult;\n createdAt: number;\n ttl: number;\n};\n\ntype CacheStats = {\n hits: number;\n misses: number;\n size: number;\n maxSize: number;\n};\n\n// ============================================\n// Cache Key Generation\n// ============================================\n\n/**\n * Generate a deterministic cache key from prompt and options.\n * Key includes all parameters that affect the output.\n */\nexport function generateCacheKey(\n prompt: string,\n modelId: string,\n options: {\n maxTokens?: number;\n temperature?: number;\n topP?: number;\n topK?: number;\n system?: string;\n thinking?: boolean;\n },\n): string {\n const keyParts = [\n prompt,\n modelId,\n options.maxTokens ?? 256,\n options.temperature ?? 0.7,\n options.topP ?? 0.9,\n options.topK ?? 50,\n options.system ?? \"\",\n options.thinking ?? false,\n ];\n\n // Simple hash function for cache key\n const str = JSON.stringify(keyParts);\n let hash = 0;\n for (let i = 0; i < str.length; i++) {\n const char = str.charCodeAt(i);\n hash = (hash << 5) - hash + char;\n hash = hash & hash; // Convert to 32bit integer\n }\n return `gerbil:${hash.toString(16)}`;\n}\n\n// ============================================\n// Response Cache\n// ============================================\n\n/**\n * LRU cache with TTL expiration for inference responses.\n */\nexport class ResponseCache {\n private cache: Map<string, CacheEntry> = new Map();\n private maxSize: number;\n private defaultTtl: number;\n private hits = 0;\n private misses = 0;\n\n /**\n * Create a new response cache.\n * @param maxSize Maximum number of entries (default: 100)\n * @param defaultTtl Default TTL in ms (default: 5 minutes)\n */\n constructor(maxSize = 100, defaultTtl = 5 * 60 * 1000) {\n this.maxSize = maxSize;\n this.defaultTtl = defaultTtl;\n }\n\n /**\n * Get a cached response if it exists and hasn't expired.\n */\n get(key: string): GenerateResult | null {\n const entry = this.cache.get(key);\n\n if (!entry) {\n this.misses++;\n return null;\n }\n\n // Check if expired\n if (Date.now() - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n this.misses++;\n return null;\n }\n\n // Move to end for LRU (delete and re-add)\n this.cache.delete(key);\n this.cache.set(key, entry);\n\n this.hits++;\n return { ...entry.result, cached: true };\n }\n\n /**\n * Store a response in the cache.\n */\n set(key: string, result: GenerateResult, ttl?: number): void {\n // Evict oldest entries if at capacity\n while (this.cache.size >= this.maxSize) {\n const firstKey = this.cache.keys().next().value;\n if (firstKey) {\n this.cache.delete(firstKey);\n }\n }\n\n this.cache.set(key, 
{\n result,\n createdAt: Date.now(),\n ttl: ttl ?? this.defaultTtl,\n });\n }\n\n /**\n * Check if a key exists and is not expired.\n */\n has(key: string): boolean {\n const entry = this.cache.get(key);\n if (!entry) return false;\n\n if (Date.now() - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n return false;\n }\n\n return true;\n }\n\n /**\n * Remove a specific key from the cache.\n */\n delete(key: string): boolean {\n return this.cache.delete(key);\n }\n\n /**\n * Clear all entries from the cache.\n */\n clear(): void {\n this.cache.clear();\n this.hits = 0;\n this.misses = 0;\n }\n\n /**\n * Remove all expired entries.\n */\n prune(): number {\n const now = Date.now();\n let pruned = 0;\n\n for (const [key, entry] of this.cache) {\n if (now - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n pruned++;\n }\n }\n\n return pruned;\n }\n\n /**\n * Get cache statistics.\n */\n getStats(): CacheStats {\n return {\n hits: this.hits,\n misses: this.misses,\n size: this.cache.size,\n maxSize: this.maxSize,\n };\n }\n\n /**\n * Get hit rate as a percentage.\n */\n getHitRate(): number {\n const total = this.hits + this.misses;\n if (total === 0) return 0;\n return (this.hits / total) * 100;\n }\n}\n\n// ============================================\n// Global Cache Instance\n// ============================================\n\nlet globalCache: ResponseCache | null = null;\n\n/**\n * Get the global response cache instance.\n * Creates one if it doesn't exist.\n */\nexport function getGlobalCache(): ResponseCache {\n if (!globalCache) {\n globalCache = new ResponseCache();\n }\n return globalCache;\n}\n\n/**\n * Configure the global cache with custom settings.\n */\nexport function configureGlobalCache(maxSize?: number, defaultTtl?: number): ResponseCache {\n globalCache = new ResponseCache(maxSize, defaultTtl);\n return globalCache;\n}\n\n/**\n * Clear and reset the global cache.\n */\nexport function clearGlobalCache(): void {\n if (globalCache) {\n globalCache.clear();\n }\n}\n","/**\n * Gerbil - Local GPU-accelerated LLM inference\n */\n\nimport {\n AutoModelForCausalLM,\n AutoModelForImageTextToText,\n AutoProcessor,\n AutoTokenizer,\n env,\n type FeatureExtractionPipeline,\n type PreTrainedTokenizer,\n RawImage,\n pipeline as rawPipeline,\n type TextGenerationPipeline,\n TextStreamer,\n} from \"@huggingface/transformers\";\n\n// Wrapper to avoid TypeScript complexity issues with transformers.js types\nconst pipeline = rawPipeline as (task: string, model: string, options?: any) => Promise<any>;\n\n// Suppress noisy transformers.js warnings during model loading\nfunction suppressNoisyWarnings<T>(fn: () => Promise<T>): Promise<T> {\n const originalWarn = console.warn;\n console.warn = (...args: any[]) => {\n const msg = args[0]?.toString?.() || \"\";\n // Suppress \"Unable to determine content-length\" warnings from transformers.js\n if (msg.includes(\"content-length\") || msg.includes(\"Unable to determine\")) {\n return;\n }\n originalWarn.apply(console, args);\n };\n\n return fn().finally(() => {\n console.warn = originalWarn;\n });\n}\n\nimport { generateCacheKey, getGlobalCache } from \"./cache.js\";\nimport {\n BUILTIN_MODELS,\n createExternalModelConfig,\n fetchModelContextLength,\n getModelConfig,\n resolveModel,\n} from \"./models.js\";\nimport type {\n AudioChunk,\n EmbedOptions,\n EmbedResult,\n GenerateOptions,\n GenerateResult,\n GerbilConfig,\n JsonOptions,\n LoadOptions,\n LoadSTTOptions,\n LoadTTSOptions,\n ModelConfig,\n SessionStats,\n SpeakOptions,\n 
SpeakResult,\n STTModelConfig,\n StreamingTranscriptionOptions,\n StreamingTranscriptionSession,\n SystemInfo,\n TranscribeOptions,\n TranscribeResult,\n VoiceInfo,\n} from \"./types.js\";\n\n// TTS types for lazy loading\ntype KokoroTTSType = import(\"./tts.js\").KokoroTTS;\ntype SupertonicTTSType = import(\"./tts.js\").SupertonicTTS;\ntype TTSBackendType = KokoroTTSType | SupertonicTTSType;\n\n// STT type for lazy loading\ntype WhisperSTTType = import(\"./stt.js\").WhisperSTT;\n\n// Default voices for listVoices() when TTS not loaded\nconst KOKORO_VOICES_DEFAULT: VoiceInfo[] = [\n {\n id: \"af_bella\",\n name: \"Bella\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, warm and friendly\",\n },\n {\n id: \"af_sarah\",\n name: \"Sarah\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, clear and professional\",\n },\n {\n id: \"af_nicole\",\n name: \"Nicole\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, soft and gentle\",\n },\n {\n id: \"af_sky\",\n name: \"Sky\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, young and energetic\",\n },\n {\n id: \"am_adam\",\n name: \"Adam\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, deep and confident\",\n },\n {\n id: \"am_michael\",\n name: \"Michael\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, warm and friendly\",\n },\n {\n id: \"bf_emma\",\n name: \"Emma\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, elegant and clear\",\n },\n {\n id: \"bf_isabella\",\n name: \"Isabella\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, sophisticated\",\n },\n {\n id: \"bm_george\",\n name: \"George\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, distinguished\",\n },\n {\n id: \"bm_lewis\",\n name: \"Lewis\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, friendly and warm\",\n },\n];\n\nimport { extractJson, zodToJsonSchema } from \"./utils.js\";\n\n// Configure transformers.js based on environment\nconst isBrowser = typeof window !== \"undefined\";\nenv.allowLocalModels = !isBrowser; // false in browser (fetch from HuggingFace)\nenv.useBrowserCache = isBrowser; // true in browser (cache in IndexedDB)\n\n// ============================================\n// Gerbil Class\n// ============================================\n\n// WebGPU initialization state for Node.js\nlet webgpuInitialized = false;\nlet webgpuAvailable = false;\n\n/**\n * Initialize WebGPU for Node.js environments\n * Called automatically before model loading\n */\nasync function initNodeWebGPU(): Promise<boolean> {\n if (webgpuInitialized) {\n return webgpuAvailable;\n }\n webgpuInitialized = true;\n\n // Skip if in browser (already has WebGPU)\n if (typeof window !== \"undefined\") {\n webgpuAvailable = \"gpu\" in navigator;\n return webgpuAvailable;\n }\n\n // Try to initialize WebGPU in Node.js via Dawn\n // Use Function constructor to hide import from bundlers\n try {\n const dynamicImport = new Function(\"specifier\", \"return import(specifier)\");\n const webgpuModule = await dynamicImport(\"webgpu\");\n const { create, globals } = webgpuModule;\n\n // Extend globalThis with WebGPU globals\n Object.assign(globalThis, globals);\n\n // Create navigator.gpu\n if (!(globalThis as any).navigator) {\n (globalThis as any).navigator = {};\n }\n (globalThis as any).navigator.gpu = 
create([]);\n\n webgpuAvailable = true;\n } catch {\n // WebGPU not available, will fall back to CPU\n webgpuAvailable = false;\n }\n\n return webgpuAvailable;\n}\n\n// ChromeGPUBackend is dynamically imported only in Node.js to avoid bundling puppeteer in browser\ntype ChromeGPUBackendType = import(\"./chrome-backend.js\").ChromeGPUBackend;\n\nexport class Gerbil {\n private generator: TextGenerationPipeline | null = null;\n private tokenizer: PreTrainedTokenizer | null = null;\n private model: any = null; // AutoModelForCausalLM instance\n private embedder: FeatureExtractionPipeline | null = null;\n private currentModel: string | null = null;\n private modelConfig: ModelConfig | null = null;\n private readonly config: GerbilConfig;\n private stats: SessionStats;\n private useDirect = false; // Use direct model loading (for WebGPU)\n private chromeBackend: ChromeGPUBackendType | null = null; // Chrome backend for Node.js WebGPU\n private _deviceMode: \"webgpu\" | \"cpu\" | \"wasm\" = \"cpu\"; // Track which backend is active\n\n // Vision model components\n private processor: any = null; // AutoProcessor for vision models\n private visionModel: any = null; // AutoModelForImageTextToText instance\n private isVisionModel = false; // Whether current model supports vision\n\n constructor(config: GerbilConfig = {}) {\n this.config = config;\n this.stats = {\n prompts: 0,\n tokensIn: 0,\n tokensOut: 0,\n avgSpeed: 0,\n totalTime: 0,\n cacheHits: 0,\n cacheMisses: 0,\n };\n }\n\n // ============================================\n // Static Methods\n // ============================================\n\n static listModels(): ModelConfig[] {\n return Object.values(BUILTIN_MODELS);\n }\n\n static getModel(modelId: string): ModelConfig | undefined {\n return BUILTIN_MODELS[modelId];\n }\n\n // ============================================\n // Model Loading\n // ============================================\n\n /**\n * Load a model\n *\n * @example\n * ```ts\n * // Built-in model\n * await g.loadModel(\"qwen3-0.6b\");\n *\n * // HuggingFace model\n * await g.loadModel(\"hf:microsoft/Phi-3-mini\");\n *\n * // Local model\n * await g.loadModel(\"file:./models/my-model\");\n *\n * // Vision model\n * await g.loadModel(\"ministral-3b\");\n * ```\n */\n async loadModel(modelId = \"qwen3-0.6b\", options: LoadOptions = {}): Promise<void> {\n // Dispose any existing model/backend before loading a new one\n // This prevents zombie Chrome pages when switching models\n if (this.isLoaded()) {\n await this.dispose();\n }\n\n // Initialize WebGPU for Node.js if needed\n await initNodeWebGPU();\n\n const source = resolveModel(modelId);\n const { onProgress, device = \"auto\", dtype: userDtype } = options;\n\n // Get or create model config\n let config = getModelConfig(modelId);\n if (!config) {\n // Try to fetch actual context length from HuggingFace config.json\n const contextLength = await fetchModelContextLength(source.path).catch(() => null);\n config = createExternalModelConfig(modelId, source.path, contextLength || undefined);\n }\n\n // Route to vision model loading if needed\n if (config.supportsVision) {\n return this.loadVisionModel(modelId, source.path, config, options);\n }\n\n onProgress?.({ status: `Loading ${modelId}...` });\n\n // Map device to transformers.js device\n // Browser supports: webgpu, wasm (no cpu)\n // Node supports: webgpu, cpu\n const isBrowser = typeof window !== \"undefined\";\n const fallbackDevice = isBrowser ? 
\"wasm\" : \"cpu\";\n let tfDevice: \"webgpu\" | \"wasm\" | \"cpu\" = fallbackDevice;\n if (device === \"webgpu\" || device === \"gpu\" || device === \"auto\") {\n tfDevice = \"webgpu\";\n }\n\n // Use q4f16 for WebGPU (required for Qwen3), q4 for CPU/WASM\n const dtype = userDtype ?? (tfDevice === \"webgpu\" ? \"q4f16\" : \"q4\");\n\n // Track if we're still in loading phase (to suppress progress during inference)\n let isLoading = true;\n let lastFile = \"\";\n let lastPct = -1;\n\n const progressCallback = (progress: any) => {\n if (!isLoading) {\n return; // Suppress progress after initial load\n }\n\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n // Only report if file changed or progress increased significantly\n if (progress.file !== lastFile || pct >= lastPct + 5) {\n lastFile = progress.file;\n lastPct = pct;\n onProgress?.({\n status: `Downloading ${progress.file}`,\n progress: pct,\n file: progress.file,\n });\n }\n }\n };\n\n try {\n // Use direct model loading for browser WebGPU (like qwen-web does)\n // This bypasses pipeline() which may have different ONNX session config\n if (isBrowser && tfDevice === \"webgpu\") {\n onProgress?.({ status: \"Loading tokenizer...\" });\n this.tokenizer = (await suppressNoisyWarnings(() =>\n AutoTokenizer.from_pretrained(source.path, {\n progress_callback: progressCallback,\n }),\n )) as PreTrainedTokenizer;\n\n onProgress?.({ status: \"Loading model...\" });\n this.model = await suppressNoisyWarnings(() =>\n AutoModelForCausalLM.from_pretrained(source.path, {\n dtype,\n device: tfDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.useDirect = true;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: \"Ready (WebGPU)!\" });\n } else if (!isBrowser && tfDevice === \"webgpu\") {\n // Node.js + WebGPU: Use Chrome backend for real GPU acceleration\n onProgress?.({ status: \"Starting Chrome WebGPU backend...\" });\n\n // Dynamic import to avoid bundling puppeteer in browser builds\n const { ChromeGPUBackend } = await import(\"./chrome-backend.js\");\n this.chromeBackend = await ChromeGPUBackend.create({\n modelId: source.path,\n contextLength: config.contextLength,\n onProgress,\n });\n\n this.useDirect = false;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n // Ready status is set by ChromeGPUBackend\n } else {\n // Use pipeline for CPU / WASM\n const pipelineOptions = {\n dtype,\n device: tfDevice,\n progress_callback: progressCallback,\n };\n this.generator = (await suppressNoisyWarnings(() =>\n pipeline(\"text-generation\", source.path, pipelineOptions as any),\n )) as TextGenerationPipeline;\n\n this.useDirect = false;\n this._deviceMode = tfDevice as \"cpu\" | \"wasm\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (${tfDevice.toUpperCase()})!` });\n }\n } catch (err) {\n // Fallback to CPU/WASM if GPU fails (silently)\n if (tfDevice !== fallbackDevice) {\n onProgress?.({ status: `Using ${fallbackDevice.toUpperCase()}...` });\n\n // Clean up Chrome backend if it was partially initialized\n if (this.chromeBackend) {\n await this.chromeBackend.dispose();\n this.chromeBackend = null;\n }\n\n // Fallback always uses pipeline (WASM/CPU don't need direct loading)\n 
this.generator = (await suppressNoisyWarnings(() =>\n pipeline(\"text-generation\", source.path, {\n dtype: \"q4\",\n device: fallbackDevice,\n progress_callback: progressCallback,\n } as any),\n )) as TextGenerationPipeline;\n\n this.useDirect = false;\n this._deviceMode = fallbackDevice as \"cpu\" | \"wasm\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (${fallbackDevice.toUpperCase()})!` });\n } else {\n throw err;\n }\n }\n }\n\n /**\n * Load a vision model (VLM)\n * Uses AutoProcessor + AutoModelForImageTextToText instead of tokenizer + causal LM\n */\n private async loadVisionModel(\n modelId: string,\n repoPath: string,\n config: ModelConfig,\n options: LoadOptions = {},\n ): Promise<void> {\n const { onProgress, device = \"auto\" } = options;\n\n onProgress?.({ status: `Loading ${modelId} (vision model)...` });\n\n const isBrowser = typeof window !== \"undefined\";\n const fallbackDevice = isBrowser ? \"wasm\" : \"cpu\";\n let tfDevice: \"webgpu\" | \"wasm\" | \"cpu\" = fallbackDevice;\n if (device === \"webgpu\" || device === \"gpu\" || device === \"auto\") {\n tfDevice = \"webgpu\";\n }\n\n // Node.js + WebGPU: Use Chrome backend for GPU acceleration\n if (!isBrowser && tfDevice === \"webgpu\") {\n onProgress?.({ status: \"Starting Chrome WebGPU backend (vision)...\" });\n\n // Dynamic import to avoid bundling puppeteer in browser builds\n const { ChromeGPUBackend } = await import(\"./chrome-backend.js\");\n this.chromeBackend = await ChromeGPUBackend.create({\n modelId: repoPath,\n contextLength: config.contextLength,\n isVision: true, // Enable vision mode in Chrome backend\n onProgress,\n });\n\n this.useDirect = false;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = true;\n this.currentModel = modelId;\n this.modelConfig = config;\n // Ready status is set by ChromeGPUBackend\n return;\n }\n\n // Browser or CPU/WASM: Load directly\n let lastFile = \"\";\n let lastPct = -1;\n\n const progressCallback = (progress: any) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n if (progress.file !== lastFile || pct >= lastPct + 5) {\n lastFile = progress.file;\n lastPct = pct;\n onProgress?.({\n status: `Downloading ${progress.file}`,\n progress: pct,\n file: progress.file,\n });\n }\n }\n };\n\n try {\n // Load processor (handles both tokenization and image preprocessing)\n onProgress?.({ status: \"Loading processor...\" });\n this.processor = await suppressNoisyWarnings(() =>\n AutoProcessor.from_pretrained(repoPath, {\n progress_callback: progressCallback,\n }),\n );\n\n // Load vision model\n onProgress?.({ status: \"Loading vision model...\" });\n this.visionModel = await suppressNoisyWarnings(() =>\n AutoModelForImageTextToText.from_pretrained(repoPath, {\n device: tfDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.isVisionModel = true;\n this.useDirect = true;\n this._deviceMode = tfDevice === \"webgpu\" ? 
\"webgpu\" : (tfDevice as \"cpu\" | \"wasm\");\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (Vision, ${tfDevice.toUpperCase()})!` });\n } catch (err) {\n // Fallback to CPU/WASM if GPU fails\n if (tfDevice !== fallbackDevice) {\n onProgress?.({ status: `Vision model: Using ${fallbackDevice.toUpperCase()}...` });\n\n this.processor = await suppressNoisyWarnings(() =>\n AutoProcessor.from_pretrained(repoPath, {\n progress_callback: progressCallback,\n }),\n );\n\n this.visionModel = await suppressNoisyWarnings(() =>\n AutoModelForImageTextToText.from_pretrained(repoPath, {\n device: fallbackDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.isVisionModel = true;\n this.useDirect = true;\n this._deviceMode = fallbackDevice as \"cpu\" | \"wasm\";\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (Vision, ${fallbackDevice.toUpperCase()})!` });\n } else {\n throw err;\n }\n }\n }\n\n /**\n * Check if a model is loaded\n */\n isLoaded(): boolean {\n return (\n this.generator !== null ||\n (this.useDirect && this.model !== null) ||\n this.chromeBackend !== null ||\n (this.isVisionModel && this.visionModel !== null)\n );\n }\n\n /**\n * Check if current model supports vision\n */\n supportsVision(): boolean {\n return this.isVisionModel && this.modelConfig?.supportsVision === true;\n }\n\n /**\n * Get current model info\n */\n getModelInfo(): ModelConfig | null {\n return this.modelConfig;\n }\n\n /**\n * Get current device mode (webgpu, cpu, or wasm)\n */\n getDeviceMode(): \"webgpu\" | \"cpu\" | \"wasm\" {\n return this._deviceMode;\n }\n\n /**\n * Get dtype used for current model\n */\n getDtype(): string {\n // WebGPU uses q4f16, CPU/WASM use q4\n return this._deviceMode === \"webgpu\" ? 
\"q4f16\" : \"q4\";\n }\n\n /**\n * Get response cache statistics\n */\n getResponseCacheStats(): { hits: number; misses: number; size: number; hitRate: number } {\n const cache = getGlobalCache();\n const stats = cache.getStats();\n return {\n hits: stats.hits,\n misses: stats.misses,\n size: stats.size,\n hitRate: cache.getHitRate(),\n };\n }\n\n /**\n * Clear the response cache (for cached generate() results)\n */\n clearResponseCache(): void {\n getGlobalCache().clear();\n }\n\n /**\n * Get Chrome backend status (if using WebGPU via Chrome)\n */\n getChromeStatus(): {\n pid: number | null;\n port: number;\n modelId: string;\n startedAt: Date | null;\n } | null {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getStatus();\n }\n\n /**\n * Get Chrome memory usage (if using WebGPU via Chrome)\n * Returns JS heap memory in bytes\n */\n async getChromeMemory(): Promise<{ jsHeapUsed: number; jsHeapTotal: number } | null> {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getMemoryUsage();\n }\n\n /**\n * Get memory usage in GB (if using WebGPU via Chrome)\n */\n async getMemoryUsage(): Promise<{ usedGB: number; totalGB: number; usedPercent: number } | null> {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getMemoryStats();\n }\n\n /**\n * Clear KV cache to free memory\n * This will reset the conversation context but free up memory\n */\n async clearCache(): Promise<void> {\n if (this.chromeBackend) {\n await this.chromeBackend.reset();\n }\n }\n\n /**\n * Check memory usage and cleanup if needed\n * @param thresholdGB Memory threshold in GB (default: 8)\n * @returns true if cleanup was performed\n */\n async checkMemoryAndCleanup(thresholdGB = 8): Promise<boolean> {\n if (!this.chromeBackend) {\n return false;\n }\n return this.chromeBackend.checkMemoryAndCleanup(thresholdGB);\n }\n\n // ============================================\n // Text Generation\n // ============================================\n\n /**\n * Generate text (automatically routes to vision generation if images provided)\n *\n * @example\n * ```ts\n * // Text generation\n * const result = await g.generate(\"Hello!\");\n *\n * // Vision generation (with vision model)\n * const result = await g.generate(\"What's in this image?\", {\n * images: [{ source: \"https://example.com/cat.jpg\" }]\n * });\n * ```\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<GenerateResult> {\n if (!this.isLoaded()) {\n // Auto-load default model\n await this.loadModel(this.config.model || \"qwen3-0.6b\");\n }\n\n const { images } = options;\n\n // Route to local vision generation if:\n // 1. Images provided\n // 2. Model supports vision\n // 3. 
NOT using Chrome backend (Chrome backend handles vision internally)\n if (images?.length && this.isVisionModel && !this.chromeBackend) {\n return this.generateWithVision(prompt, options);\n }\n\n // Warn if images provided but model doesn't support vision\n if (images?.length && !this.isVisionModel) {\n }\n\n const {\n maxTokens = 256,\n temperature = 0.7,\n topP = 0.9,\n topK = 50,\n thinking = false,\n system,\n cache = false,\n cacheTtl,\n } = options;\n\n // Check cache if enabled (skip for streaming/vision)\n if (cache && !options.onToken && !images?.length) {\n const cacheKey = generateCacheKey(prompt, this.currentModel || \"\", {\n maxTokens,\n temperature,\n topP,\n topK,\n system,\n thinking,\n });\n const cached = getGlobalCache().get(cacheKey);\n if (cached) {\n return cached;\n }\n }\n\n const startTime = performance.now();\n\n try {\n let rawText = \"\";\n\n if (this.chromeBackend) {\n // Chrome backend approach (for Node.js WebGPU via Chrome)\n try {\n rawText = await this.chromeBackend.generate(prompt, {\n maxTokens,\n temperature,\n topP,\n topK,\n thinking,\n system,\n // Pass images for vision models\n images: images?.map((img) => img.source),\n // Wrap onToken to match Gerbil's simpler signature\n onToken: options.onToken ? (t) => options.onToken?.(t.text) : undefined,\n });\n } catch (chromeErr: any) {\n // If Chrome died (OOM, crash), fall back to CPU silently\n if (chromeErr?.message === \"CHROME_BACKEND_DEAD\" || !this.chromeBackend?.isAlive()) {\n await this.chromeBackend?.dispose().catch(() => {});\n this.chromeBackend = null;\n this._deviceMode = \"cpu\";\n // Load CPU fallback and retry\n const modelPath = this.currentModel || \"qwen3-0.6b\";\n this.generator = (await pipeline(\"text-generation\", modelPath, {\n dtype: \"q4\",\n device: \"cpu\",\n } as any)) as TextGenerationPipeline;\n // Retry with CPU\n return this.generate(prompt, options);\n }\n throw chromeErr;\n }\n } else if (this.useDirect && this.model && this.tokenizer) {\n // Direct model approach (for browser WebGPU)\n const messages = this.buildMessages(prompt, { ...options, thinking });\n\n const inputs = (this.tokenizer as any).apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking, // Qwen3 thinking mode\n });\n\n const output = await this.model.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n temperature: temperature > 0 ? 
temperature : undefined,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n });\n\n // Get input length to extract only generated tokens\n const inputLength = inputs.input_ids.dims?.[1] || inputs.input_ids.data?.length || 0;\n\n // Slice output tensor to get only new tokens (skip prompt)\n const outputTokens = output.slice(null, [inputLength, null]);\n const decoded = this.tokenizer.batch_decode(outputTokens, {\n skip_special_tokens: true,\n });\n\n rawText = decoded[0] || \"\";\n\n // If we still have prompt artifacts, extract assistant response\n if (rawText.toLowerCase().includes(\"assistant\")) {\n const match = rawText.match(/assistant[:\\s]*([\\s\\S]*)/i);\n if (match) {\n rawText = match[1].trim();\n }\n }\n } else if (this.generator) {\n // Pipeline approach (for Node.js / CPU / WASM)\n const formattedPrompt = this.formatPrompt(prompt, { ...options, thinking });\n\n const output = await this.generator(formattedPrompt, {\n max_new_tokens: maxTokens,\n temperature,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n return_full_text: false,\n });\n\n // Extract text from pipeline output\n if (Array.isArray(output) && output[0]) {\n const result = output[0] as any;\n if (Array.isArray(result.generated_text)) {\n const last = result.generated_text.at(-1);\n rawText = last?.content || \"\";\n } else {\n rawText = result.generated_text || \"\";\n }\n }\n } else {\n throw new Error(\"No model loaded\");\n }\n\n const endTime = performance.now();\n const totalTime = endTime - startTime;\n\n rawText = this.cleanOutput(rawText);\n\n // Always parse thinking to strip <think> tags from output\n // (model may generate them even without thinking mode enabled)\n const { thinking: thinkingText, response } = this.parseThinking(rawText);\n\n // Only include thinking in result if mode was enabled\n const finalThinking = thinking ? 
thinkingText : undefined;\n\n const tokensGenerated = Math.ceil(response.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n const result: GenerateResult = {\n text: response,\n thinking: finalThinking,\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n };\n\n // Store in cache if enabled\n if (cache && !options.onToken && !images?.length) {\n const cacheKey = generateCacheKey(prompt, this.currentModel || \"\", {\n maxTokens,\n temperature,\n topP,\n topK,\n system,\n thinking,\n });\n getGlobalCache().set(cacheKey, result, cacheTtl);\n }\n\n return result;\n } catch (_error) {\n return {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n };\n }\n }\n\n /**\n * Stream text generation (simulated token-by-token)\n *\n * Note: Yields the raw output including <think> tags if thinking mode is enabled.\n * The final result has parsed thinking separated out.\n */\n async *stream(\n prompt: string,\n options: GenerateOptions = {},\n ): AsyncGenerator<string, GenerateResult, unknown> {\n if (!this.isLoaded()) {\n await this.loadModel(this.config.model || \"qwen3-0.6b\");\n }\n\n const startTime = performance.now();\n\n // For Chrome backend, use real streaming via onToken callback\n if (this.chromeBackend) {\n let fullText = \"\";\n const tokenQueue: string[] = [];\n let resolveNext: ((value: string | null) => void) | null = null;\n let done = false;\n\n // Start generation with streaming callback\n const generatePromise = this.chromeBackend\n .generate(prompt, {\n ...options,\n // Convert ImageInput[] to string[] for Chrome backend\n images: options.images?.map((img) => img.source),\n onToken: (token) => {\n fullText += token.text;\n if (resolveNext) {\n resolveNext(token.text);\n resolveNext = null;\n } else {\n tokenQueue.push(token.text);\n }\n },\n })\n .then(() => {\n done = true;\n if (resolveNext) {\n resolveNext(null);\n }\n })\n .catch((err) => {\n done = true;\n if (resolveNext) {\n resolveNext(null);\n }\n throw err;\n });\n\n // Yield tokens as they arrive\n while (!done || tokenQueue.length > 0) {\n if (tokenQueue.length > 0) {\n const token = tokenQueue.shift()!;\n yield token;\n options.onToken?.(token);\n } else if (!done) {\n const token = await new Promise<string | null>((resolve) => {\n resolveNext = resolve;\n });\n if (token) {\n yield token;\n options.onToken?.(token);\n }\n }\n }\n\n await generatePromise;\n\n const { thinking: thinkingText, response } = this.parseThinking(fullText);\n const tokensGenerated = Math.ceil(response.length / 4);\n const totalTime = performance.now() - startTime;\n\n return {\n text: response,\n thinking: options.thinking ? 
thinkingText : undefined,\n tokensGenerated,\n totalTime,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n finishReason: \"stop\" as const,\n };\n }\n\n // For pipeline/direct model, use fake streaming (generate then yield)\n const result = await this.generateRaw(prompt, options);\n\n // Yield word by word for more accurate token simulation\n // (actual tokens average ~4 chars, words are a reasonable approximation)\n const words = result.rawText.split(/(\\s+)/);\n for (const word of words) {\n if (word) {\n yield word;\n options.onToken?.(word);\n }\n }\n\n return result.result;\n }\n\n /**\n * Internal: Generate with raw text access for streaming\n */\n private async generateRaw(\n prompt: string,\n options: GenerateOptions = {},\n ): Promise<{ rawText: string; result: GenerateResult }> {\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 50, thinking = false } = options;\n\n const startTime = performance.now();\n const formattedPrompt = this.formatPrompt(prompt, { ...options, thinking });\n\n try {\n const output = await this.generator?.(formattedPrompt, {\n max_new_tokens: maxTokens,\n temperature,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n return_full_text: false,\n });\n\n const endTime = performance.now();\n const totalTime = endTime - startTime;\n\n // Extract text from output\n let rawText = \"\";\n if (Array.isArray(output) && output[0]) {\n const result = output[0] as any;\n if (Array.isArray(result.generated_text)) {\n const last = result.generated_text.at(-1);\n rawText = last?.content || \"\";\n } else {\n rawText = result.generated_text || \"\";\n }\n }\n\n rawText = this.cleanOutput(rawText);\n const { thinking: thinkingText, response } = this.parseThinking(rawText);\n const finalThinking = thinking ? thinkingText : undefined;\n const tokensGenerated = Math.ceil(response.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n return {\n rawText,\n result: {\n text: response,\n thinking: finalThinking,\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n },\n };\n } catch (_error) {\n return {\n rawText: \"\",\n result: {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n },\n };\n }\n }\n\n // ============================================\n // Vision Generation\n // ============================================\n\n /**\n * Generate text from images using a vision model\n * Called automatically by generate() when images are provided\n */\n private async generateWithVision(\n prompt: string,\n options: GenerateOptions,\n ): Promise<GenerateResult> {\n if (!(this.processor && this.visionModel)) {\n throw new Error(\"Vision model not loaded. 
Load a vision model first.\");\n }\n\n const {\n images = [],\n maxTokens = 2048,\n temperature = 0.7,\n topP = 0.9,\n topK = 20,\n system,\n } = options;\n\n const startTime = performance.now();\n\n try {\n // Build message content with images and text\n const content: Array<{ type: string; text?: string }> = [];\n\n // Add image placeholders (the actual images are passed separately)\n for (let i = 0; i < images.length; i += 1) {\n content.push({ type: \"image\" });\n }\n\n // Add text prompt\n content.push({ type: \"text\", text: prompt });\n\n const messages = [\n ...(system ? [{ role: \"system\", content: system }] : []),\n { role: \"user\", content },\n ];\n\n // Apply chat template\n const chatPrompt = this.processor.apply_chat_template(messages);\n\n // Load images using RawImage\n const loadedImages = await Promise.all(\n images.map(async (img) => await RawImage.fromURL(img.source)),\n );\n\n // Process inputs (image + text)\n const inputs = await this.processor(\n loadedImages.length === 1 ? loadedImages[0] : loadedImages,\n chatPrompt,\n { add_special_tokens: false },\n );\n\n // Set up streaming if callback provided\n let fullText = \"\";\n const streamer = options.onToken\n ? new TextStreamer(this.processor.tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: (text: string) => {\n fullText += text;\n options.onToken?.(text);\n },\n })\n : undefined;\n\n // Generate\n const outputs = await this.visionModel.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n ...(streamer ? { streamer } : {}),\n });\n\n // Decode output (skip the prompt tokens)\n const inputLength = inputs.input_ids.dims?.at(-1) || 0;\n const decoded = this.processor.batch_decode(outputs.slice(null, [inputLength, null]), {\n skip_special_tokens: true,\n });\n\n const text = decoded[0] || fullText || \"\";\n const totalTime = performance.now() - startTime;\n const tokensGenerated = Math.ceil(text.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n return {\n text: this.cleanOutput(text),\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n };\n } catch (_error) {\n return {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n };\n }\n }\n\n // ============================================\n // Structured Output (JSON)\n // ============================================\n\n /**\n * Generate structured JSON output\n */\n async json<T>(prompt: string, options: JsonOptions<T>): Promise<T> {\n const { schema, retries = 3, temperature = 0.3 } = options;\n\n const systemPrompt = `You are a JSON generator. You MUST respond with valid JSON only.\nNo explanations, no markdown, no code blocks. 
Just pure JSON.\nThe JSON must conform to this schema: ${JSON.stringify(zodToJsonSchema(schema))}`;\n\n for (let attempt = 0; attempt < retries; attempt += 1) {\n const result = await this.generate(prompt, {\n system: options.system || systemPrompt,\n temperature,\n maxTokens: 1000,\n });\n\n try {\n // Try to extract JSON from response\n const jsonStr = extractJson(result.text);\n const parsed = JSON.parse(jsonStr);\n const validated = schema.parse(parsed);\n return validated;\n } catch (error) {\n if (attempt === retries - 1) {\n throw new Error(`Failed to generate valid JSON after ${retries} attempts: ${error}`);\n }\n }\n }\n\n throw new Error(\"Failed to generate valid JSON\");\n }\n\n // ============================================\n // Embeddings\n // ============================================\n\n /**\n * Generate embeddings\n */\n async embed(text: string, options: EmbedOptions = {}): Promise<EmbedResult> {\n if (!this.embedder) {\n // Load embedding model\n const model = options.model || \"Xenova/all-MiniLM-L6-v2\";\n this.embedder = (await pipeline(\"feature-extraction\", model)) as FeatureExtractionPipeline;\n }\n\n const startTime = performance.now();\n const output = await this.embedder(text, {\n pooling: \"mean\",\n normalize: options.normalize !== false,\n });\n\n const vector = Array.from(output.data as Float32Array);\n\n return {\n vector,\n text,\n totalTime: performance.now() - startTime,\n };\n }\n\n /**\n * Generate embeddings for multiple texts\n */\n async embedBatch(texts: string[], options: EmbedOptions = {}): Promise<EmbedResult[]> {\n const results: EmbedResult[] = [];\n for (const text of texts) {\n results.push(await this.embed(text, options));\n }\n return results;\n }\n\n // ============================================\n // Stats & Info\n // ============================================\n\n /**\n * Get session stats\n */\n getStats(): SessionStats {\n return { ...this.stats };\n }\n\n /**\n * Get system info\n */\n getInfo(): SystemInfo {\n return {\n version: \"1.0.0\",\n model: this.modelConfig,\n device: {\n backend: \"transformers.js\",\n gpu: null, // TODO: detect GPU\n vram: null,\n status: this.isLoaded() ? 
\"ready\" : \"loading\",\n },\n context: {\n max: this.modelConfig?.contextLength || 0,\n used: 0,\n available: this.modelConfig?.contextLength || 0,\n },\n cache: {\n location: \"~/.gerbil/models\",\n size: \"0 MB\",\n modelCount: 0,\n },\n };\n }\n\n /**\n * Reset stats\n */\n resetStats(): void {\n this.stats = {\n prompts: 0,\n tokensIn: 0,\n tokensOut: 0,\n avgSpeed: 0,\n totalTime: 0,\n cacheHits: 0,\n cacheMisses: 0,\n };\n }\n\n // ============================================\n // Text-to-Speech (TTS)\n // ============================================\n\n private tts: TTSBackendType | null = null;\n private ttsModelId: string = \"kokoro-82m\";\n\n /**\n * Load TTS model for text-to-speech synthesis\n *\n * @example\n * ```ts\n * // Load default (Kokoro)\n * await g.loadTTS({ onProgress: (p) => console.log(p.status) });\n *\n * // Load Supertonic (faster, 44kHz output)\n * await g.loadTTS({ model: \"supertonic-66m\" });\n *\n * const result = await g.speak(\"Hello world\");\n * // result.audio = Float32Array, result.sampleRate = 24000 or 44100\n * ```\n */\n async loadTTS(options: LoadTTSOptions & { model?: string } = {}): Promise<void> {\n const modelId = options.model || \"kokoro-82m\";\n\n // If switching models, dispose the old one\n if (this.tts && this.ttsModelId !== modelId) {\n await this.tts.dispose();\n this.tts = null;\n }\n\n if (this.tts?.isLoaded()) {\n return;\n }\n\n this.ttsModelId = modelId;\n\n // Dynamic import to avoid bundling TTS code when not used\n const { createTTS } = await import(\"./tts.js\");\n\n if (!this.tts) {\n this.tts = createTTS(modelId);\n }\n\n await this.tts.load(options);\n }\n\n /**\n * Ensure TTS model is loaded (lazy loading)\n */\n async ensureTTSLoaded(options?: LoadTTSOptions): Promise<void> {\n if (!this.tts?.isLoaded()) {\n await this.loadTTS(options);\n }\n }\n\n /**\n * Generate speech from text\n *\n * @example\n * ```ts\n * const result = await g.speak(\"Hello world\", { voice: \"af_bella\" });\n * // result.audio = Float32Array PCM\n * // result.sampleRate = 24000\n * // result.duration = seconds\n * ```\n */\n async speak(text: string, options: SpeakOptions = {}): Promise<SpeakResult> {\n await this.ensureTTSLoaded({ onProgress: options.onProgress });\n return this.tts!.speak(text, options);\n }\n\n /**\n * Stream speech generation (yields audio chunks as they're generated)\n *\n * @example\n * ```ts\n * for await (const chunk of g.speakStream(\"Long text...\")) {\n * // chunk.samples = Float32Array\n * // chunk.isFinal = boolean\n * playChunk(chunk);\n * }\n * ```\n */\n async *speakStream(\n text: string,\n options: SpeakOptions = {},\n ): AsyncGenerator<AudioChunk, SpeakResult, unknown> {\n await this.ensureTTSLoaded({ onProgress: options.onProgress });\n return yield* this.tts!.speakStream(text, options);\n }\n\n /**\n * Get list of available TTS voices\n */\n listVoices(): VoiceInfo[] {\n if (!this.tts) {\n // Return default voices from static import\n return KOKORO_VOICES_DEFAULT;\n }\n return this.tts.listVoices();\n }\n\n /**\n * Check if TTS model is loaded\n */\n isTTSLoaded(): boolean {\n return this.tts?.isLoaded() ?? false;\n }\n\n /**\n * Get current TTS model info\n */\n getTTSModelInfo(): { id: string; loaded: boolean; device?: \"webgpu\" | \"cpu\" } | null {\n if (!this.tts) {\n return null;\n }\n return {\n id: this.ttsModelId,\n loaded: this.tts.isLoaded(),\n device: this.tts.isLoaded() ? 
this.tts.getDeviceMode() : undefined,\n };\n }\n\n /**\n * List available TTS models\n */\n async listTTSModels(): Promise<\n Array<{ id: string; description: string; sampleRate: number; voiceCount: number }>\n > {\n const { TTS_MODELS } = await import(\"./tts.js\");\n return Object.values(TTS_MODELS).map((m) => ({\n id: m.id,\n description: m.description,\n sampleRate: m.sampleRate,\n voiceCount: m.voices.length,\n }));\n }\n\n // ============================================\n // Speech-to-Text (STT)\n // ============================================\n\n private stt: WhisperSTTType | null = null;\n\n /**\n * Load STT model for speech-to-text transcription\n *\n * @example\n * ```ts\n * await g.loadSTT({\n * onProgress: (p) => console.log(p.status)\n * });\n *\n * const result = await g.transcribe(audioData);\n * console.log(result.text);\n * ```\n */\n async loadSTT(modelId?: string, options: LoadSTTOptions = {}): Promise<void> {\n if (this.stt?.isLoaded()) {\n return;\n }\n\n // Dynamic import to avoid bundling STT code when not used\n const { WhisperSTT } = await import(\"./stt.js\");\n\n if (!this.stt) {\n this.stt = new WhisperSTT(modelId);\n }\n\n await this.stt.load(options);\n }\n\n /**\n * Ensure STT model is loaded (lazy loading)\n */\n public async ensureSTTLoaded(modelId?: string, options?: LoadSTTOptions): Promise<void> {\n if (!this.stt?.isLoaded()) {\n await this.loadSTT(modelId, options);\n }\n }\n\n /**\n * Transcribe audio to text\n *\n * @param audio - Audio data as Float32Array (16kHz mono) or Uint8Array (WAV file)\n * @param options - Transcription options\n *\n * @example\n * ```ts\n * // From Float32Array (16kHz mono)\n * const result = await g.transcribe(audioData);\n * console.log(result.text);\n *\n * // With timestamps\n * const result = await g.transcribe(audioData, { timestamps: true });\n * for (const seg of result.segments) {\n * console.log(`[${seg.start}s] ${seg.text}`);\n * }\n *\n * // From WAV file\n * const wavData = fs.readFileSync(\"audio.wav\");\n * const result = await g.transcribe(new Uint8Array(wavData));\n * ```\n */\n async transcribe(\n audio: Float32Array | Uint8Array,\n options: TranscribeOptions = {},\n ): Promise<TranscribeResult> {\n await this.ensureSTTLoaded(undefined, { onProgress: options.onProgress });\n return this.stt!.transcribe(audio, options);\n }\n\n /**\n * Create a streaming transcription session\n *\n * Transcribes audio in real-time by processing chunks at regular intervals.\n * Perfect for live captioning, call transcription, or real-time subtitles.\n *\n * @param options - Streaming options\n * @returns Streaming session controller\n *\n * @example\n * ```ts\n * const session = await g.createStreamingTranscription({\n * chunkDuration: 3000, // Transcribe every 3 seconds\n * onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),\n * onTranscript: (fullText) => console.log(\"Full:\", fullText),\n * });\n *\n * // Feed audio data as it comes in\n * session.feedAudio(audioChunk);\n *\n * // Start automatic interval-based transcription\n * session.start();\n *\n * // Later, stop and get final transcript\n * const finalText = await session.stop();\n * ```\n */\n async createStreamingTranscription(\n options: StreamingTranscriptionOptions = {},\n ): Promise<StreamingTranscriptionSession> {\n await this.ensureSTTLoaded();\n return this.stt!.createStreamingSession(options);\n }\n\n /**\n * Get list of available STT models\n */\n async listSTTModels(): Promise<STTModelConfig[]> {\n // Dynamic import to avoid bundling STT code 
  /**
   * Get list of available STT models
   */
  async listSTTModels(): Promise<STTModelConfig[]> {
    // Dynamic import to avoid bundling STT code when not used
    const { WhisperSTT } = await import("./stt.js");
    return WhisperSTT.listModels();
  }

  /**
   * Check if STT model is loaded
   */
  isSTTLoaded(): boolean {
    return this.stt?.isLoaded() ?? false;
  }

  /**
   * Get current STT model info
   */
  getSTTModelInfo(): { id: string; loaded: boolean; device?: "webgpu" | "cpu" } | null {
    if (!this.stt) {
      return null;
    }
    return {
      id: this.stt.getModelInfo().id,
      loaded: this.stt.isLoaded(),
      device: this.stt.isLoaded() ? this.stt.getDeviceMode() : undefined,
    };
  }

  // ============================================
  // Microphone Input
  // ============================================

  /**
   * Record audio from microphone and transcribe
   *
   * @example
   * ```ts
   * // Record for 5 seconds and transcribe
   * const result = await g.listen(5000);
   * console.log(result.text);
   *
   * // Use with voice chat
   * const userInput = await g.listen(10000);
   * const response = await g.generate(userInput.text);
   * await g.speak(response.text);
   * ```
   */
  async listen(
    durationMs: number = 5000,
    options: { onProgress?: (status: string) => void } = {},
  ): Promise<TranscribeResult> {
    // Dynamic import for microphone (avoids bundling when not used)
    const { Microphone, isSoxAvailable } = await import("./microphone.js");

    if (!isSoxAvailable()) {
      throw new Error(
        "Microphone recording requires SoX. Install with:\n" +
          "  macOS: brew install sox\n" +
          "  Ubuntu: sudo apt install sox\n" +
          "  Windows: https://sox.sourceforge.net/",
      );
    }

    options.onProgress?.("Starting microphone...");

    const mic = new Microphone({ sampleRate: 16000 });
    await mic.start();

    options.onProgress?.(`Recording for ${(durationMs / 1000).toFixed(1)}s...`);

    // Wait for the specified duration
    await new Promise((r) => setTimeout(r, durationMs));

    options.onProgress?.("Processing audio...");
    const { audio } = await mic.stop();

    options.onProgress?.("Transcribing...");
    return this.transcribe(audio, {
      onProgress: (p) => options.onProgress?.(p.status || "Transcribing..."),
    });
  }

  /**
   * Check if microphone recording is available
   */
  async isMicrophoneAvailable(): Promise<boolean> {
    try {
      const { isSoxAvailable } = await import("./microphone.js");
      return isSoxAvailable();
    } catch {
      return false;
    }
  }
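  /**
   * A single-turn voice-chat sketch assembled from the methods above. Assumes a
   * constructed `Gerbil` instance `g` with a text model loaded, plus the
   * `generate`/`speak` methods referenced in the `listen` example (defined
   * elsewhere in this class); a hedged illustration, not package code.
   *
   * ```ts
   * if (await g.isMicrophoneAvailable()) {
   *   const heard = await g.listen(10_000, { onProgress: (s) => console.log(s) });
   *   const reply = await g.generate(heard.text);
   *   await g.speak(reply.text);
   * } else {
   *   console.log("Microphone unavailable: install SoX first.");
   * }
   * ```
   */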
  // ============================================
  // Cleanup
  // ============================================

  /**
   * Dispose of resources
   * @param disconnect If true, also disconnect from shared browser (for clean script exit)
   */
  async dispose(disconnect = false): Promise<void> {
    // Clean up Chrome backend first (most important to release resources)
    if (this.chromeBackend) {
      try {
        await this.chromeBackend.dispose(disconnect);
      } catch {
        // Ignore errors during cleanup
      }
      this.chromeBackend = null;
    }

    if (this.generator) {
      if (typeof (this.generator as any).dispose === "function") {
        try {
          await (this.generator as any).dispose();
        } catch {
          // Ignore errors during cleanup
        }
      }
      this.generator = null;
    }
    if (this.embedder) {
      if (typeof (this.embedder as any).dispose === "function") {
        try {
          await (this.embedder as any).dispose();
        } catch {
          // Ignore errors during cleanup
        }
      }
      this.embedder = null;
    }

    // Clean up vision model resources
    if (this.visionModel) {
      if (typeof this.visionModel.dispose === "function") {
        try {
          await this.visionModel.dispose();
        } catch {
          // Ignore errors during cleanup
        }
      }
      this.visionModel = null;
    }
    if (this.processor) {
      this.processor = null;
    }

    // Clean up TTS resources
    if (this.tts) {
      try {
        await this.tts.dispose();
      } catch {
        // Ignore errors during cleanup
      }
      this.tts = null;
    }

    // Clean up STT resources
    if (this.stt) {
      try {
        this.stt.dispose();
      } catch {
        // Ignore errors during cleanup
      }
      this.stt = null;
    }

    this.currentModel = null;
    this.modelConfig = null;
    this.isVisionModel = false;
  }

  /**
   * Shutdown the shared Chrome backend completely.
   * Call this when your script/process is done to ensure proper cleanup.
   * This closes the shared browser used for WebGPU acceleration.
   */
  static async shutdown(): Promise<void> {
    // Dynamic import to match how ChromeGPUBackend is loaded
    const { ChromeGPUBackend } = await import("./chrome-backend.js");
    await ChromeGPUBackend.closeSharedBrowser();
  }

  /**
   * Get global WebGPU process info (all active backends)
   * Useful for monitoring and debugging memory leaks
   */
  static async getWebGPUProcesses(): Promise<{
    browser: {
      running: boolean;
      pid: number | null;
      port: number;
      activePagesCount: number;
      maxPages: number;
    };
    backends: Array<{
      modelId: string;
      isVision: boolean;
      isReady: boolean;
      memory: { usedGB: number; totalGB: number; usedPercent: number } | null;
    }>;
  } | null> {
    // Not available in browser
    if (typeof window !== "undefined") {
      return null;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      const browser = ChromeGPUBackend.getGlobalBrowserStatus();
      const backends = await ChromeGPUBackend.getAllBackendsInfo();

      return { browser, backends };
    } catch {
      return null;
    }
  }

  /**
   * Kill all WebGPU processes (for zombie cleanup)
   * Use this if you suspect memory leaks from undisposed Gerbil instances
   */
  static async killAllWebGPU(): Promise<{ pagesKilled: number; browserKilled: boolean } | null> {
    // Not available in browser
    if (typeof window !== "undefined") {
      return null;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      return await ChromeGPUBackend.killAllBackends();
    } catch {
      return null;
    }
  }

  /**
   * Kill a specific WebGPU backend by index
   * @param index Index of the backend to kill (0-based)
   */
  static async killWebGPUBackend(index: number): Promise<boolean> {
    // Not available in browser
    if (typeof window !== "undefined") {
      return false;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      return await ChromeGPUBackend.killBackendByIndex(index);
    } catch {
      return false;
    }
  }

  /**
   * Get all Chrome pages across ALL Gerbil processes
   * This provides cross-process visibility into WebGPU backends
   */
  static async getAllChromePagesInfo(): Promise<Array<{
    url: string;
    title: string;
    isOurs: boolean;
    modelId: string | null;
    memory: { usedGB: number; totalGB: number } | null;
  }> | null> {
    if (typeof window !== "undefined") {
      return null;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      return await ChromeGPUBackend.getAllChromePages();
    } catch {
      return null;
    }
  }

  /**
   * Kill a Chrome page by index (works across processes)
   * @param index Index of the page to kill (0-based)
   */
  static async killChromePage(index: number): Promise<boolean> {
    if (typeof window !== "undefined") {
      return false;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      return await ChromeGPUBackend.killPageByIndex(index);
    } catch {
      return false;
    }
  }
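  /**
   * A monitoring-and-cleanup sketch using the static helpers above. Field names
   * follow the return types declared in this file; the logging and the decision
   * to kill everything are illustrative only.
   *
   * ```ts
   * const info = await Gerbil.getWebGPUProcesses();
   * if (info?.browser.running) {
   *   console.log(`Chrome pid=${info.browser.pid} pages=${info.browser.activePagesCount}`);
   *   for (const b of info.backends) {
   *     console.log(`  ${b.modelId} ready=${b.isReady} usedGB=${b.memory?.usedGB ?? "?"}`);
   *   }
   *   // Reclaim backends leaked by undisposed instances
   *   const killed = await Gerbil.killAllWebGPU();
   *   console.log(`pages killed: ${killed?.pagesKilled ?? 0}`);
   * }
   * await Gerbil.shutdown(); // close the shared browser on process exit
   * ```
   */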
  /**
   * Get total Chrome page count (all processes)
   */
  static async getTotalChromePageCount(): Promise<number> {
    if (typeof window !== "undefined") {
      return 0;
    }

    try {
      const { ChromeGPUBackend } = await import("./chrome-backend.js");
      return await ChromeGPUBackend.getTotalPageCount();
    } catch {
      return 0;
    }
  }

  // ============================================
  // Private Methods
  // ============================================

  private formatPrompt(prompt: string, options: GenerateOptions): string {
    const system = options.system || "You are a helpful assistant.";
    const isQwen = this.currentModel?.includes("qwen");

    if (options.thinking && this.modelConfig?.supportsThinking) {
      const thinkSystem = `${system}\n\nThink step-by-step before answering. Wrap your reasoning in <think></think> tags, then provide your answer.`;
      return `<|im_start|>system\n${thinkSystem}<|im_end|>\n<|im_start|>user\n${prompt}<|im_end|>\n<|im_start|>assistant\n`;
    }

    if (isQwen) {
      return `<|im_start|>system\n${system}<|im_end|>\n<|im_start|>user\n${prompt} /no_think<|im_end|>\n<|im_start|>assistant\n`;
    }

    return `<|im_start|>system\n${system}<|im_end|>\n<|im_start|>user\n${prompt}<|im_end|>\n<|im_start|>assistant\n`;
  }

  private buildMessages(
    prompt: string,
    options: GenerateOptions,
  ): Array<{ role: string; content: string }> {
    const system = options.system || "You are a helpful assistant.";
    const messages: Array<{ role: string; content: string }> = [];

    // For direct model (WebGPU), enable_thinking is passed to apply_chat_template
    // so we don't need to add /no_think or modify the system prompt
    messages.push({ role: "system", content: system });
    messages.push({ role: "user", content: prompt });

    return messages;
  }

  private parseThinking(text: string): {
    thinking?: string;
    response: string;
  } {
    // Handle complete <think>...</think> blocks
    const match = text.match(/<think>([\s\S]*?)<\/think>/);
    if (match) {
      const thinking = match[1].trim();
      const response = text.replace(/<think>[\s\S]*?<\/think>/, "").trim();
      return { thinking, response };
    }

    // Handle unclosed <think> tags (model stopped mid-thought)
    const unclosedMatch = text.match(/<think>([\s\S]*)$/);
    if (unclosedMatch) {
      const thinking = unclosedMatch[1].trim();
      const response = text.replace(/<think>[\s\S]*$/, "").trim();
      return { thinking: thinking || undefined, response };
    }

    // Handle any remaining think tags
    const response = text.replace(/<\/?think>/g, "").trim();
    return { response };
  }

  private cleanOutput(text: string): string {
    return (
      text
        .replace(/<\|im_end\|>/g, "")
        .replace(/<\|im_start\|>/g, "")
        .replace(/<\|endoftext\|>/g, "")
        .replace(/<\/s>/g, "")
        // Clean up artifacts from direct model output
        .replace(/^\/no_think\s*/i, "")
        .replace(/^assistant\s*/i, "")
        .replace(/^\s*\/no_think\s*/gim, "")
        .replace(/^\s*assistant\s*/gim, "")
        // Clean up role markers that might appear
        .replace(/^(system|user|assistant):\s*/gim, "")
        .trim()
    );
  }
}

export default Gerbil;
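/**
 * For reference, the think-tag split performed by the private `parseThinking`
 * helper above, shown standalone on a sample completion (a sketch reusing the
 * same regexes; the method itself is not exported):
 *
 * ```ts
 * const out = "<think>Rayleigh scattering favors short wavelengths.</think>The sky looks blue.";
 * const m = out.match(/<think>([\s\S]*?)<\/think>/);
 * const thinking = m?.[1].trim();
 * // thinking === "Rayleigh scattering favors short wavelengths."
 * const response = out.replace(/<think>[\s\S]*?<\/think>/, "").trim();
 * // response === "The sky looks blue."
 * ```
 */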
Gerbil;\n"],"mappings":";;;;;;;;;AAkCA,SAAgB,iBACd,QACA,SACA,SAQQ;CACR,MAAM,WAAW;EACf;EACA;EACA,QAAQ,aAAa;EACrB,QAAQ,eAAe;EACvB,QAAQ,QAAQ;EAChB,QAAQ,QAAQ;EAChB,QAAQ,UAAU;EAClB,QAAQ,YAAY;EACrB;CAGD,MAAM,MAAM,KAAK,UAAU,SAAS;CACpC,IAAI,OAAO;AACX,MAAK,IAAI,IAAI,GAAG,IAAI,IAAI,QAAQ,KAAK;EACnC,MAAM,OAAO,IAAI,WAAW,EAAE;AAC9B,UAAQ,QAAQ,KAAK,OAAO;AAC5B,SAAO,OAAO;;AAEhB,QAAO,UAAU,KAAK,SAAS,GAAG;;;;;AAUpC,IAAa,gBAAb,MAA2B;CACzB,AAAQ,wBAAiC,IAAI,KAAK;CAClD,AAAQ;CACR,AAAQ;CACR,AAAQ,OAAO;CACf,AAAQ,SAAS;;;;;;CAOjB,YAAY,UAAU,KAAK,aAAa,MAAS,KAAM;AACrD,OAAK,UAAU;AACf,OAAK,aAAa;;;;;CAMpB,IAAI,KAAoC;EACtC,MAAM,QAAQ,KAAK,MAAM,IAAI,IAAI;AAEjC,MAAI,CAAC,OAAO;AACV,QAAK;AACL,UAAO;;AAIT,MAAI,KAAK,KAAK,GAAG,MAAM,YAAY,MAAM,KAAK;AAC5C,QAAK,MAAM,OAAO,IAAI;AACtB,QAAK;AACL,UAAO;;AAIT,OAAK,MAAM,OAAO,IAAI;AACtB,OAAK,MAAM,IAAI,KAAK,MAAM;AAE1B,OAAK;AACL,SAAO;GAAE,GAAG,MAAM;GAAQ,QAAQ;GAAM;;;;;CAM1C,IAAI,KAAa,QAAwB,KAAoB;AAE3D,SAAO,KAAK,MAAM,QAAQ,KAAK,SAAS;GACtC,MAAM,WAAW,KAAK,MAAM,MAAM,CAAC,MAAM,CAAC;AAC1C,OAAI,SACF,MAAK,MAAM,OAAO,SAAS;;AAI/B,OAAK,MAAM,IAAI,KAAK;GAClB;GACA,WAAW,KAAK,KAAK;GACrB,KAAK,OAAO,KAAK;GAClB,CAAC;;;;;CAMJ,IAAI,KAAsB;EACxB,MAAM,QAAQ,KAAK,MAAM,IAAI,IAAI;AACjC,MAAI,CAAC,MAAO,QAAO;AAEnB,MAAI,KAAK,KAAK,GAAG,MAAM,YAAY,MAAM,KAAK;AAC5C,QAAK,MAAM,OAAO,IAAI;AACtB,UAAO;;AAGT,SAAO;;;;;CAMT,OAAO,KAAsB;AAC3B,SAAO,KAAK,MAAM,OAAO,IAAI;;;;;CAM/B,QAAc;AACZ,OAAK,MAAM,OAAO;AAClB,OAAK,OAAO;AACZ,OAAK,SAAS;;;;;CAMhB,QAAgB;EACd,MAAM,MAAM,KAAK,KAAK;EACtB,IAAI,SAAS;AAEb,OAAK,MAAM,CAAC,KAAK,UAAU,KAAK,MAC9B,KAAI,MAAM,MAAM,YAAY,MAAM,KAAK;AACrC,QAAK,MAAM,OAAO,IAAI;AACtB;;AAIJ,SAAO;;;;;CAMT,WAAuB;AACrB,SAAO;GACL,MAAM,KAAK;GACX,QAAQ,KAAK;GACb,MAAM,KAAK,MAAM;GACjB,SAAS,KAAK;GACf;;;;;CAMH,aAAqB;EACnB,MAAM,QAAQ,KAAK,OAAO,KAAK;AAC/B,MAAI,UAAU,EAAG,QAAO;AACxB,SAAQ,KAAK,OAAO,QAAS;;;AAQjC,IAAIA,cAAoC;;;;;AAMxC,SAAgB,iBAAgC;AAC9C,KAAI,CAAC,YACH,eAAc,IAAI,eAAe;AAEnC,QAAO;;;;;AAMT,SAAgB,qBAAqB,SAAkB,YAAoC;AACzF,eAAc,IAAI,cAAc,SAAS,WAAW;AACpD,QAAO;;;;;AAMT,SAAgB,mBAAyB;AACvC,KAAI,YACF,aAAY,OAAO;;;;;;;;AC1NvB,MAAMC,aAAWC;AAGjB,SAAS,sBAAyB,IAAkC;CAClE,MAAM,eAAe,QAAQ;AAC7B,SAAQ,QAAQ,GAAG,SAAgB;EACjC,MAAM,MAAM,KAAK,IAAI,YAAY,IAAI;AAErC,MAAI,IAAI,SAAS,iBAAiB,IAAI,IAAI,SAAS,sBAAsB,CACvE;AAEF,eAAa,MAAM,SAAS,KAAK;;AAGnC,QAAO,IAAI,CAAC,cAAc;AACxB,UAAQ,OAAO;GACf;;AA4CJ,MAAMC,wBAAqC;CACzC;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACF;AAKD,MAAM,YAAY,OAAO,WAAW;AACpC,IAAI,mBAAmB,CAAC;AACxB,IAAI,kBAAkB;AAOtB,IAAI,oBAAoB;AACxB,IAAI,kBAAkB;;;;;AAMtB,eAAe,iBAAmC;AAChD,KAAI,kBACF,QAAO;AAET,qBAAoB;AAGpB,KAAI,OAAO,WAAW,aAAa;AACjC,oBAAkB,SAAS;AAC3B,SAAO;;AAKT,KAAI;EAGF,MAAM,EAAE,QAAQ,YADK,MADC,IAAI,SAAS,aAAa,2BAA2B,CAClC,SAAS;AAIlD,SAAO,OAAO,YAAY,QAAQ;AAGlC,MAAI,CAAE,WAAmB,UACvB,CAAC,WAAmB,YAAY,EAAE;AAEpC,EAAC,WAAmB,UAAU,MAAM,OAAO,EAAE,CAAC;AAE9C,oBAAkB;SACZ;AAEN,oBAAkB;;AAGpB,QAAO;;AAMT,IAAa,SAAb,MAAoB;CAClB,AAAQ,YAA2C;CACnD,AAAQ,YAAwC;CAChD,AAAQ,QAAa;CACrB,AAAQ,WAA6C;CACrD,AAAQ,eAA8B;CACtC,AAAQ,cAAkC;CAC1C,AAAiB;CACjB,AAAQ;CACR,AAAQ,YAAY;CACpB,AAAQ,gBAA6C;CACrD,AAAQ,cAAyC;CAGjD,AAAQ,YAAiB;CACzB,AAAQ,cAAmB;CAC3B,AAAQ,gBAAgB
;CAExB,YAAY,SAAuB,EAAE,EAAE;AACrC,OAAK,SAAS;AACd,OAAK,QAAQ;GACX,SAAS;GACT,UAAU;GACV,WAAW;GACX,UAAU;GACV,WAAW;GACX,WAAW;GACX,aAAa;GACd;;CAOH,OAAO,aAA4B;AACjC,SAAO,OAAO,OAAO,eAAe;;CAGtC,OAAO,SAAS,SAA0C;AACxD,SAAO,eAAe;;;;;;;;;;;;;;;;;;;;CAyBxB,MAAM,UAAU,UAAU,cAAc,UAAuB,EAAE,EAAiB;AAGhF,MAAI,KAAK,UAAU,CACjB,OAAM,KAAK,SAAS;AAItB,QAAM,gBAAgB;EAEtB,MAAM,SAAS,aAAa,QAAQ;EACpC,MAAM,EAAE,YAAY,SAAS,QAAQ,OAAO,cAAc;EAG1D,IAAI,SAAS,eAAe,QAAQ;AACpC,MAAI,CAAC,QAAQ;GAEX,MAAM,gBAAgB,MAAM,wBAAwB,OAAO,KAAK,CAAC,YAAY,KAAK;AAClF,YAAS,0BAA0B,SAAS,OAAO,MAAM,iBAAiB,OAAU;;AAItF,MAAI,OAAO,eACT,QAAO,KAAK,gBAAgB,SAAS,OAAO,MAAM,QAAQ,QAAQ;AAGpE,eAAa,EAAE,QAAQ,WAAW,QAAQ,MAAM,CAAC;EAKjD,MAAMC,cAAY,OAAO,WAAW;EACpC,MAAM,iBAAiBA,cAAY,SAAS;EAC5C,IAAIC,WAAsC;AAC1C,MAAI,WAAW,YAAY,WAAW,SAAS,WAAW,OACxD,YAAW;EAIb,MAAM,QAAQ,cAAc,aAAa,WAAW,UAAU;EAG9D,IAAI,YAAY;EAChB,IAAI,WAAW;EACf,IAAI,UAAU;EAEd,MAAM,oBAAoB,aAAkB;AAC1C,OAAI,CAAC,UACH;AAGF,OAAI,SAAS,WAAW,cAAc,SAAS,MAAM;IACnD,MAAM,MAAM,KAAK,MAAM,SAAS,YAAY,EAAE;AAE9C,QAAI,SAAS,SAAS,YAAY,OAAO,UAAU,GAAG;AACpD,gBAAW,SAAS;AACpB,eAAU;AACV,kBAAa;MACX,QAAQ,eAAe,SAAS;MAChC,UAAU;MACV,MAAM,SAAS;MAChB,CAAC;;;;AAKR,MAAI;AAGF,OAAID,eAAa,aAAa,UAAU;AACtC,iBAAa,EAAE,QAAQ,wBAAwB,CAAC;AAChD,SAAK,YAAa,MAAM,4BACtB,cAAc,gBAAgB,OAAO,MAAM,EACzC,mBAAmB,kBACpB,CAAC,CACH;AAED,iBAAa,EAAE,QAAQ,oBAAoB,CAAC;AAC5C,SAAK,QAAQ,MAAM,4BACjB,qBAAqB,gBAAgB,OAAO,MAAM;KAChD;KACA,QAAQ;KACR,mBAAmB;KACpB,CAAC,CACH;AAED,SAAK,YAAY;AACjB,SAAK,cAAc;AACnB,SAAK,gBAAgB;AACrB,gBAAY;AACZ,SAAK,eAAe;AACpB,SAAK,cAAc;AACnB,iBAAa,EAAE,QAAQ,mBAAmB,CAAC;cAClC,CAACA,eAAa,aAAa,UAAU;AAE9C,iBAAa,EAAE,QAAQ,qCAAqC,CAAC;IAG7D,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,SAAK,gBAAgB,MAAM,iBAAiB,OAAO;KACjD,SAAS,OAAO;KAChB,eAAe,OAAO;KACtB;KACD,CAAC;AAEF,SAAK,YAAY;AACjB,SAAK,cAAc;AACnB,SAAK,gBAAgB;AACrB,gBAAY;AACZ,SAAK,eAAe;AACpB,SAAK,cAAc;UAEd;IAEL,MAAM,kBAAkB;KACtB;KACA,QAAQ;KACR,mBAAmB;KACpB;AACD,SAAK,YAAa,MAAM,4BACtBH,WAAS,mBAAmB,OAAO,MAAM,gBAAuB,CACjE;AAED,SAAK,YAAY;AACjB,SAAK,cAAc;AACnB,SAAK,gBAAgB;AACrB,gBAAY;AACZ,SAAK,eAAe;AACpB,SAAK,cAAc;AACnB,iBAAa,EAAE,QAAQ,UAAU,SAAS,aAAa,CAAC,KAAK,CAAC;;WAEzD,KAAK;AAEZ,OAAI,aAAa,gBAAgB;AAC/B,iBAAa,EAAE,QAAQ,SAAS,eAAe,aAAa,CAAC,MAAM,CAAC;AAGpE,QAAI,KAAK,eAAe;AACtB,WAAM,KAAK,cAAc,SAAS;AAClC,UAAK,gBAAgB;;AAIvB,SAAK,YAAa,MAAM,4BACtBA,WAAS,mBAAmB,OAAO,MAAM;KACvC,OAAO;KACP,QAAQ;KACR,mBAAmB;KACpB,CAAQ,CACV;AAED,SAAK,YAAY;AACjB,SAAK,cAAc;AACnB,SAAK,gBAAgB;AACrB,gBAAY;AACZ,SAAK,eAAe;AACpB,SAAK,cAAc;AACnB,iBAAa,EAAE,QAAQ,UAAU,eAAe,aAAa,CAAC,KAAK,CAAC;SAEpE,OAAM;;;;;;;CASZ,MAAc,gBACZ,SACA,UACA,QACA,UAAuB,EAAE,EACV;EACf,MAAM,EAAE,YAAY,SAAS,WAAW;AAExC,eAAa,EAAE,QAAQ,WAAW,QAAQ,qBAAqB,CAAC;EAEhE,MAAMG,cAAY,OAAO,WAAW;EACpC,MAAM,iBAAiBA,cAAY,SAAS;EAC5C,IAAIC,WAAsC;AAC1C,MAAI,WAAW,YAAY,WAAW,SAAS,WAAW,OACxD,YAAW;AAIb,MAAI,CAACD,eAAa,aAAa,UAAU;AACvC,gBAAa,EAAE,QAAQ,8CAA8C,CAAC;GAGtE,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,QAAK,gBAAgB,MAAM,iBAAiB,OAAO;IACjD,SAAS;IACT,eAAe,OAAO;IACtB,UAAU;IACV;IACD,CAAC;AAEF,QAAK,YAAY;AACjB,QAAK,cAAc;AACnB,QAAK,gBAAgB;AACrB,QAAK,eAAe;AACpB,QAAK,cAAc;AAEnB;;EAIF,IAAI,WAAW;EACf,IAAI,UAAU;EAEd,MAAM,oBAAoB,aAAkB;AAC1C,OAAI,SAAS,WAAW,cAAc,SAAS,MAAM;IACnD,MAAM,MAAM,KAAK,MAAM,SAAS,YAAY,EAAE;AAC9C,QAAI,SAAS,SAAS,YAAY,OAAO,UAAU,GAAG;AACpD,gBAAW,SAAS;AACpB,eAAU;AACV,kBAAa;MACX,QAAQ,eAAe,SAAS;MAChC,UAAU;MACV,MAAM,SAAS;MAChB,CAAC;;;;AAKR,MAAI;AAEF,gBAAa,EAAE,QAAQ,wBAAwB,CAAC;AAChD,QAAK,YAAY,MAAM,4BACrB,cAAc,gBAAgB,UAAU,EACtC,mBAAmB,kBACpB,CAAC,CACH;AAGD,gBAAa,EAAE,QAAQ,2BAA2B,CAAC;AACnD,QAAK,cAAc,MAAM,4BACvB,4BAA4B,gBAAgB,UAAU;IACpD,QAAQ;IACR,mBAAmB;IACpB,CAAC,CACH;AAED,QAAK,gBAAgB;AACrB,QAAK,YAAY;AACjB,QAAK,cAAc,aAAa,WAAW,WAAY;
AACvD,QAAK,eAAe;AACpB,QAAK,cAAc;AACnB,gBAAa,EAAE,QAAQ,kBAAkB,SAAS,aAAa,CAAC,KAAK,CAAC;WAC/D,KAAK;AAEZ,OAAI,aAAa,gBAAgB;AAC/B,iBAAa,EAAE,QAAQ,uBAAuB,eAAe,aAAa,CAAC,MAAM,CAAC;AAElF,SAAK,YAAY,MAAM,4BACrB,cAAc,gBAAgB,UAAU,EACtC,mBAAmB,kBACpB,CAAC,CACH;AAED,SAAK,cAAc,MAAM,4BACvB,4BAA4B,gBAAgB,UAAU;KACpD,QAAQ;KACR,mBAAmB;KACpB,CAAC,CACH;AAED,SAAK,gBAAgB;AACrB,SAAK,YAAY;AACjB,SAAK,cAAc;AACnB,SAAK,eAAe;AACpB,SAAK,cAAc;AACnB,iBAAa,EAAE,QAAQ,kBAAkB,eAAe,aAAa,CAAC,KAAK,CAAC;SAE5E,OAAM;;;;;;CAQZ,WAAoB;AAClB,SACE,KAAK,cAAc,QAClB,KAAK,aAAa,KAAK,UAAU,QAClC,KAAK,kBAAkB,QACtB,KAAK,iBAAiB,KAAK,gBAAgB;;;;;CAOhD,iBAA0B;AACxB,SAAO,KAAK,iBAAiB,KAAK,aAAa,mBAAmB;;;;;CAMpE,eAAmC;AACjC,SAAO,KAAK;;;;;CAMd,gBAA2C;AACzC,SAAO,KAAK;;;;;CAMd,WAAmB;AAEjB,SAAO,KAAK,gBAAgB,WAAW,UAAU;;;;;CAMnD,wBAAyF;EACvF,MAAM,QAAQ,gBAAgB;EAC9B,MAAM,QAAQ,MAAM,UAAU;AAC9B,SAAO;GACL,MAAM,MAAM;GACZ,QAAQ,MAAM;GACd,MAAM,MAAM;GACZ,SAAS,MAAM,YAAY;GAC5B;;;;;CAMH,qBAA2B;AACzB,kBAAgB,CAAC,OAAO;;;;;CAM1B,kBAKS;AACP,MAAI,CAAC,KAAK,cACR,QAAO;AAET,SAAO,KAAK,cAAc,WAAW;;;;;;CAOvC,MAAM,kBAA+E;AACnF,MAAI,CAAC,KAAK,cACR,QAAO;AAET,SAAO,KAAK,cAAc,gBAAgB;;;;;CAM5C,MAAM,iBAA2F;AAC/F,MAAI,CAAC,KAAK,cACR,QAAO;AAET,SAAO,KAAK,cAAc,gBAAgB;;;;;;CAO5C,MAAM,aAA4B;AAChC,MAAI,KAAK,cACP,OAAM,KAAK,cAAc,OAAO;;;;;;;CASpC,MAAM,sBAAsB,cAAc,GAAqB;AAC7D,MAAI,CAAC,KAAK,cACR,QAAO;AAET,SAAO,KAAK,cAAc,sBAAsB,YAAY;;;;;;;;;;;;;;;;CAqB9D,MAAM,SAAS,QAAgB,UAA2B,EAAE,EAA2B;AACrF,MAAI,CAAC,KAAK,UAAU,CAElB,OAAM,KAAK,UAAU,KAAK,OAAO,SAAS,aAAa;EAGzD,MAAM,EAAE,WAAW;AAMnB,MAAI,QAAQ,UAAU,KAAK,iBAAiB,CAAC,KAAK,cAChD,QAAO,KAAK,mBAAmB,QAAQ,QAAQ;AAIjD,MAAI,QAAQ,UAAU,CAAC,KAAK,eAAe;EAG3C,MAAM,EACJ,YAAY,KACZ,cAAc,IACd,OAAO,IACP,OAAO,IACP,WAAW,OACX,QACA,QAAQ,OACR,aACE;AAGJ,MAAI,SAAS,CAAC,QAAQ,WAAW,CAAC,QAAQ,QAAQ;GAChD,MAAM,WAAW,iBAAiB,QAAQ,KAAK,gBAAgB,IAAI;IACjE;IACA;IACA;IACA;IACA;IACA;IACD,CAAC;GACF,MAAM,SAAS,gBAAgB,CAAC,IAAI,SAAS;AAC7C,OAAI,OACF,QAAO;;EAIX,MAAM,YAAY,YAAY,KAAK;AAEnC,MAAI;GACF,IAAI,UAAU;AAEd,OAAI,KAAK,cAEP,KAAI;AACF,cAAU,MAAM,KAAK,cAAc,SAAS,QAAQ;KAClD;KACA;KACA;KACA;KACA;KACA;KAEA,QAAQ,QAAQ,KAAK,QAAQ,IAAI,OAAO;KAExC,SAAS,QAAQ,WAAW,MAAM,QAAQ,UAAU,EAAE,KAAK,GAAG;KAC/D,CAAC;YACKE,WAAgB;AAEvB,QAAI,WAAW,YAAY,yBAAyB,CAAC,KAAK,eAAe,SAAS,EAAE;AAClF,WAAM,KAAK,eAAe,SAAS,CAAC,YAAY,GAAG;AACnD,UAAK,gBAAgB;AACrB,UAAK,cAAc;AAGnB,UAAK,YAAa,MAAML,WAAS,mBADf,KAAK,gBAAgB,cACwB;MAC7D,OAAO;MACP,QAAQ;MACT,CAAQ;AAET,YAAO,KAAK,SAAS,QAAQ,QAAQ;;AAEvC,UAAM;;YAEC,KAAK,aAAa,KAAK,SAAS,KAAK,WAAW;IAEzD,MAAM,WAAW,KAAK,cAAc,QAAQ;KAAE,GAAG;KAAS;KAAU,CAAC;IAErE,MAAM,SAAU,KAAK,UAAkB,oBAAoB,UAAU;KACnE,uBAAuB;KACvB,aAAa;KACb,iBAAiB;KAClB,CAAC;IAEF,MAAM,SAAS,MAAM,KAAK,MAAM,SAAS;KACvC,GAAG;KACH,gBAAgB;KAChB,aAAa,cAAc,IAAI,cAAc;KAC7C,OAAO;KACP,OAAO;KACP,WAAW,cAAc;KAC1B,CAAC;IAGF,MAAM,cAAc,OAAO,UAAU,OAAO,MAAM,OAAO,UAAU,MAAM,UAAU;IAGnF,MAAM,eAAe,OAAO,MAAM,MAAM,CAAC,aAAa,KAAK,CAAC;AAK5D,cAJgB,KAAK,UAAU,aAAa,cAAc,EACxD,qBAAqB,MACtB,CAAC,CAEgB,MAAM;AAGxB,QAAI,QAAQ,aAAa,CAAC,SAAS,YAAY,EAAE;KAC/C,MAAM,QAAQ,QAAQ,MAAM,4BAA4B;AACxD,SAAI,MACF,WAAU,MAAM,GAAG,MAAM;;cAGpB,KAAK,WAAW;IAEzB,MAAM,kBAAkB,KAAK,aAAa,QAAQ;KAAE,GAAG;KAAS;KAAU,CAAC;IAE3E,MAAM,SAAS,MAAM,KAAK,UAAU,iBAAiB;KACnD,gBAAgB;KAChB;KACA,OAAO;KACP,OAAO;KACP,WAAW,cAAc;KACzB,kBAAkB;KACnB,CAAC;AAGF,QAAI,MAAM,QAAQ,OAAO,IAAI,OAAO,IAAI;KACtC,MAAMM,WAAS,OAAO;AACtB,SAAI,MAAM,QAAQA,SAAO,eAAe,CAEtC,WADaA,SAAO,eAAe,GAAG,GAAG,EACzB,WAAW;SAE3B,WAAUA,SAAO,kBAAkB;;SAIvC,OAAM,IAAI,MAAM,kBAAkB;GAIpC,MAAM,YADU,YAAY,KAAK,GACL;AAE5B,aAAU,KAAK,YAAY,QAAQ;GAInC,MAAM,EAAE,UAAU,cAAc,aAAa,KAAK,cAAc,QAAQ;GAGxE,MAAM,gBAAgB,WAAW,eAAe;GAEhD,MAAM,kBAAkB,KAAK,KAAK,SAAS,SAAS,EAAE;AAGtD,QAAK,MAAM,WAAW;AACtB,QAAK,MAA
M,aAAa;AACxB,QAAK,MAAM,aAAa;AACxB,QAAK,MAAM,WAAY,KAAK,MAAM,YAAY,KAAK,MAAM,YAAa;GAEtE,MAAMC,SAAyB;IAC7B,MAAM;IACN,UAAU;IACV;IACA,iBAAkB,kBAAkB,YAAa;IACjD;IACA,cAAc;IACd,UAAU;IACV,QAAQ;IACT;AAGD,OAAI,SAAS,CAAC,QAAQ,WAAW,CAAC,QAAQ,QAAQ;IAChD,MAAM,WAAW,iBAAiB,QAAQ,KAAK,gBAAgB,IAAI;KACjE;KACA;KACA;KACA;KACA;KACA;KACD,CAAC;AACF,oBAAgB,CAAC,IAAI,UAAU,QAAQ,SAAS;;AAGlD,UAAO;WACA,QAAQ;AACf,UAAO;IACL,MAAM;IACN,iBAAiB;IACjB,iBAAiB;IACjB,WAAW,YAAY,KAAK,GAAG;IAC/B,cAAc;IACd,UAAU;IACV,QAAQ;IACT;;;;;;;;;CAUL,OAAO,OACL,QACA,UAA2B,EAAE,EACoB;AACjD,MAAI,CAAC,KAAK,UAAU,CAClB,OAAM,KAAK,UAAU,KAAK,OAAO,SAAS,aAAa;EAGzD,MAAM,YAAY,YAAY,KAAK;AAGnC,MAAI,KAAK,eAAe;GACtB,IAAI,WAAW;GACf,MAAMC,aAAuB,EAAE;GAC/B,IAAIC,cAAuD;GAC3D,IAAI,OAAO;GAGX,MAAM,kBAAkB,KAAK,cAC1B,SAAS,QAAQ;IAChB,GAAG;IAEH,QAAQ,QAAQ,QAAQ,KAAK,QAAQ,IAAI,OAAO;IAChD,UAAU,UAAU;AAClB,iBAAY,MAAM;AAClB,SAAI,aAAa;AACf,kBAAY,MAAM,KAAK;AACvB,oBAAc;WAEd,YAAW,KAAK,MAAM,KAAK;;IAGhC,CAAC,CACD,WAAW;AACV,WAAO;AACP,QAAI,YACF,aAAY,KAAK;KAEnB,CACD,OAAO,QAAQ;AACd,WAAO;AACP,QAAI,YACF,aAAY,KAAK;AAEnB,UAAM;KACN;AAGJ,UAAO,CAAC,QAAQ,WAAW,SAAS,EAClC,KAAI,WAAW,SAAS,GAAG;IACzB,MAAM,QAAQ,WAAW,OAAO;AAChC,UAAM;AACN,YAAQ,UAAU,MAAM;cACf,CAAC,MAAM;IAChB,MAAM,QAAQ,MAAM,IAAI,SAAwB,YAAY;AAC1D,mBAAc;MACd;AACF,QAAI,OAAO;AACT,WAAM;AACN,aAAQ,UAAU,MAAM;;;AAK9B,SAAM;GAEN,MAAM,EAAE,UAAU,cAAc,aAAa,KAAK,cAAc,SAAS;GACzE,MAAM,kBAAkB,KAAK,KAAK,SAAS,SAAS,EAAE;GACtD,MAAM,YAAY,YAAY,KAAK,GAAG;AAEtC,UAAO;IACL,MAAM;IACN,UAAU,QAAQ,WAAW,eAAe;IAC5C;IACA;IACA,iBAAkB,kBAAkB,YAAa;IACjD,cAAc;IACf;;EAIH,MAAM,SAAS,MAAM,KAAK,YAAY,QAAQ,QAAQ;EAItD,MAAM,QAAQ,OAAO,QAAQ,MAAM,QAAQ;AAC3C,OAAK,MAAM,QAAQ,MACjB,KAAI,MAAM;AACR,SAAM;AACN,WAAQ,UAAU,KAAK;;AAI3B,SAAO,OAAO;;;;;CAMhB,MAAc,YACZ,QACA,UAA2B,EAAE,EACyB;EACtD,MAAM,EAAE,YAAY,KAAK,cAAc,IAAK,OAAO,IAAK,OAAO,IAAI,WAAW,UAAU;EAExF,MAAM,YAAY,YAAY,KAAK;EACnC,MAAM,kBAAkB,KAAK,aAAa,QAAQ;GAAE,GAAG;GAAS;GAAU,CAAC;AAE3E,MAAI;GACF,MAAM,SAAS,MAAM,KAAK,YAAY,iBAAiB;IACrD,gBAAgB;IAChB;IACA,OAAO;IACP,OAAO;IACP,WAAW,cAAc;IACzB,kBAAkB;IACnB,CAAC;GAGF,MAAM,YADU,YAAY,KAAK,GACL;GAG5B,IAAI,UAAU;AACd,OAAI,MAAM,QAAQ,OAAO,IAAI,OAAO,IAAI;IACtC,MAAM,SAAS,OAAO;AACtB,QAAI,MAAM,QAAQ,OAAO,eAAe,CAEtC,WADa,OAAO,eAAe,GAAG,GAAG,EACzB,WAAW;QAE3B,WAAU,OAAO,kBAAkB;;AAIvC,aAAU,KAAK,YAAY,QAAQ;GACnC,MAAM,EAAE,UAAU,cAAc,aAAa,KAAK,cAAc,QAAQ;GACxE,MAAM,gBAAgB,WAAW,eAAe;GAChD,MAAM,kBAAkB,KAAK,KAAK,SAAS,SAAS,EAAE;AAGtD,QAAK,MAAM,WAAW;AACtB,QAAK,MAAM,aAAa;AACxB,QAAK,MAAM,aAAa;AACxB,QAAK,MAAM,WAAY,KAAK,MAAM,YAAY,KAAK,MAAM,YAAa;AAEtE,UAAO;IACL;IACA,QAAQ;KACN,MAAM;KACN,UAAU;KACV;KACA,iBAAkB,kBAAkB,YAAa;KACjD;KACA,cAAc;KACd,UAAU;KACV,QAAQ;KACT;IACF;WACM,QAAQ;AACf,UAAO;IACL,SAAS;IACT,QAAQ;KACN,MAAM;KACN,iBAAiB;KACjB,iBAAiB;KACjB,WAAW,YAAY,KAAK,GAAG;KAC/B,cAAc;KACd,UAAU;KACV,QAAQ;KACT;IACF;;;;;;;CAYL,MAAc,mBACZ,QACA,SACyB;AACzB,MAAI,EAAE,KAAK,aAAa,KAAK,aAC3B,OAAM,IAAI,MAAM,sDAAsD;EAGxE,MAAM,EACJ,SAAS,EAAE,EACX,YAAY,MACZ,cAAc,IACd,OAAO,IACP,OAAO,IACP,WACE;EAEJ,MAAM,YAAY,YAAY,KAAK;AAEnC,MAAI;GAEF,MAAMC,UAAkD,EAAE;AAG1D,QAAK,IAAI,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK,EACtC,SAAQ,KAAK,EAAE,MAAM,SAAS,CAAC;AAIjC,WAAQ,KAAK;IAAE,MAAM;IAAQ,MAAM;IAAQ,CAAC;GAE5C,MAAM,WAAW,CACf,GAAI,SAAS,CAAC;IAAE,MAAM;IAAU,SAAS;IAAQ,CAAC,GAAG,EAAE,EACvD;IAAE,MAAM;IAAQ;IAAS,CAC1B;GAGD,MAAM,aAAa,KAAK,UAAU,oBAAoB,SAAS;GAG/D,MAAM,eAAe,MAAM,QAAQ,IACjC,OAAO,IAAI,OAAO,QAAQ,MAAM,SAAS,QAAQ,IAAI,OAAO,CAAC,CAC9D;GAGD,MAAM,SAAS,MAAM,KAAK,UACxB,aAAa,WAAW,IAAI,aAAa,KAAK,cAC9C,YACA,EAAE,oBAAoB,OAAO,CAC9B;GAGD,IAAI,WAAW;GACf,MAAM,WAAW,QAAQ,UACrB,IAAI,aAAa,KAAK,UAAU,WAAW;IACzC,aAAa;IACb,qBAAqB;IACrB,oBAAoB,WAAiB;AACnC,iBAAYC;AACZ,aAAQ,UAAUA,OAAK;;IAE1B,CAAC,GACF;GAGJ,MAAM,UAAU,MAAM
,KAAK,YAAY,SAAS;IAC9C,GAAG;IACH,gBAAgB;IAChB,aAAa,cAAc,IAAI,cAAc;IAC7C,OAAO;IACP,OAAO;IACP,WAAW,cAAc;IACzB,GAAI,WAAW,EAAE,UAAU,GAAG,EAAE;IACjC,CAAC;GAGF,MAAM,cAAc,OAAO,UAAU,MAAM,GAAG,GAAG,IAAI;GAKrD,MAAM,OAJU,KAAK,UAAU,aAAa,QAAQ,MAAM,MAAM,CAAC,aAAa,KAAK,CAAC,EAAE,EACpF,qBAAqB,MACtB,CAAC,CAEmB,MAAM,YAAY;GACvC,MAAM,YAAY,YAAY,KAAK,GAAG;GACtC,MAAM,kBAAkB,KAAK,KAAK,KAAK,SAAS,EAAE;AAGlD,QAAK,MAAM,WAAW;AACtB,QAAK,MAAM,aAAa;AACxB,QAAK,MAAM,aAAa;AACxB,QAAK,MAAM,WAAY,KAAK,MAAM,YAAY,KAAK,MAAM,YAAa;AAEtE,UAAO;IACL,MAAM,KAAK,YAAY,KAAK;IAC5B;IACA,iBAAkB,kBAAkB,YAAa;IACjD;IACA,cAAc;IACd,UAAU;IACV,QAAQ;IACT;WACM,QAAQ;AACf,UAAO;IACL,MAAM;IACN,iBAAiB;IACjB,iBAAiB;IACjB,WAAW,YAAY,KAAK,GAAG;IAC/B,cAAc;IACd,UAAU;IACV,QAAQ;IACT;;;;;;CAWL,MAAM,KAAQ,QAAgB,SAAqC;EACjE,MAAM,EAAE,QAAQ,UAAU,GAAG,cAAc,OAAQ;EAEnD,MAAM,eAAe;;wCAEe,KAAK,UAAU,gBAAgB,OAAO,CAAC;AAE3E,OAAK,IAAI,UAAU,GAAG,UAAU,SAAS,WAAW,GAAG;GACrD,MAAM,SAAS,MAAM,KAAK,SAAS,QAAQ;IACzC,QAAQ,QAAQ,UAAU;IAC1B;IACA,WAAW;IACZ,CAAC;AAEF,OAAI;IAEF,MAAM,UAAU,YAAY,OAAO,KAAK;IACxC,MAAM,SAAS,KAAK,MAAM,QAAQ;AAElC,WADkB,OAAO,MAAM,OAAO;YAE/B,OAAO;AACd,QAAI,YAAY,UAAU,EACxB,OAAM,IAAI,MAAM,uCAAuC,QAAQ,aAAa,QAAQ;;;AAK1F,QAAM,IAAI,MAAM,gCAAgC;;;;;CAUlD,MAAM,MAAM,MAAc,UAAwB,EAAE,EAAwB;AAC1E,MAAI,CAAC,KAAK,SAGR,MAAK,WAAY,MAAMX,WAAS,sBADlB,QAAQ,SAAS,0BAC6B;EAG9D,MAAM,YAAY,YAAY,KAAK;EACnC,MAAM,SAAS,MAAM,KAAK,SAAS,MAAM;GACvC,SAAS;GACT,WAAW,QAAQ,cAAc;GAClC,CAAC;AAIF,SAAO;GACL,QAHa,MAAM,KAAK,OAAO,KAAqB;GAIpD;GACA,WAAW,YAAY,KAAK,GAAG;GAChC;;;;;CAMH,MAAM,WAAW,OAAiB,UAAwB,EAAE,EAA0B;EACpF,MAAMY,UAAyB,EAAE;AACjC,OAAK,MAAM,QAAQ,MACjB,SAAQ,KAAK,MAAM,KAAK,MAAM,MAAM,QAAQ,CAAC;AAE/C,SAAO;;;;;CAUT,WAAyB;AACvB,SAAO,EAAE,GAAG,KAAK,OAAO;;;;;CAM1B,UAAsB;AACpB,SAAO;GACL,SAAS;GACT,OAAO,KAAK;GACZ,QAAQ;IACN,SAAS;IACT,KAAK;IACL,MAAM;IACN,QAAQ,KAAK,UAAU,GAAG,UAAU;IACrC;GACD,SAAS;IACP,KAAK,KAAK,aAAa,iBAAiB;IACxC,MAAM;IACN,WAAW,KAAK,aAAa,iBAAiB;IAC/C;GACD,OAAO;IACL,UAAU;IACV,MAAM;IACN,YAAY;IACb;GACF;;;;;CAMH,aAAmB;AACjB,OAAK,QAAQ;GACX,SAAS;GACT,UAAU;GACV,WAAW;GACX,UAAU;GACV,WAAW;GACX,WAAW;GACX,aAAa;GACd;;CAOH,AAAQ,MAA6B;CACrC,AAAQ,aAAqB;;;;;;;;;;;;;;;;CAiB7B,MAAM,QAAQ,UAA+C,EAAE,EAAiB;EAC9E,MAAM,UAAU,QAAQ,SAAS;AAGjC,MAAI,KAAK,OAAO,KAAK,eAAe,SAAS;AAC3C,SAAM,KAAK,IAAI,SAAS;AACxB,QAAK,MAAM;;AAGb,MAAI,KAAK,KAAK,UAAU,CACtB;AAGF,OAAK,aAAa;EAGlB,MAAM,EAAE,cAAc,MAAM,OAAO;AAEnC,MAAI,CAAC,KAAK,IACR,MAAK,MAAM,UAAU,QAAQ;AAG/B,QAAM,KAAK,IAAI,KAAK,QAAQ;;;;;CAM9B,MAAM,gBAAgB,SAAyC;AAC7D,MAAI,CAAC,KAAK,KAAK,UAAU,CACvB,OAAM,KAAK,QAAQ,QAAQ;;;;;;;;;;;;;CAe/B,MAAM,MAAM,MAAc,UAAwB,EAAE,EAAwB;AAC1E,QAAM,KAAK,gBAAgB,EAAE,YAAY,QAAQ,YAAY,CAAC;AAC9D,SAAO,KAAK,IAAK,MAAM,MAAM,QAAQ;;;;;;;;;;;;;;CAevC,OAAO,YACL,MACA,UAAwB,EAAE,EACwB;AAClD,QAAM,KAAK,gBAAgB,EAAE,YAAY,QAAQ,YAAY,CAAC;AAC9D,SAAO,OAAO,KAAK,IAAK,YAAY,MAAM,QAAQ;;;;;CAMpD,aAA0B;AACxB,MAAI,CAAC,KAAK,IAER,QAAO;AAET,SAAO,KAAK,IAAI,YAAY;;;;;CAM9B,cAAuB;AACrB,SAAO,KAAK,KAAK,UAAU,IAAI;;;;;CAMjC,kBAAqF;AACnF,MAAI,CAAC,KAAK,IACR,QAAO;AAET,SAAO;GACL,IAAI,KAAK;GACT,QAAQ,KAAK,IAAI,UAAU;GAC3B,QAAQ,KAAK,IAAI,UAAU,GAAG,KAAK,IAAI,eAAe,GAAG;GAC1D;;;;;CAMH,MAAM,gBAEJ;EACA,MAAM,EAAE,eAAe,MAAM,OAAO;AACpC,SAAO,OAAO,OAAO,WAAW,CAAC,KAAK,OAAO;GAC3C,IAAI,EAAE;GACN,aAAa,EAAE;GACf,YAAY,EAAE;GACd,YAAY,EAAE,OAAO;GACtB,EAAE;;CAOL,AAAQ,MAA6B;;;;;;;;;;;;;;CAerC,MAAM,QAAQ,SAAkB,UAA0B,EAAE,EAAiB;AAC3E,MAAI,KAAK,KAAK,UAAU,CACtB;EAIF,MAAM,EAAE,eAAe,MAAM,OAAO;AAEpC,MAAI,CAAC,KAAK,IACR,MAAK,MAAM,IAAI,WAAW,QAAQ;AAGpC,QAAM,KAAK,IAAI,KAAK,QAAQ;;;;;CAM9B,MAAa,gBAAgB,SAAkB,SAAyC;AACtF,MAAI,CAAC,KAAK,KAAK,UAAU,CACvB,OAAM,KAAK,QAAQ,SAAS,QAAQ;;;;;;;;;;;;;;;;;;;;;;;;;CA2BxC,MAAM,WACJ,OACA,UAA6B,EAAE,EACJ;AAC3B,QAAM,KAAK,gBAAgB
,QAAW,EAAE,YAAY,QAAQ,YAAY,CAAC;AACzE,SAAO,KAAK,IAAK,WAAW,OAAO,QAAQ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA8B7C,MAAM,6BACJ,UAAyC,EAAE,EACH;AACxC,QAAM,KAAK,iBAAiB;AAC5B,SAAO,KAAK,IAAK,uBAAuB,QAAQ;;;;;CAMlD,MAAM,gBAA2C;EAE/C,MAAM,EAAE,eAAe,MAAM,OAAO;AACpC,SAAO,WAAW,YAAY;;;;;CAMhC,cAAuB;AACrB,SAAO,KAAK,KAAK,UAAU,IAAI;;;;;CAMjC,kBAAqF;AACnF,MAAI,CAAC,KAAK,IACR,QAAO;AAET,SAAO;GACL,IAAI,KAAK,IAAI,cAAc,CAAC;GAC5B,QAAQ,KAAK,IAAI,UAAU;GAC3B,QAAQ,KAAK,IAAI,UAAU,GAAG,KAAK,IAAI,eAAe,GAAG;GAC1D;;;;;;;;;;;;;;;;;CAsBH,MAAM,OACJ,aAAqB,KACrB,UAAqD,EAAE,EAC5B;EAE3B,MAAM,EAAE,YAAY,mBAAmB,MAAM,OAAO;AAEpD,MAAI,CAAC,gBAAgB,CACnB,OAAM,IAAI,MACR,uJAID;AAGH,UAAQ,aAAa,yBAAyB;EAE9C,MAAM,MAAM,IAAI,WAAW,EAAE,YAAY,MAAO,CAAC;AACjD,QAAM,IAAI,OAAO;AAEjB,UAAQ,aAAa,kBAAkB,aAAa,KAAM,QAAQ,EAAE,CAAC,MAAM;AAG3E,QAAM,IAAI,SAAS,MAAM,WAAW,GAAG,WAAW,CAAC;AAEnD,UAAQ,aAAa,sBAAsB;EAC3C,MAAM,EAAE,UAAU,MAAM,IAAI,MAAM;AAElC,UAAQ,aAAa,kBAAkB;AACvC,SAAO,KAAK,WAAW,OAAO,EAC5B,aAAa,MAAM,QAAQ,aAAa,EAAE,UAAU,kBAAkB,EACvE,CAAC;;;;;CAMJ,MAAM,wBAA0C;AAC9C,MAAI;GACF,MAAM,EAAE,mBAAmB,MAAM,OAAO;AACxC,UAAO,gBAAgB;UACjB;AACN,UAAO;;;;;;;CAYX,MAAM,QAAQ,aAAa,OAAsB;AAE/C,MAAI,KAAK,eAAe;AACtB,OAAI;AACF,UAAM,KAAK,cAAc,QAAQ,WAAW;WACtC;AAGR,QAAK,gBAAgB;;AAGvB,MAAI,KAAK,WAAW;AAClB,OAAI,OAAQ,KAAK,UAAkB,YAAY,WAC7C,KAAI;AACF,UAAO,KAAK,UAAkB,SAAS;WACjC;AAIV,QAAK,YAAY;;AAEnB,MAAI,KAAK,UAAU;AACjB,OAAI,OAAQ,KAAK,SAAiB,YAAY,WAC5C,KAAI;AACF,UAAO,KAAK,SAAiB,SAAS;WAChC;AAIV,QAAK,WAAW;;AAIlB,MAAI,KAAK,aAAa;AACpB,OAAI,OAAO,KAAK,YAAY,YAAY,WACtC,KAAI;AACF,UAAM,KAAK,YAAY,SAAS;WAC1B;AAIV,QAAK,cAAc;;AAErB,MAAI,KAAK,UACP,MAAK,YAAY;AAInB,MAAI,KAAK,KAAK;AACZ,OAAI;AACF,UAAM,KAAK,IAAI,SAAS;WAClB;AAGR,QAAK,MAAM;;AAIb,MAAI,KAAK,KAAK;AACZ,OAAI;AACF,SAAK,IAAI,SAAS;WACZ;AAGR,QAAK,MAAM;;AAGb,OAAK,eAAe;AACpB,OAAK,cAAc;AACnB,OAAK,gBAAgB;;;;;;;CAQvB,aAAa,WAA0B;EAErC,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,QAAM,iBAAiB,oBAAoB;;;;;;CAO7C,aAAa,qBAcH;AAER,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAI1C,UAAO;IAAE,SAHO,iBAAiB,wBAAwB;IAGvC,UAFD,MAAM,iBAAiB,oBAAoB;IAEhC;UACtB;AACN,UAAO;;;;;;;CAQX,aAAa,gBAAiF;AAE5F,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,UAAO,MAAM,iBAAiB,iBAAiB;UACzC;AACN,UAAO;;;;;;;CAQX,aAAa,kBAAkB,OAAiC;AAE9D,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,UAAO,MAAM,iBAAiB,mBAAmB,MAAM;UACjD;AACN,UAAO;;;;;;;CAQX,aAAa,wBAMF;AACT,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,UAAO,MAAM,iBAAiB,mBAAmB;UAC3C;AACN,UAAO;;;;;;;CAQX,aAAa,eAAe,OAAiC;AAC3D,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,UAAO,MAAM,iBAAiB,gBAAgB,MAAM;UAC9C;AACN,UAAO;;;;;;CAOX,aAAa,0BAA2C;AACtD,MAAI,OAAO,WAAW,YACpB,QAAO;AAGT,MAAI;GACF,MAAM,EAAE,qBAAqB,MAAM,OAAO;AAC1C,UAAO,MAAM,iBAAiB,mBAAmB;UAC3C;AACN,UAAO;;;CAQX,AAAQ,aAAa,QAAgB,SAAkC;EACrE,MAAM,SAAS,QAAQ,UAAU;EACjC,MAAM,SAAS,KAAK,cAAc,SAAS,OAAO;AAElD,MAAI,QAAQ,YAAY,KAAK,aAAa,iBAExC,QAAO,uBADa,GAAG,OAAO,iHACY,gCAAgC,OAAO;AAGnF,MAAI,OACF,QAAO,uBAAuB,OAAO,gCAAgC,OAAO;AAG9E,SAAO,uBAAuB,OAAO,gCAAgC,OAAO;;CAG9E,AAAQ,cACN,QACA,SAC0C;EAC1C,MAAM,SAAS,QAAQ,UAAU;EACjC,MAAMC,WAAqD,EAAE;AAI7D,WAAS,KAAK;GAAE,MAAM;GAAU,SAAS;GAAQ,CAAC;AAClD,WAAS,KAAK;GAAE,MAAM;GAAQ,SAAS;GAAQ,CAAC;AAEhD,SAAO;;CAGT,AAAQ,cAAc,MAGpB;EAEA,MAAM,QAAQ,KAAK,MAAM,6BAA6B;AACtD,MAAI,MAGF,QAAO;GAAE,UAFQ,MAAM,GAAG,MAAM;GAEb,UADF,KAAK,QAAQ,4BAA4B,GAAG,CAAC,MAAM;GACvC;EAI/B,MAAM,gBAAgB,KAAK,MAAM,oBAAoB;AACrD,MAAI,eAAe;GACjB,MAAM,WAAW,cAAc,GAAG,MAAM;GACxC,MAAM,WAAW,KAAK,QAAQ,mBAAmB,GAAG,CAAC,MAAM;AAC3D,UAAO;IAAE,UAAU,YAAY;IAAW;IAAU;;AAKtD,SAAO,EAAE,UADQ,KAAK,QAAQ,eAAe,GAAG,CAAC,MAAM,EACpC;;CAGr
B,AAAQ,YAAY,MAAsB;AACxC,SACE,KACG,QAAQ,iBAAiB,GAAG,CAC5B,QAAQ,mBAAmB,GAAG,CAC9B,QAAQ,oBAAoB,GAAG,CAC/B,QAAQ,UAAU,GAAG,CAErB,QAAQ,mBAAmB,GAAG,CAC9B,QAAQ,kBAAkB,GAAG,CAC7B,QAAQ,wBAAwB,GAAG,CACnC,QAAQ,uBAAuB,GAAG,CAElC,QAAQ,mCAAmC,GAAG,CAC9C,MAAM"}