@tryhamster/gerbil 1.0.0-rc.17 → 1.0.0-rc.18

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as published.
@@ -1 +1 @@
- {"version":3,"file":"index.js","names":["BUILTIN_MODELS: Record<string, ModelConfig>","currentResolve: ((text: string) => void) | null","currentReject: ((error: Error) => void) | null","gerbilWorker: GerbilWorker","options","userMessage: Message","assistantMessage: Message","KOKORO_BROWSER_VOICES: BrowserVoiceInfo[]","SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[]","TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n>","audioContext: AudioContext | null","resolveSTTModel","progress: STTProgress","e: any","ttsConfig","results: BrowserSearchResult[]"],"sources":["../../src/core/models.ts","../../src/browser/index.ts"],"sourcesContent":["/**\n * Model Registry\n *\n * Supports built-in models and any HuggingFace model via hf:org/model syntax\n */\n\nimport type { ModelConfig, ModelSource } from \"./types.js\";\n\n// ============================================\n// Built-in Models (curated & tested)\n// ============================================\n\nexport const BUILTIN_MODELS: Record<string, ModelConfig> = {\n \"qwen3-0.6b\": {\n id: \"qwen3-0.6b\",\n repo: \"onnx-community/Qwen3-0.6B-ONNX\",\n description: \"Qwen3 0.6B - Best balance of speed and quality, supports thinking\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: true,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-0.5b\": {\n id: \"qwen2.5-0.5b\",\n repo: \"onnx-community/Qwen2.5-0.5B-Instruct\",\n description: \"Qwen2.5 0.5B - Fast and capable\",\n size: \"~350MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-coder-0.5b\": {\n id: \"qwen2.5-coder-0.5b\",\n repo: \"onnx-community/Qwen2.5-Coder-0.5B-Instruct\",\n description: \"Qwen2.5 Coder 0.5B - Optimized for code\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"smollm2-360m\": {\n id: \"smollm2-360m\",\n repo: \"HuggingFaceTB/SmolLM2-360M-Instruct\",\n description: \"SmolLM2 360M - Fast, good for simple tasks\",\n size: \"~250MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"smollm2-135m\": {\n id: \"smollm2-135m\",\n repo: \"HuggingFaceTB/SmolLM2-135M-Instruct\",\n description: \"SmolLM2 135M - Fastest, basic generation\",\n size: \"~100MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"phi-3-mini\": {\n id: \"phi-3-mini\",\n repo: \"microsoft/Phi-3-mini-4k-instruct-onnx\",\n description: \"Phi-3 Mini - High quality, larger model\",\n size: \"~2.1GB\",\n contextLength: 4096,\n supportsThinking: false,\n supportsJson: true,\n family: \"phi\",\n },\n \"ministral-3b\": {\n id: \"ministral-3b\",\n repo: \"mistralai/Ministral-3-3B-Instruct-2512-ONNX\",\n description: \"Ministral 3 3B - Vision + Reasoning, 256k context\",\n size: \"~2.5GB\",\n contextLength: 262_144,\n supportsThinking: true,\n supportsJson: true,\n supportsVision: true,\n visionEncoderSize: \"0.4B\",\n family: \"mistral\",\n },\n};\n\n// ============================================\n// Model Resolution\n// ============================================\n\n/**\n * Parse model identifier and resolve to source\n *\n * Supported formats:\n * - \"qwen3-0.6b\" (built-in)\n * - \"hf:org/model\" (HuggingFace shorthand)\n * - \"https://huggingface.co/org/model\" (full URL)\n * - \"file:./path/to/model\" (local path)\n */\nexport function resolveModel(modelId: 
string): ModelSource {\n // Built-in model\n if (BUILTIN_MODELS[modelId]) {\n return {\n type: \"builtin\",\n path: BUILTIN_MODELS[modelId].repo,\n };\n }\n\n // HuggingFace shorthand: hf:org/model\n if (modelId.startsWith(\"hf:\")) {\n const repo = modelId.slice(3);\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // HuggingFace URL\n if (modelId.startsWith(\"https://huggingface.co/\")) {\n const repo = modelId.replace(\"https://huggingface.co/\", \"\");\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // Local file\n if (modelId.startsWith(\"file:\")) {\n const path = modelId.slice(5);\n return {\n type: \"local\",\n path,\n };\n }\n\n // Assume it's a HuggingFace repo if it contains a slash\n if (modelId.includes(\"/\")) {\n return {\n type: \"huggingface\",\n path: modelId,\n };\n }\n\n // Unknown - treat as HuggingFace\n return {\n type: \"huggingface\",\n path: modelId,\n };\n}\n\n/**\n * Get model config (built-in only)\n */\nexport function getModelConfig(modelId: string): ModelConfig | null {\n return BUILTIN_MODELS[modelId] || null;\n}\n\n// Default context lengths by model family (when config.json is unavailable)\nconst FAMILY_CONTEXT_DEFAULTS: Record<string, number> = {\n qwen: 32_768,\n mistral: 262_144, // Ministral models support up to 256K\n llama: 8192,\n phi: 4096,\n smollm: 8192,\n other: 4096,\n};\n\n/**\n * Create model config for external model\n */\nexport function createExternalModelConfig(\n modelId: string,\n repo: string,\n contextLength?: number,\n): ModelConfig {\n // Try to infer family from repo name\n let family: ModelConfig[\"family\"] = \"other\";\n const repoLower = repo.toLowerCase();\n\n if (repoLower.includes(\"qwen\")) {\n family = \"qwen\";\n } else if (repoLower.includes(\"smollm\")) {\n family = \"smollm\";\n } else if (repoLower.includes(\"phi\")) {\n family = \"phi\";\n } else if (repoLower.includes(\"mistral\") || repoLower.includes(\"ministral\")) {\n family = \"mistral\";\n } else if (repoLower.includes(\"llama\")) {\n family = \"llama\";\n }\n\n // Detect vision models from common patterns\n const supportsVision =\n repoLower.includes(\"vision\") ||\n repoLower.includes(\"vlm\") ||\n repoLower.includes(\"image-text\") ||\n repoLower.includes(\"ministral\");\n\n return {\n id: modelId,\n repo,\n description: `External model: ${repo}`,\n size: \"Unknown\",\n contextLength: contextLength || FAMILY_CONTEXT_DEFAULTS[family] || 4096,\n supportsThinking: family === \"qwen\" || family === \"mistral\",\n supportsJson: family === \"qwen\" || family === \"phi\" || family === \"mistral\",\n supportsVision,\n family,\n };\n}\n\n/**\n * Fetch context length from HuggingFace model config\n */\nexport async function fetchModelContextLength(repo: string): Promise<number | null> {\n try {\n const res = await fetch(`https://huggingface.co/${repo}/raw/main/config.json`);\n if (!res.ok) {\n return null;\n }\n\n const config = await res.json();\n\n // Different models use different field names\n return (\n config.max_position_embeddings ||\n config.n_positions ||\n config.max_seq_len ||\n config.sliding_window || // Some models use this\n config.context_length ||\n null\n );\n } catch {\n return null;\n }\n}\n\n/**\n * List all built-in models\n */\nexport function listBuiltinModels(): ModelConfig[] {\n return Object.values(BUILTIN_MODELS);\n}\n\n/**\n * Search HuggingFace models (placeholder - would need HF API)\n */\nexport async function searchModels(query: string): Promise<ModelConfig[]> {\n // TODO: Implement HuggingFace API 
search\n // For now, filter built-in models\n const q = query.toLowerCase();\n return listBuiltinModels().filter(\n (m) =>\n m.id.toLowerCase().includes(q) ||\n m.description.toLowerCase().includes(q) ||\n m.family.toLowerCase().includes(q),\n );\n}\n","/**\n * Gerbil Browser Support\n *\n * Run LLMs directly in the browser with WebGPU acceleration.\n *\n * @example useChat (React)\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <form onSubmit={handleSubmit}>\n * {messages.map(m => <div key={m.id}>{m.role}: {m.content}</div>)}\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * </form>\n * );\n * }\n * ```\n *\n * @example useCompletion (React)\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading } = useCompletion();\n * if (isLoading) return <div>Loading...</div>;\n * return <button onClick={() => complete(\"Write a haiku\")}>{completion}</button>;\n * }\n * ```\n *\n * @example Low-level API\n * ```ts\n * import { createGerbilWorker } from \"@tryhamster/gerbil/browser\";\n *\n * const gerbil = await createGerbilWorker({\n * modelId: \"qwen3-0.6b\",\n * onToken: (token) => console.log(token.text),\n * });\n * await gerbil.generate(\"Hello!\");\n * gerbil.terminate();\n * ```\n */\n\nimport { resolveModel } from \"../core/models.js\";\n\n// Re-export models and types (browser-safe, no Node.js dependencies)\nexport { BUILTIN_MODELS } from \"../core/models.js\";\nexport type * from \"../core/types.js\";\n\n// NOTE: We intentionally do NOT export Gerbil from core here.\n// The core Gerbil class has Node.js code paths (chrome-backend/puppeteer)\n// that break browser bundlers. 
Use createGerbilWorker() instead for browser.\n\n// ============================================\n// Types\n// ============================================\n\nexport type WorkerProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n error?: string;\n};\n\nexport type WorkerToken = {\n status: \"token\";\n text: string;\n state: \"thinking\" | \"answering\";\n numTokens: number;\n tps: number;\n};\n\nexport type WorkerComplete = {\n status: \"complete\";\n text: string;\n numTokens: number;\n totalTime: number;\n tps: number;\n};\n\nexport type GerbilWorkerOptions = {\n /** Model ID to load (default: \"qwen3-0.6b\") */\n modelId?: string;\n /** Called during model loading with progress updates */\n onProgress?: (progress: WorkerProgress) => void;\n /** Called for each token during streaming generation */\n onToken?: (token: WorkerToken) => void;\n /** Called when generation is complete */\n onComplete?: (result: WorkerComplete) => void;\n /** Called on errors */\n onError?: (error: string) => void;\n /** Worker script URL (auto-detected if not provided) */\n workerUrl?: string;\n};\n\nexport type GenerateStreamOptions = {\n /** Maximum tokens to generate */\n maxTokens?: number;\n /** Temperature for sampling (0 = deterministic) */\n temperature?: number;\n /** Top-p nucleus sampling */\n topP?: number;\n /** Top-k sampling */\n topK?: number;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** System prompt */\n system?: string;\n /** Image URLs or data URIs (for vision models) */\n images?: string[];\n /** Conversation history for multi-turn (includes all previous messages) */\n history?: Array<{ role: \"user\" | \"assistant\" | \"system\"; content: string }>;\n};\n\nexport type GerbilWorker = {\n /** Generate text with streaming */\n generate: (prompt: string, options?: GenerateStreamOptions) => Promise<string>;\n /** Interrupt current generation */\n interrupt: () => void;\n /** Reset conversation cache */\n reset: () => void;\n /** Terminate the worker */\n terminate: () => void;\n /** Check if model is loaded */\n isReady: () => boolean;\n};\n\n// ============================================\n// Web Worker Factory\n// ============================================\n\n/**\n * Create a Gerbil worker for streaming WebGPU inference\n *\n * Uses a Web Worker to keep the UI responsive during model loading\n * and text generation, with real-time token streaming.\n */\nexport async function createGerbilWorker(options: GerbilWorkerOptions = {}): Promise<GerbilWorker> {\n const { modelId = \"qwen3-0.6b\", onProgress, onToken, onComplete, onError } = options;\n\n // Resolve model to HuggingFace path\n const source = resolveModel(modelId);\n\n return new Promise((resolve, reject) => {\n // Create inline worker from the worker code\n const workerCode = `\n import {\n AutoTokenizer,\n AutoModelForCausalLM,\n AutoProcessor,\n AutoModelForImageTextToText,\n RawImage,\n TextStreamer,\n InterruptableStoppingCriteria,\n env,\n } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n\n // Enable IndexedDB caching for browser (prevents re-downloading models)\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n\n class ModelPipeline {\n static tokenizer = null;\n static model = null;\n static processor = null;\n static visionModel = null;\n 
static modelId = \"\";\n static isVision = false;\n\n static async getInstance(modelId, options = {}, progressCallback) {\n if (this.modelId !== modelId) {\n this.tokenizer = null;\n this.model = null;\n this.processor = null;\n this.visionModel = null;\n }\n this.modelId = modelId;\n \n // Detect vision models\n this.isVision = options.vision || \n modelId.toLowerCase().includes(\"ministral\") ||\n modelId.toLowerCase().includes(\"vision\") ||\n modelId.toLowerCase().includes(\"vlm\");\n\n const dtype = options.dtype || \"q4f16\";\n const device = options.device || \"webgpu\";\n\n if (this.isVision) {\n // Load vision model components\n // Note: Don't specify dtype for vision models - let transformers.js pick defaults\n if (!this.processor) {\n this.processor = await AutoProcessor.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.visionModel) {\n this.visionModel = await AutoModelForImageTextToText.from_pretrained(modelId, {\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n processor: this.processor, \n model: this.visionModel, \n tokenizer: this.processor.tokenizer,\n isVision: true \n };\n } else {\n // Load text-only model components\n if (!this.tokenizer) {\n this.tokenizer = await AutoTokenizer.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.model) {\n this.model = await AutoModelForCausalLM.from_pretrained(modelId, {\n dtype,\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n tokenizer: this.tokenizer, \n model: this.model, \n isVision: false \n };\n }\n }\n }\n\n const stoppingCriteria = new InterruptableStoppingCriteria();\n let pastKeyValuesCache = null;\n\n async function load(data) {\n const { modelId, options = {} } = data;\n self.postMessage({ status: \"loading\", message: \"Loading model...\" });\n\n const downloadState = {\n downloading: new Set(),\n completed: new Set(),\n isDownloading: false,\n };\n\n try {\n const result = await ModelPipeline.getInstance(\n modelId,\n options,\n (progress) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n if (pct < 100) {\n downloadState.downloading.add(progress.file);\n downloadState.isDownloading = true;\n } else if (pct === 100) {\n downloadState.downloading.delete(progress.file);\n downloadState.completed.add(progress.file);\n }\n if (downloadState.isDownloading) {\n self.postMessage({\n status: \"downloading\",\n file: progress.file,\n progress: pct,\n downloadCount: downloadState.downloading.size,\n totalFiles: downloadState.completed.size + downloadState.downloading.size,\n });\n }\n }\n }\n );\n\n self.postMessage({ status: \"loading\", message: \"Compiling shaders...\" });\n \n // Warmup differs for vision vs text models\n if (result.isVision) {\n // Vision models need both text and vision warmup\n // Text warmup first\n const textWarmupInputs = result.tokenizer(\"hello\");\n await result.model.generate({ ...textWarmupInputs, max_new_tokens: 1 });\n \n // Vision warmup with synthetic image\n self.postMessage({ status: \"loading\", message: \"Warming up vision encoder...\" });\n try {\n // Create a tiny 8x8 test image using OffscreenCanvas\n const canvas = new OffscreenCanvas(8, 8);\n const ctx = canvas.getContext(\"2d\");\n ctx.fillStyle = \"red\";\n ctx.fillRect(0, 0, 8, 8);\n const blob = await canvas.convertToBlob({ type: \"image/png\" });\n const warmupImage = await RawImage.fromBlob(blob);\n \n // Process with vision pipeline\n const 
warmupContent = [{ type: \"image\" }, { type: \"text\", text: \"hi\" }];\n const warmupMessages = [{ role: \"user\", content: warmupContent }];\n const warmupPrompt = result.processor.apply_chat_template(warmupMessages, { add_generation_prompt: true });\n const warmupInputs = await result.processor(warmupImage, warmupPrompt, { add_special_tokens: false });\n \n // Run vision warmup generation\n await result.model.generate({\n ...warmupInputs,\n max_new_tokens: 1,\n });\n } catch (warmupErr) {\n console.warn(\"Vision warmup failed (non-fatal):\", warmupErr);\n }\n } else {\n const warmupInputs = result.tokenizer(\"a\");\n await result.model.generate({ ...warmupInputs, max_new_tokens: 1 });\n }\n\n self.postMessage({ status: \"ready\", isVision: result.isVision });\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generate(data) {\n const { messages, images = [], options = {} } = data;\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n\n try {\n const result = await ModelPipeline.getInstance(ModelPipeline.modelId, {});\n \n // Route to vision or text generation\n if (result.isVision && images.length > 0) {\n await generateVision(result, messages, images, options);\n } else {\n await generateText(result, messages, options);\n }\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generateText(result, messages, options) {\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n const { tokenizer, model } = result;\n\n const inputs = tokenizer.apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking,\n });\n\n let state = \"answering\";\n const [START_THINKING_TOKEN_ID, END_THINKING_TOKEN_ID] = tokenizer.encode(\n \"<think></think>\",\n { add_special_tokens: false }\n );\n\n let startTime = null;\n let numTokens = 0;\n\n const tokenCallback = (tokens) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tokenId = Number(tokens[0]);\n if (tokenId === START_THINKING_TOKEN_ID) state = \"thinking\";\n else if (tokenId === END_THINKING_TOKEN_ID) state = \"answering\";\n };\n\n const streamCallback = (text) => {\n const tps = startTime ? (numTokens / (performance.now() - startTime)) * 1000 : 0;\n self.postMessage({ status: \"token\", text, state, numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n token_callback_function: tokenCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const { past_key_values, sequences } = await model.generate({\n ...inputs,\n past_key_values: pastKeyValuesCache,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n max_new_tokens: maxTokens,\n streamer,\n stopping_criteria: stoppingCriteria,\n return_dict_in_generate: true,\n });\n\n pastKeyValuesCache = past_key_values;\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: true });\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n async function generateVision(result, messages, images, options) {\n const { maxTokens = 2048, temperature = 0.7, topP = 0.9, topK = 20 } = options;\n const { processor, model, tokenizer } = result;\n\n self.postMessage({ status: \"progress\", message: \"Preparing vision request...\" });\n\n // Build message content with image placeholders and text\n const lastMessage = messages[messages.length - 1];\n const content = [];\n for (const _ of images) {\n content.push({ type: \"image\" });\n }\n content.push({ type: \"text\", text: lastMessage.content });\n\n // For vision models, include a brief system instruction for concise responses\n // Note: Vision processors handle system differently than text models\n const visionMessages = [\n { role: \"system\", content: \"You are a helpful assistant. Be concise and direct in your responses.\" },\n { role: \"user\", content }\n ];\n\n // Apply chat template with generation prompt\n const chatPrompt = processor.apply_chat_template(visionMessages, {\n add_generation_prompt: true\n });\n\n // Load images (handle both string URLs and { source: string } objects)\n self.postMessage({ status: \"progress\", message: \"Loading images...\" });\n const loadedImages = await Promise.all(\n images.map(img => {\n const url = typeof img === \"string\" ? img : img.source;\n return RawImage.fromURL(url);\n })\n );\n self.postMessage({ status: \"progress\", message: \"Processing inputs...\" });\n\n // Process inputs\n const inputs = await processor(\n loadedImages.length === 1 ? loadedImages[0] : loadedImages,\n chatPrompt,\n { add_special_tokens: false }\n );\n self.postMessage({ status: \"progress\", message: \"Generating response...\" });\n\n let startTime = null;\n let numTokens = 0;\n\n const streamCallback = (text) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tps = (numTokens / (performance.now() - startTime)) * 1000;\n self.postMessage({ status: \"token\", text, state: \"answering\", numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const outputs = await model.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n streamer,\n stopping_criteria: stoppingCriteria,\n });\n\n // Decode output (skip prompt)\n const inputLength = inputs.input_ids.dims?.at(-1) || 0;\n const decoded = processor.batch_decode(\n outputs.slice(null, [inputLength, null]),\n { skip_special_tokens: true }\n );\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n self.addEventListener(\"message\", async (e) => {\n const { type, ...data } = e.data;\n switch (type) {\n case \"load\": await load(data); break;\n case \"generate\": stoppingCriteria.reset(); await generate(data); break;\n case \"interrupt\": stoppingCriteria.interrupt(); break;\n case \"reset\": pastKeyValuesCache = null; stoppingCriteria.reset(); break;\n }\n });\n\n self.postMessage({ status: \"init\" });\n `;\n\n const blob = new Blob([workerCode], { type: \"application/javascript\" });\n const workerUrl = URL.createObjectURL(blob);\n const worker = new Worker(workerUrl, { type: \"module\" });\n\n let isReady = false;\n let currentResolve: ((text: string) => void) | null = null;\n let currentReject: ((error: Error) => void) | null = null;\n let _generatedText = \"\";\n\n worker.onmessage = (e) => {\n const msg = e.data;\n\n switch (msg.status) {\n case \"init\":\n // Worker initialized, load the model\n worker.postMessage({ type: \"load\", modelId: source.path });\n break;\n\n case \"loading\":\n case \"downloading\":\n onProgress?.(msg as WorkerProgress);\n break;\n\n case \"ready\":\n isReady = true;\n onProgress?.(msg as WorkerProgress);\n resolve(gerbilWorker);\n break;\n\n case \"start\":\n _generatedText = \"\";\n break;\n\n case \"token\":\n _generatedText += msg.text;\n onToken?.(msg as WorkerToken);\n break;\n\n case \"complete\":\n onComplete?.(msg as WorkerComplete);\n currentResolve?.(msg.text);\n currentResolve = null;\n currentReject = null;\n break;\n\n case \"error\":\n onError?.(msg.error);\n onProgress?.({ status: \"error\", error: msg.error });\n if (currentReject) {\n currentReject(new Error(msg.error));\n currentResolve = null;\n currentReject = null;\n } else {\n reject(new Error(msg.error));\n }\n break;\n }\n };\n\n worker.onerror = (e) => {\n const error = e.message || \"Worker error\";\n onError?.(error);\n reject(new Error(error));\n };\n\n const gerbilWorker: GerbilWorker = {\n generate: (prompt: string, options: GenerateStreamOptions = {}) =>\n new Promise((res, rej) => {\n currentResolve = res;\n currentReject = rej;\n\n const system = options.system || \"You are a helpful assistant.\";\n\n // Use history if provided (for multi-turn conversations)\n // Otherwise, just use system + current prompt\n const messages = options.history\n ? [{ role: \"system\", content: system }, ...options.history]\n : [\n { role: \"system\", content: system },\n { role: \"user\", content: prompt },\n ];\n\n // When using history, reset KV cache first to avoid position mismatches\n // (full history is provided, so we don't need cached context)\n if (options.history) {\n worker.postMessage({ type: \"reset\" });\n }\n\n worker.postMessage({\n type: \"generate\",\n messages,\n images: options.images || [],\n options: {\n maxTokens: options.maxTokens ?? (options.images?.length ? 2048 : 256),\n temperature: options.temperature ?? 0.7,\n topP: options.topP ?? 0.9,\n topK: options.topK ?? 20,\n thinking: options.thinking ?? 
false,\n },\n });\n }),\n\n interrupt: () => {\n worker.postMessage({ type: \"interrupt\" });\n },\n\n reset: () => {\n worker.postMessage({ type: \"reset\" });\n },\n\n terminate: () => {\n worker.terminate();\n URL.revokeObjectURL(workerUrl);\n },\n\n isReady: () => isReady,\n };\n });\n}\n\n// ============================================\n// React Hooks\n// ============================================\n\n/** Message in a chat conversation */\nexport type Message = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n /** Attached images (URLs or data URIs) - for vision models */\n images?: string[];\n};\n\n/** Loading progress state */\nexport type LoadingProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n};\n\n/** Options for useChat hook */\nexport type UseChatOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens per response */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Initial messages */\n initialMessages?: Message[];\n /** Auto-load model on mount (default: false - loads on first generate or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Return type for useChat hook */\nexport type UseChatReturn = {\n /** Chat messages */\n messages: Message[];\n /** Current input value */\n input: string;\n /** Set input value */\n setInput: (value: string) => void;\n /** Submit current input */\n handleSubmit: (e?: { preventDefault?: () => void }) => void;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating a response */\n isGenerating: boolean;\n /** Current thinking content (streaming) */\n thinking: string;\n /** Stop generation */\n stop: () => void;\n /** Clear all messages */\n clear: () => void;\n /** Current tokens per second */\n tps: number;\n /** Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n /** Currently attached images (for next message) */\n attachedImages: string[];\n /** Attach an image to the next message */\n attachImage: (imageUrl: string) => void;\n /** Remove an attached image */\n removeImage: (index: number) => void;\n /** Clear all attached images */\n clearImages: () => void;\n /** Send message with specific images (convenience method) */\n sendWithImages: (text: string, images: string[]) => void;\n};\n\n/**\n * React hook for chat with local LLM\n *\n * @example\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <form onSubmit={handleSubmit}>\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * <button disabled={isGenerating}>Send</button>\n * </form>\n * </div>\n * );\n 
* }\n * ```\n */\nexport function useChat(options: UseChatOptions = {}): UseChatReturn {\n // Lazy import React to avoid SSR issues\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n initialMessages = [],\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<Message[]>(initialMessages);\n const [input, setInput] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [thinking, setThinking] = useState<string>(\"\");\n const [currentResponse, setCurrentResponse] = useState<string>(\"\");\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [attachedImages, setAttachedImages] = useState<string[]>([]);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const messageIdRef = useRef<number>(0);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on generate\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize worker\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCurrentResponse((r: string) => r + token.text);\n }\n },\n onComplete: () => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n // Commit response to messages when generation completes\n useEffect(() => {\n if (!isGenerating && currentResponse) {\n setMessages((msgs: Message[]) => {\n const lastMsg = msgs.at(-1);\n if (lastMsg?.role === \"assistant\") {\n return msgs.map((m: Message, i: number) =>\n i === msgs.length - 1\n ? { ...m, content: currentResponse, thinking: thinking || undefined }\n : m,\n );\n }\n return msgs;\n });\n setCurrentResponse(\"\");\n setThinking(\"\");\n }\n }, [isGenerating, currentResponse, thinking]);\n\n // Store pending message for auto-load scenario\n const pendingMessageRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[]>([]);\n\n // Image management functions\n const attachImage = useCallback((imageUrl: string) => {\n setAttachedImages((imgs: string[]) => [...imgs, imageUrl]);\n }, []);\n\n const removeImage = useCallback((index: number) => {\n setAttachedImages((imgs: string[]) => imgs.filter((_: string, i: number) => i !== index));\n }, []);\n\n const clearImages = useCallback(() => {\n setAttachedImages([]);\n }, []);\n\n // Internal function to send a message with specific images\n const sendMessageWithImages = useCallback(\n (text: string, images: string[]) => {\n if (!text.trim() || isGenerating) {\n return;\n }\n\n messageIdRef.current += 1;\n const userMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"user\",\n content: text.trim(),\n images: images.length > 0 ? images : undefined,\n };\n\n messageIdRef.current += 1;\n const assistantMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"assistant\",\n content: \"\",\n };\n\n setMessages((msgs: Message[]) => [...msgs, userMessage, assistantMessage]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n\n // If worker not loaded, trigger load and queue the message\n if (!workerRef.current) {\n pendingMessageRef.current = text.trim();\n pendingImagesRef.current = images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(text.trim(), {\n system,\n thinking: enableThinking,\n maxTokens: images.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: images.length > 0 ? 
images : undefined,\n });\n },\n [isGenerating, system, enableThinking, maxTokens, temperature, load],\n );\n\n const handleSubmit = useCallback(\n (e?: { preventDefault?: () => void }) => {\n e?.preventDefault?.();\n\n if (!input.trim() || isGenerating) {\n return;\n }\n\n // Send with any attached images\n sendMessageWithImages(input, attachedImages);\n setInput(\"\");\n setAttachedImages([]);\n },\n [input, isGenerating, attachedImages, sendMessageWithImages],\n );\n\n // Convenience method to send with specific images\n const sendWithImages = useCallback(\n (text: string, images: string[]) => {\n sendMessageWithImages(text, images);\n },\n [sendMessageWithImages],\n );\n\n // Process pending message when worker becomes ready\n useEffect(() => {\n if (isReady && pendingMessageRef.current && workerRef.current) {\n const pendingContent = pendingMessageRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingMessageRef.current = null;\n pendingImagesRef.current = [];\n setIsGenerating(true);\n workerRef.current.generate(pendingContent, {\n system,\n thinking: enableThinking,\n maxTokens: pendingImages.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: pendingImages.length > 0 ? pendingImages : undefined,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n const clear = useCallback(() => {\n workerRef.current?.reset();\n setMessages([]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n setAttachedImages([]);\n }, []);\n\n // Update last message with streaming content\n const displayMessages = messages.map((m: Message, i: number) => {\n if (i === messages.length - 1 && m.role === \"assistant\" && isGenerating) {\n return { ...m, content: currentResponse, thinking: thinking || undefined };\n }\n return m;\n });\n\n return {\n messages: displayMessages,\n input,\n setInput,\n handleSubmit,\n isLoading,\n loadingProgress,\n isGenerating,\n thinking,\n stop,\n clear,\n tps,\n isReady,\n error,\n load,\n attachedImages,\n attachImage,\n removeImage,\n clearImages,\n sendWithImages,\n };\n}\n\n/** Options for useCompletion hook */\nexport type UseCompletionOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Auto-load model on mount (default: false - loads on first complete() or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Options for single completion call */\nexport type CompleteOptions = {\n /** Image URLs or data URIs to analyze (for vision models) */\n images?: string[];\n};\n\n/** Return type for useCompletion hook */\nexport type UseCompletionReturn = {\n /** Generated completion */\n completion: string;\n /** Thinking content (if enabled) */\n thinking: string;\n /** Generate completion (optionally with images for vision models) */\n complete: (prompt: string, options?: CompleteOptions) => Promise<string>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating */\n isGenerating: boolean;\n /** Stop generation */\n stop: () => void;\n /** Current tokens per second */\n tps: number;\n /** 
Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n};\n\n/**\n * React hook for text completion with local LLM\n *\n * @example\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading, isGenerating } = useCompletion();\n *\n * if (isLoading) return <div>Loading...</div>;\n *\n * return (\n * <div>\n * <button onClick={() => complete(\"Write a haiku\")}>Generate</button>\n * <p>{completion}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useCompletion(options: UseCompletionOptions = {}): UseCompletionReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useCompletion requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [completion, setCompletion] = useState<string>(\"\");\n const [thinking, setThinking] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const resolveRef = useRef<((text: string) => void) | null>(null);\n const rejectRef = useRef<((err: Error) => void) | null>(null);\n const pendingPromptRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[] | undefined>(undefined);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on complete()\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCompletion((c: string) => c + token.text);\n }\n },\n onComplete: (result) => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n resolveRef.current?.(result.text);\n resolveRef.current = null;\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n const complete = useCallback(\n (prompt: string, completeOptions?: CompleteOptions): Promise<string> => {\n return new Promise((resolve, reject) => {\n setCompletion(\"\");\n setThinking(\"\");\n resolveRef.current = resolve;\n rejectRef.current = reject;\n\n // If worker not loaded, trigger load and queue the prompt\n if (!workerRef.current) {\n pendingPromptRef.current = prompt;\n pendingImagesRef.current = completeOptions?.images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(prompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: completeOptions?.images,\n });\n });\n },\n [system, enableThinking, maxTokens, temperature, load],\n );\n\n // Process pending prompt when worker becomes ready\n useEffect(() => {\n if (isReady && pendingPromptRef.current && workerRef.current) {\n const pendingPrompt = pendingPromptRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingPromptRef.current = null;\n pendingImagesRef.current = undefined;\n setIsGenerating(true);\n workerRef.current.generate(pendingPrompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: pendingImages,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n return {\n completion,\n thinking,\n complete,\n isLoading,\n loadingProgress,\n isGenerating,\n stop,\n tps,\n isReady,\n error,\n load,\n };\n}\n\n// ============================================\n// Text-to-Speech (useSpeech hook)\n// ============================================\n\n/** TTS loading progress */\nexport type TTSProgress = {\n status: \"idle\" | \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n error?: string;\n};\n\n/** Available TTS models */\nexport type TTSModelId = \"kokoro-82m\" | \"supertonic-66m\";\n\n/** Voice info for TTS models */\nexport type BrowserVoiceInfo = {\n id: string;\n name: string;\n gender: \"male\" | \"female\";\n language: string;\n description: string;\n};\n\n/** Kokoro voice definitions (24kHz, high quality) */\nconst KOKORO_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: 
\"af_heart\",\n name: \"Heart\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, highest quality (Grade A)\",\n },\n {\n id: \"af_bella\",\n name: \"Bella\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, warm and friendly (Grade A-)\",\n },\n {\n id: \"af_nicole\",\n name: \"Nicole\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, soft and gentle\",\n },\n {\n id: \"af_sarah\",\n name: \"Sarah\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, clear and professional\",\n },\n {\n id: \"af_sky\",\n name: \"Sky\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, young and energetic\",\n },\n {\n id: \"af_alloy\",\n name: \"Alloy\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_aoede\",\n name: \"Aoede\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, mythical\",\n },\n {\n id: \"af_jessica\",\n name: \"Jessica\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_kore\",\n name: \"Kore\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_nova\",\n name: \"Nova\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_river\",\n name: \"River\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"am_fenrir\",\n name: \"Fenrir\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, best quality\",\n },\n {\n id: \"am_michael\",\n name: \"Michael\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, warm and friendly\",\n },\n { id: \"am_adam\", name: \"Adam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_echo\", name: \"Echo\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_eric\", name: \"Eric\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_liam\", name: \"Liam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_onyx\", name: \"Onyx\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_puck\", name: \"Puck\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n {\n id: \"am_santa\",\n name: \"Santa\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, festive\",\n },\n {\n id: \"bf_emma\",\n name: \"Emma\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, elegant and clear\",\n },\n {\n id: \"bf_isabella\",\n name: \"Isabella\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, sophisticated\",\n },\n {\n id: \"bf_alice\",\n name: \"Alice\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bf_lily\",\n name: \"Lily\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bm_george\",\n name: \"George\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, distinguished\",\n },\n {\n id: \"bm_lewis\",\n name: \"Lewis\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, friendly\",\n },\n {\n id: \"bm_daniel\",\n name: \"Daniel\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British 
male\",\n },\n { id: \"bm_fable\", name: \"Fable\", gender: \"male\", language: \"en-gb\", description: \"British male\" },\n];\n\n/** Supertonic voice definitions (44.1kHz, faster) */\nconst SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: \"F1\",\n name: \"Female 1\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 1 - Clear and natural\",\n },\n {\n id: \"F2\",\n name: \"Female 2\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 2 - Warm and expressive\",\n },\n {\n id: \"M1\",\n name: \"Male 1\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 1 - Deep and confident\",\n },\n {\n id: \"M2\",\n name: \"Male 2\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 2 - Friendly and casual\",\n },\n];\n\n/** TTS model configuration */\nconst TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n> = {\n \"kokoro-82m\": {\n repo: \"onnx-community/Kokoro-82M-v1.0-ONNX\",\n defaultVoice: \"af_heart\",\n sampleRate: 24000,\n voices: KOKORO_BROWSER_VOICES,\n },\n \"supertonic-66m\": {\n repo: \"onnx-community/Supertonic-TTS-ONNX\",\n defaultVoice: \"F1\",\n sampleRate: 44100,\n voices: SUPERTONIC_BROWSER_VOICES,\n },\n};\n\n/** Options for useSpeech hook */\nexport type UseSpeechOptions = {\n /** TTS model to use (default: \"kokoro-82m\") */\n model?: TTSModelId;\n /** Default voice ID (default: model's default voice) */\n voice?: string;\n /** Speech speed multiplier (default: 1.0) */\n speed?: number;\n /** Auto-load TTS model on mount (default: false) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n /** Called when speech starts */\n onStart?: () => void;\n /** Called when speech ends */\n onEnd?: () => void;\n};\n\n/** Return type for useSpeech hook */\nexport type UseSpeechReturn = {\n /** Speak text aloud */\n speak: (text: string, options?: { voice?: string; speed?: number }) => Promise<void>;\n /** Stop current speech */\n stop: () => void;\n /** Whether TTS model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: TTSProgress | null;\n /** Whether currently speaking */\n isSpeaking: boolean;\n /** Whether TTS model is ready */\n isReady: boolean;\n /** Load the TTS model */\n load: () => void;\n /** Error message if any */\n error: string | null;\n /** List available voices for current model */\n listVoices: () => BrowserVoiceInfo[];\n /** Current voice ID */\n currentVoice: string;\n /** Set current voice */\n setVoice: (voiceId: string) => void;\n /** Current speed */\n currentSpeed: number;\n /** Set speed */\n setSpeed: (speed: number) => void;\n /** Current TTS model ID */\n currentModel: TTSModelId;\n /** Sample rate for current model (24000 for Kokoro, 44100 for Supertonic) */\n sampleRate: number;\n};\n\n// ============================================\n// TTS Worker (inline, loads from CDN)\n// ============================================\nconst TTS_WORKER_CODE = `\n // TTS Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let ttsInstance = null;\n let modelType = null; // \"supertonic\" or \"kokoro\"\n let voiceEmbeddings = new Map();\n let kokoroTTS = null;\n \n self.onmessage = async (e) => {\n 
const { type, payload } = e.data;\n \n if (type === \"load\") {\n try {\n const { modelId, repo, voices } = payload;\n modelType = modelId === \"supertonic-66m\" ? \"supertonic\" : \"kokoro\";\n \n if (modelType === \"supertonic\") {\n // Load Supertonic using transformers.js pipeline\n ttsInstance = await pipeline(\"text-to-speech\", repo, {\n device: \"webgpu\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n // Load voice embeddings\n for (const voice of voices) {\n try {\n const voiceUrl = \"https://huggingface.co/\" + repo + \"/resolve/main/voices/\" + voice.id + \".bin\";\n const response = await fetch(voiceUrl);\n if (response.ok) {\n const buffer = await response.arrayBuffer();\n voiceEmbeddings.set(voice.id, new Float32Array(buffer));\n }\n } catch (err) {\n console.warn(\"Failed to load voice:\", voice.id, err);\n }\n }\n \n // Warmup\n try {\n await ttsInstance(\"Hello\", {\n speaker_embeddings: new Float32Array(1 * 101 * 128),\n num_inference_steps: 1,\n speed: 1.0,\n });\n } catch (e) {\n console.warn(\"Warmup failed:\", e);\n }\n } else {\n // Load Kokoro using kokoro-js from CDN\n const kokoroModule = await import(\"https://cdn.jsdelivr.net/npm/kokoro-js@1.2.1/dist/kokoro.web.min.js\");\n const { KokoroTTS } = kokoroModule;\n \n kokoroTTS = await KokoroTTS.from_pretrained(repo, {\n dtype: \"fp32\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n }\n \n self.postMessage({ type: \"ready\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"generate\") {\n try {\n const { text, voice, speed } = payload;\n let audio, sampleRate;\n \n if (modelType === \"supertonic\") {\n let embedding = voiceEmbeddings.get(voice);\n if (!embedding) {\n embedding = new Float32Array(101 * 128).fill(0.1);\n }\n \n const result = await ttsInstance(text, {\n speaker_embeddings: embedding,\n speed: speed || 1.0,\n });\n \n audio = result.audio;\n sampleRate = result.sampling_rate;\n } else {\n const result = await kokoroTTS.generate(text, {\n voice: voice,\n speed: speed || 1.0,\n });\n \n audio = result.audio;\n sampleRate = result.sampling_rate;\n }\n \n // Transfer audio data back\n self.postMessage(\n { type: \"audio\", payload: { audio: audio, sampleRate: sampleRate } },\n [audio.buffer]\n );\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n`;\n\n/** Create TTS worker instance */\nfunction createTTSWorker(): Worker {\n const blob = new Blob([TTS_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/**\n * React hook for text-to-speech with Web Audio API playback\n *\n * Supports both Kokoro (24kHz, high quality) and Supertonic (44.1kHz, faster).\n *\n * @example\n * ```tsx\n * import { useSpeech } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * // Default: Kokoro TTS\n * const { speak, stop, isLoading, isSpeaking, listVoices, setVoice } = useSpeech();\n *\n * // Or use Supertonic (44.1kHz, faster)\n * // const { speak, listVoices } = useSpeech({ model: \"supertonic-66m\" });\n *\n * if (isLoading) return <div>Loading TTS...</div>;\n *\n * return (\n * <div>\n * <select onChange={e => setVoice(e.target.value)}>\n * {listVoices().map(v => (\n * <option key={v.id} 
value={v.id}>{v.name}</option>\n * ))}\n * </select>\n * <button onClick={() => speak(\"Hello world!\")}>\n * {isSpeaking ? \"Speaking...\" : \"Speak\"}\n * </button>\n * {isSpeaking && <button onClick={stop}>Stop</button>}\n * </div>\n * );\n * }\n * ```\n */\nexport function useSpeech(options: UseSpeechOptions = {}): UseSpeechReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useSpeech requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model: modelId = \"kokoro-82m\",\n speed: defaultSpeed = 1.0,\n autoLoad = false,\n onReady,\n onError,\n onStart,\n onEnd,\n } = options;\n\n // Get model config\n const modelConfig = TTS_MODELS[modelId];\n const defaultVoice = options.voice || modelConfig.defaultVoice;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<TTSProgress | null>(null);\n const [isSpeaking, setIsSpeaking] = useState<boolean>(false);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [currentVoice, setCurrentVoice] = useState<string>(defaultVoice);\n const [currentSpeed, setCurrentSpeed] = useState<number>(defaultSpeed);\n\n const workerRef = useRef<Worker | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const modelIdRef = useRef<TTSModelId>(modelId);\n const pendingSpeakRef = useRef<{ text: string; voice: string; speed: number } | null>(null);\n\n // Voice list based on selected model\n const listVoices = useCallback((): BrowserVoiceInfo[] => {\n return modelConfig.voices;\n }, [modelConfig.voices]);\n\n // Load function\n const load = useCallback(() => {\n if (workerRef.current || isLoading) return;\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize TTS worker\n useEffect(() => {\n if (!shouldLoad) return;\n\n mountedRef.current = true;\n modelIdRef.current = modelId;\n\n const config = TTS_MODELS[modelId];\n\n setLoadingProgress({\n status: \"loading\",\n message: `Loading ${modelId === \"supertonic-66m\" ? 
\"Supertonic\" : \"Kokoro\"} TTS...`,\n });\n\n // Create worker\n const worker = createTTSWorker();\n workerRef.current = worker;\n\n // Handle worker messages\n worker.onmessage = (e: MessageEvent) => {\n if (!mountedRef.current) return;\n\n const { type, payload } = e.data;\n\n if (type === \"progress\" && payload.status === \"progress\" && payload.file) {\n setLoadingProgress({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round(payload.progress || 0),\n });\n }\n\n if (type === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n setLoadingProgress({ status: \"ready\" });\n onReady?.();\n\n // Process pending speak request\n if (pendingSpeakRef.current) {\n const { text, voice, speed } = pendingSpeakRef.current;\n pendingSpeakRef.current = null;\n worker.postMessage({ type: \"generate\", payload: { text, voice, speed } });\n }\n }\n\n if (type === \"audio\") {\n // Play audio using Web Audio API\n const { audio, sampleRate } = payload;\n playAudioData(audio, sampleRate);\n }\n\n if (type === \"error\") {\n const errorMsg = payload;\n setError(errorMsg);\n setIsLoading(false);\n setIsSpeaking(false);\n setLoadingProgress({ status: \"error\", error: errorMsg });\n onError?.(errorMsg);\n }\n };\n\n worker.onerror = (err) => {\n if (!mountedRef.current) return;\n const errorMsg = err.message || \"Worker error\";\n setError(errorMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", error: errorMsg });\n onError?.(errorMsg);\n };\n\n // Send load message\n worker.postMessage({\n type: \"load\",\n payload: {\n modelId,\n repo: config.repo,\n voices: config.voices,\n },\n });\n\n return () => {\n mountedRef.current = false;\n worker.terminate();\n workerRef.current = null;\n };\n }, [shouldLoad, modelId, onReady, onError]);\n\n // Helper to play audio data\n const playAudioData = useCallback(\n async (audio: Float32Array, sampleRate: number) => {\n try {\n // Create or reuse AudioContext\n if (!audioContextRef.current || audioContextRef.current.state === \"closed\") {\n audioContextRef.current = new AudioContext({ sampleRate });\n }\n const ctx = audioContextRef.current;\n\n if (ctx.state === \"suspended\") {\n await ctx.resume();\n }\n\n // Create buffer and play\n const audioBuffer = ctx.createBuffer(1, audio.length, sampleRate);\n audioBuffer.copyToChannel(new Float32Array(audio), 0);\n\n const sourceNode = ctx.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(ctx.destination);\n\n sourceNodeRef.current = sourceNode;\n\n sourceNode.onended = () => {\n if (!mountedRef.current) return;\n setIsSpeaking(false);\n onEnd?.();\n };\n\n sourceNode.start();\n } catch (err) {\n setIsSpeaking(false);\n const errorMsg = err instanceof Error ? 
err.message : String(err);\n setError(errorMsg);\n onError?.(errorMsg);\n }\n },\n [onEnd, onError],\n );\n\n // Cleanup AudioContext only on unmount (not on re-renders)\n useEffect(() => {\n return () => {\n try {\n sourceNodeRef.current?.stop();\n } catch {\n // Ignore if already stopped\n }\n try {\n if (audioContextRef.current && audioContextRef.current.state !== \"closed\") {\n audioContextRef.current.close();\n }\n } catch {\n // Ignore if already closed\n }\n };\n }, []);\n\n // Speak function - sends message to worker\n const speak = useCallback(\n async (text: string, opts?: { voice?: string; speed?: number }) => {\n const voice = opts?.voice || currentVoice;\n const speed = opts?.speed || currentSpeed;\n\n // Validate voice\n const voiceInfo = modelConfig.voices.find((v) => v.id === voice);\n if (!voiceInfo) {\n const validVoices = modelConfig.voices.map((v) => v.id).join(\", \");\n const errorMsg = `Voice \"${voice}\" not found. Should be one of: ${validVoices}.`;\n setError(errorMsg);\n onError?.(errorMsg);\n return;\n }\n\n // Auto-load if not loaded\n if (!workerRef.current) {\n // Queue speak for after load\n pendingSpeakRef.current = { text, voice, speed };\n load();\n return;\n }\n\n if (!isReady) {\n // Queue speak for after ready\n pendingSpeakRef.current = { text, voice, speed };\n return;\n }\n\n setIsSpeaking(true);\n onStart?.();\n\n // Send generate message to worker\n workerRef.current.postMessage({\n type: \"generate\",\n payload: { text, voice, speed },\n });\n },\n [currentVoice, currentSpeed, modelConfig.voices, load, isReady, onStart, onError],\n );\n\n // Stop function\n const stop = useCallback(() => {\n if (sourceNodeRef.current) {\n sourceNodeRef.current.stop();\n sourceNodeRef.current.disconnect();\n sourceNodeRef.current = null;\n }\n setIsSpeaking(false);\n }, []);\n\n // Voice setter with validation\n const setVoice = useCallback(\n (voiceId: string) => {\n const voiceInfo = modelConfig.voices.find((v) => v.id === voiceId);\n if (voiceInfo) {\n setCurrentVoice(voiceId);\n } else {\n console.warn(\n `Voice \"${voiceId}\" not valid for ${modelId}. 
Available: ${modelConfig.voices.map((v) => v.id).join(\", \")}`,\n );\n }\n },\n [modelConfig.voices, modelId],\n );\n\n // Speed setter\n const setSpeed = useCallback((speed: number) => {\n setCurrentSpeed(Math.max(0.5, Math.min(2.0, speed)));\n }, []);\n\n return {\n speak,\n stop,\n isLoading,\n loadingProgress,\n isSpeaking,\n isReady,\n load,\n error,\n listVoices,\n currentVoice,\n setVoice,\n currentSpeed,\n setSpeed,\n currentModel: modelId,\n sampleRate: modelConfig.sampleRate,\n };\n}\n\n// ============================================\n// Audio Playback Utilities\n// ============================================\n\n/**\n * Play audio from Float32Array using Web Audio API\n *\n * @example\n * ```ts\n * import { playAudio } from \"@tryhamster/gerbil/browser\";\n *\n * const audio = new Float32Array([...]); // TTS output\n * const controller = await playAudio(audio, 24000);\n *\n * // Stop playback\n * controller.stop();\n * ```\n */\nexport async function playAudio(\n audio: Float32Array,\n sampleRate: number = 24000,\n): Promise<{ stop: () => void; onEnded: Promise<void> }> {\n const audioContext = new AudioContext();\n\n // Resume if suspended\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n\n const audioBuffer = audioContext.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n audioBuffer.copyToChannel(channelData, 0);\n\n const sourceNode = audioContext.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(audioContext.destination);\n\n const onEnded = new Promise<void>((resolve) => {\n sourceNode.onended = () => {\n audioContext.close();\n resolve();\n };\n });\n\n sourceNode.start();\n\n return {\n stop: () => {\n sourceNode.stop();\n audioContext.close();\n },\n onEnded,\n };\n}\n\n/**\n * Create a reusable audio player for streaming TTS\n *\n * @example\n * ```ts\n * import { createAudioPlayer } from \"@tryhamster/gerbil/browser\";\n *\n * const player = createAudioPlayer(24000);\n *\n * // Queue audio chunks as they arrive\n * player.queue(chunk1);\n * player.queue(chunk2);\n *\n * // Stop and clear\n * player.stop();\n * ```\n */\nexport function createAudioPlayer(sampleRate: number = 24000): {\n queue: (audio: Float32Array) => void;\n stop: () => void;\n isPlaying: () => boolean;\n} {\n let audioContext: AudioContext | null = null;\n let nextStartTime = 0;\n let isActive = false;\n\n const ensureContext = async () => {\n if (!audioContext) {\n audioContext = new AudioContext();\n }\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n return audioContext;\n };\n\n return {\n queue: async (audio: Float32Array) => {\n const ctx = await ensureContext();\n isActive = true;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n\n // Schedule seamlessly after previous chunk\n const startTime = Math.max(ctx.currentTime, nextStartTime);\n source.start(startTime);\n nextStartTime = startTime + buffer.duration;\n\n source.onended = () => {\n if (ctx.currentTime >= nextStartTime - 0.1) {\n isActive = false;\n }\n };\n },\n\n stop: () => {\n isActive = false;\n nextStartTime = 0;\n if (audioContext) {\n audioContext.close();\n audioContext = null;\n }\n },\n\n isPlaying: () => isActive,\n };\n}\n\n// ============================================\n// Voice 
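// --- Editor's sketch (not part of the package source) ---
// playAudio also exposes an `onEnded` promise, so callers can await
// playback completion rather than polling; createAudioPlayer then covers
// the gapless streaming case. Uses only the two utilities defined above.
async function playClipThenStream(chunks: Float32Array[]) {
  // One-shot clip: wait until it finishes playing
  const clip = await playAudio(chunks[0], 24000); // 24 kHz = Kokoro output rate
  await clip.onEnded;

  // Remaining chunks: queue back-to-back; the player's nextStartTime
  // scheduling keeps chunk boundaries gapless
  const player = createAudioPlayer(24000);
  for (const chunk of chunks.slice(1)) player.queue(chunk);
}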
Input Hook (STT)\n// ============================================\n\n// ============================================\n// STT Worker (inline, loads from CDN)\n// ============================================\nconst STT_WORKER_CODE = `\n // STT Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let sttPipeline = null;\n \n self.onmessage = async (e) => {\n const { type, payload } = e.data;\n \n if (type === \"load\") {\n try {\n const { model } = payload;\n \n // Load Whisper model\n sttPipeline = await pipeline(\"automatic-speech-recognition\", model, {\n device: \"webgpu\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n self.postMessage({ type: \"ready\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"transcribe\") {\n try {\n const { audio } = payload;\n \n // Run transcription\n const result = await sttPipeline(audio, {\n return_timestamps: false,\n });\n \n self.postMessage({ type: \"transcript\", payload: result.text || \"\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n`;\n\n/** Create STT worker instance */\nfunction createSTTWorker(): Worker {\n const blob = new Blob([STT_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/**\n * Progress info for STT loading\n */\nexport type STTProgress = {\n status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n message?: string;\n progress?: number;\n file?: string;\n};\n\n/**\n * Options for useVoiceInput hook\n */\nexport type UseVoiceInputOptions = {\n /** STT model ID (default: whisper-tiny.en) */\n model?: string;\n /** Auto-load model on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when model is ready */\n onReady?: () => void;\n /** Callback when transcription completes (or for each chunk in streaming mode) */\n onTranscript?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n /** Callback during loading */\n onProgress?: (progress: STTProgress) => void;\n /** Enable streaming transcription - transcribes audio in chunks as you speak */\n streaming?: boolean;\n /** Chunk duration in ms for streaming mode (default: 1500 = 1.5 seconds) */\n chunkDuration?: number;\n /** Callback for each streaming chunk with partial transcript */\n onChunk?: (text: string, chunkIndex: number) => void;\n};\n\n/**\n * Return type for useVoiceInput hook\n */\nexport type UseVoiceInputReturn = {\n /** Start recording audio */\n startRecording: () => Promise<void>;\n /** Stop recording and transcribe */\n stopRecording: () => Promise<string>;\n /** Cancel recording without transcribing */\n cancelRecording: () => void;\n /** Transcribe raw audio data (Float32Array at 16kHz) */\n transcribe: (audio: Float32Array) => Promise<string>;\n /** Whether currently recording */\n isRecording: boolean;\n /** Whether transcribing */\n isTranscribing: boolean;\n /** Whether model is loading */\n isLoading: boolean;\n /** Whether model is ready */\n isReady: boolean;\n /** Latest transcription result (full transcript in streaming mode) */\n transcript: string;\n 
/** Current streaming chunk being transcribed (streaming mode only) */\n streamingChunk: string;\n /** Number of chunks transcribed so far (streaming mode only) */\n chunkCount: number;\n /** Loading progress */\n loadingProgress: STTProgress | null;\n /** Error message */\n error: string | null;\n /** Manually load the model */\n load: () => void;\n};\n\n/**\n * React hook for voice input with browser microphone\n *\n * Uses MediaRecorder to capture audio and Whisper for transcription.\n * Supports both one-shot and streaming transcription modes.\n *\n * @example Basic usage (one-shot)\n * ```tsx\n * function VoiceInput() {\n * const { startRecording, stopRecording, isRecording, transcript } = useVoiceInput({\n * onTranscript: (text) => console.log(\"User said:\", text),\n * });\n *\n * return (\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? \"Stop\" : \"Record\"}\n * </button>\n * );\n * }\n * ```\n *\n * @example Streaming transcription (real-time)\n * ```tsx\n * function LiveTranscription() {\n * const { startRecording, stopRecording, isRecording, transcript, streamingChunk } = useVoiceInput({\n * streaming: true, // Enable streaming mode\n * chunkDuration: 1500, // Transcribe every 1.5 seconds (default)\n * onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),\n * });\n *\n * return (\n * <div>\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? \"Stop\" : \"Start Live Transcription\"}\n * </button>\n * <p>Current chunk: {streamingChunk}</p>\n * <p>Full transcript: {transcript}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceInput(options: UseVoiceInputOptions = {}): UseVoiceInputReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceInput requires React. 
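// --- Editor's sketch (not part of the package source) ---
// `transcribe` also accepts raw audio directly (mono Float32Array at
// 16 kHz), bypassing the microphone path entirely. Inside a component,
// assuming an audio `file` decoded via Web Audio:
//
//   const { transcribe } = useVoiceInput({ autoLoad: true });
//   const ctx = new AudioContext({ sampleRate: 16000 }); // decode resamples to 16 kHz
//   const buf = await ctx.decodeAudioData(await file.arrayBuffer());
//   const text = await transcribe(buf.getChannelData(0));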
Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"whisper-tiny.en\",\n autoLoad = false,\n onReady,\n onTranscript,\n onError,\n onProgress,\n streaming = false,\n chunkDuration = 1500, // Transcribe every 1.5 seconds for near real-time\n onChunk,\n } = options;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<STTProgress | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [isRecording, setIsRecording] = useState<boolean>(false);\n const [isTranscribing, setIsTranscribing] = useState<boolean>(false);\n const [transcript, setTranscript] = useState<string>(\"\");\n const [streamingChunk, setStreamingChunk] = useState<string>(\"\");\n const [chunkCount, setChunkCount] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<Worker | null>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const mountedRef = useRef<boolean>(true);\n const streamingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);\n const pendingChunksRef = useRef<Blob[]>([]);\n const fullTranscriptRef = useRef<string>(\"\");\n const transcribeResolveRef = useRef<((text: string) => void) | null>(null);\n const transcribeRejectRef = useRef<((err: Error) => void) | null>(null);\n\n // Resolve model ID to HuggingFace path\n const resolveSTTModel = (modelId: string): string => {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n };\n\n // Load the STT model via worker\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n mountedRef.current = true;\n\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading STT model...\" });\n onProgress?.({ status: \"loading\", message: \"Loading STT model...\" });\n\n // Create worker\n const worker = createSTTWorker();\n workerRef.current = worker;\n\n // Handle worker messages\n worker.onmessage = (e: MessageEvent) => {\n if (!mountedRef.current) return;\n\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n const progress: STTProgress = {\n status: payload.progress !== undefined ? 
\"downloading\" : \"loading\",\n message: payload.status,\n progress: payload.progress,\n file: payload.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n }\n\n if (type === \"ready\") {\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n }\n\n if (type === \"transcript\") {\n const text = payload;\n setIsTranscribing(false);\n if (transcribeResolveRef.current) {\n transcribeResolveRef.current(text);\n transcribeResolveRef.current = null;\n transcribeRejectRef.current = null;\n }\n }\n\n if (type === \"error\") {\n const errMsg = payload;\n setError(errMsg);\n setIsLoading(false);\n setIsTranscribing(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n if (transcribeRejectRef.current) {\n transcribeRejectRef.current(new Error(errMsg));\n transcribeResolveRef.current = null;\n transcribeRejectRef.current = null;\n }\n }\n };\n\n worker.onerror = (err) => {\n if (!mountedRef.current) return;\n const errMsg = err.message || \"Worker error\";\n setError(errMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n };\n\n // Send load message\n worker.postMessage({\n type: \"load\",\n payload: { model: resolveSTTModel(model) },\n });\n\n return () => {\n mountedRef.current = false;\n worker.terminate();\n workerRef.current = null;\n };\n }, [shouldLoad, isReady, model, onReady, onError, onProgress]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n if (workerRef.current) {\n workerRef.current.terminate();\n workerRef.current = null;\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n };\n }, []);\n\n // Manual load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert audio blob to Float32Array at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n\n // Get mono channel\n const channelData = audioBuffer.getChannelData(0);\n\n // Resample if needed\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Transcribe audio via worker\n const transcribe = useCallback(\n async (audio: Float32Array): Promise<string> => {\n if (!workerRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n throw new Error(\"STT model not loaded. 
Loading now, please try again.\");\n }\n throw new Error(\"STT model not loaded\");\n }\n\n if (!isReady) {\n throw new Error(\"STT model still loading\");\n }\n\n setIsTranscribing(true);\n\n return new Promise((resolve, reject) => {\n transcribeResolveRef.current = (text: string) => {\n // Filter out Whisper artifacts\n let filtered = text.trim();\n if (\n filtered === \"[BLANK_AUDIO]\" ||\n filtered === \"(blank audio)\" ||\n filtered === \"[BLANK AUDIO]\"\n ) {\n filtered = \"\";\n }\n setTranscript(filtered);\n onTranscript?.(filtered);\n resolve(filtered);\n };\n transcribeRejectRef.current = reject;\n\n // Send audio to worker (transfer buffer for performance)\n const audioArray = new Float32Array(audio);\n workerRef.current!.postMessage({ type: \"transcribe\", payload: { audio: audioArray } }, [\n audioArray.buffer,\n ]);\n });\n },\n [shouldLoad, isReady, onTranscript],\n );\n\n // Track how many samples we've processed for streaming\n const processedSamplesRef = useRef<number>(0);\n\n // Transcribe a chunk of audio (for streaming mode)\n // Uses audioChunksRef (all chunks) to ensure valid WebM container\n const transcribeChunk = useCallback(\n async (chunkIdx: number): Promise<string> => {\n if (!workerRef.current || !isReady || audioChunksRef.current.length === 0) return \"\";\n\n try {\n // Create blob from ALL chunks (needed for valid WebM header)\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n const audioData = await blobToFloat32(audioBlob);\n\n // Calculate new samples since last transcription\n const newSamplesStart = processedSamplesRef.current;\n const totalSamples = audioData.length;\n\n // Skip if no new audio (< 0.5 seconds = 8000 samples at 16kHz)\n if (totalSamples - newSamplesStart < 8000) return \"\";\n\n // Extract only the new portion of audio\n const newAudio = audioData.slice(newSamplesStart);\n\n // Update processed count\n processedSamplesRef.current = totalSamples;\n\n // Use transcribe function which handles worker communication\n const text = await transcribe(newAudio);\n\n if (text && mountedRef.current) {\n setStreamingChunk(text);\n onChunk?.(text, chunkIdx);\n }\n\n return text;\n } catch {\n return \"\";\n }\n },\n [blobToFloat32, isReady, transcribe, onChunk],\n );\n\n // Start recording\n const startRecording = useCallback(async () => {\n if (isRecording) return;\n\n try {\n // For streaming mode, ensure STT model is loaded first\n if (streaming && !isReady) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for worker to be ready\n await new Promise<void>((resolve, reject) => {\n const checkReady = setInterval(() => {\n if (isReady && workerRef.current) {\n clearInterval(checkReady);\n resolve();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n reject(new Error(\"Timeout waiting for STT model\"));\n }, 60000);\n });\n }\n\n // Request microphone permission\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: {\n sampleRate: 16000,\n channelCount: 1,\n echoCancellation: true,\n noiseSuppression: true,\n },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n fullTranscriptRef.current = \"\";\n processedSamplesRef.current = 0;\n setTranscript(\"\");\n setStreamingChunk(\"\");\n setChunkCount(0);\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n if 
(streaming) {\n pendingChunksRef.current.push(event.data);\n }\n }\n };\n\n mediaRecorder.start(100); // Collect data every 100ms\n setIsRecording(true);\n setError(null);\n\n // If streaming mode, set up recursive transcription loop\n if (streaming && isReady && workerRef.current) {\n let chunkIdx = 0;\n let shouldContinue = true;\n\n // Use recursive setTimeout instead of setInterval to avoid timing issues\n // with heavy WebGPU/WASM operations\n const processNextChunk = async () => {\n if (!shouldContinue || !mountedRef.current) {\n return;\n }\n\n const numPending = pendingChunksRef.current.length;\n\n // Check if we have new audio to process\n if (numPending > 0) {\n // Clear pending counter (we'll process via audioChunksRef which has all data)\n pendingChunksRef.current = [];\n\n try {\n setIsTranscribing(true);\n const chunkText = await transcribeChunk(chunkIdx);\n\n if (chunkText && mountedRef.current) {\n chunkIdx++;\n setChunkCount(chunkIdx);\n\n // Append to full transcript using functional update\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? \" \" : \"\") + chunkText;\n fullTranscriptRef.current = newTranscript;\n onTranscript?.(newTranscript);\n return newTranscript;\n });\n }\n } catch (e) {\n console.error(\"[useVoiceInput] Chunk transcription error:\", e);\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n // Schedule next check if still running\n if (shouldContinue && mountedRef.current) {\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n }\n };\n\n // Start the loop\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n\n // Store a way to stop the loop\n (streamingIntervalRef as any)._stop = () => {\n shouldContinue = false;\n };\n }\n } catch (e: any) {\n const errMsg = e.message || \"Failed to start recording\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [\n isRecording,\n streaming,\n shouldLoad,\n model,\n chunkDuration,\n transcribeChunk,\n onTranscript,\n onError,\n onProgress,\n onReady,\n ]);\n\n // Stop recording and transcribe\n const stopRecording = useCallback(async (): Promise<string> => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n return new Promise((resolve, reject) => {\n if (!mediaRecorderRef.current || !isRecording) {\n reject(new Error(\"Not recording\"));\n return;\n }\n\n const mediaRecorder = mediaRecorderRef.current;\n\n mediaRecorder.onstop = async () => {\n // Stop all tracks\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n setIsRecording(false);\n\n // In streaming mode, process any remaining chunks and return full transcript\n if (streaming) {\n // Process any remaining audio\n if (audioChunksRef.current.length > 0 && processedSamplesRef.current > 0) {\n setIsTranscribing(true);\n pendingChunksRef.current = [];\n\n try {\n const finalChunkText = await transcribeChunk(chunkCount);\n if (finalChunkText && mountedRef.current) {\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
\" \" : \"\") + finalChunkText;\n fullTranscriptRef.current = newTranscript;\n return newTranscript;\n });\n }\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n const finalText = fullTranscriptRef.current;\n onTranscript?.(finalText);\n resolve(finalText);\n return;\n }\n\n // Non-streaming mode: transcribe entire recording\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // Ensure model is loaded\n if (!isReady || !workerRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for worker to be ready\n await new Promise<void>((res, rej) => {\n const checkReady = setInterval(() => {\n if (isReady && workerRef.current) {\n clearInterval(checkReady);\n res();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n rej(new Error(\"Timeout waiting for STT model\"));\n }, 30000);\n });\n }\n\n // Convert blob to Float32Array\n const audioData = await blobToFloat32(audioBlob);\n\n // Transcribe\n const text = await transcribe(audioData);\n resolve(text);\n } catch (e: any) {\n const errMsg = e.message || \"Transcription failed\";\n setError(errMsg);\n onError?.(errMsg);\n reject(e);\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n isRecording,\n streaming,\n chunkCount,\n shouldLoad,\n blobToFloat32,\n transcribe,\n transcribeChunk,\n onTranscript,\n onError,\n ]);\n\n // Cancel recording\n const cancelRecording = useCallback(() => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n if (mediaRecorderRef.current && isRecording) {\n mediaRecorderRef.current.stop();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n processedSamplesRef.current = 0;\n setIsRecording(false);\n }, [isRecording]);\n\n return {\n startRecording,\n stopRecording,\n cancelRecording,\n transcribe,\n isRecording,\n isTranscribing,\n isLoading,\n isReady,\n transcript,\n streamingChunk,\n chunkCount,\n loadingProgress,\n error,\n load,\n };\n}\n\n// ============================================\n// Voice Chat Hook (STT + LLM + TTS)\n// ============================================\n\n/**\n * Options for useVoiceChat hook\n */\nexport type UseVoiceChatOptions = {\n /** LLM model ID (default: qwen3-0.6b) */\n llmModel?: string;\n /** STT model ID (default: whisper-tiny.en) */\n sttModel?: string;\n /** TTS model ID (default: kokoro-82m, also supports supertonic-66m) */\n ttsModel?: TTSModelId;\n /** System prompt for LLM */\n system?: string;\n /** Enable thinking mode (default: false) */\n thinking?: boolean;\n /** TTS voice ID (default: model's default voice) */\n voice?: string;\n /** TTS speech speed (default: 1.0) */\n speed?: number;\n /** Auto-load all models on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when user speaks */\n onUserSpeak?: (text: string) => void;\n /** Callback when assistant responds */\n onAssistantSpeak?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/**\n * Message in voice chat\n */\nexport type VoiceChatMessage = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n audioUrl?: string;\n};\n\n/**\n * Return type for useVoiceChat hook\n */\nexport 
type UseVoiceChatReturn = {\n /** Messages in the conversation */\n messages: VoiceChatMessage[];\n /** Start recording user speech */\n startListening: () => Promise<void>;\n /** Stop recording and process (STT → LLM → TTS) */\n stopListening: () => Promise<void>;\n /** Cancel current operation */\n cancel: () => void;\n /** Clear conversation history */\n clear: () => void;\n /** Whether recording user speech */\n isListening: boolean;\n /** Whether processing (STT/LLM/TTS) */\n isProcessing: boolean;\n /** Whether assistant is speaking */\n isSpeaking: boolean;\n /** Current stage: idle, listening, transcribing, thinking, speaking */\n stage: \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\";\n /** Whether all models are loaded */\n isReady: boolean;\n /** Whether loading models */\n isLoading: boolean;\n /** Loading progress message */\n loadingMessage: string;\n /** Error message */\n error: string | null;\n /** Manually load all models */\n load: () => void;\n};\n\n/**\n * React hook for voice conversation with STT + LLM + TTS\n *\n * Complete voice-to-voice conversation loop:\n * 1. User presses button to speak\n * 2. Speech is transcribed (Whisper)\n * 3. LLM generates response\n * 4. Response is spoken aloud (Kokoro or Supertonic TTS)\n *\n * @example\n * ```tsx\n * function VoiceChat() {\n * const {\n * messages,\n * startListening,\n * stopListening,\n * isListening,\n * isSpeaking,\n * stage,\n * } = useVoiceChat({\n * system: \"You are a helpful voice assistant.\",\n * voice: \"af_bella\",\n * // Or use Supertonic for faster synthesis:\n * // ttsModel: \"supertonic-66m\",\n * // voice: \"F1\",\n * });\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <button\n * onMouseDown={startListening}\n * onMouseUp={stopListening}\n * >\n * {stage === \"idle\" ? \"🎤 Hold to Speak\" : stage}\n * </button>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceChat(options: UseVoiceChatOptions = {}): UseVoiceChatReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n // Get TTS model config for default voice\n const ttsModelId = options.ttsModel || \"kokoro-82m\";\n const ttsConfig = TTS_MODELS[ttsModelId];\n\n const {\n llmModel = \"qwen3-0.6b\",\n sttModel = \"whisper-tiny.en\",\n system = \"You are a helpful voice assistant. 
Keep responses brief and conversational.\",\n thinking = false,\n voice = ttsConfig.defaultVoice,\n speed = 1.0,\n autoLoad = false,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<VoiceChatMessage[]>([]);\n const [stage, setStage] = useState<\n \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\"\n >(\"idle\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingMessage, setLoadingMessage] = useState<string>(\"\");\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n // Refs for models and audio\n const llmWorkerRef = useRef<any>(null);\n const sttRef = useRef<any>(null);\n const ttsRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const cancelledRef = useRef<boolean>(false);\n\n // Computed states\n const isListening = stage === \"listening\";\n const isProcessing = stage === \"transcribing\" || stage === \"thinking\";\n const isSpeaking = stage === \"speaking\";\n\n // Resolve model ID to HuggingFace path for STT\n const resolveSTTModel = (modelId: string): string => {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n };\n\n // Load all models via workers\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModels = async () => {\n try {\n setIsLoading(true);\n setError(null);\n\n // Load STT worker\n setLoadingMessage(\"Loading speech recognition (Whisper)...\");\n const sttWorker = createSTTWorker();\n if (cancelled || !mountedRef.current) {\n sttWorker.terminate();\n return;\n }\n\n // Wait for STT worker to be ready\n await new Promise<void>((resolve, reject) => {\n sttWorker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"ready\") resolve();\n if (type === \"error\") reject(new Error(payload));\n if (type === \"progress\" && mountedRef.current) {\n setLoadingMessage(payload.status || \"Loading STT...\");\n }\n };\n sttWorker.onerror = (e) => reject(new Error(e.message));\n sttWorker.postMessage({\n type: \"load\",\n payload: { model: resolveSTTModel(sttModel) },\n });\n });\n if (cancelled || !mountedRef.current) {\n sttWorker.terminate();\n return;\n }\n sttRef.current = sttWorker;\n\n // Load LLM worker\n setLoadingMessage(\"Loading language model...\");\n const worker = await createGerbilWorker({\n modelId: llmModel,\n onProgress: (p) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.message || \"Loading LLM...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n worker.terminate();\n return;\n }\n llmWorkerRef.current = worker;\n\n // Load TTS worker\n const isSupertonic = ttsModelId === \"supertonic-66m\";\n setLoadingMessage(`Loading 
text-to-speech (${isSupertonic ? \"Supertonic\" : \"Kokoro\"})...`);\n\n const ttsWorker = createTTSWorker();\n if (cancelled || !mountedRef.current) {\n ttsWorker.terminate();\n return;\n }\n\n const ttsConfig = TTS_MODELS[ttsModelId];\n // Wait for TTS worker to be ready\n await new Promise<void>((resolve, reject) => {\n ttsWorker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"ready\") resolve();\n if (type === \"error\") reject(new Error(payload));\n if (type === \"progress\" && mountedRef.current) {\n setLoadingMessage(payload.status || \"Loading TTS...\");\n }\n };\n ttsWorker.onerror = (e) => reject(new Error(e.message));\n ttsWorker.postMessage({\n type: \"load\",\n payload: {\n modelId: ttsModelId,\n repo: ttsConfig.repo,\n voices: ttsConfig.voices,\n },\n });\n });\n if (cancelled || !mountedRef.current) {\n ttsWorker.terminate();\n return;\n }\n ttsRef.current = ttsWorker;\n\n setIsReady(true);\n setIsLoading(false);\n setLoadingMessage(\"Ready!\");\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load models\";\n setError(errMsg);\n setIsLoading(false);\n onError?.(errMsg);\n }\n };\n\n loadModels();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, llmModel, sttModel, ttsModelId, onError]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n llmWorkerRef.current?.terminate();\n sttRef.current?.terminate();\n ttsRef.current?.terminate();\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n audioContextRef.current?.close();\n };\n }, []);\n\n // Load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert blob to Float32 at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n const channelData = audioBuffer.getChannelData(0);\n\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Play audio through Web Audio API\n const playAudioBuffer = useCallback(\n async (audio: Float32Array, sampleRate: number): Promise<void> => {\n return new Promise((resolve) => {\n if (!audioContextRef.current) {\n audioContextRef.current = new AudioContext();\n }\n const ctx = audioContextRef.current;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n source.onended = () => {\n if (mountedRef.current) {\n resolve();\n }\n };\n source.start();\n sourceNodeRef.current = source;\n });\n },\n [],\n 
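// --- Editor's sketch (not part of the package source) ---
// The load effect above repeats one pattern three times (STT, LLM, TTS):
// post a "load" message, then await the worker's "ready"/"error" reply.
// Factored out, the pattern looks like this (helper name is hypothetical):
function waitForWorkerReady(worker: Worker, loadMessage: unknown): Promise<void> {
  return new Promise<void>((resolve, reject) => {
    worker.onmessage = (e: MessageEvent) => {
      if (e.data.type === "ready") resolve();
      if (e.data.type === "error") reject(new Error(e.data.payload));
    };
    worker.onerror = (e) => reject(new Error(e.message));
    worker.postMessage(loadMessage);
  });
}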
);\n\n // Start listening\n const startListening = useCallback(async () => {\n if (stage !== \"idle\") return;\n\n // Trigger load if not ready\n if (!isReady && !isLoading) {\n setShouldLoad(true);\n return;\n }\n\n cancelledRef.current = false;\n\n try {\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n }\n };\n\n mediaRecorder.start(100);\n setStage(\"listening\");\n setError(null);\n } catch (e: any) {\n const errMsg = e.message || \"Failed to access microphone\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [stage, isReady, isLoading, onError]);\n\n // Stop listening and process\n const stopListening = useCallback(async () => {\n if (stage !== \"listening\") return;\n\n const mediaRecorder = mediaRecorderRef.current;\n if (!mediaRecorder) return;\n\n return new Promise<void>((resolve) => {\n mediaRecorder.onstop = async () => {\n // Stop mic\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // STT via worker\n setStage(\"transcribing\");\n const audioData = await blobToFloat32(audioBlob);\n\n // Send to STT worker and wait for response\n let userText = await new Promise<string>((sttResolve, sttReject) => {\n const handler = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"transcript\") {\n sttRef.current?.removeEventListener(\"message\", handler);\n sttResolve(payload);\n }\n if (type === \"error\") {\n sttRef.current?.removeEventListener(\"message\", handler);\n sttReject(new Error(payload));\n }\n };\n sttRef.current?.addEventListener(\"message\", handler);\n const audioArray = new Float32Array(audioData);\n sttRef.current?.postMessage({ type: \"transcribe\", payload: { audio: audioArray } }, [\n audioArray.buffer,\n ]);\n });\n\n userText = userText.trim();\n\n // Filter out Whisper artifacts\n if (\n userText === \"[BLANK_AUDIO]\" ||\n userText === \"(blank audio)\" ||\n userText === \"[BLANK AUDIO]\"\n ) {\n userText = \"\";\n }\n\n if (cancelledRef.current || !userText) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add user message\n const userMsgId = `user-${Date.now()}`;\n setMessages((m) => [...m, { id: userMsgId, role: \"user\", content: userText }]);\n onUserSpeak?.(userText);\n\n // LLM\n setStage(\"thinking\");\n\n // Build conversation history\n const history = messages.map((m) => ({\n role: m.role as \"user\" | \"assistant\",\n content: m.content,\n }));\n history.push({ role: \"user\", content: userText });\n\n let responseText = \"\";\n let thinkingText = \"\";\n\n await llmWorkerRef.current.generate(userText, {\n system,\n thinking,\n history,\n onToken: (token: WorkerToken) => {\n if (cancelledRef.current) return;\n if (token.state === \"thinking\") {\n thinkingText += token.text;\n } else {\n responseText += token.text;\n }\n },\n });\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add assistant message\n const assistantMsgId = `assistant-${Date.now()}`;\n 
setMessages((m) => [\n ...m,\n {\n id: assistantMsgId,\n role: \"assistant\",\n content: responseText,\n thinking: thinkingText || undefined,\n },\n ]);\n onAssistantSpeak?.(responseText);\n\n // TTS via worker\n if (responseText.trim()) {\n setStage(\"speaking\");\n\n // Send to TTS worker and wait for response\n const ttsResult = await new Promise<{ audio: Float32Array; sampleRate: number }>(\n (ttsResolve, ttsReject) => {\n const handler = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"audio\") {\n ttsRef.current?.removeEventListener(\"message\", handler);\n ttsResolve({ audio: payload.audio, sampleRate: payload.sampleRate });\n }\n if (type === \"error\") {\n ttsRef.current?.removeEventListener(\"message\", handler);\n ttsReject(new Error(payload));\n }\n };\n ttsRef.current?.addEventListener(\"message\", handler);\n ttsRef.current?.postMessage({\n type: \"generate\",\n payload: { text: responseText, voice, speed },\n });\n },\n );\n\n if (!cancelledRef.current) {\n await playAudioBuffer(ttsResult.audio, ttsResult.sampleRate);\n }\n }\n\n setStage(\"idle\");\n resolve();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Processing failed\";\n setError(errMsg);\n setStage(\"idle\");\n onError?.(errMsg);\n resolve();\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n stage,\n messages,\n system,\n thinking,\n voice,\n speed,\n blobToFloat32,\n playAudioBuffer,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n ]);\n\n // Cancel\n const cancel = useCallback(() => {\n cancelledRef.current = true;\n\n if (mediaRecorderRef.current && stage === \"listening\") {\n mediaRecorderRef.current.stop();\n }\n\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (sourceNodeRef.current) {\n try {\n sourceNodeRef.current.stop();\n } catch {}\n }\n\n audioChunksRef.current = [];\n setStage(\"idle\");\n }, [stage]);\n\n // Clear messages\n const clear = useCallback(() => {\n setMessages([]);\n }, []);\n\n return {\n messages,\n startListening,\n stopListening,\n cancel,\n clear,\n isListening,\n isProcessing,\n isSpeaking,\n stage,\n isReady,\n isLoading,\n loadingMessage,\n error,\n load,\n };\n}\n\n// ============================================\n// Embedding Worker (inline, loads from CDN)\n// ============================================\nconst EMBEDDING_WORKER_CODE = `\n // Embedding Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n console.log(\"[EMBED WORKER] Worker script starting...\");\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let embedder = null;\n let modelId = null;\n \n self.onmessage = async (e) => {\n const { type, payload } = e.data;\n console.log(\"[EMBED WORKER] Received message:\", type, payload);\n \n if (type === \"load\") {\n try {\n modelId = payload.model || \"Xenova/all-MiniLM-L6-v2\";\n console.log(\"[EMBED WORKER] Loading model:\", modelId);\n \n // Note: Don't specify device for embeddings - they're small models\n // that work fine with default backend (unlike LLMs/STT which need WebGPU)\n embedder = await pipeline(\"feature-extraction\", modelId, {\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n console.log(\"[EMBED WORKER] Pipeline created, running warmup...\");\n \n // Warmup - don't swallow errors\n try 
{\n const warmup = await embedder(\"hello\", { pooling: \"mean\", normalize: true });\n console.log(\"[EMBED WORKER] Warmup successful, output type:\", typeof warmup, warmup?.constructor?.name);\n } catch (e) {\n console.error(\"[EMBED WORKER] Warmup failed:\", e);\n self.postMessage({ type: \"error\", payload: \"Warmup failed: \" + (e.message || String(e)) });\n return;\n }\n \n console.log(\"[EMBED WORKER] Sending ready message...\");\n self.postMessage({ type: \"ready\" });\n console.log(\"[EMBED WORKER] Ready message sent!\");\n } catch (err) {\n console.error(\"[EMBED WORKER] Load error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"embed\") {\n console.log(\"[EMBED WORKER] Processing embed request...\");\n try {\n const { text, normalize } = payload;\n console.log(\"[EMBED WORKER] Text to embed:\", text?.substring?.(0, 50));\n \n if (!embedder) {\n throw new Error(\"Embedder not initialized\");\n }\n \n console.log(\"[EMBED WORKER] Calling embedder...\");\n const output = await embedder(text, {\n pooling: \"mean\",\n normalize: normalize !== false,\n });\n \n console.log(\"[EMBED WORKER] Got output, type:\", typeof output, output?.constructor?.name);\n console.log(\"[EMBED WORKER] Output keys:\", output ? Object.keys(output) : \"null\");\n \n // Handle different output formats from transformers.js\n let vector;\n if (output?.data) {\n vector = Array.from(output.data);\n } else if (output?.tolist) {\n vector = output.tolist();\n } else if (Array.isArray(output)) {\n vector = output;\n } else {\n throw new Error(\"Unknown output format: \" + typeof output);\n }\n \n console.log(\"[EMBED WORKER] Vector length:\", vector?.length);\n console.log(\"[EMBED WORKER] Sending embedding result...\");\n self.postMessage({ type: \"embedding\", payload: { vector, text } });\n console.log(\"[EMBED WORKER] Embedding result sent!\");\n } catch (err) {\n console.error(\"[EMBED WORKER] Embed error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"embedBatch\") {\n console.log(\"[EMBED WORKER] Processing embedBatch request...\");\n try {\n const { texts, normalize } = payload;\n const results = [];\n \n for (const text of texts) {\n const output = await embedder(text, {\n pooling: \"mean\",\n normalize: normalize !== false,\n });\n results.push({ vector: Array.from(output.data), text });\n }\n \n console.log(\"[EMBED WORKER] Batch complete, sending results...\");\n self.postMessage({ type: \"embeddings\", payload: results });\n } catch (err) {\n console.error(\"[EMBED WORKER] Batch error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n \n console.log(\"[EMBED WORKER] Worker script loaded, waiting for messages...\");\n`;\n\n/** Create Embedding worker instance */\nfunction createEmbeddingWorker(): Worker {\n const blob = new Blob([EMBEDDING_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/** Embedding result type */\nexport type BrowserEmbedResult = {\n vector: number[];\n text: string;\n};\n\n/** Search result type */\nexport type BrowserSearchResult = {\n text: string;\n score: number;\n index: number;\n};\n\n/** useEmbedding options */\nexport type UseEmbeddingOptions = {\n /** Embedding model (default: \"Xenova/all-MiniLM-L6-v2\") */\n model?: string;\n /** 
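// --- Editor's sketch (not part of the package source) ---
// Why the output-format fallbacks above: transformers.js feature-extraction
// returns a Tensor whose `data` is a typed array; with
// { pooling: "mean", normalize: true } that is a single unit-length vector,
// so cosine similarity over these embeddings reduces to a dot product.
//
//   const output = await embedder("hello", { pooling: "mean", normalize: true });
//   const vector = Array.from(output.data); // e.g. 384 numbers for all-MiniLM-L6-v2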
Normalize vectors (default: true) */\n normalize?: boolean;\n /** Auto-load on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when ready */\n onReady?: () => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/** useEmbedding return type */\nexport type UseEmbeddingReturn = {\n /** Generate embedding for text */\n embed: (text: string) => Promise<number[]>;\n /** Generate embeddings for multiple texts */\n embedBatch: (texts: string[]) => Promise<BrowserEmbedResult[]>;\n /** Compute cosine similarity between two texts */\n similarity: (textA: string, textB: string) => Promise<number>;\n /** Semantic search - find most similar texts */\n search: (query: string, corpus: string[], topK?: number) => Promise<BrowserSearchResult[]>;\n /** Find nearest text to an embedding vector */\n findNearest: (\n embedding: number[],\n candidates: string[],\n topK?: number,\n ) => Promise<BrowserSearchResult[]>;\n /** Compute cosine similarity between two vectors */\n cosineSimilarity: (a: number[], b: number[]) => number;\n /** Manually load the model - returns Promise that resolves when ready */\n load: () => Promise<void>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Whether model is ready */\n isReady: boolean;\n /** Loading progress */\n loadingProgress: { status: string; message?: string; progress?: number } | null;\n /** Error message */\n error: string | null;\n};\n\n/**\n * React hook for text embeddings in the browser\n *\n * @example\n * ```tsx\n * import { useEmbedding } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { embed, similarity, search, isLoading, isReady } = useEmbedding();\n *\n * if (isLoading) return <div>Loading embedding model...</div>;\n *\n * const handleSearch = async () => {\n * const results = await search(\"capital of France\", [\n * \"Paris is beautiful\",\n * \"London is in England\",\n * \"Dogs are pets\"\n * ]);\n * console.log(results); // [{ text: \"Paris is beautiful\", score: 0.89, index: 0 }, ...]\n * };\n *\n * return <button onClick={handleSearch}>Search</button>;\n * }\n * ```\n */\nexport function useEmbedding(options: UseEmbeddingOptions = {}): UseEmbeddingReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\n \"useEmbedding requires React. 
Make sure React is available in the global scope.\",\n );\n }\n\n const { useState, useEffect, useRef, useCallback } = React;\n const {\n model = \"Xenova/all-MiniLM-L6-v2\",\n normalize = true,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [isLoading, setIsLoading] = useState(false);\n const [isReady, setIsReady] = useState(false);\n const [error, setError] = useState(null as string | null);\n const [loadingProgress, setLoadingProgress] = useState(\n null as { status: string; message?: string; progress?: number } | null,\n );\n\n const workerRef = useRef(null as Worker | null);\n const loadRequestedRef = useRef(false);\n // Promise that resolves when model is ready - critical for proper async waiting\n const readyPromiseRef = useRef(null as Promise<void> | null);\n const readyResolveRef = useRef(null as (() => void) | null);\n\n // Cosine similarity (pure function, no async)\n const cosineSimilarity = useCallback((a: number[], b: number[]): number => {\n if (a.length !== b.length) {\n throw new Error(`Vector dimensions must match: ${a.length} vs ${b.length}`);\n }\n\n let dotProduct = 0;\n let normA = 0;\n let normB = 0;\n\n for (let i = 0; i < a.length; i++) {\n dotProduct += a[i] * b[i];\n normA += a[i] * a[i];\n normB += b[i] * b[i];\n }\n\n const magnitude = Math.sqrt(normA) * Math.sqrt(normB);\n if (magnitude === 0) return 0;\n\n return dotProduct / magnitude;\n }, []);\n\n // Load model - returns a promise that resolves when ready\n const load = useCallback(() => {\n // Already loaded\n if (isReady && workerRef.current) {\n return Promise.resolve();\n }\n\n // Already loading - return existing promise\n if (loadRequestedRef.current && readyPromiseRef.current) {\n return readyPromiseRef.current;\n }\n\n loadRequestedRef.current = true;\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading embedding model...\" });\n\n // Create promise that will resolve when ready\n readyPromiseRef.current = new Promise<void>((resolve) => {\n readyResolveRef.current = resolve;\n });\n\n const worker = createEmbeddingWorker();\n workerRef.current = worker;\n\n // Use addEventListener instead of onmessage to allow multiple listeners\n // (the embed() function also adds its own listener for responses)\n worker.addEventListener(\"message\", (e: MessageEvent) => {\n const { type, payload } = e.data;\n console.log(\"[EMBED HOOK] Received from worker:\", type, payload);\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n setLoadingProgress({\n status: \"downloading\",\n message: `Downloading ${payload.file}`,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n console.log(\"[EMBED HOOK] Model ready! 
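// --- Editor's sketch (not part of the package source) ---
// Worked example for the cosineSimilarity defined above:
//   a = [1, 0], b = [1, 1]
//   dot = 1, |a| = 1, |b| = sqrt(2)  =>  score = 1 / sqrt(2) ~= 0.7071
// With normalized embeddings (the default in this hook) both magnitudes
// are already 1, so the score is simply the dot product.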
Resolving promise...\");\n setIsLoading(false);\n setIsReady(true);\n setLoadingProgress({ status: \"ready\" });\n // Resolve the ready promise\n readyResolveRef.current?.();\n console.log(\"[EMBED HOOK] Promise resolved, calling onReady...\");\n onReady?.();\n } else if (type === \"error\") {\n console.error(\"[EMBED HOOK] Error from worker:\", payload);\n setIsLoading(false);\n setError(payload);\n onError?.(payload);\n }\n });\n\n worker.onerror = (err) => {\n console.error(\"[EMBED HOOK] Worker onerror:\", err);\n setIsLoading(false);\n const errMsg = err.message || \"Worker error\";\n setError(errMsg);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n };\n\n console.log(\"[EMBED HOOK] Sending load message to worker...\");\n worker.postMessage({ type: \"load\", payload: { model } });\n\n return readyPromiseRef.current;\n }, [model, isReady, onReady, onError]);\n\n // Auto-load on mount if requested\n useEffect(() => {\n if (autoLoad) {\n load();\n }\n\n return () => {\n if (workerRef.current) {\n workerRef.current.terminate();\n workerRef.current = null;\n }\n };\n }, [autoLoad, load]);\n\n // Embed single text\n const embed = useCallback(\n async (text: string): Promise<number[]> => {\n console.log(\"[EMBED HOOK] embed() called with:\", text?.substring?.(0, 50));\n\n // Trigger load if not started\n const loadPromise = load();\n\n // Wait for the ready Promise (handles stale closures)\n console.log(\"[EMBED HOOK] Waiting for ready promise...\");\n if (readyPromiseRef.current) {\n await readyPromiseRef.current;\n } else {\n await loadPromise;\n }\n console.log(\"[EMBED HOOK] Ready promise resolved, sending embed message...\");\n\n return new Promise((resolve, reject) => {\n const worker = workerRef.current;\n if (!worker) {\n console.error(\"[EMBED HOOK] No worker available!\");\n reject(new Error(\"Worker not initialized. Call load() first.\"));\n return;\n }\n\n // Timeout after 30 seconds\n const timeout = setTimeout(() => {\n console.error(\"[EMBED HOOK] Timeout reached!\");\n worker.removeEventListener(\"message\", handler);\n reject(new Error(\"Embedding timeout after 30s\"));\n }, 30000);\n\n const handler = (e: MessageEvent) => {\n console.log(\"[EMBED HOOK] embed handler received:\", e.data.type);\n if (e.data.type === \"embedding\") {\n console.log(\"[EMBED HOOK] Got embedding result!\");\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n resolve(e.data.payload.vector);\n } else if (e.data.type === \"error\") {\n console.error(\"[EMBED HOOK] Got error:\", e.data.payload);\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n reject(new Error(e.data.payload));\n }\n };\n\n worker.addEventListener(\"message\", handler);\n console.log(\"[EMBED HOOK] Posting embed message to worker...\");\n worker.postMessage({ type: \"embed\", payload: { text, normalize } });\n console.log(\"[EMBED HOOK] Message posted, waiting for response...\");\n });\n },\n [load, normalize],\n );\n\n // Embed batch\n const embedBatch = useCallback(\n async (texts: string[]): Promise<BrowserEmbedResult[]> => {\n // Trigger load if not started\n const loadPromise = load();\n\n // Wait for the ready Promise (handles stale closures)\n if (readyPromiseRef.current) {\n await readyPromiseRef.current;\n } else {\n await loadPromise;\n }\n\n return new Promise((resolve, reject) => {\n const worker = workerRef.current;\n if (!worker) {\n reject(new Error(\"Worker not initialized. 
Call load() first.\"));\n return;\n }\n\n // Timeout after 60 seconds for batch\n const timeout = setTimeout(() => {\n worker.removeEventListener(\"message\", handler);\n reject(new Error(\"Batch embedding timeout after 60s\"));\n }, 60000);\n\n const handler = (e: MessageEvent) => {\n if (e.data.type === \"embeddings\") {\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n resolve(e.data.payload);\n } else if (e.data.type === \"error\") {\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n reject(new Error(e.data.payload));\n }\n };\n\n worker.addEventListener(\"message\", handler);\n worker.postMessage({ type: \"embedBatch\", payload: { texts, normalize } });\n });\n },\n [load, normalize],\n );\n\n // Similarity between two texts\n const similarity = useCallback(\n async (textA: string, textB: string): Promise<number> => {\n const [embA, embB] = await Promise.all([embed(textA), embed(textB)]);\n return cosineSimilarity(embA, embB);\n },\n [embed, cosineSimilarity],\n );\n\n // Semantic search\n const search = useCallback(\n async (query: string, corpus: string[], topK?: number): Promise<BrowserSearchResult[]> => {\n const [queryEmb, corpusEmbs] = await Promise.all([embed(query), embedBatch(corpus)]);\n\n const results: BrowserSearchResult[] = corpusEmbs.map(\n (doc: BrowserEmbedResult, index: number) => ({\n text: doc.text,\n score: cosineSimilarity(queryEmb, doc.vector),\n index,\n }),\n );\n\n results.sort((a, b) => b.score - a.score);\n return topK ? results.slice(0, topK) : results;\n },\n [embed, embedBatch, cosineSimilarity],\n );\n\n // Find nearest to an embedding\n const findNearest = useCallback(\n async (\n embedding: number[],\n candidates: string[],\n topK?: number,\n ): Promise<BrowserSearchResult[]> => {\n const candidateEmbs = await embedBatch(candidates);\n\n const results: BrowserSearchResult[] = candidateEmbs.map(\n (doc: BrowserEmbedResult, index: number) => ({\n text: doc.text,\n score: cosineSimilarity(embedding, doc.vector),\n index,\n }),\n );\n\n results.sort((a, b) => b.score - a.score);\n return topK ? 
results.slice(0, topK) : results;\n },\n [embedBatch, cosineSimilarity],\n );\n\n return {\n embed,\n embedBatch,\n similarity,\n search,\n findNearest,\n cosineSimilarity,\n load,\n isLoading,\n isReady,\n loadingProgress,\n error,\n };\n}\n\n// ============================================\n// Model Preloading (Non-React)\n// ============================================\n\n/** Progress callback for preloading */\nexport type PreloadProgress = {\n status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n file?: string;\n progress?: number;\n message?: string;\n};\n\n/** Preload options */\nexport type PreloadOptions = {\n onProgress?: (progress: PreloadProgress) => void;\n};\n\n/**\n * Preload a chat/LLM model (downloads to IndexedDB cache)\n *\n * Call this during app initialization to ensure the model is ready\n * when users need it.\n *\n * @example\n * ```ts\n * // In your app's initialization\n * import { preloadChatModel } from \"@tryhamster/gerbil/browser\";\n *\n * await preloadChatModel(\"qwen3-0.6b\", {\n * onProgress: (p) => console.log(p.status, p.progress),\n * });\n *\n * // Later, useChat will load instantly from cache\n * ```\n */\nexport async function preloadChatModel(\n modelId: string,\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n\n // Use createGerbilWorker to load the model, then terminate\n const worker = await createGerbilWorker({\n modelId,\n onProgress: (p) => {\n if (p.status === \"downloading\") {\n onProgress?.({\n status: \"downloading\",\n file: p.file,\n progress: p.progress,\n });\n } else {\n onProgress?.({ status: \"loading\", message: p.status });\n }\n },\n });\n\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n}\n\n/**\n * Preload an embedding model\n *\n * @example\n * ```ts\n * await preloadEmbeddingModel(\"Xenova/all-MiniLM-L6-v2\");\n * ```\n */\nexport async function preloadEmbeddingModel(\n modelId = \"Xenova/all-MiniLM-L6-v2\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n\n return new Promise((resolve, reject) => {\n const worker = createEmbeddingWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({ type: \"load\", payload: { model: modelId } });\n });\n}\n\n/**\n * Preload a TTS model\n *\n * @example\n * ```ts\n * await preloadTTSModel(\"kokoro-82m\");\n * ```\n */\nexport async function preloadTTSModel(\n modelId: TTSModelId = \"kokoro-82m\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n const modelConfig = TTS_MODELS[modelId];\n\n if (!modelConfig) {\n throw new Error(`Unknown TTS model: ${modelId}`);\n }\n\n return new Promise((resolve, reject) => {\n const worker = createTTSWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n 
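For the hook itself, a usage sketch wiring `search()` into a component. The hook's exported name does not appear in this slice of the diff, so `useEmbeddings` below is an assumption; the returned API matches the return object shown above:

```tsx
// "useEmbeddings" is an assumed name: the export statement is outside
// this excerpt. search() returns { text, score, index } sorted best-first.
import { useState } from "react";
import { useEmbeddings } from "@tryhamster/gerbil/browser";

function DocSearch({ docs }: { docs: string[] }) {
  const { search, isReady, loadingProgress } = useEmbeddings({ autoLoad: true });
  const [hits, setHits] = useState<{ text: string; score: number; index: number }[]>([]);

  const onQuery = async (q: string) => {
    // Embeds the query and corpus, scores by cosine similarity, keeps top 3.
    setHits(await search(q, docs, 3));
  };

  if (!isReady) return <div>{loadingProgress?.message ?? "Loading..."}</div>;
  return (
    <div>
      <input onChange={(e) => onQuery(e.target.value)} />
      {hits.map((h) => (
        <div key={h.index}>{h.score.toFixed(3)}: {h.text}</div>
      ))}
    </div>
  );
}
```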
status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({\n type: \"load\",\n payload: {\n modelId,\n repo: modelConfig.repo,\n voices: modelConfig.voices,\n },\n });\n });\n}\n\n/**\n * Preload an STT model\n *\n * @example\n * ```ts\n * await preloadSTTModel(\"whisper-tiny.en\");\n * ```\n */\nexport async function preloadSTTModel(\n modelId = \"whisper-tiny.en\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n const resolved = resolveSTTModel(modelId);\n\n return new Promise((resolve, reject) => {\n const worker = createSTTWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({ type: \"load\", payload: { model: resolved } });\n });\n}\n\n/** Helper to resolve STT model ID to repo */\nfunction resolveSTTModel(modelId: string): string {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n \"whisper-large-v3-turbo\": \"onnx-community/whisper-large-v3-turbo\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n}\n\n// ============================================\n// Utilities\n// ============================================\n\n/**\n * Check if WebGPU is supported\n */\nexport function isWebGPUSupported(): boolean {\n if (typeof navigator === \"undefined\") {\n return false;\n }\n return \"gpu\" in navigator;\n}\n\n/**\n * Get WebGPU adapter info\n */\nexport async function getWebGPUInfo(): Promise<{\n supported: boolean;\n adapter?: string;\n device?: string;\n} | null> {\n if (!isWebGPUSupported()) {\n return { supported: false };\n }\n\n try {\n const adapter = await (navigator as any).gpu.requestAdapter();\n if (!adapter) {\n return { supported: false };\n }\n\n const info = await adapter.requestAdapterInfo();\n return {\n supported: true,\n adapter: info.vendor,\n device: info.device,\n };\n } catch {\n return { supported: false };\n }\n}\n\nexport default {\n isWebGPUSupported,\n getWebGPUInfo,\n createGerbilWorker,\n playAudio,\n createAudioPlayer,\n preloadChatModel,\n preloadEmbeddingModel,\n preloadTTSModel,\n 
preloadSTTModel,\n};\n"],"mappings":";AAYA,MAAaA,iBAA8C;CACzD,cAAc;EACZ,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,gBAAgB;EACd,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,sBAAsB;EACpB,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,gBAAgB;EACd,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,gBAAgB;EACd,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,cAAc;EACZ,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,QAAQ;EACT;CACD,gBAAgB;EACd,IAAI;EACJ,MAAM;EACN,aAAa;EACb,MAAM;EACN,eAAe;EACf,kBAAkB;EAClB,cAAc;EACd,gBAAgB;EAChB,mBAAmB;EACnB,QAAQ;EACT;CACF;;;;;;;;;;AAeD,SAAgB,aAAa,SAA8B;AAEzD,KAAI,eAAe,SACjB,QAAO;EACL,MAAM;EACN,MAAM,eAAe,SAAS;EAC/B;AAIH,KAAI,QAAQ,WAAW,MAAM,CAE3B,QAAO;EACL,MAAM;EACN,MAHW,QAAQ,MAAM,EAAE;EAI5B;AAIH,KAAI,QAAQ,WAAW,0BAA0B,CAE/C,QAAO;EACL,MAAM;EACN,MAHW,QAAQ,QAAQ,2BAA2B,GAAG;EAI1D;AAIH,KAAI,QAAQ,WAAW,QAAQ,CAE7B,QAAO;EACL,MAAM;EACN,MAHW,QAAQ,MAAM,EAAE;EAI5B;AAIH,KAAI,QAAQ,SAAS,IAAI,CACvB,QAAO;EACL,MAAM;EACN,MAAM;EACP;AAIH,QAAO;EACL,MAAM;EACN,MAAM;EACP;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACFH,eAAsB,mBAAmB,UAA+B,EAAE,EAAyB;CACjG,MAAM,EAAE,UAAU,cAAc,YAAY,SAAS,YAAY,YAAY;CAG7E,MAAM,SAAS,aAAa,QAAQ;AAEpC,QAAO,IAAI,SAAS,SAAS,WAAW;EA4WtC,MAAM,OAAO,IAAI,KAAK,CA1WH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;MA0We,EAAE,EAAE,MAAM,0BAA0B,CAAC;EACvE,MAAM,YAAY,IAAI,gBAAgB,KAAK;EAC3C,MAAM,SAAS,IAAI,OAAO,WAAW,EAAE,MAAM,UAAU,CAAC;EAExD,IAAI,UAAU;EACd,IAAIC,iBAAkD;EACtD,IAAIC,gBAAiD;EACrD,IAAI,iBAAiB;AAErB,SAAO,aAAa,MAAM;GACxB,MAAM,MAAM,EAAE;AAEd,WAAQ,IAAI,QAAZ;IACE,KAAK;AAEH,YAAO,YAAY;MAAE,MAAM;MAAQ,SAAS,OAAO;MAAM,CAAC;AAC1D;IAEF,KAAK;IACL,KAAK;AACH,kBAAa,IAAsB;AACnC;IAEF,KAAK;AACH,eAAU;AACV,kBAAa,IAAsB;AACnC,aAAQ,aAAa;AACrB;IAEF,KAAK;AACH,sBAAiB;AACjB;IAEF,KAAK;AACH,uBAAkB,IAAI;AACtB,eAAU,IAAmB;AAC7B;IAEF,KAAK;AACH,kBAAa,IAAsB;AACnC,sBAAiB,IAAI,KAAK;AAC1B,sBAAiB;AACjB,qBAAgB;AAChB;IAEF,KAAK;AACH,eAAU,IAAI,MAAM;AACpB,kBAAa;MAAE,QAAQ;MAAS,OAAO,IAAI;MAAO,CAAC;AACnD,SAAI,eAAe;AACjB,oBAAc,IAAI,MAAM,IAAI,MAAM,CAAC;AACnC,uBAAiB;AACjB,sBAAgB;WAEhB,QAAO,IAAI,MAAM,IAAI,MAAM,CAAC;AAE9B;;;AAIN,SAAO,WAAW,MAAM;GACtB,MAAM,QAAQ,EAAE,WAAW;AAC3B,aAAU,MAAM;AAChB,UAAO,IAAI,MAAM,MAAM,CAAC;;EAG1B,MAAMC,eAA6B;GACjC,WAAW,QAAgB,YAAiC,EAAE,KAC5D,IAAI,SAAS,KAAK,QAAQ;AACxB,qBAAiB;AACjB,oBAAgB;IAEhB,MAAM,SAASC,UAAQ,UAAU;IAIjC,MAAM,WAAWA,UAAQ,UACrB,CAAC;KAAE,MAAM;KAAU,SAAS;KAAQ,EAAE,GAAGA,UAAQ,QAAQ,GACzD,CACE;KAAE,MAAM;KAAU,SAAS;KAAQ,EACnC;KAAE,MAAM;KAAQ,SAAS;KAAQ,CAClC;AAIL,QAAIA,UAAQ,QACV,QAAO,YAAY,EAAE,MAAM,SAAS,CAAC;AAGvC,WAAO,YAAY;KACjB,MAAM;KACN;KACA,QAAQA,UAAQ,UAAU,EAAE;KAC5B,SAAS;MACP,WAAWA,UAAQ,cAAcA,UAAQ,QAAQ,SAAS,OAAO;MACjE,aAAaA,UAAQ,eAAe;MACpC,MAAMA,UAAQ,QAAQ;MACtB,MAAMA,UAAQ,QAAQ;MACtB,UAAUA,UAAQ,YAAY;MAC/B;KACF,CAAC;KACF;GAEJ,iBAAiB;AACf,WAAO,YAAY,EAAE,MAAM,aAAa,CAAC;;GAG3C,aAAa;AACX,WAAO,YAAY,EAAE,MAAM,SAAS,CAAC;;GAGvC,iBAAiB;AACf,WAAO,WAAW;AAClB,QAAI,gBAAgB,UAAU;;GAGhC,eAAe;GAChB;GACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuHJ,SAAgB,QAAQ,UAA0B,EAAE,EAAiB;CAEnE,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,+DAA+D;CAGjF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;C
AOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,kBAAkB,EAAE,EACpB,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAAoB,gBAAgB;CACpE,MAAM,CAAC,OAAO,YAAY,SAAiB,GAAG;CAC9C,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,iBAAiB,sBAAsB,SAAiB,GAAG;CAClE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAC/D,MAAM,CAAC,gBAAgB,qBAAqB,SAAmB,EAAE,CAAC;CAElE,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,eAAe,OAAe,EAAE;CACtC,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,qBAAoB,MAAc,IAAI,MAAM,KAAK;;GAGrD,kBAAkB;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;;GAExB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;AAGvB,iBAAgB;AACd,MAAI,CAAC,gBAAgB,iBAAiB;AACpC,gBAAa,SAAoB;AAE/B,QADgB,KAAK,GAAG,GAAG,EACd,SAAS,YACpB,QAAO,KAAK,KAAK,GAAY,MAC3B,MAAM,KAAK,SAAS,IAChB;KAAE,GAAG;KAAG,SAAS;KAAiB,UAAU,YAAY;KAAW,GACnE,EACL;AAEH,WAAO;KACP;AACF,sBAAmB,GAAG;AACtB,eAAY,GAAG;;IAEhB;EAAC;EAAc;EAAiB;EAAS,CAAC;CAG7C,MAAM,oBAAoB,OAAsB,KAAK;CACrD,MAAM,mBAAmB,OAAiB,EAAE,CAAC;CAG7C,MAAM,cAAc,aAAa,aAAqB;AACpD,qBAAmB,SAAmB,CAAC,GAAG,MAAM,SAAS,CAAC;IACzD,EAAE,CAAC;CAEN,MAAM,cAAc,aAAa,UAAkB;AACjD,qBAAmB,SAAmB,KAAK,QAAQ,GAAW,MAAc,MAAM,MAAM,CAAC;IACxF,EAAE,CAAC;CAEN,MAAM,cAAc,kBAAkB;AACpC,oBAAkB,EAAE,CAAC;IACpB,EAAE,CAAC;CAGN,MAAM,wBAAwB,aAC3B,MAAc,WAAqB;AAClC,MAAI,CAAC,KAAK,MAAM,IAAI,aAClB;AAGF,eAAa,WAAW;EACxB,MAAMC,cAAuB;GAC3B,IAAI,OAAO,aAAa;GACxB,MAAM;GACN,SAAS,KAAK,MAAM;GACpB,QAAQ,OAAO,SAAS,IAAI,SAAS;GACtC;AAED,eAAa,WAAW;EACxB,MAAMC,mBAA4B;GAChC,IAAI,OAAO,aAAa;GACxB,MAAM;GACN,SAAS;GACV;AAED,eAAa,SAAoB;GAAC,GAAG;GAAM;GAAa;GAAiB,CAAC;AAC1E,qBAAmB,GAAG;AACtB,cAAY,GAAG;AAGf,MAAI,CAAC,UAAU,SAAS;AACtB,qBAAkB,UAAU,KAAK,MAAM;AACvC,oBAAiB,UAAU;AAC3B,SAAM;AACN;;AAGF,kBAAgB,KAAK;AACrB,YAAU,QAAQ,SAAS,KAAK,MAAM,EAAE;GACtC;GACA,UAAU;GACV,WAAW,OAAO,SAAS,IAAI,KAAK,IAAI,WAAW,KAAK,GAAG;GAC3D;GACA,QAAQ,OAAO,SAAS,IAAI,SAAS;GACtC,CAAC;IAEJ;EAAC;EAAc;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACrE;CAED,MAAM,eAAe,aAClB,MAAwC;AACvC,KAAG,kBAAkB;AAErB,MAAI,CAAC,MAAM,MAAM,IAAI,aACnB;AAIF,wBAAsB,OAAO,eAAe;AAC5C,WAAS,GAAG;AACZ,oBAAkB,EAAE,CAAC;IAEvB;EAAC;EAAO;EAAc;EAAgB;EAAsB,CAC7D;CAGD,MAAM,iBAAiB,aACpB,MAAc,WAAqB;AAClC,wBAAsB,MAAM,OAAO;IAErC,CAAC,sBAAsB,CACxB;AAGD,iBAAgB;AACd,MAAI,WAAW,kBAAkB,WAAW,UAAU,SAAS;GAC7D,MAAM,iBAAiB,kBAAkB;GACzC,MAAM,gBAAgB,iBAAiB;AACvC,qBAAkB,UAAU;AAC5B,oBAAiB,UAAU,EAAE;AAC7B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,gBAAgB;IACzC;IACA,UAAU;IACV,WAAW,cAAc,SAAS,IAAI,KAAK,IAAI,WAAW,KAAK,GAAG;IAClE;IACA,QAAQ,cAAc,SAAS,IAAI,gBAAgB;IACpD,CAAC;;IAEH;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;CAE7D,MAAM,OAAO,kBAAkB;AAC7B,YAAU,SAAS,WAAW;AAC9B,kBAAgB,MAAM;IACrB,EAAE,CAAC;CAEN,MAAM,QAAQ,kBAAkB;AAC
9B,YAAU,SAAS,OAAO;AAC1B,cAAY,EAAE,CAAC;AACf,qBAAmB,GAAG;AACtB,cAAY,GAAG;AACf,oBAAkB,EAAE,CAAC;IACpB,EAAE,CAAC;AAUN,QAAO;EACL,UARsB,SAAS,KAAK,GAAY,MAAc;AAC9D,OAAI,MAAM,SAAS,SAAS,KAAK,EAAE,SAAS,eAAe,aACzD,QAAO;IAAE,GAAG;IAAG,SAAS;IAAiB,UAAU,YAAY;IAAW;AAE5E,UAAO;IACP;EAIA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;AA4EH,SAAgB,cAAc,UAAgC,EAAE,EAAuB;CACrF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,qEAAqE;CAGvF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,YAAY,iBAAiB,SAAiB,GAAG;CACxD,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,aAAa,OAAwC,KAAK;CAChE,MAAM,YAAY,OAAsC,KAAK;CAC7D,MAAM,mBAAmB,OAAsB,KAAK;CACpD,MAAM,mBAAmB,OAA6B,OAAU;CAChE,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAEf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,gBAAe,MAAc,IAAI,MAAM,KAAK;;GAGhD,aAAa,WAAW;AACtB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;AACtB,eAAW,UAAU,OAAO,KAAK;AACjC,eAAW,UAAU;;GAEvB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;CAEvB,MAAM,WAAW,aACd,QAAgB,oBAAuD;AACtE,SAAO,IAAI,SAAS,SAAS,WAAW;AACtC,iBAAc,GAAG;AACjB,eAAY,GAAG;AACf,cAAW,UAAU;AACrB,aAAU,UAAU;AAGpB,OAAI,CAAC,UAAU,SAAS;AACtB,qBAAiB,UAAU;AAC3B,qBAAiB,UAAU,iBAAiB;AAC5C,UAAM;AACN;;AAGF,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,QAAQ;IACjC;IACA,UAAU;IACV;IACA;IACA,QAAQ,iBAAiB;IAC1B,CAAC;IACF;IAEJ;EAAC;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACvD;AAGD,iBAAgB;AACd,MAAI,WAAW,iBAAiB,WAAW,UAAU,SAAS;GAC5D,MAAM,gBAAgB,iBAAiB;GACvC,MAAM,gBAAgB,iBAAiB;AACvC,oBAAiB,UAAU;AAC3B,oBAAiB,UAAU;AAC3B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,eAAe;IACxC;IACA,UAAU;IACV;IACA;IACA,QAAQ;IACT,CAAC;;IAEH;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;AAO7D,QAAO;EACL;EACA;EACA;EACA;EACA;EACA;EACA,MAZW,kBAAkB;AAC7B,aAAU,SAAS,WAAW;AAC9B,mBAAgB,MAAM;KACrB,EAAE,CAAC;EAUJ;EACA;EACA;EACA;EACD;;;AA6BH,MAAMC,wBAA4C;CAChD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;
CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EAAE,IAAI;EAAW,MAAM;EAAQ,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAiB;CAChG;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EAAE,IAAI;EAAY,MAAM;EAAS,QAAQ;EAAQ,UAAU;EAAS,aAAa;EAAgB;CAClG;;AAGD,MAAMC,4BAAgD;CACpD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACD;EACE,IAAI;EACJ,MAAM;EACN,QAAQ;EACR,UAAU;EACV,aAAa;EACd;CACF;;AAGD,MAAMC,aAGF;CACF,cAAc;EACZ,MAAM;EACN,cAAc;EACd,YAAY;EACZ,QAAQ;EACT;CACD,kBAAkB;EAChB,MAAM;EACN,cAAc;EACd,YAAY;EACZ,QAAQ;EACT;CACF;AA2DD,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkHxB,SAAS,kBAA0B;CACjC,MAAM,OAAO,IAAI,KAAK,CAAC,gBAAgB,EAAE,EAAE,MAAM,0BAA0B,CAAC;CAC5E,MAAM,MAAM,IAAI,gBAAgB,KAAK;CACrC,MAAM,SAAS,IAAI,OAAO,KAAK,EAAE,MAAM,UAAU,CAAC;AAClD,KAAI,gBAAgB,IAAI;AACxB,QAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqCT,SAAgB,UAAU,UAA4B,EAAE,EAAmB;CACzE,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,iEAAiE;CAGnF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,OAAO,UAAU,cACjB,OAAO,eAAe,GACtB,WAAW,OACX,SACA,SACA,SACA,UACE;CAGJ,MAAM,cAAc,WAAW;CAC/B,MAAM,eAAe,QAAQ,SAAS,YAAY;CAElD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAA6B,KAAK;CAChF,MAAM,CAAC,YAAY,iBAAiB,SAAkB,MAAM;CAC5D,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAC/D,MAAM,CAAC,cAAc,mBAAmB,SAAiB,aAAa;CACtE,MAAM,CAAC,cAAc,mBAAmB,SAAiB,aAAa;CAEtE,MAAM,YAAY,OAAsB,KAAK;CAC7C,MAAM,kBAAkB,OAA4B,KAAK;CACzD,MAAM,gBAAgB,OAAqC,KAAK;CAChE,MAAM,aAAa,OAAgB,KAAK;CACxC,MAAM,aAAa,OAAmB,QAAQ;CAC9C,MAAM,kBAAkB,OAA8D,KAAK;CAG3F,MAAM,aAAa,kBAAsC;AACvD,SAAO,YAAY;IAClB,CAAC,YAAY,OAAO,CAAC;CAGxB,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UAAW;AACpC,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WAAY;AAEjB,aAAW,UAAU;AACrB,aAAW,UAAU;EAErB,MAAM,SAAS,WAAW;AAE1B,qBAAmB;GACjB,QAAQ;GACR,SAAS,WAAW,YAAY,mBAAmB,eAAe,SAAS;GAC5E,CAAC;EAGF,MAAM,SAAS,iBAAiB;AAChC,YAAU,UAAU;AAGpB,SAAO,aAAa,MAAoB;AACtC,OAAI,CAAC,WAAW,QAAS;GAEzB,MAAM,EAAE,MAAM,YAAY,EAAE;AAE5B,OAAI,SAAS,cAAc,QAAQ,WAAW,cAAc,QAAQ,KAClE,oBAAmB;IACjB,QAAQ;IACR,MAAM,QAAQ;IACd,UAAU,KAAK,MAAM,QAAQ,YAAY,EAAE;IAC5C,CAAC;AAGJ,OAAI,SAAS,SAAS;AACpB,iBAAa,MAAM;AACnB,eAAW,KAAK;AAChB,uBAAmB,EAAE,QAAQ,SAAS,CAAC;AACvC,eAAW;AAGX,QAAI,gBAAgB,SAAS;KAC3B,MAAM,EAAE,MAAM,OAAO,UAAU,gBAAgB;AAC/C,qBAAgB,UAAU;AAC1B,YAAO,YAAY;MAAE,MAAM;MAAY,SAAS;OAAE;OAAM;OAAO;OAAO;MAAE,CAAC;;;AAI7E,OAAI,SAAS,SAAS;IAEpB,MAAM,EAAE,OAAO,eAAe;AAC9B,kBAAc,OAAO,WAAW;;AAGlC,OAAI,SAAS,SAAS;IACpB,MAAM,WAAW;AACjB,aAAS,SAAS;AAClB,iBAAa,MAAM;AACnB,kBAAc,MAAM;AACpB,uBAAmB;KAAE,QAAQ;KAAS,OAAO;KAAU
,CAAC;AACxD,cAAU,SAAS;;;AAIvB,SAAO,WAAW,QAAQ;AACxB,OAAI,CAAC,WAAW,QAAS;GACzB,MAAM,WAAW,IAAI,WAAW;AAChC,YAAS,SAAS;AAClB,gBAAa,MAAM;AACnB,sBAAmB;IAAE,QAAQ;IAAS,OAAO;IAAU,CAAC;AACxD,aAAU,SAAS;;AAIrB,SAAO,YAAY;GACjB,MAAM;GACN,SAAS;IACP;IACA,MAAM,OAAO;IACb,QAAQ,OAAO;IAChB;GACF,CAAC;AAEF,eAAa;AACX,cAAW,UAAU;AACrB,UAAO,WAAW;AAClB,aAAU,UAAU;;IAErB;EAAC;EAAY;EAAS;EAAS;EAAQ,CAAC;CAG3C,MAAM,gBAAgB,YACpB,OAAO,OAAqB,eAAuB;AACjD,MAAI;AAEF,OAAI,CAAC,gBAAgB,WAAW,gBAAgB,QAAQ,UAAU,SAChE,iBAAgB,UAAU,IAAI,aAAa,EAAE,YAAY,CAAC;GAE5D,MAAM,MAAM,gBAAgB;AAE5B,OAAI,IAAI,UAAU,YAChB,OAAM,IAAI,QAAQ;GAIpB,MAAM,cAAc,IAAI,aAAa,GAAG,MAAM,QAAQ,WAAW;AACjE,eAAY,cAAc,IAAI,aAAa,MAAM,EAAE,EAAE;GAErD,MAAM,aAAa,IAAI,oBAAoB;AAC3C,cAAW,SAAS;AACpB,cAAW,QAAQ,IAAI,YAAY;AAEnC,iBAAc,UAAU;AAExB,cAAW,gBAAgB;AACzB,QAAI,CAAC,WAAW,QAAS;AACzB,kBAAc,MAAM;AACpB,aAAS;;AAGX,cAAW,OAAO;WACX,KAAK;AACZ,iBAAc,MAAM;GACpB,MAAM,WAAW,eAAe,QAAQ,IAAI,UAAU,OAAO,IAAI;AACjE,YAAS,SAAS;AAClB,aAAU,SAAS;;IAGvB,CAAC,OAAO,QAAQ,CACjB;AAGD,iBAAgB;AACd,eAAa;AACX,OAAI;AACF,kBAAc,SAAS,MAAM;WACvB;AAGR,OAAI;AACF,QAAI,gBAAgB,WAAW,gBAAgB,QAAQ,UAAU,SAC/D,iBAAgB,QAAQ,OAAO;WAE3B;;IAIT,EAAE,CAAC;AA0EN,QAAO;EACL,OAxEY,YACZ,OAAO,MAAc,SAA8C;GACjE,MAAM,QAAQ,MAAM,SAAS;GAC7B,MAAM,QAAQ,MAAM,SAAS;AAI7B,OAAI,CADc,YAAY,OAAO,MAAM,MAAM,EAAE,OAAO,MAAM,EAChD;IAEd,MAAM,WAAW,UAAU,MAAM,iCADb,YAAY,OAAO,KAAK,MAAM,EAAE,GAAG,CAAC,KAAK,KAAK,CACY;AAC9E,aAAS,SAAS;AAClB,cAAU,SAAS;AACnB;;AAIF,OAAI,CAAC,UAAU,SAAS;AAEtB,oBAAgB,UAAU;KAAE;KAAM;KAAO;KAAO;AAChD,UAAM;AACN;;AAGF,OAAI,CAAC,SAAS;AAEZ,oBAAgB,UAAU;KAAE;KAAM;KAAO;KAAO;AAChD;;AAGF,iBAAc,KAAK;AACnB,cAAW;AAGX,aAAU,QAAQ,YAAY;IAC5B,MAAM;IACN,SAAS;KAAE;KAAM;KAAO;KAAO;IAChC,CAAC;KAEJ;GAAC;GAAc;GAAc,YAAY;GAAQ;GAAM;GAAS;GAAS;GAAQ,CAClF;EAkCC,MA/BW,kBAAkB;AAC7B,OAAI,cAAc,SAAS;AACzB,kBAAc,QAAQ,MAAM;AAC5B,kBAAc,QAAQ,YAAY;AAClC,kBAAc,UAAU;;AAE1B,iBAAc,MAAM;KACnB,EAAE,CAAC;EAyBJ;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA,UA9Be,aACd,YAAoB;AAEnB,OADkB,YAAY,OAAO,MAAM,MAAM,EAAE,OAAO,QAAQ,CAEhE,iBAAgB,QAAQ;OAExB,SAAQ,KACN,UAAU,QAAQ,kBAAkB,QAAQ,eAAe,YAAY,OAAO,KAAK,MAAM,EAAE,GAAG,CAAC,KAAK,KAAK,GAC1G;KAGL,CAAC,YAAY,QAAQ,QAAQ,CAC9B;EAmBC;EACA,UAjBe,aAAa,UAAkB;AAC9C,mBAAgB,KAAK,IAAI,IAAK,KAAK,IAAI,GAAK,MAAM,CAAC,CAAC;KACnD,EAAE,CAAC;EAgBJ,cAAc;EACd,YAAY,YAAY;EACzB;;;;;;;;;;;;;;;;AAqBH,eAAsB,UACpB,OACA,aAAqB,MACkC;CACvD,MAAM,eAAe,IAAI,cAAc;AAGvC,KAAI,aAAa,UAAU,YACzB,OAAM,aAAa,QAAQ;CAG7B,MAAM,cAAc,aAAa,aAAa,GAAG,MAAM,QAAQ,WAAW;CAC1E,MAAM,cAAc,IAAI,aAAa,MAAM;AAC3C,aAAY,cAAc,aAAa,EAAE;CAEzC,MAAM,aAAa,aAAa,oBAAoB;AACpD,YAAW,SAAS;AACpB,YAAW,QAAQ,aAAa,YAAY;CAE5C,MAAM,UAAU,IAAI,SAAe,YAAY;AAC7C,aAAW,gBAAgB;AACzB,gBAAa,OAAO;AACpB,YAAS;;GAEX;AAEF,YAAW,OAAO;AAElB,QAAO;EACL,YAAY;AACV,cAAW,MAAM;AACjB,gBAAa,OAAO;;EAEtB;EACD;;;;;;;;;;;;;;;;;;;AAoBH,SAAgB,kBAAkB,aAAqB,MAIrD;CACA,IAAIC,eAAoC;CACxC,IAAI,gBAAgB;CACpB,IAAI,WAAW;CAEf,MAAM,gBAAgB,YAAY;AAChC,MAAI,CAAC,aACH,gBAAe,IAAI,cAAc;AAEnC,MAAI,aAAa,UAAU,YACzB,OAAM,aAAa,QAAQ;AAE7B,SAAO;;AAGT,QAAO;EACL,OAAO,OAAO,UAAwB;GACpC,MAAM,MAAM,MAAM,eAAe;AACjC,cAAW;GAEX,MAAM,SAAS,IAAI,aAAa,GAAG,MAAM,QAAQ,WAAW;GAC5D,MAAM,cAAc,IAAI,aAAa,MAAM;AAC3C,UAAO,cAAc,aAAa,EAAE;GAEpC,MAAM,SAAS,IAAI,oBAAoB;AACvC,UAAO,SAAS;AAChB,UAAO,QAAQ,IAAI,YAAY;GAG/B,MAAM,YAAY,KAAK,IAAI,IAAI,aAAa,cAAc;AAC1D,UAAO,MAAM,UAAU;AACvB,mBAAgB,YAAY,OAAO;AAEnC,UAAO,gBAAgB;AACrB,QAAI,IAAI,eAAe,gBAAgB,GACrC,YAAW;;;EAKjB,YAAY;AACV,cAAW;AACX,mBAAgB;AAChB,OAAI,cAAc;AAChB,iBAAa,OAAO;AACpB,mBAAe;;;EAInB,iBAAiB;EAClB;;AAUH,MAAM,kBAAkB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiDxB,SAAS,kBAA0B;CACjC,MAAM,OAAO,IAAI,KAAK,CAAC,gBAAgB,EAAE,EAAE,MAAM,0BAA0B,CAAC;CAC5E,MAAM,MAAM,IAAI,gBAAgB,KAAK;CACrC,M
AAM,SAAS,IAAI,OAAO,KAAK,EAAE,MAAM,UAAU,CAAC;AAClD,KAAI,gBAAgB,IAAI;AACxB,QAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiHT,SAAgB,cAAc,UAAgC,EAAE,EAAuB;CACrF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,qEAAqE;CAGvF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,mBACR,WAAW,OACX,SACA,cACA,SACA,YACA,YAAY,OACZ,gBAAgB,MAChB,YACE;CAEJ,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAA6B,KAAK;CAChF,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,aAAa,kBAAkB,SAAkB,MAAM;CAC9D,MAAM,CAAC,gBAAgB,qBAAqB,SAAkB,MAAM;CACpE,MAAM,CAAC,YAAY,iBAAiB,SAAiB,GAAG;CACxD,MAAM,CAAC,gBAAgB,qBAAqB,SAAiB,GAAG;CAChE,MAAM,CAAC,YAAY,iBAAiB,SAAiB,EAAE;CACvD,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAAsB,KAAK;CAC7C,MAAM,mBAAmB,OAA6B,KAAK;CAC3D,MAAM,iBAAiB,OAAe,EAAE,CAAC;CACzC,MAAM,YAAY,OAA2B,KAAK;CAClD,MAAM,aAAa,OAAgB,KAAK;CACxC,MAAM,uBAAuB,OAA8C,KAAK;CAChF,MAAM,mBAAmB,OAAe,EAAE,CAAC;CAC3C,MAAM,oBAAoB,OAAe,GAAG;CAC5C,MAAM,uBAAuB,OAAwC,KAAK;CAC1E,MAAM,sBAAsB,OAAsC,KAAK;CAGvE,MAAMC,qBAAmB,YAA4B;AASnD,SAR8C;GAC5C,gBAAgB;GAChB,mBAAmB;GACnB,gBAAgB;GAChB,mBAAmB;GACnB,iBAAiB;GACjB,oBAAoB;GACrB,CACoB,YAAY;;AAInC,iBAAgB;AACd,MAAI,CAAC,cAAc,QAAS;AAE5B,aAAW,UAAU;AAErB,eAAa,KAAK;AAClB,qBAAmB;GAAE,QAAQ;GAAW,SAAS;GAAwB,CAAC;AAC1E,eAAa;GAAE,QAAQ;GAAW,SAAS;GAAwB,CAAC;EAGpE,MAAM,SAAS,iBAAiB;AAChC,YAAU,UAAU;AAGpB,SAAO,aAAa,MAAoB;AACtC,OAAI,CAAC,WAAW,QAAS;GAEzB,MAAM,EAAE,MAAM,YAAY,EAAE;AAE5B,OAAI,SAAS,YAAY;IACvB,MAAMC,WAAwB;KAC5B,QAAQ,QAAQ,aAAa,SAAY,gBAAgB;KACzD,SAAS,QAAQ;KACjB,UAAU,QAAQ;KAClB,MAAM,QAAQ;KACf;AACD,uBAAmB,SAAS;AAC5B,iBAAa,SAAS;;AAGxB,OAAI,SAAS,SAAS;AACpB,eAAW,KAAK;AAChB,iBAAa,MAAM;AACnB,uBAAmB,EAAE,QAAQ,SAAS,CAAC;AACvC,iBAAa,EAAE,QAAQ,SAAS,CAAC;AACjC,eAAW;;AAGb,OAAI,SAAS,cAAc;IACzB,MAAM,OAAO;AACb,sBAAkB,MAAM;AACxB,QAAI,qBAAqB,SAAS;AAChC,0BAAqB,QAAQ,KAAK;AAClC,0BAAqB,UAAU;AAC/B,yBAAoB,UAAU;;;AAIlC,OAAI,SAAS,SAAS;IACpB,MAAM,SAAS;AACf,aAAS,OAAO;AAChB,iBAAa,MAAM;AACnB,sBAAkB,MAAM;AACxB,uBAAmB;KAAE,QAAQ;KAAS,SAAS;KAAQ,CAAC;AACxD,iBAAa;KAAE,QAAQ;KAAS,SAAS;KAAQ,CAAC;AAClD,cAAU,OAAO;AACjB,QAAI,oBAAoB,SAAS;AAC/B,yBAAoB,QAAQ,IAAI,MAAM,OAAO,CAAC;AAC9C,0BAAqB,UAAU;AAC/B,yBAAoB,UAAU;;;;AAKpC,SAAO,WAAW,QAAQ;AACxB,OAAI,CAAC,WAAW,QAAS;GACzB,MAAM,SAAS,IAAI,WAAW;AAC9B,YAAS,OAAO;AAChB,gBAAa,MAAM;AACnB,sBAAmB;IAAE,QAAQ;IAAS,SAAS;IAAQ,CAAC;AACxD,gBAAa;IAAE,QAAQ;IAAS,SAAS;IAAQ,CAAC;AAClD,aAAU,OAAO;;AAInB,SAAO,YAAY;GACjB,MAAM;GACN,SAAS,EAAE,OAAOD,kBAAgB,MAAM,EAAE;GAC3C,CAAC;AAEF,eAAa;AACX,cAAW,UAAU;AACrB,UAAO,WAAW;AAClB,aAAU,UAAU;;IAErB;EAAC;EAAY;EAAS;EAAO;EAAS;EAAS;EAAW,CAAC;AAG9D,iBAAgB;AACd,aAAW,UAAU;AACrB,eAAa;AACX,cAAW,UAAU;AACrB,OAAI,UAAU,SAAS;AACrB,cAAU,QAAQ,WAAW;AAC7B,cAAU,UAAU;;AAEtB,OAAI,UAAU,QACZ,MAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;;IAIjB,EAAE,CAAC;CAGN,MAAM,OAAO,kBAAkB;AAC7B,MAAI,CAAC,cAAc,CAAC,WAAW,CAAC,UAC9B,eAAc,KAAK;IAEpB;EAAC;EAAY;EAAS;EAAU,CAAC;CAGpC,MAAM,gBAAgB,YAAY,OAAO,SAAsC;EAC7E,MAAM,eAAe,IAAI,aAAa,EAAE,YAAY,MAAO,CAAC;EAC5D,MAAM,cAAc,MAAM,KAAK,aAAa;EAC5C,MAAM,cAAc,MAAM,aAAa,gBAAgB,YAAY;EAGnE,MAAM,cAAc,YAAY,eAAe,EAAE;AAGjD,MAAI,YAAY,eAAe,MAAO;GACpC,MAAM,QAAQ,OAAQ,YAAY;GAClC,MAAM,YAAY,KAAK,MAAM,YAAY,SAAS,MAAM;GACxD,MAAM,YAAY,IAAI,aAAa,UAAU;AAC7C,QAAK,IAAI,IAAI,GAAG,IAAI,WAAW,KAAK;IAClC,MAAM,WAAW,IAAI;IACrB,MAAM,QAAQ,KAAK,MAAM,SAAS;IAClC,MAAM,OAAO,KAAK,IAAI,QAAQ,GAAG,YAAY,SAAS,EAAE;IACxD,MAAM,IAAI,WAAW;AACrB,cAAU,KAAK,YAAY,UAAU,IAAI,KAAK,YAAY,QAAQ;;AAEpE,gBAAa,OAAO;AACpB,UAAO;;AAGT,eAAa,OAAO;AACpB,SAAO,IAAI,aAAa,YAAY;IACnC,EAAE,CAAC;CAGN,MAAM,aAAa,YACjB,OAAO,UAAyC;AAC9C,MAAI,CAAC,UAAU,SAAS;AACtB,OAAI,CAAC,YAAY;AACf,kBAAc,KAAK;AACnB,UAAM,IAAI,MAAM
,uDAAuD;;AAEzE,SAAM,IAAI,MAAM,uBAAuB;;AAGzC,MAAI,CAAC,QACH,OAAM,IAAI,MAAM,0BAA0B;AAG5C,oBAAkB,KAAK;AAEvB,SAAO,IAAI,SAAS,SAAS,WAAW;AACtC,wBAAqB,WAAW,SAAiB;IAE/C,IAAI,WAAW,KAAK,MAAM;AAC1B,QACE,aAAa,mBACb,aAAa,mBACb,aAAa,gBAEb,YAAW;AAEb,kBAAc,SAAS;AACvB,mBAAe,SAAS;AACxB,YAAQ,SAAS;;AAEnB,uBAAoB,UAAU;GAG9B,MAAM,aAAa,IAAI,aAAa,MAAM;AAC1C,aAAU,QAAS,YAAY;IAAE,MAAM;IAAc,SAAS,EAAE,OAAO,YAAY;IAAE,EAAE,CACrF,WAAW,OACZ,CAAC;IACF;IAEJ;EAAC;EAAY;EAAS;EAAa,CACpC;CAGD,MAAM,sBAAsB,OAAe,EAAE;CAI7C,MAAM,kBAAkB,YACtB,OAAO,aAAsC;AAC3C,MAAI,CAAC,UAAU,WAAW,CAAC,WAAW,eAAe,QAAQ,WAAW,EAAG,QAAO;AAElF,MAAI;GAGF,MAAM,YAAY,MAAM,cADN,IAAI,KAAK,eAAe,SAAS,EAAE,MAAM,cAAc,CAAC,CAC1B;GAGhD,MAAM,kBAAkB,oBAAoB;GAC5C,MAAM,eAAe,UAAU;AAG/B,OAAI,eAAe,kBAAkB,IAAM,QAAO;GAGlD,MAAM,WAAW,UAAU,MAAM,gBAAgB;AAGjD,uBAAoB,UAAU;GAG9B,MAAM,OAAO,MAAM,WAAW,SAAS;AAEvC,OAAI,QAAQ,WAAW,SAAS;AAC9B,sBAAkB,KAAK;AACvB,cAAU,MAAM,SAAS;;AAG3B,UAAO;UACD;AACN,UAAO;;IAGX;EAAC;EAAe;EAAS;EAAY;EAAQ,CAC9C;AAmRD,QAAO;EACL,gBAjRqB,YAAY,YAAY;AAC7C,OAAI,YAAa;AAEjB,OAAI;AAEF,QAAI,aAAa,CAAC,SAAS;AACzB,SAAI,CAAC,WACH,eAAc,KAAK;AAGrB,WAAM,IAAI,SAAe,SAAS,WAAW;MAC3C,MAAM,aAAa,kBAAkB;AACnC,WAAI,WAAW,UAAU,SAAS;AAChC,sBAAc,WAAW;AACzB,iBAAS;;SAEV,IAAI;AACP,uBAAiB;AACf,qBAAc,WAAW;AACzB,8BAAO,IAAI,MAAM,gCAAgC,CAAC;SACjD,IAAM;OACT;;IAIJ,MAAM,SAAS,MAAM,UAAU,aAAa,aAAa,EACvD,OAAO;KACL,YAAY;KACZ,cAAc;KACd,kBAAkB;KAClB,kBAAkB;KACnB,EACF,CAAC;AAEF,cAAU,UAAU;AACpB,mBAAe,UAAU,EAAE;AAC3B,qBAAiB,UAAU,EAAE;AAC7B,sBAAkB,UAAU;AAC5B,wBAAoB,UAAU;AAC9B,kBAAc,GAAG;AACjB,sBAAkB,GAAG;AACrB,kBAAc,EAAE;IAEhB,MAAM,gBAAgB,IAAI,cAAc,OAAO;AAC/C,qBAAiB,UAAU;AAE3B,kBAAc,mBAAmB,UAAU;AACzC,SAAI,MAAM,KAAK,OAAO,GAAG;AACvB,qBAAe,QAAQ,KAAK,MAAM,KAAK;AACvC,UAAI,UACF,kBAAiB,QAAQ,KAAK,MAAM,KAAK;;;AAK/C,kBAAc,MAAM,IAAI;AACxB,mBAAe,KAAK;AACpB,aAAS,KAAK;AAGd,QAAI,aAAa,WAAW,UAAU,SAAS;KAC7C,IAAI,WAAW;KACf,IAAI,iBAAiB;KAIrB,MAAM,mBAAmB,YAAY;AACnC,UAAI,CAAC,kBAAkB,CAAC,WAAW,QACjC;AAMF,UAHmB,iBAAiB,QAAQ,SAG3B,GAAG;AAElB,wBAAiB,UAAU,EAAE;AAE7B,WAAI;AACF,0BAAkB,KAAK;QACvB,MAAM,YAAY,MAAM,gBAAgB,SAAS;AAEjD,YAAI,aAAa,WAAW,SAAS;AACnC;AACA,uBAAc,SAAS;AAGvB,wBAAe,SAAS;UACtB,MAAM,gBAAgB,QAAQ,OAAO,MAAM,MAAM;AACjD,4BAAkB,UAAU;AAC5B,yBAAe,cAAc;AAC7B,iBAAO;WACP;;gBAEG,GAAG;AACV,gBAAQ,MAAM,8CAA8C,EAAE;iBACtD;AACR,YAAI,WAAW,QACb,mBAAkB,MAAM;;;AAM9B,UAAI,kBAAkB,WAAW,QAC/B,sBAAqB,UAAU,WAAW,kBAAkB,cAAc;;AAK9E,0BAAqB,UAAU,WAAW,kBAAkB,cAAc;AAG1E,KAAC,qBAA6B,cAAc;AAC1C,uBAAiB;;;YAGdE,GAAQ;IACf,MAAM,SAAS,EAAE,WAAW;AAC5B,aAAS,OAAO;AAChB,cAAU,OAAO;;KAElB;GACD;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,CAAC;EA6IA,eA1IoB,YAAY,YAA6B;AAE7D,OAAK,qBAA6B,MAChC,CAAC,qBAA6B,OAAO;AAEvC,OAAI,qBAAqB,SAAS;AAChC,iBAAa,qBAAqB,QAAQ;AAC1C,yBAAqB,UAAU;;AAGjC,UAAO,IAAI,SAAS,SAAS,WAAW;AACtC,QAAI,CAAC,iBAAiB,WAAW,CAAC,aAAa;AAC7C,4BAAO,IAAI,MAAM,gBAAgB,CAAC;AAClC;;IAGF,MAAM,gBAAgB,iBAAiB;AAEvC,kBAAc,SAAS,YAAY;AAEjC,SAAI,UAAU,SAAS;AACrB,WAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,gBAAU,UAAU;;AAGtB,oBAAe,MAAM;AAGrB,SAAI,WAAW;AAEb,UAAI,eAAe,QAAQ,SAAS,KAAK,oBAAoB,UAAU,GAAG;AACxE,yBAAkB,KAAK;AACvB,wBAAiB,UAAU,EAAE;AAE7B,WAAI;QACF,MAAM,iBAAiB,MAAM,gBAAgB,WAAW;AACxD,YAAI,kBAAkB,WAAW,QAC/B,gBAAe,SAAS;SACtB,MAAM,gBAAgB,QAAQ,OAAO,MAAM,MAAM;AACjD,2BAAkB,UAAU;AAC5B,gBAAO;UACP;iBAEI;AACR,YAAI,WAAW,QACb,mBAAkB,MAAM;;;MAK9B,MAAM,YAAY,kBAAkB;AACpC,qBAAe,UAAU;AACzB,cAAQ,UAAU;AAClB;;KAIF,MAAM,YAAY,IAAI,KAAK,eAAe,SAAS,EAAE,MAAM,cAAc,CAAC;AAE1E,SAAI;AAEF,UAAI,CAAC,WAAW,CAAC,UAAU,SAAS;AAClC,WAAI,CAAC,WACH,eAAc,KAAK;AAGrB,aAAM,IAAI,SAAe,KAAK,QAAQ;QACpC,MAAM,aAAa,kBAAkB;AACnC,aAAI,WAAW,UAAU,SAAS;AAChC,wBAAc,WAAW;AACzB,eAAK;;WAEN,IAAI;AACP,yBAAiB;AACf,uBAAc,WAAW;AACzB,6BAAI,IAAI
,MAAM,gCAAgC,CAAC;WAC9C,IAAM;SACT;;AAQJ,cADa,MAAM,WAHD,MAAM,cAAc,UAAU,CAGR,CAC3B;cACNA,GAAQ;MACf,MAAM,SAAS,EAAE,WAAW;AAC5B,eAAS,OAAO;AAChB,gBAAU,OAAO;AACjB,aAAO,EAAE;;;AAIb,kBAAc,MAAM;KACpB;KACD;GACD;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,CAAC;EA+BA,iBA5BsB,kBAAkB;AAExC,OAAK,qBAA6B,MAChC,CAAC,qBAA6B,OAAO;AAEvC,OAAI,qBAAqB,SAAS;AAChC,iBAAa,qBAAqB,QAAQ;AAC1C,yBAAqB,UAAU;;AAGjC,OAAI,iBAAiB,WAAW,YAC9B,kBAAiB,QAAQ,MAAM;AAEjC,OAAI,UAAU,SAAS;AACrB,SAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,cAAU,UAAU;;AAEtB,kBAAe,UAAU,EAAE;AAC3B,oBAAiB,UAAU,EAAE;AAC7B,uBAAoB,UAAU;AAC9B,kBAAe,MAAM;KACpB,CAAC,YAAY,CAAC;EAMf;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2HH,SAAgB,aAAa,UAA+B,EAAE,EAAsB;CAClF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,oEAAoE;CAGtF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAQrD,MAAM,aAAa,QAAQ,YAAY;CACvC,MAAM,YAAY,WAAW;CAE7B,MAAM,EACJ,WAAW,cACX,WAAW,mBACX,SAAS,+EACT,WAAW,OACX,QAAQ,UAAU,cAClB,QAAQ,GACR,WAAW,OACX,aACA,kBACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAA6B,EAAE,CAAC;CAChE,MAAM,CAAC,OAAO,YAAY,SAExB,OAAO;CACT,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,gBAAgB,qBAAqB,SAAiB,GAAG;CAChE,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAG/D,MAAM,eAAe,OAAY,KAAK;CACtC,MAAM,SAAS,OAAY,KAAK;CAChC,MAAM,SAAS,OAAY,KAAK;CAChC,MAAM,mBAAmB,OAA6B,KAAK;CAC3D,MAAM,iBAAiB,OAAe,EAAE,CAAC;CACzC,MAAM,YAAY,OAA2B,KAAK;CAClD,MAAM,kBAAkB,OAA4B,KAAK;CACzD,MAAM,gBAAgB,OAAqC,KAAK;CAChE,MAAM,aAAa,OAAgB,KAAK;CACxC,MAAM,eAAe,OAAgB,MAAM;CAG3C,MAAM,cAAc,UAAU;CAC9B,MAAM,eAAe,UAAU,kBAAkB,UAAU;CAC3D,MAAM,aAAa,UAAU;CAG7B,MAAMF,qBAAmB,YAA4B;AASnD,SAR8C;GAC5C,gBAAgB;GAChB,mBAAmB;GACnB,gBAAgB;GAChB,mBAAmB;GACnB,iBAAiB;GACjB,oBAAoB;GACrB,CACoB,YAAY;;AAInC,iBAAgB;AACd,MAAI,CAAC,cAAc,QAAS;EAE5B,IAAI,YAAY;EAEhB,MAAM,aAAa,YAAY;AAC7B,OAAI;AACF,iBAAa,KAAK;AAClB,aAAS,KAAK;AAGd,sBAAkB,0CAA0C;IAC5D,MAAM,YAAY,iBAAiB;AACnC,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,eAAU,WAAW;AACrB;;AAIF,UAAM,IAAI,SAAe,SAAS,WAAW;AAC3C,eAAU,aAAa,MAAoB;MACzC,MAAM,EAAE,MAAM,YAAY,EAAE;AAC5B,UAAI,SAAS,QAAS,UAAS;AAC/B,UAAI,SAAS,QAAS,QAAO,IAAI,MAAM,QAAQ,CAAC;AAChD,UAAI,SAAS,cAAc,WAAW,QACpC,mBAAkB,QAAQ,UAAU,iBAAiB;;AAGzD,eAAU,WAAW,MAAM,OAAO,IAAI,MAAM,EAAE,QAAQ,CAAC;AACvD,eAAU,YAAY;MACpB,MAAM;MACN,SAAS,EAAE,OAAOA,kBAAgB,SAAS,EAAE;MAC9C,CAAC;MACF;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,eAAU,WAAW;AACrB;;AAEF,WAAO,UAAU;AAGjB,sBAAkB,4BAA4B;IAC9C,MAAM,SAAS,MAAM,mBAAmB;KACtC,SAAS;KACT,aAAa,MAAM;AACjB,UAAI,CAAC,WAAW,QAAS;AACzB,wBAAkB,EAAE,WAAW,iBAAiB;;KAEnD,CAAC;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,YAAO,WAAW;AAClB;;AAEF,iBAAa,UAAU;AAIvB,sBAAkB,2BADG,eAAe,mBACwB,eAAe,SAAS,MAAM;IAE1F,MAAM,YAAY,iBAAiB;AACnC,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,eAAU,WAAW;AACrB;;IAGF,MAAMG,cAAY,WAAW;AAE7B,UAAM,IAAI,SAAe,SAAS,WAAW;AAC3C,eAAU,aAAa,MAAoB;MACzC,MAAM,EAAE,MAAM,YAAY,EAAE;AAC5B,UAAI,SAAS,QAAS,UAAS;AAC/B,UAAI,SAAS,QAAS,QAAO,IAAI,MAAM,QAAQ,CAAC;AAChD,UAAI,SAAS,cAAc,WAAW,QACpC,mBAAkB,QAAQ,UAAU,iBAAiB;;AAGzD,eAAU,WAAW,MAAM,OAAO,IAAI,MAAM,EAAE,QAAQ,CAAC;AACvD,eAAU,YAAY;MACpB,MAAM;MACN,SAAS;OACP,SAAS;OACT,MAAMA,YAAU;OAChB,QAAQA,YAAU;OACnB;MACF,CAAC;MACF;AACF,QAAI,aAAa,CAAC,WAAW,SAAS;AACpC,eAAU,WAAW;AACrB;;AAEF,WAAO,UAAU;AAEjB,eAAW,KAAK;AAChB,iBAAa,MAAM;AACnB,sBAAkB,SAAS;YACpBD,GAAQ;AACf,QAAI,CAAC,WAAW,QAAS;IACzB,MAAM,SAAS,EAAE,WAAW;AAC5B,aAAS,OAAO;AAChB,iBAAa,MAAM;AACnB,cAAU,OAAO;;;AAIrB,cAAY;AAEZ,eAAa;AACX,eAAY;;IAEb;EAAC;EAAY;EAAS;EAAU;EAAU;EAAY;EAAQ,CAAC;AAGlE,iBAAgB;AACd,aAAW,UAAU;AACrB,eAAa;AACX,cAAW,UAAU;AACrB,gBAAa,SAAS,WAAW;AACjC,UAAO,SAAS,WAAW;AAC3B,UAAO,SAAS,WAAW;AAC3B,OAAI,U
AAU,QACZ,MAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAGhB,mBAAgB,SAAS,OAAO;;IAEjC,EAAE,CAAC;CAGN,MAAM,OAAO,kBAAkB;AAC7B,MAAI,CAAC,cAAc,CAAC,WAAW,CAAC,UAC9B,eAAc,KAAK;IAEpB;EAAC;EAAY;EAAS;EAAU,CAAC;CAGpC,MAAM,gBAAgB,YAAY,OAAO,SAAsC;EAC7E,MAAM,eAAe,IAAI,aAAa,EAAE,YAAY,MAAO,CAAC;EAC5D,MAAM,cAAc,MAAM,KAAK,aAAa;EAC5C,MAAM,cAAc,MAAM,aAAa,gBAAgB,YAAY;EACnE,MAAM,cAAc,YAAY,eAAe,EAAE;AAEjD,MAAI,YAAY,eAAe,MAAO;GACpC,MAAM,QAAQ,OAAQ,YAAY;GAClC,MAAM,YAAY,KAAK,MAAM,YAAY,SAAS,MAAM;GACxD,MAAM,YAAY,IAAI,aAAa,UAAU;AAC7C,QAAK,IAAI,IAAI,GAAG,IAAI,WAAW,KAAK;IAClC,MAAM,WAAW,IAAI;IACrB,MAAM,QAAQ,KAAK,MAAM,SAAS;IAClC,MAAM,OAAO,KAAK,IAAI,QAAQ,GAAG,YAAY,SAAS,EAAE;IACxD,MAAM,IAAI,WAAW;AACrB,cAAU,KAAK,YAAY,UAAU,IAAI,KAAK,YAAY,QAAQ;;AAEpE,gBAAa,OAAO;AACpB,UAAO;;AAGT,eAAa,OAAO;AACpB,SAAO,IAAI,aAAa,YAAY;IACnC,EAAE,CAAC;CAGN,MAAM,kBAAkB,YACtB,OAAO,OAAqB,eAAsC;AAChE,SAAO,IAAI,SAAS,YAAY;AAC9B,OAAI,CAAC,gBAAgB,QACnB,iBAAgB,UAAU,IAAI,cAAc;GAE9C,MAAM,MAAM,gBAAgB;GAE5B,MAAM,SAAS,IAAI,aAAa,GAAG,MAAM,QAAQ,WAAW;GAC5D,MAAM,cAAc,IAAI,aAAa,MAAM;AAC3C,UAAO,cAAc,aAAa,EAAE;GAEpC,MAAM,SAAS,IAAI,oBAAoB;AACvC,UAAO,SAAS;AAChB,UAAO,QAAQ,IAAI,YAAY;AAC/B,UAAO,gBAAgB;AACrB,QAAI,WAAW,QACb,UAAS;;AAGb,UAAO,OAAO;AACd,iBAAc,UAAU;IACxB;IAEJ,EAAE,CACH;AAwPD,QAAO;EACL;EACA,gBAvPqB,YAAY,YAAY;AAC7C,OAAI,UAAU,OAAQ;AAGtB,OAAI,CAAC,WAAW,CAAC,WAAW;AAC1B,kBAAc,KAAK;AACnB;;AAGF,gBAAa,UAAU;AAEvB,OAAI;IACF,MAAM,SAAS,MAAM,UAAU,aAAa,aAAa,EACvD,OAAO;KAAE,YAAY;KAAO,cAAc;KAAG,kBAAkB;KAAM,EACtE,CAAC;AAEF,cAAU,UAAU;AACpB,mBAAe,UAAU,EAAE;IAE3B,MAAM,gBAAgB,IAAI,cAAc,OAAO;AAC/C,qBAAiB,UAAU;AAE3B,kBAAc,mBAAmB,UAAU;AACzC,SAAI,MAAM,KAAK,OAAO,EACpB,gBAAe,QAAQ,KAAK,MAAM,KAAK;;AAI3C,kBAAc,MAAM,IAAI;AACxB,aAAS,YAAY;AACrB,aAAS,KAAK;YACPA,GAAQ;IACf,MAAM,SAAS,EAAE,WAAW;AAC5B,aAAS,OAAO;AAChB,cAAU,OAAO;;KAElB;GAAC;GAAO;GAAS;GAAW;GAAQ,CAAC;EAoNtC,eAjNoB,YAAY,YAAY;AAC5C,OAAI,UAAU,YAAa;GAE3B,MAAM,gBAAgB,iBAAiB;AACvC,OAAI,CAAC,cAAe;AAEpB,UAAO,IAAI,SAAe,YAAY;AACpC,kBAAc,SAAS,YAAY;AAEjC,SAAI,UAAU,SAAS;AACrB,WAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,gBAAU,UAAU;;AAGtB,SAAI,aAAa,SAAS;AACxB,eAAS,OAAO;AAChB,eAAS;AACT;;KAGF,MAAM,YAAY,IAAI,KAAK,eAAe,SAAS,EAAE,MAAM,cAAc,CAAC;AAE1E,SAAI;AAEF,eAAS,eAAe;MACxB,MAAM,YAAY,MAAM,cAAc,UAAU;MAGhD,IAAI,WAAW,MAAM,IAAI,SAAiB,YAAY,cAAc;OAClE,MAAM,WAAW,MAAoB;QACnC,MAAM,EAAE,MAAM,YAAY,EAAE;AAC5B,YAAI,SAAS,cAAc;AACzB,gBAAO,SAAS,oBAAoB,WAAW,QAAQ;AACvD,oBAAW,QAAQ;;AAErB,YAAI,SAAS,SAAS;AACpB,gBAAO,SAAS,oBAAoB,WAAW,QAAQ;AACvD,mBAAU,IAAI,MAAM,QAAQ,CAAC;;;AAGjC,cAAO,SAAS,iBAAiB,WAAW,QAAQ;OACpD,MAAM,aAAa,IAAI,aAAa,UAAU;AAC9C,cAAO,SAAS,YAAY;QAAE,MAAM;QAAc,SAAS,EAAE,OAAO,YAAY;QAAE,EAAE,CAClF,WAAW,OACZ,CAAC;QACF;AAEF,iBAAW,SAAS,MAAM;AAG1B,UACE,aAAa,mBACb,aAAa,mBACb,aAAa,gBAEb,YAAW;AAGb,UAAI,aAAa,WAAW,CAAC,UAAU;AACrC,gBAAS,OAAO;AAChB,gBAAS;AACT;;MAIF,MAAM,YAAY,QAAQ,KAAK,KAAK;AACpC,mBAAa,MAAM,CAAC,GAAG,GAAG;OAAE,IAAI;OAAW,MAAM;OAAQ,SAAS;OAAU,CAAC,CAAC;AAC9E,oBAAc,SAAS;AAGvB,eAAS,WAAW;MAGpB,MAAM,UAAU,SAAS,KAAK,OAAO;OACnC,MAAM,EAAE;OACR,SAAS,EAAE;OACZ,EAAE;AACH,cAAQ,KAAK;OAAE,MAAM;OAAQ,SAAS;OAAU,CAAC;MAEjD,IAAI,eAAe;MACnB,IAAI,eAAe;AAEnB,YAAM,aAAa,QAAQ,SAAS,UAAU;OAC5C;OACA;OACA;OACA,UAAU,UAAuB;AAC/B,YAAI,aAAa,QAAS;AAC1B,YAAI,MAAM,UAAU,WAClB,iBAAgB,MAAM;YAEtB,iBAAgB,MAAM;;OAG3B,CAAC;AAEF,UAAI,aAAa,SAAS;AACxB,gBAAS,OAAO;AAChB,gBAAS;AACT;;MAIF,MAAM,iBAAiB,aAAa,KAAK,KAAK;AAC9C,mBAAa,MAAM,CACjB,GAAG,GACH;OACE,IAAI;OACJ,MAAM;OACN,SAAS;OACT,UAAU,gBAAgB;OAC3B,CACF,CAAC;AACF,yBAAmB,aAAa;AAGhC,UAAI,aAAa,MAAM,EAAE;AACvB,gBAAS,WAAW;OAGpB,MAAM,YAAY,MAAM,IAAI,SACzB,YAAY,cAAc;QACzB,MAAM,WAAW,MAAoB;SACnC,MAAM,EAAE,MAAM,YAAY,EAAE;AAC5B,aAAI,SAAS,SAAS;AACpB,iBAAO,SAAS,oBAAoB,WAAW,QAAQ
;AACvD,qBAAW;WAAE,OAAO,QAAQ;WAAO,YAAY,QAAQ;WAAY,CAAC;;AAEtE,aAAI,SAAS,SAAS;AACpB,iBAAO,SAAS,oBAAoB,WAAW,QAAQ;AACvD,oBAAU,IAAI,MAAM,QAAQ,CAAC;;;AAGjC,eAAO,SAAS,iBAAiB,WAAW,QAAQ;AACpD,eAAO,SAAS,YAAY;SAC1B,MAAM;SACN,SAAS;UAAE,MAAM;UAAc;UAAO;UAAO;SAC9C,CAAC;SAEL;AAED,WAAI,CAAC,aAAa,QAChB,OAAM,gBAAgB,UAAU,OAAO,UAAU,WAAW;;AAIhE,eAAS,OAAO;AAChB,eAAS;cACFA,GAAQ;AACf,UAAI,CAAC,WAAW,QAAS;MACzB,MAAM,SAAS,EAAE,WAAW;AAC5B,eAAS,OAAO;AAChB,eAAS,OAAO;AAChB,gBAAU,OAAO;AACjB,eAAS;;;AAIb,kBAAc,MAAM;KACpB;KACD;GACD;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,CAAC;EAoCA,QAjCa,kBAAkB;AAC/B,gBAAa,UAAU;AAEvB,OAAI,iBAAiB,WAAW,UAAU,YACxC,kBAAiB,QAAQ,MAAM;AAGjC,OAAI,UAAU,SAAS;AACrB,SAAK,MAAM,SAAS,UAAU,QAAQ,WAAW,CAC/C,OAAM,MAAM;AAEd,cAAU,UAAU;;AAGtB,OAAI,cAAc,QAChB,KAAI;AACF,kBAAc,QAAQ,MAAM;WACtB;AAGV,kBAAe,UAAU,EAAE;AAC3B,YAAS,OAAO;KACf,CAAC,MAAM,CAAC;EAYT,OATY,kBAAkB;AAC9B,eAAY,EAAE,CAAC;KACd,EAAE,CAAC;EAQJ;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;AAMH,MAAM,wBAAwB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAuH9B,SAAS,wBAAgC;CACvC,MAAM,OAAO,IAAI,KAAK,CAAC,sBAAsB,EAAE,EAAE,MAAM,0BAA0B,CAAC;CAClF,MAAM,MAAM,IAAI,gBAAgB,KAAK;CACrC,MAAM,SAAS,IAAI,OAAO,KAAK,EAAE,MAAM,UAAU,CAAC;AAClD,KAAI,gBAAgB,IAAI;AACxB,QAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;AAqFT,SAAgB,aAAa,UAA+B,EAAE,EAAsB;CAClF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MACR,iFACD;CAGH,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CACrD,MAAM,EACJ,QAAQ,2BACR,YAAY,MACZ,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,WAAW,gBAAgB,SAAS,MAAM;CACjD,MAAM,CAAC,SAAS,cAAc,SAAS,MAAM;CAC7C,MAAM,CAAC,OAAO,YAAY,SAAS,KAAsB;CACzD,MAAM,CAAC,iBAAiB,sBAAsB,SAC5C,KACD;CAED,MAAM,YAAY,OAAO,KAAsB;CAC/C,MAAM,mBAAmB,OAAO,MAAM;CAEtC,MAAM,kBAAkB,OAAO,KAA6B;CAC5D,MAAM,kBAAkB,OAAO,KAA4B;CAG3D,MAAM,mBAAmB,aAAa,GAAa,MAAwB;AACzE,MAAI,EAAE,WAAW,EAAE,OACjB,OAAM,IAAI,MAAM,iCAAiC,EAAE,OAAO,MAAM,EAAE,SAAS;EAG7E,IAAI,aAAa;EACjB,IAAI,QAAQ;EACZ,IAAI,QAAQ;AAEZ,OAAK,IAAI,IAAI,GAAG,IAAI,EAAE,QAAQ,KAAK;AACjC,iBAAc,EAAE,KAAK,EAAE;AACvB,YAAS,EAAE,KAAK,EAAE;AAClB,YAAS,EAAE,KAAK,EAAE;;EAGpB,MAAM,YAAY,KAAK,KAAK,MAAM,GAAG,KAAK,KAAK,MAAM;AACrD,MAAI,cAAc,EAAG,QAAO;AAE5B,SAAO,aAAa;IACnB,EAAE,CAAC;CAGN,MAAM,OAAO,kBAAkB;AAE7B,MAAI,WAAW,UAAU,QACvB,QAAO,QAAQ,SAAS;AAI1B,MAAI,iBAAiB,WAAW,gBAAgB,QAC9C,QAAO,gBAAgB;AAGzB,mBAAiB,UAAU;AAC3B,eAAa,KAAK;AAClB,qBAAmB;GAAE,QAAQ;GAAW,SAAS;GAA8B,CAAC;AAGhF,kBAAgB,UAAU,IAAI,SAAe,YAAY;AACvD,mBAAgB,UAAU;IAC1B;EAEF,MAAM,SAAS,uBAAuB;AACtC,YAAU,UAAU;AAIpB,SAAO,iBAAiB,YAAY,MAAoB;GACtD,MAAM,EAAE,MAAM,YAAY,EAAE;AAC5B,WAAQ,IAAI,sCAAsC,MAAM,QAAQ;AAEhE,OAAI,SAAS,YACX;QAAI,QAAQ,WAAW,cAAc,QAAQ,KAC3C,oBAAmB;KACjB,QAAQ;KACR,SAAS,eAAe,QAAQ;KAChC,UAAU,KAAK,MAAO,QAAQ,SAAS,QAAQ,QAAS,IAAI;KAC7D,CAAC;cAEK,SAAS,SAAS;AAC3B,YAAQ,IAAI,iDAAiD;AAC7D,iBAAa,MAAM;AACnB,eAAW,KAAK;AAChB,uBAAmB,EAAE,QAAQ,SAAS,CAAC;AAEvC,oBAAgB,WAAW;AAC3B,YAAQ,IAAI,oDAAoD;AAChE,eAAW;cACF,SAAS,SAAS;AAC3B,YAAQ,MAAM,mCAAmC,QAAQ;AACzD,iBAAa,MAAM;AACnB,aAAS,QAAQ;AACjB,cAAU,QAAQ;;IAEpB;AAEF,SAAO,WAAW,QAAQ;AACxB,WAAQ,MAAM,gCAAgC,IAAI;AAClD,gBAAa,MAAM;GACnB,MAAM,SAAS,IAAI,WAAW;AAC9B,YAAS,OAAO;AAChB,sBAAmB;IAAE,QAAQ;IAAS,SAAS;IAAQ,CAAC;AACxD,aAAU,OAAO;;AAGnB,UAAQ,IAAI,iDAAiD;AAC7D,SAAO,YAAY;GAAE,MAAM;GAAQ,SAAS,EAAE,OAAO;GAAE,CAAC;AAExD,SAAO,gBAAgB;IACtB;EAAC;EAAO;EAAS;EAAS;EAAQ,CAAC;AAGtC,iBAAgB;AACd,MAAI,SACF,OAAM;AAGR,eAAa;AACX,OAAI,UAAU,SAAS;AACrB,cAAU,QAAQ,WAAW;AAC7B,cAAU,UAAU;;;IAGvB,CAAC,UAAU,KAAK,CAAC;CAGpB,MAAM,QAAQ,YACZ,OAAO,SAAoC;AACzC,UAAQ,IAAI,qCAAqC,MAAM,YAAY,GAAG,GAAG,CAAC;EAG1E,MAAM,cAAc,MAAM;AAG1B,UAAQ,IAAI,4CAA4C;AACxD,MAAI,gBAAgB,QAClB,OAAM,gBAAgB;MAEtB,OAAM;AAER,UAAQ,IAA
I,gEAAgE;AAE5E,SAAO,IAAI,SAAS,SAAS,WAAW;GACtC,MAAM,SAAS,UAAU;AACzB,OAAI,CAAC,QAAQ;AACX,YAAQ,MAAM,oCAAoC;AAClD,2BAAO,IAAI,MAAM,6CAA6C,CAAC;AAC/D;;GAIF,MAAM,UAAU,iBAAiB;AAC/B,YAAQ,MAAM,gCAAgC;AAC9C,WAAO,oBAAoB,WAAW,QAAQ;AAC9C,2BAAO,IAAI,MAAM,8BAA8B,CAAC;MAC/C,IAAM;GAET,MAAM,WAAW,MAAoB;AACnC,YAAQ,IAAI,wCAAwC,EAAE,KAAK,KAAK;AAChE,QAAI,EAAE,KAAK,SAAS,aAAa;AAC/B,aAAQ,IAAI,qCAAqC;AACjD,kBAAa,QAAQ;AACrB,YAAO,oBAAoB,WAAW,QAAQ;AAC9C,aAAQ,EAAE,KAAK,QAAQ,OAAO;eACrB,EAAE,KAAK,SAAS,SAAS;AAClC,aAAQ,MAAM,2BAA2B,EAAE,KAAK,QAAQ;AACxD,kBAAa,QAAQ;AACrB,YAAO,oBAAoB,WAAW,QAAQ;AAC9C,YAAO,IAAI,MAAM,EAAE,KAAK,QAAQ,CAAC;;;AAIrC,UAAO,iBAAiB,WAAW,QAAQ;AAC3C,WAAQ,IAAI,kDAAkD;AAC9D,UAAO,YAAY;IAAE,MAAM;IAAS,SAAS;KAAE;KAAM;KAAW;IAAE,CAAC;AACnE,WAAQ,IAAI,uDAAuD;IACnE;IAEJ,CAAC,MAAM,UAAU,CAClB;CAGD,MAAM,aAAa,YACjB,OAAO,UAAmD;EAExD,MAAM,cAAc,MAAM;AAG1B,MAAI,gBAAgB,QAClB,OAAM,gBAAgB;MAEtB,OAAM;AAGR,SAAO,IAAI,SAAS,SAAS,WAAW;GACtC,MAAM,SAAS,UAAU;AACzB,OAAI,CAAC,QAAQ;AACX,2BAAO,IAAI,MAAM,6CAA6C,CAAC;AAC/D;;GAIF,MAAM,UAAU,iBAAiB;AAC/B,WAAO,oBAAoB,WAAW,QAAQ;AAC9C,2BAAO,IAAI,MAAM,oCAAoC,CAAC;MACrD,IAAM;GAET,MAAM,WAAW,MAAoB;AACnC,QAAI,EAAE,KAAK,SAAS,cAAc;AAChC,kBAAa,QAAQ;AACrB,YAAO,oBAAoB,WAAW,QAAQ;AAC9C,aAAQ,EAAE,KAAK,QAAQ;eACd,EAAE,KAAK,SAAS,SAAS;AAClC,kBAAa,QAAQ;AACrB,YAAO,oBAAoB,WAAW,QAAQ;AAC9C,YAAO,IAAI,MAAM,EAAE,KAAK,QAAQ,CAAC;;;AAIrC,UAAO,iBAAiB,WAAW,QAAQ;AAC3C,UAAO,YAAY;IAAE,MAAM;IAAc,SAAS;KAAE;KAAO;KAAW;IAAE,CAAC;IACzE;IAEJ,CAAC,MAAM,UAAU,CAClB;AAqDD,QAAO;EACL;EACA;EACA,YArDiB,YACjB,OAAO,OAAe,UAAmC;GACvD,MAAM,CAAC,MAAM,QAAQ,MAAM,QAAQ,IAAI,CAAC,MAAM,MAAM,EAAE,MAAM,MAAM,CAAC,CAAC;AACpE,UAAO,iBAAiB,MAAM,KAAK;KAErC,CAAC,OAAO,iBAAiB,CAC1B;EAgDC,QA7Ca,YACb,OAAO,OAAe,QAAkB,SAAkD;GACxF,MAAM,CAAC,UAAU,cAAc,MAAM,QAAQ,IAAI,CAAC,MAAM,MAAM,EAAE,WAAW,OAAO,CAAC,CAAC;GAEpF,MAAME,UAAiC,WAAW,KAC/C,KAAyB,WAAmB;IAC3C,MAAM,IAAI;IACV,OAAO,iBAAiB,UAAU,IAAI,OAAO;IAC7C;IACD,EACF;AAED,WAAQ,MAAM,GAAG,MAAM,EAAE,QAAQ,EAAE,MAAM;AACzC,UAAO,OAAO,QAAQ,MAAM,GAAG,KAAK,GAAG;KAEzC;GAAC;GAAO;GAAY;GAAiB,CACtC;EA8BC,aA3BkB,YAClB,OACE,WACA,YACA,SACmC;GAGnC,MAAMA,WAFgB,MAAM,WAAW,WAAW,EAEG,KAClD,KAAyB,WAAmB;IAC3C,MAAM,IAAI;IACV,OAAO,iBAAiB,WAAW,IAAI,OAAO;IAC9C;IACD,EACF;AAED,WAAQ,MAAM,GAAG,MAAM,EAAE,QAAQ,EAAE,MAAM;AACzC,UAAO,OAAO,QAAQ,MAAM,GAAG,KAAK,GAAG;KAEzC,CAAC,YAAY,iBAAiB,CAC/B;EAQC;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;AAsCH,eAAsB,iBACpB,SACA,UAA0B,EAAE,EACb;CACf,MAAM,EAAE,eAAe;CAGvB,MAAM,SAAS,MAAM,mBAAmB;EACtC;EACA,aAAa,MAAM;AACjB,OAAI,EAAE,WAAW,cACf,cAAa;IACX,QAAQ;IACR,MAAM,EAAE;IACR,UAAU,EAAE;IACb,CAAC;OAEF,cAAa;IAAE,QAAQ;IAAW,SAAS,EAAE;IAAQ,CAAC;;EAG3D,CAAC;AAEF,cAAa,EAAE,QAAQ,SAAS,CAAC;AACjC,QAAO,WAAW;;;;;;;;;;AAWpB,eAAsB,sBACpB,UAAU,2BACV,UAA0B,EAAE,EACb;CACf,MAAM,EAAE,eAAe;AAEvB,QAAO,IAAI,SAAS,SAAS,WAAW;EACtC,MAAM,SAAS,uBAAuB;AAEtC,SAAO,aAAa,MAAoB;GACtC,MAAM,EAAE,MAAM,YAAY,EAAE;AAE5B,OAAI,SAAS,YACX;QAAI,QAAQ,WAAW,cAAc,QAAQ,KAC3C,cAAa;KACX,QAAQ;KACR,MAAM,QAAQ;KACd,UAAU,KAAK,MAAO,QAAQ,SAAS,QAAQ,QAAS,IAAI;KAC7D,CAAC;cAEK,SAAS,SAAS;AAC3B,iBAAa,EAAE,QAAQ,SAAS,CAAC;AACjC,WAAO,WAAW;AAClB,aAAS;cACA,SAAS,SAAS;AAC3B,iBAAa;KAAE,QAAQ;KAAS,SAAS;KAAS,CAAC;AACnD,WAAO,WAAW;AAClB,WAAO,IAAI,MAAM,QAAQ,CAAC;;;AAI9B,eAAa;GAAE,QAAQ;GAAW,SAAS,WAAW,QAAQ;GAAM,CAAC;AACrE,SAAO,YAAY;GAAE,MAAM;GAAQ,SAAS,EAAE,OAAO,SAAS;GAAE,CAAC;GACjE;;;;;;;;;;AAWJ,eAAsB,gBACpB,UAAsB,cACtB,UAA0B,EAAE,EACb;CACf,MAAM,EAAE,eAAe;CACvB,MAAM,cAAc,WAAW;AAE/B,KAAI,CAAC,YACH,OAAM,IAAI,MAAM,sBAAsB,UAAU;AAGlD,QAAO,IAAI,SAAS,SAAS,WAAW;EACtC,MAAM,SAAS,iBAAiB;AAEhC,SAAO,aAAa,MAAoB;GACtC,MAAM,EAAE,MAAM,YAAY,EAAE;AAE5B,OAAI,SAAS,YACX;QAAI,QAAQ,WAAW,cAAc,QAAQ,KAC3C,cAAa;KACX,QAAQ;KACR,MAAM,QAAQ;KACd,UAAU,
KAAK,MAAO,QAAQ,SAAS,QAAQ,QAAS,IAAI;KAC7D,CAAC;cAEK,SAAS,SAAS;AAC3B,iBAAa,EAAE,QAAQ,SAAS,CAAC;AACjC,WAAO,WAAW;AAClB,aAAS;cACA,SAAS,SAAS;AAC3B,iBAAa;KAAE,QAAQ;KAAS,SAAS;KAAS,CAAC;AACnD,WAAO,WAAW;AAClB,WAAO,IAAI,MAAM,QAAQ,CAAC;;;AAI9B,eAAa;GAAE,QAAQ;GAAW,SAAS,WAAW,QAAQ;GAAM,CAAC;AACrE,SAAO,YAAY;GACjB,MAAM;GACN,SAAS;IACP;IACA,MAAM,YAAY;IAClB,QAAQ,YAAY;IACrB;GACF,CAAC;GACF;;;;;;;;;;AAWJ,eAAsB,gBACpB,UAAU,mBACV,UAA0B,EAAE,EACb;CACf,MAAM,EAAE,eAAe;CACvB,MAAM,WAAW,gBAAgB,QAAQ;AAEzC,QAAO,IAAI,SAAS,SAAS,WAAW;EACtC,MAAM,SAAS,iBAAiB;AAEhC,SAAO,aAAa,MAAoB;GACtC,MAAM,EAAE,MAAM,YAAY,EAAE;AAE5B,OAAI,SAAS,YACX;QAAI,QAAQ,WAAW,cAAc,QAAQ,KAC3C,cAAa;KACX,QAAQ;KACR,MAAM,QAAQ;KACd,UAAU,KAAK,MAAO,QAAQ,SAAS,QAAQ,QAAS,IAAI;KAC7D,CAAC;cAEK,SAAS,SAAS;AAC3B,iBAAa,EAAE,QAAQ,SAAS,CAAC;AACjC,WAAO,WAAW;AAClB,aAAS;cACA,SAAS,SAAS;AAC3B,iBAAa;KAAE,QAAQ;KAAS,SAAS;KAAS,CAAC;AACnD,WAAO,WAAW;AAClB,WAAO,IAAI,MAAM,QAAQ,CAAC;;;AAI9B,eAAa;GAAE,QAAQ;GAAW,SAAS,WAAW,QAAQ;GAAM,CAAC;AACrE,SAAO,YAAY;GAAE,MAAM;GAAQ,SAAS,EAAE,OAAO,UAAU;GAAE,CAAC;GAClE;;;AAIJ,SAAS,gBAAgB,SAAyB;AAUhD,QAT8C;EAC5C,gBAAgB;EAChB,mBAAmB;EACnB,gBAAgB;EAChB,mBAAmB;EACnB,iBAAiB;EACjB,oBAAoB;EACpB,0BAA0B;EAC3B,CACoB,YAAY;;;;;AAUnC,SAAgB,oBAA6B;AAC3C,KAAI,OAAO,cAAc,YACvB,QAAO;AAET,QAAO,SAAS;;;;;AAMlB,eAAsB,gBAIZ;AACR,KAAI,CAAC,mBAAmB,CACtB,QAAO,EAAE,WAAW,OAAO;AAG7B,KAAI;EACF,MAAM,UAAU,MAAO,UAAkB,IAAI,gBAAgB;AAC7D,MAAI,CAAC,QACH,QAAO,EAAE,WAAW,OAAO;EAG7B,MAAM,OAAO,MAAM,QAAQ,oBAAoB;AAC/C,SAAO;GACL,WAAW;GACX,SAAS,KAAK;GACd,QAAQ,KAAK;GACd;SACK;AACN,SAAO,EAAE,WAAW,OAAO;;;AAI/B,sBAAe;CACb;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACD"}
1
+ {"version":3,"file":"index.js","names":["BUILTIN_MODELS: Record<string, ModelConfig>","currentResolve: ((text: string) => void) | null","currentReject: ((error: Error) => void) | null","gerbilWorker: GerbilWorker","options","userMessage: Message","assistantMessage: Message","KOKORO_BROWSER_VOICES: BrowserVoiceInfo[]","SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[]","TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n>","audioContext: AudioContext | null","resolveSTTModel","progress: STTProgress","e: any","ttsConfig","results: BrowserSearchResult[]"],"sources":["../../src/core/models.ts","../../src/browser/index.ts"],"sourcesContent":["/**\n * Model Registry\n *\n * Supports built-in models and any HuggingFace model via hf:org/model syntax\n */\n\nimport type { ModelConfig, ModelSource } from \"./types.js\";\n\n// ============================================\n// Built-in Models (curated & tested)\n// ============================================\n\nexport const BUILTIN_MODELS: Record<string, ModelConfig> = {\n \"qwen3-0.6b\": {\n id: \"qwen3-0.6b\",\n repo: \"onnx-community/Qwen3-0.6B-ONNX\",\n description: \"Qwen3 0.6B - Best balance of speed and quality, supports thinking\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: true,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-0.5b\": {\n id: \"qwen2.5-0.5b\",\n repo: \"onnx-community/Qwen2.5-0.5B-Instruct\",\n description: \"Qwen2.5 0.5B - Fast and capable\",\n size: \"~350MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-coder-0.5b\": {\n id: \"qwen2.5-coder-0.5b\",\n repo: \"onnx-community/Qwen2.5-Coder-0.5B-Instruct\",\n description: \"Qwen2.5 Coder 0.5B - Optimized for code\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"smollm2-360m\": {\n id: \"smollm2-360m\",\n repo: \"HuggingFaceTB/SmolLM2-360M-Instruct\",\n description: \"SmolLM2 360M - Fast, good for simple tasks\",\n size: \"~250MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"smollm2-135m\": {\n id: \"smollm2-135m\",\n repo: \"HuggingFaceTB/SmolLM2-135M-Instruct\",\n description: \"SmolLM2 135M - Fastest, basic generation\",\n size: \"~100MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"phi-3-mini\": {\n id: \"phi-3-mini\",\n repo: \"microsoft/Phi-3-mini-4k-instruct-onnx\",\n description: \"Phi-3 Mini - High quality, larger model\",\n size: \"~2.1GB\",\n contextLength: 4096,\n supportsThinking: false,\n supportsJson: true,\n family: \"phi\",\n },\n \"ministral-3b\": {\n id: \"ministral-3b\",\n repo: \"mistralai/Ministral-3-3B-Instruct-2512-ONNX\",\n description: \"Ministral 3 3B - Vision + Reasoning, 256k context\",\n size: \"~2.5GB\",\n contextLength: 262_144,\n supportsThinking: true,\n supportsJson: true,\n supportsVision: true,\n visionEncoderSize: \"0.4B\",\n family: \"mistral\",\n },\n};\n\n// ============================================\n// Model Resolution\n// ============================================\n\n/**\n * Parse model identifier and resolve to source\n *\n * Supported formats:\n * - \"qwen3-0.6b\" (built-in)\n * - \"hf:org/model\" (HuggingFace shorthand)\n * - \"https://huggingface.co/org/model\" (full URL)\n * - \"file:./path/to/model\" (local path)\n */\nexport function resolveModel(modelId: 
string): ModelSource {\n // Built-in model\n if (BUILTIN_MODELS[modelId]) {\n return {\n type: \"builtin\",\n path: BUILTIN_MODELS[modelId].repo,\n };\n }\n\n // HuggingFace shorthand: hf:org/model\n if (modelId.startsWith(\"hf:\")) {\n const repo = modelId.slice(3);\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // HuggingFace URL\n if (modelId.startsWith(\"https://huggingface.co/\")) {\n const repo = modelId.replace(\"https://huggingface.co/\", \"\");\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // Local file\n if (modelId.startsWith(\"file:\")) {\n const path = modelId.slice(5);\n return {\n type: \"local\",\n path,\n };\n }\n\n // Assume it's a HuggingFace repo if it contains a slash\n if (modelId.includes(\"/\")) {\n return {\n type: \"huggingface\",\n path: modelId,\n };\n }\n\n // Unknown - treat as HuggingFace\n return {\n type: \"huggingface\",\n path: modelId,\n };\n}\n\n/**\n * Get model config (built-in only)\n */\nexport function getModelConfig(modelId: string): ModelConfig | null {\n return BUILTIN_MODELS[modelId] || null;\n}\n\n// Default context lengths by model family (when config.json is unavailable)\nconst FAMILY_CONTEXT_DEFAULTS: Record<string, number> = {\n qwen: 32_768,\n mistral: 262_144, // Ministral models support up to 256K\n llama: 8192,\n phi: 4096,\n smollm: 8192,\n other: 4096,\n};\n\n/**\n * Create model config for external model\n */\nexport function createExternalModelConfig(\n modelId: string,\n repo: string,\n contextLength?: number,\n): ModelConfig {\n // Try to infer family from repo name\n let family: ModelConfig[\"family\"] = \"other\";\n const repoLower = repo.toLowerCase();\n\n if (repoLower.includes(\"qwen\")) {\n family = \"qwen\";\n } else if (repoLower.includes(\"smollm\")) {\n family = \"smollm\";\n } else if (repoLower.includes(\"phi\")) {\n family = \"phi\";\n } else if (repoLower.includes(\"mistral\") || repoLower.includes(\"ministral\")) {\n family = \"mistral\";\n } else if (repoLower.includes(\"llama\")) {\n family = \"llama\";\n }\n\n // Detect vision models from common patterns\n const supportsVision =\n repoLower.includes(\"vision\") ||\n repoLower.includes(\"vlm\") ||\n repoLower.includes(\"image-text\") ||\n repoLower.includes(\"ministral\");\n\n return {\n id: modelId,\n repo,\n description: `External model: ${repo}`,\n size: \"Unknown\",\n contextLength: contextLength || FAMILY_CONTEXT_DEFAULTS[family] || 4096,\n supportsThinking: family === \"qwen\" || family === \"mistral\",\n supportsJson: family === \"qwen\" || family === \"phi\" || family === \"mistral\",\n supportsVision,\n family,\n };\n}\n\n/**\n * Fetch context length from HuggingFace model config\n */\nexport async function fetchModelContextLength(repo: string): Promise<number | null> {\n try {\n const res = await fetch(`https://huggingface.co/${repo}/raw/main/config.json`);\n if (!res.ok) {\n return null;\n }\n\n const config = await res.json();\n\n // Different models use different field names\n return (\n config.max_position_embeddings ||\n config.n_positions ||\n config.max_seq_len ||\n config.sliding_window || // Some models use this\n config.context_length ||\n null\n );\n } catch {\n return null;\n }\n}\n\n/**\n * List all built-in models\n */\nexport function listBuiltinModels(): ModelConfig[] {\n return Object.values(BUILTIN_MODELS);\n}\n\n/**\n * Search HuggingFace models (placeholder - would need HF API)\n */\nexport async function searchModels(query: string): Promise<ModelConfig[]> {\n // TODO: Implement HuggingFace API 
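The resolution rules are easiest to read as input/output pairs. Expected results per the branches above (import path assumed; `resolveModel` lives in `src/core/models.ts` and may not be re-exported from the browser entry):

```ts
// Expected outputs of resolveModel for each supported format.
import { resolveModel } from "@tryhamster/gerbil";

resolveModel("qwen3-0.6b");
// => { type: "builtin", path: "onnx-community/Qwen3-0.6B-ONNX" }

resolveModel("hf:org/model");
// => { type: "huggingface", path: "org/model" }

resolveModel("https://huggingface.co/org/model");
// => { type: "huggingface", path: "org/model" }

resolveModel("file:./models/local");
// => { type: "local", path: "./models/local" }

resolveModel("org/model"); // bare repo containing a slash
// => { type: "huggingface", path: "org/model" }
```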
search\n // For now, filter built-in models\n const q = query.toLowerCase();\n return listBuiltinModels().filter(\n (m) =>\n m.id.toLowerCase().includes(q) ||\n m.description.toLowerCase().includes(q) ||\n m.family.toLowerCase().includes(q),\n );\n}\n","/**\n * Gerbil Browser Support\n *\n * Run LLMs directly in the browser with WebGPU acceleration.\n *\n * @example useChat (React)\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <form onSubmit={handleSubmit}>\n * {messages.map(m => <div key={m.id}>{m.role}: {m.content}</div>)}\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * </form>\n * );\n * }\n * ```\n *\n * @example useCompletion (React)\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading } = useCompletion();\n * if (isLoading) return <div>Loading...</div>;\n * return <button onClick={() => complete(\"Write a haiku\")}>{completion}</button>;\n * }\n * ```\n *\n * @example Low-level API\n * ```ts\n * import { createGerbilWorker } from \"@tryhamster/gerbil/browser\";\n *\n * const gerbil = await createGerbilWorker({\n * modelId: \"qwen3-0.6b\",\n * onToken: (token) => console.log(token.text),\n * });\n * await gerbil.generate(\"Hello!\");\n * gerbil.terminate();\n * ```\n */\n\nimport { resolveModel } from \"../core/models.js\";\n\n// Re-export models and types (browser-safe, no Node.js dependencies)\nexport { BUILTIN_MODELS } from \"../core/models.js\";\nexport type * from \"../core/types.js\";\n\n// NOTE: We intentionally do NOT export Gerbil from core here.\n// The core Gerbil class has Node.js code paths (chrome-backend/puppeteer)\n// that break browser bundlers. 
Use createGerbilWorker() instead for browser.\n\n// ============================================\n// Types\n// ============================================\n\nexport type WorkerProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n error?: string;\n};\n\nexport type WorkerToken = {\n status: \"token\";\n text: string;\n state: \"thinking\" | \"answering\";\n numTokens: number;\n tps: number;\n};\n\nexport type WorkerComplete = {\n status: \"complete\";\n text: string;\n numTokens: number;\n totalTime: number;\n tps: number;\n};\n\nexport type GerbilWorkerOptions = {\n /** Model ID to load (default: \"qwen3-0.6b\") */\n modelId?: string;\n /** Called during model loading with progress updates */\n onProgress?: (progress: WorkerProgress) => void;\n /** Called for each token during streaming generation */\n onToken?: (token: WorkerToken) => void;\n /** Called when generation is complete */\n onComplete?: (result: WorkerComplete) => void;\n /** Called on errors */\n onError?: (error: string) => void;\n /** Worker script URL (auto-detected if not provided) */\n workerUrl?: string;\n};\n\nexport type GenerateStreamOptions = {\n /** Maximum tokens to generate */\n maxTokens?: number;\n /** Temperature for sampling (0 = deterministic) */\n temperature?: number;\n /** Top-p nucleus sampling */\n topP?: number;\n /** Top-k sampling */\n topK?: number;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** System prompt */\n system?: string;\n /** Image URLs or data URIs (for vision models) */\n images?: string[];\n /** Conversation history for multi-turn (includes all previous messages) */\n history?: Array<{ role: \"user\" | \"assistant\" | \"system\"; content: string }>;\n};\n\nexport type GerbilWorker = {\n /** Generate text with streaming */\n generate: (prompt: string, options?: GenerateStreamOptions) => Promise<string>;\n /** Interrupt current generation */\n interrupt: () => void;\n /** Reset conversation cache */\n reset: () => void;\n /** Terminate the worker */\n terminate: () => void;\n /** Check if model is loaded */\n isReady: () => boolean;\n};\n\n// ============================================\n// Web Worker Factory\n// ============================================\n\n/**\n * Create a Gerbil worker for streaming WebGPU inference\n *\n * Uses a Web Worker to keep the UI responsive during model loading\n * and text generation, with real-time token streaming.\n */\nexport async function createGerbilWorker(options: GerbilWorkerOptions = {}): Promise<GerbilWorker> {\n const { modelId = \"qwen3-0.6b\", onProgress, onToken, onComplete, onError } = options;\n\n // Resolve model to HuggingFace path\n const source = resolveModel(modelId);\n\n return new Promise((resolve, reject) => {\n // Create inline worker from the worker code\n const workerCode = `\n import {\n AutoTokenizer,\n AutoModelForCausalLM,\n AutoProcessor,\n AutoModelForImageTextToText,\n RawImage,\n TextStreamer,\n InterruptableStoppingCriteria,\n env,\n } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n\n // Enable IndexedDB caching for browser (prevents re-downloading models)\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n\n class ModelPipeline {\n static tokenizer = null;\n static model = null;\n static processor = null;\n static visionModel = null;\n 
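> Editorial note: a minimal sketch of a `WorkerProgress` consumer using only the fields declared above; per the field docs, a `downloadCount` of 0 means files are streaming from the browser cache rather than the network.

```ts
// Sketch: logging model-load progress from the message shapes above.
const onProgress = (p: WorkerProgress) => {
  if (p.status === "downloading") {
    // downloadCount is 0 when files come from the browser cache
    console.log(`${p.file}: ${p.progress}% (${p.downloadCount}/${p.totalFiles} files)`);
  } else if (p.status === "loading") {
    console.log(p.message); // e.g. "Loading model..." / "Compiling shaders..."
  } else if (p.status === "error") {
    console.error(p.error);
  }
};
```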
static modelId = \"\";\n static isVision = false;\n\n static async getInstance(modelId, options = {}, progressCallback) {\n if (this.modelId !== modelId) {\n this.tokenizer = null;\n this.model = null;\n this.processor = null;\n this.visionModel = null;\n }\n this.modelId = modelId;\n \n // Detect vision models\n this.isVision = options.vision || \n modelId.toLowerCase().includes(\"ministral\") ||\n modelId.toLowerCase().includes(\"vision\") ||\n modelId.toLowerCase().includes(\"vlm\");\n\n const dtype = options.dtype || \"q4f16\";\n const device = options.device || \"webgpu\";\n\n if (this.isVision) {\n // Load vision model components\n // Note: Don't specify dtype for vision models - let transformers.js pick defaults\n if (!this.processor) {\n this.processor = await AutoProcessor.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.visionModel) {\n this.visionModel = await AutoModelForImageTextToText.from_pretrained(modelId, {\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n processor: this.processor, \n model: this.visionModel, \n tokenizer: this.processor.tokenizer,\n isVision: true \n };\n } else {\n // Load text-only model components\n if (!this.tokenizer) {\n this.tokenizer = await AutoTokenizer.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.model) {\n this.model = await AutoModelForCausalLM.from_pretrained(modelId, {\n dtype,\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n tokenizer: this.tokenizer, \n model: this.model, \n isVision: false \n };\n }\n }\n }\n\n const stoppingCriteria = new InterruptableStoppingCriteria();\n let pastKeyValuesCache = null;\n\n async function load(data) {\n const { modelId, options = {} } = data;\n self.postMessage({ status: \"loading\", message: \"Loading model...\" });\n\n const downloadState = {\n downloading: new Set(),\n completed: new Set(),\n isDownloading: false,\n };\n\n try {\n const result = await ModelPipeline.getInstance(\n modelId,\n options,\n (progress) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n if (pct < 100) {\n downloadState.downloading.add(progress.file);\n downloadState.isDownloading = true;\n } else if (pct === 100) {\n downloadState.downloading.delete(progress.file);\n downloadState.completed.add(progress.file);\n }\n if (downloadState.isDownloading) {\n self.postMessage({\n status: \"downloading\",\n file: progress.file,\n progress: pct,\n downloadCount: downloadState.downloading.size,\n totalFiles: downloadState.completed.size + downloadState.downloading.size,\n });\n }\n }\n }\n );\n\n self.postMessage({ status: \"loading\", message: \"Compiling shaders...\" });\n \n // Warmup differs for vision vs text models\n if (result.isVision) {\n // Vision models need both text and vision warmup\n // Text warmup first\n const textWarmupInputs = result.tokenizer(\"hello\");\n await result.model.generate({ ...textWarmupInputs, max_new_tokens: 1 });\n \n // Vision warmup with synthetic image\n self.postMessage({ status: \"loading\", message: \"Warming up vision encoder...\" });\n try {\n // Create a tiny 8x8 test image using OffscreenCanvas\n const canvas = new OffscreenCanvas(8, 8);\n const ctx = canvas.getContext(\"2d\");\n ctx.fillStyle = \"red\";\n ctx.fillRect(0, 0, 8, 8);\n const blob = await canvas.convertToBlob({ type: \"image/png\" });\n const warmupImage = await RawImage.fromBlob(blob);\n \n // Process with vision pipeline\n const 
warmupContent = [{ type: \"image\" }, { type: \"text\", text: \"hi\" }];\n const warmupMessages = [{ role: \"user\", content: warmupContent }];\n const warmupPrompt = result.processor.apply_chat_template(warmupMessages, { add_generation_prompt: true });\n const warmupInputs = await result.processor(warmupImage, warmupPrompt, { add_special_tokens: false });\n \n // Run vision warmup generation\n await result.model.generate({\n ...warmupInputs,\n max_new_tokens: 1,\n });\n } catch (warmupErr) {\n console.warn(\"Vision warmup failed (non-fatal):\", warmupErr);\n }\n } else {\n const warmupInputs = result.tokenizer(\"a\");\n await result.model.generate({ ...warmupInputs, max_new_tokens: 1 });\n }\n\n self.postMessage({ status: \"ready\", isVision: result.isVision });\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generate(data) {\n const { messages, images = [], options = {} } = data;\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n\n try {\n const result = await ModelPipeline.getInstance(ModelPipeline.modelId, {});\n \n // Route to vision or text generation\n if (result.isVision && images.length > 0) {\n await generateVision(result, messages, images, options);\n } else {\n await generateText(result, messages, options);\n }\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generateText(result, messages, options) {\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n const { tokenizer, model } = result;\n\n const inputs = tokenizer.apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking,\n });\n\n let state = \"answering\";\n const [START_THINKING_TOKEN_ID, END_THINKING_TOKEN_ID] = tokenizer.encode(\n \"<think></think>\",\n { add_special_tokens: false }\n );\n\n let startTime = null;\n let numTokens = 0;\n\n const tokenCallback = (tokens) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tokenId = Number(tokens[0]);\n if (tokenId === START_THINKING_TOKEN_ID) state = \"thinking\";\n else if (tokenId === END_THINKING_TOKEN_ID) state = \"answering\";\n };\n\n const streamCallback = (text) => {\n const tps = startTime ? (numTokens / (performance.now() - startTime)) * 1000 : 0;\n self.postMessage({ status: \"token\", text, state, numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n token_callback_function: tokenCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const { past_key_values, sequences } = await model.generate({\n ...inputs,\n past_key_values: pastKeyValuesCache,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n max_new_tokens: maxTokens,\n streamer,\n stopping_criteria: stoppingCriteria,\n return_dict_in_generate: true,\n });\n\n pastKeyValuesCache = past_key_values;\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: true });\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
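> Editorial note: since the worker above tags every token with a `thinking`/`answering` state (by watching the `<think>`/`</think>` token IDs), callers can split the two streams. A small sketch under the API above:

```ts
// Sketch: separating reasoning from the answer via WorkerToken.state
// (thinking mode applies to Qwen3-family models).
let reasoning = "";
let answer = "";

const gerbil = await createGerbilWorker({
  modelId: "qwen3-0.6b",
  onToken: (t) => {
    if (t.state === "thinking") reasoning += t.text;
    else answer += t.text;
  },
});

await gerbil.generate("Why is the sky blue?", { thinking: true });
console.log({ reasoning, answer });
```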
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n async function generateVision(result, messages, images, options) {\n const { maxTokens = 2048, temperature = 0.7, topP = 0.9, topK = 20 } = options;\n const { processor, model, tokenizer } = result;\n\n self.postMessage({ status: \"progress\", message: \"Preparing vision request...\" });\n\n // Build message content with image placeholders and text\n const lastMessage = messages[messages.length - 1];\n const content = [];\n for (const _ of images) {\n content.push({ type: \"image\" });\n }\n content.push({ type: \"text\", text: lastMessage.content });\n\n // For vision models, include a brief system instruction for concise responses\n // Note: Vision processors handle system differently than text models\n const visionMessages = [\n { role: \"system\", content: \"You are a helpful assistant. Be concise and direct in your responses.\" },\n { role: \"user\", content }\n ];\n\n // Apply chat template with generation prompt\n const chatPrompt = processor.apply_chat_template(visionMessages, {\n add_generation_prompt: true\n });\n\n // Load images (handle both string URLs and { source: string } objects)\n self.postMessage({ status: \"progress\", message: \"Loading images...\" });\n const loadedImages = await Promise.all(\n images.map(img => {\n const url = typeof img === \"string\" ? img : img.source;\n return RawImage.fromURL(url);\n })\n );\n self.postMessage({ status: \"progress\", message: \"Processing inputs...\" });\n\n // Process inputs\n const inputs = await processor(\n loadedImages.length === 1 ? loadedImages[0] : loadedImages,\n chatPrompt,\n { add_special_tokens: false }\n );\n self.postMessage({ status: \"progress\", message: \"Generating response...\" });\n\n let startTime = null;\n let numTokens = 0;\n\n const streamCallback = (text) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tps = (numTokens / (performance.now() - startTime)) * 1000;\n self.postMessage({ status: \"token\", text, state: \"answering\", numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const outputs = await model.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n streamer,\n stopping_criteria: stoppingCriteria,\n });\n\n // Decode output (skip prompt)\n const inputLength = inputs.input_ids.dims?.at(-1) || 0;\n const decoded = processor.batch_decode(\n outputs.slice(null, [inputLength, null]),\n { skip_special_tokens: true }\n );\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
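> Editorial note: a caller-side sketch of the vision path above (the image URL is a placeholder). When `images` is non-empty, the wrapper defaults `maxTokens` to 2048 as shown later in the generate options.

```ts
// Sketch: vision generation with a vision-capable built-in model.
const gerbil = await createGerbilWorker({ modelId: "ministral-3b" });

// Images may be URLs or data URIs; this URL is a placeholder.
const caption = await gerbil.generate("Describe this image.", {
  images: ["https://example.com/photo.jpg"],
});
gerbil.terminate();
```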
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n self.addEventListener(\"message\", async (e) => {\n const { type, ...data } = e.data;\n switch (type) {\n case \"load\": await load(data); break;\n case \"generate\": stoppingCriteria.reset(); await generate(data); break;\n case \"interrupt\": stoppingCriteria.interrupt(); break;\n case \"reset\": pastKeyValuesCache = null; stoppingCriteria.reset(); break;\n }\n });\n\n self.postMessage({ status: \"init\" });\n `;\n\n const blob = new Blob([workerCode], { type: \"application/javascript\" });\n const workerUrl = URL.createObjectURL(blob);\n const worker = new Worker(workerUrl, { type: \"module\" });\n\n let isReady = false;\n let currentResolve: ((text: string) => void) | null = null;\n let currentReject: ((error: Error) => void) | null = null;\n let _generatedText = \"\";\n\n worker.onmessage = (e) => {\n const msg = e.data;\n\n switch (msg.status) {\n case \"init\":\n // Worker initialized, load the model\n worker.postMessage({ type: \"load\", modelId: source.path });\n break;\n\n case \"loading\":\n case \"downloading\":\n onProgress?.(msg as WorkerProgress);\n break;\n\n case \"ready\":\n isReady = true;\n onProgress?.(msg as WorkerProgress);\n resolve(gerbilWorker);\n break;\n\n case \"start\":\n _generatedText = \"\";\n break;\n\n case \"token\":\n _generatedText += msg.text;\n onToken?.(msg as WorkerToken);\n break;\n\n case \"complete\":\n onComplete?.(msg as WorkerComplete);\n currentResolve?.(msg.text);\n currentResolve = null;\n currentReject = null;\n break;\n\n case \"error\":\n onError?.(msg.error);\n onProgress?.({ status: \"error\", error: msg.error });\n if (currentReject) {\n currentReject(new Error(msg.error));\n currentResolve = null;\n currentReject = null;\n } else {\n reject(new Error(msg.error));\n }\n break;\n }\n };\n\n worker.onerror = (e) => {\n const error = e.message || \"Worker error\";\n onError?.(error);\n reject(new Error(error));\n };\n\n const gerbilWorker: GerbilWorker = {\n generate: (prompt: string, options: GenerateStreamOptions = {}) =>\n new Promise((res, rej) => {\n currentResolve = res;\n currentReject = rej;\n\n const system = options.system || \"You are a helpful assistant.\";\n\n // Use history if provided (for multi-turn conversations)\n // Otherwise, just use system + current prompt\n const messages = options.history\n ? [{ role: \"system\", content: system }, ...options.history]\n : [\n { role: \"system\", content: system },\n { role: \"user\", content: prompt },\n ];\n\n // When using history, reset KV cache first to avoid position mismatches\n // (full history is provided, so we don't need cached context)\n if (options.history) {\n worker.postMessage({ type: \"reset\" });\n }\n\n worker.postMessage({\n type: \"generate\",\n messages,\n images: options.images || [],\n options: {\n maxTokens: options.maxTokens ?? (options.images?.length ? 2048 : 256),\n temperature: options.temperature ?? 0.7,\n topP: options.topP ?? 0.9,\n topK: options.topK ?? 20,\n thinking: options.thinking ?? 
false,\n },\n });\n }),\n\n interrupt: () => {\n worker.postMessage({ type: \"interrupt\" });\n },\n\n reset: () => {\n worker.postMessage({ type: \"reset\" });\n },\n\n terminate: () => {\n worker.terminate();\n URL.revokeObjectURL(workerUrl);\n },\n\n isReady: () => isReady,\n };\n });\n}\n\n// ============================================\n// React Hooks\n// ============================================\n\n/** Message in a chat conversation */\nexport type Message = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n /** Attached images (URLs or data URIs) - for vision models */\n images?: string[];\n};\n\n/** Loading progress state */\nexport type LoadingProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n};\n\n/** Options for useChat hook */\nexport type UseChatOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens per response */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Initial messages */\n initialMessages?: Message[];\n /** Auto-load model on mount (default: false - loads on first generate or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Return type for useChat hook */\nexport type UseChatReturn = {\n /** Chat messages */\n messages: Message[];\n /** Current input value */\n input: string;\n /** Set input value */\n setInput: (value: string) => void;\n /** Submit current input */\n handleSubmit: (e?: { preventDefault?: () => void }) => void;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating a response */\n isGenerating: boolean;\n /** Current thinking content (streaming) */\n thinking: string;\n /** Stop generation */\n stop: () => void;\n /** Clear all messages */\n clear: () => void;\n /** Current tokens per second */\n tps: number;\n /** Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n /** Currently attached images (for next message) */\n attachedImages: string[];\n /** Attach an image to the next message */\n attachImage: (imageUrl: string) => void;\n /** Remove an attached image */\n removeImage: (index: number) => void;\n /** Clear all attached images */\n clearImages: () => void;\n /** Send message with specific images (convenience method) */\n sendWithImages: (text: string, images: string[]) => void;\n};\n\n/**\n * React hook for chat with local LLM\n *\n * @example\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <form onSubmit={handleSubmit}>\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * <button disabled={isGenerating}>Send</button>\n * </form>\n * </div>\n * );\n 
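> Editorial note: a sketch of multi-turn use through the `history` option above. As the inline comments note, supplying `history` resets the KV cache, and the message list is built from system plus history (the `prompt` argument is not appended).

```ts
// Sketch: multi-turn generation. With history set, the prompt argument is
// ignored and messages come from system + history.
const gerbil = await createGerbilWorker({ modelId: "qwen3-0.6b" });

const first = await gerbil.generate("Name a small ONNX chat model.");

const followUp = await gerbil.generate("", {
  history: [
    { role: "user", content: "Name a small ONNX chat model." },
    { role: "assistant", content: first },
    { role: "user", content: "Roughly how large is its download?" },
  ],
});
```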
* }\n * ```\n */\nexport function useChat(options: UseChatOptions = {}): UseChatReturn {\n // Lazy import React to avoid SSR issues\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n initialMessages = [],\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<Message[]>(initialMessages);\n const [input, setInput] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [thinking, setThinking] = useState<string>(\"\");\n const [currentResponse, setCurrentResponse] = useState<string>(\"\");\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [attachedImages, setAttachedImages] = useState<string[]>([]);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const messageIdRef = useRef<number>(0);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on generate\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize worker\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCurrentResponse((r: string) => r + token.text);\n }\n },\n onComplete: () => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n // Commit response to messages when generation completes\n useEffect(() => {\n if (!isGenerating && currentResponse) {\n setMessages((msgs: Message[]) => {\n const lastMsg = msgs.at(-1);\n if (lastMsg?.role === \"assistant\") {\n return msgs.map((m: Message, i: number) =>\n i === msgs.length - 1\n ? { ...m, content: currentResponse, thinking: thinking || undefined }\n : m,\n );\n }\n return msgs;\n });\n setCurrentResponse(\"\");\n setThinking(\"\");\n }\n }, [isGenerating, currentResponse, thinking]);\n\n // Store pending message for auto-load scenario\n const pendingMessageRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[]>([]);\n\n // Image management functions\n const attachImage = useCallback((imageUrl: string) => {\n setAttachedImages((imgs: string[]) => [...imgs, imageUrl]);\n }, []);\n\n const removeImage = useCallback((index: number) => {\n setAttachedImages((imgs: string[]) => imgs.filter((_: string, i: number) => i !== index));\n }, []);\n\n const clearImages = useCallback(() => {\n setAttachedImages([]);\n }, []);\n\n // Internal function to send a message with specific images\n const sendMessageWithImages = useCallback(\n (text: string, images: string[]) => {\n if (!text.trim() || isGenerating) {\n return;\n }\n\n messageIdRef.current += 1;\n const userMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"user\",\n content: text.trim(),\n images: images.length > 0 ? images : undefined,\n };\n\n messageIdRef.current += 1;\n const assistantMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"assistant\",\n content: \"\",\n };\n\n setMessages((msgs: Message[]) => [...msgs, userMessage, assistantMessage]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n\n // If worker not loaded, trigger load and queue the message\n if (!workerRef.current) {\n pendingMessageRef.current = text.trim();\n pendingImagesRef.current = images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(text.trim(), {\n system,\n thinking: enableThinking,\n maxTokens: images.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: images.length > 0 ? 
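> Editorial note: because `autoLoad` defaults to false, the hook above queues the first message and loads the model on demand; the exposed `load()` lets you warm the model earlier. A sketch:

```tsx
// Sketch: lazy loading. The first submit also triggers a load and queues
// the message, but load() can warm the model earlier (e.g. on hover).
function LazyChat() {
  const { load, isReady, input, setInput, handleSubmit } = useChat();
  return (
    <form onSubmit={handleSubmit} onMouseEnter={load}>
      <input value={input} onChange={(e) => setInput(e.target.value)} />
      <button type="submit">{isReady ? "Send" : "Send (loads model first)"}</button>
    </form>
  );
}
```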
images : undefined,\n });\n },\n [isGenerating, system, enableThinking, maxTokens, temperature, load],\n );\n\n const handleSubmit = useCallback(\n (e?: { preventDefault?: () => void }) => {\n e?.preventDefault?.();\n\n if (!input.trim() || isGenerating) {\n return;\n }\n\n // Send with any attached images\n sendMessageWithImages(input, attachedImages);\n setInput(\"\");\n setAttachedImages([]);\n },\n [input, isGenerating, attachedImages, sendMessageWithImages],\n );\n\n // Convenience method to send with specific images\n const sendWithImages = useCallback(\n (text: string, images: string[]) => {\n sendMessageWithImages(text, images);\n },\n [sendMessageWithImages],\n );\n\n // Process pending message when worker becomes ready\n useEffect(() => {\n if (isReady && pendingMessageRef.current && workerRef.current) {\n const pendingContent = pendingMessageRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingMessageRef.current = null;\n pendingImagesRef.current = [];\n setIsGenerating(true);\n workerRef.current.generate(pendingContent, {\n system,\n thinking: enableThinking,\n maxTokens: pendingImages.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: pendingImages.length > 0 ? pendingImages : undefined,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n const clear = useCallback(() => {\n workerRef.current?.reset();\n setMessages([]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n setAttachedImages([]);\n }, []);\n\n // Update last message with streaming content\n const displayMessages = messages.map((m: Message, i: number) => {\n if (i === messages.length - 1 && m.role === \"assistant\" && isGenerating) {\n return { ...m, content: currentResponse, thinking: thinking || undefined };\n }\n return m;\n });\n\n return {\n messages: displayMessages,\n input,\n setInput,\n handleSubmit,\n isLoading,\n loadingProgress,\n isGenerating,\n thinking,\n stop,\n clear,\n tps,\n isReady,\n error,\n load,\n attachedImages,\n attachImage,\n removeImage,\n clearImages,\n sendWithImages,\n };\n}\n\n/** Options for useCompletion hook */\nexport type UseCompletionOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Auto-load model on mount (default: false - loads on first complete() or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Options for single completion call */\nexport type CompleteOptions = {\n /** Image URLs or data URIs to analyze (for vision models) */\n images?: string[];\n};\n\n/** Return type for useCompletion hook */\nexport type UseCompletionReturn = {\n /** Generated completion */\n completion: string;\n /** Thinking content (if enabled) */\n thinking: string;\n /** Generate completion (optionally with images for vision models) */\n complete: (prompt: string, options?: CompleteOptions) => Promise<string>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating */\n isGenerating: boolean;\n /** Stop generation */\n stop: () => void;\n /** Current tokens per second */\n tps: number;\n /** 
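> Editorial note: the image helpers returned above (`attachImage`, `removeImage`, `sendWithImages`) are not shown in the hook's own example; a sketch with a vision model (image URLs are placeholders):

```tsx
// Sketch: attaching images to chat messages for a vision model.
function VisionChat() {
  const { messages, attachImage, sendWithImages } = useChat({ model: "ministral-3b" });

  return (
    <div>
      {messages.map((m) => (
        <div key={m.id}>{m.role}: {m.content}</div>
      ))}
      <button onClick={() => attachImage("https://example.com/cat.jpg")}>Attach</button>
      <button onClick={() => sendWithImages("What is in this image?", ["https://example.com/cat.jpg"])}>
        Ask about the image
      </button>
    </div>
  );
}
```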
Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n};\n\n/**\n * React hook for text completion with local LLM\n *\n * @example\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading, isGenerating } = useCompletion();\n *\n * if (isLoading) return <div>Loading...</div>;\n *\n * return (\n * <div>\n * <button onClick={() => complete(\"Write a haiku\")}>Generate</button>\n * <p>{completion}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useCompletion(options: UseCompletionOptions = {}): UseCompletionReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useCompletion requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [completion, setCompletion] = useState<string>(\"\");\n const [thinking, setThinking] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const resolveRef = useRef<((text: string) => void) | null>(null);\n const rejectRef = useRef<((err: Error) => void) | null>(null);\n const pendingPromptRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[] | undefined>(undefined);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on complete()\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCompletion((c: string) => c + token.text);\n }\n },\n onComplete: (result) => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n resolveRef.current?.(result.text);\n resolveRef.current = null;\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n const complete = useCallback(\n (prompt: string, completeOptions?: CompleteOptions): Promise<string> => {\n return new Promise((resolve, reject) => {\n setCompletion(\"\");\n setThinking(\"\");\n resolveRef.current = resolve;\n rejectRef.current = reject;\n\n // If worker not loaded, trigger load and queue the prompt\n if (!workerRef.current) {\n pendingPromptRef.current = prompt;\n pendingImagesRef.current = completeOptions?.images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(prompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: completeOptions?.images,\n });\n });\n },\n [system, enableThinking, maxTokens, temperature, load],\n );\n\n // Process pending prompt when worker becomes ready\n useEffect(() => {\n if (isReady && pendingPromptRef.current && workerRef.current) {\n const pendingPrompt = pendingPromptRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingPromptRef.current = null;\n pendingImagesRef.current = undefined;\n setIsGenerating(true);\n workerRef.current.generate(pendingPrompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: pendingImages,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n return {\n completion,\n thinking,\n complete,\n isLoading,\n loadingProgress,\n isGenerating,\n stop,\n tps,\n isReady,\n error,\n load,\n };\n}\n\n// ============================================\n// Text-to-Speech (useSpeech hook)\n// ============================================\n\n/** TTS loading progress */\nexport type TTSProgress = {\n status: \"idle\" | \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n error?: string;\n};\n\n/** Available TTS models */\nexport type TTSModelId = \"kokoro-82m\" | \"supertonic-66m\";\n\n/** Voice info for TTS models */\nexport type BrowserVoiceInfo = {\n id: string;\n name: string;\n gender: \"male\" | \"female\";\n language: string;\n description: string;\n};\n\n/** Kokoro voice definitions (24kHz, high quality) */\nconst KOKORO_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: 
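> Editorial note: one addition to the hook's own example above: `complete()` also takes `CompleteOptions.images` for vision models. A sketch (the URL is a placeholder):

```tsx
// Sketch: one-shot vision completion via CompleteOptions.images.
function Describe() {
  const { complete, completion } = useCompletion({ model: "ministral-3b" });
  return (
    <div>
      <button onClick={() => complete("Describe this screenshot.", { images: ["https://example.com/shot.png"] })}>
        Describe
      </button>
      <p>{completion}</p>
    </div>
  );
}
```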
\"af_heart\",\n name: \"Heart\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, highest quality (Grade A)\",\n },\n {\n id: \"af_bella\",\n name: \"Bella\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, warm and friendly (Grade A-)\",\n },\n {\n id: \"af_nicole\",\n name: \"Nicole\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, soft and gentle\",\n },\n {\n id: \"af_sarah\",\n name: \"Sarah\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, clear and professional\",\n },\n {\n id: \"af_sky\",\n name: \"Sky\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, young and energetic\",\n },\n {\n id: \"af_alloy\",\n name: \"Alloy\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_aoede\",\n name: \"Aoede\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, mythical\",\n },\n {\n id: \"af_jessica\",\n name: \"Jessica\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_kore\",\n name: \"Kore\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_nova\",\n name: \"Nova\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_river\",\n name: \"River\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"am_fenrir\",\n name: \"Fenrir\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, best quality\",\n },\n {\n id: \"am_michael\",\n name: \"Michael\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, warm and friendly\",\n },\n { id: \"am_adam\", name: \"Adam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_echo\", name: \"Echo\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_eric\", name: \"Eric\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_liam\", name: \"Liam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_onyx\", name: \"Onyx\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_puck\", name: \"Puck\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n {\n id: \"am_santa\",\n name: \"Santa\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, festive\",\n },\n {\n id: \"bf_emma\",\n name: \"Emma\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, elegant and clear\",\n },\n {\n id: \"bf_isabella\",\n name: \"Isabella\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, sophisticated\",\n },\n {\n id: \"bf_alice\",\n name: \"Alice\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bf_lily\",\n name: \"Lily\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bm_george\",\n name: \"George\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, distinguished\",\n },\n {\n id: \"bm_lewis\",\n name: \"Lewis\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, friendly\",\n },\n {\n id: \"bm_daniel\",\n name: \"Daniel\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British 
male\",\n },\n { id: \"bm_fable\", name: \"Fable\", gender: \"male\", language: \"en-gb\", description: \"British male\" },\n];\n\n/** Supertonic voice definitions (44.1kHz, faster) */\nconst SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: \"F1\",\n name: \"Female 1\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 1 - Clear and natural\",\n },\n {\n id: \"F2\",\n name: \"Female 2\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 2 - Warm and expressive\",\n },\n {\n id: \"M1\",\n name: \"Male 1\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 1 - Deep and confident\",\n },\n {\n id: \"M2\",\n name: \"Male 2\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 2 - Friendly and casual\",\n },\n];\n\n/** TTS model configuration */\nconst TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n> = {\n \"kokoro-82m\": {\n repo: \"onnx-community/Kokoro-82M-v1.0-ONNX\",\n defaultVoice: \"af_heart\",\n sampleRate: 24000,\n voices: KOKORO_BROWSER_VOICES,\n },\n \"supertonic-66m\": {\n repo: \"onnx-community/Supertonic-TTS-ONNX\",\n defaultVoice: \"F1\",\n sampleRate: 44100,\n voices: SUPERTONIC_BROWSER_VOICES,\n },\n};\n\n/** Options for useSpeech hook */\nexport type UseSpeechOptions = {\n /** TTS model to use (default: \"kokoro-82m\") */\n model?: TTSModelId;\n /** Default voice ID (default: model's default voice) */\n voice?: string;\n /** Speech speed multiplier (default: 1.0) */\n speed?: number;\n /** Auto-load TTS model on mount (default: false) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n /** Called when speech starts */\n onStart?: () => void;\n /** Called when speech ends */\n onEnd?: () => void;\n};\n\n/** Return type for useSpeech hook */\nexport type UseSpeechReturn = {\n /** Speak text aloud */\n speak: (text: string, options?: { voice?: string; speed?: number }) => Promise<void>;\n /** Stop current speech */\n stop: () => void;\n /** Whether TTS model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: TTSProgress | null;\n /** Whether currently speaking */\n isSpeaking: boolean;\n /** Whether TTS model is ready */\n isReady: boolean;\n /** Load the TTS model */\n load: () => void;\n /** Error message if any */\n error: string | null;\n /** List available voices for current model */\n listVoices: () => BrowserVoiceInfo[];\n /** Current voice ID */\n currentVoice: string;\n /** Set current voice */\n setVoice: (voiceId: string) => void;\n /** Current speed */\n currentSpeed: number;\n /** Set speed */\n setSpeed: (speed: number) => void;\n /** Current TTS model ID */\n currentModel: TTSModelId;\n /** Sample rate for current model (24000 for Kokoro, 44100 for Supertonic) */\n sampleRate: number;\n};\n\n// ============================================\n// TTS Worker (inline, loads from CDN)\n// ============================================\nconst TTS_WORKER_CODE = `\n // TTS Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let ttsInstance = null;\n let modelType = null; // \"supertonic\" or \"kokoro\"\n let voiceEmbeddings = new Map();\n let kokoroTTS = null;\n \n self.onmessage = async (e) => {\n 
const { type, payload } = e.data;\n \n if (type === \"load\") {\n try {\n const { modelId, repo, voices } = payload;\n modelType = modelId === \"supertonic-66m\" ? \"supertonic\" : \"kokoro\";\n \n if (modelType === \"supertonic\") {\n // Load Supertonic using transformers.js pipeline\n ttsInstance = await pipeline(\"text-to-speech\", repo, {\n device: \"webgpu\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n // Load voice embeddings\n for (const voice of voices) {\n try {\n const voiceUrl = \"https://huggingface.co/\" + repo + \"/resolve/main/voices/\" + voice.id + \".bin\";\n const response = await fetch(voiceUrl);\n if (response.ok) {\n const buffer = await response.arrayBuffer();\n voiceEmbeddings.set(voice.id, new Float32Array(buffer));\n }\n } catch (err) {\n console.warn(\"Failed to load voice:\", voice.id, err);\n }\n }\n \n // Warmup\n try {\n await ttsInstance(\"Hello\", {\n speaker_embeddings: new Float32Array(1 * 101 * 128),\n num_inference_steps: 1,\n speed: 1.0,\n });\n } catch (e) {\n console.warn(\"Warmup failed:\", e);\n }\n } else {\n // Load Kokoro using kokoro-js from CDN\n const kokoroModule = await import(\"https://cdn.jsdelivr.net/npm/kokoro-js@1.2.1/dist/kokoro.web.min.js\");\n const { KokoroTTS } = kokoroModule;\n \n kokoroTTS = await KokoroTTS.from_pretrained(repo, {\n dtype: \"fp32\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n }\n \n self.postMessage({ type: \"ready\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"generate\") {\n try {\n const { text, voice, speed } = payload;\n let audio, sampleRate;\n \n if (modelType === \"supertonic\") {\n let embedding = voiceEmbeddings.get(voice);\n if (!embedding) {\n embedding = new Float32Array(101 * 128).fill(0.1);\n }\n \n const result = await ttsInstance(text, {\n speaker_embeddings: embedding,\n speed: speed || 1.0,\n });\n \n audio = result.audio;\n sampleRate = result.sampling_rate;\n } else {\n const result = await kokoroTTS.generate(text, {\n voice: voice,\n speed: speed || 1.0,\n });\n \n audio = result.audio;\n sampleRate = result.sampling_rate;\n }\n \n // Transfer audio data back\n self.postMessage(\n { type: \"audio\", payload: { audio: audio, sampleRate: sampleRate } },\n [audio.buffer]\n );\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n`;\n\n/** Create TTS worker instance */\nfunction createTTSWorker(): Worker {\n const blob = new Blob([TTS_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/**\n * React hook for text-to-speech with Web Audio API playback\n *\n * Supports both Kokoro (24kHz, high quality) and Supertonic (44.1kHz, faster).\n *\n * @example\n * ```tsx\n * import { useSpeech } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * // Default: Kokoro TTS\n * const { speak, stop, isLoading, isSpeaking, listVoices, setVoice } = useSpeech();\n *\n * // Or use Supertonic (44.1kHz, faster)\n * // const { speak, listVoices } = useSpeech({ model: \"supertonic-66m\" });\n *\n * if (isLoading) return <div>Loading TTS...</div>;\n *\n * return (\n * <div>\n * <select onChange={e => setVoice(e.target.value)}>\n * {listVoices().map(v => (\n * <option key={v.id} 
value={v.id}>{v.name}</option>\n * ))}\n * </select>\n * <button onClick={() => speak(\"Hello world!\")}>\n * {isSpeaking ? \"Speaking...\" : \"Speak\"}\n * </button>\n * {isSpeaking && <button onClick={stop}>Stop</button>}\n * </div>\n * );\n * }\n * ```\n */\nexport function useSpeech(options: UseSpeechOptions = {}): UseSpeechReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useSpeech requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model: modelId = \"kokoro-82m\",\n speed: defaultSpeed = 1.0,\n autoLoad = false,\n onReady,\n onError,\n onStart,\n onEnd,\n } = options;\n\n // Get model config\n const modelConfig = TTS_MODELS[modelId];\n const defaultVoice = options.voice || modelConfig.defaultVoice;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<TTSProgress | null>(null);\n const [isSpeaking, setIsSpeaking] = useState<boolean>(false);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [currentVoice, setCurrentVoice] = useState<string>(defaultVoice);\n const [currentSpeed, setCurrentSpeed] = useState<number>(defaultSpeed);\n\n const workerRef = useRef<Worker | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const modelIdRef = useRef<TTSModelId>(modelId);\n const pendingSpeakRef = useRef<{ text: string; voice: string; speed: number } | null>(null);\n\n // Voice list based on selected model\n const listVoices = useCallback((): BrowserVoiceInfo[] => {\n return modelConfig.voices;\n }, [modelConfig.voices]);\n\n // Load function\n const load = useCallback(() => {\n if (workerRef.current || isLoading) return;\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize TTS worker\n useEffect(() => {\n if (!shouldLoad) return;\n\n mountedRef.current = true;\n modelIdRef.current = modelId;\n\n const config = TTS_MODELS[modelId];\n\n setLoadingProgress({\n status: \"loading\",\n message: `Loading ${modelId === \"supertonic-66m\" ? 
\"Supertonic\" : \"Kokoro\"} TTS...`,\n });\n\n // Create worker\n const worker = createTTSWorker();\n workerRef.current = worker;\n\n // Handle worker messages\n worker.onmessage = (e: MessageEvent) => {\n if (!mountedRef.current) return;\n\n const { type, payload } = e.data;\n\n if (type === \"progress\" && payload.status === \"progress\" && payload.file) {\n setLoadingProgress({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round(payload.progress || 0),\n });\n }\n\n if (type === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n setLoadingProgress({ status: \"ready\" });\n onReady?.();\n\n // Process pending speak request\n if (pendingSpeakRef.current) {\n const { text, voice, speed } = pendingSpeakRef.current;\n pendingSpeakRef.current = null;\n worker.postMessage({ type: \"generate\", payload: { text, voice, speed } });\n }\n }\n\n if (type === \"audio\") {\n // Play audio using Web Audio API\n const { audio, sampleRate } = payload;\n playAudioData(audio, sampleRate);\n }\n\n if (type === \"error\") {\n const errorMsg = payload;\n setError(errorMsg);\n setIsLoading(false);\n setIsSpeaking(false);\n setLoadingProgress({ status: \"error\", error: errorMsg });\n onError?.(errorMsg);\n }\n };\n\n worker.onerror = (err) => {\n if (!mountedRef.current) return;\n const errorMsg = err.message || \"Worker error\";\n setError(errorMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", error: errorMsg });\n onError?.(errorMsg);\n };\n\n // Send load message\n worker.postMessage({\n type: \"load\",\n payload: {\n modelId,\n repo: config.repo,\n voices: config.voices,\n },\n });\n\n return () => {\n mountedRef.current = false;\n worker.terminate();\n workerRef.current = null;\n };\n }, [shouldLoad, modelId, onReady, onError]);\n\n // Helper to play audio data\n const playAudioData = useCallback(\n async (audio: Float32Array, sampleRate: number) => {\n try {\n // Create or reuse AudioContext\n if (!audioContextRef.current || audioContextRef.current.state === \"closed\") {\n audioContextRef.current = new AudioContext({ sampleRate });\n }\n const ctx = audioContextRef.current;\n\n if (ctx.state === \"suspended\") {\n await ctx.resume();\n }\n\n // Create buffer and play\n const audioBuffer = ctx.createBuffer(1, audio.length, sampleRate);\n audioBuffer.copyToChannel(new Float32Array(audio), 0);\n\n const sourceNode = ctx.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(ctx.destination);\n\n sourceNodeRef.current = sourceNode;\n\n sourceNode.onended = () => {\n if (!mountedRef.current) return;\n setIsSpeaking(false);\n onEnd?.();\n };\n\n sourceNode.start();\n } catch (err) {\n setIsSpeaking(false);\n const errorMsg = err instanceof Error ? 
err.message : String(err);\n setError(errorMsg);\n onError?.(errorMsg);\n }\n },\n [onEnd, onError],\n );\n\n // Cleanup AudioContext only on unmount (not on re-renders)\n useEffect(() => {\n return () => {\n try {\n sourceNodeRef.current?.stop();\n } catch {\n // Ignore if already stopped\n }\n try {\n if (audioContextRef.current && audioContextRef.current.state !== \"closed\") {\n audioContextRef.current.close();\n }\n } catch {\n // Ignore if already closed\n }\n };\n }, []);\n\n // Speak function - sends message to worker\n const speak = useCallback(\n async (text: string, opts?: { voice?: string; speed?: number }) => {\n const voice = opts?.voice || currentVoice;\n const speed = opts?.speed || currentSpeed;\n\n // Validate voice\n const voiceInfo = modelConfig.voices.find((v) => v.id === voice);\n if (!voiceInfo) {\n const validVoices = modelConfig.voices.map((v) => v.id).join(\", \");\n const errorMsg = `Voice \"${voice}\" not found. Should be one of: ${validVoices}.`;\n setError(errorMsg);\n onError?.(errorMsg);\n return;\n }\n\n // Auto-load if not loaded\n if (!workerRef.current) {\n // Queue speak for after load\n pendingSpeakRef.current = { text, voice, speed };\n load();\n return;\n }\n\n if (!isReady) {\n // Queue speak for after ready\n pendingSpeakRef.current = { text, voice, speed };\n return;\n }\n\n setIsSpeaking(true);\n onStart?.();\n\n // Send generate message to worker\n workerRef.current.postMessage({\n type: \"generate\",\n payload: { text, voice, speed },\n });\n },\n [currentVoice, currentSpeed, modelConfig.voices, load, isReady, onStart, onError],\n );\n\n // Stop function\n const stop = useCallback(() => {\n if (sourceNodeRef.current) {\n sourceNodeRef.current.stop();\n sourceNodeRef.current.disconnect();\n sourceNodeRef.current = null;\n }\n setIsSpeaking(false);\n }, []);\n\n // Voice setter with validation\n const setVoice = useCallback(\n (voiceId: string) => {\n const voiceInfo = modelConfig.voices.find((v) => v.id === voiceId);\n if (voiceInfo) {\n setCurrentVoice(voiceId);\n } else {\n console.warn(\n `Voice \"${voiceId}\" not valid for ${modelId}. 
Available: ${modelConfig.voices.map((v) => v.id).join(\", \")}`,\n );\n }\n },\n [modelConfig.voices, modelId],\n );\n\n // Speed setter\n const setSpeed = useCallback((speed: number) => {\n setCurrentSpeed(Math.max(0.5, Math.min(2.0, speed)));\n }, []);\n\n return {\n speak,\n stop,\n isLoading,\n loadingProgress,\n isSpeaking,\n isReady,\n load,\n error,\n listVoices,\n currentVoice,\n setVoice,\n currentSpeed,\n setSpeed,\n currentModel: modelId,\n sampleRate: modelConfig.sampleRate,\n };\n}\n\n// ============================================\n// Audio Playback Utilities\n// ============================================\n\n/**\n * Play audio from Float32Array using Web Audio API\n *\n * @example\n * ```ts\n * import { playAudio } from \"@tryhamster/gerbil/browser\";\n *\n * const audio = new Float32Array([...]); // TTS output\n * const controller = await playAudio(audio, 24000);\n *\n * // Stop playback\n * controller.stop();\n * ```\n */\nexport async function playAudio(\n audio: Float32Array,\n sampleRate: number = 24000,\n): Promise<{ stop: () => void; onEnded: Promise<void> }> {\n const audioContext = new AudioContext();\n\n // Resume if suspended\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n\n const audioBuffer = audioContext.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n audioBuffer.copyToChannel(channelData, 0);\n\n const sourceNode = audioContext.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(audioContext.destination);\n\n const onEnded = new Promise<void>((resolve) => {\n sourceNode.onended = () => {\n audioContext.close();\n resolve();\n };\n });\n\n sourceNode.start();\n\n return {\n stop: () => {\n sourceNode.stop();\n audioContext.close();\n },\n onEnded,\n };\n}\n\n/**\n * Create a reusable audio player for streaming TTS\n *\n * @example\n * ```ts\n * import { createAudioPlayer } from \"@tryhamster/gerbil/browser\";\n *\n * const player = createAudioPlayer(24000);\n *\n * // Queue audio chunks as they arrive\n * player.queue(chunk1);\n * player.queue(chunk2);\n *\n * // Stop and clear\n * player.stop();\n * ```\n */\nexport function createAudioPlayer(sampleRate: number = 24000): {\n queue: (audio: Float32Array) => void;\n stop: () => void;\n isPlaying: () => boolean;\n} {\n let audioContext: AudioContext | null = null;\n let nextStartTime = 0;\n let isActive = false;\n\n const ensureContext = async () => {\n if (!audioContext) {\n audioContext = new AudioContext();\n }\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n return audioContext;\n };\n\n return {\n queue: async (audio: Float32Array) => {\n const ctx = await ensureContext();\n isActive = true;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n\n // Schedule seamlessly after previous chunk\n const startTime = Math.max(ctx.currentTime, nextStartTime);\n source.start(startTime);\n nextStartTime = startTime + buffer.duration;\n\n source.onended = () => {\n if (ctx.currentTime >= nextStartTime - 0.1) {\n isActive = false;\n }\n };\n },\n\n stop: () => {\n isActive = false;\n nextStartTime = 0;\n if (audioContext) {\n audioContext.close();\n audioContext = null;\n }\n },\n\n isPlaying: () => isActive,\n };\n}\n\n// ============================================\n// Voice 
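> Editorial note: two behaviors worth calling out from the setters above: `setSpeed` clamps to the 0.5 to 2.0 range, and `speak()`/`setVoice` validate the voice ID against the current model's voice list. A sketch:

```ts
// Sketch: speed clamping and per-model voice validation in useSpeech.
const { speak, setSpeed, setVoice, listVoices } = useSpeech({ model: "kokoro-82m" });

setSpeed(3); // stored as 2.0 (clamped to [0.5, 2.0])
setVoice("F1"); // warns: "F1" belongs to supertonic-66m, not kokoro-82m

console.log(listVoices().map((v) => v.id)); // ["af_heart", "af_bella", ...]
await speak("Hello!", { voice: "af_heart", speed: 1.2 });
```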
Input Hook (STT)\n// ============================================\n\n// ============================================\n// STT Worker (inline, loads from CDN)\n// ============================================\nconst STT_WORKER_CODE = `\n // STT Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let sttPipeline = null;\n \n self.onmessage = async (e) => {\n const { type, payload } = e.data;\n \n if (type === \"load\") {\n try {\n const { model } = payload;\n \n // Load Whisper model\n sttPipeline = await pipeline(\"automatic-speech-recognition\", model, {\n device: \"webgpu\",\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n self.postMessage({ type: \"ready\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"transcribe\") {\n try {\n const { audio } = payload;\n \n // Run transcription\n const result = await sttPipeline(audio, {\n return_timestamps: false,\n });\n \n self.postMessage({ type: \"transcript\", payload: result.text || \"\" });\n } catch (err) {\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n`;\n\n/** Create STT worker instance */\nfunction createSTTWorker(): Worker {\n const blob = new Blob([STT_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/**\n * Progress info for STT loading\n */\nexport type STTProgress = {\n status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n message?: string;\n progress?: number;\n file?: string;\n};\n\n/**\n * Options for useVoiceInput hook\n */\nexport type UseVoiceInputOptions = {\n /** STT model ID (default: whisper-tiny.en) */\n model?: string;\n /** Auto-load model on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when model is ready */\n onReady?: () => void;\n /** Callback when transcription completes (or for each chunk in streaming mode) */\n onTranscript?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n /** Callback during loading */\n onProgress?: (progress: STTProgress) => void;\n /** Enable streaming transcription - transcribes audio in chunks as you speak */\n streaming?: boolean;\n /** Chunk duration in ms for streaming mode (default: 3000 = 3 seconds) */\n chunkDuration?: number;\n /** Callback for each streaming chunk with partial transcript */\n onChunk?: (text: string, chunkIndex: number) => void;\n};\n\n/**\n * Return type for useVoiceInput hook\n */\nexport type UseVoiceInputReturn = {\n /** Start recording audio */\n startRecording: () => Promise<void>;\n /** Stop recording and transcribe */\n stopRecording: () => Promise<string>;\n /** Cancel recording without transcribing */\n cancelRecording: () => void;\n /** Transcribe raw audio data (Float32Array at 16kHz) */\n transcribe: (audio: Float32Array) => Promise<string>;\n /** Whether currently recording */\n isRecording: boolean;\n /** Whether transcribing */\n isTranscribing: boolean;\n /** Whether model is loading */\n isLoading: boolean;\n /** Whether model is ready */\n isReady: boolean;\n /** Latest transcription result (full transcript in streaming mode) */\n transcript: string;\n 
/** Current streaming chunk being transcribed (streaming mode only) */\n streamingChunk: string;\n /** Number of chunks transcribed so far (streaming mode only) */\n chunkCount: number;\n /** Loading progress */\n loadingProgress: STTProgress | null;\n /** Error message */\n error: string | null;\n /** Manually load the model */\n load: () => void;\n};\n\n/**\n * React hook for voice input with browser microphone\n *\n * Uses MediaRecorder to capture audio and Whisper for transcription.\n * Supports both one-shot and streaming transcription modes.\n *\n * @example Basic usage (one-shot)\n * ```tsx\n * function VoiceInput() {\n * const { startRecording, stopRecording, isRecording, transcript } = useVoiceInput({\n * onTranscript: (text) => console.log(\"User said:\", text),\n * });\n *\n * return (\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? \"Stop\" : \"Record\"}\n * </button>\n * );\n * }\n * ```\n *\n * @example Streaming transcription (real-time)\n * ```tsx\n * function LiveTranscription() {\n * const { startRecording, stopRecording, isRecording, transcript, streamingChunk } = useVoiceInput({\n * streaming: true, // Enable streaming mode\n * chunkDuration: 1500, // Transcribe every 1.5 seconds (default)\n * onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),\n * });\n *\n * return (\n * <div>\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? \"Stop\" : \"Start Live Transcription\"}\n * </button>\n * <p>Current chunk: {streamingChunk}</p>\n * <p>Full transcript: {transcript}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceInput(options: UseVoiceInputOptions = {}): UseVoiceInputReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceInput requires React. 
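Both recorder-based examples above go through the microphone; the returned transcribe function also accepts pre-captured audio directly, as a Float32Array of 16 kHz mono samples. A minimal sketch, where TranscribeBuffer and savedAudio are illustrative names:

```tsx
import { useVoiceInput } from "@tryhamster/gerbil/browser";

function TranscribeBuffer({ savedAudio }: { savedAudio: Float32Array }) {
  const { transcribe, isReady } = useVoiceInput({ autoLoad: true });

  return (
    <button
      disabled={!isReady}
      onClick={async () => {
        // Resolves with the transcript (Whisper blank-audio artifacts filtered out)
        const text = await transcribe(savedAudio);
        console.log("Transcript:", text);
      }}
    >
      Transcribe buffer
    </button>
  );
}
```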
Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"whisper-tiny.en\",\n autoLoad = false,\n onReady,\n onTranscript,\n onError,\n onProgress,\n streaming = false,\n chunkDuration = 1500, // Transcribe every 1.5 seconds for near real-time\n onChunk,\n } = options;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<STTProgress | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [isRecording, setIsRecording] = useState<boolean>(false);\n const [isTranscribing, setIsTranscribing] = useState<boolean>(false);\n const [transcript, setTranscript] = useState<string>(\"\");\n const [streamingChunk, setStreamingChunk] = useState<string>(\"\");\n const [chunkCount, setChunkCount] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<Worker | null>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const mountedRef = useRef<boolean>(true);\n const streamingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);\n const pendingChunksRef = useRef<Blob[]>([]);\n const fullTranscriptRef = useRef<string>(\"\");\n const transcribeResolveRef = useRef<((text: string) => void) | null>(null);\n const transcribeRejectRef = useRef<((err: Error) => void) | null>(null);\n\n // Resolve model ID to HuggingFace path\n const resolveSTTModel = (modelId: string): string => {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n };\n\n // Load the STT model via worker\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n mountedRef.current = true;\n\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading STT model...\" });\n onProgress?.({ status: \"loading\", message: \"Loading STT model...\" });\n\n // Create worker\n const worker = createSTTWorker();\n workerRef.current = worker;\n\n // Handle worker messages\n worker.onmessage = (e: MessageEvent) => {\n if (!mountedRef.current) return;\n\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n const progress: STTProgress = {\n status: payload.progress !== undefined ? 
\"downloading\" : \"loading\",\n message: payload.status,\n progress: payload.progress,\n file: payload.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n }\n\n if (type === \"ready\") {\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n }\n\n if (type === \"transcript\") {\n const text = payload;\n setIsTranscribing(false);\n if (transcribeResolveRef.current) {\n transcribeResolveRef.current(text);\n transcribeResolveRef.current = null;\n transcribeRejectRef.current = null;\n }\n }\n\n if (type === \"error\") {\n const errMsg = payload;\n setError(errMsg);\n setIsLoading(false);\n setIsTranscribing(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n if (transcribeRejectRef.current) {\n transcribeRejectRef.current(new Error(errMsg));\n transcribeResolveRef.current = null;\n transcribeRejectRef.current = null;\n }\n }\n };\n\n worker.onerror = (err) => {\n if (!mountedRef.current) return;\n const errMsg = err.message || \"Worker error\";\n setError(errMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n };\n\n // Send load message\n worker.postMessage({\n type: \"load\",\n payload: { model: resolveSTTModel(model) },\n });\n\n return () => {\n mountedRef.current = false;\n worker.terminate();\n workerRef.current = null;\n };\n }, [shouldLoad, isReady, model, onReady, onError, onProgress]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n if (workerRef.current) {\n workerRef.current.terminate();\n workerRef.current = null;\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n };\n }, []);\n\n // Manual load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert audio blob to Float32Array at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n\n // Get mono channel\n const channelData = audioBuffer.getChannelData(0);\n\n // Resample if needed\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Transcribe audio via worker\n const transcribe = useCallback(\n async (audio: Float32Array): Promise<string> => {\n if (!workerRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n throw new Error(\"STT model not loaded. 
Loading now, please try again.\");\n }\n throw new Error(\"STT model not loaded\");\n }\n\n if (!isReady) {\n throw new Error(\"STT model still loading\");\n }\n\n setIsTranscribing(true);\n\n return new Promise((resolve, reject) => {\n transcribeResolveRef.current = (text: string) => {\n // Filter out Whisper artifacts\n let filtered = text.trim();\n if (\n filtered === \"[BLANK_AUDIO]\" ||\n filtered === \"(blank audio)\" ||\n filtered === \"[BLANK AUDIO]\"\n ) {\n filtered = \"\";\n }\n setTranscript(filtered);\n onTranscript?.(filtered);\n resolve(filtered);\n };\n transcribeRejectRef.current = reject;\n\n // Send audio to worker (transfer buffer for performance)\n const audioArray = new Float32Array(audio);\n workerRef.current!.postMessage({ type: \"transcribe\", payload: { audio: audioArray } }, [\n audioArray.buffer,\n ]);\n });\n },\n [shouldLoad, isReady, onTranscript],\n );\n\n // Track how many samples we've processed for streaming\n const processedSamplesRef = useRef<number>(0);\n\n // Transcribe a chunk of audio (for streaming mode)\n // Uses audioChunksRef (all chunks) to ensure valid WebM container\n const transcribeChunk = useCallback(\n async (chunkIdx: number): Promise<string> => {\n if (!workerRef.current || !isReady || audioChunksRef.current.length === 0) return \"\";\n\n try {\n // Create blob from ALL chunks (needed for valid WebM header)\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n const audioData = await blobToFloat32(audioBlob);\n\n // Calculate new samples since last transcription\n const newSamplesStart = processedSamplesRef.current;\n const totalSamples = audioData.length;\n\n // Skip if no new audio (< 0.5 seconds = 8000 samples at 16kHz)\n if (totalSamples - newSamplesStart < 8000) return \"\";\n\n // Extract only the new portion of audio\n const newAudio = audioData.slice(newSamplesStart);\n\n // Update processed count\n processedSamplesRef.current = totalSamples;\n\n // Use transcribe function which handles worker communication\n const text = await transcribe(newAudio);\n\n if (text && mountedRef.current) {\n setStreamingChunk(text);\n onChunk?.(text, chunkIdx);\n }\n\n return text;\n } catch {\n return \"\";\n }\n },\n [blobToFloat32, isReady, transcribe, onChunk],\n );\n\n // Start recording\n const startRecording = useCallback(async () => {\n if (isRecording) return;\n\n try {\n // For streaming mode, ensure STT model is loaded first\n if (streaming && !isReady) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for worker to be ready\n await new Promise<void>((resolve, reject) => {\n const checkReady = setInterval(() => {\n if (isReady && workerRef.current) {\n clearInterval(checkReady);\n resolve();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n reject(new Error(\"Timeout waiting for STT model\"));\n }, 60000);\n });\n }\n\n // Request microphone permission\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: {\n sampleRate: 16000,\n channelCount: 1,\n echoCancellation: true,\n noiseSuppression: true,\n },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n fullTranscriptRef.current = \"\";\n processedSamplesRef.current = 0;\n setTranscript(\"\");\n setStreamingChunk(\"\");\n setChunkCount(0);\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n if 
(streaming) {\n pendingChunksRef.current.push(event.data);\n }\n }\n };\n\n mediaRecorder.start(100); // Collect data every 100ms\n setIsRecording(true);\n setError(null);\n\n // If streaming mode, set up recursive transcription loop\n if (streaming && isReady && workerRef.current) {\n let chunkIdx = 0;\n let shouldContinue = true;\n\n // Use recursive setTimeout instead of setInterval to avoid timing issues\n // with heavy WebGPU/WASM operations\n const processNextChunk = async () => {\n if (!shouldContinue || !mountedRef.current) {\n return;\n }\n\n const numPending = pendingChunksRef.current.length;\n\n // Check if we have new audio to process\n if (numPending > 0) {\n // Clear pending counter (we'll process via audioChunksRef which has all data)\n pendingChunksRef.current = [];\n\n try {\n setIsTranscribing(true);\n const chunkText = await transcribeChunk(chunkIdx);\n\n if (chunkText && mountedRef.current) {\n chunkIdx++;\n setChunkCount(chunkIdx);\n\n // Append to full transcript using functional update\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? \" \" : \"\") + chunkText;\n fullTranscriptRef.current = newTranscript;\n onTranscript?.(newTranscript);\n return newTranscript;\n });\n }\n } catch (e) {\n console.error(\"[useVoiceInput] Chunk transcription error:\", e);\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n // Schedule next check if still running\n if (shouldContinue && mountedRef.current) {\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n }\n };\n\n // Start the loop\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n\n // Store a way to stop the loop\n (streamingIntervalRef as any)._stop = () => {\n shouldContinue = false;\n };\n }\n } catch (e: any) {\n const errMsg = e.message || \"Failed to start recording\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [\n isRecording,\n streaming,\n shouldLoad,\n model,\n chunkDuration,\n transcribeChunk,\n onTranscript,\n onError,\n onProgress,\n onReady,\n ]);\n\n // Stop recording and transcribe\n const stopRecording = useCallback(async (): Promise<string> => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n return new Promise((resolve, reject) => {\n if (!mediaRecorderRef.current || !isRecording) {\n reject(new Error(\"Not recording\"));\n return;\n }\n\n const mediaRecorder = mediaRecorderRef.current;\n\n mediaRecorder.onstop = async () => {\n // Stop all tracks\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n setIsRecording(false);\n\n // In streaming mode, process any remaining chunks and return full transcript\n if (streaming) {\n // Process any remaining audio\n if (audioChunksRef.current.length > 0 && processedSamplesRef.current > 0) {\n setIsTranscribing(true);\n pendingChunksRef.current = [];\n\n try {\n const finalChunkText = await transcribeChunk(chunkCount);\n if (finalChunkText && mountedRef.current) {\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
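The loop above leans on one constraint: MediaRecorder chunks after the first are not independently decodable WebM, so each tick re-decodes the whole recording and only transcribes samples past a high-water mark. The pattern in isolation, with tick, decodeAll, and stt as illustrative names:

```ts
let processedSamples = 0; // high-water mark into the decoded 16 kHz signal

async function tick(
  decodeAll: () => Promise<Float32Array>, // decode all chunks recorded so far
  stt: (audio: Float32Array) => Promise<string>, // transcribe one slice
): Promise<string> {
  const all = await decodeAll();
  // Skip ticks with under 0.5 s of new audio (8000 samples at 16 kHz)
  if (all.length - processedSamples < 8000) return "";
  const fresh = all.slice(processedSamples);
  processedSamples = all.length;
  return stt(fresh);
}
```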
\" \" : \"\") + finalChunkText;\n fullTranscriptRef.current = newTranscript;\n return newTranscript;\n });\n }\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n const finalText = fullTranscriptRef.current;\n onTranscript?.(finalText);\n resolve(finalText);\n return;\n }\n\n // Non-streaming mode: transcribe entire recording\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // Ensure model is loaded\n if (!isReady || !workerRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for worker to be ready\n await new Promise<void>((res, rej) => {\n const checkReady = setInterval(() => {\n if (isReady && workerRef.current) {\n clearInterval(checkReady);\n res();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n rej(new Error(\"Timeout waiting for STT model\"));\n }, 30000);\n });\n }\n\n // Convert blob to Float32Array\n const audioData = await blobToFloat32(audioBlob);\n\n // Transcribe\n const text = await transcribe(audioData);\n resolve(text);\n } catch (e: any) {\n const errMsg = e.message || \"Transcription failed\";\n setError(errMsg);\n onError?.(errMsg);\n reject(e);\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n isRecording,\n streaming,\n chunkCount,\n shouldLoad,\n blobToFloat32,\n transcribe,\n transcribeChunk,\n onTranscript,\n onError,\n ]);\n\n // Cancel recording\n const cancelRecording = useCallback(() => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n if (mediaRecorderRef.current && isRecording) {\n mediaRecorderRef.current.stop();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n processedSamplesRef.current = 0;\n setIsRecording(false);\n }, [isRecording]);\n\n return {\n startRecording,\n stopRecording,\n cancelRecording,\n transcribe,\n isRecording,\n isTranscribing,\n isLoading,\n isReady,\n transcript,\n streamingChunk,\n chunkCount,\n loadingProgress,\n error,\n load,\n };\n}\n\n// ============================================\n// Voice Chat Hook (STT + LLM + TTS)\n// ============================================\n\n/**\n * Options for useVoiceChat hook\n */\nexport type UseVoiceChatOptions = {\n /** LLM model ID (default: qwen3-0.6b) */\n llmModel?: string;\n /** STT model ID (default: whisper-tiny.en) */\n sttModel?: string;\n /** TTS model ID (default: kokoro-82m, also supports supertonic-66m) */\n ttsModel?: TTSModelId;\n /** System prompt for LLM */\n system?: string;\n /** Enable thinking mode (default: false) */\n thinking?: boolean;\n /** TTS voice ID (default: model's default voice) */\n voice?: string;\n /** TTS speech speed (default: 1.0) */\n speed?: number;\n /** Auto-load all models on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when user speaks */\n onUserSpeak?: (text: string) => void;\n /** Callback when assistant responds */\n onAssistantSpeak?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/**\n * Message in voice chat\n */\nexport type VoiceChatMessage = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n audioUrl?: string;\n};\n\n/**\n * Return type for useVoiceChat hook\n */\nexport 
type UseVoiceChatReturn = {\n /** Messages in the conversation */\n messages: VoiceChatMessage[];\n /** Start recording user speech */\n startListening: () => Promise<void>;\n /** Stop recording and process (STT → LLM → TTS) */\n stopListening: () => Promise<void>;\n /** Cancel current operation */\n cancel: () => void;\n /** Clear conversation history */\n clear: () => void;\n /** Whether recording user speech */\n isListening: boolean;\n /** Whether processing (STT/LLM/TTS) */\n isProcessing: boolean;\n /** Whether assistant is speaking */\n isSpeaking: boolean;\n /** Current stage: idle, listening, transcribing, thinking, speaking */\n stage: \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\";\n /** Whether all models are loaded */\n isReady: boolean;\n /** Whether loading models */\n isLoading: boolean;\n /** Loading progress message */\n loadingMessage: string;\n /** Error message */\n error: string | null;\n /** Manually load all models */\n load: () => void;\n};\n\n/**\n * React hook for voice conversation with STT + LLM + TTS\n *\n * Complete voice-to-voice conversation loop:\n * 1. User presses button to speak\n * 2. Speech is transcribed (Whisper)\n * 3. LLM generates response\n * 4. Response is spoken aloud (Kokoro or Supertonic TTS)\n *\n * @example\n * ```tsx\n * function VoiceChat() {\n * const {\n * messages,\n * startListening,\n * stopListening,\n * isListening,\n * isSpeaking,\n * stage,\n * } = useVoiceChat({\n * system: \"You are a helpful voice assistant.\",\n * voice: \"af_bella\",\n * // Or use Supertonic for faster synthesis:\n * // ttsModel: \"supertonic-66m\",\n * // voice: \"F1\",\n * });\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <button\n * onMouseDown={startListening}\n * onMouseUp={stopListening}\n * >\n * {stage === \"idle\" ? \"🎤 Hold to Speak\" : stage}\n * </button>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceChat(options: UseVoiceChatOptions = {}): UseVoiceChatReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n // Get TTS model config for default voice\n const ttsModelId = options.ttsModel || \"kokoro-82m\";\n const ttsConfig = TTS_MODELS[ttsModelId];\n\n const {\n llmModel = \"qwen3-0.6b\",\n sttModel = \"whisper-tiny.en\",\n system = \"You are a helpful voice assistant. 
Keep responses brief and conversational.\",\n thinking = false,\n voice = ttsConfig.defaultVoice,\n speed = 1.0,\n autoLoad = false,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<VoiceChatMessage[]>([]);\n const [stage, setStage] = useState<\n \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\"\n >(\"idle\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingMessage, setLoadingMessage] = useState<string>(\"\");\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n // Refs for models and audio\n const llmWorkerRef = useRef<any>(null);\n const sttRef = useRef<any>(null);\n const ttsRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const cancelledRef = useRef<boolean>(false);\n\n // Computed states\n const isListening = stage === \"listening\";\n const isProcessing = stage === \"transcribing\" || stage === \"thinking\";\n const isSpeaking = stage === \"speaking\";\n\n // Resolve model ID to HuggingFace path for STT\n const resolveSTTModel = (modelId: string): string => {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n };\n\n // Load all models via workers\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModels = async () => {\n try {\n setIsLoading(true);\n setError(null);\n\n // Load STT worker\n setLoadingMessage(\"Loading speech recognition (Whisper)...\");\n const sttWorker = createSTTWorker();\n if (cancelled || !mountedRef.current) {\n sttWorker.terminate();\n return;\n }\n\n // Wait for STT worker to be ready\n await new Promise<void>((resolve, reject) => {\n sttWorker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"ready\") resolve();\n if (type === \"error\") reject(new Error(payload));\n if (type === \"progress\" && mountedRef.current) {\n setLoadingMessage(payload.status || \"Loading STT...\");\n }\n };\n sttWorker.onerror = (e) => reject(new Error(e.message));\n sttWorker.postMessage({\n type: \"load\",\n payload: { model: resolveSTTModel(sttModel) },\n });\n });\n if (cancelled || !mountedRef.current) {\n sttWorker.terminate();\n return;\n }\n sttRef.current = sttWorker;\n\n // Load LLM worker\n setLoadingMessage(\"Loading language model...\");\n const worker = await createGerbilWorker({\n modelId: llmModel,\n onProgress: (p) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.message || \"Loading LLM...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n worker.terminate();\n return;\n }\n llmWorkerRef.current = worker;\n\n // Load TTS worker\n const isSupertonic = ttsModelId === \"supertonic-66m\";\n setLoadingMessage(`Loading 
text-to-speech (${isSupertonic ? \"Supertonic\" : \"Kokoro\"})...`);\n\n const ttsWorker = createTTSWorker();\n if (cancelled || !mountedRef.current) {\n ttsWorker.terminate();\n return;\n }\n\n const ttsConfig = TTS_MODELS[ttsModelId];\n // Wait for TTS worker to be ready\n await new Promise<void>((resolve, reject) => {\n ttsWorker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"ready\") resolve();\n if (type === \"error\") reject(new Error(payload));\n if (type === \"progress\" && mountedRef.current) {\n setLoadingMessage(payload.status || \"Loading TTS...\");\n }\n };\n ttsWorker.onerror = (e) => reject(new Error(e.message));\n ttsWorker.postMessage({\n type: \"load\",\n payload: {\n modelId: ttsModelId,\n repo: ttsConfig.repo,\n voices: ttsConfig.voices,\n },\n });\n });\n if (cancelled || !mountedRef.current) {\n ttsWorker.terminate();\n return;\n }\n ttsRef.current = ttsWorker;\n\n setIsReady(true);\n setIsLoading(false);\n setLoadingMessage(\"Ready!\");\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load models\";\n setError(errMsg);\n setIsLoading(false);\n onError?.(errMsg);\n }\n };\n\n loadModels();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, llmModel, sttModel, ttsModelId, onError]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n llmWorkerRef.current?.terminate();\n sttRef.current?.terminate();\n ttsRef.current?.terminate();\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n audioContextRef.current?.close();\n };\n }, []);\n\n // Load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert blob to Float32 at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n const channelData = audioBuffer.getChannelData(0);\n\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Play audio through Web Audio API\n const playAudioBuffer = useCallback(\n async (audio: Float32Array, sampleRate: number): Promise<void> => {\n return new Promise((resolve) => {\n if (!audioContextRef.current) {\n audioContextRef.current = new AudioContext();\n }\n const ctx = audioContextRef.current;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n source.onended = () => {\n if (mountedRef.current) {\n resolve();\n }\n };\n source.start();\n sourceNodeRef.current = source;\n });\n },\n [],\n 
);\n\n // Start listening\n const startListening = useCallback(async () => {\n if (stage !== \"idle\") return;\n\n // Trigger load if not ready\n if (!isReady && !isLoading) {\n setShouldLoad(true);\n return;\n }\n\n cancelledRef.current = false;\n\n try {\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n }\n };\n\n mediaRecorder.start(100);\n setStage(\"listening\");\n setError(null);\n } catch (e: any) {\n const errMsg = e.message || \"Failed to access microphone\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [stage, isReady, isLoading, onError]);\n\n // Stop listening and process\n const stopListening = useCallback(async () => {\n if (stage !== \"listening\") return;\n\n const mediaRecorder = mediaRecorderRef.current;\n if (!mediaRecorder) return;\n\n return new Promise<void>((resolve) => {\n mediaRecorder.onstop = async () => {\n // Stop mic\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // STT via worker\n setStage(\"transcribing\");\n const audioData = await blobToFloat32(audioBlob);\n\n // Send to STT worker and wait for response\n let userText = await new Promise<string>((sttResolve, sttReject) => {\n const handler = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"transcript\") {\n sttRef.current?.removeEventListener(\"message\", handler);\n sttResolve(payload);\n }\n if (type === \"error\") {\n sttRef.current?.removeEventListener(\"message\", handler);\n sttReject(new Error(payload));\n }\n };\n sttRef.current?.addEventListener(\"message\", handler);\n const audioArray = new Float32Array(audioData);\n sttRef.current?.postMessage({ type: \"transcribe\", payload: { audio: audioArray } }, [\n audioArray.buffer,\n ]);\n });\n\n userText = userText.trim();\n\n // Filter out Whisper artifacts\n if (\n userText === \"[BLANK_AUDIO]\" ||\n userText === \"(blank audio)\" ||\n userText === \"[BLANK AUDIO]\"\n ) {\n userText = \"\";\n }\n\n if (cancelledRef.current || !userText) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add user message\n const userMsgId = `user-${Date.now()}`;\n setMessages((m) => [...m, { id: userMsgId, role: \"user\", content: userText }]);\n onUserSpeak?.(userText);\n\n // LLM\n setStage(\"thinking\");\n\n // Build conversation history\n const history = messages.map((m) => ({\n role: m.role as \"user\" | \"assistant\",\n content: m.content,\n }));\n history.push({ role: \"user\", content: userText });\n\n let responseText = \"\";\n let thinkingText = \"\";\n\n await llmWorkerRef.current.generate(userText, {\n system,\n thinking,\n history,\n onToken: (token: WorkerToken) => {\n if (cancelledRef.current) return;\n if (token.state === \"thinking\") {\n thinkingText += token.text;\n } else {\n responseText += token.text;\n }\n },\n });\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add assistant message\n const assistantMsgId = `assistant-${Date.now()}`;\n 
setMessages((m) => [\n ...m,\n {\n id: assistantMsgId,\n role: \"assistant\",\n content: responseText,\n thinking: thinkingText || undefined,\n },\n ]);\n onAssistantSpeak?.(responseText);\n\n // TTS via worker\n if (responseText.trim()) {\n setStage(\"speaking\");\n\n // Send to TTS worker and wait for response\n const ttsResult = await new Promise<{ audio: Float32Array; sampleRate: number }>(\n (ttsResolve, ttsReject) => {\n const handler = (e: MessageEvent) => {\n const { type, payload } = e.data;\n if (type === \"audio\") {\n ttsRef.current?.removeEventListener(\"message\", handler);\n ttsResolve({ audio: payload.audio, sampleRate: payload.sampleRate });\n }\n if (type === \"error\") {\n ttsRef.current?.removeEventListener(\"message\", handler);\n ttsReject(new Error(payload));\n }\n };\n ttsRef.current?.addEventListener(\"message\", handler);\n ttsRef.current?.postMessage({\n type: \"generate\",\n payload: { text: responseText, voice, speed },\n });\n },\n );\n\n if (!cancelledRef.current) {\n await playAudioBuffer(ttsResult.audio, ttsResult.sampleRate);\n }\n }\n\n setStage(\"idle\");\n resolve();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Processing failed\";\n setError(errMsg);\n setStage(\"idle\");\n onError?.(errMsg);\n resolve();\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n stage,\n messages,\n system,\n thinking,\n voice,\n speed,\n blobToFloat32,\n playAudioBuffer,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n ]);\n\n // Cancel\n const cancel = useCallback(() => {\n cancelledRef.current = true;\n\n if (mediaRecorderRef.current && stage === \"listening\") {\n mediaRecorderRef.current.stop();\n }\n\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (sourceNodeRef.current) {\n try {\n sourceNodeRef.current.stop();\n } catch {}\n }\n\n audioChunksRef.current = [];\n setStage(\"idle\");\n }, [stage]);\n\n // Clear messages\n const clear = useCallback(() => {\n setMessages([]);\n }, []);\n\n return {\n messages,\n startListening,\n stopListening,\n cancel,\n clear,\n isListening,\n isProcessing,\n isSpeaking,\n stage,\n isReady,\n isLoading,\n loadingMessage,\n error,\n load,\n };\n}\n\n// ============================================\n// Embedding Worker (inline, loads from CDN)\n// ============================================\nconst EMBEDDING_WORKER_CODE = `\n // Embedding Worker - runs in separate thread, loads from CDN\n import { pipeline, env } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n \n console.log(\"[EMBED WORKER] Worker script starting...\");\n \n // Configure environment\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n \n let embedder = null;\n let modelId = null;\n \n self.onmessage = async (e) => {\n const { type, payload } = e.data;\n console.log(\"[EMBED WORKER] Received message:\", type, payload);\n \n if (type === \"load\") {\n try {\n modelId = payload.model || \"Xenova/all-MiniLM-L6-v2\";\n console.log(\"[EMBED WORKER] Loading model:\", modelId);\n \n // Note: Don't specify device for embeddings - they're small models\n // that work fine with default backend (unlike LLMs/STT which need WebGPU)\n embedder = await pipeline(\"feature-extraction\", modelId, {\n progress_callback: (progress) => {\n self.postMessage({ type: \"progress\", payload: progress });\n },\n });\n \n console.log(\"[EMBED WORKER] Pipeline created, running warmup...\");\n \n // Warmup - don't swallow errors\n try 
{\n const warmup = await embedder(\"hello\", { pooling: \"mean\", normalize: true });\n console.log(\"[EMBED WORKER] Warmup successful, output type:\", typeof warmup, warmup?.constructor?.name);\n } catch (e) {\n console.error(\"[EMBED WORKER] Warmup failed:\", e);\n self.postMessage({ type: \"error\", payload: \"Warmup failed: \" + (e.message || String(e)) });\n return;\n }\n \n console.log(\"[EMBED WORKER] Sending ready message...\");\n self.postMessage({ type: \"ready\" });\n console.log(\"[EMBED WORKER] Ready message sent!\");\n } catch (err) {\n console.error(\"[EMBED WORKER] Load error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"embed\") {\n console.log(\"[EMBED WORKER] Processing embed request...\");\n try {\n const { text, normalize } = payload;\n console.log(\"[EMBED WORKER] Text to embed:\", text?.substring?.(0, 50));\n \n if (!embedder) {\n throw new Error(\"Embedder not initialized\");\n }\n \n console.log(\"[EMBED WORKER] Calling embedder...\");\n const output = await embedder(text, {\n pooling: \"mean\",\n normalize: normalize !== false,\n });\n \n console.log(\"[EMBED WORKER] Got output, type:\", typeof output, output?.constructor?.name);\n console.log(\"[EMBED WORKER] Output keys:\", output ? Object.keys(output) : \"null\");\n \n // Handle different output formats from transformers.js\n let vector;\n if (output?.data) {\n vector = Array.from(output.data);\n } else if (output?.tolist) {\n vector = output.tolist();\n } else if (Array.isArray(output)) {\n vector = output;\n } else {\n throw new Error(\"Unknown output format: \" + typeof output);\n }\n \n console.log(\"[EMBED WORKER] Vector length:\", vector?.length);\n console.log(\"[EMBED WORKER] Sending embedding result...\");\n self.postMessage({ type: \"embedding\", payload: { vector, text } });\n console.log(\"[EMBED WORKER] Embedding result sent!\");\n } catch (err) {\n console.error(\"[EMBED WORKER] Embed error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n \n if (type === \"embedBatch\") {\n console.log(\"[EMBED WORKER] Processing embedBatch request...\");\n try {\n const { texts, normalize } = payload;\n const results = [];\n \n for (const text of texts) {\n const output = await embedder(text, {\n pooling: \"mean\",\n normalize: normalize !== false,\n });\n results.push({ vector: Array.from(output.data), text });\n }\n \n console.log(\"[EMBED WORKER] Batch complete, sending results...\");\n self.postMessage({ type: \"embeddings\", payload: results });\n } catch (err) {\n console.error(\"[EMBED WORKER] Batch error:\", err);\n self.postMessage({ type: \"error\", payload: err.message || String(err) });\n }\n }\n };\n \n console.log(\"[EMBED WORKER] Worker script loaded, waiting for messages...\");\n`;\n\n/** Create Embedding worker instance */\nfunction createEmbeddingWorker(): Worker {\n const blob = new Blob([EMBEDDING_WORKER_CODE], { type: \"application/javascript\" });\n const url = URL.createObjectURL(blob);\n const worker = new Worker(url, { type: \"module\" });\n URL.revokeObjectURL(url);\n return worker;\n}\n\n/** Embedding result type */\nexport type BrowserEmbedResult = {\n vector: number[];\n text: string;\n};\n\n/** Search result type */\nexport type BrowserSearchResult = {\n text: string;\n score: number;\n index: number;\n};\n\n/** useEmbedding options */\nexport type UseEmbeddingOptions = {\n /** Embedding model (default: \"Xenova/all-MiniLM-L6-v2\") */\n model?: string;\n /** 
Normalize vectors (default: true) */\n normalize?: boolean;\n /** Auto-load on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when ready */\n onReady?: () => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/** useEmbedding return type */\nexport type UseEmbeddingReturn = {\n /** Generate embedding for text */\n embed: (text: string) => Promise<number[]>;\n /** Generate embeddings for multiple texts */\n embedBatch: (texts: string[]) => Promise<BrowserEmbedResult[]>;\n /** Compute cosine similarity between two texts */\n similarity: (textA: string, textB: string) => Promise<number>;\n /** Semantic search - find most similar texts */\n search: (query: string, corpus: string[], topK?: number) => Promise<BrowserSearchResult[]>;\n /** Find nearest text to an embedding vector */\n findNearest: (\n embedding: number[],\n candidates: string[],\n topK?: number,\n ) => Promise<BrowserSearchResult[]>;\n /** Compute cosine similarity between two vectors */\n cosineSimilarity: (a: number[], b: number[]) => number;\n /** Manually load the model - returns Promise that resolves when ready */\n load: () => Promise<void>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Whether model is ready */\n isReady: boolean;\n /** Loading progress */\n loadingProgress: { status: string; message?: string; progress?: number } | null;\n /** Error message */\n error: string | null;\n};\n\n/**\n * React hook for text embeddings in the browser\n *\n * @example\n * ```tsx\n * import { useEmbedding } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { embed, similarity, search, isLoading, isReady } = useEmbedding();\n *\n * if (isLoading) return <div>Loading embedding model...</div>;\n *\n * const handleSearch = async () => {\n * const results = await search(\"capital of France\", [\n * \"Paris is beautiful\",\n * \"London is in England\",\n * \"Dogs are pets\"\n * ]);\n * console.log(results); // [{ text: \"Paris is beautiful\", score: 0.89, index: 0 }, ...]\n * };\n *\n * return <button onClick={handleSearch}>Search</button>;\n * }\n * ```\n */\nexport function useEmbedding(options: UseEmbeddingOptions = {}): UseEmbeddingReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\n \"useEmbedding requires React. 
Make sure React is available in the global scope.\",\n );\n }\n\n const { useState, useEffect, useRef, useCallback } = React;\n const {\n model = \"Xenova/all-MiniLM-L6-v2\",\n normalize = true,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [isLoading, setIsLoading] = useState(false);\n const [isReady, setIsReady] = useState(false);\n const [error, setError] = useState(null as string | null);\n const [loadingProgress, setLoadingProgress] = useState(\n null as { status: string; message?: string; progress?: number } | null,\n );\n\n const workerRef = useRef(null as Worker | null);\n const loadRequestedRef = useRef(false);\n // Promise that resolves when model is ready - critical for proper async waiting\n const readyPromiseRef = useRef(null as Promise<void> | null);\n const readyResolveRef = useRef(null as (() => void) | null);\n\n // Cosine similarity (pure function, no async)\n const cosineSimilarity = useCallback((a: number[], b: number[]): number => {\n if (a.length !== b.length) {\n throw new Error(`Vector dimensions must match: ${a.length} vs ${b.length}`);\n }\n\n let dotProduct = 0;\n let normA = 0;\n let normB = 0;\n\n for (let i = 0; i < a.length; i++) {\n dotProduct += a[i] * b[i];\n normA += a[i] * a[i];\n normB += b[i] * b[i];\n }\n\n const magnitude = Math.sqrt(normA) * Math.sqrt(normB);\n if (magnitude === 0) return 0;\n\n return dotProduct / magnitude;\n }, []);\n\n // Load model - returns a promise that resolves when ready\n const load = useCallback(() => {\n // Already loaded\n if (isReady && workerRef.current) {\n return Promise.resolve();\n }\n\n // Already loading - return existing promise\n if (loadRequestedRef.current && readyPromiseRef.current) {\n return readyPromiseRef.current;\n }\n\n loadRequestedRef.current = true;\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading embedding model...\" });\n\n // Create promise that will resolve when ready\n readyPromiseRef.current = new Promise<void>((resolve) => {\n readyResolveRef.current = resolve;\n });\n\n const worker = createEmbeddingWorker();\n workerRef.current = worker;\n\n // Use addEventListener instead of onmessage to allow multiple listeners\n // (the embed() function also adds its own listener for responses)\n worker.addEventListener(\"message\", (e: MessageEvent) => {\n const { type, payload } = e.data;\n console.log(\"[EMBED HOOK] Received from worker:\", type, payload);\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n setLoadingProgress({\n status: \"downloading\",\n message: `Downloading ${payload.file}`,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n console.log(\"[EMBED HOOK] Model ready! 
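A quick sanity check on the formula: for a = [1, 0] and b = [1, 1] the dot product is 1, the magnitudes are 1 and √2, so the similarity is 1/√2 ≈ 0.7071.

```ts
// Worked instance of the cosine similarity computed above.
const dot = 1 * 1 + 0 * 1; // 1
const sim = dot / (Math.hypot(1, 0) * Math.hypot(1, 1));
console.log(sim); // ≈ 0.7071
```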
Resolving promise...\");\n setIsLoading(false);\n setIsReady(true);\n setLoadingProgress({ status: \"ready\" });\n // Resolve the ready promise\n readyResolveRef.current?.();\n console.log(\"[EMBED HOOK] Promise resolved, calling onReady...\");\n onReady?.();\n } else if (type === \"error\") {\n console.error(\"[EMBED HOOK] Error from worker:\", payload);\n setIsLoading(false);\n setError(payload);\n onError?.(payload);\n }\n });\n\n worker.onerror = (err) => {\n console.error(\"[EMBED HOOK] Worker onerror:\", err);\n setIsLoading(false);\n const errMsg = err.message || \"Worker error\";\n setError(errMsg);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n };\n\n console.log(\"[EMBED HOOK] Sending load message to worker...\");\n worker.postMessage({ type: \"load\", payload: { model } });\n\n return readyPromiseRef.current;\n // Note: isReady is intentionally NOT in deps - we don't want to recreate\n // this callback when ready changes, as that would trigger useEffect cleanup\n // and terminate the worker prematurely\n }, [model, onReady, onError]);\n\n // Auto-load on mount if requested\n useEffect(() => {\n if (autoLoad) {\n load();\n }\n\n return () => {\n if (workerRef.current) {\n workerRef.current.terminate();\n workerRef.current = null;\n }\n };\n }, [autoLoad, load]);\n\n // Embed single text\n const embed = useCallback(\n async (text: string): Promise<number[]> => {\n console.log(\"[EMBED HOOK] embed() called with:\", text?.substring?.(0, 50));\n\n // Trigger load if not started\n const loadPromise = load();\n\n // Wait for the ready Promise (handles stale closures)\n console.log(\"[EMBED HOOK] Waiting for ready promise...\");\n if (readyPromiseRef.current) {\n await readyPromiseRef.current;\n } else {\n await loadPromise;\n }\n console.log(\"[EMBED HOOK] Ready promise resolved, sending embed message...\");\n\n return new Promise((resolve, reject) => {\n const worker = workerRef.current;\n if (!worker) {\n console.error(\"[EMBED HOOK] No worker available!\");\n reject(new Error(\"Worker not initialized. 
Call load() first.\"));\n return;\n }\n\n // Timeout after 30 seconds\n const timeout = setTimeout(() => {\n console.error(\"[EMBED HOOK] Timeout reached!\");\n worker.removeEventListener(\"message\", handler);\n reject(new Error(\"Embedding timeout after 30s\"));\n }, 30000);\n\n const handler = (e: MessageEvent) => {\n console.log(\"[EMBED HOOK] embed handler received:\", e.data.type);\n if (e.data.type === \"embedding\") {\n console.log(\"[EMBED HOOK] Got embedding result!\");\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n resolve(e.data.payload.vector);\n } else if (e.data.type === \"error\") {\n console.error(\"[EMBED HOOK] Got error:\", e.data.payload);\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n reject(new Error(e.data.payload));\n }\n };\n\n worker.addEventListener(\"message\", handler);\n console.log(\"[EMBED HOOK] Posting embed message to worker...\");\n worker.postMessage({ type: \"embed\", payload: { text, normalize } });\n console.log(\"[EMBED HOOK] Message posted, waiting for response...\");\n });\n },\n [load, normalize],\n );\n\n // Embed batch\n const embedBatch = useCallback(\n async (texts: string[]): Promise<BrowserEmbedResult[]> => {\n // Trigger load if not started\n const loadPromise = load();\n\n // Wait for the ready Promise (handles stale closures)\n if (readyPromiseRef.current) {\n await readyPromiseRef.current;\n } else {\n await loadPromise;\n }\n\n return new Promise((resolve, reject) => {\n const worker = workerRef.current;\n if (!worker) {\n reject(new Error(\"Worker not initialized. Call load() first.\"));\n return;\n }\n\n // Timeout after 60 seconds for batch\n const timeout = setTimeout(() => {\n worker.removeEventListener(\"message\", handler);\n reject(new Error(\"Batch embedding timeout after 60s\"));\n }, 60000);\n\n const handler = (e: MessageEvent) => {\n if (e.data.type === \"embeddings\") {\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n resolve(e.data.payload);\n } else if (e.data.type === \"error\") {\n clearTimeout(timeout);\n worker.removeEventListener(\"message\", handler);\n reject(new Error(e.data.payload));\n }\n };\n\n worker.addEventListener(\"message\", handler);\n worker.postMessage({ type: \"embedBatch\", payload: { texts, normalize } });\n });\n },\n [load, normalize],\n );\n\n // Similarity between two texts\n const similarity = useCallback(\n async (textA: string, textB: string): Promise<number> => {\n const [embA, embB] = await Promise.all([embed(textA), embed(textB)]);\n return cosineSimilarity(embA, embB);\n },\n [embed, cosineSimilarity],\n );\n\n // Semantic search\n const search = useCallback(\n async (query: string, corpus: string[], topK?: number): Promise<BrowserSearchResult[]> => {\n const [queryEmb, corpusEmbs] = await Promise.all([embed(query), embedBatch(corpus)]);\n\n const results: BrowserSearchResult[] = corpusEmbs.map(\n (doc: BrowserEmbedResult, index: number) => ({\n text: doc.text,\n score: cosineSimilarity(queryEmb, doc.vector),\n index,\n }),\n );\n\n results.sort((a, b) => b.score - a.score);\n return topK ? 
results.slice(0, topK) : results;\n },\n [embed, embedBatch, cosineSimilarity],\n );\n\n // Find nearest to an embedding\n const findNearest = useCallback(\n async (\n embedding: number[],\n candidates: string[],\n topK?: number,\n ): Promise<BrowserSearchResult[]> => {\n const candidateEmbs = await embedBatch(candidates);\n\n const results: BrowserSearchResult[] = candidateEmbs.map(\n (doc: BrowserEmbedResult, index: number) => ({\n text: doc.text,\n score: cosineSimilarity(embedding, doc.vector),\n index,\n }),\n );\n\n results.sort((a, b) => b.score - a.score);\n return topK ? results.slice(0, topK) : results;\n },\n [embedBatch, cosineSimilarity],\n );\n\n return {\n embed,\n embedBatch,\n similarity,\n search,\n findNearest,\n cosineSimilarity,\n load,\n isLoading,\n isReady,\n loadingProgress,\n error,\n };\n}\n\n// ============================================\n// Model Preloading (Non-React)\n// ============================================\n\n/** Progress callback for preloading */\nexport type PreloadProgress = {\n status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n file?: string;\n progress?: number;\n message?: string;\n};\n\n/** Preload options */\nexport type PreloadOptions = {\n onProgress?: (progress: PreloadProgress) => void;\n};\n\n/**\n * Preload a chat/LLM model (downloads to IndexedDB cache)\n *\n * Call this during app initialization to ensure the model is ready\n * when users need it.\n *\n * @example\n * ```ts\n * // In your app's initialization\n * import { preloadChatModel } from \"@tryhamster/gerbil/browser\";\n *\n * await preloadChatModel(\"qwen3-0.6b\", {\n * onProgress: (p) => console.log(p.status, p.progress),\n * });\n *\n * // Later, useChat will load instantly from cache\n * ```\n */\nexport async function preloadChatModel(\n modelId: string,\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n\n // Use createGerbilWorker to load the model, then terminate\n const worker = await createGerbilWorker({\n modelId,\n onProgress: (p) => {\n if (p.status === \"downloading\") {\n onProgress?.({\n status: \"downloading\",\n file: p.file,\n progress: p.progress,\n });\n } else {\n onProgress?.({ status: \"loading\", message: p.status });\n }\n },\n });\n\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n}\n\n/**\n * Preload an embedding model\n *\n * @example\n * ```ts\n * await preloadEmbeddingModel(\"Xenova/all-MiniLM-L6-v2\");\n * ```\n */\nexport async function preloadEmbeddingModel(\n modelId = \"Xenova/all-MiniLM-L6-v2\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n\n return new Promise((resolve, reject) => {\n const worker = createEmbeddingWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({ type: \"load\", payload: { model: modelId } });\n });\n}\n\n/**\n * Preload a TTS model\n *\n * @example\n * ```ts\n * await 
preloadTTSModel(\"kokoro-82m\");\n * ```\n */\nexport async function preloadTTSModel(\n modelId: TTSModelId = \"kokoro-82m\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n const modelConfig = TTS_MODELS[modelId];\n\n if (!modelConfig) {\n throw new Error(`Unknown TTS model: ${modelId}`);\n }\n\n return new Promise((resolve, reject) => {\n const worker = createTTSWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({\n type: \"load\",\n payload: {\n modelId,\n repo: modelConfig.repo,\n voices: modelConfig.voices,\n },\n });\n });\n}\n\n/**\n * Preload an STT model\n *\n * @example\n * ```ts\n * await preloadSTTModel(\"whisper-tiny.en\");\n * ```\n */\nexport async function preloadSTTModel(\n modelId = \"whisper-tiny.en\",\n options: PreloadOptions = {},\n): Promise<void> {\n const { onProgress } = options;\n const resolved = resolveSTTModel(modelId);\n\n return new Promise((resolve, reject) => {\n const worker = createSTTWorker();\n\n worker.onmessage = (e: MessageEvent) => {\n const { type, payload } = e.data;\n\n if (type === \"progress\") {\n if (payload.status === \"progress\" && payload.file) {\n onProgress?.({\n status: \"downloading\",\n file: payload.file,\n progress: Math.round((payload.loaded / payload.total) * 100),\n });\n }\n } else if (type === \"ready\") {\n onProgress?.({ status: \"ready\" });\n worker.terminate();\n resolve();\n } else if (type === \"error\") {\n onProgress?.({ status: \"error\", message: payload });\n worker.terminate();\n reject(new Error(payload));\n }\n };\n\n onProgress?.({ status: \"loading\", message: `Loading ${modelId}...` });\n worker.postMessage({ type: \"load\", payload: { model: resolved } });\n });\n}\n\n/** Helper to resolve STT model ID to repo */\nfunction resolveSTTModel(modelId: string): string {\n const STT_MODEL_MAP: Record<string, string> = {\n \"whisper-tiny\": \"onnx-community/whisper-tiny\",\n \"whisper-tiny.en\": \"onnx-community/whisper-tiny.en\",\n \"whisper-base\": \"onnx-community/whisper-base\",\n \"whisper-base.en\": \"onnx-community/whisper-base.en\",\n \"whisper-small\": \"onnx-community/whisper-small\",\n \"whisper-small.en\": \"onnx-community/whisper-small.en\",\n \"whisper-large-v3-turbo\": \"onnx-community/whisper-large-v3-turbo\",\n };\n return STT_MODEL_MAP[modelId] || modelId;\n}\n\n// ============================================\n// Utilities\n// ============================================\n\n/**\n * Check if WebGPU is supported\n */\nexport function isWebGPUSupported(): boolean {\n if (typeof navigator === \"undefined\") {\n return false;\n }\n return \"gpu\" in navigator;\n}\n\n/**\n * Get WebGPU adapter info\n */\nexport async function getWebGPUInfo(): Promise<{\n supported: boolean;\n adapter?: string;\n device?: string;\n} | null> {\n if (!isWebGPUSupported()) {\n return { supported: false };\n }\n\n try {\n const adapter = await (navigator as 
any).gpu.requestAdapter();\n    if (!adapter) {\n      return { supported: false };\n    }\n\n    // requestAdapterInfo() was removed from the WebGPU spec in favor of the\n    // adapter.info attribute; read whichever one this browser exposes.\n    const info = adapter.info ?? (await adapter.requestAdapterInfo?.());\n    return {\n      supported: true,\n      adapter: info?.vendor,\n      device: info?.device,\n    };\n  } catch {\n    return { supported: false };\n  }\n}\n\nexport default {\n  isWebGPUSupported,\n  getWebGPUInfo,\n  createGerbilWorker,\n  playAudio,\n  createAudioPlayer,\n  preloadChatModel,\n  preloadEmbeddingModel,\n  preloadTTSModel,\n  preloadSTTModel,\n};\n"],"mappings":"<base64 VLQ source-map mappings omitted>"}
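
The preload helpers in this build are intended to run once at app startup so that later hook calls resolve from the IndexedDB cache instead of re-downloading. A minimal startup sketch, assuming a bundler that resolves the package's `/browser` entry (the same import path the JSDoc examples use); `warmCaches` is a hypothetical wrapper name, and the model IDs are the defaults shown in the examples above:

```ts
// Hypothetical startup wrapper (not part of the package), built only on the
// exports shown above: isWebGPUSupported, getWebGPUInfo, and the preload* helpers.
import {
  isWebGPUSupported,
  getWebGPUInfo,
  preloadChatModel,
  preloadTTSModel,
  preloadSTTModel,
} from "@tryhamster/gerbil/browser";

async function warmCaches(): Promise<void> {
  // Mirror the guard the hooks perform before creating a worker.
  if (!isWebGPUSupported()) {
    console.warn("WebGPU not supported in this browser; skipping preload");
    return;
  }
  const gpu = await getWebGPUInfo();
  console.log("WebGPU adapter:", gpu?.adapter ?? "unknown");

  // Sequential preloads keep the downloads uncontended; each helper
  // terminates its worker once the files are in the IndexedDB cache.
  await preloadChatModel("qwen3-0.6b", {
    onProgress: (p) => console.log("chat model:", p.status, p.progress ?? ""),
  });
  await preloadTTSModel("kokoro-82m");
  await preloadSTTModel("whisper-tiny.en");
}

warmCaches().catch((err) => console.error("model preload failed:", err));
```

Once the promise resolves, hooks such as `useChat` should initialize from the local cache, matching the behavior described in the `preloadChatModel` docs.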