@tryhamster/gerbil 1.0.0-rc.0 → 1.0.0-rc.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/README.md +79 -14
  2. package/dist/auto-update-DsWBBnEk.mjs +3 -0
  3. package/dist/browser/index.d.mts +401 -5
  4. package/dist/browser/index.d.mts.map +1 -1
  5. package/dist/browser/index.mjs +1772 -146
  6. package/dist/browser/index.mjs.map +1 -1
  7. package/dist/{chrome-backend-CtwPENIW.mjs → chrome-backend-JEPeM2YE.mjs} +1 -1
  8. package/dist/{chrome-backend-C5Un08O4.mjs → chrome-backend-Y9F7W5VQ.mjs} +514 -73
  9. package/dist/chrome-backend-Y9F7W5VQ.mjs.map +1 -0
  10. package/dist/cli.mjs +3359 -646
  11. package/dist/cli.mjs.map +1 -1
  12. package/dist/frameworks/express.d.mts +1 -1
  13. package/dist/frameworks/express.mjs +3 -3
  14. package/dist/frameworks/fastify.d.mts +1 -1
  15. package/dist/frameworks/fastify.mjs +3 -3
  16. package/dist/frameworks/hono.d.mts +1 -1
  17. package/dist/frameworks/hono.mjs +3 -3
  18. package/dist/frameworks/next.d.mts +2 -2
  19. package/dist/frameworks/next.mjs +3 -3
  20. package/dist/frameworks/react.d.mts +1 -1
  21. package/dist/frameworks/trpc.d.mts +1 -1
  22. package/dist/frameworks/trpc.mjs +3 -3
  23. package/dist/gerbil-DeQlX_Mt.mjs +5 -0
  24. package/dist/gerbil-POAz8peb.d.mts +431 -0
  25. package/dist/gerbil-POAz8peb.d.mts.map +1 -0
  26. package/dist/gerbil-yoSpRHgv.mjs +1463 -0
  27. package/dist/gerbil-yoSpRHgv.mjs.map +1 -0
  28. package/dist/index.d.mts +395 -9
  29. package/dist/index.d.mts.map +1 -1
  30. package/dist/index.mjs +8 -6
  31. package/dist/index.mjs.map +1 -1
  32. package/dist/integrations/ai-sdk.d.mts +122 -4
  33. package/dist/integrations/ai-sdk.d.mts.map +1 -1
  34. package/dist/integrations/ai-sdk.mjs +239 -11
  35. package/dist/integrations/ai-sdk.mjs.map +1 -1
  36. package/dist/integrations/langchain.d.mts +132 -2
  37. package/dist/integrations/langchain.d.mts.map +1 -1
  38. package/dist/integrations/langchain.mjs +176 -8
  39. package/dist/integrations/langchain.mjs.map +1 -1
  40. package/dist/integrations/llamaindex.d.mts +1 -1
  41. package/dist/integrations/llamaindex.mjs +3 -3
  42. package/dist/integrations/mcp-client.mjs +4 -4
  43. package/dist/integrations/mcp-client.mjs.map +1 -1
  44. package/dist/integrations/mcp.d.mts +2 -2
  45. package/dist/integrations/mcp.d.mts.map +1 -1
  46. package/dist/integrations/mcp.mjs +6 -6
  47. package/dist/{mcp-R8kRLIKb.mjs → mcp-Bitg4sjX.mjs} +10 -37
  48. package/dist/mcp-Bitg4sjX.mjs.map +1 -0
  49. package/dist/microphone-D-6y9aiE.mjs +3 -0
  50. package/dist/{models-DKULvhOr.mjs → models-BAtL8qsA.mjs} +42 -7
  51. package/dist/models-BAtL8qsA.mjs.map +1 -0
  52. package/dist/{models-De2-_GmQ.d.mts → models-CE0fBq0U.d.mts} +2 -2
  53. package/dist/models-CE0fBq0U.d.mts.map +1 -0
  54. package/dist/{one-liner-BUQR0nqq.mjs → one-liner-B1rmFto6.mjs} +2 -2
  55. package/dist/{one-liner-BUQR0nqq.mjs.map → one-liner-B1rmFto6.mjs.map} +1 -1
  56. package/dist/repl-D20JO260.mjs +10 -0
  57. package/dist/skills/index.d.mts +303 -12
  58. package/dist/skills/index.d.mts.map +1 -1
  59. package/dist/skills/index.mjs +6 -6
  60. package/dist/skills-5DxAV-rn.mjs +1435 -0
  61. package/dist/skills-5DxAV-rn.mjs.map +1 -0
  62. package/dist/stt-Bv_dum-R.mjs +433 -0
  63. package/dist/stt-Bv_dum-R.mjs.map +1 -0
  64. package/dist/stt-KzSoNvwI.mjs +3 -0
  65. package/dist/{tools-BsiEE6f2.mjs → tools-IYPrqoek.mjs} +6 -7
  66. package/dist/{tools-BsiEE6f2.mjs.map → tools-IYPrqoek.mjs.map} +1 -1
  67. package/dist/tts-5yWeP_I0.mjs +3 -0
  68. package/dist/tts-DG6denWG.mjs +729 -0
  69. package/dist/tts-DG6denWG.mjs.map +1 -0
  70. package/dist/types-s6Py2_DL.d.mts +353 -0
  71. package/dist/types-s6Py2_DL.d.mts.map +1 -0
  72. package/dist/{utils-7vXqtq2Q.mjs → utils-CkB4Roi6.mjs} +1 -1
  73. package/dist/{utils-7vXqtq2Q.mjs.map → utils-CkB4Roi6.mjs.map} +1 -1
  74. package/docs/ai-sdk.md +137 -21
  75. package/docs/browser.md +241 -2
  76. package/docs/memory.md +72 -0
  77. package/docs/stt.md +494 -0
  78. package/docs/tts.md +569 -0
  79. package/docs/vision.md +396 -0
  80. package/package.json +17 -18
  81. package/dist/auto-update-BbNHbSU1.mjs +0 -3
  82. package/dist/chrome-backend-C5Un08O4.mjs.map +0 -1
  83. package/dist/gerbil-BfnsFWRE.mjs +0 -644
  84. package/dist/gerbil-BfnsFWRE.mjs.map +0 -1
  85. package/dist/gerbil-BjW-z7Fq.mjs +0 -5
  86. package/dist/gerbil-DZ1k3ChC.d.mts +0 -138
  87. package/dist/gerbil-DZ1k3ChC.d.mts.map +0 -1
  88. package/dist/mcp-R8kRLIKb.mjs.map +0 -1
  89. package/dist/models-DKULvhOr.mjs.map +0 -1
  90. package/dist/models-De2-_GmQ.d.mts.map +0 -1
  91. package/dist/skills-D3CEpgDc.mjs +0 -630
  92. package/dist/skills-D3CEpgDc.mjs.map +0 -1
  93. package/dist/types-BS1N92Jt.d.mts +0 -183
  94. package/dist/types-BS1N92Jt.d.mts.map +0 -1
package/dist/browser/index.mjs.map
@@ -1 +1 @@
- {"version":3,"file":"index.mjs","names":["currentResolve: ((text: string) => void) | null","currentReject: ((error: Error) => void) | null","gerbilWorker: GerbilWorker","options","userMessage: Message","assistantMessage: Message"],"sources":["../../src/browser/index.ts"],"sourcesContent":["/**\n * Gerbil Browser Support\n *\n * Run LLMs directly in the browser with WebGPU acceleration.\n *\n * @example useChat (React)\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <form onSubmit={handleSubmit}>\n * {messages.map(m => <div key={m.id}>{m.role}: {m.content}</div>)}\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * </form>\n * );\n * }\n * ```\n *\n * @example useCompletion (React)\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading } = useCompletion();\n * if (isLoading) return <div>Loading...</div>;\n * return <button onClick={() => complete(\"Write a haiku\")}>{completion}</button>;\n * }\n * ```\n *\n * @example Low-level API\n * ```ts\n * import { createGerbilWorker } from \"@tryhamster/gerbil/browser\";\n *\n * const gerbil = await createGerbilWorker({\n * modelId: \"qwen3-0.6b\",\n * onToken: (token) => console.log(token.text),\n * });\n * await gerbil.generate(\"Hello!\");\n * gerbil.terminate();\n * ```\n */\n\nimport { resolveModel } from \"../core/models.js\";\n\n// Re-export models and types (browser-safe, no Node.js dependencies)\nexport { BUILTIN_MODELS } from \"../core/models.js\";\nexport type * from \"../core/types.js\";\n\n// NOTE: We intentionally do NOT export Gerbil from core here.\n// The core Gerbil class has Node.js code paths (chrome-backend/puppeteer)\n// that break browser bundlers. 
Use createGerbilWorker() instead for browser.\n\n// ============================================\n// Types\n// ============================================\n\nexport type WorkerProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n error?: string;\n};\n\nexport type WorkerToken = {\n status: \"token\";\n text: string;\n state: \"thinking\" | \"answering\";\n numTokens: number;\n tps: number;\n};\n\nexport type WorkerComplete = {\n status: \"complete\";\n text: string;\n numTokens: number;\n totalTime: number;\n tps: number;\n};\n\nexport type GerbilWorkerOptions = {\n /** Model ID to load (default: \"qwen3-0.6b\") */\n modelId?: string;\n /** Called during model loading with progress updates */\n onProgress?: (progress: WorkerProgress) => void;\n /** Called for each token during streaming generation */\n onToken?: (token: WorkerToken) => void;\n /** Called when generation is complete */\n onComplete?: (result: WorkerComplete) => void;\n /** Called on errors */\n onError?: (error: string) => void;\n /** Worker script URL (auto-detected if not provided) */\n workerUrl?: string;\n};\n\nexport type GenerateStreamOptions = {\n /** Maximum tokens to generate */\n maxTokens?: number;\n /** Temperature for sampling (0 = deterministic) */\n temperature?: number;\n /** Top-p nucleus sampling */\n topP?: number;\n /** Top-k sampling */\n topK?: number;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** System prompt */\n system?: string;\n};\n\nexport type GerbilWorker = {\n /** Generate text with streaming */\n generate: (prompt: string, options?: GenerateStreamOptions) => Promise<string>;\n /** Interrupt current generation */\n interrupt: () => void;\n /** Reset conversation cache */\n reset: () => void;\n /** Terminate the worker */\n terminate: () => void;\n /** Check if model is loaded */\n isReady: () => boolean;\n};\n\n// ============================================\n// Web Worker Factory\n// ============================================\n\n/**\n * Create a Gerbil worker for streaming WebGPU inference\n *\n * Uses a Web Worker to keep the UI responsive during model loading\n * and text generation, with real-time token streaming.\n */\nexport async function createGerbilWorker(options: GerbilWorkerOptions = {}): Promise<GerbilWorker> {\n const { modelId = \"qwen3-0.6b\", onProgress, onToken, onComplete, onError } = options;\n\n // Resolve model to HuggingFace path\n const source = resolveModel(modelId);\n\n return new Promise((resolve, reject) => {\n // Create inline worker from the worker code\n const workerCode = `\n import {\n AutoTokenizer,\n AutoModelForCausalLM,\n TextStreamer,\n InterruptableStoppingCriteria,\n } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0\";\n\n class ModelPipeline {\n static tokenizer = null;\n static model = null;\n static modelId = \"\";\n\n static async getInstance(modelId, options = {}, progressCallback) {\n if (this.modelId !== modelId) {\n this.tokenizer = null;\n this.model = null;\n }\n this.modelId = modelId;\n\n const dtype = options.dtype || \"q4f16\";\n const device = options.device || \"webgpu\";\n\n if (!this.tokenizer) {\n this.tokenizer = await AutoTokenizer.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n\n if (!this.model) {\n this.model = await 
AutoModelForCausalLM.from_pretrained(modelId, {\n dtype,\n device,\n progress_callback: progressCallback,\n });\n }\n\n return { tokenizer: this.tokenizer, model: this.model };\n }\n }\n\n const stoppingCriteria = new InterruptableStoppingCriteria();\n let pastKeyValuesCache = null;\n\n async function load(data) {\n const { modelId, options = {} } = data;\n self.postMessage({ status: \"loading\", message: \"Loading model...\" });\n\n // Track download state - if we see progress < 100, we're downloading\n const downloadState = {\n downloading: new Set(), // Files currently downloading\n completed: new Set(), // Files completed\n isDownloading: false, // True if any file needed download\n };\n\n try {\n const { tokenizer, model } = await ModelPipeline.getInstance(\n modelId,\n options,\n (progress) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n \n // If we see progress < 100, this file is being downloaded (not from cache)\n if (pct < 100) {\n downloadState.downloading.add(progress.file);\n downloadState.isDownloading = true;\n } else if (pct === 100) {\n downloadState.downloading.delete(progress.file);\n downloadState.completed.add(progress.file);\n }\n\n // Only emit downloading status if actually downloading\n if (downloadState.isDownloading) {\n self.postMessage({\n status: \"downloading\",\n file: progress.file,\n progress: pct,\n downloadCount: downloadState.downloading.size,\n totalFiles: downloadState.completed.size + downloadState.downloading.size,\n });\n }\n }\n }\n );\n\n self.postMessage({ status: \"loading\", message: \"Compiling shaders...\" });\n const warmupInputs = tokenizer(\"a\");\n await model.generate({ ...warmupInputs, max_new_tokens: 1 });\n\n self.postMessage({ status: \"ready\" });\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generate(data) {\n const { messages, options = {} } = data;\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n\n try {\n const { tokenizer, model } = await ModelPipeline.getInstance(ModelPipeline.modelId, {});\n\n const inputs = tokenizer.apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking,\n });\n\n let state = \"answering\";\n const [START_THINKING_TOKEN_ID, END_THINKING_TOKEN_ID] = tokenizer.encode(\n \"<think></think>\",\n { add_special_tokens: false }\n );\n\n let startTime = null;\n let numTokens = 0;\n\n // Token callback for state tracking (receives raw token IDs)\n const tokenCallback = (tokens) => {\n startTime ??= performance.now();\n numTokens++;\n \n const tokenId = Number(tokens[0]);\n if (tokenId === START_THINKING_TOKEN_ID) {\n state = \"thinking\";\n } else if (tokenId === END_THINKING_TOKEN_ID) {\n state = \"answering\";\n }\n };\n\n // Text callback for streaming (receives decoded text)\n const streamCallback = (text) => {\n const tps = startTime ? 
(numTokens / (performance.now() - startTime)) * 1000 : 0;\n self.postMessage({ status: \"token\", text, state, numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n token_callback_function: tokenCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const { past_key_values, sequences } = await model.generate({\n ...inputs,\n past_key_values: pastKeyValuesCache,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n max_new_tokens: maxTokens,\n streamer,\n stopping_criteria: stoppingCriteria,\n return_dict_in_generate: true,\n });\n\n pastKeyValuesCache = past_key_values;\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: true });\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? (numTokens / totalTime) * 1000 : 0,\n });\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n self.addEventListener(\"message\", async (e) => {\n const { type, ...data } = e.data;\n switch (type) {\n case \"load\": await load(data); break;\n case \"generate\": stoppingCriteria.reset(); await generate(data); break;\n case \"interrupt\": stoppingCriteria.interrupt(); break;\n case \"reset\": pastKeyValuesCache = null; stoppingCriteria.reset(); break;\n }\n });\n\n self.postMessage({ status: \"init\" });\n `;\n\n const blob = new Blob([workerCode], { type: \"application/javascript\" });\n const workerUrl = URL.createObjectURL(blob);\n const worker = new Worker(workerUrl, { type: \"module\" });\n\n let isReady = false;\n let currentResolve: ((text: string) => void) | null = null;\n let currentReject: ((error: Error) => void) | null = null;\n let _generatedText = \"\";\n\n worker.onmessage = (e) => {\n const msg = e.data;\n\n switch (msg.status) {\n case \"init\":\n // Worker initialized, load the model\n worker.postMessage({ type: \"load\", modelId: source.path });\n break;\n\n case \"loading\":\n case \"downloading\":\n onProgress?.(msg as WorkerProgress);\n break;\n\n case \"ready\":\n isReady = true;\n onProgress?.(msg as WorkerProgress);\n resolve(gerbilWorker);\n break;\n\n case \"start\":\n _generatedText = \"\";\n break;\n\n case \"token\":\n _generatedText += msg.text;\n onToken?.(msg as WorkerToken);\n break;\n\n case \"complete\":\n onComplete?.(msg as WorkerComplete);\n currentResolve?.(msg.text);\n currentResolve = null;\n currentReject = null;\n break;\n\n case \"error\":\n onError?.(msg.error);\n onProgress?.({ status: \"error\", error: msg.error });\n if (currentReject) {\n currentReject(new Error(msg.error));\n currentResolve = null;\n currentReject = null;\n } else {\n reject(new Error(msg.error));\n }\n break;\n }\n };\n\n worker.onerror = (e) => {\n const error = e.message || \"Worker error\";\n onError?.(error);\n reject(new Error(error));\n };\n\n const gerbilWorker: GerbilWorker = {\n generate: (prompt: string, options: GenerateStreamOptions = {}) => {\n return new Promise((res, rej) => {\n currentResolve = res;\n currentReject = rej;\n\n const system = options.system || \"You are a helpful assistant.\";\n const messages = [\n { role: \"system\", content: system },\n { role: \"user\", content: prompt }, // enable_thinking handles think mode\n ];\n\n 
worker.postMessage({\n type: \"generate\",\n messages,\n options: {\n maxTokens: options.maxTokens ?? 256,\n temperature: options.temperature ?? 0.7,\n topP: options.topP ?? 0.9,\n topK: options.topK ?? 20,\n thinking: options.thinking ?? false,\n },\n });\n });\n },\n\n interrupt: () => {\n worker.postMessage({ type: \"interrupt\" });\n },\n\n reset: () => {\n worker.postMessage({ type: \"reset\" });\n },\n\n terminate: () => {\n worker.terminate();\n URL.revokeObjectURL(workerUrl);\n },\n\n isReady: () => isReady,\n };\n });\n}\n\n// ============================================\n// React Hooks\n// ============================================\n\n/** Message in a chat conversation */\nexport type Message = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n};\n\n/** Loading progress state */\nexport type LoadingProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n};\n\n/** Options for useChat hook */\nexport type UseChatOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens per response */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Initial messages */\n initialMessages?: Message[];\n /** Auto-load model on mount (default: false - loads on first generate or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Return type for useChat hook */\nexport type UseChatReturn = {\n /** Chat messages */\n messages: Message[];\n /** Current input value */\n input: string;\n /** Set input value */\n setInput: (value: string) => void;\n /** Submit current input */\n handleSubmit: (e?: { preventDefault?: () => void }) => void;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating a response */\n isGenerating: boolean;\n /** Current thinking content (streaming) */\n thinking: string;\n /** Stop generation */\n stop: () => void;\n /** Clear all messages */\n clear: () => void;\n /** Current tokens per second */\n tps: number;\n /** Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n};\n\n/**\n * React hook for chat with local LLM\n *\n * @example\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <form onSubmit={handleSubmit}>\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * <button disabled={isGenerating}>Send</button>\n * </form>\n * </div>\n * );\n * }\n * ```\n */\nexport function useChat(options: UseChatOptions = {}): UseChatReturn {\n // Lazy import React to avoid SSR issues\n const React = globalThis.React;\n if (!React) {\n throw new Error(\"useChat requires React. 
Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => undefined | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n initialMessages = [],\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<Message[]>(initialMessages);\n const [input, setInput] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [thinking, setThinking] = useState<string>(\"\");\n const [currentResponse, setCurrentResponse] = useState<string>(\"\");\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const messageIdRef = useRef<number>(0);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on generate\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize worker\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCurrentResponse((r: string) => r + token.text);\n }\n },\n onComplete: () => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n // Commit response to messages when generation completes\n useEffect(() => {\n if (!isGenerating && currentResponse) {\n setMessages((msgs: Message[]) => {\n const lastMsg = msgs.at(-1);\n if (lastMsg?.role === \"assistant\") {\n return msgs.map((m: Message, i: number) =>\n i === msgs.length - 1\n ? 
{\n ...m,\n content: currentResponse,\n thinking: thinking || undefined,\n }\n : m,\n );\n }\n return msgs;\n });\n setCurrentResponse(\"\");\n setThinking(\"\");\n }\n return () => {\n return;\n };\n }, [isGenerating, currentResponse, thinking]);\n\n // Store pending message for auto-load scenario\n const pendingMessageRef = useRef<string | null>(null);\n\n const handleSubmit = useCallback(\n (e?: { preventDefault?: () => void }) => {\n e?.preventDefault?.();\n\n if (!input.trim() || isGenerating) {\n return;\n }\n\n const userMessage: Message = {\n id: `msg-${++messageIdRef.current}`,\n role: \"user\",\n content: input.trim(),\n };\n\n const assistantMessage: Message = {\n id: `msg-${++messageIdRef.current}`,\n role: \"assistant\",\n content: \"\",\n };\n\n setMessages((msgs: Message[]) => [...msgs, userMessage, assistantMessage]);\n setInput(\"\");\n setCurrentResponse(\"\");\n setThinking(\"\");\n\n // If worker not loaded, trigger load and queue the message\n if (!workerRef.current) {\n pendingMessageRef.current = userMessage.content;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(userMessage.content, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n });\n },\n [input, isGenerating, system, enableThinking, maxTokens, temperature, load],\n );\n\n // Process pending message when worker becomes ready\n useEffect(() => {\n if (isReady && pendingMessageRef.current && workerRef.current) {\n const pendingContent = pendingMessageRef.current;\n pendingMessageRef.current = null;\n setIsGenerating(true);\n workerRef.current.generate(pendingContent, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n });\n }\n return () => {\n return;\n };\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n const clear = useCallback(() => {\n workerRef.current?.reset();\n setMessages([]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n }, []);\n\n // Update last message with streaming content\n const displayMessages = messages.map((m: Message, i: number) => {\n if (i === messages.length - 1 && m.role === \"assistant\" && isGenerating) {\n return {\n ...m,\n content: currentResponse,\n thinking: thinking || undefined,\n };\n }\n return m;\n });\n\n return {\n messages: displayMessages,\n input,\n setInput,\n handleSubmit,\n isLoading,\n loadingProgress,\n isGenerating,\n thinking,\n stop,\n clear,\n tps,\n isReady,\n error,\n load,\n };\n}\n\n/** Options for useCompletion hook */\nexport type UseCompletionOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Auto-load model on mount (default: false - loads on first complete() or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Return type for useCompletion hook */\nexport type UseCompletionReturn = {\n /** Generated completion */\n completion: string;\n /** Thinking content (if enabled) */\n thinking: string;\n /** Generate completion */\n complete: (prompt: string) => Promise<string>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating */\n 
isGenerating: boolean;\n /** Stop generation */\n stop: () => void;\n /** Current tokens per second */\n tps: number;\n /** Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed if lazy: true) */\n load: () => void;\n};\n\n/**\n * React hook for text completion with local LLM\n *\n * @example\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading, isGenerating } = useCompletion();\n *\n * if (isLoading) return <div>Loading...</div>;\n *\n * return (\n * <div>\n * <button onClick={() => complete(\"Write a haiku\")}>Generate</button>\n * <p>{completion}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useCompletion(options: UseCompletionOptions = {}): UseCompletionReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useCompletion requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => undefined | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [completion, setCompletion] = useState<string>(\"\");\n const [thinking, setThinking] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const resolveRef = useRef<((text: string) => void) | null>(null);\n const rejectRef = useRef<((err: Error) => void) | null>(null);\n const pendingPromptRef = useRef<string | null>(null);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on complete()\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCompletion((c: string) => c + token.text);\n }\n },\n onComplete: (result) => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n resolveRef.current?.(result.text);\n resolveRef.current = null;\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n const complete = useCallback(\n (prompt: string): Promise<string> => {\n return new Promise((resolve, reject) => {\n setCompletion(\"\");\n setThinking(\"\");\n resolveRef.current = resolve;\n rejectRef.current = reject;\n\n // If worker not loaded, trigger load and queue the prompt\n if (!workerRef.current) {\n pendingPromptRef.current = prompt;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(prompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n });\n });\n },\n [system, enableThinking, maxTokens, temperature, load],\n );\n\n // Process pending prompt when worker becomes ready\n useEffect(() => {\n if (isReady && pendingPromptRef.current && workerRef.current) {\n const pendingPrompt = pendingPromptRef.current;\n pendingPromptRef.current = null;\n setIsGenerating(true);\n workerRef.current.generate(pendingPrompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n });\n }\n return () => {\n return;\n };\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n return {\n completion,\n thinking,\n complete,\n isLoading,\n loadingProgress,\n isGenerating,\n stop,\n tps,\n isReady,\n error,\n load,\n };\n}\n\n// ============================================\n// Utilities\n// ============================================\n\n/**\n * Check if WebGPU is supported\n */\nexport function isWebGPUSupported(): boolean {\n if (typeof navigator === \"undefined\") {\n return false;\n }\n return \"gpu\" in navigator;\n}\n\n/**\n * Get WebGPU adapter info\n */\nexport async function getWebGPUInfo(): Promise<{\n supported: boolean;\n adapter?: string;\n device?: string;\n} | null> {\n if (!isWebGPUSupported()) {\n return { supported: false };\n }\n\n try {\n const adapter = await (navigator as any).gpu.requestAdapter();\n if (!adapter) {\n return { supported: false };\n }\n\n const info = await adapter.requestAdapterInfo();\n return {\n supported: true,\n adapter: info.vendor,\n device: info.device,\n };\n } catch {\n return { supported: false };\n }\n}\n\nexport default { isWebGPUSupported, getWebGPUInfo, createGerbilWorker 
};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8IA,eAAsB,mBAAmB,UAA+B,EAAE,EAAyB;CACjG,MAAM,EAAE,UAAU,cAAc,YAAY,SAAS,YAAY,YAAY;CAG7E,MAAM,SAAS,aAAa,QAAQ;AAEpC,QAAO,IAAI,SAAS,SAAS,WAAW;EAgMtC,MAAM,OAAO,IAAI,KAAK,CA9LH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;MA8Le,EAAE,EAAE,MAAM,0BAA0B,CAAC;EACvE,MAAM,YAAY,IAAI,gBAAgB,KAAK;EAC3C,MAAM,SAAS,IAAI,OAAO,WAAW,EAAE,MAAM,UAAU,CAAC;EAExD,IAAI,UAAU;EACd,IAAIA,iBAAkD;EACtD,IAAIC,gBAAiD;EACrD,IAAI,iBAAiB;AAErB,SAAO,aAAa,MAAM;GACxB,MAAM,MAAM,EAAE;AAEd,WAAQ,IAAI,QAAZ;IACE,KAAK;AAEH,YAAO,YAAY;MAAE,MAAM;MAAQ,SAAS,OAAO;MAAM,CAAC;AAC1D;IAEF,KAAK;IACL,KAAK;AACH,kBAAa,IAAsB;AACnC;IAEF,KAAK;AACH,eAAU;AACV,kBAAa,IAAsB;AACnC,aAAQ,aAAa;AACrB;IAEF,KAAK;AACH,sBAAiB;AACjB;IAEF,KAAK;AACH,uBAAkB,IAAI;AACtB,eAAU,IAAmB;AAC7B;IAEF,KAAK;AACH,kBAAa,IAAsB;AACnC,sBAAiB,IAAI,KAAK;AAC1B,sBAAiB;AACjB,qBAAgB;AAChB;IAEF,KAAK;AACH,eAAU,IAAI,MAAM;AACpB,kBAAa;MAAE,QAAQ;MAAS,OAAO,IAAI;MAAO,CAAC;AACnD,SAAI,eAAe;AACjB,oBAAc,IAAI,MAAM,IAAI,MAAM,CAAC;AACnC,uBAAiB;AACjB,sBAAgB;WAEhB,QAAO,IAAI,MAAM,IAAI,MAAM,CAAC;AAE9B;;;AAIN,SAAO,WAAW,MAAM;GACtB,MAAM,QAAQ,EAAE,WAAW;AAC3B,aAAU,MAAM;AAChB,UAAO,IAAI,MAAM,MAAM,CAAC;;EAG1B,MAAMC,eAA6B;GACjC,WAAW,QAAgB,YAAiC,EAAE,KAAK;AACjE,WAAO,IAAI,SAAS,KAAK,QAAQ;AAC/B,sBAAiB;AACjB,qBAAgB;KAGhB,MAAM,WAAW,CACf;MAAE,MAAM;MAAU,SAFLC,UAAQ,UAAU;MAEI,EACnC;MAAE,MAAM;MAAQ,SAAS;MAAQ,CAClC;AAED,YAAO,YAAY;MACjB,MAAM;MACN;MACA,SAAS;OACP,WAAWA,UAAQ,aAAa;OAChC,aAAaA,UAAQ,eAAe;OACpC,MAAMA,UAAQ,QAAQ;OACtB,MAAMA,UAAQ,QAAQ;OACtB,UAAUA,UAAQ,YAAY;OAC/B;MACF,CAAC;MACF;;GAGJ,iBAAiB;AACf,WAAO,YAAY,EAAE,MAAM,aAAa,CAAC;;GAG3C,aAAa;AACX,WAAO,YAAY,EAAE,MAAM,SAAS,CAAC;;GAGvC,iBAAiB;AACf,WAAO,WAAW;AAClB,QAAI,gBAAgB,UAAU;;GAGhC,eAAe;GAChB;GACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2GJ,SAAgB,QAAQ,UAA0B,EAAE,EAAiB;CAEnE,MAAM,QAAQ,WAAW;AACzB,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,+DAA+D;CAGjF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,kBAAkB,EAAE,EACpB,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAAoB,gBAAgB;CACpE,MAAM,CAAC,OAAO,YAAY,SAAiB,GAAG;CAC9C,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,iBAAiB,sBAAsB,SAAiB,GAAG;CAClE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,eAAe,OAAe,EAAE;CACtC,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,qBAAoB,MAAc,IAAI,MAAM,KAAK;;GAGrD,kBAAkB;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;;GAExB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAA
O,WAAW,CAAC;AAGvB,iBAAgB;AACd,MAAI,CAAC,gBAAgB,iBAAiB;AACpC,gBAAa,SAAoB;AAE/B,QADgB,KAAK,GAAG,GAAG,EACd,SAAS,YACpB,QAAO,KAAK,KAAK,GAAY,MAC3B,MAAM,KAAK,SAAS,IAChB;KACE,GAAG;KACH,SAAS;KACT,UAAU,YAAY;KACvB,GACD,EACL;AAEH,WAAO;KACP;AACF,sBAAmB,GAAG;AACtB,eAAY,GAAG;;AAEjB,eAAa;IAGZ;EAAC;EAAc;EAAiB;EAAS,CAAC;CAG7C,MAAM,oBAAoB,OAAsB,KAAK;CAErD,MAAM,eAAe,aAClB,MAAwC;AACvC,KAAG,kBAAkB;AAErB,MAAI,CAAC,MAAM,MAAM,IAAI,aACnB;EAGF,MAAMC,cAAuB;GAC3B,IAAI,OAAO,EAAE,aAAa;GAC1B,MAAM;GACN,SAAS,MAAM,MAAM;GACtB;EAED,MAAMC,mBAA4B;GAChC,IAAI,OAAO,EAAE,aAAa;GAC1B,MAAM;GACN,SAAS;GACV;AAED,eAAa,SAAoB;GAAC,GAAG;GAAM;GAAa;GAAiB,CAAC;AAC1E,WAAS,GAAG;AACZ,qBAAmB,GAAG;AACtB,cAAY,GAAG;AAGf,MAAI,CAAC,UAAU,SAAS;AACtB,qBAAkB,UAAU,YAAY;AACxC,SAAM;AACN;;AAGF,kBAAgB,KAAK;AACrB,YAAU,QAAQ,SAAS,YAAY,SAAS;GAC9C;GACA,UAAU;GACV;GACA;GACD,CAAC;IAEJ;EAAC;EAAO;EAAc;EAAQ;EAAgB;EAAW;EAAa;EAAK,CAC5E;AAGD,iBAAgB;AACd,MAAI,WAAW,kBAAkB,WAAW,UAAU,SAAS;GAC7D,MAAM,iBAAiB,kBAAkB;AACzC,qBAAkB,UAAU;AAC5B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,gBAAgB;IACzC;IACA,UAAU;IACV;IACA;IACD,CAAC;;AAEJ,eAAa;IAGZ;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;CAE7D,MAAM,OAAO,kBAAkB;AAC7B,YAAU,SAAS,WAAW;AAC9B,kBAAgB,MAAM;IACrB,EAAE,CAAC;CAEN,MAAM,QAAQ,kBAAkB;AAC9B,YAAU,SAAS,OAAO;AAC1B,cAAY,EAAE,CAAC;AACf,qBAAmB,GAAG;AACtB,cAAY,GAAG;IACd,EAAE,CAAC;AAcN,QAAO;EACL,UAZsB,SAAS,KAAK,GAAY,MAAc;AAC9D,OAAI,MAAM,SAAS,SAAS,KAAK,EAAE,SAAS,eAAe,aACzD,QAAO;IACL,GAAG;IACH,SAAS;IACT,UAAU,YAAY;IACvB;AAEH,UAAO;IACP;EAIA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;AAsEH,SAAgB,cAAc,UAAgC,EAAE,EAAuB;CACrF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,qEAAqE;CAGvF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,YAAY,iBAAiB,SAAiB,GAAG;CACxD,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,aAAa,OAAwC,KAAK;CAChE,MAAM,YAAY,OAAsC,KAAK;CAC7D,MAAM,mBAAmB,OAAsB,KAAK;CACpD,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAEf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,gBAAe,MAAc,IAAI,MAAM,KAAK;;GAGhD,aAAa,WAAW;AACtB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;AACtB,eAAW,UAAU,OAAO,KAAK;AACjC,eAAW,UAAU;;GAEvB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;CAEvB,MAAM,WAAW,aACd,WAAoC;AACnC,SAAO,IAAI,SAAS,SAAS,WAAW;AACtC,iBAAc,GAAG;AACjB,eAAY,GAAG;AACf,cAAW,UAAU;AACrB,aAAU,UAAU;AAGpB,OAAI,CAAC,UAAU,SAAS;AACtB,qBAAiB,UAAU;AAC3B,UAAM;AACN;;AAGF,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,QAAQ;IACjC;IACA,UAAU;IACV;IACA;IACD,CAAC;IACF;IAEJ;EAAC;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACvD;AAGD,iBAAgB;AACd,MAAI,W
AAW,iBAAiB,WAAW,UAAU,SAAS;GAC5D,MAAM,gBAAgB,iBAAiB;AACvC,oBAAiB,UAAU;AAC3B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,eAAe;IACxC;IACA,UAAU;IACV;IACA;IACD,CAAC;;AAEJ,eAAa;IAGZ;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;AAO7D,QAAO;EACL;EACA;EACA;EACA;EACA;EACA;EACA,MAZW,kBAAkB;AAC7B,aAAU,SAAS,WAAW;AAC9B,mBAAgB,MAAM;KACrB,EAAE,CAAC;EAUJ;EACA;EACA;EACA;EACD;;;;;AAUH,SAAgB,oBAA6B;AAC3C,KAAI,OAAO,cAAc,YACvB,QAAO;AAET,QAAO,SAAS;;;;;AAMlB,eAAsB,gBAIZ;AACR,KAAI,CAAC,mBAAmB,CACtB,QAAO,EAAE,WAAW,OAAO;AAG7B,KAAI;EACF,MAAM,UAAU,MAAO,UAAkB,IAAI,gBAAgB;AAC7D,MAAI,CAAC,QACH,QAAO,EAAE,WAAW,OAAO;EAG7B,MAAM,OAAO,MAAM,QAAQ,oBAAoB;AAC/C,SAAO;GACL,WAAW;GACX,SAAS,KAAK;GACd,QAAQ,KAAK;GACd;SACK;AACN,SAAO,EAAE,WAAW,OAAO;;;AAI/B,sBAAe;CAAE;CAAmB;CAAe;CAAoB"}
1
+ {"version":3,"file":"index.mjs","names":["currentResolve: ((text: string) => void) | null","currentReject: ((error: Error) => void) | null","gerbilWorker: GerbilWorker","options","userMessage: Message","assistantMessage: Message","KOKORO_BROWSER_VOICES: BrowserVoiceInfo[]","SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[]","TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n>","audioData: Float32Array","sampleRate: number","audioContext: AudioContext | null","progress: STTProgress","e: any"],"sources":["../../src/browser/index.ts"],"sourcesContent":["/**\n * Gerbil Browser Support\n *\n * Run LLMs directly in the browser with WebGPU acceleration.\n *\n * @example useChat (React)\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <form onSubmit={handleSubmit}>\n * {messages.map(m => <div key={m.id}>{m.role}: {m.content}</div>)}\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * </form>\n * );\n * }\n * ```\n *\n * @example useCompletion (React)\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading } = useCompletion();\n * if (isLoading) return <div>Loading...</div>;\n * return <button onClick={() => complete(\"Write a haiku\")}>{completion}</button>;\n * }\n * ```\n *\n * @example Low-level API\n * ```ts\n * import { createGerbilWorker } from \"@tryhamster/gerbil/browser\";\n *\n * const gerbil = await createGerbilWorker({\n * modelId: \"qwen3-0.6b\",\n * onToken: (token) => console.log(token.text),\n * });\n * await gerbil.generate(\"Hello!\");\n * gerbil.terminate();\n * ```\n */\n\nimport { resolveModel } from \"../core/models.js\";\n\n// Re-export models and types (browser-safe, no Node.js dependencies)\nexport { BUILTIN_MODELS } from \"../core/models.js\";\nexport type * from \"../core/types.js\";\n\n// NOTE: We intentionally do NOT export Gerbil from core here.\n// The core Gerbil class has Node.js code paths (chrome-backend/puppeteer)\n// that break browser bundlers. 
Use createGerbilWorker() instead for browser.\n\n// ============================================\n// Types\n// ============================================\n\nexport type WorkerProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n error?: string;\n};\n\nexport type WorkerToken = {\n status: \"token\";\n text: string;\n state: \"thinking\" | \"answering\";\n numTokens: number;\n tps: number;\n};\n\nexport type WorkerComplete = {\n status: \"complete\";\n text: string;\n numTokens: number;\n totalTime: number;\n tps: number;\n};\n\nexport type GerbilWorkerOptions = {\n /** Model ID to load (default: \"qwen3-0.6b\") */\n modelId?: string;\n /** Called during model loading with progress updates */\n onProgress?: (progress: WorkerProgress) => void;\n /** Called for each token during streaming generation */\n onToken?: (token: WorkerToken) => void;\n /** Called when generation is complete */\n onComplete?: (result: WorkerComplete) => void;\n /** Called on errors */\n onError?: (error: string) => void;\n /** Worker script URL (auto-detected if not provided) */\n workerUrl?: string;\n};\n\nexport type GenerateStreamOptions = {\n /** Maximum tokens to generate */\n maxTokens?: number;\n /** Temperature for sampling (0 = deterministic) */\n temperature?: number;\n /** Top-p nucleus sampling */\n topP?: number;\n /** Top-k sampling */\n topK?: number;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** System prompt */\n system?: string;\n /** Image URLs or data URIs (for vision models) */\n images?: string[];\n /** Conversation history for multi-turn (includes all previous messages) */\n history?: Array<{ role: \"user\" | \"assistant\" | \"system\"; content: string }>;\n};\n\nexport type GerbilWorker = {\n /** Generate text with streaming */\n generate: (prompt: string, options?: GenerateStreamOptions) => Promise<string>;\n /** Interrupt current generation */\n interrupt: () => void;\n /** Reset conversation cache */\n reset: () => void;\n /** Terminate the worker */\n terminate: () => void;\n /** Check if model is loaded */\n isReady: () => boolean;\n};\n\n// ============================================\n// Web Worker Factory\n// ============================================\n\n/**\n * Create a Gerbil worker for streaming WebGPU inference\n *\n * Uses a Web Worker to keep the UI responsive during model loading\n * and text generation, with real-time token streaming.\n */\nexport async function createGerbilWorker(options: GerbilWorkerOptions = {}): Promise<GerbilWorker> {\n const { modelId = \"qwen3-0.6b\", onProgress, onToken, onComplete, onError } = options;\n\n // Resolve model to HuggingFace path\n const source = resolveModel(modelId);\n\n return new Promise((resolve, reject) => {\n // Create inline worker from the worker code\n const workerCode = `\n import {\n AutoTokenizer,\n AutoModelForCausalLM,\n AutoProcessor,\n AutoModelForImageTextToText,\n RawImage,\n TextStreamer,\n InterruptableStoppingCriteria,\n env,\n } from \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n\n // Enable IndexedDB caching for browser (prevents re-downloading models)\n env.useBrowserCache = true;\n env.allowLocalModels = false;\n\n class ModelPipeline {\n static tokenizer = null;\n static model = null;\n static processor = null;\n static visionModel = null;\n 
static modelId = \"\";\n static isVision = false;\n\n static async getInstance(modelId, options = {}, progressCallback) {\n if (this.modelId !== modelId) {\n this.tokenizer = null;\n this.model = null;\n this.processor = null;\n this.visionModel = null;\n }\n this.modelId = modelId;\n \n // Detect vision models\n this.isVision = options.vision || \n modelId.toLowerCase().includes(\"ministral\") ||\n modelId.toLowerCase().includes(\"vision\") ||\n modelId.toLowerCase().includes(\"vlm\");\n\n const dtype = options.dtype || \"q4f16\";\n const device = options.device || \"webgpu\";\n\n if (this.isVision) {\n // Load vision model components\n // Note: Don't specify dtype for vision models - let transformers.js pick defaults\n if (!this.processor) {\n this.processor = await AutoProcessor.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.visionModel) {\n this.visionModel = await AutoModelForImageTextToText.from_pretrained(modelId, {\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n processor: this.processor, \n model: this.visionModel, \n tokenizer: this.processor.tokenizer,\n isVision: true \n };\n } else {\n // Load text-only model components\n if (!this.tokenizer) {\n this.tokenizer = await AutoTokenizer.from_pretrained(modelId, {\n progress_callback: progressCallback,\n });\n }\n if (!this.model) {\n this.model = await AutoModelForCausalLM.from_pretrained(modelId, {\n dtype,\n device,\n progress_callback: progressCallback,\n });\n }\n return { \n tokenizer: this.tokenizer, \n model: this.model, \n isVision: false \n };\n }\n }\n }\n\n const stoppingCriteria = new InterruptableStoppingCriteria();\n let pastKeyValuesCache = null;\n\n async function load(data) {\n const { modelId, options = {} } = data;\n self.postMessage({ status: \"loading\", message: \"Loading model...\" });\n\n const downloadState = {\n downloading: new Set(),\n completed: new Set(),\n isDownloading: false,\n };\n\n try {\n const result = await ModelPipeline.getInstance(\n modelId,\n options,\n (progress) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n if (pct < 100) {\n downloadState.downloading.add(progress.file);\n downloadState.isDownloading = true;\n } else if (pct === 100) {\n downloadState.downloading.delete(progress.file);\n downloadState.completed.add(progress.file);\n }\n if (downloadState.isDownloading) {\n self.postMessage({\n status: \"downloading\",\n file: progress.file,\n progress: pct,\n downloadCount: downloadState.downloading.size,\n totalFiles: downloadState.completed.size + downloadState.downloading.size,\n });\n }\n }\n }\n );\n\n self.postMessage({ status: \"loading\", message: \"Compiling shaders...\" });\n \n // Warmup differs for vision vs text models\n if (result.isVision) {\n // Vision models need both text and vision warmup\n // Text warmup first\n const textWarmupInputs = result.tokenizer(\"hello\");\n await result.model.generate({ ...textWarmupInputs, max_new_tokens: 1 });\n \n // Vision warmup with synthetic image\n self.postMessage({ status: \"loading\", message: \"Warming up vision encoder...\" });\n try {\n // Create a tiny 8x8 test image using OffscreenCanvas\n const canvas = new OffscreenCanvas(8, 8);\n const ctx = canvas.getContext(\"2d\");\n ctx.fillStyle = \"red\";\n ctx.fillRect(0, 0, 8, 8);\n const blob = await canvas.convertToBlob({ type: \"image/png\" });\n const warmupImage = await RawImage.fromBlob(blob);\n \n // Process with vision pipeline\n const 
warmupContent = [{ type: \"image\" }, { type: \"text\", text: \"hi\" }];\n const warmupMessages = [{ role: \"user\", content: warmupContent }];\n const warmupPrompt = result.processor.apply_chat_template(warmupMessages, { add_generation_prompt: true });\n const warmupInputs = await result.processor(warmupImage, warmupPrompt, { add_special_tokens: false });\n \n // Run vision warmup generation\n await result.model.generate({\n ...warmupInputs,\n max_new_tokens: 1,\n });\n } catch (warmupErr) {\n console.warn(\"Vision warmup failed (non-fatal):\", warmupErr);\n }\n } else {\n const warmupInputs = result.tokenizer(\"a\");\n await result.model.generate({ ...warmupInputs, max_new_tokens: 1 });\n }\n\n self.postMessage({ status: \"ready\", isVision: result.isVision });\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generate(data) {\n const { messages, images = [], options = {} } = data;\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n\n try {\n const result = await ModelPipeline.getInstance(ModelPipeline.modelId, {});\n \n // Route to vision or text generation\n if (result.isVision && images.length > 0) {\n await generateVision(result, messages, images, options);\n } else {\n await generateText(result, messages, options);\n }\n } catch (error) {\n self.postMessage({ status: \"error\", error: error.message || String(error) });\n }\n }\n\n async function generateText(result, messages, options) {\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 20, thinking = false } = options;\n const { tokenizer, model } = result;\n\n const inputs = tokenizer.apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking,\n });\n\n let state = \"answering\";\n const [START_THINKING_TOKEN_ID, END_THINKING_TOKEN_ID] = tokenizer.encode(\n \"<think></think>\",\n { add_special_tokens: false }\n );\n\n let startTime = null;\n let numTokens = 0;\n\n const tokenCallback = (tokens) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tokenId = Number(tokens[0]);\n if (tokenId === START_THINKING_TOKEN_ID) state = \"thinking\";\n else if (tokenId === END_THINKING_TOKEN_ID) state = \"answering\";\n };\n\n const streamCallback = (text) => {\n const tps = startTime ? (numTokens / (performance.now() - startTime)) * 1000 : 0;\n self.postMessage({ status: \"token\", text, state, numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n token_callback_function: tokenCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const { past_key_values, sequences } = await model.generate({\n ...inputs,\n past_key_values: pastKeyValuesCache,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n max_new_tokens: maxTokens,\n streamer,\n stopping_criteria: stoppingCriteria,\n return_dict_in_generate: true,\n });\n\n pastKeyValuesCache = past_key_values;\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: true });\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n async function generateVision(result, messages, images, options) {\n const { maxTokens = 2048, temperature = 0.7, topP = 0.9, topK = 20 } = options;\n const { processor, model, tokenizer } = result;\n\n self.postMessage({ status: \"progress\", message: \"Preparing vision request...\" });\n\n // Build message content with image placeholders and text\n const lastMessage = messages[messages.length - 1];\n const content = [];\n for (const _ of images) {\n content.push({ type: \"image\" });\n }\n content.push({ type: \"text\", text: lastMessage.content });\n\n // For vision models, include a brief system instruction for concise responses\n // Note: Vision processors handle system differently than text models\n const visionMessages = [\n { role: \"system\", content: \"You are a helpful assistant. Be concise and direct in your responses.\" },\n { role: \"user\", content }\n ];\n\n // Apply chat template with generation prompt\n const chatPrompt = processor.apply_chat_template(visionMessages, {\n add_generation_prompt: true\n });\n\n // Load images (handle both string URLs and { source: string } objects)\n self.postMessage({ status: \"progress\", message: \"Loading images...\" });\n const loadedImages = await Promise.all(\n images.map(img => {\n const url = typeof img === \"string\" ? img : img.source;\n return RawImage.fromURL(url);\n })\n );\n self.postMessage({ status: \"progress\", message: \"Processing inputs...\" });\n\n // Process inputs\n const inputs = await processor(\n loadedImages.length === 1 ? loadedImages[0] : loadedImages,\n chatPrompt,\n { add_special_tokens: false }\n );\n self.postMessage({ status: \"progress\", message: \"Generating response...\" });\n\n let startTime = null;\n let numTokens = 0;\n\n const streamCallback = (text) => {\n startTime ??= performance.now();\n numTokens += 1;\n const tps = (numTokens / (performance.now() - startTime)) * 1000;\n self.postMessage({ status: \"token\", text, state: \"answering\", numTokens, tps });\n };\n\n const streamer = new TextStreamer(tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: streamCallback,\n });\n\n self.postMessage({ status: \"start\" });\n\n const outputs = await model.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n do_sample: temperature > 0,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n streamer,\n stopping_criteria: stoppingCriteria,\n });\n\n // Decode output (skip prompt)\n const inputLength = inputs.input_ids.dims?.at(-1) || 0;\n const decoded = processor.batch_decode(\n outputs.slice(null, [inputLength, null]),\n { skip_special_tokens: true }\n );\n\n const endTime = performance.now();\n const totalTime = startTime ? endTime - startTime : 0;\n\n self.postMessage({\n status: \"complete\",\n text: decoded[0] || \"\",\n numTokens,\n totalTime,\n tps: totalTime > 0 ? 
(numTokens / totalTime) * 1000 : 0,\n });\n }\n\n self.addEventListener(\"message\", async (e) => {\n const { type, ...data } = e.data;\n switch (type) {\n case \"load\": await load(data); break;\n case \"generate\": stoppingCriteria.reset(); await generate(data); break;\n case \"interrupt\": stoppingCriteria.interrupt(); break;\n case \"reset\": pastKeyValuesCache = null; stoppingCriteria.reset(); break;\n }\n });\n\n self.postMessage({ status: \"init\" });\n `;\n\n const blob = new Blob([workerCode], { type: \"application/javascript\" });\n const workerUrl = URL.createObjectURL(blob);\n const worker = new Worker(workerUrl, { type: \"module\" });\n\n let isReady = false;\n let currentResolve: ((text: string) => void) | null = null;\n let currentReject: ((error: Error) => void) | null = null;\n let _generatedText = \"\";\n\n worker.onmessage = (e) => {\n const msg = e.data;\n\n switch (msg.status) {\n case \"init\":\n // Worker initialized, load the model\n worker.postMessage({ type: \"load\", modelId: source.path });\n break;\n\n case \"loading\":\n case \"downloading\":\n onProgress?.(msg as WorkerProgress);\n break;\n\n case \"ready\":\n isReady = true;\n onProgress?.(msg as WorkerProgress);\n resolve(gerbilWorker);\n break;\n\n case \"start\":\n _generatedText = \"\";\n break;\n\n case \"token\":\n _generatedText += msg.text;\n onToken?.(msg as WorkerToken);\n break;\n\n case \"complete\":\n onComplete?.(msg as WorkerComplete);\n currentResolve?.(msg.text);\n currentResolve = null;\n currentReject = null;\n break;\n\n case \"error\":\n onError?.(msg.error);\n onProgress?.({ status: \"error\", error: msg.error });\n if (currentReject) {\n currentReject(new Error(msg.error));\n currentResolve = null;\n currentReject = null;\n } else {\n reject(new Error(msg.error));\n }\n break;\n }\n };\n\n worker.onerror = (e) => {\n const error = e.message || \"Worker error\";\n onError?.(error);\n reject(new Error(error));\n };\n\n const gerbilWorker: GerbilWorker = {\n generate: (prompt: string, options: GenerateStreamOptions = {}) =>\n new Promise((res, rej) => {\n currentResolve = res;\n currentReject = rej;\n\n const system = options.system || \"You are a helpful assistant.\";\n\n // Use history if provided (for multi-turn conversations)\n // Otherwise, just use system + current prompt\n const messages = options.history\n ? [{ role: \"system\", content: system }, ...options.history]\n : [\n { role: \"system\", content: system },\n { role: \"user\", content: prompt },\n ];\n\n // When using history, reset KV cache first to avoid position mismatches\n // (full history is provided, so we don't need cached context)\n if (options.history) {\n worker.postMessage({ type: \"reset\" });\n }\n\n worker.postMessage({\n type: \"generate\",\n messages,\n images: options.images || [],\n options: {\n maxTokens: options.maxTokens ?? (options.images?.length ? 2048 : 256),\n temperature: options.temperature ?? 0.7,\n topP: options.topP ?? 0.9,\n topK: options.topK ?? 20,\n thinking: options.thinking ?? 
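// Illustrative sketch of driving this worker API directly, outside the React hooks, assuming a WebGPU-capable browser:\n // const worker = await createGerbilWorker({ modelId: \"qwen3-0.6b\", onToken: (t) => console.log(t.text) });\n // const reply = await worker.generate(\"Write a haiku\", { maxTokens: 128 });\n // worker.reset(); // clear the KV cache between unrelated conversations\n // worker.terminate(); // dispose the worker and revoke its blob URL\n 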
false,\n },\n });\n }),\n\n interrupt: () => {\n worker.postMessage({ type: \"interrupt\" });\n },\n\n reset: () => {\n worker.postMessage({ type: \"reset\" });\n },\n\n terminate: () => {\n worker.terminate();\n URL.revokeObjectURL(workerUrl);\n },\n\n isReady: () => isReady,\n };\n });\n}\n\n// ============================================\n// React Hooks\n// ============================================\n\n/** Message in a chat conversation */\nexport type Message = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n /** Attached images (URLs or data URIs) - for vision models */\n images?: string[];\n};\n\n/** Loading progress state */\nexport type LoadingProgress = {\n status: \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n /** Number of files being downloaded (0 = loading from cache) */\n downloadCount?: number;\n /** Total files to process */\n totalFiles?: number;\n};\n\n/** Options for useChat hook */\nexport type UseChatOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens per response */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Initial messages */\n initialMessages?: Message[];\n /** Auto-load model on mount (default: false - loads on first generate or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Return type for useChat hook */\nexport type UseChatReturn = {\n /** Chat messages */\n messages: Message[];\n /** Current input value */\n input: string;\n /** Set input value */\n setInput: (value: string) => void;\n /** Submit current input */\n handleSubmit: (e?: { preventDefault?: () => void }) => void;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating a response */\n isGenerating: boolean;\n /** Current thinking content (streaming) */\n thinking: string;\n /** Stop generation */\n stop: () => void;\n /** Clear all messages */\n clear: () => void;\n /** Current tokens per second */\n tps: number;\n /** Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed when autoLoad is false) */\n load: () => void;\n /** Currently attached images (for next message) */\n attachedImages: string[];\n /** Attach an image to the next message */\n attachImage: (imageUrl: string) => void;\n /** Remove an attached image */\n removeImage: (index: number) => void;\n /** Clear all attached images */\n clearImages: () => void;\n /** Send message with specific images (convenience method) */\n sendWithImages: (text: string, images: string[]) => void;\n};\n\n/**\n * React hook for chat with local LLM\n *\n * @example\n * ```tsx\n * import { useChat } from \"@tryhamster/gerbil/browser\";\n *\n * function Chat() {\n * const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();\n *\n * if (isLoading) return <div>Loading model...</div>;\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <form onSubmit={handleSubmit}>\n * <input value={input} onChange={e => setInput(e.target.value)} />\n * <button disabled={isGenerating}>Send</button>\n * </form>\n * </div>\n * );\n 
* }\n * ```\n */\nexport function useChat(options: UseChatOptions = {}): UseChatReturn {\n // Lazy import React to avoid SSR issues\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n initialMessages = [],\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<Message[]>(initialMessages);\n const [input, setInput] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [thinking, setThinking] = useState<string>(\"\");\n const [currentResponse, setCurrentResponse] = useState<string>(\"\");\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [attachedImages, setAttachedImages] = useState<string[]>([]);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const messageIdRef = useRef<number>(0);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on generate\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize worker\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCurrentResponse((r: string) => r + token.text);\n }\n },\n onComplete: () => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n // Commit response to messages when generation completes\n useEffect(() => {\n if (!isGenerating && currentResponse) {\n setMessages((msgs: Message[]) => {\n const lastMsg = msgs.at(-1);\n if (lastMsg?.role === \"assistant\") {\n return msgs.map((m: Message, i: number) =>\n i === msgs.length - 1\n ? { ...m, content: currentResponse, thinking: thinking || undefined }\n : m,\n );\n }\n return msgs;\n });\n setCurrentResponse(\"\");\n setThinking(\"\");\n }\n }, [isGenerating, currentResponse, thinking]);\n\n // Store pending message for auto-load scenario\n const pendingMessageRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[]>([]);\n\n // Image management functions\n const attachImage = useCallback((imageUrl: string) => {\n setAttachedImages((imgs: string[]) => [...imgs, imageUrl]);\n }, []);\n\n const removeImage = useCallback((index: number) => {\n setAttachedImages((imgs: string[]) => imgs.filter((_: string, i: number) => i !== index));\n }, []);\n\n const clearImages = useCallback(() => {\n setAttachedImages([]);\n }, []);\n\n // Internal function to send a message with specific images\n const sendMessageWithImages = useCallback(\n (text: string, images: string[]) => {\n if (!text.trim() || isGenerating) {\n return;\n }\n\n messageIdRef.current += 1;\n const userMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"user\",\n content: text.trim(),\n images: images.length > 0 ? images : undefined,\n };\n\n messageIdRef.current += 1;\n const assistantMessage: Message = {\n id: `msg-${messageIdRef.current}`,\n role: \"assistant\",\n content: \"\",\n };\n\n setMessages((msgs: Message[]) => [...msgs, userMessage, assistantMessage]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n\n // If worker not loaded, trigger load and queue the message\n if (!workerRef.current) {\n pendingMessageRef.current = text.trim();\n pendingImagesRef.current = images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(text.trim(), {\n system,\n thinking: enableThinking,\n maxTokens: images.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: images.length > 0 ? 
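// With images attached, the token budget is floored at 2048 above and the worker routes the request to its vision path.\n // On a cold start the message is parked in pendingMessageRef/pendingImagesRef and replayed once the model is ready.\n 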
images : undefined,\n });\n },\n [isGenerating, system, enableThinking, maxTokens, temperature, load],\n );\n\n const handleSubmit = useCallback(\n (e?: { preventDefault?: () => void }) => {\n e?.preventDefault?.();\n\n if (!input.trim() || isGenerating) {\n return;\n }\n\n // Send with any attached images\n sendMessageWithImages(input, attachedImages);\n setInput(\"\");\n setAttachedImages([]);\n },\n [input, isGenerating, attachedImages, sendMessageWithImages],\n );\n\n // Convenience method to send with specific images\n const sendWithImages = useCallback(\n (text: string, images: string[]) => {\n sendMessageWithImages(text, images);\n },\n [sendMessageWithImages],\n );\n\n // Process pending message when worker becomes ready\n useEffect(() => {\n if (isReady && pendingMessageRef.current && workerRef.current) {\n const pendingContent = pendingMessageRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingMessageRef.current = null;\n pendingImagesRef.current = [];\n setIsGenerating(true);\n workerRef.current.generate(pendingContent, {\n system,\n thinking: enableThinking,\n maxTokens: pendingImages.length > 0 ? Math.max(maxTokens, 2048) : maxTokens,\n temperature,\n images: pendingImages.length > 0 ? pendingImages : undefined,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n const clear = useCallback(() => {\n workerRef.current?.reset();\n setMessages([]);\n setCurrentResponse(\"\");\n setThinking(\"\");\n setAttachedImages([]);\n }, []);\n\n // Update last message with streaming content\n const displayMessages = messages.map((m: Message, i: number) => {\n if (i === messages.length - 1 && m.role === \"assistant\" && isGenerating) {\n return { ...m, content: currentResponse, thinking: thinking || undefined };\n }\n return m;\n });\n\n return {\n messages: displayMessages,\n input,\n setInput,\n handleSubmit,\n isLoading,\n loadingProgress,\n isGenerating,\n thinking,\n stop,\n clear,\n tps,\n isReady,\n error,\n load,\n attachedImages,\n attachImage,\n removeImage,\n clearImages,\n sendWithImages,\n };\n}\n\n/** Options for useCompletion hook */\nexport type UseCompletionOptions = {\n /** Model ID (default: \"qwen3-0.6b\") */\n model?: string;\n /** System prompt */\n system?: string;\n /** Enable thinking mode (Qwen3) */\n thinking?: boolean;\n /** Max tokens */\n maxTokens?: number;\n /** Temperature (0-2) */\n temperature?: number;\n /** Auto-load model on mount (default: false - loads on first complete() or load()) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n};\n\n/** Options for single completion call */\nexport type CompleteOptions = {\n /** Image URLs or data URIs to analyze (for vision models) */\n images?: string[];\n};\n\n/** Return type for useCompletion hook */\nexport type UseCompletionReturn = {\n /** Generated completion */\n completion: string;\n /** Thinking content (if enabled) */\n thinking: string;\n /** Generate completion (optionally with images for vision models) */\n complete: (prompt: string, options?: CompleteOptions) => Promise<string>;\n /** Whether model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: LoadingProgress | null;\n /** Whether generating */\n isGenerating: boolean;\n /** Stop generation */\n stop: () => void;\n /** Current tokens per second */\n tps: number;\n /** 
Whether model is ready */\n isReady: boolean;\n /** Error message if any */\n error: string | null;\n /** Load the model (only needed when autoLoad is false) */\n load: () => void;\n};\n\n/**\n * React hook for text completion with local LLM\n *\n * @example\n * ```tsx\n * import { useCompletion } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * const { complete, completion, isLoading, isGenerating } = useCompletion();\n *\n * if (isLoading) return <div>Loading...</div>;\n *\n * return (\n * <div>\n * <button onClick={() => complete(\"Write a haiku\")}>Generate</button>\n * <p>{completion}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useCompletion(options: UseCompletionOptions = {}): UseCompletionReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useCompletion requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"qwen3-0.6b\",\n system = \"You are a helpful assistant.\",\n thinking: enableThinking = false,\n maxTokens = 512,\n temperature = 0.7,\n autoLoad = false,\n onReady,\n onError,\n } = options;\n\n const [completion, setCompletion] = useState<string>(\"\");\n const [thinking, setThinking] = useState<string>(\"\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<LoadingProgress | null>(null);\n const [isGenerating, setIsGenerating] = useState<boolean>(false);\n const [tps, setTps] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const workerRef = useRef<GerbilWorker | null>(null);\n const resolveRef = useRef<((text: string) => void) | null>(null);\n const rejectRef = useRef<((err: Error) => void) | null>(null);\n const pendingPromptRef = useRef<string | null>(null);\n const pendingImagesRef = useRef<string[] | undefined>(undefined);\n const mountedRef = useRef<boolean>(true);\n\n // Load function - can be called manually or auto-triggered on complete()\n const load = useCallback(() => {\n if (workerRef.current || isLoading) {\n return;\n }\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n useEffect(() => {\n if (!shouldLoad) {\n return;\n }\n\n if (!isWebGPUSupported()) {\n setError(\"WebGPU not supported. 
Use Chrome/Edge 113+.\");\n setIsLoading(false);\n onError?.(\"WebGPU not supported\");\n return;\n }\n\n mountedRef.current = true;\n\n createGerbilWorker({\n modelId: model,\n onProgress: (p) => {\n if (!mountedRef.current) {\n return;\n }\n setLoadingProgress(p);\n if (p.status === \"ready\") {\n setIsLoading(false);\n setIsReady(true);\n onReady?.();\n }\n },\n onToken: (token) => {\n if (!mountedRef.current) {\n return;\n }\n setTps(token.tps);\n if (token.state === \"thinking\") {\n setThinking((t: string) => t + token.text);\n } else {\n setCompletion((c: string) => c + token.text);\n }\n },\n onComplete: (result) => {\n if (!mountedRef.current) {\n return;\n }\n setIsGenerating(false);\n resolveRef.current?.(result.text);\n resolveRef.current = null;\n },\n onError: (err) => {\n if (!mountedRef.current) {\n return;\n }\n setError(err);\n setIsGenerating(false);\n onError?.(err);\n },\n })\n .then((worker) => {\n if (mountedRef.current) {\n workerRef.current = worker;\n } else {\n worker.terminate();\n }\n })\n .catch((err) => {\n if (mountedRef.current) {\n setError(err.message);\n setIsLoading(false);\n onError?.(err.message);\n }\n });\n\n return () => {\n mountedRef.current = false;\n workerRef.current?.terminate();\n };\n }, [model, shouldLoad]);\n\n const complete = useCallback(\n (prompt: string, completeOptions?: CompleteOptions): Promise<string> => {\n return new Promise((resolve, reject) => {\n setCompletion(\"\");\n setThinking(\"\");\n resolveRef.current = resolve;\n rejectRef.current = reject;\n\n // If worker not loaded, trigger load and queue the prompt\n if (!workerRef.current) {\n pendingPromptRef.current = prompt;\n pendingImagesRef.current = completeOptions?.images;\n load();\n return;\n }\n\n setIsGenerating(true);\n workerRef.current.generate(prompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: completeOptions?.images,\n });\n });\n },\n [system, enableThinking, maxTokens, temperature, load],\n );\n\n // Process pending prompt when worker becomes ready\n useEffect(() => {\n if (isReady && pendingPromptRef.current && workerRef.current) {\n const pendingPrompt = pendingPromptRef.current;\n const pendingImages = pendingImagesRef.current;\n pendingPromptRef.current = null;\n pendingImagesRef.current = undefined;\n setIsGenerating(true);\n workerRef.current.generate(pendingPrompt, {\n system,\n thinking: enableThinking,\n maxTokens,\n temperature,\n images: pendingImages,\n });\n }\n }, [isReady, system, enableThinking, maxTokens, temperature]);\n\n const stop = useCallback(() => {\n workerRef.current?.interrupt();\n setIsGenerating(false);\n }, []);\n\n return {\n completion,\n thinking,\n complete,\n isLoading,\n loadingProgress,\n isGenerating,\n stop,\n tps,\n isReady,\n error,\n load,\n };\n}\n\n// ============================================\n// Text-to-Speech (useSpeech hook)\n// ============================================\n\n/** TTS loading progress */\nexport type TTSProgress = {\n status: \"idle\" | \"loading\" | \"downloading\" | \"ready\" | \"error\";\n message?: string;\n file?: string;\n progress?: number;\n error?: string;\n};\n\n/** Available TTS models */\nexport type TTSModelId = \"kokoro-82m\" | \"supertonic-66m\";\n\n/** Voice info for TTS models */\nexport type BrowserVoiceInfo = {\n id: string;\n name: string;\n gender: \"male\" | \"female\";\n language: string;\n description: string;\n};\n\n/** Kokoro voice definitions (24kHz, high quality) */\nconst KOKORO_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: 
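// Voice ids encode accent and gender: af/am = American female/male, bf/bm = British female/male.\n 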
\"af_heart\",\n name: \"Heart\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, highest quality (Grade A)\",\n },\n {\n id: \"af_bella\",\n name: \"Bella\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, warm and friendly (Grade A-)\",\n },\n {\n id: \"af_nicole\",\n name: \"Nicole\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, soft and gentle\",\n },\n {\n id: \"af_sarah\",\n name: \"Sarah\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, clear and professional\",\n },\n {\n id: \"af_sky\",\n name: \"Sky\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, young and energetic\",\n },\n {\n id: \"af_alloy\",\n name: \"Alloy\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_aoede\",\n name: \"Aoede\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, mythical\",\n },\n {\n id: \"af_jessica\",\n name: \"Jessica\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_kore\",\n name: \"Kore\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_nova\",\n name: \"Nova\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"af_river\",\n name: \"River\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female\",\n },\n {\n id: \"am_fenrir\",\n name: \"Fenrir\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, best quality\",\n },\n {\n id: \"am_michael\",\n name: \"Michael\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, warm and friendly\",\n },\n { id: \"am_adam\", name: \"Adam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_echo\", name: \"Echo\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_eric\", name: \"Eric\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_liam\", name: \"Liam\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_onyx\", name: \"Onyx\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n { id: \"am_puck\", name: \"Puck\", gender: \"male\", language: \"en-us\", description: \"American male\" },\n {\n id: \"am_santa\",\n name: \"Santa\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, festive\",\n },\n {\n id: \"bf_emma\",\n name: \"Emma\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, elegant and clear\",\n },\n {\n id: \"bf_isabella\",\n name: \"Isabella\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, sophisticated\",\n },\n {\n id: \"bf_alice\",\n name: \"Alice\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bf_lily\",\n name: \"Lily\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female\",\n },\n {\n id: \"bm_george\",\n name: \"George\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, distinguished\",\n },\n {\n id: \"bm_lewis\",\n name: \"Lewis\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, friendly\",\n },\n {\n id: \"bm_daniel\",\n name: \"Daniel\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British 
male\",\n },\n { id: \"bm_fable\", name: \"Fable\", gender: \"male\", language: \"en-gb\", description: \"British male\" },\n];\n\n/** Supertonic voice definitions (44.1kHz, faster) */\nconst SUPERTONIC_BROWSER_VOICES: BrowserVoiceInfo[] = [\n {\n id: \"F1\",\n name: \"Female 1\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 1 - Clear and natural\",\n },\n {\n id: \"F2\",\n name: \"Female 2\",\n gender: \"female\",\n language: \"en\",\n description: \"Female voice 2 - Warm and expressive\",\n },\n {\n id: \"M1\",\n name: \"Male 1\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 1 - Deep and confident\",\n },\n {\n id: \"M2\",\n name: \"Male 2\",\n gender: \"male\",\n language: \"en\",\n description: \"Male voice 2 - Friendly and casual\",\n },\n];\n\n/** TTS model configuration */\nconst TTS_MODELS: Record<\n TTSModelId,\n { repo: string; defaultVoice: string; sampleRate: number; voices: BrowserVoiceInfo[] }\n> = {\n \"kokoro-82m\": {\n repo: \"onnx-community/Kokoro-82M-v1.0-ONNX\",\n defaultVoice: \"af_heart\",\n sampleRate: 24000,\n voices: KOKORO_BROWSER_VOICES,\n },\n \"supertonic-66m\": {\n repo: \"onnx-community/Supertonic-TTS-ONNX\",\n defaultVoice: \"F1\",\n sampleRate: 44100,\n voices: SUPERTONIC_BROWSER_VOICES,\n },\n};\n\n/** Options for useSpeech hook */\nexport type UseSpeechOptions = {\n /** TTS model to use (default: \"kokoro-82m\") */\n model?: TTSModelId;\n /** Default voice ID (default: model's default voice) */\n voice?: string;\n /** Speech speed multiplier (default: 1.0) */\n speed?: number;\n /** Auto-load TTS model on mount (default: false) */\n autoLoad?: boolean;\n /** Called when model is ready */\n onReady?: () => void;\n /** Called on error */\n onError?: (error: string) => void;\n /** Called when speech starts */\n onStart?: () => void;\n /** Called when speech ends */\n onEnd?: () => void;\n};\n\n/** Return type for useSpeech hook */\nexport type UseSpeechReturn = {\n /** Speak text aloud */\n speak: (text: string, options?: { voice?: string; speed?: number }) => Promise<void>;\n /** Stop current speech */\n stop: () => void;\n /** Whether TTS model is loading */\n isLoading: boolean;\n /** Loading progress */\n loadingProgress: TTSProgress | null;\n /** Whether currently speaking */\n isSpeaking: boolean;\n /** Whether TTS model is ready */\n isReady: boolean;\n /** Load the TTS model */\n load: () => void;\n /** Error message if any */\n error: string | null;\n /** List available voices for current model */\n listVoices: () => BrowserVoiceInfo[];\n /** Current voice ID */\n currentVoice: string;\n /** Set current voice */\n setVoice: (voiceId: string) => void;\n /** Current speed */\n currentSpeed: number;\n /** Set speed */\n setSpeed: (speed: number) => void;\n /** Current TTS model ID */\n currentModel: TTSModelId;\n /** Sample rate for current model (24000 for Kokoro, 44100 for Supertonic) */\n sampleRate: number;\n};\n\n/**\n * React hook for text-to-speech with Web Audio API playback\n *\n * Supports both Kokoro (24kHz, high quality) and Supertonic (44.1kHz, faster).\n *\n * @example\n * ```tsx\n * import { useSpeech } from \"@tryhamster/gerbil/browser\";\n *\n * function App() {\n * // Default: Kokoro TTS\n * const { speak, stop, isLoading, isSpeaking, listVoices, setVoice } = useSpeech();\n *\n * // Or use Supertonic (44.1kHz, faster)\n * // const { speak, listVoices } = useSpeech({ model: \"supertonic-66m\" });\n *\n * if (isLoading) return <div>Loading TTS...</div>;\n *\n * return (\n * <div>\n * 
<select onChange={e => setVoice(e.target.value)}>\n * {listVoices().map(v => (\n * <option key={v.id} value={v.id}>{v.name}</option>\n * ))}\n * </select>\n * <button onClick={() => speak(\"Hello world!\")}>\n * {isSpeaking ? \"Speaking...\" : \"Speak\"}\n * </button>\n * {isSpeaking && <button onClick={stop}>Stop</button>}\n * </div>\n * );\n * }\n * ```\n */\nexport function useSpeech(options: UseSpeechOptions = {}): UseSpeechReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useSpeech requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model: modelId = \"kokoro-82m\",\n speed: defaultSpeed = 1.0,\n autoLoad = false,\n onReady,\n onError,\n onStart,\n onEnd,\n } = options;\n\n // Get model config\n const modelConfig = TTS_MODELS[modelId];\n const defaultVoice = options.voice || modelConfig.defaultVoice;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<TTSProgress | null>(null);\n const [isSpeaking, setIsSpeaking] = useState<boolean>(false);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n const [currentVoice, setCurrentVoice] = useState<string>(defaultVoice);\n const [currentSpeed, setCurrentSpeed] = useState<number>(defaultSpeed);\n\n const ttsRef = useRef<any>(null);\n const voiceEmbeddingsRef = useRef<Map<string, Float32Array>>(new Map());\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const modelIdRef = useRef<TTSModelId>(modelId);\n\n // Voice list based on selected model\n const listVoices = useCallback((): BrowserVoiceInfo[] => {\n return modelConfig.voices;\n }, [modelConfig.voices]);\n\n // Load function\n const load = useCallback(() => {\n if (ttsRef.current || isLoading) return;\n setIsLoading(true);\n setShouldLoad(true);\n }, [isLoading]);\n\n // Initialize TTS based on model\n useEffect(() => {\n if (!shouldLoad) return;\n\n mountedRef.current = true;\n modelIdRef.current = modelId;\n\n const initTTS = async () => {\n try {\n const isSupertonic = modelId === \"supertonic-66m\";\n const config = TTS_MODELS[modelId];\n\n setLoadingProgress({\n status: \"loading\",\n message: `Loading ${isSupertonic ? 
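// Two backends: Supertonic runs through the generic transformers.js text-to-speech pipeline at 44.1kHz, while Kokoro uses the dedicated kokoro-js runtime at 24kHz.\n 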
\"Supertonic\" : \"Kokoro\"} TTS...`,\n });\n\n if (isSupertonic) {\n // Load Supertonic using transformers.js pipeline\n // Use CDN URL via variable to prevent bundler static analysis\n // (bundlers can't resolve dynamic imports with variables)\n const transformersCdn = \"https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1\";\n const { pipeline } = await import(/* webpackIgnore: true */ transformersCdn);\n\n const tts = await pipeline(\"text-to-speech\", config.repo, {\n device: \"webgpu\",\n progress_callback: (progress: any) => {\n if (!mountedRef.current) return;\n if (progress.status === \"progress\" && progress.file) {\n setLoadingProgress({\n status: \"downloading\",\n file: progress.file,\n progress: Math.round(progress.progress || 0),\n });\n }\n },\n });\n\n if (!mountedRef.current) return;\n\n // Load speaker embeddings from the voices folder\n const voicesUrl = `https://huggingface.co/${config.repo}/resolve/main/voices/`;\n const embeddingsMap = new Map<string, Float32Array>();\n\n // Load all voice embeddings\n await Promise.all(\n config.voices.map(async (voice) => {\n try {\n const response = await fetch(`${voicesUrl}${voice.id}.bin`);\n if (response.ok) {\n const buffer = await response.arrayBuffer();\n embeddingsMap.set(voice.id, new Float32Array(buffer));\n }\n } catch (e) {\n console.warn(`Failed to load voice embedding for ${voice.id}:`, e);\n }\n }),\n );\n\n if (!mountedRef.current) return;\n\n // Warmup the model with a dummy embedding\n try {\n await tts(\"Hello\", {\n speaker_embeddings: new Float32Array(1 * 101 * 128),\n num_inference_steps: 1,\n speed: 1.0,\n });\n } catch (e) {\n console.warn(\"Supertonic warmup failed:\", e);\n }\n\n voiceEmbeddingsRef.current = embeddingsMap;\n ttsRef.current = { type: \"supertonic\", pipeline: tts, config };\n } else {\n // Load Kokoro using kokoro-js\n const kokoroModule = await import(\"kokoro-js\");\n const { KokoroTTS } = kokoroModule;\n\n const tts = await KokoroTTS.from_pretrained(config.repo, {\n dtype: \"fp32\",\n progress_callback: (progress: any) => {\n if (!mountedRef.current) return;\n if (progress.status === \"progress\" && progress.file) {\n setLoadingProgress({\n status: \"downloading\",\n file: progress.file,\n progress: Math.round(progress.progress || 0),\n });\n }\n },\n });\n\n if (!mountedRef.current) return;\n\n ttsRef.current = { type: \"kokoro\", instance: tts, config };\n }\n\n setIsLoading(false);\n setIsReady(true);\n setLoadingProgress({ status: \"ready\" });\n onReady?.();\n } catch (err) {\n if (!mountedRef.current) return;\n const errorMsg = err instanceof Error ? 
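// Supertonic speaker embeddings are raw Float32 .bin blobs (101 x 128 = 12,928 floats per voice), fetched once and cached, e.g.:\n // const buf = await (await fetch(voicesUrl + \"F1.bin\")).arrayBuffer();\n // embeddingsMap.set(\"F1\", new Float32Array(buf)); // 12,928 entries\n 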
err.message : String(err);\n setError(errorMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", error: errorMsg });\n onError?.(errorMsg);\n }\n };\n\n initTTS();\n\n return () => {\n mountedRef.current = false;\n };\n }, [shouldLoad, modelId, onReady, onError]);\n\n // Cleanup AudioContext only on unmount (not on re-renders)\n useEffect(() => {\n return () => {\n try {\n sourceNodeRef.current?.stop();\n } catch {\n // Ignore if already stopped\n }\n try {\n if (audioContextRef.current && audioContextRef.current.state !== \"closed\") {\n audioContextRef.current.close();\n }\n } catch {\n // Ignore if already closed\n }\n };\n }, []);\n\n // Speak function with Web Audio API playback\n const speak = useCallback(\n async (text: string, opts?: { voice?: string; speed?: number }) => {\n const voice = opts?.voice || currentVoice;\n const speed = opts?.speed || currentSpeed;\n\n // Auto-load if not loaded\n if (!ttsRef.current) {\n load();\n // Queue speak for after load\n return;\n }\n\n try {\n setIsSpeaking(true);\n onStart?.();\n\n let audioData: Float32Array;\n let sampleRate: number;\n\n const ttsBackend = ttsRef.current;\n\n if (ttsBackend.type === \"supertonic\") {\n // Supertonic: use transformers.js pipeline with speaker embeddings\n const config = ttsBackend.config;\n\n // Validate voice\n const voiceInfo = config.voices.find((v: BrowserVoiceInfo) => v.id === voice);\n if (!voiceInfo) {\n const validVoices = config.voices.map((v: BrowserVoiceInfo) => v.id).join(\", \");\n throw new Error(`Voice \"${voice}\" not found. Should be one of: ${validVoices}.`);\n }\n\n // Get or load voice embedding (101x128 = 12,928 floats)\n let speakerEmbedding = voiceEmbeddingsRef.current.get(voice);\n if (!speakerEmbedding) {\n try {\n const voiceUrl = `https://huggingface.co/${config.repo}/resolve/main/voices/${voice}.bin`;\n const response = await fetch(voiceUrl);\n if (response.ok) {\n const buffer = await response.arrayBuffer();\n speakerEmbedding = new Float32Array(buffer);\n voiceEmbeddingsRef.current.set(voice, speakerEmbedding);\n } else {\n throw new Error(`Failed to load voice: ${response.status}`);\n }\n } catch {\n // Fallback: create neutral embedding\n speakerEmbedding = new Float32Array(101 * 128).fill(0.1);\n voiceEmbeddingsRef.current.set(voice, speakerEmbedding);\n }\n }\n\n // Generate audio\n const result = await ttsBackend.pipeline(text, {\n speaker_embeddings: speakerEmbedding,\n speed: speed,\n });\n audioData = result.audio as Float32Array;\n sampleRate = result.sampling_rate as number;\n } else {\n // Kokoro: use kokoro-js generate\n const config = ttsBackend.config;\n\n // Validate voice\n const voiceInfo = config.voices.find((v: BrowserVoiceInfo) => v.id === voice);\n if (!voiceInfo) {\n const validVoices = config.voices.map((v: BrowserVoiceInfo) => v.id).join(\", \");\n throw new Error(`Voice \"${voice}\" not found. 
Should be one of: ${validVoices}.`);\n }\n\n const result = await ttsBackend.instance.generate(text, { voice, speed });\n audioData = result.audio as Float32Array;\n sampleRate = result.sampling_rate as number;\n }\n\n if (!mountedRef.current) return;\n\n // Create or recreate AudioContext if needed\n if (!audioContextRef.current || audioContextRef.current.state === \"closed\") {\n audioContextRef.current = new AudioContext();\n }\n\n const audioContext = audioContextRef.current;\n\n // Resume context if suspended (browser autoplay policy)\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n\n // Create audio buffer (ensure we have a proper ArrayBuffer-backed Float32Array)\n const audioBuffer = audioContext.createBuffer(1, audioData.length, sampleRate);\n const channelData = new Float32Array(audioData);\n audioBuffer.copyToChannel(channelData, 0);\n\n // Stop any current playback\n if (sourceNodeRef.current) {\n sourceNodeRef.current.stop();\n sourceNodeRef.current.disconnect();\n }\n\n // Create and play source node\n const sourceNode = audioContext.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(audioContext.destination);\n\n sourceNode.onended = () => {\n if (mountedRef.current) {\n setIsSpeaking(false);\n onEnd?.();\n }\n };\n\n sourceNodeRef.current = sourceNode;\n sourceNode.start();\n } catch (err) {\n if (!mountedRef.current) return;\n const errorMsg = err instanceof Error ? err.message : String(err);\n setError(errorMsg);\n setIsSpeaking(false);\n onError?.(errorMsg);\n }\n },\n [currentVoice, currentSpeed, load, onStart, onEnd, onError],\n );\n\n // Stop function\n const stop = useCallback(() => {\n if (sourceNodeRef.current) {\n sourceNodeRef.current.stop();\n sourceNodeRef.current.disconnect();\n sourceNodeRef.current = null;\n }\n setIsSpeaking(false);\n }, []);\n\n // Voice setter with validation\n const setVoice = useCallback(\n (voiceId: string) => {\n const voiceInfo = modelConfig.voices.find((v) => v.id === voiceId);\n if (voiceInfo) {\n setCurrentVoice(voiceId);\n } else {\n console.warn(\n `Voice \"${voiceId}\" not valid for ${modelId}. 
Available: ${modelConfig.voices.map((v) => v.id).join(\", \")}`,\n );\n }\n },\n [modelConfig.voices, modelId],\n );\n\n // Speed setter\n const setSpeed = useCallback((speed: number) => {\n setCurrentSpeed(Math.max(0.5, Math.min(2.0, speed)));\n }, []);\n\n return {\n speak,\n stop,\n isLoading,\n loadingProgress,\n isSpeaking,\n isReady,\n load,\n error,\n listVoices,\n currentVoice,\n setVoice,\n currentSpeed,\n setSpeed,\n currentModel: modelId,\n sampleRate: modelConfig.sampleRate,\n };\n}\n\n// ============================================\n// Audio Playback Utilities\n// ============================================\n\n/**\n * Play audio from Float32Array using Web Audio API\n *\n * @example\n * ```ts\n * import { playAudio } from \"@tryhamster/gerbil/browser\";\n *\n * const audio = new Float32Array([...]); // TTS output\n * const controller = await playAudio(audio, 24000);\n *\n * // Stop playback\n * controller.stop();\n * ```\n */\nexport async function playAudio(\n audio: Float32Array,\n sampleRate: number = 24000,\n): Promise<{ stop: () => void; onEnded: Promise<void> }> {\n const audioContext = new AudioContext();\n\n // Resume if suspended\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n\n const audioBuffer = audioContext.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n audioBuffer.copyToChannel(channelData, 0);\n\n const sourceNode = audioContext.createBufferSource();\n sourceNode.buffer = audioBuffer;\n sourceNode.connect(audioContext.destination);\n\n const onEnded = new Promise<void>((resolve) => {\n sourceNode.onended = () => {\n audioContext.close();\n resolve();\n };\n });\n\n sourceNode.start();\n\n return {\n stop: () => {\n sourceNode.stop();\n audioContext.close();\n },\n onEnded,\n };\n}\n\n/**\n * Create a reusable audio player for streaming TTS\n *\n * @example\n * ```ts\n * import { createAudioPlayer } from \"@tryhamster/gerbil/browser\";\n *\n * const player = createAudioPlayer(24000);\n *\n * // Queue audio chunks as they arrive\n * player.queue(chunk1);\n * player.queue(chunk2);\n *\n * // Stop and clear\n * player.stop();\n * ```\n */\nexport function createAudioPlayer(sampleRate: number = 24000): {\n queue: (audio: Float32Array) => void;\n stop: () => void;\n isPlaying: () => boolean;\n} {\n let audioContext: AudioContext | null = null;\n let nextStartTime = 0;\n let isActive = false;\n\n const ensureContext = async () => {\n if (!audioContext) {\n audioContext = new AudioContext();\n }\n if (audioContext.state === \"suspended\") {\n await audioContext.resume();\n }\n return audioContext;\n };\n\n return {\n queue: async (audio: Float32Array) => {\n const ctx = await ensureContext();\n isActive = true;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n\n // Schedule seamlessly after previous chunk\n const startTime = Math.max(ctx.currentTime, nextStartTime);\n source.start(startTime);\n nextStartTime = startTime + buffer.duration;\n\n source.onended = () => {\n if (ctx.currentTime >= nextStartTime - 0.1) {\n isActive = false;\n }\n };\n },\n\n stop: () => {\n isActive = false;\n nextStartTime = 0;\n if (audioContext) {\n audioContext.close();\n audioContext = null;\n }\n },\n\n isPlaying: () => isActive,\n };\n}\n\n// ============================================\n// Voice 
Input Hook (STT)\n// ============================================\n\n/**\n * Progress info for STT loading\n */\nexport type STTProgress = {\n status: \"downloading\" | \"loading\" | \"ready\" | \"error\";\n message?: string;\n progress?: number;\n file?: string;\n};\n\n/**\n * Options for useVoiceInput hook\n */\nexport type UseVoiceInputOptions = {\n /** STT model ID (default: whisper-tiny.en) */\n model?: string;\n /** Auto-load model on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when model is ready */\n onReady?: () => void;\n /** Callback when transcription completes (or for each chunk in streaming mode) */\n onTranscript?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n /** Callback during loading */\n onProgress?: (progress: STTProgress) => void;\n /** Enable streaming transcription - transcribes audio in chunks as you speak */\n streaming?: boolean;\n /** Chunk duration in ms for streaming mode (default: 1500 = 1.5 seconds) */\n chunkDuration?: number;\n /** Callback for each streaming chunk with partial transcript */\n onChunk?: (text: string, chunkIndex: number) => void;\n};\n\n/**\n * Return type for useVoiceInput hook\n */\nexport type UseVoiceInputReturn = {\n /** Start recording audio */\n startRecording: () => Promise<void>;\n /** Stop recording and transcribe */\n stopRecording: () => Promise<string>;\n /** Cancel recording without transcribing */\n cancelRecording: () => void;\n /** Transcribe raw audio data (Float32Array at 16kHz) */\n transcribe: (audio: Float32Array) => Promise<string>;\n /** Whether currently recording */\n isRecording: boolean;\n /** Whether transcribing */\n isTranscribing: boolean;\n /** Whether model is loading */\n isLoading: boolean;\n /** Whether model is ready */\n isReady: boolean;\n /** Latest transcription result (full transcript in streaming mode) */\n transcript: string;\n /** Current streaming chunk being transcribed (streaming mode only) */\n streamingChunk: string;\n /** Number of chunks transcribed so far (streaming mode only) */\n chunkCount: number;\n /** Loading progress */\n loadingProgress: STTProgress | null;\n /** Error message */\n error: string | null;\n /** Manually load the model */\n load: () => void;\n};\n\n/**\n * React hook for voice input with browser microphone\n *\n * Uses MediaRecorder to capture audio and Whisper for transcription.\n * Supports both one-shot and streaming transcription modes.\n *\n * @example Basic usage (one-shot)\n * ```tsx\n * function VoiceInput() {\n * const { startRecording, stopRecording, isRecording, transcript } = useVoiceInput({\n * onTranscript: (text) => console.log(\"User said:\", text),\n * });\n *\n * return (\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? \"Stop\" : \"Record\"}\n * </button>\n * );\n * }\n * ```\n *\n * @example Streaming transcription (real-time)\n * ```tsx\n * function LiveTranscription() {\n * const { startRecording, stopRecording, isRecording, transcript, streamingChunk } = useVoiceInput({\n * streaming: true, // Enable streaming mode\n * chunkDuration: 1500, // Transcribe every 1.5 seconds (default)\n * onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),\n * });\n *\n * return (\n * <div>\n * <button onClick={isRecording ? stopRecording : startRecording}>\n * {isRecording ? 
\"Stop\" : \"Start Live Transcription\"}\n * </button>\n * <p>Current chunk: {streamingChunk}</p>\n * <p>Full transcript: {transcript}</p>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceInput(options: UseVoiceInputOptions = {}): UseVoiceInputReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceInput requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n const {\n model = \"whisper-tiny.en\",\n autoLoad = false,\n onReady,\n onTranscript,\n onError,\n onProgress,\n streaming = false,\n chunkDuration = 1500, // Transcribe every 1.5 seconds for near real-time\n onChunk,\n } = options;\n\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingProgress, setLoadingProgress] = useState<STTProgress | null>(null);\n const [isReady, setIsReady] = useState<boolean>(false);\n const [isRecording, setIsRecording] = useState<boolean>(false);\n const [isTranscribing, setIsTranscribing] = useState<boolean>(false);\n const [transcript, setTranscript] = useState<string>(\"\");\n const [streamingChunk, setStreamingChunk] = useState<string>(\"\");\n const [chunkCount, setChunkCount] = useState<number>(0);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n const sttRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const mountedRef = useRef<boolean>(true);\n const streamingIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null);\n const pendingChunksRef = useRef<Blob[]>([]);\n const fullTranscriptRef = useRef<string>(\"\");\n\n // Load the STT model\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModel = async () => {\n try {\n setIsLoading(true);\n setLoadingProgress({ status: \"loading\", message: \"Loading STT model...\" });\n onProgress?.({ status: \"loading\", message: \"Loading STT model...\" });\n\n // Dynamic import to avoid bundling when not used\n const { WhisperSTT } = await import(\"../core/stt.js\");\n\n if (cancelled || !mountedRef.current) return;\n\n const stt = new WhisperSTT(model);\n await stt.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n const progress: STTProgress = {\n status: p.progress !== undefined ? 
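// A numeric progress field only accompanies active file downloads; cache loads report bare status strings and surface here as \"loading\".\n 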
\"downloading\" : \"loading\",\n message: p.status,\n progress: p.progress,\n file: p.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n },\n });\n\n if (cancelled || !mountedRef.current) {\n stt.dispose();\n return;\n }\n\n sttRef.current = stt;\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load STT model\";\n setError(errMsg);\n setIsLoading(false);\n setLoadingProgress({ status: \"error\", message: errMsg });\n onProgress?.({ status: \"error\", message: errMsg });\n onError?.(errMsg);\n }\n };\n\n loadModel();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, model, onReady, onError, onProgress]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n if (sttRef.current) {\n sttRef.current.dispose();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n };\n }, []);\n\n // Manual load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert audio blob to Float32Array at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n\n // Get mono channel\n const channelData = audioBuffer.getChannelData(0);\n\n // Resample if needed\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Transcribe audio\n const transcribe = useCallback(\n async (audio: Float32Array): Promise<string> => {\n if (!sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n throw new Error(\"STT model not loaded. 
Loading now, please try again.\");\n }\n throw new Error(\"STT model not loaded\");\n }\n\n setIsTranscribing(true);\n try {\n const result = await sttRef.current.transcribe(audio);\n let text = result.text.trim();\n // Filter out Whisper artifacts\n if (text === \"[BLANK_AUDIO]\" || text === \"(blank audio)\" || text === \"[BLANK AUDIO]\") {\n text = \"\";\n }\n setTranscript(text);\n onTranscript?.(text);\n return text;\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n },\n [shouldLoad, onTranscript],\n );\n\n // Track how many samples we've processed for streaming\n const processedSamplesRef = useRef<number>(0);\n\n // Transcribe a chunk of audio (for streaming mode)\n // Uses audioChunksRef (all chunks) to ensure valid WebM container\n const transcribeChunk = useCallback(\n async (chunkIdx: number): Promise<string> => {\n if (!sttRef.current || audioChunksRef.current.length === 0) return \"\";\n\n try {\n // Create blob from ALL chunks (needed for valid WebM header)\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n const audioData = await blobToFloat32(audioBlob);\n\n // Calculate new samples since last transcription\n const newSamplesStart = processedSamplesRef.current;\n const totalSamples = audioData.length;\n\n // Skip if no new audio (< 0.5 seconds = 8000 samples at 16kHz)\n if (totalSamples - newSamplesStart < 8000) return \"\";\n\n // Extract only the new portion of audio\n const newAudio = audioData.slice(newSamplesStart);\n\n // Update processed count\n processedSamplesRef.current = totalSamples;\n\n const result = await sttRef.current.transcribe(newAudio);\n let text = result.text.trim();\n\n // Filter out Whisper artifacts\n if (text === \"[BLANK_AUDIO]\" || text === \"(blank audio)\" || text === \"[BLANK AUDIO]\") {\n text = \"\";\n }\n\n if (text && mountedRef.current) {\n setStreamingChunk(text);\n onChunk?.(text, chunkIdx);\n }\n\n return text;\n } catch {\n return \"\";\n }\n },\n [blobToFloat32, onChunk],\n );\n\n // Start recording\n const startRecording = useCallback(async () => {\n if (isRecording) return;\n\n try {\n // For streaming mode, ensure STT model is loaded first\n if (streaming && !sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for model to load\n setIsLoading(true);\n const { WhisperSTT } = await import(\"../core/stt.js\");\n const stt = new WhisperSTT(model);\n await stt.load({\n onProgress: (p: any) => {\n if (mountedRef.current) {\n const progress: STTProgress = {\n status:\n p.status === \"downloading\"\n ? \"downloading\"\n : p.status === \"ready\"\n ? 
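// Streaming mode loads Whisper inline here so the chunk loop below never races an unloaded model.\n 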
\"ready\"\n : \"loading\",\n message: p.status,\n progress: p.progress,\n file: p.file,\n };\n setLoadingProgress(progress);\n onProgress?.(progress);\n }\n },\n });\n if (!mountedRef.current) {\n stt.dispose();\n return;\n }\n sttRef.current = stt;\n setIsReady(true);\n setIsLoading(false);\n setLoadingProgress({ status: \"ready\" });\n onProgress?.({ status: \"ready\" });\n onReady?.();\n }\n\n // Request microphone permission\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: {\n sampleRate: 16000,\n channelCount: 1,\n echoCancellation: true,\n noiseSuppression: true,\n },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n fullTranscriptRef.current = \"\";\n processedSamplesRef.current = 0;\n setTranscript(\"\");\n setStreamingChunk(\"\");\n setChunkCount(0);\n\n const mediaRecorder = new MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n if (streaming) {\n pendingChunksRef.current.push(event.data);\n }\n }\n };\n\n mediaRecorder.start(100); // Collect data every 100ms\n setIsRecording(true);\n setError(null);\n\n // If streaming mode, set up recursive transcription loop\n if (streaming && sttRef.current) {\n let chunkIdx = 0;\n let shouldContinue = true;\n\n // Use recursive setTimeout instead of setInterval to avoid timing issues\n // with heavy WebGPU/WASM operations\n const processNextChunk = async () => {\n if (!shouldContinue || !mountedRef.current) {\n return;\n }\n\n const numPending = pendingChunksRef.current.length;\n\n // Check if we have new audio to process\n if (numPending > 0) {\n // Clear pending counter (we'll process via audioChunksRef which has all data)\n pendingChunksRef.current = [];\n\n try {\n setIsTranscribing(true);\n const chunkText = await transcribeChunk(chunkIdx);\n\n if (chunkText && mountedRef.current) {\n chunkIdx++;\n setChunkCount(chunkIdx);\n\n // Append to full transcript using functional update\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
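// Functional update avoids stale closures inside this async loop; chunks are joined with single spaces and mirrored into fullTranscriptRef so stopRecording can return the final text without waiting on React state.\n 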
\" \" : \"\") + chunkText;\n fullTranscriptRef.current = newTranscript;\n onTranscript?.(newTranscript);\n return newTranscript;\n });\n }\n } catch (e) {\n console.error(\"[useVoiceInput] Chunk transcription error:\", e);\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n // Schedule next check if still running\n if (shouldContinue && mountedRef.current) {\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n }\n };\n\n // Start the loop\n streamingIntervalRef.current = setTimeout(processNextChunk, chunkDuration) as any;\n\n // Store a way to stop the loop\n (streamingIntervalRef as any)._stop = () => {\n shouldContinue = false;\n };\n }\n } catch (e: any) {\n const errMsg = e.message || \"Failed to start recording\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [\n isRecording,\n streaming,\n shouldLoad,\n model,\n chunkDuration,\n transcribeChunk,\n onTranscript,\n onError,\n onProgress,\n onReady,\n ]);\n\n // Stop recording and transcribe\n const stopRecording = useCallback(async (): Promise<string> => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n return new Promise((resolve, reject) => {\n if (!mediaRecorderRef.current || !isRecording) {\n reject(new Error(\"Not recording\"));\n return;\n }\n\n const mediaRecorder = mediaRecorderRef.current;\n\n mediaRecorder.onstop = async () => {\n // Stop all tracks\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n setIsRecording(false);\n\n // In streaming mode, process any remaining chunks and return full transcript\n if (streaming) {\n // Process any remaining audio\n if (audioChunksRef.current.length > 0 && processedSamplesRef.current > 0) {\n setIsTranscribing(true);\n pendingChunksRef.current = [];\n\n try {\n const finalChunkText = await transcribeChunk(chunkCount);\n if (finalChunkText && mountedRef.current) {\n setTranscript((prev) => {\n const newTranscript = prev + (prev ? 
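// Final flush: audio captured after the last timer tick is transcribed once more here before the promise resolves.\n 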
\" \" : \"\") + finalChunkText;\n fullTranscriptRef.current = newTranscript;\n return newTranscript;\n });\n }\n } finally {\n if (mountedRef.current) {\n setIsTranscribing(false);\n }\n }\n }\n\n const finalText = fullTranscriptRef.current;\n onTranscript?.(finalText);\n resolve(finalText);\n return;\n }\n\n // Non-streaming mode: transcribe entire recording\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // Ensure model is loaded\n if (!sttRef.current) {\n if (!shouldLoad) {\n setShouldLoad(true);\n }\n // Wait for model to load\n await new Promise<void>((res, rej) => {\n const checkReady = setInterval(() => {\n if (sttRef.current) {\n clearInterval(checkReady);\n res();\n }\n }, 100);\n setTimeout(() => {\n clearInterval(checkReady);\n rej(new Error(\"Timeout waiting for STT model\"));\n }, 30000);\n });\n }\n\n // Convert blob to Float32Array\n const audioData = await blobToFloat32(audioBlob);\n\n // Transcribe\n const text = await transcribe(audioData);\n resolve(text);\n } catch (e: any) {\n const errMsg = e.message || \"Transcription failed\";\n setError(errMsg);\n onError?.(errMsg);\n reject(e);\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n isRecording,\n streaming,\n chunkCount,\n shouldLoad,\n blobToFloat32,\n transcribe,\n transcribeChunk,\n onTranscript,\n onError,\n ]);\n\n // Cancel recording\n const cancelRecording = useCallback(() => {\n // Stop streaming loop\n if ((streamingIntervalRef as any)._stop) {\n (streamingIntervalRef as any)._stop();\n }\n if (streamingIntervalRef.current) {\n clearTimeout(streamingIntervalRef.current);\n streamingIntervalRef.current = null;\n }\n\n if (mediaRecorderRef.current && isRecording) {\n mediaRecorderRef.current.stop();\n }\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n audioChunksRef.current = [];\n pendingChunksRef.current = [];\n processedSamplesRef.current = 0;\n setIsRecording(false);\n }, [isRecording]);\n\n return {\n startRecording,\n stopRecording,\n cancelRecording,\n transcribe,\n isRecording,\n isTranscribing,\n isLoading,\n isReady,\n transcript,\n streamingChunk,\n chunkCount,\n loadingProgress,\n error,\n load,\n };\n}\n\n// ============================================\n// Voice Chat Hook (STT + LLM + TTS)\n// ============================================\n\n/**\n * Options for useVoiceChat hook\n */\nexport type UseVoiceChatOptions = {\n /** LLM model ID (default: qwen3-0.6b) */\n llmModel?: string;\n /** STT model ID (default: whisper-tiny.en) */\n sttModel?: string;\n /** TTS model ID (default: kokoro-82m, also supports supertonic-66m) */\n ttsModel?: TTSModelId;\n /** System prompt for LLM */\n system?: string;\n /** Enable thinking mode (default: false) */\n thinking?: boolean;\n /** TTS voice ID (default: model's default voice) */\n voice?: string;\n /** TTS speech speed (default: 1.0) */\n speed?: number;\n /** Auto-load all models on mount (default: false) */\n autoLoad?: boolean;\n /** Callback when user speaks */\n onUserSpeak?: (text: string) => void;\n /** Callback when assistant responds */\n onAssistantSpeak?: (text: string) => void;\n /** Callback on error */\n onError?: (error: string) => void;\n};\n\n/**\n * Message in voice chat\n */\nexport type VoiceChatMessage = {\n id: string;\n role: \"user\" | \"assistant\";\n content: string;\n thinking?: string;\n audioUrl?: string;\n};\n\n/**\n * Return type for useVoiceChat hook\n */\nexport type UseVoiceChatReturn = {\n /** 
Messages in the conversation */\n messages: VoiceChatMessage[];\n /** Start recording user speech */\n startListening: () => Promise<void>;\n /** Stop recording and process (STT → LLM → TTS) */\n stopListening: () => Promise<void>;\n /** Cancel current operation */\n cancel: () => void;\n /** Clear conversation history */\n clear: () => void;\n /** Whether recording user speech */\n isListening: boolean;\n /** Whether processing (STT/LLM/TTS) */\n isProcessing: boolean;\n /** Whether assistant is speaking */\n isSpeaking: boolean;\n /** Current stage: idle, listening, transcribing, thinking, speaking */\n stage: \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\";\n /** Whether all models are loaded */\n isReady: boolean;\n /** Whether loading models */\n isLoading: boolean;\n /** Loading progress message */\n loadingMessage: string;\n /** Error message */\n error: string | null;\n /** Manually load all models */\n load: () => void;\n};\n\n/**\n * React hook for voice conversation with STT + LLM + TTS\n *\n * Complete voice-to-voice conversation loop:\n * 1. User presses button to speak\n * 2. Speech is transcribed (Whisper)\n * 3. LLM generates response\n * 4. Response is spoken aloud (Kokoro or Supertonic TTS)\n *\n * @example\n * ```tsx\n * function VoiceChat() {\n * const {\n * messages,\n * startListening,\n * stopListening,\n * isListening,\n * isSpeaking,\n * stage,\n * } = useVoiceChat({\n * system: \"You are a helpful voice assistant.\",\n * voice: \"af_bella\",\n * // Or use Supertonic for faster synthesis:\n * // ttsModel: \"supertonic-66m\",\n * // voice: \"F1\",\n * });\n *\n * return (\n * <div>\n * {messages.map(m => (\n * <div key={m.id}>{m.role}: {m.content}</div>\n * ))}\n * <button\n * onMouseDown={startListening}\n * onMouseUp={stopListening}\n * >\n * {stage === \"idle\" ? \"🎤 Hold to Speak\" : stage}\n * </button>\n * </div>\n * );\n * }\n * ```\n */\nexport function useVoiceChat(options: UseVoiceChatOptions = {}): UseVoiceChatReturn {\n const React = (globalThis as any).React;\n if (!React) {\n throw new Error(\"useVoiceChat requires React. Import React before using this hook.\");\n }\n\n const { useState, useEffect, useRef, useCallback } = React as {\n useState: <T>(initial: T) => [T, (v: T | ((prev: T) => T)) => void];\n useEffect: (effect: () => void | (() => void), deps?: unknown[]) => void;\n useRef: <T>(initial: T) => { current: T };\n useCallback: <T>(fn: T, deps: unknown[]) => T;\n };\n\n // Get TTS model config for default voice\n const ttsModelId = options.ttsModel || \"kokoro-82m\";\n const ttsConfig = TTS_MODELS[ttsModelId];\n\n const {\n llmModel = \"qwen3-0.6b\",\n sttModel = \"whisper-tiny.en\",\n system = \"You are a helpful voice assistant. 
Keep responses brief and conversational.\",\n thinking = false,\n voice = ttsConfig.defaultVoice,\n speed = 1.0,\n autoLoad = false,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n } = options;\n\n const [messages, setMessages] = useState<VoiceChatMessage[]>([]);\n const [stage, setStage] = useState<\n \"idle\" | \"listening\" | \"transcribing\" | \"thinking\" | \"speaking\"\n >(\"idle\");\n const [isLoading, setIsLoading] = useState<boolean>(autoLoad);\n const [loadingMessage, setLoadingMessage] = useState<string>(\"\");\n const [isReady, setIsReady] = useState<boolean>(false);\n const [error, setError] = useState<string | null>(null);\n const [shouldLoad, setShouldLoad] = useState<boolean>(autoLoad);\n\n // Refs for models and audio\n const llmWorkerRef = useRef<any>(null);\n const sttRef = useRef<any>(null);\n const ttsRef = useRef<any>(null);\n const mediaRecorderRef = useRef<MediaRecorder | null>(null);\n const audioChunksRef = useRef<Blob[]>([]);\n const streamRef = useRef<MediaStream | null>(null);\n const audioContextRef = useRef<AudioContext | null>(null);\n const sourceNodeRef = useRef<AudioBufferSourceNode | null>(null);\n const mountedRef = useRef<boolean>(true);\n const cancelledRef = useRef<boolean>(false);\n\n // Computed states\n const isListening = stage === \"listening\";\n const isProcessing = stage === \"transcribing\" || stage === \"thinking\";\n const isSpeaking = stage === \"speaking\";\n\n // Load all models\n useEffect(() => {\n if (!shouldLoad || isReady) return;\n\n let cancelled = false;\n\n const loadModels = async () => {\n try {\n setIsLoading(true);\n setError(null);\n\n // Load STT\n setLoadingMessage(\"Loading speech recognition (Whisper)...\");\n const { WhisperSTT } = await import(\"../core/stt.js\");\n if (cancelled || !mountedRef.current) return;\n\n const stt = new WhisperSTT(sttModel);\n await stt.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.status || \"Loading STT...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n stt.dispose();\n return;\n }\n sttRef.current = stt;\n\n // Load LLM worker\n setLoadingMessage(\"Loading language model...\");\n const worker = await createGerbilWorker({\n modelId: llmModel,\n onProgress: (p) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.message || \"Loading LLM...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n worker.terminate();\n return;\n }\n llmWorkerRef.current = worker;\n\n // Load TTS (Kokoro or Supertonic based on ttsModel option)\n const isSupertonic = ttsModelId === \"supertonic-66m\";\n setLoadingMessage(`Loading text-to-speech (${isSupertonic ? 
\"Supertonic\" : \"Kokoro\"})...`);\n\n const { createTTS } = await import(\"../core/tts.js\");\n if (cancelled || !mountedRef.current) return;\n\n const tts = createTTS(ttsModelId);\n await tts.load({\n onProgress: (p: any) => {\n if (!mountedRef.current) return;\n setLoadingMessage(p.status || \"Loading TTS...\");\n },\n });\n if (cancelled || !mountedRef.current) {\n await tts.dispose();\n return;\n }\n ttsRef.current = tts;\n\n setIsReady(true);\n setIsLoading(false);\n setLoadingMessage(\"Ready!\");\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Failed to load models\";\n setError(errMsg);\n setIsLoading(false);\n onError?.(errMsg);\n }\n };\n\n loadModels();\n\n return () => {\n cancelled = true;\n };\n }, [shouldLoad, isReady, llmModel, sttModel, ttsModelId, onError]);\n\n // Cleanup on unmount\n useEffect(() => {\n mountedRef.current = true;\n return () => {\n mountedRef.current = false;\n llmWorkerRef.current?.terminate();\n sttRef.current?.dispose();\n ttsRef.current?.dispose();\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n }\n audioContextRef.current?.close();\n };\n }, []);\n\n // Load trigger\n const load = useCallback(() => {\n if (!shouldLoad && !isReady && !isLoading) {\n setShouldLoad(true);\n }\n }, [shouldLoad, isReady, isLoading]);\n\n // Convert blob to Float32 at 16kHz\n const blobToFloat32 = useCallback(async (blob: Blob): Promise<Float32Array> => {\n const audioContext = new AudioContext({ sampleRate: 16000 });\n const arrayBuffer = await blob.arrayBuffer();\n const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);\n const channelData = audioBuffer.getChannelData(0);\n\n if (audioBuffer.sampleRate !== 16000) {\n const ratio = 16000 / audioBuffer.sampleRate;\n const newLength = Math.round(channelData.length * ratio);\n const resampled = new Float32Array(newLength);\n for (let i = 0; i < newLength; i++) {\n const srcIndex = i / ratio;\n const floor = Math.floor(srcIndex);\n const ceil = Math.min(floor + 1, channelData.length - 1);\n const t = srcIndex - floor;\n resampled[i] = channelData[floor] * (1 - t) + channelData[ceil] * t;\n }\n audioContext.close();\n return resampled;\n }\n\n audioContext.close();\n return new Float32Array(channelData);\n }, []);\n\n // Play audio through Web Audio API\n const playAudioBuffer = useCallback(\n async (audio: Float32Array, sampleRate: number): Promise<void> => {\n return new Promise((resolve) => {\n if (!audioContextRef.current) {\n audioContextRef.current = new AudioContext();\n }\n const ctx = audioContextRef.current;\n\n const buffer = ctx.createBuffer(1, audio.length, sampleRate);\n const channelData = new Float32Array(audio);\n buffer.copyToChannel(channelData, 0);\n\n const source = ctx.createBufferSource();\n source.buffer = buffer;\n source.connect(ctx.destination);\n source.onended = () => {\n if (mountedRef.current) {\n resolve();\n }\n };\n source.start();\n sourceNodeRef.current = source;\n });\n },\n [],\n );\n\n // Start listening\n const startListening = useCallback(async () => {\n if (stage !== \"idle\") return;\n\n // Trigger load if not ready\n if (!isReady && !isLoading) {\n setShouldLoad(true);\n return;\n }\n\n cancelledRef.current = false;\n\n try {\n const stream = await navigator.mediaDevices.getUserMedia({\n audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true },\n });\n\n streamRef.current = stream;\n audioChunksRef.current = [];\n\n const mediaRecorder = new 
MediaRecorder(stream);\n mediaRecorderRef.current = mediaRecorder;\n\n mediaRecorder.ondataavailable = (event) => {\n if (event.data.size > 0) {\n audioChunksRef.current.push(event.data);\n }\n };\n\n mediaRecorder.start(100);\n setStage(\"listening\");\n setError(null);\n } catch (e: any) {\n const errMsg = e.message || \"Failed to access microphone\";\n setError(errMsg);\n onError?.(errMsg);\n }\n }, [stage, isReady, isLoading, onError]);\n\n // Stop listening and process\n const stopListening = useCallback(async () => {\n if (stage !== \"listening\") return;\n\n const mediaRecorder = mediaRecorderRef.current;\n if (!mediaRecorder) return;\n\n return new Promise<void>((resolve) => {\n mediaRecorder.onstop = async () => {\n // Stop mic\n if (streamRef.current) {\n for (const track of streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n const audioBlob = new Blob(audioChunksRef.current, { type: \"audio/webm\" });\n\n try {\n // STT\n setStage(\"transcribing\");\n const audioData = await blobToFloat32(audioBlob);\n const sttResult = await sttRef.current.transcribe(audioData);\n let userText = sttResult.text.trim();\n\n // Filter out Whisper artifacts\n if (\n userText === \"[BLANK_AUDIO]\" ||\n userText === \"(blank audio)\" ||\n userText === \"[BLANK AUDIO]\"\n ) {\n userText = \"\";\n }\n\n if (cancelledRef.current || !userText) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add user message\n const userMsgId = `user-${Date.now()}`;\n setMessages((m) => [...m, { id: userMsgId, role: \"user\", content: userText }]);\n onUserSpeak?.(userText);\n\n // LLM\n setStage(\"thinking\");\n\n // Build conversation history\n const history = messages.map((m) => ({\n role: m.role as \"user\" | \"assistant\",\n content: m.content,\n }));\n history.push({ role: \"user\", content: userText });\n\n let responseText = \"\";\n let thinkingText = \"\";\n\n await llmWorkerRef.current.generate(userText, {\n system,\n thinking,\n history,\n onToken: (token: WorkerToken) => {\n if (cancelledRef.current) return;\n if (token.state === \"thinking\") {\n thinkingText += token.text;\n } else {\n responseText += token.text;\n }\n },\n });\n\n if (cancelledRef.current) {\n setStage(\"idle\");\n resolve();\n return;\n }\n\n // Add assistant message\n const assistantMsgId = `assistant-${Date.now()}`;\n setMessages((m) => [\n ...m,\n {\n id: assistantMsgId,\n role: \"assistant\",\n content: responseText,\n thinking: thinkingText || undefined,\n },\n ]);\n onAssistantSpeak?.(responseText);\n\n // TTS\n if (responseText.trim()) {\n setStage(\"speaking\");\n const ttsResult = await ttsRef.current.speak(responseText, { voice, speed });\n\n if (!cancelledRef.current) {\n await playAudioBuffer(ttsResult.audio, ttsResult.sampleRate);\n }\n }\n\n setStage(\"idle\");\n resolve();\n } catch (e: any) {\n if (!mountedRef.current) return;\n const errMsg = e.message || \"Processing failed\";\n setError(errMsg);\n setStage(\"idle\");\n onError?.(errMsg);\n resolve();\n }\n };\n\n mediaRecorder.stop();\n });\n }, [\n stage,\n messages,\n system,\n thinking,\n voice,\n speed,\n blobToFloat32,\n playAudioBuffer,\n onUserSpeak,\n onAssistantSpeak,\n onError,\n ]);\n\n // Cancel\n const cancel = useCallback(() => {\n cancelledRef.current = true;\n\n if (mediaRecorderRef.current && stage === \"listening\") {\n mediaRecorderRef.current.stop();\n }\n\n if (streamRef.current) {\n for (const track of 
streamRef.current.getTracks()) {\n track.stop();\n }\n streamRef.current = null;\n }\n\n if (sourceNodeRef.current) {\n try {\n sourceNodeRef.current.stop();\n } catch {}\n }\n\n audioChunksRef.current = [];\n setStage(\"idle\");\n }, [stage]);\n\n // Clear messages\n const clear = useCallback(() => {\n setMessages([]);\n }, []);\n\n return {\n messages,\n startListening,\n stopListening,\n cancel,\n clear,\n isListening,\n isProcessing,\n isSpeaking,\n stage,\n isReady,\n isLoading,\n loadingMessage,\n error,\n load,\n };\n}\n\n// ============================================\n// Utilities\n// ============================================\n\n/**\n * Check if WebGPU is supported\n */\nexport function isWebGPUSupported(): boolean {\n if (typeof navigator === \"undefined\") {\n return false;\n }\n return \"gpu\" in navigator;\n}\n\n/**\n * Get WebGPU adapter info\n */\nexport async function getWebGPUInfo(): Promise<{\n supported: boolean;\n adapter?: string;\n device?: string;\n} | null> {\n if (!isWebGPUSupported()) {\n return { supported: false };\n }\n\n try {\n const adapter = await (navigator as any).gpu.requestAdapter();\n if (!adapter) {\n return { supported: false };\n }\n\n const info = await adapter.requestAdapterInfo();\n return {\n supported: true,\n adapter: info.vendor,\n device: info.device,\n };\n } catch {\n return { supported: false };\n }\n}\n\nexport default {\n isWebGPUSupported,\n getWebGPUInfo,\n createGerbilWorker,\n playAudio,\n createAudioPlayer,\n};\n"],"mappings":"…"}
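The hook sources embedded in this map convert recorded audio into the 16 kHz mono Float32Array that Whisper expects, falling back to manual linear interpolation when the decoded buffer arrives at a different rate (see the blobToFloat32 helpers above). A minimal standalone sketch of that resampling step, assuming only a browser AudioContext; the name decodeTo16kHz is illustrative, not a package export:

// Sketch: decode a recorded Blob and linearly resample it to 16 kHz mono,
// mirroring the blobToFloat32 helpers in the embedded sources above.
// decodeTo16kHz is a hypothetical name, not part of @tryhamster/gerbil.
async function decodeTo16kHz(blob: Blob): Promise<Float32Array> {
  const ctx = new AudioContext({ sampleRate: 16000 });
  const decoded = await ctx.decodeAudioData(await blob.arrayBuffer());
  const src = decoded.getChannelData(0); // first channel only (mono)

  if (decoded.sampleRate === 16000) {
    await ctx.close();
    return new Float32Array(src);
  }

  // Linear interpolation between the two nearest source samples.
  const ratio = 16000 / decoded.sampleRate;
  const out = new Float32Array(Math.round(src.length * ratio));
  for (let i = 0; i < out.length; i++) {
    const pos = i / ratio;
    const lo = Math.floor(pos);
    const hi = Math.min(lo + 1, src.length - 1);
    const t = pos - lo;
    out[i] = src[lo] * (1 - t) + src[hi] * t;
  }
  await ctx.close();
  return out;
}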
@@ -1,3 +1,3 @@
-import { i as trackCachedModel, n as getChromeCachedModels, r as refreshCachedModelSizes, t as ChromeGPUBackend } from "./chrome-backend-C5Un08O4.mjs";
+import { i as trackCachedModel, n as getChromeCachedModels, r as refreshCachedModelSizes, t as ChromeGPUBackend } from "./chrome-backend-Y9F7W5VQ.mjs";
 
 export { ChromeGPUBackend };
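The single-letter names in this hunk (i, n, r, t) are the bundler's minified export identifiers; the shim's only job is to re-export them under their public names while the implementation lives in a content-hashed chunk (renamed here from chrome-backend-C5Un08O4.mjs to chrome-backend-Y9F7W5VQ.mjs). A sketch of the pattern as it stands after this update — only the hashed specifier churns between releases, so importers of the stable module are unaffected:

// Stable shim module: restore public names from the minified chunk's exports.
// The hashed filename changes whenever the chunk's content changes.
import { t as ChromeGPUBackend } from "./chrome-backend-Y9F7W5VQ.mjs";
export { ChromeGPUBackend };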