@tryhamster/gerbil 1.0.0-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/LICENSE +23 -0
  2. package/README.md +253 -0
  3. package/bin/cli.js +2 -0
  4. package/dist/auto-update-BbNHbSU1.mjs +3 -0
  5. package/dist/browser/index.d.mts +262 -0
  6. package/dist/browser/index.d.mts.map +1 -0
  7. package/dist/browser/index.mjs +755 -0
  8. package/dist/browser/index.mjs.map +1 -0
  9. package/dist/chrome-backend-C5Un08O4.mjs +771 -0
  10. package/dist/chrome-backend-C5Un08O4.mjs.map +1 -0
  11. package/dist/chrome-backend-CtwPENIW.mjs +3 -0
  12. package/dist/chunk-Ct1HF2bE.mjs +7 -0
  13. package/dist/cli.d.mts +1 -0
  14. package/dist/cli.mjs +7078 -0
  15. package/dist/cli.mjs.map +1 -0
  16. package/dist/frameworks/express.d.mts +22 -0
  17. package/dist/frameworks/express.d.mts.map +1 -0
  18. package/dist/frameworks/express.mjs +123 -0
  19. package/dist/frameworks/express.mjs.map +1 -0
  20. package/dist/frameworks/fastify.d.mts +11 -0
  21. package/dist/frameworks/fastify.d.mts.map +1 -0
  22. package/dist/frameworks/fastify.mjs +73 -0
  23. package/dist/frameworks/fastify.mjs.map +1 -0
  24. package/dist/frameworks/hono.d.mts +14 -0
  25. package/dist/frameworks/hono.d.mts.map +1 -0
  26. package/dist/frameworks/hono.mjs +82 -0
  27. package/dist/frameworks/hono.mjs.map +1 -0
  28. package/dist/frameworks/next.d.mts +31 -0
  29. package/dist/frameworks/next.d.mts.map +1 -0
  30. package/dist/frameworks/next.mjs +116 -0
  31. package/dist/frameworks/next.mjs.map +1 -0
  32. package/dist/frameworks/react.d.mts +56 -0
  33. package/dist/frameworks/react.d.mts.map +1 -0
  34. package/dist/frameworks/react.mjs +172 -0
  35. package/dist/frameworks/react.mjs.map +1 -0
  36. package/dist/frameworks/trpc.d.mts +12 -0
  37. package/dist/frameworks/trpc.d.mts.map +1 -0
  38. package/dist/frameworks/trpc.mjs +80 -0
  39. package/dist/frameworks/trpc.mjs.map +1 -0
  40. package/dist/gerbil-BfnsFWRE.mjs +644 -0
  41. package/dist/gerbil-BfnsFWRE.mjs.map +1 -0
  42. package/dist/gerbil-BjW-z7Fq.mjs +5 -0
  43. package/dist/gerbil-DZ1k3ChC.d.mts +138 -0
  44. package/dist/gerbil-DZ1k3ChC.d.mts.map +1 -0
  45. package/dist/index.d.mts +223 -0
  46. package/dist/index.d.mts.map +1 -0
  47. package/dist/index.mjs +13 -0
  48. package/dist/index.mjs.map +1 -0
  49. package/dist/integrations/ai-sdk.d.mts +78 -0
  50. package/dist/integrations/ai-sdk.d.mts.map +1 -0
  51. package/dist/integrations/ai-sdk.mjs +199 -0
  52. package/dist/integrations/ai-sdk.mjs.map +1 -0
  53. package/dist/integrations/langchain.d.mts +41 -0
  54. package/dist/integrations/langchain.d.mts.map +1 -0
  55. package/dist/integrations/langchain.mjs +93 -0
  56. package/dist/integrations/langchain.mjs.map +1 -0
  57. package/dist/integrations/llamaindex.d.mts +45 -0
  58. package/dist/integrations/llamaindex.d.mts.map +1 -0
  59. package/dist/integrations/llamaindex.mjs +86 -0
  60. package/dist/integrations/llamaindex.mjs.map +1 -0
  61. package/dist/integrations/mcp-client.d.mts +206 -0
  62. package/dist/integrations/mcp-client.d.mts.map +1 -0
  63. package/dist/integrations/mcp-client.mjs +507 -0
  64. package/dist/integrations/mcp-client.mjs.map +1 -0
  65. package/dist/integrations/mcp.d.mts +177 -0
  66. package/dist/integrations/mcp.d.mts.map +1 -0
  67. package/dist/integrations/mcp.mjs +8 -0
  68. package/dist/mcp-R8kRLIKb.mjs +348 -0
  69. package/dist/mcp-R8kRLIKb.mjs.map +1 -0
  70. package/dist/models-DKULvhOr.mjs +136 -0
  71. package/dist/models-DKULvhOr.mjs.map +1 -0
  72. package/dist/models-De2-_GmQ.d.mts +22 -0
  73. package/dist/models-De2-_GmQ.d.mts.map +1 -0
  74. package/dist/one-liner-BUQR0nqq.mjs +98 -0
  75. package/dist/one-liner-BUQR0nqq.mjs.map +1 -0
  76. package/dist/skills/index.d.mts +390 -0
  77. package/dist/skills/index.d.mts.map +1 -0
  78. package/dist/skills/index.mjs +7 -0
  79. package/dist/skills-D3CEpgDc.mjs +630 -0
  80. package/dist/skills-D3CEpgDc.mjs.map +1 -0
  81. package/dist/tools-BsiEE6f2.mjs +567 -0
  82. package/dist/tools-BsiEE6f2.mjs.map +1 -0
  83. package/dist/types-BS1N92Jt.d.mts +183 -0
  84. package/dist/types-BS1N92Jt.d.mts.map +1 -0
  85. package/dist/utils-7vXqtq2Q.mjs +63 -0
  86. package/dist/utils-7vXqtq2Q.mjs.map +1 -0
  87. package/docs/ai-sdk.md +80 -0
  88. package/docs/architecture/README.md +84 -0
  89. package/docs/architecture/caching.md +227 -0
  90. package/docs/architecture/inference.md +176 -0
  91. package/docs/architecture/overview.md +179 -0
  92. package/docs/architecture/streaming.md +261 -0
  93. package/docs/architecture/webgpu.md +213 -0
  94. package/docs/browser.md +328 -0
  95. package/docs/cli.md +155 -0
  96. package/docs/frameworks.md +90 -0
  97. package/docs/mcp-client.md +224 -0
  98. package/docs/mcp.md +109 -0
  99. package/docs/memory.md +229 -0
  100. package/docs/repl.md +473 -0
  101. package/docs/skills.md +261 -0
  102. package/docs/tools.md +304 -0
  103. package/package.json +207 -0
package/dist/browser/index.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"index.mjs","names":[…],"sources":["../../src/browser/index.ts"],"sourcesContent":[…],"mappings":"…"}

The sourcesContent field embeds the full browser entry point, src/browser/index.ts, summarized below. Its module header, "Gerbil Browser Support", describes running LLMs directly in the browser with WebGPU acceleration and carries JSDoc examples for the useChat and useCompletion React hooks and for the low-level createGerbilWorker API. The module re-exports BUILTIN_MODELS and the core types (both browser-safe, with no Node.js dependencies) but intentionally does not export the core Gerbil class: its Node.js code paths (chrome-backend/puppeteer) break browser bundlers, so createGerbilWorker() is the browser entry point instead.
Exported types: WorkerProgress (status "loading" | "downloading" | "ready" | "error", plus message, file, progress, downloadCount, and totalFiles, where a downloadCount of 0 means a cache load); WorkerToken (streamed text with a state of "thinking" or "answering", numTokens, and a running tps); WorkerComplete (final text, numTokens, totalTime, tps); GerbilWorkerOptions (modelId defaulting to "qwen3-0.6b", the onProgress/onToken/onComplete/onError callbacks, and an optional workerUrl); GenerateStreamOptions (maxTokens, temperature, topP, topK, thinking, system); and GerbilWorker (generate, interrupt, reset, terminate, isReady).

createGerbilWorker() resolves the model ID through resolveModel(), then builds an inline module Worker from a Blob. The worker script imports AutoTokenizer, AutoModelForCausalLM, TextStreamer, and InterruptableStoppingCriteria from @huggingface/transformers 3.8.0 on the jsDelivr CDN, and keeps a static ModelPipeline singleton that caches the tokenizer and model per model ID (dtype defaults to "q4f16", device to "webgpu").
The worker's load() handler tracks download state through the progress callback: a file reporting progress below 100 is being downloaded rather than served from cache, so "downloading" messages (with file, percent, downloadCount, and totalFiles) are emitted only when a real download is in flight. After the model loads, it runs a one-token warmup generate to compile the WebGPU shaders, then posts "ready". The generate() handler applies the chat template with enable_thinking, encodes "<think></think>" to obtain the thinking-boundary token IDs, and installs a token callback that counts tokens, records the start time, and flips the stream state between "thinking" and "answering" whenever one of those IDs appears.
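Building on those types, a sketch of capturing Qwen3 thinking output separately from the answer; the two string buffers are illustrative:

```ts
import { createGerbilWorker } from "@tryhamster/gerbil/browser";

let thinking = "";
let answer = "";

const gerbil = await createGerbilWorker({
  onToken: (token) => {
    // token.state flips when the worker sees the <think> / </think> token IDs
    if (token.state === "thinking") thinking += token.text;
    else answer += token.text;
  },
});

// thinking: true is forwarded to apply_chat_template as enable_thinking (Qwen3)
await gerbil.generate("Why is the sky blue?", { thinking: true });
console.log({ thinking, answer });
```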
A text callback posts each decoded chunk as a "token" message with a live tokens-per-second figure; both callbacks are wired into a TextStreamer (skip_prompt, skip_special_tokens). model.generate() samples only when temperature is positive, reuses a pastKeyValuesCache across turns for multi-turn KV caching, and honors the InterruptableStoppingCriteria; the final "complete" message carries the batch-decoded text, token count, total time, and tps. The worker dispatches on incoming message type: "load", "generate" (after resetting the stopping criteria), "interrupt", and "reset" (which also clears the KV cache). On the main thread, createGerbilWorker() reacts to worker statuses: "init" triggers the load message with the resolved model path; "loading" and "downloading" are forwarded to onProgress; "ready" resolves the factory promise with the GerbilWorker handle; "start", "token", and "complete" feed the callbacks and the pending generate() promise; and "error" rejects whichever promise is outstanding.
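Restating that wire contract as TypeScript unions for reference; the type names WorkerRequest and WorkerResponse are descriptive inventions (the source posts plain object literals), but the fields are taken from the embedded code:

```ts
import type { GenerateStreamOptions } from "@tryhamster/gerbil/browser";

// Messages the main thread posts to the worker.
type WorkerRequest =
  | { type: "load"; modelId: string; options?: { dtype?: string; device?: string } }
  | { type: "generate"; messages: { role: string; content: string }[]; options?: GenerateStreamOptions }
  | { type: "interrupt" } // flips InterruptableStoppingCriteria
  | { type: "reset" };    // clears the past_key_values cache

// Status messages the worker posts back.
type WorkerResponse =
  | { status: "init" }                      // worker booted; main thread replies with "load"
  | { status: "loading"; message?: string } // includes "Compiling shaders..." during warmup
  | { status: "downloading"; file: string; progress: number; downloadCount: number; totalFiles: number }
  | { status: "ready" }
  | { status: "start" }
  | { status: "token"; text: string; state: "thinking" | "answering"; numTokens: number; tps: number }
  | { status: "complete"; text: string; numTokens: number; totalTime: number; tps: number }
  | { status: "error"; error: string };
```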
The returned GerbilWorker handle: generate() builds a two-message chat (a system prompt, defaulting to "You are a helpful assistant.", plus the user prompt) and posts a "generate" message with maxTokens ?? 256, temperature ?? 0.7, topP ?? 0.9, topK ?? 20, and thinking ?? false; interrupt() and reset() post the matching messages; terminate() kills the worker and revokes the blob URL; isReady() reports load state.

The React-hooks section defines Message (id, role "user" | "assistant", content, optional thinking) and LoadingProgress, then UseChatOptions (model, system, thinking, maxTokens defaulting to 512, temperature, initialMessages, autoLoad defaulting to false so the model loads on first generate or on an explicit load(), onReady, onError) and UseChatReturn (messages, input, setInput, handleSubmit, isLoading, loadingProgress, isGenerating, thinking, stop, clear, tps, isReady, error, load). useChat() reads React from globalThis and throws "useChat requires React. Import React before using this hook." when it is missing.
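The useChat JSDoc example from the embedded source:

```tsx
import { useChat } from "@tryhamster/gerbil/browser";

function Chat() {
  const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();

  if (isLoading) return <div>Loading model...</div>;

  return (
    <div>
      {messages.map(m => (
        <div key={m.id}>{m.role}: {m.content}</div>
      ))}
      <form onSubmit={handleSubmit}>
        <input value={input} onChange={e => setInput(e.target.value)} />
        <button disabled={isGenerating}>Send</button>
      </form>
    </div>
  );
}
```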
The hook destructures useState, useEffect, useRef, and useCallback from that global React and keeps state for messages, input, isLoading, loadingProgress, isGenerating, thinking, currentResponse, tps, error, isReady, and shouldLoad, plus refs for the worker, a message-ID counter, and mount status. load() simply flips shouldLoad. The worker-initialization effect then checks isWebGPUSupported() (failing with "WebGPU not supported. Use Chrome/Edge 113+.") and calls createGerbilWorker(), routing progress into loadingProgress, tokens into either thinking or currentResponse depending on token.state, and errors into error state; its cleanup terminates the worker. A second effect commits the finished response (and any thinking text) into the trailing assistant message once generation stops.
handleSubmit() appends the trimmed user message and an empty assistant placeholder, clears the input, and either queues the prompt in pendingMessageRef and calls load() (when the worker is not yet created) or starts generation with the configured system prompt, thinking flag, maxTokens, and temperature. A follow-up effect replays the queued prompt once isReady turns true. stop() interrupts the worker; clear() resets the worker's KV cache and empties the message list. For display, the hook overlays the in-flight currentResponse (and thinking) onto the last assistant message while generating; see the lazy-load sketch below. UseCompletionOptions mirrors UseChatOptions minus initialMessages.
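Since autoLoad defaults to false in both hooks, a model download can be deferred until the user asks for it. A sketch of an explicit-load flow; the LazyChat component shape is illustrative, while load, isReady, isLoading, and loadingProgress come from UseChatReturn:

```tsx
import { useChat } from "@tryhamster/gerbil/browser";

function LazyChat() {
  const { load, isReady, isLoading, loadingProgress, messages, input, setInput, handleSubmit } =
    useChat({ model: "qwen3-0.6b", autoLoad: false });

  // Nothing downloads until the user opts in; submitting a message first
  // would also queue it and trigger the load automatically.
  if (!isReady) {
    return (
      <button onClick={load} disabled={isLoading}>
        {isLoading ? `Loading... ${loadingProgress?.progress ?? 0}%` : "Load model"}
      </button>
    );
  }

  return (
    <form onSubmit={handleSubmit}>
      {messages.map(m => <div key={m.id}>{m.content}</div>)}
      <input value={input} onChange={e => setInput(e.target.value)} />
    </form>
  );
}
```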
UseCompletionReturn exposes completion, thinking, complete(prompt) returning a Promise<string>, isLoading, loadingProgress, isGenerating, stop, tps, isReady, error, and load. useCompletion() follows the same pattern as useChat: it requires the global React, keeps completion/thinking/loading/generating/tps/error/ready state plus resolve, reject, and pending-prompt refs, exposes the same deferred load(), and applies the same WebGPU guard.
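The useCompletion JSDoc example from the embedded source:

```tsx
import { useCompletion } from "@tryhamster/gerbil/browser";

function App() {
  const { complete, completion, isLoading, isGenerating } = useCompletion();

  if (isLoading) return <div>Loading...</div>;

  return (
    <div>
      <button onClick={() => complete("Write a haiku")}>Generate</button>
      <p>{completion}</p>
    </div>
  );
}
```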
Its worker wiring matches useChat except that answer tokens append to completion and onComplete resolves the pending complete() promise with the final text. complete() clears the previous output, stores the promise's resolve and reject, and either queues the prompt behind load() or posts a generate with the configured system, thinking, maxTokens, and temperature; an effect replays the queued prompt once the model is ready, and stop() interrupts generation.

Utilities: isWebGPUSupported() returns whether "gpu" exists on navigator (false outside a browser); getWebGPUInfo() requests a WebGPU adapter and its adapter info, returning { supported, adapter: vendor, device }, or { supported: false } when unavailable or on error. The default export bundles isWebGPUSupported, getWebGPUInfo, and createGerbilWorker.
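A sketch of feature-gating on the two exported utilities; the warning text here is illustrative, while the Chrome/Edge 113+ guidance echoes the hooks' own error message:

```ts
import { isWebGPUSupported, getWebGPUInfo } from "@tryhamster/gerbil/browser";

if (!isWebGPUSupported()) {
  // Same guard the hooks use; they surface "WebGPU not supported. Use Chrome/Edge 113+."
  console.warn("WebGPU not available; skipping in-browser inference.");
} else {
  const info = await getWebGPUInfo();
  console.log(`adapter: ${info?.adapter ?? "unknown"}, device: ${info?.device ?? "unknown"}`);
}
```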
};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA8IA,eAAsB,mBAAmB,UAA+B,EAAE,EAAyB;CACjG,MAAM,EAAE,UAAU,cAAc,YAAY,SAAS,YAAY,YAAY;CAG7E,MAAM,SAAS,aAAa,QAAQ;AAEpC,QAAO,IAAI,SAAS,SAAS,WAAW;EAgMtC,MAAM,OAAO,IAAI,KAAK,CA9LH;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;MA8Le,EAAE,EAAE,MAAM,0BAA0B,CAAC;EACvE,MAAM,YAAY,IAAI,gBAAgB,KAAK;EAC3C,MAAM,SAAS,IAAI,OAAO,WAAW,EAAE,MAAM,UAAU,CAAC;EAExD,IAAI,UAAU;EACd,IAAIA,iBAAkD;EACtD,IAAIC,gBAAiD;EACrD,IAAI,iBAAiB;AAErB,SAAO,aAAa,MAAM;GACxB,MAAM,MAAM,EAAE;AAEd,WAAQ,IAAI,QAAZ;IACE,KAAK;AAEH,YAAO,YAAY;MAAE,MAAM;MAAQ,SAAS,OAAO;MAAM,CAAC;AAC1D;IAEF,KAAK;IACL,KAAK;AACH,kBAAa,IAAsB;AACnC;IAEF,KAAK;AACH,eAAU;AACV,kBAAa,IAAsB;AACnC,aAAQ,aAAa;AACrB;IAEF,KAAK;AACH,sBAAiB;AACjB;IAEF,KAAK;AACH,uBAAkB,IAAI;AACtB,eAAU,IAAmB;AAC7B;IAEF,KAAK;AACH,kBAAa,IAAsB;AACnC,sBAAiB,IAAI,KAAK;AAC1B,sBAAiB;AACjB,qBAAgB;AAChB;IAEF,KAAK;AACH,eAAU,IAAI,MAAM;AACpB,kBAAa;MAAE,QAAQ;MAAS,OAAO,IAAI;MAAO,CAAC;AACnD,SAAI,eAAe;AACjB,oBAAc,IAAI,MAAM,IAAI,MAAM,CAAC;AACnC,uBAAiB;AACjB,sBAAgB;WAEhB,QAAO,IAAI,MAAM,IAAI,MAAM,CAAC;AAE9B;;;AAIN,SAAO,WAAW,MAAM;GACtB,MAAM,QAAQ,EAAE,WAAW;AAC3B,aAAU,MAAM;AAChB,UAAO,IAAI,MAAM,MAAM,CAAC;;EAG1B,MAAMC,eAA6B;GACjC,WAAW,QAAgB,YAAiC,EAAE,KAAK;AACjE,WAAO,IAAI,SAAS,KAAK,QAAQ;AAC/B,sBAAiB;AACjB,qBAAgB;KAGhB,MAAM,WAAW,CACf;MAAE,MAAM;MAAU,SAFLC,UAAQ,UAAU;MAEI,EACnC;MAAE,MAAM;MAAQ,SAAS;MAAQ,CAClC;AAED,YAAO,YAAY;MACjB,MAAM;MACN;MACA,SAAS;OACP,WAAWA,UAAQ,aAAa;OAChC,aAAaA,UAAQ,eAAe;OACpC,MAAMA,UAAQ,QAAQ;OACtB,MAAMA,UAAQ,QAAQ;OACtB,UAAUA,UAAQ,YAAY;OAC/B;MACF,CAAC;MACF;;GAGJ,iBAAiB;AACf,WAAO,YAAY,EAAE,MAAM,aAAa,CAAC;;GAG3C,aAAa;AACX,WAAO,YAAY,EAAE,MAAM,SAAS,CAAC;;GAGvC,iBAAiB;AACf,WAAO,WAAW;AAClB,QAAI,gBAAgB,UAAU;;GAGhC,eAAe;GAChB;GACD;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA2GJ,SAAgB,QAAQ,UAA0B,EAAE,EAAiB;CAEnE,MAAM,QAAQ,WAAW;AACzB,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,+DAA+D;CAGjF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,kBAAkB,EAAE,EACpB,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,UAAU,eAAe,SAAoB,gBAAgB;CACpE,MAAM,CAAC,OAAO,YAAY,SAAiB,GAAG;CAC9C,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,iBAAiB,sBAAsB,SAAiB,GAAG;CAClE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,eAAe,OAAe,EAAE;CACtC,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAGf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,qBAAoB,MAAc,IAAI,MAAM,KAAK;;GAGrD,kBAAkB;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;;GAExB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAA
O,WAAW,CAAC;AAGvB,iBAAgB;AACd,MAAI,CAAC,gBAAgB,iBAAiB;AACpC,gBAAa,SAAoB;AAE/B,QADgB,KAAK,GAAG,GAAG,EACd,SAAS,YACpB,QAAO,KAAK,KAAK,GAAY,MAC3B,MAAM,KAAK,SAAS,IAChB;KACE,GAAG;KACH,SAAS;KACT,UAAU,YAAY;KACvB,GACD,EACL;AAEH,WAAO;KACP;AACF,sBAAmB,GAAG;AACtB,eAAY,GAAG;;AAEjB,eAAa;IAGZ;EAAC;EAAc;EAAiB;EAAS,CAAC;CAG7C,MAAM,oBAAoB,OAAsB,KAAK;CAErD,MAAM,eAAe,aAClB,MAAwC;AACvC,KAAG,kBAAkB;AAErB,MAAI,CAAC,MAAM,MAAM,IAAI,aACnB;EAGF,MAAMC,cAAuB;GAC3B,IAAI,OAAO,EAAE,aAAa;GAC1B,MAAM;GACN,SAAS,MAAM,MAAM;GACtB;EAED,MAAMC,mBAA4B;GAChC,IAAI,OAAO,EAAE,aAAa;GAC1B,MAAM;GACN,SAAS;GACV;AAED,eAAa,SAAoB;GAAC,GAAG;GAAM;GAAa;GAAiB,CAAC;AAC1E,WAAS,GAAG;AACZ,qBAAmB,GAAG;AACtB,cAAY,GAAG;AAGf,MAAI,CAAC,UAAU,SAAS;AACtB,qBAAkB,UAAU,YAAY;AACxC,SAAM;AACN;;AAGF,kBAAgB,KAAK;AACrB,YAAU,QAAQ,SAAS,YAAY,SAAS;GAC9C;GACA,UAAU;GACV;GACA;GACD,CAAC;IAEJ;EAAC;EAAO;EAAc;EAAQ;EAAgB;EAAW;EAAa;EAAK,CAC5E;AAGD,iBAAgB;AACd,MAAI,WAAW,kBAAkB,WAAW,UAAU,SAAS;GAC7D,MAAM,iBAAiB,kBAAkB;AACzC,qBAAkB,UAAU;AAC5B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,gBAAgB;IACzC;IACA,UAAU;IACV;IACA;IACD,CAAC;;AAEJ,eAAa;IAGZ;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;CAE7D,MAAM,OAAO,kBAAkB;AAC7B,YAAU,SAAS,WAAW;AAC9B,kBAAgB,MAAM;IACrB,EAAE,CAAC;CAEN,MAAM,QAAQ,kBAAkB;AAC9B,YAAU,SAAS,OAAO;AAC1B,cAAY,EAAE,CAAC;AACf,qBAAmB,GAAG;AACtB,cAAY,GAAG;IACd,EAAE,CAAC;AAcN,QAAO;EACL,UAZsB,SAAS,KAAK,GAAY,MAAc;AAC9D,OAAI,MAAM,SAAS,SAAS,KAAK,EAAE,SAAS,eAAe,aACzD,QAAO;IACL,GAAG;IACH,SAAS;IACT,UAAU,YAAY;IACvB;AAEH,UAAO;IACP;EAIA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACD;;;;;;;;;;;;;;;;;;;;;;;AAsEH,SAAgB,cAAc,UAAgC,EAAE,EAAuB;CACrF,MAAM,QAAS,WAAmB;AAClC,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,qEAAqE;CAGvF,MAAM,EAAE,UAAU,WAAW,QAAQ,gBAAgB;CAOrD,MAAM,EACJ,QAAQ,cACR,SAAS,gCACT,UAAU,iBAAiB,OAC3B,YAAY,KACZ,cAAc,IACd,WAAW,OACX,SACA,YACE;CAEJ,MAAM,CAAC,YAAY,iBAAiB,SAAiB,GAAG;CACxD,MAAM,CAAC,UAAU,eAAe,SAAiB,GAAG;CACpD,MAAM,CAAC,WAAW,gBAAgB,SAAkB,SAAS;CAC7D,MAAM,CAAC,iBAAiB,sBAAsB,SAAiC,KAAK;CACpF,MAAM,CAAC,cAAc,mBAAmB,SAAkB,MAAM;CAChE,MAAM,CAAC,KAAK,UAAU,SAAiB,EAAE;CACzC,MAAM,CAAC,OAAO,YAAY,SAAwB,KAAK;CACvD,MAAM,CAAC,SAAS,cAAc,SAAkB,MAAM;CACtD,MAAM,CAAC,YAAY,iBAAiB,SAAkB,SAAS;CAE/D,MAAM,YAAY,OAA4B,KAAK;CACnD,MAAM,aAAa,OAAwC,KAAK;CAChE,MAAM,YAAY,OAAsC,KAAK;CAC7D,MAAM,mBAAmB,OAAsB,KAAK;CACpD,MAAM,aAAa,OAAgB,KAAK;CAGxC,MAAM,OAAO,kBAAkB;AAC7B,MAAI,UAAU,WAAW,UACvB;AAEF,eAAa,KAAK;AAClB,gBAAc,KAAK;IAClB,CAAC,UAAU,CAAC;AAEf,iBAAgB;AACd,MAAI,CAAC,WACH;AAGF,MAAI,CAAC,mBAAmB,EAAE;AACxB,YAAS,8CAA8C;AACvD,gBAAa,MAAM;AACnB,aAAU,uBAAuB;AACjC;;AAGF,aAAW,UAAU;AAErB,qBAAmB;GACjB,SAAS;GACT,aAAa,MAAM;AACjB,QAAI,CAAC,WAAW,QACd;AAEF,uBAAmB,EAAE;AACrB,QAAI,EAAE,WAAW,SAAS;AACxB,kBAAa,MAAM;AACnB,gBAAW,KAAK;AAChB,gBAAW;;;GAGf,UAAU,UAAU;AAClB,QAAI,CAAC,WAAW,QACd;AAEF,WAAO,MAAM,IAAI;AACjB,QAAI,MAAM,UAAU,WAClB,cAAa,MAAc,IAAI,MAAM,KAAK;QAE1C,gBAAe,MAAc,IAAI,MAAM,KAAK;;GAGhD,aAAa,WAAW;AACtB,QAAI,CAAC,WAAW,QACd;AAEF,oBAAgB,MAAM;AACtB,eAAW,UAAU,OAAO,KAAK;AACjC,eAAW,UAAU;;GAEvB,UAAU,QAAQ;AAChB,QAAI,CAAC,WAAW,QACd;AAEF,aAAS,IAAI;AACb,oBAAgB,MAAM;AACtB,cAAU,IAAI;;GAEjB,CAAC,CACC,MAAM,WAAW;AAChB,OAAI,WAAW,QACb,WAAU,UAAU;OAEpB,QAAO,WAAW;IAEpB,CACD,OAAO,QAAQ;AACd,OAAI,WAAW,SAAS;AACtB,aAAS,IAAI,QAAQ;AACrB,iBAAa,MAAM;AACnB,cAAU,IAAI,QAAQ;;IAExB;AAEJ,eAAa;AACX,cAAW,UAAU;AACrB,aAAU,SAAS,WAAW;;IAE/B,CAAC,OAAO,WAAW,CAAC;CAEvB,MAAM,WAAW,aACd,WAAoC;AACnC,SAAO,IAAI,SAAS,SAAS,WAAW;AACtC,iBAAc,GAAG;AACjB,eAAY,GAAG;AACf,cAAW,UAAU;AACrB,aAAU,UAAU;AAGpB,OAAI,CAAC,UAAU,SAAS;AACtB,qBAAiB,UAAU;AAC3B,UAAM;AACN;;AAGF,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,QAAQ;IACjC;IACA,UAAU;IACV;IACA;IACD,CAAC;IACF;IAEJ;EAAC;EAAQ;EAAgB;EAAW;EAAa;EAAK,CACvD;AAGD,iBAAgB;AACd,MAAI,W
AAW,iBAAiB,WAAW,UAAU,SAAS;GAC5D,MAAM,gBAAgB,iBAAiB;AACvC,oBAAiB,UAAU;AAC3B,mBAAgB,KAAK;AACrB,aAAU,QAAQ,SAAS,eAAe;IACxC;IACA,UAAU;IACV;IACA;IACD,CAAC;;AAEJ,eAAa;IAGZ;EAAC;EAAS;EAAQ;EAAgB;EAAW;EAAY,CAAC;AAO7D,QAAO;EACL;EACA;EACA;EACA;EACA;EACA;EACA,MAZW,kBAAkB;AAC7B,aAAU,SAAS,WAAW;AAC9B,mBAAgB,MAAM;KACrB,EAAE,CAAC;EAUJ;EACA;EACA;EACA;EACD;;;;;AAUH,SAAgB,oBAA6B;AAC3C,KAAI,OAAO,cAAc,YACvB,QAAO;AAET,QAAO,SAAS;;;;;AAMlB,eAAsB,gBAIZ;AACR,KAAI,CAAC,mBAAmB,CACtB,QAAO,EAAE,WAAW,OAAO;AAG7B,KAAI;EACF,MAAM,UAAU,MAAO,UAAkB,IAAI,gBAAgB;AAC7D,MAAI,CAAC,QACH,QAAO,EAAE,WAAW,OAAO;EAG7B,MAAM,OAAO,MAAM,QAAQ,oBAAoB;AAC/C,SAAO;GACL,WAAW;GACX,SAAS,KAAK;GACd,QAAQ,KAAK;GACd;SACK;AACN,SAAO,EAAE,WAAW,OAAO;;;AAI/B,sBAAe;CAAE;CAAmB;CAAe;CAAoB"}