@tryhamster/gerbil 1.0.0-rc.0 → 1.0.0-rc.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +79 -14
- package/dist/auto-update-S9s5-g0C.mjs +3 -0
- package/dist/browser/index.d.ts +1009 -0
- package/dist/browser/index.d.ts.map +1 -0
- package/dist/browser/index.js +2492 -0
- package/dist/browser/index.js.map +1 -0
- package/dist/{chrome-backend-C5Un08O4.mjs → chrome-backend-CORwaIyC.mjs} +514 -73
- package/dist/chrome-backend-CORwaIyC.mjs.map +1 -0
- package/dist/{chrome-backend-CtwPENIW.mjs → chrome-backend-DIKYoWj-.mjs} +1 -1
- package/dist/cli.mjs +3359 -647
- package/dist/cli.mjs.map +1 -1
- package/dist/frameworks/express.d.mts +1 -1
- package/dist/frameworks/express.mjs +3 -4
- package/dist/frameworks/express.mjs.map +1 -1
- package/dist/frameworks/fastify.d.mts +1 -1
- package/dist/frameworks/fastify.mjs +2 -3
- package/dist/frameworks/fastify.mjs.map +1 -1
- package/dist/frameworks/hono.d.mts +1 -1
- package/dist/frameworks/hono.mjs +2 -3
- package/dist/frameworks/hono.mjs.map +1 -1
- package/dist/frameworks/next.d.mts +2 -2
- package/dist/frameworks/next.mjs +2 -3
- package/dist/frameworks/next.mjs.map +1 -1
- package/dist/frameworks/react.d.mts +1 -1
- package/dist/frameworks/trpc.d.mts +1 -1
- package/dist/frameworks/trpc.mjs +2 -3
- package/dist/frameworks/trpc.mjs.map +1 -1
- package/dist/gerbil-DJGqq7BX.mjs +4 -0
- package/dist/gerbil-DoDGHe6Z.mjs +1631 -0
- package/dist/gerbil-DoDGHe6Z.mjs.map +1 -0
- package/dist/gerbil-qOTe1nl2.d.mts +431 -0
- package/dist/gerbil-qOTe1nl2.d.mts.map +1 -0
- package/dist/index.d.mts +411 -9
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +7 -6
- package/dist/index.mjs.map +1 -1
- package/dist/integrations/ai-sdk.d.mts +122 -4
- package/dist/integrations/ai-sdk.d.mts.map +1 -1
- package/dist/integrations/ai-sdk.mjs +238 -11
- package/dist/integrations/ai-sdk.mjs.map +1 -1
- package/dist/integrations/langchain.d.mts +132 -2
- package/dist/integrations/langchain.d.mts.map +1 -1
- package/dist/integrations/langchain.mjs +175 -8
- package/dist/integrations/langchain.mjs.map +1 -1
- package/dist/integrations/llamaindex.d.mts +1 -1
- package/dist/integrations/llamaindex.mjs +2 -3
- package/dist/integrations/llamaindex.mjs.map +1 -1
- package/dist/integrations/mcp-client.mjs +4 -4
- package/dist/integrations/mcp-client.mjs.map +1 -1
- package/dist/integrations/mcp.d.mts +2 -2
- package/dist/integrations/mcp.d.mts.map +1 -1
- package/dist/integrations/mcp.mjs +5 -6
- package/dist/kokoro-BNTb6egA.mjs +20210 -0
- package/dist/kokoro-BNTb6egA.mjs.map +1 -0
- package/dist/kokoro-CMOGDSgT.js +20212 -0
- package/dist/kokoro-CMOGDSgT.js.map +1 -0
- package/dist/{mcp-R8kRLIKb.mjs → mcp-kzDDWIoS.mjs} +10 -37
- package/dist/mcp-kzDDWIoS.mjs.map +1 -0
- package/dist/microphone-DaMZFRuR.mjs +3 -0
- package/dist/{one-liner-BUQR0nqq.mjs → one-liner-DxnNs_JK.mjs} +2 -2
- package/dist/{one-liner-BUQR0nqq.mjs.map → one-liner-DxnNs_JK.mjs.map} +1 -1
- package/dist/repl-DGUw4fCc.mjs +9 -0
- package/dist/skills/index.d.mts +305 -14
- package/dist/skills/index.d.mts.map +1 -1
- package/dist/skills/index.mjs +5 -6
- package/dist/skills-DulrOPeP.mjs +1435 -0
- package/dist/skills-DulrOPeP.mjs.map +1 -0
- package/dist/stt-1WIefHwc.mjs +3 -0
- package/dist/stt-CG_7KB_0.mjs +434 -0
- package/dist/stt-CG_7KB_0.mjs.map +1 -0
- package/dist/stt-Dne6SENv.js +434 -0
- package/dist/stt-Dne6SENv.js.map +1 -0
- package/dist/{tools-BsiEE6f2.mjs → tools-Bi1P7Xoy.mjs} +6 -7
- package/dist/{tools-BsiEE6f2.mjs.map → tools-Bi1P7Xoy.mjs.map} +1 -1
- package/dist/transformers.web-DiD1gTwk.js +44695 -0
- package/dist/transformers.web-DiD1gTwk.js.map +1 -0
- package/dist/transformers.web-u34VxRFM.js +3 -0
- package/dist/tts-B1pZMlDv.mjs +3 -0
- package/dist/tts-C2FzKuSx.js +725 -0
- package/dist/tts-C2FzKuSx.js.map +1 -0
- package/dist/tts-CyHhcLtN.mjs +731 -0
- package/dist/tts-CyHhcLtN.mjs.map +1 -0
- package/dist/types-CiTc7ez3.d.mts +353 -0
- package/dist/types-CiTc7ez3.d.mts.map +1 -0
- package/dist/{utils-7vXqtq2Q.mjs → utils-CZBZ8dgR.mjs} +1 -1
- package/dist/{utils-7vXqtq2Q.mjs.map → utils-CZBZ8dgR.mjs.map} +1 -1
- package/docs/ai-sdk.md +137 -21
- package/docs/browser.md +241 -2
- package/docs/memory.md +72 -0
- package/docs/stt.md +494 -0
- package/docs/tts.md +569 -0
- package/docs/vision.md +396 -0
- package/package.json +21 -22
- package/dist/auto-update-BbNHbSU1.mjs +0 -3
- package/dist/browser/index.d.mts +0 -262
- package/dist/browser/index.d.mts.map +0 -1
- package/dist/browser/index.mjs +0 -755
- package/dist/browser/index.mjs.map +0 -1
- package/dist/chrome-backend-C5Un08O4.mjs.map +0 -1
- package/dist/gerbil-BfnsFWRE.mjs +0 -644
- package/dist/gerbil-BfnsFWRE.mjs.map +0 -1
- package/dist/gerbil-BjW-z7Fq.mjs +0 -5
- package/dist/gerbil-DZ1k3ChC.d.mts +0 -138
- package/dist/gerbil-DZ1k3ChC.d.mts.map +0 -1
- package/dist/mcp-R8kRLIKb.mjs.map +0 -1
- package/dist/models-DKULvhOr.mjs +0 -136
- package/dist/models-DKULvhOr.mjs.map +0 -1
- package/dist/models-De2-_GmQ.d.mts +0 -22
- package/dist/models-De2-_GmQ.d.mts.map +0 -1
- package/dist/skills-D3CEpgDc.mjs +0 -630
- package/dist/skills-D3CEpgDc.mjs.map +0 -1
- package/dist/types-BS1N92Jt.d.mts +0 -183
- package/dist/types-BS1N92Jt.d.mts.map +0 -1
- /package/dist/{chunk-Ct1HF2bE.mjs → chunk-CkXuGtQK.mjs} +0 -0
package/dist/gerbil-DoDGHe6Z.mjs.map
@@ -0,0 +1 @@
+
{"version":3,"file":"gerbil-DoDGHe6Z.mjs","names":["globalCache: ResponseCache | null","BUILTIN_MODELS: Record<string, ModelConfig>","FAMILY_CONTEXT_DEFAULTS: Record<string, number>","family: ModelConfig[\"family\"]","pipeline","rawPipeline","KOKORO_VOICES_DEFAULT: VoiceInfo[]","isBrowser","tfDevice: \"webgpu\" | \"wasm\" | \"cpu\"","chromeErr: any","result","result: GenerateResult","tokenQueue: string[]","resolveNext: ((value: string | null) => void) | null","content: Array<{ type: string; text?: string }>","text","results: EmbedResult[]","messages: Array<{ role: string; content: string }>"],"sources":["../src/core/cache.ts","../src/core/models.ts","../src/core/gerbil.ts"],"sourcesContent":["/**\n * Response Cache for Gerbil\n *\n * LRU cache with TTL expiration for caching inference responses.\n * Enables instant responses for repeated prompts.\n */\n\nimport type { GenerateResult } from \"./types.js\";\n\n// ============================================\n// Types\n// ============================================\n\ntype CacheEntry = {\n result: GenerateResult;\n createdAt: number;\n ttl: number;\n};\n\ntype CacheStats = {\n hits: number;\n misses: number;\n size: number;\n maxSize: number;\n};\n\n// ============================================\n// Cache Key Generation\n// ============================================\n\n/**\n * Generate a deterministic cache key from prompt and options.\n * Key includes all parameters that affect the output.\n */\nexport function generateCacheKey(\n prompt: string,\n modelId: string,\n options: {\n maxTokens?: number;\n temperature?: number;\n topP?: number;\n topK?: number;\n system?: string;\n thinking?: boolean;\n },\n): string {\n const keyParts = [\n prompt,\n modelId,\n options.maxTokens ?? 256,\n options.temperature ?? 0.7,\n options.topP ?? 0.9,\n options.topK ?? 50,\n options.system ?? \"\",\n options.thinking ?? 
false,\n ];\n\n // Simple hash function for cache key\n const str = JSON.stringify(keyParts);\n let hash = 0;\n for (let i = 0; i < str.length; i++) {\n const char = str.charCodeAt(i);\n hash = (hash << 5) - hash + char;\n hash = hash & hash; // Convert to 32bit integer\n }\n return `gerbil:${hash.toString(16)}`;\n}\n\n// ============================================\n// Response Cache\n// ============================================\n\n/**\n * LRU cache with TTL expiration for inference responses.\n */\nexport class ResponseCache {\n private cache: Map<string, CacheEntry> = new Map();\n private maxSize: number;\n private defaultTtl: number;\n private hits = 0;\n private misses = 0;\n\n /**\n * Create a new response cache.\n * @param maxSize Maximum number of entries (default: 100)\n * @param defaultTtl Default TTL in ms (default: 5 minutes)\n */\n constructor(maxSize = 100, defaultTtl = 5 * 60 * 1000) {\n this.maxSize = maxSize;\n this.defaultTtl = defaultTtl;\n }\n\n /**\n * Get a cached response if it exists and hasn't expired.\n */\n get(key: string): GenerateResult | null {\n const entry = this.cache.get(key);\n\n if (!entry) {\n this.misses++;\n return null;\n }\n\n // Check if expired\n if (Date.now() - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n this.misses++;\n return null;\n }\n\n // Move to end for LRU (delete and re-add)\n this.cache.delete(key);\n this.cache.set(key, entry);\n\n this.hits++;\n return { ...entry.result, cached: true };\n }\n\n /**\n * Store a response in the cache.\n */\n set(key: string, result: GenerateResult, ttl?: number): void {\n // Evict oldest entries if at capacity\n while (this.cache.size >= this.maxSize) {\n const firstKey = this.cache.keys().next().value;\n if (firstKey) {\n this.cache.delete(firstKey);\n }\n }\n\n this.cache.set(key, {\n result,\n createdAt: Date.now(),\n ttl: ttl ?? 
this.defaultTtl,\n });\n }\n\n /**\n * Check if a key exists and is not expired.\n */\n has(key: string): boolean {\n const entry = this.cache.get(key);\n if (!entry) return false;\n\n if (Date.now() - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n return false;\n }\n\n return true;\n }\n\n /**\n * Remove a specific key from the cache.\n */\n delete(key: string): boolean {\n return this.cache.delete(key);\n }\n\n /**\n * Clear all entries from the cache.\n */\n clear(): void {\n this.cache.clear();\n this.hits = 0;\n this.misses = 0;\n }\n\n /**\n * Remove all expired entries.\n */\n prune(): number {\n const now = Date.now();\n let pruned = 0;\n\n for (const [key, entry] of this.cache) {\n if (now - entry.createdAt > entry.ttl) {\n this.cache.delete(key);\n pruned++;\n }\n }\n\n return pruned;\n }\n\n /**\n * Get cache statistics.\n */\n getStats(): CacheStats {\n return {\n hits: this.hits,\n misses: this.misses,\n size: this.cache.size,\n maxSize: this.maxSize,\n };\n }\n\n /**\n * Get hit rate as a percentage.\n */\n getHitRate(): number {\n const total = this.hits + this.misses;\n if (total === 0) return 0;\n return (this.hits / total) * 100;\n }\n}\n\n// ============================================\n// Global Cache Instance\n// ============================================\n\nlet globalCache: ResponseCache | null = null;\n\n/**\n * Get the global response cache instance.\n * Creates one if it doesn't exist.\n */\nexport function getGlobalCache(): ResponseCache {\n if (!globalCache) {\n globalCache = new ResponseCache();\n }\n return globalCache;\n}\n\n/**\n * Configure the global cache with custom settings.\n */\nexport function configureGlobalCache(maxSize?: number, defaultTtl?: number): ResponseCache {\n globalCache = new ResponseCache(maxSize, defaultTtl);\n return globalCache;\n}\n\n/**\n * Clear and reset the global cache.\n */\nexport function clearGlobalCache(): void {\n if (globalCache) {\n globalCache.clear();\n }\n}\n","/**\n * Model Registry\n *\n * Supports built-in models and any HuggingFace model via hf:org/model syntax\n */\n\nimport type { ModelConfig, ModelSource } from \"./types.js\";\n\n// ============================================\n// Built-in Models (curated & tested)\n// ============================================\n\nexport const BUILTIN_MODELS: Record<string, ModelConfig> = {\n \"qwen3-0.6b\": {\n id: \"qwen3-0.6b\",\n repo: \"onnx-community/Qwen3-0.6B-ONNX\",\n description: \"Qwen3 0.6B - Best balance of speed and quality, supports thinking\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: true,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-0.5b\": {\n id: \"qwen2.5-0.5b\",\n repo: \"onnx-community/Qwen2.5-0.5B-Instruct\",\n description: \"Qwen2.5 0.5B - Fast and capable\",\n size: \"~350MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"qwen2.5-coder-0.5b\": {\n id: \"qwen2.5-coder-0.5b\",\n repo: \"onnx-community/Qwen2.5-Coder-0.5B-Instruct\",\n description: \"Qwen2.5 Coder 0.5B - Optimized for code\",\n size: \"~400MB\",\n contextLength: 32_768,\n supportsThinking: false,\n supportsJson: true,\n family: \"qwen\",\n },\n \"smollm2-360m\": {\n id: \"smollm2-360m\",\n repo: \"HuggingFaceTB/SmolLM2-360M-Instruct\",\n description: \"SmolLM2 360M - Fast, good for simple tasks\",\n size: \"~250MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"smollm2-135m\": {\n id: \"smollm2-135m\",\n repo: 
\"HuggingFaceTB/SmolLM2-135M-Instruct\",\n description: \"SmolLM2 135M - Fastest, basic generation\",\n size: \"~100MB\",\n contextLength: 8192,\n supportsThinking: false,\n supportsJson: false,\n family: \"smollm\",\n },\n \"phi-3-mini\": {\n id: \"phi-3-mini\",\n repo: \"microsoft/Phi-3-mini-4k-instruct-onnx\",\n description: \"Phi-3 Mini - High quality, larger model\",\n size: \"~2.1GB\",\n contextLength: 4096,\n supportsThinking: false,\n supportsJson: true,\n family: \"phi\",\n },\n \"ministral-3b\": {\n id: \"ministral-3b\",\n repo: \"mistralai/Ministral-3-3B-Instruct-2512-ONNX\",\n description: \"Ministral 3 3B - Vision + Reasoning, 256k context\",\n size: \"~2.5GB\",\n contextLength: 262_144,\n supportsThinking: true,\n supportsJson: true,\n supportsVision: true,\n visionEncoderSize: \"0.4B\",\n family: \"mistral\",\n },\n};\n\n// ============================================\n// Model Resolution\n// ============================================\n\n/**\n * Parse model identifier and resolve to source\n *\n * Supported formats:\n * - \"qwen3-0.6b\" (built-in)\n * - \"hf:org/model\" (HuggingFace shorthand)\n * - \"https://huggingface.co/org/model\" (full URL)\n * - \"file:./path/to/model\" (local path)\n */\nexport function resolveModel(modelId: string): ModelSource {\n // Built-in model\n if (BUILTIN_MODELS[modelId]) {\n return {\n type: \"builtin\",\n path: BUILTIN_MODELS[modelId].repo,\n };\n }\n\n // HuggingFace shorthand: hf:org/model\n if (modelId.startsWith(\"hf:\")) {\n const repo = modelId.slice(3);\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // HuggingFace URL\n if (modelId.startsWith(\"https://huggingface.co/\")) {\n const repo = modelId.replace(\"https://huggingface.co/\", \"\");\n return {\n type: \"huggingface\",\n path: repo,\n };\n }\n\n // Local file\n if (modelId.startsWith(\"file:\")) {\n const path = modelId.slice(5);\n return {\n type: \"local\",\n path,\n };\n }\n\n // Assume it's a HuggingFace repo if it contains a slash\n if (modelId.includes(\"/\")) {\n return {\n type: \"huggingface\",\n path: modelId,\n };\n }\n\n // Unknown - treat as HuggingFace\n return {\n type: \"huggingface\",\n path: modelId,\n };\n}\n\n/**\n * Get model config (built-in only)\n */\nexport function getModelConfig(modelId: string): ModelConfig | null {\n return BUILTIN_MODELS[modelId] || null;\n}\n\n// Default context lengths by model family (when config.json is unavailable)\nconst FAMILY_CONTEXT_DEFAULTS: Record<string, number> = {\n qwen: 32_768,\n mistral: 262_144, // Ministral models support up to 256K\n llama: 8192,\n phi: 4096,\n smollm: 8192,\n other: 4096,\n};\n\n/**\n * Create model config for external model\n */\nexport function createExternalModelConfig(\n modelId: string,\n repo: string,\n contextLength?: number,\n): ModelConfig {\n // Try to infer family from repo name\n let family: ModelConfig[\"family\"] = \"other\";\n const repoLower = repo.toLowerCase();\n\n if (repoLower.includes(\"qwen\")) {\n family = \"qwen\";\n } else if (repoLower.includes(\"smollm\")) {\n family = \"smollm\";\n } else if (repoLower.includes(\"phi\")) {\n family = \"phi\";\n } else if (repoLower.includes(\"mistral\") || repoLower.includes(\"ministral\")) {\n family = \"mistral\";\n } else if (repoLower.includes(\"llama\")) {\n family = \"llama\";\n }\n\n // Detect vision models from common patterns\n const supportsVision =\n repoLower.includes(\"vision\") ||\n repoLower.includes(\"vlm\") ||\n repoLower.includes(\"image-text\") ||\n repoLower.includes(\"ministral\");\n\n return 
{\n id: modelId,\n repo,\n description: `External model: ${repo}`,\n size: \"Unknown\",\n contextLength: contextLength || FAMILY_CONTEXT_DEFAULTS[family] || 4096,\n supportsThinking: family === \"qwen\" || family === \"mistral\",\n supportsJson: family === \"qwen\" || family === \"phi\" || family === \"mistral\",\n supportsVision,\n family,\n };\n}\n\n/**\n * Fetch context length from HuggingFace model config\n */\nexport async function fetchModelContextLength(repo: string): Promise<number | null> {\n try {\n const res = await fetch(`https://huggingface.co/${repo}/raw/main/config.json`);\n if (!res.ok) {\n return null;\n }\n\n const config = await res.json();\n\n // Different models use different field names\n return (\n config.max_position_embeddings ||\n config.n_positions ||\n config.max_seq_len ||\n config.sliding_window || // Some models use this\n config.context_length ||\n null\n );\n } catch {\n return null;\n }\n}\n\n/**\n * List all built-in models\n */\nexport function listBuiltinModels(): ModelConfig[] {\n return Object.values(BUILTIN_MODELS);\n}\n\n/**\n * Search HuggingFace models (placeholder - would need HF API)\n */\nexport async function searchModels(query: string): Promise<ModelConfig[]> {\n // TODO: Implement HuggingFace API search\n // For now, filter built-in models\n const q = query.toLowerCase();\n return listBuiltinModels().filter(\n (m) =>\n m.id.toLowerCase().includes(q) ||\n m.description.toLowerCase().includes(q) ||\n m.family.toLowerCase().includes(q),\n );\n}\n","/**\n * Gerbil - Local GPU-accelerated LLM inference\n */\n\nimport {\n AutoModelForCausalLM,\n AutoModelForImageTextToText,\n AutoProcessor,\n AutoTokenizer,\n env,\n type FeatureExtractionPipeline,\n type PreTrainedTokenizer,\n RawImage,\n pipeline as rawPipeline,\n type TextGenerationPipeline,\n TextStreamer,\n} from \"@huggingface/transformers\";\n\n// Wrapper to avoid TypeScript complexity issues with transformers.js types\nconst pipeline = rawPipeline as (task: string, model: string, options?: any) => Promise<any>;\n\n// Suppress noisy transformers.js warnings during model loading\nfunction suppressNoisyWarnings<T>(fn: () => Promise<T>): Promise<T> {\n const originalWarn = console.warn;\n console.warn = (...args: any[]) => {\n const msg = args[0]?.toString?.() || \"\";\n // Suppress \"Unable to determine content-length\" warnings from transformers.js\n if (msg.includes(\"content-length\") || msg.includes(\"Unable to determine\")) {\n return;\n }\n originalWarn.apply(console, args);\n };\n\n return fn().finally(() => {\n console.warn = originalWarn;\n });\n}\n\nimport { generateCacheKey, getGlobalCache } from \"./cache.js\";\nimport {\n BUILTIN_MODELS,\n createExternalModelConfig,\n fetchModelContextLength,\n getModelConfig,\n resolveModel,\n} from \"./models.js\";\nimport type {\n AudioChunk,\n EmbedOptions,\n EmbedResult,\n GenerateOptions,\n GenerateResult,\n GerbilConfig,\n JsonOptions,\n LoadOptions,\n LoadSTTOptions,\n LoadTTSOptions,\n ModelConfig,\n SessionStats,\n SpeakOptions,\n SpeakResult,\n STTModelConfig,\n StreamingTranscriptionOptions,\n StreamingTranscriptionSession,\n SystemInfo,\n TranscribeOptions,\n TranscribeResult,\n VoiceInfo,\n} from \"./types.js\";\n\n// TTS types for lazy loading\ntype KokoroTTSType = import(\"./tts.js\").KokoroTTS;\ntype SupertonicTTSType = import(\"./tts.js\").SupertonicTTS;\ntype TTSBackendType = KokoroTTSType | SupertonicTTSType;\n\n// STT type for lazy loading\ntype WhisperSTTType = import(\"./stt.js\").WhisperSTT;\n\n// Default voices for 
listVoices() when TTS not loaded\nconst KOKORO_VOICES_DEFAULT: VoiceInfo[] = [\n {\n id: \"af_bella\",\n name: \"Bella\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, warm and friendly\",\n },\n {\n id: \"af_sarah\",\n name: \"Sarah\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, clear and professional\",\n },\n {\n id: \"af_nicole\",\n name: \"Nicole\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, soft and gentle\",\n },\n {\n id: \"af_sky\",\n name: \"Sky\",\n gender: \"female\",\n language: \"en-us\",\n description: \"American female, young and energetic\",\n },\n {\n id: \"am_adam\",\n name: \"Adam\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, deep and confident\",\n },\n {\n id: \"am_michael\",\n name: \"Michael\",\n gender: \"male\",\n language: \"en-us\",\n description: \"American male, warm and friendly\",\n },\n {\n id: \"bf_emma\",\n name: \"Emma\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, elegant and clear\",\n },\n {\n id: \"bf_isabella\",\n name: \"Isabella\",\n gender: \"female\",\n language: \"en-gb\",\n description: \"British female, sophisticated\",\n },\n {\n id: \"bm_george\",\n name: \"George\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, distinguished\",\n },\n {\n id: \"bm_lewis\",\n name: \"Lewis\",\n gender: \"male\",\n language: \"en-gb\",\n description: \"British male, friendly and warm\",\n },\n];\n\nimport { extractJson, zodToJsonSchema } from \"./utils.js\";\n\n// Configure transformers.js based on environment\nconst isBrowser = typeof window !== \"undefined\";\nenv.allowLocalModels = !isBrowser; // false in browser (fetch from HuggingFace)\nenv.useBrowserCache = isBrowser; // true in browser (cache in IndexedDB)\n\n// ============================================\n// Gerbil Class\n// ============================================\n\n// WebGPU initialization state for Node.js\nlet webgpuInitialized = false;\nlet webgpuAvailable = false;\n\n/**\n * Initialize WebGPU for Node.js environments\n * Called automatically before model loading\n */\nasync function initNodeWebGPU(): Promise<boolean> {\n if (webgpuInitialized) {\n return webgpuAvailable;\n }\n webgpuInitialized = true;\n\n // Skip if in browser (already has WebGPU)\n if (typeof window !== \"undefined\") {\n webgpuAvailable = \"gpu\" in navigator;\n return webgpuAvailable;\n }\n\n // Try to initialize WebGPU in Node.js via Dawn\n // Use Function constructor to hide import from bundlers\n try {\n const dynamicImport = new Function(\"specifier\", \"return import(specifier)\");\n const webgpuModule = await dynamicImport(\"webgpu\");\n const { create, globals } = webgpuModule;\n\n // Extend globalThis with WebGPU globals\n Object.assign(globalThis, globals);\n\n // Create navigator.gpu\n if (!(globalThis as any).navigator) {\n (globalThis as any).navigator = {};\n }\n (globalThis as any).navigator.gpu = create([]);\n\n webgpuAvailable = true;\n } catch {\n // WebGPU not available, will fall back to CPU\n webgpuAvailable = false;\n }\n\n return webgpuAvailable;\n}\n\n// ChromeGPUBackend is dynamically imported only in Node.js to avoid bundling puppeteer in browser\ntype ChromeGPUBackendType = import(\"./chrome-backend.js\").ChromeGPUBackend;\n\nexport class Gerbil {\n private generator: TextGenerationPipeline | null = null;\n private tokenizer: PreTrainedTokenizer | null = null;\n private model: any = null; // 
AutoModelForCausalLM instance\n private embedder: FeatureExtractionPipeline | null = null;\n private currentModel: string | null = null;\n private modelConfig: ModelConfig | null = null;\n private readonly config: GerbilConfig;\n private stats: SessionStats;\n private useDirect = false; // Use direct model loading (for WebGPU)\n private chromeBackend: ChromeGPUBackendType | null = null; // Chrome backend for Node.js WebGPU\n private _deviceMode: \"webgpu\" | \"cpu\" | \"wasm\" = \"cpu\"; // Track which backend is active\n\n // Vision model components\n private processor: any = null; // AutoProcessor for vision models\n private visionModel: any = null; // AutoModelForImageTextToText instance\n private isVisionModel = false; // Whether current model supports vision\n\n constructor(config: GerbilConfig = {}) {\n this.config = config;\n this.stats = {\n prompts: 0,\n tokensIn: 0,\n tokensOut: 0,\n avgSpeed: 0,\n totalTime: 0,\n cacheHits: 0,\n cacheMisses: 0,\n };\n }\n\n // ============================================\n // Static Methods\n // ============================================\n\n static listModels(): ModelConfig[] {\n return Object.values(BUILTIN_MODELS);\n }\n\n static getModel(modelId: string): ModelConfig | undefined {\n return BUILTIN_MODELS[modelId];\n }\n\n // ============================================\n // Model Loading\n // ============================================\n\n /**\n * Load a model\n *\n * @example\n * ```ts\n * // Built-in model\n * await g.loadModel(\"qwen3-0.6b\");\n *\n * // HuggingFace model\n * await g.loadModel(\"hf:microsoft/Phi-3-mini\");\n *\n * // Local model\n * await g.loadModel(\"file:./models/my-model\");\n *\n * // Vision model\n * await g.loadModel(\"ministral-3b\");\n * ```\n */\n async loadModel(modelId = \"qwen3-0.6b\", options: LoadOptions = {}): Promise<void> {\n // Dispose any existing model/backend before loading a new one\n // This prevents zombie Chrome pages when switching models\n if (this.isLoaded()) {\n await this.dispose();\n }\n\n // Initialize WebGPU for Node.js if needed\n await initNodeWebGPU();\n\n const source = resolveModel(modelId);\n const { onProgress, device = \"auto\", dtype: userDtype } = options;\n\n // Get or create model config\n let config = getModelConfig(modelId);\n if (!config) {\n // Try to fetch actual context length from HuggingFace config.json\n const contextLength = await fetchModelContextLength(source.path).catch(() => null);\n config = createExternalModelConfig(modelId, source.path, contextLength || undefined);\n }\n\n // Route to vision model loading if needed\n if (config.supportsVision) {\n return this.loadVisionModel(modelId, source.path, config, options);\n }\n\n onProgress?.({ status: `Loading ${modelId}...` });\n\n // Map device to transformers.js device\n // Browser supports: webgpu, wasm (no cpu)\n // Node supports: webgpu, cpu\n const isBrowser = typeof window !== \"undefined\";\n const fallbackDevice = isBrowser ? \"wasm\" : \"cpu\";\n let tfDevice: \"webgpu\" | \"wasm\" | \"cpu\" = fallbackDevice;\n if (device === \"webgpu\" || device === \"gpu\" || device === \"auto\") {\n tfDevice = \"webgpu\";\n }\n\n // Use q4f16 for WebGPU (required for Qwen3), q4 for CPU/WASM\n const dtype = userDtype ?? (tfDevice === \"webgpu\" ? 
\"q4f16\" : \"q4\");\n\n // Track if we're still in loading phase (to suppress progress during inference)\n let isLoading = true;\n let lastFile = \"\";\n let lastPct = -1;\n\n const progressCallback = (progress: any) => {\n if (!isLoading) {\n return; // Suppress progress after initial load\n }\n\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n // Only report if file changed or progress increased significantly\n if (progress.file !== lastFile || pct >= lastPct + 5) {\n lastFile = progress.file;\n lastPct = pct;\n onProgress?.({\n status: `Downloading ${progress.file}`,\n progress: pct,\n file: progress.file,\n });\n }\n }\n };\n\n try {\n // Use direct model loading for browser WebGPU (like qwen-web does)\n // This bypasses pipeline() which may have different ONNX session config\n if (isBrowser && tfDevice === \"webgpu\") {\n onProgress?.({ status: \"Loading tokenizer...\" });\n this.tokenizer = (await suppressNoisyWarnings(() =>\n AutoTokenizer.from_pretrained(source.path, {\n progress_callback: progressCallback,\n }),\n )) as PreTrainedTokenizer;\n\n onProgress?.({ status: \"Loading model...\" });\n this.model = await suppressNoisyWarnings(() =>\n AutoModelForCausalLM.from_pretrained(source.path, {\n dtype,\n device: tfDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.useDirect = true;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: \"Ready (WebGPU)!\" });\n } else if (!isBrowser && tfDevice === \"webgpu\") {\n // Node.js + WebGPU: Use Chrome backend for real GPU acceleration\n onProgress?.({ status: \"Starting Chrome WebGPU backend...\" });\n\n // Dynamic import to avoid bundling puppeteer in browser builds\n const { ChromeGPUBackend } = await import(\"./chrome-backend.js\");\n this.chromeBackend = await ChromeGPUBackend.create({\n modelId: source.path,\n contextLength: config.contextLength,\n onProgress,\n });\n\n this.useDirect = false;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n // Ready status is set by ChromeGPUBackend\n } else {\n // Use pipeline for CPU / WASM\n const pipelineOptions = {\n dtype,\n device: tfDevice,\n progress_callback: progressCallback,\n };\n this.generator = (await suppressNoisyWarnings(() =>\n pipeline(\"text-generation\", source.path, pipelineOptions as any),\n )) as TextGenerationPipeline;\n\n this.useDirect = false;\n this._deviceMode = tfDevice as \"cpu\" | \"wasm\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (${tfDevice.toUpperCase()})!` });\n }\n } catch (err) {\n // Fallback to CPU/WASM if GPU fails (silently)\n if (tfDevice !== fallbackDevice) {\n onProgress?.({ status: `Using ${fallbackDevice.toUpperCase()}...` });\n\n // Clean up Chrome backend if it was partially initialized\n if (this.chromeBackend) {\n await this.chromeBackend.dispose();\n this.chromeBackend = null;\n }\n\n // Fallback always uses pipeline (WASM/CPU don't need direct loading)\n this.generator = (await suppressNoisyWarnings(() =>\n pipeline(\"text-generation\", source.path, {\n dtype: \"q4\",\n device: fallbackDevice,\n progress_callback: progressCallback,\n } as any),\n )) as TextGenerationPipeline;\n\n this.useDirect = false;\n this._deviceMode = fallbackDevice as \"cpu\" | 
\"wasm\";\n this.isVisionModel = false;\n isLoading = false;\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (${fallbackDevice.toUpperCase()})!` });\n } else {\n throw err;\n }\n }\n }\n\n /**\n * Load a vision model (VLM)\n * Uses AutoProcessor + AutoModelForImageTextToText instead of tokenizer + causal LM\n */\n private async loadVisionModel(\n modelId: string,\n repoPath: string,\n config: ModelConfig,\n options: LoadOptions = {},\n ): Promise<void> {\n const { onProgress, device = \"auto\" } = options;\n\n onProgress?.({ status: `Loading ${modelId} (vision model)...` });\n\n const isBrowser = typeof window !== \"undefined\";\n const fallbackDevice = isBrowser ? \"wasm\" : \"cpu\";\n let tfDevice: \"webgpu\" | \"wasm\" | \"cpu\" = fallbackDevice;\n if (device === \"webgpu\" || device === \"gpu\" || device === \"auto\") {\n tfDevice = \"webgpu\";\n }\n\n // Node.js + WebGPU: Use Chrome backend for GPU acceleration\n if (!isBrowser && tfDevice === \"webgpu\") {\n onProgress?.({ status: \"Starting Chrome WebGPU backend (vision)...\" });\n\n // Dynamic import to avoid bundling puppeteer in browser builds\n const { ChromeGPUBackend } = await import(\"./chrome-backend.js\");\n this.chromeBackend = await ChromeGPUBackend.create({\n modelId: repoPath,\n contextLength: config.contextLength,\n isVision: true, // Enable vision mode in Chrome backend\n onProgress,\n });\n\n this.useDirect = false;\n this._deviceMode = \"webgpu\";\n this.isVisionModel = true;\n this.currentModel = modelId;\n this.modelConfig = config;\n // Ready status is set by ChromeGPUBackend\n return;\n }\n\n // Browser or CPU/WASM: Load directly\n let lastFile = \"\";\n let lastPct = -1;\n\n const progressCallback = (progress: any) => {\n if (progress.status === \"progress\" && progress.file) {\n const pct = Math.round(progress.progress || 0);\n if (progress.file !== lastFile || pct >= lastPct + 5) {\n lastFile = progress.file;\n lastPct = pct;\n onProgress?.({\n status: `Downloading ${progress.file}`,\n progress: pct,\n file: progress.file,\n });\n }\n }\n };\n\n try {\n // Load processor (handles both tokenization and image preprocessing)\n onProgress?.({ status: \"Loading processor...\" });\n this.processor = await suppressNoisyWarnings(() =>\n AutoProcessor.from_pretrained(repoPath, {\n progress_callback: progressCallback,\n }),\n );\n\n // Load vision model\n onProgress?.({ status: \"Loading vision model...\" });\n this.visionModel = await suppressNoisyWarnings(() =>\n AutoModelForImageTextToText.from_pretrained(repoPath, {\n device: tfDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.isVisionModel = true;\n this.useDirect = true;\n this._deviceMode = tfDevice === \"webgpu\" ? 
\"webgpu\" : (tfDevice as \"cpu\" | \"wasm\");\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (Vision, ${tfDevice.toUpperCase()})!` });\n } catch (err) {\n // Fallback to CPU/WASM if GPU fails\n if (tfDevice !== fallbackDevice) {\n onProgress?.({ status: `Vision model: Using ${fallbackDevice.toUpperCase()}...` });\n\n this.processor = await suppressNoisyWarnings(() =>\n AutoProcessor.from_pretrained(repoPath, {\n progress_callback: progressCallback,\n }),\n );\n\n this.visionModel = await suppressNoisyWarnings(() =>\n AutoModelForImageTextToText.from_pretrained(repoPath, {\n device: fallbackDevice,\n progress_callback: progressCallback,\n }),\n );\n\n this.isVisionModel = true;\n this.useDirect = true;\n this._deviceMode = fallbackDevice as \"cpu\" | \"wasm\";\n this.currentModel = modelId;\n this.modelConfig = config;\n onProgress?.({ status: `Ready (Vision, ${fallbackDevice.toUpperCase()})!` });\n } else {\n throw err;\n }\n }\n }\n\n /**\n * Check if a model is loaded\n */\n isLoaded(): boolean {\n return (\n this.generator !== null ||\n (this.useDirect && this.model !== null) ||\n this.chromeBackend !== null ||\n (this.isVisionModel && this.visionModel !== null)\n );\n }\n\n /**\n * Check if current model supports vision\n */\n supportsVision(): boolean {\n return this.isVisionModel && this.modelConfig?.supportsVision === true;\n }\n\n /**\n * Get current model info\n */\n getModelInfo(): ModelConfig | null {\n return this.modelConfig;\n }\n\n /**\n * Get current device mode (webgpu, cpu, or wasm)\n */\n getDeviceMode(): \"webgpu\" | \"cpu\" | \"wasm\" {\n return this._deviceMode;\n }\n\n /**\n * Get dtype used for current model\n */\n getDtype(): string {\n // WebGPU uses q4f16, CPU/WASM use q4\n return this._deviceMode === \"webgpu\" ? 
\"q4f16\" : \"q4\";\n }\n\n /**\n * Get response cache statistics\n */\n getResponseCacheStats(): { hits: number; misses: number; size: number; hitRate: number } {\n const cache = getGlobalCache();\n const stats = cache.getStats();\n return {\n hits: stats.hits,\n misses: stats.misses,\n size: stats.size,\n hitRate: cache.getHitRate(),\n };\n }\n\n /**\n * Clear the response cache (for cached generate() results)\n */\n clearResponseCache(): void {\n getGlobalCache().clear();\n }\n\n /**\n * Get Chrome backend status (if using WebGPU via Chrome)\n */\n getChromeStatus(): {\n pid: number | null;\n port: number;\n modelId: string;\n startedAt: Date | null;\n } | null {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getStatus();\n }\n\n /**\n * Get Chrome memory usage (if using WebGPU via Chrome)\n * Returns JS heap memory in bytes\n */\n async getChromeMemory(): Promise<{ jsHeapUsed: number; jsHeapTotal: number } | null> {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getMemoryUsage();\n }\n\n /**\n * Get memory usage in GB (if using WebGPU via Chrome)\n */\n async getMemoryUsage(): Promise<{ usedGB: number; totalGB: number; usedPercent: number } | null> {\n if (!this.chromeBackend) {\n return null;\n }\n return this.chromeBackend.getMemoryStats();\n }\n\n /**\n * Clear KV cache to free memory\n * This will reset the conversation context but free up memory\n */\n async clearCache(): Promise<void> {\n if (this.chromeBackend) {\n await this.chromeBackend.reset();\n }\n }\n\n /**\n * Check memory usage and cleanup if needed\n * @param thresholdGB Memory threshold in GB (default: 8)\n * @returns true if cleanup was performed\n */\n async checkMemoryAndCleanup(thresholdGB = 8): Promise<boolean> {\n if (!this.chromeBackend) {\n return false;\n }\n return this.chromeBackend.checkMemoryAndCleanup(thresholdGB);\n }\n\n // ============================================\n // Text Generation\n // ============================================\n\n /**\n * Generate text (automatically routes to vision generation if images provided)\n *\n * @example\n * ```ts\n * // Text generation\n * const result = await g.generate(\"Hello!\");\n *\n * // Vision generation (with vision model)\n * const result = await g.generate(\"What's in this image?\", {\n * images: [{ source: \"https://example.com/cat.jpg\" }]\n * });\n * ```\n */\n async generate(prompt: string, options: GenerateOptions = {}): Promise<GenerateResult> {\n if (!this.isLoaded()) {\n // Auto-load default model\n await this.loadModel(this.config.model || \"qwen3-0.6b\");\n }\n\n const { images } = options;\n\n // Route to local vision generation if:\n // 1. Images provided\n // 2. Model supports vision\n // 3. 
NOT using Chrome backend (Chrome backend handles vision internally)\n if (images?.length && this.isVisionModel && !this.chromeBackend) {\n return this.generateWithVision(prompt, options);\n }\n\n // Warn if images provided but model doesn't support vision\n if (images?.length && !this.isVisionModel) {\n }\n\n const {\n maxTokens = 256,\n temperature = 0.7,\n topP = 0.9,\n topK = 50,\n thinking = false,\n system,\n cache = false,\n cacheTtl,\n } = options;\n\n // Check cache if enabled (skip for streaming/vision)\n if (cache && !options.onToken && !images?.length) {\n const cacheKey = generateCacheKey(prompt, this.currentModel || \"\", {\n maxTokens,\n temperature,\n topP,\n topK,\n system,\n thinking,\n });\n const cached = getGlobalCache().get(cacheKey);\n if (cached) {\n return cached;\n }\n }\n\n const startTime = performance.now();\n\n try {\n let rawText = \"\";\n\n if (this.chromeBackend) {\n // Chrome backend approach (for Node.js WebGPU via Chrome)\n try {\n rawText = await this.chromeBackend.generate(prompt, {\n maxTokens,\n temperature,\n topP,\n topK,\n thinking,\n system,\n // Pass images for vision models\n images: images?.map((img) => img.source),\n // Wrap onToken to match Gerbil's simpler signature\n onToken: options.onToken ? (t) => options.onToken?.(t.text) : undefined,\n });\n } catch (chromeErr: any) {\n // If Chrome died (OOM, crash), fall back to CPU silently\n if (chromeErr?.message === \"CHROME_BACKEND_DEAD\" || !this.chromeBackend?.isAlive()) {\n await this.chromeBackend?.dispose().catch(() => {});\n this.chromeBackend = null;\n this._deviceMode = \"cpu\";\n // Load CPU fallback and retry\n const modelPath = this.currentModel || \"qwen3-0.6b\";\n this.generator = (await pipeline(\"text-generation\", modelPath, {\n dtype: \"q4\",\n device: \"cpu\",\n } as any)) as TextGenerationPipeline;\n // Retry with CPU\n return this.generate(prompt, options);\n }\n throw chromeErr;\n }\n } else if (this.useDirect && this.model && this.tokenizer) {\n // Direct model approach (for browser WebGPU)\n const messages = this.buildMessages(prompt, { ...options, thinking });\n\n const inputs = (this.tokenizer as any).apply_chat_template(messages, {\n add_generation_prompt: true,\n return_dict: true,\n enable_thinking: thinking, // Qwen3 thinking mode\n });\n\n const output = await this.model.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n temperature: temperature > 0 ? 
temperature : undefined,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n });\n\n // Get input length to extract only generated tokens\n const inputLength = inputs.input_ids.dims?.[1] || inputs.input_ids.data?.length || 0;\n\n // Slice output tensor to get only new tokens (skip prompt)\n const outputTokens = output.slice(null, [inputLength, null]);\n const decoded = this.tokenizer.batch_decode(outputTokens, {\n skip_special_tokens: true,\n });\n\n rawText = decoded[0] || \"\";\n\n // If we still have prompt artifacts, extract assistant response\n if (rawText.toLowerCase().includes(\"assistant\")) {\n const match = rawText.match(/assistant[:\\s]*([\\s\\S]*)/i);\n if (match) {\n rawText = match[1].trim();\n }\n }\n } else if (this.generator) {\n // Pipeline approach (for Node.js / CPU / WASM)\n const formattedPrompt = this.formatPrompt(prompt, { ...options, thinking });\n\n const output = await this.generator(formattedPrompt, {\n max_new_tokens: maxTokens,\n temperature,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n return_full_text: false,\n });\n\n // Extract text from pipeline output\n if (Array.isArray(output) && output[0]) {\n const result = output[0] as any;\n if (Array.isArray(result.generated_text)) {\n const last = result.generated_text.at(-1);\n rawText = last?.content || \"\";\n } else {\n rawText = result.generated_text || \"\";\n }\n }\n } else {\n throw new Error(\"No model loaded\");\n }\n\n const endTime = performance.now();\n const totalTime = endTime - startTime;\n\n rawText = this.cleanOutput(rawText);\n\n // Always parse thinking to strip <think> tags from output\n // (model may generate them even without thinking mode enabled)\n const { thinking: thinkingText, response } = this.parseThinking(rawText);\n\n // Only include thinking in result if mode was enabled\n const finalThinking = thinking ? 
thinkingText : undefined;\n\n const tokensGenerated = Math.ceil(response.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n const result: GenerateResult = {\n text: response,\n thinking: finalThinking,\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n };\n\n // Store in cache if enabled\n if (cache && !options.onToken && !images?.length) {\n const cacheKey = generateCacheKey(prompt, this.currentModel || \"\", {\n maxTokens,\n temperature,\n topP,\n topK,\n system,\n thinking,\n });\n getGlobalCache().set(cacheKey, result, cacheTtl);\n }\n\n return result;\n } catch (_error) {\n return {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n };\n }\n }\n\n /**\n * Stream text generation (simulated token-by-token)\n *\n * Note: Yields the raw output including <think> tags if thinking mode is enabled.\n * The final result has parsed thinking separated out.\n */\n async *stream(\n prompt: string,\n options: GenerateOptions = {},\n ): AsyncGenerator<string, GenerateResult, unknown> {\n if (!this.isLoaded()) {\n await this.loadModel(this.config.model || \"qwen3-0.6b\");\n }\n\n const startTime = performance.now();\n\n // For Chrome backend, use real streaming via onToken callback\n if (this.chromeBackend) {\n let fullText = \"\";\n const tokenQueue: string[] = [];\n let resolveNext: ((value: string | null) => void) | null = null;\n let done = false;\n\n // Start generation with streaming callback\n const generatePromise = this.chromeBackend\n .generate(prompt, {\n ...options,\n // Convert ImageInput[] to string[] for Chrome backend\n images: options.images?.map((img) => img.source),\n onToken: (token) => {\n fullText += token.text;\n if (resolveNext) {\n resolveNext(token.text);\n resolveNext = null;\n } else {\n tokenQueue.push(token.text);\n }\n },\n })\n .then(() => {\n done = true;\n if (resolveNext) {\n resolveNext(null);\n }\n })\n .catch((err) => {\n done = true;\n if (resolveNext) {\n resolveNext(null);\n }\n throw err;\n });\n\n // Yield tokens as they arrive\n while (!done || tokenQueue.length > 0) {\n if (tokenQueue.length > 0) {\n const token = tokenQueue.shift()!;\n yield token;\n options.onToken?.(token);\n } else if (!done) {\n const token = await new Promise<string | null>((resolve) => {\n resolveNext = resolve;\n });\n if (token) {\n yield token;\n options.onToken?.(token);\n }\n }\n }\n\n await generatePromise;\n\n const { thinking: thinkingText, response } = this.parseThinking(fullText);\n const tokensGenerated = Math.ceil(response.length / 4);\n const totalTime = performance.now() - startTime;\n\n return {\n text: response,\n thinking: options.thinking ? 
thinkingText : undefined,\n tokensGenerated,\n totalTime,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n finishReason: \"stop\" as const,\n };\n }\n\n // For pipeline/direct model, use fake streaming (generate then yield)\n const result = await this.generateRaw(prompt, options);\n\n // Yield word by word for more accurate token simulation\n // (actual tokens average ~4 chars, words are a reasonable approximation)\n const words = result.rawText.split(/(\\s+)/);\n for (const word of words) {\n if (word) {\n yield word;\n options.onToken?.(word);\n }\n }\n\n return result.result;\n }\n\n /**\n * Internal: Generate with raw text access for streaming\n */\n private async generateRaw(\n prompt: string,\n options: GenerateOptions = {},\n ): Promise<{ rawText: string; result: GenerateResult }> {\n const { maxTokens = 256, temperature = 0.7, topP = 0.9, topK = 50, thinking = false } = options;\n\n const startTime = performance.now();\n const formattedPrompt = this.formatPrompt(prompt, { ...options, thinking });\n\n try {\n const output = await this.generator?.(formattedPrompt, {\n max_new_tokens: maxTokens,\n temperature,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n return_full_text: false,\n });\n\n const endTime = performance.now();\n const totalTime = endTime - startTime;\n\n // Extract text from output\n let rawText = \"\";\n if (Array.isArray(output) && output[0]) {\n const result = output[0] as any;\n if (Array.isArray(result.generated_text)) {\n const last = result.generated_text.at(-1);\n rawText = last?.content || \"\";\n } else {\n rawText = result.generated_text || \"\";\n }\n }\n\n rawText = this.cleanOutput(rawText);\n const { thinking: thinkingText, response } = this.parseThinking(rawText);\n const finalThinking = thinking ? thinkingText : undefined;\n const tokensGenerated = Math.ceil(response.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n return {\n rawText,\n result: {\n text: response,\n thinking: finalThinking,\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n },\n };\n } catch (_error) {\n return {\n rawText: \"\",\n result: {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n },\n };\n }\n }\n\n // ============================================\n // Vision Generation\n // ============================================\n\n /**\n * Generate text from images using a vision model\n * Called automatically by generate() when images are provided\n */\n private async generateWithVision(\n prompt: string,\n options: GenerateOptions,\n ): Promise<GenerateResult> {\n if (!(this.processor && this.visionModel)) {\n throw new Error(\"Vision model not loaded. 
Load a vision model first.\");\n }\n\n const {\n images = [],\n maxTokens = 2048,\n temperature = 0.7,\n topP = 0.9,\n topK = 20,\n system,\n } = options;\n\n const startTime = performance.now();\n\n try {\n // Build message content with images and text\n const content: Array<{ type: string; text?: string }> = [];\n\n // Add image placeholders (the actual images are passed separately)\n for (let i = 0; i < images.length; i += 1) {\n content.push({ type: \"image\" });\n }\n\n // Add text prompt\n content.push({ type: \"text\", text: prompt });\n\n const messages = [\n ...(system ? [{ role: \"system\", content: system }] : []),\n { role: \"user\", content },\n ];\n\n // Apply chat template\n const chatPrompt = this.processor.apply_chat_template(messages);\n\n // Load images using RawImage\n const loadedImages = await Promise.all(\n images.map(async (img) => await RawImage.fromURL(img.source)),\n );\n\n // Process inputs (image + text)\n const inputs = await this.processor(\n loadedImages.length === 1 ? loadedImages[0] : loadedImages,\n chatPrompt,\n { add_special_tokens: false },\n );\n\n // Set up streaming if callback provided\n let fullText = \"\";\n const streamer = options.onToken\n ? new TextStreamer(this.processor.tokenizer, {\n skip_prompt: true,\n skip_special_tokens: true,\n callback_function: (text: string) => {\n fullText += text;\n options.onToken?.(text);\n },\n })\n : undefined;\n\n // Generate\n const outputs = await this.visionModel.generate({\n ...inputs,\n max_new_tokens: maxTokens,\n temperature: temperature > 0 ? temperature : undefined,\n top_p: topP,\n top_k: topK,\n do_sample: temperature > 0,\n ...(streamer ? { streamer } : {}),\n });\n\n // Decode output (skip the prompt tokens)\n const inputLength = inputs.input_ids.dims?.at(-1) || 0;\n const decoded = this.processor.batch_decode(outputs.slice(null, [inputLength, null]), {\n skip_special_tokens: true,\n });\n\n const text = decoded[0] || fullText || \"\";\n const totalTime = performance.now() - startTime;\n const tokensGenerated = Math.ceil(text.length / 4);\n\n // Update stats\n this.stats.prompts += 1;\n this.stats.tokensOut += tokensGenerated;\n this.stats.totalTime += totalTime;\n this.stats.avgSpeed = (this.stats.tokensOut / this.stats.totalTime) * 1000;\n\n return {\n text: this.cleanOutput(text),\n tokensGenerated,\n tokensPerSecond: (tokensGenerated / totalTime) * 1000,\n totalTime,\n finishReason: \"stop\",\n provider: \"local\",\n cached: false,\n };\n } catch (_error) {\n return {\n text: \"\",\n tokensGenerated: 0,\n tokensPerSecond: 0,\n totalTime: performance.now() - startTime,\n finishReason: \"error\",\n provider: \"local\",\n cached: false,\n };\n }\n }\n\n // ============================================\n // Structured Output (JSON)\n // ============================================\n\n /**\n * Generate structured JSON output\n */\n async json<T>(prompt: string, options: JsonOptions<T>): Promise<T> {\n const { schema, retries = 3, temperature = 0.3 } = options;\n\n const systemPrompt = `You are a JSON generator. You MUST respond with valid JSON only.\nNo explanations, no markdown, no code blocks. 
Just pure JSON.\nThe JSON must conform to this schema: ${JSON.stringify(zodToJsonSchema(schema))}`;\n\n for (let attempt = 0; attempt < retries; attempt += 1) {\n const result = await this.generate(prompt, {\n system: options.system || systemPrompt,\n temperature,\n maxTokens: 1000,\n });\n\n try {\n // Try to extract JSON from response\n const jsonStr = extractJson(result.text);\n const parsed = JSON.parse(jsonStr);\n const validated = schema.parse(parsed);\n return validated;\n } catch (error) {\n if (attempt === retries - 1) {\n throw new Error(`Failed to generate valid JSON after ${retries} attempts: ${error}`);\n }\n }\n }\n\n throw new Error(\"Failed to generate valid JSON\");\n }\n\n // ============================================\n // Embeddings\n // ============================================\n\n /**\n * Generate embeddings\n */\n async embed(text: string, options: EmbedOptions = {}): Promise<EmbedResult> {\n if (!this.embedder) {\n // Load embedding model\n const model = options.model || \"Xenova/all-MiniLM-L6-v2\";\n this.embedder = (await pipeline(\"feature-extraction\", model)) as FeatureExtractionPipeline;\n }\n\n const startTime = performance.now();\n const output = await this.embedder(text, {\n pooling: \"mean\",\n normalize: options.normalize !== false,\n });\n\n const vector = Array.from(output.data as Float32Array);\n\n return {\n vector,\n text,\n totalTime: performance.now() - startTime,\n };\n }\n\n /**\n * Generate embeddings for multiple texts\n */\n async embedBatch(texts: string[], options: EmbedOptions = {}): Promise<EmbedResult[]> {\n const results: EmbedResult[] = [];\n for (const text of texts) {\n results.push(await this.embed(text, options));\n }\n return results;\n }\n\n // ============================================\n // Stats & Info\n // ============================================\n\n /**\n * Get session stats\n */\n getStats(): SessionStats {\n return { ...this.stats };\n }\n\n /**\n * Get system info\n */\n getInfo(): SystemInfo {\n return {\n version: \"1.0.0\",\n model: this.modelConfig,\n device: {\n backend: \"transformers.js\",\n gpu: null, // TODO: detect GPU\n vram: null,\n status: this.isLoaded() ? 
\"ready\" : \"loading\",\n },\n context: {\n max: this.modelConfig?.contextLength || 0,\n used: 0,\n available: this.modelConfig?.contextLength || 0,\n },\n cache: {\n location: \"~/.gerbil/models\",\n size: \"0 MB\",\n modelCount: 0,\n },\n };\n }\n\n /**\n * Reset stats\n */\n resetStats(): void {\n this.stats = {\n prompts: 0,\n tokensIn: 0,\n tokensOut: 0,\n avgSpeed: 0,\n totalTime: 0,\n cacheHits: 0,\n cacheMisses: 0,\n };\n }\n\n // ============================================\n // Text-to-Speech (TTS)\n // ============================================\n\n private tts: TTSBackendType | null = null;\n private ttsModelId: string = \"kokoro-82m\";\n\n /**\n * Load TTS model for text-to-speech synthesis\n *\n * @example\n * ```ts\n * // Load default (Kokoro)\n * await g.loadTTS({ onProgress: (p) => console.log(p.status) });\n *\n * // Load Supertonic (faster, 44kHz output)\n * await g.loadTTS({ model: \"supertonic-66m\" });\n *\n * const result = await g.speak(\"Hello world\");\n * // result.audio = Float32Array, result.sampleRate = 24000 or 44100\n * ```\n */\n async loadTTS(options: LoadTTSOptions & { model?: string } = {}): Promise<void> {\n const modelId = options.model || \"kokoro-82m\";\n\n // If switching models, dispose the old one\n if (this.tts && this.ttsModelId !== modelId) {\n await this.tts.dispose();\n this.tts = null;\n }\n\n if (this.tts?.isLoaded()) {\n return;\n }\n\n this.ttsModelId = modelId;\n\n // Dynamic import to avoid bundling TTS code when not used\n const { createTTS } = await import(\"./tts.js\");\n\n if (!this.tts) {\n this.tts = createTTS(modelId);\n }\n\n await this.tts.load(options);\n }\n\n /**\n * Ensure TTS model is loaded (lazy loading)\n */\n async ensureTTSLoaded(options?: LoadTTSOptions): Promise<void> {\n if (!this.tts?.isLoaded()) {\n await this.loadTTS(options);\n }\n }\n\n /**\n * Generate speech from text\n *\n * @example\n * ```ts\n * const result = await g.speak(\"Hello world\", { voice: \"af_bella\" });\n * // result.audio = Float32Array PCM\n * // result.sampleRate = 24000\n * // result.duration = seconds\n * ```\n */\n async speak(text: string, options: SpeakOptions = {}): Promise<SpeakResult> {\n await this.ensureTTSLoaded({ onProgress: options.onProgress });\n return this.tts!.speak(text, options);\n }\n\n /**\n * Stream speech generation (yields audio chunks as they're generated)\n *\n * @example\n * ```ts\n * for await (const chunk of g.speakStream(\"Long text...\")) {\n * // chunk.samples = Float32Array\n * // chunk.isFinal = boolean\n * playChunk(chunk);\n * }\n * ```\n */\n async *speakStream(\n text: string,\n options: SpeakOptions = {},\n ): AsyncGenerator<AudioChunk, SpeakResult, unknown> {\n await this.ensureTTSLoaded({ onProgress: options.onProgress });\n return yield* this.tts!.speakStream(text, options);\n }\n\n /**\n * Get list of available TTS voices\n */\n listVoices(): VoiceInfo[] {\n if (!this.tts) {\n // Return default voices from static import\n return KOKORO_VOICES_DEFAULT;\n }\n return this.tts.listVoices();\n }\n\n /**\n * Check if TTS model is loaded\n */\n isTTSLoaded(): boolean {\n return this.tts?.isLoaded() ?? false;\n }\n\n /**\n * Get current TTS model info\n */\n getTTSModelInfo(): { id: string; loaded: boolean; device?: \"webgpu\" | \"cpu\" } | null {\n if (!this.tts) {\n return null;\n }\n return {\n id: this.ttsModelId,\n loaded: this.tts.isLoaded(),\n device: this.tts.isLoaded() ? 
+[gerbil-DoDGHe6Z.mjs.map, continued: a single-line JSON sourcemap whose sourcesContent embeds src/core/gerbil.ts (TTS/STT model loading, transcription, microphone capture, dispose/shutdown, WebGPU backend management, and prompt-formatting helpers), followed by base64-VLQ mappings; generated content omitted]
package/dist/gerbil-qOTe1nl2.d.mts
@@ -0,0 +1,431 @@
+import { C as SpeakResult, E as SystemInfo, O as TranscribeOptions, S as SpeakOptions, T as StreamingTranscriptionSession, b as STTModelConfig, c as GerbilConfig, f as JsonOptions, g as ModelConfig, h as LoadTTSOptions, i as EmbedResult, j as VoiceInfo, k as TranscribeResult, m as LoadSTTOptions, o as GenerateOptions, p as LoadOptions, r as EmbedOptions, s as GenerateResult, t as AudioChunk, w as StreamingTranscriptionOptions, x as SessionStats } from "./types-CiTc7ez3.mjs";
+
+//#region src/core/gerbil.d.ts
+
+declare class Gerbil {
+  private generator;
+  private tokenizer;
+  private model;
+  private embedder;
+  private currentModel;
+  private modelConfig;
+  private readonly config;
+  private stats;
+  private useDirect;
+  private chromeBackend;
+  private _deviceMode;
+  private processor;
+  private visionModel;
+  private isVisionModel;
+  constructor(config?: GerbilConfig);
+  static listModels(): ModelConfig[];
+  static getModel(modelId: string): ModelConfig | undefined;
+  /**
+   * Load a model
+   *
+   * @example
+   * ```ts
+   * // Built-in model
+   * await g.loadModel("qwen3-0.6b");
+   *
+   * // HuggingFace model
+   * await g.loadModel("hf:microsoft/Phi-3-mini");
+   *
+   * // Local model
+   * await g.loadModel("file:./models/my-model");
+   *
+   * // Vision model
+   * await g.loadModel("ministral-3b");
+   * ```
+   */
+  loadModel(modelId?: string, options?: LoadOptions): Promise<void>;
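The declarations above are enough for a first smoke test. A minimal sketch, assuming the package root re-exports the class as `Gerbil`, that `ModelConfig` carries an `id` field, and that `LoadOptions` accepts an `onProgress` callback like the STT/TTS loaders documented further down:

```ts
import { Gerbil } from "@tryhamster/gerbil"; // assumes the root entry re-exports the class by name

// Enumerate the built-in catalog, then load one model.
const ids = Gerbil.listModels().map((m) => m.id); // `id` on ModelConfig is assumed here
console.log("available:", ids);

const g = new Gerbil();
await g.loadModel("qwen3-0.6b", {
  onProgress: (p) => console.log(p.status), // onProgress on LoadOptions is assumed
});
console.log("device:", g.getDeviceMode(), "dtype:", g.getDtype());
```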
+  /**
+   * Load a vision model (VLM)
+   * Uses AutoProcessor + AutoModelForImageTextToText instead of tokenizer + causal LM
+   */
+  private loadVisionModel;
+  /**
+   * Check if a model is loaded
+   */
+  isLoaded(): boolean;
+  /**
+   * Check if current model supports vision
+   */
+  supportsVision(): boolean;
+  /**
+   * Get current model info
+   */
+  getModelInfo(): ModelConfig | null;
+  /**
+   * Get current device mode (webgpu, cpu, or wasm)
+   */
+  getDeviceMode(): "webgpu" | "cpu" | "wasm";
+  /**
+   * Get dtype used for current model
+   */
+  getDtype(): string;
+  /**
+   * Get response cache statistics
+   */
+  getResponseCacheStats(): {
+    hits: number;
+    misses: number;
+    size: number;
+    hitRate: number;
+  };
+  /**
+   * Clear the response cache (for cached generate() results)
+   */
+  clearResponseCache(): void;
+  /**
+   * Get Chrome backend status (if using WebGPU via Chrome)
+   */
+  getChromeStatus(): {
+    pid: number | null;
+    port: number;
+    modelId: string;
+    startedAt: Date | null;
+  } | null;
+  /**
+   * Get Chrome memory usage (if using WebGPU via Chrome)
+   * Returns JS heap memory in bytes
+   */
+  getChromeMemory(): Promise<{
+    jsHeapUsed: number;
+    jsHeapTotal: number;
+  } | null>;
+  /**
+   * Get memory usage in GB (if using WebGPU via Chrome)
+   */
+  getMemoryUsage(): Promise<{
+    usedGB: number;
+    totalGB: number;
+    usedPercent: number;
+  } | null>;
+  /**
+   * Clear KV cache to free memory
+   * This will reset the conversation context but free up memory
+   */
+  clearCache(): Promise<void>;
+  /**
+   * Check memory usage and cleanup if needed
+   * @param thresholdGB Memory threshold in GB (default: 8)
+   * @returns true if cleanup was performed
+   */
+  checkMemoryAndCleanup(thresholdGB?: number): Promise<boolean>;
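`getMemoryUsage()`, `clearCache()`, and `checkMemoryAndCleanup()` compose into a simple guard for long-running processes. A sketch, assuming `g` is a loaded `Gerbil` instance (the 6 GB threshold is arbitrary):

```ts
// Trim the KV cache when the Chrome-hosted model's heap grows too large.
async function memoryGuard(g: Gerbil, thresholdGB = 6): Promise<void> {
  const usage = await g.getMemoryUsage(); // null when not running via Chrome/WebGPU
  if (usage) {
    console.log(`heap: ${usage.usedGB.toFixed(2)}/${usage.totalGB.toFixed(2)} GB (${usage.usedPercent}%)`);
  }
  // Clears the KV cache (and resets conversation context) above the threshold.
  if (await g.checkMemoryAndCleanup(thresholdGB)) {
    console.log("KV cache cleared");
  }
}
```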
+  /**
+   * Generate text (automatically routes to vision generation if images provided)
+   *
+   * @example
+   * ```ts
+   * // Text generation
+   * const result = await g.generate("Hello!");
+   *
+   * // Vision generation (with vision model)
+   * const result = await g.generate("What's in this image?", {
+   *   images: [{ source: "https://example.com/cat.jpg" }]
+   * });
+   * ```
+   */
+  generate(prompt: string, options?: GenerateOptions): Promise<GenerateResult>;
+  /**
+   * Stream text generation (simulated token-by-token)
+   *
+   * Note: Yields the raw output including <think> tags if thinking mode is enabled.
+   * The final result has parsed thinking separated out.
+   */
+  stream(prompt: string, options?: GenerateOptions): AsyncGenerator<string, GenerateResult, unknown>;
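Because `stream()` is typed `AsyncGenerator<string, GenerateResult, unknown>`, a plain `for await` loop discards the final `GenerateResult`; driving the iterator by hand captures both the fragments and the return value. A sketch (the exact shape of `GenerateResult` lives in types-CiTc7ez3.mjs and is not assumed here beyond being loggable):

```ts
const gen = g.stream("Explain WebGPU in one paragraph");
let step = await gen.next();
while (!step.done) {
  process.stdout.write(step.value); // raw text; includes <think> tags in thinking mode
  step = await gen.next();
}
const final = step.value; // the GenerateResult, with thinking parsed out
console.log("\n", final);
```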
+  /**
+   * Internal: Generate with raw text access for streaming
+   */
+  private generateRaw;
+  /**
+   * Generate text from images using a vision model
+   * Called automatically by generate() when images are provided
+   */
+  private generateWithVision;
+  /**
+   * Generate structured JSON output
+   */
+  json<T>(prompt: string, options: JsonOptions<T>): Promise<T>;
+  /**
+   * Generate embeddings
+   */
+  embed(text: string, options?: EmbedOptions): Promise<EmbedResult>;
+  /**
+   * Generate embeddings for multiple texts
+   */
+  embedBatch(texts: string[], options?: EmbedOptions): Promise<EmbedResult[]>;
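A typical consumer of `embed()`/`embedBatch()` is nearest-neighbor search by cosine similarity. A sketch, assuming `EmbedResult` exposes its vector as an `embedding` array (verify the field name in types-CiTc7ez3.mjs):

```ts
function cosine(a: ArrayLike<number>, b: ArrayLike<number>): number {
  let dot = 0;
  let na = 0;
  let nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

const docs = ["gerbils are rodents", "webgpu is a graphics API", "kokoro is a tts model"];
const vecs = await g.embedBatch(docs);
const query = await g.embed("small furry animals");
const ranked = docs
  .map((d, i) => ({ d, score: cosine(query.embedding, vecs[i].embedding) })) // `embedding` is an assumed field
  .sort((a, b) => b.score - a.score);
console.log(ranked[0]);
```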
+  /**
+   * Get session stats
+   */
+  getStats(): SessionStats;
+  /**
+   * Get system info
+   */
+  getInfo(): SystemInfo;
+  /**
+   * Reset stats
+   */
+  resetStats(): void;
+  private tts;
+  private ttsModelId;
+  /**
+   * Load TTS model for text-to-speech synthesis
+   *
+   * @example
+   * ```ts
+   * // Load default (Kokoro)
+   * await g.loadTTS({ onProgress: (p) => console.log(p.status) });
+   *
+   * // Load Supertonic (faster, 44kHz output)
+   * await g.loadTTS({ model: "supertonic-66m" });
+   *
+   * const result = await g.speak("Hello world");
+   * // result.audio = Float32Array, result.sampleRate = 24000 or 44100
+   * ```
+   */
+  loadTTS(options?: LoadTTSOptions & {
+    model?: string;
+  }): Promise<void>;
+  /**
+   * Ensure TTS model is loaded (lazy loading)
+   */
+  ensureTTSLoaded(options?: LoadTTSOptions): Promise<void>;
+  /**
+   * Generate speech from text
+   *
+   * @example
+   * ```ts
+   * const result = await g.speak("Hello world", { voice: "af_bella" });
+   * // result.audio = Float32Array PCM
+   * // result.sampleRate = 24000
+   * // result.duration = seconds
+   * ```
+   */
+  speak(text: string, options?: SpeakOptions): Promise<SpeakResult>;
+  /**
+   * Stream speech generation (yields audio chunks as they're generated)
+   *
+   * @example
+   * ```ts
+   * for await (const chunk of g.speakStream("Long text...")) {
+   *   // chunk.samples = Float32Array
+   *   // chunk.isFinal = boolean
+   *   playChunk(chunk);
+   * }
+   * ```
+   */
+  speakStream(text: string, options?: SpeakOptions): AsyncGenerator<AudioChunk, SpeakResult, unknown>;
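The `SpeakResult` documented above (Float32Array PCM plus a sample rate) maps directly onto a 16-bit mono WAV, so saving speech to disk needs no extra dependencies. A minimal encoder sketch:

```ts
import { writeFileSync } from "node:fs";

// Encode mono float PCM in [-1, 1] as a 16-bit PCM WAV file.
function toWav(samples: Float32Array, sampleRate: number): Buffer {
  const data = Buffer.alloc(44 + samples.length * 2);
  data.write("RIFF", 0);
  data.writeUInt32LE(36 + samples.length * 2, 4);
  data.write("WAVEfmt ", 8);
  data.writeUInt32LE(16, 16);             // fmt chunk size
  data.writeUInt16LE(1, 20);              // PCM format
  data.writeUInt16LE(1, 22);              // mono
  data.writeUInt32LE(sampleRate, 24);
  data.writeUInt32LE(sampleRate * 2, 28); // byte rate = rate * channels * 2
  data.writeUInt16LE(2, 32);              // block align
  data.writeUInt16LE(16, 34);             // bits per sample
  data.write("data", 36);
  data.writeUInt32LE(samples.length * 2, 40);
  for (let i = 0; i < samples.length; i++) {
    const s = Math.max(-1, Math.min(1, samples[i]));
    data.writeInt16LE(Math.round(s * 32767), 44 + i * 2);
  }
  return data;
}

const speech = await g.speak("Hello world", { voice: "af_bella" });
writeFileSync("hello.wav", toWav(speech.audio, speech.sampleRate));
```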
+  /**
+   * Get list of available TTS voices
+   */
+  listVoices(): VoiceInfo[];
+  /**
+   * Check if TTS model is loaded
+   */
+  isTTSLoaded(): boolean;
+  /**
+   * Get current TTS model info
+   */
+  getTTSModelInfo(): {
+    id: string;
+    loaded: boolean;
+    device?: "webgpu" | "cpu";
+  } | null;
+  /**
+   * List available TTS models
+   */
+  listTTSModels(): Promise<Array<{
+    id: string;
+    description: string;
+    sampleRate: number;
+    voiceCount: number;
+  }>>;
+  private stt;
+  /**
+   * Load STT model for speech-to-text transcription
+   *
+   * @example
+   * ```ts
+   * await g.loadSTT({
+   *   onProgress: (p) => console.log(p.status)
+   * });
+   *
+   * const result = await g.transcribe(audioData);
+   * console.log(result.text);
+   * ```
+   */
+  loadSTT(modelId?: string, options?: LoadSTTOptions): Promise<void>;
+  /**
+   * Ensure STT model is loaded (lazy loading)
+   */
+  ensureSTTLoaded(modelId?: string, options?: LoadSTTOptions): Promise<void>;
+  /**
+   * Transcribe audio to text
+   *
+   * @param audio - Audio data as Float32Array (16kHz mono) or Uint8Array (WAV file)
+   * @param options - Transcription options
+   *
+   * @example
+   * ```ts
+   * // From Float32Array (16kHz mono)
+   * const result = await g.transcribe(audioData);
+   * console.log(result.text);
+   *
+   * // With timestamps
+   * const result = await g.transcribe(audioData, { timestamps: true });
+   * for (const seg of result.segments) {
+   *   console.log(`[${seg.start}s] ${seg.text}`);
+   * }
+   *
+   * // From WAV file
+   * const wavData = fs.readFileSync("audio.wav");
+   * const result = await g.transcribe(new Uint8Array(wavData));
+   * ```
+   */
+  transcribe(audio: Float32Array | Uint8Array, options?: TranscribeOptions): Promise<TranscribeResult>;
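`transcribe()` expects 16 kHz mono when handed a Float32Array. If the capture source runs at another rate, a naive linear-interpolation resampler is usually adequate for speech. A sketch (`captured` stands in for audio from your own source):

```ts
// Linear-interpolation downsample to the 16 kHz mono format transcribe() expects.
function resampleTo16k(input: Float32Array, inputRate: number): Float32Array {
  const ratio = inputRate / 16000;
  const out = new Float32Array(Math.floor(input.length / ratio));
  for (let i = 0; i < out.length; i++) {
    const pos = i * ratio;
    const i0 = Math.floor(pos);
    const i1 = Math.min(i0 + 1, input.length - 1);
    out[i] = input[i0] + (input[i1] - input[i0]) * (pos - i0);
  }
  return out;
}

const audio16k = resampleTo16k(captured, 44100);
const result = await g.transcribe(audio16k, { timestamps: true });
console.log(result.text);
```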
+  /**
+   * Create a streaming transcription session
+   *
+   * Transcribes audio in real-time by processing chunks at regular intervals.
+   * Perfect for live captioning, call transcription, or real-time subtitles.
+   *
+   * @param options - Streaming options
+   * @returns Streaming session controller
+   *
+   * @example
+   * ```ts
+   * const session = await g.createStreamingTranscription({
+   *   chunkDuration: 3000, // Transcribe every 3 seconds
+   *   onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),
+   *   onTranscript: (fullText) => console.log("Full:", fullText),
+   * });
+   *
+   * // Feed audio data as it comes in
+   * session.feedAudio(audioChunk);
+   *
+   * // Start automatic interval-based transcription
+   * session.start();
+   *
+   * // Later, stop and get final transcript
+   * const finalText = await session.stop();
+   * ```
+   */
+  createStreamingTranscription(options?: StreamingTranscriptionOptions): Promise<StreamingTranscriptionSession>;
+  /**
+   * Get list of available STT models
+   */
+  listSTTModels(): Promise<STTModelConfig[]>;
+  /**
+   * Check if STT model is loaded
+   */
+  isSTTLoaded(): boolean;
+  /**
+   * Get current STT model info
+   */
+  getSTTModelInfo(): {
+    id: string;
+    loaded: boolean;
+    device?: "webgpu" | "cpu";
+  } | null;
+  /**
+   * Record audio from microphone and transcribe
+   *
+   * @example
+   * ```ts
+   * // Record for 5 seconds and transcribe
+   * const result = await g.listen(5000);
+   * console.log(result.text);
+   *
+   * // Use with voice chat
+   * const userInput = await g.listen(10000);
+   * const response = await g.generate(userInput.text);
+   * await g.speak(response.text);
+   * ```
+   */
+  listen(durationMs?: number, options?: {
+    onProgress?: (status: string) => void;
+  }): Promise<TranscribeResult>;
+  /**
+   * Check if microphone recording is available
+   */
+  isMicrophoneAvailable(): Promise<boolean>;
+  /**
+   * Dispose of resources
+   * @param disconnect If true, also disconnect from shared browser (for clean script exit)
+   */
+  dispose(disconnect?: boolean): Promise<void>;
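Per the `dispose()` contract above (and `shutdown()` just below), a script that used the shared WebGPU browser should dispose its instances with `disconnect: true` and then close the shared browser so the process can exit cleanly. A sketch:

```ts
import { Gerbil } from "@tryhamster/gerbil"; // assumes the root entry re-exports the class by name

async function main() {
  const g = new Gerbil();
  await g.loadModel("qwen3-0.6b");
  try {
    const result = await g.generate("Say hello in five words.");
    console.log(result);
  } finally {
    await g.dispose(true);   // release resources and disconnect from the shared browser
    await Gerbil.shutdown(); // then close the shared Chrome backend entirely
  }
}
main().catch(console.error);
```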
+  /**
+   * Shutdown the shared Chrome backend completely.
+   * Call this when your script/process is done to ensure proper cleanup.
+   * This closes the shared browser used for WebGPU acceleration.
+   */
+  static shutdown(): Promise<void>;
+  /**
+   * Get global WebGPU process info (all active backends)
+   * Useful for monitoring and debugging memory leaks
+   */
+  static getWebGPUProcesses(): Promise<{
+    browser: {
+      running: boolean;
+      pid: number | null;
+      port: number;
+      activePagesCount: number;
+      maxPages: number;
+    };
+    backends: Array<{
+      modelId: string;
+      isVision: boolean;
+      isReady: boolean;
+      memory: {
+        usedGB: number;
+        totalGB: number;
+        usedPercent: number;
+      } | null;
+    }>;
+  } | null>;
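These static helpers let a separate watchdog script inspect backend health; every field used below is declared above, and the call returns null in a browser. A sketch:

```ts
const info = await Gerbil.getWebGPUProcesses();
if (!info) {
  console.log("no WebGPU info (browser environment, or Chrome backend unavailable)");
} else {
  const { browser, backends } = info;
  console.log(`browser pid=${browser.pid} pages=${browser.activePagesCount}/${browser.maxPages}`);
  for (const b of backends) {
    const mem = b.memory ? `${b.memory.usedGB.toFixed(1)}/${b.memory.totalGB.toFixed(1)} GB` : "n/a";
    console.log(`  ${b.modelId} ready=${b.isReady} vision=${b.isVision} mem=${mem}`);
  }
}
```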
+  /**
+   * Kill all WebGPU processes (for zombie cleanup)
+   * Use this if you suspect memory leaks from undisposed Gerbil instances
+   */
+  static killAllWebGPU(): Promise<{
+    pagesKilled: number;
+    browserKilled: boolean;
+  } | null>;
+  /**
+   * Kill a specific WebGPU backend by index
+   * @param index Index of the backend to kill (0-based)
+   */
+  static killWebGPUBackend(index: number): Promise<boolean>;
+  /**
+   * Get all Chrome pages across ALL Gerbil processes
+   * This provides cross-process visibility into WebGPU backends
+   */
+  static getAllChromePagesInfo(): Promise<Array<{
+    url: string;
+    title: string;
+    isOurs: boolean;
+    modelId: string | null;
+    memory: {
+      usedGB: number;
+      totalGB: number;
+    } | null;
+  }> | null>;
+  /**
+   * Kill a Chrome page by index (works across processes)
+   * @param index Index of the page to kill (0-based)
+   */
+  static killChromePage(index: number): Promise<boolean>;
+  /**
+   * Get total Chrome page count (all processes)
+   */
+  static getTotalChromePageCount(): Promise<number>;
+  private formatPrompt;
+  private buildMessages;
+  private parseThinking;
+  private cleanOutput;
+}
+//#endregion
+export { Gerbil as t };
+//# sourceMappingURL=gerbil-qOTe1nl2.d.mts.map