@tryhamster/gerbil 1.0.0-rc.0 → 1.0.0-rc.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/README.md +79 -14
  2. package/dist/auto-update-S9s5-g0C.mjs +3 -0
  3. package/dist/browser/index.d.ts +1009 -0
  4. package/dist/browser/index.d.ts.map +1 -0
  5. package/dist/browser/index.js +2492 -0
  6. package/dist/browser/index.js.map +1 -0
  7. package/dist/{chrome-backend-C5Un08O4.mjs → chrome-backend-CORwaIyC.mjs} +514 -73
  8. package/dist/chrome-backend-CORwaIyC.mjs.map +1 -0
  9. package/dist/{chrome-backend-CtwPENIW.mjs → chrome-backend-DIKYoWj-.mjs} +1 -1
  10. package/dist/cli.mjs +3359 -647
  11. package/dist/cli.mjs.map +1 -1
  12. package/dist/frameworks/express.d.mts +1 -1
  13. package/dist/frameworks/express.mjs +3 -4
  14. package/dist/frameworks/express.mjs.map +1 -1
  15. package/dist/frameworks/fastify.d.mts +1 -1
  16. package/dist/frameworks/fastify.mjs +2 -3
  17. package/dist/frameworks/fastify.mjs.map +1 -1
  18. package/dist/frameworks/hono.d.mts +1 -1
  19. package/dist/frameworks/hono.mjs +2 -3
  20. package/dist/frameworks/hono.mjs.map +1 -1
  21. package/dist/frameworks/next.d.mts +2 -2
  22. package/dist/frameworks/next.mjs +2 -3
  23. package/dist/frameworks/next.mjs.map +1 -1
  24. package/dist/frameworks/react.d.mts +1 -1
  25. package/dist/frameworks/trpc.d.mts +1 -1
  26. package/dist/frameworks/trpc.mjs +2 -3
  27. package/dist/frameworks/trpc.mjs.map +1 -1
  28. package/dist/gerbil-DJGqq7BX.mjs +4 -0
  29. package/dist/gerbil-DoDGHe6Z.mjs +1631 -0
  30. package/dist/gerbil-DoDGHe6Z.mjs.map +1 -0
  31. package/dist/gerbil-qOTe1nl2.d.mts +431 -0
  32. package/dist/gerbil-qOTe1nl2.d.mts.map +1 -0
  33. package/dist/index.d.mts +411 -9
  34. package/dist/index.d.mts.map +1 -1
  35. package/dist/index.mjs +7 -6
  36. package/dist/index.mjs.map +1 -1
  37. package/dist/integrations/ai-sdk.d.mts +122 -4
  38. package/dist/integrations/ai-sdk.d.mts.map +1 -1
  39. package/dist/integrations/ai-sdk.mjs +238 -11
  40. package/dist/integrations/ai-sdk.mjs.map +1 -1
  41. package/dist/integrations/langchain.d.mts +132 -2
  42. package/dist/integrations/langchain.d.mts.map +1 -1
  43. package/dist/integrations/langchain.mjs +175 -8
  44. package/dist/integrations/langchain.mjs.map +1 -1
  45. package/dist/integrations/llamaindex.d.mts +1 -1
  46. package/dist/integrations/llamaindex.mjs +2 -3
  47. package/dist/integrations/llamaindex.mjs.map +1 -1
  48. package/dist/integrations/mcp-client.mjs +4 -4
  49. package/dist/integrations/mcp-client.mjs.map +1 -1
  50. package/dist/integrations/mcp.d.mts +2 -2
  51. package/dist/integrations/mcp.d.mts.map +1 -1
  52. package/dist/integrations/mcp.mjs +5 -6
  53. package/dist/kokoro-BNTb6egA.mjs +20210 -0
  54. package/dist/kokoro-BNTb6egA.mjs.map +1 -0
  55. package/dist/kokoro-CMOGDSgT.js +20212 -0
  56. package/dist/kokoro-CMOGDSgT.js.map +1 -0
  57. package/dist/{mcp-R8kRLIKb.mjs → mcp-kzDDWIoS.mjs} +10 -37
  58. package/dist/mcp-kzDDWIoS.mjs.map +1 -0
  59. package/dist/microphone-DaMZFRuR.mjs +3 -0
  60. package/dist/{one-liner-BUQR0nqq.mjs → one-liner-DxnNs_JK.mjs} +2 -2
  61. package/dist/{one-liner-BUQR0nqq.mjs.map → one-liner-DxnNs_JK.mjs.map} +1 -1
  62. package/dist/repl-DGUw4fCc.mjs +9 -0
  63. package/dist/skills/index.d.mts +305 -14
  64. package/dist/skills/index.d.mts.map +1 -1
  65. package/dist/skills/index.mjs +5 -6
  66. package/dist/skills-DulrOPeP.mjs +1435 -0
  67. package/dist/skills-DulrOPeP.mjs.map +1 -0
  68. package/dist/stt-1WIefHwc.mjs +3 -0
  69. package/dist/stt-CG_7KB_0.mjs +434 -0
  70. package/dist/stt-CG_7KB_0.mjs.map +1 -0
  71. package/dist/stt-Dne6SENv.js +434 -0
  72. package/dist/stt-Dne6SENv.js.map +1 -0
  73. package/dist/{tools-BsiEE6f2.mjs → tools-Bi1P7Xoy.mjs} +6 -7
  74. package/dist/{tools-BsiEE6f2.mjs.map → tools-Bi1P7Xoy.mjs.map} +1 -1
  75. package/dist/transformers.web-DiD1gTwk.js +44695 -0
  76. package/dist/transformers.web-DiD1gTwk.js.map +1 -0
  77. package/dist/transformers.web-u34VxRFM.js +3 -0
  78. package/dist/tts-B1pZMlDv.mjs +3 -0
  79. package/dist/tts-C2FzKuSx.js +725 -0
  80. package/dist/tts-C2FzKuSx.js.map +1 -0
  81. package/dist/tts-CyHhcLtN.mjs +731 -0
  82. package/dist/tts-CyHhcLtN.mjs.map +1 -0
  83. package/dist/types-CiTc7ez3.d.mts +353 -0
  84. package/dist/types-CiTc7ez3.d.mts.map +1 -0
  85. package/dist/{utils-7vXqtq2Q.mjs → utils-CZBZ8dgR.mjs} +1 -1
  86. package/dist/{utils-7vXqtq2Q.mjs.map → utils-CZBZ8dgR.mjs.map} +1 -1
  87. package/docs/ai-sdk.md +137 -21
  88. package/docs/browser.md +241 -2
  89. package/docs/memory.md +72 -0
  90. package/docs/stt.md +494 -0
  91. package/docs/tts.md +569 -0
  92. package/docs/vision.md +396 -0
  93. package/package.json +21 -22
  94. package/dist/auto-update-BbNHbSU1.mjs +0 -3
  95. package/dist/browser/index.d.mts +0 -262
  96. package/dist/browser/index.d.mts.map +0 -1
  97. package/dist/browser/index.mjs +0 -755
  98. package/dist/browser/index.mjs.map +0 -1
  99. package/dist/chrome-backend-C5Un08O4.mjs.map +0 -1
  100. package/dist/gerbil-BfnsFWRE.mjs +0 -644
  101. package/dist/gerbil-BfnsFWRE.mjs.map +0 -1
  102. package/dist/gerbil-BjW-z7Fq.mjs +0 -5
  103. package/dist/gerbil-DZ1k3ChC.d.mts +0 -138
  104. package/dist/gerbil-DZ1k3ChC.d.mts.map +0 -1
  105. package/dist/mcp-R8kRLIKb.mjs.map +0 -1
  106. package/dist/models-DKULvhOr.mjs +0 -136
  107. package/dist/models-DKULvhOr.mjs.map +0 -1
  108. package/dist/models-De2-_GmQ.d.mts +0 -22
  109. package/dist/models-De2-_GmQ.d.mts.map +0 -1
  110. package/dist/skills-D3CEpgDc.mjs +0 -630
  111. package/dist/skills-D3CEpgDc.mjs.map +0 -1
  112. package/dist/types-BS1N92Jt.d.mts +0 -183
  113. package/dist/types-BS1N92Jt.d.mts.map +0 -1
  114. package/dist/{chunk-Ct1HF2bE.mjs → chunk-CkXuGtQK.mjs} +0 -0
package/dist/gerbil-DoDGHe6Z.mjs
@@ -0,0 +1,1631 @@
+ import { n as zodToJsonSchema, t as extractJson } from "./utils-CZBZ8dgR.mjs";
+ import { AutoModelForCausalLM, AutoModelForImageTextToText, AutoProcessor, AutoTokenizer, RawImage, TextStreamer, env, pipeline } from "@huggingface/transformers";
+
+ //#region src/core/cache.ts
+ /**
+ * Generate a deterministic cache key from prompt and options.
+ * Key includes all parameters that affect the output.
+ */
+ function generateCacheKey(prompt, modelId, options) {
+ const keyParts = [
+ prompt,
+ modelId,
+ options.maxTokens ?? 256,
+ options.temperature ?? .7,
+ options.topP ?? .9,
+ options.topK ?? 50,
+ options.system ?? "",
+ options.thinking ?? false
+ ];
+ const str = JSON.stringify(keyParts);
+ let hash = 0;
+ for (let i = 0; i < str.length; i++) {
+ const char = str.charCodeAt(i);
+ hash = (hash << 5) - hash + char;
+ hash = hash & hash;
+ }
+ return `gerbil:${hash.toString(16)}`;
+ }
+ /**
+ * LRU cache with TTL expiration for inference responses.
+ */
+ var ResponseCache = class {
+ cache = /* @__PURE__ */ new Map();
+ maxSize;
+ defaultTtl;
+ hits = 0;
+ misses = 0;
+ /**
+ * Create a new response cache.
+ * @param maxSize Maximum number of entries (default: 100)
+ * @param defaultTtl Default TTL in ms (default: 5 minutes)
+ */
+ constructor(maxSize = 100, defaultTtl = 300 * 1e3) {
+ this.maxSize = maxSize;
+ this.defaultTtl = defaultTtl;
+ }
+ /**
+ * Get a cached response if it exists and hasn't expired.
+ */
+ get(key) {
+ const entry = this.cache.get(key);
+ if (!entry) {
+ this.misses++;
+ return null;
+ }
+ if (Date.now() - entry.createdAt > entry.ttl) {
+ this.cache.delete(key);
+ this.misses++;
+ return null;
+ }
+ this.cache.delete(key);
+ this.cache.set(key, entry);
+ this.hits++;
+ return {
+ ...entry.result,
+ cached: true
+ };
+ }
+ /**
+ * Store a response in the cache.
+ */
+ set(key, result, ttl) {
+ while (this.cache.size >= this.maxSize) {
+ const firstKey = this.cache.keys().next().value;
+ if (firstKey) this.cache.delete(firstKey);
+ }
+ this.cache.set(key, {
+ result,
+ createdAt: Date.now(),
+ ttl: ttl ?? this.defaultTtl
+ });
+ }
+ /**
+ * Check if a key exists and is not expired.
+ */
+ has(key) {
+ const entry = this.cache.get(key);
+ if (!entry) return false;
+ if (Date.now() - entry.createdAt > entry.ttl) {
+ this.cache.delete(key);
+ return false;
+ }
+ return true;
+ }
+ /**
+ * Remove a specific key from the cache.
+ */
+ delete(key) {
+ return this.cache.delete(key);
+ }
+ /**
+ * Clear all entries from the cache.
+ */
+ clear() {
+ this.cache.clear();
+ this.hits = 0;
+ this.misses = 0;
+ }
+ /**
+ * Remove all expired entries.
+ */
+ prune() {
+ const now = Date.now();
+ let pruned = 0;
+ for (const [key, entry] of this.cache) if (now - entry.createdAt > entry.ttl) {
+ this.cache.delete(key);
+ pruned++;
+ }
+ return pruned;
+ }
+ /**
+ * Get cache statistics.
+ */
+ getStats() {
+ return {
+ hits: this.hits,
+ misses: this.misses,
+ size: this.cache.size,
+ maxSize: this.maxSize
+ };
+ }
+ /**
+ * Get hit rate as a percentage.
+ */
+ getHitRate() {
+ const total = this.hits + this.misses;
+ if (total === 0) return 0;
+ return this.hits / total * 100;
+ }
+ };
+ let globalCache = null;
+ /**
+ * Get the global response cache instance.
+ * Creates one if it doesn't exist.
+ */
+ function getGlobalCache() {
+ if (!globalCache) globalCache = new ResponseCache();
+ return globalCache;
+ }
+ /**
+ * Configure the global cache with custom settings.
+ */
+ function configureGlobalCache(maxSize, defaultTtl) {
+ globalCache = new ResponseCache(maxSize, defaultTtl);
+ return globalCache;
+ }
+ /**
+ * Clear and reset the global cache.
+ */
+ function clearGlobalCache() {
+ if (globalCache) globalCache.clear();
+ }
+
+ //#endregion
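The region above is the entire response-cache layer: generateCacheKey hashes every generation parameter into a `gerbil:<hex>` key, and ResponseCache evicts the least-recently-used entry once maxSize is reached. A minimal usage sketch, assuming `ResponseCache` is re-exported from the package root under this name (the bundle exports it under a mangled alias at the bottom of this file):

```ts
import { ResponseCache } from "@tryhamster/gerbil";

const cache = new ResponseCache(50, 60 * 1000); // 50 entries, 60s TTL
cache.set("gerbil:abc123", { text: "hello", cached: false });
cache.get("gerbil:abc123");  // { text: "hello", cached: true } — a hit, bumped to most-recent
cache.get("gerbil:missing"); // null — counted as a miss
console.log(cache.getStats(), cache.getHitRate()); // { hits: 1, misses: 1, size: 1, maxSize: 50 } 50
```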
+ //#region src/core/models.ts
+ const BUILTIN_MODELS = {
+ "qwen3-0.6b": {
+ id: "qwen3-0.6b",
+ repo: "onnx-community/Qwen3-0.6B-ONNX",
+ description: "Qwen3 0.6B - Best balance of speed and quality, supports thinking",
+ size: "~400MB",
+ contextLength: 32768,
+ supportsThinking: true,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "qwen2.5-0.5b": {
+ id: "qwen2.5-0.5b",
+ repo: "onnx-community/Qwen2.5-0.5B-Instruct",
+ description: "Qwen2.5 0.5B - Fast and capable",
+ size: "~350MB",
+ contextLength: 32768,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "qwen2.5-coder-0.5b": {
+ id: "qwen2.5-coder-0.5b",
+ repo: "onnx-community/Qwen2.5-Coder-0.5B-Instruct",
+ description: "Qwen2.5 Coder 0.5B - Optimized for code",
+ size: "~400MB",
+ contextLength: 32768,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "smollm2-360m": {
+ id: "smollm2-360m",
+ repo: "HuggingFaceTB/SmolLM2-360M-Instruct",
+ description: "SmolLM2 360M - Fast, good for simple tasks",
+ size: "~250MB",
+ contextLength: 8192,
+ supportsThinking: false,
+ supportsJson: false,
+ family: "smollm"
+ },
+ "smollm2-135m": {
+ id: "smollm2-135m",
+ repo: "HuggingFaceTB/SmolLM2-135M-Instruct",
+ description: "SmolLM2 135M - Fastest, basic generation",
+ size: "~100MB",
+ contextLength: 8192,
+ supportsThinking: false,
+ supportsJson: false,
+ family: "smollm"
+ },
+ "phi-3-mini": {
+ id: "phi-3-mini",
+ repo: "microsoft/Phi-3-mini-4k-instruct-onnx",
+ description: "Phi-3 Mini - High quality, larger model",
+ size: "~2.1GB",
+ contextLength: 4096,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "phi"
+ },
+ "ministral-3b": {
+ id: "ministral-3b",
+ repo: "mistralai/Ministral-3-3B-Instruct-2512-ONNX",
+ description: "Ministral 3 3B - Vision + Reasoning, 256k context",
+ size: "~2.5GB",
+ contextLength: 262144,
+ supportsThinking: true,
+ supportsJson: true,
+ supportsVision: true,
+ visionEncoderSize: "0.4B",
+ family: "mistral"
+ }
+ };
+ /**
+ * Parse model identifier and resolve to source
+ *
+ * Supported formats:
+ * - "qwen3-0.6b" (built-in)
+ * - "hf:org/model" (HuggingFace shorthand)
+ * - "https://huggingface.co/org/model" (full URL)
+ * - "file:./path/to/model" (local path)
+ */
+ function resolveModel(modelId) {
+ if (BUILTIN_MODELS[modelId]) return {
+ type: "builtin",
+ path: BUILTIN_MODELS[modelId].repo
+ };
+ if (modelId.startsWith("hf:")) return {
+ type: "huggingface",
+ path: modelId.slice(3)
+ };
+ if (modelId.startsWith("https://huggingface.co/")) return {
+ type: "huggingface",
+ path: modelId.replace("https://huggingface.co/", "")
+ };
+ if (modelId.startsWith("file:")) return {
+ type: "local",
+ path: modelId.slice(5)
+ };
+ if (modelId.includes("/")) return {
+ type: "huggingface",
+ path: modelId
+ };
+ return {
+ type: "huggingface",
+ path: modelId
+ };
+ }
+ /**
+ * Get model config (built-in only)
+ */
+ function getModelConfig(modelId) {
+ return BUILTIN_MODELS[modelId] || null;
+ }
+ const FAMILY_CONTEXT_DEFAULTS = {
+ qwen: 32768,
+ mistral: 262144,
+ llama: 8192,
+ phi: 4096,
+ smollm: 8192,
+ other: 4096
+ };
+ /**
+ * Create model config for external model
+ */
+ function createExternalModelConfig(modelId, repo, contextLength) {
+ let family = "other";
+ const repoLower = repo.toLowerCase();
+ if (repoLower.includes("qwen")) family = "qwen";
+ else if (repoLower.includes("smollm")) family = "smollm";
+ else if (repoLower.includes("phi")) family = "phi";
+ else if (repoLower.includes("mistral") || repoLower.includes("ministral")) family = "mistral";
+ else if (repoLower.includes("llama")) family = "llama";
+ const supportsVision = repoLower.includes("vision") || repoLower.includes("vlm") || repoLower.includes("image-text") || repoLower.includes("ministral");
+ return {
+ id: modelId,
+ repo,
+ description: `External model: ${repo}`,
+ size: "Unknown",
+ contextLength: contextLength || FAMILY_CONTEXT_DEFAULTS[family] || 4096,
+ supportsThinking: family === "qwen" || family === "mistral",
+ supportsJson: family === "qwen" || family === "phi" || family === "mistral",
+ supportsVision,
+ family
+ };
+ }
+ /**
+ * Fetch context length from HuggingFace model config
+ */
+ async function fetchModelContextLength(repo) {
+ try {
+ const res = await fetch(`https://huggingface.co/${repo}/raw/main/config.json`);
+ if (!res.ok) return null;
+ const config = await res.json();
+ return config.max_position_embeddings || config.n_positions || config.max_seq_len || config.sliding_window || config.context_length || null;
+ } catch {
+ return null;
+ }
+ }
+ /**
+ * List all built-in models
+ */
+ function listBuiltinModels() {
+ return Object.values(BUILTIN_MODELS);
+ }
+
+ //#endregion
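For reference, the resolver maps each supported identifier format like so (illustrative; the return values follow directly from the branches above, and unknown bare names fall through to HuggingFace):

```ts
resolveModel("qwen3-0.6b");              // { type: "builtin", path: "onnx-community/Qwen3-0.6B-ONNX" }
resolveModel("hf:org/model");            // { type: "huggingface", path: "org/model" }
resolveModel("https://huggingface.co/org/model"); // { type: "huggingface", path: "org/model" }
resolveModel("file:./models/my-model");  // { type: "local", path: "./models/my-model" }
resolveModel("org/model");               // { type: "huggingface", path: "org/model" }
```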
+ //#region src/core/gerbil.ts
+ /**
+ * Gerbil - Local GPU-accelerated LLM inference
+ */
+ const pipeline$1 = pipeline;
+ function suppressNoisyWarnings(fn) {
+ const originalWarn = console.warn;
+ console.warn = (...args) => {
+ const msg = args[0]?.toString?.() || "";
+ if (msg.includes("content-length") || msg.includes("Unable to determine")) return;
+ originalWarn.apply(console, args);
+ };
+ return fn().finally(() => {
+ console.warn = originalWarn;
+ });
+ }
+ const KOKORO_VOICES_DEFAULT = [
+ {
+ id: "af_bella",
+ name: "Bella",
+ gender: "female",
+ language: "en-us",
+ description: "American female, warm and friendly"
+ },
+ {
+ id: "af_sarah",
+ name: "Sarah",
+ gender: "female",
+ language: "en-us",
+ description: "American female, clear and professional"
+ },
+ {
+ id: "af_nicole",
+ name: "Nicole",
+ gender: "female",
+ language: "en-us",
+ description: "American female, soft and gentle"
+ },
+ {
+ id: "af_sky",
+ name: "Sky",
+ gender: "female",
+ language: "en-us",
+ description: "American female, young and energetic"
+ },
+ {
+ id: "am_adam",
+ name: "Adam",
+ gender: "male",
+ language: "en-us",
+ description: "American male, deep and confident"
+ },
+ {
+ id: "am_michael",
+ name: "Michael",
+ gender: "male",
+ language: "en-us",
+ description: "American male, warm and friendly"
+ },
+ {
+ id: "bf_emma",
+ name: "Emma",
+ gender: "female",
+ language: "en-gb",
+ description: "British female, elegant and clear"
+ },
+ {
+ id: "bf_isabella",
+ name: "Isabella",
+ gender: "female",
+ language: "en-gb",
+ description: "British female, sophisticated"
+ },
+ {
+ id: "bm_george",
+ name: "George",
+ gender: "male",
+ language: "en-gb",
+ description: "British male, distinguished"
+ },
+ {
+ id: "bm_lewis",
+ name: "Lewis",
+ gender: "male",
+ language: "en-gb",
+ description: "British male, friendly and warm"
+ }
+ ];
+ const isBrowser = typeof window !== "undefined";
+ env.allowLocalModels = !isBrowser;
+ env.useBrowserCache = isBrowser;
+ let webgpuInitialized = false;
+ let webgpuAvailable = false;
+ /**
+ * Initialize WebGPU for Node.js environments
+ * Called automatically before model loading
+ */
+ async function initNodeWebGPU() {
+ if (webgpuInitialized) return webgpuAvailable;
+ webgpuInitialized = true;
+ if (typeof window !== "undefined") {
+ webgpuAvailable = "gpu" in navigator;
+ return webgpuAvailable;
+ }
+ try {
+ const { create, globals } = await new Function("specifier", "return import(specifier)")("webgpu");
+ Object.assign(globalThis, globals);
+ if (!globalThis.navigator) globalThis.navigator = {};
+ globalThis.navigator.gpu = create([]);
+ webgpuAvailable = true;
+ } catch {
+ webgpuAvailable = false;
+ }
+ return webgpuAvailable;
+ }
+ var Gerbil = class {
+ generator = null;
+ tokenizer = null;
+ model = null;
+ embedder = null;
+ currentModel = null;
+ modelConfig = null;
+ config;
+ stats;
+ useDirect = false;
+ chromeBackend = null;
+ _deviceMode = "cpu";
+ processor = null;
+ visionModel = null;
+ isVisionModel = false;
+ constructor(config = {}) {
+ this.config = config;
+ this.stats = {
+ prompts: 0,
+ tokensIn: 0,
+ tokensOut: 0,
+ avgSpeed: 0,
+ totalTime: 0,
+ cacheHits: 0,
+ cacheMisses: 0
+ };
+ }
+ static listModels() {
+ return Object.values(BUILTIN_MODELS);
+ }
+ static getModel(modelId) {
+ return BUILTIN_MODELS[modelId];
+ }
+ /**
+ * Load a model
+ *
+ * @example
+ * ```ts
+ * // Built-in model
+ * await g.loadModel("qwen3-0.6b");
+ *
+ * // HuggingFace model
+ * await g.loadModel("hf:microsoft/Phi-3-mini");
+ *
+ * // Local model
+ * await g.loadModel("file:./models/my-model");
+ *
+ * // Vision model
+ * await g.loadModel("ministral-3b");
+ * ```
+ */
+ async loadModel(modelId = "qwen3-0.6b", options = {}) {
+ if (this.isLoaded()) await this.dispose();
+ await initNodeWebGPU();
+ const source = resolveModel(modelId);
+ const { onProgress, device = "auto", dtype: userDtype } = options;
+ let config = getModelConfig(modelId);
+ if (!config) {
+ const contextLength = await fetchModelContextLength(source.path).catch(() => null);
+ config = createExternalModelConfig(modelId, source.path, contextLength || void 0);
+ }
+ if (config.supportsVision) return this.loadVisionModel(modelId, source.path, config, options);
+ onProgress?.({ status: `Loading ${modelId}...` });
+ const isBrowser$1 = typeof window !== "undefined";
+ const fallbackDevice = isBrowser$1 ? "wasm" : "cpu";
+ let tfDevice = fallbackDevice;
+ if (device === "webgpu" || device === "gpu" || device === "auto") tfDevice = "webgpu";
+ const dtype = userDtype ?? (tfDevice === "webgpu" ? "q4f16" : "q4");
+ let isLoading = true;
+ let lastFile = "";
+ let lastPct = -1;
+ const progressCallback = (progress) => {
+ if (!isLoading) return;
+ if (progress.status === "progress" && progress.file) {
+ const pct = Math.round(progress.progress || 0);
+ if (progress.file !== lastFile || pct >= lastPct + 5) {
+ lastFile = progress.file;
+ lastPct = pct;
+ onProgress?.({
+ status: `Downloading ${progress.file}`,
+ progress: pct,
+ file: progress.file
+ });
+ }
+ }
+ };
+ try {
+ if (isBrowser$1 && tfDevice === "webgpu") {
+ onProgress?.({ status: "Loading tokenizer..." });
+ this.tokenizer = await suppressNoisyWarnings(() => AutoTokenizer.from_pretrained(source.path, { progress_callback: progressCallback }));
+ onProgress?.({ status: "Loading model..." });
+ this.model = await suppressNoisyWarnings(() => AutoModelForCausalLM.from_pretrained(source.path, {
+ dtype,
+ device: tfDevice,
+ progress_callback: progressCallback
+ }));
+ this.useDirect = true;
+ this._deviceMode = "webgpu";
+ this.isVisionModel = false;
+ isLoading = false;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ onProgress?.({ status: "Ready (WebGPU)!" });
+ } else if (!isBrowser$1 && tfDevice === "webgpu") {
+ onProgress?.({ status: "Starting Chrome WebGPU backend..." });
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ this.chromeBackend = await ChromeGPUBackend.create({
+ modelId: source.path,
+ contextLength: config.contextLength,
+ onProgress
+ });
+ this.useDirect = false;
+ this._deviceMode = "webgpu";
+ this.isVisionModel = false;
+ isLoading = false;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ } else {
+ const pipelineOptions = {
+ dtype,
+ device: tfDevice,
+ progress_callback: progressCallback
+ };
+ this.generator = await suppressNoisyWarnings(() => pipeline$1("text-generation", source.path, pipelineOptions));
+ this.useDirect = false;
+ this._deviceMode = tfDevice;
+ this.isVisionModel = false;
+ isLoading = false;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ onProgress?.({ status: `Ready (${tfDevice.toUpperCase()})!` });
+ }
+ } catch (err) {
+ if (tfDevice !== fallbackDevice) {
+ onProgress?.({ status: `Using ${fallbackDevice.toUpperCase()}...` });
+ if (this.chromeBackend) {
+ await this.chromeBackend.dispose();
+ this.chromeBackend = null;
+ }
+ this.generator = await suppressNoisyWarnings(() => pipeline$1("text-generation", source.path, {
+ dtype: "q4",
+ device: fallbackDevice,
+ progress_callback: progressCallback
+ }));
+ this.useDirect = false;
+ this._deviceMode = fallbackDevice;
+ this.isVisionModel = false;
+ isLoading = false;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ onProgress?.({ status: `Ready (${fallbackDevice.toUpperCase()})!` });
+ } else throw err;
+ }
+ }
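loadModel resolves device "auto" to WebGPU (direct in browsers, via the Chrome backend in Node) and falls back to CPU/WASM on failure; dtype defaults to "q4f16" on WebGPU and "q4" otherwise. A sketch of the options object it destructures:

```ts
const g = new Gerbil();
await g.loadModel("qwen3-0.6b", {
  device: "webgpu", // "auto" | "webgpu" | "gpu" route to WebGPU; anything else = CPU/WASM
  dtype: "q4f16",   // optional; overrides the device-based default
  onProgress: (p) => console.log(p.status, p.progress ?? ""),
});
console.log(g.getDeviceMode()); // "webgpu", "cpu", or "wasm"
```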
+ /**
+ * Load a vision model (VLM)
+ * Uses AutoProcessor + AutoModelForImageTextToText instead of tokenizer + causal LM
+ */
+ async loadVisionModel(modelId, repoPath, config, options = {}) {
+ const { onProgress, device = "auto" } = options;
+ onProgress?.({ status: `Loading ${modelId} (vision model)...` });
+ const isBrowser$1 = typeof window !== "undefined";
+ const fallbackDevice = isBrowser$1 ? "wasm" : "cpu";
+ let tfDevice = fallbackDevice;
+ if (device === "webgpu" || device === "gpu" || device === "auto") tfDevice = "webgpu";
+ if (!isBrowser$1 && tfDevice === "webgpu") {
+ onProgress?.({ status: "Starting Chrome WebGPU backend (vision)..." });
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ this.chromeBackend = await ChromeGPUBackend.create({
+ modelId: repoPath,
+ contextLength: config.contextLength,
+ isVision: true,
+ onProgress
+ });
+ this.useDirect = false;
+ this._deviceMode = "webgpu";
+ this.isVisionModel = true;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ return;
+ }
+ let lastFile = "";
+ let lastPct = -1;
+ const progressCallback = (progress) => {
+ if (progress.status === "progress" && progress.file) {
+ const pct = Math.round(progress.progress || 0);
+ if (progress.file !== lastFile || pct >= lastPct + 5) {
+ lastFile = progress.file;
+ lastPct = pct;
+ onProgress?.({
+ status: `Downloading ${progress.file}`,
+ progress: pct,
+ file: progress.file
+ });
+ }
+ }
+ };
+ try {
+ onProgress?.({ status: "Loading processor..." });
+ this.processor = await suppressNoisyWarnings(() => AutoProcessor.from_pretrained(repoPath, { progress_callback: progressCallback }));
+ onProgress?.({ status: "Loading vision model..." });
+ this.visionModel = await suppressNoisyWarnings(() => AutoModelForImageTextToText.from_pretrained(repoPath, {
+ device: tfDevice,
+ progress_callback: progressCallback
+ }));
+ this.isVisionModel = true;
+ this.useDirect = true;
+ this._deviceMode = tfDevice === "webgpu" ? "webgpu" : tfDevice;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ onProgress?.({ status: `Ready (Vision, ${tfDevice.toUpperCase()})!` });
+ } catch (err) {
+ if (tfDevice !== fallbackDevice) {
+ onProgress?.({ status: `Vision model: Using ${fallbackDevice.toUpperCase()}...` });
+ this.processor = await suppressNoisyWarnings(() => AutoProcessor.from_pretrained(repoPath, { progress_callback: progressCallback }));
+ this.visionModel = await suppressNoisyWarnings(() => AutoModelForImageTextToText.from_pretrained(repoPath, {
+ device: fallbackDevice,
+ progress_callback: progressCallback
+ }));
+ this.isVisionModel = true;
+ this.useDirect = true;
+ this._deviceMode = fallbackDevice;
+ this.currentModel = modelId;
+ this.modelConfig = config;
+ onProgress?.({ status: `Ready (Vision, ${fallbackDevice.toUpperCase()})!` });
+ } else throw err;
+ }
+ }
+ /**
+ * Check if a model is loaded
+ */
+ isLoaded() {
+ return this.generator !== null || this.useDirect && this.model !== null || this.chromeBackend !== null || this.isVisionModel && this.visionModel !== null;
+ }
+ /**
+ * Check if current model supports vision
+ */
+ supportsVision() {
+ return this.isVisionModel && this.modelConfig?.supportsVision === true;
+ }
+ /**
+ * Get current model info
+ */
+ getModelInfo() {
+ return this.modelConfig;
+ }
+ /**
+ * Get current device mode (webgpu, cpu, or wasm)
+ */
+ getDeviceMode() {
+ return this._deviceMode;
+ }
+ /**
+ * Get dtype used for current model
+ */
+ getDtype() {
+ return this._deviceMode === "webgpu" ? "q4f16" : "q4";
+ }
+ /**
+ * Get response cache statistics
+ */
+ getResponseCacheStats() {
+ const cache = getGlobalCache();
+ const stats = cache.getStats();
+ return {
+ hits: stats.hits,
+ misses: stats.misses,
+ size: stats.size,
+ hitRate: cache.getHitRate()
+ };
+ }
+ /**
+ * Clear the response cache (for cached generate() results)
+ */
+ clearResponseCache() {
+ getGlobalCache().clear();
+ }
+ /**
+ * Get Chrome backend status (if using WebGPU via Chrome)
+ */
+ getChromeStatus() {
+ if (!this.chromeBackend) return null;
+ return this.chromeBackend.getStatus();
+ }
+ /**
+ * Get Chrome memory usage (if using WebGPU via Chrome)
+ * Returns JS heap memory in bytes
+ */
+ async getChromeMemory() {
+ if (!this.chromeBackend) return null;
+ return this.chromeBackend.getMemoryUsage();
+ }
+ /**
+ * Get memory usage in GB (if using WebGPU via Chrome)
+ */
+ async getMemoryUsage() {
+ if (!this.chromeBackend) return null;
+ return this.chromeBackend.getMemoryStats();
+ }
+ /**
+ * Clear KV cache to free memory
+ * This will reset the conversation context but free up memory
+ */
+ async clearCache() {
+ if (this.chromeBackend) await this.chromeBackend.reset();
+ }
+ /**
+ * Check memory usage and cleanup if needed
+ * @param thresholdGB Memory threshold in GB (default: 8)
+ * @returns true if cleanup was performed
+ */
+ async checkMemoryAndCleanup(thresholdGB = 8) {
+ if (!this.chromeBackend) return false;
+ return this.chromeBackend.checkMemoryAndCleanup(thresholdGB);
+ }
+ /**
+ * Generate text (automatically routes to vision generation if images provided)
+ *
+ * @example
+ * ```ts
+ * // Text generation
+ * const result = await g.generate("Hello!");
+ *
+ * // Vision generation (with vision model)
+ * const result = await g.generate("What's in this image?", {
+ *   images: [{ source: "https://example.com/cat.jpg" }]
+ * });
+ * ```
+ */
+ async generate(prompt, options = {}) {
+ if (!this.isLoaded()) await this.loadModel(this.config.model || "qwen3-0.6b");
+ const { images } = options;
+ if (images?.length && this.isVisionModel && !this.chromeBackend) return this.generateWithVision(prompt, options);
+ if (images?.length && !this.isVisionModel) {}
+ const { maxTokens = 256, temperature = .7, topP = .9, topK = 50, thinking = false, system, cache = false, cacheTtl } = options;
+ if (cache && !options.onToken && !images?.length) {
+ const cacheKey = generateCacheKey(prompt, this.currentModel || "", {
+ maxTokens,
+ temperature,
+ topP,
+ topK,
+ system,
+ thinking
+ });
+ const cached = getGlobalCache().get(cacheKey);
+ if (cached) return cached;
+ }
+ const startTime = performance.now();
+ try {
+ let rawText = "";
+ if (this.chromeBackend) try {
+ rawText = await this.chromeBackend.generate(prompt, {
+ maxTokens,
+ temperature,
+ topP,
+ topK,
+ thinking,
+ system,
+ images: images?.map((img) => img.source),
+ onToken: options.onToken ? (t) => options.onToken?.(t.text) : void 0
+ });
+ } catch (chromeErr) {
+ if (chromeErr?.message === "CHROME_BACKEND_DEAD" || !this.chromeBackend?.isAlive()) {
+ await this.chromeBackend?.dispose().catch(() => {});
+ this.chromeBackend = null;
+ this._deviceMode = "cpu";
+ this.generator = await pipeline$1("text-generation", this.currentModel || "qwen3-0.6b", {
+ dtype: "q4",
+ device: "cpu"
+ });
+ return this.generate(prompt, options);
+ }
+ throw chromeErr;
+ }
+ else if (this.useDirect && this.model && this.tokenizer) {
+ const messages = this.buildMessages(prompt, {
+ ...options,
+ thinking
+ });
+ const inputs = this.tokenizer.apply_chat_template(messages, {
+ add_generation_prompt: true,
+ return_dict: true,
+ enable_thinking: thinking
+ });
+ const output = await this.model.generate({
+ ...inputs,
+ max_new_tokens: maxTokens,
+ temperature: temperature > 0 ? temperature : void 0,
+ top_p: topP,
+ top_k: topK,
+ do_sample: temperature > 0
+ });
+ const inputLength = inputs.input_ids.dims?.[1] || inputs.input_ids.data?.length || 0;
+ const outputTokens = output.slice(null, [inputLength, null]);
+ rawText = this.tokenizer.batch_decode(outputTokens, { skip_special_tokens: true })[0] || "";
+ if (rawText.toLowerCase().includes("assistant")) {
+ const match = rawText.match(/assistant[:\s]*([\s\S]*)/i);
+ if (match) rawText = match[1].trim();
+ }
+ } else if (this.generator) {
+ const formattedPrompt = this.formatPrompt(prompt, {
+ ...options,
+ thinking
+ });
+ const output = await this.generator(formattedPrompt, {
+ max_new_tokens: maxTokens,
+ temperature,
+ top_p: topP,
+ top_k: topK,
+ do_sample: temperature > 0,
+ return_full_text: false
+ });
+ if (Array.isArray(output) && output[0]) {
+ const result$1 = output[0];
+ if (Array.isArray(result$1.generated_text)) rawText = result$1.generated_text.at(-1)?.content || "";
+ else rawText = result$1.generated_text || "";
+ }
+ } else throw new Error("No model loaded");
+ const totalTime = performance.now() - startTime;
+ rawText = this.cleanOutput(rawText);
+ const { thinking: thinkingText, response } = this.parseThinking(rawText);
+ const finalThinking = thinking ? thinkingText : void 0;
+ const tokensGenerated = Math.ceil(response.length / 4);
+ this.stats.prompts += 1;
+ this.stats.tokensOut += tokensGenerated;
+ this.stats.totalTime += totalTime;
+ this.stats.avgSpeed = this.stats.tokensOut / this.stats.totalTime * 1e3;
+ const result = {
+ text: response,
+ thinking: finalThinking,
+ tokensGenerated,
+ tokensPerSecond: tokensGenerated / totalTime * 1e3,
+ totalTime,
+ finishReason: "stop",
+ provider: "local",
+ cached: false
+ };
+ if (cache && !options.onToken && !images?.length) {
+ const cacheKey = generateCacheKey(prompt, this.currentModel || "", {
+ maxTokens,
+ temperature,
+ topP,
+ topK,
+ system,
+ thinking
+ });
+ getGlobalCache().set(cacheKey, result, cacheTtl);
+ }
+ return result;
+ } catch (_error) {
+ return {
+ text: "",
+ tokensGenerated: 0,
+ tokensPerSecond: 0,
+ totalTime: performance.now() - startTime,
+ finishReason: "error",
+ provider: "local",
+ cached: false
+ };
+ }
+ }
+ /**
+ * Stream text generation (simulated token-by-token)
+ *
+ * Note: Yields the raw output including <think> tags if thinking mode is enabled.
+ * The final result has parsed thinking separated out.
+ */
+ async *stream(prompt, options = {}) {
+ if (!this.isLoaded()) await this.loadModel(this.config.model || "qwen3-0.6b");
+ const startTime = performance.now();
+ if (this.chromeBackend) {
+ let fullText = "";
+ const tokenQueue = [];
+ let resolveNext = null;
+ let done = false;
+ const generatePromise = this.chromeBackend.generate(prompt, {
+ ...options,
+ images: options.images?.map((img) => img.source),
+ onToken: (token) => {
+ fullText += token.text;
+ if (resolveNext) {
+ resolveNext(token.text);
+ resolveNext = null;
+ } else tokenQueue.push(token.text);
+ }
+ }).then(() => {
+ done = true;
+ if (resolveNext) resolveNext(null);
+ }).catch((err) => {
+ done = true;
+ if (resolveNext) resolveNext(null);
+ throw err;
+ });
+ while (!done || tokenQueue.length > 0) if (tokenQueue.length > 0) {
+ const token = tokenQueue.shift();
+ yield token;
+ options.onToken?.(token);
+ } else if (!done) {
+ const token = await new Promise((resolve) => {
+ resolveNext = resolve;
+ });
+ if (token) {
+ yield token;
+ options.onToken?.(token);
+ }
+ }
+ await generatePromise;
+ const { thinking: thinkingText, response } = this.parseThinking(fullText);
+ const tokensGenerated = Math.ceil(response.length / 4);
+ const totalTime = performance.now() - startTime;
+ return {
+ text: response,
+ thinking: options.thinking ? thinkingText : void 0,
+ tokensGenerated,
+ totalTime,
+ tokensPerSecond: tokensGenerated / totalTime * 1e3,
+ finishReason: "stop"
+ };
+ }
+ const result = await this.generateRaw(prompt, options);
+ const words = result.rawText.split(/(\s+)/);
+ for (const word of words) if (word) {
+ yield word;
+ options.onToken?.(word);
+ }
+ return result.result;
+ }
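stream() carries no @example of its own; consumption follows the standard async-generator protocol (and, per the doc comment above, off the Chrome backend the "tokens" are word chunks of an already-completed generation):

```ts
const g = new Gerbil();
for await (const token of g.stream("Tell me a joke", { maxTokens: 128 })) {
  process.stdout.write(token); // print each yielded chunk as it arrives
}
```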
+ /**
+ * Internal: Generate with raw text access for streaming
+ */
+ async generateRaw(prompt, options = {}) {
+ const { maxTokens = 256, temperature = .7, topP = .9, topK = 50, thinking = false } = options;
+ const startTime = performance.now();
+ const formattedPrompt = this.formatPrompt(prompt, {
+ ...options,
+ thinking
+ });
+ try {
+ const output = await this.generator?.(formattedPrompt, {
+ max_new_tokens: maxTokens,
+ temperature,
+ top_p: topP,
+ top_k: topK,
+ do_sample: temperature > 0,
+ return_full_text: false
+ });
+ const totalTime = performance.now() - startTime;
+ let rawText = "";
+ if (Array.isArray(output) && output[0]) {
+ const result = output[0];
+ if (Array.isArray(result.generated_text)) rawText = result.generated_text.at(-1)?.content || "";
+ else rawText = result.generated_text || "";
+ }
+ rawText = this.cleanOutput(rawText);
+ const { thinking: thinkingText, response } = this.parseThinking(rawText);
+ const finalThinking = thinking ? thinkingText : void 0;
+ const tokensGenerated = Math.ceil(response.length / 4);
+ this.stats.prompts += 1;
+ this.stats.tokensOut += tokensGenerated;
+ this.stats.totalTime += totalTime;
+ this.stats.avgSpeed = this.stats.tokensOut / this.stats.totalTime * 1e3;
+ return {
+ rawText,
+ result: {
+ text: response,
+ thinking: finalThinking,
+ tokensGenerated,
+ tokensPerSecond: tokensGenerated / totalTime * 1e3,
+ totalTime,
+ finishReason: "stop",
+ provider: "local",
+ cached: false
+ }
+ };
+ } catch (_error) {
+ return {
+ rawText: "",
+ result: {
+ text: "",
+ tokensGenerated: 0,
+ tokensPerSecond: 0,
+ totalTime: performance.now() - startTime,
+ finishReason: "error",
+ provider: "local",
+ cached: false
+ }
+ };
+ }
+ }
+ /**
+ * Generate text from images using a vision model
+ * Called automatically by generate() when images are provided
+ */
+ async generateWithVision(prompt, options) {
+ if (!(this.processor && this.visionModel)) throw new Error("Vision model not loaded. Load a vision model first.");
+ const { images = [], maxTokens = 2048, temperature = .7, topP = .9, topK = 20, system } = options;
+ const startTime = performance.now();
+ try {
+ const content = [];
+ for (let i = 0; i < images.length; i += 1) content.push({ type: "image" });
+ content.push({
+ type: "text",
+ text: prompt
+ });
+ const messages = [...system ? [{
+ role: "system",
+ content: system
+ }] : [], {
+ role: "user",
+ content
+ }];
+ const chatPrompt = this.processor.apply_chat_template(messages);
+ const loadedImages = await Promise.all(images.map(async (img) => await RawImage.fromURL(img.source)));
+ const inputs = await this.processor(loadedImages.length === 1 ? loadedImages[0] : loadedImages, chatPrompt, { add_special_tokens: false });
+ let fullText = "";
+ const streamer = options.onToken ? new TextStreamer(this.processor.tokenizer, {
+ skip_prompt: true,
+ skip_special_tokens: true,
+ callback_function: (text$1) => {
+ fullText += text$1;
+ options.onToken?.(text$1);
+ }
+ }) : void 0;
+ const outputs = await this.visionModel.generate({
+ ...inputs,
+ max_new_tokens: maxTokens,
+ temperature: temperature > 0 ? temperature : void 0,
+ top_p: topP,
+ top_k: topK,
+ do_sample: temperature > 0,
+ ...streamer ? { streamer } : {}
+ });
+ const inputLength = inputs.input_ids.dims?.at(-1) || 0;
+ const text = this.processor.batch_decode(outputs.slice(null, [inputLength, null]), { skip_special_tokens: true })[0] || fullText || "";
+ const totalTime = performance.now() - startTime;
+ const tokensGenerated = Math.ceil(text.length / 4);
+ this.stats.prompts += 1;
+ this.stats.tokensOut += tokensGenerated;
+ this.stats.totalTime += totalTime;
+ this.stats.avgSpeed = this.stats.tokensOut / this.stats.totalTime * 1e3;
+ return {
+ text: this.cleanOutput(text),
+ tokensGenerated,
+ tokensPerSecond: tokensGenerated / totalTime * 1e3,
+ totalTime,
+ finishReason: "stop",
+ provider: "local",
+ cached: false
+ };
+ } catch (_error) {
+ return {
+ text: "",
+ tokensGenerated: 0,
+ tokensPerSecond: 0,
+ totalTime: performance.now() - startTime,
+ finishReason: "error",
+ provider: "local",
+ cached: false
+ };
+ }
+ }
+ /**
+ * Generate structured JSON output
+ */
+ async json(prompt, options) {
+ const { schema, retries = 3, temperature = .3 } = options;
+ const systemPrompt = `You are a JSON generator. You MUST respond with valid JSON only.
+ No explanations, no markdown, no code blocks. Just pure JSON.
+ The JSON must conform to this schema: ${JSON.stringify(zodToJsonSchema(schema))}`;
+ for (let attempt = 0; attempt < retries; attempt += 1) {
+ const result = await this.generate(prompt, {
+ system: options.system || systemPrompt,
+ temperature,
+ maxTokens: 1e3
+ });
+ try {
+ const jsonStr = extractJson(result.text);
+ const parsed = JSON.parse(jsonStr);
+ return schema.parse(parsed);
+ } catch (error) {
+ if (attempt === retries - 1) throw new Error(`Failed to generate valid JSON after ${retries} attempts: ${error}`);
+ }
+ }
+ throw new Error("Failed to generate valid JSON");
+ }
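json() delegates schema description to zodToJsonSchema and validation to schema.parse, so it expects a Zod schema object. A sketch, assuming zod is installed alongside the package and `g` is a loaded Gerbil instance:

```ts
import { z } from "zod";

const Person = z.object({ name: z.string(), age: z.number() });
const person = await g.json("Invent a fictional person", {
  schema: Person,
  retries: 3,       // default
  temperature: 0.3, // default
});
// `person` has been validated by Person.parse() before being returned
```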
+ /**
+ * Generate embeddings
+ */
+ async embed(text, options = {}) {
+ if (!this.embedder) this.embedder = await pipeline$1("feature-extraction", options.model || "Xenova/all-MiniLM-L6-v2");
+ const startTime = performance.now();
+ const output = await this.embedder(text, {
+ pooling: "mean",
+ normalize: options.normalize !== false
+ });
+ return {
+ vector: Array.from(output.data),
+ text,
+ totalTime: performance.now() - startTime
+ };
+ }
+ /**
+ * Generate embeddings for multiple texts
+ */
+ async embedBatch(texts, options = {}) {
+ const results = [];
+ for (const text of texts) results.push(await this.embed(text, options));
+ return results;
+ }
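Because embed() normalizes vectors by default (normalize: options.normalize !== false), a plain dot product between two results is already their cosine similarity:

```ts
const [a, b] = await g.embedBatch(["cats purr", "dogs bark"]);
// dot product of unit vectors = cosine similarity
const cosine = a.vector.reduce((sum, v, i) => sum + v * b.vector[i], 0);
console.log(cosine.toFixed(3)); // in [-1, 1]; higher means more similar
```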
+ /**
+ * Get session stats
+ */
+ getStats() {
+ return { ...this.stats };
+ }
+ /**
+ * Get system info
+ */
+ getInfo() {
+ return {
+ version: "1.0.0",
+ model: this.modelConfig,
+ device: {
+ backend: "transformers.js",
+ gpu: null,
+ vram: null,
+ status: this.isLoaded() ? "ready" : "loading"
+ },
+ context: {
+ max: this.modelConfig?.contextLength || 0,
+ used: 0,
+ available: this.modelConfig?.contextLength || 0
+ },
+ cache: {
+ location: "~/.gerbil/models",
+ size: "0 MB",
+ modelCount: 0
+ }
+ };
+ }
+ /**
+ * Reset stats
+ */
+ resetStats() {
+ this.stats = {
+ prompts: 0,
+ tokensIn: 0,
+ tokensOut: 0,
+ avgSpeed: 0,
+ totalTime: 0,
+ cacheHits: 0,
+ cacheMisses: 0
+ };
+ }
+ tts = null;
+ ttsModelId = "kokoro-82m";
+ /**
+ * Load TTS model for text-to-speech synthesis
+ *
+ * @example
+ * ```ts
+ * // Load default (Kokoro)
+ * await g.loadTTS({ onProgress: (p) => console.log(p.status) });
+ *
+ * // Load Supertonic (faster, 44kHz output)
+ * await g.loadTTS({ model: "supertonic-66m" });
+ *
+ * const result = await g.speak("Hello world");
+ * // result.audio = Float32Array, result.sampleRate = 24000 or 44100
+ * ```
+ */
+ async loadTTS(options = {}) {
+ const modelId = options.model || "kokoro-82m";
+ if (this.tts && this.ttsModelId !== modelId) {
+ await this.tts.dispose();
+ this.tts = null;
+ }
+ if (this.tts?.isLoaded()) return;
+ this.ttsModelId = modelId;
+ const { createTTS } = await import("./tts-B1pZMlDv.mjs");
+ if (!this.tts) this.tts = createTTS(modelId);
+ await this.tts.load(options);
+ }
+ /**
+ * Ensure TTS model is loaded (lazy loading)
+ */
+ async ensureTTSLoaded(options) {
+ if (!this.tts?.isLoaded()) await this.loadTTS(options);
+ }
+ /**
+ * Generate speech from text
+ *
+ * @example
+ * ```ts
+ * const result = await g.speak("Hello world", { voice: "af_bella" });
+ * // result.audio = Float32Array PCM
+ * // result.sampleRate = 24000
+ * // result.duration = seconds
+ * ```
+ */
+ async speak(text, options = {}) {
+ await this.ensureTTSLoaded({ onProgress: options.onProgress });
+ return this.tts.speak(text, options);
+ }
+ /**
+ * Stream speech generation (yields audio chunks as they're generated)
+ *
+ * @example
+ * ```ts
+ * for await (const chunk of g.speakStream("Long text...")) {
+ *   // chunk.samples = Float32Array
+ *   // chunk.isFinal = boolean
+ *   playChunk(chunk);
+ * }
+ * ```
+ */
+ async *speakStream(text, options = {}) {
+ await this.ensureTTSLoaded({ onProgress: options.onProgress });
+ return yield* this.tts.speakStream(text, options);
+ }
+ /**
+ * Get list of available TTS voices
+ */
+ listVoices() {
+ if (!this.tts) return KOKORO_VOICES_DEFAULT;
+ return this.tts.listVoices();
+ }
+ /**
+ * Check if TTS model is loaded
+ */
+ isTTSLoaded() {
+ return this.tts?.isLoaded() ?? false;
+ }
+ /**
+ * Get current TTS model info
+ */
+ getTTSModelInfo() {
+ if (!this.tts) return null;
+ return {
+ id: this.ttsModelId,
+ loaded: this.tts.isLoaded(),
+ device: this.tts.isLoaded() ? this.tts.getDeviceMode() : void 0
+ };
+ }
+ /**
+ * List available TTS models
+ */
+ async listTTSModels() {
+ const { TTS_MODELS } = await import("./tts-B1pZMlDv.mjs");
+ return Object.values(TTS_MODELS).map((m) => ({
+ id: m.id,
+ description: m.description,
+ sampleRate: m.sampleRate,
+ voiceCount: m.voices.length
+ }));
+ }
+ stt = null;
+ /**
+ * Load STT model for speech-to-text transcription
+ *
+ * @example
+ * ```ts
+ * await g.loadSTT({
+ *   onProgress: (p) => console.log(p.status)
+ * });
+ *
+ * const result = await g.transcribe(audioData);
+ * console.log(result.text);
+ * ```
+ */
+ async loadSTT(modelId, options = {}) {
+ if (this.stt?.isLoaded()) return;
+ const { WhisperSTT } = await import("./stt-1WIefHwc.mjs");
+ if (!this.stt) this.stt = new WhisperSTT(modelId);
+ await this.stt.load(options);
+ }
+ /**
+ * Ensure STT model is loaded (lazy loading)
+ */
+ async ensureSTTLoaded(modelId, options) {
+ if (!this.stt?.isLoaded()) await this.loadSTT(modelId, options);
+ }
+ /**
+ * Transcribe audio to text
+ *
+ * @param audio - Audio data as Float32Array (16kHz mono) or Uint8Array (WAV file)
+ * @param options - Transcription options
+ *
+ * @example
+ * ```ts
+ * // From Float32Array (16kHz mono)
+ * const result = await g.transcribe(audioData);
+ * console.log(result.text);
+ *
+ * // With timestamps
+ * const result = await g.transcribe(audioData, { timestamps: true });
+ * for (const seg of result.segments) {
+ *   console.log(`[${seg.start}s] ${seg.text}`);
+ * }
+ *
+ * // From WAV file
+ * const wavData = fs.readFileSync("audio.wav");
+ * const result = await g.transcribe(new Uint8Array(wavData));
+ * ```
+ */
+ async transcribe(audio, options = {}) {
+ await this.ensureSTTLoaded(void 0, { onProgress: options.onProgress });
+ return this.stt.transcribe(audio, options);
+ }
+ /**
+ * Create a streaming transcription session
+ *
+ * Transcribes audio in real-time by processing chunks at regular intervals.
+ * Perfect for live captioning, call transcription, or real-time subtitles.
+ *
+ * @param options - Streaming options
+ * @returns Streaming session controller
+ *
+ * @example
+ * ```ts
+ * const session = await g.createStreamingTranscription({
+ *   chunkDuration: 3000, // Transcribe every 3 seconds
+ *   onChunk: (text, idx) => console.log(`Chunk ${idx}: ${text}`),
+ *   onTranscript: (fullText) => console.log("Full:", fullText),
+ * });
+ *
+ * // Feed audio data as it comes in
+ * session.feedAudio(audioChunk);
+ *
+ * // Start automatic interval-based transcription
+ * session.start();
+ *
+ * // Later, stop and get final transcript
+ * const finalText = await session.stop();
+ * ```
+ */
+ async createStreamingTranscription(options = {}) {
+ await this.ensureSTTLoaded();
+ return this.stt.createStreamingSession(options);
+ }
+ /**
+ * Get list of available STT models
+ */
+ async listSTTModels() {
+ const { WhisperSTT } = await import("./stt-1WIefHwc.mjs");
+ return WhisperSTT.listModels();
+ }
+ /**
+ * Check if STT model is loaded
+ */
+ isSTTLoaded() {
+ return this.stt?.isLoaded() ?? false;
+ }
+ /**
+ * Get current STT model info
+ */
+ getSTTModelInfo() {
+ if (!this.stt) return null;
+ return {
+ id: this.stt.getModelInfo().id,
+ loaded: this.stt.isLoaded(),
+ device: this.stt.isLoaded() ? this.stt.getDeviceMode() : void 0
+ };
+ }
+ /**
+ * Record audio from microphone and transcribe
+ *
+ * @example
+ * ```ts
+ * // Record for 5 seconds and transcribe
+ * const result = await g.listen(5000);
+ * console.log(result.text);
+ *
+ * // Use with voice chat
+ * const userInput = await g.listen(10000);
+ * const response = await g.generate(userInput.text);
+ * await g.speak(response.text);
+ * ```
+ */
+ async listen(durationMs = 5e3, options = {}) {
+ const { Microphone, isSoxAvailable } = await import("./microphone-DaMZFRuR.mjs");
+ if (!isSoxAvailable()) throw new Error("Microphone recording requires SoX. Install with:\n  macOS: brew install sox\n  Ubuntu: sudo apt install sox\n  Windows: https://sox.sourceforge.net/");
+ options.onProgress?.("Starting microphone...");
+ const mic = new Microphone({ sampleRate: 16e3 });
+ await mic.start();
+ options.onProgress?.(`Recording for ${(durationMs / 1e3).toFixed(1)}s...`);
+ await new Promise((r) => setTimeout(r, durationMs));
+ options.onProgress?.("Processing audio...");
+ const { audio } = await mic.stop();
+ options.onProgress?.("Transcribing...");
+ return this.transcribe(audio, { onProgress: (p) => options.onProgress?.(p.status || "Transcribing...") });
+ }
+ /**
+ * Check if microphone recording is available
+ */
+ async isMicrophoneAvailable() {
+ try {
+ const { isSoxAvailable } = await import("./microphone-DaMZFRuR.mjs");
+ return isSoxAvailable();
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Dispose of resources
+ * @param disconnect If true, also disconnect from shared browser (for clean script exit)
+ */
+ async dispose(disconnect = false) {
+ if (this.chromeBackend) {
+ try {
+ await this.chromeBackend.dispose(disconnect);
+ } catch {}
+ this.chromeBackend = null;
+ }
+ if (this.generator) {
+ if (typeof this.generator.dispose === "function") try {
+ await this.generator.dispose();
+ } catch {}
+ this.generator = null;
+ }
+ if (this.embedder) {
+ if (typeof this.embedder.dispose === "function") try {
+ await this.embedder.dispose();
+ } catch {}
+ this.embedder = null;
+ }
+ if (this.visionModel) {
+ if (typeof this.visionModel.dispose === "function") try {
+ await this.visionModel.dispose();
+ } catch {}
+ this.visionModel = null;
+ }
+ if (this.processor) this.processor = null;
+ if (this.tts) {
+ try {
+ await this.tts.dispose();
+ } catch {}
+ this.tts = null;
+ }
+ if (this.stt) {
+ try {
+ this.stt.dispose();
+ } catch {}
+ this.stt = null;
+ }
+ this.currentModel = null;
+ this.modelConfig = null;
+ this.isVisionModel = false;
+ }
+ /**
+ * Shutdown the shared Chrome backend completely.
+ * Call this when your script/process is done to ensure proper cleanup.
+ * This closes the shared browser used for WebGPU acceleration.
+ */
+ static async shutdown() {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ await ChromeGPUBackend.closeSharedBrowser();
+ }
+ /**
+ * Get global WebGPU process info (all active backends)
+ * Useful for monitoring and debugging memory leaks
+ */
+ static async getWebGPUProcesses() {
+ if (typeof window !== "undefined") return null;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return {
+ browser: ChromeGPUBackend.getGlobalBrowserStatus(),
+ backends: await ChromeGPUBackend.getAllBackendsInfo()
+ };
+ } catch {
+ return null;
+ }
+ }
+ /**
+ * Kill all WebGPU processes (for zombie cleanup)
+ * Use this if you suspect memory leaks from undisposed Gerbil instances
+ */
+ static async killAllWebGPU() {
+ if (typeof window !== "undefined") return null;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return await ChromeGPUBackend.killAllBackends();
+ } catch {
+ return null;
+ }
+ }
+ /**
+ * Kill a specific WebGPU backend by index
+ * @param index Index of the backend to kill (0-based)
+ */
+ static async killWebGPUBackend(index) {
+ if (typeof window !== "undefined") return false;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return await ChromeGPUBackend.killBackendByIndex(index);
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Get all Chrome pages across ALL Gerbil processes
+ * This provides cross-process visibility into WebGPU backends
+ */
+ static async getAllChromePagesInfo() {
+ if (typeof window !== "undefined") return null;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return await ChromeGPUBackend.getAllChromePages();
+ } catch {
+ return null;
+ }
+ }
+ /**
+ * Kill a Chrome page by index (works across processes)
+ * @param index Index of the page to kill (0-based)
+ */
+ static async killChromePage(index) {
+ if (typeof window !== "undefined") return false;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return await ChromeGPUBackend.killPageByIndex(index);
+ } catch {
+ return false;
+ }
+ }
+ /**
+ * Get total Chrome page count (all processes)
+ */
+ static async getTotalChromePageCount() {
+ if (typeof window !== "undefined") return 0;
+ try {
+ const { ChromeGPUBackend } = await import("./chrome-backend-DIKYoWj-.mjs");
+ return await ChromeGPUBackend.getTotalPageCount();
+ } catch {
+ return 0;
+ }
+ }
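The static helpers above exist for operational cleanup of Chrome WebGPU backends across processes; a monitoring sketch using only the methods defined in this file:

```ts
const info = await Gerbil.getWebGPUProcesses();
console.log(info?.browser, info?.backends);

// If crashed scripts left zombie Chrome pages behind:
const pages = await Gerbil.getTotalChromePageCount();
if (pages > 0) await Gerbil.killAllWebGPU();
```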
+ formatPrompt(prompt, options) {
+ const system = options.system || "You are a helpful assistant.";
+ const isQwen = this.currentModel?.includes("qwen");
+ if (options.thinking && this.modelConfig?.supportsThinking) return `<|im_start|>system\n${`${system}\n\nThink step-by-step before answering. Wrap your reasoning in <think></think> tags, then provide your answer.`}<|im_end|>\n<|im_start|>user\n${prompt}<|im_end|>\n<|im_start|>assistant\n`;
+ if (isQwen) return `<|im_start|>system\n${system}<|im_end|>\n<|im_start|>user\n${prompt} /no_think<|im_end|>\n<|im_start|>assistant\n`;
+ return `<|im_start|>system\n${system}<|im_end|>\n<|im_start|>user\n${prompt}<|im_end|>\n<|im_start|>assistant\n`;
+ }
+ buildMessages(prompt, options) {
+ const system = options.system || "You are a helpful assistant.";
+ const messages = [];
+ messages.push({
+ role: "system",
+ content: system
+ });
+ messages.push({
+ role: "user",
+ content: prompt
+ });
+ return messages;
+ }
+ parseThinking(text) {
+ const match = text.match(/<think>([\s\S]*?)<\/think>/);
+ if (match) return {
+ thinking: match[1].trim(),
+ response: text.replace(/<think>[\s\S]*?<\/think>/, "").trim()
+ };
+ const unclosedMatch = text.match(/<think>([\s\S]*)$/);
+ if (unclosedMatch) {
+ const thinking = unclosedMatch[1].trim();
+ const response = text.replace(/<think>[\s\S]*$/, "").trim();
+ return {
+ thinking: thinking || void 0,
+ response
+ };
+ }
+ return { response: text.replace(/<\/?think>/g, "").trim() };
+ }
+ cleanOutput(text) {
+ return text.replace(/<\|im_end\|>/g, "").replace(/<\|im_start\|>/g, "").replace(/<\|endoftext\|>/g, "").replace(/<\/s>/g, "").replace(/^\/no_think\s*/i, "").replace(/^assistant\s*/i, "").replace(/^\s*\/no_think\s*/gim, "").replace(/^\s*assistant\s*/gim, "").replace(/^(system|user|assistant):\s*/gim, "").trim();
+ }
+ };
+
+ //#endregion
+ export { ResponseCache as a, getGlobalCache as c, resolveModel as i, BUILTIN_MODELS as n, clearGlobalCache as o, listBuiltinModels as r, configureGlobalCache as s, Gerbil as t };
+ //# sourceMappingURL=gerbil-DoDGHe6Z.mjs.map
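Taken together, the disposal contract implied by dispose() and shutdown() looks like this (a sketch; TTS/STT models lazy-load on first use, and the import path assumes the package root re-exports Gerbil rather than the mangled `t` alias above):

```ts
import { Gerbil } from "@tryhamster/gerbil";

const g = new Gerbil({ model: "qwen3-0.6b" });
const reply = await g.generate("Hello!", { cache: true });
const speech = await g.speak(reply.text, { voice: "af_bella" });

await g.dispose(true);   // free models and disconnect from the shared browser
await Gerbil.shutdown(); // close the shared Chrome WebGPU browser entirely
```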