@tryhamster/gerbil 1.0.0-rc.2 → 1.0.0-rc.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/dist/browser/{index.d.mts → index.d.ts} +354 -3
  2. package/dist/browser/index.d.ts.map +1 -0
  3. package/dist/browser/{index.mjs → index.js} +116 -6
  4. package/dist/browser/index.js.map +1 -0
  5. package/dist/{chrome-backend-Y9F7W5VQ.mjs → chrome-backend-CORwaIyC.mjs} +1 -1
  6. package/dist/{chrome-backend-Y9F7W5VQ.mjs.map → chrome-backend-CORwaIyC.mjs.map} +1 -1
  7. package/dist/{chrome-backend-JEPeM2YE.mjs → chrome-backend-DIKYoWj-.mjs} +1 -1
  8. package/dist/cli.mjs +14 -15
  9. package/dist/cli.mjs.map +1 -1
  10. package/dist/frameworks/express.d.mts +1 -1
  11. package/dist/frameworks/express.mjs +3 -4
  12. package/dist/frameworks/express.mjs.map +1 -1
  13. package/dist/frameworks/fastify.d.mts +1 -1
  14. package/dist/frameworks/fastify.mjs +2 -3
  15. package/dist/frameworks/fastify.mjs.map +1 -1
  16. package/dist/frameworks/hono.d.mts +1 -1
  17. package/dist/frameworks/hono.mjs +2 -3
  18. package/dist/frameworks/hono.mjs.map +1 -1
  19. package/dist/frameworks/next.d.mts +2 -2
  20. package/dist/frameworks/next.mjs +2 -3
  21. package/dist/frameworks/next.mjs.map +1 -1
  22. package/dist/frameworks/react.d.mts +1 -1
  23. package/dist/frameworks/trpc.d.mts +1 -1
  24. package/dist/frameworks/trpc.mjs +2 -3
  25. package/dist/frameworks/trpc.mjs.map +1 -1
  26. package/dist/{gerbil-POAz8peb.d.mts → gerbil-CnncBh38.d.mts} +2 -2
  27. package/dist/{gerbil-POAz8peb.d.mts.map → gerbil-CnncBh38.d.mts.map} +1 -1
  28. package/dist/{gerbil-yoSpRHgv.mjs → gerbil-Dq039a6V.mjs} +187 -19
  29. package/dist/gerbil-Dq039a6V.mjs.map +1 -0
  30. package/dist/gerbil-DyTEWXLy.mjs +4 -0
  31. package/dist/index.d.mts +19 -3
  32. package/dist/index.d.mts.map +1 -1
  33. package/dist/index.mjs +6 -7
  34. package/dist/index.mjs.map +1 -1
  35. package/dist/integrations/ai-sdk.d.mts +1 -1
  36. package/dist/integrations/ai-sdk.mjs +4 -5
  37. package/dist/integrations/ai-sdk.mjs.map +1 -1
  38. package/dist/integrations/langchain.d.mts +1 -1
  39. package/dist/integrations/langchain.mjs +2 -3
  40. package/dist/integrations/langchain.mjs.map +1 -1
  41. package/dist/integrations/llamaindex.d.mts +1 -1
  42. package/dist/integrations/llamaindex.mjs +2 -3
  43. package/dist/integrations/llamaindex.mjs.map +1 -1
  44. package/dist/integrations/mcp-client.mjs +2 -2
  45. package/dist/integrations/mcp.d.mts +2 -2
  46. package/dist/integrations/mcp.mjs +5 -6
  47. package/dist/{mcp-Bitg4sjX.mjs → mcp-DY57Whwj.mjs} +3 -3
  48. package/dist/{mcp-Bitg4sjX.mjs.map → mcp-DY57Whwj.mjs.map} +1 -1
  49. package/dist/{one-liner-B1rmFto6.mjs → one-liner-CgRVfe5K.mjs} +2 -2
  50. package/dist/{one-liner-B1rmFto6.mjs.map → one-liner-CgRVfe5K.mjs.map} +1 -1
  51. package/dist/repl-BEusmMZs.mjs +9 -0
  52. package/dist/skills/index.d.mts +2 -2
  53. package/dist/skills/index.d.mts.map +1 -1
  54. package/dist/skills/index.mjs +4 -5
  55. package/dist/{skills-5DxAV-rn.mjs → skills-BGS20rGK.mjs} +2 -2
  56. package/dist/{skills-5DxAV-rn.mjs.map → skills-BGS20rGK.mjs.map} +1 -1
  57. package/dist/stt-BT4Rt49f.mjs +3 -0
  58. package/dist/stt-BtklAjR2.js +439 -0
  59. package/dist/stt-BtklAjR2.js.map +1 -0
  60. package/dist/{stt-Bv_dum-R.mjs → stt-CkfJswka.mjs} +8 -2
  61. package/dist/stt-CkfJswka.mjs.map +1 -0
  62. package/dist/{tools-IYPrqoek.mjs → tools-Bi1P7Xoy.mjs} +2 -2
  63. package/dist/{tools-IYPrqoek.mjs.map → tools-Bi1P7Xoy.mjs.map} +1 -1
  64. package/dist/{tts-DG6denWG.mjs → tts-BFL984rV.mjs} +11 -3
  65. package/dist/tts-BFL984rV.mjs.map +1 -0
  66. package/dist/{tts-5yWeP_I0.mjs → tts-Cuu1TOkM.mjs} +1 -1
  67. package/dist/tts-DKIOWafo.js +731 -0
  68. package/dist/tts-DKIOWafo.js.map +1 -0
  69. package/dist/{types-s6Py2_DL.d.mts → types-DJhOZ6Ct.d.mts} +1 -1
  70. package/dist/{types-s6Py2_DL.d.mts.map → types-DJhOZ6Ct.d.mts.map} +1 -1
  71. package/dist/{utils-CkB4Roi6.mjs → utils-CZBZ8dgR.mjs} +1 -1
  72. package/dist/{utils-CkB4Roi6.mjs.map → utils-CZBZ8dgR.mjs.map} +1 -1
  73. package/package.json +1 -1
  74. package/dist/browser/index.d.mts.map +0 -1
  75. package/dist/browser/index.mjs.map +0 -1
  76. package/dist/gerbil-DeQlX_Mt.mjs +0 -5
  77. package/dist/gerbil-yoSpRHgv.mjs.map +0 -1
  78. package/dist/models-BAtL8qsA.mjs +0 -171
  79. package/dist/models-BAtL8qsA.mjs.map +0 -1
  80. package/dist/models-CE0fBq0U.d.mts +0 -22
  81. package/dist/models-CE0fBq0U.d.mts.map +0 -1
  82. package/dist/repl-D20JO260.mjs +0 -10
  83. package/dist/stt-Bv_dum-R.mjs.map +0 -1
  84. package/dist/stt-KzSoNvwI.mjs +0 -3
  85. package/dist/tts-DG6denWG.mjs.map +0 -1
  86. package/dist/{auto-update-DsWBBnEk.mjs → auto-update-S9s5-g0C.mjs} +0 -0
  87. package/dist/{chunk-Ct1HF2bE.mjs → chunk-CkXuGtQK.mjs} +0 -0
  88. package/dist/{microphone-D-6y9aiE.mjs → microphone-DaMZFRuR.mjs} +0 -0
@@ -1,6 +1,357 @@
- import { A as TranscribeSegment, C as SpeakResult, D as TTSModelConfig, E as SystemInfo, O as TranscribeOptions, S as SpeakOptions, T as StreamingTranscriptionSession, _ as ModelSource, a as FallbackConfig, b as STTModelConfig, c as GerbilConfig, d as ImageInput, f as JsonOptions, g as ModelConfig, h as LoadTTSOptions, i as EmbedResult, j as VoiceInfo, k as TranscribeResult, l as GerbilModelSettings, m as LoadSTTOptions, n as CacheConfig, o as GenerateOptions, p as LoadOptions, r as EmbedOptions, s as GenerateResult, t as AudioChunk, u as GerbilProviderSettings, v as ModelStats, w as StreamingTranscriptionOptions, x as SessionStats, y as ProgressInfo } from "../types-s6Py2_DL.mjs";
- import { t as BUILTIN_MODELS } from "../models-CE0fBq0U.mjs";
+ import { z } from "zod";

+ //#region src/core/types.d.ts
+
+ type ModelConfig = {
+ id: string;
+ repo: string;
+ description: string;
+ size: string;
+ contextLength: number;
+ supportsThinking: boolean;
+ supportsJson: boolean;
+ /** Whether model supports vision/image input */
+ supportsVision?: boolean;
+ /** Vision encoder size (for display, e.g., "0.4B") */
+ visionEncoderSize?: string;
+ family: "qwen" | "smollm" | "phi" | "mistral" | "llama" | "other";
+ };
+ type ModelSource = {
+ type: "builtin" | "huggingface" | "local";
+ path: string;
+ };
+ type ImageInput = {
+ /** Image source: URL, base64 data URI, or local file path */
+ source: string;
+ /** Optional alt text for context */
+ alt?: string;
+ };
+ type GenerateOptions = {
+ /** Maximum tokens to generate (default: 256) */
+ maxTokens?: number;
+ /** Temperature for sampling, 0-2 (default: 0.7) */
+ temperature?: number;
+ /** Top-p sampling (default: 0.9) */
+ topP?: number;
+ /** Top-k sampling (default: 50) */
+ topK?: number;
+ /** Stop sequences */
+ stopSequences?: string[];
+ /** System prompt */
+ system?: string;
+ /** Enable thinking/reasoning mode (Qwen3) */
+ thinking?: boolean;
+ /** Callback for each token (streaming) */
+ onToken?: (token: string) => void;
+ /** Images to include (only used if model supports vision) */
+ images?: ImageInput[];
+ /** Enable response caching (default: false) */
+ cache?: boolean;
+ /** Cache TTL in milliseconds (default: 5 minutes) */
+ cacheTtl?: number;
+ };
+ type GenerateResult = {
+ /** Generated text */
+ text: string;
+ /** Thinking/reasoning (if enabled) */
+ thinking?: string;
+ /** Tokens generated */
+ tokensGenerated: number;
+ /** Generation speed */
+ tokensPerSecond: number;
+ /** Total time in ms */
+ totalTime: number;
+ /** Why generation stopped */
+ finishReason: "stop" | "length" | "error";
+ /** Which provider was used (for hybrid mode) */
+ provider?: "local" | "openai" | "anthropic";
+ /** Whether result came from cache */
+ cached?: boolean;
+ };
+ type JsonOptions<T = unknown> = {
+ /** Zod schema for validation */
+ schema: z.ZodType<T>;
+ /** Number of retries on invalid JSON (default: 3) */
+ retries?: number;
+ /** Temperature (lower = more deterministic, default: 0.3) */
+ temperature?: number;
+ /** System prompt override */
+ system?: string;
+ };
+ type EmbedOptions = {
+ /** Model to use for embeddings */
+ model?: string;
+ /** Normalize vectors (default: true) */
+ normalize?: boolean;
+ };
+ type EmbedResult = {
+ /** Embedding vector */
+ vector: number[];
+ /** Original text */
+ text: string;
+ /** Time in ms */
+ totalTime: number;
+ };
+ type LoadOptions = {
+ /** Progress callback */
+ onProgress?: (info: ProgressInfo) => void;
+ /** Device: 'auto', 'gpu', 'cpu', 'webgpu' (default: 'auto') */
+ device?: "auto" | "gpu" | "cpu" | "webgpu";
+ /** Quantization: 'q4', 'q8', 'fp16', 'fp32' (default: 'q4') */
+ dtype?: "q4" | "q8" | "fp16" | "fp32";
+ /** Override context length */
+ contextLength?: number;
+ };
+ type ProgressInfo = {
+ status: string;
+ progress?: number;
+ file?: string;
+ loaded?: number;
+ total?: number;
+ };
+ type GerbilConfig = {
+ /** Default model */
+ model?: string;
+ /** Default device */
+ device?: "auto" | "gpu" | "cpu";
+ /** Default quantization */
+ dtype?: "q4" | "q8" | "fp16" | "fp32";
+ /** Cache configuration */
+ cache?: CacheConfig;
+ /** Fallback configuration */
+ fallback?: FallbackConfig;
+ };
+ type CacheConfig = {
+ /** Enable caching (default: true) */
+ enabled?: boolean;
+ /** Time-to-live in seconds (default: 3600) */
+ ttl?: number;
+ /** Max cache size (default: "500mb") */
+ maxSize?: string;
+ /** Storage backend */
+ storage?: "memory" | "disk" | "redis";
+ /** Redis URL (if storage is redis) */
+ redisUrl?: string;
+ };
+ type FallbackConfig = {
+ /** Fallback provider */
+ provider: "openai" | "anthropic";
+ /** API key */
+ apiKey: string;
+ /** Model to use */
+ model: string;
+ /** When to fallback */
+ when: "timeout" | "error" | "always-verify";
+ /** Timeout in ms before fallback (default: 5000) */
+ timeout?: number;
+ };
+ type SessionStats = {
+ prompts: number;
+ tokensIn: number;
+ tokensOut: number;
+ avgSpeed: number;
+ totalTime: number;
+ cacheHits: number;
+ cacheMisses: number;
+ };
+ type ModelStats = {
+ modelId: string;
+ avgSpeed: number;
+ totalGenerations: number;
+ totalTokens: number;
+ };
+ type SystemInfo = {
+ version: string;
+ model: ModelConfig | null;
+ device: {
+ backend: string;
+ gpu: string | null;
+ vram: string | null;
+ status: "ready" | "loading" | "error";
+ };
+ context: {
+ max: number;
+ used: number;
+ available: number;
+ };
+ cache: {
+ location: string;
+ size: string;
+ modelCount: number;
+ };
+ };
+ type GerbilModelSettings = {
+ /** Enable thinking mode */
+ thinking?: boolean;
+ /** Device to use */
+ device?: "auto" | "gpu" | "cpu";
+ /** Quantization level */
+ dtype?: "q4" | "q8" | "fp16" | "fp32";
+ };
+ type GerbilProviderSettings = {
+ /** Default device */
+ device?: "auto" | "gpu" | "cpu";
+ /** Default quantization */
+ dtype?: "q4" | "q8" | "fp16" | "fp32";
+ };
+ type VoiceInfo = {
+ /** Voice ID (e.g., "af_bella", "am_adam") */
+ id: string;
+ /** Display name (e.g., "Bella", "Adam") */
+ name: string;
+ /** Gender: male, female, or neutral */
+ gender: "male" | "female" | "neutral";
+ /** Language code (e.g., "en", "en-us", "zh") */
+ language: string;
+ /** Optional description */
+ description?: string;
+ /** Speaker embedding file name (internal) */
+ embeddingFile?: string;
+ };
+ type TTSModelConfig = {
+ /** Model ID (e.g., "kokoro-82m") */
+ id: string;
+ /** HuggingFace repo path */
+ repo: string;
+ /** Human-readable description */
+ description: string;
+ /** Approximate model size */
+ size: string;
+ /** Output sample rate in Hz (e.g., 24000) */
+ sampleRate: number;
+ /** Available voices */
+ voices: VoiceInfo[];
+ /** Default voice ID */
+ defaultVoice: string;
+ /** Supported languages */
+ languages: string[];
+ };
+ type SpeakOptions = {
+ /** Voice ID to use (default: model's default voice) */
+ voice?: string;
+ /** Speech speed multiplier (0.5 = half speed, 2.0 = double speed, default: 1.0) */
+ speed?: number;
+ /** Progress callback */
+ onProgress?: (info: ProgressInfo) => void;
+ /** Callback for audio chunks during streaming */
+ onAudioChunk?: (chunk: AudioChunk) => void;
+ };
+ type AudioChunk = {
+ /** Raw PCM audio samples (Float32Array) */
+ samples: Float32Array;
+ /** Sample rate in Hz */
+ sampleRate: number;
+ /** Chunk index (0-based) */
+ index: number;
+ /** Whether this is the final chunk */
+ isFinal: boolean;
+ };
+ type SpeakResult = {
+ /** Raw PCM audio samples (Float32Array) */
+ audio: Float32Array;
+ /** Sample rate in Hz */
+ sampleRate: number;
+ /** Audio duration in seconds */
+ duration: number;
+ /** Voice used */
+ voice: string;
+ /** Total processing time in ms */
+ totalTime: number;
+ };
+ type LoadTTSOptions = {
+ /** Progress callback */
+ onProgress?: (info: ProgressInfo) => void;
+ /** Device: 'auto', 'webgpu', 'cpu' (default: 'auto') */
+ device?: "auto" | "webgpu" | "cpu";
+ };
+ type STTModelConfig = {
+ /** Model ID (e.g., "whisper-tiny.en") */
+ id: string;
+ /** HuggingFace repo path */
+ repo: string;
+ /** Human-readable description */
+ description: string;
+ /** Model size (e.g., "39M", "244M") */
+ size: string;
+ /** Whether model supports multiple languages */
+ multilingual: boolean;
+ /** Supported languages (ISO 639-1 codes) */
+ languages: string[];
+ /** Expected sample rate (default: 16000) */
+ sampleRate: number;
+ };
+ type TranscribeOptions = {
+ /** Language hint (ISO 639-1 code like "en", "es", "fr") - only for multilingual models */
+ language?: string;
+ /** Return word/segment timestamps */
+ timestamps?: boolean;
+ /** Progress callback */
+ onProgress?: (info: ProgressInfo) => void;
+ };
+ type TranscribeSegment = {
+ /** Segment text */
+ text: string;
+ /** Start time in seconds */
+ start: number;
+ /** End time in seconds */
+ end: number;
+ };
+ type TranscribeResult = {
+ /** Full transcribed text */
+ text: string;
+ /** Detected or specified language */
+ language: string;
+ /** Segments with timestamps (if timestamps option enabled) */
+ segments?: TranscribeSegment[];
+ /** Audio duration in seconds */
+ duration: number;
+ /** Total processing time in ms */
+ totalTime: number;
+ };
+ type LoadSTTOptions = {
+ /** Progress callback */
+ onProgress?: (info: ProgressInfo) => void;
+ /** Device: 'auto', 'webgpu', 'cpu' (default: 'auto') */
+ device?: "auto" | "webgpu" | "cpu";
+ };
+ type StreamingTranscriptionOptions = {
+ /** Interval between transcriptions in ms (default: 3000) */
+ chunkDuration?: number;
+ /** Minimum audio samples before transcribing (default: 8000 = 0.5s at 16kHz) */
+ minChunkSize?: number;
+ /** Callback for each transcribed chunk */
+ onChunk?: (text: string, chunkIndex: number) => void;
+ /** Callback with full accumulated transcript */
+ onTranscript?: (fullText: string) => void;
+ /** Callback on transcription error */
+ onError?: (error: string) => void;
+ /** Language hint (for multilingual models) */
+ language?: string;
+ };
+ type StreamingTranscriptionSession = {
+ /** Feed audio data to the buffer (Float32Array at 16kHz) */
+ feedAudio: (audio: Float32Array) => void;
+ /** Manually trigger transcription of buffered audio */
+ flush: () => Promise<string>;
+ /** Start automatic interval-based transcription */
+ start: () => void;
+ /** Stop transcription and return final transcript */
+ stop: () => Promise<string>;
+ /** Immediately abort without final transcription (for cleanup) */
+ abort: () => void;
+ /** Check if session is running */
+ isRunning: () => boolean;
+ /** Get current full transcript */
+ getTranscript: () => string;
+ /** Get number of chunks transcribed */
+ getChunkCount: () => number;
+ /** Reset session (clear buffer and transcript) */
+ reset: () => void;
+ };
+ //#endregion
+ //#region src/core/models.d.ts
+ declare const BUILTIN_MODELS: Record<string, ModelConfig>;
+ //#endregion
  //#region src/browser/index.d.ts

  type WorkerProgress = {
@@ -655,4 +1006,4 @@ declare const _default: {
  };
  //#endregion
  export { AudioChunk, BUILTIN_MODELS, BrowserVoiceInfo, CacheConfig, CompleteOptions, EmbedOptions, EmbedResult, FallbackConfig, GenerateOptions, GenerateResult, GenerateStreamOptions, GerbilConfig, GerbilModelSettings, GerbilProviderSettings, GerbilWorker, GerbilWorkerOptions, ImageInput, JsonOptions, LoadOptions, LoadSTTOptions, LoadTTSOptions, LoadingProgress, Message, ModelConfig, ModelSource, ModelStats, ProgressInfo, STTModelConfig, STTProgress, SessionStats, SpeakOptions, SpeakResult, StreamingTranscriptionOptions, StreamingTranscriptionSession, SystemInfo, TTSModelConfig, TTSModelId, TTSProgress, TranscribeOptions, TranscribeResult, TranscribeSegment, UseChatOptions, UseChatReturn, UseCompletionOptions, UseCompletionReturn, UseSpeechOptions, UseSpeechReturn, UseVoiceChatOptions, UseVoiceChatReturn, UseVoiceInputOptions, UseVoiceInputReturn, VoiceChatMessage, VoiceInfo, WorkerComplete, WorkerProgress, WorkerToken, createAudioPlayer, createGerbilWorker, _default as default, getWebGPUInfo, isWebGPUSupported, playAudio, useChat, useCompletion, useSpeech, useVoiceChat, useVoiceInput };
- //# sourceMappingURL=index.d.mts.map
+ //# sourceMappingURL=index.d.ts.map
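
The hunk above inlines the public type declarations (previously re-exported from the shared types-s6Py2_DL chunk) directly into dist/browser/index.d.ts and imports zod at the top level because JsonOptions now references z.ZodType. A minimal consumer-side sketch of those shapes in TypeScript follows; the "@tryhamster/gerbil/browser" import specifier and the example schema are assumptions for illustration, not part of the diff:

// Illustrative sketch only; the import specifier below is assumed, not taken from the diff.
import { z } from "zod";
import type { GenerateOptions, JsonOptions } from "@tryhamster/gerbil/browser";

let streamed = "";
const genOpts: GenerateOptions = {
  maxTokens: 128,
  temperature: 0.7,
  thinking: true, // Qwen3-style reasoning mode, per the GenerateOptions doc comment
  onToken: (token) => { streamed += token; }, // streaming callback
};

// JsonOptions types its schema field as z.ZodType<T>, matching the new top-level zod import.
const recipeSchema = z.object({ title: z.string(), steps: z.array(z.string()) });
const jsonOpts: JsonOptions<z.infer<typeof recipeSchema>> = {
  schema: recipeSchema,
  retries: 3,
  temperature: 0.3,
};
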
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","names":[],"sources":["../../src/core/types.ts","../../src/core/models.ts","../../src/browser/index.ts"],"sourcesContent":[],"mappings":";;;;AAyBY,KAfA,WAAA,GAeW;EASX,EAAA,EAAA,MAAA;EAWA,IAAA,EAAA,MAAA;EAmCA,WAAA,EAAA,MAAc;EA8Bd,IAAA,EAAA,MAAA;EAkBA,aAAA,EAAY,MAAA;EAQZ,gBAAW,EAAA,OAAA;EAeX,YAAA,EAAW,OAAA;EAcX;EAYA,cAAA,CAAY,EAAA,OAAA;EAiBZ;EAiBA,iBAAc,CAAA,EAAA,MAAA;EAqBd,MAAA,EAAA,MAAA,GAAY,QAAA,GAAA,KAAA,GAAA,SAAA,GAAA,OAAA,GAAA,OAAA;AAUxB,CAAA;AAOY,KAhOA,WAAA,GAgOU;EAyBV,IAAA,EAAA,SAAA,GAAA,aAAmB,GAAA,OAAA;EAWnB,IAAA,EAAA,MAAA;AAYZ,CAAA;AAeY,KAtRA,UAAA,GAsRc;EAmBd;EAWA,MAAA,EAAA,MAAU;EAWV;EAaA,GAAA,CAAA,EAAA,MAAA;AAWZ,CAAA;AAiBY,KA7VA,eAAA,GA6ViB;EASjB;EASA,SAAA,CAAA,EAAA,MAAA;EAaA;EAWA,WAAA,CAAA,EAAA,MAAA;EAeA;EAES,IAAA,CAAA,EAAA,MAAA;EAEN;EAID,IAAA,CAAA,EAAA,MAAA;EAAO;;;;EC/bR;;;;ECiDD;EAYA,MAAA,CAAA,EFFD,UEEY,EAAA;EAQX;EAQA,KAAA,CAAA,EAAA,OAAA;EAIc;EAEN,QAAA,CAAA,EAAA,MAAA;CAEI;AAAc,KFjB1B,cAAA,GEiB0B;EAO1B;EAmBA,IAAA,EAAA,MAAA;EAuBU;EAA4B,QAAA,CAAA,EAAA,MAAA;EAAmC;EAAR,eAAA,EAAA,MAAA;EAAO;EA+exE,eAAO,EAAA,MAAA;EAUP;EAYA,SAAA,EAAA,MAAc;EAsBd;EAmEI,YAAO,EAAA,MAAA,GAAU,QAAA,GAAA,OAAsB;EAwS3C;EAoBA,QAAA,CAAA,EAAA,OAAe,GAAA,QAAA,GAAA,WAAA;EAMf;EAM2B,MAAA,CAAA,EAAA,OAAA;CAAoB;AAIxC,KF98BP,WE88BO,CAAA,IAAA,OAAA,CAAA,GAAA;EAAe;EAoClB,MAAA,EFh/BN,CAAA,CAAE,OEg/BiB,CFh/BT,CEg/BS,CAAA;EAsMjB;EASA,OAAA,CAAA,EAAA,MAAU;EAGV;EA0NA,WAAA,CAAA,EAAA,MAAgB;EAoBhB;EAE6D,MAAA,CAAA,EAAA,MAAA;CAMtD;AAUC,KFl7CR,YAAA,GEk7CQ;EAUJ;EAAU,KAAA,CAAA,EAAA,MAAA;EAuCV;EAiYM,SAAA,CAAA,EAAS,OAAA;CACtB;AAE+B,KF/1D5B,WAAA,GE+1D4B;EAArC;EAAO,MAAA,EAAA,MAAA,EAAA;EAmDM;EAgEJ,IAAA,EAAA,MAAA;EAUA;EAwBA,SAAA,EAAA,MAAA;CAEY;AAED,KFz+DX,WAAA,GEy+DW;EAID;EAAiB,UAAA,CAAA,EAAA,CAAA,IAAA,EF3+DjB,YE2+DiB,EAAA,GAAA,IAAA;EAgBpB;EAAW,MAAA,CAAA,EAAA,MAAA,GAAA,KAAA,GAAA,KAAA,GAAA,QAAA;EAiDd;EA2iBJ,KAAA,CAAA,EAAA,IAAA,GAAA,IAAA,GAAmB,MAAA,GAAA,MAMlB;EAsBD;EAWA,aAAA,CAAA,EAAA,MAAkB;CAElB;AAEY,KFtnFZ,YAAA,GEsnFY;EAED,MAAA,EAAA,MAAA;EAAO,QAAA,CAAA,EAAA,MAAA;EAoEd,IAAA,CAAA,EAAA,MAAA;EAgcA,MAAA,CAAA,EAAA,MAAA;EAUM,KAAA,CAAA,EAAA,MAAA;AAwBrB,CAAA;KFlpGW,YAAA;;;;;;;;UAWF;;aAGG;;KAGD,WAAA;;;;;;;;;;;;KAiBA,cAAA;;;;;;;;;;;;KAqBA,YAAA;;;;;;;;;KAUA,UAAA;;;;;;KAOA,UAAA;;SAEH;;;;;;;;;;;;;;;;;;KAuBG,mBAAA;;;;;;;;KAWA,sBAAA;;;;;;KAYA,SAAA;;;;;;;;;;;;;;KAeA,cAAA;;;;;;;;;;;;UAYF;;;;;;KAOE,YAAA;;;;;;sBAMU;;yBAEG;;KAGb,UAAA;;WAED;;;;;;;;KASC,WAAA;;SAEH;;;;;;;;;;KAWG,cAAA;;sBAEU;;;;KASV,cAAA;;;;;;;;;;;;;;;;KAiBA,iBAAA;;;;;;sBAMU;;KAGV,iBAAA;;;;;;;;KASA,gBAAA;;;;;;aAMC;;;;;;KAOD,cAAA;;sBAEU;;;;KASV,6BAAA;;;;;;;;;;;;;;KAeA,6BAAA;;qBAES;;eAEN;;;;cAID;;;;;;;;;;;;;;AA9ZF,cCjCC,cD2DF,EC3DkB,MD2DlB,CAAA,MAAU,EC3DuB,WD2DvB,CAAA;;;;AEkBT,KA5BA,cAAA,GA4BmB;EAIL,MAAA,EAAA,SAAA,GAAA,aAAA,GAAA,OAAA,GAAA,OAAA;EAEN,OAAA,CAAA,EAAA,MAAA;EAEI,IAAA,CAAA,EAAA,MAAA;EAAc,QAAA,CAAA,EAAA,MAAA;EAO1B;EAmBA,aAAA,CAAA,EAAY,MAAA;EAuBF;EAA4B,UAAA,CAAA,EAAA,MAAA;EAAmC,KAAA,CAAA,EAAA,MAAA;CAAR;AAAO,KAzExE,WAAA,GAyEwE;EA+exE,MAAA,EAAA,OAAO;EAUP,IAAA,EAAA,MAAA;EAYA,KAAA,EAAA,UAAc,GAAA,WAYN;EAUR,SAAA,EAAA,MAAa;EAmET,GAAA,EAAA,MAAO;AAwSvB,CAAA;AAoBY,KA39BA,cAAA,GA29Be;EAMf,MAAA,EAAA,UAAA;EAM2B,IAAA,EAAA,MAAA;EAAoB,SAAA,EAAA,MAAA;EAIxC,SAAA,EAAA,MAAA;EAAe,GAAA,EAAA,MAAA;AAoClC,CAAA;AAsMY,KA7sCA,mBAAA,GA6sCW;EASX;EAGA,OAAA,CAAA,EAAA,MAAA;EA0NA;EAoBA,UAAA,CAAA,EAAA,CAAA,QAAe,EAn8CD,cAm8CC,EAAA,GAAA,IAAA;EAE8C;EAMtD,OAAA,CAAA,EAAA,CAAA,KAAA,EAz8CC,WAy8CD,EAAA,GAAA,IAAA;EAUC;EAUJ,UAAA,CAAA,EAAA,CAAA,MAAA,EA39CQ,cA29CR,EAAA,GAAA,IAAA;EAAU;EAuCV,OAAA,CAAA,EAAA,CAAA,KAAS,EAAA,MAAU,EAAA,GAAA,IAAA;EAiYb;EACb,SAAA,CAAA,EAAA,MAAA;CAE+B;AAArC,KA/3DS,qBAAA,GA+3DT;EAAO;EAmDM,
SAAA,CAAA,EAAA,MAAA;EAgEJ;EAUA,WAAA,CAAA,EAAA,MAAA;EAwBA;EAEY,IAAA,CAAA,EAAA,MAAA;EAED;EAID,IAAA,CAAA,EAAA,MAAA;EAAiB;EAgBpB,QAAA,CAAA,EAAA,OAAA;EAAW;EAiDd,MAAA,CAAA,EAAA,MAAA;EA2iBJ;EA4BA,MAAA,CAAA,EAAA,MAAA,EAAA;EAWA;EAEA,OAAA,CAAA,EAjqFA,KAiqFA,CAAA;IAEY,IAAA,EAAA,MAAA,GAAA,WAAA,GAAA,QAAA;IAED,OAAA,EAAA,MAAA;EAAO,CAAA,CAAA;AAoE9B,CAAA;AAgcgB,KAtqGJ,YAAA,GAsqGqB;EAUX;EAwBrB,QAAA,EAAA,CAAA,MAAA,EAAA,MAAA,EAAA,OAAA,CAAA,EAtsGsC,qBAssGtC,EAAA,GAtsGgE,OAssGhE,CAAA,MAAA,CAAA;;;;;;;;;;;;;;;;iBAjrGqB,kBAAA,WAA4B,sBAA2B,QAAQ;;KA+ezE,OAAA;;;;;;;;;KAUA,eAAA;;;;;;;;;;;KAYA,cAAA;;;;;;;;;;;;oBAYQ;;;;;;;;;KAUR,aAAA;;YAEA;;;;;;;;;;;;mBAUO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAuDH,OAAA,WAAiB,iBAAsB;;KAwS3C,oBAAA;;;;;;;;;;;;;;;;;;;KAoBA,eAAA;;;;;KAMA,mBAAA;;;;;;uCAM2B,oBAAoB;;;;mBAIxC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAoCH,aAAA,WAAuB,uBAA4B;;KAsMvD,WAAA;;;;;;;;KASA,UAAA;;KAGA,gBAAA;;;;;;;;KA0NA,gBAAA;;UAEF;;;;;;;;;;;;;;;;;KAkBE,eAAA;;;;;QAE6D;;;;;;mBAMtD;;;;;;;;;;oBAUC;;;;;;;;;;gBAUJ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAuCA,SAAA,WAAmB,mBAAwB;;;;;;;;;;;;;;;iBAiYrC,SAAA,QACb,oCAEN;;WAAqC;;;;;;;;;;;;;;;;;;;iBAmDxB,iBAAA;iBACC;;;;;;;KA+DL,WAAA;;;;;;;;;KAUA,oBAAA;;;;;;;;;;;;0BAYc;;;;;;;;;;;KAYd,mBAAA;;wBAEY;;uBAED;;;;sBAID,iBAAiB;;;;;;;;;;;;;;;;mBAgBpB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAiDH,aAAA,WAAuB,uBAA4B;;;;KA2iBvD,mBAAA;;;;;;aAMC;;;;;;;;;;;;;;;;;;;;;KAsBD,gBAAA;;;;;;;;;;KAWA,kBAAA;;YAEA;;wBAEY;;uBAED;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAoEP,YAAA,WAAsB,sBAA2B;;;;iBAgcjD,iBAAA,CAAA;;;;iBAUM,aAAA,CAAA,GAAiB;;;;;cAwBtC"}
@@ -1,5 +1,115 @@
- import { o as resolveModel, t as BUILTIN_MODELS } from "../models-BAtL8qsA.mjs";
+ //#region src/core/models.ts
+ const BUILTIN_MODELS = {
+ "qwen3-0.6b": {
+ id: "qwen3-0.6b",
+ repo: "onnx-community/Qwen3-0.6B-ONNX",
+ description: "Qwen3 0.6B - Best balance of speed and quality, supports thinking",
+ size: "~400MB",
+ contextLength: 32768,
+ supportsThinking: true,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "qwen2.5-0.5b": {
+ id: "qwen2.5-0.5b",
+ repo: "onnx-community/Qwen2.5-0.5B-Instruct",
+ description: "Qwen2.5 0.5B - Fast and capable",
+ size: "~350MB",
+ contextLength: 32768,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "qwen2.5-coder-0.5b": {
+ id: "qwen2.5-coder-0.5b",
+ repo: "onnx-community/Qwen2.5-Coder-0.5B-Instruct",
+ description: "Qwen2.5 Coder 0.5B - Optimized for code",
+ size: "~400MB",
+ contextLength: 32768,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "qwen"
+ },
+ "smollm2-360m": {
+ id: "smollm2-360m",
+ repo: "HuggingFaceTB/SmolLM2-360M-Instruct",
+ description: "SmolLM2 360M - Fast, good for simple tasks",
+ size: "~250MB",
+ contextLength: 8192,
+ supportsThinking: false,
+ supportsJson: false,
+ family: "smollm"
+ },
+ "smollm2-135m": {
+ id: "smollm2-135m",
+ repo: "HuggingFaceTB/SmolLM2-135M-Instruct",
+ description: "SmolLM2 135M - Fastest, basic generation",
+ size: "~100MB",
+ contextLength: 8192,
+ supportsThinking: false,
+ supportsJson: false,
+ family: "smollm"
+ },
+ "phi-3-mini": {
+ id: "phi-3-mini",
+ repo: "microsoft/Phi-3-mini-4k-instruct-onnx",
+ description: "Phi-3 Mini - High quality, larger model",
+ size: "~2.1GB",
+ contextLength: 4096,
+ supportsThinking: false,
+ supportsJson: true,
+ family: "phi"
+ },
+ "ministral-3b": {
+ id: "ministral-3b",
+ repo: "mistralai/Ministral-3-3B-Instruct-2512-ONNX",
+ description: "Ministral 3 3B - Vision + Reasoning, 256k context",
+ size: "~2.5GB",
+ contextLength: 262144,
+ supportsThinking: true,
+ supportsJson: true,
+ supportsVision: true,
+ visionEncoderSize: "0.4B",
+ family: "mistral"
+ }
+ };
+ /**
+ * Parse model identifier and resolve to source
+ *
+ * Supported formats:
+ * - "qwen3-0.6b" (built-in)
+ * - "hf:org/model" (HuggingFace shorthand)
+ * - "https://huggingface.co/org/model" (full URL)
+ * - "file:./path/to/model" (local path)
+ */
+ function resolveModel(modelId) {
+ if (BUILTIN_MODELS[modelId]) return {
+ type: "builtin",
+ path: BUILTIN_MODELS[modelId].repo
+ };
+ if (modelId.startsWith("hf:")) return {
+ type: "huggingface",
+ path: modelId.slice(3)
+ };
+ if (modelId.startsWith("https://huggingface.co/")) return {
+ type: "huggingface",
+ path: modelId.replace("https://huggingface.co/", "")
+ };
+ if (modelId.startsWith("file:")) return {
+ type: "local",
+ path: modelId.slice(5)
+ };
+ if (modelId.includes("/")) return {
+ type: "huggingface",
+ path: modelId
+ };
+ return {
+ type: "huggingface",
+ path: modelId
+ };
+ }

+ //#endregion
  //#region src/browser/index.ts
  /**
  * Gerbil Browser Support
@@ -1642,7 +1752,7 @@ function useVoiceInput(options = {}) {
  status: "loading",
  message: "Loading STT model..."
  });
- const { WhisperSTT } = await import("../stt-KzSoNvwI.mjs");
+ const { WhisperSTT } = await import("../stt-BtklAjR2.js");
  if (cancelled || !mountedRef.current) return;
  const stt = new WhisperSTT(model);
  await stt.load({ onProgress: (p) => {
@@ -1778,7 +1888,7 @@ function useVoiceInput(options = {}) {
  if (streaming && !sttRef.current) {
  if (!shouldLoad) setShouldLoad(true);
  setIsLoading(true);
- const { WhisperSTT } = await import("../stt-KzSoNvwI.mjs");
+ const { WhisperSTT } = await import("../stt-BtklAjR2.js");
  const stt = new WhisperSTT(model);
  await stt.load({ onProgress: (p) => {
  if (mountedRef.current) {
@@ -2061,7 +2171,7 @@ function useVoiceChat(options = {}) {
  setIsLoading(true);
  setError(null);
  setLoadingMessage("Loading speech recognition (Whisper)...");
- const { WhisperSTT } = await import("../stt-KzSoNvwI.mjs");
+ const { WhisperSTT } = await import("../stt-BtklAjR2.js");
  if (cancelled || !mountedRef.current) return;
  const stt = new WhisperSTT(sttModel);
  await stt.load({ onProgress: (p) => {
@@ -2087,7 +2197,7 @@ function useVoiceChat(options = {}) {
  }
  llmWorkerRef.current = worker;
  setLoadingMessage(`Loading text-to-speech (${ttsModelId === "supertonic-66m" ? "Supertonic" : "Kokoro"})...`);
- const { createTTS } = await import("../tts-5yWeP_I0.mjs");
+ const { createTTS } = await import("../tts-DKIOWafo.js");
  if (cancelled || !mountedRef.current) return;
  const tts = createTTS(ttsModelId);
  await tts.load({ onProgress: (p) => {
@@ -2378,4 +2488,4 @@ var browser_default = {

  //#endregion
  export { BUILTIN_MODELS, createAudioPlayer, createGerbilWorker, browser_default as default, getWebGPUInfo, isWebGPUSupported, playAudio, useChat, useCompletion, useSpeech, useVoiceChat, useVoiceInput };
- //# sourceMappingURL=index.mjs.map
+ //# sourceMappingURL=index.js.map
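
For reference, the resolveModel helper inlined in the models hunk above resolves each supported identifier format to a ModelSource as follows. This is an illustrative walk-through of the code shown in the diff, not an additional export; the ModelSource alias is restated locally so the snippet stands alone:

// Expected resolutions, derived from the resolveModel body shown above (illustration only).
type ModelSource = { type: "builtin" | "huggingface" | "local"; path: string };

const examples: Array<[string, ModelSource]> = [
  ["qwen3-0.6b", { type: "builtin", path: "onnx-community/Qwen3-0.6B-ONNX" }],      // built-in id -> repo
  ["hf:org/model", { type: "huggingface", path: "org/model" }],                     // "hf:" prefix stripped
  ["https://huggingface.co/org/model", { type: "huggingface", path: "org/model" }], // URL prefix stripped
  ["file:./models/custom", { type: "local", path: "./models/custom" }],             // "file:" prefix stripped
  ["org/model", { type: "huggingface", path: "org/model" }],                        // bare repo path
  ["unknown-id", { type: "huggingface", path: "unknown-id" }],                      // final fallback
];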