@tryhamster/gerbil 1.0.0-rc.11 → 1.0.0-rc.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/README.md +52 -1
  2. package/dist/browser/index.d.ts +159 -1
  3. package/dist/browser/index.d.ts.map +1 -1
  4. package/dist/browser/index.js +473 -6
  5. package/dist/browser/index.js.map +1 -1
  6. package/dist/cli.mjs +7 -7
  7. package/dist/cli.mjs.map +1 -1
  8. package/dist/frameworks/express.d.mts +1 -1
  9. package/dist/frameworks/express.mjs +2 -1
  10. package/dist/frameworks/express.mjs.map +1 -1
  11. package/dist/frameworks/fastify.d.mts +1 -1
  12. package/dist/frameworks/fastify.mjs +2 -1
  13. package/dist/frameworks/fastify.mjs.map +1 -1
  14. package/dist/frameworks/hono.d.mts +1 -1
  15. package/dist/frameworks/hono.mjs +2 -1
  16. package/dist/frameworks/hono.mjs.map +1 -1
  17. package/dist/frameworks/next.d.mts +2 -2
  18. package/dist/frameworks/next.mjs +2 -1
  19. package/dist/frameworks/next.mjs.map +1 -1
  20. package/dist/frameworks/react.d.mts +1 -1
  21. package/dist/frameworks/trpc.d.mts +1 -1
  22. package/dist/frameworks/trpc.mjs +2 -1
  23. package/dist/frameworks/trpc.mjs.map +1 -1
  24. package/dist/{gerbil-DoDGHe6Z.mjs → gerbil-BZklpDhM.mjs} +289 -1
  25. package/dist/gerbil-BZklpDhM.mjs.map +1 -0
  26. package/dist/gerbil-CAMb_nrK.mjs +5 -0
  27. package/dist/{gerbil-qOTe1nl2.d.mts → gerbil-DJygY0sJ.d.mts} +120 -2
  28. package/dist/gerbil-DJygY0sJ.d.mts.map +1 -0
  29. package/dist/index.d.mts +3 -3
  30. package/dist/index.d.mts.map +1 -1
  31. package/dist/index.mjs +2 -2
  32. package/dist/index.mjs.map +1 -1
  33. package/dist/integrations/ai-sdk.d.mts +72 -3
  34. package/dist/integrations/ai-sdk.d.mts.map +1 -1
  35. package/dist/integrations/ai-sdk.mjs +106 -3
  36. package/dist/integrations/ai-sdk.mjs.map +1 -1
  37. package/dist/integrations/langchain.d.mts +1 -1
  38. package/dist/integrations/langchain.mjs +2 -1
  39. package/dist/integrations/langchain.mjs.map +1 -1
  40. package/dist/integrations/llamaindex.d.mts +1 -1
  41. package/dist/integrations/llamaindex.mjs +2 -1
  42. package/dist/integrations/llamaindex.mjs.map +1 -1
  43. package/dist/integrations/mcp.d.mts +2 -2
  44. package/dist/integrations/mcp.mjs +5 -4
  45. package/dist/{mcp-kzDDWIoS.mjs → mcp-ZCC5OR7B.mjs} +3 -3
  46. package/dist/{mcp-kzDDWIoS.mjs.map → mcp-ZCC5OR7B.mjs.map} +1 -1
  47. package/dist/{one-liner-DxnNs_JK.mjs → one-liner-mH5SKPvT.mjs} +2 -2
  48. package/dist/{one-liner-DxnNs_JK.mjs.map → one-liner-mH5SKPvT.mjs.map} +1 -1
  49. package/dist/{repl-DGUw4fCc.mjs → repl-CSM1IBP1.mjs} +3 -3
  50. package/dist/skills/index.d.mts +3 -3
  51. package/dist/skills/index.d.mts.map +1 -1
  52. package/dist/skills/index.mjs +4 -3
  53. package/dist/{skills-DulrOPeP.mjs → skills-CPB_9YfF.mjs} +2 -2
  54. package/dist/{skills-DulrOPeP.mjs.map → skills-CPB_9YfF.mjs.map} +1 -1
  55. package/dist/{types-CiTc7ez3.d.mts → types-evP8RShr.d.mts} +26 -2
  56. package/dist/types-evP8RShr.d.mts.map +1 -0
  57. package/docs/ai-sdk.md +56 -1
  58. package/docs/browser.md +103 -0
  59. package/docs/embeddings.md +311 -0
  60. package/package.json +1 -1
  61. package/dist/gerbil-DJGqq7BX.mjs +0 -4
  62. package/dist/gerbil-DoDGHe6Z.mjs.map +0 -1
  63. package/dist/gerbil-qOTe1nl2.d.mts.map +0 -1
  64. package/dist/types-CiTc7ez3.d.mts.map +0 -1
@@ -1878,7 +1878,7 @@ function useVoiceInput(options = {}) {
1878
1878
  const fullTranscriptRef = useRef("");
1879
1879
  const transcribeResolveRef = useRef(null);
1880
1880
  const transcribeRejectRef = useRef(null);
1881
- const resolveSTTModel = (modelId) => {
1881
+ const resolveSTTModel$1 = (modelId) => {
1882
1882
  return {
1883
1883
  "whisper-tiny": "onnx-community/whisper-tiny",
1884
1884
  "whisper-tiny.en": "onnx-community/whisper-tiny.en",
@@ -1969,7 +1969,7 @@ function useVoiceInput(options = {}) {
1969
1969
  };
1970
1970
  worker.postMessage({
1971
1971
  type: "load",
1972
- payload: { model: resolveSTTModel(model) }
1972
+ payload: { model: resolveSTTModel$1(model) }
1973
1973
  });
1974
1974
  return () => {
1975
1975
  mountedRef.current = false;
@@ -2348,7 +2348,7 @@ function useVoiceChat(options = {}) {
2348
2348
  const isListening = stage === "listening";
2349
2349
  const isProcessing = stage === "transcribing" || stage === "thinking";
2350
2350
  const isSpeaking = stage === "speaking";
2351
- const resolveSTTModel = (modelId) => {
2351
+ const resolveSTTModel$1 = (modelId) => {
2352
2352
  return {
2353
2353
  "whisper-tiny": "onnx-community/whisper-tiny",
2354
2354
  "whisper-tiny.en": "onnx-community/whisper-tiny.en",
@@ -2381,7 +2381,7 @@ function useVoiceChat(options = {}) {
2381
2381
  sttWorker.onerror = (e) => reject(new Error(e.message));
2382
2382
  sttWorker.postMessage({
2383
2383
  type: "load",
2384
- payload: { model: resolveSTTModel(sttModel) }
2384
+ payload: { model: resolveSTTModel$1(sttModel) }
2385
2385
  });
2386
2386
  });
2387
2387
  if (cancelled || !mountedRef.current) {
@@ -2715,6 +2715,469 @@ function useVoiceChat(options = {}) {
2715
2715
  load
2716
2716
  };
2717
2717
  }
2718
+ const EMBEDDING_WORKER_CODE = `
2719
+ // Embedding Worker - runs in separate thread, loads from CDN
2720
+ import { pipeline, env } from "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.1";
2721
+
2722
+ // Configure environment
2723
+ env.useBrowserCache = true;
2724
+ env.allowLocalModels = false;
2725
+
2726
+ let embedder = null;
2727
+ let modelId = null;
2728
+
2729
+ self.onmessage = async (e) => {
2730
+ const { type, payload } = e.data;
2731
+
2732
+ if (type === "load") {
2733
+ try {
2734
+ modelId = payload.model || "Xenova/all-MiniLM-L6-v2";
2735
+
2736
+ embedder = await pipeline("feature-extraction", modelId, {
2737
+ progress_callback: (progress) => {
2738
+ self.postMessage({ type: "progress", payload: progress });
2739
+ },
2740
+ });
2741
+
2742
+ // Warmup
2743
+ try {
2744
+ await embedder("hello", { pooling: "mean", normalize: true });
2745
+ } catch (e) {
2746
+ console.warn("Embedding warmup failed:", e);
2747
+ }
2748
+
2749
+ self.postMessage({ type: "ready" });
2750
+ } catch (err) {
2751
+ self.postMessage({ type: "error", payload: err.message || String(err) });
2752
+ }
2753
+ }
2754
+
2755
+ if (type === "embed") {
2756
+ try {
2757
+ const { text, normalize } = payload;
2758
+ const output = await embedder(text, {
2759
+ pooling: "mean",
2760
+ normalize: normalize !== false,
2761
+ });
2762
+
2763
+ const vector = Array.from(output.data);
2764
+ self.postMessage({ type: "embedding", payload: { vector, text } });
2765
+ } catch (err) {
2766
+ self.postMessage({ type: "error", payload: err.message || String(err) });
2767
+ }
2768
+ }
2769
+
2770
+ if (type === "embedBatch") {
2771
+ try {
2772
+ const { texts, normalize } = payload;
2773
+ const results = [];
2774
+
2775
+ for (const text of texts) {
2776
+ const output = await embedder(text, {
2777
+ pooling: "mean",
2778
+ normalize: normalize !== false,
2779
+ });
2780
+ results.push({ vector: Array.from(output.data), text });
2781
+ }
2782
+
2783
+ self.postMessage({ type: "embeddings", payload: results });
2784
+ } catch (err) {
2785
+ self.postMessage({ type: "error", payload: err.message || String(err) });
2786
+ }
2787
+ }
2788
+ };
2789
+ `;
2790
+ /** Create Embedding worker instance */
2791
+ function createEmbeddingWorker() {
2792
+ const blob = new Blob([EMBEDDING_WORKER_CODE], { type: "application/javascript" });
2793
+ const url = URL.createObjectURL(blob);
2794
+ const worker = new Worker(url, { type: "module" });
2795
+ URL.revokeObjectURL(url);
2796
+ return worker;
2797
+ }
2798
+ /**
2799
+ * React hook for text embeddings in the browser
2800
+ *
2801
+ * @example
2802
+ * ```tsx
2803
+ * import { useEmbedding } from "@tryhamster/gerbil/browser";
2804
+ *
2805
+ * function App() {
2806
+ * const { embed, similarity, search, isLoading, isReady } = useEmbedding();
2807
+ *
2808
+ * if (isLoading) return <div>Loading embedding model...</div>;
2809
+ *
2810
+ * const handleSearch = async () => {
2811
+ * const results = await search("capital of France", [
2812
+ * "Paris is beautiful",
2813
+ * "London is in England",
2814
+ * "Dogs are pets"
2815
+ * ]);
2816
+ * console.log(results); // [{ text: "Paris is beautiful", score: 0.89, index: 0 }, ...]
2817
+ * };
2818
+ *
2819
+ * return <button onClick={handleSearch}>Search</button>;
2820
+ * }
2821
+ * ```
2822
+ */
2823
+ function useEmbedding(options = {}) {
2824
+ const React = globalThis.React;
2825
+ if (!React) throw new Error("useEmbedding requires React. Make sure React is available in the global scope.");
2826
+ const { useState, useEffect, useRef, useCallback } = React;
2827
+ const { model = "Xenova/all-MiniLM-L6-v2", normalize = true, autoLoad = false, onReady, onError } = options;
2828
+ const [isLoading, setIsLoading] = useState(false);
2829
+ const [isReady, setIsReady] = useState(false);
2830
+ const [error, setError] = useState(null);
2831
+ const [loadingProgress, setLoadingProgress] = useState(null);
2832
+ const workerRef = useRef(null);
2833
+ const loadRequestedRef = useRef(false);
2834
+ const readyPromiseRef = useRef(null);
2835
+ const readyResolveRef = useRef(null);
2836
+ const cosineSimilarity = useCallback((a, b) => {
2837
+ if (a.length !== b.length) throw new Error(`Vector dimensions must match: ${a.length} vs ${b.length}`);
2838
+ let dotProduct = 0;
2839
+ let normA = 0;
2840
+ let normB = 0;
2841
+ for (let i = 0; i < a.length; i++) {
2842
+ dotProduct += a[i] * b[i];
2843
+ normA += a[i] * a[i];
2844
+ normB += b[i] * b[i];
2845
+ }
2846
+ const magnitude = Math.sqrt(normA) * Math.sqrt(normB);
2847
+ if (magnitude === 0) return 0;
2848
+ return dotProduct / magnitude;
2849
+ }, []);
2850
+ const load = useCallback(() => {
2851
+ if (isReady && workerRef.current) return Promise.resolve();
2852
+ if (loadRequestedRef.current && readyPromiseRef.current) return readyPromiseRef.current;
2853
+ loadRequestedRef.current = true;
2854
+ setIsLoading(true);
2855
+ setLoadingProgress({
2856
+ status: "loading",
2857
+ message: "Loading embedding model..."
2858
+ });
2859
+ readyPromiseRef.current = new Promise((resolve) => {
2860
+ readyResolveRef.current = resolve;
2861
+ });
2862
+ const worker = createEmbeddingWorker();
2863
+ workerRef.current = worker;
2864
+ worker.onmessage = (e) => {
2865
+ const { type, payload } = e.data;
2866
+ if (type === "progress") {
2867
+ if (payload.status === "progress" && payload.file) setLoadingProgress({
2868
+ status: "downloading",
2869
+ message: `Downloading ${payload.file}`,
2870
+ progress: Math.round(payload.loaded / payload.total * 100)
2871
+ });
2872
+ } else if (type === "ready") {
2873
+ setIsLoading(false);
2874
+ setIsReady(true);
2875
+ setLoadingProgress({ status: "ready" });
2876
+ readyResolveRef.current?.();
2877
+ onReady?.();
2878
+ } else if (type === "error") {
2879
+ setIsLoading(false);
2880
+ setError(payload);
2881
+ onError?.(payload);
2882
+ }
2883
+ };
2884
+ worker.postMessage({
2885
+ type: "load",
2886
+ payload: { model }
2887
+ });
2888
+ return readyPromiseRef.current;
2889
+ }, [
2890
+ model,
2891
+ isReady,
2892
+ onReady,
2893
+ onError
2894
+ ]);
2895
+ useEffect(() => {
2896
+ if (autoLoad) load();
2897
+ return () => {
2898
+ if (workerRef.current) {
2899
+ workerRef.current.terminate();
2900
+ workerRef.current = null;
2901
+ }
2902
+ };
2903
+ }, [autoLoad, load]);
2904
+ const embed = useCallback(async (text) => {
2905
+ await load();
2906
+ return new Promise((resolve, reject) => {
2907
+ const worker = workerRef.current;
2908
+ if (!worker) {
2909
+ reject(/* @__PURE__ */ new Error("Worker not initialized"));
2910
+ return;
2911
+ }
2912
+ const handler = (e) => {
2913
+ if (e.data.type === "embedding") {
2914
+ worker.removeEventListener("message", handler);
2915
+ resolve(e.data.payload.vector);
2916
+ } else if (e.data.type === "error") {
2917
+ worker.removeEventListener("message", handler);
2918
+ reject(new Error(e.data.payload));
2919
+ }
2920
+ };
2921
+ worker.addEventListener("message", handler);
2922
+ worker.postMessage({
2923
+ type: "embed",
2924
+ payload: {
2925
+ text,
2926
+ normalize
2927
+ }
2928
+ });
2929
+ });
2930
+ }, [load, normalize]);
2931
+ const embedBatch = useCallback(async (texts) => {
2932
+ await load();
2933
+ return new Promise((resolve, reject) => {
2934
+ const worker = workerRef.current;
2935
+ if (!worker) {
2936
+ reject(/* @__PURE__ */ new Error("Worker not initialized"));
2937
+ return;
2938
+ }
2939
+ const handler = (e) => {
2940
+ if (e.data.type === "embeddings") {
2941
+ worker.removeEventListener("message", handler);
2942
+ resolve(e.data.payload);
2943
+ } else if (e.data.type === "error") {
2944
+ worker.removeEventListener("message", handler);
2945
+ reject(new Error(e.data.payload));
2946
+ }
2947
+ };
2948
+ worker.addEventListener("message", handler);
2949
+ worker.postMessage({
2950
+ type: "embedBatch",
2951
+ payload: {
2952
+ texts,
2953
+ normalize
2954
+ }
2955
+ });
2956
+ });
2957
+ }, [load, normalize]);
2958
+ return {
2959
+ embed,
2960
+ embedBatch,
2961
+ similarity: useCallback(async (textA, textB) => {
2962
+ const [embA, embB] = await Promise.all([embed(textA), embed(textB)]);
2963
+ return cosineSimilarity(embA, embB);
2964
+ }, [embed, cosineSimilarity]),
2965
+ search: useCallback(async (query, corpus, topK) => {
2966
+ const [queryEmb, corpusEmbs] = await Promise.all([embed(query), embedBatch(corpus)]);
2967
+ const results = corpusEmbs.map((doc, index) => ({
2968
+ text: doc.text,
2969
+ score: cosineSimilarity(queryEmb, doc.vector),
2970
+ index
2971
+ }));
2972
+ results.sort((a, b) => b.score - a.score);
2973
+ return topK ? results.slice(0, topK) : results;
2974
+ }, [
2975
+ embed,
2976
+ embedBatch,
2977
+ cosineSimilarity
2978
+ ]),
2979
+ findNearest: useCallback(async (embedding, candidates, topK) => {
2980
+ const results = (await embedBatch(candidates)).map((doc, index) => ({
2981
+ text: doc.text,
2982
+ score: cosineSimilarity(embedding, doc.vector),
2983
+ index
2984
+ }));
2985
+ results.sort((a, b) => b.score - a.score);
2986
+ return topK ? results.slice(0, topK) : results;
2987
+ }, [embedBatch, cosineSimilarity]),
2988
+ cosineSimilarity,
2989
+ load,
2990
+ isLoading,
2991
+ isReady,
2992
+ loadingProgress,
2993
+ error
2994
+ };
2995
+ }
2996
+ /**
2997
+ * Preload a chat/LLM model (downloads to IndexedDB cache)
2998
+ *
2999
+ * Call this during app initialization to ensure the model is ready
3000
+ * when users need it.
3001
+ *
3002
+ * @example
3003
+ * ```ts
3004
+ * // In your app's initialization
3005
+ * import { preloadChatModel } from "@tryhamster/gerbil/browser";
3006
+ *
3007
+ * await preloadChatModel("qwen3-0.6b", {
3008
+ * onProgress: (p) => console.log(p.status, p.progress),
3009
+ * });
3010
+ *
3011
+ * // Later, useChat will load instantly from cache
3012
+ * ```
3013
+ */
3014
+ async function preloadChatModel(modelId, options = {}) {
3015
+ const { onProgress } = options;
3016
+ const worker = await createGerbilWorker({
3017
+ modelId,
3018
+ onProgress: (p) => {
3019
+ if (p.status === "downloading") onProgress?.({
3020
+ status: "downloading",
3021
+ file: p.file,
3022
+ progress: p.progress
3023
+ });
3024
+ else onProgress?.({
3025
+ status: "loading",
3026
+ message: p.status
3027
+ });
3028
+ }
3029
+ });
3030
+ onProgress?.({ status: "ready" });
3031
+ worker.terminate();
3032
+ }
3033
+ /**
3034
+ * Preload an embedding model
3035
+ *
3036
+ * @example
3037
+ * ```ts
3038
+ * await preloadEmbeddingModel("Xenova/all-MiniLM-L6-v2");
3039
+ * ```
3040
+ */
3041
+ async function preloadEmbeddingModel(modelId = "Xenova/all-MiniLM-L6-v2", options = {}) {
3042
+ const { onProgress } = options;
3043
+ return new Promise((resolve, reject) => {
3044
+ const worker = createEmbeddingWorker();
3045
+ worker.onmessage = (e) => {
3046
+ const { type, payload } = e.data;
3047
+ if (type === "progress") {
3048
+ if (payload.status === "progress" && payload.file) onProgress?.({
3049
+ status: "downloading",
3050
+ file: payload.file,
3051
+ progress: Math.round(payload.loaded / payload.total * 100)
3052
+ });
3053
+ } else if (type === "ready") {
3054
+ onProgress?.({ status: "ready" });
3055
+ worker.terminate();
3056
+ resolve();
3057
+ } else if (type === "error") {
3058
+ onProgress?.({
3059
+ status: "error",
3060
+ message: payload
3061
+ });
3062
+ worker.terminate();
3063
+ reject(new Error(payload));
3064
+ }
3065
+ };
3066
+ onProgress?.({
3067
+ status: "loading",
3068
+ message: `Loading ${modelId}...`
3069
+ });
3070
+ worker.postMessage({
3071
+ type: "load",
3072
+ payload: { model: modelId }
3073
+ });
3074
+ });
3075
+ }
3076
+ /**
3077
+ * Preload a TTS model
3078
+ *
3079
+ * @example
3080
+ * ```ts
3081
+ * await preloadTTSModel("kokoro-82m");
3082
+ * ```
3083
+ */
3084
+ async function preloadTTSModel(modelId = "kokoro-82m", options = {}) {
3085
+ const { onProgress } = options;
3086
+ const modelConfig = TTS_MODELS[modelId];
3087
+ if (!modelConfig) throw new Error(`Unknown TTS model: ${modelId}`);
3088
+ return new Promise((resolve, reject) => {
3089
+ const worker = createTTSWorker();
3090
+ worker.onmessage = (e) => {
3091
+ const { type, payload } = e.data;
3092
+ if (type === "progress") {
3093
+ if (payload.status === "progress" && payload.file) onProgress?.({
3094
+ status: "downloading",
3095
+ file: payload.file,
3096
+ progress: Math.round(payload.loaded / payload.total * 100)
3097
+ });
3098
+ } else if (type === "ready") {
3099
+ onProgress?.({ status: "ready" });
3100
+ worker.terminate();
3101
+ resolve();
3102
+ } else if (type === "error") {
3103
+ onProgress?.({
3104
+ status: "error",
3105
+ message: payload
3106
+ });
3107
+ worker.terminate();
3108
+ reject(new Error(payload));
3109
+ }
3110
+ };
3111
+ onProgress?.({
3112
+ status: "loading",
3113
+ message: `Loading ${modelId}...`
3114
+ });
3115
+ worker.postMessage({
3116
+ type: "load",
3117
+ payload: {
3118
+ modelId,
3119
+ repo: modelConfig.repo,
3120
+ voices: modelConfig.voices
3121
+ }
3122
+ });
3123
+ });
3124
+ }
3125
+ /**
3126
+ * Preload an STT model
3127
+ *
3128
+ * @example
3129
+ * ```ts
3130
+ * await preloadSTTModel("whisper-tiny.en");
3131
+ * ```
3132
+ */
3133
+ async function preloadSTTModel(modelId = "whisper-tiny.en", options = {}) {
3134
+ const { onProgress } = options;
3135
+ const resolved = resolveSTTModel(modelId);
3136
+ return new Promise((resolve, reject) => {
3137
+ const worker = createSTTWorker();
3138
+ worker.onmessage = (e) => {
3139
+ const { type, payload } = e.data;
3140
+ if (type === "progress") {
3141
+ if (payload.status === "progress" && payload.file) onProgress?.({
3142
+ status: "downloading",
3143
+ file: payload.file,
3144
+ progress: Math.round(payload.loaded / payload.total * 100)
3145
+ });
3146
+ } else if (type === "ready") {
3147
+ onProgress?.({ status: "ready" });
3148
+ worker.terminate();
3149
+ resolve();
3150
+ } else if (type === "error") {
3151
+ onProgress?.({
3152
+ status: "error",
3153
+ message: payload
3154
+ });
3155
+ worker.terminate();
3156
+ reject(new Error(payload));
3157
+ }
3158
+ };
3159
+ onProgress?.({
3160
+ status: "loading",
3161
+ message: `Loading ${modelId}...`
3162
+ });
3163
+ worker.postMessage({
3164
+ type: "load",
3165
+ payload: { modelId: resolved }
3166
+ });
3167
+ });
3168
+ }
3169
+ /** Helper to resolve STT model ID to repo */
3170
+ function resolveSTTModel(modelId) {
3171
+ return {
3172
+ "whisper-tiny": "onnx-community/whisper-tiny",
3173
+ "whisper-tiny.en": "onnx-community/whisper-tiny.en",
3174
+ "whisper-base": "onnx-community/whisper-base",
3175
+ "whisper-base.en": "onnx-community/whisper-base.en",
3176
+ "whisper-small": "onnx-community/whisper-small",
3177
+ "whisper-small.en": "onnx-community/whisper-small.en",
3178
+ "whisper-large-v3-turbo": "onnx-community/whisper-large-v3-turbo"
3179
+ }[modelId] || modelId;
3180
+ }
2718
3181
  /**
2719
3182
  * Check if WebGPU is supported
2720
3183
  */
@@ -2745,9 +3208,13 @@ var browser_default = {
2745
3208
  getWebGPUInfo,
2746
3209
  createGerbilWorker,
2747
3210
  playAudio,
2748
- createAudioPlayer
3211
+ createAudioPlayer,
3212
+ preloadChatModel,
3213
+ preloadEmbeddingModel,
3214
+ preloadTTSModel,
3215
+ preloadSTTModel
2749
3216
  };
2750
3217
 
2751
3218
  //#endregion
2752
- export { BUILTIN_MODELS, createAudioPlayer, createGerbilWorker, browser_default as default, getWebGPUInfo, isWebGPUSupported, playAudio, useChat, useCompletion, useSpeech, useVoiceChat, useVoiceInput };
3219
+ export { BUILTIN_MODELS, createAudioPlayer, createGerbilWorker, browser_default as default, getWebGPUInfo, isWebGPUSupported, playAudio, preloadChatModel, preloadEmbeddingModel, preloadSTTModel, preloadTTSModel, useChat, useCompletion, useEmbedding, useSpeech, useVoiceChat, useVoiceInput };
2753
3220
  //# sourceMappingURL=index.js.map