@reverbia/sdk 1.0.0-next.20251205183506 → 1.0.0-next.20251208093930

This diff compares the published contents of two publicly available versions of this package as released to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their public registry.
@@ -40,6 +40,19 @@ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__ge
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
  var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);

+ // src/lib/chat/constants.ts
+ var constants_exports = {};
+ __export(constants_exports, {
+   DEFAULT_LOCAL_CHAT_MODEL: () => DEFAULT_LOCAL_CHAT_MODEL
+ });
+ var DEFAULT_LOCAL_CHAT_MODEL;
+ var init_constants = __esm({
+   "src/lib/chat/constants.ts"() {
+     "use strict";
+     DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";
+   }
+ });
+
  // node_modules/.pnpm/onnxruntime-common@1.21.0/node_modules/onnxruntime-common/dist/esm/backend-impl.js
  var backends, backendsSortedByPriority, registerBackend, tryResolveAndInitializeBackend, resolveBackendAndExecutionProviders;
  var init_backend_impl = __esm({
@@ -47098,6 +47111,246 @@ ${fake_token_around_image}${global_img_token}` + image_token.repeat(image_seq_le
    }
  });

+ // src/lib/chat/pipeline.ts
+ async function getTextGenerationPipeline(options) {
+   const { model, device = "wasm", dtype = "q4" } = options;
+   if (sharedPipeline && currentModel === model && currentDevice === device) {
+     return sharedPipeline;
+   }
+   const { pipeline, env: env3 } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
+   env3.allowLocalModels = false;
+   if (env3.backends?.onnx) {
+     env3.backends.onnx.logLevel = "fatal";
+   }
+   console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
+   sharedPipeline = await pipeline("text-generation", model, {
+     dtype,
+     device
+   });
+   currentModel = model;
+   currentDevice = device;
+   console.log(`[Pipeline] Model loaded: ${model}`);
+   return sharedPipeline;
+ }
+ var sharedPipeline, currentModel, currentDevice;
+ var init_pipeline = __esm({
+   "src/lib/chat/pipeline.ts"() {
+     "use strict";
+     sharedPipeline = null;
+     currentModel = null;
+     currentDevice = null;
+   }
+ });
+
+ // src/lib/chat/generation.ts
+ var generation_exports = {};
+ __export(generation_exports, {
+   generateLocalChatCompletion: () => generateLocalChatCompletion
+ });
+ async function generateLocalChatCompletion(messages, options = {}) {
+   const {
+     model = DEFAULT_LOCAL_CHAT_MODEL,
+     temperature = 0.7,
+     max_tokens = 1024,
+     top_p = 0.9,
+     onToken,
+     signal
+   } = options;
+   const { TextStreamer } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
+   const chatPipeline = await getTextGenerationPipeline({
+     model,
+     device: "wasm",
+     dtype: "q4"
+   });
+   class CallbackStreamer extends TextStreamer {
+     constructor(tokenizer, cb) {
+       super(tokenizer, {
+         skip_prompt: true,
+         skip_special_tokens: true
+       });
+       this.cb = cb;
+     }
+     on_finalized_text(text) {
+       if (signal?.aborted) {
+         throw new Error("AbortError");
+       }
+       this.cb(text);
+     }
+   }
+   const streamer = onToken ? new CallbackStreamer(chatPipeline.tokenizer, onToken) : void 0;
+   const output = await chatPipeline(messages, {
+     max_new_tokens: max_tokens,
+     temperature,
+     top_p,
+     streamer,
+     return_full_text: false
+   });
+   return output;
+ }
+ var init_generation = __esm({
+   "src/lib/chat/generation.ts"() {
+     "use strict";
+     init_constants();
+     init_pipeline();
+   }
+ });
+
+ // src/lib/tools/selector.ts
+ var selector_exports = {};
+ __export(selector_exports, {
+   DEFAULT_TOOL_SELECTOR_MODEL: () => DEFAULT_TOOL_SELECTOR_MODEL,
+   executeTool: () => executeTool,
+   preloadToolSelectorModel: () => preloadToolSelectorModel,
+   selectTool: () => selectTool
+ });
+ function buildToolSelectionPrompt(userMessage, tools) {
+   const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
+   return `Pick the best tool for the task. Reply with ONLY the tool name.
+
+ Available tools:
+ ${toolList}
+ none (no tool needed)
+
+ Task: "${userMessage}"
+
+ Best tool:`;
+ }
+ function extractParams(userMessage, tool) {
+   const params = {};
+   if (!tool.parameters) return params;
+   for (const param of tool.parameters) {
+     if (param.name === "expression" || param.name === "query") {
+       params[param.name] = userMessage;
+     } else if (param.name === "location" || param.name === "city") {
+       const words = userMessage.split(/\s+/);
+       const capitalizedWords = words.filter(
+         (w) => w.length > 1 && w[0] === w[0].toUpperCase()
+       );
+       params[param.name] = capitalizedWords.length > 0 ? capitalizedWords.join(" ") : userMessage;
+     } else if (param.name === "text" || param.name === "input") {
+       params[param.name] = userMessage;
+     } else {
+       params[param.name] = userMessage;
+     }
+   }
+   return params;
+ }
+ function parseToolSelectionResponse(response, tools, userMessage) {
+   console.log("[Tool Selector] Raw response:", response);
+   const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
+   console.log("[Tool Selector] Parsed tool name:", cleaned);
+   if (cleaned === "none" || cleaned === "null" || cleaned === "") {
+     console.log("[Tool Selector] No tool selected");
+     return { toolSelected: false };
+   }
+   const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
+   if (!selectedTool) {
+     const fuzzyTool = tools.find(
+       (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
+     );
+     if (fuzzyTool) {
+       console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
+       const params2 = extractParams(userMessage, fuzzyTool);
+       return {
+         toolSelected: true,
+         toolName: fuzzyTool.name,
+         parameters: params2,
+         confidence: 0.6
+       };
+     }
+     console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
+     return { toolSelected: false };
+   }
+   const params = extractParams(userMessage, selectedTool);
+   console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
+   return {
+     toolSelected: true,
+     toolName: selectedTool.name,
+     parameters: params,
+     confidence: 0.9
+   };
+ }
+ async function selectTool(userMessage, tools, options = {}) {
+   const {
+     model = DEFAULT_TOOL_SELECTOR_MODEL,
+     signal,
+     device = "wasm"
+   } = options;
+   if (!tools.length) {
+     return { toolSelected: false };
+   }
+   console.log(
+     `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
+   );
+   try {
+     const selectorPipeline = await getTextGenerationPipeline({
+       model,
+       device,
+       dtype: "q4"
+       // Aggressive quantization for speed
+     });
+     const prompt = buildToolSelectionPrompt(userMessage, tools);
+     const output = await selectorPipeline(prompt, {
+       max_new_tokens: 4,
+       // Just need the tool name
+       temperature: 0,
+       // Deterministic
+       do_sample: false,
+       return_full_text: false
+     });
+     if (signal?.aborted) {
+       return { toolSelected: false };
+     }
+     const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+     return parseToolSelectionResponse(generatedText, tools, userMessage);
+   } catch (error) {
+     console.error("[Tool Selector] Error:", error);
+     return { toolSelected: false };
+   }
+ }
+ async function preloadToolSelectorModel(options = {}) {
+   if (preloadPromise) {
+     return preloadPromise;
+   }
+   const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
+   console.log(`[Tool Selector] Preloading model: ${model}`);
+   preloadPromise = getTextGenerationPipeline({
+     model,
+     device,
+     dtype: "q4"
+   }).then(() => {
+     console.log(`[Tool Selector] Model preloaded: ${model}`);
+   }).catch((error) => {
+     console.warn("[Tool Selector] Failed to preload model:", error);
+     preloadPromise = null;
+   });
+   return preloadPromise;
+ }
+ async function executeTool(tool, params) {
+   try {
+     console.log(
+       `[Tool Selector] Executing tool ${tool.name} with params:`,
+       params
+     );
+     const result = await tool.execute(params);
+     console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
+     return { success: true, result };
+   } catch (error) {
+     const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
+     console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
+     return { success: false, error: errorMessage };
+   }
+ }
+ var DEFAULT_TOOL_SELECTOR_MODEL, preloadPromise;
+ var init_selector = __esm({
+   "src/lib/tools/selector.ts"() {
+     "use strict";
+     init_pipeline();
+     DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+     preloadPromise = null;
+   }
+ });
+
  // src/react/index.ts
  var index_exports = {};
  __export(index_exports, {
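The hunk above registers three new lazily-initialized modules: a shared text-generation pipeline cached per model/device pair, a local chat-completion helper that streams tokens through a TextStreamer subclass, and a small prompt-based tool selector. As a rough consumer-side sketch of the selector flow, assuming selectTool and executeTool are re-exported from the package's React entry (the export surface is only partially visible in this diff, so the import path is an assumption):

    // Hypothetical usage sketch; the import path and exact exported names
    // are assumptions, not confirmed by this diff.
    import { selectTool, executeTool } from "@reverbia/sdk/react";

    const tools = [
      {
        name: "calculator",
        description: "Evaluate a math expression",
        parameters: [{ name: "expression" }],
        // executeTool awaits tool.execute(params) and wraps the outcome.
        execute: async (params: Record<string, string>) =>
          `evaluated: ${params.expression}`,
      },
    ];

    const selection = await selectTool("What is 2 + 2?", tools);
    if (selection.toolSelected && selection.toolName) {
      const tool = tools.find((t) => t.name === selection.toolName);
      if (tool) {
        // Resolves to { success: true, result } or { success: false, error }.
        const outcome = await executeTool(tool, selection.parameters ?? {});
        console.log(outcome);
      }
    }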
@@ -47125,6 +47378,38 @@ module.exports = __toCommonJS(index_exports);
  // src/react/useChat.ts
  var import_react = require("react");

+ // src/lib/polyfills/textDecoderStream.ts
+ var needsPolyfill = typeof globalThis.TextDecoderStream === "undefined";
+ if (needsPolyfill && typeof globalThis.TransformStream !== "undefined") {
+   class TextDecoderStreamPolyfill {
+     constructor(label = "utf-8", options) {
+       this.decoder = new TextDecoder(label, options);
+       const decoder = this.decoder;
+       this.transform = new TransformStream({
+         transform(chunk, controller) {
+           const text = decoder.decode(chunk, { stream: true });
+           if (text) {
+             controller.enqueue(text);
+           }
+         },
+         flush(controller) {
+           const text = decoder.decode();
+           if (text) {
+             controller.enqueue(text);
+           }
+         }
+       });
+     }
+     get readable() {
+       return this.transform.readable;
+     }
+     get writable() {
+       return this.transform.writable;
+     }
+   }
+   globalThis.TextDecoderStream = TextDecoderStreamPolyfill;
+ }
+
  // src/client/core/bodySerializer.gen.ts
  var jsonBodySerializer = {
    bodySerializer: (body) => JSON.stringify(
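This polyfill targets runtimes that ship TransformStream but not TextDecoderStream (notably React Native's Hermes engine), presumably because the SDK's streaming path decodes response bodies as text. Wrapping a stateful TextDecoder in a TransformStream reproduces the standard behavior, including multi-byte sequences split across chunks (via { stream: true }) and a final flush. A minimal sketch of the call pattern it unblocks; illustrative only, since the SDK's actual streaming code is not part of this diff:

    // Illustrative sketch of consuming a streamed fetch body as text.
    async function readTextStream(response: Response): Promise<string> {
      let out = "";
      if (!response.body) return out;
      const reader = response.body
        .pipeThrough(new TextDecoderStream()) // polyfilled when absent
        .getReader();
      for (;;) {
        const { done, value } = await reader.read();
        if (done) break;
        out += value; // already-decoded UTF-8 text
      }
      return out;
    }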
@@ -47940,219 +48225,24 @@ var createClientConfig = (config) => ({
  // src/client/client.gen.ts
  var client = createClient(createClientConfig(createConfig()));

- // src/lib/chat/constants.ts
- var DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";
-
- // src/lib/chat/pipeline.ts
- var sharedPipeline = null;
- var currentModel = null;
- var currentDevice = null;
- async function getTextGenerationPipeline(options) {
-   const { model, device = "wasm", dtype = "q4" } = options;
-   if (sharedPipeline && currentModel === model && currentDevice === device) {
-     return sharedPipeline;
-   }
-   const { pipeline, env: env3 } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
-   env3.allowLocalModels = false;
-   if (env3.backends?.onnx) {
-     env3.backends.onnx.logLevel = "fatal";
-   }
-   console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
-   sharedPipeline = await pipeline("text-generation", model, {
-     dtype,
-     device
-   });
-   currentModel = model;
-   currentDevice = device;
-   console.log(`[Pipeline] Model loaded: ${model}`);
-   return sharedPipeline;
- }
-
- // src/lib/chat/generation.ts
- async function generateLocalChatCompletion(messages, options = {}) {
-   const {
-     model = DEFAULT_LOCAL_CHAT_MODEL,
-     temperature = 0.7,
-     max_tokens = 1024,
-     top_p = 0.9,
-     onToken,
-     signal
-   } = options;
-   const { TextStreamer } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
-   const chatPipeline = await getTextGenerationPipeline({
-     model,
-     device: "wasm",
-     dtype: "q4"
-   });
-   class CallbackStreamer extends TextStreamer {
-     constructor(tokenizer, cb) {
-       super(tokenizer, {
-         skip_prompt: true,
-         skip_special_tokens: true
-       });
-       this.cb = cb;
-     }
-     on_finalized_text(text) {
-       if (signal?.aborted) {
-         throw new Error("AbortError");
-       }
-       this.cb(text);
-     }
-   }
-   const streamer = onToken ? new CallbackStreamer(chatPipeline.tokenizer, onToken) : void 0;
-   const output = await chatPipeline(messages, {
-     max_new_tokens: max_tokens,
-     temperature,
-     top_p,
-     streamer,
-     return_full_text: false
-   });
-   return output;
- }
-
- // src/lib/tools/selector.ts
- var DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
- function buildToolSelectionPrompt(userMessage, tools) {
-   const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
-   return `Pick the best tool for the task. Reply with ONLY the tool name.
-
- Available tools:
- ${toolList}
- none (no tool needed)
-
- Task: "${userMessage}"
-
- Best tool:`;
- }
- function extractParams(userMessage, tool) {
-   const params = {};
-   if (!tool.parameters) return params;
-   for (const param of tool.parameters) {
-     if (param.name === "expression" || param.name === "query") {
-       params[param.name] = userMessage;
-     } else if (param.name === "location" || param.name === "city") {
-       const words = userMessage.split(/\s+/);
-       const capitalizedWords = words.filter(
-         (w) => w.length > 1 && w[0] === w[0].toUpperCase()
-       );
-       params[param.name] = capitalizedWords.length > 0 ? capitalizedWords.join(" ") : userMessage;
-     } else if (param.name === "text" || param.name === "input") {
-       params[param.name] = userMessage;
-     } else {
-       params[param.name] = userMessage;
-     }
-   }
-   return params;
- }
- function parseToolSelectionResponse(response, tools, userMessage) {
-   console.log("[Tool Selector] Raw response:", response);
-   const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
-   console.log("[Tool Selector] Parsed tool name:", cleaned);
-   if (cleaned === "none" || cleaned === "null" || cleaned === "") {
-     console.log("[Tool Selector] No tool selected");
-     return { toolSelected: false };
-   }
-   const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
-   if (!selectedTool) {
-     const fuzzyTool = tools.find(
-       (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
-     );
-     if (fuzzyTool) {
-       console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
-       const params2 = extractParams(userMessage, fuzzyTool);
-       return {
-         toolSelected: true,
-         toolName: fuzzyTool.name,
-         parameters: params2,
-         confidence: 0.6
-       };
-     }
-     console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
-     return { toolSelected: false };
-   }
-   const params = extractParams(userMessage, selectedTool);
-   console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
-   return {
-     toolSelected: true,
-     toolName: selectedTool.name,
-     parameters: params,
-     confidence: 0.9
-   };
- }
- async function selectTool(userMessage, tools, options = {}) {
-   const {
-     model = DEFAULT_TOOL_SELECTOR_MODEL,
-     signal,
-     device = "wasm"
-   } = options;
-   if (!tools.length) {
-     return { toolSelected: false };
-   }
-   console.log(
-     `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
-   );
-   try {
-     const selectorPipeline = await getTextGenerationPipeline({
-       model,
-       device,
-       dtype: "q4"
-       // Aggressive quantization for speed
-     });
-     const prompt = buildToolSelectionPrompt(userMessage, tools);
-     const output = await selectorPipeline(prompt, {
-       max_new_tokens: 4,
-       // Just need the tool name
-       temperature: 0,
-       // Deterministic
-       do_sample: false,
-       return_full_text: false
-     });
-     if (signal?.aborted) {
-       return { toolSelected: false };
-     }
-     const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
-     return parseToolSelectionResponse(generatedText, tools, userMessage);
-   } catch (error) {
-     console.error("[Tool Selector] Error:", error);
-     return { toolSelected: false };
-   }
- }
- var preloadPromise = null;
- async function preloadToolSelectorModel(options = {}) {
-   if (preloadPromise) {
-     return preloadPromise;
-   }
-   const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
-   console.log(`[Tool Selector] Preloading model: ${model}`);
-   preloadPromise = getTextGenerationPipeline({
-     model,
-     device,
-     dtype: "q4"
-   }).then(() => {
-     console.log(`[Tool Selector] Model preloaded: ${model}`);
-   }).catch((error) => {
-     console.warn("[Tool Selector] Failed to preload model:", error);
-     preloadPromise = null;
-   });
-   return preloadPromise;
- }
- async function executeTool(tool, params) {
-   try {
-     console.log(
-       `[Tool Selector] Executing tool ${tool.name} with params:`,
-       params
-     );
-     const result = await tool.execute(params);
-     console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
-     return { success: true, result };
-   } catch (error) {
-     const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
-     console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
-     return { success: false, error: errorMessage };
-   }
- }
-
  // src/react/useChat.ts
+ var isReactNative = typeof navigator !== "undefined" && navigator.product === "ReactNative";
+ var webFeatures = null;
+ var webFeaturesPromise = isReactNative ? Promise.resolve(null) : Promise.all([
+   Promise.resolve().then(() => (init_generation(), generation_exports)),
+   Promise.resolve().then(() => (init_constants(), constants_exports)),
+   Promise.resolve().then(() => (init_selector(), selector_exports))
+ ]).then(([generation, constants, selector]) => {
+   webFeatures = {
+     generateLocalChatCompletion: generation.generateLocalChatCompletion,
+     DEFAULT_LOCAL_CHAT_MODEL: constants.DEFAULT_LOCAL_CHAT_MODEL,
+     selectTool: selector.selectTool,
+     executeTool: selector.executeTool,
+     preloadToolSelectorModel: selector.preloadToolSelectorModel,
+     DEFAULT_TOOL_SELECTOR_MODEL: selector.DEFAULT_TOOL_SELECTOR_MODEL
+   };
+   return webFeatures;
+ });
  function useChat(options) {
    const {
      getToken,
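This hunk deletes the eagerly-evaluated copies of the chat and tool modules and replaces them with the __esm-registered versions added earlier in the bundle, resolved through a webFeaturesPromise that short-circuits to null on React Native (detected via navigator.product). A stripped-down sketch of the gating pattern, with hypothetical module and member names:

    // Minimal sketch of a lazy web-feature gate; names are hypothetical.
    const isReactNative =
      typeof navigator !== "undefined" && navigator.product === "ReactNative";

    type WebFeatures = { generate: (prompt: string) => Promise<string> };

    let webFeatures: WebFeatures | null = null;
    const webFeaturesPromise: Promise<WebFeatures | null> = isReactNative
      ? Promise.resolve(null)
      : import("./webOnly").then((mod) => {
          // Cache a synchronously readable reference for later render passes.
          webFeatures = { generate: mod.generate };
          return webFeatures;
        });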
@@ -48161,13 +48251,14 @@ function useChat(options) {
      onFinish,
      onError,
      chatProvider = "api",
-     localModel = DEFAULT_LOCAL_CHAT_MODEL,
+     localModel,
      tools,
-     toolSelectorModel = DEFAULT_TOOL_SELECTOR_MODEL,
+     toolSelectorModel,
      onToolExecution
    } = options || {};
    const [isLoading, setIsLoading] = (0, import_react.useState)(false);
    const [isSelectingTool, setIsSelectingTool] = (0, import_react.useState)(false);
+   const [webFeaturesLoaded, setWebFeaturesLoaded] = (0, import_react.useState)(false);
    const abortControllerRef = (0, import_react.useRef)(null);
    const stop = (0, import_react.useCallback)(() => {
      if (abortControllerRef.current) {
@@ -48184,10 +48275,17 @@ function useChat(options) {
      };
    }, []);
    (0, import_react.useEffect)(() => {
-     if (tools && tools.length > 0) {
-       preloadToolSelectorModel({ model: toolSelectorModel });
+     webFeaturesPromise?.then((loaded) => {
+       if (loaded) setWebFeaturesLoaded(true);
+     });
+   }, []);
+   (0, import_react.useEffect)(() => {
+     if (!isReactNative && webFeaturesLoaded && webFeatures && tools && tools.length > 0) {
+       webFeatures.preloadToolSelectorModel({
+         model: toolSelectorModel || webFeatures.DEFAULT_TOOL_SELECTOR_MODEL
+       });
      }
-   }, [tools, toolSelectorModel]);
+   }, [tools, toolSelectorModel, webFeaturesLoaded]);
    const sendMessage = (0, import_react.useCallback)(
      async ({
        messages,
@@ -48208,22 +48306,27 @@ function useChat(options) {
      setIsLoading(true);
      let toolExecutionResult;
      let messagesWithToolContext = messages;
-     if (runTools && tools && tools.length > 0) {
+     const canRunTools = !isReactNative && webFeaturesLoaded && webFeatures !== null && runTools && tools && tools.length > 0;
+     if (canRunTools && webFeatures) {
        const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
        if (lastUserMessage?.content) {
          setIsSelectingTool(true);
          const contentString = lastUserMessage.content?.map((part) => part.text || "").join("") || "";
          try {
-           const selectionResult = await selectTool(contentString, tools, {
-             model: toolSelectorModel,
-             signal: abortController.signal
-           });
+           const selectionResult = await webFeatures.selectTool(
+             contentString,
+             tools,
+             {
+               model: toolSelectorModel || webFeatures.DEFAULT_TOOL_SELECTOR_MODEL,
+               signal: abortController.signal
+             }
+           );
            if (selectionResult.toolSelected && selectionResult.toolName) {
              const selectedTool = tools.find(
                (t) => t.name === selectionResult.toolName
              );
              if (selectedTool) {
-               const execResult = await executeTool(
+               const execResult = await webFeatures.executeTool(
                  selectedTool,
                  selectionResult.parameters || {}
                );
@@ -48279,13 +48382,23 @@ Please inform the user about this issue and try to help them alternatively.`
      }
      try {
        if (chatProvider === "local") {
+         if (isReactNative || !webFeaturesLoaded || !webFeatures) {
+           const errorMsg = 'Local chat provider is not available in React Native. Use chatProvider: "api" instead.';
+           setIsLoading(false);
+           if (onError) onError(new Error(errorMsg));
+           return {
+             data: null,
+             error: errorMsg,
+             toolExecution: toolExecutionResult
+           };
+         }
          let accumulatedContent = "";
-         const usedModel = localModel;
+         const usedModel = localModel || webFeatures.DEFAULT_LOCAL_CHAT_MODEL;
          const formattedMessages = messagesWithToolContext.map((m) => ({
            role: m.role || "user",
            content: m.content?.map((p) => p.text || "").join("") || ""
          }));
-         await generateLocalChatCompletion(formattedMessages, {
+         await webFeatures.generateLocalChatCompletion(formattedMessages, {
            model: usedModel,
            signal: abortController.signal,
            onToken: (token) => {
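With the guard above, chatProvider: "local" fails fast (via onError plus an error result) when the web-only bundle is unavailable, and otherwise falls back to DEFAULT_LOCAL_CHAT_MODEL when no localModel is supplied. A hedged hook-usage sketch; the option names come from this diff, while the import path and returned fields are inferred from the hook internals shown above:

    // Illustrative only: the import path and return shape are assumptions
    // inferred from the hook body visible in this diff.
    import { useChat } from "@reverbia/sdk/react";

    function useLocalChat() {
      const { sendMessage, isLoading } = useChat({
        chatProvider: "local",
        // Omitting localModel falls back to DEFAULT_LOCAL_CHAT_MODEL
        // ("onnx-community/Qwen2.5-0.5B-Instruct").
        onError: (err) => console.error("[chat]", err),
      });
      // sendMessage resolves to { data, error, toolExecution }, matching the
      // early-return object added in this hunk.
      return { sendMessage, isLoading };
    }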
@@ -48469,7 +48582,8 @@ Please inform the user about this issue and try to help them alternatively.`
      localModel,
      tools,
      toolSelectorModel,
-     onToolExecution
+     onToolExecution,
+     webFeaturesLoaded
    ]
  );
  return {
@@ -49671,6 +49785,9 @@ var extractConversationContext = (messages, maxMessages = 3) => {
    const userMessages = messages.filter((msg) => msg.role === "user").slice(-maxMessages).map((msg) => msg.content).join(" ");
    return userMessages.trim();
  };
+
+ // src/react/index.ts
+ init_selector();
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    DEFAULT_TOOL_SELECTOR_MODEL,