@reverbia/sdk 1.0.0-next.20251202092727 → 1.0.0-next.20251202095402

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
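In short, this release adds experimental client-side tool calling to the React SDK: a new src/lib/tools/selector.ts module (exporting DEFAULT_TOOL_SELECTOR_MODEL, selectTool, and executeTool), new useChat options (tools, toolSelectorModel, onToolExecution), an isSelectingTool flag and runTools send option, a toolExecution field on sendMessage results, and a refactor of the local chat pipeline from a per-call fp16 pipeline in generation.ts to a shared q4 pipeline cache in pipeline.ts. A minimal usage sketch, adapted from the JSDoc example shipped in the new declarations below (the package-root import path and getAuthToken are assumptions; exact entry points are not visible in this diff):

import { useChat } from "@reverbia/sdk"; // import path assumed

const { isLoading, isSelectingTool, sendMessage } = useChat({
  getToken: async () => await getAuthToken(), // your auth provider
  tools: [
    {
      name: "get_weather",
      description: "Get the current weather for a location",
      parameters: [
        { name: "location", type: "string", description: "City name", required: true }
      ],
      execute: async ({ location }) => {
        // Your weather API call here
        return { temperature: 72, condition: "sunny" };
      }
    }
  ],
  onToolExecution: (result) => console.log("Tool executed:", result.toolName, result.result)
});

const result = await sendMessage({
  messages: [{ role: "user", content: "What is the weather in Paris?" }],
  model: "gpt-4o-mini"
});
if (result.toolExecution) {
  console.log("Tool was called:", result.toolExecution);
}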
@@ -46669,12 +46669,15 @@ ${fake_token_around_image}${global_img_token}` + image_token.repeat(image_seq_le
  // src/react/index.ts
  var index_exports = {};
  __export(index_exports, {
+ DEFAULT_TOOL_SELECTOR_MODEL: () => DEFAULT_TOOL_SELECTOR_MODEL,
  createMemoryContextSystemMessage: () => createMemoryContextSystemMessage,
  decryptData: () => decryptData,
  decryptDataBytes: () => decryptDataBytes,
  encryptData: () => encryptData,
+ executeTool: () => executeTool,
  extractConversationContext: () => extractConversationContext,
  formatMemoriesForChat: () => formatMemoriesForChat,
+ selectTool: () => selectTool,
  useChat: () => useChat,
  useEncryption: () => useEncryption,
  useMemory: () => useMemory,
@@ -47503,9 +47506,32 @@ var client = createClient(createClientConfig(createConfig()));
  // src/lib/chat/constants.ts
  var DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";

- // src/lib/chat/generation.ts
- var chatPipeline = null;
+ // src/lib/chat/pipeline.ts
+ var sharedPipeline = null;
  var currentModel = null;
+ var currentDevice = null;
+ async function getTextGenerationPipeline(options) {
+ const { model, device = "wasm", dtype = "q4" } = options;
+ if (sharedPipeline && currentModel === model && currentDevice === device) {
+ return sharedPipeline;
+ }
+ const { pipeline, env: env3 } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
+ env3.allowLocalModels = false;
+ if (env3.backends?.onnx) {
+ env3.backends.onnx.logLevel = "fatal";
+ }
+ console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
+ sharedPipeline = await pipeline("text-generation", model, {
+ dtype,
+ device
+ });
+ currentModel = model;
+ currentDevice = device;
+ console.log(`[Pipeline] Model loaded: ${model}`);
+ return sharedPipeline;
+ }
+
+ // src/lib/chat/generation.ts
  async function generateLocalChatCompletion(messages, options = {}) {
  const {
  model = DEFAULT_LOCAL_CHAT_MODEL,
@@ -47515,13 +47541,12 @@ async function generateLocalChatCompletion(messages, options = {}) {
  onToken,
  signal
  } = options;
- const { pipeline, TextStreamer } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
- if (!chatPipeline || currentModel !== model) {
- chatPipeline = await pipeline("text-generation", model, {
- dtype: "fp16"
- });
- currentModel = model;
- }
+ const { TextStreamer } = await Promise.resolve().then(() => (init_transformers_node(), transformers_node_exports));
+ const chatPipeline = await getTextGenerationPipeline({
+ model,
+ device: "wasm",
+ dtype: "q4"
+ });
  class CallbackStreamer extends TextStreamer {
  constructor(tokenizer, cb) {
  super(tokenizer, {
@@ -47548,6 +47573,148 @@ async function generateLocalChatCompletion(messages, options = {}) {
  return output;
  }

+ // src/lib/tools/selector.ts
+ var DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+ function buildToolSelectionPrompt(userMessage, tools) {
+ const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
+ return `Pick the best tool for the task. Reply with ONLY the tool name.
+
+ Available tools:
+ ${toolList}
+ none (no tool needed)
+
+ Task: "${userMessage}"
+
+ Best tool:`;
+ }
+ function extractParams(userMessage, tool) {
+ const params = {};
+ if (!tool.parameters) return params;
+ for (const param of tool.parameters) {
+ if (param.name === "expression" || param.name === "query") {
+ params[param.name] = userMessage;
+ } else if (param.name === "location" || param.name === "city") {
+ const words = userMessage.split(/\s+/);
+ const capitalizedWords = words.filter(
+ (w) => w.length > 1 && w[0] === w[0].toUpperCase()
+ );
+ params[param.name] = capitalizedWords.length > 0 ? capitalizedWords.join(" ") : userMessage;
+ } else if (param.name === "text" || param.name === "input") {
+ params[param.name] = userMessage;
+ } else {
+ params[param.name] = userMessage;
+ }
+ }
+ return params;
+ }
+ function parseToolSelectionResponse(response, tools, userMessage) {
+ console.log("[Tool Selector] Raw response:", response);
+ const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
+ console.log("[Tool Selector] Parsed tool name:", cleaned);
+ if (cleaned === "none" || cleaned === "null" || cleaned === "") {
+ console.log("[Tool Selector] No tool selected");
+ return { toolSelected: false };
+ }
+ const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
+ if (!selectedTool) {
+ const fuzzyTool = tools.find(
+ (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
+ );
+ if (fuzzyTool) {
+ console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
+ const params2 = extractParams(userMessage, fuzzyTool);
+ return {
+ toolSelected: true,
+ toolName: fuzzyTool.name,
+ parameters: params2,
+ confidence: 0.6
+ };
+ }
+ console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
+ return { toolSelected: false };
+ }
+ const params = extractParams(userMessage, selectedTool);
+ console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
+ return {
+ toolSelected: true,
+ toolName: selectedTool.name,
+ parameters: params,
+ confidence: 0.9
+ };
+ }
+ async function selectTool(userMessage, tools, options = {}) {
+ const {
+ model = DEFAULT_TOOL_SELECTOR_MODEL,
+ signal,
+ device = "wasm"
+ } = options;
+ if (!tools.length) {
+ return { toolSelected: false };
+ }
+ console.log(
+ `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
+ );
+ try {
+ const selectorPipeline = await getTextGenerationPipeline({
+ model,
+ device,
+ dtype: "q4"
+ // Aggressive quantization for speed
+ });
+ const prompt = buildToolSelectionPrompt(userMessage, tools);
+ const output = await selectorPipeline(prompt, {
+ max_new_tokens: 4,
+ // Just need the tool name
+ temperature: 0,
+ // Deterministic
+ do_sample: false,
+ return_full_text: false
+ });
+ if (signal?.aborted) {
+ return { toolSelected: false };
+ }
+ const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+ return parseToolSelectionResponse(generatedText, tools, userMessage);
+ } catch (error) {
+ console.error("[Tool Selector] Error:", error);
+ return { toolSelected: false };
+ }
+ }
+ var preloadPromise = null;
+ async function preloadToolSelectorModel(options = {}) {
+ if (preloadPromise) {
+ return preloadPromise;
+ }
+ const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
+ console.log(`[Tool Selector] Preloading model: ${model}`);
+ preloadPromise = getTextGenerationPipeline({
+ model,
+ device,
+ dtype: "q4"
+ }).then(() => {
+ console.log(`[Tool Selector] Model preloaded: ${model}`);
+ }).catch((error) => {
+ console.warn("[Tool Selector] Failed to preload model:", error);
+ preloadPromise = null;
+ });
+ return preloadPromise;
+ }
+ async function executeTool(tool, params) {
+ try {
+ console.log(
+ `[Tool Selector] Executing tool ${tool.name} with params:`,
+ params
+ );
+ const result = await tool.execute(params);
+ console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
+ return { success: true, result };
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
+ console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
+ return { success: false, error: errorMessage };
+ }
+ }
+
  // src/react/useChat.ts
  function useChat(options) {
  const {
@@ -47557,9 +47724,13 @@ function useChat(options) {
  onFinish,
  onError,
  chatProvider = "api",
- localModel = DEFAULT_LOCAL_CHAT_MODEL
+ localModel = DEFAULT_LOCAL_CHAT_MODEL,
+ tools,
+ toolSelectorModel = DEFAULT_TOOL_SELECTOR_MODEL,
+ onToolExecution
  } = options || {};
  const [isLoading, setIsLoading] = (0, import_react.useState)(false);
+ const [isSelectingTool, setIsSelectingTool] = (0, import_react.useState)(false);
  const abortControllerRef = (0, import_react.useRef)(null);
  const stop = (0, import_react.useCallback)(() => {
  if (abortControllerRef.current) {
@@ -47575,11 +47746,17 @@ function useChat(options) {
  }
  };
  }, []);
+ (0, import_react.useEffect)(() => {
+ if (tools && tools.length > 0) {
+ preloadToolSelectorModel({ model: toolSelectorModel });
+ }
+ }, [tools, toolSelectorModel]);
  const sendMessage = (0, import_react.useCallback)(
  async ({
  messages,
  model,
- onData
+ onData,
+ runTools = true
  }) => {
  if (!messages?.length) {
  const errorMsg = "messages are required to call sendMessage.";
@@ -47592,11 +47769,75 @@ function useChat(options) {
  const abortController = new AbortController();
  abortControllerRef.current = abortController;
  setIsLoading(true);
+ let toolExecutionResult;
+ let messagesWithToolContext = messages;
+ if (runTools && tools && tools.length > 0) {
+ const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
+ if (lastUserMessage?.content) {
+ setIsSelectingTool(true);
+ try {
+ const selectionResult = await selectTool(
+ lastUserMessage.content,
+ tools,
+ {
+ model: toolSelectorModel,
+ signal: abortController.signal
+ }
+ );
+ if (selectionResult.toolSelected && selectionResult.toolName) {
+ const selectedTool = tools.find(
+ (t) => t.name === selectionResult.toolName
+ );
+ if (selectedTool) {
+ const execResult = await executeTool(
+ selectedTool,
+ selectionResult.parameters || {}
+ );
+ toolExecutionResult = {
+ toolName: selectionResult.toolName,
+ success: execResult.success,
+ result: execResult.result,
+ error: execResult.error
+ };
+ if (onToolExecution) {
+ onToolExecution(toolExecutionResult);
+ }
+ if (toolExecutionResult.success && toolExecutionResult.result !== void 0) {
+ const toolResultContext = {
+ role: "system",
+ content: `Tool "${toolExecutionResult.toolName}" was executed with the following result:
+ ${JSON.stringify(
+ toolExecutionResult.result,
+ null,
+ 2
+ )}
+
+ Use this information to respond to the user's request.`
+ };
+ messagesWithToolContext = [...messages, toolResultContext];
+ } else if (toolExecutionResult.error) {
+ const toolErrorContext = {
+ role: "system",
+ content: `Tool "${toolExecutionResult.toolName}" was executed but encountered an error: ${toolExecutionResult.error}
+
+ Please inform the user about this issue and try to help them alternatively.`
+ };
+ messagesWithToolContext = [...messages, toolErrorContext];
+ }
+ }
+ }
+ } catch (err) {
+ console.warn("Tool selection error:", err);
+ } finally {
+ setIsSelectingTool(false);
+ }
+ }
+ }
  try {
  if (chatProvider === "local") {
  let accumulatedContent = "";
  const usedModel = localModel;
- const formattedMessages = messages.map((m) => ({
+ const formattedMessages = messagesWithToolContext.map((m) => ({
  role: m.role || "user",
  content: m.content || ""
  }));
@@ -47633,30 +47874,46 @@ function useChat(options) {
  if (onFinish) {
  onFinish(completion);
  }
- return { data: completion, error: null };
+ return {
+ data: completion,
+ error: null,
+ toolExecution: toolExecutionResult
+ };
  } else {
  if (!model) {
  const errorMsg = "model is required to call sendMessage.";
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  if (!getToken) {
  const errorMsg = "Token getter function is required.";
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  const token = await getToken();
  if (!token) {
  const errorMsg = "No access token available.";
  setIsLoading(false);
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  const sseResult = await client.sse.post({
  baseUrl,
  url: "/api/v1/chat/completions",
  body: {
- messages,
+ messages: messagesWithToolContext,
  model,
  stream: true
  },
@@ -47726,12 +47983,20 @@ function useChat(options) {
  if (onFinish) {
  onFinish(completion);
  }
- return { data: completion, error: null };
+ return {
+ data: completion,
+ error: null,
+ toolExecution: toolExecutionResult
+ };
  }
  } catch (err) {
  if (err instanceof Error && err.name === "AbortError") {
  setIsLoading(false);
- return { data: null, error: "Request aborted" };
+ return {
+ data: null,
+ error: "Request aborted",
+ toolExecution: toolExecutionResult
+ };
  }
  const errorMsg = err instanceof Error ? err.message : "Failed to send message.";
  const errorObj = err instanceof Error ? err : new Error(errorMsg);
@@ -47739,7 +48004,11 @@ function useChat(options) {
  if (onError) {
  onError(errorObj);
  }
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  } finally {
  if (abortControllerRef.current === abortController) {
  abortControllerRef.current = null;
@@ -47753,11 +48022,15 @@ function useChat(options) {
  onFinish,
  onError,
  chatProvider,
- localModel
+ localModel,
+ tools,
+ toolSelectorModel,
+ onToolExecution
  ]
  );
  return {
  isLoading,
+ isSelectingTool,
  sendMessage,
  stop
  };
@@ -48680,12 +48953,15 @@ var extractConversationContext = (messages, maxMessages = 3) => {
  };
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
+ DEFAULT_TOOL_SELECTOR_MODEL,
  createMemoryContextSystemMessage,
  decryptData,
  decryptDataBytes,
  encryptData,
+ executeTool,
  extractConversationContext,
  formatMemoriesForChat,
+ selectTool,
  useChat,
  useEncryption,
  useMemory,
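The hunks above are the CommonJS bundle (dist/index.cjs, the package.json "main" entry); the matching ESM bundle (dist/index.mjs) appears after the type declarations below. The new exports can also be driven without useChat. A minimal standalone sketch of the select-then-execute flow, assuming a package-root import path (the exact entry-point mapping is not visible in this diff):

import { selectTool, executeTool, type ClientTool } from "@reverbia/sdk"; // import path assumed

const tools: ClientTool[] = [{
  name: "get_weather",
  description: "Get the current weather for a location",
  parameters: [{ name: "location", type: "string", description: "City name", required: true }],
  // Stand-in implementation; a real tool would call a weather API.
  execute: async ({ location }) => ({ location, temperature: 72, condition: "sunny" }),
}];

async function run(userMessage: string) {
  const selection = await selectTool(userMessage, tools); // small in-browser model picks a tool
  if (!selection.toolSelected || !selection.toolName) return;
  const tool = tools.find((t) => t.name === selection.toolName);
  if (!tool) return;
  const outcome = await executeTool(tool, selection.parameters ?? {});
  console.log(outcome); // { success: true, result: ... } or { success: false, error: ... }
}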
@@ -175,6 +175,64 @@ type LlmapiModelTopProvider = {
  */
  type LlmapiRole = string;

+ /**
+ * Parameter definition for a client-side tool
+ */
+ interface ToolParameter {
+ /** Parameter name */
+ name: string;
+ /** Parameter type (string, number, boolean, etc.) */
+ type: "string" | "number" | "boolean" | "object" | "array";
+ /** Human-readable description of the parameter */
+ description: string;
+ /** Whether this parameter is required */
+ required?: boolean;
+ /** Default value if not provided */
+ default?: unknown;
+ }
+ /**
+ * Definition for a client-side tool that can be executed in the browser
+ */
+ interface ClientTool {
+ /** Unique identifier for the tool */
+ name: string;
+ /** Human-readable description of what the tool does */
+ description: string;
+ /** Parameters the tool accepts */
+ parameters?: ToolParameter[];
+ /**
+ * The function to execute when the tool is called.
+ * Receives extracted parameters and returns a result.
+ */
+ execute: (params: Record<string, unknown>) => Promise<unknown> | unknown;
+ }
+ /**
+ * Result of a tool selection operation
+ */
+ interface ToolSelectionResult {
+ /** Whether a tool was selected */
+ toolSelected: boolean;
+ /** Name of the selected tool (if any) */
+ toolName?: string;
+ /** Extracted parameters for the tool */
+ parameters?: Record<string, unknown>;
+ /** Confidence score (0-1) of the selection */
+ confidence?: number;
+ }
+ /**
+ * Result of executing a client-side tool
+ */
+ interface ToolExecutionResult {
+ /** Name of the tool that was executed */
+ toolName: string;
+ /** Whether execution was successful */
+ success: boolean;
+ /** The result returned by the tool */
+ result?: unknown;
+ /** Error message if execution failed */
+ error?: string;
+ }
+
  type SendMessageArgs = {
  messages: LlmapiMessage[];
  model?: string;
@@ -185,13 +243,20 @@ type SendMessageArgs = {
  * @param chunk - The content delta from the current chunk
  */
  onData?: (chunk: string) => void;
+ /**
+ * Whether to run tool selection for this message.
+ * Defaults to true if tools are configured.
+ */
+ runTools?: boolean;
  };
  type SendMessageResult = {
  data: LlmapiChatCompletionResponse;
  error: null;
+ toolExecution?: ToolExecutionResult;
  } | {
  data: null;
  error: string;
+ toolExecution?: ToolExecutionResult;
  };
  type UseChatOptions = {
  getToken?: () => Promise<string | null>;
@@ -226,9 +291,25 @@ type UseChatOptions = {
  * Default is "ibm-granite/Granite-4.0-Nano-WebGPU"
  */
  localModel?: string;
+ /**
+ * Client-side tools that can be executed in the browser.
+ * When provided, the hook will use a local model to determine
+ * if any tool should be called based on the user's message.
+ */
+ tools?: ClientTool[];
+ /**
+ * The model to use for tool selection.
+ * Default is "onnx-community/granite-4.0-350m-ONNX-web"
+ */
+ toolSelectorModel?: string;
+ /**
+ * Callback function to be called when a tool is executed.
+ */
+ onToolExecution?: (result: ToolExecutionResult) => void;
  };
  type UseChatResult = {
  isLoading: boolean;
+ isSelectingTool: boolean;
  sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
  /**
  * Aborts the current streaming request if one is in progress.
@@ -257,47 +338,56 @@ type UseChatResult = {
  * is encountered. Note: This is NOT called for aborted requests (see `stop()`).
  * @param options.chatProvider - The provider to use for chat completions (default: "api").
  * @param options.localModel - The model to use for local chat completions.
+ * @param options.tools - Client-side tools that can be executed in the browser.
+ * @param options.toolSelectorModel - The model to use for tool selection.
+ * @param options.onToolExecution - Callback function to be called when a tool is executed.
  *
  * @returns An object containing:
  * - `isLoading`: A boolean indicating whether a request is currently in progress
+ * - `isSelectingTool`: A boolean indicating whether tool selection is in progress
  * - `sendMessage`: An async function to send chat messages
  * - `stop`: A function to abort the current request
  *
  * @example
  * ```tsx
+ * // Basic usage with API
  * const { isLoading, sendMessage, stop } = useChat({
- * getToken: async () => {
- * // Get your auth token from your auth provider
- * return await getAuthToken();
- * },
- * onFinish: (response) => {
- * console.log("Chat finished:", response);
- * },
- * onError: (error) => {
- * // This is only called for unexpected errors, not aborts
- * console.error("Chat error:", error);
+ * getToken: async () => await getAuthToken(),
+ * onFinish: (response) => console.log("Chat finished:", response),
+ * onError: (error) => console.error("Chat error:", error)
+ * });
+ *
+ * // With client-side tools
+ * const { isLoading, isSelectingTool, sendMessage } = useChat({
+ * getToken: async () => await getAuthToken(),
+ * tools: [
+ * {
+ * name: "get_weather",
+ * description: "Get the current weather for a location",
+ * parameters: [
+ * { name: "location", type: "string", description: "City name", required: true }
+ * ],
+ * execute: async ({ location }) => {
+ * // Your weather API call here
+ * return { temperature: 72, condition: "sunny" };
+ * }
+ * }
+ * ],
+ * onToolExecution: (result) => {
+ * console.log("Tool executed:", result.toolName, result.result);
  * }
  * });
  *
  * const handleSend = async () => {
  * const result = await sendMessage({
- * messages: [{ role: 'user', content: 'Hello!' }],
+ * messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
  * model: 'gpt-4o-mini'
  * });
  *
- * if (result.error) {
- * if (result.error === "Request aborted") {
- * console.log("Request was aborted");
- * } else {
- * console.error("Error:", result.error);
- * }
- * } else {
- * console.log("Success:", result.data);
+ * if (result.toolExecution) {
+ * console.log("Tool was called:", result.toolExecution);
  * }
  * };
- *
- * // To stop generation:
- * // stop();
  * ```
  */
  declare function useChat(options?: UseChatOptions): UseChatResult;
@@ -469,4 +559,26 @@ declare const extractConversationContext: (messages: Array<{
  content: string;
  }>, maxMessages?: number) => string;

- export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory, useModels };
+ declare const DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+ interface ToolSelectorOptions {
+ /** Model to use for tool selection. Defaults to Xenova/LaMini-GPT-124M */
+ model?: string;
+ /** Abort signal */
+ signal?: AbortSignal;
+ /** Device to use (webgpu, wasm, cpu). Defaults to wasm */
+ device?: "webgpu" | "wasm" | "cpu";
+ }
+ /**
+ * Select a tool based on user message using an in-browser model
+ */
+ declare function selectTool(userMessage: string, tools: ClientTool[], options?: ToolSelectorOptions): Promise<ToolSelectionResult>;
+ /**
+ * Execute a client-side tool with the given parameters
+ */
+ declare function executeTool(tool: ClientTool, params: Record<string, unknown>): Promise<{
+ success: boolean;
+ result?: unknown;
+ error?: string;
+ }>;
+
+ export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useMemory, useModels };
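Two details in these declarations are worth flagging. The JSDoc for toolSelectorModel still gives the default as "onnx-community/granite-4.0-350m-ONNX-web", but the shipped DEFAULT_TOOL_SELECTOR_MODEL constant is "Xenova/LaMini-GPT-124M". And parameter extraction is heuristic rather than model-driven: per extractParams in the bundle above, most parameter names receive the entire user message, while location and city keep only the capitalized words. A runnable restatement of that location heuristic (illustrative only; it mirrors the bundled logic rather than calling it):

// Mirrors the location/city branch of extractParams.
const userMessage = "What is the weather in Paris?";
const capitalized = userMessage
  .split(/\s+/)
  .filter((w) => w.length > 1 && w[0] === w[0].toUpperCase());
// Sentence-case words pass the filter too, so this logs "What Paris?" —
// tools taking location/city parameters should expect noisy values.
console.log(capitalized.length > 0 ? capitalized.join(" ") : userMessage);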
@@ -821,9 +821,32 @@ var client = createClient(createClientConfig(createConfig()));
  // src/lib/chat/constants.ts
  var DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";

- // src/lib/chat/generation.ts
- var chatPipeline = null;
+ // src/lib/chat/pipeline.ts
+ var sharedPipeline = null;
  var currentModel = null;
+ var currentDevice = null;
+ async function getTextGenerationPipeline(options) {
+ const { model, device = "wasm", dtype = "q4" } = options;
+ if (sharedPipeline && currentModel === model && currentDevice === device) {
+ return sharedPipeline;
+ }
+ const { pipeline, env } = await import("./transformers.node-BSHUG7OY.mjs");
+ env.allowLocalModels = false;
+ if (env.backends?.onnx) {
+ env.backends.onnx.logLevel = "fatal";
+ }
+ console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
+ sharedPipeline = await pipeline("text-generation", model, {
+ dtype,
+ device
+ });
+ currentModel = model;
+ currentDevice = device;
+ console.log(`[Pipeline] Model loaded: ${model}`);
+ return sharedPipeline;
+ }
+
+ // src/lib/chat/generation.ts
  async function generateLocalChatCompletion(messages, options = {}) {
  const {
  model = DEFAULT_LOCAL_CHAT_MODEL,
@@ -833,13 +856,12 @@ async function generateLocalChatCompletion(messages, options = {}) {
  onToken,
  signal
  } = options;
- const { pipeline, TextStreamer } = await import("./transformers.node-BSHUG7OY.mjs");
- if (!chatPipeline || currentModel !== model) {
- chatPipeline = await pipeline("text-generation", model, {
- dtype: "fp16"
- });
- currentModel = model;
- }
+ const { TextStreamer } = await import("./transformers.node-BSHUG7OY.mjs");
+ const chatPipeline = await getTextGenerationPipeline({
+ model,
+ device: "wasm",
+ dtype: "q4"
+ });
  class CallbackStreamer extends TextStreamer {
  constructor(tokenizer, cb) {
  super(tokenizer, {
@@ -866,6 +888,148 @@ async function generateLocalChatCompletion(messages, options = {}) {
  return output;
  }

+ // src/lib/tools/selector.ts
+ var DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+ function buildToolSelectionPrompt(userMessage, tools) {
+ const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
+ return `Pick the best tool for the task. Reply with ONLY the tool name.
+
+ Available tools:
+ ${toolList}
+ none (no tool needed)
+
+ Task: "${userMessage}"
+
+ Best tool:`;
+ }
+ function extractParams(userMessage, tool) {
+ const params = {};
+ if (!tool.parameters) return params;
+ for (const param of tool.parameters) {
+ if (param.name === "expression" || param.name === "query") {
+ params[param.name] = userMessage;
+ } else if (param.name === "location" || param.name === "city") {
+ const words = userMessage.split(/\s+/);
+ const capitalizedWords = words.filter(
+ (w) => w.length > 1 && w[0] === w[0].toUpperCase()
+ );
+ params[param.name] = capitalizedWords.length > 0 ? capitalizedWords.join(" ") : userMessage;
+ } else if (param.name === "text" || param.name === "input") {
+ params[param.name] = userMessage;
+ } else {
+ params[param.name] = userMessage;
+ }
+ }
+ return params;
+ }
+ function parseToolSelectionResponse(response, tools, userMessage) {
+ console.log("[Tool Selector] Raw response:", response);
+ const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
+ console.log("[Tool Selector] Parsed tool name:", cleaned);
+ if (cleaned === "none" || cleaned === "null" || cleaned === "") {
+ console.log("[Tool Selector] No tool selected");
+ return { toolSelected: false };
+ }
+ const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
+ if (!selectedTool) {
+ const fuzzyTool = tools.find(
+ (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
+ );
+ if (fuzzyTool) {
+ console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
+ const params2 = extractParams(userMessage, fuzzyTool);
+ return {
+ toolSelected: true,
+ toolName: fuzzyTool.name,
+ parameters: params2,
+ confidence: 0.6
+ };
+ }
+ console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
+ return { toolSelected: false };
+ }
+ const params = extractParams(userMessage, selectedTool);
+ console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
+ return {
+ toolSelected: true,
+ toolName: selectedTool.name,
+ parameters: params,
+ confidence: 0.9
+ };
+ }
+ async function selectTool(userMessage, tools, options = {}) {
+ const {
+ model = DEFAULT_TOOL_SELECTOR_MODEL,
+ signal,
+ device = "wasm"
+ } = options;
+ if (!tools.length) {
+ return { toolSelected: false };
+ }
+ console.log(
+ `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
+ );
+ try {
+ const selectorPipeline = await getTextGenerationPipeline({
+ model,
+ device,
+ dtype: "q4"
+ // Aggressive quantization for speed
+ });
+ const prompt = buildToolSelectionPrompt(userMessage, tools);
+ const output = await selectorPipeline(prompt, {
+ max_new_tokens: 4,
+ // Just need the tool name
+ temperature: 0,
+ // Deterministic
+ do_sample: false,
+ return_full_text: false
+ });
+ if (signal?.aborted) {
+ return { toolSelected: false };
+ }
+ const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+ return parseToolSelectionResponse(generatedText, tools, userMessage);
+ } catch (error) {
+ console.error("[Tool Selector] Error:", error);
+ return { toolSelected: false };
+ }
+ }
+ var preloadPromise = null;
+ async function preloadToolSelectorModel(options = {}) {
+ if (preloadPromise) {
+ return preloadPromise;
+ }
+ const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
+ console.log(`[Tool Selector] Preloading model: ${model}`);
+ preloadPromise = getTextGenerationPipeline({
+ model,
+ device,
+ dtype: "q4"
+ }).then(() => {
+ console.log(`[Tool Selector] Model preloaded: ${model}`);
+ }).catch((error) => {
+ console.warn("[Tool Selector] Failed to preload model:", error);
+ preloadPromise = null;
+ });
+ return preloadPromise;
+ }
+ async function executeTool(tool, params) {
+ try {
+ console.log(
+ `[Tool Selector] Executing tool ${tool.name} with params:`,
+ params
+ );
+ const result = await tool.execute(params);
+ console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
+ return { success: true, result };
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
+ console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
+ return { success: false, error: errorMessage };
+ }
+ }
+
  // src/react/useChat.ts
  function useChat(options) {
  const {
@@ -875,9 +1039,13 @@ function useChat(options) {
  onFinish,
  onError,
  chatProvider = "api",
- localModel = DEFAULT_LOCAL_CHAT_MODEL
+ localModel = DEFAULT_LOCAL_CHAT_MODEL,
+ tools,
+ toolSelectorModel = DEFAULT_TOOL_SELECTOR_MODEL,
+ onToolExecution
  } = options || {};
  const [isLoading, setIsLoading] = useState(false);
+ const [isSelectingTool, setIsSelectingTool] = useState(false);
  const abortControllerRef = useRef(null);
  const stop = useCallback(() => {
  if (abortControllerRef.current) {
@@ -893,11 +1061,17 @@ function useChat(options) {
  }
  };
  }, []);
+ useEffect(() => {
+ if (tools && tools.length > 0) {
+ preloadToolSelectorModel({ model: toolSelectorModel });
+ }
+ }, [tools, toolSelectorModel]);
  const sendMessage = useCallback(
  async ({
  messages,
  model,
- onData
+ onData,
+ runTools = true
  }) => {
  if (!messages?.length) {
  const errorMsg = "messages are required to call sendMessage.";
@@ -910,11 +1084,75 @@ function useChat(options) {
  const abortController = new AbortController();
  abortControllerRef.current = abortController;
  setIsLoading(true);
+ let toolExecutionResult;
+ let messagesWithToolContext = messages;
+ if (runTools && tools && tools.length > 0) {
+ const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
+ if (lastUserMessage?.content) {
+ setIsSelectingTool(true);
+ try {
+ const selectionResult = await selectTool(
+ lastUserMessage.content,
+ tools,
+ {
+ model: toolSelectorModel,
+ signal: abortController.signal
+ }
+ );
+ if (selectionResult.toolSelected && selectionResult.toolName) {
+ const selectedTool = tools.find(
+ (t) => t.name === selectionResult.toolName
+ );
+ if (selectedTool) {
+ const execResult = await executeTool(
+ selectedTool,
+ selectionResult.parameters || {}
+ );
+ toolExecutionResult = {
+ toolName: selectionResult.toolName,
+ success: execResult.success,
+ result: execResult.result,
+ error: execResult.error
+ };
+ if (onToolExecution) {
+ onToolExecution(toolExecutionResult);
+ }
+ if (toolExecutionResult.success && toolExecutionResult.result !== void 0) {
+ const toolResultContext = {
+ role: "system",
+ content: `Tool "${toolExecutionResult.toolName}" was executed with the following result:
+ ${JSON.stringify(
+ toolExecutionResult.result,
+ null,
+ 2
+ )}
+
+ Use this information to respond to the user's request.`
+ };
+ messagesWithToolContext = [...messages, toolResultContext];
+ } else if (toolExecutionResult.error) {
+ const toolErrorContext = {
+ role: "system",
+ content: `Tool "${toolExecutionResult.toolName}" was executed but encountered an error: ${toolExecutionResult.error}
+
+ Please inform the user about this issue and try to help them alternatively.`
+ };
+ messagesWithToolContext = [...messages, toolErrorContext];
+ }
+ }
+ }
+ } catch (err) {
+ console.warn("Tool selection error:", err);
+ } finally {
+ setIsSelectingTool(false);
+ }
+ }
+ }
  try {
  if (chatProvider === "local") {
  let accumulatedContent = "";
  const usedModel = localModel;
- const formattedMessages = messages.map((m) => ({
+ const formattedMessages = messagesWithToolContext.map((m) => ({
  role: m.role || "user",
  content: m.content || ""
  }));
@@ -951,30 +1189,46 @@ function useChat(options) {
  if (onFinish) {
  onFinish(completion);
  }
- return { data: completion, error: null };
+ return {
+ data: completion,
+ error: null,
+ toolExecution: toolExecutionResult
+ };
  } else {
  if (!model) {
  const errorMsg = "model is required to call sendMessage.";
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  if (!getToken) {
  const errorMsg = "Token getter function is required.";
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  const token = await getToken();
  if (!token) {
  const errorMsg = "No access token available.";
  setIsLoading(false);
  if (onError) onError(new Error(errorMsg));
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  }
  const sseResult = await client.sse.post({
  baseUrl,
  url: "/api/v1/chat/completions",
  body: {
- messages,
+ messages: messagesWithToolContext,
  model,
  stream: true
  },
@@ -1044,12 +1298,20 @@ function useChat(options) {
  if (onFinish) {
  onFinish(completion);
  }
- return { data: completion, error: null };
+ return {
+ data: completion,
+ error: null,
+ toolExecution: toolExecutionResult
+ };
  }
  } catch (err) {
  if (err instanceof Error && err.name === "AbortError") {
  setIsLoading(false);
- return { data: null, error: "Request aborted" };
+ return {
+ data: null,
+ error: "Request aborted",
+ toolExecution: toolExecutionResult
+ };
  }
  const errorMsg = err instanceof Error ? err.message : "Failed to send message.";
  const errorObj = err instanceof Error ? err : new Error(errorMsg);
@@ -1057,7 +1319,11 @@ function useChat(options) {
  if (onError) {
  onError(errorObj);
  }
- return { data: null, error: errorMsg };
+ return {
+ data: null,
+ error: errorMsg,
+ toolExecution: toolExecutionResult
+ };
  } finally {
  if (abortControllerRef.current === abortController) {
  abortControllerRef.current = null;
@@ -1071,11 +1337,15 @@ function useChat(options) {
  onFinish,
  onError,
  chatProvider,
- localModel
+ localModel,
+ tools,
+ toolSelectorModel,
+ onToolExecution
  ]
  );
  return {
  isLoading,
+ isSelectingTool,
  sendMessage,
  stop
  };
@@ -1997,12 +2267,15 @@ var extractConversationContext = (messages, maxMessages = 3) => {
  return userMessages.trim();
  };
  export {
+ DEFAULT_TOOL_SELECTOR_MODEL,
  createMemoryContextSystemMessage,
  decryptData,
  decryptDataBytes,
  encryptData,
+ executeTool,
  extractConversationContext,
  formatMemoriesForChat,
+ selectTool,
  useChat,
  useEncryption,
  useMemory,
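Both bundles also replace the old per-call chat pipeline (dtype "fp16") with one shared, module-level text-generation pipeline cached by model and device (dtype "q4"), and useChat preloads the selector model via an effect as soon as tools are configured. Note the cache holds a single slot, so alternating between the tool-selector model and a local chat model reloads a model on each switch. A standalone sketch of the caching pattern (loadModel stands in for the transformers.js pipeline() call; this is not the SDK's actual code):

type Pipe = { model: string; device: string };

let cached: Pipe | null = null;
let cachedModel: string | null = null;
let cachedDevice: string | null = null;

// Stand-in for the expensive download/compile step.
async function loadModel(model: string, device: string): Promise<Pipe> {
  console.log(`loading ${model} on ${device}`);
  return { model, device };
}

async function getPipeline(model: string, device = "wasm"): Promise<Pipe> {
  // Cache hit only when both model and device match the single cached slot.
  if (cached && cachedModel === model && cachedDevice === device) return cached;
  cached = await loadModel(model, device); // previous pipeline is simply dropped
  cachedModel = model;
  cachedDevice = device;
  return cached;
}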
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@reverbia/sdk",
- "version": "1.0.0-next.20251202092727",
+ "version": "1.0.0-next.20251202095402",
  "description": "",
  "main": "./dist/index.cjs",
  "module": "./dist/index.mjs",