@reverbia/sdk 1.0.0-next.20251202090922 → 1.0.0-next.20251202095402

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -175,9 +175,67 @@ type LlmapiModelTopProvider = {
   */
 type LlmapiRole = string;
 
+/**
+ * Parameter definition for a client-side tool
+ */
+interface ToolParameter {
+    /** Parameter name */
+    name: string;
+    /** Parameter type (string, number, boolean, etc.) */
+    type: "string" | "number" | "boolean" | "object" | "array";
+    /** Human-readable description of the parameter */
+    description: string;
+    /** Whether this parameter is required */
+    required?: boolean;
+    /** Default value if not provided */
+    default?: unknown;
+}
+/**
+ * Definition for a client-side tool that can be executed in the browser
+ */
+interface ClientTool {
+    /** Unique identifier for the tool */
+    name: string;
+    /** Human-readable description of what the tool does */
+    description: string;
+    /** Parameters the tool accepts */
+    parameters?: ToolParameter[];
+    /**
+     * The function to execute when the tool is called.
+     * Receives extracted parameters and returns a result.
+     */
+    execute: (params: Record<string, unknown>) => Promise<unknown> | unknown;
+}
+/**
+ * Result of a tool selection operation
+ */
+interface ToolSelectionResult {
+    /** Whether a tool was selected */
+    toolSelected: boolean;
+    /** Name of the selected tool (if any) */
+    toolName?: string;
+    /** Extracted parameters for the tool */
+    parameters?: Record<string, unknown>;
+    /** Confidence score (0-1) of the selection */
+    confidence?: number;
+}
+/**
+ * Result of executing a client-side tool
+ */
+interface ToolExecutionResult {
+    /** Name of the tool that was executed */
+    toolName: string;
+    /** Whether execution was successful */
+    success: boolean;
+    /** The result returned by the tool */
+    result?: unknown;
+    /** Error message if execution failed */
+    error?: string;
+}
+
 type SendMessageArgs = {
     messages: LlmapiMessage[];
-    model: string;
+    model?: string;
     /**
      * Per-request callback for data chunks. Called in addition to the global
      * `onData` callback if provided in `useChat` options.
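
The four new interfaces compose as follows — a minimal sketch; the `get_time` tool and its field values are illustrative, not part of the package:

```ts
// Sketch: a tool conforming to the new ClientTool/ToolParameter interfaces.
// The "get_time" tool and its fields are illustrative only.
const getTime: ClientTool = {
    name: "get_time",
    description: "Get the current time, optionally for an IANA timezone",
    parameters: [
        { name: "timezone", type: "string", description: "IANA timezone name", required: false, default: "UTC" },
    ],
    // execute may return a value directly or a Promise
    execute: ({ timezone }) => new Date().toLocaleString("en-US", { timeZone: String(timezone ?? "UTC") }),
};
```
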
@@ -185,13 +243,20 @@ type SendMessageArgs = {
      * @param chunk - The content delta from the current chunk
      */
     onData?: (chunk: string) => void;
+    /**
+     * Whether to run tool selection for this message.
+     * Defaults to true if tools are configured.
+     */
+    runTools?: boolean;
 };
 type SendMessageResult = {
     data: LlmapiChatCompletionResponse;
     error: null;
+    toolExecution?: ToolExecutionResult;
 } | {
     data: null;
     error: string;
+    toolExecution?: ToolExecutionResult;
 };
 type UseChatOptions = {
     getToken?: () => Promise<string | null>;
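
Note that `toolExecution` is now optional on both branches of the `SendMessageResult` union, so callers can inspect it independently of success or failure. A sketch, assuming an async handler with `sendMessage` obtained from `useChat`:

```ts
// `model` is now optional, so it can be omitted.
const result = await sendMessage({
    messages: [{ role: "user", content: "Hi" }],
});
// toolExecution may be present whether or not the request itself failed
if (result.toolExecution && !result.toolExecution.success) {
    console.warn("Tool failed:", result.toolExecution.toolName, result.toolExecution.error);
}
if (result.error !== null) {
    console.error("Chat error:", result.error);
} else {
    console.log("Response:", result.data);
}
```
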
@@ -215,9 +280,36 @@ type UseChatOptions = {
      * @param error - The error that occurred (never an AbortError)
      */
     onError?: (error: Error) => void;
+    /**
+     * The provider to use for chat completions (default: "api")
+     * "local": Uses a local HuggingFace model (in-browser)
+     * "api": Uses the backend API
+     */
+    chatProvider?: "api" | "local";
+    /**
+     * The model to use for local chat completions
+     * Default is "ibm-granite/Granite-4.0-Nano-WebGPU"
+     */
+    localModel?: string;
+    /**
+     * Client-side tools that can be executed in the browser.
+     * When provided, the hook will use a local model to determine
+     * if any tool should be called based on the user's message.
+     */
+    tools?: ClientTool[];
+    /**
+     * The model to use for tool selection.
+     * Default is "onnx-community/granite-4.0-350m-ONNX-web"
+     */
+    toolSelectorModel?: string;
+    /**
+     * Callback function to be called when a tool is executed.
+     */
+    onToolExecution?: (result: ToolExecutionResult) => void;
 };
 type UseChatResult = {
     isLoading: boolean;
+    isSelectingTool: boolean;
     sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
     /**
      * Aborts the current streaming request if one is in progress.
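
A sketch of the new in-browser provider options, using the default model IDs quoted in the doc comments above (note that the `DEFAULT_TOOL_SELECTOR_MODEL` constant added later in this diff declares "Xenova/LaMini-GPT-124M" instead):

```ts
// Sketch: running completions fully in the browser instead of the backend API.
const { isLoading, isSelectingTool, sendMessage } = useChat({
    chatProvider: "local",
    localModel: "ibm-granite/Granite-4.0-Nano-WebGPU",
    toolSelectorModel: "onnx-community/granite-4.0-350m-ONNX-web",
    onToolExecution: (result) => console.log(result.toolName, result.success),
});
```
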
@@ -244,47 +336,58 @@ type UseChatResult = {
  * @param options.onFinish - Callback function to be called when the chat completion finishes successfully.
  * @param options.onError - Callback function to be called when an unexpected error
  * is encountered. Note: This is NOT called for aborted requests (see `stop()`).
+ * @param options.chatProvider - The provider to use for chat completions (default: "api").
+ * @param options.localModel - The model to use for local chat completions.
+ * @param options.tools - Client-side tools that can be executed in the browser.
+ * @param options.toolSelectorModel - The model to use for tool selection.
+ * @param options.onToolExecution - Callback function to be called when a tool is executed.
  *
  * @returns An object containing:
  * - `isLoading`: A boolean indicating whether a request is currently in progress
+ * - `isSelectingTool`: A boolean indicating whether tool selection is in progress
  * - `sendMessage`: An async function to send chat messages
  * - `stop`: A function to abort the current request
  *
  * @example
  * ```tsx
+ * // Basic usage with API
  * const { isLoading, sendMessage, stop } = useChat({
- *     getToken: async () => {
- *         // Get your auth token from your auth provider
- *         return await getAuthToken();
- *     },
- *     onFinish: (response) => {
- *         console.log("Chat finished:", response);
- *     },
- *     onError: (error) => {
- *         // This is only called for unexpected errors, not aborts
- *         console.error("Chat error:", error);
+ *     getToken: async () => await getAuthToken(),
+ *     onFinish: (response) => console.log("Chat finished:", response),
+ *     onError: (error) => console.error("Chat error:", error)
+ * });
+ *
+ * // With client-side tools
+ * const { isLoading, isSelectingTool, sendMessage } = useChat({
+ *     getToken: async () => await getAuthToken(),
+ *     tools: [
+ *         {
+ *             name: "get_weather",
+ *             description: "Get the current weather for a location",
+ *             parameters: [
+ *                 { name: "location", type: "string", description: "City name", required: true }
+ *             ],
+ *             execute: async ({ location }) => {
+ *                 // Your weather API call here
+ *                 return { temperature: 72, condition: "sunny" };
+ *             }
+ *         }
+ *     ],
+ *     onToolExecution: (result) => {
+ *         console.log("Tool executed:", result.toolName, result.result);
  *     }
  * });
  *
  * const handleSend = async () => {
  *     const result = await sendMessage({
- *         messages: [{ role: 'user', content: 'Hello!' }],
+ *         messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
  *         model: 'gpt-4o-mini'
  *     });
  *
- *     if (result.error) {
- *         if (result.error === "Request aborted") {
- *             console.log("Request was aborted");
- *         } else {
- *             console.error("Error:", result.error);
- *         }
- *     } else {
- *         console.log("Success:", result.data);
+ *     if (result.toolExecution) {
+ *         console.log("Tool was called:", result.toolExecution);
  *     }
  * };
- *
- * // To stop generation:
- * // stop();
  * ```
  */
 declare function useChat(options?: UseChatOptions): UseChatResult;
@@ -456,4 +559,26 @@ declare const extractConversationContext: (messages: Array<{
     content: string;
 }>, maxMessages?: number) => string;
 
-export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory, useModels };
+declare const DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+interface ToolSelectorOptions {
+    /** Model to use for tool selection. Defaults to Xenova/LaMini-GPT-124M */
+    model?: string;
+    /** Abort signal */
+    signal?: AbortSignal;
+    /** Device to use (webgpu, wasm, cpu). Defaults to wasm */
+    device?: "webgpu" | "wasm" | "cpu";
+}
+/**
+ * Select a tool based on user message using an in-browser model
+ */
+declare function selectTool(userMessage: string, tools: ClientTool[], options?: ToolSelectorOptions): Promise<ToolSelectionResult>;
+/**
+ * Execute a client-side tool with the given parameters
+ */
+declare function executeTool(tool: ClientTool, params: Record<string, unknown>): Promise<{
+    success: boolean;
+    result?: unknown;
+    error?: string;
+}>;
+
+export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useMemory, useModels };
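
The newly exported `selectTool` and `executeTool` allow driving tool selection outside the hook. A sketch, assuming these are importable from the package root per the export list above:

```ts
import { selectTool, executeTool, type ClientTool } from "@reverbia/sdk";

// Sketch: manual tool selection and execution, outside useChat.
async function runTool(message: string, tools: ClientTool[]): Promise<unknown> {
    const selection = await selectTool(message, tools, { device: "webgpu" });
    if (!selection.toolSelected || !selection.toolName) return null;

    const tool = tools.find((t) => t.name === selection.toolName);
    if (!tool) return null;

    const execution = await executeTool(tool, selection.parameters ?? {});
    if (!execution.success) throw new Error(execution.error);
    return execution.result;
}
```
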