@reverbia/sdk 1.0.0-next.20260109140427 → 1.0.0-next.20260109180912
This diff reflects the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/expo/index.cjs +49 -26
- package/dist/expo/index.d.mts +130 -8
- package/dist/expo/index.d.ts +130 -8
- package/dist/expo/index.mjs +49 -26
- package/dist/react/index.cjs +178 -60
- package/dist/react/index.d.mts +233 -40
- package/dist/react/index.d.ts +233 -40
- package/dist/react/index.mjs +178 -60
- package/package.json +3 -1
package/dist/expo/index.cjs
CHANGED

@@ -1303,6 +1303,36 @@ function finalizeThoughtProcess(thoughtProcess) {
     (phase, idx) => idx === thoughtProcess.length - 1 ? { ...phase, status: "completed" } : phase
   );
 }
+function extractUserMessageFromMessages(messages) {
+  if (!messages || messages.length === 0) {
+    return null;
+  }
+  const userMessages = messages.filter((m) => m.role === "user");
+  const lastUserMessage = userMessages[userMessages.length - 1];
+  if (!lastUserMessage || !lastUserMessage.content) {
+    return null;
+  }
+  const textParts = [];
+  const files = [];
+  for (const part of lastUserMessage.content) {
+    if (part.type === "text" && part.text) {
+      textParts.push(part.text);
+    } else if (part.type === "image_url" && part.image_url?.url) {
+      files.push({
+        id: `img_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`,
+        name: "image",
+        type: "image/unknown",
+        size: 0,
+        url: part.image_url.url
+      });
+    }
+  }
+  const content = textParts.join("\n");
+  return {
+    content,
+    files: files.length > 0 ? files : void 0
+  };
+}

 // src/lib/db/chat/operations.ts
 var import_watermelondb3 = require("@nozbe/watermelondb");
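
The new extractUserMessageFromMessages helper pulls the last user entry out of the array-form message list: text parts are joined into a single string for storage, and image_url parts become synthetic file records. A purely illustrative sketch of that behavior (the sample message and URL below are made up, not part of the package):

const messages = [
  { role: "user", content: [
    { type: "text", text: "Describe this photo" },
    { type: "image_url", image_url: { url: "https://example.com/photo.png" } }
  ] }
];
// Per the hunk above, extractUserMessageFromMessages(messages) would return roughly:
// {
//   content: "Describe this photo",
//   files: [{ id: "img_<timestamp>_<random>", name: "image", type: "image/unknown", size: 0, url: "https://example.com/photo.png" }]
// }
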
@@ -1679,9 +1709,8 @@ function useChatStorage(options) {
   const sendMessage = (0, import_react2.useCallback)(
     async (args) => {
       const {
-
+        messages,
         model,
-        messages: providedMessages,
         includeHistory = true,
         maxHistoryMessages = 50,
         files,

@@ -1702,6 +1731,15 @@ function useChatStorage(options) {
         reasoning,
         thinking
       } = args;
+      const extracted = extractUserMessageFromMessages(messages);
+      if (!extracted || !extracted.content) {
+        return {
+          data: null,
+          error: "No user message found in messages array"
+        };
+      }
+      const contentForStorage = extracted.content;
+      const filesForStorage = files ?? extracted.files;
       let convId;
       try {
         convId = await ensureConversation();

@@ -1712,33 +1750,18 @@ function useChatStorage(options) {
         };
       }
       let messagesToSend = [];
-      if (includeHistory
+      if (includeHistory) {
         const storedMessages = await getMessages(convId);
         const validMessages = storedMessages.filter((msg) => !msg.error);
         const limitedMessages = validMessages.slice(-maxHistoryMessages);
-        messagesToSend =
-
-
-
-
-
-        ];
-        if (files?.length) {
-          for (const file of files) {
-            if (file.url) {
-              userMessageContent.push({
-                type: "image_url",
-                image_url: { url: file.url }
-              });
-            }
-          }
+        messagesToSend = [
+          ...limitedMessages.map(storedToLlmapiMessage),
+          ...messages
+        ];
+      } else {
+        messagesToSend = [...messages];
       }
-      const
-        role: "user",
-        content: userMessageContent
-      };
-      messagesToSend.push(userMessage);
-      const sanitizedFiles = files?.map((file) => ({
+      const sanitizedFiles = filesForStorage?.map((file) => ({
         id: file.id,
         name: file.name,
         type: file.type,

@@ -1751,7 +1774,7 @@ function useChatStorage(options) {
         storedUserMessage = await createMessageOp(storageCtx, {
           conversationId: convId,
           role: "user",
-          content,
+          content: contentForStorage,
           files: sanitizedFiles,
           model
         });
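
Taken together, these hunks change sendMessage from a content-plus-files call into one driven entirely by a messages array: the last user entry is extracted for local storage, stored history is optionally prepended, and a call with no user message short-circuits with { data: null, error: "No user message found in messages array" }. A minimal caller-side sketch, assuming the Expo entry point is importable as "@reverbia/sdk/expo" (the subpath export is not shown in this diff):

import type { Database } from "@nozbe/watermelondb";
import { useChatStorage } from "@reverbia/sdk/expo"; // assumed import path

// Wraps the hook so a screen can send a plain question with history included.
export function useAskAssistant(database: Database) {
  const { sendMessage } = useChatStorage({ database });
  return async (question: string) => {
    const result = await sendMessage({
      // New shape: an array of messages; the last user entry is what gets stored locally.
      messages: [{ role: "user", content: [{ type: "text", text: question }] }],
      includeHistory: true,   // default: stored conversation history is prepended
      maxHistoryMessages: 50, // default cap on prepended history
    });
    if (result.error) {
      // e.g. "No user message found in messages array" when `messages` has no user entry
      console.warn(result.error);
    }
    return result;
  };
}
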
package/dist/expo/index.d.mts
CHANGED

@@ -554,6 +554,7 @@ type BaseSendMessageResult = {
 };
 /**
  * Base options for useChat hook
+ * @inline
  */
 type BaseUseChatOptions = {
     getToken?: () => Promise<string | null>;

@@ -623,6 +624,9 @@ type SendMessageArgs = BaseSendMessageArgs & {
     searchContext?: string;
 };
 type SendMessageResult = BaseSendMessageResult;
+/**
+ * @inline
+ */
 type UseChatOptions = BaseUseChatOptions;
 type UseChatResult = BaseUseChatResult & {
     sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;

@@ -731,6 +735,9 @@ interface StoredMemory extends MemoryItem {
 interface StoredMemoryWithSimilarity extends StoredMemory {
     similarity: number;
 }
+/**
+ * @inline
+ */
 interface BaseUseMemoryStorageOptions {
     database: Database;
     completionsModel?: string;

@@ -871,27 +878,122 @@ interface UpdateMessageOptions {
  * @inline
  */
 interface BaseUseChatStorageOptions {
+    /** WatermelonDB database instance for storing conversations and messages */
     database: Database;
+    /** ID of an existing conversation to load and continue */
     conversationId?: string;
+    /** Automatically create a new conversation if none is set (default: true) */
     autoCreateConversation?: boolean;
+    /** Title for auto-created conversations (default: "New conversation") */
     defaultConversationTitle?: string;
+    /** Function to retrieve the auth token for API requests */
     getToken?: () => Promise<string | null>;
+    /** Base URL for the chat API endpoint */
     baseUrl?: string;
+    /** Callback invoked with each streamed response chunk */
     onData?: (chunk: string) => void;
+    /** Callback invoked when the response completes successfully */
     onFinish?: (response: LlmapiResponseResponse) => void;
+    /** Callback invoked when an error occurs during the request */
     onError?: (error: Error) => void;
 }
+/**
+ * Base arguments for sending a message with automatic storage.
+ *
+ * These arguments control both the AI request and how the message
+ * is persisted to the local database.
+ * @inline
+ */
 interface BaseSendMessageWithStorageArgs {
-
+    /**
+     * The message array to send to the AI.
+     *
+     * Uses the modern array format that supports multimodal content (text, images, files).
+     * The last user message in this array will be extracted and stored in the database.
+     *
+     * When `includeHistory` is true (default), conversation history is prepended.
+     * When `includeHistory` is false, only these messages are sent.
+     *
+     * @example
+     * ```ts
+     * // Simple usage
+     * sendMessage({
+     *   messages: [
+     *     { role: "user", content: [{ type: "text", text: "Hello!" }] }
+     *   ]
+     * })
+     *
+     * // With system prompt and history disabled
+     * sendMessage({
+     *   messages: [
+     *     { role: "system", content: [{ type: "text", text: "You are helpful" }] },
+     *     { role: "user", content: [{ type: "text", text: "Question" }] },
+     *   ],
+     *   includeHistory: false
+     * })
+     *
+     * // With images
+     * sendMessage({
+     *   messages: [
+     *     { role: "user", content: [
+     *       { type: "text", text: "What's in this image?" },
+     *       { type: "image_url", image_url: { url: "data:image/png;base64,..." } }
+     *     ]}
+     *   ]
+     * })
+     * ```
+     */
+    messages: LlmapiMessage[];
+    /**
+     * The model identifier to use for this request (e.g., "gpt-4o", "claude-sonnet-4-20250514").
+     * If not specified, uses the default model configured on the server.
+     */
     model?: string;
-
+    /**
+     * Whether to automatically include previous messages from the conversation as context.
+     * When true, fetches stored messages and prepends them to the request.
+     * Ignored if `messages` is provided.
+     * @default true
+     */
     includeHistory?: boolean;
+    /**
+     * Maximum number of historical messages to include when `includeHistory` is true.
+     * Only the most recent N messages are included to manage context window size.
+     * @default 50
+     */
     maxHistoryMessages?: number;
+    /**
+     * File attachments to include with the message (images, documents, etc.).
+     * Files with image MIME types and URLs are sent as image content parts.
+     * File metadata is stored with the message (URLs are stripped if they're data URIs).
+     */
     files?: FileMetadata[];
+    /**
+     * Per-request callback invoked with each streamed response chunk.
+     * Overrides the hook-level `onData` callback for this request only.
+     * Use this to update UI as the response streams in.
+     */
     onData?: (chunk: string) => void;
+    /**
+     * Additional context from memory/RAG system to include in the request.
+     * Typically contains retrieved relevant information from past conversations.
+     */
     memoryContext?: string;
+    /**
+     * Additional context from search results to include in the request.
+     * Typically contains relevant information from web or document searches.
+     */
     searchContext?: string;
+    /**
+     * Search sources to attach to the stored message for citation/reference.
+     * These are combined with any sources extracted from the assistant's response.
+     */
     sources?: SearchSource[];
+    /**
+     * Activity phases for tracking the request lifecycle in the UI.
+     * Each phase represents a step like "Searching", "Thinking", "Generating".
+     * The final phase is automatically marked as completed when stored.
+     */
     thoughtProcess?: ActivityPhase[];
     /**
      * Whether to store the response server-side.

@@ -909,33 +1011,43 @@ interface BaseSendMessageWithStorageArgs {
     serverConversation?: string;
     /**
      * Controls randomness in the response (0.0 to 2.0).
+     * Lower values make output more deterministic, higher values more creative.
      */
     temperature?: number;
     /**
      * Maximum number of tokens to generate in the response.
+     * Use this to limit response length and control costs.
      */
     maxOutputTokens?: number;
     /**
      * Array of tool definitions available to the model.
+     * Tools enable the model to call functions, search, execute code, etc.
      */
     tools?: LlmapiTool[];
     /**
-     * Controls which tool
+     * Controls which tool the model should use:
+     * - "auto": Model decides whether to use a tool (default)
+     * - "any": Model must use one of the provided tools
+     * - "none": Model cannot use any tools
+     * - "required": Model must use a tool
+     * - Specific tool name: Model must use that specific tool
      */
     toolChoice?: string;
     /**
      * Reasoning configuration for o-series and other reasoning models.
-     * Controls reasoning effort and summary
+     * Controls reasoning effort level and whether to include reasoning summary.
      */
     reasoning?: LlmapiResponseReasoning;
     /**
      * Extended thinking configuration for Anthropic models (Claude).
-     * Enables the model to think through complex problems step by step
+     * Enables the model to think through complex problems step by step
+     * before generating the final response.
      */
     thinking?: LlmapiThinkingOptions;
     /**
      * Per-request callback for thinking/reasoning chunks.
      * Called with delta chunks as the model "thinks" through a problem.
+     * Use this to display thinking progress in the UI.
      */
     onThinking?: (chunk: string) => void;
 }

@@ -1003,8 +1115,10 @@ declare class Conversation extends Model {
  * Options for useChatStorage hook (Expo version)
  *
  * Uses the base options without React-specific features (no local chat, no tools).
+ * @inline
  */
-
+interface UseChatStorageOptions extends BaseUseChatStorageOptions {
+}
 /**
  * Arguments for sendMessage with storage (Expo version)
  *

@@ -1080,6 +1194,9 @@ interface UseChatStorageResult extends BaseUseChatStorageResult {
  */
 declare function useChatStorage(options: UseChatStorageOptions): UseChatStorageResult;

+/**
+ * @inline
+ */
 type UseImageGenerationOptions = {
     /**
      * Custom function to get auth token for API calls

@@ -1117,6 +1234,9 @@ type UseImageGenerationResult = {
  */
 declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;

+/**
+ * @inline
+ */
 type UseModelsOptions = {
     /**
      * Custom function to get auth token for API calls

@@ -1176,8 +1296,10 @@ declare class Memory extends Model {
  * Options for useMemoryStorage hook (Expo version)
  *
  * Uses the base options.
+ * @inline
  */
-
+interface UseMemoryStorageOptions extends BaseUseMemoryStorageOptions {
+}
 /**
  * Result returned by useMemoryStorage hook (Expo version)
  *

@@ -1305,4 +1427,4 @@ declare const sdkMigrations: Readonly<{
  */
 declare const sdkModelClasses: Class<Model$1>[];

-export { Conversation as ChatConversation, Message as ChatMessage, type ChatRole, type CreateConversationOptions, type CreateMemoryOptions, type CreateMessageOptions, type FileMetadata, type MemoryItem, type MemoryType, type SearchSource, type SendMessageWithStorageArgs, type SendMessageWithStorageResult, type ChatCompletionUsage as StoredChatCompletionUsage, type StoredConversation, type StoredMemory, Memory as StoredMemoryModel, type StoredMemoryWithSimilarity, type StoredMessage, type StoredMessageWithSimilarity, type UpdateMemoryOptions, type UseChatStorageOptions, type UseChatStorageResult, type UseMemoryStorageOptions, type UseMemoryStorageResult, type UseModelsOptions, type UseModelsResult, chatStorageMigrations, chatStorageSchema, generateCompositeKey, generateConversationId, generateUniqueKey, memoryStorageSchema, sdkMigrations, sdkModelClasses, sdkSchema, useChat, useChatStorage, useImageGeneration, useMemoryStorage, useModels };
+export { Conversation as ChatConversation, Message as ChatMessage, type ChatRole, type CreateConversationOptions, type CreateMemoryOptions, type CreateMessageOptions, type FileMetadata, type MemoryItem, type MemoryType, type SearchSource, type SendMessageWithStorageArgs, type SendMessageWithStorageResult, type ChatCompletionUsage as StoredChatCompletionUsage, type StoredConversation, type StoredMemory, Memory as StoredMemoryModel, type StoredMemoryWithSimilarity, type StoredMessage, type StoredMessageWithSimilarity, type UpdateMemoryOptions, type UseChatStorageOptions, type UseChatStorageResult, type UseImageGenerationResult, type UseMemoryStorageOptions, type UseMemoryStorageResult, type UseModelsOptions, type UseModelsResult, chatStorageMigrations, chatStorageSchema, generateCompositeKey, generateConversationId, generateUniqueKey, memoryStorageSchema, sdkMigrations, sdkModelClasses, sdkSchema, useChat, useChatStorage, useImageGeneration, useMemoryStorage, useModels };
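
The newly documented hook-level options (database, auth, base URL, streaming callbacks) compose as you would expect; a configuration sketch, again assuming the "@reverbia/sdk/expo" import path and a pre-configured WatermelonDB instance (neither is shown in this diff):

import type { Database } from "@nozbe/watermelondb";
import { useChatStorage } from "@reverbia/sdk/expo"; // assumed import path

// `database` would be the app's configured WatermelonDB instance.
export function useConfiguredChat(database: Database) {
  return useChatStorage({
    database,                                      // local store for conversations and messages
    autoCreateConversation: true,                  // documented default
    defaultConversationTitle: "New conversation",  // documented default title
    getToken: async () => null,                    // return an auth token here if the API requires one
    onData: (chunk) => console.log("chunk:", chunk),
    onError: (error) => console.error("chat error:", error),
  });
}
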
package/dist/expo/index.d.ts
CHANGED

Identical to the package/dist/expo/index.d.mts changes above (same hunks, +130 -8).