@reverbia/sdk 1.0.0-next.20260109140427 → 1.0.0-next.20260109150925

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -554,6 +554,7 @@ type BaseSendMessageResult = {
  };
  /**
  * Base options for useChat hook
+ * @inline
  */
  type BaseUseChatOptions = {
  getToken?: () => Promise<string | null>;
@@ -623,6 +624,9 @@ type SendMessageArgs = BaseSendMessageArgs & {
  searchContext?: string;
  };
  type SendMessageResult = BaseSendMessageResult;
+ /**
+ * @inline
+ */
  type UseChatOptions = BaseUseChatOptions;
  type UseChatResult = BaseUseChatResult & {
  sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
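For orientation, a minimal sketch of how the `useChat` surface above might be consumed. `useChat`, `getToken`, `sendMessage`, and `searchContext` all appear in this diff; the `content` field is an assumption borrowed from the parallel `BaseSendMessageWithStorageArgs` documented further down, and the import path is assumed to be the package root.

```ts
import { useChat } from "@reverbia/sdk";

// Wrapped in a custom hook because useChat is a React hook and must be
// called from a component or another hook.
function useAskWithSearch() {
  const { sendMessage } = useChat({
    getToken: async () => null, // replace with a real token provider
  });

  return async (question: string, searchContext?: string) => {
    // `content` is an assumed BaseSendMessageArgs field; `searchContext`
    // is the field SendMessageArgs adds in this diff.
    return sendMessage({ content: question, searchContext });
  };
}
```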
@@ -731,6 +735,9 @@ interface StoredMemory extends MemoryItem {
  interface StoredMemoryWithSimilarity extends StoredMemory {
  similarity: number;
  }
+ /**
+ * @inline
+ */
  interface BaseUseMemoryStorageOptions {
  database: Database;
  completionsModel?: string;
@@ -871,27 +878,93 @@ interface UpdateMessageOptions {
  * @inline
  */
  interface BaseUseChatStorageOptions {
+ /** WatermelonDB database instance for storing conversations and messages */
  database: Database;
+ /** ID of an existing conversation to load and continue */
  conversationId?: string;
+ /** Automatically create a new conversation if none is set (default: true) */
  autoCreateConversation?: boolean;
+ /** Title for auto-created conversations (default: "New conversation") */
  defaultConversationTitle?: string;
+ /** Function to retrieve the auth token for API requests */
  getToken?: () => Promise<string | null>;
+ /** Base URL for the chat API endpoint */
  baseUrl?: string;
+ /** Callback invoked with each streamed response chunk */
  onData?: (chunk: string) => void;
+ /** Callback invoked when the response completes successfully */
  onFinish?: (response: LlmapiResponseResponse) => void;
+ /** Callback invoked when an error occurs during the request */
  onError?: (error: Error) => void;
  }
+ /**
+ * Base arguments for sending a message with automatic storage.
+ *
+ * These arguments control both the AI request and how the message
+ * is persisted to the local database.
+ * @inline
+ */
  interface BaseSendMessageWithStorageArgs {
+ /**
+ * The text content of the message to send to the AI.
+ */
  content: string;
+ /**
+ * The model identifier to use for this request (e.g., "gpt-4o", "claude-sonnet-4-20250514").
+ * If not specified, uses the default model configured on the server.
+ */
  model?: string;
+ /**
+ * Pre-built message array to send instead of using conversation history.
+ * When provided, `includeHistory` is ignored and these messages are used directly.
+ * Useful for custom message construction or when you need full control over context.
+ */
  messages?: LlmapiMessage[];
+ /**
+ * Whether to automatically include previous messages from the conversation as context.
+ * When true, fetches stored messages and prepends them to the request.
+ * Ignored if `messages` is provided.
+ * @default true
+ */
  includeHistory?: boolean;
+ /**
+ * Maximum number of historical messages to include when `includeHistory` is true.
+ * Only the most recent N messages are included to manage context window size.
+ * @default 50
+ */
  maxHistoryMessages?: number;
+ /**
+ * File attachments to include with the message (images, documents, etc.).
+ * Files with image MIME types and URLs are sent as image content parts.
+ * File metadata is stored with the message (URLs are stripped if they're data URIs).
+ */
  files?: FileMetadata[];
+ /**
+ * Per-request callback invoked with each streamed response chunk.
+ * Overrides the hook-level `onData` callback for this request only.
+ * Use this to update UI as the response streams in.
+ */
  onData?: (chunk: string) => void;
+ /**
+ * Additional context from memory/RAG system to include in the request.
+ * Typically contains retrieved relevant information from past conversations.
+ */
  memoryContext?: string;
+ /**
+ * Additional context from search results to include in the request.
+ * Typically contains relevant information from web or document searches.
+ */
  searchContext?: string;
+ /**
+ * Search sources to attach to the stored message for citation/reference.
+ * These are combined with any sources extracted from the assistant's response.
+ */
  sources?: SearchSource[];
+ /**
+ * Activity phases for tracking the request lifecycle in the UI.
+ * Each phase represents a step like "Searching", "Thinking", "Generating".
+ * The final phase is automatically marked as completed when stored.
+ */
  thoughtProcess?: ActivityPhase[];
  /**
  * Whether to store the response server-side.
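Read together, the new property comments describe the configuration object consumers pass to `useChatStorage` (its declaration appears later in this diff). A minimal sketch, assuming a WatermelonDB `Database` built from the exported `sdkSchema`/`sdkModelClasses` is available and that the package root is the correct import path:

```ts
import type { Database } from "@nozbe/watermelondb";
import { useChatStorage } from "@reverbia/sdk";

// database is assumed to be constructed elsewhere with sdkSchema/sdkModelClasses.
function useStoredChat(database: Database, getToken: () => Promise<string | null>) {
  return useChatStorage({
    database,
    autoCreateConversation: true,                  // default: true
    defaultConversationTitle: "New conversation",  // default title
    getToken,
    onData: (chunk) => console.log("delta:", chunk),
    onError: (error) => console.error("chat failed:", error),
  });
}
```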
@@ -909,33 +982,43 @@ interface BaseSendMessageWithStorageArgs {
  serverConversation?: string;
  /**
  * Controls randomness in the response (0.0 to 2.0).
+ * Lower values make output more deterministic, higher values more creative.
  */
  temperature?: number;
  /**
  * Maximum number of tokens to generate in the response.
+ * Use this to limit response length and control costs.
  */
  maxOutputTokens?: number;
  /**
  * Array of tool definitions available to the model.
+ * Tools enable the model to call functions, search, execute code, etc.
  */
  tools?: LlmapiTool[];
  /**
- * Controls which tool to use: "auto", "any", "none", "required", or a specific tool name.
+ * Controls which tool the model should use:
+ * - "auto": Model decides whether to use a tool (default)
+ * - "any": Model must use one of the provided tools
+ * - "none": Model cannot use any tools
+ * - "required": Model must use a tool
+ * - Specific tool name: Model must use that specific tool
  */
  toolChoice?: string;
  /**
  * Reasoning configuration for o-series and other reasoning models.
- * Controls reasoning effort and summary output.
+ * Controls reasoning effort level and whether to include reasoning summary.
  */
  reasoning?: LlmapiResponseReasoning;
  /**
  * Extended thinking configuration for Anthropic models (Claude).
- * Enables the model to think through complex problems step by step.
+ * Enables the model to think through complex problems step by step
+ * before generating the final response.
  */
  thinking?: LlmapiThinkingOptions;
  /**
  * Per-request callback for thinking/reasoning chunks.
  * Called with delta chunks as the model "thinks" through a problem.
+ * Use this to display thinking progress in the UI.
  */
  onThinking?: (chunk: string) => void;
  }
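The expanded `toolChoice` note enumerates the accepted string values, and `temperature`/`maxOutputTokens` now spell out their intent. A sketch of an argument object exercising those fields, assuming the exported `SendMessageWithStorageArgs` (Expo version) adds no required members beyond the base interface; `tools`, `reasoning`, and `thinking` are omitted because their shapes are not visible in this diff.

```ts
import type { SendMessageWithStorageArgs } from "@reverbia/sdk";

const args: SendMessageWithStorageArgs = {
  content: "Compare these two proposals and list the differences.",
  model: "claude-sonnet-4-20250514", // example identifier from the JSDoc above
  includeHistory: true,              // @default true
  maxHistoryMessages: 20,            // @default 50
  temperature: 0.2,                  // lower = more deterministic
  maxOutputTokens: 1024,             // cap response length
  toolChoice: "auto",                // "auto" | "any" | "none" | "required" | a tool name
  onThinking: (chunk) => console.log("thinking:", chunk),
};
```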
@@ -1003,8 +1086,10 @@ declare class Conversation extends Model {
  * Options for useChatStorage hook (Expo version)
  *
  * Uses the base options without React-specific features (no local chat, no tools).
+ * @inline
  */
- type UseChatStorageOptions = BaseUseChatStorageOptions;
+ interface UseChatStorageOptions extends BaseUseChatStorageOptions {
+ }
  /**
  * Arguments for sendMessage with storage (Expo version)
  *
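Switching `UseChatStorageOptions` from a type alias to an empty interface extending the base pairs with the new `@inline` tag, so documentation tooling shows the expanded members; for callers the two forms should be interchangeable. A small sketch showing that an existing call site still type-checks:

```ts
import type { Database } from "@nozbe/watermelondb";
import type { UseChatStorageOptions } from "@reverbia/sdk";

declare const database: Database;

// The same object literal satisfies both the old alias and the new interface.
const options: UseChatStorageOptions = { database };
```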
@@ -1080,6 +1165,9 @@ interface UseChatStorageResult extends BaseUseChatStorageResult {
  */
  declare function useChatStorage(options: UseChatStorageOptions): UseChatStorageResult;
 
+ /**
+ * @inline
+ */
  type UseImageGenerationOptions = {
  /**
  * Custom function to get auth token for API calls
@@ -1117,6 +1205,9 @@ type UseImageGenerationResult = {
  */
  declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
 
+ /**
+ * @inline
+ */
  type UseModelsOptions = {
  /**
  * Custom function to get auth token for API calls
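Both `useImageGeneration` and `useModels` take an options object whose only member visible in these hunks is an auth-token getter. The field name is truncated out of this diff, so `getToken` below is an assumption modeled on the other hooks, as is `useModels` accepting its options the same optional way:

```ts
import { useImageGeneration, useModels } from "@reverbia/sdk";

function useAuthedSdk(getToken: () => Promise<string | null>) {
  const images = useImageGeneration({ getToken }); // options are optional per the declaration above
  const models = useModels({ getToken });          // signature assumed to mirror useImageGeneration
  return { images, models };
}
```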
@@ -1176,8 +1267,10 @@ declare class Memory extends Model {
  * Options for useMemoryStorage hook (Expo version)
  *
  * Uses the base options.
+ * @inline
  */
- type UseMemoryStorageOptions = BaseUseMemoryStorageOptions;
+ interface UseMemoryStorageOptions extends BaseUseMemoryStorageOptions {
+ }
  /**
  * Result returned by useMemoryStorage hook (Expo version)
  *
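`UseMemoryStorageOptions` gets the same alias-to-interface treatment. A minimal call sketch, assuming `useMemoryStorage(options)` mirrors the `useChatStorage(options)` shape shown earlier (its function declaration is not part of this diff) and that `database` is the only required option:

```ts
import type { Database } from "@nozbe/watermelondb";
import { useMemoryStorage } from "@reverbia/sdk";

function useMemories(database: Database) {
  // completionsModel is optional and left at its default here.
  return useMemoryStorage({ database });
}
```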
@@ -1305,4 +1398,4 @@ declare const sdkMigrations: Readonly<{
  */
  declare const sdkModelClasses: Class<Model$1>[];
 
- export { Conversation as ChatConversation, Message as ChatMessage, type ChatRole, type CreateConversationOptions, type CreateMemoryOptions, type CreateMessageOptions, type FileMetadata, type MemoryItem, type MemoryType, type SearchSource, type SendMessageWithStorageArgs, type SendMessageWithStorageResult, type ChatCompletionUsage as StoredChatCompletionUsage, type StoredConversation, type StoredMemory, Memory as StoredMemoryModel, type StoredMemoryWithSimilarity, type StoredMessage, type StoredMessageWithSimilarity, type UpdateMemoryOptions, type UseChatStorageOptions, type UseChatStorageResult, type UseMemoryStorageOptions, type UseMemoryStorageResult, type UseModelsOptions, type UseModelsResult, chatStorageMigrations, chatStorageSchema, generateCompositeKey, generateConversationId, generateUniqueKey, memoryStorageSchema, sdkMigrations, sdkModelClasses, sdkSchema, useChat, useChatStorage, useImageGeneration, useMemoryStorage, useModels };
+ export { Conversation as ChatConversation, Message as ChatMessage, type ChatRole, type CreateConversationOptions, type CreateMemoryOptions, type CreateMessageOptions, type FileMetadata, type MemoryItem, type MemoryType, type SearchSource, type SendMessageWithStorageArgs, type SendMessageWithStorageResult, type ChatCompletionUsage as StoredChatCompletionUsage, type StoredConversation, type StoredMemory, Memory as StoredMemoryModel, type StoredMemoryWithSimilarity, type StoredMessage, type StoredMessageWithSimilarity, type UpdateMemoryOptions, type UseChatStorageOptions, type UseChatStorageResult, type UseImageGenerationResult, type UseMemoryStorageOptions, type UseMemoryStorageResult, type UseModelsOptions, type UseModelsResult, chatStorageMigrations, chatStorageSchema, generateCompositeKey, generateConversationId, generateUniqueKey, memoryStorageSchema, sdkMigrations, sdkModelClasses, sdkSchema, useChat, useChatStorage, useImageGeneration, useMemoryStorage, useModels };
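The only change to the export statement is that `UseImageGenerationResult` is now re-exported as a type, so downstream code can annotate values directly instead of deriving the type. A short sketch of the difference; the result type is treated as opaque here since its members are not shown in this diff:

```ts
import { useImageGeneration, type UseImageGenerationResult } from "@reverbia/sdk";

// Before this release the annotation had to be derived, e.g.
//   type UseImageGenerationResult = ReturnType<typeof useImageGeneration>;
// With 1.0.0-next.20260109150925 it can be imported directly.
function useImages(): UseImageGenerationResult {
  return useImageGeneration();
}
```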