@reverbia/sdk 1.0.0 → 1.1.0-next.20251230221037

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/README.md +290 -45
  2. package/dist/expo/index.cjs +3428 -0
  3. package/dist/expo/index.d.mts +1217 -0
  4. package/dist/expo/index.d.ts +1217 -0
  5. package/dist/expo/index.mjs +3395 -0
  6. package/dist/index.cjs +972 -0
  7. package/dist/index.d.mts +1514 -0
  8. package/dist/index.d.ts +1514 -0
  9. package/dist/index.mjs +934 -0
  10. package/dist/next/index.cjs +64 -0
  11. package/dist/next/index.d.mts +23 -0
  12. package/dist/next/index.d.ts +23 -0
  13. package/dist/next/index.mjs +39 -0
  14. package/dist/polyfills/index.cjs +61 -0
  15. package/dist/polyfills/index.d.mts +9 -0
  16. package/dist/polyfills/index.d.ts +9 -0
  17. package/dist/polyfills/index.mjs +34 -0
  18. package/dist/react/chunk-KUFGQF6E.mjs +290 -0
  19. package/dist/react/chunk-T56Y62G7.mjs +410 -0
  20. package/dist/react/index.cjs +7982 -0
  21. package/dist/react/index.d.mts +3139 -0
  22. package/dist/react/index.d.ts +3139 -0
  23. package/dist/react/index.mjs +7209 -0
  24. package/dist/react/storage-Z2NBANCK.mjs +29 -0
  25. package/dist/react/useEncryption-5RTXKDNZ.mjs +31 -0
  26. package/dist/vercel/index.cjs +86 -0
  27. package/dist/vercel/index.d.mts +119 -0
  28. package/dist/vercel/index.d.ts +119 -0
  29. package/dist/vercel/index.mjs +57 -0
  30. package/package.json +91 -16
  31. package/dist/cjs/client/client/client.gen.d.ts +0 -2
  32. package/dist/cjs/client/client/client.gen.js +0 -203
  33. package/dist/cjs/client/client/index.d.ts +0 -8
  34. package/dist/cjs/client/client/index.js +0 -16
  35. package/dist/cjs/client/client/types.gen.d.ts +0 -99
  36. package/dist/cjs/client/client/types.gen.js +0 -3
  37. package/dist/cjs/client/client/utils.gen.d.ts +0 -40
  38. package/dist/cjs/client/client/utils.gen.js +0 -314
  39. package/dist/cjs/client/client.gen.d.ts +0 -12
  40. package/dist/cjs/client/client.gen.js +0 -6
  41. package/dist/cjs/client/core/auth.gen.d.ts +0 -18
  42. package/dist/cjs/client/core/auth.gen.js +0 -18
  43. package/dist/cjs/client/core/bodySerializer.gen.d.ts +0 -25
  44. package/dist/cjs/client/core/bodySerializer.gen.js +0 -60
  45. package/dist/cjs/client/core/params.gen.d.ts +0 -43
  46. package/dist/cjs/client/core/params.gen.js +0 -104
  47. package/dist/cjs/client/core/pathSerializer.gen.d.ts +0 -33
  48. package/dist/cjs/client/core/pathSerializer.gen.js +0 -123
  49. package/dist/cjs/client/core/queryKeySerializer.gen.d.ts +0 -18
  50. package/dist/cjs/client/core/queryKeySerializer.gen.js +0 -105
  51. package/dist/cjs/client/core/serverSentEvents.gen.d.ts +0 -71
  52. package/dist/cjs/client/core/serverSentEvents.gen.js +0 -139
  53. package/dist/cjs/client/core/types.gen.d.ts +0 -78
  54. package/dist/cjs/client/core/types.gen.js +0 -3
  55. package/dist/cjs/client/core/utils.gen.d.ts +0 -19
  56. package/dist/cjs/client/core/utils.gen.js +0 -93
  57. package/dist/cjs/client/index.d.ts +0 -2
  58. package/dist/cjs/client/index.js +0 -18
  59. package/dist/cjs/client/sdk.gen.d.ts +0 -27
  60. package/dist/cjs/client/sdk.gen.js +0 -33
  61. package/dist/cjs/client/types.gen.d.ts +0 -120
  62. package/dist/cjs/client/types.gen.js +0 -3
  63. package/dist/esm/client/client/client.gen.d.ts +0 -2
  64. package/dist/esm/client/client/client.gen.js +0 -199
  65. package/dist/esm/client/client/index.d.ts +0 -8
  66. package/dist/esm/client/client/index.js +0 -6
  67. package/dist/esm/client/client/types.gen.d.ts +0 -99
  68. package/dist/esm/client/client/types.gen.js +0 -2
  69. package/dist/esm/client/client/utils.gen.d.ts +0 -40
  70. package/dist/esm/client/client/utils.gen.js +0 -302
  71. package/dist/esm/client/client.gen.d.ts +0 -12
  72. package/dist/esm/client/client.gen.js +0 -3
  73. package/dist/esm/client/core/auth.gen.d.ts +0 -18
  74. package/dist/esm/client/core/auth.gen.js +0 -14
  75. package/dist/esm/client/core/bodySerializer.gen.d.ts +0 -25
  76. package/dist/esm/client/core/bodySerializer.gen.js +0 -57
  77. package/dist/esm/client/core/params.gen.d.ts +0 -43
  78. package/dist/esm/client/core/params.gen.js +0 -100
  79. package/dist/esm/client/core/pathSerializer.gen.d.ts +0 -33
  80. package/dist/esm/client/core/pathSerializer.gen.js +0 -114
  81. package/dist/esm/client/core/queryKeySerializer.gen.d.ts +0 -18
  82. package/dist/esm/client/core/queryKeySerializer.gen.js +0 -99
  83. package/dist/esm/client/core/serverSentEvents.gen.d.ts +0 -71
  84. package/dist/esm/client/core/serverSentEvents.gen.js +0 -135
  85. package/dist/esm/client/core/types.gen.d.ts +0 -78
  86. package/dist/esm/client/core/types.gen.js +0 -2
  87. package/dist/esm/client/core/utils.gen.d.ts +0 -19
  88. package/dist/esm/client/core/utils.gen.js +0 -87
  89. package/dist/esm/client/index.d.ts +0 -2
  90. package/dist/esm/client/index.js +0 -2
  91. package/dist/esm/client/sdk.gen.d.ts +0 -27
  92. package/dist/esm/client/sdk.gen.js +0 -28
  93. package/dist/esm/client/types.gen.d.ts +0 -120
  94. package/dist/esm/client/types.gen.js +0 -2
  95. package/dist/types/client/client/client.gen.d.ts +0 -2
  96. package/dist/types/client/client/index.d.ts +0 -8
  97. package/dist/types/client/client/types.gen.d.ts +0 -99
  98. package/dist/types/client/client/utils.gen.d.ts +0 -40
  99. package/dist/types/client/client.gen.d.ts +0 -12
  100. package/dist/types/client/core/auth.gen.d.ts +0 -18
  101. package/dist/types/client/core/bodySerializer.gen.d.ts +0 -25
  102. package/dist/types/client/core/params.gen.d.ts +0 -43
  103. package/dist/types/client/core/pathSerializer.gen.d.ts +0 -33
  104. package/dist/types/client/core/queryKeySerializer.gen.d.ts +0 -18
  105. package/dist/types/client/core/serverSentEvents.gen.d.ts +0 -71
  106. package/dist/types/client/core/types.gen.d.ts +0 -78
  107. package/dist/types/client/core/utils.gen.d.ts +0 -19
  108. package/dist/types/client/index.d.ts +0 -2
  109. package/dist/types/client/sdk.gen.d.ts +0 -27
  110. package/dist/types/client/types.gen.d.ts +0 -120
@@ -0,0 +1,1217 @@
1
+ import { Database, Model } from '@nozbe/watermelondb';
2
+ import * as _nozbe_watermelondb_Schema_migrations from '@nozbe/watermelondb/Schema/migrations';
3
+ import * as _nozbe_watermelondb_Schema from '@nozbe/watermelondb/Schema';
4
+ import Model$1, { Associations } from '@nozbe/watermelondb/Model';
5
+ import { Class } from '@nozbe/watermelondb/types';
6
+
7
/**
 * ExtraFields contains additional metadata such as provider/model information.
 */
type LlmapiImageGenerationExtraFields = {
    /**
     * ModelRequested is the model identifier that the client asked for.
     */
    model_requested?: string;
    /**
     * Provider is the gateway that serviced this request.
     */
    provider?: string;
    /**
     * RequestType is always "image_generation".
     */
    request_type?: string;
};
/**
 * A single generated image, delivered either as a signed URL or as inline base64.
 */
type LlmapiImageGenerationImage = {
    /**
     * B64JSON is the base64 payload for models that can only return binary.
     */
    b64_json?: string;
    /**
     * URL is the signed URL to download the image.
     */
    url?: string;
};
/**
 * Request body for generating an image.
 */
type LlmapiImageGenerationRequest = {
    /**
     * Model is the model identifier to use for generation (e.g., "gpt-image-1").
     */
    model: string;
    /**
     * Prompt is the text description of the desired image.
     */
    prompt: string;
    /**
     * Quality targets a quality preset (e.g., "auto", "high").
     */
    quality?: string;
    /**
     * ResponseFormat controls how the generated image is returned (e.g., "url" or "b64_json").
     */
    response_format?: string;
    /**
     * Size controls the dimensions of the generated image (e.g., "1024x1024").
     */
    size?: string;
};
/**
 * Response returned by the image-generation endpoint.
 */
type LlmapiImageGenerationResponse = {
    /**
     * Created is the Unix timestamp when the image was generated.
     */
    created?: number;
    /** Additional provider/model metadata for this request. */
    extra_fields?: LlmapiImageGenerationExtraFields;
    /**
     * Images contains the generated images.
     */
    images?: Array<LlmapiImageGenerationImage>;
    /**
     * Model is the model identifier that generated the image.
     */
    model?: string;
    /**
     * Provider is the gateway that produced the image.
     */
    provider?: string;
    /** Token/cost usage for this request, when the provider reports it. */
    usage?: LlmapiImageGenerationUsage;
};
/**
 * Usage documents token usage (when available).
 */
type LlmapiImageGenerationUsage = {
    /**
     * CostMicroUSD is the inference cost for this image generation request.
     */
    cost_micro_usd?: number;
    /**
     * InputTokens is the number of tokens sent in the prompt.
     */
    input_tokens?: number;
    /**
     * OutputTokens is the number of tokens returned by the model.
     */
    output_tokens?: number;
    /**
     * TotalTokens is the total number of tokens consumed.
     */
    total_tokens?: number;
};
97
/**
 * Message is the generated message: a role plus one or more content parts.
 */
type LlmapiMessage = {
    /**
     * Content is the message content, as an ordered list of content parts.
     */
    content?: Array<LlmapiMessageContentPart>;
    /** The message role (see LlmapiRole: system, user, assistant). */
    role?: LlmapiRole;
    /**
     * ToolCalls contains tool/function calls made by the assistant (only for assistant role).
     */
    tool_calls?: Array<LlmapiToolCall>;
};
/**
 * ImageURL is used when Type=image_url.
 */
type LlmapiMessageContentImage = {
    /**
     * Detail is the OpenAI detail hint (auto|low|high).
     */
    detail?: string;
    /**
     * URL is the image URL or data URI.
     */
    url?: string;
};
/**
 * One block of message content: either text or an image reference.
 */
type LlmapiMessageContentPart = {
    /** Image payload, present when Type=image_url. */
    image_url?: LlmapiMessageContentImage;
    /**
     * Text holds the text content when Type=text.
     */
    text?: string;
    /**
     * Type is the block type (`text` or `image_url`).
     */
    type?: string;
};
135
/**
 * Metadata describing a model available through the API.
 */
type LlmapiModel = {
    /** Architecture describes the model's technical capabilities. */
    architecture?: LlmapiModelArchitecture;
    /**
     * CanonicalSlug is the canonical slug for the model.
     */
    canonical_slug?: string;
    /**
     * ContextLength is the maximum context length in tokens.
     */
    context_length?: number;
    /**
     * Created is the Unix timestamp of when the model was created.
     */
    created?: number;
    /**
     * DefaultParameters contains default parameter values.
     */
    default_parameters?: {
        [key: string]: unknown;
    };
    /**
     * Description describes the model and its capabilities.
     */
    description?: string;
    /**
     * HuggingFaceID is the Hugging Face model identifier.
     */
    hugging_face_id?: string;
    /**
     * ID is the model identifier (e.g., "openai/gpt-4").
     */
    id?: string;
    /**
     * MaxInputTokens is the maximum input tokens.
     */
    max_input_tokens?: number;
    /**
     * MaxOutputTokens is the maximum output tokens.
     */
    max_output_tokens?: number;
    /**
     * Modalities is a list of supported modalities (e.g., ["llm", "vision"]).
     */
    modalities?: Array<string>;
    /**
     * Name is the human-readable model name (optional).
     */
    name?: string;
    /**
     * OwnedBy is the organization that owns the model.
     */
    owned_by?: string;
    /** Per-request rate-limiting information. */
    per_request_limits?: LlmapiModelPerRequestLimits;
    /** Pricing structure for using this model. */
    pricing?: LlmapiModelPricing;
    /**
     * SupportedMethods is a list of supported API methods.
     */
    supported_methods?: Array<string>;
    /**
     * SupportedParameters is a list of supported parameter names.
     */
    supported_parameters?: Array<string>;
    /** Configuration details for the primary provider. */
    top_provider?: LlmapiModelTopProvider;
};
/**
 * Architecture describes the model's technical capabilities.
 */
type LlmapiModelArchitecture = {
    instruct_type?: string;
    modality?: string;
    prompt_formatting?: string;
    tokenizer?: string;
};
/**
 * PerRequestLimits contains rate limiting information.
 */
type LlmapiModelPerRequestLimits = {
    completion_tokens?: number;
    prompt_tokens?: number;
};
/**
 * Pricing contains the pricing structure for using this model.
 */
type LlmapiModelPricing = {
    completion?: string;
    image?: string;
    prompt?: string;
    request?: string;
};
/**
 * TopProvider contains configuration details for the primary provider.
 */
type LlmapiModelTopProvider = {
    context_length?: number;
    is_moderated?: boolean;
    max_completion_tokens?: number;
};
232
/**
 * ExtraFields contains additional metadata.
 */
type LlmapiResponseExtraFields = {
    /**
     * Latency is the request latency in milliseconds.
     */
    latency?: number;
    /**
     * ModelRequested is the model that was requested.
     */
    model_requested?: string;
    /**
     * Provider is the LLM provider used (e.g., "openai", "anthropic").
     */
    provider?: string;
    /**
     * RequestType is always "responses".
     */
    request_type?: string;
};
/**
 * A single piece of content inside a response output item.
 */
type LlmapiResponseOutputContent = {
    /**
     * Text is the text content.
     */
    text?: string;
    /**
     * Type is the content type (e.g., "output_text").
     */
    type?: string;
};
/**
 * One item in a response's output array (OpenAI Responses API format).
 */
type LlmapiResponseOutputItem = {
    /**
     * Arguments is the function arguments for function_call types.
     */
    arguments?: string;
    /**
     * CallID is the call ID for function_call types.
     */
    call_id?: string;
    /**
     * Content is the content array for message and reasoning types.
     */
    content?: Array<LlmapiResponseOutputContent>;
    /**
     * ID is the unique identifier for this output item.
     */
    id?: string;
    /**
     * Name is the function name for function_call types.
     */
    name?: string;
    /**
     * Role is the role for message types (e.g., "assistant").
     */
    role?: string;
    /**
     * Status is the status of this output item (e.g., "completed").
     */
    status?: string;
    /**
     * Summary is the reasoning summary for reasoning types.
     */
    summary?: Array<LlmapiResponseOutputContent>;
    /**
     * Type is the output item type (e.g., "message", "function_call", "reasoning").
     */
    type?: string;
};
/**
 * Reasoning configures reasoning for o-series and other reasoning models.
 */
type LlmapiResponseReasoning = {
    /**
     * Effort controls reasoning effort: "low", "medium", or "high".
     */
    effort?: string;
    /**
     * Summary controls reasoning summary: "auto", "concise", or "detailed".
     */
    summary?: string;
};
/**
 * A response from the Responses API.
 */
type LlmapiResponseResponse = {
    /**
     * Created is the Unix timestamp of creation (created_at in OpenAI format).
     */
    created_at?: number;
    /** Additional request metadata (latency, provider, requested model). */
    extra_fields?: LlmapiResponseExtraFields;
    /**
     * ID is the unique response identifier.
     */
    id?: string;
    /**
     * Model is the model used for generation.
     */
    model?: string;
    /**
     * Object is the response type (e.g., "response").
     */
    object?: string;
    /**
     * Output is the array of output items (OpenAI Responses API format).
     */
    output?: Array<LlmapiResponseOutputItem>;
    /** Token usage information for this response. */
    usage?: LlmapiResponseUsage;
};
/**
 * Usage contains token usage information.
 */
type LlmapiResponseUsage = {
    /**
     * CompletionTokens is the number of tokens in the completion.
     */
    completion_tokens?: number;
    /**
     * CostMicroUSD is the cost of this response in micro-dollars (USD × 1,000,000).
     */
    cost_micro_usd?: number;
    /**
     * PromptTokens is the number of tokens in the prompt.
     */
    prompt_tokens?: number;
    /**
     * TotalTokens is the total number of tokens used.
     */
    total_tokens?: number;
};
/**
 * Role is the message role (system, user, assistant).
 */
type LlmapiRole = string;
363
/**
 * Thinking configures extended thinking for Anthropic models.
 */
type LlmapiThinkingOptions = {
    /**
     * BudgetTokens is the token budget for thinking.
     */
    budget_tokens?: number;
    /**
     * Type indicates if thinking is enabled: "enabled" or "disabled".
     */
    type?: string;
};
/**
 * A tool definition made available to the model.
 */
type LlmapiTool = {
    /** Function definition, present when Type is "function". */
    function?: LlmapiToolFunction;
    /**
     * Type is the tool type (function, code_interpreter, file_search, web_search).
     */
    type?: string;
};
/**
 * A tool/function call emitted by the assistant.
 */
type LlmapiToolCall = {
    /** Function contains the function call details. */
    function?: LlmapiToolCallFunction;
    /**
     * ID is the unique identifier for this tool call.
     */
    id?: string;
    /**
     * Type is the type of tool call (always "function" for now).
     */
    type?: string;
};
/**
 * Function contains the function call details.
 */
type LlmapiToolCallFunction = {
    /**
     * Arguments is the JSON string of arguments to pass to the function.
     */
    arguments?: string;
    /**
     * Name is the name of the function to call.
     */
    name?: string;
};
/**
 * Function is the function definition (when Type is "function").
 */
type LlmapiToolFunction = {
    /**
     * Arguments is the function arguments schema (JSON object).
     */
    arguments?: {
        [key: string]: unknown;
    };
    /**
     * Name is the function name.
     */
    name?: string;
};
422
+
423
/**
 * Responses API options that can be passed to sendMessage.
 */
type ResponsesApiOptions = {
    /**
     * Whether to store the response server-side.
     * When true, the response can be retrieved later using the response ID.
     */
    store?: boolean;
    /**
     * ID of a previous response to continue from.
     * Enables multi-turn conversations without resending full history.
     */
    previousResponseId?: string;
    /**
     * Conversation ID for grouping related responses.
     */
    conversation?: string;
    /**
     * Controls randomness in the response (0.0 to 2.0).
     * Lower values make output more deterministic.
     */
    temperature?: number;
    /**
     * Maximum number of tokens to generate in the response.
     */
    maxOutputTokens?: number;
    /**
     * Array of tool definitions available to the model.
     */
    tools?: LlmapiTool[];
    /**
     * Controls which tool to use: "auto", "any", "none", "required", or a specific tool name.
     */
    toolChoice?: string;
    /**
     * Reasoning configuration for o-series and other reasoning models.
     * Controls reasoning effort and summary output.
     */
    reasoning?: LlmapiResponseReasoning;
    /**
     * Extended thinking configuration for Anthropic models (Claude).
     * Enables the model to think through complex problems step by step.
     */
    thinking?: LlmapiThinkingOptions;
};
/**
 * Base arguments for sending a message.
 */
type BaseSendMessageArgs = ResponsesApiOptions & {
    /** The conversation history to send. */
    messages: LlmapiMessage[];
    /** Optional model identifier to use for this request. */
    model?: string;
    /**
     * Per-request callback for data chunks. Called in addition to the global
     * `onData` callback if provided in `useChat` options.
     *
     * @param chunk - The content delta from the current chunk
     */
    onData?: (chunk: string) => void;
};
/**
 * Base result type for sendMessage: either data or an error string, never both.
 */
type BaseSendMessageResult = {
    data: LlmapiResponseResponse;
    error: null;
} | {
    data: null;
    error: string;
};
/**
 * Base options for useChat hook.
 */
type BaseUseChatOptions = {
    /** Async function that returns an authentication token, or null. */
    getToken?: () => Promise<string | null>;
    /** Optional base URL for API requests. */
    baseUrl?: string;
    /**
     * Callback function to be called when a new data chunk is received.
     */
    onData?: (chunk: string) => void;
    /**
     * Callback function to be called when thinking/reasoning content is received.
     * This is called with delta chunks as the model "thinks" through a problem.
     */
    onThinking?: (chunk: string) => void;
    /**
     * Callback function to be called when the chat completion finishes successfully.
     */
    onFinish?: (response: LlmapiResponseResponse) => void;
    /**
     * Callback function to be called when an unexpected error is encountered.
     *
     * **Note:** This callback is NOT called for aborted requests (via `stop()` or
     * component unmount). Aborts are intentional actions and are not considered
     * errors. To detect aborts, check the `error` field in the `sendMessage` result:
     * `result.error === "Request aborted"`.
     *
     * @param error - The error that occurred (never an AbortError)
     */
    onError?: (error: Error) => void;
};
/**
 * Base result type for useChat hook.
 */
type BaseUseChatResult = {
    /** True while a request is in progress. */
    isLoading: boolean;
    /**
     * Aborts the current streaming request if one is in progress.
     *
     * When a request is aborted, `sendMessage` will return with
     * `{ data: null, error: "Request aborted" }`. The `onError` callback
     * will NOT be called, as aborts are intentional actions, not errors.
     */
    stop: () => void;
};
538
+
539
/** Arguments accepted by this hook's sendMessage. */
type SendMessageArgs = BaseSendMessageArgs & {
    /**
     * Per-request callback for thinking/reasoning chunks.
     */
    onThinking?: (chunk: string) => void;
};
/** Result of sendMessage (data or error, never both). */
type SendMessageResult = BaseSendMessageResult;
/** Options accepted by useChat. */
type UseChatOptions = BaseUseChatOptions;
/** Value returned by useChat. */
type UseChatResult = BaseUseChatResult & {
    sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
};
/**
 * A React hook for managing chat completions with authentication.
 *
 * **React Native version** - This is a lightweight version that only supports
 * API-based chat completions. Local chat and client-side tools are not available
 * in React Native.
 *
 * @param options - Optional configuration object
 * @param options.getToken - An async function that returns an authentication token.
 * @param options.baseUrl - Optional base URL for the API requests.
 * @param options.onData - Callback function to be called when a new data chunk is received.
 * @param options.onFinish - Callback function to be called when the chat completion finishes successfully.
 * @param options.onError - Callback function to be called when an unexpected error is encountered.
 *
 * @returns An object containing:
 * - `isLoading`: A boolean indicating whether a request is currently in progress
 * - `sendMessage`: An async function to send chat messages
 * - `stop`: A function to abort the current request
 *
 * @category Hooks
 *
 * @example
 * ```tsx
 * const { isLoading, sendMessage, stop } = useChat({
 *   getToken: async () => await getAuthToken(),
 *   onFinish: (response) => console.log("Chat finished:", response),
 *   onError: (error) => console.error("Chat error:", error)
 * });
 *
 * const handleSend = async () => {
 *   const result = await sendMessage({
 *     messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
 *     model: 'gpt-4o-mini'
 *   });
 * };
 * ```
 */
declare function useChat(options?: UseChatOptions): UseChatResult;
588
+
589
/** WatermelonDB app schema used for local chat storage. */
declare const chatStorageSchema: Readonly<{
    version: _nozbe_watermelondb_Schema.SchemaVersion;
    tables: _nozbe_watermelondb_Schema.TableMap;
    unsafeSql?: (_: string, __: _nozbe_watermelondb_Schema.AppSchemaUnsafeSqlKind) => string;
}>;
/** WatermelonDB migrations accompanying `chatStorageSchema`. */
declare const chatStorageMigrations: Readonly<{
    validated: true;
    minVersion: _nozbe_watermelondb_Schema.SchemaVersion;
    maxVersion: _nozbe_watermelondb_Schema.SchemaVersion;
    sortedMigrations: _nozbe_watermelondb_Schema_migrations.Migration[];
}>;
600
+
601
/** A single extracted memory fact (extraction-side shape). */
interface MemoryItem$1 {
    // Category of the fact.
    type: "identity" | "preference" | "project" | "skill" | "constraint";
    // Namespace the fact belongs to.
    namespace: string;
    // Key identifying the fact within its namespace.
    key: string;
    // The fact's value.
    value: string;
    // Verbatim evidence the fact was extracted from.
    rawEvidence: string;
    // Extraction confidence score.
    confidence: number;
    // Whether the fact contains personally identifiable information.
    pii: boolean;
}
/** Result of running memory extraction over a set of messages. */
interface MemoryExtractionResult {
    items: MemoryItem$1[];
}

/** Category of a stored memory. */
type MemoryType = "identity" | "preference" | "project" | "skill" | "constraint";
/** A memory fact prior to storage. */
interface MemoryItem {
    type: MemoryType;
    namespace: string;
    key: string;
    value: string;
    rawEvidence: string;
    confidence: number;
    pii: boolean;
}
/** Options for creating a memory, optionally with a precomputed embedding. */
interface CreateMemoryOptions extends MemoryItem {
    embedding?: number[];
    embeddingModel?: string;
}
/** Partial update applied to an existing memory. */
type UpdateMemoryOptions = Partial<CreateMemoryOptions>;
/** A memory as persisted in the local database. */
interface StoredMemory extends MemoryItem {
    uniqueId: string;
    compositeKey: string;
    uniqueKey: string;
    createdAt: Date;
    updatedAt: Date;
    embedding?: number[];
    embeddingModel?: string;
    isDeleted: boolean;
}
/** A stored memory paired with a similarity score from a search. */
interface StoredMemoryWithSimilarity extends StoredMemory {
    similarity: number;
}
/** Options accepted by the memory-storage hook. */
interface BaseUseMemoryStorageOptions {
    // WatermelonDB database instance to persist into.
    database: Database;
    completionsModel?: string;
    embeddingModel?: string | null;
    generateEmbeddings?: boolean;
    // Called after facts have been extracted from messages.
    onFactsExtracted?: (facts: MemoryExtractionResult) => void;
    getToken?: () => Promise<string | null>;
    baseUrl?: string;
    /** Wallet address for encryption (optional - encryption disabled if not provided) */
    walletAddress?: string | null;
    /** Function to request encryption key (optional - encryption disabled if not provided) */
    requestEncryptionKey?: (address: string) => Promise<void>;
    /** Function to sign message for migration (optional - migration disabled if not provided) */
    signMessage?: (message: string) => Promise<string>;
}
/** API surface returned by the memory-storage hook. */
interface BaseUseMemoryStorageResult {
    memories: StoredMemory[];
    refreshMemories: () => Promise<void>;
    extractMemoriesFromMessage: (options: {
        messages: Array<{
            role: string;
            content: string;
        }>;
        model?: string;
    }) => Promise<MemoryExtractionResult | null>;
    searchMemories: (query: string, limit?: number, minSimilarity?: number) => Promise<StoredMemoryWithSimilarity[]>;
    fetchAllMemories: () => Promise<StoredMemory[]>;
    fetchMemoriesByNamespace: (namespace: string) => Promise<StoredMemory[]>;
    fetchMemoriesByKey: (namespace: string, key: string) => Promise<StoredMemory[]>;
    getMemoryById: (id: string) => Promise<StoredMemory | null>;
    saveMemory: (memory: CreateMemoryOptions) => Promise<StoredMemory>;
    saveMemories: (memories: CreateMemoryOptions[]) => Promise<StoredMemory[]>;
    updateMemory: (id: string, updates: UpdateMemoryOptions) => Promise<StoredMemory | null>;
    removeMemory: (namespace: string, key: string, value: string) => Promise<void>;
    removeMemoryById: (id: string) => Promise<void>;
    removeMemories: (namespace: string, key: string) => Promise<void>;
    clearMemories: () => Promise<void>;
}
/** Builds a composite key from a namespace and key (exact format not visible here — see implementation). */
declare function generateCompositeKey(namespace: string, key: string): string;
/** Builds a unique key from namespace, key, and value (exact format not visible here — see implementation). */
declare function generateUniqueKey(namespace: string, key: string, value: string): string;
682
+
683
/** Role of a chat participant. */
type ChatRole = "user" | "assistant" | "system";
/** Metadata describing a file attached to a message. */
interface FileMetadata {
    id: string;
    name: string;
    // MIME type or file kind — not constrained here; confirm against producer.
    type: string;
    size: number;
    url?: string;
}
/** Token and cost accounting for a chat completion. */
interface ChatCompletionUsage {
    promptTokens?: number;
    completionTokens?: number;
    totalTokens?: number;
    costMicroUsd?: number;
}
/** A source reference (title/URL/snippet) attached to a message. */
interface SearchSource {
    title?: string;
    url?: string;
    snippet?: string;
    date?: string;
}
/** A chat message as persisted in the local database. */
interface StoredMessage {
    uniqueId: string;
    messageId: number;
    conversationId: string;
    role: ChatRole;
    content: string;
    model?: string;
    files?: FileMetadata[];
    createdAt: Date;
    updatedAt: Date;
    // Embedding vector for similarity search, if one was generated.
    vector?: number[];
    embeddingModel?: string;
    usage?: ChatCompletionUsage;
    sources?: SearchSource[];
    responseDuration?: number;
    wasStopped?: boolean;
    /** If set, indicates the message failed with this error */
    error?: string;
    thoughtProcess?: ActivityPhase[];
    /** Reasoning/thinking content from models that support extended thinking */
    thinking?: string;
}
/** One phase in an assistant's recorded activity/thought timeline. */
interface ActivityPhase {
    id: string;
    label: string;
    timestamp: number;
    status: "pending" | "active" | "completed";
    data?: StoredMemory[];
}
/** A conversation as persisted in the local database. */
interface StoredConversation {
    uniqueId: string;
    conversationId: string;
    title: string;
    createdAt: Date;
    updatedAt: Date;
    isDeleted: boolean;
}
/** A stored message paired with a similarity score from a search. */
interface StoredMessageWithSimilarity extends StoredMessage {
    similarity: number;
}
/** Options for creating a new message. */
interface CreateMessageOptions {
    conversationId: string;
    role: ChatRole;
    content: string;
    model?: string;
    files?: FileMetadata[];
    usage?: ChatCompletionUsage;
    sources?: SearchSource[];
    responseDuration?: number;
    vector?: number[];
    embeddingModel?: string;
    wasStopped?: boolean;
    /** If set, indicates the message failed with this error */
    error?: string;
    thoughtProcess?: ActivityPhase[];
    /** Reasoning/thinking content from models that support extended thinking */
    thinking?: string;
}
/** Options for creating a new conversation. */
interface CreateConversationOptions {
    conversationId?: string;
    title?: string;
}
/** Fields that may be updated on an existing message; `null` where clearing is supported. */
interface UpdateMessageOptions {
    content?: string;
    model?: string;
    files?: FileMetadata[];
    usage?: ChatCompletionUsage;
    sources?: SearchSource[];
    responseDuration?: number;
    vector?: number[];
    embeddingModel?: string;
    wasStopped?: boolean;
    error?: string | null;
    thoughtProcess?: ActivityPhase[];
    /** Reasoning/thinking content from models that support extended thinking */
    thinking?: string | null;
}
/** Options accepted by the chat-storage hook. */
interface BaseUseChatStorageOptions {
    // WatermelonDB database instance to persist into.
    database: Database;
    conversationId?: string;
    autoCreateConversation?: boolean;
    defaultConversationTitle?: string;
    getToken?: () => Promise<string | null>;
    baseUrl?: string;
    onData?: (chunk: string) => void;
    onFinish?: (response: LlmapiResponseResponse) => void;
    onError?: (error: Error) => void;
    /** Wallet address for encryption (optional - encryption disabled if not provided) */
    walletAddress?: string | null;
    /** Function to request encryption key (optional - encryption disabled if not provided) */
    requestEncryptionKey?: (address: string) => Promise<void>;
    /** Function to sign message for migration (optional - required for migrating old encrypted data) */
    signMessage?: (message: string) => Promise<string>;
}
/** Arguments for sending a message that is also persisted to storage. */
interface BaseSendMessageWithStorageArgs {
    content: string;
    model?: string;
    messages?: LlmapiMessage[];
    includeHistory?: boolean;
    maxHistoryMessages?: number;
    files?: FileMetadata[];
    // Per-request callback for streamed data chunks.
    onData?: (chunk: string) => void;
    memoryContext?: string;
    searchContext?: string;
    sources?: SearchSource[];
    thoughtProcess?: ActivityPhase[];
    /**
     * Whether to store the response server-side.
     * When true, the response can be retrieved later using the response ID.
     */
    store?: boolean;
    /**
     * ID of a previous response to continue from.
     * Enables multi-turn conversations without resending full history.
     */
    previousResponseId?: string;
    /**
     * Conversation ID for grouping related responses on the server.
     */
    serverConversation?: string;
    /**
     * Controls randomness in the response (0.0 to 2.0).
     */
    temperature?: number;
    /**
     * Maximum number of tokens to generate in the response.
     */
    maxOutputTokens?: number;
    /**
     * Array of tool definitions available to the model.
     */
    tools?: LlmapiTool[];
    /**
     * Controls which tool to use: "auto", "any", "none", "required", or a specific tool name.
     */
    toolChoice?: string;
    /**
     * Reasoning configuration for o-series and other reasoning models.
     * Controls reasoning effort and summary output.
     */
    reasoning?: LlmapiResponseReasoning;
    /**
     * Extended thinking configuration for Anthropic models (Claude).
     * Enables the model to think through complex problems step by step.
     */
    thinking?: LlmapiThinkingOptions;
    /**
     * Per-request callback for thinking/reasoning chunks.
     * Called with delta chunks as the model "thinks" through a problem.
     */
    onThinking?: (chunk: string) => void;
}
/** Success result: API data plus the persisted user and assistant messages. */
interface BaseSendMessageSuccessResult {
    data: LlmapiResponseResponse;
    error: null;
    userMessage: StoredMessage;
    assistantMessage: StoredMessage;
}
/** Error result: error string; the user message may already have been persisted. */
interface BaseSendMessageErrorResult {
    data: null;
    error: string;
    userMessage?: StoredMessage;
    assistantMessage?: undefined;
}
/** Result of sendMessage with storage, discriminated by the `error` field. */
type BaseSendMessageWithStorageResult = BaseSendMessageSuccessResult | BaseSendMessageErrorResult;
868
+ interface BaseUseChatStorageResult {
869
+ isLoading: boolean;
870
+ stop: () => void;
871
+ conversationId: string | null;
872
+ setConversationId: (id: string | null) => void;
873
+ createConversation: (options?: CreateConversationOptions) => Promise<StoredConversation>;
874
+ getConversation: (id: string) => Promise<StoredConversation | null>;
875
+ getConversations: () => Promise<StoredConversation[]>;
876
+ updateConversationTitle: (id: string, title: string) => Promise<boolean>;
877
+ deleteConversation: (id: string) => Promise<boolean>;
878
+ getMessages: (conversationId: string) => Promise<StoredMessage[]>;
879
+ getMessageCount: (conversationId: string) => Promise<number>;
880
+ clearMessages: (conversationId: string) => Promise<void>;
881
+ }
882
+ declare function generateConversationId(): string;
883
+
884
+ declare class Message extends Model {
885
+ static table: string;
886
+ static associations: Associations;
887
+ messageId: number;
888
+ conversationId: string;
889
+ role: ChatRole;
890
+ content: string;
891
+ model?: string;
892
+ files?: FileMetadata[];
893
+ createdAt: Date;
894
+ updatedAt: Date;
895
+ vector?: number[];
896
+ embeddingModel?: string;
897
+ usage?: ChatCompletionUsage;
898
+ sources?: SearchSource[];
899
+ responseDuration?: number;
900
+ wasStopped?: boolean;
901
+ error?: string;
902
+ thoughtProcess?: ActivityPhase[];
903
+ thinking?: string;
904
+ }
905
+ declare class Conversation extends Model {
906
+ static table: string;
907
+ static associations: Associations;
908
+ conversationId: string;
909
+ title: string;
910
+ createdAt: Date;
911
+ updatedAt: Date;
912
+ isDeleted: boolean;
913
+ }
914
+
915
+ /**
916
+ * Options for useChatStorage hook (Expo version)
917
+ *
918
+ * Uses the base options without React-specific features (no local chat, no tools).
919
+ */
920
+ type UseChatStorageOptions = BaseUseChatStorageOptions;
921
+ /**
922
+ * Arguments for sendMessage with storage (Expo version)
923
+ *
924
+ * Uses the base arguments without React-specific features (no runTools, no headers).
925
+ */
926
+ type SendMessageWithStorageArgs = BaseSendMessageWithStorageArgs;
927
+ /**
928
+ * Result from sendMessage with storage (Expo version)
929
+ *
930
+ * Uses the base result without tool execution information.
931
+ */
932
+ type SendMessageWithStorageResult = BaseSendMessageWithStorageResult;
933
+ /**
934
+ * Result returned by useChatStorage hook (Expo version)
935
+ *
936
+ * Extends base result with Expo-specific sendMessage signature.
937
+ */
938
+ interface UseChatStorageResult extends BaseUseChatStorageResult {
939
+ /** Send a message and automatically store it (Expo version) */
940
+ sendMessage: (args: SendMessageWithStorageArgs) => Promise<SendMessageWithStorageResult>;
941
+ /** Extract all links from assistant message content as SearchSource objects */
942
+ extractSourcesFromAssistantMessage: (assistantMessage: {
943
+ content: string;
944
+ sources?: SearchSource[];
945
+ }) => SearchSource[];
946
+ /** Update a message's fields (content, embedding, files, etc). Returns updated message or null if not found. */
947
+ updateMessage: (uniqueId: string, options: UpdateMessageOptions) => Promise<StoredMessage | null>;
948
+ }
949
+ /**
950
+ * A React hook that wraps useChat with automatic message persistence using WatermelonDB.
951
+ *
952
+ * **Expo/React Native version** - This is a lightweight version that only supports
953
+ * API-based chat completions. Local chat and client-side tools are not available.
954
+ *
955
+ * @param options - Configuration options
956
+ * @returns An object containing chat state, methods, and storage operations
957
+ *
958
+ * @example
959
+ * ```tsx
960
+ * import { Database } from '@nozbe/watermelondb';
961
+ * import { useChatStorage } from '@reverbia/sdk/expo';
962
+ *
963
+ * function ChatScreen({ database }: { database: Database }) {
964
+ * const {
965
+ * isLoading,
966
+ * sendMessage,
967
+ * conversationId,
968
+ * getMessages,
969
+ * } = useChatStorage({
970
+ * database,
971
+ * getToken: async () => getAuthToken(),
972
+ * onData: (chunk) => setResponse((prev) => prev + chunk),
973
+ * });
974
+ *
975
+ * const handleSend = async () => {
976
+ * const result = await sendMessage({
977
+ * content: 'Hello!',
978
+ * model: 'gpt-4o-mini',
979
+ * includeHistory: true,
980
+ * });
981
+ * };
982
+ *
983
+ * return (
984
+ * <View>
985
+ * <Button onPress={handleSend} disabled={isLoading} title="Send" />
986
+ * </View>
987
+ * );
988
+ * }
989
+ * ```
990
+ *
991
+ * @category Hooks
992
+ */
993
+ declare function useChatStorage(options: UseChatStorageOptions): UseChatStorageResult;
994
+
995
+ type UseImageGenerationOptions = {
996
+ /**
997
+ * Custom function to get auth token for API calls
998
+ */
999
+ getToken?: () => Promise<string | null>;
1000
+ /**
1001
+ * Optional base URL for the API requests.
1002
+ */
1003
+ baseUrl?: string;
1004
+ /**
1005
+ * Callback function to be called when the generation finishes successfully.
1006
+ */
1007
+ onFinish?: (response: LlmapiImageGenerationResponse) => void;
1008
+ /**
1009
+ * Callback function to be called when an unexpected error is encountered.
1010
+ */
1011
+ onError?: (error: Error) => void;
1012
+ };
1013
+ type GenerateImageArgs = LlmapiImageGenerationRequest;
1014
+ type GenerateImageResult = {
1015
+ data: LlmapiImageGenerationResponse;
1016
+ error: null;
1017
+ } | {
1018
+ data: null;
1019
+ error: string;
1020
+ };
1021
+ type UseImageGenerationResult = {
1022
+ isLoading: boolean;
1023
+ generateImage: (args: GenerateImageArgs) => Promise<GenerateImageResult>;
1024
+ stop: () => void;
1025
+ };
1026
+ /**
1027
+ * React hook for generating images using the LLM API.
1028
+ * @category Hooks
1029
+ */
1030
+ declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
1031
+
1032
+ type UseModelsOptions = {
1033
+ /**
1034
+ * Custom function to get auth token for API calls
1035
+ */
1036
+ getToken?: () => Promise<string | null>;
1037
+ /**
1038
+ * Optional base URL for the API requests.
1039
+ */
1040
+ baseUrl?: string;
1041
+ /**
1042
+ * Optional filter for specific provider (e.g. "openai")
1043
+ */
1044
+ provider?: string;
1045
+ /**
1046
+ * Whether to fetch models automatically on mount (default: true)
1047
+ */
1048
+ autoFetch?: boolean;
1049
+ };
1050
+ type UseModelsResult = {
1051
+ models: LlmapiModel[];
1052
+ isLoading: boolean;
1053
+ error: Error | null;
1054
+ refetch: () => Promise<void>;
1055
+ };
1056
+ /**
1057
+ * React hook for fetching available LLM models.
1058
+ * Automatically fetches all available models.
1059
+ * @category Hooks
1060
+ */
1061
+ declare function useModels(options?: UseModelsOptions): UseModelsResult;
1062
+
1063
+ declare const memoryStorageSchema: Readonly<{
1064
+ version: _nozbe_watermelondb_Schema.SchemaVersion;
1065
+ tables: _nozbe_watermelondb_Schema.TableMap;
1066
+ unsafeSql?: (_: string, __: _nozbe_watermelondb_Schema.AppSchemaUnsafeSqlKind) => string;
1067
+ }>;
1068
+
1069
+ declare class Memory extends Model {
1070
+ static table: string;
1071
+ type: MemoryType;
1072
+ namespace: string;
1073
+ key: string;
1074
+ value: string;
1075
+ rawEvidence: string;
1076
+ confidence: number;
1077
+ pii: boolean;
1078
+ compositeKey: string;
1079
+ uniqueKey: string;
1080
+ createdAt: Date;
1081
+ updatedAt: Date;
1082
+ embedding?: number[];
1083
+ embeddingModel?: string;
1084
+ isDeleted: boolean;
1085
+ }
1086
+
1087
+ /**
1088
+ * Options for useMemoryStorage hook (Expo version)
1089
+ *
1090
+ * Uses the base options.
1091
+ */
1092
+ type UseMemoryStorageOptions = BaseUseMemoryStorageOptions;
1093
+ /**
1094
+ * Result returned by useMemoryStorage hook (Expo version)
1095
+ *
1096
+ * Uses the base result.
1097
+ */
1098
+ type UseMemoryStorageResult = BaseUseMemoryStorageResult;
1099
+ /**
1100
+ * A React hook that wraps useMemory with automatic memory persistence using WatermelonDB.
1101
+ *
1102
+ * **Expo/React Native version** - This is a lightweight version that only supports
1103
+ * API-based embeddings. Local embeddings require web APIs not available in React Native.
1104
+ *
1105
+ * @param options - Configuration options
1106
+ * @returns An object containing memory state, methods, and storage operations
1107
+ *
1108
+ * @example
1109
+ * ```tsx
1110
+ * import { Database } from '@nozbe/watermelondb';
1111
+ * import { useMemoryStorage } from '@reverbia/sdk/expo';
1112
+ *
1113
+ * function MemoryScreen({ database }: { database: Database }) {
1114
+ * const {
1115
+ * memories,
1116
+ * extractMemoriesFromMessage,
1117
+ * searchMemories,
1118
+ * } = useMemoryStorage({
1119
+ * database,
1120
+ * getToken: async () => getAuthToken(),
1121
+ * });
1122
+ *
1123
+ * const handleExtract = async () => {
1124
+ * await extractMemoriesFromMessage({
1125
+ * messages: [{ role: 'user', content: 'My name is John' }],
1126
+ * });
1127
+ * };
1128
+ *
1129
+ * return (
1130
+ * <View>
1131
+ * <Button onPress={handleExtract} title="Extract" />
1132
+ * <Text>Memories: {memories.length}</Text>
1133
+ * </View>
1134
+ * );
1135
+ * }
1136
+ * ```
1137
+ *
1138
+ * @category Hooks
1139
+ */
1140
+ declare function useMemoryStorage(options: UseMemoryStorageOptions): UseMemoryStorageResult;
1141
+
1142
+ /**
1143
+ * Combined WatermelonDB schema for all SDK storage modules.
1144
+ *
1145
+ * This unified schema includes all tables needed by the SDK:
1146
+ * - `history`: Chat message storage with embeddings and metadata
1147
+ * - `conversations`: Conversation metadata and organization
1148
+ * - `memories`: Persistent memory storage with semantic search
1149
+ * - `modelPreferences`: User model preferences and settings
1150
+ *
1151
+ * @example
1152
+ * ```typescript
1153
+ * import { Database } from '@nozbe/watermelondb';
1154
+ * import LokiJSAdapter from '@nozbe/watermelondb/adapters/lokijs';
1155
+ * import { sdkSchema, sdkMigrations, sdkModelClasses } from '@reverbia/sdk/react';
1156
+ *
1157
+ * const adapter = new LokiJSAdapter({
1158
+ * schema: sdkSchema,
1159
+ * migrations: sdkMigrations,
1160
+ * dbName: 'my-app-db',
1161
+ * useWebWorker: false,
1162
+ * useIncrementalIndexedDB: true,
1163
+ * });
1164
+ *
1165
+ * const database = new Database({
1166
+ * adapter,
1167
+ * modelClasses: sdkModelClasses,
1168
+ * });
1169
+ * ```
1170
+ */
1171
+ declare const sdkSchema: Readonly<{
1172
+ version: _nozbe_watermelondb_Schema.SchemaVersion;
1173
+ tables: _nozbe_watermelondb_Schema.TableMap;
1174
+ unsafeSql?: (_: string, __: _nozbe_watermelondb_Schema.AppSchemaUnsafeSqlKind) => string;
1175
+ }>;
1176
+ /**
1177
+ * Combined migrations for all SDK storage modules.
1178
+ *
1179
+ * These migrations handle database schema upgrades from any previous version
1180
+ * to the current version. The SDK manages all migration logic internally,
1181
+ * so consumer apps don't need to handle version arithmetic or migration merging.
1182
+ *
1183
+ * **Minimum supported version: v2**
1184
+ * Migrations from v1 are not supported. Databases at v1 require a fresh install.
1185
+ *
1186
+ * Migration history:
1187
+ * - v2 → v3: Added `was_stopped` column to history table
1188
+ * - v3 → v4: Added `modelPreferences` table for settings storage
1189
+ * - v4 → v5: Added `error` column to history table for error persistence
1190
+ * - v5 → v6: Added `thought_process` column to history table for activity tracking
1191
+ */
1192
+ declare const sdkMigrations: Readonly<{
1193
+ validated: true;
1194
+ minVersion: _nozbe_watermelondb_Schema.SchemaVersion;
1195
+ maxVersion: _nozbe_watermelondb_Schema.SchemaVersion;
1196
+ sortedMigrations: _nozbe_watermelondb_Schema_migrations.Migration[];
1197
+ }>;
1198
+ /**
1199
+ * Model classes to register with the WatermelonDB database.
1200
+ *
1201
+ * Pass this array directly to the `modelClasses` option when creating
1202
+ * your Database instance.
1203
+ *
1204
+ * @example
1205
+ * ```typescript
1206
+ * import { Database } from '@nozbe/watermelondb';
1207
+ * import { sdkSchema, sdkMigrations, sdkModelClasses } from '@reverbia/sdk/react';
1208
+ *
1209
+ * const database = new Database({
1210
+ * adapter,
1211
+ * modelClasses: sdkModelClasses,
1212
+ * });
1213
+ * ```
1214
+ */
1215
+ declare const sdkModelClasses: Class<Model$1>[];
1216
+
1217
+ export { Conversation as ChatConversation, Message as ChatMessage, type ChatRole, type CreateConversationOptions, type CreateMemoryOptions, type CreateMessageOptions, type FileMetadata, type MemoryItem, type MemoryType, type SearchSource, type SendMessageWithStorageArgs, type SendMessageWithStorageResult, type ChatCompletionUsage as StoredChatCompletionUsage, type StoredConversation, type StoredMemory, Memory as StoredMemoryModel, type StoredMemoryWithSimilarity, type StoredMessage, type StoredMessageWithSimilarity, type UpdateMemoryOptions, type UseChatStorageOptions, type UseChatStorageResult, type UseMemoryStorageOptions, type UseMemoryStorageResult, type UseModelsOptions, type UseModelsResult, chatStorageMigrations, chatStorageSchema, generateCompositeKey, generateConversationId, generateUniqueKey, memoryStorageSchema, sdkMigrations, sdkModelClasses, sdkSchema, useChat, useChatStorage, useImageGeneration, useMemoryStorage, useModels };