@reverbia/sdk 1.0.0-next.20251120124145 → 1.0.0-next.20251121094738

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
@@ -136,4 +136,109 @@ type UseChatResult = {
  */
 declare function useChat(options?: UseChatOptions): UseChatResult;
 
-export { useChat };
+interface MemoryItem {
+    type: "identity" | "preference" | "project" | "skill" | "constraint";
+    namespace: string;
+    key: string;
+    value: string;
+    rawEvidence: string;
+    confidence: number;
+    pii: boolean;
+}
+interface MemoryExtractionResult {
+    items: MemoryItem[];
+}
+
+/**
+ * Extended MemoryItem with database fields
+ */
+interface StoredMemoryItem extends MemoryItem {
+    id?: number;
+    createdAt: number;
+    updatedAt: number;
+    compositeKey: string;
+    uniqueKey: string;
+    embedding?: number[];
+    embeddingModel?: string;
+}
+
+type UseMemoryOptions = {
+    /**
+     * The model to use for fact extraction (default: "openai/gpt-4o")
+     */
+    memoryModel?: string;
+    /**
+     * The model to use for generating embeddings (default: "openai/text-embedding-3-small")
+     * Set to null/undefined to disable embedding generation
+     */
+    embeddingModel?: string | null;
+    /**
+     * Whether to automatically generate embeddings for extracted memories (default: true)
+     */
+    generateEmbeddings?: boolean;
+    /**
+     * Callback when facts are extracted
+     */
+    onFactsExtracted?: (facts: MemoryExtractionResult) => void;
+    /**
+     * Custom function to get auth token for API calls
+     */
+    getToken?: () => Promise<string | null>;
+};
+type UseMemoryResult = {
+    extractMemoriesFromMessage: (options: {
+        messages: Array<{
+            role: string;
+            content: string;
+        }>;
+        model: string;
+    }) => Promise<MemoryExtractionResult | null>;
+    /**
+     * Search for similar memories using semantic search
+     * @param query The text query to search for
+     * @param limit Maximum number of results (default: 10)
+     * @param minSimilarity Minimum similarity threshold 0-1 (default: 0.6)
+     * Note: Embedding similarity scores are typically lower than expected.
+     * A score of 0.6-0.7 is usually a good match, 0.5-0.6 is moderate.
+     * @returns Array of memories with similarity scores, sorted by relevance
+     */
+    searchMemories: (query: string, limit?: number, minSimilarity?: number) => Promise<Array<StoredMemoryItem & {
+        similarity: number;
+    }>>;
+};
+/**
+ * Standalone hook for extracting memories from user messages.
+ * Can be composed with other hooks like useChat, useFiles, etc.
+ */
+declare function useMemory(options?: UseMemoryOptions): UseMemoryResult;
+
+/**
+ * Format memories into a context string that can be included in chat messages
+ * @param memories Array of memories with similarity scores
+ * @param format Format style: "compact" (key-value pairs) or "detailed" (includes evidence)
+ * @returns Formatted string ready to include in system/user message
+ */
+declare const formatMemoriesForChat: (memories: Array<StoredMemoryItem & {
+    similarity?: number;
+}>, format?: "compact" | "detailed") => string;
+/**
+ * Create a system message that includes relevant memories
+ * @param memories Array of memories to include
+ * @param baseSystemPrompt Optional base system prompt (memories will be prepended)
+ * @returns System message content with memories
+ */
+declare const createMemoryContextSystemMessage: (memories: Array<StoredMemoryItem & {
+    similarity?: number;
+}>, baseSystemPrompt?: string) => string;
+/**
+ * Extract conversation context from messages for memory search
+ * @param messages Array of chat messages
+ * @param maxMessages Maximum number of recent messages to include (default: 3)
+ * @returns Combined text query for memory search
+ */
+declare const extractConversationContext: (messages: Array<{
+    role: string;
+    content: string;
+}>, maxMessages?: number) => string;
+
+export { createMemoryContextSystemMessage, extractConversationContext, formatMemoriesForChat, useChat, useMemory };
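
For orientation, a minimal usage sketch of the new useMemory hook, based only on the declarations above and assuming it follows standard React hook conventions (the package's documentation is not part of this diff). The conversation contents and model identifiers are illustrative, and wiring the resulting system prompt into useChat is left out because UseChatOptions is not shown here.

import { useMemory, extractConversationContext, createMemoryContextSystemMessage } from "@reverbia/sdk";

// Illustrative conversation; the { role, content } shape matches the declared signatures.
const messages = [
  { role: "user", content: "I'm Priya, a Rust developer building an audio plugin." },
  { role: "assistant", content: "Nice to meet you, Priya!" },
];

export function MemoryAwareChat() {
  const { extractMemoriesFromMessage, searchMemories } = useMemory({
    memoryModel: "openai/gpt-4o",
    embeddingModel: "openai/text-embedding-3-small",
    onFactsExtracted: (facts) => console.log(`extracted ${facts.items.length} memories`),
  });

  async function prepareSystemPrompt(): Promise<string> {
    // Persist any new facts found in the latest messages.
    await extractMemoriesFromMessage({ messages, model: "openai/gpt-4o" });

    // Turn the recent conversation into a search query and fetch related memories.
    const query = extractConversationContext(messages, 3);
    const memories = await searchMemories(query, 10, 0.6);

    // Prepend the retrieved memories to a base system prompt.
    return createMemoryContextSystemMessage(memories, "You are a helpful assistant.");
  }

  // ...call prepareSystemPrompt() before sending the next chat turn (e.g. via useChat).
  return null;
}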
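
And a sketch of the standalone formatting helpers. The memory object below is a hand-built literal matching the StoredMemoryItem shape (that interface is not listed in the export statement shown in this diff, so no type import is used); all field values are made up, and the exact strings the helpers return are determined by the implementation, not this sketch.

import { formatMemoriesForChat, createMemoryContextSystemMessage } from "@reverbia/sdk";

// Plain object matching the StoredMemoryItem & { similarity?: number } shape declared above.
const memories = [
  {
    type: "preference" as const,
    namespace: "user",
    key: "editor",
    value: "Uses Neovim with an LSP-heavy setup",
    rawEvidence: "I do everything in Neovim these days",
    confidence: 0.9,
    pii: false,
    createdAt: Date.now(),
    updatedAt: Date.now(),
    compositeKey: "user:editor",
    uniqueKey: "user:editor:neovim",
    similarity: 0.72,
  },
];

// Per the doc comments, "compact" renders key-value pairs and "detailed" also includes evidence.
const compact = formatMemoriesForChat(memories, "compact");
const detailed = formatMemoriesForChat(memories, "detailed");

// Prepend the memories to an existing base system prompt.
const systemContent = createMemoryContextSystemMessage(memories, "You are a concise coding assistant.");

console.log(compact);
console.log(detailed);
console.log(systemContent);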