@reverbia/sdk 1.0.0-next.20251119170952 → 1.0.0-next.20251121094738

This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -0,0 +1,244 @@
+ /**
+  * ExtraFields contains additional metadata
+  */
+ type LlmapiChatCompletionExtraFields = {
+   /**
+    * Latency is the request latency in milliseconds
+    */
+   latency?: number;
+   /**
+    * ModelRequested is the model that was requested
+    */
+   model_requested?: string;
+   /**
+    * Provider is the LLM provider used (e.g., "openai", "anthropic")
+    */
+   provider?: string;
+   /**
+    * RequestType is always "chat_completion"
+    */
+   request_type?: string;
+ };
+ type LlmapiChatCompletionResponse = {
+   /**
+    * Choices contains the completion choices
+    */
+   choices?: Array<LlmapiChoice>;
+   extra_fields?: LlmapiChatCompletionExtraFields;
+   /**
+    * ID is the completion ID
+    */
+   id?: string;
+   /**
+    * Model is the model used
+    */
+   model?: string;
+   usage?: LlmapiChatCompletionUsage;
+ };
+ /**
+  * Usage contains token usage information
+  */
+ type LlmapiChatCompletionUsage = {
+   /**
+    * CompletionTokens is the number of tokens in the completion
+    */
+   completion_tokens?: number;
+   /**
+    * PromptTokens is the number of tokens in the prompt
+    */
+   prompt_tokens?: number;
+   /**
+    * TotalTokens is the total number of tokens used
+    */
+   total_tokens?: number;
+ };
+ type LlmapiChoice = {
+   /**
+    * FinishReason indicates why the completion stopped
+    */
+   finish_reason?: string;
+   /**
+    * Index is the choice index
+    */
+   index?: number;
+   message?: LlmapiMessage;
+ };
+ /**
+  * Message is the generated message
+  */
+ type LlmapiMessage = {
+   /**
+    * Content is the message content
+    */
+   content?: string;
+   role?: LlmapiRole;
+ };
+ /**
+  * Role is the message role (system, user, assistant)
+  */
+ type LlmapiRole = string;
+
+ type SendMessageArgs = {
+   messages: LlmapiMessage[];
+   model: string;
+ };
+ type SendMessageResult = {
+   data: LlmapiChatCompletionResponse;
+   error: null;
+ } | {
+   data: null;
+   error: string;
+ };
+ type UseChatOptions = {
+   getToken?: () => Promise<string | null>;
+ };
+ type UseChatResult = {
+   isLoading: boolean;
+   sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
+ };
+ /**
+  * A React hook for managing chat completions with authentication.
+  *
+  * This hook provides a convenient way to send chat messages to the LLM API
+  * with automatic token management and loading state handling.
+  *
+  * @param options - Optional configuration object
+  * @param options.getToken - An async function that returns an authentication token.
+  *   This token will be used as a Bearer token in the Authorization header.
+  *   If not provided, `sendMessage` will return an error.
+  *
+  * @returns An object containing:
+  * - `isLoading`: A boolean indicating whether a request is currently in progress
+  * - `sendMessage`: An async function to send chat messages
+  *
+  * @example
+  * ```tsx
+  * const { isLoading, sendMessage } = useChat({
+  *   getToken: async () => {
+  *     // Get your auth token from your auth provider
+  *     return await getAuthToken();
+  *   }
+  * });
+  *
+  * const handleSend = async () => {
+  *   const result = await sendMessage({
+  *     messages: [{ role: 'user', content: 'Hello!' }],
+  *     model: 'gpt-4o-mini'
+  *   });
+  *
+  *   if (result.error) {
+  *     console.error(result.error);
+  *   } else {
+  *     console.log(result.data);
+  *   }
+  * };
+  * ```
+  */
+ declare function useChat(options?: UseChatOptions): UseChatResult;
+
+ interface MemoryItem {
+   type: "identity" | "preference" | "project" | "skill" | "constraint";
+   namespace: string;
+   key: string;
+   value: string;
+   rawEvidence: string;
+   confidence: number;
+   pii: boolean;
+ }
+ interface MemoryExtractionResult {
+   items: MemoryItem[];
+ }
+
+ /**
+  * Extended MemoryItem with database fields
+  */
+ interface StoredMemoryItem extends MemoryItem {
+   id?: number;
+   createdAt: number;
+   updatedAt: number;
+   compositeKey: string;
+   uniqueKey: string;
+   embedding?: number[];
+   embeddingModel?: string;
+ }
+
+ type UseMemoryOptions = {
+   /**
+    * The model to use for fact extraction (default: "openai/gpt-4o")
+    */
+   memoryModel?: string;
+   /**
+    * The model to use for generating embeddings (default: "openai/text-embedding-3-small")
+    * Set to null/undefined to disable embedding generation
+    */
+   embeddingModel?: string | null;
+   /**
+    * Whether to automatically generate embeddings for extracted memories (default: true)
+    */
+   generateEmbeddings?: boolean;
+   /**
+    * Callback when facts are extracted
+    */
+   onFactsExtracted?: (facts: MemoryExtractionResult) => void;
+   /**
+    * Custom function to get auth token for API calls
+    */
+   getToken?: () => Promise<string | null>;
+ };
+ type UseMemoryResult = {
+   extractMemoriesFromMessage: (options: {
+     messages: Array<{
+       role: string;
+       content: string;
+     }>;
+     model: string;
+   }) => Promise<MemoryExtractionResult | null>;
+   /**
+    * Search for similar memories using semantic search
+    * @param query The text query to search for
+    * @param limit Maximum number of results (default: 10)
+    * @param minSimilarity Minimum similarity threshold 0-1 (default: 0.6)
+    *   Note: Embedding similarity scores are typically lower than expected.
+    *   A score of 0.6-0.7 is usually a good match, 0.5-0.6 is moderate.
+    * @returns Array of memories with similarity scores, sorted by relevance
+    */
+   searchMemories: (query: string, limit?: number, minSimilarity?: number) => Promise<Array<StoredMemoryItem & {
+     similarity: number;
+   }>>;
+ };
+ /**
+  * Standalone hook for extracting memories from user messages.
+  * Can be composed with other hooks like useChat, useFiles, etc.
+  */
+ declare function useMemory(options?: UseMemoryOptions): UseMemoryResult;
+
+ /**
+  * Format memories into a context string that can be included in chat messages
+  * @param memories Array of memories with similarity scores
+  * @param format Format style: "compact" (key-value pairs) or "detailed" (includes evidence)
+  * @returns Formatted string ready to include in system/user message
+  */
+ declare const formatMemoriesForChat: (memories: Array<StoredMemoryItem & {
+   similarity?: number;
+ }>, format?: "compact" | "detailed") => string;
+ /**
+  * Create a system message that includes relevant memories
+  * @param memories Array of memories to include
+  * @param baseSystemPrompt Optional base system prompt (memories will be prepended)
+  * @returns System message content with memories
+  */
+ declare const createMemoryContextSystemMessage: (memories: Array<StoredMemoryItem & {
+   similarity?: number;
+ }>, baseSystemPrompt?: string) => string;
+ /**
+  * Extract conversation context from messages for memory search
+  * @param messages Array of chat messages
+  * @param maxMessages Maximum number of recent messages to include (default: 3)
+  * @returns Combined text query for memory search
+  */
+ declare const extractConversationContext: (messages: Array<{
+   role: string;
+   content: string;
+ }>, maxMessages?: number) => string;
+
+ export { createMemoryContextSystemMessage, extractConversationContext, formatMemoriesForChat, useChat, useMemory };
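
Unlike useChat, the useMemory hook declared above ships without an @example. A minimal sketch of how its surface could be used from a custom React hook, based only on the declared types; getAuthToken() is a placeholder for your own auth layer and "gpt-4o-mini" is just an illustrative model name:

```ts
import { useMemory } from "@reverbia/sdk";

// Placeholder for your own auth provider call (assumption, not part of the SDK).
declare function getAuthToken(): Promise<string | null>;

function useFactCapture() {
  const { extractMemoriesFromMessage, searchMemories } = useMemory({
    getToken: getAuthToken,
    memoryModel: "openai/gpt-4o",
    onFactsExtracted: (facts) => {
      console.log(`extracted ${facts.items.length} memory item(s)`);
    },
  });

  // Extract durable facts from the latest user message, then look up
  // semantically similar memories that were stored earlier.
  return async (userText: string) => {
    const extracted = await extractMemoriesFromMessage({
      messages: [{ role: "user", content: userText }],
      model: "gpt-4o-mini",
    });

    const related = await searchMemories(userText, 5, 0.6);
    return { extracted, related };
  };
}
```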
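
formatMemoriesForChat and createMemoryContextSystemMessage both take stored memories, optionally annotated with a similarity score. A small sketch of the expected input shape; the object literal mirrors StoredMemoryItem (which is not exported) and all field values are invented for illustration, since real items come back from searchMemories:

```ts
import {
  createMemoryContextSystemMessage,
  formatMemoriesForChat,
} from "@reverbia/sdk";

// Hand-written object matching the StoredMemoryItem shape; values are
// invented purely for illustration.
const memories = [
  {
    type: "preference" as const,
    namespace: "user",
    key: "editor",
    value: "prefers Neovim",
    rawEvidence: "I do everything in Neovim",
    confidence: 0.9,
    pii: false,
    createdAt: Date.now(),
    updatedAt: Date.now(),
    compositeKey: "user:preference:editor",
    uniqueKey: "user:preference:editor",
    similarity: 0.72,
  },
];

// Compact key-value rendering, e.g. for a short context block.
const compact = formatMemoriesForChat(memories, "compact");

// System prompt with the memories prepended to a base instruction.
const systemPrompt = createMemoryContextSystemMessage(
  memories,
  "You are a helpful assistant.",
);
```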
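
Finally, a hedged end-to-end sketch of how extractConversationContext, searchMemories, createMemoryContextSystemMessage, and useChat could be wired together so that relevant memories are injected before each request. The wrapper hook name, getAuthToken(), and the model name are assumptions, not part of the SDK:

```ts
import {
  createMemoryContextSystemMessage,
  extractConversationContext,
  useChat,
  useMemory,
} from "@reverbia/sdk";

// Placeholder for your own auth provider call (assumption, not part of the SDK).
declare function getAuthToken(): Promise<string | null>;

function useMemoryAwareChat() {
  const { sendMessage } = useChat({ getToken: getAuthToken });
  const { searchMemories } = useMemory({ getToken: getAuthToken });

  return async (messages: Array<{ role: string; content: string }>) => {
    // Build a search query from the most recent messages (default: last 3).
    const query = extractConversationContext(messages);

    // Retrieve memories that are semantically close to the current topic.
    const memories = await searchMemories(query, 5);

    // Prepend a memory-aware system message to the conversation.
    const system = createMemoryContextSystemMessage(
      memories,
      "You are a helpful assistant.",
    );

    return sendMessage({
      messages: [{ role: "system", content: system }, ...messages],
      model: "gpt-4o-mini",
    });
  };
}
```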