@yamo/memory-mesh 2.3.1 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. package/bin/memory_mesh.js +1 -1
  2. package/lib/llm/client.d.ts +111 -0
  3. package/lib/llm/client.js +299 -357
  4. package/lib/llm/client.ts +413 -0
  5. package/lib/llm/index.d.ts +17 -0
  6. package/lib/llm/index.js +15 -8
  7. package/lib/llm/index.ts +19 -0
  8. package/lib/memory/adapters/client.d.ts +183 -0
  9. package/lib/memory/adapters/client.js +518 -0
  10. package/lib/memory/adapters/client.ts +678 -0
  11. package/lib/memory/adapters/config.d.ts +137 -0
  12. package/lib/memory/adapters/config.js +189 -0
  13. package/lib/memory/adapters/config.ts +259 -0
  14. package/lib/memory/adapters/errors.d.ts +76 -0
  15. package/lib/memory/adapters/errors.js +128 -0
  16. package/lib/memory/adapters/errors.ts +166 -0
  17. package/lib/memory/context-manager.d.ts +44 -0
  18. package/lib/memory/context-manager.js +344 -0
  19. package/lib/memory/context-manager.ts +432 -0
  20. package/lib/memory/embeddings/factory.d.ts +59 -0
  21. package/lib/memory/embeddings/factory.js +148 -0
  22. package/lib/{embeddings/factory.js → memory/embeddings/factory.ts} +69 -28
  23. package/lib/memory/embeddings/index.d.ts +2 -0
  24. package/lib/memory/embeddings/index.js +2 -0
  25. package/lib/memory/embeddings/index.ts +2 -0
  26. package/lib/memory/embeddings/service.d.ts +164 -0
  27. package/lib/memory/embeddings/service.js +515 -0
  28. package/lib/{embeddings/service.js → memory/embeddings/service.ts} +223 -156
  29. package/lib/memory/index.d.ts +9 -0
  30. package/lib/memory/index.js +9 -1
  31. package/lib/memory/index.ts +20 -0
  32. package/lib/memory/memory-mesh.d.ts +274 -0
  33. package/lib/memory/memory-mesh.js +1445 -1189
  34. package/lib/memory/memory-mesh.ts +1803 -0
  35. package/lib/memory/memory-translator.d.ts +19 -0
  36. package/lib/memory/memory-translator.js +125 -0
  37. package/lib/memory/memory-translator.ts +158 -0
  38. package/lib/memory/schema.d.ts +111 -0
  39. package/lib/memory/schema.js +183 -0
  40. package/lib/memory/schema.ts +267 -0
  41. package/lib/memory/scorer.d.ts +26 -0
  42. package/lib/memory/scorer.js +77 -0
  43. package/lib/memory/scorer.ts +95 -0
  44. package/lib/memory/search/index.d.ts +1 -0
  45. package/lib/memory/search/index.js +1 -0
  46. package/lib/memory/search/index.ts +1 -0
  47. package/lib/memory/search/keyword-search.d.ts +62 -0
  48. package/lib/memory/search/keyword-search.js +135 -0
  49. package/lib/{search/keyword-search.js → memory/search/keyword-search.ts} +66 -36
  50. package/lib/scrubber/config/defaults.d.ts +53 -0
  51. package/lib/scrubber/config/defaults.js +49 -57
  52. package/lib/scrubber/config/defaults.ts +117 -0
  53. package/lib/scrubber/index.d.ts +6 -0
  54. package/lib/scrubber/index.js +3 -23
  55. package/lib/scrubber/index.ts +7 -0
  56. package/lib/scrubber/scrubber.d.ts +61 -0
  57. package/lib/scrubber/scrubber.js +99 -121
  58. package/lib/scrubber/scrubber.ts +168 -0
  59. package/lib/scrubber/stages/chunker.d.ts +13 -0
  60. package/lib/scrubber/stages/metadata-annotator.d.ts +18 -0
  61. package/lib/scrubber/stages/normalizer.d.ts +13 -0
  62. package/lib/scrubber/stages/semantic-filter.d.ts +13 -0
  63. package/lib/scrubber/stages/structural-cleaner.d.ts +13 -0
  64. package/lib/scrubber/stages/validator.d.ts +18 -0
  65. package/lib/scrubber/telemetry.d.ts +36 -0
  66. package/lib/scrubber/telemetry.js +53 -58
  67. package/lib/scrubber/telemetry.ts +99 -0
  68. package/lib/utils/logger.d.ts +29 -0
  69. package/lib/utils/logger.js +64 -0
  70. package/lib/utils/logger.ts +85 -0
  71. package/lib/utils/skill-metadata.d.ts +32 -0
  72. package/lib/utils/skill-metadata.js +132 -0
  73. package/lib/utils/skill-metadata.ts +147 -0
  74. package/lib/yamo/emitter.d.ts +73 -0
  75. package/lib/yamo/emitter.js +78 -143
  76. package/lib/yamo/emitter.ts +249 -0
  77. package/lib/yamo/schema.d.ts +58 -0
  78. package/lib/yamo/schema.js +81 -108
  79. package/lib/yamo/schema.ts +165 -0
  80. package/package.json +11 -8
  81. package/index.d.ts +0 -111
  82. package/lib/embeddings/index.js +0 -2
  83. package/lib/index.js +0 -6
  84. package/lib/lancedb/client.js +0 -633
  85. package/lib/lancedb/config.js +0 -215
  86. package/lib/lancedb/errors.js +0 -144
  87. package/lib/lancedb/index.js +0 -4
  88. package/lib/lancedb/schema.js +0 -197
  89. package/lib/scrubber/errors/scrubber-error.js +0 -43
  90. package/lib/scrubber/stages/chunker.js +0 -103
  91. package/lib/scrubber/stages/metadata-annotator.js +0 -74
  92. package/lib/scrubber/stages/normalizer.js +0 -59
  93. package/lib/scrubber/stages/semantic-filter.js +0 -61
  94. package/lib/scrubber/stages/structural-cleaner.js +0 -82
  95. package/lib/scrubber/stages/validator.js +0 -66
  96. package/lib/scrubber/utils/hash.js +0 -39
  97. package/lib/scrubber/utils/html-parser.js +0 -45
  98. package/lib/scrubber/utils/pattern-matcher.js +0 -63
  99. package/lib/scrubber/utils/token-counter.js +0 -31
  100. package/lib/search/index.js +0 -1
  101. package/lib/utils/index.js +0 -1
  102. package/lib/yamo/index.js +0 -15
@@ -0,0 +1,413 @@
1
+ /**
2
+ * LLM Client - Multi-provider LLM API client for reflection generation
3
+ *
4
+ * Supports:
5
+ * - OpenAI (GPT-4, GPT-4o-mini, etc.)
6
+ * - Anthropic (Claude)
7
+ * - Ollama (local models)
8
+ * - Graceful fallback when LLM unavailable
9
+ */
10
+
11
+ import { createLogger } from "../utils/logger.js";
12
+
13
+ const logger = createLogger("llm-client");
14
+
15
+ export interface LLMConfig {
16
+ provider?: string;
17
+ apiKey?: string;
18
+ model?: string;
19
+ baseUrl?: string;
20
+ timeout?: number;
21
+ maxRetries?: number;
22
+ maxTokens?: number;
23
+ }
24
+
25
+ export interface ReflectionResult {
26
+ reflection: string;
27
+ confidence: number;
28
+ }
29
+
30
+ export interface LLMStats {
31
+ totalRequests: number;
32
+ successfulRequests: number;
33
+ failedRequests: number;
34
+ fallbackCount: number;
35
+ }
36
+
37
+ /**
38
+ * LLMClient provides unified interface for calling various LLM providers
39
+ * to generate reflections from memory contexts.
40
+ */
41
+ export class LLMClient {
42
+ provider: string;
43
+ apiKey: string;
44
+ model: string;
45
+ baseUrl: string;
46
+ timeout: number;
47
+ maxRetries: number;
48
+ maxTokens: number;
49
+ stats: LLMStats;
50
+
51
+ /**
52
+ * Create a new LLMClient instance
53
+ */
54
+ constructor(config: LLMConfig = {}) {
55
+ this.provider = config.provider || process.env.LLM_PROVIDER || "openai";
56
+ this.apiKey = config.apiKey || process.env.LLM_API_KEY || "";
57
+ this.model =
58
+ config.model || process.env.LLM_MODEL || this._getDefaultModel();
59
+ this.baseUrl =
60
+ config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
61
+ this.maxTokens = config.maxTokens || 2000;
62
+ this.timeout = config.timeout || (this.maxTokens >= 4000 ? 300000 : 60000);
63
+ this.maxRetries = config.maxRetries || 2;
64
+
65
+ // Statistics
66
+ this.stats = {
67
+ totalRequests: 0,
68
+ successfulRequests: 0,
69
+ failedRequests: 0,
70
+ fallbackCount: 0,
71
+ };
72
+ }
73
+
74
+ /**
75
+ * Get default model for provider
76
+ * @private
77
+ */
78
+ _getDefaultModel(): string {
79
+ const defaults: Record<string, string> = {
80
+ openai: "gpt-4o-mini",
81
+ anthropic: "claude-3-5-haiku-20241022",
82
+ ollama: "llama3.2",
83
+ };
84
+ return defaults[this.provider] || "gpt-4o-mini";
85
+ }
86
+
87
+ /**
88
+ * Get default base URL for provider
89
+ * @private
90
+ */
91
+ _getDefaultBaseUrl(): string {
92
+ const defaults: Record<string, string> = {
93
+ openai: "https://api.openai.com/v1",
94
+ anthropic: "https://api.anthropic.com/v1",
95
+ ollama: "http://localhost:11434",
96
+ };
97
+ return defaults[this.provider] || "https://api.openai.com/v1";
98
+ }
99
+
100
+ /**
101
+ * Generate reflection from memories
102
+ * Main entry point for reflection generation
103
+ */
104
+ async reflect(prompt: string, memories: any[]): Promise<ReflectionResult> {
105
+ this.stats.totalRequests++;
106
+
107
+ if (!memories || memories.length === 0) {
108
+ return this._fallback("No memories provided");
109
+ }
110
+
111
+ const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.
112
+ Respond ONLY in JSON format with exactly these keys:
113
+ {
114
+ "reflection": "a concise insight or observation derived from the memories",
115
+ "confidence": 0.0 to 1.0
116
+ }
117
+
118
+ Keep the reflection brief (1-2 sentences) and actionable.`;
119
+
120
+ const userContent = this._formatMemoriesForLLM(prompt, memories);
121
+
122
+ try {
123
+ const response = await this._callWithRetry(systemPrompt, userContent);
124
+ const parsed = JSON.parse(response);
125
+
126
+ // Validate response structure
127
+ if (!parsed.reflection || typeof parsed.confidence !== "number") {
128
+ throw new Error("Invalid LLM response format");
129
+ }
130
+
131
+ // Clamp confidence to valid range
132
+ parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
133
+
134
+ this.stats.successfulRequests++;
135
+ return parsed as ReflectionResult;
136
+ } catch (error) {
137
+ this.stats.failedRequests++;
138
+ const errorMessage =
139
+ error instanceof Error ? error.message : String(error);
140
+ logger.debug({ err: error, errorMessage }, "LLM call failed");
141
+ return this._fallback("LLM error", memories);
142
+ }
143
+ }
144
+
145
+ /**
146
+ * Format memories for LLM consumption
147
+ * @private
148
+ */
149
+ _formatMemoriesForLLM(prompt: string, memories: any[]): string {
150
+ const memoryList = memories
151
+ .map((m, i) => `${i + 1}. ${m.content}`)
152
+ .join("\n");
153
+
154
+ return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
155
+ }
156
+
157
+ /**
158
+ * Call LLM with retry logic
159
+ * @private
160
+ */
161
+ async _callWithRetry(
162
+ systemPrompt: string,
163
+ userContent: string,
164
+ ): Promise<string> {
165
+ let lastError: any = null;
166
+
167
+ for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
168
+ try {
169
+ return await this._callLLM(systemPrompt, userContent);
170
+ } catch (error) {
171
+ lastError = error;
172
+ if (attempt < this.maxRetries) {
173
+ const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
174
+ await this._sleep(delay);
175
+ }
176
+ }
177
+ }
178
+
179
+ throw lastError;
180
+ }
181
+
182
+ /**
183
+ * Call LLM based on provider
184
+ * @private
185
+ */
186
+ async _callLLM(systemPrompt: string, userContent: string): Promise<string> {
187
+ switch (this.provider) {
188
+ case "openai":
189
+ return this._callOpenAI(systemPrompt, userContent);
190
+ case "anthropic":
191
+ return this._callAnthropic(systemPrompt, userContent);
192
+ case "ollama":
193
+ return this._callOllama(systemPrompt, userContent);
194
+ default:
195
+ throw new Error(`Unsupported provider: ${this.provider}`);
196
+ }
197
+ }
198
+
199
+ /**
200
+ * Call OpenAI API
201
+ * @private
202
+ */
203
+ async _callOpenAI(
204
+ systemPrompt: string,
205
+ userContent: string,
206
+ ): Promise<string> {
207
+ if (!this.apiKey) {
208
+ throw new Error("OpenAI API key not configured");
209
+ }
210
+
211
+ const controller = new AbortController();
212
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
213
+
214
+ try {
215
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
216
+ method: "POST",
217
+ headers: {
218
+ "Content-Type": "application/json",
219
+ Authorization: `Bearer ${this.apiKey}`,
220
+ },
221
+ body: JSON.stringify({
222
+ model: this.model,
223
+ messages: [
224
+ { role: "system", content: systemPrompt },
225
+ { role: "user", content: userContent },
226
+ ],
227
+ temperature: 0.7,
228
+ max_tokens: this.maxTokens,
229
+ }),
230
+ signal: controller.signal,
231
+ });
232
+
233
+ clearTimeout(timeoutId);
234
+
235
+ if (!response.ok) {
236
+ const error = await response.text();
237
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
238
+ }
239
+
240
+ const data = await response.json();
241
+ return data.choices[0].message.content;
242
+ } catch (error) {
243
+ clearTimeout(timeoutId);
244
+ if (error instanceof Error && error.name === "AbortError") {
245
+ throw new Error("Request timeout");
246
+ }
247
+ throw error;
248
+ }
249
+ }
250
+
251
+ /**
252
+ * Call Anthropic (Claude) API
253
+ * @private
254
+ */
255
+ async _callAnthropic(
256
+ systemPrompt: string,
257
+ userContent: string,
258
+ ): Promise<string> {
259
+ if (!this.apiKey) {
260
+ throw new Error("Anthropic API key not configured");
261
+ }
262
+
263
+ const controller = new AbortController();
264
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
265
+
266
+ try {
267
+ const response = await fetch(`${this.baseUrl}/messages`, {
268
+ method: "POST",
269
+ headers: {
270
+ "Content-Type": "application/json",
271
+ "x-api-key": this.apiKey,
272
+ "anthropic-version": "2023-06-01",
273
+ },
274
+ body: JSON.stringify({
275
+ model: this.model,
276
+ max_tokens: this.maxTokens,
277
+ system: systemPrompt,
278
+ messages: [{ role: "user", content: userContent }],
279
+ }),
280
+ signal: controller.signal,
281
+ });
282
+
283
+ clearTimeout(timeoutId);
284
+
285
+ if (!response.ok) {
286
+ const error = await response.text();
287
+ throw new Error(`Anthropic API error: ${response.status} - ${error}`);
288
+ }
289
+
290
+ const data = await response.json();
291
+ return data.content[0].text;
292
+ } catch (error) {
293
+ clearTimeout(timeoutId);
294
+ if (error instanceof Error && error.name === "AbortError") {
295
+ throw new Error("Request timeout");
296
+ }
297
+ throw error;
298
+ }
299
+ }
300
+
301
+ /**
302
+ * Call Ollama (local) API
303
+ * @private
304
+ */
305
+ async _callOllama(
306
+ systemPrompt: string,
307
+ userContent: string,
308
+ ): Promise<string> {
309
+ const controller = new AbortController();
310
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
311
+
312
+ try {
313
+ const response = await fetch(`${this.baseUrl}/api/chat`, {
314
+ method: "POST",
315
+ headers: {
316
+ "Content-Type": "application/json",
317
+ },
318
+ body: JSON.stringify({
319
+ model: this.model,
320
+ messages: [
321
+ { role: "system", content: systemPrompt },
322
+ { role: "user", content: userContent },
323
+ ],
324
+ stream: false,
325
+ options: {
326
+ num_predict: this.maxTokens,
327
+ },
328
+ }),
329
+ signal: controller.signal,
330
+ });
331
+
332
+ clearTimeout(timeoutId);
333
+
334
+ if (!response.ok) {
335
+ const error = await response.text();
336
+ throw new Error(`Ollama API error: ${response.status} - ${error}`);
337
+ }
338
+
339
+ const data = await response.json();
340
+ return data.message.content;
341
+ } catch (error) {
342
+ clearTimeout(timeoutId);
343
+ if (error instanceof Error && error.name === "AbortError") {
344
+ throw new Error("Request timeout");
345
+ }
346
+ throw error;
347
+ }
348
+ }
349
+
350
+ /**
351
+ * Fallback when LLM fails
352
+ * @private
353
+ */
354
+ _fallback(reason: string, memories: any[] = []): ReflectionResult {
355
+ this.stats.fallbackCount++;
356
+
357
+ if (memories && memories.length > 0) {
358
+ // Simple aggregation fallback
359
+ const contents = memories.map((m) => m.content);
360
+ const combined = contents.join("; ");
361
+ const preview =
362
+ combined.length > 200 ? `${combined.substring(0, 200)}...` : combined;
363
+
364
+ return {
365
+ reflection: `Aggregated from ${memories.length} memories: ${preview}`,
366
+ confidence: 0.5,
367
+ };
368
+ }
369
+
370
+ return {
371
+ reflection: `Reflection generation unavailable: ${reason}`,
372
+ confidence: 0.3,
373
+ };
374
+ }
375
+
376
+ /**
377
+ * Sleep utility
378
+ * @private
379
+ */
380
+ _sleep(ms: number): Promise<void> {
381
+ return new Promise((resolve) => setTimeout(resolve, ms));
382
+ }
383
+
384
+ /**
385
+ * Get client statistics
386
+ * @returns {Object} Statistics
387
+ */
388
+ getStats(): any {
389
+ return {
390
+ ...this.stats,
391
+ successRate:
392
+ this.stats.totalRequests > 0
393
+ ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(
394
+ 2,
395
+ )
396
+ : "0.00",
397
+ };
398
+ }
399
+
400
+ /**
401
+ * Reset statistics
402
+ */
403
+ resetStats(): void {
404
+ this.stats = {
405
+ totalRequests: 0,
406
+ successfulRequests: 0,
407
+ failedRequests: 0,
408
+ fallbackCount: 0,
409
+ };
410
+ }
411
+ }
412
+
413
+ export default LLMClient;
@@ -0,0 +1,17 @@
1
+ /**
2
+ * YAMO LLM Module
3
+ * Large Language Model client abstraction
4
+ */
5
+ export { LLMClient } from "./client.js";
6
+ /**
7
+ * Self-RefiningExtractor is now implemented as a YAMO skill.
8
+ * Use: skill-self-refining-extractor.yamo
9
+ *
10
+ * Example:
11
+ * _kernel_execute({
12
+ * skill: 'skill-self-refining-extractor.yamo',
13
+ * skill_path: 'skills/skill-super.yamo',
14
+ * max_iterations: 5
15
+ * })
16
+ */
17
+ export declare const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
package/lib/llm/index.js CHANGED
@@ -1,10 +1,17 @@
1
1
  /**
2
- * LLM Module - LLM client support for yamo-memory-mesh
3
- * Exports multi-provider LLM client for reflection generation
2
+ * YAMO LLM Module
3
+ * Large Language Model client abstraction
4
4
  */
5
-
6
- export { LLMClient } from './client.js';
7
-
8
- export default {
9
- LLMClient: (await import('./client.js')).LLMClient
10
- };
5
+ export { LLMClient } from "./client.js";
6
+ /**
7
+ * Self-RefiningExtractor is now implemented as a YAMO skill.
8
+ * Use: skill-self-refining-extractor.yamo
9
+ *
10
+ * Example:
11
+ * _kernel_execute({
12
+ * skill: 'skill-self-refining-extractor.yamo',
13
+ * skill_path: 'skills/skill-super.yamo',
14
+ * max_iterations: 5
15
+ * })
16
+ */
17
+ export const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
@@ -0,0 +1,19 @@
1
+ /**
2
+ * YAMO LLM Module
3
+ * Large Language Model client abstraction
4
+ */
5
+
6
+ export { LLMClient } from "./client.js";
7
+
8
+ /**
9
+ * Self-RefiningExtractor is now implemented as a YAMO skill.
10
+ * Use: skill-self-refining-extractor.yamo
11
+ *
12
+ * Example:
13
+ * _kernel_execute({
14
+ * skill: 'skill-self-refining-extractor.yamo',
15
+ * skill_path: 'skills/skill-super.yamo',
16
+ * max_iterations: 5
17
+ * })
18
+ */
19
+ export const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
@@ -0,0 +1,183 @@
1
+ /**
2
+ * LanceDB Client Wrapper
3
+ *
4
+ * A comprehensive wrapper around LanceDB JavaScript SDK providing:
5
+ * - Connection management with pooling and retries
6
+ * - CRUD operations for memory entries
7
+ * - Vector similarity search with filtering
8
+ * - Database statistics and monitoring
9
+ *
10
+ * @class LanceDBClient
11
+ */
12
+ import * as lancedb from "@lancedb/lancedb";
13
+ /**
14
+ * LanceDB driver interface for dependency injection/testing
15
+ */
16
+ export interface LanceDBDriver {
17
+ connect(uri: string): Promise<lancedb.Connection>;
18
+ }
19
+ export interface ClientConfig {
20
+ uri?: string;
21
+ tableName?: string;
22
+ maxRetries?: number;
23
+ retryDelay?: number;
24
+ vectorDimension?: number;
25
+ driver?: LanceDBDriver;
26
+ }
27
+ export interface MemoryEntry {
28
+ id: string;
29
+ vector: number[];
30
+ content: string;
31
+ metadata?: string | Record<string, any> | null;
32
+ created_at?: Date | string;
33
+ updated_at?: Date | string;
34
+ }
35
+ export interface SearchResult extends MemoryEntry {
36
+ score?: number;
37
+ }
38
+ export interface SearchOptions {
39
+ limit?: number;
40
+ metric?: string;
41
+ nprobes?: number;
42
+ filter?: string | null;
43
+ }
44
+ export interface Stats {
45
+ tableName: string;
46
+ uri: string;
47
+ count: number;
48
+ isConnected: boolean;
49
+ }
50
+ /**
51
+ * LanceDB Client wrapper class
52
+ */
53
+ export declare class LanceDBClient {
54
+ uri: string;
55
+ tableName: string;
56
+ maxRetries: number;
57
+ retryDelay: number;
58
+ vectorDimension: number;
59
+ driver: LanceDBDriver;
60
+ db: lancedb.Connection | null;
61
+ table: lancedb.Table | null;
62
+ isConnected: boolean;
63
+ private tempDir?;
64
+ /**
65
+ * Create a new LanceDBClient instance
66
+ * @param {Object} [config={}] - Configuration object
67
+ */
68
+ constructor(config?: ClientConfig);
69
+ /**
70
+ * Connect to LanceDB and initialize table
71
+ * Creates the database directory and table if they don't exist
72
+ * @returns {Promise<void>}
73
+ * @throws {StorageError} If connection fails after retries
74
+ */
75
+ connect(): Promise<void>;
76
+ /**
77
+ * Disconnect from LanceDB
78
+ * @returns {Promise<void>}
79
+ */
80
+ disconnect(): void;
81
+ /**
82
+ * Add a single memory entry
83
+ * @param {Object} data - Entry data
84
+ * @returns {Promise<Object>} Result with id and success status
85
+ * @throws {StorageError} If add operation fails
86
+ */
87
+ add(data: MemoryEntry): Promise<{
88
+ id: string;
89
+ success: boolean;
90
+ }>;
91
+ /**
92
+ * Add multiple memory entries in batch
93
+ * @param {Array<Object>} records - Array of entry data objects
94
+ * @returns {Promise<Object>} Result with count of added records
95
+ * @throws {StorageError} If batch add fails
96
+ */
97
+ addBatch(records: MemoryEntry[]): Promise<{
98
+ count: number;
99
+ success: boolean;
100
+ }>;
101
+ /**
102
+ * Search for similar vectors
103
+ * @param {Array<number>} vector - Query vector (384 dimensions)
104
+ * @param {Object} options - Search options
105
+ * @returns {Promise<Array<Object>>} Array of search results with scores
106
+ * @throws {QueryError} If search fails
107
+ */
108
+ search(vector: number[], options?: SearchOptions): Promise<SearchResult[]>;
109
+ /**
110
+ * Get a record by ID
111
+ * @param {string} id - Record ID
112
+ * @returns {Promise<Object|null>} Record object or null if not found
113
+ * @throws {QueryError} If query fails
114
+ */
115
+ getById(id: string): Promise<MemoryEntry | null>;
116
+ /**
117
+ * Get all records from the database
118
+ * @param {Object} options - Options
119
+ * @returns {Promise<Array<Object>>} Array of all records
120
+ */
121
+ getAll(options?: {
122
+ limit?: number;
123
+ }): Promise<MemoryEntry[]>;
124
+ /**
125
+ * Delete a record by ID
126
+ * @param {string} id - Record ID to delete
127
+ * @returns {Promise<Object>} Result with success status
128
+ * @throws {StorageError} If delete fails
129
+ */
130
+ delete(id: string): Promise<{
131
+ id: string;
132
+ success: boolean;
133
+ }>;
134
+ /**
135
+ * Update an existing record
136
+ * @param {string} id - Record ID to update
137
+ * @param {Object} data - Updated data fields
138
+ * @returns {Promise<Object>} Result with success status
139
+ * @throws {StorageError} If update fails
140
+ */
141
+ update(id: string, data: Partial<MemoryEntry>): Promise<{
142
+ id: string;
143
+ success: boolean;
144
+ }>;
145
+ /**
146
+ * Get database statistics
147
+ * @returns {Promise<Object>} Statistics including count, size, etc.
148
+ * @throws {QueryError} If stats query fails
149
+ */
150
+ getStats(): Promise<Stats>;
151
+ /**
152
+ * Sanitize an ID to prevent SQL injection
153
+ * Removes any characters that aren't alphanumeric, underscore, or hyphen
154
+ * @private
155
+ */
156
+ _sanitizeId(id: string): string;
157
+ /**
158
+ * Validate a record object
159
+ * @private
160
+ */
161
+ _validateRecord(record: any): void;
162
+ /**
163
+ * Validate a vector array
164
+ * @private
165
+ */
166
+ _validateVector(vector: any): void;
167
+ /**
168
+ * Sleep for a specified duration
169
+ * @private
170
+ */
171
+ _sleep(ms: number): Promise<void>;
172
+ /**
173
+ * Check if an error is retryable (transient network/connection issues)
174
+ * @private
175
+ */
176
+ _isRetryableError(error: any): boolean;
177
+ /**
178
+ * Retry an operation with exponential backoff
179
+ * @private
180
+ */
181
+ _retryOperation<T>(operation: () => Promise<T>, maxRetries?: number, baseDelay?: number): Promise<T>;
182
+ }
183
+ export default LanceDBClient;