@yamo/memory-mesh 2.3.2 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. package/README.md +8 -2
  2. package/bin/memory_mesh.js +1 -1
  3. package/lib/llm/client.d.ts +86 -0
  4. package/lib/llm/client.js +300 -357
  5. package/lib/llm/client.ts +334 -0
  6. package/lib/llm/index.d.ts +17 -0
  7. package/lib/llm/index.js +16 -8
  8. package/lib/llm/index.ts +18 -0
  9. package/lib/memory/adapters/client.d.ts +120 -0
  10. package/lib/memory/adapters/client.js +519 -0
  11. package/lib/memory/adapters/client.ts +519 -0
  12. package/lib/memory/adapters/config.d.ts +130 -0
  13. package/lib/memory/adapters/config.js +190 -0
  14. package/lib/memory/adapters/config.ts +190 -0
  15. package/lib/memory/adapters/errors.d.ts +84 -0
  16. package/lib/memory/adapters/errors.js +129 -0
  17. package/lib/memory/adapters/errors.ts +129 -0
  18. package/lib/memory/context-manager.d.ts +41 -0
  19. package/lib/memory/context-manager.js +345 -0
  20. package/lib/memory/context-manager.ts +345 -0
  21. package/lib/memory/embeddings/factory.d.ts +57 -0
  22. package/lib/memory/embeddings/factory.js +149 -0
  23. package/lib/memory/embeddings/factory.ts +149 -0
  24. package/lib/memory/embeddings/index.d.ts +2 -0
  25. package/lib/memory/embeddings/index.js +3 -0
  26. package/lib/memory/embeddings/index.ts +3 -0
  27. package/lib/memory/embeddings/service.d.ts +134 -0
  28. package/lib/memory/embeddings/service.js +516 -0
  29. package/lib/memory/embeddings/service.ts +516 -0
  30. package/lib/memory/index.d.ts +9 -0
  31. package/lib/memory/index.js +10 -1
  32. package/lib/memory/index.ts +10 -0
  33. package/lib/memory/memory-mesh.d.ts +332 -0
  34. package/lib/memory/memory-mesh.js +1470 -678
  35. package/lib/memory/memory-mesh.ts +1517 -0
  36. package/lib/memory/memory-translator.d.ts +14 -0
  37. package/lib/memory/memory-translator.js +126 -0
  38. package/lib/memory/memory-translator.ts +126 -0
  39. package/lib/memory/schema.d.ts +130 -0
  40. package/lib/memory/schema.js +184 -0
  41. package/lib/memory/schema.ts +184 -0
  42. package/lib/memory/scorer.d.ts +25 -0
  43. package/lib/memory/scorer.js +78 -0
  44. package/lib/memory/scorer.ts +78 -0
  45. package/lib/memory/search/index.d.ts +1 -0
  46. package/lib/memory/search/index.js +2 -0
  47. package/lib/memory/search/index.ts +2 -0
  48. package/lib/memory/search/keyword-search.d.ts +46 -0
  49. package/lib/memory/search/keyword-search.js +136 -0
  50. package/lib/memory/search/keyword-search.ts +136 -0
  51. package/lib/scrubber/config/defaults.d.ts +46 -0
  52. package/lib/scrubber/config/defaults.js +50 -57
  53. package/lib/scrubber/config/defaults.ts +55 -0
  54. package/lib/scrubber/errors/scrubber-error.d.ts +22 -0
  55. package/lib/scrubber/errors/scrubber-error.js +28 -32
  56. package/lib/scrubber/errors/scrubber-error.ts +44 -0
  57. package/lib/scrubber/index.d.ts +5 -0
  58. package/lib/scrubber/index.js +4 -23
  59. package/lib/scrubber/index.ts +6 -0
  60. package/lib/scrubber/scrubber.d.ts +44 -0
  61. package/lib/scrubber/scrubber.js +100 -121
  62. package/lib/scrubber/scrubber.ts +109 -0
  63. package/lib/scrubber/stages/chunker.d.ts +25 -0
  64. package/lib/scrubber/stages/chunker.js +74 -91
  65. package/lib/scrubber/stages/chunker.ts +104 -0
  66. package/lib/scrubber/stages/metadata-annotator.d.ts +17 -0
  67. package/lib/scrubber/stages/metadata-annotator.js +55 -65
  68. package/lib/scrubber/stages/metadata-annotator.ts +75 -0
  69. package/lib/scrubber/stages/normalizer.d.ts +16 -0
  70. package/lib/scrubber/stages/normalizer.js +42 -50
  71. package/lib/scrubber/stages/normalizer.ts +60 -0
  72. package/lib/scrubber/stages/semantic-filter.d.ts +16 -0
  73. package/lib/scrubber/stages/semantic-filter.js +42 -52
  74. package/lib/scrubber/stages/semantic-filter.ts +62 -0
  75. package/lib/scrubber/stages/structural-cleaner.d.ts +18 -0
  76. package/lib/scrubber/stages/structural-cleaner.js +66 -75
  77. package/lib/scrubber/stages/structural-cleaner.ts +83 -0
  78. package/lib/scrubber/stages/validator.d.ts +17 -0
  79. package/lib/scrubber/stages/validator.js +46 -56
  80. package/lib/scrubber/stages/validator.ts +67 -0
  81. package/lib/scrubber/telemetry.d.ts +29 -0
  82. package/lib/scrubber/telemetry.js +54 -58
  83. package/lib/scrubber/telemetry.ts +62 -0
  84. package/lib/scrubber/utils/hash.d.ts +14 -0
  85. package/lib/scrubber/utils/hash.js +30 -32
  86. package/lib/scrubber/utils/hash.ts +40 -0
  87. package/lib/scrubber/utils/html-parser.d.ts +14 -0
  88. package/lib/scrubber/utils/html-parser.js +32 -39
  89. package/lib/scrubber/utils/html-parser.ts +46 -0
  90. package/lib/scrubber/utils/pattern-matcher.d.ts +12 -0
  91. package/lib/scrubber/utils/pattern-matcher.js +48 -57
  92. package/lib/scrubber/utils/pattern-matcher.ts +64 -0
  93. package/lib/scrubber/utils/token-counter.d.ts +18 -0
  94. package/lib/scrubber/utils/token-counter.js +24 -25
  95. package/lib/scrubber/utils/token-counter.ts +32 -0
  96. package/lib/utils/logger.d.ts +19 -0
  97. package/lib/utils/logger.js +65 -0
  98. package/lib/utils/logger.ts +65 -0
  99. package/lib/utils/skill-metadata.d.ts +24 -0
  100. package/lib/utils/skill-metadata.js +133 -0
  101. package/lib/utils/skill-metadata.ts +133 -0
  102. package/lib/yamo/emitter.d.ts +46 -0
  103. package/lib/yamo/emitter.js +79 -143
  104. package/lib/yamo/emitter.ts +171 -0
  105. package/lib/yamo/index.d.ts +14 -0
  106. package/lib/yamo/index.js +6 -7
  107. package/lib/yamo/index.ts +16 -0
  108. package/lib/yamo/schema.d.ts +56 -0
  109. package/lib/yamo/schema.js +82 -108
  110. package/lib/yamo/schema.ts +133 -0
  111. package/package.json +13 -8
  112. package/index.d.ts +0 -111
  113. package/lib/embeddings/factory.js +0 -151
  114. package/lib/embeddings/index.js +0 -2
  115. package/lib/embeddings/service.js +0 -586
  116. package/lib/index.js +0 -6
  117. package/lib/lancedb/client.js +0 -633
  118. package/lib/lancedb/config.js +0 -215
  119. package/lib/lancedb/errors.js +0 -144
  120. package/lib/lancedb/index.js +0 -4
  121. package/lib/lancedb/schema.js +0 -217
  122. package/lib/search/index.js +0 -1
  123. package/lib/search/keyword-search.js +0 -144
  124. package/lib/utils/index.js +0 -1
@@ -0,0 +1,334 @@
1
// @ts-nocheck
/**
 * LLM Client - Multi-provider LLM API client for reflection generation
 *
 * Supports:
 * - OpenAI (GPT-4, GPT-4o-mini, etc.)
 * - Anthropic (Claude)
 * - Ollama (local models)
 * - Graceful fallback when LLM unavailable
 */
import { createLogger } from "../utils/logger.js";
const logger = createLogger("llm-client");
/**
 * LLMClient provides unified interface for calling various LLM providers
 * to generate reflections from memory contexts.
 */
export class LLMClient {
    provider;
    apiKey;
    model;
    baseUrl;
    timeout;
    maxRetries;
    maxTokens;
    stats;
    /**
     * Create a new LLMClient instance.
     *
     * @param {Object} [config={}] - Configuration
     * @param {string} [config.provider] - "openai" | "anthropic" | "ollama" ($LLM_PROVIDER, default "openai")
     * @param {string} [config.apiKey] - API key ($LLM_API_KEY)
     * @param {string} [config.model] - Model name ($LLM_MODEL or per-provider default)
     * @param {string} [config.baseUrl] - API base URL ($LLM_BASE_URL or per-provider default)
     * @param {number} [config.maxTokens=2000] - Maximum tokens to generate
     * @param {number} [config.timeout] - Request timeout in ms (default: 5 min for large generations, 60s otherwise)
     * @param {number} [config.maxRetries=2] - Total attempts per LLM call
     */
    constructor(config = {}) {
        this.provider = config.provider || process.env.LLM_PROVIDER || "openai";
        this.apiKey = config.apiKey || process.env.LLM_API_KEY || "";
        this.model =
            config.model || process.env.LLM_MODEL || this._getDefaultModel();
        this.baseUrl =
            config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
        // Use ?? (not ||) for numeric options so an explicitly-passed 0 is not
        // silently replaced by the default.
        this.maxTokens = config.maxTokens ?? 2000;
        this.timeout = config.timeout ?? (this.maxTokens >= 4000 ? 300000 : 60000);
        this.maxRetries = config.maxRetries ?? 2;
        // Statistics
        this.stats = {
            totalRequests: 0,
            successfulRequests: 0,
            failedRequests: 0,
            fallbackCount: 0,
        };
    }
    /**
     * Get default model for provider
     * @private
     */
    _getDefaultModel() {
        const defaults = {
            openai: "gpt-4o-mini",
            anthropic: "claude-3-5-haiku-20241022",
            ollama: "llama3.2",
        };
        return defaults[this.provider] || "gpt-4o-mini";
    }
    /**
     * Get default base URL for provider
     * @private
     */
    _getDefaultBaseUrl() {
        const defaults = {
            openai: "https://api.openai.com/v1",
            anthropic: "https://api.anthropic.com/v1",
            ollama: "http://localhost:11434",
        };
        return defaults[this.provider] || "https://api.openai.com/v1";
    }
    /**
     * Generate reflection from memories.
     * Main entry point for reflection generation. Never rejects: any LLM
     * failure degrades to a heuristic aggregation via _fallback().
     *
     * @param {string} prompt - Reflection prompt
     * @param {Array<{content: string}>} memories - Memories to reflect over
     * @returns {Promise<{reflection: string, confidence: number}>}
     */
    async reflect(prompt, memories) {
        this.stats.totalRequests++;
        if (!memories || memories.length === 0) {
            return this._fallback("No memories provided");
        }
        const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.
Respond ONLY in JSON format with exactly these keys:
{
  "reflection": "a concise insight or observation derived from the memories",
  "confidence": 0.0 to 1.0
}

Keep the reflection brief (1-2 sentences) and actionable.`;
        const userContent = this._formatMemoriesForLLM(prompt, memories);
        try {
            const response = await this._callWithRetry(systemPrompt, userContent);
            const parsed = this._parseReflection(response);
            // Validate response structure
            if (!parsed.reflection ||
                typeof parsed.confidence !== "number" ||
                Number.isNaN(parsed.confidence)) {
                throw new Error("Invalid LLM response format");
            }
            // Clamp confidence to valid range
            parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
            this.stats.successfulRequests++;
            return parsed;
        }
        catch (error) {
            this.stats.failedRequests++;
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.debug({ err: error, errorMessage }, "LLM call failed");
            return this._fallback("LLM error", memories);
        }
    }
    /**
     * Parse the model reply into an object.
     * Tolerates replies wrapped in a markdown code fence, which models
     * commonly emit despite the "JSON only" instruction.
     * @private
     */
    _parseReflection(raw) {
        let text = String(raw).trim();
        const fenced = text.match(/```(?:json)?\s*([\s\S]*?)```/i);
        if (fenced) {
            text = fenced[1].trim();
        }
        return JSON.parse(text);
    }
    /**
     * Format memories for LLM consumption
     * @private
     */
    _formatMemoriesForLLM(prompt, memories) {
        const memoryList = memories
            .map((m, i) => `${i + 1}. ${m.content}`)
            .join("\n");
        return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
    }
    /**
     * Call LLM with retry logic (exponential backoff between attempts).
     * @private
     */
    async _callWithRetry(systemPrompt, userContent) {
        // Guarantee at least one attempt even if maxRetries was set to 0,
        // so we never `throw null` below.
        const attempts = Math.max(1, this.maxRetries);
        let lastError = null;
        for (let attempt = 1; attempt <= attempts; attempt++) {
            try {
                return await this._callLLM(systemPrompt, userContent);
            }
            catch (error) {
                lastError = error;
                if (attempt < attempts) {
                    const delay = Math.pow(2, attempt) * 1000; // Exponential backoff: 2s, 4s, ...
                    await this._sleep(delay);
                }
            }
        }
        throw lastError;
    }
    /**
     * Call LLM based on provider
     * @private
     */
    async _callLLM(systemPrompt, userContent) {
        switch (this.provider) {
            case "openai":
                return this._callOpenAI(systemPrompt, userContent);
            case "anthropic":
                return this._callAnthropic(systemPrompt, userContent);
            case "ollama":
                return this._callOllama(systemPrompt, userContent);
            default:
                throw new Error(`Unsupported provider: ${this.provider}`);
        }
    }
    /**
     * POST a JSON body with timeout handling shared by all providers.
     * Aborts the request after this.timeout ms and normalizes the abort
     * into a "Request timeout" error.
     *
     * @param {string} url - Full endpoint URL
     * @param {Object} headers - Extra headers (Content-Type is always set)
     * @param {Object} body - JSON-serializable request body
     * @param {string} providerLabel - Provider name used in error messages
     * @returns {Promise<Object>} Parsed JSON response
     * @private
     */
    async _postJson(url, headers, body, providerLabel) {
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), this.timeout);
        try {
            const response = await fetch(url, {
                method: "POST",
                headers: { "Content-Type": "application/json", ...headers },
                body: JSON.stringify(body),
                signal: controller.signal,
            });
            if (!response.ok) {
                const error = await response.text();
                throw new Error(`${providerLabel} API error: ${response.status} - ${error}`);
            }
            return await response.json();
        }
        catch (error) {
            if (error instanceof Error && error.name === "AbortError") {
                throw new Error("Request timeout");
            }
            throw error;
        }
        finally {
            clearTimeout(timeoutId);
        }
    }
    /**
     * Call OpenAI API
     * @private
     */
    async _callOpenAI(systemPrompt, userContent) {
        if (!this.apiKey) {
            throw new Error("OpenAI API key not configured");
        }
        const data = await this._postJson(`${this.baseUrl}/chat/completions`, { Authorization: `Bearer ${this.apiKey}` }, {
            model: this.model,
            messages: [
                { role: "system", content: systemPrompt },
                { role: "user", content: userContent },
            ],
            temperature: 0.7,
            max_tokens: this.maxTokens,
        }, "OpenAI");
        const content = data?.choices?.[0]?.message?.content;
        if (typeof content !== "string") {
            throw new Error("Malformed OpenAI response: missing message content");
        }
        return content;
    }
    /**
     * Call Anthropic (Claude) API
     * @private
     */
    async _callAnthropic(systemPrompt, userContent) {
        if (!this.apiKey) {
            throw new Error("Anthropic API key not configured");
        }
        const data = await this._postJson(`${this.baseUrl}/messages`, {
            "x-api-key": this.apiKey,
            "anthropic-version": "2023-06-01",
        }, {
            model: this.model,
            max_tokens: this.maxTokens,
            system: systemPrompt,
            messages: [{ role: "user", content: userContent }],
        }, "Anthropic");
        const text = data?.content?.[0]?.text;
        if (typeof text !== "string") {
            throw new Error("Malformed Anthropic response: missing content text");
        }
        return text;
    }
    /**
     * Call Ollama (local) API
     * @private
     */
    async _callOllama(systemPrompt, userContent) {
        const data = await this._postJson(`${this.baseUrl}/api/chat`, {}, {
            model: this.model,
            messages: [
                { role: "system", content: systemPrompt },
                { role: "user", content: userContent },
            ],
            stream: false,
            options: {
                num_predict: this.maxTokens,
            },
        }, "Ollama");
        const content = data?.message?.content;
        if (typeof content !== "string") {
            throw new Error("Malformed Ollama response: missing message content");
        }
        return content;
    }
    /**
     * Fallback when LLM fails: aggregate memory contents heuristically,
     * or return a low-confidence placeholder when no memories are available.
     * @private
     */
    _fallback(reason, memories = []) {
        this.stats.fallbackCount++;
        if (memories && memories.length > 0) {
            // Simple aggregation fallback
            const contents = memories.map((m) => m.content);
            const combined = contents.join("; ");
            const preview = combined.length > 200 ? `${combined.substring(0, 200)}...` : combined;
            return {
                reflection: `Aggregated from ${memories.length} memories: ${preview}`,
                confidence: 0.5,
            };
        }
        return {
            reflection: `Reflection generation unavailable: ${reason}`,
            confidence: 0.3,
        };
    }
    /**
     * Sleep utility
     * @private
     */
    _sleep(ms) {
        return new Promise((resolve) => setTimeout(resolve, ms));
    }
    /**
     * Get client statistics
     * @returns {Object} Statistics including successRate as a "0.00"-style string
     */
    getStats() {
        return {
            ...this.stats,
            successRate: this.stats.totalRequests > 0
                ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(2)
                : "0.00",
        };
    }
    /**
     * Reset statistics
     */
    resetStats() {
        this.stats = {
            totalRequests: 0,
            successfulRequests: 0,
            failedRequests: 0,
            fallbackCount: 0,
        };
    }
}
export default LLMClient;
@@ -0,0 +1,17 @@
1
/**
 * YAMO LLM Module
 * Large Language Model client abstraction
 */
export { LLMClient } from "./client.js";
/**
 * Self-RefiningExtractor is now implemented as a YAMO skill rather than a
 * class exported from this module.
 * Use: skill-self-refining-extractor.yamo
 *
 * Example:
 *   _kernel_execute({
 *     skill: 'skill-self-refining-extractor.yamo',
 *     skill_path: 'skills/skill-super.yamo',
 *     max_iterations: 5
 *   })
 */
// File name of the skill that replaces the former extractor class.
export declare const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
package/lib/llm/index.js CHANGED
@@ -1,10 +1,18 @@
1
// @ts-nocheck
/**
 * YAMO LLM Module
 * Large Language Model client abstraction
 */
export { LLMClient } from "./client.js";
/**
 * Self-RefiningExtractor is now implemented as a YAMO skill rather than a
 * class exported from this module.
 * Use: skill-self-refining-extractor.yamo
 *
 * Example:
 *   _kernel_execute({
 *     skill: 'skill-self-refining-extractor.yamo',
 *     skill_path: 'skills/skill-super.yamo',
 *     max_iterations: 5
 *   })
 */
// File name of the skill that replaces the former extractor class.
export const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
@@ -0,0 +1,18 @@
1
// @ts-nocheck
/**
 * YAMO LLM Module
 * Large Language Model client abstraction
 */
export { LLMClient } from "./client.js";
/**
 * Self-RefiningExtractor is now implemented as a YAMO skill rather than a
 * class exported from this module.
 * Use: skill-self-refining-extractor.yamo
 *
 * Example:
 *   _kernel_execute({
 *     skill: 'skill-self-refining-extractor.yamo',
 *     skill_path: 'skills/skill-super.yamo',
 *     max_iterations: 5
 *   })
 */
// File name of the skill that replaces the former extractor class.
export const SELF_REFINING_EXTRACTOR = "skill-self-refining-extractor.yamo";
@@ -0,0 +1,120 @@
1
+ /**
2
+ * LanceDB Client wrapper class
3
+ */
4
+ export declare class LanceDBClient {
5
+ uri: any;
6
+ tableName: any;
7
+ maxRetries: any;
8
+ retryDelay: any;
9
+ vectorDimension: any;
10
+ driver: any;
11
+ db: any;
12
+ table: any;
13
+ isConnected: any;
14
+ tempDir: any;
15
+ /**
16
+ * Create a new LanceDBClient instance
17
+ * @param {Object} [config={}] - Configuration object
18
+ */
19
+ constructor(config?: {});
20
+ /**
21
+ * Connect to LanceDB and initialize table
22
+ * Creates the database directory and table if they don't exist
23
+ * @returns {Promise<void>}
24
+ * @throws {StorageError} If connection fails after retries
25
+ */
26
+ connect(): Promise<void>;
27
+ /**
28
+ * Disconnect from LanceDB
29
+ * @returns {Promise<void>}
30
+ */
31
+ disconnect(): void;
32
+ /**
33
+ * Add a single memory entry
34
+ * @param {Object} data - Entry data
35
+ * @returns {Promise<Object>} Result with id and success status
36
+ * @throws {StorageError} If add operation fails
37
+ */
38
+ add(data: any): Promise<any>;
39
+ /**
40
+ * Add multiple memory entries in batch
41
+ * @param {Array<Object>} records - Array of entry data objects
42
+ * @returns {Promise<Object>} Result with count of added records
43
+ * @throws {StorageError} If batch add fails
44
+ */
45
+ addBatch(records: any): Promise<any>;
46
+ /**
47
+ * Search for similar vectors
48
+ * @param {Array<number>} vector - Query vector (384 dimensions)
49
+ * @param {Object} options - Search options
50
+ * @returns {Promise<Array<Object>>} Array of search results with scores
51
+ * @throws {QueryError} If search fails
52
+ */
53
+ search(vector: any, options?: {}): Promise<any>;
54
+ /**
55
+ * Get a record by ID
56
+ * @param {string} id - Record ID
57
+ * @returns {Promise<Object|null>} Record object or null if not found
58
+ * @throws {QueryError} If query fails
59
+ */
60
+ getById(id: any): Promise<any>;
61
+ /**
62
+ * Get all records from the database
63
+ * @param {Object} options - Options
64
+ * @returns {Promise<Array<Object>>} Array of all records
65
+ */
66
+ getAll(options?: {}): Promise<any>;
67
+ /**
68
+ * Delete a record by ID
69
+ * @param {string} id - Record ID to delete
70
+ * @returns {Promise<Object>} Result with success status
71
+ * @throws {StorageError} If delete fails
72
+ */
73
+ delete(id: any): Promise<any>;
74
+ /**
75
+ * Update an existing record
76
+ * @param {string} id - Record ID to update
77
+ * @param {Object} data - Updated data fields
78
+ * @returns {Promise<Object>} Result with success status
79
+ * @throws {StorageError} If update fails
80
+ */
81
+ update(id: any, data: any): Promise<any>;
82
+ /**
83
+ * Get database statistics
84
+ * @returns {Promise<Object>} Statistics including count, size, etc.
85
+ * @throws {QueryError} If stats query fails
86
+ */
87
+ getStats(): Promise<any>;
88
+ /**
89
+ * Sanitize an ID to prevent SQL injection
90
+ * Removes any characters that aren't alphanumeric, underscore, or hyphen
91
+ * @private
92
+ */
93
+ _sanitizeId(id: any): any;
94
+ /**
95
+ * Validate a record object
96
+ * @private
97
+ */
98
+ _validateRecord(record: any): void;
99
+ /**
100
+ * Validate a vector array
101
+ * @private
102
+ */
103
+ _validateVector(vector: any): void;
104
+ /**
105
+ * Sleep for a specified duration
106
+ * @private
107
+ */
108
+ _sleep(ms: any): Promise<unknown>;
109
+ /**
110
+ * Check if an error is retryable (transient network/connection issues)
111
+ * @private
112
+ */
113
+ _isRetryableError(error: any): boolean;
114
+ /**
115
+ * Retry an operation with exponential backoff
116
+ * @private
117
+ */
118
+ _retryOperation(operation: any, maxRetries: any, baseDelay: any): Promise<any>;
119
+ }
120
+ export default LanceDBClient;