@yamo/memory-mesh 2.3.2 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. package/README.md +8 -2
  2. package/bin/memory_mesh.js +1 -1
  3. package/lib/llm/client.d.ts +86 -0
  4. package/lib/llm/client.js +300 -357
  5. package/lib/llm/client.ts +334 -0
  6. package/lib/llm/index.d.ts +17 -0
  7. package/lib/llm/index.js +16 -8
  8. package/lib/llm/index.ts +18 -0
  9. package/lib/memory/adapters/client.d.ts +120 -0
  10. package/lib/memory/adapters/client.js +519 -0
  11. package/lib/memory/adapters/client.ts +519 -0
  12. package/lib/memory/adapters/config.d.ts +130 -0
  13. package/lib/memory/adapters/config.js +190 -0
  14. package/lib/memory/adapters/config.ts +190 -0
  15. package/lib/memory/adapters/errors.d.ts +84 -0
  16. package/lib/memory/adapters/errors.js +129 -0
  17. package/lib/memory/adapters/errors.ts +129 -0
  18. package/lib/memory/context-manager.d.ts +41 -0
  19. package/lib/memory/context-manager.js +345 -0
  20. package/lib/memory/context-manager.ts +345 -0
  21. package/lib/memory/embeddings/factory.d.ts +57 -0
  22. package/lib/memory/embeddings/factory.js +149 -0
  23. package/lib/memory/embeddings/factory.ts +149 -0
  24. package/lib/memory/embeddings/index.d.ts +2 -0
  25. package/lib/memory/embeddings/index.js +3 -0
  26. package/lib/memory/embeddings/index.ts +3 -0
  27. package/lib/memory/embeddings/service.d.ts +134 -0
  28. package/lib/memory/embeddings/service.js +516 -0
  29. package/lib/memory/embeddings/service.ts +516 -0
  30. package/lib/memory/index.d.ts +9 -0
  31. package/lib/memory/index.js +10 -1
  32. package/lib/memory/index.ts +10 -0
  33. package/lib/memory/memory-mesh.d.ts +332 -0
  34. package/lib/memory/memory-mesh.js +1470 -678
  35. package/lib/memory/memory-mesh.ts +1517 -0
  36. package/lib/memory/memory-translator.d.ts +14 -0
  37. package/lib/memory/memory-translator.js +126 -0
  38. package/lib/memory/memory-translator.ts +126 -0
  39. package/lib/memory/schema.d.ts +130 -0
  40. package/lib/memory/schema.js +184 -0
  41. package/lib/memory/schema.ts +184 -0
  42. package/lib/memory/scorer.d.ts +25 -0
  43. package/lib/memory/scorer.js +78 -0
  44. package/lib/memory/scorer.ts +78 -0
  45. package/lib/memory/search/index.d.ts +1 -0
  46. package/lib/memory/search/index.js +2 -0
  47. package/lib/memory/search/index.ts +2 -0
  48. package/lib/memory/search/keyword-search.d.ts +46 -0
  49. package/lib/memory/search/keyword-search.js +136 -0
  50. package/lib/memory/search/keyword-search.ts +136 -0
  51. package/lib/scrubber/config/defaults.d.ts +46 -0
  52. package/lib/scrubber/config/defaults.js +50 -57
  53. package/lib/scrubber/config/defaults.ts +55 -0
  54. package/lib/scrubber/errors/scrubber-error.d.ts +22 -0
  55. package/lib/scrubber/errors/scrubber-error.js +28 -32
  56. package/lib/scrubber/errors/scrubber-error.ts +44 -0
  57. package/lib/scrubber/index.d.ts +5 -0
  58. package/lib/scrubber/index.js +4 -23
  59. package/lib/scrubber/index.ts +6 -0
  60. package/lib/scrubber/scrubber.d.ts +44 -0
  61. package/lib/scrubber/scrubber.js +100 -121
  62. package/lib/scrubber/scrubber.ts +109 -0
  63. package/lib/scrubber/stages/chunker.d.ts +25 -0
  64. package/lib/scrubber/stages/chunker.js +74 -91
  65. package/lib/scrubber/stages/chunker.ts +104 -0
  66. package/lib/scrubber/stages/metadata-annotator.d.ts +17 -0
  67. package/lib/scrubber/stages/metadata-annotator.js +55 -65
  68. package/lib/scrubber/stages/metadata-annotator.ts +75 -0
  69. package/lib/scrubber/stages/normalizer.d.ts +16 -0
  70. package/lib/scrubber/stages/normalizer.js +42 -50
  71. package/lib/scrubber/stages/normalizer.ts +60 -0
  72. package/lib/scrubber/stages/semantic-filter.d.ts +16 -0
  73. package/lib/scrubber/stages/semantic-filter.js +42 -52
  74. package/lib/scrubber/stages/semantic-filter.ts +62 -0
  75. package/lib/scrubber/stages/structural-cleaner.d.ts +18 -0
  76. package/lib/scrubber/stages/structural-cleaner.js +66 -75
  77. package/lib/scrubber/stages/structural-cleaner.ts +83 -0
  78. package/lib/scrubber/stages/validator.d.ts +17 -0
  79. package/lib/scrubber/stages/validator.js +46 -56
  80. package/lib/scrubber/stages/validator.ts +67 -0
  81. package/lib/scrubber/telemetry.d.ts +29 -0
  82. package/lib/scrubber/telemetry.js +54 -58
  83. package/lib/scrubber/telemetry.ts +62 -0
  84. package/lib/scrubber/utils/hash.d.ts +14 -0
  85. package/lib/scrubber/utils/hash.js +30 -32
  86. package/lib/scrubber/utils/hash.ts +40 -0
  87. package/lib/scrubber/utils/html-parser.d.ts +14 -0
  88. package/lib/scrubber/utils/html-parser.js +32 -39
  89. package/lib/scrubber/utils/html-parser.ts +46 -0
  90. package/lib/scrubber/utils/pattern-matcher.d.ts +12 -0
  91. package/lib/scrubber/utils/pattern-matcher.js +48 -57
  92. package/lib/scrubber/utils/pattern-matcher.ts +64 -0
  93. package/lib/scrubber/utils/token-counter.d.ts +18 -0
  94. package/lib/scrubber/utils/token-counter.js +24 -25
  95. package/lib/scrubber/utils/token-counter.ts +32 -0
  96. package/lib/utils/logger.d.ts +19 -0
  97. package/lib/utils/logger.js +65 -0
  98. package/lib/utils/logger.ts +65 -0
  99. package/lib/utils/skill-metadata.d.ts +24 -0
  100. package/lib/utils/skill-metadata.js +133 -0
  101. package/lib/utils/skill-metadata.ts +133 -0
  102. package/lib/yamo/emitter.d.ts +46 -0
  103. package/lib/yamo/emitter.js +79 -143
  104. package/lib/yamo/emitter.ts +171 -0
  105. package/lib/yamo/index.d.ts +14 -0
  106. package/lib/yamo/index.js +6 -7
  107. package/lib/yamo/index.ts +16 -0
  108. package/lib/yamo/schema.d.ts +56 -0
  109. package/lib/yamo/schema.js +82 -108
  110. package/lib/yamo/schema.ts +133 -0
  111. package/package.json +13 -8
  112. package/index.d.ts +0 -111
  113. package/lib/embeddings/factory.js +0 -151
  114. package/lib/embeddings/index.js +0 -2
  115. package/lib/embeddings/service.js +0 -586
  116. package/lib/index.js +0 -6
  117. package/lib/lancedb/client.js +0 -633
  118. package/lib/lancedb/config.js +0 -215
  119. package/lib/lancedb/errors.js +0 -144
  120. package/lib/lancedb/index.js +0 -4
  121. package/lib/lancedb/schema.js +0 -217
  122. package/lib/search/index.js +0 -1
  123. package/lib/search/keyword-search.js +0 -144
  124. package/lib/utils/index.js +0 -1
@@ -0,0 +1,516 @@
1
+ // @ts-nocheck
2
+ /**
3
+ * EmbeddingService - Multi-provider embedding generation service
4
+ *
5
+ * Supports:
6
+ * - Local models: Xenova/Transformers.js (ONNX runtime)
7
+ * - Ollama: Local Ollama embeddings API
8
+ * - API models: OpenAI, Cohere
9
+ *
10
+ * Implements TDD for Phase 3, Task 3.1 - Embedding Service Architecture
11
+ */
12
+ import crypto from "crypto";
13
+ import { ConfigurationError, EmbeddingError } from "../adapters/errors.js";
14
+ /**
15
+ * EmbeddingService provides a unified interface for generating text embeddings
16
+ * using multiple backend providers (local ONNX models or cloud APIs).
17
+ */
18
+ export class EmbeddingService {
19
+ modelType;
20
+ modelName;
21
+ baseUrl;
22
+ dimension;
23
+ batchSize;
24
+ normalize;
25
+ apiKey;
26
+ model;
27
+ cache;
28
+ cacheMaxSize;
29
+ initialized;
30
+ stats;
31
+ /**
32
+ * Create a new EmbeddingService instance
33
+ * @param {Object} [config={}] - Configuration options
34
+ */
35
+ constructor(config = {}) {
36
+ this.modelType =
37
+ (config && config.modelType) ||
38
+ process.env.EMBEDDING_MODEL_TYPE ||
39
+ "local";
40
+ this.modelName =
41
+ (config && config.modelName) ||
42
+ process.env.EMBEDDING_MODEL_NAME ||
43
+ "Xenova/all-MiniLM-L6-v2";
44
+ this.baseUrl =
45
+ (config && config.baseUrl) ||
46
+ process.env.OLLAMA_BASE_URL ||
47
+ process.env.EMBEDDING_BASE_URL ||
48
+ "http://localhost:11434";
49
+ this.dimension =
50
+ (config && config.dimension) ||
51
+ parseInt(process.env.EMBEDDING_DIMENSION || "384") ||
52
+ 384;
53
+ this.batchSize =
54
+ (config && config.batchSize) ||
55
+ parseInt(process.env.EMBEDDING_BATCH_SIZE || "32") ||
56
+ 32;
57
+ this.normalize =
58
+ config && config.normalize !== undefined
59
+ ? config.normalize
60
+ : process.env.EMBEDDING_NORMALIZE !== "false";
61
+ this.apiKey = (config && config.apiKey) || process.env.EMBEDDING_API_KEY;
62
+ this.model = null;
63
+ this.cache = new Map();
64
+ this.cacheMaxSize = (config && config.cacheMaxSize) || 1000;
65
+ this.initialized = false;
66
+ // Statistics
67
+ this.stats = {
68
+ totalEmbeddings: 0,
69
+ cacheHits: 0,
70
+ cacheMisses: 0,
71
+ batchCount: 0,
72
+ };
73
+ }
74
+ /**
75
+ * Initialize the embedding model
76
+ * Loads the model based on modelType (local, ollama, openai, cohere)
77
+ */
78
+ async init() {
79
+ try {
80
+ switch (this.modelType) {
81
+ case "local":
82
+ await this._initLocalModel();
83
+ break;
84
+ case "ollama":
85
+ this._initOllama();
86
+ break;
87
+ case "openai":
88
+ await this._initOpenAI();
89
+ break;
90
+ case "cohere":
91
+ await this._initCohere();
92
+ break;
93
+ default:
94
+ throw new ConfigurationError(`Unknown model type: ${this.modelType}. Must be 'local', 'ollama', 'openai', or 'cohere'`, { modelType: this.modelType });
95
+ }
96
+ this.initialized = true;
97
+ }
98
+ catch (error) {
99
+ if (error instanceof ConfigurationError) {
100
+ throw error;
101
+ }
102
+ const message = error instanceof Error ? error.message : String(error);
103
+ throw new EmbeddingError(`Failed to initialize embedding service: ${message}`, {
104
+ modelType: this.modelType,
105
+ modelName: this.modelName,
106
+ originalError: message,
107
+ });
108
+ }
109
+ }
110
+ /**
111
+ * Generate embedding for a single text
112
+ * @param {string} text - Text to embed
113
+ * @param {Object} options - Options for embedding generation
114
+ * @returns {Promise<number[]>} Embedding vector
115
+ */
116
+ async embed(text, _options = {}) {
117
+ if (!this.initialized) {
118
+ throw new EmbeddingError("Embedding service not initialized. Call init() first.", {
119
+ modelType: this.modelType,
120
+ });
121
+ }
122
+ if (!text || typeof text !== "string") {
123
+ throw new EmbeddingError("Text must be a non-empty string", {
124
+ text,
125
+ textType: typeof text,
126
+ });
127
+ }
128
+ // Check cache
129
+ const cacheKey = this._getCacheKey(text);
130
+ const cached = this.cache.get(cacheKey);
131
+ if (cached) {
132
+ this.stats.cacheHits++;
133
+ return cached;
134
+ }
135
+ // Generate embedding
136
+ let embedding;
137
+ try {
138
+ switch (this.modelType) {
139
+ case "local":
140
+ embedding = await this._embedLocal(text);
141
+ break;
142
+ case "ollama":
143
+ embedding = await this._embedOllama(text);
144
+ break;
145
+ case "openai":
146
+ embedding = await this._embedOpenAI(text);
147
+ break;
148
+ case "cohere":
149
+ embedding = await this._embedCohere(text);
150
+ break;
151
+ default:
152
+ throw new EmbeddingError(`Unknown model type: ${this.modelType}`, {
153
+ modelType: this.modelType,
154
+ });
155
+ }
156
+ // Normalize if enabled
157
+ if (this.normalize) {
158
+ embedding = this._normalize(embedding);
159
+ }
160
+ // Cache result
161
+ this._setCache(cacheKey, embedding);
162
+ this.stats.totalEmbeddings++;
163
+ this.stats.cacheMisses++;
164
+ return embedding;
165
+ }
166
+ catch (error) {
167
+ if (error instanceof EmbeddingError) {
168
+ throw error;
169
+ }
170
+ const message = error instanceof Error ? error.message : String(error);
171
+ throw new EmbeddingError(`Failed to generate embedding: ${message}`, {
172
+ modelType: this.modelType,
173
+ text: text.substring(0, 100),
174
+ });
175
+ }
176
+ }
177
+ /**
178
+ * Generate embeddings for a batch of texts
179
+ * @param {string[]} texts - Array of texts to embed
180
+ * @param {Object} options - Options for embedding generation
181
+ * @returns {Promise<number[][]>} Array of embedding vectors
182
+ */
183
+ async embedBatch(texts, _options = {}) {
184
+ if (!this.initialized) {
185
+ throw new EmbeddingError("Embedding service not initialized. Call init() first.", {
186
+ modelType: this.modelType,
187
+ });
188
+ }
189
+ if (!Array.isArray(texts)) {
190
+ throw new EmbeddingError("Texts must be an array", {
191
+ textsType: typeof texts,
192
+ });
193
+ }
194
+ if (texts.length === 0) {
195
+ return [];
196
+ }
197
+ try {
198
+ const embeddings = [];
199
+ // Process in batches
200
+ for (let i = 0; i < texts.length; i += this.batchSize) {
201
+ const batch = texts.slice(i, Math.min(i + this.batchSize, texts.length));
202
+ // Generate embeddings for batch
203
+ const batchEmbeddings = await Promise.all(batch.map((text) => this.embed(text)));
204
+ embeddings.push(...batchEmbeddings);
205
+ this.stats.batchCount++;
206
+ }
207
+ return embeddings;
208
+ }
209
+ catch (error) {
210
+ if (error instanceof EmbeddingError) {
211
+ throw error;
212
+ }
213
+ const message = error instanceof Error ? error.message : String(error);
214
+ throw new EmbeddingError(`Failed to generate batch embeddings: ${message}`, {
215
+ modelType: this.modelType,
216
+ batchSize: texts.length,
217
+ });
218
+ }
219
+ }
220
+ /**
221
+ * Initialize local ONNX model using Xenova/Transformers.js
222
+ * @private
223
+ */
224
+ async _initLocalModel() {
225
+ try {
226
+ // Dynamic import to allow optional dependency
227
+ const { pipeline } = (await import("@xenova/transformers"));
228
+ // Load feature extraction pipeline
229
+ this.model = await pipeline("feature-extraction", this.modelName, {
230
+ quantized: true,
231
+ progress_callback: (progress) => {
232
+ // Optional: Log model download progress
233
+ if (progress.status === "downloading") {
234
+ // Silently handle progress
235
+ }
236
+ },
237
+ });
238
+ // Update dimension based on model (384 for all-MiniLM-L6-v2)
239
+ if (this.modelName.includes("all-MiniLM-L6-v2")) {
240
+ this.dimension = 384;
241
+ }
242
+ }
243
+ catch (error) {
244
+ const message = error instanceof Error ? error.message : String(error);
245
+ throw new ConfigurationError(`Failed to load local model: ${message}. Make sure @xenova/transformers is installed.`, { modelName: this.modelName, error: message });
246
+ }
247
+ }
248
+ /**
249
+ * Initialize Ollama client
250
+ * Ollama runs locally and doesn't require authentication
251
+ * @private
252
+ */
253
+ _initOllama() {
254
+ // Ollama doesn't require initialization - it's a local HTTP API
255
+ // Store the base URL for use in _embedOllama
256
+ this.model = {
257
+ baseUrl: this.baseUrl,
258
+ modelName: this.modelName || "nomic-embed-text",
259
+ };
260
+ // Set default dimension for common Ollama embedding models
261
+ if (this.modelName.includes("nomic-embed-text")) {
262
+ this.dimension = 768;
263
+ }
264
+ else if (this.modelName.includes("mxbai-embed")) {
265
+ this.dimension = 1024;
266
+ }
267
+ else if (this.modelName.includes("all-MiniLM")) {
268
+ this.dimension = 384;
269
+ }
270
+ }
271
+ /**
272
+ * Initialize OpenAI client
273
+ * @private
274
+ */
275
+ async _initOpenAI() {
276
+ if (!this.apiKey) {
277
+ throw new ConfigurationError("OpenAI API key is required. Set EMBEDDING_API_KEY environment variable or pass apiKey in config.", { modelType: "openai" });
278
+ }
279
+ try {
280
+ // Dynamic import to allow optional dependency (openai may not be installed)
281
+ const { OpenAI } = await import("openai");
282
+ this.model = new OpenAI({ apiKey: this.apiKey });
283
+ // Update dimension for OpenAI models
284
+ if (this.modelName.includes("text-embedding-ada-002")) {
285
+ this.dimension = 1536;
286
+ }
287
+ }
288
+ catch (error) {
289
+ const message = error instanceof Error ? error.message : String(error);
290
+ throw new ConfigurationError(`Failed to initialize OpenAI client: ${message}. Make sure openai package is installed.`, { error: message });
291
+ }
292
+ }
293
+ /**
294
+ * Initialize Cohere client
295
+ * @private
296
+ */
297
+ async _initCohere() {
298
+ if (!this.apiKey) {
299
+ throw new ConfigurationError("Cohere API key is required. Set EMBEDDING_API_KEY environment variable or pass apiKey in config.", { modelType: "cohere" });
300
+ }
301
+ try {
302
+ // Dynamic import to allow optional dependency (cohere-ai may not be installed)
303
+ const cohere = await import("cohere-ai");
304
+ this.model = new cohere.CohereClient({ token: this.apiKey });
305
+ // Update dimension for Cohere models
306
+ if (this.modelName.includes("embed-english-v3.0")) {
307
+ this.dimension = 1024;
308
+ }
309
+ }
310
+ catch (error) {
311
+ const message = error instanceof Error ? error.message : String(error);
312
+ throw new ConfigurationError(`Failed to initialize Cohere client: ${message}. Make sure cohere-ai package is installed.`, { error: message });
313
+ }
314
+ }
315
+ /**
316
+ * Generate embedding using local ONNX model
317
+ * @param {string} text - Text to embed
318
+ * @returns {Promise<number[]>} Embedding vector
319
+ * @private
320
+ */
321
+ async _embedLocal(text) {
322
+ if (!this.model) {
323
+ throw new EmbeddingError("Model not initialized");
324
+ }
325
+ try {
326
+ // Local model call
327
+ const output = await this.model(text, {
328
+ pooling: "mean",
329
+ normalize: false,
330
+ });
331
+ // Convert from tensor to array
332
+ const embedding = Array.from(output.data);
333
+ return embedding;
334
+ }
335
+ catch (error) {
336
+ const message = error instanceof Error ? error.message : String(error);
337
+ throw new EmbeddingError(`Failed to generate local embedding: ${message}`, {
338
+ modelName: this.modelName,
339
+ text: text.substring(0, 100),
340
+ });
341
+ }
342
+ }
343
+ /**
344
+ * Generate embedding using Ollama API
345
+ * @param {string} text - Text to embed
346
+ * @returns {Promise<number[]>} Embedding vector
347
+ * @private
348
+ */
349
+ async _embedOllama(text) {
350
+ if (!this.model) {
351
+ throw new EmbeddingError("Model not initialized");
352
+ }
353
+ try {
354
+ const baseUrl = this.model.baseUrl;
355
+ const modelName = this.model.modelName;
356
+ const response = await fetch(`${baseUrl}/api/embeddings`, {
357
+ method: "POST",
358
+ headers: {
359
+ "Content-Type": "application/json",
360
+ },
361
+ body: JSON.stringify({
362
+ model: modelName,
363
+ prompt: text,
364
+ }),
365
+ });
366
+ if (!response.ok) {
367
+ const errorText = await response.text();
368
+ throw new EmbeddingError(`Ollama API error: ${response.status} ${response.statusText} - ${errorText}`, { baseUrl: baseUrl, modelName: modelName });
369
+ }
370
+ const data = await response.json();
371
+ if (!data.embedding) {
372
+ throw new EmbeddingError("Invalid response from Ollama API: missing embedding field", {
373
+ response: data,
374
+ });
375
+ }
376
+ return data.embedding;
377
+ }
378
+ catch (error) {
379
+ if (error instanceof EmbeddingError) {
380
+ throw error;
381
+ }
382
+ const message = error instanceof Error ? error.message : String(error);
383
+ const baseUrl = this.model?.baseUrl;
384
+ const modelName = this.model?.modelName;
385
+ throw new EmbeddingError(`Failed to generate Ollama embedding: ${message}. Make sure Ollama is running and the model is available.`, { baseUrl, modelName, error: message });
386
+ }
387
+ }
388
+ /**
389
+ * Generate embedding using OpenAI API
390
+ * @param {string} text - Text to embed
391
+ * @returns {Promise<number[]>} Embedding vector
392
+ * @private
393
+ */
394
+ async _embedOpenAI(text) {
395
+ if (!this.model) {
396
+ throw new EmbeddingError("Model not initialized");
397
+ }
398
+ try {
399
+ const response = await this.model.embeddings.create({
400
+ model: this.modelName,
401
+ input: text,
402
+ });
403
+ const embedding = response.data[0].embedding;
404
+ return embedding;
405
+ }
406
+ catch (error) {
407
+ const message = error instanceof Error ? error.message : String(error);
408
+ throw new EmbeddingError(`Failed to generate OpenAI embedding: ${message}`, {
409
+ modelName: this.modelName,
410
+ error: message,
411
+ });
412
+ }
413
+ }
414
+ /**
415
+ * Generate embedding using Cohere API
416
+ * @param {string} text - Text to embed
417
+ * @returns {Promise<number[]>} Embedding vector
418
+ * @private
419
+ */
420
+ async _embedCohere(text) {
421
+ if (!this.model) {
422
+ throw new EmbeddingError("Model not initialized");
423
+ }
424
+ try {
425
+ const response = await this.model.embed({
426
+ model: this.modelName,
427
+ texts: [text],
428
+ inputType: "search_document",
429
+ });
430
+ const embedding = response.embeddings[0];
431
+ return embedding;
432
+ }
433
+ catch (error) {
434
+ const message = error instanceof Error ? error.message : String(error);
435
+ throw new EmbeddingError(`Failed to generate Cohere embedding: ${message}`, {
436
+ modelName: this.modelName,
437
+ error: message,
438
+ });
439
+ }
440
+ }
441
+ /**
442
+ * Normalize vector to unit length
443
+ * @param {number[]} vector - Vector to normalize
444
+ * @returns {number[]} Normalized vector
445
+ * @private
446
+ */
447
+ _normalize(vector) {
448
+ // Calculate magnitude
449
+ const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
450
+ // Avoid division by zero
451
+ if (magnitude === 0) {
452
+ return vector.map(() => 0);
453
+ }
454
+ // Normalize
455
+ return vector.map((val) => val / magnitude);
456
+ }
457
+ /**
458
+ * Generate cache key from text
459
+ * @param {string} text - Text to generate key from
460
+ * @returns {string} Cache key
461
+ * @private
462
+ */
463
+ _getCacheKey(text) {
464
+ return crypto.createHash("md5").update(text).digest("hex");
465
+ }
466
+ _setCache(key, value) {
467
+ // Evict oldest if at capacity
468
+ if (this.cache.size >= this.cacheMaxSize) {
469
+ const firstKey = this.cache.keys().next().value;
470
+ if (firstKey !== undefined) {
471
+ this.cache.delete(firstKey);
472
+ }
473
+ }
474
+ this.cache.set(key, value);
475
+ }
476
+ /**
477
+ * Get service statistics
478
+ * @returns {Object} Statistics object
479
+ */
480
+ getStats() {
481
+ return {
482
+ modelType: this.modelType,
483
+ modelName: this.modelName,
484
+ dimension: this.dimension,
485
+ initialized: this.initialized,
486
+ totalEmbeddings: this.stats.totalEmbeddings,
487
+ cacheHits: this.stats.cacheHits,
488
+ cacheMisses: this.stats.cacheMisses,
489
+ cacheSize: this.cache.size,
490
+ cacheMaxSize: this.cacheMaxSize,
491
+ cacheHitRate: this.stats.cacheHits /
492
+ (this.stats.cacheHits + this.stats.cacheMisses) || 0,
493
+ batchCount: this.stats.batchCount,
494
+ batchSize: this.batchSize,
495
+ normalize: this.normalize,
496
+ };
497
+ }
498
+ /**
499
+ * Clear the embedding cache
500
+ */
501
+ clearCache() {
502
+ this.cache.clear();
503
+ }
504
+ /**
505
+ * Reset statistics
506
+ */
507
+ resetStats() {
508
+ this.stats = {
509
+ totalEmbeddings: 0,
510
+ cacheHits: 0,
511
+ cacheMisses: 0,
512
+ batchCount: 0,
513
+ };
514
+ }
515
+ }
516
+ export default EmbeddingService;
/**
 * YAMO Brain Module
 * Semantic memory mesh with vector search capabilities
 */
export { MemoryMesh, run } from "./memory-mesh.js";
export { MemoryContextManager } from "./context-manager.js";
export { LanceDBClient } from "./adapters/client.js";
export * from "./embeddings/index.js";
export * from "./search/index.js";
// @ts-nocheck
/**
 * YAMO Brain Module
 * Semantic memory mesh with vector search capabilities
 */
export { MemoryMesh, run } from "./memory-mesh.js";
export { MemoryContextManager } from "./context-manager.js";
export { LanceDBClient } from "./adapters/client.js";
export * from "./embeddings/index.js";
export * from "./search/index.js";
// @ts-nocheck
/**
 * YAMO Brain Module
 * Semantic memory mesh with vector search capabilities
 */
export { MemoryMesh, run } from "./memory-mesh.js";
export { MemoryContextManager } from "./context-manager.js";
export { LanceDBClient } from "./adapters/client.js";
export * from "./embeddings/index.js";
export * from "./search/index.js";