@yamo/memory-mesh 2.3.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. package/bin/memory_mesh.js +1 -1
  2. package/lib/llm/client.d.ts +111 -0
  3. package/lib/llm/client.js +299 -357
  4. package/lib/llm/client.ts +413 -0
  5. package/lib/llm/index.d.ts +17 -0
  6. package/lib/llm/index.js +15 -8
  7. package/lib/llm/index.ts +19 -0
  8. package/lib/memory/adapters/client.d.ts +183 -0
  9. package/lib/memory/adapters/client.js +518 -0
  10. package/lib/memory/adapters/client.ts +678 -0
  11. package/lib/memory/adapters/config.d.ts +137 -0
  12. package/lib/memory/adapters/config.js +189 -0
  13. package/lib/memory/adapters/config.ts +259 -0
  14. package/lib/memory/adapters/errors.d.ts +76 -0
  15. package/lib/memory/adapters/errors.js +128 -0
  16. package/lib/memory/adapters/errors.ts +166 -0
  17. package/lib/memory/context-manager.d.ts +44 -0
  18. package/lib/memory/context-manager.js +344 -0
  19. package/lib/memory/context-manager.ts +432 -0
  20. package/lib/memory/embeddings/factory.d.ts +59 -0
  21. package/lib/memory/embeddings/factory.js +148 -0
  22. package/lib/{embeddings/factory.js → memory/embeddings/factory.ts} +69 -28
  23. package/lib/memory/embeddings/index.d.ts +2 -0
  24. package/lib/memory/embeddings/index.js +2 -0
  25. package/lib/memory/embeddings/index.ts +2 -0
  26. package/lib/memory/embeddings/service.d.ts +164 -0
  27. package/lib/memory/embeddings/service.js +515 -0
  28. package/lib/{embeddings/service.js → memory/embeddings/service.ts} +223 -156
  29. package/lib/memory/index.d.ts +9 -0
  30. package/lib/memory/index.js +9 -1
  31. package/lib/memory/index.ts +20 -0
  32. package/lib/memory/memory-mesh.d.ts +274 -0
  33. package/lib/memory/memory-mesh.js +1469 -678
  34. package/lib/memory/memory-mesh.ts +1803 -0
  35. package/lib/memory/memory-translator.d.ts +19 -0
  36. package/lib/memory/memory-translator.js +125 -0
  37. package/lib/memory/memory-translator.ts +158 -0
  38. package/lib/memory/schema.d.ts +111 -0
  39. package/lib/memory/schema.js +183 -0
  40. package/lib/memory/schema.ts +267 -0
  41. package/lib/memory/scorer.d.ts +26 -0
  42. package/lib/memory/scorer.js +77 -0
  43. package/lib/memory/scorer.ts +95 -0
  44. package/lib/memory/search/index.d.ts +1 -0
  45. package/lib/memory/search/index.js +1 -0
  46. package/lib/memory/search/index.ts +1 -0
  47. package/lib/memory/search/keyword-search.d.ts +62 -0
  48. package/lib/memory/search/keyword-search.js +135 -0
  49. package/lib/{search/keyword-search.js → memory/search/keyword-search.ts} +66 -36
  50. package/lib/scrubber/config/defaults.d.ts +53 -0
  51. package/lib/scrubber/config/defaults.js +49 -57
  52. package/lib/scrubber/config/defaults.ts +117 -0
  53. package/lib/scrubber/index.d.ts +6 -0
  54. package/lib/scrubber/index.js +3 -23
  55. package/lib/scrubber/index.ts +7 -0
  56. package/lib/scrubber/scrubber.d.ts +61 -0
  57. package/lib/scrubber/scrubber.js +99 -121
  58. package/lib/scrubber/scrubber.ts +168 -0
  59. package/lib/scrubber/stages/chunker.d.ts +13 -0
  60. package/lib/scrubber/stages/metadata-annotator.d.ts +18 -0
  61. package/lib/scrubber/stages/normalizer.d.ts +13 -0
  62. package/lib/scrubber/stages/semantic-filter.d.ts +13 -0
  63. package/lib/scrubber/stages/structural-cleaner.d.ts +13 -0
  64. package/lib/scrubber/stages/validator.d.ts +18 -0
  65. package/lib/scrubber/telemetry.d.ts +36 -0
  66. package/lib/scrubber/telemetry.js +53 -58
  67. package/lib/scrubber/telemetry.ts +99 -0
  68. package/lib/utils/logger.d.ts +29 -0
  69. package/lib/utils/logger.js +64 -0
  70. package/lib/utils/logger.ts +85 -0
  71. package/lib/utils/skill-metadata.d.ts +32 -0
  72. package/lib/utils/skill-metadata.js +132 -0
  73. package/lib/utils/skill-metadata.ts +147 -0
  74. package/lib/yamo/emitter.d.ts +73 -0
  75. package/lib/yamo/emitter.js +78 -143
  76. package/lib/yamo/emitter.ts +249 -0
  77. package/lib/yamo/schema.d.ts +58 -0
  78. package/lib/yamo/schema.js +81 -108
  79. package/lib/yamo/schema.ts +165 -0
  80. package/package.json +11 -8
  81. package/index.d.ts +0 -111
  82. package/lib/embeddings/index.js +0 -2
  83. package/lib/index.js +0 -6
  84. package/lib/lancedb/client.js +0 -633
  85. package/lib/lancedb/config.js +0 -215
  86. package/lib/lancedb/errors.js +0 -144
  87. package/lib/lancedb/index.js +0 -4
  88. package/lib/lancedb/schema.js +0 -217
  89. package/lib/scrubber/errors/scrubber-error.js +0 -43
  90. package/lib/scrubber/stages/chunker.js +0 -103
  91. package/lib/scrubber/stages/metadata-annotator.js +0 -74
  92. package/lib/scrubber/stages/normalizer.js +0 -59
  93. package/lib/scrubber/stages/semantic-filter.js +0 -61
  94. package/lib/scrubber/stages/structural-cleaner.js +0 -82
  95. package/lib/scrubber/stages/validator.js +0 -66
  96. package/lib/scrubber/utils/hash.js +0 -39
  97. package/lib/scrubber/utils/html-parser.js +0 -45
  98. package/lib/scrubber/utils/pattern-matcher.js +0 -63
  99. package/lib/scrubber/utils/token-counter.js +0 -31
  100. package/lib/search/index.js +0 -1
  101. package/lib/utils/index.js +0 -1
  102. package/lib/yamo/index.js +0 -15
@@ -0,0 +1,515 @@
1
+ /**
2
+ * EmbeddingService - Multi-provider embedding generation service
3
+ *
4
+ * Supports:
5
+ * - Local models: Xenova/Transformers.js (ONNX runtime)
6
+ * - Ollama: Local Ollama embeddings API
7
+ * - API models: OpenAI, Cohere
8
+ *
9
+ * Implements TDD for Phase 3, Task 3.1 - Embedding Service Architecture
10
+ */
11
+ import crypto from "crypto";
12
+ import { ConfigurationError, EmbeddingError } from "../adapters/errors.js";
13
+ /**
14
+ * EmbeddingService provides a unified interface for generating text embeddings
15
+ * using multiple backend providers (local ONNX models or cloud APIs).
16
+ */
17
+ export class EmbeddingService {
18
+ modelType;
19
+ modelName;
20
+ baseUrl;
21
+ dimension;
22
+ batchSize;
23
+ normalize;
24
+ apiKey;
25
+ model;
26
+ cache;
27
+ cacheMaxSize;
28
+ initialized;
29
+ stats;
30
+ /**
31
+ * Create a new EmbeddingService instance
32
+ * @param {Object} [config={}] - Configuration options
33
+ */
34
+ constructor(config = {}) {
35
+ this.modelType =
36
+ (config && config.modelType) ||
37
+ process.env.EMBEDDING_MODEL_TYPE ||
38
+ "local";
39
+ this.modelName =
40
+ (config && config.modelName) ||
41
+ process.env.EMBEDDING_MODEL_NAME ||
42
+ "Xenova/all-MiniLM-L6-v2";
43
+ this.baseUrl =
44
+ (config && config.baseUrl) ||
45
+ process.env.OLLAMA_BASE_URL ||
46
+ process.env.EMBEDDING_BASE_URL ||
47
+ "http://localhost:11434";
48
+ this.dimension =
49
+ (config && config.dimension) ||
50
+ parseInt(process.env.EMBEDDING_DIMENSION || "384") ||
51
+ 384;
52
+ this.batchSize =
53
+ (config && config.batchSize) ||
54
+ parseInt(process.env.EMBEDDING_BATCH_SIZE || "32") ||
55
+ 32;
56
+ this.normalize =
57
+ config && config.normalize !== undefined
58
+ ? config.normalize
59
+ : process.env.EMBEDDING_NORMALIZE !== "false";
60
+ this.apiKey = (config && config.apiKey) || process.env.EMBEDDING_API_KEY;
61
+ this.model = null;
62
+ this.cache = new Map();
63
+ this.cacheMaxSize = (config && config.cacheMaxSize) || 1000;
64
+ this.initialized = false;
65
+ // Statistics
66
+ this.stats = {
67
+ totalEmbeddings: 0,
68
+ cacheHits: 0,
69
+ cacheMisses: 0,
70
+ batchCount: 0,
71
+ };
72
+ }
73
+ /**
74
+ * Initialize the embedding model
75
+ * Loads the model based on modelType (local, ollama, openai, cohere)
76
+ */
77
+ async init() {
78
+ try {
79
+ switch (this.modelType) {
80
+ case "local":
81
+ await this._initLocalModel();
82
+ break;
83
+ case "ollama":
84
+ this._initOllama();
85
+ break;
86
+ case "openai":
87
+ await this._initOpenAI();
88
+ break;
89
+ case "cohere":
90
+ await this._initCohere();
91
+ break;
92
+ default:
93
+ throw new ConfigurationError(`Unknown model type: ${this.modelType}. Must be 'local', 'ollama', 'openai', or 'cohere'`, { modelType: this.modelType });
94
+ }
95
+ this.initialized = true;
96
+ }
97
+ catch (error) {
98
+ if (error instanceof ConfigurationError) {
99
+ throw error;
100
+ }
101
+ const message = error instanceof Error ? error.message : String(error);
102
+ throw new EmbeddingError(`Failed to initialize embedding service: ${message}`, {
103
+ modelType: this.modelType,
104
+ modelName: this.modelName,
105
+ originalError: message,
106
+ });
107
+ }
108
+ }
109
+ /**
110
+ * Generate embedding for a single text
111
+ * @param {string} text - Text to embed
112
+ * @param {Object} options - Options for embedding generation
113
+ * @returns {Promise<number[]>} Embedding vector
114
+ */
115
+ async embed(text, _options = {}) {
116
+ if (!this.initialized) {
117
+ throw new EmbeddingError("Embedding service not initialized. Call init() first.", {
118
+ modelType: this.modelType,
119
+ });
120
+ }
121
+ if (!text || typeof text !== "string") {
122
+ throw new EmbeddingError("Text must be a non-empty string", {
123
+ text,
124
+ textType: typeof text,
125
+ });
126
+ }
127
+ // Check cache
128
+ const cacheKey = this._getCacheKey(text);
129
+ const cached = this.cache.get(cacheKey);
130
+ if (cached) {
131
+ this.stats.cacheHits++;
132
+ return cached;
133
+ }
134
+ // Generate embedding
135
+ let embedding;
136
+ try {
137
+ switch (this.modelType) {
138
+ case "local":
139
+ embedding = await this._embedLocal(text);
140
+ break;
141
+ case "ollama":
142
+ embedding = await this._embedOllama(text);
143
+ break;
144
+ case "openai":
145
+ embedding = await this._embedOpenAI(text);
146
+ break;
147
+ case "cohere":
148
+ embedding = await this._embedCohere(text);
149
+ break;
150
+ default:
151
+ throw new EmbeddingError(`Unknown model type: ${this.modelType}`, {
152
+ modelType: this.modelType,
153
+ });
154
+ }
155
+ // Normalize if enabled
156
+ if (this.normalize) {
157
+ embedding = this._normalize(embedding);
158
+ }
159
+ // Cache result
160
+ this._setCache(cacheKey, embedding);
161
+ this.stats.totalEmbeddings++;
162
+ this.stats.cacheMisses++;
163
+ return embedding;
164
+ }
165
+ catch (error) {
166
+ if (error instanceof EmbeddingError) {
167
+ throw error;
168
+ }
169
+ const message = error instanceof Error ? error.message : String(error);
170
+ throw new EmbeddingError(`Failed to generate embedding: ${message}`, {
171
+ modelType: this.modelType,
172
+ text: text.substring(0, 100),
173
+ });
174
+ }
175
+ }
176
+ /**
177
+ * Generate embeddings for a batch of texts
178
+ * @param {string[]} texts - Array of texts to embed
179
+ * @param {Object} options - Options for embedding generation
180
+ * @returns {Promise<number[][]>} Array of embedding vectors
181
+ */
182
+ async embedBatch(texts, _options = {}) {
183
+ if (!this.initialized) {
184
+ throw new EmbeddingError("Embedding service not initialized. Call init() first.", {
185
+ modelType: this.modelType,
186
+ });
187
+ }
188
+ if (!Array.isArray(texts)) {
189
+ throw new EmbeddingError("Texts must be an array", {
190
+ textsType: typeof texts,
191
+ });
192
+ }
193
+ if (texts.length === 0) {
194
+ return [];
195
+ }
196
+ try {
197
+ const embeddings = [];
198
+ // Process in batches
199
+ for (let i = 0; i < texts.length; i += this.batchSize) {
200
+ const batch = texts.slice(i, Math.min(i + this.batchSize, texts.length));
201
+ // Generate embeddings for batch
202
+ const batchEmbeddings = await Promise.all(batch.map((text) => this.embed(text)));
203
+ embeddings.push(...batchEmbeddings);
204
+ this.stats.batchCount++;
205
+ }
206
+ return embeddings;
207
+ }
208
+ catch (error) {
209
+ if (error instanceof EmbeddingError) {
210
+ throw error;
211
+ }
212
+ const message = error instanceof Error ? error.message : String(error);
213
+ throw new EmbeddingError(`Failed to generate batch embeddings: ${message}`, {
214
+ modelType: this.modelType,
215
+ batchSize: texts.length,
216
+ });
217
+ }
218
+ }
219
+ /**
220
+ * Initialize local ONNX model using Xenova/Transformers.js
221
+ * @private
222
+ */
223
+ async _initLocalModel() {
224
+ try {
225
+ // Dynamic import to allow optional dependency
226
+ const { pipeline } = (await import("@xenova/transformers"));
227
+ // Load feature extraction pipeline
228
+ this.model = await pipeline("feature-extraction", this.modelName, {
229
+ quantized: true,
230
+ progress_callback: (progress) => {
231
+ // Optional: Log model download progress
232
+ if (progress.status === "downloading") {
233
+ // Silently handle progress
234
+ }
235
+ },
236
+ });
237
+ // Update dimension based on model (384 for all-MiniLM-L6-v2)
238
+ if (this.modelName.includes("all-MiniLM-L6-v2")) {
239
+ this.dimension = 384;
240
+ }
241
+ }
242
+ catch (error) {
243
+ const message = error instanceof Error ? error.message : String(error);
244
+ throw new ConfigurationError(`Failed to load local model: ${message}. Make sure @xenova/transformers is installed.`, { modelName: this.modelName, error: message });
245
+ }
246
+ }
247
+ /**
248
+ * Initialize Ollama client
249
+ * Ollama runs locally and doesn't require authentication
250
+ * @private
251
+ */
252
+ _initOllama() {
253
+ // Ollama doesn't require initialization - it's a local HTTP API
254
+ // Store the base URL for use in _embedOllama
255
+ this.model = {
256
+ baseUrl: this.baseUrl,
257
+ modelName: this.modelName || "nomic-embed-text",
258
+ };
259
+ // Set default dimension for common Ollama embedding models
260
+ if (this.modelName.includes("nomic-embed-text")) {
261
+ this.dimension = 768;
262
+ }
263
+ else if (this.modelName.includes("mxbai-embed")) {
264
+ this.dimension = 1024;
265
+ }
266
+ else if (this.modelName.includes("all-MiniLM")) {
267
+ this.dimension = 384;
268
+ }
269
+ }
270
+ /**
271
+ * Initialize OpenAI client
272
+ * @private
273
+ */
274
+ async _initOpenAI() {
275
+ if (!this.apiKey) {
276
+ throw new ConfigurationError("OpenAI API key is required. Set EMBEDDING_API_KEY environment variable or pass apiKey in config.", { modelType: "openai" });
277
+ }
278
+ try {
279
+ // Dynamic import to allow optional dependency (openai may not be installed)
280
+ const { OpenAI } = await import("openai");
281
+ this.model = new OpenAI({ apiKey: this.apiKey });
282
+ // Update dimension for OpenAI models
283
+ if (this.modelName.includes("text-embedding-ada-002")) {
284
+ this.dimension = 1536;
285
+ }
286
+ }
287
+ catch (error) {
288
+ const message = error instanceof Error ? error.message : String(error);
289
+ throw new ConfigurationError(`Failed to initialize OpenAI client: ${message}. Make sure openai package is installed.`, { error: message });
290
+ }
291
+ }
292
+ /**
293
+ * Initialize Cohere client
294
+ * @private
295
+ */
296
+ async _initCohere() {
297
+ if (!this.apiKey) {
298
+ throw new ConfigurationError("Cohere API key is required. Set EMBEDDING_API_KEY environment variable or pass apiKey in config.", { modelType: "cohere" });
299
+ }
300
+ try {
301
+ // Dynamic import to allow optional dependency (cohere-ai may not be installed)
302
+ const cohere = await import("cohere-ai");
303
+ this.model = new cohere.CohereClient({ token: this.apiKey });
304
+ // Update dimension for Cohere models
305
+ if (this.modelName.includes("embed-english-v3.0")) {
306
+ this.dimension = 1024;
307
+ }
308
+ }
309
+ catch (error) {
310
+ const message = error instanceof Error ? error.message : String(error);
311
+ throw new ConfigurationError(`Failed to initialize Cohere client: ${message}. Make sure cohere-ai package is installed.`, { error: message });
312
+ }
313
+ }
314
+ /**
315
+ * Generate embedding using local ONNX model
316
+ * @param {string} text - Text to embed
317
+ * @returns {Promise<number[]>} Embedding vector
318
+ * @private
319
+ */
320
+ async _embedLocal(text) {
321
+ if (!this.model) {
322
+ throw new EmbeddingError("Model not initialized");
323
+ }
324
+ try {
325
+ // Local model call
326
+ const output = await this.model(text, {
327
+ pooling: "mean",
328
+ normalize: false,
329
+ });
330
+ // Convert from tensor to array
331
+ const embedding = Array.from(output.data);
332
+ return embedding;
333
+ }
334
+ catch (error) {
335
+ const message = error instanceof Error ? error.message : String(error);
336
+ throw new EmbeddingError(`Failed to generate local embedding: ${message}`, {
337
+ modelName: this.modelName,
338
+ text: text.substring(0, 100),
339
+ });
340
+ }
341
+ }
342
+ /**
343
+ * Generate embedding using Ollama API
344
+ * @param {string} text - Text to embed
345
+ * @returns {Promise<number[]>} Embedding vector
346
+ * @private
347
+ */
348
+ async _embedOllama(text) {
349
+ if (!this.model) {
350
+ throw new EmbeddingError("Model not initialized");
351
+ }
352
+ try {
353
+ const baseUrl = this.model.baseUrl;
354
+ const modelName = this.model.modelName;
355
+ const response = await fetch(`${baseUrl}/api/embeddings`, {
356
+ method: "POST",
357
+ headers: {
358
+ "Content-Type": "application/json",
359
+ },
360
+ body: JSON.stringify({
361
+ model: modelName,
362
+ prompt: text,
363
+ }),
364
+ });
365
+ if (!response.ok) {
366
+ const errorText = await response.text();
367
+ throw new EmbeddingError(`Ollama API error: ${response.status} ${response.statusText} - ${errorText}`, { baseUrl: baseUrl, modelName: modelName });
368
+ }
369
+ const data = await response.json();
370
+ if (!data.embedding) {
371
+ throw new EmbeddingError("Invalid response from Ollama API: missing embedding field", {
372
+ response: data,
373
+ });
374
+ }
375
+ return data.embedding;
376
+ }
377
+ catch (error) {
378
+ if (error instanceof EmbeddingError) {
379
+ throw error;
380
+ }
381
+ const message = error instanceof Error ? error.message : String(error);
382
+ const baseUrl = this.model?.baseUrl;
383
+ const modelName = this.model?.modelName;
384
+ throw new EmbeddingError(`Failed to generate Ollama embedding: ${message}. Make sure Ollama is running and the model is available.`, { baseUrl, modelName, error: message });
385
+ }
386
+ }
387
+ /**
388
+ * Generate embedding using OpenAI API
389
+ * @param {string} text - Text to embed
390
+ * @returns {Promise<number[]>} Embedding vector
391
+ * @private
392
+ */
393
+ async _embedOpenAI(text) {
394
+ if (!this.model) {
395
+ throw new EmbeddingError("Model not initialized");
396
+ }
397
+ try {
398
+ const response = await this.model.embeddings.create({
399
+ model: this.modelName,
400
+ input: text,
401
+ });
402
+ const embedding = response.data[0].embedding;
403
+ return embedding;
404
+ }
405
+ catch (error) {
406
+ const message = error instanceof Error ? error.message : String(error);
407
+ throw new EmbeddingError(`Failed to generate OpenAI embedding: ${message}`, {
408
+ modelName: this.modelName,
409
+ error: message,
410
+ });
411
+ }
412
+ }
413
+ /**
414
+ * Generate embedding using Cohere API
415
+ * @param {string} text - Text to embed
416
+ * @returns {Promise<number[]>} Embedding vector
417
+ * @private
418
+ */
419
+ async _embedCohere(text) {
420
+ if (!this.model) {
421
+ throw new EmbeddingError("Model not initialized");
422
+ }
423
+ try {
424
+ const response = await this.model.embed({
425
+ model: this.modelName,
426
+ texts: [text],
427
+ inputType: "search_document",
428
+ });
429
+ const embedding = response.embeddings[0];
430
+ return embedding;
431
+ }
432
+ catch (error) {
433
+ const message = error instanceof Error ? error.message : String(error);
434
+ throw new EmbeddingError(`Failed to generate Cohere embedding: ${message}`, {
435
+ modelName: this.modelName,
436
+ error: message,
437
+ });
438
+ }
439
+ }
440
+ /**
441
+ * Normalize vector to unit length
442
+ * @param {number[]} vector - Vector to normalize
443
+ * @returns {number[]} Normalized vector
444
+ * @private
445
+ */
446
+ _normalize(vector) {
447
+ // Calculate magnitude
448
+ const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
449
+ // Avoid division by zero
450
+ if (magnitude === 0) {
451
+ return vector.map(() => 0);
452
+ }
453
+ // Normalize
454
+ return vector.map((val) => val / magnitude);
455
+ }
456
+ /**
457
+ * Generate cache key from text
458
+ * @param {string} text - Text to generate key from
459
+ * @returns {string} Cache key
460
+ * @private
461
+ */
462
+ _getCacheKey(text) {
463
+ return crypto.createHash("md5").update(text).digest("hex");
464
+ }
465
+ _setCache(key, value) {
466
+ // Evict oldest if at capacity
467
+ if (this.cache.size >= this.cacheMaxSize) {
468
+ const firstKey = this.cache.keys().next().value;
469
+ if (firstKey !== undefined) {
470
+ this.cache.delete(firstKey);
471
+ }
472
+ }
473
+ this.cache.set(key, value);
474
+ }
475
+ /**
476
+ * Get service statistics
477
+ * @returns {Object} Statistics object
478
+ */
479
+ getStats() {
480
+ return {
481
+ modelType: this.modelType,
482
+ modelName: this.modelName,
483
+ dimension: this.dimension,
484
+ initialized: this.initialized,
485
+ totalEmbeddings: this.stats.totalEmbeddings,
486
+ cacheHits: this.stats.cacheHits,
487
+ cacheMisses: this.stats.cacheMisses,
488
+ cacheSize: this.cache.size,
489
+ cacheMaxSize: this.cacheMaxSize,
490
+ cacheHitRate: this.stats.cacheHits /
491
+ (this.stats.cacheHits + this.stats.cacheMisses) || 0,
492
+ batchCount: this.stats.batchCount,
493
+ batchSize: this.batchSize,
494
+ normalize: this.normalize,
495
+ };
496
+ }
497
+ /**
498
+ * Clear the embedding cache
499
+ */
500
+ clearCache() {
501
+ this.cache.clear();
502
+ }
503
+ /**
504
+ * Reset statistics
505
+ */
506
+ resetStats() {
507
+ this.stats = {
508
+ totalEmbeddings: 0,
509
+ cacheHits: 0,
510
+ cacheMisses: 0,
511
+ batchCount: 0,
512
+ };
513
+ }
514
+ }
515
+ export default EmbeddingService;