@yamo/memory-mesh 2.3.2 → 3.0.0

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (102)
  1. package/bin/memory_mesh.js +1 -1
  2. package/lib/llm/client.d.ts +111 -0
  3. package/lib/llm/client.js +299 -357
  4. package/lib/llm/client.ts +413 -0
  5. package/lib/llm/index.d.ts +17 -0
  6. package/lib/llm/index.js +15 -8
  7. package/lib/llm/index.ts +19 -0
  8. package/lib/memory/adapters/client.d.ts +183 -0
  9. package/lib/memory/adapters/client.js +518 -0
  10. package/lib/memory/adapters/client.ts +678 -0
  11. package/lib/memory/adapters/config.d.ts +137 -0
  12. package/lib/memory/adapters/config.js +189 -0
  13. package/lib/memory/adapters/config.ts +259 -0
  14. package/lib/memory/adapters/errors.d.ts +76 -0
  15. package/lib/memory/adapters/errors.js +128 -0
  16. package/lib/memory/adapters/errors.ts +166 -0
  17. package/lib/memory/context-manager.d.ts +44 -0
  18. package/lib/memory/context-manager.js +344 -0
  19. package/lib/memory/context-manager.ts +432 -0
  20. package/lib/memory/embeddings/factory.d.ts +59 -0
  21. package/lib/memory/embeddings/factory.js +148 -0
  22. package/lib/{embeddings/factory.js → memory/embeddings/factory.ts} +69 -28
  23. package/lib/memory/embeddings/index.d.ts +2 -0
  24. package/lib/memory/embeddings/index.js +2 -0
  25. package/lib/memory/embeddings/index.ts +2 -0
  26. package/lib/memory/embeddings/service.d.ts +164 -0
  27. package/lib/memory/embeddings/service.js +515 -0
  28. package/lib/{embeddings/service.js → memory/embeddings/service.ts} +223 -156
  29. package/lib/memory/index.d.ts +9 -0
  30. package/lib/memory/index.js +9 -1
  31. package/lib/memory/index.ts +20 -0
  32. package/lib/memory/memory-mesh.d.ts +274 -0
  33. package/lib/memory/memory-mesh.js +1469 -678
  34. package/lib/memory/memory-mesh.ts +1803 -0
  35. package/lib/memory/memory-translator.d.ts +19 -0
  36. package/lib/memory/memory-translator.js +125 -0
  37. package/lib/memory/memory-translator.ts +158 -0
  38. package/lib/memory/schema.d.ts +111 -0
  39. package/lib/memory/schema.js +183 -0
  40. package/lib/memory/schema.ts +267 -0
  41. package/lib/memory/scorer.d.ts +26 -0
  42. package/lib/memory/scorer.js +77 -0
  43. package/lib/memory/scorer.ts +95 -0
  44. package/lib/memory/search/index.d.ts +1 -0
  45. package/lib/memory/search/index.js +1 -0
  46. package/lib/memory/search/index.ts +1 -0
  47. package/lib/memory/search/keyword-search.d.ts +62 -0
  48. package/lib/memory/search/keyword-search.js +135 -0
  49. package/lib/{search/keyword-search.js → memory/search/keyword-search.ts} +66 -36
  50. package/lib/scrubber/config/defaults.d.ts +53 -0
  51. package/lib/scrubber/config/defaults.js +49 -57
  52. package/lib/scrubber/config/defaults.ts +117 -0
  53. package/lib/scrubber/index.d.ts +6 -0
  54. package/lib/scrubber/index.js +3 -23
  55. package/lib/scrubber/index.ts +7 -0
  56. package/lib/scrubber/scrubber.d.ts +61 -0
  57. package/lib/scrubber/scrubber.js +99 -121
  58. package/lib/scrubber/scrubber.ts +168 -0
  59. package/lib/scrubber/stages/chunker.d.ts +13 -0
  60. package/lib/scrubber/stages/metadata-annotator.d.ts +18 -0
  61. package/lib/scrubber/stages/normalizer.d.ts +13 -0
  62. package/lib/scrubber/stages/semantic-filter.d.ts +13 -0
  63. package/lib/scrubber/stages/structural-cleaner.d.ts +13 -0
  64. package/lib/scrubber/stages/validator.d.ts +18 -0
  65. package/lib/scrubber/telemetry.d.ts +36 -0
  66. package/lib/scrubber/telemetry.js +53 -58
  67. package/lib/scrubber/telemetry.ts +99 -0
  68. package/lib/utils/logger.d.ts +29 -0
  69. package/lib/utils/logger.js +64 -0
  70. package/lib/utils/logger.ts +85 -0
  71. package/lib/utils/skill-metadata.d.ts +32 -0
  72. package/lib/utils/skill-metadata.js +132 -0
  73. package/lib/utils/skill-metadata.ts +147 -0
  74. package/lib/yamo/emitter.d.ts +73 -0
  75. package/lib/yamo/emitter.js +78 -143
  76. package/lib/yamo/emitter.ts +249 -0
  77. package/lib/yamo/schema.d.ts +58 -0
  78. package/lib/yamo/schema.js +81 -108
  79. package/lib/yamo/schema.ts +165 -0
  80. package/package.json +11 -8
  81. package/index.d.ts +0 -111
  82. package/lib/embeddings/index.js +0 -2
  83. package/lib/index.js +0 -6
  84. package/lib/lancedb/client.js +0 -633
  85. package/lib/lancedb/config.js +0 -215
  86. package/lib/lancedb/errors.js +0 -144
  87. package/lib/lancedb/index.js +0 -4
  88. package/lib/lancedb/schema.js +0 -217
  89. package/lib/scrubber/errors/scrubber-error.js +0 -43
  90. package/lib/scrubber/stages/chunker.js +0 -103
  91. package/lib/scrubber/stages/metadata-annotator.js +0 -74
  92. package/lib/scrubber/stages/normalizer.js +0 -59
  93. package/lib/scrubber/stages/semantic-filter.js +0 -61
  94. package/lib/scrubber/stages/structural-cleaner.js +0 -82
  95. package/lib/scrubber/stages/validator.js +0 -66
  96. package/lib/scrubber/utils/hash.js +0 -39
  97. package/lib/scrubber/utils/html-parser.js +0 -45
  98. package/lib/scrubber/utils/pattern-matcher.js +0 -63
  99. package/lib/scrubber/utils/token-counter.js +0 -31
  100. package/lib/search/index.js +0 -1
  101. package/lib/utils/index.js +0 -1
  102. package/lib/yamo/index.js +0 -15
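The file list shows the shape of the 3.0.0 release: every module now ships TypeScript source (.ts) alongside compiled JavaScript (.js) and declarations (.d.ts), the lib/lancedb modules are gone in favor of lib/memory/adapters, and the loose embeddings/ and search/ modules have moved under lib/memory. A minimal upgrade smoke test might look like the sketch below; the deep-import path and re-export are assumptions (check the "exports" map in the new package.json), and only the LLMClient API itself is taken from the diff that follows.

// Hypothetical upgrade smoke test -- the import path is an assumption;
// the LLMClient constructor, reflect(), and getStats() appear in the diff below.
import { LLMClient } from "@yamo/memory-mesh/lib/llm/index.js";

const client = new LLMClient({ provider: "ollama" }); // local model, no API key needed
const result = await client.reflect("Summarize recent work", [
  { content: "Moved the lancedb adapter under lib/memory/adapters" },
]);
console.log(result.reflection, result.confidence, client.getStats());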
package/lib/llm/client.js CHANGED
@@ -7,84 +7,76 @@
  * - Ollama (local models)
  * - Graceful fallback when LLM unavailable
  */
-
+import { createLogger } from "../utils/logger.js";
+const logger = createLogger("llm-client");
 /**
  * LLMClient provides unified interface for calling various LLM providers
  * to generate reflections from memory contexts.
  */
 export class LLMClient {
-  /**
-   * Create a new LLMClient instance
-   *
-   * @param {Object} [config={}] - Configuration options
-   * @param {string} [config.provider='openai'] - LLM provider ('openai', 'anthropic', 'ollama')
-   * @param {string} [config.apiKey] - API key (defaults to env var)
-   * @param {string} [config.model] - Model name
-   * @param {string} [config.baseUrl] - Base URL for API (optional)
-   * @param {number} [config.timeout=30000] - Request timeout in ms
-   * @param {number} [config.maxRetries=2] - Max retry attempts
-   */
-  constructor(config = {}) {
-    this.provider = config.provider || process.env.LLM_PROVIDER || 'openai';
-    this.apiKey = config.apiKey || process.env.LLM_API_KEY || '';
-    this.model = config.model || process.env.LLM_MODEL || this._getDefaultModel();
-    this.baseUrl = config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
-    this.timeout = config.timeout || 30000;
-    this.maxRetries = config.maxRetries || 2;
-
-    // Statistics
-    this.stats = {
-      totalRequests: 0,
-      successfulRequests: 0,
-      failedRequests: 0,
-      fallbackCount: 0
-    };
-  }
-
-  /**
-   * Get default model for provider
-   * @private
-   * @returns {string} Default model name
-   */
-  _getDefaultModel() {
-    const defaults = {
-      openai: 'gpt-4o-mini',
-      anthropic: 'claude-3-5-haiku-20241022',
-      ollama: 'llama3.2'
-    };
-    return defaults[this.provider] || 'gpt-4o-mini';
-  }
-
-  /**
-   * Get default base URL for provider
-   * @private
-   * @returns {string} Default base URL
-   */
-  _getDefaultBaseUrl() {
-    const defaults = {
-      openai: 'https://api.openai.com/v1',
-      anthropic: 'https://api.anthropic.com/v1',
-      ollama: 'http://localhost:11434'
-    };
-    return defaults[this.provider] || 'https://api.openai.com/v1';
-  }
-
-  /**
-   * Generate reflection from memories
-   * Main entry point for reflection generation
-   *
-   * @param {string} prompt - The reflection prompt
-   * @param {Array} memories - Context memories
-   * @returns {Promise<Object>} { reflection, confidence }
-   */
-  async reflect(prompt, memories) {
-    this.stats.totalRequests++;
-
-    if (!memories || memories.length === 0) {
-      return this._fallback('No memories provided');
+  provider;
+  apiKey;
+  model;
+  baseUrl;
+  timeout;
+  maxRetries;
+  maxTokens;
+  stats;
+  /**
+   * Create a new LLMClient instance
+   */
+  constructor(config = {}) {
+    this.provider = config.provider || process.env.LLM_PROVIDER || "openai";
+    this.apiKey = config.apiKey || process.env.LLM_API_KEY || "";
+    this.model =
+      config.model || process.env.LLM_MODEL || this._getDefaultModel();
+    this.baseUrl =
+      config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
+    this.maxTokens = config.maxTokens || 2000;
+    this.timeout = config.timeout || (this.maxTokens >= 4000 ? 300000 : 60000);
+    this.maxRetries = config.maxRetries || 2;
+    // Statistics
+    this.stats = {
+      totalRequests: 0,
+      successfulRequests: 0,
+      failedRequests: 0,
+      fallbackCount: 0,
+    };
   }
-
-    const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.
+  /**
+   * Get default model for provider
+   * @private
+   */
+  _getDefaultModel() {
+    const defaults = {
+      openai: "gpt-4o-mini",
+      anthropic: "claude-3-5-haiku-20241022",
+      ollama: "llama3.2",
+    };
+    return defaults[this.provider] || "gpt-4o-mini";
+  }
+  /**
+   * Get default base URL for provider
+   * @private
+   */
+  _getDefaultBaseUrl() {
+    const defaults = {
+      openai: "https://api.openai.com/v1",
+      anthropic: "https://api.anthropic.com/v1",
+      ollama: "http://localhost:11434",
+    };
+    return defaults[this.provider] || "https://api.openai.com/v1";
+  }
+  /**
+   * Generate reflection from memories
+   * Main entry point for reflection generation
+   */
+  async reflect(prompt, memories) {
+    this.stats.totalRequests++;
+    if (!memories || memories.length === 0) {
+      return this._fallback("No memories provided");
+    }
+    const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.
 Respond ONLY in JSON format with exactly these keys:
 {
 "reflection": "a concise insight or observation derived from the memories",
@@ -92,300 +84,250 @@ Respond ONLY in JSON format with exactly these keys:
 }
 
 Keep the reflection brief (1-2 sentences) and actionable.`;
-
-    const userContent = this._formatMemoriesForLLM(prompt, memories);
-
-    try {
-      const response = await this._callWithRetry(systemPrompt, userContent);
-      const parsed = JSON.parse(response);
-
-      // Validate response structure
-      if (!parsed.reflection || typeof parsed.confidence !== 'number') {
-        throw new Error('Invalid LLM response format');
-      }
-
-      // Clamp confidence to valid range
-      parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
-
-      this.stats.successfulRequests++;
-      return parsed;
-
-    } catch (error) {
-      this.stats.failedRequests++;
-      const errorMessage = error instanceof Error ? error.message : String(error);
-      console.warn(`[LLMClient] LLM call failed: ${errorMessage}`);
-      return this._fallback('LLM error', memories);
+    const userContent = this._formatMemoriesForLLM(prompt, memories);
+    try {
+      const response = await this._callWithRetry(systemPrompt, userContent);
+      const parsed = JSON.parse(response);
+      // Validate response structure
+      if (!parsed.reflection || typeof parsed.confidence !== "number") {
+        throw new Error("Invalid LLM response format");
+      }
+      // Clamp confidence to valid range
+      parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
+      this.stats.successfulRequests++;
+      return parsed;
+    }
+    catch (error) {
+      this.stats.failedRequests++;
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      logger.debug({ err: error, errorMessage }, "LLM call failed");
+      return this._fallback("LLM error", memories);
+    }
   }
-  }
-
-  /**
-   * Format memories for LLM consumption
-   * @private
-   * @param {string} prompt - User prompt
-   * @param {Array} memories - Memory array
-   * @returns {string} Formatted content
-   */
-  _formatMemoriesForLLM(prompt, memories) {
-    const memoryList = memories
-      .map((m, i) => `${i + 1}. ${m.content}`)
-      .join('\n');
-
-    return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
-  }
-
-  /**
-   * Call LLM with retry logic
-   * @private
-   * @param {string} systemPrompt - System prompt
-   * @param {string} userContent - User content
-   * @returns {Promise<string>} LLM response text
-   */
-  async _callWithRetry(systemPrompt, userContent) {
-    let lastError = null;
-
-    for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
-      try {
-        return await this._callLLM(systemPrompt, userContent);
-      } catch (error) {
-        lastError = error;
-        if (attempt < this.maxRetries) {
-          const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
-          await this._sleep(delay);
+  /**
+   * Format memories for LLM consumption
+   * @private
+   */
+  _formatMemoriesForLLM(prompt, memories) {
+    const memoryList = memories
+      .map((m, i) => `${i + 1}. ${m.content}`)
+      .join("\n");
+    return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
+  }
+  /**
+   * Call LLM with retry logic
+   * @private
+   */
+  async _callWithRetry(systemPrompt, userContent) {
+    let lastError = null;
+    for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
+      try {
+        return await this._callLLM(systemPrompt, userContent);
+      }
+      catch (error) {
+        lastError = error;
+        if (attempt < this.maxRetries) {
+          const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
+          await this._sleep(delay);
+        }
+      }
     }
-      }
+    throw lastError;
   }
-
-    throw lastError;
-  }
-
-  /**
-   * Call LLM based on provider
-   * @private
-   * @param {string} systemPrompt - System prompt
-   * @param {string} userContent - User content
-   * @returns {Promise<string>} Response text
-   */
-  async _callLLM(systemPrompt, userContent) {
-    switch (this.provider) {
-      case 'openai':
-        return await this._callOpenAI(systemPrompt, userContent);
-      case 'anthropic':
-        return await this._callAnthropic(systemPrompt, userContent);
-      case 'ollama':
-        return await this._callOllama(systemPrompt, userContent);
-      default:
-        throw new Error(`Unsupported provider: ${this.provider}`);
+  /**
+   * Call LLM based on provider
+   * @private
+   */
+  async _callLLM(systemPrompt, userContent) {
+    switch (this.provider) {
+      case "openai":
+        return this._callOpenAI(systemPrompt, userContent);
+      case "anthropic":
+        return this._callAnthropic(systemPrompt, userContent);
+      case "ollama":
+        return this._callOllama(systemPrompt, userContent);
+      default:
+        throw new Error(`Unsupported provider: ${this.provider}`);
+    }
   }
-  }
-
-  /**
-   * Call OpenAI API
-   * @private
-   */
-  async _callOpenAI(systemPrompt, userContent) {
-    if (!this.apiKey) {
-      throw new Error('OpenAI API key not configured');
+  /**
+   * Call OpenAI API
+   * @private
+   */
+  async _callOpenAI(systemPrompt, userContent) {
+    if (!this.apiKey) {
+      throw new Error("OpenAI API key not configured");
+    }
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    try {
+      const response = await fetch(`${this.baseUrl}/chat/completions`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${this.apiKey}`,
+        },
+        body: JSON.stringify({
+          model: this.model,
+          messages: [
+            { role: "system", content: systemPrompt },
+            { role: "user", content: userContent },
+          ],
+          temperature: 0.7,
+          max_tokens: this.maxTokens,
+        }),
+        signal: controller.signal,
+      });
+      clearTimeout(timeoutId);
+      if (!response.ok) {
+        const error = await response.text();
+        throw new Error(`OpenAI API error: ${response.status} - ${error}`);
+      }
+      const data = await response.json();
+      return data.choices[0].message.content;
+    }
+    catch (error) {
+      clearTimeout(timeoutId);
+      if (error instanceof Error && error.name === "AbortError") {
+        throw new Error("Request timeout");
+      }
+      throw error;
+    }
   }
-
-    const controller = new AbortController();
-    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
-
-    try {
-      const response = await fetch(`${this.baseUrl}/chat/completions`, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-          'Authorization': `Bearer ${this.apiKey}`
-        },
-        body: JSON.stringify({
-          model: this.model,
-          messages: [
-            { role: 'system', content: systemPrompt },
-            { role: 'user', content: userContent }
-          ],
-          temperature: 0.7,
-          max_tokens: 500
-        }),
-        signal: controller.signal
-      });
-
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        const error = await response.text();
-        throw new Error(`OpenAI API error: ${response.status} - ${error}`);
-      }
-
-      const data = await response.json();
-      return data.choices[0].message.content;
-
-    } catch (error) {
-      clearTimeout(timeoutId);
-      if (error instanceof Error && error.name === 'AbortError') {
-        throw new Error('Request timeout');
-      }
-      throw error;
+  /**
+   * Call Anthropic (Claude) API
+   * @private
+   */
+  async _callAnthropic(systemPrompt, userContent) {
+    if (!this.apiKey) {
+      throw new Error("Anthropic API key not configured");
+    }
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    try {
+      const response = await fetch(`${this.baseUrl}/messages`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          "x-api-key": this.apiKey,
+          "anthropic-version": "2023-06-01",
+        },
+        body: JSON.stringify({
+          model: this.model,
+          max_tokens: this.maxTokens,
+          system: systemPrompt,
+          messages: [{ role: "user", content: userContent }],
+        }),
+        signal: controller.signal,
+      });
+      clearTimeout(timeoutId);
+      if (!response.ok) {
+        const error = await response.text();
+        throw new Error(`Anthropic API error: ${response.status} - ${error}`);
+      }
+      const data = await response.json();
+      return data.content[0].text;
+    }
+    catch (error) {
+      clearTimeout(timeoutId);
+      if (error instanceof Error && error.name === "AbortError") {
+        throw new Error("Request timeout");
+      }
+      throw error;
+    }
   }
-  }
-
-  /**
-   * Call Anthropic (Claude) API
-   * @private
-   */
-  async _callAnthropic(systemPrompt, userContent) {
-    if (!this.apiKey) {
-      throw new Error('Anthropic API key not configured');
+  /**
+   * Call Ollama (local) API
+   * @private
+   */
+  async _callOllama(systemPrompt, userContent) {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
+    try {
+      const response = await fetch(`${this.baseUrl}/api/chat`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+        },
+        body: JSON.stringify({
+          model: this.model,
+          messages: [
+            { role: "system", content: systemPrompt },
+            { role: "user", content: userContent },
+          ],
+          stream: false,
+          options: {
+            num_predict: this.maxTokens,
+          },
+        }),
+        signal: controller.signal,
+      });
+      clearTimeout(timeoutId);
+      if (!response.ok) {
+        const error = await response.text();
+        throw new Error(`Ollama API error: ${response.status} - ${error}`);
+      }
+      const data = await response.json();
+      return data.message.content;
+    }
+    catch (error) {
+      clearTimeout(timeoutId);
+      if (error instanceof Error && error.name === "AbortError") {
+        throw new Error("Request timeout");
+      }
+      throw error;
+    }
   }
-
-    const controller = new AbortController();
-    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
-
-    try {
-      const response = await fetch(`${this.baseUrl}/messages`, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json',
-          'x-api-key': this.apiKey,
-          'anthropic-version': '2023-06-01'
-        },
-        body: JSON.stringify({
-          model: this.model,
-          max_tokens: 500,
-          system: systemPrompt,
-          messages: [
-            { role: 'user', content: userContent }
-          ]
-        }),
-        signal: controller.signal
-      });
-
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        const error = await response.text();
-        throw new Error(`Anthropic API error: ${response.status} - ${error}`);
-      }
-
-      const data = await response.json();
-      return data.content[0].text;
-
-    } catch (error) {
-      clearTimeout(timeoutId);
-      if (error instanceof Error && error.name === 'AbortError') {
-        throw new Error('Request timeout');
-      }
-      throw error;
+  /**
+   * Fallback when LLM fails
+   * @private
+   */
+  _fallback(reason, memories = []) {
+    this.stats.fallbackCount++;
+    if (memories && memories.length > 0) {
+      // Simple aggregation fallback
+      const contents = memories.map((m) => m.content);
+      const combined = contents.join("; ");
+      const preview = combined.length > 200 ? `${combined.substring(0, 200)}...` : combined;
+      return {
+        reflection: `Aggregated from ${memories.length} memories: ${preview}`,
+        confidence: 0.5,
+      };
+    }
+    return {
+      reflection: `Reflection generation unavailable: ${reason}`,
+      confidence: 0.3,
+    };
   }
-  }
-
-  /**
-   * Call Ollama (local) API
-   * @private
-   */
-  async _callOllama(systemPrompt, userContent) {
-    const controller = new AbortController();
-    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
-
-    try {
-      const response = await fetch(`${this.baseUrl}/api/chat`, {
-        method: 'POST',
-        headers: {
-          'Content-Type': 'application/json'
-        },
-        body: JSON.stringify({
-          model: this.model,
-          messages: [
-            { role: 'system', content: systemPrompt },
-            { role: 'user', content: userContent }
-          ],
-          stream: false
-        }),
-        signal: controller.signal
-      });
-
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        const error = await response.text();
-        throw new Error(`Ollama API error: ${response.status} - ${error}`);
-      }
-
-      const data = await response.json();
-      return data.message.content;
-
-    } catch (error) {
-      clearTimeout(timeoutId);
-      if (error instanceof Error && error.name === 'AbortError') {
-        throw new Error('Request timeout');
-      }
-      throw error;
+  /**
+   * Sleep utility
+   * @private
+   */
+  _sleep(ms) {
+    return new Promise((resolve) => setTimeout(resolve, ms));
   }
-  }
-
-  /**
-   * Fallback when LLM fails
-   * @private
-   * @param {string} reason - Fallback reason
-   * @param {Array} [memories=[]] - Memory array
-   * @returns {Object} Fallback result
-   */
-  _fallback(reason, memories = []) {
-    this.stats.fallbackCount++;
-
-    if (memories && memories.length > 0) {
-      // Simple aggregation fallback
-      const contents = memories.map(m => m.content);
-      const combined = contents.join('; ');
-      const preview = combined.length > 200
-        ? combined.substring(0, 200) + '...'
-        : combined;
-
-      return {
-        reflection: `Aggregated from ${memories.length} memories: ${preview}`,
-        confidence: 0.5
-      };
+  /**
+   * Get client statistics
+   * @returns {Object} Statistics
+   */
+  getStats() {
+    return {
+      ...this.stats,
+      successRate: this.stats.totalRequests > 0
+        ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(2)
+        : "0.00",
+    };
+  }
+  /**
+   * Reset statistics
+   */
+  resetStats() {
+    this.stats = {
+      totalRequests: 0,
+      successfulRequests: 0,
+      failedRequests: 0,
+      fallbackCount: 0,
+    };
   }
-
-    return {
-      reflection: `Reflection generation unavailable: ${reason}`,
-      confidence: 0.3
-    };
-  }
-
-  /**
-   * Sleep utility
-   * @private
-   * @param {number} ms - Milliseconds to sleep
-   * @returns {Promise<void>}
-   */
-  _sleep(ms) {
-    return new Promise(resolve => setTimeout(resolve, ms));
-  }
-
-  /**
-   * Get client statistics
-   * @returns {Object} Statistics
-   */
-  getStats() {
-    return {
-      ...this.stats,
-      successRate: this.stats.totalRequests > 0
-        ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(2)
-        : '0.00'
-    };
-  }
-
-  /**
-   * Reset statistics
-   */
-  resetStats() {
-    this.stats = {
-      totalRequests: 0,
-      successfulRequests: 0,
-      failedRequests: 0,
-      fallbackCount: 0
-    };
-  }
 }
-
 export default LLMClient;
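Read together, the 3.0.0 changes to this file are: class fields are declared up front (compiled from TypeScript), output length is configurable via maxTokens (default 2000) rather than the hard-coded max_tokens: 500, the default timeout now scales with maxTokens (300000 ms when maxTokens >= 4000, otherwise 60000 ms, up from the old 30000 ms), Ollama requests pass num_predict, and failures log through the shared logger instead of console.warn. A usage sketch under the new defaults (illustrative values; the deep-import path is an assumption, while the constructor options and reflect() shape are taken from the diff above):

// Sketch of the 3.0.0 configuration surface -- import path is an assumption.
import LLMClient from "@yamo/memory-mesh/lib/llm/client.js";

const client = new LLMClient({
  provider: "anthropic", // else LLM_PROVIDER env var, else "openai"
  maxTokens: 4096,       // raises the default timeout from 60000 ms to 300000 ms
});
const { reflection, confidence } = await client.reflect(
  "What patterns appear in these memories?",
  [{ content: "User prefers local models" }, { content: "Ollama is configured" }],
);
console.log(reflection, confidence); // on LLM failure, falls back with confidence 0.5 or 0.3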