@yamo/memory-mesh 2.3.2 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. package/README.md +8 -2
  2. package/bin/memory_mesh.js +1 -1
  3. package/lib/llm/client.d.ts +86 -0
  4. package/lib/llm/client.js +300 -357
  5. package/lib/llm/client.ts +334 -0
  6. package/lib/llm/index.d.ts +17 -0
  7. package/lib/llm/index.js +16 -8
  8. package/lib/llm/index.ts +18 -0
  9. package/lib/memory/adapters/client.d.ts +120 -0
  10. package/lib/memory/adapters/client.js +519 -0
  11. package/lib/memory/adapters/client.ts +519 -0
  12. package/lib/memory/adapters/config.d.ts +130 -0
  13. package/lib/memory/adapters/config.js +190 -0
  14. package/lib/memory/adapters/config.ts +190 -0
  15. package/lib/memory/adapters/errors.d.ts +84 -0
  16. package/lib/memory/adapters/errors.js +129 -0
  17. package/lib/memory/adapters/errors.ts +129 -0
  18. package/lib/memory/context-manager.d.ts +41 -0
  19. package/lib/memory/context-manager.js +345 -0
  20. package/lib/memory/context-manager.ts +345 -0
  21. package/lib/memory/embeddings/factory.d.ts +57 -0
  22. package/lib/memory/embeddings/factory.js +149 -0
  23. package/lib/memory/embeddings/factory.ts +149 -0
  24. package/lib/memory/embeddings/index.d.ts +2 -0
  25. package/lib/memory/embeddings/index.js +3 -0
  26. package/lib/memory/embeddings/index.ts +3 -0
  27. package/lib/memory/embeddings/service.d.ts +134 -0
  28. package/lib/memory/embeddings/service.js +516 -0
  29. package/lib/memory/embeddings/service.ts +516 -0
  30. package/lib/memory/index.d.ts +9 -0
  31. package/lib/memory/index.js +10 -1
  32. package/lib/memory/index.ts +10 -0
  33. package/lib/memory/memory-mesh.d.ts +332 -0
  34. package/lib/memory/memory-mesh.js +1470 -678
  35. package/lib/memory/memory-mesh.ts +1517 -0
  36. package/lib/memory/memory-translator.d.ts +14 -0
  37. package/lib/memory/memory-translator.js +126 -0
  38. package/lib/memory/memory-translator.ts +126 -0
  39. package/lib/memory/schema.d.ts +130 -0
  40. package/lib/memory/schema.js +184 -0
  41. package/lib/memory/schema.ts +184 -0
  42. package/lib/memory/scorer.d.ts +25 -0
  43. package/lib/memory/scorer.js +78 -0
  44. package/lib/memory/scorer.ts +78 -0
  45. package/lib/memory/search/index.d.ts +1 -0
  46. package/lib/memory/search/index.js +2 -0
  47. package/lib/memory/search/index.ts +2 -0
  48. package/lib/memory/search/keyword-search.d.ts +46 -0
  49. package/lib/memory/search/keyword-search.js +136 -0
  50. package/lib/memory/search/keyword-search.ts +136 -0
  51. package/lib/scrubber/config/defaults.d.ts +46 -0
  52. package/lib/scrubber/config/defaults.js +50 -57
  53. package/lib/scrubber/config/defaults.ts +55 -0
  54. package/lib/scrubber/errors/scrubber-error.d.ts +22 -0
  55. package/lib/scrubber/errors/scrubber-error.js +28 -32
  56. package/lib/scrubber/errors/scrubber-error.ts +44 -0
  57. package/lib/scrubber/index.d.ts +5 -0
  58. package/lib/scrubber/index.js +4 -23
  59. package/lib/scrubber/index.ts +6 -0
  60. package/lib/scrubber/scrubber.d.ts +44 -0
  61. package/lib/scrubber/scrubber.js +100 -121
  62. package/lib/scrubber/scrubber.ts +109 -0
  63. package/lib/scrubber/stages/chunker.d.ts +25 -0
  64. package/lib/scrubber/stages/chunker.js +74 -91
  65. package/lib/scrubber/stages/chunker.ts +104 -0
  66. package/lib/scrubber/stages/metadata-annotator.d.ts +17 -0
  67. package/lib/scrubber/stages/metadata-annotator.js +55 -65
  68. package/lib/scrubber/stages/metadata-annotator.ts +75 -0
  69. package/lib/scrubber/stages/normalizer.d.ts +16 -0
  70. package/lib/scrubber/stages/normalizer.js +42 -50
  71. package/lib/scrubber/stages/normalizer.ts +60 -0
  72. package/lib/scrubber/stages/semantic-filter.d.ts +16 -0
  73. package/lib/scrubber/stages/semantic-filter.js +42 -52
  74. package/lib/scrubber/stages/semantic-filter.ts +62 -0
  75. package/lib/scrubber/stages/structural-cleaner.d.ts +18 -0
  76. package/lib/scrubber/stages/structural-cleaner.js +66 -75
  77. package/lib/scrubber/stages/structural-cleaner.ts +83 -0
  78. package/lib/scrubber/stages/validator.d.ts +17 -0
  79. package/lib/scrubber/stages/validator.js +46 -56
  80. package/lib/scrubber/stages/validator.ts +67 -0
  81. package/lib/scrubber/telemetry.d.ts +29 -0
  82. package/lib/scrubber/telemetry.js +54 -58
  83. package/lib/scrubber/telemetry.ts +62 -0
  84. package/lib/scrubber/utils/hash.d.ts +14 -0
  85. package/lib/scrubber/utils/hash.js +30 -32
  86. package/lib/scrubber/utils/hash.ts +40 -0
  87. package/lib/scrubber/utils/html-parser.d.ts +14 -0
  88. package/lib/scrubber/utils/html-parser.js +32 -39
  89. package/lib/scrubber/utils/html-parser.ts +46 -0
  90. package/lib/scrubber/utils/pattern-matcher.d.ts +12 -0
  91. package/lib/scrubber/utils/pattern-matcher.js +48 -57
  92. package/lib/scrubber/utils/pattern-matcher.ts +64 -0
  93. package/lib/scrubber/utils/token-counter.d.ts +18 -0
  94. package/lib/scrubber/utils/token-counter.js +24 -25
  95. package/lib/scrubber/utils/token-counter.ts +32 -0
  96. package/lib/utils/logger.d.ts +19 -0
  97. package/lib/utils/logger.js +65 -0
  98. package/lib/utils/logger.ts +65 -0
  99. package/lib/utils/skill-metadata.d.ts +24 -0
  100. package/lib/utils/skill-metadata.js +133 -0
  101. package/lib/utils/skill-metadata.ts +133 -0
  102. package/lib/yamo/emitter.d.ts +46 -0
  103. package/lib/yamo/emitter.js +79 -143
  104. package/lib/yamo/emitter.ts +171 -0
  105. package/lib/yamo/index.d.ts +14 -0
  106. package/lib/yamo/index.js +6 -7
  107. package/lib/yamo/index.ts +16 -0
  108. package/lib/yamo/schema.d.ts +56 -0
  109. package/lib/yamo/schema.js +82 -108
  110. package/lib/yamo/schema.ts +133 -0
  111. package/package.json +13 -8
  112. package/index.d.ts +0 -111
  113. package/lib/embeddings/factory.js +0 -151
  114. package/lib/embeddings/index.js +0 -2
  115. package/lib/embeddings/service.js +0 -586
  116. package/lib/index.js +0 -6
  117. package/lib/lancedb/client.js +0 -633
  118. package/lib/lancedb/config.js +0 -215
  119. package/lib/lancedb/errors.js +0 -144
  120. package/lib/lancedb/index.js +0 -4
  121. package/lib/lancedb/schema.js +0 -217
  122. package/lib/search/index.js +0 -1
  123. package/lib/search/keyword-search.js +0 -144
  124. package/lib/utils/index.js +0 -1
package/lib/llm/client.js CHANGED
@@ -1,3 +1,4 @@
1
+ // @ts-nocheck
1
2
  /**
2
3
  * LLM Client - Multi-provider LLM API client for reflection generation
3
4
  *
@@ -7,84 +8,76 @@
7
8
  * - Ollama (local models)
8
9
  * - Graceful fallback when LLM unavailable
9
10
  */
10
-
11
+ import { createLogger } from "../utils/logger.js";
12
+ const logger = createLogger("llm-client");
11
13
  /**
12
14
  * LLMClient provides unified interface for calling various LLM providers
13
15
  * to generate reflections from memory contexts.
14
16
  */
15
17
export class LLMClient {
  // Provider identifier: "openai" | "anthropic" | "ollama".
  provider;
  // API key for the selected provider (empty string when unset, e.g. for Ollama).
  apiKey;
  // Model name sent with each request.
  model;
  // Base URL of the provider's API.
  baseUrl;
  // Per-request timeout in milliseconds.
  timeout;
  // Maximum number of attempts per LLM call (1 = no retry).
  maxRetries;
  // Maximum tokens to generate per response.
  maxTokens;
  // Request counters; see getStats() / resetStats().
  stats;
  /**
   * Create a new LLMClient instance.
   *
   * @param {Object} [config={}] - Configuration options
   * @param {string} [config.provider='openai'] - LLM provider ('openai', 'anthropic', 'ollama'); falls back to LLM_PROVIDER env var
   * @param {string} [config.apiKey] - API key (falls back to LLM_API_KEY env var)
   * @param {string} [config.model] - Model name (falls back to LLM_MODEL env var, then a provider default)
   * @param {string} [config.baseUrl] - API base URL (falls back to LLM_BASE_URL env var, then a provider default)
   * @param {number} [config.maxTokens=2000] - Max tokens to generate
   * @param {number} [config.timeout] - Request timeout in ms; defaults to 300000 when maxTokens >= 4000, else 60000
   * @param {number} [config.maxRetries=2] - Max attempts per LLM call
   */
  constructor(config = {}) {
    this.provider = config.provider || process.env.LLM_PROVIDER || "openai";
    this.apiKey = config.apiKey || process.env.LLM_API_KEY || "";
    this.model =
      config.model || process.env.LLM_MODEL || this._getDefaultModel();
    this.baseUrl =
      config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
    // ?? (not ||) for numeric options so an explicit 0 is respected.
    this.maxTokens = config.maxTokens ?? 2000;
    // Long generations (>= 4000 tokens) get a much larger default timeout.
    this.timeout = config.timeout ?? (this.maxTokens >= 4000 ? 300000 : 60000);
    this.maxRetries = config.maxRetries ?? 2;
    // Statistics
    this.stats = {
      totalRequests: 0,
      successfulRequests: 0,
      failedRequests: 0,
      fallbackCount: 0,
    };
  }
  /**
   * Get the default model for the configured provider.
   * @private
   * @returns {string} Default model name
   */
  _getDefaultModel() {
    const defaults = {
      openai: "gpt-4o-mini",
      anthropic: "claude-3-5-haiku-20241022",
      ollama: "llama3.2",
    };
    return defaults[this.provider] || "gpt-4o-mini";
  }
  /**
   * Get the default base URL for the configured provider.
   * @private
   * @returns {string} Default base URL
   */
  _getDefaultBaseUrl() {
    const defaults = {
      openai: "https://api.openai.com/v1",
      anthropic: "https://api.anthropic.com/v1",
      ollama: "http://localhost:11434",
    };
    return defaults[this.provider] || "https://api.openai.com/v1";
  }
  /**
   * Generate a reflection from memories.
   * Main entry point for reflection generation; never throws — falls back to
   * a heuristic aggregation when the LLM is unavailable or misbehaves.
   *
   * @param {string} prompt - The reflection prompt
   * @param {Array<{content: string}>} memories - Context memories
   * @returns {Promise<{reflection: string, confidence: number}>}
   */
  async reflect(prompt, memories) {
    this.stats.totalRequests++;
    if (!memories || memories.length === 0) {
      return this._fallback("No memories provided");
    }
    const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.

Respond ONLY in JSON format with exactly these keys:
{
  "reflection": "a concise insight or observation derived from the memories",
  "confidence": 0.8
}

Keep the reflection brief (1-2 sentences) and actionable.`;
    const userContent = this._formatMemoriesForLLM(prompt, memories);
    try {
      const response = await this._callWithRetry(systemPrompt, userContent);
      const parsed = JSON.parse(response);
      // Validate response structure
      if (!parsed.reflection || typeof parsed.confidence !== "number") {
        throw new Error("Invalid LLM response format");
      }
      // Clamp confidence to valid range
      parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
      this.stats.successfulRequests++;
      return parsed;
    }
    catch (error) {
      this.stats.failedRequests++;
      const errorMessage = error instanceof Error ? error.message : String(error);
      logger.debug({ err: error, errorMessage }, "LLM call failed");
      return this._fallback("LLM error", memories);
    }
  }
  /**
   * Format memories into a single prompt string for LLM consumption.
   * @private
   * @param {string} prompt - User prompt
   * @param {Array<{content: string}>} memories - Memory array
   * @returns {string} Formatted content
   */
  _formatMemoriesForLLM(prompt, memories) {
    const memoryList = memories
      .map((m, i) => `${i + 1}. ${m.content}`)
      .join("\n");
    return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
  }
  /**
   * Call the LLM with retry and exponential backoff (2s, 4s, ...).
   * @private
   * @param {string} systemPrompt - System prompt
   * @param {string} userContent - User content
   * @returns {Promise<string>} LLM response text
   * @throws {Error} The last attempt's error, or a descriptive error when
   *   maxRetries <= 0 (previously this path threw null)
   */
  async _callWithRetry(systemPrompt, userContent) {
    let lastError = null;
    for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
      try {
        return await this._callLLM(systemPrompt, userContent);
      }
      catch (error) {
        lastError = error;
        if (attempt < this.maxRetries) {
          const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
          await this._sleep(delay);
        }
      }
    }
    // Guard: never `throw null` when the loop body was never entered.
    throw lastError ?? new Error("LLM call skipped: maxRetries must be >= 1");
  }
  /**
   * Dispatch the LLM call to the configured provider.
   * @private
   * @param {string} systemPrompt - System prompt
   * @param {string} userContent - User content
   * @returns {Promise<string>} Response text
   * @throws {Error} When the provider is not one of the supported values
   */
  async _callLLM(systemPrompt, userContent) {
    switch (this.provider) {
      case "openai":
        return this._callOpenAI(systemPrompt, userContent);
      case "anthropic":
        return this._callAnthropic(systemPrompt, userContent);
      case "ollama":
        return this._callOllama(systemPrompt, userContent);
      default:
        throw new Error(`Unsupported provider: ${this.provider}`);
    }
  }
  /**
   * POST a JSON payload with an abort-based timeout and uniform error reporting.
   * Shared by all provider calls so timeout/error handling lives in one place.
   * @private
   * @param {string} url - Full endpoint URL
   * @param {Object} headers - Request headers
   * @param {Object} payload - JSON-serializable request body
   * @param {string} label - Provider label used in error messages
   * @returns {Promise<any>} Parsed JSON response body
   * @throws {Error} "Request timeout" on abort, or "<label> API error: ..." on non-2xx
   */
  async _postJson(url, headers, payload, label) {
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), this.timeout);
    try {
      const response = await fetch(url, {
        method: "POST",
        headers,
        body: JSON.stringify(payload),
        signal: controller.signal,
      });
      if (!response.ok) {
        const error = await response.text();
        throw new Error(`${label} API error: ${response.status} - ${error}`);
      }
      return await response.json();
    }
    catch (error) {
      if (error instanceof Error && error.name === "AbortError") {
        throw new Error("Request timeout");
      }
      throw error;
    }
    finally {
      // finally guarantees the timer is cleared on every exit path.
      clearTimeout(timeoutId);
    }
  }
  /**
   * Call OpenAI chat-completions API.
   * @private
   * @returns {Promise<string>} Assistant message content
   * @throws {Error} When no API key is configured or the request fails
   */
  async _callOpenAI(systemPrompt, userContent) {
    if (!this.apiKey) {
      throw new Error("OpenAI API key not configured");
    }
    const data = await this._postJson(
      `${this.baseUrl}/chat/completions`,
      {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.apiKey}`,
      },
      {
        model: this.model,
        messages: [
          { role: "system", content: systemPrompt },
          { role: "user", content: userContent },
        ],
        temperature: 0.7,
        max_tokens: this.maxTokens,
      },
      "OpenAI",
    );
    return data.choices[0].message.content;
  }
  /**
   * Call Anthropic (Claude) messages API.
   * @private
   * @returns {Promise<string>} First content block's text
   * @throws {Error} When no API key is configured or the request fails
   */
  async _callAnthropic(systemPrompt, userContent) {
    if (!this.apiKey) {
      throw new Error("Anthropic API key not configured");
    }
    const data = await this._postJson(
      `${this.baseUrl}/messages`,
      {
        "Content-Type": "application/json",
        "x-api-key": this.apiKey,
        "anthropic-version": "2023-06-01",
      },
      {
        model: this.model,
        max_tokens: this.maxTokens,
        system: systemPrompt,
        messages: [{ role: "user", content: userContent }],
      },
      "Anthropic",
    );
    return data.content[0].text;
  }
  /**
   * Call Ollama (local) chat API. No API key required.
   * @private
   * @returns {Promise<string>} Assistant message content
   * @throws {Error} When the request fails or times out
   */
  async _callOllama(systemPrompt, userContent) {
    const data = await this._postJson(
      `${this.baseUrl}/api/chat`,
      {
        "Content-Type": "application/json",
      },
      {
        model: this.model,
        messages: [
          { role: "system", content: systemPrompt },
          { role: "user", content: userContent },
        ],
        stream: false,
        options: {
          num_predict: this.maxTokens,
        },
      },
      "Ollama",
    );
    return data.message.content;
  }
  /**
   * Fallback when the LLM is unavailable or returned an unusable response.
   * Aggregates memory contents when available; otherwise reports the reason.
   * @private
   * @param {string} reason - Fallback reason
   * @param {Array<{content: string}>} [memories=[]] - Memory array
   * @returns {{reflection: string, confidence: number}} Fallback result
   */
  _fallback(reason, memories = []) {
    this.stats.fallbackCount++;
    if (memories && memories.length > 0) {
      // Simple aggregation fallback
      const contents = memories.map((m) => m.content);
      const combined = contents.join("; ");
      const preview = combined.length > 200 ? `${combined.substring(0, 200)}...` : combined;
      return {
        reflection: `Aggregated from ${memories.length} memories: ${preview}`,
        confidence: 0.5,
      };
    }
    return {
      reflection: `Reflection generation unavailable: ${reason}`,
      confidence: 0.3,
    };
  }
  /**
   * Sleep utility used between retry attempts.
   * @private
   * @param {number} ms - Milliseconds to sleep
   * @returns {Promise<void>}
   */
  _sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
  /**
   * Get client statistics.
   * @returns {Object} Counters plus successRate as a 2-decimal string
   */
  getStats() {
    return {
      ...this.stats,
      successRate: this.stats.totalRequests > 0
        ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(2)
        : "0.00",
    };
  }
  /**
   * Reset all statistics counters to zero.
   */
  resetStats() {
    this.stats = {
      totalRequests: 0,
      successfulRequests: 0,
      failedRequests: 0,
      fallbackCount: 0,
    };
  }
}
export default LLMClient;