@yamo/memory-mesh 2.1.2 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,6 +12,8 @@ Built on the [YAMO Protocol](https://github.com/yamo-protocol) for transparent a
12
12
  - **Portable CLI**: Simple JSON-based interface for any agent or language.
13
13
  - **YAMO Skills Integration**: Includes yamo-super workflow system with automatic memory learning.
14
14
  - **Pattern Recognition**: Workflows automatically store and retrieve execution patterns for optimization.
15
+ - **LLM-Powered Reflections**: Generate insights from memories using configurable LLM providers.
16
+ - **YAMO Audit Trail**: Automatic emission of structured YAMO blocks for all memory operations.
15
17
 
16
18
  ## Installation
17
19
 
@@ -44,6 +46,63 @@ await mesh.add('Content', { meta: 'data' });
44
46
  const results = await mesh.search('query');
45
47
  ```
46
48
 
49
+ ### Enhanced Reflections with LLM
50
+
51
+ MemoryMesh supports LLM-powered reflection generation that synthesizes insights from stored memories:
52
+
53
+ ```javascript
54
+ import { MemoryMesh } from '@yamo/memory-mesh';
55
+
56
+ // Enable LLM integration (requires API key or local model)
57
+ const mesh = new MemoryMesh({
58
+ enableLLM: true,
59
+ llmProvider: 'openai', // or 'anthropic', 'ollama'
60
+ llmApiKey: process.env.OPENAI_API_KEY,
61
+ llmModel: 'gpt-4o-mini'
62
+ });
63
+
64
+ // Store some memories
65
+ await mesh.add('Bug: type mismatch in keyword search', { type: 'bug' });
66
+ await mesh.add('Bug: missing content field', { type: 'bug' });
67
+
68
+ // Generate reflection (automatically stores result to memory)
69
+ const reflection = await mesh.reflect({ topic: 'bugs', lookback: 10 });
70
+
71
+ console.log(reflection.reflection);
72
+ // Output: "Synthesized from 2 memories: Bug: type mismatch..., Bug: missing content..."
73
+
74
+ console.log(reflection.confidence); // 0.85
75
+ console.log(reflection.yamoBlock); // YAMO audit trail
76
+ ```
77
+
78
+ **CLI Usage:**
79
+
80
+ ```bash
81
+ # With LLM (default)
82
+ memory-mesh reflect '{"topic": "bugs", "limit": 10}'
83
+
84
+ # Without LLM (prompt-only mode, for use with an external LLM)
85
+ memory-mesh reflect '{"topic": "bugs", "llm": false}'
86
+ ```
87
+
88
+ ### YAMO Audit Trail
89
+
90
+ MemoryMesh automatically emits YAMO blocks for all operations when enabled:
91
+
92
+ ```javascript
93
+ const mesh = new MemoryMesh({ enableYamo: true });
94
+
95
+ // All operations now emit YAMO blocks
96
+ await mesh.add('Memory content', { type: 'event' }); // emits 'retain' block
97
+ await mesh.search('query'); // emits 'recall' block
98
+ await mesh.reflect({ topic: 'test' }); // emits 'reflect' block
99
+
100
+ // Query YAMO log
101
+ const yamoLog = await mesh.getYamoLog({ operationType: 'reflect', limit: 10 });
102
+ console.log(yamoLog);
103
+ // [{ id, agentId, operationType, yamoText, timestamp, ... }]
104
+ ```
105
+
47
106
  ## Using in a Project
48
107
 
49
108
  To use MemoryMesh with your Claude Code skills (like `yamo-super`) in a new project:
@@ -127,3 +186,66 @@ Memory Mesh implements YAMO v2.1.0 compliance with:
127
186
  - **Development Guide**: [CLAUDE.md](CLAUDE.md) - Guide for Claude Code development
128
187
  - **Marketplace**: [.claude-plugin/marketplace.json](.claude-plugin/marketplace.json) - Plugin metadata
129
188
 
189
+ ## Configuration
190
+
191
+ ### LLM Provider Configuration
192
+
193
+ ```bash
194
+ # Required for LLM-powered reflections
195
+ LLM_PROVIDER=openai # Provider: 'openai', 'anthropic', 'ollama'
196
+ LLM_API_KEY=sk-... # API key for OpenAI/Anthropic
197
+ LLM_MODEL=gpt-4o-mini # Model name
198
+ LLM_BASE_URL=https://... # Optional: Custom API base URL
199
+ ```
200
+
201
+ **Supported Providers:**
202
+ - **OpenAI**: GPT-4, GPT-4o-mini, etc.
203
+ - **Anthropic**: Claude 3.5 Haiku, Sonnet, Opus
204
+ - **Ollama**: Local models (llama3.2, mistral, etc.); no API key required (see the sketch below)
205
+
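For a local setup, Ollama runs without an API key. A minimal sketch using the same constructor options shown earlier (it assumes an Ollama server running on its default port with the model already pulled):

```javascript
import { MemoryMesh } from '@yamo/memory-mesh';

// Local reflections via Ollama; no llmApiKey needed
const mesh = new MemoryMesh({
  enableLLM: true,
  llmProvider: 'ollama',
  llmModel: 'llama3.2' // any model available in the local Ollama instance
});

const reflection = await mesh.reflect({ topic: 'bugs' });
console.log(reflection.reflection, reflection.confidence);
```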
206
+ ### YAMO Configuration
207
+
208
+ ```bash
209
+ # Optional YAMO settings
210
+ ENABLE_YAMO=true # Enable YAMO block emission (default: true)
211
+ YAMO_DEBUG=true # Enable verbose YAMO logging
212
+ ```
213
+
214
+ ### LanceDB Configuration
215
+
216
+ ```bash
217
+ # Vector database settings
218
+ LANCEDB_URI=./runtime/data/lancedb
219
+ LANCEDB_MEMORY_TABLE=memory_entries
220
+ ```
221
+
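The same settings can be passed programmatically when constructing the low-level client directly. A sketch; the import path and the `tableName` option name are assumptions based on the `LanceDBClient` constructor shown further down in this diff, and most users only need the environment variables above:

```javascript
// Import path assumed; LanceDBClient is the client whose constructor appears later in this diff
import LanceDBClient from '@yamo/memory-mesh/lib/lancedb/client.js';

const client = new LanceDBClient({
  uri: './runtime/data/lancedb', // same role as LANCEDB_URI
  tableName: 'memory_entries',   // assumed equivalent of LANCEDB_MEMORY_TABLE
  vectorDimension: 384           // should match EMBEDDING_DIMENSION
});
```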
222
+ ### Embedding Configuration
223
+
224
+ ```bash
225
+ # Embedding model settings
226
+ EMBEDDING_MODEL_TYPE=local # 'local', 'openai', 'cohere', 'ollama'
227
+ EMBEDDING_MODEL_NAME=Xenova/all-MiniLM-L6-v2
228
+ EMBEDDING_DIMENSION=384
229
+ ```
230
+
231
+ ### Example .env File
232
+
233
+ ```bash
234
+ # LLM for reflections
235
+ LLM_PROVIDER=openai
236
+ LLM_API_KEY=sk-your-key-here
237
+ LLM_MODEL=gpt-4o-mini
238
+
239
+ # YAMO audit
240
+ ENABLE_YAMO=true
241
+ YAMO_DEBUG=false
242
+
243
+ # Vector DB
244
+ LANCEDB_URI=./data/lancedb
245
+
246
+ # Embeddings (local default)
247
+ EMBEDDING_MODEL_TYPE=local
248
+ EMBEDDING_MODEL_NAME=Xenova/all-MiniLM-L6-v2
249
+ ```
250
+
251
+
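With a `.env` like the one above in place, construction can stay minimal. A sketch that assumes you load the file yourself (for example with the `dotenv` package) and that unset constructor options fall back to the environment variables described in the sections above:

```javascript
import 'dotenv/config';                          // assumption: .env loaded via the dotenv package
import { MemoryMesh } from '@yamo/memory-mesh';

// LLM_*, ENABLE_YAMO and LANCEDB_* are picked up from the environment when not passed explicitly
const mesh = new MemoryMesh({ enableLLM: true, enableYamo: true });

await mesh.add('Deploy failed: missing env var', { type: 'incident' });
const reflection = await mesh.reflect({ topic: 'deploys' });
```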
@@ -1,69 +1,14 @@
1
1
  #!/usr/bin/env node
2
2
 
3
3
  /**
4
- * MemoryMesh CLI Adapter
5
- * Provides a portable interface for skills to interact with the MemoryMesh system.
6
- *
7
- * Usage:
8
- * node tools/memory_mesh.js store <content_string_or_json> [metadata_json]
9
- * node tools/memory_mesh.js search <query_string> [limit]
4
+ * MemoryMesh CLI Entry Point
5
+ * Delegates to the core CLI handler in lib/memory/memory-mesh.js
10
6
  */
11
7
 
12
- import { MemoryMesh } from '../lib/memory/memory-mesh.js';
13
- import path from 'path';
8
+ import { run } from '../lib/memory/memory-mesh.js';
14
9
 
15
- // Parse arguments
16
- const args = process.argv.slice(2);
17
- const command = args[0];
18
-
19
- async function main() {
20
- try {
21
- const mesh = new MemoryMesh();
22
- // Wait for initialization if necessary (MemoryMesh constructor usually doesn't await,
23
- // but operations might need internal init. We assume standard usage.)
24
-
25
- if (command === 'store') {
26
- const content = args[1];
27
- let metadata = {};
28
- if (args[2]) {
29
- try {
30
- metadata = JSON.parse(args[2]);
31
- } catch (e) {
32
- console.error(JSON.stringify({ error: "Invalid metadata JSON" }));
33
- process.exit(1);
34
- }
35
- }
36
-
37
- if (!content) {
38
- console.error(JSON.stringify({ error: "Content required for store command" }));
39
- process.exit(1);
40
- }
41
-
42
- const result = await mesh.add(content, metadata);
43
- console.log(JSON.stringify({ success: true, id: result.id, message: "Memory stored successfully" }));
44
-
45
- } else if (command === 'search') {
46
- const query = args[1];
47
- const limit = parseInt(args[2]) || 5;
48
-
49
- if (!query) {
50
- console.error(JSON.stringify({ error: "Query required for search command" }));
51
- process.exit(1);
52
- }
53
-
54
- const results = await mesh.search(query, { limit });
55
- console.log(JSON.stringify({ success: true, results: results }));
56
-
57
- } else {
58
- console.error(JSON.stringify({ error: `Unknown command: ${command}` }));
59
- console.error("Usage: node tools/memory_mesh.js [store|search] ...");
60
- process.exit(1);
61
- }
62
-
63
- } catch (error) {
64
- console.error(JSON.stringify({ error: error.message }));
65
- process.exit(1);
66
- }
67
- }
68
-
69
- main();
10
+ // Execute the main CLI handler
11
+ run().catch(err => {
12
+ console.error(`❌ Fatal Error: ${err.message}`);
13
+ process.exit(1);
14
+ });
@@ -7,10 +7,11 @@ import EmbeddingService from "./service.js";
7
7
  import { ConfigurationError, EmbeddingError } from "../lancedb/errors.js";
8
8
 
9
9
  class EmbeddingFactory {
10
- constructor() {
10
+ constructor(ServiceClass = EmbeddingService) {
11
11
  this.primaryService = null;
12
12
  this.fallbackServices = [];
13
13
  this.configured = false;
14
+ this.ServiceClass = ServiceClass;
14
15
  }
15
16
 
16
17
  /**
@@ -22,10 +23,10 @@ class EmbeddingFactory {
22
23
  // Sort by priority (lower = higher priority)
23
24
  configs.sort((a, b) => a.priority - b.priority);
24
25
 
25
- this.primaryService = new EmbeddingService(configs[0]);
26
+ this.primaryService = new this.ServiceClass(configs[0]);
26
27
 
27
28
  if (configs.length > 1) {
28
- this.fallbackServices = configs.slice(1).map(c => new EmbeddingService(c));
29
+ this.fallbackServices = configs.slice(1).map(c => new this.ServiceClass(c));
29
30
  }
30
31
 
31
32
  this.configured = true;
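The injectable `ServiceClass` makes the factory testable without a real embedding backend. A minimal sketch; the import path, the `configure` method name, and the stub's `embed` method are assumptions, since this hunk only shows the constructor and the priority sort:

```javascript
import EmbeddingFactory from './lib/embeddings/factory.js'; // path assumed

// Stub standing in for EmbeddingService in tests (method surface assumed)
class StubEmbeddingService {
  constructor(config) { this.config = config; }
  async embed() { return new Array(384).fill(0); }
}

const factory = new EmbeddingFactory(StubEmbeddingService);

// Lower priority number wins: 'local' becomes the primary service, the rest become fallbacks
factory.configure([
  { type: 'openai', priority: 2 },
  { type: 'local', priority: 1 }
]);
```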
@@ -28,6 +28,7 @@ class LanceDBClient {
28
28
  * @param {number} [config.maxRetries] - Maximum connection retries (default: 3)
29
29
  * @param {number} [config.retryDelay] - Delay between retries in ms (default: 1000)
30
30
  * @param {number} [config.vectorDimension] - Vector dimension for embeddings (default: 384)
31
+ * @param {Object} [config.driver] - LanceDB driver instance (for testing)
31
32
  */
32
33
  constructor(config = {}) {
33
34
  this.uri = (config && config.uri) || process.env.LANCEDB_URI || './data/lancedb';
@@ -35,6 +36,7 @@ class LanceDBClient {
35
36
  this.maxRetries = (config && config.maxRetries) || 3;
36
37
  this.retryDelay = (config && config.retryDelay) || 1000;
37
38
  this.vectorDimension = (config && config.vectorDimension) || DEFAULT_VECTOR_DIMENSION;
39
+ this.driver = (config && config.driver) || lancedb;
38
40
 
39
41
  // Connection state
40
42
  this.db = null;
@@ -66,7 +68,7 @@ class LanceDBClient {
66
68
  }
67
69
 
68
70
  // Connect to database
69
- this.db = await lancedb.connect(this.uri);
71
+ this.db = await this.driver.connect(this.uri);
70
72
 
71
73
  // Initialize table with dynamic dimension (creates if doesn't exist, opens if it does)
72
74
  this.table = await createMemoryTableWithDimension(this.db, this.tableName, this.vectorDimension);
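The new `driver` option plays the same role for LanceDB: tests can supply a stand-in for the real module. A sketch; only `connect()` goes through `this.driver` in this hunk, so a real test double would also have to satisfy `createMemoryTableWithDimension`:

```javascript
// Import path assumed, as above
import LanceDBClient from '@yamo/memory-mesh/lib/lancedb/client.js';

// Minimal fake driver: connect() is the only call routed through this.driver here
const fakeDriver = {
  async connect(uri) {
    console.log(`fake connect to ${uri}`);
    return {}; // stand-in for a LanceDB connection object
  }
};

const client = new LanceDBClient({ uri: './tmp/test-lancedb', driver: fakeDriver });
```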
@@ -0,0 +1,391 @@
1
+ /**
2
+ * LLM Client - Multi-provider LLM API client for reflection generation
3
+ *
4
+ * Supports:
5
+ * - OpenAI (GPT-4, GPT-4o-mini, etc.)
6
+ * - Anthropic (Claude)
7
+ * - Ollama (local models)
8
+ * - Graceful fallback when LLM unavailable
9
+ */
10
+
11
+ /**
12
+ * LLMClient provides unified interface for calling various LLM providers
13
+ * to generate reflections from memory contexts.
14
+ */
15
+ export class LLMClient {
16
+ /**
17
+ * Create a new LLMClient instance
18
+ *
19
+ * @param {Object} [config={}] - Configuration options
20
+ * @param {string} [config.provider='openai'] - LLM provider ('openai', 'anthropic', 'ollama')
21
+ * @param {string} [config.apiKey] - API key (defaults to env var)
22
+ * @param {string} [config.model] - Model name
23
+ * @param {string} [config.baseUrl] - Base URL for API (optional)
24
+ * @param {number} [config.timeout=30000] - Request timeout in ms
25
+ * @param {number} [config.maxRetries=2] - Max retry attempts
26
+ */
27
+ constructor(config = {}) {
28
+ this.provider = config.provider || process.env.LLM_PROVIDER || 'openai';
29
+ this.apiKey = config.apiKey || process.env.LLM_API_KEY || '';
30
+ this.model = config.model || process.env.LLM_MODEL || this._getDefaultModel();
31
+ this.baseUrl = config.baseUrl || process.env.LLM_BASE_URL || this._getDefaultBaseUrl();
32
+ this.timeout = config.timeout || 30000;
33
+ this.maxRetries = config.maxRetries || 2;
34
+
35
+ // Statistics
36
+ this.stats = {
37
+ totalRequests: 0,
38
+ successfulRequests: 0,
39
+ failedRequests: 0,
40
+ fallbackCount: 0
41
+ };
42
+ }
43
+
44
+ /**
45
+ * Get default model for provider
46
+ * @private
47
+ * @returns {string} Default model name
48
+ */
49
+ _getDefaultModel() {
50
+ const defaults = {
51
+ openai: 'gpt-4o-mini',
52
+ anthropic: 'claude-3-5-haiku-20241022',
53
+ ollama: 'llama3.2'
54
+ };
55
+ return defaults[this.provider] || 'gpt-4o-mini';
56
+ }
57
+
58
+ /**
59
+ * Get default base URL for provider
60
+ * @private
61
+ * @returns {string} Default base URL
62
+ */
63
+ _getDefaultBaseUrl() {
64
+ const defaults = {
65
+ openai: 'https://api.openai.com/v1',
66
+ anthropic: 'https://api.anthropic.com/v1',
67
+ ollama: 'http://localhost:11434'
68
+ };
69
+ return defaults[this.provider] || 'https://api.openai.com/v1';
70
+ }
71
+
72
+ /**
73
+ * Generate reflection from memories
74
+ * Main entry point for reflection generation
75
+ *
76
+ * @param {string} prompt - The reflection prompt
77
+ * @param {Array} memories - Context memories
78
+ * @returns {Promise<Object>} { reflection, confidence }
79
+ */
80
+ async reflect(prompt, memories) {
81
+ this.stats.totalRequests++;
82
+
83
+ if (!memories || memories.length === 0) {
84
+ return this._fallback('No memories provided');
85
+ }
86
+
87
+ const systemPrompt = `You are a reflective AI agent. Review the provided memories and synthesize a high-level insight, belief, or observation.
88
+ Respond ONLY in JSON format with exactly these keys:
89
+ {
90
+ "reflection": "a concise insight or observation derived from the memories",
91
+ "confidence": 0.0 to 1.0
92
+ }
93
+
94
+ Keep the reflection brief (1-2 sentences) and actionable.`;
95
+
96
+ const userContent = this._formatMemoriesForLLM(prompt, memories);
97
+
98
+ try {
99
+ const response = await this._callWithRetry(systemPrompt, userContent);
100
+ const parsed = JSON.parse(response);
101
+
102
+ // Validate response structure
103
+ if (!parsed.reflection || typeof parsed.confidence !== 'number') {
104
+ throw new Error('Invalid LLM response format');
105
+ }
106
+
107
+ // Clamp confidence to valid range
108
+ parsed.confidence = Math.max(0, Math.min(1, parsed.confidence));
109
+
110
+ this.stats.successfulRequests++;
111
+ return parsed;
112
+
113
+ } catch (error) {
114
+ this.stats.failedRequests++;
115
+ const errorMessage = error instanceof Error ? error.message : String(error);
116
+ console.warn(`[LLMClient] LLM call failed: ${errorMessage}`);
117
+ return this._fallback('LLM error', memories);
118
+ }
119
+ }
120
+
121
+ /**
122
+ * Format memories for LLM consumption
123
+ * @private
124
+ * @param {string} prompt - User prompt
125
+ * @param {Array} memories - Memory array
126
+ * @returns {string} Formatted content
127
+ */
128
+ _formatMemoriesForLLM(prompt, memories) {
129
+ const memoryList = memories
130
+ .map((m, i) => `${i + 1}. ${m.content}`)
131
+ .join('\n');
132
+
133
+ return `Prompt: ${prompt}\n\nMemories:\n${memoryList}\n\nBased on these memories, provide a brief reflective insight.`;
134
+ }
135
+
136
+ /**
137
+ * Call LLM with retry logic
138
+ * @private
139
+ * @param {string} systemPrompt - System prompt
140
+ * @param {string} userContent - User content
141
+ * @returns {Promise<string>} LLM response text
142
+ */
143
+ async _callWithRetry(systemPrompt, userContent) {
144
+ let lastError = null;
145
+
146
+ for (let attempt = 1; attempt <= this.maxRetries; attempt++) {
147
+ try {
148
+ return await this._callLLM(systemPrompt, userContent);
149
+ } catch (error) {
150
+ lastError = error;
151
+ if (attempt < this.maxRetries) {
152
+ const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
153
+ await this._sleep(delay);
154
+ }
155
+ }
156
+ }
157
+
158
+ throw lastError;
159
+ }
160
+
161
+ /**
162
+ * Call LLM based on provider
163
+ * @private
164
+ * @param {string} systemPrompt - System prompt
165
+ * @param {string} userContent - User content
166
+ * @returns {Promise<string>} Response text
167
+ */
168
+ async _callLLM(systemPrompt, userContent) {
169
+ switch (this.provider) {
170
+ case 'openai':
171
+ return await this._callOpenAI(systemPrompt, userContent);
172
+ case 'anthropic':
173
+ return await this._callAnthropic(systemPrompt, userContent);
174
+ case 'ollama':
175
+ return await this._callOllama(systemPrompt, userContent);
176
+ default:
177
+ throw new Error(`Unsupported provider: ${this.provider}`);
178
+ }
179
+ }
180
+
181
+ /**
182
+ * Call OpenAI API
183
+ * @private
184
+ */
185
+ async _callOpenAI(systemPrompt, userContent) {
186
+ if (!this.apiKey) {
187
+ throw new Error('OpenAI API key not configured');
188
+ }
189
+
190
+ const controller = new AbortController();
191
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
192
+
193
+ try {
194
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
195
+ method: 'POST',
196
+ headers: {
197
+ 'Content-Type': 'application/json',
198
+ 'Authorization': `Bearer ${this.apiKey}`
199
+ },
200
+ body: JSON.stringify({
201
+ model: this.model,
202
+ messages: [
203
+ { role: 'system', content: systemPrompt },
204
+ { role: 'user', content: userContent }
205
+ ],
206
+ temperature: 0.7,
207
+ max_tokens: 500
208
+ }),
209
+ signal: controller.signal
210
+ });
211
+
212
+ clearTimeout(timeoutId);
213
+
214
+ if (!response.ok) {
215
+ const error = await response.text();
216
+ throw new Error(`OpenAI API error: ${response.status} - ${error}`);
217
+ }
218
+
219
+ const data = await response.json();
220
+ return data.choices[0].message.content;
221
+
222
+ } catch (error) {
223
+ clearTimeout(timeoutId);
224
+ if (error instanceof Error && error.name === 'AbortError') {
225
+ throw new Error('Request timeout');
226
+ }
227
+ throw error;
228
+ }
229
+ }
230
+
231
+ /**
232
+ * Call Anthropic (Claude) API
233
+ * @private
234
+ */
235
+ async _callAnthropic(systemPrompt, userContent) {
236
+ if (!this.apiKey) {
237
+ throw new Error('Anthropic API key not configured');
238
+ }
239
+
240
+ const controller = new AbortController();
241
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
242
+
243
+ try {
244
+ const response = await fetch(`${this.baseUrl}/messages`, {
245
+ method: 'POST',
246
+ headers: {
247
+ 'Content-Type': 'application/json',
248
+ 'x-api-key': this.apiKey,
249
+ 'anthropic-version': '2023-06-01'
250
+ },
251
+ body: JSON.stringify({
252
+ model: this.model,
253
+ max_tokens: 500,
254
+ system: systemPrompt,
255
+ messages: [
256
+ { role: 'user', content: userContent }
257
+ ]
258
+ }),
259
+ signal: controller.signal
260
+ });
261
+
262
+ clearTimeout(timeoutId);
263
+
264
+ if (!response.ok) {
265
+ const error = await response.text();
266
+ throw new Error(`Anthropic API error: ${response.status} - ${error}`);
267
+ }
268
+
269
+ const data = await response.json();
270
+ return data.content[0].text;
271
+
272
+ } catch (error) {
273
+ clearTimeout(timeoutId);
274
+ if (error instanceof Error && error.name === 'AbortError') {
275
+ throw new Error('Request timeout');
276
+ }
277
+ throw error;
278
+ }
279
+ }
280
+
281
+ /**
282
+ * Call Ollama (local) API
283
+ * @private
284
+ */
285
+ async _callOllama(systemPrompt, userContent) {
286
+ const controller = new AbortController();
287
+ const timeoutId = setTimeout(() => controller.abort(), this.timeout);
288
+
289
+ try {
290
+ const response = await fetch(`${this.baseUrl}/api/chat`, {
291
+ method: 'POST',
292
+ headers: {
293
+ 'Content-Type': 'application/json'
294
+ },
295
+ body: JSON.stringify({
296
+ model: this.model,
297
+ messages: [
298
+ { role: 'system', content: systemPrompt },
299
+ { role: 'user', content: userContent }
300
+ ],
301
+ stream: false
302
+ }),
303
+ signal: controller.signal
304
+ });
305
+
306
+ clearTimeout(timeoutId);
307
+
308
+ if (!response.ok) {
309
+ const error = await response.text();
310
+ throw new Error(`Ollama API error: ${response.status} - ${error}`);
311
+ }
312
+
313
+ const data = await response.json();
314
+ return data.message.content;
315
+
316
+ } catch (error) {
317
+ clearTimeout(timeoutId);
318
+ if (error instanceof Error && error.name === 'AbortError') {
319
+ throw new Error('Request timeout');
320
+ }
321
+ throw error;
322
+ }
323
+ }
324
+
325
+ /**
326
+ * Fallback when LLM fails
327
+ * @private
328
+ * @param {string} reason - Fallback reason
329
+ * @param {Array} [memories=[]] - Memory array
330
+ * @returns {Object} Fallback result
331
+ */
332
+ _fallback(reason, memories = []) {
333
+ this.stats.fallbackCount++;
334
+
335
+ if (memories && memories.length > 0) {
336
+ // Simple aggregation fallback
337
+ const contents = memories.map(m => m.content);
338
+ const combined = contents.join('; ');
339
+ const preview = combined.length > 200
340
+ ? combined.substring(0, 200) + '...'
341
+ : combined;
342
+
343
+ return {
344
+ reflection: `Aggregated from ${memories.length} memories: ${preview}`,
345
+ confidence: 0.5
346
+ };
347
+ }
348
+
349
+ return {
350
+ reflection: `Reflection generation unavailable: ${reason}`,
351
+ confidence: 0.3
352
+ };
353
+ }
354
+
355
+ /**
356
+ * Sleep utility
357
+ * @private
358
+ * @param {number} ms - Milliseconds to sleep
359
+ * @returns {Promise<void>}
360
+ */
361
+ _sleep(ms) {
362
+ return new Promise(resolve => setTimeout(resolve, ms));
363
+ }
364
+
365
+ /**
366
+ * Get client statistics
367
+ * @returns {Object} Statistics
368
+ */
369
+ getStats() {
370
+ return {
371
+ ...this.stats,
372
+ successRate: this.stats.totalRequests > 0
373
+ ? (this.stats.successfulRequests / this.stats.totalRequests).toFixed(2)
374
+ : '0.00'
375
+ };
376
+ }
377
+
378
+ /**
379
+ * Reset statistics
380
+ */
381
+ resetStats() {
382
+ this.stats = {
383
+ totalRequests: 0,
384
+ successfulRequests: 0,
385
+ failedRequests: 0,
386
+ fallbackCount: 0
387
+ };
388
+ }
389
+ }
390
+
391
+ export default LLMClient;
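For reference, the client can also be used on its own. A short sketch based on the constructor and `reflect()` above; the import path is an assumption:

```javascript
import { LLMClient } from '@yamo/memory-mesh/lib/llm/client.js'; // path assumed

const llm = new LLMClient({ provider: 'ollama', model: 'llama3.2' });

const memories = [
  { content: 'Bug: type mismatch in keyword search' },
  { content: 'Bug: missing content field' }
];

// Returns { reflection, confidence }; on failure _fallback() aggregates the
// memory contents instead and reports confidence 0.5
const result = await llm.reflect('recurring bug themes', memories);
console.log(result.reflection, result.confidence);
console.log(llm.getStats()); // { totalRequests, successfulRequests, failedRequests, fallbackCount, successRate }
```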
@@ -0,0 +1,10 @@
1
+ /**
2
+ * LLM Module - LLM client support for yamo-memory-mesh
3
+ * Exports multi-provider LLM client for reflection generation
4
+ */
5
+
6
+ import { LLMClient } from './client.js';
7
+
8
+ export { LLMClient };
9
+
10
+ export default { LLMClient };