claude-flow 2.7.0-alpha.2 → 2.7.0-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,24 +2,65 @@
  * ReasoningBank Adapter for Claude-Flow
  *
  * Wraps agentic-flow's ReasoningBank SDK for use in claude-flow memory commands
+ * Performance-optimized with caching, timeouts, and async embeddings
  */
 
 import { db, initialize, retrieveMemories, computeEmbedding, loadConfig } from 'agentic-flow/dist/reasoningbank/index.js';
 import { v4 as uuidv4 } from 'uuid';
 
+// Query result cache (LRU)
+const queryCache = new Map();
+const CACHE_SIZE = 100;
+const CACHE_TTL = 60000; // 60 seconds
+
+// Async embedding processing queue
+const embeddingQueue = [];
+let processingQueue = false;
+
 /**
- * Initialize ReasoningBank database
+ * Initialize ReasoningBank database with performance optimizations
  */
 export async function initializeReasoningBank() {
   // Set database path
   process.env.CLAUDE_FLOW_DB_PATH = '.swarm/memory.db';
 
   await initialize();
+  await optimizeDatabase();
   return true;
 }
 
 /**
- * Store a memory in ReasoningBank
+ * Add database indexes for query performance
+ */
+async function optimizeDatabase() {
+  try {
+    const dbInstance = db.getDb();
+
+    // Add indexes for common queries
+    dbInstance.exec(`
+      -- Index on confidence for sorting
+      CREATE INDEX IF NOT EXISTS idx_patterns_confidence
+        ON patterns(confidence DESC);
+
+      -- Index on usage_count for sorting
+      CREATE INDEX IF NOT EXISTS idx_patterns_usage
+        ON patterns(usage_count DESC);
+
+      -- Index on created_at for time-based queries
+      CREATE INDEX IF NOT EXISTS idx_patterns_created
+        ON patterns(created_at DESC);
+
+      -- Index on memory_id for embeddings lookup
+      CREATE INDEX IF NOT EXISTS idx_embeddings_memory
+        ON pattern_embeddings(memory_id);
+    `);
+  } catch (error) {
+    console.warn('[ReasoningBank] Failed to create indexes:', error.message);
+  }
+}
+
+/**
+ * Store a memory in ReasoningBank (optimized with async embeddings)
  */
 export async function storeMemory(key, value, options = {}) {
   const memoryId = `mem_${uuidv4()}`;
@@ -39,10 +80,49 @@ export async function storeMemory(key, value, options = {}) {
     created_at: new Date().toISOString(),
   };
 
-  // Store memory
+  // Store memory immediately (fast, no blocking)
   db.upsertMemory(memory);
 
-  // Compute and store embedding for semantic search
+  // Invalidate query cache when new memory is added
+  queryCache.clear();
+
+  // Queue embedding computation for background processing (unless disabled)
+  if (options.async !== false) {
+    embeddingQueue.push({ memoryId, key, value });
+    processEmbeddingQueue(); // Non-blocking
+  } else {
+    // Synchronous mode for critical operations
+    await computeAndStoreEmbedding(memoryId, key, value);
+  }
+
+  return memoryId;
+}
+
+/**
+ * Process embedding queue in background
+ */
+async function processEmbeddingQueue() {
+  if (processingQueue || embeddingQueue.length === 0) return;
+
+  processingQueue = true;
+
+  while (embeddingQueue.length > 0) {
+    const batch = embeddingQueue.splice(0, 5); // Process 5 at a time
+
+    await Promise.allSettled(
+      batch.map(({ memoryId, key, value }) =>
+        computeAndStoreEmbedding(memoryId, key, value)
+      )
+    );
+  }
+
+  processingQueue = false;
+}
+
+/**
+ * Compute and store embedding for a memory
+ */
+async function computeAndStoreEmbedding(memoryId, key, value) {
   try {
     const config = loadConfig();
     const embeddingModel = config.embeddings.provider || 'claude';
@@ -53,31 +133,41 @@ export async function storeMemory(key, value, options = {}) {
     db.upsertEmbedding({
       memory_id: memoryId,
       vector: vectorArray,
-      model: embeddingModel, // Dynamic model from config
-      dims: vectorArray.length, // Required: embedding dimensions
+      model: embeddingModel,
+      dims: vectorArray.length,
       created_at: new Date().toISOString(),
     });
   } catch (error) {
-    console.warn('[ReasoningBank] Warning: Could not compute embedding:', error.message);
-    // Continue without embedding - memory is still stored
+    console.warn(`[ReasoningBank] Failed to compute embedding for ${memoryId}:`, error.message);
   }
-
-  return memoryId;
 }
 
 /**
- * Query memories from ReasoningBank
+ * Query memories from ReasoningBank (optimized with caching and timeout)
  */
 export async function queryMemories(searchQuery, options = {}) {
-  try {
-    // Use ReasoningBank's semantic retrieval
-    const memories = await retrieveMemories(searchQuery, {
-      domain: options.domain || 'general',
-      agent: options.agent || 'memory-agent',
-      k: options.limit || 10,
-    });
+  // Check cache first
+  const cached = getCachedQuery(searchQuery, options);
+  if (cached) {
+    return cached;
+  }
 
-    return memories.map(mem => {
+  const timeout = options.timeout || 3000; // 3s default timeout
+
+  try {
+    // Race between semantic search and timeout
+    const memories = await Promise.race([
+      retrieveMemories(searchQuery, {
+        domain: options.domain || 'general',
+        agent: options.agent || 'memory-agent',
+        k: options.limit || 10,
+      }),
+      new Promise((_, reject) =>
+        setTimeout(() => reject(new Error('Query timeout')), timeout)
+      )
+    ]);
+
+    const results = memories.map(mem => {
       try {
         const data = JSON.parse(mem.pattern_data);
         return {
@@ -94,35 +184,119 @@ export async function queryMemories(searchQuery, options = {}) {
         return null;
       }
     }).filter(Boolean);
+
+    // If semantic search returns no results, try SQL fallback
+    // (handles case where embeddings don't exist yet)
+    if (results.length === 0) {
+      console.warn('[ReasoningBank] Semantic search returned 0 results, trying SQL fallback');
+      const fallbackResults = await queryMemoriesFast(searchQuery, options);
+      setCachedQuery(searchQuery, options, fallbackResults);
+      return fallbackResults;
+    }
+
+    // Cache successful results
+    setCachedQuery(searchQuery, options, results);
+    return results;
   } catch (error) {
-    // Fallback to simple query if semantic search fails
-    console.warn('[ReasoningBank] Semantic search failed, using simple query:', error.message);
+    // Fast SQL fallback on timeout or error
+    console.warn('[ReasoningBank] Using fast SQL fallback:', error.message);
+    const results = await queryMemoriesFast(searchQuery, options);
 
-    const dbInstance = db.getDb();
-    const rows = dbInstance.prepare(`
-      SELECT * FROM patterns
-      WHERE pattern_data LIKE ?
-      ORDER BY confidence DESC, usage_count DESC
-      LIMIT ?
-    `).all(`%${searchQuery}%`, options.limit || 10);
-
-    return rows.map(row => {
-      try {
-        const data = JSON.parse(row.pattern_data);
-        return {
-          id: row.id,
-          key: data.key,
-          value: data.value,
-          namespace: data.namespace,
-          confidence: row.confidence,
-          usage_count: row.usage_count,
-          created_at: row.created_at,
-        };
-      } catch {
-        return null;
-      }
-    }).filter(Boolean);
+    // Cache fallback results too
+    setCachedQuery(searchQuery, options, results);
+    return results;
+  }
+}
+
+/**
+ * Fast SQL-based query fallback (optimized)
+ */
+async function queryMemoriesFast(searchQuery, options = {}) {
+  const dbInstance = db.getDb();
+  const limit = options.limit || 10;
+  const namespace = options.namespace;
+
+  // Use optimized query with indexes
+  let query = `
+    SELECT
+      id,
+      pattern_data,
+      confidence,
+      usage_count,
+      created_at
+    FROM patterns
+    WHERE 1=1
+  `;
+
+  const params = [];
+
+  // Add namespace filter if provided
+  if (namespace) {
+    query += ` AND pattern_data LIKE ?`;
+    params.push(`%"namespace":"${namespace}"%`);
+  }
+
+  // Add search filter - check both key and value
+  query += ` AND (
+    pattern_data LIKE ? OR
+    pattern_data LIKE ?
+  )`;
+  params.push(`%"key":"%${searchQuery}%"%`, `%"value":"%${searchQuery}%"%`);
+
+  // Use indexed sort
+  query += ` ORDER BY confidence DESC, usage_count DESC LIMIT ?`;
+  params.push(limit);
+
+  const rows = dbInstance.prepare(query).all(...params);
+
+  return rows.map(row => {
+    try {
+      const data = JSON.parse(row.pattern_data);
+      return {
+        id: row.id,
+        key: data.key,
+        value: data.value,
+        namespace: data.namespace,
+        confidence: row.confidence,
+        usage_count: row.usage_count,
+        created_at: row.created_at,
+      };
+    } catch {
+      return null;
+    }
+  }).filter(Boolean);
+}
+
+/**
+ * Get cached query results
+ */
+function getCachedQuery(searchQuery, options) {
+  const cacheKey = JSON.stringify({ searchQuery, options });
+  const cached = queryCache.get(cacheKey);
+
+  if (cached && Date.now() - cached.timestamp < CACHE_TTL) {
+    return cached.results;
+  }
+
+  return null;
+}
+
+/**
+ * Set cached query results (LRU eviction)
+ */
+function setCachedQuery(searchQuery, options, results) {
+  const cacheKey = JSON.stringify({ searchQuery, options });
+
+  // LRU eviction
+  if (queryCache.size >= CACHE_SIZE) {
+    const firstKey = queryCache.keys().next().value;
+    queryCache.delete(firstKey);
   }
+
+  queryCache.set(cacheKey, {
+    results,
+    timestamp: Date.now()
+  });
 }
 
 /**