@grec0/memory-bank-mcp 0.1.3 → 0.1.5

@@ -362,7 +362,7 @@ ${chunk.content}
         return doc;
     }
     /**
-     * Generates all project documents
+     * Generates all project documents (in parallel for speed)
      */
     async generateAllDocuments(chunks, force = false) {
         const result = {
@@ -374,54 +374,65 @@ ${chunk.content}
             totalOutputTokens: 0,
             errors: [],
         };
-        // Get previous progress if exists
+        // Get previous progress if exists (read before parallel generation)
         let previousProgress;
         const progressPath = path.join(this.options.docsPath, "progress.md");
         if (fs.existsSync(progressPath)) {
             previousProgress = fs.readFileSync(progressPath, "utf-8");
         }
-        // Define generation order (some docs may depend on others conceptually)
-        const docOrder = [
-            "techContext", // Foundation - understand the tech stack first
-            "projectBrief", // High-level overview
-            "systemPatterns", // Architecture
-            "productContext", // Business/user perspective
-            "activeContext", // Current state
-            "progress", // Progress tracking (last, uses previous data)
+        // Prepare chunks for activeContext (recent only)
+        const recentChunks = [...chunks]
+            .sort((a, b) => b.timestamp - a.timestamp)
+            .slice(0, Math.min(30, chunks.length));
+        // All document types to generate
+        const docTypes = [
+            "techContext",
+            "projectBrief",
+            "systemPatterns",
+            "productContext",
+            "activeContext",
+            "progress",
         ];
-        for (const docType of docOrder) {
+        console.error(`\n🚀 Generating ${docTypes.length} documents in PARALLEL...`);
+        // Generate all documents in parallel
+        const generationPromises = docTypes.map(async (docType) => {
             try {
-                // For activeContext, use only recent chunks (by timestamp)
-                let docChunks = chunks;
-                if (docType === "activeContext") {
-                    // Sort by timestamp and take most recent
-                    docChunks = [...chunks]
-                        .sort((a, b) => b.timestamp - a.timestamp)
-                        .slice(0, Math.min(30, chunks.length));
-                }
+                // For activeContext, use only recent chunks
+                const docChunks = docType === "activeContext" ? recentChunks : chunks;
                 const existingMetadata = this.metadataCache.get(docType);
                 const isNew = !existingMetadata;
                 const doc = await this.generateDocument(docType, docChunks, force, docType === "progress" ? previousProgress : undefined);
-                if (doc) {
-                    result.totalReasoningTokens += doc.metadata.reasoningTokens;
-                    result.totalOutputTokens += doc.metadata.outputTokens;
-                    if (isNew) {
-                        result.documentsGenerated.push(docType);
-                    }
-                    else {
-                        result.documentsUpdated.push(docType);
-                    }
-                }
-                else {
-                    result.documentsSkipped.push(docType);
-                }
+                return { docType, doc, isNew, error: null };
             }
             catch (error) {
+                return { docType, doc: null, isNew: false, error: error };
+            }
+        });
+        // Wait for all documents to complete
+        const results = await Promise.all(generationPromises);
+        // Process results
+        for (const { docType, doc, isNew, error } of results) {
+            if (error) {
                 console.error(`Error generating ${docType}: ${error.message}`);
                 result.errors.push(`${docType}: ${error.message}`);
                 result.success = false;
+                continue;
+            }
+            if (doc) {
+                result.totalReasoningTokens += doc.metadata.reasoningTokens;
+                result.totalOutputTokens += doc.metadata.outputTokens;
+                if (isNew) {
+                    result.documentsGenerated.push(docType);
+                }
+                else {
+                    result.documentsUpdated.push(docType);
+                }
+            }
+            else {
+                result.documentsSkipped.push(docType);
            }
        }
+        console.error(`\n✅ Parallel generation complete: ${result.documentsGenerated.length + result.documentsUpdated.length} docs, ${result.totalReasoningTokens} reasoning + ${result.totalOutputTokens} output tokens`);
        return result;
    }
    /**
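
The hunk above replaces a sequential `for` loop over document types with a concurrent fan-out. The key detail is that each per-document task catches its own error and resolves to a `{ docType, doc, isNew, error }` record, so one failure neither rejects the combined `Promise.all` nor cancels its siblings; the records are then folded into `result` in an ordinary loop. A minimal sketch of that capture-and-resolve pattern (`generate` here is a hypothetical stand-in for the package's `generateDocument`, not its real signature):

```js
// Sketch of the capture-and-resolve fan-out used above. `generate` is a
// hypothetical stand-in for generateDocument, not the package's real API.
async function generateAll(docTypes, generate) {
    const tasks = docTypes.map(async (docType) => {
        try {
            // Resolve to a record instead of throwing, so one failure
            // cannot reject Promise.all and hide the other results.
            return { docType, doc: await generate(docType), error: null };
        }
        catch (error) {
            return { docType, doc: null, error };
        }
    });
    const results = await Promise.all(tasks);
    return {
        succeeded: results.filter((r) => !r.error).map((r) => r.docType),
        failed: results.filter((r) => r.error).map((r) => r.docType),
    };
}
```

Note also why `progress.md` is now read once up front: with concurrent tasks, the `progress` document can no longer rely on an earlier loop iteration having finished writing before it starts.
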
@@ -462,42 +473,53 @@ ${chunk.content}
             totalOutputTokens: 0,
             errors: [],
         };
-        // Get previous progress
+        // Get previous progress (read before parallel generation)
         let previousProgress;
         const progressPath = path.join(this.options.docsPath, "progress.md");
         if (fs.existsSync(progressPath)) {
             previousProgress = fs.readFileSync(progressPath, "utf-8");
         }
-        for (const docType of docsToUpdate) {
+        // Prepare recent chunks for activeContext
+        const recentChunks = [...chunks]
+            .sort((a, b) => b.timestamp - a.timestamp)
+            .slice(0, Math.min(30, chunks.length));
+        console.error(`\n🚀 Updating ${docsToUpdate.length} documents in PARALLEL...`);
+        // Generate docs in parallel
+        const updatePromises = docsToUpdate.map(async (docType) => {
             try {
-                let docChunks = chunks;
-                if (docType === "activeContext") {
-                    docChunks = [...chunks]
-                        .sort((a, b) => b.timestamp - a.timestamp)
-                        .slice(0, Math.min(30, chunks.length));
-                }
+                const docChunks = docType === "activeContext" ? recentChunks : chunks;
                 const existingMetadata = this.metadataCache.get(docType);
                 const isNew = !existingMetadata;
                 const doc = await this.generateDocument(docType, docChunks, true, // Force update for changed docs
                 docType === "progress" ? previousProgress : undefined);
-                if (doc) {
-                    result.totalReasoningTokens += doc.metadata.reasoningTokens;
-                    result.totalOutputTokens += doc.metadata.outputTokens;
-                    if (isNew) {
-                        result.documentsGenerated.push(docType);
-                    }
-                    else {
-                        result.documentsUpdated.push(docType);
-                    }
-                }
-                else {
-                    result.documentsSkipped.push(docType);
-                }
+                return { docType, doc, isNew, error: null };
             }
             catch (error) {
+                return { docType, doc: null, isNew: false, error: error };
+            }
+        });
+        // Wait for all
+        const updateResults = await Promise.all(updatePromises);
+        // Process results
+        for (const { docType, doc, isNew, error } of updateResults) {
+            if (error) {
                 console.error(`Error updating ${docType}: ${error.message}`);
                 result.errors.push(`${docType}: ${error.message}`);
                 result.success = false;
+                continue;
+            }
+            if (doc) {
+                result.totalReasoningTokens += doc.metadata.reasoningTokens;
+                result.totalOutputTokens += doc.metadata.outputTokens;
+                if (isNew) {
+                    result.documentsGenerated.push(docType);
+                }
+                else {
+                    result.documentsUpdated.push(docType);
+                }
+            }
+            else {
+                result.documentsSkipped.push(docType);
            }
        }
        // Mark docs we didn't update as skipped
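
The update path above repeats the same fan-out, with the `force` argument hard-coded to `true` for the changed documents. The hand-rolled try/catch records could also be expressed with the built-in `Promise.allSettled`, which never rejects; a sketch reusing the hypothetical `docTypes` and `generate` stand-ins from the previous example:

```js
// Same isolation via Promise.allSettled: rejections arrive in order as
// { status: "rejected", reason } entries instead of thrown errors.
const outcomes = await Promise.allSettled(docTypes.map((d) => generate(d)));
outcomes.forEach((outcome, i) => {
    if (outcome.status === "rejected") {
        console.error(`Error updating ${docTypes[i]}: ${outcome.reason}`);
    }
});
```

The trade-off is that the fulfilled value would then have to carry `isNew` and the skipped/updated distinction itself, which is roughly what the hand-rolled records already do.
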
@@ -234,34 +234,40 @@ export async function analyzeCoverage(indexManager, vectorStore, workspaceRoot,
         console.error(`Error escaneando archivos: ${error}`);
         throw error;
     }
-    // 2. Get indexed files from vector store
-    console.error("Obteniendo archivos indexados...");
+    // 2. Get ALL chunks from vector store in ONE query (optimized)
+    console.error("Obteniendo todos los chunks indexados (query única)...");
     await vectorStore.initialize();
-    const fileHashes = await vectorStore.getFileHashes();
-    // 3. Get index metadata
-    const indexStats = await indexManager.getStats();
-    // 4. Build indexed files map with chunk counts
+    const queryStart = Date.now();
+    const allChunks = await vectorStore.getAllChunks(projectId);
+    console.error(`Query completada en ${Date.now() - queryStart}ms - ${allChunks.length} chunks`);
+    // 3. Build indexed files map from chunks (in memory - fast)
+    console.error("Procesando chunks en memoria...");
     const indexedFiles = new Map();
-    // Get chunks grouped by file from vector store
-    for (const [filePath, hash] of fileHashes) {
-        const chunks = await vectorStore.getChunksByFile(filePath);
-        if (chunks.length > 0) {
-            indexedFiles.set(filePath, {
-                lastIndexed: chunks[0].timestamp,
-                chunks: chunks.length,
+    for (const chunk of allChunks) {
+        const existing = indexedFiles.get(chunk.file_path);
+        if (!existing) {
+            indexedFiles.set(chunk.file_path, {
+                lastIndexed: chunk.timestamp,
+                chunks: 1,
+                hash: chunk.file_hash,
             });
         }
+        else {
+            existing.chunks++;
+            // Keep most recent timestamp
+            if (chunk.timestamp > existing.lastIndexed) {
+                existing.lastIndexed = chunk.timestamp;
+            }
+        }
     }
-    // 5. Identify pending files (files that changed)
+    // 4. Get index stats
+    const indexStats = await indexManager.getStats();
+    // 5. Identify pending files (files that changed) - in memory comparison
     const pendingFiles = new Set();
     for (const file of allFiles) {
         const indexed = indexedFiles.get(file.path);
-        if (indexed) {
-            // Check if file hash matches
-            const chunks = await vectorStore.getChunksByFile(file.path);
-            if (chunks.length > 0 && chunks[0].file_hash !== file.hash) {
-                pendingFiles.add(file.path);
-            }
+        if (indexed && indexed.hash !== file.hash) {
+            pendingFiles.add(file.path);
        }
    }
    console.error(`Archivos indexados: ${indexedFiles.size}`);
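
This hunk removes an N+1 query pattern in `analyzeCoverage`: the old code issued one `getChunksByFile` query per indexed file, and then a second one per file when checking hashes, while the new code fetches every chunk once with `getAllChunks` and aggregates per-file chunk counts, newest timestamps, and stored hashes in a `Map`, so the pending-file check becomes a pure in-memory hash comparison. Roughly O(files) database round-trips become one round-trip plus O(chunks) of in-memory work. A self-contained sketch of the aggregation (field names follow the chunk shape visible in the diff):

```js
// Group a flat chunk list into per-file stats, as the new code does.
function groupChunksByFile(allChunks) {
    const byFile = new Map();
    for (const chunk of allChunks) {
        const entry = byFile.get(chunk.file_path);
        if (!entry) {
            byFile.set(chunk.file_path, {
                chunks: 1,
                hash: chunk.file_hash,
                lastIndexed: chunk.timestamp,
            });
        }
        else {
            entry.chunks++;
            // Keep the most recent indexing timestamp for the file.
            entry.lastIndexed = Math.max(entry.lastIndexed, chunk.timestamp);
        }
    }
    return byFile;
}

// Example: a file is "pending" when its stored hash differs from disk.
const byFile = groupChunksByFile([
    { file_path: "src/a.ts", file_hash: "h1", timestamp: 2 },
    { file_path: "src/a.ts", file_hash: "h1", timestamp: 5 },
]);
console.log(byFile.get("src/a.ts")); // { chunks: 2, hash: "h1", lastIndexed: 5 }
```
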
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@grec0/memory-bank-mcp",
-  "version": "0.1.3",
+  "version": "0.1.5",
   "description": "MCP server for semantic code indexing with Memory Bank - AI-powered codebase understanding",
   "license": "MIT",
   "author": "@grec0",