@grec0/memory-bank-mcp 0.0.2 → 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -5
- package/dist/common/chunker.js +168 -24
- package/dist/common/fileScanner.js +94 -10
- package/dist/common/indexManager.js +97 -25
- package/dist/common/logger.js +54 -0
- package/dist/common/projectKnowledgeService.js +627 -0
- package/dist/common/vectorStore.js +77 -21
- package/dist/index.js +76 -8
- package/dist/tools/analyzeCoverage.js +1 -1
- package/dist/tools/generateProjectDocs.js +133 -0
- package/dist/tools/getProjectDocs.js +126 -0
- package/dist/tools/index.js +3 -0
- package/dist/tools/searchMemory.js +2 -2
- package/package.json +2 -1
|
@@ -0,0 +1,627 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Project Knowledge Service for Memory Bank
|
|
3
|
+
* Generates and maintains structured project documentation using OpenAI Responses API
|
|
4
|
+
* with reasoning models (gpt-5-mini) for intelligent analysis
|
|
5
|
+
*/
|
|
6
|
+
import OpenAI from "openai";
|
|
7
|
+
import * as fs from "fs";
|
|
8
|
+
import * as path from "path";
|
|
9
|
+
import * as crypto from "crypto";
|
|
10
|
+
/**
 * Document definitions with prompts
 *
 * Maps each document type to its output filename, display title, short
 * description, and the prompt template sent to the model. Every template
 * contains a `{chunks}` placeholder filled with formatted code chunks at
 * generation time; the `progress` template additionally contains a
 * `{previousProgress}` placeholder.
 *
 * Deep-frozen so this shared, module-level prompt state cannot be mutated
 * at runtime.
 */
const DOC_DEFINITIONS = Object.freeze({
    projectBrief: Object.freeze({
        filename: "projectBrief.md",
        title: "Project Brief",
        description: "High-level description of the project, its purpose, and main goals",
        promptTemplate: `Analyze the following code chunks from a software project and generate a comprehensive Project Brief document.

The Project Brief should include:
1. **Project Name**: Infer from package.json, README, or main files
2. **Purpose**: What problem does this project solve?
3. **Main Features**: Key functionalities based on the code
4. **Target Audience**: Who would use this project?
5. **Project Type**: Library, CLI tool, web app, API, etc.

Code chunks to analyze:
{chunks}

Generate a well-structured markdown document. Be specific and base everything on the actual code provided.`,
    }),
    productContext: Object.freeze({
        filename: "productContext.md",
        title: "Product Context",
        description: "Business perspective, user needs, and product requirements",
        promptTemplate: `Analyze the following code chunks and generate a Product Context document.

Focus on:
1. **User Stories**: What can users do with this product?
2. **Business Logic**: Key business rules implemented in the code
3. **User Interface**: If applicable, describe UI components and flows
4. **Integration Points**: External services, APIs, or systems it connects to
5. **Data Models**: Key entities and their relationships

Code chunks to analyze:
{chunks}

Generate a markdown document that would help a product manager understand this project.`,
    }),
    systemPatterns: Object.freeze({
        filename: "systemPatterns.md",
        title: "System Patterns",
        description: "Architecture decisions, design patterns, and code organization",
        promptTemplate: `Analyze the following code chunks and document the System Patterns used.

Document:
1. **Architecture Style**: MVC, microservices, monolith, etc.
2. **Design Patterns**: Singleton, Factory, Observer, etc.
3. **Code Organization**: How files and modules are structured
4. **Naming Conventions**: Patterns in naming files, functions, classes
5. **Error Handling**: How errors are managed across the codebase
6. **State Management**: How state is handled (if applicable)

Code chunks to analyze:
{chunks}

Generate a technical markdown document for developers to understand the architectural decisions.`,
    }),
    techContext: Object.freeze({
        filename: "techContext.md",
        title: "Technical Context",
        description: "Technology stack, dependencies, and development environment",
        promptTemplate: `Analyze the following code chunks and generate a Technical Context document.

Include:
1. **Programming Languages**: Languages used and their versions
2. **Frameworks**: Main frameworks (React, Express, etc.)
3. **Dependencies**: Key libraries and their purposes
4. **Development Tools**: Build tools, linters, formatters
5. **Runtime Requirements**: Node version, environment variables, etc.
6. **Database/Storage**: Data persistence solutions used
7. **Testing**: Testing frameworks and strategies

Code chunks to analyze:
{chunks}

Generate a markdown document useful for setting up the development environment.`,
    }),
    activeContext: Object.freeze({
        filename: "activeContext.md",
        title: "Active Context",
        description: "Current development state, recent changes, and work in progress",
        promptTemplate: `Analyze the following recently modified code chunks and generate an Active Context document.

Document:
1. **Recent Changes**: What parts of the code were recently modified?
2. **Work in Progress**: Features or fixes that appear incomplete
3. **Hot Areas**: Parts of the code with high activity
4. **Potential Issues**: Code that might need attention (TODOs, FIXMEs)
5. **Current Focus**: What seems to be the current development focus?

Recent code chunks:
{chunks}

Generate a markdown document that helps developers understand the current state of development.`,
    }),
    progress: Object.freeze({
        filename: "progress.md",
        title: "Progress Tracking",
        description: "Development progress, milestones, and change history",
        promptTemplate: `Based on the indexed code and previous progress data, generate a Progress document.

Include:
1. **Indexing Summary**: Files and chunks indexed
2. **Code Statistics**: Lines of code, languages breakdown
3. **Recent Activity**: Summary of recent indexing sessions
4. **Coverage**: What parts of the project are indexed
5. **Recommendations**: Suggestions for improving coverage

Current indexing data:
{chunks}

Previous progress data:
{previousProgress}

Generate a markdown document tracking project documentation progress.`,
    }),
});
|
|
129
|
+
/**
 * Project Knowledge Service
 * Uses OpenAI Responses API with reasoning models to generate project documentation
 */
export class ProjectKnowledgeService {
    // OpenAI SDK client used for all generation requests.
    client;
    // Resolved options: model, reasoningEffort, docsPath, enableSummary, maxChunksPerDoc.
    options;
    // Map<docType, metadata> mirroring metadata.json on disk.
    metadataCache;
    /**
     * @param {string} apiKey - OpenAI API key (required).
     * @param {object} [options] - Optional overrides (model, reasoningEffort,
     *   docsPath, enableSummary, maxChunksPerDoc).
     * @throws {Error} When no API key is provided.
     */
    constructor(apiKey, options) {
        if (!apiKey) {
            throw new Error("OpenAI API key is required for Project Knowledge Service");
        }
        this.client = new OpenAI({ apiKey });
        this.options = {
            model: options?.model || "gpt-5-mini",
            reasoningEffort: options?.reasoningEffort || "medium",
            docsPath: options?.docsPath || ".memorybank/project-docs",
            // `!== undefined` (not || / ??) so an explicit `false` is honored.
            enableSummary: options?.enableSummary !== undefined ? options.enableSummary : true,
            maxChunksPerDoc: options?.maxChunksPerDoc || 50,
        };
        this.metadataCache = new Map();
        this.ensureDocsDirectory();
        this.loadMetadata();
    }
    /**
     * Ensures the docs directory exists, creating it recursively if needed.
     * Logging goes to stderr (console.error) throughout this class, presumably
     * to keep stdout clean for MCP protocol traffic.
     */
    ensureDocsDirectory() {
        if (!fs.existsSync(this.options.docsPath)) {
            fs.mkdirSync(this.options.docsPath, { recursive: true });
            console.error(`Created project docs directory: ${this.options.docsPath}`);
        }
    }
    /**
     * Loads per-document metadata from metadata.json into the in-memory cache.
     * Best-effort: a missing or corrupt file only disables change detection.
     */
    loadMetadata() {
        const metadataPath = path.join(this.options.docsPath, "metadata.json");
        try {
            if (fs.existsSync(metadataPath)) {
                const data = JSON.parse(fs.readFileSync(metadataPath, "utf-8"));
                for (const [type, metadata] of Object.entries(data)) {
                    this.metadataCache.set(type, metadata);
                }
                console.error(`Loaded metadata for ${this.metadataCache.size} project documents`);
            }
        }
        catch (error) {
            console.error(`Warning: Could not load project docs metadata: ${error}`);
        }
    }
    /**
     * Persists the metadata cache to metadata.json (best-effort; failures are
     * logged but not fatal, so document generation is never blocked).
     */
    saveMetadata() {
        const metadataPath = path.join(this.options.docsPath, "metadata.json");
        try {
            const data = Object.fromEntries(this.metadataCache);
            fs.writeFileSync(metadataPath, JSON.stringify(data, null, 2));
        }
        catch (error) {
            console.error(`Warning: Could not save project docs metadata: ${error}`);
        }
    }
    /**
     * Generates an order-insensitive fingerprint of the input chunks
     * (file path + file hash), used to skip regeneration when inputs have
     * not changed.
     * @param {Array<{file_path: string, file_hash: string}>} chunks
     * @returns {string} 32-char md5 hex digest (change detection only — md5
     *   is fine here because this is not a security boundary).
     */
    hashChunks(chunks) {
        const content = chunks
            .map(c => `${c.file_path}:${c.file_hash}`)
            .sort()
            .join("|");
        return crypto.createHash("md5").update(content).digest("hex");
    }
    /**
     * Selects up to `maxChunks` chunks (entry/config files first) and formats
     * them as delimited sections for inclusion in a prompt.
     * @param {Array<object>} chunks - Indexed code chunks.
     * @param {number} maxChunks - Upper bound on chunks included.
     * @returns {string} Prompt-ready text, one delimited section per chunk.
     */
    prepareChunksForPrompt(chunks, maxChunks) {
        // Files whose basename matches an earlier pattern rank higher;
        // unmatched files keep their relative order (comparator returns 0).
        const priorityFiles = ["package.json", "readme", "index", "main", "app"];
        const sorted = [...chunks].sort((a, b) => {
            const aName = path.basename(a.file_path).toLowerCase();
            const bName = path.basename(b.file_path).toLowerCase();
            const aPriority = priorityFiles.findIndex(p => aName.includes(p));
            const bPriority = priorityFiles.findIndex(p => bName.includes(p));
            if (aPriority !== -1 && bPriority === -1)
                return -1;
            if (aPriority === -1 && bPriority !== -1)
                return 1;
            if (aPriority !== -1 && bPriority !== -1)
                return aPriority - bPriority;
            return 0;
        });
        const selected = sorted.slice(0, maxChunks);
        return selected.map(chunk => {
            return `--- File: ${chunk.file_path} (${chunk.language}) [${chunk.chunk_type}${chunk.name ? `: ${chunk.name}` : ""}] ---
${chunk.content}
---`;
        }).join("\n\n");
    }
    /**
     * Calls the OpenAI Responses API with reasoning enabled.
     * Falls back to the Chat Completions API when the Responses API or the
     * configured model is unavailable (404 / model_not_found).
     * @param {string} prompt - Fully rendered prompt text.
     * @returns {Promise<{content: string, reasoningTokens: number, outputTokens: number, summary?: string}>}
     */
    async callResponsesAPI(prompt) {
        try {
            const response = await this.client.responses.create({
                model: this.options.model,
                reasoning: {
                    effort: this.options.reasoningEffort,
                    summary: this.options.enableSummary ? "auto" : undefined,
                },
                input: [
                    {
                        role: "user",
                        content: prompt,
                    },
                ],
                max_output_tokens: 16000,
            });
            // The Responses API returns a list of output items; collect the
            // message text and any reasoning summary separately.
            let content = "";
            let summary = "";
            for (const item of response.output || []) {
                if (item.type === "message" && item.content) {
                    for (const contentItem of item.content) {
                        if (contentItem.type === "output_text") {
                            content += contentItem.text;
                        }
                    }
                }
                else if (item.type === "reasoning" && item.summary) {
                    for (const summaryItem of item.summary) {
                        if (summaryItem.type === "summary_text") {
                            summary += summaryItem.text;
                        }
                    }
                }
            }
            const reasoningTokens = response.usage?.output_tokens_details?.reasoning_tokens || 0;
            const outputTokens = response.usage?.output_tokens || 0;
            return {
                content,
                reasoningTokens,
                outputTokens,
                summary: summary || undefined,
            };
        }
        catch (error) {
            // Narrow fallback: only when the API/model is unavailable.
            // Any other error (auth, rate limit, network) is rethrown.
            if (error?.status === 404 || error?.code === "model_not_found") {
                console.error("Responses API not available, falling back to Chat Completions API");
                return this.callChatCompletionsAPI(prompt);
            }
            throw error;
        }
    }
    /**
     * Fallback path using the Chat Completions API with a standard model.
     * @param {string} prompt - Fully rendered prompt text.
     * @returns {Promise<{content: string, reasoningTokens: number, outputTokens: number}>}
     */
    async callChatCompletionsAPI(prompt) {
        const fallbackModel = "gpt-4o";
        const response = await this.client.chat.completions.create({
            model: fallbackModel,
            messages: [
                {
                    role: "system",
                    content: "You are a technical documentation expert. Generate well-structured markdown documentation based on code analysis.",
                },
                {
                    role: "user",
                    content: prompt,
                },
            ],
            max_tokens: 8000,
        });
        return {
            content: response.choices[0]?.message?.content || "",
            reasoningTokens: 0, // Chat API doesn't have reasoning tokens
            outputTokens: response.usage?.completion_tokens || 0,
        };
    }
    /**
     * Generates a single document, writes it to disk, and updates metadata.
     * @param {string} type - Key into DOC_DEFINITIONS.
     * @param {Array<object>} chunks - Chunks to analyze.
     * @param {boolean} [force=false] - Regenerate even when inputs are unchanged.
     * @param {string} [previousProgress] - Prior progress.md content (progress doc only).
     * @returns {Promise<object|null>} The generated document, or null when skipped.
     */
    async generateDocument(type, chunks, force = false, previousProgress) {
        const definition = DOC_DEFINITIONS[type];
        const inputHash = this.hashChunks(chunks);
        // Skip work when the same inputs already produced this document.
        const existingMetadata = this.metadataCache.get(type);
        if (!force && existingMetadata && existingMetadata.lastInputHash === inputHash) {
            console.error(`Skipping ${type}: No changes detected`);
            return null;
        }
        console.error(`Generating document: ${definition.title}`);
        console.error(`  Input chunks: ${chunks.length}`);
        const chunksText = this.prepareChunksForPrompt(chunks, this.options.maxChunksPerDoc);
        console.error(`  Chunks text length: ${chunksText.length} chars`);
        // BUGFIX: use function replacers so `$`-sequences inside code content
        // ("$&", "$'", "$1", ...) are inserted literally instead of being
        // interpreted as special replacement patterns by String.prototype.replace.
        let prompt = definition.promptTemplate.replace("{chunks}", () => chunksText);
        const progressText = type === "progress" && previousProgress
            ? previousProgress
            : "No previous progress data available.";
        // No-op for templates without the {previousProgress} placeholder.
        prompt = prompt.replace("{previousProgress}", () => progressText);
        const result = await this.callResponsesAPI(prompt);
        const doc = {
            type,
            content: `# ${definition.title}\n\n${result.content}`,
            metadata: {
                type,
                lastGenerated: Date.now(),
                lastInputHash: inputHash,
                reasoningTokens: result.reasoningTokens,
                outputTokens: result.outputTokens,
            },
        };
        const docPath = path.join(this.options.docsPath, definition.filename);
        fs.writeFileSync(docPath, doc.content);
        this.metadataCache.set(type, doc.metadata);
        this.saveMetadata();
        console.error(`Generated ${definition.title} (${result.reasoningTokens} reasoning + ${result.outputTokens} output tokens)`);
        return doc;
    }
    /**
     * Reads the previously generated progress.md, if any.
     * @returns {string|undefined} File content, or undefined when absent.
     */
    readPreviousProgress() {
        const progressPath = path.join(this.options.docsPath, "progress.md");
        return fs.existsSync(progressPath)
            ? fs.readFileSync(progressPath, "utf-8")
            : undefined;
    }
    /**
     * Shared generation loop used by generateAllDocuments and updateDocuments
     * (previously duplicated in both). Per-document failures are collected in
     * result.errors so one failure does not abort the batch.
     * @param {string[]} docTypes - Document types to generate, in order.
     * @param {Array<object>} chunks - Chunks to analyze.
     * @param {boolean} force - Regenerate even when inputs are unchanged.
     * @returns {Promise<object>} Aggregate result (generated/updated/skipped/tokens/errors).
     */
    async generateDocumentSet(docTypes, chunks, force) {
        const result = {
            success: true,
            documentsGenerated: [],
            documentsUpdated: [],
            documentsSkipped: [],
            totalReasoningTokens: 0,
            totalOutputTokens: 0,
            errors: [],
        };
        const previousProgress = this.readPreviousProgress();
        for (const docType of docTypes) {
            try {
                // activeContext looks only at the most recently indexed chunks.
                let docChunks = chunks;
                if (docType === "activeContext") {
                    docChunks = [...chunks]
                        .sort((a, b) => b.timestamp - a.timestamp)
                        .slice(0, Math.min(30, chunks.length));
                }
                // Capture before generation: generateDocument writes metadata.
                const isNew = !this.metadataCache.get(docType);
                const doc = await this.generateDocument(docType, docChunks, force, docType === "progress" ? previousProgress : undefined);
                if (doc) {
                    result.totalReasoningTokens += doc.metadata.reasoningTokens;
                    result.totalOutputTokens += doc.metadata.outputTokens;
                    (isNew ? result.documentsGenerated : result.documentsUpdated).push(docType);
                }
                else {
                    result.documentsSkipped.push(docType);
                }
            }
            catch (error) {
                // Guard against non-Error throws, which have no .message.
                const message = error instanceof Error ? error.message : String(error);
                console.error(`Error generating ${docType}: ${message}`);
                result.errors.push(`${docType}: ${message}`);
                result.success = false;
            }
        }
        return result;
    }
    /**
     * Generates all project documents in dependency-friendly order.
     * @param {Array<object>} chunks - Chunks to analyze.
     * @param {boolean} [force=false] - Regenerate even when inputs are unchanged.
     * @returns {Promise<object>} Aggregate result.
     */
    async generateAllDocuments(chunks, force = false) {
        // Order matters conceptually: tech stack first, progress last
        // (progress consumes the previous progress.md).
        const docOrder = [
            "techContext", // Foundation - understand the tech stack first
            "projectBrief", // High-level overview
            "systemPatterns", // Architecture
            "productContext", // Business/user perspective
            "activeContext", // Current state
            "progress", // Progress tracking (last, uses previous data)
        ];
        return this.generateDocumentSet(docOrder, chunks, force);
    }
    /**
     * Regenerates only the documents plausibly affected by the given changed
     * files; everything else is reported as skipped.
     * @param {Array<object>} chunks - Current chunk set.
     * @param {string[]} changedFiles - Paths of files that changed.
     * @returns {Promise<object>} Aggregate result.
     */
    async updateDocuments(chunks, changedFiles) {
        console.error(`updateDocuments called with ${chunks.length} chunks and ${changedFiles.length} changed files`);
        if (chunks.length > 0) {
            const firstChunk = chunks[0];
            console.error(`First chunk: ${firstChunk.file_path}, content length: ${firstChunk.content?.length || 0}`);
        }
        // activeContext and progress always refresh when anything changed.
        const docsToUpdate = ["activeContext", "progress"];
        const lowered = changedFiles.map(f => f.toLowerCase());
        // Config/package changes -> tech stack may have changed.
        const configPatterns = ["package.json", "tsconfig", ".env", "config"];
        if (lowered.some(f => configPatterns.some(p => f.includes(p)))) {
            docsToUpdate.push("techContext");
        }
        // Entry-point/README changes -> project overview may have changed.
        const entryPatterns = ["index", "main", "app", "readme"];
        if (lowered.some(f => entryPatterns.some(p => f.includes(p)))) {
            docsToUpdate.push("projectBrief");
        }
        // Broad change sets warrant an architecture/product refresh.
        if (changedFiles.length > 5) {
            docsToUpdate.push("systemPatterns", "productContext");
        }
        // Force regeneration for the selected docs.
        const result = await this.generateDocumentSet(docsToUpdate, chunks, true);
        // Report every doc type we chose not to touch as skipped.
        for (const docType of Object.keys(DOC_DEFINITIONS)) {
            if (!docsToUpdate.includes(docType)) {
                result.documentsSkipped.push(docType);
            }
        }
        return result;
    }
    /**
     * Reads a project document from disk.
     * @param {string} type - Key into DOC_DEFINITIONS.
     * @returns {object|null} Document with metadata, or null when not generated yet.
     */
    getDocument(type) {
        const definition = DOC_DEFINITIONS[type];
        const docPath = path.join(this.options.docsPath, definition.filename);
        if (!fs.existsSync(docPath)) {
            return null;
        }
        const content = fs.readFileSync(docPath, "utf-8");
        const metadata = this.metadataCache.get(type);
        return {
            type,
            content,
            // File exists but metadata was lost: report zeroed placeholders.
            metadata: metadata || {
                type,
                lastGenerated: 0,
                lastInputHash: "",
                reasoningTokens: 0,
                outputTokens: 0,
            },
        };
    }
    /**
     * Reads all project documents that exist on disk.
     * @returns {Array<object>} Documents in DOC_DEFINITIONS key order.
     */
    getAllDocuments() {
        const docs = [];
        for (const type of Object.keys(DOC_DEFINITIONS)) {
            const doc = this.getDocument(type);
            if (doc) {
                docs.push(doc);
            }
        }
        return docs;
    }
    /**
     * Builds a markdown summary of all documents (useful for context loading).
     * @returns {string} Summary text, or a hint when nothing is generated yet.
     */
    getDocumentsSummary() {
        const docs = this.getAllDocuments();
        if (docs.length === 0) {
            return "No project documentation has been generated yet. Use memorybank_generate_project_docs to generate documentation.";
        }
        let summary = "# Project Documentation Summary\n\n";
        for (const doc of docs) {
            const definition = DOC_DEFINITIONS[doc.type];
            const lastGenerated = doc.metadata.lastGenerated
                ? new Date(doc.metadata.lastGenerated).toISOString()
                : "Unknown";
            summary += `## ${definition.title}\n`;
            summary += `*Last generated: ${lastGenerated}*\n\n`;
            // Preview: skip the title line, take the next 5 non-empty lines.
            const lines = doc.content.split("\n").filter(l => l.trim());
            const preview = lines.slice(1, 6).join("\n");
            summary += preview + "\n\n---\n\n";
        }
        return summary;
    }
    /**
     * @returns {boolean} True when at least one document has metadata.
     */
    hasDocuments() {
        return this.metadataCache.size > 0;
    }
    /**
     * Aggregates statistics about generated documents.
     * @returns {{documentCount: number, totalReasoningTokens: number, totalOutputTokens: number, lastGenerated: Date|undefined, documents: object}}
     */
    getStats() {
        let totalReasoningTokens = 0;
        let totalOutputTokens = 0;
        let lastGenerated = 0;
        const documents = {};
        for (const type of Object.keys(DOC_DEFINITIONS)) {
            const metadata = this.metadataCache.get(type);
            documents[type] = {
                exists: !!metadata,
                lastGenerated: metadata ? new Date(metadata.lastGenerated) : undefined,
            };
            if (metadata) {
                totalReasoningTokens += metadata.reasoningTokens;
                totalOutputTokens += metadata.outputTokens;
                if (metadata.lastGenerated > lastGenerated) {
                    lastGenerated = metadata.lastGenerated;
                }
            }
        }
        return {
            documentCount: this.metadataCache.size,
            totalReasoningTokens,
            totalOutputTokens,
            lastGenerated: lastGenerated > 0 ? new Date(lastGenerated) : undefined,
            documents: documents,
        };
    }
}
|
|
611
|
+
/**
 * Creates a Project Knowledge Service from environment variables
 *
 * Reads OPENAI_API_KEY (required), plus optional MEMORYBANK_STORAGE_PATH,
 * MEMORYBANK_REASONING_MODEL and MEMORYBANK_REASONING_EFFORT overrides.
 * @returns {ProjectKnowledgeService} Configured service instance.
 * @throws {Error} When OPENAI_API_KEY is not set.
 */
export function createProjectKnowledgeService() {
    const { OPENAI_API_KEY, MEMORYBANK_STORAGE_PATH, MEMORYBANK_REASONING_MODEL, MEMORYBANK_REASONING_EFFORT } = process.env;
    if (!OPENAI_API_KEY) {
        throw new Error("OPENAI_API_KEY environment variable is required. Get your API key from https://platform.openai.com/api-keys");
    }
    // `||` (not ??) is deliberate here: an env var set to "" falls back too.
    const baseDir = MEMORYBANK_STORAGE_PATH || ".memorybank";
    return new ProjectKnowledgeService(OPENAI_API_KEY, {
        model: MEMORYBANK_REASONING_MODEL || "gpt-5-mini",
        reasoningEffort: MEMORYBANK_REASONING_EFFORT || "medium",
        docsPath: path.join(baseDir, "project-docs"),
        enableSummary: true,
    });
}
|