@asd412id/mcp-context-manager 1.0.10 → 1.0.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +34 -3
- package/dist/prompts.js +88 -4
- package/dist/tools/context.d.ts +23 -0
- package/dist/tools/context.js +229 -0
- package/dist/tools/context.test.d.ts +1 -0
- package/dist/tools/context.test.js +44 -0
- package/dist/tools/memory.d.ts +14 -0
- package/dist/tools/memory.js +130 -0
- package/dist/tools/memory.test.d.ts +1 -0
- package/dist/tools/memory.test.js +27 -0
- package/dist/tools/session.js +31 -6
- package/package.json +2 -1
package/README.md
CHANGED
|
@@ -10,6 +10,32 @@ MCP (Model Context Protocol) tools for context management in AI coding agents. H
|
|
|
10
10
|
- **Session Checkpoint** - Save/restore session state
|
|
11
11
|
- **Smart File Loader** - Load files with relevance filtering
|
|
12
12
|
|
|
13
|
+
## Agent Skill (Recommended)
|
|
14
|
+
|
|
15
|
+
For optimal agent performance, use the included **`SKILL.md`** file as agent instructions. This skill teaches agents to:
|
|
16
|
+
|
|
17
|
+
- **Minimize context window usage** - Free up RAM by offloading data to persistent storage
|
|
18
|
+
- **Auto-save progress** - Checkpoint every 10-15 messages
|
|
19
|
+
- **Track decisions & changes** - Maintain project history
|
|
20
|
+
- **Handle long sessions** - Seamless handoff to new sessions when context gets full
|
|
21
|
+
|
|
22
|
+
### How to Use the Skill
|
|
23
|
+
|
|
24
|
+
**Option 1: Claude Code / Cline / Cursor**
|
|
25
|
+
Place `SKILL.md` in your project root or reference it in your agent configuration.
|
|
26
|
+
|
|
27
|
+
**Option 2: Custom Agents**
|
|
28
|
+
Include the content of `SKILL.md` in your system prompt or agent instructions.
|
|
29
|
+
|
|
30
|
+
**Option 3: Direct Reference**
|
|
31
|
+
Tell your agent: *"Load and follow the instructions in SKILL.md"*
|
|
32
|
+
|
|
33
|
+
The skill provides:
|
|
34
|
+
- Mandatory triggers for when to use each tool
|
|
35
|
+
- Context cleanup strategies to reduce LLM memory usage
|
|
36
|
+
- Best practices for minimal context consumption
|
|
37
|
+
- Complete tool reference with examples
|
|
38
|
+
|
|
13
39
|
## Installation
|
|
14
40
|
|
|
15
41
|
Requires Node.js >= 18.0.0
|
|
@@ -241,7 +267,7 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
241
267
|
|
|
242
268
|
| Prompt | Description |
|
|
243
269
|
|--------|-------------|
|
|
244
|
-
| `ctx-init` | Load context from previous session |
|
|
270
|
+
| `ctx-init` | Load context from previous session with context management instructions |
|
|
245
271
|
| `ctx-save` | Save current state to checkpoint |
|
|
246
272
|
| `ctx-remember` | Save important info to memory |
|
|
247
273
|
| `ctx-todo` | Add a todo item |
|
|
@@ -249,8 +275,10 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
249
275
|
| `ctx-status` | Show project status |
|
|
250
276
|
| `ctx-compress` | Compress long context |
|
|
251
277
|
| `ctx-recall` | Search in memory |
|
|
278
|
+
| `ctx-cleanup` | **Free up context window space and reduce RAM usage** |
|
|
279
|
+
| `ctx-handoff` | **Generate handoff document for new session** |
|
|
252
280
|
|
|
253
|
-
## Available Tools (
|
|
281
|
+
## Available Tools (33 tools)
|
|
254
282
|
|
|
255
283
|
### Session Management
|
|
256
284
|
|
|
@@ -258,7 +286,7 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
258
286
|
|------|-------------|
|
|
259
287
|
| `session_init` | Initialize session - loads checkpoint, tracker, memories, detects project in ONE call |
|
|
260
288
|
| `session_handoff` | Generate compact markdown handoff document for seamless session continuation |
|
|
261
|
-
| `project_detect` | Auto-detect project info
|
|
289
|
+
| `project_detect` | Auto-detect project info (supports 20+ languages: Node, Python, Rust, Go, Java, PHP, Ruby, Elixir, .NET, Swift, Kotlin, Dart, Haskell, Scala, Clojure, Lua, Zig, Nim, V, Deno, Bun) |
|
|
262
290
|
|
|
263
291
|
### Memory Store
|
|
264
292
|
|
|
@@ -272,6 +300,7 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
272
300
|
| `memory_list` | List all memory keys |
|
|
273
301
|
| `memory_clear` | Clear memory (all/by tags) |
|
|
274
302
|
| `memory_cleanup` | Remove expired memory entries |
|
|
303
|
+
| `memory_capture_candidates` | Extract and optionally persist important memory candidates from text |
|
|
275
304
|
|
|
276
305
|
### Context Summarizer
|
|
277
306
|
|
|
@@ -282,6 +311,7 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
282
311
|
| `context_list_summaries` | List all summaries |
|
|
283
312
|
| `context_merge_summaries` | Merge multiple summaries |
|
|
284
313
|
| `context_status` | Get storage stats and token usage estimate |
|
|
314
|
+
| `context_prune_smart` | Score context items, keep high-signal, summarize/prune low-signal |
|
|
285
315
|
| `store_health` | Check store integrity and recommendations |
|
|
286
316
|
|
|
287
317
|
### Project Tracker
|
|
@@ -290,6 +320,7 @@ To specify a custom path for storing context data, add environment variables.
|
|
|
290
320
|
|------|-------------|
|
|
291
321
|
| `tracker_log` | Log decision/change/todo/note/error |
|
|
292
322
|
| `tracker_status` | Get project status overview |
|
|
323
|
+
| `tracker_get` | Get specific tracker entry by ID |
|
|
293
324
|
| `tracker_todo_update` | Update todo status |
|
|
294
325
|
| `tracker_search` | Search tracker entries |
|
|
295
326
|
| `tracker_set_project` | Set project name |
|
package/dist/prompts.js
CHANGED
|
@@ -1,15 +1,49 @@
|
|
|
1
1
|
import * as z from 'zod';
|
|
2
|
+
// Built-in instructions for context management and RAM optimization
|
|
3
|
+
const CONTEXT_MANAGEMENT_INSTRUCTIONS = `
|
|
4
|
+
## Context Management Best Practices
|
|
5
|
+
|
|
6
|
+
**To minimize context window usage and free RAM:**
|
|
7
|
+
|
|
8
|
+
1. **Recall before reading** - Use memory_search()/tracker_search() first, then read files only if needed
|
|
9
|
+
2. **Offload to persistent storage** - Use memory_set() or memory_capture_candidates() for important findings
|
|
10
|
+
3. **Read files efficiently** - Use file_smart_read(structureOnly:true) or file_smart_read(keywords:[...])
|
|
11
|
+
4. **Smart prune context** - Use context_prune_smart() to keep high-signal items and prune low-signal chatter
|
|
12
|
+
5. **Checkpoint regularly** - Save state every 10-15 messages with checkpoint_save()
|
|
13
|
+
|
|
14
|
+
**When to cleanup (check with context_status()):**
|
|
15
|
+
- Token usage >50% → Run context_prune_smart() and summarize verbose content
|
|
16
|
+
- Token usage >70% → Checkpoint and consider handoff
|
|
17
|
+
- Token usage >85% → MUST handoff to new session
|
|
18
|
+
|
|
19
|
+
**Cleanup workflow:**
|
|
20
|
+
1. context_status() → Check token usage
|
|
21
|
+
2. context_prune_smart(items, mode:"hybrid") → Score and prune low-signal context
|
|
22
|
+
3. context_summarize(longText) → Compress high-value verbose content
|
|
23
|
+
4. memory_capture_candidates(text, dryRun:false) → Persist important facts automatically
|
|
24
|
+
5. checkpoint_save() → Save state
|
|
25
|
+
6. session_handoff() → Generate handoff doc
|
|
26
|
+
`;
|
|
2
27
|
export function registerPrompts(server) {
|
|
3
|
-
// ctx-init - Load previous context
|
|
28
|
+
// ctx-init - Load previous context with instructions
|
|
4
29
|
server.registerPrompt('ctx-init', {
|
|
5
30
|
title: 'Init Session',
|
|
6
|
-
description: 'Load previous context at session start'
|
|
31
|
+
description: 'Load previous context at session start with context management instructions'
|
|
7
32
|
}, () => ({
|
|
8
33
|
messages: [{
|
|
9
34
|
role: 'user',
|
|
10
35
|
content: {
|
|
11
36
|
type: 'text',
|
|
12
|
-
text: `Initialize session with session_init() - loads checkpoint, tracker status, memories, and detects project in one call
|
|
37
|
+
text: `Initialize session with session_init() - loads checkpoint, tracker status, memories, and detects project in one call.
|
|
38
|
+
|
|
39
|
+
${CONTEXT_MANAGEMENT_INSTRUCTIONS}
|
|
40
|
+
|
|
41
|
+
**After init, follow these rules:**
|
|
42
|
+
- Log decisions with tracker_log(type:"decision")
|
|
43
|
+
- Log file changes with tracker_log(type:"change")
|
|
44
|
+
- Capture important findings with memory_capture_candidates() or memory_set()
|
|
45
|
+
- Prune processed context with context_prune_smart()
|
|
46
|
+
- Checkpoint every 10-15 messages`
|
|
13
47
|
}
|
|
14
48
|
}]
|
|
15
49
|
}));
|
|
@@ -99,7 +133,7 @@ export function registerPrompts(server) {
|
|
|
99
133
|
role: 'user',
|
|
100
134
|
content: {
|
|
101
135
|
type: 'text',
|
|
102
|
-
text: `Context getting long.
|
|
136
|
+
text: `Context getting long. Run context_prune_smart() first, summarize with context_summarize(), persist key info via memory_capture_candidates(), then save checkpoint.`
|
|
103
137
|
}
|
|
104
138
|
}]
|
|
105
139
|
}));
|
|
@@ -121,4 +155,54 @@ export function registerPrompts(server) {
|
|
|
121
155
|
}
|
|
122
156
|
}]
|
|
123
157
|
}));
|
|
158
|
+
// ctx-cleanup - Cleanup context and free RAM
|
|
159
|
+
server.registerPrompt('ctx-cleanup', {
|
|
160
|
+
title: 'Cleanup Context',
|
|
161
|
+
description: 'Free up context window space and reduce RAM usage'
|
|
162
|
+
}, () => ({
|
|
163
|
+
messages: [{
|
|
164
|
+
role: 'user',
|
|
165
|
+
content: {
|
|
166
|
+
type: 'text',
|
|
167
|
+
text: `Cleanup context to free RAM and reduce token usage.
|
|
168
|
+
|
|
169
|
+
**Execute this workflow:**
|
|
170
|
+
1. context_status() → Check current token usage estimate
|
|
171
|
+
2. Identify verbose content in conversation that can be summarized
|
|
172
|
+
3. context_prune_smart(items, mode:"hybrid") → Keep high-signal and prune low-signal context
|
|
173
|
+
4. context_summarize(verboseText, maxLength:1000) → Compress long content
|
|
174
|
+
5. memory_capture_candidates(text, dryRun:false) → Offload important data automatically
|
|
175
|
+
6. checkpoint_save(name, state) → Save current session state
|
|
176
|
+
7. If token usage >70%, run session_handoff() to prepare for new session
|
|
177
|
+
|
|
178
|
+
**Tips to reduce context:**
|
|
179
|
+
- Use file_smart_read(structureOnly:true) instead of reading full files
|
|
180
|
+
- Use file_smart_read(keywords:[...]) to read only relevant sections
|
|
181
|
+
- Store discovered info in memory_capture_candidates()/memory_set() instead of keeping in context
|
|
182
|
+
- Don't re-read files - use memory_get() for cached data`
|
|
183
|
+
}
|
|
184
|
+
}]
|
|
185
|
+
}));
|
|
186
|
+
// ctx-handoff - Generate handoff for new session
|
|
187
|
+
server.registerPrompt('ctx-handoff', {
|
|
188
|
+
title: 'Session Handoff',
|
|
189
|
+
description: 'Generate handoff document for seamless session continuation'
|
|
190
|
+
}, () => ({
|
|
191
|
+
messages: [{
|
|
192
|
+
role: 'user',
|
|
193
|
+
content: {
|
|
194
|
+
type: 'text',
|
|
195
|
+
text: `Context is getting long. Generate a handoff document for continuing in a new session.
|
|
196
|
+
|
|
197
|
+
**Execute:**
|
|
198
|
+
1. checkpoint_save() → Save current state
|
|
199
|
+
2. session_handoff() → Generate compact markdown handoff
|
|
200
|
+
|
|
201
|
+
**Instructions for new session:**
|
|
202
|
+
1. Start new conversation
|
|
203
|
+
2. Paste the handoff document
|
|
204
|
+
3. Run session_init() to restore full context`
|
|
205
|
+
}
|
|
206
|
+
}]
|
|
207
|
+
}));
|
|
124
208
|
}
|
package/dist/tools/context.d.ts
CHANGED
|
@@ -1,2 +1,25 @@
|
|
|
1
1
|
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
2
|
+
interface SmartContextItem {
|
|
3
|
+
id: string;
|
|
4
|
+
text: string;
|
|
5
|
+
source?: string;
|
|
6
|
+
timestamp?: string;
|
|
7
|
+
pinned?: boolean;
|
|
8
|
+
}
|
|
9
|
+
declare function summarizeForPrune(text: string, maxLength: number): string;
|
|
10
|
+
declare function scoreSmartContextItem(item: SmartContextItem, now: number): {
|
|
11
|
+
score: number;
|
|
12
|
+
signals: string[];
|
|
13
|
+
};
|
|
14
|
+
declare function extractPruneMemoryCandidates(text: string): Array<{
|
|
15
|
+
keyHint: string;
|
|
16
|
+
reason: string;
|
|
17
|
+
value: string;
|
|
18
|
+
}>;
|
|
19
|
+
export declare const __contextTestables: {
|
|
20
|
+
summarizeForPrune: typeof summarizeForPrune;
|
|
21
|
+
scoreSmartContextItem: typeof scoreSmartContextItem;
|
|
22
|
+
extractPruneMemoryCandidates: typeof extractPruneMemoryCandidates;
|
|
23
|
+
};
|
|
2
24
|
export declare function registerContextTools(server: McpServer): void;
|
|
25
|
+
export {};
|
package/dist/tools/context.js
CHANGED
|
@@ -78,6 +78,121 @@ async function getBackupStats(basePath) {
|
|
|
78
78
|
}
|
|
79
79
|
return stats;
|
|
80
80
|
}
|
|
81
|
+
function summarizeForPrune(text, maxLength) {
|
|
82
|
+
if (text.length <= maxLength)
|
|
83
|
+
return text;
|
|
84
|
+
const sentences = text.match(/[^.!?\n]+[.!?]?/g) || [text];
|
|
85
|
+
const important = [];
|
|
86
|
+
const normal = [];
|
|
87
|
+
for (const sentenceRaw of sentences) {
|
|
88
|
+
const sentence = sentenceRaw.trim();
|
|
89
|
+
if (!sentence)
|
|
90
|
+
continue;
|
|
91
|
+
const lower = sentence.toLowerCase();
|
|
92
|
+
if (lower.includes('important') ||
|
|
93
|
+
lower.includes('decision') ||
|
|
94
|
+
lower.includes('todo') ||
|
|
95
|
+
lower.includes('error') ||
|
|
96
|
+
lower.includes('must') ||
|
|
97
|
+
lower.includes('action')) {
|
|
98
|
+
important.push(sentence);
|
|
99
|
+
}
|
|
100
|
+
else {
|
|
101
|
+
normal.push(sentence);
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
const compact = [];
|
|
105
|
+
for (const sentence of [...important, ...normal]) {
|
|
106
|
+
const next = compact.length > 0 ? `${compact.join(' ')} ${sentence}` : sentence;
|
|
107
|
+
if (next.length > maxLength)
|
|
108
|
+
break;
|
|
109
|
+
compact.push(sentence);
|
|
110
|
+
}
|
|
111
|
+
const summary = compact.join(' ').trim();
|
|
112
|
+
return summary || `${text.slice(0, Math.max(0, maxLength - 3))}...`;
|
|
113
|
+
}
|
|
114
|
+
function scoreSmartContextItem(item, now) {
|
|
115
|
+
const text = item.text || '';
|
|
116
|
+
const lower = text.toLowerCase();
|
|
117
|
+
let score = 0;
|
|
118
|
+
const signals = [];
|
|
119
|
+
if (item.pinned) {
|
|
120
|
+
score += 1000;
|
|
121
|
+
signals.push('pinned');
|
|
122
|
+
}
|
|
123
|
+
const importantKeywords = ['error', 'bug', 'decision', 'todo', 'must', 'important', 'action', 'requirement', 'blocked'];
|
|
124
|
+
const matchedKeywords = importantKeywords.filter((kw) => lower.includes(kw));
|
|
125
|
+
if (matchedKeywords.length > 0) {
|
|
126
|
+
score += Math.min(28, matchedKeywords.length * 4);
|
|
127
|
+
signals.push(`keywords:${matchedKeywords.join(',')}`);
|
|
128
|
+
}
|
|
129
|
+
if (item.source) {
|
|
130
|
+
const sourceLower = item.source.toLowerCase();
|
|
131
|
+
if (sourceLower.includes('user')) {
|
|
132
|
+
score += 10;
|
|
133
|
+
signals.push('source:user');
|
|
134
|
+
}
|
|
135
|
+
else if (sourceLower.includes('system')) {
|
|
136
|
+
score += 8;
|
|
137
|
+
signals.push('source:system');
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
if (/([a-zA-Z]:\\|\/).+\.[a-z0-9]+/i.test(text)) {
|
|
141
|
+
score += 6;
|
|
142
|
+
signals.push('contains:path');
|
|
143
|
+
}
|
|
144
|
+
if (/\bhttps?:\/\//i.test(text)) {
|
|
145
|
+
score += 5;
|
|
146
|
+
signals.push('contains:url');
|
|
147
|
+
}
|
|
148
|
+
if (text.length > 400) {
|
|
149
|
+
score += 4;
|
|
150
|
+
signals.push('long-context');
|
|
151
|
+
}
|
|
152
|
+
else if (text.length < 24) {
|
|
153
|
+
score -= 4;
|
|
154
|
+
signals.push('very-short');
|
|
155
|
+
}
|
|
156
|
+
if (item.timestamp) {
|
|
157
|
+
const ts = Date.parse(item.timestamp);
|
|
158
|
+
if (!Number.isNaN(ts)) {
|
|
159
|
+
const ageHours = Math.max(0, (now - ts) / (1000 * 60 * 60));
|
|
160
|
+
const recency = Math.max(0, 20 - Math.floor(ageHours / 2));
|
|
161
|
+
score += recency;
|
|
162
|
+
signals.push(`recency:+${recency}`);
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
if (/(^|\s)(ok|thanks|noted|done)(\s|$)/i.test(lower)) {
|
|
166
|
+
score -= 3;
|
|
167
|
+
signals.push('low-signal-chat');
|
|
168
|
+
}
|
|
169
|
+
return { score, signals };
|
|
170
|
+
}
|
|
171
|
+
function extractPruneMemoryCandidates(text) {
|
|
172
|
+
const candidates = [];
|
|
173
|
+
const lines = text.split('\n').map((line) => line.trim()).filter(Boolean);
|
|
174
|
+
for (const line of lines) {
|
|
175
|
+
const lower = line.toLowerCase();
|
|
176
|
+
if (lower.includes('decision:') || lower.includes('decided to')) {
|
|
177
|
+
candidates.push({ keyHint: 'decision.auto', reason: 'decision-signal', value: line });
|
|
178
|
+
}
|
|
179
|
+
if (lower.includes('todo:') || lower.includes('action:') || lower.includes('must ')) {
|
|
180
|
+
candidates.push({ keyHint: 'todo.auto', reason: 'action-signal', value: line });
|
|
181
|
+
}
|
|
182
|
+
if (lower.includes('error') || lower.includes('failed') || lower.includes('exception')) {
|
|
183
|
+
candidates.push({ keyHint: 'error.auto', reason: 'error-signal', value: line });
|
|
184
|
+
}
|
|
185
|
+
if (/\bhttps?:\/\//i.test(line)) {
|
|
186
|
+
candidates.push({ keyHint: 'reference.url', reason: 'url-signal', value: line });
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
return candidates.slice(0, 8);
|
|
190
|
+
}
|
|
191
|
+
export const __contextTestables = {
|
|
192
|
+
summarizeForPrune,
|
|
193
|
+
scoreSmartContextItem,
|
|
194
|
+
extractPruneMemoryCandidates
|
|
195
|
+
};
|
|
81
196
|
export function registerContextTools(server) {
|
|
82
197
|
server.registerTool('context_status', {
|
|
83
198
|
title: 'Context Status',
|
|
@@ -140,6 +255,120 @@ WHEN TO USE:
|
|
|
140
255
|
content: [{ type: 'text', text: JSON.stringify(result, null, 2) }]
|
|
141
256
|
};
|
|
142
257
|
});
|
|
258
|
+
server.registerTool('context_prune_smart', {
|
|
259
|
+
title: 'Smart Context Prune',
|
|
260
|
+
description: `Smart context pruning with relevance scoring, optional summarization, and memory candidate extraction.
|
|
261
|
+
WHEN TO USE:
|
|
262
|
+
- After processing large batches of context/tool output
|
|
263
|
+
- When token pressure is rising and you need to keep only high-signal context
|
|
264
|
+
- To generate compact summaries before pruning noisy context`,
|
|
265
|
+
inputSchema: {
|
|
266
|
+
items: z.array(z.object({
|
|
267
|
+
id: z.string().describe('Unique context item identifier'),
|
|
268
|
+
text: z.string().describe('Context content text'),
|
|
269
|
+
source: z.string().optional().describe('Context source (user/tool/system/assistant)'),
|
|
270
|
+
timestamp: z.string().optional().describe('ISO timestamp for recency scoring'),
|
|
271
|
+
pinned: z.boolean().optional().describe('Pinned items are always kept')
|
|
272
|
+
})).describe('Context items to evaluate'),
|
|
273
|
+
mode: z.enum(['hybrid', 'aggressive', 'conservative']).optional().describe('Prune strategy mode (default: hybrid)'),
|
|
274
|
+
maxKeep: z.number().optional().describe('Maximum number of items to keep (default: 8)'),
|
|
275
|
+
summaryMaxLength: z.number().optional().describe('Maximum summary length for compressed entries (default: 240)'),
|
|
276
|
+
memoryCandidateLimit: z.number().optional().describe('Maximum extracted memory candidates (default: 8)')
|
|
277
|
+
}
|
|
278
|
+
}, async ({ items, mode = 'hybrid', maxKeep = 8, summaryMaxLength = 240, memoryCandidateLimit = 8 }) => {
|
|
279
|
+
if (items.length === 0) {
|
|
280
|
+
return {
|
|
281
|
+
content: [{ type: 'text', text: JSON.stringify({
|
|
282
|
+
mode,
|
|
283
|
+
totalItems: 0,
|
|
284
|
+
keep: [],
|
|
285
|
+
prune: [],
|
|
286
|
+
summaries: [],
|
|
287
|
+
memoryCandidates: []
|
|
288
|
+
}, null, 2) }]
|
|
289
|
+
};
|
|
290
|
+
}
|
|
291
|
+
const now = Date.now();
|
|
292
|
+
const normalizedItems = items.map((item) => ({
|
|
293
|
+
id: item.id,
|
|
294
|
+
text: item.text,
|
|
295
|
+
source: item.source,
|
|
296
|
+
timestamp: item.timestamp,
|
|
297
|
+
pinned: item.pinned
|
|
298
|
+
}));
|
|
299
|
+
const scored = normalizedItems.map((item) => {
|
|
300
|
+
const { score, signals } = scoreSmartContextItem(item, now);
|
|
301
|
+
return { item, score, signals };
|
|
302
|
+
}).sort((a, b) => b.score - a.score);
|
|
303
|
+
const pinnedItems = scored.filter((entry) => entry.item.pinned);
|
|
304
|
+
const keepBaseByMode = mode === 'aggressive'
|
|
305
|
+
? Math.max(3, Math.ceil(scored.length * 0.25))
|
|
306
|
+
: mode === 'conservative'
|
|
307
|
+
? Math.max(5, Math.ceil(scored.length * 0.7))
|
|
308
|
+
: Math.max(4, Math.ceil(scored.length * 0.45));
|
|
309
|
+
const keepTarget = Math.min(scored.length, Math.max(pinnedItems.length, Math.min(maxKeep, keepBaseByMode)));
|
|
310
|
+
const keepSet = new Set();
|
|
311
|
+
for (const pinned of pinnedItems) {
|
|
312
|
+
keepSet.add(pinned.item.id);
|
|
313
|
+
}
|
|
314
|
+
for (const entry of scored) {
|
|
315
|
+
if (keepSet.size >= keepTarget)
|
|
316
|
+
break;
|
|
317
|
+
keepSet.add(entry.item.id);
|
|
318
|
+
}
|
|
319
|
+
const keep = scored
|
|
320
|
+
.filter((entry) => keepSet.has(entry.item.id))
|
|
321
|
+
.map((entry) => ({
|
|
322
|
+
id: entry.item.id,
|
|
323
|
+
source: entry.item.source,
|
|
324
|
+
score: entry.score,
|
|
325
|
+
pinned: !!entry.item.pinned,
|
|
326
|
+
signals: entry.signals,
|
|
327
|
+
textPreview: summarizeForPrune(entry.item.text, 180)
|
|
328
|
+
}));
|
|
329
|
+
const prunedScored = scored
|
|
330
|
+
.filter((entry) => !keepSet.has(entry.item.id));
|
|
331
|
+
const prune = prunedScored
|
|
332
|
+
.map((entry) => ({
|
|
333
|
+
id: entry.item.id,
|
|
334
|
+
source: entry.item.source,
|
|
335
|
+
score: entry.score,
|
|
336
|
+
signals: entry.signals,
|
|
337
|
+
reason: entry.score < 8 ? 'low-signal' : 'lower-priority',
|
|
338
|
+
textPreview: summarizeForPrune(entry.item.text, 120)
|
|
339
|
+
}));
|
|
340
|
+
const summaries = prunedScored
|
|
341
|
+
.filter((entry) => entry.item.text.length > summaryMaxLength)
|
|
342
|
+
.slice(0, 12)
|
|
343
|
+
.map((entry) => ({
|
|
344
|
+
id: entry.item.id,
|
|
345
|
+
source: entry.item.source,
|
|
346
|
+
originalLength: entry.item.text.length,
|
|
347
|
+
summary: summarizeForPrune(entry.item.text, summaryMaxLength)
|
|
348
|
+
}));
|
|
349
|
+
const memoryCandidates = prunedScored
|
|
350
|
+
.flatMap((entry) => extractPruneMemoryCandidates(entry.item.text))
|
|
351
|
+
.slice(0, memoryCandidateLimit);
|
|
352
|
+
const output = {
|
|
353
|
+
mode,
|
|
354
|
+
strategy: {
|
|
355
|
+
totalItems: scored.length,
|
|
356
|
+
keepTarget,
|
|
357
|
+
kept: keep.length,
|
|
358
|
+
pruned: prune.length
|
|
359
|
+
},
|
|
360
|
+
keep,
|
|
361
|
+
prune,
|
|
362
|
+
summaries,
|
|
363
|
+
memoryCandidates,
|
|
364
|
+
recommendation: prune.length > 0
|
|
365
|
+
? 'Prune listed low-signal items and persist memoryCandidates via memory_set or memory_capture_candidates'
|
|
366
|
+
: 'No prune needed; current context is already high-signal'
|
|
367
|
+
};
|
|
368
|
+
return {
|
|
369
|
+
content: [{ type: 'text', text: JSON.stringify(output, null, 2) }]
|
|
370
|
+
};
|
|
371
|
+
});
|
|
143
372
|
server.registerTool('store_health', {
|
|
144
373
|
title: 'Store Health',
|
|
145
374
|
description: `Check health of the context store - file integrity, backup status, and recommendations.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
import test from 'node:test';
|
|
2
|
+
import assert from 'node:assert/strict';
|
|
3
|
+
import { __contextTestables } from './context.js';
|
|
4
|
+
test('summarizeForPrune respects max length and keeps key sentence', () => {
|
|
5
|
+
const text = [
|
|
6
|
+
'This is regular background sentence.',
|
|
7
|
+
'Decision: use smart prune scoring for noisy outputs.',
|
|
8
|
+
'Another long detail to fill space and trigger compression behavior.'
|
|
9
|
+
].join(' ');
|
|
10
|
+
const summary = __contextTestables.summarizeForPrune(text, 80);
|
|
11
|
+
assert.ok(summary.length <= 80);
|
|
12
|
+
assert.ok(summary.toLowerCase().includes('decision'));
|
|
13
|
+
});
|
|
14
|
+
test('scoreSmartContextItem gives higher score for pinned/important content', () => {
|
|
15
|
+
const now = Date.now();
|
|
16
|
+
const high = __contextTestables.scoreSmartContextItem({
|
|
17
|
+
id: 'a',
|
|
18
|
+
text: 'Important decision: must fix error in src/tools/context.ts',
|
|
19
|
+
source: 'user',
|
|
20
|
+
timestamp: new Date(now).toISOString(),
|
|
21
|
+
pinned: true
|
|
22
|
+
}, now);
|
|
23
|
+
const low = __contextTestables.scoreSmartContextItem({
|
|
24
|
+
id: 'b',
|
|
25
|
+
text: 'ok thanks',
|
|
26
|
+
source: 'assistant',
|
|
27
|
+
timestamp: new Date(now - 1000 * 60 * 60 * 48).toISOString()
|
|
28
|
+
}, now);
|
|
29
|
+
assert.ok(high.score > low.score);
|
|
30
|
+
assert.ok(high.signals.includes('pinned'));
|
|
31
|
+
});
|
|
32
|
+
test('extractPruneMemoryCandidates extracts decision/todo/error/url signals', () => {
|
|
33
|
+
const candidates = __contextTestables.extractPruneMemoryCandidates([
|
|
34
|
+
'Decision: Use hybrid mode for prune.',
|
|
35
|
+
'TODO: add tests for memory capture.',
|
|
36
|
+
'Error: build failed in CI.',
|
|
37
|
+
'Reference: https://example.com/docs'
|
|
38
|
+
].join('\n'));
|
|
39
|
+
const reasons = candidates.map((c) => c.reason);
|
|
40
|
+
assert.ok(reasons.includes('decision-signal'));
|
|
41
|
+
assert.ok(reasons.includes('action-signal'));
|
|
42
|
+
assert.ok(reasons.includes('error-signal'));
|
|
43
|
+
assert.ok(reasons.includes('url-signal'));
|
|
44
|
+
});
|
package/dist/tools/memory.d.ts
CHANGED
|
@@ -1,3 +1,17 @@
|
|
|
1
1
|
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
2
2
|
export declare function cleanupExpiredMemories(): Promise<number>;
|
|
3
|
+
interface MemoryCandidate {
|
|
4
|
+
key: string;
|
|
5
|
+
value: string;
|
|
6
|
+
tags: string[];
|
|
7
|
+
confidence: number;
|
|
8
|
+
reason: string;
|
|
9
|
+
}
|
|
10
|
+
declare function slugify(value: string): string;
|
|
11
|
+
declare function buildMemoryCandidates(text: string, source: string, baseTags: string[]): MemoryCandidate[];
|
|
12
|
+
export declare const __memoryTestables: {
|
|
13
|
+
slugify: typeof slugify;
|
|
14
|
+
buildMemoryCandidates: typeof buildMemoryCandidates;
|
|
15
|
+
};
|
|
3
16
|
export declare function registerMemoryTools(server: McpServer): void;
|
|
17
|
+
export {};
|
package/dist/tools/memory.js
CHANGED
|
@@ -79,6 +79,80 @@ function deepMerge(target, source) {
|
|
|
79
79
|
}
|
|
80
80
|
return result;
|
|
81
81
|
}
|
|
82
|
+
function slugify(value) {
|
|
83
|
+
return value
|
|
84
|
+
.toLowerCase()
|
|
85
|
+
.replace(/[^a-z0-9]+/g, '-')
|
|
86
|
+
.replace(/^-+|-+$/g, '')
|
|
87
|
+
.slice(0, 36) || 'item';
|
|
88
|
+
}
|
|
89
|
+
function buildMemoryCandidates(text, source, baseTags) {
|
|
90
|
+
const candidates = [];
|
|
91
|
+
const lines = text.split('\n').map((line) => line.trim()).filter(Boolean);
|
|
92
|
+
for (const line of lines) {
|
|
93
|
+
const lower = line.toLowerCase();
|
|
94
|
+
if (lower.includes('decision:') || lower.includes('decided to')) {
|
|
95
|
+
const subject = line.replace(/^.*?(decision:\s*|decided to\s*)/i, '').trim() || line;
|
|
96
|
+
candidates.push({
|
|
97
|
+
key: `decision.${slugify(subject)}`,
|
|
98
|
+
value: line,
|
|
99
|
+
tags: [...baseTags, source, 'decision'],
|
|
100
|
+
confidence: 0.92,
|
|
101
|
+
reason: 'decision signal'
|
|
102
|
+
});
|
|
103
|
+
}
|
|
104
|
+
if (lower.includes('todo:') || lower.includes('action:') || lower.includes('must ')) {
|
|
105
|
+
const subject = line.replace(/^.*?(todo:\s*|action:\s*|must\s*)/i, '').trim() || line;
|
|
106
|
+
candidates.push({
|
|
107
|
+
key: `todo.${slugify(subject)}`,
|
|
108
|
+
value: line,
|
|
109
|
+
tags: [...baseTags, source, 'todo'],
|
|
110
|
+
confidence: 0.86,
|
|
111
|
+
reason: 'actionable item signal'
|
|
112
|
+
});
|
|
113
|
+
}
|
|
114
|
+
if (lower.includes('error') || lower.includes('failed') || lower.includes('exception')) {
|
|
115
|
+
candidates.push({
|
|
116
|
+
key: `error.${slugify(line)}`,
|
|
117
|
+
value: line,
|
|
118
|
+
tags: [...baseTags, source, 'error'],
|
|
119
|
+
confidence: 0.9,
|
|
120
|
+
reason: 'error signal'
|
|
121
|
+
});
|
|
122
|
+
}
|
|
123
|
+
if (/\bhttps?:\/\//i.test(line)) {
|
|
124
|
+
candidates.push({
|
|
125
|
+
key: `reference.url.${slugify(line)}`,
|
|
126
|
+
value: line,
|
|
127
|
+
tags: [...baseTags, source, 'reference'],
|
|
128
|
+
confidence: 0.8,
|
|
129
|
+
reason: 'url reference signal'
|
|
130
|
+
});
|
|
131
|
+
}
|
|
132
|
+
const configMatch = line.match(/\b([A-Z][A-Z0-9_]{2,})\s*[=:]\s*(.+)$/);
|
|
133
|
+
if (configMatch) {
|
|
134
|
+
candidates.push({
|
|
135
|
+
key: `config.${slugify(configMatch[1])}`,
|
|
136
|
+
value: line,
|
|
137
|
+
tags: [...baseTags, source, 'config'],
|
|
138
|
+
confidence: 0.84,
|
|
139
|
+
reason: 'config/env signal'
|
|
140
|
+
});
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
const dedup = new Map();
|
|
144
|
+
for (const candidate of candidates) {
|
|
145
|
+
const existing = dedup.get(candidate.key);
|
|
146
|
+
if (!existing || candidate.confidence > existing.confidence) {
|
|
147
|
+
dedup.set(candidate.key, candidate);
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
return Array.from(dedup.values());
|
|
151
|
+
}
|
|
152
|
+
export const __memoryTestables = {
|
|
153
|
+
slugify,
|
|
154
|
+
buildMemoryCandidates
|
|
155
|
+
};
|
|
82
156
|
export function registerMemoryTools(server) {
|
|
83
157
|
server.registerTool('memory_set', {
|
|
84
158
|
title: 'Memory Set',
|
|
@@ -197,6 +271,62 @@ WHEN TO USE:
|
|
|
197
271
|
}]
|
|
198
272
|
};
|
|
199
273
|
});
|
|
274
|
+
server.registerTool('memory_capture_candidates', {
    title: 'Memory Capture Candidates',
    description: `Extract and optionally persist important memory candidates from raw text.

WHEN TO USE:
- After long tool outputs to store decisions/errors/todos automatically
- Before pruning context to avoid losing important details
- For proactive memory capture workflows`,
    inputSchema: {
        text: z.string().describe('Raw text to analyze for memory candidates'),
        source: z.string().optional().describe('Source label for extracted candidates (default: llm)'),
        autoTags: z.array(z.string()).optional().describe('Additional tags to include on all candidates'),
        maxCandidates: z.number().optional().describe('Maximum number of candidates to return/store (default: 10)'),
        dryRun: z.boolean().optional().describe('If true, only preview candidates without persisting')
    }
}, async ({ text, source = 'llm', autoTags = [], maxCandidates = 10, dryRun = true }) => {
    // Extract candidate entries from the raw text and cap the list at the
    // requested maximum before doing anything else.
    const extracted = buildMemoryCandidates(text, source, autoTags).slice(0, maxCandidates);
    if (extracted.length === 0) {
        return {
            content: [{ type: 'text', text: 'No important memory candidates detected' }]
        };
    }
    // Preview mode: report what would be stored without touching the store.
    if (dryRun) {
        const preview = JSON.stringify({ dryRun: true, count: extracted.length, candidates: extracted }, null, 2);
        return {
            content: [{ type: 'text', text: preview }]
        };
    }
    // Persist every candidate; an overwrite keeps the original createdAt so
    // entry age survives repeated captures of the same key.
    const store = await getMemoryStore();
    const timestamp = new Date().toISOString();
    const persistedKeys = [];
    for (const candidate of extracted) {
        const previous = store.entries[candidate.key];
        store.entries[candidate.key] = {
            key: candidate.key,
            value: candidate.value,
            tags: candidate.tags,
            createdAt: previous?.createdAt || timestamp,
            updatedAt: timestamp
        };
        persistedKeys.push(candidate.key);
    }
    await saveMemoryStore(store);
    const report = JSON.stringify({
        dryRun: false,
        savedCount: persistedKeys.length,
        savedKeys: persistedKeys,
        candidates: extracted
    }, null, 2);
    return {
        content: [{ type: 'text', text: report }]
    };
});
|
|
200
330
|
server.registerTool('memory_delete', {
|
|
201
331
|
title: 'Memory Delete',
|
|
202
332
|
description: 'Delete a memory entry by key.',
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import test from 'node:test';
|
|
2
|
+
import assert from 'node:assert/strict';
|
|
3
|
+
import { __memoryTestables } from './memory.js';
|
|
4
|
+
test('slugify normalizes and trims tokens safely', () => {
    // Mixed case, punctuation, and surrounding whitespace should all collapse
    // into a clean lowercase, dash-separated slug.
    const { slugify } = __memoryTestables;
    assert.equal(slugify(' Decision: Add New Feature! '), 'decision-add-new-feature');
});
|
|
8
|
+
test('buildMemoryCandidates extracts high-signal candidates with tags', () => {
    // One line per category the extractor is expected to recognize.
    const lines = [
        'Decision: use memory capture as default.',
        'TODO: improve context pruning.',
        'ERROR: request failed with timeout.',
        'MCP_CONTEXT_PATH=/tmp/context',
        'See docs: https://example.dev/mcp'
    ];
    const candidates = __memoryTestables.buildMemoryCandidates(lines.join('\n'), 'llm', ['test']);
    const keys = candidates.map(({ key }) => key);
    // Every category should surface as a candidate with the matching key prefix.
    for (const prefix of ['decision.', 'todo.', 'error.', 'config.', 'reference.url.']) {
        assert.ok(keys.some((key) => key.startsWith(prefix)));
    }
    // Both the caller-supplied autoTag and the source label must be present
    // on every extracted candidate.
    for (const { tags } of candidates) {
        assert.ok(tags.includes('test'));
        assert.ok(tags.includes('llm'));
    }
});
|
package/dist/tools/session.js
CHANGED
|
@@ -311,9 +311,10 @@ WHEN TO USE: Call this ONCE at the START of every session/conversation.
|
|
|
311
311
|
Returns: latest checkpoint, tracker status (todos/decisions), all memories, and auto-detected project info.
|
|
312
312
|
This replaces calling checkpoint_load(), tracker_status(), and memory_list() separately.`,
|
|
313
313
|
inputSchema: {
|
|
314
|
-
cwd: z.string().optional().describe('Current working directory for project detection (defaults to process.cwd())')
|
|
314
|
+
cwd: z.string().optional().describe('Current working directory for project detection (defaults to process.cwd())'),
|
|
315
|
+
verbose: z.boolean().optional().describe('Include full checkpoint/tracker/memory payload (default: false)')
|
|
315
316
|
}
|
|
316
|
-
}, async ({ cwd }) => {
|
|
317
|
+
}, async ({ cwd, verbose = false }) => {
|
|
317
318
|
const workingDir = cwd || process.cwd();
|
|
318
319
|
// Cleanup expired memories first
|
|
319
320
|
const cleanedUp = await cleanupExpiredMemories();
|
|
@@ -345,13 +346,37 @@ This replaces calling checkpoint_load(), tracker_status(), and memory_list() sep
|
|
|
345
346
|
memoriesCount: Array.isArray(memories) ? memories.length : 0,
|
|
346
347
|
cleanedUpExpiredMemories: cleanedUp
|
|
347
348
|
};
|
|
349
|
+
const output = verbose
|
|
350
|
+
? { summary, ...state }
|
|
351
|
+
: {
|
|
352
|
+
summary,
|
|
353
|
+
checkpoint: checkpoint
|
|
354
|
+
? {
|
|
355
|
+
id: checkpoint.id,
|
|
356
|
+
name: checkpoint.name,
|
|
357
|
+
createdAt: checkpoint.createdAt,
|
|
358
|
+
files: checkpoint.files || []
|
|
359
|
+
}
|
|
360
|
+
: null,
|
|
361
|
+
tracker: {
|
|
362
|
+
projectName: tracker.projectName,
|
|
363
|
+
pendingTodos: tracker.pendingTodos || [],
|
|
364
|
+
recentChanges: tracker.recentChanges || [],
|
|
365
|
+
decisions: tracker.decisions || []
|
|
366
|
+
},
|
|
367
|
+
memories: Array.isArray(memories)
|
|
368
|
+
? memories.map((m) => ({
|
|
369
|
+
key: m.key,
|
|
370
|
+
tags: m.tags || [],
|
|
371
|
+
updatedAt: m.updatedAt
|
|
372
|
+
}))
|
|
373
|
+
: [],
|
|
374
|
+
project
|
|
375
|
+
};
|
|
348
376
|
return {
|
|
349
377
|
content: [{
|
|
350
378
|
type: 'text',
|
|
351
|
-
text: JSON.stringify(
|
|
352
|
-
summary,
|
|
353
|
-
...state
|
|
354
|
-
}, null, 2)
|
|
379
|
+
text: JSON.stringify(output, null, 2)
|
|
355
380
|
}]
|
|
356
381
|
};
|
|
357
382
|
});
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@asd412id/mcp-context-manager",
|
|
3
|
-
"version": "1.0.10",
|
|
3
|
+
"version": "1.0.12",
|
|
4
4
|
"description": "MCP tools for context management - summarizer, memory store, project tracker, checkpoints, and smart file loader",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -15,6 +15,7 @@
|
|
|
15
15
|
"build": "tsc",
|
|
16
16
|
"dev": "tsc --watch",
|
|
17
17
|
"start": "node dist/index.js",
|
|
18
|
+
"test": "npm run build && node --test dist/**/*.test.js",
|
|
18
19
|
"prepublishOnly": "npm run build"
|
|
19
20
|
},
|
|
20
21
|
"keywords": [
|