dev-mcp-server 0.0.2 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +23 -55
- package/README.md +609 -219
- package/cli.js +486 -160
- package/package.json +2 -2
- package/src/agents/BaseAgent.js +113 -0
- package/src/agents/dreamer.js +165 -0
- package/src/agents/improver.js +175 -0
- package/src/agents/specialists.js +202 -0
- package/src/agents/taskDecomposer.js +176 -0
- package/src/agents/teamCoordinator.js +153 -0
- package/src/api/routes/agents.js +172 -0
- package/src/api/routes/extras.js +115 -0
- package/src/api/routes/git.js +72 -0
- package/src/api/routes/ingest.js +60 -40
- package/src/api/routes/knowledge.js +59 -41
- package/src/api/routes/memory.js +41 -0
- package/src/api/routes/newRoutes.js +168 -0
- package/src/api/routes/pipelines.js +41 -0
- package/src/api/routes/planner.js +54 -0
- package/src/api/routes/query.js +24 -0
- package/src/api/routes/sessions.js +54 -0
- package/src/api/routes/tasks.js +67 -0
- package/src/api/routes/tools.js +85 -0
- package/src/api/routes/v5routes.js +196 -0
- package/src/api/server.js +133 -5
- package/src/context/compactor.js +151 -0
- package/src/context/contextEngineer.js +181 -0
- package/src/context/contextVisualizer.js +140 -0
- package/src/core/conversationEngine.js +231 -0
- package/src/core/indexer.js +169 -143
- package/src/core/ingester.js +141 -126
- package/src/core/queryEngine.js +286 -236
- package/src/cron/cronScheduler.js +260 -0
- package/src/dashboard/index.html +1181 -0
- package/src/lsp/symbolNavigator.js +220 -0
- package/src/memory/memoryManager.js +186 -0
- package/src/memory/teamMemory.js +111 -0
- package/src/messaging/messageBus.js +177 -0
- package/src/monitor/proactiveMonitor.js +337 -0
- package/src/pipelines/pipelineEngine.js +230 -0
- package/src/planner/plannerEngine.js +202 -0
- package/src/plugins/builtin/stats-plugin.js +29 -0
- package/src/plugins/pluginManager.js +144 -0
- package/src/prompts/promptEngineer.js +289 -0
- package/src/sessions/sessionManager.js +166 -0
- package/src/skills/skillsManager.js +263 -0
- package/src/storage/store.js +127 -105
- package/src/tasks/taskManager.js +151 -0
- package/src/tools/BashTool.js +154 -0
- package/src/tools/FileEditTool.js +280 -0
- package/src/tools/GitTool.js +212 -0
- package/src/tools/GrepTool.js +199 -0
- package/src/tools/registry.js +1380 -0
- package/src/utils/costTracker.js +69 -0
- package/src/utils/fileParser.js +176 -153
- package/src/utils/llmClient.js +355 -206
- package/src/watcher/fileWatcher.js +137 -0
- package/src/worktrees/worktreeManager.js +176 -0
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
/**
|
|
3
|
+
* Sliding-window context compaction with token-budget awareness.
|
|
4
|
+
*
|
|
5
|
+
* Strategy:
|
|
6
|
+
* WINDOW: keep the last N messages verbatim (always fresh)
|
|
7
|
+
* BODY: older messages → compressed summary
|
|
8
|
+
* INJECT: summary injected as a system-level recap at the top
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
const llm = require('../utils/llmClient');
|
|
12
|
+
const costTracker = require('../utils/costTracker');
|
|
13
|
+
const logger = require('../utils/logger');
|
|
14
|
+
|
|
15
|
+
// ── constants ──────────────────────────────────────────────────────────────────
// Rough token estimate: ~4 characters per token.
const EST_TOKENS = text => Math.ceil((text || '').length / 4);
const WINDOW_MSGS = 6;    // keep this many recent messages verbatim
const MAX_RAW_TOK = 8000; // if history exceeds this, compact
const TARGET_TOK = 3000;  // target token count for compacted history

// Token estimate for one message. Array content (tool-result blocks) is
// stringified before measuring so it is not silently counted as zero.
const msgTokens = m => EST_TOKENS(
  Array.isArray(m.content) ? JSON.stringify(m.content) : (m.content || '')
);

class Compactor {
  /**
   * Ensure a conversation history fits within the token budget.
   *
   * @param {Array<{role: string, content: (string|Array)}>} messages - history to compact
   * @param {Object}  [opts]
   * @param {string}  [opts.sessionId='default'] - session for cost attribution
   * @param {boolean} [opts.force=false] - compact even when under budget
   * @returns {Promise<Object>} { messages, compacted, summary?, savedTokens?, ... }
   */
  async compact(messages, opts = {}) {
    const { sessionId = 'default', force = false } = opts;

    if (!messages?.length) return { messages: [], compacted: false };

    // Estimate current token usage
    const totalTokens = messages.reduce((sum, m) => sum + msgTokens(m), 0);

    logger.info(`[Compactor] history: ${messages.length} messages, ~${totalTokens} tokens`);

    if (!force && totalTokens <= MAX_RAW_TOK) {
      return { messages, compacted: false, totalTokens };
    }

    // Split: window (keep verbatim) + body (compress)
    const window = messages.slice(-WINDOW_MSGS);
    const body = messages.slice(0, -WINDOW_MSGS);

    if (!body.length) {
      return { messages, compacted: false, reason: 'Too few messages to compact' };
    }

    // Summarizer input. Plain-text user messages are included too: the prompt
    // below asks for "questions asked", which were previously lost because
    // only assistant messages and tool-result (array-content) user messages
    // survived the filter.
    const importantLines = body
      .filter(m => m.role === 'assistant' || m.role === 'user')
      .map(m => {
        const text = Array.isArray(m.content)
          ? m.content.filter(b => b.type === 'text').map(b => b.text).join(' ')
          : (m.content || '');
        return `${m.role.toUpperCase()}: ${text.slice(0, 400)}`;
      })
      .join('\n\n');

    // Compress
    const response = await llm.chat({
      model: llm.model('fast'),
      max_tokens: 600,
      messages: [{
        role: 'user',
        content: `Compress this conversation history into a dense, information-preserving summary for a developer AI assistant.

Capture:
- Questions asked and answers given
- File names, function names, error types discovered
- Decisions made or actions taken
- Any unresolved issues or follow-up questions
- Tool results that revealed important facts

Keep it under 400 words. Be specific. Preserve technical details exactly.

History:
${importantLines}`,
      }],
    });

    costTracker.record({
      model: llm.model('fast'),
      // Guard: don't crash cost accounting if the provider omits usage data.
      inputTokens: response.usage?.input_tokens ?? 0,
      outputTokens: response.usage?.output_tokens ?? 0,
      sessionId,
      queryType: 'compact',
    });

    // Take the first *text* block — providers may interleave non-text blocks,
    // so blindly reading content[0].text could throw.
    const summary = response.content?.find(b => b.type === 'text')?.text ?? '';

    // Build compacted history: [summary-message] + [recent window]
    const summaryMessage = {
      role: 'user',
      content: `[Conversation summary — ${body.length} messages compacted]\n${summary}`,
      _compacted: true,
      _originalCount: body.length,
      _compactedAt: new Date().toISOString(),
    };
    // Pair with an assistant ack so the message array stays valid
    const ackMessage = {
      role: 'assistant',
      content: 'Understood — I have the summary of our earlier conversation. Continuing from here.',
    };

    const compactedMessages = [summaryMessage, ackMessage, ...window];
    const newTokens = compactedMessages.reduce((s, m) => s + msgTokens(m), 0);

    logger.info(`[Compactor] ${messages.length}→${compactedMessages.length} messages, ${totalTokens}→${newTokens} tokens saved ${totalTokens - newTokens}`);

    return {
      compacted: true,
      messages: compactedMessages,
      summary,
      originalCount: messages.length,
      newCount: compactedMessages.length,
      savedTokens: totalTokens - newTokens,
      totalTokens: newTokens,
    };
  }

  /**
   * Multi-tier compaction: compact the summary if it's also too long.
   *
   * @param {Array} messages - conversation history
   * @param {Object} [opts] - same options as compact()
   * @returns {Promise<Object>} compaction result (same shape as compact())
   */
  async deepCompact(messages, opts = {}) {
    let result = await this.compact(messages, opts);

    // If still over budget, compact the summary itself
    if (result.compacted && EST_TOKENS(result.summary) > TARGET_TOK) {
      logger.info('[Compactor] Deep compaction: compressing the summary');
      result = await this.compact(result.messages, { ...opts, force: true });
    }

    return result;
  }

  /**
   * Estimate if a conversation needs compaction.
   *
   * @param {Array} [messages] - conversation history (null/undefined → 0 tokens)
   * @returns {{needs: boolean, tokens: number, threshold: number}}
   */
  needsCompaction(messages) {
    const tokens = messages?.reduce((s, m) => s + msgTokens(m), 0) || 0;
    return { needs: tokens > MAX_RAW_TOK, tokens, threshold: MAX_RAW_TOK };
  }
}
|
|
150
|
+
|
|
151
|
+
// Singleton — the whole process shares one Compactor instance.
module.exports = new Compactor();
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Intelligent context window management.
|
|
3
|
+
* Decides WHAT to include, HOW MUCH of it, in WHAT ORDER, and WHAT to cut.
|
|
4
|
+
*
|
|
5
|
+
* Core idea: context is a budget. Every token costs. Spend it wisely.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
const logger = require('../utils/logger');
|
|
9
|
+
|
|
10
|
+
// Rough token estimator (4 chars ≈ 1 token)
const estimateTokens = (text) => Math.ceil((text || '').length / 4);

// How much of the context window to spend on retrieval context (the rest goes to answer)
const CONTEXT_BUDGET_TOKENS = 6000;

// Signal weights for scoring importance of a context chunk
const SIGNAL_WEIGHTS = {
  relevanceScore: 3.0, // TF-IDF relevance from indexer
  isErrorLog: 2.5,     // Error logs are gold for debugging
  isBugFix: 2.0,       // Bug-fix files are highly relevant
  isRecent: 1.5,       // Recently ingested files
  hasExports: 1.2,     // Files that export things = likely central
  hasErrors: 1.8,      // Files with known error types
  isConfig: 0.6,       // Config is often less relevant
  isTestFile: 0.5,     // Test files lower priority unless debug
  chunkIsFirst: 1.3,   // First chunk of a file has more context
  metadataRich: 1.2,   // Rich metadata = better indexed
};

class ContextEngineer {
  /**
   * Given raw retrieved docs + query intent, build the optimal context bundle.
   * Returns ranked, trimmed, budget-aware context ready for the prompt.
   *
   * @param {Array<Object>} docs - retrieved chunks (content, metadata, kind, ...)
   * @param {string} query - the user query (passed through to scoring)
   * @param {string} [mode='general'] - 'general' | 'debug' | 'impact' | 'usage'
   * @returns {{chunks: Array, budgetUsed: number, dropped: number, totalCandidates?: number}}
   */
  engineer(docs, query, mode = 'general') {
    if (!docs || docs.length === 0) return { chunks: [], budgetUsed: 0, dropped: 0 };

    // 1. Score every chunk using multiple signals
    const scored = docs.map(doc => ({
      doc,
      score: this._scoreChunk(doc, query, mode),
    }));

    // 2. Sort by composite score
    scored.sort((a, b) => b.score - a.score);

    // 3. Fill budget greedily, highest-score first
    const selected = [];
    let budgetUsed = 0;
    let dropped = 0;

    for (const { doc, score } of scored) {
      const tokens = estimateTokens(doc.content);
      if (budgetUsed + tokens <= CONTEXT_BUDGET_TOKENS) {
        selected.push({ ...doc, engineeredScore: parseFloat(score.toFixed(3)) });
        budgetUsed += tokens;
      } else {
        // Try to include a trimmed version if enough budget remains
        const remaining = CONTEXT_BUDGET_TOKENS - budgetUsed;
        if (remaining > 300) {
          const trimmedContent = this._trimToTokens(doc.content, remaining);
          selected.push({ ...doc, content: trimmedContent, engineeredScore: parseFloat(score.toFixed(3)), trimmed: true });
          budgetUsed += remaining; // budget is now exhausted by construction
        } else {
          dropped++;
        }
      }
    }

    // 4. Re-order for readability: put config/schema first, then code, then logs
    const ordered = this._readabilityOrder(selected, mode);

    logger.info(`[ContextEngineer] ${docs.length} docs → ${selected.length} selected, ${dropped} dropped, ~${budgetUsed} tokens`);

    return {
      chunks: ordered,
      budgetUsed,
      dropped,
      totalCandidates: docs.length,
    };
  }

  /**
   * Compute a composite importance score for a chunk.
   *
   * @param {Object} doc - the chunk (content, metadata, kind, relevanceScore, ...)
   * @param {string} query - user query (currently unused; kept for future signals)
   * @param {string} mode - query mode, drives mode-specific boosts
   * @returns {number} composite score (higher = more important)
   */
  _scoreChunk(doc, query, mode) {
    let score = (doc.relevanceScore || 0) * SIGNAL_WEIGHTS.relevanceScore;
    const meta = doc.metadata || {};
    const content = doc.content || '';

    // Mode-specific boosts
    if (mode === 'debug') {
      if (doc.kind === 'log') score += SIGNAL_WEIGHTS.isErrorLog * 3;
      if (meta.isBugFix) score += SIGNAL_WEIGHTS.isBugFix * 2;
      if (meta.errors?.length > 0) score += SIGNAL_WEIGHTS.hasErrors;
    }
    if (mode === 'impact') {
      if (meta.exports?.length > 0) score += SIGNAL_WEIGHTS.hasExports;
      if (meta.imports?.length > 0) score += 0.5;
    }
    if (mode === 'usage') {
      if (meta.functions?.length > 0) score += 0.8;
    }

    // Universal signals
    if (meta.isBugFix) score += SIGNAL_WEIGHTS.isBugFix;
    if (meta.errors?.length > 0) score += SIGNAL_WEIGHTS.hasErrors * 0.5;
    if (doc.kind === 'config') score *= SIGNAL_WEIGHTS.isConfig;
    if (doc.filename?.includes('test') || doc.filename?.includes('spec')) score *= SIGNAL_WEIGHTS.isTestFile;
    if (doc.chunkIndex === 0) score *= SIGNAL_WEIGHTS.chunkIsFirst;

    // Metadata richness bonus
    const metaFields = ['functions', 'classes', 'imports', 'exports', 'errors', 'patterns'];
    const richness = metaFields.filter(f => meta[f]?.length > 0).length;
    if (richness >= 3) score *= SIGNAL_WEIGHTS.metadataRich;

    // Recency bonus (ingested within last 24h)
    if (doc.ingestedAt) {
      const ageHours = (Date.now() - new Date(doc.ingestedAt).getTime()) / 3600000;
      if (ageHours < 24) score *= SIGNAL_WEIGHTS.isRecent;
    }

    return score;
  }

  /**
   * Re-order chunks for maximum readability in the prompt.
   * Schema/config first, then code, docs, logs — except in debug mode,
   * where logs lead.
   */
  _readabilityOrder(chunks, mode) {
    const order = { schema: 0, config: 1, code: 2, documentation: 3, log: 4, script: 5, unknown: 6 };
    if (mode === 'debug') {
      // Logs first for debugging
      order.log = 0; order.code = 1; order.documentation = 2;
    }
    return [...chunks].sort((a, b) => (order[a.kind] ?? 6) - (order[b.kind] ?? 6));
  }

  /**
   * Trim text to approximately N tokens (4 chars/token heuristic).
   */
  _trimToTokens(text, maxTokens) {
    const maxChars = maxTokens * 4;
    if (text.length <= maxChars) return text;
    return text.slice(0, maxChars) + '\n... [truncated for context budget]';
  }

  /**
   * Compress a context bundle by summarizing less-important chunks.
   * Used when context must be even further reduced.
   */
  summarizeChunk(doc) {
    const meta = doc.metadata || {};
    const lines = [
      `[${doc.kind}] ${doc.filename}`,
      meta.functions?.length ? `Functions: ${meta.functions.join(', ')}` : null,
      meta.classes?.length ? `Classes: ${meta.classes.join(', ')}` : null,
      meta.errors?.length ? `Known errors: ${meta.errors.join(', ')}` : null,
      // Guard: chunks without content should not crash summarization.
      `Snippet: ${(doc.content || '').slice(0, 200)}...`,
    ].filter(Boolean);
    return lines.join('\n');
  }

  /**
   * Prioritize a list of plain text messages by estimated importance.
   * Used for memory/conversation injection.
   *
   * @param {Array<string|{content: (string|Array)}>} messages
   * @param {number} [budgetTokens=2000]
   * @returns {Array} messages that fit the budget, in original order
   */
  prioritizeMessages(messages, budgetTokens = 2000) {
    let used = 0;
    const result = [];
    for (const msg of messages) {
      // Normalise to a string before estimating. Content may be a string, an
      // array of content blocks, or the message itself may be a bare string.
      // (Previously non-string content produced a NaN estimate, which made
      // every subsequent budget comparison false and silently dropped the
      // rest of the list.)
      const raw = typeof msg === 'string'
        ? msg
        : Array.isArray(msg.content) ? JSON.stringify(msg.content) : (msg.content || '');
      const t = estimateTokens(raw);
      if (used + t <= budgetTokens) {
        result.push(msg);
        used += t;
      }
    }
    return result;
  }
}
|
|
180
|
+
|
|
181
|
+
// Singleton — the whole process shares one ContextEngineer instance.
module.exports = new ContextEngineer();
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
/**
|
|
3
|
+
* Shows developers exactly what's going into the LLM context window:
|
|
4
|
+
* - Which chunks were retrieved and why (score breakdown)
|
|
5
|
+
* - How much of the token budget each piece uses
|
|
6
|
+
* - What memories are being injected
|
|
7
|
+
* - What the final prompt looks like (sanitized)
|
|
8
|
+
* - Token breakdown by section
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
const indexer = require('../core/indexer');
|
|
12
|
+
const contextEngineer = require('./contextEngineer');
|
|
13
|
+
const { MemoryManager } = require('../memory/memoryManager');
|
|
14
|
+
const teamMemory = require('../memory/teamMemory');
|
|
15
|
+
|
|
16
|
+
// Heuristic token estimate: ~4 characters per token.
const EST_TOKENS = (text) => Math.ceil((text || '').length / 4);

class ContextVisualizer {
  /**
   * Build a full breakdown of the context window that would be assembled
   * for `query`: retrieved chunks (with score breakdown), injected memories,
   * per-section token counts, and budget warnings.
   *
   * @param {string} query - the user query to visualize context for
   * @param {Object} [opts]
   * @param {string} [opts.mode='general'] - retrieval mode (general/debug/usage/impact)
   * @param {number} [opts.topK=8] - how many chunks to retrieve
   * @param {string} [opts.team] - team whose memory to include
   * @param {string} [opts.sessionId] - session identifier (currently unused)
   * @returns {Object} visualization payload (sections, summary, warnings, ...)
   */
  visualize(query, opts = {}) {
    const { mode = 'general', topK = 8, team, sessionId } = opts;

    // 1. Retrieve candidate documents for the requested mode
    const docs =
      mode === 'debug' ? indexer.searchForErrors(query, topK) :
      mode === 'usage' ? indexer.searchForUsages(query, topK) :
      mode === 'impact' ? indexer.searchForImpact(query, topK) :
      indexer.search(query, topK);

    // 2. Rank + trim against the token budget
    const engineered = contextEngineer.engineer(docs, query, mode);

    // 3. Gather the memory that would be injected alongside
    const memoryHits = MemoryManager.getRelevant(query, 5);
    const memoryBlock = MemoryManager.formatAsContext(memoryHits);
    const teamBlock = teamMemory.formatForAgent ? teamMemory.formatForAgent(team || 'global') : '';

    // Per-chunk summaries for the retrieval section
    const chunkSummaries = engineered.chunks.map((chunk) => {
      const meta = chunk.metadata || {};
      const text = chunk.content;
      return {
        file: chunk.filename,
        kind: chunk.kind,
        chunkIndex: chunk.chunkIndex,
        relevanceScore: chunk.relevanceScore,
        engineeredScore: chunk.engineeredScore,
        tokens: EST_TOKENS(text),
        trimmed: chunk.trimmed || false,
        snippet: text.slice(0, 100) + (text.length > 100 ? '...' : ''),
        metadata: {
          functions: meta.functions?.slice(0, 3) || [],
          classes: meta.classes?.slice(0, 3) || [],
          isBugFix: meta.isBugFix || false,
        },
      };
    });

    // 4. Token breakdown by prompt section
    const sections = {
      system_prompt: {
        label: 'System Prompt',
        tokens: EST_TOKENS('You are an expert developer assistant...'),
        content: '[system prompt — not shown for brevity]',
      },
      memory_context: {
        label: 'Memory Context',
        tokens: EST_TOKENS(memoryBlock),
        content: memoryBlock || '(no relevant memories)',
        count: memoryHits.length,
      },
      team_memory: {
        label: 'Team Memory',
        tokens: EST_TOKENS(teamBlock),
        content: teamBlock || '(no team memory)',
      },
      retrieval_context: {
        label: 'Retrieved Codebase Context',
        tokens: engineered.budgetUsed,
        chunks: chunkSummaries,
        dropped: engineered.dropped,
      },
      user_query: {
        label: 'User Query',
        tokens: EST_TOKENS(query),
        content: query,
      },
    };

    let totalTokens = 0;
    for (const section of Object.values(sections)) totalTokens += section.tokens || 0;

    // Budget advisories, in fixed priority order
    const warnings = [];
    if (totalTokens > 7000) warnings.push('⚠ Context is near limit — consider using /compact');
    if (engineered.dropped > 2) warnings.push(`⚠ ${engineered.dropped} chunks dropped due to budget`);
    if (memoryHits.length === 0) warnings.push('ℹ No relevant memories found — knowledge grows with use');

    return {
      query,
      mode,
      totalTokens,
      budgetUtilization: `${((totalTokens / 8000) * 100).toFixed(1)}%`,
      sections,
      summary: {
        chunksRetrieved: docs.length,
        chunksUsed: engineered.chunks.length,
        chunksDropped: engineered.dropped,
        memoriesInjected: memoryHits.length,
        topSources: engineered.chunks.slice(0, 3).map((c) => `${c.filename} (${c.relevanceScore})`),
      },
      warnings,
    };
  }

  /**
   * Render a visualization payload (from visualize()) as a human-readable
   * multi-line string.
   *
   * @param {Object} viz - payload returned by visualize()
   * @returns {string}
   */
  format(viz) {
    const out = [];
    out.push(`╔══ Context Window Visualization ══╗`);
    out.push(` Query: "${viz.query.slice(0, 60)}"`);
    out.push(` Mode: ${viz.mode}`);
    out.push(` Tokens: ${viz.totalTokens} (~${viz.budgetUtilization} of budget)`);
    out.push(``);
    out.push(` Sources (${viz.summary.chunksUsed} used, ${viz.summary.chunksDropped} dropped):`);

    for (const chunk of viz.sections.retrieval_context.chunks) {
      out.push(`  [${chunk.kind}] ${chunk.file} — rel:${chunk.relevanceScore} eng:${chunk.engineeredScore} ${chunk.trimmed ? '(trimmed)' : ''}`);
    }

    if (viz.summary.memoriesInjected > 0) {
      out.push(``, ` Memories injected: ${viz.summary.memoriesInjected}`);
    }

    if (viz.warnings.length) {
      out.push(``, ` Warnings:`);
      for (const warning of viz.warnings) out.push(`  ${warning}`);
    }

    return out.join('\n');
  }
}
|
|
139
|
+
|
|
140
|
+
// Singleton — the whole process shares one ContextVisualizer instance.
module.exports = new ContextVisualizer();
|