@su-record/vibe 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +13 -6
  2. package/bin/vibe +20 -2
  3. package/package.json +5 -6
  4. package/scripts/install-mcp.js +31 -5
  5. package/mcp/dist/__tests__/complexity.test.js +0 -126
  6. package/mcp/dist/__tests__/memory.test.js +0 -120
  7. package/mcp/dist/__tests__/python-dart-complexity.test.js +0 -146
  8. package/mcp/dist/index.js +0 -230
  9. package/mcp/dist/lib/ContextCompressor.js +0 -305
  10. package/mcp/dist/lib/MemoryManager.js +0 -334
  11. package/mcp/dist/lib/ProjectCache.js +0 -126
  12. package/mcp/dist/lib/PythonParser.js +0 -241
  13. package/mcp/dist/tools/browser/browserPool.js +0 -76
  14. package/mcp/dist/tools/browser/browserUtils.js +0 -135
  15. package/mcp/dist/tools/browser/inspectNetworkRequests.js +0 -140
  16. package/mcp/dist/tools/browser/monitorConsoleLogs.js +0 -97
  17. package/mcp/dist/tools/convention/analyzeComplexity.js +0 -248
  18. package/mcp/dist/tools/convention/applyQualityRules.js +0 -102
  19. package/mcp/dist/tools/convention/checkCouplingCohesion.js +0 -233
  20. package/mcp/dist/tools/convention/complexityMetrics.js +0 -133
  21. package/mcp/dist/tools/convention/dartComplexity.js +0 -117
  22. package/mcp/dist/tools/convention/getCodingGuide.js +0 -64
  23. package/mcp/dist/tools/convention/languageDetector.js +0 -50
  24. package/mcp/dist/tools/convention/pythonComplexity.js +0 -109
  25. package/mcp/dist/tools/convention/suggestImprovements.js +0 -257
  26. package/mcp/dist/tools/convention/validateCodeQuality.js +0 -177
  27. package/mcp/dist/tools/memory/autoSaveContext.js +0 -79
  28. package/mcp/dist/tools/memory/database.js +0 -123
  29. package/mcp/dist/tools/memory/deleteMemory.js +0 -39
  30. package/mcp/dist/tools/memory/listMemories.js +0 -38
  31. package/mcp/dist/tools/memory/memoryConfig.js +0 -27
  32. package/mcp/dist/tools/memory/memorySQLite.js +0 -138
  33. package/mcp/dist/tools/memory/memoryUtils.js +0 -34
  34. package/mcp/dist/tools/memory/migrate.js +0 -113
  35. package/mcp/dist/tools/memory/prioritizeMemory.js +0 -109
  36. package/mcp/dist/tools/memory/recallMemory.js +0 -40
  37. package/mcp/dist/tools/memory/restoreSessionContext.js +0 -69
  38. package/mcp/dist/tools/memory/saveMemory.js +0 -34
  39. package/mcp/dist/tools/memory/searchMemories.js +0 -37
  40. package/mcp/dist/tools/memory/startSession.js +0 -100
  41. package/mcp/dist/tools/memory/updateMemory.js +0 -46
  42. package/mcp/dist/tools/planning/analyzeRequirements.js +0 -166
  43. package/mcp/dist/tools/planning/createUserStories.js +0 -119
  44. package/mcp/dist/tools/planning/featureRoadmap.js +0 -202
  45. package/mcp/dist/tools/planning/generatePrd.js +0 -156
  46. package/mcp/dist/tools/prompt/analyzePrompt.js +0 -145
  47. package/mcp/dist/tools/prompt/enhancePrompt.js +0 -105
  48. package/mcp/dist/tools/semantic/findReferences.js +0 -195
  49. package/mcp/dist/tools/semantic/findSymbol.js +0 -200
  50. package/mcp/dist/tools/thinking/analyzeProblem.js +0 -50
  51. package/mcp/dist/tools/thinking/breakDownProblem.js +0 -140
  52. package/mcp/dist/tools/thinking/createThinkingChain.js +0 -39
  53. package/mcp/dist/tools/thinking/formatAsPlan.js +0 -73
  54. package/mcp/dist/tools/thinking/stepByStepAnalysis.js +0 -58
  55. package/mcp/dist/tools/thinking/thinkAloudProcess.js +0 -75
  56. package/mcp/dist/tools/time/getCurrentTime.js +0 -61
  57. package/mcp/dist/tools/ui/previewUiAscii.js +0 -232
  58. package/mcp/dist/types/tool.js +0 -2
  59. package/mcp/package.json +0 -53
package/mcp/dist/index.js DELETED
@@ -1,230 +0,0 @@
- #!/usr/bin/env node
- import { Server } from '@modelcontextprotocol/sdk/server/index.js';
- import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
- import { CallToolRequestSchema, ListToolsRequestSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';
- // Import all tool definitions and handlers
- import { getCurrentTimeDefinition, getCurrentTime } from './tools/time/getCurrentTime.js';
- // Semantic code analysis tools (Serena-inspired)
- import { findSymbolDefinition, findSymbol } from './tools/semantic/findSymbol.js';
- import { findReferencesDefinition, findReferences } from './tools/semantic/findReferences.js';
- import { createThinkingChainDefinition, createThinkingChain } from './tools/thinking/createThinkingChain.js';
- import { analyzeProblemDefinition, analyzeProblem } from './tools/thinking/analyzeProblem.js';
- import { stepByStepAnalysisDefinition, stepByStepAnalysis } from './tools/thinking/stepByStepAnalysis.js';
- import { breakDownProblemDefinition, breakDownProblem } from './tools/thinking/breakDownProblem.js';
- import { thinkAloudProcessDefinition, thinkAloudProcess } from './tools/thinking/thinkAloudProcess.js';
- import { formatAsPlanDefinition, formatAsPlan } from './tools/thinking/formatAsPlan.js';
- import { monitorConsoleLogsDefinition, monitorConsoleLogs } from './tools/browser/monitorConsoleLogs.js';
- import { inspectNetworkRequestsDefinition, inspectNetworkRequests } from './tools/browser/inspectNetworkRequests.js';
- import { saveMemoryDefinition, saveMemory } from './tools/memory/saveMemory.js';
- import { recallMemoryDefinition, recallMemory } from './tools/memory/recallMemory.js';
- import { listMemoriesDefinition, listMemories } from './tools/memory/listMemories.js';
- import { deleteMemoryDefinition, deleteMemory } from './tools/memory/deleteMemory.js';
- import { searchMemoriesDefinition, searchMemoriesHandler } from './tools/memory/searchMemories.js';
- import { updateMemoryDefinition, updateMemory } from './tools/memory/updateMemory.js';
- import { autoSaveContextDefinition, autoSaveContext } from './tools/memory/autoSaveContext.js';
- import { restoreSessionContextDefinition, restoreSessionContext } from './tools/memory/restoreSessionContext.js';
- import { prioritizeMemoryDefinition, prioritizeMemory } from './tools/memory/prioritizeMemory.js';
- import { startSessionDefinition, startSession } from './tools/memory/startSession.js';
- import { getCodingGuideDefinition, getCodingGuide } from './tools/convention/getCodingGuide.js';
- import { applyQualityRulesDefinition, applyQualityRules } from './tools/convention/applyQualityRules.js';
- import { validateCodeQualityDefinition, validateCodeQuality } from './tools/convention/validateCodeQuality.js';
- import { analyzeComplexityDefinition, analyzeComplexity } from './tools/convention/analyzeComplexity.js';
- import { checkCouplingCohesionDefinition, checkCouplingCohesion } from './tools/convention/checkCouplingCohesion.js';
- import { suggestImprovementsDefinition, suggestImprovements } from './tools/convention/suggestImprovements.js';
- import { generatePrdDefinition, generatePrd } from './tools/planning/generatePrd.js';
- import { createUserStoriesDefinition, createUserStories } from './tools/planning/createUserStories.js';
- import { analyzeRequirementsDefinition, analyzeRequirements } from './tools/planning/analyzeRequirements.js';
- import { featureRoadmapDefinition, featureRoadmap } from './tools/planning/featureRoadmap.js';
- import { enhancePromptDefinition, enhancePrompt } from './tools/prompt/enhancePrompt.js';
- import { analyzePromptDefinition, analyzePrompt } from './tools/prompt/analyzePrompt.js';
- import { previewUiAsciiDefinition, previewUiAscii } from './tools/ui/previewUiAscii.js';
- // Collect all tool definitions
- const tools = [
-     // Time Utility Tools
-     getCurrentTimeDefinition,
-     // Semantic Code Analysis Tools (Serena-inspired)
-     findSymbolDefinition,
-     findReferencesDefinition,
-     // Sequential Thinking Tools
-     createThinkingChainDefinition,
-     analyzeProblemDefinition,
-     stepByStepAnalysisDefinition,
-     breakDownProblemDefinition,
-     thinkAloudProcessDefinition,
-     formatAsPlanDefinition,
-     // Browser Development Tools
-     monitorConsoleLogsDefinition,
-     inspectNetworkRequestsDefinition,
-     // Memory Management Tools
-     saveMemoryDefinition,
-     recallMemoryDefinition,
-     listMemoriesDefinition,
-     deleteMemoryDefinition,
-     searchMemoriesDefinition,
-     updateMemoryDefinition,
-     autoSaveContextDefinition,
-     restoreSessionContextDefinition,
-     prioritizeMemoryDefinition,
-     startSessionDefinition,
-     // Convention Tools
-     getCodingGuideDefinition,
-     applyQualityRulesDefinition,
-     validateCodeQualityDefinition,
-     analyzeComplexityDefinition,
-     checkCouplingCohesionDefinition,
-     suggestImprovementsDefinition,
-     // Planning Tools
-     generatePrdDefinition,
-     createUserStoriesDefinition,
-     analyzeRequirementsDefinition,
-     featureRoadmapDefinition,
-     // Prompt Enhancement Tools
-     enhancePromptDefinition,
-     analyzePromptDefinition,
-     // UI Preview Tools
-     previewUiAsciiDefinition
- ];
- function createServer() {
-     const server = new Server({
-         name: 'Hi-AI',
-         version: '1.3.0',
-     }, {
-         capabilities: {
-             tools: {},
-             prompts: {},
-             resources: {},
-         },
-     });
-     server.setRequestHandler(ListToolsRequestSchema, async () => {
-         return { tools };
-     });
-     server.setRequestHandler(CallToolRequestSchema, async (request, extra) => {
-         const { name, arguments: args } = request.params;
-         try {
-             switch (name) {
-                 // Time Utility Tools
-                 case 'get_current_time':
-                     return await getCurrentTime(args);
-                 // Semantic Code Analysis Tools
-                 case 'find_symbol':
-                     return await findSymbol(args);
-                 case 'find_references':
-                     return await findReferences(args);
-                 // Sequential Thinking Tools
-                 case 'create_thinking_chain':
-                     return await createThinkingChain(args);
-                 case 'analyze_problem':
-                     return await analyzeProblem(args);
-                 case 'step_by_step_analysis':
-                     return await stepByStepAnalysis(args);
-                 case 'break_down_problem':
-                     return await breakDownProblem(args);
-                 case 'think_aloud_process':
-                     return await thinkAloudProcess(args);
-                 case 'format_as_plan':
-                     return await formatAsPlan(args);
-                 // Browser Development Tools
-                 case 'monitor_console_logs':
-                     return await monitorConsoleLogs(args);
-                 case 'inspect_network_requests':
-                     return await inspectNetworkRequests(args);
-                 // Memory Management Tools
-                 case 'save_memory':
-                     return await saveMemory(args);
-                 case 'recall_memory':
-                     return await recallMemory(args);
-                 case 'list_memories':
-                     return await listMemories(args);
-                 case 'delete_memory':
-                     return await deleteMemory(args);
-                 case 'search_memories':
-                     return await searchMemoriesHandler(args);
-                 case 'update_memory':
-                     return await updateMemory(args);
-                 case 'auto_save_context':
-                     return await autoSaveContext(args);
-                 case 'restore_session_context':
-                     return await restoreSessionContext(args);
-                 case 'prioritize_memory':
-                     return await prioritizeMemory(args);
-                 case 'start_session':
-                     return await startSession(args);
-                 // Convention Tools
-                 case 'get_coding_guide':
-                     return await getCodingGuide(args);
-                 case 'apply_quality_rules':
-                     return await applyQualityRules(args);
-                 case 'validate_code_quality':
-                     return await validateCodeQuality(args);
-                 case 'analyze_complexity':
-                     return await analyzeComplexity(args);
-                 case 'check_coupling_cohesion':
-                     return await checkCouplingCohesion(args);
-                 case 'suggest_improvements':
-                     return await suggestImprovements(args);
-                 // Planning Tools
-                 case 'generate_prd':
-                     return await generatePrd(args);
-                 case 'create_user_stories':
-                     return await createUserStories(args);
-                 case 'analyze_requirements':
-                     return await analyzeRequirements(args);
-                 case 'feature_roadmap':
-                     return await featureRoadmap(args);
-                 // Prompt Enhancement Tools
-                 case 'enhance_prompt':
-                     return await enhancePrompt(args);
-                 case 'analyze_prompt':
-                     return await analyzePrompt(args);
-                 // UI Preview Tools
-                 case 'preview_ui_ascii':
-                     return await previewUiAscii(args);
-                 default:
-                     throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
-             }
-         }
-         catch (error) {
-             throw new McpError(ErrorCode.InternalError, `Error executing tool: ${error instanceof Error ? error.message : 'Unknown error'}`);
-         }
-     });
-     return server;
- }
- // Default export for Smithery platform
- export default function ({ sessionId, config }) {
-     // Return the configured server instance
-     return createServer();
- }
- async function main() {
-     const server = createServer();
-     const transport = new StdioServerTransport();
-     // Handle process termination gracefully
-     process.on('SIGINT', async () => {
-         await server.close();
-         process.exit(0);
-     });
-     process.on('SIGTERM', async () => {
-         await server.close();
-         process.exit(0);
-     });
-     // Handle EPIPE errors that occur with sidecar proxy
-     process.on('uncaughtException', (error) => {
-         if (error.message && error.message.includes('EPIPE')) {
-             // Gracefully handle EPIPE errors
-             console.error('Connection closed by client');
-             return;
-         }
-         console.error('Uncaught exception:', error);
-         process.exit(1);
-     });
-     process.on('unhandledRejection', (reason, promise) => {
-         console.error('Unhandled Rejection at:', promise, 'reason:', reason);
-     });
-     await server.connect(transport);
- }
- // Only run main when not being imported by Smithery
- if (process.argv[1]?.includes('hi-ai') || process.argv[1]?.endsWith('index.js')) {
-     main().catch((error) => {
-         console.error('Server initialization failed:', error);
-         process.exit(1);
-     });
- }
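
The deleted entry point above follows the standard MCP SDK pattern: advertise a static tools array through ListToolsRequestSchema, then dispatch CallToolRequestSchema requests by tool name. A minimal sketch of that same pattern is shown below for reference; the echo_text tool and its result shape are illustrative assumptions based on MCP conventions, not part of this package.

    import { Server } from '@modelcontextprotocol/sdk/server/index.js';
    import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
    import { CallToolRequestSchema, ListToolsRequestSchema, ErrorCode, McpError } from '@modelcontextprotocol/sdk/types.js';

    // Hypothetical tool definition; in the real package these live under ./tools/*.
    const echoTextDefinition = {
        name: 'echo_text',
        description: 'Echo the provided text back to the caller',
        inputSchema: { type: 'object', properties: { text: { type: 'string' } }, required: ['text'] },
    };

    const server = new Server(
        { name: 'example-server', version: '0.0.1' },
        { capabilities: { tools: {} } }
    );

    // Advertise the tool list.
    server.setRequestHandler(ListToolsRequestSchema, async () => ({ tools: [echoTextDefinition] }));

    // Dispatch calls by tool name, mirroring the switch statement in the deleted index.js.
    server.setRequestHandler(CallToolRequestSchema, async (request) => {
        const { name, arguments: args } = request.params;
        if (name === 'echo_text') {
            // Assumed result shape: MCP tool results carry a content array.
            return { content: [{ type: 'text', text: args.text }] };
        }
        throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
    });

    // Connect over stdio, as the deleted main() does.
    await server.connect(new StdioServerTransport());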
package/mcp/dist/lib/ContextCompressor.js DELETED
@@ -1,305 +0,0 @@
- // Context compression utility (v1.3)
- // Intelligently compress context when approaching token limits
- export class ContextCompressor {
-     static MAX_CHUNK_SIZE = 500; // characters
-     static DEFAULT_TARGET_TOKENS = 4000;
-     static TOKENS_PER_CHAR_ESTIMATE = 0.25;
-     static MAX_SCORE = 100;
-     static MIN_SCORE = 0;
-     static CODE_KEYWORDS = [
-         'function', 'class', 'const', 'let', 'var', 'import', 'export',
-         'def', 'async', 'await', 'return', 'if', 'for', 'while'
-     ];
-     static IMPORTANT_KEYWORDS = [
-         'error', 'bug', 'fix', 'issue', 'problem', 'solution',
-         '에러', '버그', '수정', '문제', '해결', 'TODO', 'FIXME'
-     ];
-     /**
-      * Compress context by selecting most important chunks
-      * @param context - Text content to compress
-      * @param targetTokens - Target token count (default: 4000)
-      * @returns Compression result with statistics
-      */
-     static compress(context, targetTokens = ContextCompressor.DEFAULT_TARGET_TOKENS) {
-         // Handle empty or very short context
-         if (!context || context.trim().length === 0) {
-             return {
-                 compressed: '',
-                 originalSize: 0,
-                 compressedSize: 0,
-                 compressionRatio: 0,
-                 removedSections: [],
-                 retainedSections: []
-             };
-         }
-         const chunks = this.splitIntoChunks(context);
-         const scoredChunks = chunks.map(chunk => this.scoreChunk(chunk));
-         // If content is already smaller than target, return as-is
-         // Only skip compression if content is very small (use 1.2x instead of 4x)
-         // This ensures compression activates more aggressively
-         if (context.length <= targetTokens * 1.2) {
-             return {
-                 compressed: context,
-                 originalSize: context.length,
-                 compressedSize: context.length,
-                 compressionRatio: 1,
-                 removedSections: [],
-                 retainedSections: scoredChunks.map(s => s.type),
-                 retentionStats: {
-                     codeRetentionPercent: 100,
-                     answerRetentionPercent: 100,
-                     questionRetentionPercent: 100
-                 }
-             };
-         }
-         // Sort by score (highest first)
-         scoredChunks.sort((a, b) => b.score - a.score);
-         // Select chunks until target size
-         // TOKENS_PER_CHAR_ESTIMATE = 0.25 means 1 char ≈ 0.25 tokens, so 4 chars ≈ 1 token
-         // Reserve space for headers and formatting (5% overhead, min 50 chars, max 300 chars)
-         const HEADER_OVERHEAD = Math.max(50, Math.min(300, targetTokens * 4 * 0.05));
-         const targetChars = (targetTokens * 4) - HEADER_OVERHEAD;
-         const selected = [];
-         const removed = [];
-         let currentSize = 0;
-         for (const chunk of scoredChunks) {
-             if (currentSize + chunk.text.length <= targetChars) {
-                 selected.push(chunk);
-                 currentSize += chunk.text.length;
-             }
-             else {
-                 removed.push(this.summarizeChunk(chunk));
-             }
-         }
-         // Reconstruct compressed context
-         const compressed = this.reconstructContext(selected, removed);
-         // Calculate retention statistics
-         const retentionStats = this.calculateRetentionStats(scoredChunks, selected);
-         return {
-             compressed,
-             originalSize: context.length,
-             compressedSize: compressed.length,
-             compressionRatio: compressed.length / context.length,
-             removedSections: removed,
-             retainedSections: selected.map(s => s.type),
-             retentionStats
-         };
-     }
-     /**
-      * Calculate retention percentages by type
-      */
-     static calculateRetentionStats(allChunks, selectedChunks) {
-         const countByType = (chunks, type) => {
-             return chunks.filter(c => c.type === type).length;
-         };
-         const totalCode = countByType(allChunks, 'code');
-         const totalAnswer = countByType(allChunks, 'answer');
-         const totalQuestion = countByType(allChunks, 'question');
-         const retainedCode = countByType(selectedChunks, 'code');
-         const retainedAnswer = countByType(selectedChunks, 'answer');
-         const retainedQuestion = countByType(selectedChunks, 'question');
-         return {
-             codeRetentionPercent: totalCode > 0 ? Math.round((retainedCode / totalCode) * 100) : 0,
-             answerRetentionPercent: totalAnswer > 0 ? Math.round((retainedAnswer / totalAnswer) * 100) : 0,
-             questionRetentionPercent: totalQuestion > 0 ? Math.round((retainedQuestion / totalQuestion) * 100) : 0
-         };
-     }
-     /**
-      * Split context into manageable chunks
-      */
-     static splitIntoChunks(context) {
-         const chunks = [];
-         const lines = context.split('\n');
-         let currentChunk = '';
-         for (const line of lines) {
-             if (currentChunk.length + line.length > this.MAX_CHUNK_SIZE) {
-                 if (currentChunk.trim()) {
-                     chunks.push(currentChunk.trim());
-                 }
-                 currentChunk = line;
-             }
-             else {
-                 currentChunk += '\n' + line;
-             }
-         }
-         if (currentChunk.trim()) {
-             chunks.push(currentChunk.trim());
-         }
-         return chunks;
-     }
-     /**
-      * Score chunk importance (0-100)
-      * @param text - Text chunk to score
-      * @returns Scored chunk with type and keywords
-      */
-     static scoreChunk(text) {
-         const lowerText = text.toLowerCase();
-         const type = this.detectChunkType(lowerText, text);
-         const keywords = this.extractKeywords(lowerText);
-         const baseScore = this.calculateBaseScore(text, lowerText, type);
-         const finalScore = Math.max(ContextCompressor.MIN_SCORE, Math.min(ContextCompressor.MAX_SCORE, baseScore));
-         return { text, score: finalScore, type, keywords };
-     }
-     /**
-      * Detect chunk type based on content
-      */
-     static detectChunkType(lowerText, text) {
-         if (text.includes('```'))
-             return 'code';
-         if (lowerText.match(/^(answer|solution|결과|답변):/i))
-             return 'answer';
-         if (lowerText.match(/^(timestamp|date|author|file):/i))
-             return 'metadata';
-         if (lowerText.includes('?'))
-             return 'question';
-         if (this.CODE_KEYWORDS.some(kw => lowerText.includes(kw)))
-             return 'code';
-         return 'explanation';
-     }
-     /**
-      * Extract important keywords from text
-      */
-     static extractKeywords(lowerText) {
-         const keywords = [];
-         for (const keyword of this.IMPORTANT_KEYWORDS) {
-             if (lowerText.includes(keyword.toLowerCase())) {
-                 keywords.push(keyword);
-             }
-         }
-         return keywords;
-     }
-     /**
-      * Calculate base score for chunk
-      */
-     static calculateBaseScore(text, lowerText, type) {
-         let score = 0;
-         // Type-based scoring
-         score += this.getTypeScore(type, lowerText);
-         // Keyword bonus
-         score += this.getKeywordScore(lowerText);
-         // Structure bonuses
-         score += this.getStructureScore(text);
-         return score;
-     }
-     /**
-      * Get score based on chunk type
-      */
-     static getTypeScore(type, lowerText) {
-         const typeScores = {
-             code: 30,
-             answer: 35,
-             question: 25,
-             explanation: 0,
-             metadata: -20
-         };
-         return typeScores[type];
-     }
-     /**
-      * Get score for important keywords
-      */
-     static getKeywordScore(lowerText) {
-         let score = 0;
-         for (const keyword of this.IMPORTANT_KEYWORDS) {
-             if (lowerText.includes(keyword.toLowerCase())) {
-                 score += 15;
-             }
-         }
-         return score;
-     }
-     /**
-      * Get score based on text structure
-      */
-     static getStructureScore(text) {
-         let score = 0;
-         // Penalize very long chunks
-         if (text.length > 1000)
-             score -= 10;
-         // Boost short, concise chunks
-         if (text.length < 200 && text.split('\n').length <= 5)
-             score += 10;
-         // Boost structured content (lists)
-         if (text.match(/^[\d\-\*•]/m))
-             score += 15;
-         // Boost code blocks
-         if (text.includes('```'))
-             score += 20;
-         return score;
-     }
-     /**
-      * Summarize removed chunk (one-liner)
-      */
-     static summarizeChunk(chunk) {
-         const firstLine = chunk.text.split('\n')[0].trim();
-         const summary = firstLine.length > 80
-             ? firstLine.substring(0, 77) + '...'
-             : firstLine;
-         return `[${chunk.type}] ${summary}`;
-     }
-     /**
-      * Reconstruct compressed context
-      */
-     static reconstructContext(selected, removed) {
-         // Group by type for better organization
-         const byType = {
-             code: [],
-             answer: [],
-             question: [],
-             explanation: [],
-             metadata: []
-         };
-         selected.forEach(chunk => {
-             byType[chunk.type].push(chunk);
-         });
-         const sections = [];
-         // Add header
-         sections.push('[Compressed Context - High Priority Information]\n');
-         // Add answers first (most important)
-         if (byType.answer.length > 0) {
-             sections.push('## Key Answers & Solutions');
-             sections.push(byType.answer.map(c => c.text).join('\n\n'));
-             sections.push('');
-         }
-         // Add code blocks
-         if (byType.code.length > 0) {
-             sections.push('## Code Snippets');
-             sections.push(byType.code.map(c => c.text).join('\n\n'));
-             sections.push('');
-         }
-         // Add questions
-         if (byType.question.length > 0) {
-             sections.push('## Questions');
-             sections.push(byType.question.map(c => c.text).join('\n\n'));
-             sections.push('');
-         }
-         // Add explanations
-         if (byType.explanation.length > 0) {
-             sections.push('## Context');
-             sections.push(byType.explanation.map(c => c.text).join('\n\n'));
-             sections.push('');
-         }
-         // Add summary of removed sections
-         if (removed.length > 0) {
-             sections.push('## Removed Sections (Low Priority)');
-             sections.push(removed.join('\n'));
-         }
-         return sections.join('\n');
-     }
-     /**
-      * Extract key entities (names, numbers, dates) from context
-      */
-     static extractKeyEntities(context) {
-         const names = Array.from(new Set(context.match(/\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b/g) || []));
-         const numbers = Array.from(new Set(context.match(/\b\d+(?:\.\d+)?\b/g) || []));
-         const dates = Array.from(new Set(context.match(/\d{4}-\d{2}-\d{2}|\d{2}\/\d{2}\/\d{4}/g) || []));
-         const files = Array.from(new Set(context.match(/[\w\-]+\.[a-z]{2,4}\b/gi) || []));
-         return { names, numbers, dates, files };
-     }
-     /**
-      * Estimate token count (rough approximation)
-      */
-     static estimateTokens(text) {
-         // GPT-like tokenization: ~1 token per 4 characters
-         // More accurate would require actual tokenizer
-         return Math.ceil(text.length / 4);
-     }
- }
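
As the deleted source above shows, ContextCompressor was a purely static utility: compress() scores chunks, keeps the highest-value ones up to roughly 4 characters per target token, and returns the compressed text with statistics. A short usage sketch follows; the transcript.txt input is a hypothetical example, not a file from this package.

    import { readFileSync } from 'node:fs';
    import { ContextCompressor } from './lib/ContextCompressor.js';

    // Hypothetical input: a long conversation transcript to shrink toward ~4000 tokens.
    const longTranscript = readFileSync('transcript.txt', 'utf8');

    const result = ContextCompressor.compress(longTranscript, 4000);
    console.log(result.compressionRatio);   // compressedSize / originalSize
    console.log(result.retentionStats);     // per-type retention percentages
    console.log(result.removedSections);    // one-line summaries of dropped chunks
    console.log(ContextCompressor.estimateTokens(result.compressed)); // roughly chars / 4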