smart-coding-mcp 1.0.0
- package/ARCHITECTURE.md +287 -0
- package/CONTRIBUTING.md +308 -0
- package/LICENSE +21 -0
- package/README.md +305 -0
- package/config.json +18 -0
- package/features/clear-cache.js +45 -0
- package/features/hybrid-search.js +114 -0
- package/features/index-codebase.js +213 -0
- package/index.js +174 -0
- package/lib/cache.js +114 -0
- package/lib/config.js +146 -0
- package/lib/ignore-patterns.js +314 -0
- package/lib/project-detector.js +75 -0
- package/lib/utils.js +80 -0
- package/package.json +54 -0
- package/scripts/clear-cache.js +31 -0
package/README.md
ADDED
@@ -0,0 +1,305 @@

# Smart Coding MCP

An extensible Model Context Protocol (MCP) server that provides intelligent semantic code search for AI assistants. Built with local AI models, inspired by Cursor's semantic search research.

## What This Does

AI coding assistants work better when they can find relevant code quickly. Traditional keyword search falls short - if you ask "where do we handle authentication?" but your code uses "login" and "session", keyword search misses it.

This MCP server solves that by indexing your codebase with AI embeddings. Your AI assistant can search by meaning instead of exact keywords, finding relevant code even when the terminology differs.

## Why Use This

**Better Code Understanding**

- Search finds code by concept, not just matching words
- Works with typos and variations in terminology
- Natural language queries like "where do we validate user input?"

**Performance**

- Pre-indexed embeddings are faster than scanning files at runtime
- Smart project detection skips dependencies automatically (node_modules, vendor, etc.)
- Incremental updates - only re-processes changed files

**Privacy**

- Everything runs locally on your machine
- Your code never leaves your system
- No API calls to external services

## Installation

### Prerequisites

- Node.js 18 or higher
- npm or yarn

### Setup

1. Install dependencies:

```bash
npm install
```

2. Add to your MCP configuration file:

```json
{
  "mcpServers": {
    "smart-coding-mcp": {
      "command": "node",
      "args": ["/path/to/smart-coding-mcp/index.js"],
      "cwd": "/path/to/smart-coding-mcp"
    }
  }
}
```

3. Restart your AI assistant

The server will automatically index your codebase on first run.

## Available Tools

**semantic_search** - Find code by meaning

```
Query: "Where do we validate user input?"
Returns: Relevant validation code with file paths and line numbers
```

**index_codebase** - Manually trigger reindexing

```
Use after major refactoring or branch switches
```

**clear_cache** - Reset the embeddings cache

```
Useful when cache becomes corrupted or outdated
```
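
For reference, this is roughly the shape of a `semantic_search` call as the server's tool handler receives it (compare `handleToolCall` in `features/hybrid-search.js` later in this diff); the query text is only an example:

```javascript
// Illustrative MCP tool-call request as seen by the semantic_search handler.
const request = {
  params: {
    name: "semantic_search",
    arguments: {
      query: "where do we validate user input?",
      maxResults: 5 // optional; falls back to maxResults from config.json
    }
  }
};
```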

## How It Works

The server indexes your code in four steps:

1. **Discovery**: Scans your project for source files
2. **Chunking**: Breaks code into meaningful pieces (respecting function boundaries)
3. **Embedding**: Converts each chunk to a vector using a local AI model
4. **Storage**: Saves embeddings to `.smart-coding-cache/` for fast startup

When you search, your query is converted to the same vector format and compared against all code chunks using cosine similarity. The most relevant matches are returned.
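
A minimal sketch of the query side of that pipeline, assuming the `@xenova/transformers` package that provides transformers.js; the inline `cosineSimilarity` stands in for the helper the server imports from `lib/utils.js` (not shown in this diff):

```javascript
import { pipeline } from "@xenova/transformers";

// Inline cosine similarity for illustration; the package keeps its own
// implementation in lib/utils.js.
function cosineSimilarity(a, b) {
  let dot = 0;
  for (let i = 0; i < a.length; i++) dot += a[i] * b[i];
  return dot; // both vectors are normalized, so the dot product is the cosine
}

const embedder = await pipeline("feature-extraction", "Xenova/all-MiniLM-L6-v2");
const queryEmbed = await embedder("where do we validate user input?", { pooling: "mean", normalize: true });
const queryVector = Array.from(queryEmbed.data);

// Each indexed chunk looks like { file, startLine, endLine, content, vector },
// so ranking is: score every chunk with cosineSimilarity(queryVector, chunk.vector)
// and return the top matches.
```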

### Smart Project Detection

The server detects your project type by looking for marker files and automatically applies appropriate ignore patterns:

**JavaScript/Node** (package.json found)

- Ignores: node_modules, dist, build, .next, coverage

**Python** (requirements.txt or pyproject.toml)

- Ignores: `__pycache__`, venv, .pytest_cache, .tox

**Android** (build.gradle)

- Ignores: .gradle, build artifacts, generated code

**iOS** (Podfile)

- Ignores: Pods, DerivedData, xcuserdata

**And more**: Go, PHP, Rust, Ruby, .NET

This typically reduces indexed file count by 100x. A project with 50,000 files (including node_modules) indexes just 500 actual source files.
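
The detection logic lives in `lib/project-detector.js` and `lib/ignore-patterns.js` (listed above but not shown in this excerpt); the idea is roughly the following sketch, where the marker files and globs mirror the list above and the helper name is illustrative:

```javascript
// Illustrative sketch of marker-file detection, not the package's implementation.
import fs from "fs";
import path from "path";

const MARKERS = [
  { type: "node", file: "package.json", ignores: ["**/node_modules/**", "**/dist/**", "**/build/**", "**/.next/**", "**/coverage/**"] },
  { type: "python", file: "requirements.txt", ignores: ["**/__pycache__/**", "**/venv/**", "**/.pytest_cache/**", "**/.tox/**"] },
  { type: "python", file: "pyproject.toml", ignores: ["**/__pycache__/**", "**/venv/**", "**/.pytest_cache/**", "**/.tox/**"] },
  { type: "android", file: "build.gradle", ignores: ["**/.gradle/**", "**/build/**"] },
  { type: "ios", file: "Podfile", ignores: ["**/Pods/**", "**/DerivedData/**", "**/xcuserdata/**"] }
];

// Collect ignore globs for every project type whose marker file exists.
function detectIgnorePatterns(projectDir) {
  return MARKERS
    .filter(m => fs.existsSync(path.join(projectDir, m.file)))
    .flatMap(m => m.ignores);
}
```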

## Configuration

The server works out of the box with sensible defaults. Create a `config.json` file to customize:

```json
{
  "searchDirectory": ".",
  "fileExtensions": ["js", "ts", "py", "java", "go"],
  "excludePatterns": ["**/my-custom-ignore/**"],
  "smartIndexing": true,
  "verbose": false,
  "enableCache": true,
  "cacheDirectory": "./.smart-coding-cache",
  "watchFiles": true,
  "chunkSize": 15,
  "maxResults": 5
}
```

**Key options:**

- `smartIndexing`: Enable automatic project type detection and smart ignore patterns (default: true)
- `verbose`: Show detailed indexing logs (default: false)
- `watchFiles`: Automatically reindex when files change (default: true)
- `enableCache`: Cache embeddings to disk (default: true)
- `chunkSize`: Lines of code per chunk - smaller = more precise, larger = more context (default: 15); see the sketch below
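
To make `chunkSize` (and the related `chunkOverlap` option in the shipped `config.json`) concrete, here is an illustrative line-window sketch; the package's actual `smartChunk` in `lib/utils.js` additionally respects function boundaries and is not shown here:

```javascript
// Illustrative only: fixed-size line windows with a few overlapping lines.
function lineChunks(content, chunkSize = 15, chunkOverlap = 3) {
  const lines = content.split("\n");
  const chunks = [];
  const step = Math.max(1, chunkSize - chunkOverlap);
  for (let start = 0; start < lines.length; start += step) {
    const end = Math.min(start + chunkSize, lines.length);
    chunks.push({ startLine: start + 1, endLine: end, text: lines.slice(start, end).join("\n") });
    if (end === lines.length) break;
  }
  return chunks;
}
```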

## Examples

**Natural language search:**

Query: "How do we handle cache persistence?"

Result:

```javascript
// lib/cache.js (Relevance: 38.2%)
async save() {
  await fs.writeFile(cacheFile, JSON.stringify(this.vectorStore));
  await fs.writeFile(hashFile, JSON.stringify(this.fileHashes));
}
```

**Typo tolerance:**

Query: "embeding modle initializashun"

Still finds embedding model initialization code despite multiple typos.

**Conceptual search:**

Query: "error handling and exceptions"

Finds all try/catch blocks and error handling patterns.

## Performance

Tested on a typical JavaScript project:

| Metric         | Without Smart Indexing | With Smart Indexing |
| -------------- | ---------------------- | ------------------- |
| Files scanned  | 50,000+                | 500                 |
| Indexing time  | 10+ min                | 2-3 min             |
| Memory usage   | 2GB+                   | ~200MB              |
| Search latency | N/A                    | <100ms              |

## Supported File Types

Languages: JavaScript, TypeScript, Python, Java, Kotlin, Scala, C, C++, C#, Go, Rust, Ruby, PHP, Swift, Shell

Web: HTML, CSS, SCSS, Sass, XML, SVG

Config/Data: JSON, YAML, TOML, SQL

Total: 36 file extensions

## Architecture

```
smart-coding-mcp/
├── index.js                  # MCP server entry point
├── lib/
│   ├── config.js             # Configuration + smart detection
│   ├── cache.js              # Embeddings persistence
│   ├── utils.js              # Smart chunking
│   ├── ignore-patterns.js    # Language-specific patterns
│   └── project-detector.js   # Project type detection
└── features/
    ├── hybrid-search.js      # Semantic + exact match search
    ├── index-codebase.js     # File indexing + watching
    └── clear-cache.js        # Cache management
```

The modular design makes it easy to add new features. See ARCHITECTURE.md for implementation details.
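
Each feature module follows the same small contract, visible in the three files later in this diff: a class constructed with `(embedder, cache, config)`, a `getToolDefinition()` that describes the MCP tool, and a `handleToolCall(request, instance)` that formats the response. A new feature would look roughly like this (names are placeholders; wiring it up in `index.js` is not shown here):

```javascript
// Skeleton following the pattern of features/hybrid-search.js,
// features/index-codebase.js and features/clear-cache.js.
export class MyFeature {
  constructor(embedder, cache, config) {
    this.embedder = embedder;
    this.cache = cache;
    this.config = config;
  }

  async execute(args) {
    // ...feature logic using the shared embedder/cache/config...
    return { summary: "done" };
  }
}

export function getToolDefinition() {
  return {
    name: "my_feature",
    description: "What this tool does.",
    inputSchema: { type: "object", properties: {} }
  };
}

export async function handleToolCall(request, myFeature) {
  const result = await myFeature.execute(request.params.arguments ?? {});
  return { content: [{ type: "text", text: result.summary }] };
}
```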

## Troubleshooting

**"Server can't find config.json"**

Make sure `cwd` is set in your MCP configuration to the full path of smart-coding-mcp.

**"Indexing takes too long"**

- Verify `smartIndexing` is enabled
- Add more patterns to `excludePatterns`
- Reduce `fileExtensions` to only what you need

**"Search results aren't relevant"**

- Try more specific queries
- Increase `maxResults` to see more options
- Run `index_codebase` to force a full reindex

**"Cache corruption errors"**

Use the `clear_cache` tool or run:

```bash
npm run clear-cache
```

## CLI Commands

```bash
# Start the server
npm start

# Development mode with auto-restart
npm run dev

# Clear embeddings cache
npm run clear-cache
```

## Privacy

- AI model runs entirely on your machine
- No network requests to external services
- No telemetry or analytics
- Cache stored locally in `.smart-coding-cache/`

## Technical Details

**Embedding Model**: all-MiniLM-L6-v2 via transformers.js

- Fast inference (CPU-friendly)
- Small model size (~100MB)
- Good accuracy for code search

**Vector Similarity**: Cosine similarity

- Efficient comparison of embeddings
- Normalized vectors for consistent scoring

**Hybrid Scoring**: Combines semantic similarity with exact text matching (see the sketch below)

- Semantic weight: 0.7 (configurable)
- Exact match boost: +1.5 added to the score (configurable)
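
Concretely, the per-chunk score computed in `features/hybrid-search.js` (shown later in this diff) boils down to the following; `cosineSimilarity` is passed in as a parameter only to keep the sketch self-contained:

```javascript
// Semantic part, plus either a flat exact-match boost (added, not multiplied)
// or a small partial-word bonus, mirroring features/hybrid-search.js.
function hybridScore(queryVector, query, chunk, config, cosineSimilarity) {
  let score = cosineSimilarity(queryVector, chunk.vector) * config.semanticWeight; // 0.7 by default

  const q = query.toLowerCase();
  const text = chunk.content.toLowerCase();
  if (text.includes(q)) {
    score += config.exactMatchBoost; // 1.5 by default
  } else {
    const words = q.split(/\s+/);
    const matched = words.filter(w => w.length > 2 && text.includes(w)).length;
    score += (matched / words.length) * 0.3;
  }
  return score;
}
```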

## Research Background

This project builds on research from Cursor showing that semantic search improves AI coding agent performance by 12.5% on average across question-answering tasks. The key insight is that AI assistants benefit more from relevant context than from large amounts of context.

See: https://cursor.com/blog/semsearch

## Contributing

Contributions are welcome. See CONTRIBUTING.md for guidelines.

Potential areas for improvement:

- Additional language support
- Code complexity analysis
- Refactoring pattern detection
- Documentation generation

## License

MIT - see LICENSE file

## Documentation

- ARCHITECTURE.md - Implementation details and design decisions
- CONTRIBUTING.md - Guidelines for contributors
- EXAMPLES.md - More usage examples
- QUICKSTART.md - Detailed setup guide

package/config.json
ADDED
@@ -0,0 +1,18 @@

{
  "searchDirectory": ".",
  "fileExtensions": ["js", "ts", "jsx", "tsx", "mjs", "cjs", "css", "scss", "sass", "less", "html", "htm", "xml", "svg", "py", "pyw", "java", "kt", "scala", "c", "cpp", "h", "hpp", "cs", "go", "rs", "rb", "php", "swift", "sh", "bash", "json", "yaml", "yml", "toml", "sql"],
  "excludePatterns": ["**/node_modules/**", "**/dist/**", "**/build/**", "**/.git/**", "**/coverage/**", "**/.next/**", "**/target/**", "**/vendor/**", "**/.smart-coding-cache/**"],
  "smartIndexing": true,
  "chunkSize": 15,
  "chunkOverlap": 3,
  "batchSize": 100,
  "maxFileSize": 1048576,
  "maxResults": 5,
  "enableCache": true,
  "cacheDirectory": "./.smart-coding-cache",
  "watchFiles": true,
  "verbose": false,
  "embeddingModel": "Xenova/all-MiniLM-L6-v2",
  "semanticWeight": 0.7,
  "exactMatchBoost": 1.5
}

package/features/clear-cache.js
ADDED
@@ -0,0 +1,45 @@

export class CacheClearer {
  // embedder is accepted to match the shared (embedder, cache, config)
  // feature constructor signature, but this feature does not need it.
  constructor(embedder, cache, config) {
    this.cache = cache;
    this.config = config;
  }

  async execute() {
    await this.cache.clear();
    return {
      success: true,
      message: `Cache cleared successfully. Next indexing will be a full rebuild.`,
      cacheDirectory: this.config.cacheDirectory
    };
  }
}

export function getToolDefinition() {
  return {
    name: "clear_cache",
    description: "Clears the embeddings cache, forcing a complete reindex on next search or manual index operation. Useful when encountering cache corruption or after major codebase changes.",
    inputSchema: {
      type: "object",
      properties: {}
    }
  };
}

export async function handleToolCall(request, cacheClearer) {
  try {
    const result = await cacheClearer.execute();
    return {
      content: [{
        type: "text",
        text: `${result.message}\n\nCache directory: ${result.cacheDirectory}`
      }]
    };
  } catch (error) {
    return {
      content: [{
        type: "text",
        text: `Failed to clear cache: ${error.message}`
      }]
    };
  }
}

package/features/hybrid-search.js
ADDED
@@ -0,0 +1,114 @@

import path from "path";
import { cosineSimilarity } from "../lib/utils.js";

export class HybridSearch {
  constructor(embedder, cache, config) {
    this.embedder = embedder;
    this.cache = cache;
    this.config = config;
  }

  async search(query, maxResults) {
    const vectorStore = this.cache.getVectorStore();

    if (vectorStore.length === 0) {
      return {
        results: [],
        message: "No code has been indexed yet. Please wait for initial indexing to complete."
      };
    }

    // Generate query embedding
    const queryEmbed = await this.embedder(query, { pooling: "mean", normalize: true });
    const queryVector = Array.from(queryEmbed.data);

    // Score all chunks
    const scoredChunks = vectorStore.map(chunk => {
      // Semantic similarity
      let score = cosineSimilarity(queryVector, chunk.vector) * this.config.semanticWeight;

      // Exact match boost
      const lowerQuery = query.toLowerCase();
      const lowerContent = chunk.content.toLowerCase();

      if (lowerContent.includes(lowerQuery)) {
        score += this.config.exactMatchBoost;
      } else {
        // Partial word matching
        const queryWords = lowerQuery.split(/\s+/);
        const matchedWords = queryWords.filter(word =>
          word.length > 2 && lowerContent.includes(word)
        ).length;
        score += (matchedWords / queryWords.length) * 0.3;
      }

      return { ...chunk, score };
    });

    // Get top results
    const results = scoredChunks
      .sort((a, b) => b.score - a.score)
      .slice(0, maxResults);

    return { results, message: null };
  }

  formatResults(results) {
    if (results.length === 0) {
      return "No matching code found for your query.";
    }

    return results.map((r, idx) => {
      const relPath = path.relative(this.config.searchDirectory, r.file);
      return `## Result ${idx + 1} (Relevance: ${(r.score * 100).toFixed(1)}%)\n` +
        `**File:** \`${relPath}\`\n` +
        `**Lines:** ${r.startLine}-${r.endLine}\n\n` +
        "```" + path.extname(r.file).slice(1) + "\n" +
        r.content + "\n" +
        "```\n";
    }).join("\n");
  }
}

// MCP Tool definition for this feature
export function getToolDefinition(config) {
  return {
    name: "semantic_search",
    description: "Performs intelligent hybrid code search combining semantic understanding with exact text matching. Ideal for finding code by meaning (e.g., 'authentication logic', 'database queries') even with typos or variations. Returns the most relevant code snippets with file locations and line numbers.",
    inputSchema: {
      type: "object",
      properties: {
        query: {
          type: "string",
          description: "Search query - can be natural language (e.g., 'where do we handle user login') or specific terms"
        },
        maxResults: {
          type: "number",
          description: "Maximum number of results to return (default: from config)",
          default: config.maxResults
        }
      },
      required: ["query"]
    }
  };
}

// Tool handler
export async function handleToolCall(request, hybridSearch) {
  const query = request.params.arguments.query;
  const maxResults = request.params.arguments.maxResults || hybridSearch.config.maxResults;

  const { results, message } = await hybridSearch.search(query, maxResults);

  if (message) {
    return {
      content: [{ type: "text", text: message }]
    };
  }

  const formattedText = hybridSearch.formatResults(results);

  return {
    content: [{ type: "text", text: formattedText }]
  };
}

package/features/index-codebase.js
ADDED
@@ -0,0 +1,213 @@

import { glob } from "glob";
import fs from "fs/promises";
import chokidar from "chokidar";
import path from "path";
import { smartChunk, hashContent } from "../lib/utils.js";

export class CodebaseIndexer {
  constructor(embedder, cache, config) {
    this.embedder = embedder;
    this.cache = cache;
    this.config = config;
    this.watcher = null;
  }

  async indexFile(file) {
    const fileName = path.basename(file);
    if (this.config.verbose) {
      console.error(`[Indexer] Processing: ${fileName}...`);
    }

    try {
      // Check file size first
      const stats = await fs.stat(file);

      // Skip directories
      if (stats.isDirectory()) {
        return 0;
      }

      if (stats.size > this.config.maxFileSize) {
        if (this.config.verbose) {
          console.error(`[Indexer] Skipped ${fileName} (too large: ${(stats.size / 1024 / 1024).toFixed(2)}MB)`);
        }
        return 0;
      }

      const content = await fs.readFile(file, "utf-8");
      const hash = hashContent(content);

      // Skip if file hasn't changed
      if (this.cache.getFileHash(file) === hash) {
        if (this.config.verbose) {
          console.error(`[Indexer] Skipped ${fileName} (unchanged)`);
        }
        return 0;
      }

      if (this.config.verbose) {
        console.error(`[Indexer] Indexing ${fileName}...`);
      }

      // Remove old chunks for this file
      this.cache.removeFileFromStore(file);

      const chunks = smartChunk(content, file, this.config);
      let addedChunks = 0;

      for (const chunk of chunks) {
        try {
          const output = await this.embedder(chunk.text, { pooling: "mean", normalize: true });

          this.cache.addToStore({
            file,
            startLine: chunk.startLine,
            endLine: chunk.endLine,
            content: chunk.text,
            vector: Array.from(output.data)
          });
          addedChunks++;
        } catch (embeddingError) {
          console.error(`[Indexer] Failed to embed chunk in ${fileName}:`, embeddingError.message);
        }
      }

      this.cache.setFileHash(file, hash);
      if (this.config.verbose) {
        console.error(`[Indexer] Completed ${fileName} (${addedChunks} chunks)`);
      }
      return addedChunks;
    } catch (error) {
      console.error(`[Indexer] Error indexing ${fileName}:`, error.message);
      return 0;
    }
  }

  async indexAll() {
    console.error(`[Indexer] Indexing files in ${this.config.searchDirectory}...`);

    const pattern = `${this.config.searchDirectory}/**/*.{${this.config.fileExtensions.join(",")}}`;
    const files = await glob(pattern, {
      ignore: this.config.excludePatterns,
      absolute: true
    });

    console.error(`[Indexer] Found ${files.length} files to process`);

    let totalChunks = 0;
    let processedFiles = 0;
    let skippedFiles = 0;

    // Process files in parallel batches for speed
    const BATCH_SIZE = this.config.batchSize || 100;

    for (let i = 0; i < files.length; i += BATCH_SIZE) {
      const batch = files.slice(i, i + BATCH_SIZE);

      // Process batch in parallel
      const results = await Promise.all(
        batch.map(file => this.indexFile(file))
      );

      // Aggregate results
      for (const chunksAdded of results) {
        totalChunks += chunksAdded;
        processedFiles++;
        if (chunksAdded === 0) skippedFiles++;
      }

      // Progress indicator every 500 files (less console overhead)
      if (processedFiles % 500 === 0 || processedFiles === files.length) {
        console.error(`[Indexer] Progress: ${processedFiles}/${files.length} files processed...`);
      }
    }

    console.error(`[Indexer] Indexed ${totalChunks} code chunks from ${files.length} files (${skippedFiles} unchanged)`);
    await this.cache.save();
  }

  setupFileWatcher() {
    if (!this.config.watchFiles) return;

    const pattern = this.config.fileExtensions.map(ext => `**/*.${ext}`);

    this.watcher = chokidar.watch(pattern, {
      cwd: this.config.searchDirectory,
      ignored: this.config.excludePatterns,
      persistent: true,
      ignoreInitial: true
    });

    this.watcher
      .on("add", async (filePath) => {
        const fullPath = path.join(this.config.searchDirectory, filePath);
        console.error(`[Indexer] New file detected: ${filePath}`);
        await this.indexFile(fullPath);
        await this.cache.save();
      })
      .on("change", async (filePath) => {
        const fullPath = path.join(this.config.searchDirectory, filePath);
        console.error(`[Indexer] File changed: ${filePath}`);
        await this.indexFile(fullPath);
        await this.cache.save();
      })
      .on("unlink", (filePath) => {
        const fullPath = path.join(this.config.searchDirectory, filePath);
        console.error(`[Indexer] File deleted: ${filePath}`);
        this.cache.removeFileFromStore(fullPath);
        this.cache.deleteFileHash(fullPath);
        this.cache.save();
      });

    console.error("[Indexer] File watcher enabled for incremental indexing");
  }

  async initialize() {
    await this.indexAll();
    this.setupFileWatcher();
  }
}

// MCP Tool definition for this feature
export function getToolDefinition() {
  return {
    name: "index_codebase",
    description: "Manually trigger a full reindex of the codebase. This will scan all files and update the embeddings cache. Useful after large code changes or if the index seems out of date.",
    inputSchema: {
      type: "object",
      properties: {
        force: {
          type: "boolean",
          description: "Force reindex even if files haven't changed",
          default: false
        }
      }
    }
  };
}

// Tool handler
export async function handleToolCall(request, indexer) {
  const force = request.params.arguments?.force || false;

  if (force) {
    // Clear cache to force full reindex
    indexer.cache.setVectorStore([]);
    indexer.cache.fileHashes = new Map();
  }

  await indexer.indexAll();

  const vectorStore = indexer.cache.getVectorStore();
  const stats = {
    totalChunks: vectorStore.length,
    totalFiles: new Set(vectorStore.map(v => v.file)).size
  };

  return {
    content: [{
      type: "text",
      text: `Codebase reindexed successfully.\n\nStatistics:\n- Files indexed: ${stats.totalFiles}\n- Code chunks: ${stats.totalChunks}`
    }]
  };
}