serena-slim 0.0.1-slim.1.2 → 0.0.1-slim.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -10
- package/bin/mcpslim-windows-x64.exe +0 -0
- package/index.js +22 -5
- package/package.json +1 -1
- package/recipes/serena.json +9 -11
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # serena-slim
 
-> **Serena MCP server optimized for AI assistants** — Reduce context window tokens by
+> **Serena MCP server optimized for AI assistants** — Reduce context window tokens by ~38% while keeping full functionality. Compatible with Claude, ChatGPT, Gemini, Cursor, and all MCP clients.
 
 [](https://www.npmjs.com/package/serena-slim)
 [](https://github.com/mcpslim/mcpslim)
@@ -14,11 +14,11 @@ A **token-optimized** version of the Serena [Model Context Protocol (MCP)](https
 
 MCP tool schemas consume significant **context window tokens**. When AI assistants like Claude or ChatGPT load MCP tools, each tool definition takes up valuable context space.
 
-The original `serena` loads **29 tools** consuming approximately **~
+The original `serena` loads **29 tools** consuming approximately **~0 tokens** — that's space you could use for actual conversation.
 
 ### The Solution
 
-`serena-slim` intelligently **groups 29 tools into 18 semantic operations**, reducing token usage by **
+`serena-slim` intelligently **groups 29 tools into 18 semantic operations**, reducing token usage by **undefined** — with **zero functionality loss**.
 
 Your AI assistant sees fewer, smarter tools. Every original capability remains available.
 
@@ -27,8 +27,6 @@ Your AI assistant sees fewer, smarter tools. Every original capability remains a
 | Metric | Original | Slim | Reduction |
 |--------|----------|------|-----------|
 | Tools | 29 | 18 | **-38%** |
-| Schema Tokens | 7,348 | 1,395 | **81.0%** |
-| Claude Code (est.) | ~23,878 | ~11,655 | **~51.2%** |
 
 > **Benchmark Info**
 > - Original: `serena@0.0.1`
@@ -55,8 +53,11 @@ Done! Restart your app to use serena.
 ### CLI Tools (already have CLI?)
 
 ```bash
-# Claude Code
-claude mcp add serena -- npx -y serena-slim
+# Claude Code (creates .mcp.json in project root)
+claude mcp add serena -s project -- npx -y serena-slim
+
+# Windows: use cmd /c wrapper
+claude mcp add serena -s project -- cmd /c npx -y serena-slim
 
 # VS Code (Copilot, Cline, Roo Code)
 code --add-mcp '{"name":"serena","command":"npx","args":["-y","serena-slim"]}'
@@ -113,7 +114,7 @@ MCPSlim acts as a **transparent bridge** between AI models and the original MCP
 │ Without MCPSlim │
 │ │
 │ [AI Model] ──── reads 29 tool schemas ────→ [Original MCP] │
-│ (~
+│ (~0 tokens loaded into context) │
 ├─────────────────────────────────────────────────────────────────┤
 │ With MCPSlim │
 │ │
@@ -121,7 +122,7 @@ MCPSlim acts as a **transparent bridge** between AI models and the original MCP
 │ │ │ │ │
 │ Sees 18 grouped Translates to Executes actual │
 │ tools only original call tool & returns │
-│ (~
+│ (~0 tokens) │
 └─────────────────────────────────────────────────────────────────┘
 ```
 
@@ -133,7 +134,7 @@ MCPSlim acts as a **transparent bridge** between AI models and the original MCP
 4. **Original MCP executes** — Real server processes the request
 5. **Response returned** — Result passes back unchanged
 
-**Zero functionality loss.
+**Zero functionality loss. ~38% token savings.**
 
 ## Available Tool Groups
 
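The README hunks above describe the bridge only at diagram level ("Sees 18 grouped tools only ... Translates to original call"). For illustration, a minimal sketch of what that translation amounts to, using the `find` group whose mapping appears in the recipes/serena.json hunks further down; the call shape and the `operation` field name are assumptions for this sketch, not taken from the package:

```js
// Illustrative sketch only. The real dispatch happens inside the mcpslim binary;
// the call shape and the "operation" field name are assumptions, not confirmed by this diff.
const findGroup = {
  name: 'find',
  // Mapping taken from the recipes/serena.json hunk below.
  mapping: { file: 'find_file', symbol: 'find_symbol' },
};

// A grouped call is rewritten to the original serena tool; arguments pass through unchanged.
function translate(call) {
  const original = findGroup.mapping[call.operation];
  if (!original) throw new Error(`unknown operation: ${call.operation}`);
  return { tool: original, args: call.args };
}

console.log(translate({ tool: 'find', operation: 'symbol', args: { query: 'MyClass' } }));
// → { tool: 'find_symbol', args: { query: 'MyClass' } }
```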
package/bin/mcpslim-windows-x64.exe
CHANGED
Binary file
package/index.js
CHANGED
@@ -117,10 +117,11 @@ async function interactiveSetup() {
     case '3':
       console.log('\nRun this command:\n');
       if (REQUIRED_ENV_VARS.length > 0) {
-        console.log(`  claude mcp add ${MCP_NAME} ${envFlags} -- npx -y ${PACKAGE_NAME}\n`);
+        console.log(`  claude mcp add ${MCP_NAME} -s project ${envFlags} -- npx -y ${PACKAGE_NAME}\n`);
       } else {
-        console.log(`  claude mcp add ${MCP_NAME} -- npx -y ${PACKAGE_NAME}\n`);
+        console.log(`  claude mcp add ${MCP_NAME} -s project -- npx -y ${PACKAGE_NAME}\n`);
       }
+      console.log('  (Windows: use "cmd /c npx" instead of "npx")\n');
       return true;
     case '4':
       console.log('\nRun this command:\n');
@@ -137,11 +138,14 @@ function setupClaudeCode() {
   // Build the environment variable flags
   const envFlags = REQUIRED_ENV_VARS.map(v => `--env ${v}=<YOUR_${v.split('_').pop()}>`).join(' ');
 
-
+  // A cmd /c wrapper is required on Windows
+  const npxCmd = os.platform() === 'win32' ? 'cmd /c npx' : 'npx';
+
+  let cmd = `claude mcp add ${MCP_NAME} -s project`;
   if (REQUIRED_ENV_VARS.length > 0) {
     cmd += ` ${envFlags}`;
   }
-  cmd += ` --
+  cmd += ` -- ${npxCmd} -y ${PACKAGE_NAME}`;
 
   console.log(`\n🔧 Adding ${MCP_NAME} to Claude Code...\n`);
   console.log(`Running: ${cmd}\n`);
@@ -230,7 +234,20 @@ if (setupIndex !== -1) {
 // Normal Mode: Run MCP server
 // ============================================
 
-
+// Determine the platform-specific binary name
+function getBinaryName() {
+  const platform = os.platform();
+  const arch = os.arch();
+  if (platform === 'win32') {
+    return 'mcpslim-windows-x64.exe';
+  } else if (platform === 'darwin') {
+    return arch === 'arm64' ? 'mcpslim-darwin-arm64' : 'mcpslim-darwin-x64';
+  } else {
+    return 'mcpslim-linux-x64';
+  }
+}
+
+const binName = getBinaryName();
 const mcpslimBin = path.join(__dirname, 'bin', binName);
 const recipePath = path.join(__dirname, 'recipes', 'serena.json');
 
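The "Normal Mode" hunk resolves the binary and recipe paths, but the diff cuts off before the launch itself, so the exact invocation is not visible. A rough sketch of how such a launcher is commonly wired, assuming the binary takes the recipe path as an argument (a guess) and speaks MCP over stdio:

```js
// Hypothetical launch step: the actual flags accepted by the mcpslim binary
// are not shown in this diff, so "--recipe" is a placeholder.
const { spawn } = require('child_process');
const os = require('os');
const path = require('path');

const binName = os.platform() === 'win32' ? 'mcpslim-windows-x64.exe' : 'mcpslim-linux-x64'; // simplified
const mcpslimBin = path.join(__dirname, 'bin', binName);
const recipePath = path.join(__dirname, 'recipes', 'serena.json');

// stdio: 'inherit' lets the MCP client talk to the binary directly over stdin/stdout.
const child = spawn(mcpslimBin, ['--recipe', recipePath], { stdio: 'inherit' });
child.on('exit', (code) => process.exit(code ?? 0));
```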
package/package.json
CHANGED
package/recipes/serena.json
CHANGED
@@ -9,7 +9,7 @@
   "groups": [
     {
       "name": "read",
-      "description": "
+      "description": "read operations",
       "mapping": {
         "file": "read_file",
         "memory": "read_memory"
@@ -51,7 +51,7 @@
     },
     {
       "name": "list",
-      "description": "
+      "description": "list operations",
       "mapping": {
         "dir": "list_dir",
         "memories": "list_memories"
@@ -90,7 +90,7 @@
     },
     {
       "name": "find",
-      "description": "
+      "description": "find operations",
       "mapping": {
         "file": "find_file",
         "symbol": "find_symbol",
@@ -161,7 +161,7 @@
     },
     {
       "name": "replace",
-      "description": "
+      "description": "replace operations",
       "mapping": {
         "content": "replace_content",
         "symbol_body": "replace_symbol_body"
@@ -216,7 +216,7 @@
     },
     {
       "name": "get",
-      "description": "
+      "description": "get operations",
       "mapping": {
         "symbols_overview": "get_symbols_overview",
         "current_config": "get_current_config"
@@ -251,7 +251,7 @@
     },
     {
       "name": "insert",
-      "description": "
+      "description": "insert operations",
       "mapping": {
         "after_symbol": "insert_after_symbol",
         "before_symbol": "insert_before_symbol"
@@ -286,7 +286,7 @@
     },
     {
       "name": "think",
-      "description": "
+      "description": "think operations",
       "mapping": {
         "about_collected_information": "think_about_collected_information",
         "about_task_adherence": "think_about_task_adherence",
@@ -311,7 +311,7 @@
     },
     {
       "name": "memory",
-      "description": "
+      "description": "memory operations",
       "mapping": {
         "write": "write_memory",
         "delete": "delete_memory",
@@ -374,7 +374,5 @@
     "onboarding",
     "prepare_for_new_conversation",
     "initial_instructions"
-  ]
-  "ai_enhanced": true,
-  "enhanced_at": "2026-01-05T07:32:33.116Z"
+  ]
 }
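To see what the 29 → 18 grouping looks like on disk, the shipped recipe can be inspected directly. A small Node sketch that relies only on the fields visible in the hunks above (groups[].name and groups[].mapping); the full recipe schema may contain more:

```js
// Lists each group and the original serena tools it fans out to.
const fs = require('fs');
const path = require('path');

const recipe = JSON.parse(
  fs.readFileSync(path.join(__dirname, 'recipes', 'serena.json'), 'utf8')
);

for (const group of recipe.groups) {
  const targets = Object.values(group.mapping ?? {});
  console.log(`${group.name}: ${targets.join(', ')}`);
}
// e.g. "memory: write_memory, delete_memory, ..."
```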