context-shuttle-mcp 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +103 -0
- package/index.js +730 -0
- package/package.json +53 -0
- package/smithery.yaml +7 -0
package/README.md
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# Context Shuttle MCP Server
|
|
2
|
+
|
|
3
|
+
这是 **Context Shuttle** 系统的 MCP 服务端组件。它充当 Chrome 插件 / Desktop 应用与 AI IDE (Cursor/Gemini) 之间的桥梁。
|
|
4
|
+
|
|
5
|
+
## 🚀 快速开始
|
|
6
|
+
|
|
7
|
+
### 方式一:使用 npx(推荐)
|
|
8
|
+
|
|
9
|
+
无需安装,直接运行:
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
npx -y context-shuttle-mcp
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
### 方式二:Smithery (Claude Desktop 推荐)
|
|
16
|
+
|
|
17
|
+
如果你使用 Claude Desktop,可以通过 Smithery 自动安装和配置:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npx -y @smithery/cli install context-shuttle-mcp --client claude
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
### 方式三:全局安装
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
npm install -g context-shuttle-mcp
|
|
27
|
+
context-shuttle-mcp
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### 方式四:从源码运行
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
git clone https://github.com/julyCodeGo/context-shuttle.git
|
|
34
|
+
cd context-shuttle/mcp-server
|
|
35
|
+
npm install
|
|
36
|
+
node index.js
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## ⚙️ 在 AI IDE 中配置
|
|
40
|
+
|
|
41
|
+
### Cursor / Windsurf 配置
|
|
42
|
+
|
|
43
|
+
编辑 `~/.cursor/mcp.json` 或 `~/.codeium/windsurf/mcp_config.json`:
|
|
44
|
+
|
|
45
|
+
```json
|
|
46
|
+
{
|
|
47
|
+
"mcpServers": {
|
|
48
|
+
"context-shuttle": {
|
|
49
|
+
"command": "npx",
|
|
50
|
+
"args": ["-y", "context-shuttle-mcp"]
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
### Gemini 配置
|
|
57
|
+
|
|
58
|
+
在 Settings -> Features -> MCP 中添加:
|
|
59
|
+
- **Type**: `command`
|
|
60
|
+
- **Name**: `context-shuttle`
|
|
61
|
+
- **Command**: `npx -y context-shuttle-mcp`
|
|
62
|
+
|
|
63
|
+
## 核心职责
|
|
64
|
+
|
|
65
|
+
1. **HTTP 服务器 (Express)**:
|
|
66
|
+
* 监听 `http://localhost:3333`
|
|
67
|
+
* 接收来自 Chrome 插件或 Desktop 应用的 POST 请求 (`/api/sync`)
|
|
68
|
+
* 将 Base64 图片数据写入本地临时文件系统
|
|
69
|
+
* 维护一个基于文件的 JSON 队列 (`queue.json`)
|
|
70
|
+
|
|
71
|
+
2. **MCP 服务器 (Model Context Protocol)**:
|
|
72
|
+
* 通过标准输入输出 (Stdio) 与 AI IDE 通信
|
|
73
|
+
* 提供 `get_context_queue` 工具,供 AI 读取当前采集到的上下文信息
|
|
74
|
+
* 提供 `list_history_batches` 工具,查看历史批次
|
|
75
|
+
* 提供 `read_batch` 工具,读取指定批次的完整内容
|
|
76
|
+
|
|
77
|
+
## API 接口
|
|
78
|
+
|
|
79
|
+
### `GET /api/status`
|
|
80
|
+
检查服务是否存活,返回当前队列状态。
|
|
81
|
+
|
|
82
|
+
### `POST /api/sync`
|
|
83
|
+
接收从浏览器或桌面端采集的数据批次。
|
|
84
|
+
|
|
85
|
+
**请求体示例**:
|
|
86
|
+
```json
|
|
87
|
+
{
|
|
88
|
+
"batch_title": "登录页 Bug",
|
|
89
|
+
"batch_notes": "按钮颜色不对",
|
|
90
|
+
"items": [
|
|
91
|
+
{
|
|
92
|
+
"type": "image",
|
|
93
|
+
"base64": "data:image/png;base64,...",
|
|
94
|
+
"url": "http://...",
|
|
95
|
+
"title": "Page Title"
|
|
96
|
+
}
|
|
97
|
+
]
|
|
98
|
+
}
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
## 许可证
|
|
102
|
+
|
|
103
|
+
MIT License
|
package/index.js
ADDED
|
@@ -0,0 +1,730 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
const express = require('express');
|
|
3
|
+
const cors = require('cors');
|
|
4
|
+
const fs = require('fs-extra');
|
|
5
|
+
const path = require('path');
|
|
6
|
+
const os = require('os');
|
|
7
|
+
const { v4: uuidv4 } = require('uuid');
|
|
8
|
+
const { Server } = require('@modelcontextprotocol/sdk/server/index.js');
|
|
9
|
+
const { StdioServerTransport } = require('@modelcontextprotocol/sdk/server/stdio.js');
|
|
10
|
+
const {
|
|
11
|
+
CallToolRequestSchema,
|
|
12
|
+
ListToolsRequestSchema,
|
|
13
|
+
ListPromptsRequestSchema,
|
|
14
|
+
GetPromptRequestSchema,
|
|
15
|
+
} = require('@modelcontextprotocol/sdk/types.js');
|
|
16
|
+
|
|
17
|
+
// --- Configuration ---
|
|
19
|
+
const PORT = 3333;
|
|
20
|
+
const TMP_DIR = path.join(os.tmpdir(), 'mcp_kopiki_captures');
|
|
21
|
+
const QUEUE_FILE = path.join(TMP_DIR, 'queue.json');
|
|
22
|
+
const BATCHES_DIR = path.join(TMP_DIR, 'batches');
|
|
23
|
+
const ASSETS_DIR = path.join(TMP_DIR, 'assets');
|
|
24
|
+
|
|
25
|
+
// --- Initialization ---
|
|
26
|
+
fs.ensureDirSync(TMP_DIR);
|
|
27
|
+
fs.ensureDirSync(BATCHES_DIR);
|
|
28
|
+
fs.ensureDirSync(ASSETS_DIR);
|
|
29
|
+
|
|
30
|
+
if (!fs.existsSync(QUEUE_FILE)) {
|
|
31
|
+
fs.writeJsonSync(QUEUE_FILE, []); // Stores list of Batch IDs
|
|
32
|
+
}
|
|
33
|
+
console.error(`[MCP Server] Root Dir: ${TMP_DIR}`);
|
|
34
|
+
console.error(`[MCP Server] Batches Dir: ${BATCHES_DIR}`);
|
|
35
|
+
|
|
36
|
+
// --- In-Memory Cache ---
|
|
37
|
+
let queueCache = [];
|
|
38
|
+
|
|
39
|
+
// Read the persisted queue (an array of batch IDs) from QUEUE_FILE.
// Returns an empty array when the file is missing or unreadable, so the
// server always starts with a usable queue.
function loadQueueFromCache() {
  let queue = [];
  try {
    const queueFileExists = fs.existsSync(QUEUE_FILE);
    if (queueFileExists) {
      queue = fs.readJsonSync(QUEUE_FILE);
    }
  } catch (err) {
    // A corrupt or locked file is not fatal; log and fall back to empty.
    console.error('[MCP Server] Failed to load queue from disk:', err);
  }
  return queue;
}
|
|
49
|
+
|
|
50
|
+
// Initialize Cache
|
|
51
|
+
queueCache = loadQueueFromCache();
|
|
52
|
+
|
|
53
|
+
// --- Initial Cleanup ---
|
|
54
|
+
// Moved here to ensure queueCache is initialized
|
|
55
|
+
cleanupData();
|
|
56
|
+
|
|
57
|
+
// --- Helper Functions ---
|
|
58
|
+
// Return the in-memory queue cache (array of pending batch IDs).
// The cache is the source of truth at runtime; QUEUE_FILE on disk is
// only a mirror maintained by saveQueue().
function getQueue() {
  return queueCache;
}
|
|
61
|
+
|
|
62
|
+
// Replace the queue: update the in-memory cache first, then best-effort
// persist it to QUEUE_FILE.
function saveQueue(queue) {
  // Update the cache unconditionally so readers observe the new state
  // even when the disk write below fails.
  queueCache = queue;
  try {
    fs.writeJsonSync(QUEUE_FILE, queue);
  } catch (err) {
    console.error('[MCP Server] Failed to save queue to disk:', err);
  }
}
|
|
70
|
+
|
|
71
|
+
// Persist a batch object to BATCHES_DIR as `batch_<id>.json` and return
// the path it was written to. Throws if the write fails (callers run
// inside their own try/catch).
function saveBatch(batch) {
  const targetPath = path.join(BATCHES_DIR, `batch_${batch.id}.json`);
  fs.writeJsonSync(targetPath, batch);
  return targetPath;
}
|
|
76
|
+
|
|
77
|
+
// Load a single batch by ID from BATCHES_DIR.
// Returns the parsed batch object, or null when the file is absent or
// cannot be read/parsed.
function getBatch(batchId) {
  try {
    const batchPath = path.join(BATCHES_DIR, `batch_${batchId}.json`);
    return fs.existsSync(batchPath) ? fs.readJsonSync(batchPath) : null;
  } catch (e) {
    console.error(`[MCP Server] Failed to load batch ${batchId}:`, e);
    return null;
  }
}
|
|
88
|
+
|
|
89
|
+
// List every batch stored in BATCHES_DIR, newest first.
// Corrupt or unreadable batch files are skipped (best-effort); any
// directory-level failure yields an empty list, so callers treat
// "no history" and "history unavailable" identically.
function getAllBatches() {
  try {
    const files = fs.readdirSync(BATCHES_DIR).filter(f => f.endsWith('.json'));
    // Filenames alone don't encode time, so read each file to get its
    // `created_at` timestamp.
    const batches = files.map(f => {
      try {
        return fs.readJsonSync(path.join(BATCHES_DIR, f));
      } catch (e) {
        // Skip files that are not valid JSON.
        return null;
      }
    }).filter(b => b !== null);

    // Sort newest first. Guard against batches missing `created_at`:
    // a NaN comparator result would make the sort order arbitrary.
    return batches.sort((a, b) => (b.created_at ?? 0) - (a.created_at ?? 0));
  } catch (e) {
    return [];
  }
}
|
|
106
|
+
|
|
107
|
+
// Garbage-collect on-disk batches and their image assets.
// Policy: a batch file is deleted when it is no longer in the pending
// queue (already consumed) OR when it is older than 24 hours. Queue
// entries for expired batches are also removed. Runs synchronously;
// called at startup and before every /api/sync.
function cleanupData() {
  console.error('[MCP Server] Running cleanup (Policy: Delete processed & >24h old)...');
  const queue = getQueue();
  const now = Date.now();
  const ONE_DAY_MS = 24 * 60 * 60 * 1000;

  // Work on a copy so the live queue is swapped atomically via saveQueue().
  let queueChanged = false;
  const newQueue = [...queue];

  try {
    if (!fs.existsSync(BATCHES_DIR)) return;

    const files = fs.readdirSync(BATCHES_DIR).filter(f => f.endsWith('.json'));

    for (const file of files) {
      const filepath = path.join(BATCHES_DIR, file);
      let batch = null;
      try {
        batch = fs.readJsonSync(filepath);
      } catch (e) {
        // Corrupt file, delete it
        try { fs.unlinkSync(filepath); } catch (e) { }
        continue;
      }

      const isQueued = queue.includes(batch.id);
      // Note: a batch missing `created_at` yields NaN here, and NaN > x
      // is false, so such batches never count as expired.
      const isExpired = (now - batch.created_at) > ONE_DAY_MS;

      // Delete if:
      // 1. Not in queue (already processed)
      // 2. OR Expired (older than 24h)
      if (!isQueued || isExpired) {
        // Delete associated images. Supports both the new grouped shape
        // (batch.contexts[].items) and the legacy flat shape (batch.items).
        const contexts = batch.contexts || [{ items: batch.items || [] }];
        for (const context of contexts) {
          if (context.items) {
            for (const item of context.items) {
              if (item.type === 'image' && item.filepath) {
                try {
                  if (fs.existsSync(item.filepath)) {
                    fs.unlinkSync(item.filepath);
                  }
                } catch (e) {
                  // Ignore missing images
                }
              }
            }
          }
        }

        // Delete batch file
        try {
          fs.unlinkSync(filepath);
          console.error(`[MCP Server] Deleted batch: ${batch.title} (ID: ${batch.id}). Reason: ${isExpired ? 'Expired' : 'Already Processed'}`);
        } catch (e) {
          console.error(`[MCP Server] Failed to delete batch file ${filepath}:`, e);
        }

        // If it was queued but expired, remove from queue
        if (isQueued && isExpired) {
          const idx = newQueue.indexOf(batch.id);
          if (idx > -1) {
            newQueue.splice(idx, 1);
            queueChanged = true;
          }
        }
      }
    }

    // Persist the pruned queue only if something was actually removed.
    if (queueChanged) {
      saveQueue(newQueue);
    }

  } catch (e) {
    console.error('[MCP Server] Error during cleanup:', e);
  }
}
|
|
184
|
+
|
|
185
|
+
// --- Express App (HTTP Server) ---
|
|
186
|
+
const app = express();
|
|
187
|
+
|
|
188
|
+
// Middleware
|
|
189
|
+
app.use(cors()); // Allow all origins for now
|
|
190
|
+
app.use(express.json({ limit: '50mb' })); // Support large payloads (images)
|
|
191
|
+
|
|
192
|
+
// Routes
|
|
193
|
+
// Health-check endpoint. The browser extension / desktop app polls this
// to detect a running server. It also advertises the MCP tools and
// prompts so non-MCP clients can display capabilities.
app.get('/api/status', (req, res) => {
  const queue = getQueue();
  res.json({
    status: 'running',
    port: PORT,
    // NOTE(review): reported version '1.0.0' differs from package.json
    // ('0.0.1') — confirm which is authoritative.
    version: '1.0.0',
    queue_length: queue.length,
    // Static mirror of the MCP tool list (see the ListToolsRequestSchema
    // handler below); keep the two lists in sync when adding tools.
    tools: [
      { name: "get_context_queue", description: "获取最新未处理的采集内容。" },
      { name: "list_history_batches", description: "列出所有历史采集批次。" },
      { name: "read_batch", description: "读取指定历史批次的完整内容。" },
      { name: "read_local_file", description: "读取扩展引用的本地文件。" },
      { name: "clear_context_queue", description: "手动清空待处理队列。" }
    ],
    // Static mirror of the MCP prompt list (see ListPromptsRequestSchema).
    prompts: [
      { name: "organize_requirements", description: "将采集的需求整理为结构化的 Markdown 规范文档。" },
      { name: "analyze_bugs", description: "分析采集的 Bug 报告并制定修复计划。" }
    ]
  });
});
|
|
213
|
+
|
|
214
|
+
// Ingest a batch of captured data from the browser extension or desktop
// app. Accepts either the v1.0 grouped shape (`contexts: [{metadata,
// items}]`) or the legacy flat shape (`items: [...]`, auto-grouped by
// URL). Image items arrive as base64, are written to ASSETS_DIR, and the
// payload is rewritten to reference the file instead. The batch is
// persisted to disk and its ID appended to the pending queue.
app.post('/api/sync', async (req, res) => {
  try {
    // Run cleanup before processing new data
    cleanupData();

    // v1.0: Extract batch info (supports both contexts and items)
    const { contexts, items, batch_id, batch_title, batch_notes, intent } = req.body;

    const batchId = batch_id || uuidv4();
    const now = Date.now();
    let processedCount = 0;
    let finalContexts = [];

    if (contexts && Array.isArray(contexts)) {
      // --- New Path: Context-based ---
      // Note: mutates the request-body objects in place (ids assigned,
      // base64 stripped) before persisting them as the batch.
      for (const ctx of contexts) {
        // Ensure metadata
        if (!ctx.metadata) ctx.metadata = {};
        ctx.source_id = uuidv4();

        // Process items
        if (ctx.items && Array.isArray(ctx.items)) {
          for (const item of ctx.items) {
            item.id = uuidv4(); // Assign ID

            if (item.type === 'image' && item.base64) {
              // Save image to disk
              const buffer = Buffer.from(item.base64.replace(/^data:image\/\w+;base64,/, ""), 'base64');
              // NOTE(review): filename is always .png regardless of the
              // data-URL's declared image type — confirm intended.
              const filename = `capture_${now}_${item.id}.png`;
              const filepath = path.join(ASSETS_DIR, filename);

              await fs.writeFile(filepath, buffer);

              item.uri = `file://${filepath}`;
              item.filepath = filepath;
              delete item.base64; // Remove base64 to save space
            }
            processedCount++;
          }
        }
      }
      finalContexts = contexts;

    } else if (items && Array.isArray(items)) {
      // --- Legacy Path: Flat Items (Auto-Group) ---
      // Group items by page URL into synthetic contexts.
      // NOTE(review): `url` comes from the client and is used directly as
      // an object key — a key like "__proto__" could cause prototype
      // pollution; consider a Map or Object.create(null).
      const contextsMap = {};

      for (const item of items) {
        const itemId = uuidv4();
        const url = item.url || 'unknown';

        // Create Context if not exists
        if (!contextsMap[url]) {
          contextsMap[url] = {
            source_id: uuidv4(),
            metadata: {
              url: item.url,
              title: item.title,
              fixVersion: item.fixVersion
            },
            items: []
          };
        }

        const batchItem = {
          id: itemId,
          type: item.type,
          notes: item.notes || '',
        };

        if (item.type === 'image' && item.base64) {
          const buffer = Buffer.from(item.base64.replace(/^data:image\/\w+;base64,/, ""), 'base64');
          const filename = `capture_${now}_${itemId}.png`;
          const filepath = path.join(ASSETS_DIR, filename);

          await fs.writeFile(filepath, buffer);

          batchItem.uri = `file://${filepath}`;
          batchItem.filepath = filepath;
        } else if (item.type === 'text') {
          batchItem.content = item.content;
        }

        contextsMap[url].items.push(batchItem);
        processedCount++;
      }
      finalContexts = Object.values(contextsMap);
    } else {
      // Neither `contexts` nor `items` present — reject the payload.
      return res.status(400).json({ error: 'Invalid format: must provide contexts or items' });
    }

    const newBatch = {
      id: batchId,
      title: batch_title || 'Untitled Batch',
      notes: batch_notes || '',
      intent: intent || 'requirement',
      created_at: now,
      contexts: finalContexts // New Structure
    };

    // 1. Save Batch to Disk (Persistence)
    saveBatch(newBatch);

    // 2. Add Batch ID to Queue (for immediate consumption)
    const currentQueue = getQueue();
    if (!currentQueue.includes(batchId)) {
      currentQueue.push(batchId);
      saveQueue(currentQueue);
    }

    console.error(`[MCP Server] Saved Batch '${newBatch.title}' (${processedCount} items in ${finalContexts.length} contexts). Queue size: ${currentQueue.length}`);
    res.json({ status: 'success', processed_count: processedCount, message: 'Batch saved and queued' });

  } catch (error) {
    console.error('[MCP Server] Error processing sync:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});
|
|
332
|
+
|
|
333
|
+
// Empty the pending queue on request from the extension / desktop app.
// Batch files and image assets stay on disk until cleanupData() removes
// them (no longer queued => eligible for deletion on the next run).
app.post('/api/clear', (req, res) => {
  saveQueue([]); // Only clears the queue, not the batches!
  console.error('[MCP Server] Queue cleared via HTTP (Batches remain on disk)');
  res.json({ status: 'success', message: 'Queue cleared' });
});
|
|
338
|
+
|
|
339
|
+
// Start HTTP Server
|
|
340
|
+
const httpServer = app.listen(PORT, () => {
|
|
341
|
+
console.error(`[MCP Server] HTTP Server listening on port ${PORT}`);
|
|
342
|
+
});
|
|
343
|
+
|
|
344
|
+
// Fix for "frequently disconnects" (ECONNRESET) issue
|
|
345
|
+
// Node.js default keepAliveTimeout is 5s, which often races with Chrome's usage of Keep-Alive connections.
|
|
346
|
+
// We increase it to 60s to ensure the server doesn't close the connection while Chrome might try to reuse it.
|
|
347
|
+
httpServer.keepAliveTimeout = 60000;
|
|
348
|
+
httpServer.headersTimeout = 61000;
|
|
349
|
+
|
|
350
|
+
httpServer.on('error', (e) => {
|
|
351
|
+
if (e.code === 'EADDRINUSE') {
|
|
352
|
+
console.error(`[MCP Server] Port ${PORT} is already in use.`);
|
|
353
|
+
} else {
|
|
354
|
+
console.error('[MCP Server] HTTP Server error:', e);
|
|
355
|
+
}
|
|
356
|
+
});
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
// --- MCP Server (Stdio) ---
|
|
360
|
+
const server = new Server(
|
|
361
|
+
{
|
|
362
|
+
name: "context-shuttle",
|
|
363
|
+
version: "1.0.0",
|
|
364
|
+
},
|
|
365
|
+
{
|
|
366
|
+
capabilities: {
|
|
367
|
+
tools: {},
|
|
368
|
+
prompts: {},
|
|
369
|
+
},
|
|
370
|
+
}
|
|
371
|
+
);
|
|
372
|
+
|
|
373
|
+
// MCP tools/list handler: advertises the five tools this server exposes.
// Keep this list in sync with the static copy returned by GET /api/status
// and with the switch in the CallToolRequestSchema handler.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: "get_context_queue",
        description: "Retrieve the *latest unprocessed* captured items from the Chrome extension. Use this to get what the user just sent you. Reading this will remove items from the 'pending' queue, but they are saved in history.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
      {
        name: "list_history_batches",
        description: "List all historical batches of captured data. Use this to see what the user has captured in the past.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
      {
        name: "read_batch",
        description: "Read the full content of a specific historical batch.",
        inputSchema: {
          type: "object",
          properties: {
            batchId: { type: "string", description: "The ID of the batch to read" }
          },
          required: ["batchId"]
        },
      },
      {
        name: "read_local_file",
        description: "Read a local file referenced by the Chrome extension. The file path should be provided in the captured items' metadata.",
        inputSchema: {
          type: "object",
          properties: {
            filePath: {
              type: "string",
              description: "Absolute path to the local file to read"
            }
          },
          required: ["filePath"]
        },
      },
      {
        name: "clear_context_queue",
        description: "Clear the pending queue manually.",
        inputSchema: {
          type: "object",
          properties: {},
        },
      },
    ],
  };
});
|
|
428
|
+
|
|
429
|
+
// MCP prompts/list handler: advertises the two canned workflow prompts.
// The prompt bodies live in the GetPromptRequestSchema handler below.
server.setRequestHandler(ListPromptsRequestSchema, async () => {
  return {
    prompts: [
      {
        name: "organize_requirements",
        description: "Compile captured requirements into a structured Markdown specification document.",
      },
      {
        name: "analyze_bugs",
        description: "Analyze captured bug reports and prepare a fix plan.",
      }
    ]
  };
});
|
|
443
|
+
|
|
444
|
+
// MCP prompts/get handler: resolve a prompt name to a single user
// message containing the canned instruction text. Unknown names throw.
server.setRequestHandler(GetPromptRequestSchema, async (request) => {
  // Instruction text for each known prompt, keyed by prompt name.
  const promptTexts = {
    organize_requirements: "Please retrieve the latest captured context using `get_context_queue`. Identify items that are Requirements. Format them into a clean Markdown document titled 'Requirement Specification'. For each item, list:\n1. Title\n2. Source URL\n3. Fix Version\n4. Detailed Description (from content/notes)\n5. Attached Images (if any).",
    analyze_bugs: "Please retrieve the latest captured context using `get_context_queue`. Identify items that are Bugs. For each bug, generate a 'Bug Analysis' section containing:\n1. Bug Title & URL\n2. Fix Version\n3. Reproduction Steps (parsed from content)\n4. Visual Analysis (if images are present)\n5. Suggested Fix Approach (based on your knowledge of the codebase).",
  };

  const promptName = request.params.name;
  // Own-key check so inherited properties (e.g. "toString") don't match.
  if (!Object.hasOwn(promptTexts, promptName)) {
    throw new Error("Prompt not found");
  }

  return {
    messages: [
      {
        role: "user",
        content: {
          type: "text",
          text: promptTexts[promptName],
        },
      },
    ],
  };
});
|
|
479
|
+
|
|
480
|
+
// MCP tools/call dispatcher. Renders batches as interleaved text/image
// content blocks. `get_context_queue` has a side effect: it clears the
// pending queue after rendering (batch files stay on disk as history).
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  switch (request.params.name) {
    case "get_context_queue": {
      const queueIds = getQueue();
      const content = [];

      if (queueIds.length === 0) {
        return {
          content: [{ type: "text", text: "The context queue is empty." }],
        };
      }

      // Load Batches
      for (const batchId of queueIds) {
        const batch = getBatch(batchId);
        // Batch file may have been cleaned up or corrupted; skip silently.
        if (!batch) continue;

        // Render Batch
        let batchHeader = `\n=== BATCH: ${batch.title} (ID: ${batch.id}) ===\n`;
        batchHeader += `Intent: ${batch.intent || 'requirement'}\n`;
        if (batch.notes) batchHeader += `Batch Notes: ${batch.notes}\n`;
        batchHeader += `Time: ${new Date(batch.created_at).toLocaleString()}\n`;
        batchHeader += `--------------------------------------------------\n`;

        content.push({ type: "text", text: batchHeader });

        // Handle both old (flat) and new (grouped) structures
        const contexts = batch.contexts || [{ metadata: {}, items: batch.items || [] }];

        for (const context of contexts) {
          if (batch.contexts) {
            // Only render context header if it's the new structure
            let contextHeader = `\n### Source: ${context.metadata.title || 'Unknown'}\n`;
            if (context.metadata.url) contextHeader += `URL: ${context.metadata.url}\n`;
            if (context.metadata.fixVersion) contextHeader += `Fix Version: ${context.metadata.fixVersion}\n`;
            contextHeader += `\n`;
            content.push({ type: "text", text: contextHeader });
          }

          for (const item of context.items) {
            let itemHeader = `- [${item.type}]`;
            if (item.notes) itemHeader += ` Notes: ${item.notes}`;
            itemHeader += `\n`;

            // For old structure compatibility
            if (!batch.contexts && item.metadata) {
              if (item.metadata.title) itemHeader += ` Source: ${item.metadata.title}\n`;
              if (item.metadata.url) itemHeader += ` URL: ${item.metadata.url}\n`;
            }

            content.push({ type: "text", text: itemHeader });

            if (item.type === 'image' && item.filepath) {
              try {
                // Images were stored as files by /api/sync; re-encode to
                // base64 for the MCP content block.
                content.push({
                  type: "image",
                  data: fs.readFileSync(item.filepath).toString('base64'),
                  mimeType: "image/png"
                });
              } catch (e) {
                content.push({ type: "text", text: ` [Error loading image: ${e.message}]\n` });
              }
            } else if (item.type === 'text') {
              content.push({ type: "text", text: ` Content:\n${item.content}\n` });
            }

            content.push({ type: "text", text: "\n" });
          }
        }
        content.push({ type: "text", text: "==================================================\n\n" });
      }

      // Auto-Clear Queue (but keep files!)
      saveQueue([]);
      console.error(`[MCP Server] Queue processed and cleared. (${queueIds.length} batches archived)`);

      return { content };
    }

    case "list_history_batches": {
      // Summarize every batch on disk, newest first (per getAllBatches).
      const batches = getAllBatches();
      let text = "Here are the historical batches:\n\n";
      batches.forEach(b => {
        let itemCount = 0;
        if (b.contexts) {
          itemCount = b.contexts.reduce((acc, ctx) => acc + (ctx.items ? ctx.items.length : 0), 0);
        } else if (b.items) {
          itemCount = b.items.length;
        }
        text += `- [${new Date(b.created_at).toLocaleString()}] **${b.title}** (ID: \`${b.id}\`) - ${itemCount} items\n`;
        if (b.notes) text += `  Notes: ${b.notes}\n`;
      });
      return { content: [{ type: "text", text }] };
    }

    case "read_batch": {
      const { batchId } = request.params.arguments;
      const batch = getBatch(batchId);
      if (!batch) {
        return { content: [{ type: "text", text: `Batch not found: ${batchId}` }] };
      }

      // Reuse rendering logic? For simplicity, just JSON dump or simple text for now.
      // Better to reuse the rendering logic from get_context_queue but without clearing.
      // Let's do a simple render here.
      const content = [];
      content.push({ type: "text", text: `Reading Batch: ${batch.title}\n\n` });

      // Handle both old (flat) and new (grouped) structures
      const contexts = batch.contexts || [{ metadata: {}, items: batch.items || [] }];

      for (const context of contexts) {
        if (batch.contexts) {
          let contextHeader = `\n### Source: ${context.metadata.title || 'Unknown'}\n`;
          if (context.metadata.url) contextHeader += `URL: ${context.metadata.url}\n`;
          content.push({ type: "text", text: contextHeader });
        }

        for (const item of context.items) {
          let itemText = `--- Item (${item.type}) ---\n`;
          if (item.notes) itemText += `Notes: ${item.notes}\n`;
          // Text content is truncated to 200 chars in this summary view.
          if (item.content) itemText += `Content: ${item.content.substring(0, 200)}...\n`;

          // Old structure compatibility
          if (!batch.contexts && item.metadata) {
            if (item.metadata.title) itemText += `Title: ${item.metadata.title}\n`;
          }

          content.push({ type: "text", text: itemText });

          if (item.type === 'image' && item.filepath) {
            try {
              content.push({
                type: "image",
                data: fs.readFileSync(item.filepath).toString('base64'),
                mimeType: "image/png"
              });
            // NOTE(review): unreadable images are silently skipped here,
            // unlike get_context_queue which emits an error text block.
            } catch (e) { }
          }
        }
      }

      return { content };
    }

    case "clear_context_queue": {
      // Drop pending IDs; batch files remain on disk as history.
      saveQueue([]);
      return {
        content: [{ type: "text", text: `Queue cleared.` }],
      };
    }

    case "read_local_file": {
      const { filePath } = request.params.arguments;
      // NOTE(review): this reads any absolute path the MCP client
      // supplies — there is no allow-list or root-directory restriction.
      // Confirm that is acceptable for a localhost-only tool.

      try {
        // Safety check: make sure the file exists before stat/read.
        if (!fs.existsSync(filePath)) {
          return {
            content: [{
              type: "text",
              text: `❌ 文件不存在: ${filePath}`
            }]
          };
        }

        const stats = fs.statSync(filePath);
        const ext = path.extname(filePath).toLowerCase();
        const fileName = path.basename(filePath);

        // Image files: return metadata text plus a base64 image block.
        const imageExts = ['.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp'];
        if (imageExts.includes(ext)) {
          const buffer = fs.readFileSync(filePath);
          const base64 = buffer.toString('base64');
          // Map extension to MIME type; .bmp (and anything else) falls
          // back to image/png.
          const mimeType = ext === '.jpg' || ext === '.jpeg' ? 'image/jpeg' :
            ext === '.png' ? 'image/png' :
              ext === '.gif' ? 'image/gif' :
                ext === '.webp' ? 'image/webp' : 'image/png';

          return {
            content: [
              {
                type: "text",
                text: `📷 读取图片文件: ${fileName}\n文件大小: ${(stats.size / 1024).toFixed(2)} KB\n修改时间: ${stats.mtime.toLocaleString()}\n\n`
              },
              {
                type: "image",
                data: base64,
                mimeType: mimeType
              }
            ]
          };
        }

        // Text files — also any other file under 1MB is attempted as text.
        const textExts = ['.txt', '.md', '.json', '.js', '.ts', '.html', '.css', '.xml', '.log', '.csv'];
        if (textExts.includes(ext) || stats.size < 1024 * 1024) { // files < 1MB: attempt to read as text
          try {
            const content = fs.readFileSync(filePath, 'utf-8');
            return {
              content: [{
                type: "text",
                text: `📄 读取文本文件: ${fileName}\n文件大小: ${(stats.size / 1024).toFixed(2)} KB\n修改时间: ${stats.mtime.toLocaleString()}\n\n--- 文件内容 ---\n${content}\n--- 结束 ---`
              }]
            };
          } catch (e) {
            return {
              content: [{
                type: "text",
                text: `⚠️ 文件可能不是文本格式或编码不支持: ${fileName}`
              }]
            };
          }
        }

        // Other file types: return metadata only.
        return {
          content: [{
            type: "text",
            text: `ℹ️ 文件信息: ${fileName}\n类型: ${ext || '未知'}\n大小: ${(stats.size / 1024).toFixed(2)} KB\n修改时间: ${stats.mtime.toLocaleString()}\n\n⚠️ 该文件类型暂不支持直接读取。`
          }]
        };

      } catch (error) {
        console.error(`[MCP Server] Error reading local file ${filePath}:`, error);
        return {
          content: [{
            type: "text",
            text: `❌ 读取文件失败: ${error.message}`
          }]
        };
      }
    }

    default:
      throw new Error("Unknown tool");
  }
});
|
|
719
|
+
|
|
720
|
+
// Start MCP Server
|
|
721
|
+
async function runMcp() {
|
|
722
|
+
const transport = new StdioServerTransport();
|
|
723
|
+
await server.connect(transport);
|
|
724
|
+
console.error("[MCP Server] MCP Server connected via Stdio");
|
|
725
|
+
}
|
|
726
|
+
|
|
727
|
+
runMcp().catch((error) => {
|
|
728
|
+
console.error("[MCP Server] Fatal error:", error);
|
|
729
|
+
process.exit(1);
|
|
730
|
+
});
|
package/package.json
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "context-shuttle-mcp",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "MCP Server for Context Shuttle - Bridge between browser extension and AI IDE (Cursor/Gemini)",
|
|
5
|
+
"files": [
|
|
6
|
+
"index.js",
|
|
7
|
+
"README.md",
|
|
8
|
+
"LICENSE",
|
|
9
|
+
"package.json",
|
|
10
|
+
"smithery.yaml"
|
|
11
|
+
],
|
|
12
|
+
"publishConfig": {
|
|
13
|
+
"access": "public"
|
|
14
|
+
},
|
|
15
|
+
"main": "index.js",
|
|
16
|
+
"bin": {
|
|
17
|
+
"context-shuttle-mcp": "index.js"
|
|
18
|
+
},
|
|
19
|
+
"scripts": {
|
|
20
|
+
"start": "node index.js",
|
|
21
|
+
"test": "echo \"Error: no test specified\" && exit 1"
|
|
22
|
+
},
|
|
23
|
+
"keywords": [
|
|
24
|
+
"mcp",
|
|
25
|
+
"context-shuttle",
|
|
26
|
+
"ai",
|
|
27
|
+
"cursor",
|
|
28
|
+
"gemini",
|
|
29
|
+
"chrome-extension",
|
|
30
|
+
"model-context-protocol"
|
|
31
|
+
],
|
|
32
|
+
"author": "Context Shuttle Team",
|
|
33
|
+
"license": "MIT",
|
|
34
|
+
"repository": {
|
|
35
|
+
"type": "git",
|
|
36
|
+
"url": "git+https://github.com/julyCodeGo/context-shuttle.git"
|
|
37
|
+
},
|
|
38
|
+
"homepage": "https://contextshuttle.com",
|
|
39
|
+
"bugs": {
|
|
40
|
+
"url": "https://github.com/julyCodeGo/context-shuttle/issues"
|
|
41
|
+
},
|
|
42
|
+
"type": "commonjs",
|
|
43
|
+
"dependencies": {
|
|
44
|
+
"@modelcontextprotocol/sdk": "^1.22.0",
|
|
45
|
+
"cors": "^2.8.5",
|
|
46
|
+
"express": "^5.1.0",
|
|
47
|
+
"fs-extra": "^11.3.2",
|
|
48
|
+
"uuid": "^13.0.0"
|
|
49
|
+
},
|
|
50
|
+
"engines": {
|
|
51
|
+
"node": ">=18.0.0"
|
|
52
|
+
}
|
|
53
|
+
}
|