memtrace-mcp-server 1.0.0 → 1.0.2
- package/.claude/settings.local.json +9 -0
- package/PUBLISH.md +40 -0
- package/README.md +200 -0
- package/index.js +11 -30
- package/package.json +1 -1
package/PUBLISH.md
ADDED
@@ -0,0 +1,40 @@

# Publishing the MCP Server to NPM

## Publishing Steps

1. Log in to NPM (if you are not logged in yet):
```bash
npm login
```

2. Bump the version number:
```bash
# Patch version (bug fix)
npm version patch

# Minor version (new feature)
npm version minor

# Major version (breaking change)
npm version major
```

3. Publish:
```bash
npm publish
```

## How Users Get Updates

After publishing, users automatically pick up the latest version:

### Method 1: npx -y (recommended, always latest)
With `npx -y memtrace-mcp-server` in the config file, every startup automatically uses the latest version.

### Method 2: Restart Claude Code
Users only need to restart Claude Code / Cursor; npx will automatically download the latest version.

### Method 3: Clear the cache (if needed)
```bash
npx clear-npx-cache
```
package/README.md
ADDED
@@ -0,0 +1,200 @@

# MemTrace MCP Server

MCP (Model Context Protocol) server for [MemTrace](https://github.com/chaitin/memtrace) - an AI context memory service that helps AI assistants remember and retrieve information across conversations.

## Features

- 🔍 **Semantic Search**: Find relevant knowledge using natural language queries
- 💾 **Knowledge Storage**: Store important information, decisions, and insights
- 🧠 **LLM-Powered Extraction**: Automatically extract structured knowledge from text
- 🔄 **Context Retrieval**: Get startup context and pull relevant knowledge
- 🏷️ **Categorization**: Organize knowledge by categories (facts, preferences, decisions, etc.)

## Installation

### Option 1: npx (Recommended - Always Latest)

No installation needed! Just configure your MCP client:

```json
{
  "mcpServers": {
    "memtrace": {
      "command": "npx",
      "args": ["-y", "memtrace-mcp-server"],
      "env": {
        "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
        "MEMTRACE_TOKEN": "your-token-here"
      }
    }
  }
}
```

### Option 2: Global Installation

```bash
npm install -g memtrace-mcp-server
```

Then configure:

```json
{
  "mcpServers": {
    "memtrace": {
      "command": "memtrace-mcp-server",
      "env": {
        "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
        "MEMTRACE_TOKEN": "your-token-here"
      }
    }
  }
}
```

## Configuration

### Required Environment Variables

- `MEMTRACE_TOKEN`: Your MemTrace API token (get it from your MemTrace dashboard)
- `MEMTRACE_API_URL`: MemTrace API base URL (default: `http://localhost:3000/api/v1`)
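
For reference, the server presumably resolves these variables along the lines of the sketch below. This is a guess based on the documented default URL and the startup error quoted under Troubleshooting; the actual logic in index.js may differ:

```javascript
// Hypothetical sketch of the env handling -- not taken from index.js.
const API_URL = process.env.MEMTRACE_API_URL || 'http://localhost:3000/api/v1';
const TOKEN = process.env.MEMTRACE_TOKEN;

if (!TOKEN) {
  // Matches the error message described under Troubleshooting below
  console.error('MEMTRACE_TOKEN environment variable is required');
  process.exit(1);
}
```
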
### Getting Your API Token

1. Log in to your MemTrace instance
2. Go to Dashboard → Tokens
3. Create a new MCP token with the required permissions
4. Copy the token and add it to your configuration

## Available Tools

### search_knowledge

Search for relevant knowledge using semantic search.

```typescript
search_knowledge({
  query: "search query here",
  limit: 10 // optional, default: 10
})
```

### push_knowledge

Store knowledge by submitting raw content. The server automatically extracts structured knowledge using an LLM.

**Important**: For better results, summarize or extract key points on the client side before pushing.

```typescript
push_knowledge({
  content: "Raw text content containing important information...",
  project_id: "optional-project-id" // optional
})
```

**Content Size Limits** (per tier):
- Free: 10,000 characters
- Pro: 50,000 characters
- Business: 100,000 characters
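
Pushes that exceed the tier limit will fail, so it can pay to split oversized content on the client before calling the tool. A minimal sketch, assuming the Free-tier limit from the list above; the paragraph-based splitting strategy and the helper name are illustrative, not part of the API:

```javascript
// Split content into chunks that each respect the tier's character limit.
// chunkForPush is a hypothetical client-side helper, not a server feature.
function chunkForPush(content, maxChars = 10000) {
  const chunks = [];
  let current = '';
  for (const paragraph of content.split('\n\n')) {
    if (current.length + paragraph.length + 2 > maxChars) {
      if (current) chunks.push(current);
      if (paragraph.length > maxChars) {
        // Fallback: hard-split a single paragraph that is itself too long
        for (let i = 0; i < paragraph.length; i += maxChars) {
          chunks.push(paragraph.slice(i, i + maxChars));
        }
        current = '';
      } else {
        current = paragraph;
      }
    } else {
      current = current ? current + '\n\n' + paragraph : paragraph;
    }
  }
  if (current) chunks.push(current);
  return chunks; // each chunk can be sent as its own push_knowledge call
}
```
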
### pull_knowledge

Retrieve recent knowledge items.

```typescript
pull_knowledge({
  limit: 10, // optional, default: 10
  category: "fact" // optional: fact, preference, procedure, decision, insight, other
})
```

### extract_knowledge

Extract knowledge from text or a conversation using an LLM (preview mode; nothing is saved unless `save: true`).

```typescript
extract_knowledge({
  content: "Text to extract knowledge from...",
  save: false, // optional, default: false
  project_id: "optional-project-id" // optional
})
```

### get_startup_context

Get startup context with pinned knowledge and recent items for project initialization.

```typescript
get_startup_context({
  project_id: "optional-project-id", // optional
  max_tokens: 4000 // optional, default: 4000
})
```

## Usage Examples

### With Claude Code

1. Install the Claude Code CLI
2. Add the MemTrace MCP server to `~/.claude/claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "memtrace": {
      "command": "npx",
      "args": ["-y", "memtrace-mcp-server"],
      "env": {
        "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
        "MEMTRACE_TOKEN": "your-mcp-token-here"
      }
    }
  }
}
```

3. Restart Claude Code

### With Cursor

1. Add the server to Cursor's MCP configuration
2. Restart Cursor
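
Besides editor integrations, the tools can be exercised programmatically, which is handy for smoke-testing a token or server URL. A sketch using the MCP TypeScript SDK (`@modelcontextprotocol/sdk`); the client name and placeholder values are illustrative:

```javascript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Launch the server over stdio, the same way an MCP client would
const transport = new StdioClientTransport({
  command: 'npx',
  args: ['-y', 'memtrace-mcp-server'],
  env: {
    MEMTRACE_API_URL: 'https://your-memtrace-server.com/api/v1',
    MEMTRACE_TOKEN: 'your-token-here',
  },
});

const client = new Client({ name: 'memtrace-smoke-test', version: '0.0.1' });
await client.connect(transport);

// Call one of the tools documented above
const result = await client.callTool({
  name: 'search_knowledge',
  arguments: { query: 'deployment decisions', limit: 5 },
});
console.log(result.content);

await client.close();
```
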
## Rate Limits

API rate limits vary by subscription tier:

| Tier | Daily Limit | Burst (per minute) |
|------|-------------|--------------------|
| Free | 50 requests/day | 10 requests/min |
| Pro | 200 requests/day | 30 requests/min |
| Business | 1000 requests/day | 100 requests/min |
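
Clients that batch many calls can trip the per-minute burst limit, so a retry with exponential backoff is a reasonable guard. A sketch, assuming the API signals throttling with HTTP 429 (this README does not specify the status code):

```javascript
// Retry a fetch-style call on throttling, with exponential backoff.
// The 429 status is an assumption; adjust to what the API actually returns.
async function withBackoff(makeRequest, maxRetries = 4) {
  for (let attempt = 0; ; attempt++) {
    const res = await makeRequest();
    if (res.status !== 429 || attempt >= maxRetries) return res;
    const delayMs = 1000 * 2 ** attempt; // 1s, 2s, 4s, 8s
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
}
```
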
## Troubleshooting

### "MEMTRACE_TOKEN environment variable is required"

Make sure you've set `MEMTRACE_TOKEN` in your MCP server configuration.

### "Failed to push knowledge"

Check:
1. Your API token is valid
2. Your content doesn't exceed the tier limits
3. The server logs for detailed error messages

### Updating to the Latest Version

If using `npx -y`:
- Just restart your MCP client (Claude Code / Cursor)
- It will automatically fetch the latest version

If using a global installation:
```bash
npm update -g memtrace-mcp-server
```

## License

MIT
package/index.js
CHANGED
@@ -79,40 +79,20 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
       },
       {
         name: 'push_knowledge',
-        description: 'Store
+        description: 'Store knowledge by submitting raw content. The server will automatically extract structured knowledge using LLM.\n\n**IMPORTANT - Content Size Limits (by tier):**\n- Free tier: 10,000 characters max\n- Pro tier: 50,000 characters max\n- Business tier: 100,000 characters max\n\n**Rate Limits (by tier):**\n- Free: 50 requests/day, 10 requests/min\n- Pro: 200 requests/day, 30 requests/min\n- Business: 1000 requests/day, 100 requests/min\n\n**Best Practices:**\n1. Summarize and extract key points on client side before pushing\n2. Focus on important information worth remembering\n3. Keep content concise to avoid hitting limits\n4. Split large content into multiple logical chunks if needed',
         inputSchema: {
           type: 'object',
           properties: {
-
-            type: '
-            description: '
-
-
-
-
-                type: 'string',
-                description: 'Short descriptive title for the knowledge',
-              },
-              content: {
-                type: 'string',
-                description: 'The knowledge content to store',
-              },
-              category: {
-                type: 'string',
-                enum: ['fact', 'preference', 'procedure', 'decision', 'insight', 'other'],
-                description: 'Category of the knowledge',
-              },
-              tags: {
-                type: 'array',
-                items: { type: 'string' },
-                description: 'Tags for organizing the knowledge',
-              },
-            },
-            required: ['title', 'content'],
-          },
+            content: {
+              type: 'string',
+              description: 'Raw text content to extract knowledge from and store. Must not exceed your tier\'s character limit (Free: 10K, Pro: 50K, Business: 100K). Recommended: summarize key points before submitting to stay well under the limit.',
+            },
+            project_id: {
+              type: 'string',
+              description: 'Optional project identifier to associate the knowledge with',
             },
           },
-          required: ['
+          required: ['content'],
         },
       },
       {

@@ -176,7 +156,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
 
     case 'push_knowledge': {
       const result = await apiCall('/mcp/knowledge/push', 'POST', {
-
+        content: args.content,
+        project_id: args.project_id,
       });
       return {
         content: [