memtrace-mcp-server 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.claude/settings.local.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "mcp__memtrace__pull_knowledge",
5
+ "mcp__memtrace__get_startup_context",
6
+ "mcp__memtrace__search_knowledge"
7
+ ]
8
+ }
9
+ }
package/PUBLISH.md ADDED
@@ -0,0 +1,40 @@
1
+ # 发布 MCP Server 到 NPM
2
+
3
+ ## 发布步骤
4
+
5
+ 1. 登录 NPM(如果还没登录):
6
+ ```bash
7
+ npm login
8
+ ```
9
+
10
+ 2. 更新版本号:
11
+ ```bash
12
+ # 补丁版本(bug fix)
13
+ npm version patch
14
+
15
+ # 小版本(新功能)
16
+ npm version minor
17
+
18
+ # 大版本(破坏性更新)
19
+ npm version major
20
+ ```
21
+
22
+ 3. 发布:
23
+ ```bash
24
+ npm publish
25
+ ```
26
+
27
+ ## 用户更新方式
28
+
29
+ 发布后,用户会自动获取最新版本:
30
+
31
+ ### 方式 1:npx -y(推荐,自动最新)
32
+ 配置文件中使用 `npx -y memtrace-mcp-server`,每次启动自动使用最新版
33
+
34
+ ### 方式 2:重启 Claude Code
35
+ 用户只需重启 Claude Code / Cursor,npx 会自动下载最新版
36
+
37
+ ### 方式 3:清除缓存(如果需要)
38
+ ```bash
39
+ npx clear-npx-cache
40
+ ```
package/README.md ADDED
@@ -0,0 +1,200 @@
1
+ # MemTrace MCP Server
2
+
3
+ MCP (Model Context Protocol) server for [MemTrace](https://github.com/chaitin/memtrace) - an AI context memory service that helps AI assistants remember and retrieve information across conversations.
4
+
5
+ ## Features
6
+
7
+ - 🔍 **Semantic Search**: Find relevant knowledge using natural language queries
8
+ - 💾 **Knowledge Storage**: Store important information, decisions, and insights
9
+ - 🧠 **LLM-Powered Extraction**: Automatically extract structured knowledge from text
10
+ - 🔄 **Context Retrieval**: Get startup context and pull relevant knowledge
11
+ - 🏷️ **Categorization**: Organize knowledge by categories (facts, preferences, decisions, etc.)
12
+
13
+ ## Installation
14
+
15
+ ### Option 1: npx (Recommended - Always Latest)
16
+
17
+ No installation needed! Just configure your MCP client:
18
+
19
+ ```json
20
+ {
21
+ "mcpServers": {
22
+ "memtrace": {
23
+ "command": "npx",
24
+ "args": ["-y", "memtrace-mcp-server"],
25
+ "env": {
26
+ "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
27
+ "MEMTRACE_TOKEN": "your-token-here"
28
+ }
29
+ }
30
+ }
31
+ }
32
+ ```
33
+
34
+ ### Option 2: Global Installation
35
+
36
+ ```bash
37
+ npm install -g memtrace-mcp-server
38
+ ```
39
+
40
+ Then configure:
41
+
42
+ ```json
43
+ {
44
+ "mcpServers": {
45
+ "memtrace": {
46
+ "command": "memtrace-mcp-server",
47
+ "env": {
48
+ "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
49
+ "MEMTRACE_TOKEN": "your-token-here"
50
+ }
51
+ }
52
+ }
53
+ }
54
+ ```
55
+
56
+ ## Configuration
57
+
58
+ ### Required Environment Variables
59
+
60
+ - `MEMTRACE_TOKEN`: Your MemTrace API token (get it from your MemTrace dashboard)
61
+ - `MEMTRACE_API_URL`: MemTrace API base URL (default: `http://localhost:3000/api/v1`)
62
+
63
+ ### Getting Your API Token
64
+
65
+ 1. Log in to your MemTrace instance
66
+ 2. Go to Dashboard → Tokens
67
+ 3. Create a new MCP token with required permissions
68
+ 4. Copy the token and add it to your configuration
69
+
70
+ ## Available Tools
71
+
72
+ ### search_knowledge
73
+
74
+ Search for relevant knowledge using semantic search.
75
+
76
+ ```typescript
77
+ search_knowledge({
78
+ query: "search query here",
79
+ limit: 10 // optional, default: 10
80
+ })
81
+ ```
82
+
83
+ ### push_knowledge
84
+
85
+ Store knowledge by submitting raw content. The server will automatically extract structured knowledge using LLM.
86
+
87
+ **Important**: It's recommended to summarize/extract key points on the client side before pushing for better results.
88
+
89
+ ```typescript
90
+ push_knowledge({
91
+ content: "Raw text content containing important information...",
92
+ project_id: "optional-project-id" // optional
93
+ })
94
+ ```
95
+
96
+ **Content Size Limits** (per tier):
97
+ - Free: 10,000 characters
98
+ - Pro: 50,000 characters
99
+ - Business: 100,000 characters
100
+
101
+ ### pull_knowledge
102
+
103
+ Retrieve recent knowledge items.
104
+
105
+ ```typescript
106
+ pull_knowledge({
107
+ limit: 10, // optional, default: 10
108
+ category: "fact" // optional: fact, preference, procedure, decision, insight, other
109
+ })
110
+ ```
111
+
112
+ ### extract_knowledge
113
+
114
+ Extract knowledge from text/conversation using LLM (preview mode, doesn't save unless `save: true`).
115
+
116
+ ```typescript
117
+ extract_knowledge({
118
+ content: "Text to extract knowledge from...",
119
+ save: false, // optional, default: false
120
+ project_id: "optional-project-id" // optional
121
+ })
122
+ ```
123
+
124
+ ### get_startup_context
125
+
126
+ Get startup context with pinned knowledge and recent items for project initialization.
127
+
128
+ ```typescript
129
+ get_startup_context({
130
+ project_id: "optional-project-id", // optional
131
+ max_tokens: 4000 // optional, default: 4000
132
+ })
133
+ ```
134
+
135
+ ## Usage Examples
136
+
137
+ ### With Claude Code
138
+
139
+ 1. Install Claude Code CLI
140
+ 2. Add MemTrace MCP server to `~/.claude/claude_desktop_config.json`:
141
+
142
+ ```json
143
+ {
144
+ "mcpServers": {
145
+ "memtrace": {
146
+ "command": "npx",
147
+ "args": ["-y", "memtrace-mcp-server"],
148
+ "env": {
149
+ "MEMTRACE_API_URL": "https://your-memtrace-server.com/api/v1",
150
+ "MEMTRACE_TOKEN": "your-mcp-token-here"
151
+ }
152
+ }
153
+ }
154
+ }
155
+ ```
156
+
157
+ 3. Restart Claude Code
158
+
159
+ ### With Cursor
160
+
161
+ 1. Add to Cursor MCP configuration
162
+ 2. Restart Cursor
163
+
164
+ ## Rate Limits
165
+
166
+ API rate limits vary by subscription tier:
167
+
168
+ | Tier | Daily Limit | Burst (per minute) |
169
+ |------|-------------|-------------------|
170
+ | Free | 50 requests/day | 10 requests/min |
171
+ | Pro | 200 requests/day | 30 requests/min |
172
+ | Business | 1000 requests/day | 100 requests/min |
173
+
174
+ ## Troubleshooting
175
+
176
+ ### "MEMTRACE_TOKEN environment variable is required"
177
+
178
+ Make sure you've set the `MEMTRACE_TOKEN` in your MCP server configuration.
179
+
180
+ ### "Failed to push knowledge"
181
+
182
+ Check:
183
+ 1. Your API token is valid
184
+ 2. Content doesn't exceed tier limits
185
+ 3. Server logs for detailed error messages
186
+
187
+ ### Updating to Latest Version
188
+
189
+ If using `npx -y`:
190
+ - Just restart your MCP client (Claude Code / Cursor)
191
+ - It will automatically fetch the latest version
192
+
193
+ If using global installation:
194
+ ```bash
195
+ npm update -g memtrace-mcp-server
196
+ ```
197
+
198
+ ## License
199
+
200
+ MIT
package/index.js CHANGED
@@ -79,40 +79,20 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
79
79
  },
80
80
  {
81
81
  name: 'push_knowledge',
82
- description: 'Store new knowledge items for future reference. Use this to save important information, decisions, preferences, or learnings.',
82
+ description: 'Store knowledge by submitting raw content. The server will automatically extract structured knowledge using LLM. Recommended to summarize/extract key points on client side before pushing for better results.',
83
83
  inputSchema: {
84
84
  type: 'object',
85
85
  properties: {
86
- items: {
87
- type: 'array',
88
- description: 'Array of knowledge items to store',
89
- items: {
90
- type: 'object',
91
- properties: {
92
- title: {
93
- type: 'string',
94
- description: 'Short descriptive title for the knowledge',
95
- },
96
- content: {
97
- type: 'string',
98
- description: 'The knowledge content to store',
99
- },
100
- category: {
101
- type: 'string',
102
- enum: ['fact', 'preference', 'procedure', 'decision', 'insight', 'other'],
103
- description: 'Category of the knowledge',
104
- },
105
- tags: {
106
- type: 'array',
107
- items: { type: 'string' },
108
- description: 'Tags for organizing the knowledge',
109
- },
110
- },
111
- required: ['title', 'content'],
112
- },
86
+ content: {
87
+ type: 'string',
88
+ description: 'The raw text content to extract knowledge from and store. Can be conversation history, notes, or any text containing important information.',
89
+ },
90
+ project_id: {
91
+ type: 'string',
92
+ description: 'Optional project identifier to associate the knowledge with',
113
93
  },
114
94
  },
115
- required: ['items'],
95
+ required: ['content'],
116
96
  },
117
97
  },
118
98
  {
@@ -132,6 +112,28 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
132
112
  },
133
113
  },
134
114
  },
115
+ {
116
+ name: 'extract_knowledge',
117
+ description: 'Extract knowledge from text/conversation using LLM. Use this to analyze text and identify key facts, preferences, decisions, and insights that should be remembered.',
118
+ inputSchema: {
119
+ type: 'object',
120
+ properties: {
121
+ content: {
122
+ type: 'string',
123
+ description: 'The text or conversation content to extract knowledge from',
124
+ },
125
+ save: {
126
+ type: 'boolean',
127
+ description: 'If true, save extracted knowledge to the database (default: false)',
128
+ },
129
+ project_id: {
130
+ type: 'string',
131
+ description: 'Optional project identifier to associate with extracted knowledge',
132
+ },
133
+ },
134
+ required: ['content'],
135
+ },
136
+ },
135
137
  {
136
138
  name: 'get_startup_context',
137
139
  description: 'Get startup context including pinned knowledge and recent items. Call this at the beginning of a session.',
@@ -176,7 +178,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
176
178
 
177
179
  case 'push_knowledge': {
178
180
  const result = await apiCall('/mcp/knowledge/push', 'POST', {
179
- items: args.items,
181
+ content: args.content,
182
+ project_id: args.project_id,
180
183
  });
181
184
  return {
182
185
  content: [
@@ -203,6 +206,37 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
203
206
  };
204
207
  }
205
208
 
209
+ case 'extract_knowledge': {
210
+ const result = await apiCall('/mcp/knowledge/extract', 'POST', {
211
+ content: args.content,
212
+ save: args.save || false,
213
+ project_id: args.project_id,
214
+ });
215
+
216
+ let responseText = `Extracted ${result.items.length} knowledge item(s):\n\n`;
217
+ for (const item of result.items) {
218
+ responseText += `**${item.title}** (${item.category}, confidence: ${(item.confidence * 100).toFixed(0)}%)\n`;
219
+ responseText += `${item.content}\n`;
220
+ if (item.tags && item.tags.length > 0) {
221
+ responseText += `Tags: ${item.tags.join(', ')}\n`;
222
+ }
223
+ responseText += '\n';
224
+ }
225
+
226
+ if (result.saved) {
227
+ responseText += `\n✓ Knowledge items have been saved to the database.`;
228
+ }
229
+
230
+ return {
231
+ content: [
232
+ {
233
+ type: 'text',
234
+ text: responseText,
235
+ },
236
+ ],
237
+ };
238
+ }
239
+
206
240
  case 'get_startup_context': {
207
241
  const context = await apiCall('/context/startup', 'POST', {
208
242
  project_id: args.project_id,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "memtrace-mcp-server",
3
- "version": "1.0.0",
3
+ "version": "1.0.1",
4
4
  "description": "MCP server for MemTrace - AI context memory service",
5
5
  "main": "index.js",
6
6
  "type": "module",