memtrace-mcp-server 1.0.1 → 1.0.3

Files changed (2)
  1. package/index.js +2 -55
  2. package/package.json +1 -1
package/index.js CHANGED
@@ -79,13 +79,13 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
       },
       {
         name: 'push_knowledge',
-        description: 'Store knowledge by submitting raw content. The server will automatically extract structured knowledge using LLM. Recommended to summarize/extract key points on client side before pushing for better results.',
+        description: 'Store knowledge by submitting raw content. The server will automatically extract structured knowledge using LLM.\n\n**IMPORTANT - Content Size Limits (by tier):**\n- Free tier: 10,000 characters max\n- Pro tier: 50,000 characters max\n- Business tier: 100,000 characters max\n\n**Rate Limits (by tier):**\n- Free: 50 requests/day, 10 requests/min\n- Pro: 200 requests/day, 30 requests/min\n- Business: 1000 requests/day, 100 requests/min\n\n**Best Practices:**\n1. Summarize and extract key points on client side before pushing\n2. Focus on important information worth remembering\n3. Keep content concise to avoid hitting limits\n4. Split large content into multiple logical chunks if needed',
         inputSchema: {
           type: 'object',
           properties: {
             content: {
               type: 'string',
-              description: 'The raw text content to extract knowledge from and store. Can be conversation history, notes, or any text containing important information.',
+              description: 'Raw text content to extract knowledge from and store. Must not exceed your tier\'s character limit (Free: 10K, Pro: 50K, Business: 100K). Recommended: summarize key points before submitting to stay well under the limit.',
             },
             project_id: {
               type: 'string',
@@ -112,28 +112,6 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
           },
         },
       },
-      {
-        name: 'extract_knowledge',
-        description: 'Extract knowledge from text/conversation using LLM. Use this to analyze text and identify key facts, preferences, decisions, and insights that should be remembered.',
-        inputSchema: {
-          type: 'object',
-          properties: {
-            content: {
-              type: 'string',
-              description: 'The text or conversation content to extract knowledge from',
-            },
-            save: {
-              type: 'boolean',
-              description: 'If true, save extracted knowledge to the database (default: false)',
-            },
-            project_id: {
-              type: 'string',
-              description: 'Optional project identifier to associate with extracted knowledge',
-            },
-          },
-          required: ['content'],
-        },
-      },
       {
         name: 'get_startup_context',
         description: 'Get startup context including pinned knowledge and recent items. Call this at the beginning of a session.',
@@ -206,37 +184,6 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
         };
       }

-      case 'extract_knowledge': {
-        const result = await apiCall('/mcp/knowledge/extract', 'POST', {
-          content: args.content,
-          save: args.save || false,
-          project_id: args.project_id,
-        });
-
-        let responseText = `Extracted ${result.items.length} knowledge item(s):\n\n`;
-        for (const item of result.items) {
-          responseText += `**${item.title}** (${item.category}, confidence: ${(item.confidence * 100).toFixed(0)}%)\n`;
-          responseText += `${item.content}\n`;
-          if (item.tags && item.tags.length > 0) {
-            responseText += `Tags: ${item.tags.join(', ')}\n`;
-          }
-          responseText += '\n';
-        }
-
-        if (result.saved) {
-          responseText += `\n✓ Knowledge items have been saved to the database.`;
-        }
-
-        return {
-          content: [
-            {
-              type: 'text',
-              text: responseText,
-            },
-          ],
-        };
-      }
-
       case 'get_startup_context': {
         const context = await apiCall('/context/startup', 'POST', {
           project_id: args.project_id,
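The removed extract_knowledge tool is subsumed by push_knowledge, which per the updated description now performs LLM extraction server-side on every push. A minimal migration sketch; the @modelcontextprotocol/sdk client API and launching the server via npx are assumptions, not shown in this diff:

```js
// Hypothetical migration sketch (not part of the package): callers of the
// removed extract_knowledge tool can send the same text to push_knowledge,
// since the server now extracts structured knowledge on every push.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

const transport = new StdioClientTransport({
  command: 'npx',                   // assumption: server started via npx
  args: ['memtrace-mcp-server'],
});
const client = new Client({ name: 'example-client', version: '0.0.1' }, { capabilities: {} });
await client.connect(transport);

const notes = 'Decision: ship the new API behind a feature flag.';

// 1.0.1 call shape (tool no longer exists in 1.0.3):
// await client.callTool({
//   name: 'extract_knowledge',
//   arguments: { content: notes, save: true, project_id: 'demo' },
// });

// 1.0.3 equivalent: extraction and storage happen in one step.
const result = await client.callTool({
  name: 'push_knowledge',
  arguments: { content: notes, project_id: 'demo' },
});
console.log(result.content);
```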
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "memtrace-mcp-server",
-  "version": "1.0.1",
+  "version": "1.0.3",
   "description": "MCP server for MemTrace - AI context memory service",
   "main": "index.js",
   "type": "module",