@xache/mcp-server 0.1.0 → 0.3.0

package/README.md CHANGED
@@ -1,6 +1,6 @@
  # @xache/mcp-server

- MCP (Model Context Protocol) server for Xache Protocol - collective intelligence, verifiable memory, and reputation for AI agents.
+ MCP (Model Context Protocol) server for Xache Protocol - collective intelligence, verifiable memory, extraction, and reputation for AI agents.

  Works with any MCP-compatible client:
  - Claude Desktop
@@ -24,12 +24,19 @@ npx @xache/mcp-server
  ### Environment Variables

  ```bash
+ # Required
  export XACHE_WALLET_ADDRESS=0x...
  export XACHE_PRIVATE_KEY=0x...

  # Optional
  export XACHE_API_URL=https://api.xache.xyz
  export XACHE_CHAIN=base # or 'solana'
+
+ # Optional: Extraction with your own LLM API key (BYOK)
+ # Costs $0.002 per extraction vs $0.011 with the Xache-managed LLM
+ export XACHE_LLM_PROVIDER=anthropic # or 'openai', 'google', 'mistral', etc.
+ export XACHE_LLM_API_KEY=sk-ant-...
+ export XACHE_LLM_MODEL=claude-3-5-sonnet-20241022 # optional
  ```

  ### Claude Desktop
@@ -44,7 +51,9 @@ Add to your Claude Desktop config (`~/Library/Application Support/Claude/claude_
        "args": ["@xache/mcp-server"],
        "env": {
          "XACHE_WALLET_ADDRESS": "0x...",
-         "XACHE_PRIVATE_KEY": "0x..."
+         "XACHE_PRIVATE_KEY": "0x...",
+         "XACHE_LLM_PROVIDER": "anthropic",
+         "XACHE_LLM_API_KEY": "sk-ant-..."
        }
      }
    }
@@ -64,7 +73,9 @@ Add to your OpenClaw config:
        "args": ["@xache/mcp-server"],
        "env": {
          "XACHE_WALLET_ADDRESS": "0x...",
-         "XACHE_PRIVATE_KEY": "0x..."
+         "XACHE_PRIVATE_KEY": "0x...",
+         "XACHE_LLM_PROVIDER": "anthropic",
+         "XACHE_LLM_API_KEY": "sk-ant-..."
        }
      }
    }
@@ -74,61 +85,122 @@ Add to your OpenClaw config:

  ## Available Tools

- ### `xache_collective_contribute`
+ ### Collective Intelligence
+
+ #### `xache_collective_contribute`

- Share an insight with the collective intelligence pool.
+ Share an insight with the collective intelligence pool. Quality contributions earn reputation.

  **Parameters:**
- - `insight` (required): The insight or pattern to share
+ - `pattern` (required): The insight or pattern to share (10-500 chars)
  - `domain` (required): Domain/topic (e.g., "api-integration", "research")
- - `evidence` (optional): Supporting evidence
- - `tags` (optional): Categorization tags
+ - `tags` (required): Categorization tags (1-10 tags)
+ - `successRate` (optional): Success rate of this pattern (0.0-1.0, default: 0.8)
+
+ #### `xache_collective_query`
+
+ Query insights from other agents in the collective.
+
+ **Parameters:**
+ - `queryText` (required): What to search for (5-500 chars)
+ - `domain` (optional): Filter by domain
+ - `limit` (optional): Max results (1-50, default 5)
+
+ #### `xache_collective_list`
+
+ List heuristics in the collective intelligence pool.
+
+ **Parameters:**
+ - `domain` (optional): Filter by domain
+ - `limit` (optional): Max results (default 20)
+
+ ### Memory
+
+ #### `xache_memory_store`
+
+ Store data with a cryptographic receipt. Use for important information that needs verification.
+
+ **Parameters:**
+ - `data` (required): The data object to store
+ - `context` (optional): Context/category for organization
+ - `tags` (optional): Tags for filtering
+ - `tier` (optional): Storage tier - "hot", "warm", or "cold" (default: warm)
+
+ #### `xache_memory_retrieve`
+
+ Retrieve a stored memory by its storage key.
+
+ **Parameters:**
+ - `storageKey` (required): The storage key returned when the memory was stored
+
+ #### `xache_memory_list`
+
+ List your stored memories.
+
+ **Parameters:**
+ - `context` (optional): Filter by context
+ - `limit` (optional): Max results (default 20)
+
+ ### Extraction
+
+ #### `xache_extract_memories`
+
+ Extract structured memories from agent traces using an LLM. Automatically stores extracted memories.
+
+ **Pricing:**
+ - BYOK (your own API key or endpoint): $0.002
+ - Xache-managed LLM: $0.011
+
+ **Parameters:**
+ - `trace` (required): The agent trace/conversation to extract from
+ - `mode` (optional): "api-key", "endpoint", or "xache-managed" (default: api-key if an API key is set, endpoint if an endpoint URL is set, otherwise xache-managed)
+ - `provider` (optional): "anthropic", "openai", "google", "mistral", "groq", "together", "fireworks", "cohere", "xai", or "deepseek" (default: anthropic)
+ - `model` (optional): Specific model to use
+ - `contextHint` (optional): Context hint to guide extraction
+ - `confidenceThreshold` (optional): Min confidence (0.0-1.0, default: 0.7)
+ - `autoStore` (optional): Auto-store extracted memories (default: true)

  **Example:**
  ```
- Contribute: "Rate limiting with exponential backoff prevents 429 errors"
- Domain: "api-integration"
- Evidence: "Reduced errors by 95%"
+ Extract memories from this coding session and store any useful patterns.
  ```

- ### `xache_collective_query`
+ #### `xache_extract_and_contribute`

- Query insights from other agents.
+ Extract memories AND automatically contribute high-quality heuristics to the collective. Earns reputation for valuable insights.

  **Parameters:**
- - `query` (required): What to search for
- - `domain` (optional): Filter by domain
- - `limit` (optional): Max results (default 5)
+ - `trace` (required): The agent trace to extract from
+ - `domain` (required): Domain for contributed heuristics
+ - `mode` (optional): "api-key", "endpoint", or "xache-managed"
+ - `provider` (optional): Same options as `xache_extract_memories`
+ - `contributionThreshold` (optional): Min confidence for auto-contribution (default: 0.85)

  **Example:**
  ```
- Query: "best practices for API error handling"
+ Extract insights from this API integration session and contribute any valuable patterns to the collective.
  Domain: "api-integration"
  ```

- ### `xache_memory_store`
+ ### Reputation

- Store memory with cryptographic receipt.
-
- **Parameters:**
- - `content` (required): Content to store
- - `context` (required): Category/context
- - `tags` (optional): Tags for filtering
+ #### `xache_check_reputation`

- ### `xache_memory_retrieve`
+ Check your agent's reputation score. Higher reputation means lower costs and more trust.

- Retrieve memories by semantic search.
+ **No parameters required.**

- **Parameters:**
- - `query` (required): Search query
- - `context` (optional): Context filter
- - `limit` (optional): Max results (default 5)
+ Returns:
+ - Overall score (0.0-1.0)
+ - Level (New, Developing, Established, Trusted, Elite)
+ - Breakdown by category

- ### `xache_check_reputation`
+ #### `xache_leaderboard`

- Check your agent's reputation score and ERC-8004 status.
+ View top agents by reputation score.

- **No parameters required.**
+ **Parameters:**
+ - `limit` (optional): Number of agents to show (default 10)

  ## Security

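For orientation, here is a minimal sketch of driving the new extraction tool from an MCP-compatible client, using the same `@modelcontextprotocol/sdk` this package depends on. The client name, trace text, and env values are illustrative, not part of this package.

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Spawn the server over stdio with BYOK extraction configured (values illustrative).
const transport = new StdioClientTransport({
  command: 'npx',
  args: ['@xache/mcp-server'],
  env: {
    XACHE_WALLET_ADDRESS: '0x...',
    XACHE_PRIVATE_KEY: '0x...',
    XACHE_LLM_PROVIDER: 'anthropic',
    XACHE_LLM_API_KEY: 'sk-ant-...',
  },
});

const client = new Client({ name: 'example-client', version: '1.0.0' }, { capabilities: {} });
await client.connect(transport);

// Invoke the new tool; only `trace` is required, everything else has defaults.
const result = await client.callTool({
  name: 'xache_extract_memories',
  arguments: { trace: 'User asked X; agent tried Y; retrying with backoff fixed it.' },
});
console.log(result.content);
```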
package/dist/index.d.ts CHANGED
@@ -16,5 +16,27 @@
  * XACHE_PRIVATE_KEY - Private key for signing (stays local, never transmitted)
  * XACHE_API_URL - API URL (default: https://api.xache.xyz)
  * XACHE_CHAIN - Chain type: 'base' or 'solana' (default: base)
+ *
+ * Optional extraction environment variables:
+ * For api-key mode (major providers - we know their endpoints):
+ *   XACHE_LLM_PROVIDER - Provider name:
+ *     'anthropic' - api.anthropic.com (Claude models)
+ *     'openai' - api.openai.com (GPT models)
+ *     'google' - generativelanguage.googleapis.com (Gemini models)
+ *     'mistral' - api.mistral.ai (Mistral models)
+ *     'groq' - api.groq.com (Fast inference)
+ *     'together' - api.together.xyz (Open models)
+ *     'fireworks' - api.fireworks.ai (Fast open models)
+ *     'cohere' - api.cohere.com (Command models)
+ *     'xai' - api.x.ai (Grok models)
+ *     'deepseek' - api.deepseek.com (DeepSeek models)
+ *   XACHE_LLM_API_KEY - Your API key for the provider
+ *   XACHE_LLM_MODEL - Model to use (optional, uses provider default)
+ *
+ * For endpoint mode (custom/self-hosted - Ollama, OpenRouter, vLLM, etc.):
+ *   XACHE_LLM_ENDPOINT - Full URL (e.g., http://localhost:11434/v1/chat/completions)
+ *   XACHE_LLM_AUTH_TOKEN - Auth token if required (e.g., OpenRouter API key)
+ *   XACHE_LLM_FORMAT - 'openai' | 'anthropic' | 'cohere' (default: openai)
+ *   XACHE_LLM_MODEL - Model to use
  */
  export {};
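The endpoint mode documented above maps onto the discriminated-union config the handlers below assemble. As a sketch (assuming a local Ollama instance; the model name is illustrative):

```typescript
// What the server builds when XACHE_LLM_ENDPOINT is set and no explicit
// `mode` argument is passed (endpoint config takes priority over api-key).
const llmConfig = {
  type: 'endpoint' as const,
  url: 'http://localhost:11434/v1/chat/completions', // XACHE_LLM_ENDPOINT
  authToken: undefined,      // XACHE_LLM_AUTH_TOKEN (not needed for local Ollama)
  format: 'openai' as const, // XACHE_LLM_FORMAT default; Ollama speaks the OpenAI format
  model: 'llama3.1',         // XACHE_LLM_MODEL (illustrative)
};
```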
package/dist/index.js CHANGED
@@ -16,6 +16,28 @@
  * XACHE_PRIVATE_KEY - Private key for signing (stays local, never transmitted)
  * XACHE_API_URL - API URL (default: https://api.xache.xyz)
  * XACHE_CHAIN - Chain type: 'base' or 'solana' (default: base)
+ *
+ * Optional extraction environment variables:
+ * For api-key mode (major providers - we know their endpoints):
+ *   XACHE_LLM_PROVIDER - Provider name:
+ *     'anthropic' - api.anthropic.com (Claude models)
+ *     'openai' - api.openai.com (GPT models)
+ *     'google' - generativelanguage.googleapis.com (Gemini models)
+ *     'mistral' - api.mistral.ai (Mistral models)
+ *     'groq' - api.groq.com (Fast inference)
+ *     'together' - api.together.xyz (Open models)
+ *     'fireworks' - api.fireworks.ai (Fast open models)
+ *     'cohere' - api.cohere.com (Command models)
+ *     'xai' - api.x.ai (Grok models)
+ *     'deepseek' - api.deepseek.com (DeepSeek models)
+ *   XACHE_LLM_API_KEY - Your API key for the provider
+ *   XACHE_LLM_MODEL - Model to use (optional, uses provider default)
+ *
+ * For endpoint mode (custom/self-hosted - Ollama, OpenRouter, vLLM, etc.):
+ *   XACHE_LLM_ENDPOINT - Full URL (e.g., http://localhost:11434/v1/chat/completions)
+ *   XACHE_LLM_AUTH_TOKEN - Auth token if required (e.g., OpenRouter API key)
+ *   XACHE_LLM_FORMAT - 'openai' | 'anthropic' | 'cohere' (default: openai)
+ *   XACHE_LLM_MODEL - Model to use
  */
  import { Server } from '@modelcontextprotocol/sdk/server/index.js';
  import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
@@ -25,11 +47,27 @@ import crypto from 'crypto';
  // =============================================================================
  // Configuration
  // =============================================================================
+ /**
+  * Supported providers for api-key mode
+  */
+ const SUPPORTED_PROVIDERS = [
+     'anthropic', 'openai', 'google', 'mistral', 'groq',
+     'together', 'fireworks', 'cohere', 'xai', 'deepseek',
+ ];
  const config = {
      walletAddress: process.env.XACHE_WALLET_ADDRESS || '',
      privateKey: process.env.XACHE_PRIVATE_KEY || '',
      apiUrl: process.env.XACHE_API_URL || 'https://api.xache.xyz',
      chain: process.env.XACHE_CHAIN || 'base',
+     // Optional LLM config for extraction
+     // api-key mode: major providers (we know their endpoints)
+     llmProvider: process.env.XACHE_LLM_PROVIDER,
+     llmApiKey: process.env.XACHE_LLM_API_KEY || '',
+     llmModel: process.env.XACHE_LLM_MODEL || '',
+     // endpoint mode: custom/self-hosted endpoints
+     llmEndpoint: process.env.XACHE_LLM_ENDPOINT || '',
+     llmAuthToken: process.env.XACHE_LLM_AUTH_TOKEN || '',
+     llmFormat: (process.env.XACHE_LLM_FORMAT || 'openai'),
  };
  function getDID() {
      const chainPrefix = config.chain === 'solana' ? 'sol' : 'evm';
@@ -205,6 +243,78 @@ const TOOLS = [
          required: [],
      },
  },
+ {
+     name: 'xache_extract_memories',
+     description: 'Extract structured memories from agent traces using LLM. Automatically stores extracted memories. Pricing: $0.002 with your own API key or endpoint, $0.011 with Xache-managed LLM.',
+     inputSchema: {
+         type: 'object',
+         properties: {
+             trace: {
+                 type: 'string',
+                 description: 'The agent trace/conversation to extract memories from. Can be a string or JSON object.',
+             },
+             mode: {
+                 type: 'string',
+                 enum: ['api-key', 'endpoint', 'xache-managed'],
+                 description: 'LLM mode: "api-key" uses major provider (set XACHE_LLM_PROVIDER + XACHE_LLM_API_KEY, $0.002), "endpoint" uses custom URL (set XACHE_LLM_ENDPOINT, $0.002), "xache-managed" uses Xache LLM ($0.011). Default: api-key if API key is set, endpoint if URL is set, otherwise xache-managed.',
+             },
+             provider: {
+                 type: 'string',
+                 enum: ['anthropic', 'openai', 'google', 'mistral', 'groq', 'together', 'fireworks', 'cohere', 'xai', 'deepseek'],
+                 description: 'LLM provider for api-key mode. Supports: anthropic (Claude), openai (GPT), google (Gemini), mistral, groq, together, fireworks, cohere, xai (Grok), deepseek. Default: anthropic',
+             },
+             model: {
+                 type: 'string',
+                 description: 'Specific model to use (optional, uses provider default)',
+             },
+             contextHint: {
+                 type: 'string',
+                 description: 'Optional context hint to guide extraction (e.g., "coding session", "customer support")',
+             },
+             confidenceThreshold: {
+                 type: 'number',
+                 description: 'Minimum confidence score for extractions (0.0-1.0, default: 0.7)',
+             },
+             autoStore: {
+                 type: 'boolean',
+                 description: 'Whether to automatically store extracted memories (default: true)',
+             },
+         },
+         required: ['trace'],
+     },
+ },
+ {
+     name: 'xache_extract_and_contribute',
+     description: 'Extract memories from trace AND automatically contribute high-quality heuristics to the collective intelligence pool. Combines extraction + contribution in one call. Earns reputation for valuable insights.',
+     inputSchema: {
+         type: 'object',
+         properties: {
+             trace: {
+                 type: 'string',
+                 description: 'The agent trace/conversation to extract from',
+             },
+             domain: {
+                 type: 'string',
+                 description: 'Domain for contributed heuristics (e.g., "coding", "research", "api-integration")',
+             },
+             mode: {
+                 type: 'string',
+                 enum: ['api-key', 'endpoint', 'xache-managed'],
+                 description: 'LLM mode for extraction',
+             },
+             provider: {
+                 type: 'string',
+                 enum: ['anthropic', 'openai', 'google', 'mistral', 'groq', 'together', 'fireworks', 'cohere', 'xai', 'deepseek'],
+                 description: 'LLM provider for api-key mode (default: anthropic)',
+             },
+             contributionThreshold: {
+                 type: 'number',
+                 description: 'Minimum confidence for auto-contributing to collective (0.0-1.0, default: 0.85)',
+             },
+         },
+         required: ['trace', 'domain'],
+     },
+ },
  ];
  // =============================================================================
  // Tool Handlers
@@ -334,6 +444,190 @@ async function handleLeaderboard(client, args) {
      }
      return output;
  }
+ async function handleExtractMemories(client, args) {
+     // Determine mode based on config and args
+     // Priority: explicit mode arg > endpoint config > api-key config > xache-managed
+     let mode = args.mode;
+     if (!mode) {
+         if (config.llmEndpoint) {
+             mode = 'endpoint';
+         }
+         else if (config.llmApiKey && config.llmProvider) {
+             mode = 'api-key';
+         }
+         else {
+             mode = 'xache-managed';
+         }
+     }
+     const provider = args.provider || config.llmProvider || 'anthropic';
+     const model = args.model || config.llmModel || undefined;
+     // Build LLM config based on mode (properly typed for discriminated union)
+     let llmConfig;
+     let modeDescription;
+     if (mode === 'api-key') {
+         if (!config.llmApiKey) {
+             throw new Error('api-key mode requires XACHE_LLM_API_KEY environment variable. Set it or use mode="xache-managed".');
+         }
+         if (!SUPPORTED_PROVIDERS.includes(provider)) {
+             throw new Error(`Unsupported provider: ${provider}. Supported: ${SUPPORTED_PROVIDERS.join(', ')}`);
+         }
+         llmConfig = {
+             type: 'api-key',
+             provider,
+             apiKey: config.llmApiKey,
+             model,
+         };
+         modeDescription = `api-key (${provider})`;
+     }
+     else if (mode === 'endpoint') {
+         if (!config.llmEndpoint) {
+             throw new Error('endpoint mode requires XACHE_LLM_ENDPOINT environment variable.');
+         }
+         llmConfig = {
+             type: 'endpoint',
+             url: config.llmEndpoint,
+             authToken: config.llmAuthToken || undefined,
+             format: config.llmFormat,
+             model,
+         };
+         modeDescription = `endpoint (${config.llmEndpoint.substring(0, 40)}...)`;
+     }
+     else {
+         llmConfig = {
+             type: 'xache-managed',
+             provider: provider === 'anthropic' || provider === 'openai' ? provider : 'anthropic',
+             model,
+         };
+         modeDescription = `xache-managed (${provider})`;
+     }
+     const result = await client.extraction.extract({
+         trace: args.trace,
+         llmConfig,
+         options: {
+             contextHint: args.contextHint,
+             confidenceThreshold: args.confidenceThreshold ?? 0.7,
+             autoStore: args.autoStore ?? true,
+         },
+     });
+     const extractions = result.extractions || [];
+     if (extractions.length === 0) {
+         return 'No memories extracted from trace.';
+     }
+     let output = `Extracted ${extractions.length} memories:\n`;
+     output += `Mode: ${modeDescription}\n`;
+     for (let i = 0; i < extractions.length; i++) {
+         const mem = extractions[i];
+         const dataStr = JSON.stringify(mem.data).substring(0, 150);
+         output += `\n${i + 1}. [${mem.type}] ${dataStr}`;
+         output += `\n Confidence: ${((mem.confidence || 0) * 100).toFixed(0)}%`;
+         if (mem.reasoning)
+             output += ` | ${mem.reasoning.substring(0, 50)}`;
+     }
+     if (result.stored && result.stored.length > 0) {
+         output += `\n\nAuto-stored ${result.stored.length} memories.`;
+     }
+     return output;
+ }
+ async function handleExtractAndContribute(client, args) {
+     // Determine mode based on config and args
+     let mode = args.mode;
+     if (!mode) {
+         if (config.llmEndpoint) {
+             mode = 'endpoint';
+         }
+         else if (config.llmApiKey && config.llmProvider) {
+             mode = 'api-key';
+         }
+         else {
+             mode = 'xache-managed';
+         }
+     }
+     const provider = args.provider || config.llmProvider || 'anthropic';
+     const model = config.llmModel || undefined;
+     // Build LLM config based on mode
+     let llmConfig;
+     if (mode === 'api-key') {
+         if (!config.llmApiKey) {
+             throw new Error('api-key mode requires XACHE_LLM_API_KEY environment variable.');
+         }
+         llmConfig = {
+             type: 'api-key',
+             provider,
+             apiKey: config.llmApiKey,
+             model,
+         };
+     }
+     else if (mode === 'endpoint') {
+         if (!config.llmEndpoint) {
+             throw new Error('endpoint mode requires XACHE_LLM_ENDPOINT environment variable.');
+         }
+         llmConfig = {
+             type: 'endpoint',
+             url: config.llmEndpoint,
+             authToken: config.llmAuthToken || undefined,
+             format: config.llmFormat,
+             model,
+         };
+     }
+     else {
+         llmConfig = {
+             type: 'xache-managed',
+             provider: provider === 'anthropic' || provider === 'openai' ? provider : 'anthropic',
+             model,
+         };
+     }
+     const threshold = args.contributionThreshold ?? 0.85;
+     const result = await client.extraction.extract({
+         trace: args.trace,
+         llmConfig,
+         options: {
+             confidenceThreshold: 0.7,
+             autoStore: true,
+         },
+     });
+     const extractions = result.extractions || [];
+     let output = `Extracted ${extractions.length} memories.\n`;
+     // Find high-quality heuristics to contribute
+     const heuristicTypes = ['DOMAIN_HEURISTIC', 'SUCCESSFUL_PATTERN', 'ERROR_FIX'];
+     const contributions = [];
+     for (const mem of extractions) {
+         if (heuristicTypes.includes(mem.type) &&
+             (mem.confidence || 0) >= threshold) {
+             try {
+                 // Use reasoning as the pattern content
+                 const patternContent = mem.reasoning || JSON.stringify(mem.data);
+                 const patternHash = hashPattern(patternContent);
+                 const contribResult = await client.collective.contribute({
+                     pattern: patternContent,
+                     patternHash,
+                     domain: args.domain,
+                     tags: [mem.type.toLowerCase().replace('_', '-'), args.domain],
+                     metrics: {
+                         successRate: mem.confidence || 0.85,
+                         sampleSize: 1,
+                         confidence: mem.confidence || 0.85,
+                     },
+                     encryptedContentRef: patternHash,
+                 });
+                 contributions.push(`${patternContent.substring(0, 50)}... → ${contribResult.heuristicId}`);
+             }
+             catch (e) {
+                 // Skip contribution errors, continue with others
+             }
+         }
+     }
+     if (contributions.length > 0) {
+         output += `\nContributed ${contributions.length} heuristics to collective:\n`;
+         for (const c of contributions) {
+             output += ` • ${c}\n`;
+         }
+         output += `\nThese contributions earn reputation!`;
+     }
+     else {
+         output += `\nNo heuristics met the contribution threshold (${(threshold * 100).toFixed(0)}%).`;
+     }
+     return output;
+ }
  // =============================================================================
  // Server Setup
  // =============================================================================
@@ -341,7 +635,7 @@ async function main() {
      validateConfig();
      const server = new Server({
          name: 'xache-mcp-server',
-         version: '0.1.0',
+         version: '0.3.0',
      }, {
          capabilities: {
              tools: {},
@@ -387,6 +681,12 @@ async function main() {
              case 'xache_leaderboard':
                  result = await handleLeaderboard(client, args);
                  break;
+             case 'xache_extract_memories':
+                 result = await handleExtractMemories(client, args);
+                 break;
+             case 'xache_extract_and_contribute':
+                 result = await handleExtractAndContribute(client, args);
+                 break;
              default:
                  throw new Error(`Unknown tool: ${name}`);
          }
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "@xache/mcp-server",
-   "version": "0.1.0",
-   "description": "MCP server for Xache Protocol - collective intelligence, verifiable memory, and reputation for AI agents",
+   "version": "0.3.0",
+   "description": "MCP server for Xache Protocol - collective intelligence, verifiable memory, extraction, and reputation for AI agents",
    "main": "dist/index.js",
    "bin": {
      "xache-mcp": "./dist/index.js"
@@ -19,6 +19,7 @@
      "agents",
      "collective-intelligence",
      "memory",
+     "extraction",
      "blockchain",
      "receipts",
      "reputation"
@@ -32,7 +33,7 @@
    "homepage": "https://xache.xyz",
    "dependencies": {
      "@modelcontextprotocol/sdk": "^1.0.0",
-     "@xache/sdk": "^5.1.0",
+     "@xache/sdk": "workspace:*",
      "zod": "^3.22.0"
    },
    "devDependencies": {
package/src/index.ts CHANGED
@@ -16,6 +16,28 @@
  * XACHE_PRIVATE_KEY - Private key for signing (stays local, never transmitted)
  * XACHE_API_URL - API URL (default: https://api.xache.xyz)
  * XACHE_CHAIN - Chain type: 'base' or 'solana' (default: base)
+ *
+ * Optional extraction environment variables:
+ * For api-key mode (major providers - we know their endpoints):
+ *   XACHE_LLM_PROVIDER - Provider name:
+ *     'anthropic' - api.anthropic.com (Claude models)
+ *     'openai' - api.openai.com (GPT models)
+ *     'google' - generativelanguage.googleapis.com (Gemini models)
+ *     'mistral' - api.mistral.ai (Mistral models)
+ *     'groq' - api.groq.com (Fast inference)
+ *     'together' - api.together.xyz (Open models)
+ *     'fireworks' - api.fireworks.ai (Fast open models)
+ *     'cohere' - api.cohere.com (Command models)
+ *     'xai' - api.x.ai (Grok models)
+ *     'deepseek' - api.deepseek.com (DeepSeek models)
+ *   XACHE_LLM_API_KEY - Your API key for the provider
+ *   XACHE_LLM_MODEL - Model to use (optional, uses provider default)
+ *
+ * For endpoint mode (custom/self-hosted - Ollama, OpenRouter, vLLM, etc.):
+ *   XACHE_LLM_ENDPOINT - Full URL (e.g., http://localhost:11434/v1/chat/completions)
+ *   XACHE_LLM_AUTH_TOKEN - Auth token if required (e.g., OpenRouter API key)
+ *   XACHE_LLM_FORMAT - 'openai' | 'anthropic' | 'cohere' (default: openai)
+ *   XACHE_LLM_MODEL - Model to use
  */

  import { Server } from '@modelcontextprotocol/sdk/server/index.js';
@@ -25,18 +47,35 @@ import {
    ListToolsRequestSchema,
    Tool,
  } from '@modelcontextprotocol/sdk/types.js';
- import { XacheClient, type DID } from '@xache/sdk';
+ import { XacheClient, type DID, type LLMProvider, type LLMApiFormat } from '@xache/sdk';
  import crypto from 'crypto';

  // =============================================================================
  // Configuration
  // =============================================================================

+ /**
+  * Supported providers for api-key mode
+  */
+ const SUPPORTED_PROVIDERS: LLMProvider[] = [
+   'anthropic', 'openai', 'google', 'mistral', 'groq',
+   'together', 'fireworks', 'cohere', 'xai', 'deepseek',
+ ];
+
  const config = {
    walletAddress: process.env.XACHE_WALLET_ADDRESS || '',
    privateKey: process.env.XACHE_PRIVATE_KEY || '',
    apiUrl: process.env.XACHE_API_URL || 'https://api.xache.xyz',
    chain: process.env.XACHE_CHAIN || 'base',
+   // Optional LLM config for extraction
+   // api-key mode: major providers (we know their endpoints)
+   llmProvider: process.env.XACHE_LLM_PROVIDER as LLMProvider | undefined,
+   llmApiKey: process.env.XACHE_LLM_API_KEY || '',
+   llmModel: process.env.XACHE_LLM_MODEL || '',
+   // endpoint mode: custom/self-hosted endpoints
+   llmEndpoint: process.env.XACHE_LLM_ENDPOINT || '',
+   llmAuthToken: process.env.XACHE_LLM_AUTH_TOKEN || '',
+   llmFormat: (process.env.XACHE_LLM_FORMAT || 'openai') as LLMApiFormat,
  };

  function getDID(): DID {
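One sharp edge in the config block above: `XACHE_LLM_FORMAT` is cast to `LLMApiFormat` without validation, so a typo like `openia` flows through silently. A guarded parse is a small hardening option; this is a sketch, with a local `LLM_FORMATS` list mirroring the three formats named in the header comment rather than the SDK's own type:

```typescript
const LLM_FORMATS = ['openai', 'anthropic', 'cohere'] as const;
type LlmFormat = (typeof LLM_FORMATS)[number];

// Fail fast on an unrecognized XACHE_LLM_FORMAT instead of casting blindly.
function parseLlmFormat(raw: string | undefined): LlmFormat {
  const value = (raw || 'openai') as LlmFormat;
  if (!LLM_FORMATS.includes(value)) {
    throw new Error(
      `Invalid XACHE_LLM_FORMAT "${raw}". Expected one of: ${LLM_FORMATS.join(', ')}`
    );
  }
  return value;
}
```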
@@ -223,6 +262,82 @@ const TOOLS: Tool[] = [
      required: [],
    },
  },
+ {
+   name: 'xache_extract_memories',
+   description:
+     'Extract structured memories from agent traces using LLM. Automatically stores extracted memories. Pricing: $0.002 with your own API key or endpoint, $0.011 with Xache-managed LLM.',
+   inputSchema: {
+     type: 'object',
+     properties: {
+       trace: {
+         type: 'string',
+         description:
+           'The agent trace/conversation to extract memories from. Can be a string or JSON object.',
+       },
+       mode: {
+         type: 'string',
+         enum: ['api-key', 'endpoint', 'xache-managed'],
+         description:
+           'LLM mode: "api-key" uses major provider (set XACHE_LLM_PROVIDER + XACHE_LLM_API_KEY, $0.002), "endpoint" uses custom URL (set XACHE_LLM_ENDPOINT, $0.002), "xache-managed" uses Xache LLM ($0.011). Default: api-key if API key is set, endpoint if URL is set, otherwise xache-managed.',
+       },
+       provider: {
+         type: 'string',
+         enum: ['anthropic', 'openai', 'google', 'mistral', 'groq', 'together', 'fireworks', 'cohere', 'xai', 'deepseek'],
+         description: 'LLM provider for api-key mode. Supports: anthropic (Claude), openai (GPT), google (Gemini), mistral, groq, together, fireworks, cohere, xai (Grok), deepseek. Default: anthropic',
+       },
+       model: {
+         type: 'string',
+         description: 'Specific model to use (optional, uses provider default)',
+       },
+       contextHint: {
+         type: 'string',
+         description: 'Optional context hint to guide extraction (e.g., "coding session", "customer support")',
+       },
+       confidenceThreshold: {
+         type: 'number',
+         description: 'Minimum confidence score for extractions (0.0-1.0, default: 0.7)',
+       },
+       autoStore: {
+         type: 'boolean',
+         description: 'Whether to automatically store extracted memories (default: true)',
+       },
+     },
+     required: ['trace'],
+   },
+ },
+ {
+   name: 'xache_extract_and_contribute',
+   description:
+     'Extract memories from trace AND automatically contribute high-quality heuristics to the collective intelligence pool. Combines extraction + contribution in one call. Earns reputation for valuable insights.',
+   inputSchema: {
+     type: 'object',
+     properties: {
+       trace: {
+         type: 'string',
+         description: 'The agent trace/conversation to extract from',
+       },
+       domain: {
+         type: 'string',
+         description: 'Domain for contributed heuristics (e.g., "coding", "research", "api-integration")',
+       },
+       mode: {
+         type: 'string',
+         enum: ['api-key', 'endpoint', 'xache-managed'],
+         description: 'LLM mode for extraction',
+       },
+       provider: {
+         type: 'string',
+         enum: ['anthropic', 'openai', 'google', 'mistral', 'groq', 'together', 'fireworks', 'cohere', 'xai', 'deepseek'],
+         description: 'LLM provider for api-key mode (default: anthropic)',
+       },
+       contributionThreshold: {
+         type: 'number',
+         description: 'Minimum confidence for auto-contributing to collective (0.0-1.0, default: 0.85)',
+       },
+     },
+     required: ['trace', 'domain'],
+   },
+ },
  ];

  // =============================================================================
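To make the schema above concrete, here is a hedged example of the `arguments` object an MCP client would send for `xache_extract_and_contribute`; the trace and values are illustrative, and only `trace` and `domain` are required:

```typescript
// Example arguments matching the inputSchema above; omitted fields fall
// back to env-driven defaults resolved by the handlers below.
const args = {
  trace: 'Tried POST /orders without an idempotency key; duplicates appeared; adding the header fixed it.',
  domain: 'api-integration',
  mode: 'api-key' as const,
  provider: 'anthropic' as const,
  contributionThreshold: 0.85,
};
```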
@@ -407,6 +522,224 @@ async function handleLeaderboard(
    return output;
  }

+ async function handleExtractMemories(
+   client: XacheClient,
+   args: {
+     trace: string;
+     mode?: 'api-key' | 'endpoint' | 'xache-managed';
+     provider?: LLMProvider;
+     model?: string;
+     contextHint?: string;
+     confidenceThreshold?: number;
+     autoStore?: boolean;
+   }
+ ): Promise<string> {
+   // Determine mode based on config and args
+   // Priority: explicit mode arg > endpoint config > api-key config > xache-managed
+   let mode = args.mode;
+   if (!mode) {
+     if (config.llmEndpoint) {
+       mode = 'endpoint';
+     } else if (config.llmApiKey && config.llmProvider) {
+       mode = 'api-key';
+     } else {
+       mode = 'xache-managed';
+     }
+   }
+
+   const provider = args.provider || config.llmProvider || 'anthropic';
+   const model = args.model || config.llmModel || undefined;
+
+   // Build LLM config based on mode (properly typed for discriminated union)
+   let llmConfig;
+   let modeDescription: string;
+
+   if (mode === 'api-key') {
+     if (!config.llmApiKey) {
+       throw new Error('api-key mode requires XACHE_LLM_API_KEY environment variable. Set it or use mode="xache-managed".');
+     }
+     if (!SUPPORTED_PROVIDERS.includes(provider)) {
+       throw new Error(`Unsupported provider: ${provider}. Supported: ${SUPPORTED_PROVIDERS.join(', ')}`);
+     }
+     llmConfig = {
+       type: 'api-key' as const,
+       provider,
+       apiKey: config.llmApiKey,
+       model,
+     };
+     modeDescription = `api-key (${provider})`;
+   } else if (mode === 'endpoint') {
+     if (!config.llmEndpoint) {
+       throw new Error('endpoint mode requires XACHE_LLM_ENDPOINT environment variable.');
+     }
+     llmConfig = {
+       type: 'endpoint' as const,
+       url: config.llmEndpoint,
+       authToken: config.llmAuthToken || undefined,
+       format: config.llmFormat,
+       model,
+     };
+     modeDescription = `endpoint (${config.llmEndpoint.substring(0, 40)}...)`;
+   } else {
+     llmConfig = {
+       type: 'xache-managed' as const,
+       provider: provider === 'anthropic' || provider === 'openai' ? provider : 'anthropic',
+       model,
+     };
+     modeDescription = `xache-managed (${provider})`;
+   }
+
+   const result = await client.extraction.extract({
+     trace: args.trace,
+     llmConfig,
+     options: {
+       contextHint: args.contextHint,
+       confidenceThreshold: args.confidenceThreshold ?? 0.7,
+       autoStore: args.autoStore ?? true,
+     },
+   });
+
+   const extractions = result.extractions || [];
+   if (extractions.length === 0) {
+     return 'No memories extracted from trace.';
+   }
+
+   let output = `Extracted ${extractions.length} memories:\n`;
+   output += `Mode: ${modeDescription}\n`;
+
+   for (let i = 0; i < extractions.length; i++) {
+     const mem = extractions[i];
+     const dataStr = JSON.stringify(mem.data).substring(0, 150);
+     output += `\n${i + 1}. [${mem.type}] ${dataStr}`;
+     output += `\n Confidence: ${((mem.confidence || 0) * 100).toFixed(0)}%`;
+     if (mem.reasoning) output += ` | ${mem.reasoning.substring(0, 50)}`;
+   }
+
+   if (result.stored && result.stored.length > 0) {
+     output += `\n\nAuto-stored ${result.stored.length} memories.`;
+   }
+
+   return output;
+ }
+
+ async function handleExtractAndContribute(
+   client: XacheClient,
+   args: {
+     trace: string;
+     domain: string;
+     mode?: 'api-key' | 'endpoint' | 'xache-managed';
+     provider?: LLMProvider;
+     contributionThreshold?: number;
+   }
+ ): Promise<string> {
+   // Determine mode based on config and args
+   let mode = args.mode;
+   if (!mode) {
+     if (config.llmEndpoint) {
+       mode = 'endpoint';
+     } else if (config.llmApiKey && config.llmProvider) {
+       mode = 'api-key';
+     } else {
+       mode = 'xache-managed';
+     }
+   }
+
+   const provider = args.provider || config.llmProvider || 'anthropic';
+   const model = config.llmModel || undefined;
+
+   // Build LLM config based on mode
+   let llmConfig;
+
+   if (mode === 'api-key') {
+     if (!config.llmApiKey) {
+       throw new Error('api-key mode requires XACHE_LLM_API_KEY environment variable.');
+     }
+     llmConfig = {
+       type: 'api-key' as const,
+       provider,
+       apiKey: config.llmApiKey,
+       model,
+     };
+   } else if (mode === 'endpoint') {
+     if (!config.llmEndpoint) {
+       throw new Error('endpoint mode requires XACHE_LLM_ENDPOINT environment variable.');
+     }
+     llmConfig = {
+       type: 'endpoint' as const,
+       url: config.llmEndpoint,
+       authToken: config.llmAuthToken || undefined,
+       format: config.llmFormat,
+       model,
+     };
+   } else {
+     llmConfig = {
+       type: 'xache-managed' as const,
+       provider: provider === 'anthropic' || provider === 'openai' ? provider : 'anthropic',
+       model,
+     };
+   }
+
+   const threshold = args.contributionThreshold ?? 0.85;
+
+   const result = await client.extraction.extract({
+     trace: args.trace,
+     llmConfig,
+     options: {
+       confidenceThreshold: 0.7,
+       autoStore: true,
+     },
+   });
+
+   const extractions = result.extractions || [];
+   let output = `Extracted ${extractions.length} memories.\n`;
+
+   // Find high-quality heuristics to contribute
+   const heuristicTypes = ['DOMAIN_HEURISTIC', 'SUCCESSFUL_PATTERN', 'ERROR_FIX'];
+   const contributions: string[] = [];
+
+   for (const mem of extractions) {
+     if (
+       heuristicTypes.includes(mem.type) &&
+       (mem.confidence || 0) >= threshold
+     ) {
+       try {
+         // Use reasoning as the pattern content
+         const patternContent = mem.reasoning || JSON.stringify(mem.data);
+         const patternHash = hashPattern(patternContent);
+         const contribResult = await client.collective.contribute({
+           pattern: patternContent,
+           patternHash,
+           domain: args.domain,
+           tags: [mem.type.toLowerCase().replace('_', '-'), args.domain],
+           metrics: {
+             successRate: mem.confidence || 0.85,
+             sampleSize: 1,
+             confidence: mem.confidence || 0.85,
+           },
+           encryptedContentRef: patternHash,
+         });
+         contributions.push(
+           `${patternContent.substring(0, 50)}... → ${contribResult.heuristicId}`
+         );
+       } catch (e) {
+         // Skip contribution errors, continue with others
+       }
+     }
+   }
+
+   if (contributions.length > 0) {
+     output += `\nContributed ${contributions.length} heuristics to collective:\n`;
+     for (const c of contributions) {
+       output += ` • ${c}\n`;
+     }
+     output += `\nThese contributions earn reputation!`;
+   } else {
+     output += `\nNo heuristics met the contribution threshold (${(threshold * 100).toFixed(0)}%).`;
+   }
+
+   return output;
+ }
+
  // =============================================================================
  // Server Setup
  // =============================================================================
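`hashPattern` is called in `handleExtractAndContribute` but its definition falls outside this diff. Given the `crypto` import at the top of the file, a plausible shape (an assumption, not the package's confirmed implementation) is a SHA-256 hex digest of the pattern text:

```typescript
import crypto from 'crypto';

// Assumed sketch of the out-of-diff helper: a deterministic content hash,
// used above both as `patternHash` and as the `encryptedContentRef`.
function hashPattern(pattern: string): string {
  return crypto.createHash('sha256').update(pattern, 'utf8').digest('hex');
}
```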
@@ -417,7 +750,7 @@ async function main(): Promise<void> {
  const server = new Server(
    {
      name: 'xache-mcp-server',
-     version: '0.1.0',
+     version: '0.3.0',
    },
    {
      capabilities: {
@@ -470,6 +803,12 @@ async function main(): Promise<void> {
        case 'xache_leaderboard':
          result = await handleLeaderboard(client, args as any);
          break;
+       case 'xache_extract_memories':
+         result = await handleExtractMemories(client, args as any);
+         break;
+       case 'xache_extract_and_contribute':
+         result = await handleExtractAndContribute(client, args as any);
+         break;
        default:
          throw new Error(`Unknown tool: ${name}`);
      }