@shodh/memory-mcp 0.1.4 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,13 +1,37 @@
1
- # Shodh-Memory MCP Server
1
+ <p align="center">
2
+ <img src="https://raw.githubusercontent.com/varun29ankuS/shodh-memory/main/assets/logo.png" width="120" alt="Shodh-Memory">
3
+ </p>
2
4
 
3
- Persistent AI memory with semantic search. Store observations, decisions, learnings, and recall them across sessions.
5
+ <h1 align="center">Shodh-Memory MCP Server</h1>
6
+
7
+ <p align="center">
8
+ <strong>v0.1.6</strong> | Persistent cognitive memory for AI agents
9
+ </p>
10
+
11
+ <p align="center">
12
+ <a href="https://www.npmjs.com/package/@shodh/memory-mcp"><img src="https://img.shields.io/npm/v/@shodh/memory-mcp" alt="npm"></a>
13
+ <a href="https://github.com/varun29ankuS/shodh-memory/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-Apache--2.0-blue" alt="License"></a>
14
+ </p>
15
+
16
+ <p align="center">
17
+ <a href="https://www.shodh-rag.com/memory">Documentation</a> |
18
+ <a href="https://github.com/varun29ankuS/shodh-memory">GitHub</a> |
19
+ <a href="https://pypi.org/project/shodh-memory/">Python SDK</a> |
20
+ <a href="https://crates.io/crates/shodh-memory">Rust Crate</a>
21
+ </p>
22
+
23
+ ---
4
24
 
5
25
  ## Features
6
26
 
7
- - **Semantic Search**: Find memories by meaning, not just keywords
8
- - **Memory Types**: Categorize as Observation, Decision, Learning, Error, Pattern, etc.
9
- - **Persistent**: Memories survive across sessions and restarts
10
- - **Fast**: Sub-millisecond retrieval with vector indexing
27
+ - **Cognitive Architecture**: 3-tier memory (working, session, long-term) based on Cowan's model
28
+ - **Hebbian Learning**: "Neurons that fire together wire together" - associations strengthen with use
29
+ - **Semantic Search**: Find memories by meaning using MiniLM-L6 embeddings
30
+ - **Knowledge Graph**: Entity extraction and relationship tracking
31
+ - **Memory Consolidation**: Automatic decay, replay, and strengthening
32
+ - **1-Click Install**: Auto-downloads native server binary for your platform
33
+ - **Offline-First**: All models bundled (~15MB), no internet required after install
34
+ - **Fast**: Sub-millisecond graph lookup, 30-50ms semantic search
11
35
 
12
36
  ## Installation
13
37
 
@@ -19,7 +43,10 @@ Add to your MCP client config:
19
43
  "mcpServers": {
20
44
  "shodh-memory": {
21
45
  "command": "npx",
22
- "args": ["-y", "@shodh/memory-mcp"]
46
+ "args": ["-y", "@shodh/memory-mcp"],
47
+ "env": {
48
+ "SHODH_API_KEY": "your-api-key-here"
49
+ }
23
50
  }
24
51
  }
25
52
  }
@@ -32,26 +59,149 @@ Config file locations:
32
59
 
33
60
  **For Cursor/other MCP clients**: Similar configuration with the npx command.
34
61
 
35
- ## Tools
62
+ ## Environment Variables
63
+
64
+ | Variable | Description | Default |
65
+ |----------|-------------|---------|
66
+ | `SHODH_API_KEY` | **Required**. API key for authentication | - |
67
+ | `SHODH_API_URL` | Backend server URL | `http://127.0.0.1:3030` |
68
+ | `SHODH_USER_ID` | User ID for memory isolation | `claude-code` |
69
+ | `SHODH_NO_AUTO_SPAWN` | Set to `true` to disable auto-starting the backend | `false` |
70
+ | `SHODH_STREAM` | Enable/disable streaming ingestion | `true` |
71
+ | `SHODH_PROACTIVE` | Enable/disable proactive memory surfacing | `true` |
72
+
73
+ ## MCP Tools
36
74
 
37
75
  | Tool | Description |
38
76
  |------|-------------|
39
77
  | `remember` | Store a memory with optional type and tags |
40
78
  | `recall` | Semantic search to find relevant memories |
79
+ | `proactive_context` | Auto-surface relevant memories for current context |
41
80
  | `context_summary` | Get categorized context for session bootstrap |
42
81
  | `list_memories` | List all stored memories |
43
82
  | `forget` | Delete a specific memory by ID |
44
83
  | `memory_stats` | Get statistics about stored memories |
84
+ | `recall_by_tags` | Find memories by tag |
85
+ | `recall_by_date` | Find memories within a date range |
86
+ | `verify_index` | Check vector index health |
87
+ | `repair_index` | Repair orphaned memories |
88
+ | `consolidation_report` | View memory consolidation activity |
89
+
90
+ ## REST API (for Developers)
91
+
92
+ The server exposes a REST API at `http://127.0.0.1:3030`:
93
+
94
+ ```javascript
95
+ // Store a memory
96
+ const res = await fetch("http://127.0.0.1:3030/api/remember", {
97
+ method: "POST",
98
+ headers: {
99
+ "Content-Type": "application/json",
100
+ "X-API-Key": "your-api-key"
101
+ },
102
+ body: JSON.stringify({
103
+ user_id: "my-app",
104
+ content: "User prefers dark mode",
105
+ memory_type: "Observation",
106
+ tags: ["preferences", "ui"]
107
+ })
108
+ });
109
+
110
+ // Semantic search
111
+ const results = await fetch("http://127.0.0.1:3030/api/recall", {
112
+ method: "POST",
113
+ headers: {
114
+ "Content-Type": "application/json",
115
+ "X-API-Key": "your-api-key"
116
+ },
117
+ body: JSON.stringify({
118
+ user_id: "my-app",
119
+ query: "user preferences",
120
+ limit: 5
121
+ })
122
+ });
123
+ ```
124
+
125
+ ### Key Endpoints
126
+
127
+ | Endpoint | Method | Description |
128
+ |----------|--------|-------------|
129
+ | `/health` | GET | Health check |
130
+ | `/api/remember` | POST | Store a memory |
131
+ | `/api/recall` | POST | Semantic search |
132
+ | `/api/recall/tags` | POST | Search by tags |
133
+ | `/api/recall/date` | POST | Search by date range |
134
+ | `/api/memories` | POST | List all memories |
135
+ | `/api/memory/{id}` | GET/PUT/DELETE | CRUD operations |
136
+ | `/api/context_summary` | POST | Get context summary |
137
+ | `/api/relevant` | POST | Proactive context surfacing |
138
+ | `/api/batch_remember` | POST | Store multiple memories |
139
+ | `/api/upsert` | POST | Create or update by external_id |
140
+ | `/api/graph/{user_id}/stats` | GET | Knowledge graph statistics |
141
+ | `/api/consolidation/report` | POST | Memory consolidation report |
142
+ | `/api/index/verify` | POST | Verify index integrity |
143
+ | `/metrics` | GET | Prometheus metrics |
144
+
145
+ ## Cognitive Features
146
+
147
+ ### Hebbian Learning
148
+ Memories that are frequently accessed together form stronger associations. The system automatically:
149
+ - Forms edges between co-retrieved memories
150
+ - Strengthens connections with repeated co-activation
151
+ - Enables Long-Term Potentiation (LTP) for permanent associations
152
+
153
+ ### Memory Consolidation
154
+ Background processes maintain memory health:
155
+ - **Decay**: Unused memories gradually lose activation
156
+ - **Replay**: High-value memories are periodically replayed
157
+ - **Pruning**: Weak associations are removed
158
+ - **Promotion**: Important memories move to long-term storage
159
+
160
+ ### 3-Tier Architecture
161
+ Based on Cowan's working memory model:
162
+ 1. **Working Memory**: Recent, highly active memories
163
+ 2. **Session Memory**: Current session context
164
+ 3. **Long-Term Memory**: Persistent storage with vector indexing
165
+
166
+ ## How It Works
167
+
168
+ 1. **Install**: `npx -y @shodh/memory-mcp` downloads the package
169
+ 2. **Auto-spawn**: On first run, downloads the native server binary (~15MB)
170
+ 3. **Connect**: MCP client connects to the server via stdio
171
+ 4. **Ready**: Start using `remember` and `recall` tools
172
+
173
+ The backend server runs locally and stores all data on your machine. No cloud dependency.
45
174
 
46
175
  ## Usage Examples
47
176
 
48
177
  ```
49
178
  "Remember that the user prefers Rust over Python for systems programming"
50
179
  "Recall what I know about user's programming preferences"
180
+ "What context do you have about this project?"
51
181
  "List my recent memories"
52
- "Show memory stats"
182
+ "Show me the consolidation report"
53
183
  ```
54
184
 
185
+ ## Platform Support
186
+
187
+ | Platform | Architecture | Status |
188
+ |----------|--------------|--------|
189
+ | Linux | x64 | Supported |
190
+ | macOS | x64 | Supported |
191
+ | macOS | ARM64 (M1/M2) | Supported |
192
+ | Windows | x64 | Supported |
193
+
194
+ ## Related Packages
195
+
196
+ - **Python SDK**: `pip install shodh-memory` - Native Python bindings
197
+ - **Rust Crate**: `cargo add shodh-memory` - Use as a library
198
+
199
+ ## Links
200
+
201
+ - [Documentation](https://www.shodh-rag.com/memory)
202
+ - [GitHub Repository](https://github.com/varun29ankuS/shodh-memory)
203
+ - [Issue Tracker](https://github.com/varun29ankuS/shodh-memory/issues)
204
+
55
205
  ## License
56
206
 
57
207
  Apache-2.0
package/dist/index.js CHANGED
@@ -4878,21 +4878,198 @@ class StdioServerTransport {
4878
4878
  }
4879
4879
 
4880
4880
  // index.ts
4881
+ import { spawn } from "child_process";
4882
+ import * as path from "path";
4883
+ import * as fs from "fs";
4884
+ import { fileURLToPath } from "url";
4885
+ var __filename2 = fileURLToPath(import.meta.url);
4886
+ var __dirname2 = path.dirname(__filename2);
4881
4887
  var API_URL = process.env.SHODH_API_URL || "http://127.0.0.1:3030";
4882
- var API_KEY = process.env.SHODH_API_KEY || "shodh-dev-key-change-in-production";
4888
+ var WS_URL = API_URL.replace(/^http/, "ws") + "/api/stream";
4883
4889
  var USER_ID = process.env.SHODH_USER_ID || "claude-code";
4890
+ var API_KEY = process.env.SHODH_API_KEY;
4891
+ if (!API_KEY) {
4892
+ console.error("ERROR: SHODH_API_KEY environment variable not set.");
4893
+ console.error("");
4894
+ console.error("To fix, add to your MCP config (claude_desktop_config.json or mcp.json):");
4895
+ console.error(` "env": { "SHODH_API_KEY": "your-api-key" }`);
4896
+ console.error("");
4897
+ console.error("Or set in your shell:");
4898
+ console.error(" export SHODH_API_KEY=your-api-key");
4899
+ console.error("");
4900
+ console.error("For local development, use the same key set in SHODH_DEV_API_KEY on the server.");
4901
+ process.exit(1);
4902
+ }
4884
4903
  var RETRY_ATTEMPTS = 3;
4885
4904
  var RETRY_DELAY_MS = 1000;
4886
4905
  var REQUEST_TIMEOUT_MS = 1e4;
4906
+ var STREAM_ENABLED = process.env.SHODH_STREAM !== "false";
4907
+ var STREAM_MIN_CONTENT_LENGTH = 50;
4908
+ var PROACTIVE_SURFACING = process.env.SHODH_PROACTIVE !== "false";
4909
+ var PROACTIVE_MIN_CONTEXT_LENGTH = 30;
4910
+ var streamSocket = null;
4911
+ var streamConnecting = false;
4912
+ var streamReconnectTimer = null;
4913
+ var streamBuffer = [];
4914
+ var MAX_BUFFER_SIZE = 100;
4915
+ var streamHandshakeComplete = false;
4916
+ async function connectStream() {
4917
+ if (!STREAM_ENABLED || streamConnecting || streamSocket?.readyState === WebSocket.OPEN) {
4918
+ return;
4919
+ }
4920
+ streamConnecting = true;
4921
+ streamHandshakeComplete = false;
4922
+ try {
4923
+ streamSocket = new WebSocket(WS_URL);
4924
+ streamSocket.onopen = () => {
4925
+ streamConnecting = false;
4926
+ console.error("[Stream] WebSocket connected to", WS_URL);
4927
+ const handshake = JSON.stringify({
4928
+ user_id: USER_ID,
4929
+ mode: "conversation",
4930
+ extraction_config: {
4931
+ checkpoint_interval_ms: 5000,
4932
+ max_buffer_size: 50,
4933
+ auto_dedupe: true,
4934
+ extract_entities: true
4935
+ }
4936
+ });
4937
+ streamSocket?.send(handshake);
4938
+ console.error("[Stream] Sent handshake for user:", USER_ID);
4939
+ };
4940
+ streamSocket.onmessage = (event) => {
4941
+ try {
4942
+ const response = JSON.parse(event.data);
4943
+ if (response.type === "ack" && response.message_type === "handshake") {
4944
+ streamHandshakeComplete = true;
4945
+ console.error("[Stream] Handshake ACK received, streaming ready");
4946
+ const bufferedCount = streamBuffer.length;
4947
+ while (streamBuffer.length > 0) {
4948
+ const msg = streamBuffer.shift();
4949
+ if (msg && streamSocket?.readyState === WebSocket.OPEN) {
4950
+ streamSocket.send(msg);
4951
+ }
4952
+ }
4953
+ if (bufferedCount > 0) {
4954
+ console.error(`[Stream] Flushed ${bufferedCount} buffered messages`);
4955
+ }
4956
+ }
4957
+ } catch {}
4958
+ };
4959
+ streamSocket.onclose = (event) => {
4960
+ console.error("[Stream] WebSocket closed:", event.code, event.reason || "(no reason)");
4961
+ streamSocket = null;
4962
+ streamConnecting = false;
4963
+ streamHandshakeComplete = false;
4964
+ if (STREAM_ENABLED && !streamReconnectTimer) {
4965
+ streamReconnectTimer = setTimeout(() => {
4966
+ streamReconnectTimer = null;
4967
+ console.error("[Stream] Attempting reconnect...");
4968
+ connectStream().catch(() => {});
4969
+ }, 5000);
4970
+ }
4971
+ };
4972
+ streamSocket.onerror = (error) => {
4973
+ console.error("[Stream] WebSocket error:", error);
4974
+ };
4975
+ } catch (err) {
4976
+ console.error("[Stream] Failed to create WebSocket:", err);
4977
+ streamConnecting = false;
4978
+ }
4979
+ }
4980
+ function streamMemory(content, tags = [], source = "assistant", timestamp) {
4981
+ if (!STREAM_ENABLED || content.length < STREAM_MIN_CONTENT_LENGTH)
4982
+ return;
4983
+ const message = JSON.stringify({
4984
+ type: "content",
4985
+ content: content.slice(0, 4000),
4986
+ source,
4987
+ timestamp: timestamp || new Date().toISOString(),
4988
+ tags: ["stream", ...tags],
4989
+ metadata: {}
4990
+ });
4991
+ if (streamSocket?.readyState === WebSocket.OPEN && streamHandshakeComplete) {
4992
+ streamSocket.send(message);
4993
+ console.error(`[Stream] Sent memory (${content.length} chars) with tags:`, tags);
4994
+ } else {
4995
+ if (streamBuffer.length < MAX_BUFFER_SIZE) {
4996
+ streamBuffer.push(message);
4997
+ console.error(`[Stream] Buffered memory (socket not ready, buffer size: ${streamBuffer.length})`);
4998
+ }
4999
+ connectStream().catch(() => {});
5000
+ }
5001
+ }
5002
+ function streamFlush() {
5003
+ if (!STREAM_ENABLED)
5004
+ return;
5005
+ if (streamSocket?.readyState === WebSocket.OPEN && streamHandshakeComplete) {
5006
+ streamSocket.send(JSON.stringify({ type: "flush" }));
5007
+ }
5008
+ }
5009
+ console.error("[Stream] Initializing connection to", WS_URL);
5010
+ connectStream().catch((err) => {
5011
+ console.error("[Stream] Initial connection failed:", err);
5012
+ });
4887
5013
  function getContent(m) {
4888
- return m.experience?.content || "";
5014
+ return m.content || m.experience?.content || "";
4889
5015
  }
4890
5016
  function getType(m) {
4891
- return m.experience?.experience_type || "Observation";
5017
+ return m.memory_type || m.experience?.memory_type || m.experience?.experience_type || "Observation";
4892
5018
  }
4893
5019
  function sleep(ms) {
4894
5020
  return new Promise((resolve) => setTimeout(resolve, ms));
4895
5021
  }
5022
+ async function surfaceRelevant(context, maxResults = 3) {
5023
+ if (!PROACTIVE_SURFACING || context.length < PROACTIVE_MIN_CONTEXT_LENGTH) {
5024
+ return null;
5025
+ }
5026
+ try {
5027
+ const controller = new AbortController;
5028
+ const timeoutId = setTimeout(() => controller.abort(), 3000);
5029
+ const response = await fetch(`${API_URL}/api/relevant`, {
5030
+ method: "POST",
5031
+ headers: {
5032
+ "Content-Type": "application/json",
5033
+ "X-API-Key": API_KEY
5034
+ },
5035
+ body: JSON.stringify({
5036
+ user_id: USER_ID,
5037
+ context: context.slice(0, 2000),
5038
+ config: {
5039
+ semantic_threshold: 0.65,
5040
+ max_results: maxResults
5041
+ }
5042
+ }),
5043
+ signal: controller.signal
5044
+ });
5045
+ clearTimeout(timeoutId);
5046
+ if (!response.ok)
5047
+ return null;
5048
+ const result = await response.json();
5049
+ return result.memories || null;
5050
+ } catch {
5051
+ return null;
5052
+ }
5053
+ }
5054
+ function formatSurfacedMemories(memories) {
5055
+ if (!memories || memories.length === 0)
5056
+ return "";
5057
+ const formatted = memories.map((m, i) => ` ${i + 1}. [${(m.relevance_score * 100).toFixed(0)}%] ${m.content.slice(0, 80)}...`).join(`
5058
+ `);
5059
+ return `
5060
+
5061
+ [Relevant memories surfaced]
5062
+ ${formatted}`;
5063
+ }
5064
+ function streamToolCall(toolName, args, resultText) {
5065
+ if (["remember", "recall", "forget", "list_memories"].includes(toolName))
5066
+ return;
5067
+ const argsStr = JSON.stringify(args, null, 2);
5068
+ const content = `Tool: ${toolName}
5069
+ Input: ${argsStr}
5070
+ Result: ${resultText.slice(0, 1000)}${resultText.length > 1000 ? "..." : ""}`;
5071
+ streamMemory(content, ["tool-call", toolName], "tool");
5072
+ }
4896
5073
  async function apiCall(endpoint, method = "GET", body) {
4897
5074
  let lastError = null;
4898
5075
  for (let attempt = 1;attempt <= RETRY_ATTEMPTS; attempt++) {
@@ -4945,7 +5122,7 @@ async function isServerAvailable() {
4945
5122
  }
4946
5123
  var server = new Server({
4947
5124
  name: "shodh-memory",
4948
- version: "0.1.2"
5125
+ version: "0.1.51"
4949
5126
  }, {
4950
5127
  capabilities: {
4951
5128
  tools: {},
@@ -4975,6 +5152,43 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
4975
5152
  type: "array",
4976
5153
  items: { type: "string" },
4977
5154
  description: "Optional tags for categorization"
5155
+ },
5156
+ created_at: {
5157
+ type: "string",
5158
+ description: "Optional ISO 8601 timestamp for the memory (e.g., '2025-12-15T06:30:00Z'). If not provided, uses current time."
5159
+ },
5160
+ emotional_valence: {
5161
+ type: "number",
5162
+ description: "Emotional valence: -1.0 (negative) to 1.0 (positive), 0.0 = neutral. E.g., bug found: -0.3, feature shipped: 0.7"
5163
+ },
5164
+ emotional_arousal: {
5165
+ type: "number",
5166
+ description: "Arousal level: 0.0 (calm) to 1.0 (highly aroused). E.g., routine task: 0.2, critical issue: 0.9"
5167
+ },
5168
+ emotion: {
5169
+ type: "string",
5170
+ description: "Dominant emotion label (e.g., 'joy', 'frustration', 'surprise')"
5171
+ },
5172
+ source_type: {
5173
+ type: "string",
5174
+ enum: ["user", "system", "api", "file", "web", "ai_generated", "inferred"],
5175
+ description: "Source type: where the information came from"
5176
+ },
5177
+ credibility: {
5178
+ type: "number",
5179
+ description: "Credibility score: 0.0 to 1.0 (1.0 = verified facts, 0.3 = inferred)"
5180
+ },
5181
+ episode_id: {
5182
+ type: "string",
5183
+ description: "Episode ID - groups memories into coherent episodes/conversations"
5184
+ },
5185
+ sequence_number: {
5186
+ type: "number",
5187
+ description: "Sequence number within episode (1, 2, 3...)"
5188
+ },
5189
+ preceding_memory_id: {
5190
+ type: "string",
5191
+ description: "ID of the preceding memory (for temporal chains)"
4978
5192
  }
4979
5193
  },
4980
5194
  required: ["content"]
@@ -4982,7 +5196,7 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
4982
5196
  },
4983
5197
  {
4984
5198
  name: "recall",
4985
- description: "Search memories by semantic similarity. Use this to find relevant past experiences, decisions, or context.",
5199
+ description: "Search memories using different retrieval modes. Use this to find relevant past experiences, decisions, or context. Modes: 'semantic' (vector similarity), 'associative' (graph traversal - follows learned connections between memories), 'hybrid' (combines both with density-dependent weighting).",
4986
5200
  inputSchema: {
4987
5201
  type: "object",
4988
5202
  properties: {
@@ -4994,6 +5208,12 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
4994
5208
  type: "number",
4995
5209
  description: "Maximum number of results (default: 5)",
4996
5210
  default: 5
5211
+ },
5212
+ mode: {
5213
+ type: "string",
5214
+ enum: ["semantic", "associative", "hybrid"],
5215
+ description: "Retrieval mode: 'semantic' for pure vector similarity, 'associative' for graph-based traversal (follows learned connections), 'hybrid' for density-dependent combination (default)",
5216
+ default: "hybrid"
4997
5217
  }
4998
5218
  },
4999
5219
  required: ["query"]
@@ -5048,12 +5268,12 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
5048
5268
  inputSchema: {
5049
5269
  type: "object",
5050
5270
  properties: {
5051
- memory_id: {
5271
+ id: {
5052
5272
  type: "string",
5053
5273
  description: "The ID of the memory to delete"
5054
5274
  }
5055
5275
  },
5056
- required: ["memory_id"]
5276
+ required: ["id"]
5057
5277
  }
5058
5278
  },
5059
5279
  {
@@ -5064,6 +5284,22 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
5064
5284
  properties: {}
5065
5285
  }
5066
5286
  },
5287
+ {
5288
+ name: "verify_index",
5289
+ description: "Verify vector index integrity - diagnose orphaned memories that are stored but not searchable. Returns health status and count of orphaned memories.",
5290
+ inputSchema: {
5291
+ type: "object",
5292
+ properties: {}
5293
+ }
5294
+ },
5295
+ {
5296
+ name: "repair_index",
5297
+ description: "Repair vector index by re-indexing orphaned memories. Use this when verify_index shows unhealthy status. Returns count of repaired memories.",
5298
+ inputSchema: {
5299
+ type: "object",
5300
+ properties: {}
5301
+ }
5302
+ },
5067
5303
  {
5068
5304
  name: "recall_by_tags",
5069
5305
  description: "Search memories by tags. Returns memories matching ANY of the provided tags.",
@@ -5139,12 +5375,100 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
5139
5375
  },
5140
5376
  required: ["start", "end"]
5141
5377
  }
5378
+ },
5379
+ {
5380
+ name: "consolidation_report",
5381
+ description: "Get a report of what the memory system has been learning. Shows memory strengthening/decay events, edge formation, fact extraction, and maintenance cycles. Use this to understand how your memories are evolving.",
5382
+ inputSchema: {
5383
+ type: "object",
5384
+ properties: {
5385
+ since: {
5386
+ type: "string",
5387
+ description: "Start of report period (ISO 8601 format). Defaults to 24 hours ago."
5388
+ },
5389
+ until: {
5390
+ type: "string",
5391
+ description: "End of report period (ISO 8601 format). Defaults to now."
5392
+ }
5393
+ }
5394
+ }
5395
+ },
5396
+ {
5397
+ name: "proactive_context",
5398
+ description: "REQUIRED: Call this tool with EVERY user message to surface relevant memories and build conversation history. Pass the user's message as context. This enables: (1) retrieving memories relevant to what the user is asking, (2) building persistent memory of the conversation for future sessions. The system analyzes entities, semantic similarity, and recency to find contextually appropriate memories. Auto-ingest stores the context automatically. USAGE: Always call this FIRST when you receive a user message, passing their message as the context parameter.",
5399
+ inputSchema: {
5400
+ type: "object",
5401
+ properties: {
5402
+ context: {
5403
+ type: "string",
5404
+ description: "The current conversation context or topic (e.g., recent messages, current task description)"
5405
+ },
5406
+ semantic_threshold: {
5407
+ type: "number",
5408
+ description: "Minimum semantic similarity (0.0-1.0) for memories to be surfaced (default: 0.65)",
5409
+ default: 0.65
5410
+ },
5411
+ entity_match_weight: {
5412
+ type: "number",
5413
+ description: "Weight for entity matching in relevance scoring (0.0-1.0, default: 0.4)",
5414
+ default: 0.4
5415
+ },
5416
+ recency_weight: {
5417
+ type: "number",
5418
+ description: "Weight for recency boost in relevance scoring (0.0-1.0, default: 0.2)",
5419
+ default: 0.2
5420
+ },
5421
+ max_results: {
5422
+ type: "number",
5423
+ description: "Maximum number of memories to surface (default: 5)",
5424
+ default: 5
5425
+ },
5426
+ memory_types: {
5427
+ type: "array",
5428
+ items: { type: "string" },
5429
+ description: "Filter to specific memory types (e.g., ['Decision', 'Learning', 'Context']). Empty means all types."
5430
+ },
5431
+ auto_ingest: {
5432
+ type: "boolean",
5433
+ description: "Automatically store the context as a Conversation memory (default: true). Set to false to only surface memories without storing.",
5434
+ default: true
5435
+ }
5436
+ },
5437
+ required: ["context"]
5438
+ }
5439
+ },
5440
+ {
5441
+ name: "streaming_status",
5442
+ description: "Check the status of WebSocket streaming connection. Use this to diagnose if streaming memory ingestion is working.",
5443
+ inputSchema: {
5444
+ type: "object",
5445
+ properties: {}
5446
+ }
5142
5447
  }
5143
5448
  ]
5144
5449
  };
5145
5450
  });
5451
+ function autoStreamContext(toolName, args) {
5452
+ if (["proactive_context", "streaming_status"].includes(toolName))
5453
+ return;
5454
+ let context = "";
5455
+ if (args.query && typeof args.query === "string") {
5456
+ context = `Query: ${args.query}`;
5457
+ } else if (args.content && typeof args.content === "string") {
5458
+ context = args.content;
5459
+ } else if (args.context && typeof args.context === "string") {
5460
+ context = args.context;
5461
+ }
5462
+ if (context.length >= 20) {
5463
+ streamMemory(context, ["auto-context", toolName], "user");
5464
+ }
5465
+ }
5146
5466
  server.setRequestHandler(CallToolRequestSchema, async (request) => {
5147
5467
  const { name, arguments: args } = request.params;
5468
+ if (STREAM_ENABLED && (!streamSocket || streamSocket.readyState !== WebSocket.OPEN)) {
5469
+ connectStream().catch(() => {});
5470
+ }
5471
+ autoStreamContext(name, args);
5148
5472
  const serverUp = await isServerAvailable();
5149
5473
  if (!serverUp) {
5150
5474
  return {
@@ -5159,42 +5483,64 @@ To start: cd shodh-memory && cargo run`
5159
5483
  isError: true
5160
5484
  };
5161
5485
  }
5162
- try {
5486
+ const executeTool = async () => {
5163
5487
  switch (name) {
5164
5488
  case "remember": {
5165
- const { content, type = "Observation", tags = [] } = args;
5166
- const result = await apiCall("/api/record", "POST", {
5489
+ const {
5490
+ content,
5491
+ type = "Observation",
5492
+ tags = [],
5493
+ created_at,
5494
+ emotional_valence,
5495
+ emotional_arousal,
5496
+ emotion,
5497
+ source_type,
5498
+ credibility,
5499
+ episode_id,
5500
+ sequence_number,
5501
+ preceding_memory_id
5502
+ } = args;
5503
+ const result = await apiCall("/api/remember", "POST", {
5167
5504
  user_id: USER_ID,
5168
- experience: {
5169
- content,
5170
- experience_type: type,
5171
- tags
5172
- }
5505
+ content,
5506
+ memory_type: type,
5507
+ tags,
5508
+ ...created_at && { created_at },
5509
+ ...emotional_valence !== undefined && { emotional_valence },
5510
+ ...emotional_arousal !== undefined && { emotional_arousal },
5511
+ ...emotion && { emotion },
5512
+ ...source_type && { source_type },
5513
+ ...credibility !== undefined && { credibility },
5514
+ ...episode_id && { episode_id },
5515
+ ...sequence_number !== undefined && { sequence_number },
5516
+ ...preceding_memory_id && { preceding_memory_id }
5173
5517
  });
5174
5518
  return {
5175
5519
  content: [
5176
5520
  {
5177
5521
  type: "text",
5178
5522
  text: `Remembered: "${content.slice(0, 50)}${content.length > 50 ? "..." : ""}"
5179
- Memory ID: ${result.memory_id}`
5523
+ Memory ID: ${result.id}`
5180
5524
  }
5181
5525
  ]
5182
5526
  };
5183
5527
  }
5184
5528
  case "recall": {
5185
- const { query, limit = 5 } = args;
5186
- const result = await apiCall("/api/retrieve", "POST", {
5529
+ const { query, limit = 5, mode = "hybrid" } = args;
5530
+ const result = await apiCall("/api/recall", "POST", {
5187
5531
  user_id: USER_ID,
5188
5532
  query,
5189
- limit
5533
+ limit,
5534
+ mode
5190
5535
  });
5191
5536
  const memories = result.memories || [];
5537
+ const stats = result.retrieval_stats;
5192
5538
  if (memories.length === 0) {
5193
5539
  return {
5194
5540
  content: [
5195
5541
  {
5196
5542
  type: "text",
5197
- text: `No memories found for: "${query}"`
5543
+ text: `No memories found for: "${query}" (mode: ${mode})`
5198
5544
  }
5199
5545
  ]
5200
5546
  };
@@ -5207,13 +5553,21 @@ Memory ID: ${result.memory_id}`
5207
5553
  }).join(`
5208
5554
 
5209
5555
  `);
5556
+ let statsText = "";
5557
+ if (stats && (mode === "associative" || mode === "hybrid")) {
5558
+ const graphPct = (stats.graph_weight * 100).toFixed(0);
5559
+ const semPct = (stats.semantic_weight * 100).toFixed(0);
5560
+ statsText = `
5561
+
5562
+ [Stats: ${stats.mode} mode | graph=${graphPct}% semantic=${semPct}% | density=${stats.graph_density.toFixed(2)} | ${stats.graph_candidates} graph + ${stats.semantic_candidates} semantic candidates | ${stats.entities_activated} entities | ${(stats.retrieval_time_us / 1000).toFixed(1)}ms]`;
5563
+ }
5210
5564
  return {
5211
5565
  content: [
5212
5566
  {
5213
5567
  type: "text",
5214
- text: `Found ${memories.length} relevant memories:
5568
+ text: `Found ${memories.length} relevant memories (${mode} mode):
5215
5569
 
5216
- ${formatted}`
5570
+ ${formatted}${statsText}`
5217
5571
  }
5218
5572
  ]
5219
5573
  };
@@ -5345,13 +5699,13 @@ ${formatted}`
5345
5699
  };
5346
5700
  }
5347
5701
  case "forget": {
5348
- const { memory_id } = args;
5349
- await apiCall(`/api/memory/${memory_id}?user_id=${USER_ID}`, "DELETE");
5702
+ const { id } = args;
5703
+ await apiCall(`/api/memory/${id}?user_id=${USER_ID}`, "DELETE");
5350
5704
  return {
5351
5705
  content: [
5352
5706
  {
5353
5707
  type: "text",
5354
- text: `Deleted memory: ${memory_id}`
5708
+ text: `Deleted memory: ${id}`
5355
5709
  }
5356
5710
  ]
5357
5711
  };
@@ -5368,6 +5722,71 @@ ${JSON.stringify(result, null, 2)}`
5368
5722
  ]
5369
5723
  };
5370
5724
  }
5725
+ case "verify_index": {
5726
+ const result = await apiCall("/api/index/verify", "POST", {
5727
+ user_id: USER_ID
5728
+ });
5729
+ const statusIcon = result.is_healthy ? "✓" : "⚠";
5730
+ const healthText = result.is_healthy ? "Healthy" : "Unhealthy - orphaned memories detected";
5731
+ let response = `Index Integrity Report
5732
+ `;
5733
+ response += `━━━━━━━━━━━━━━━━━━━━━━━━━
5734
+ `;
5735
+ response += `Status: ${statusIcon} ${healthText}
5736
+ `;
5737
+ response += `Total in storage: ${result.total_storage}
5738
+ `;
5739
+ response += `Total indexed: ${result.total_indexed}
5740
+ `;
5741
+ response += `Orphaned count: ${result.orphaned_count}
5742
+ `;
5743
+ if (result.orphaned_count > 0) {
5744
+ response += `
5745
+ Recommendation: Run repair_index to fix orphaned memories.`;
5746
+ }
5747
+ return {
5748
+ content: [
5749
+ {
5750
+ type: "text",
5751
+ text: response
5752
+ }
5753
+ ]
5754
+ };
5755
+ }
5756
+ case "repair_index": {
5757
+ const result = await apiCall("/api/index/repair", "POST", {
5758
+ user_id: USER_ID
5759
+ });
5760
+ const statusIcon = result.is_healthy ? "✓" : "⚠";
5761
+ let response = `Index Repair Results
5762
+ `;
5763
+ response += `━━━━━━━━━━━━━━━━━━━━━
5764
+ `;
5765
+ response += `Status: ${statusIcon} ${result.success ? "Success" : "Partial success"}
5766
+ `;
5767
+ response += `Total in storage: ${result.total_storage}
5768
+ `;
5769
+ response += `Total indexed: ${result.total_indexed}
5770
+ `;
5771
+ response += `Repaired: ${result.repaired}
5772
+ `;
5773
+ response += `Failed: ${result.failed}
5774
+ `;
5775
+ response += `Index healthy: ${result.is_healthy ? "Yes" : "No"}
5776
+ `;
5777
+ if (result.failed > 0) {
5778
+ response += `
5779
+ Note: ${result.failed} memories could not be repaired (embedding generation failed).`;
5780
+ }
5781
+ return {
5782
+ content: [
5783
+ {
5784
+ type: "text",
5785
+ text: response
5786
+ }
5787
+ ]
5788
+ };
5789
+ }
5371
5790
  case "recall_by_tags": {
5372
5791
  const { tags, limit = 20 } = args;
5373
5792
  const result = await apiCall("/api/recall/tags", "POST", {
@@ -5472,9 +5891,219 @@ ${formatted}`
5472
5891
  ]
5473
5892
  };
5474
5893
  }
5894
+ case "proactive_context": {
5895
+ const {
5896
+ context,
5897
+ semantic_threshold = 0.65,
5898
+ entity_match_weight = 0.4,
5899
+ recency_weight = 0.2,
5900
+ max_results = 5,
5901
+ memory_types = [],
5902
+ auto_ingest = true
5903
+ } = args;
5904
+ if (auto_ingest && context.length > 100) {
5905
+ streamMemory(context.slice(0, 2000), ["proactive-context"]);
5906
+ streamFlush();
5907
+ }
5908
+ const result = await apiCall("/api/relevant", "POST", {
5909
+ user_id: USER_ID,
5910
+ context,
5911
+ config: {
5912
+ semantic_threshold,
5913
+ entity_match_weight,
5914
+ semantic_weight: 1 - entity_match_weight - recency_weight,
5915
+ recency_weight,
5916
+ max_results,
5917
+ memory_types
5918
+ }
5919
+ });
5920
+ const memories = result.memories || [];
5921
+ const entities = result.detected_entities || [];
5922
+ if (memories.length === 0) {
5923
+ if (entities.length > 0) {
5924
+ const entityList = entities.map((e) => ` - "${e.text}" (${e.entity_type}, ${(e.confidence * 100).toFixed(0)}% confidence)`).join(`
5925
+ `);
5926
+ return {
5927
+ content: [
5928
+ {
5929
+ type: "text",
5930
+ text: `No relevant memories surfaced for this context.
5931
+
5932
+ Detected entities:
5933
+ ${entityList}
5934
+
5935
+ [Latency: ${result.latency_ms.toFixed(1)}ms]`
5936
+ }
5937
+ ]
5938
+ };
5939
+ }
5940
+ return {
5941
+ content: [
5942
+ {
5943
+ type: "text",
5944
+ text: `No relevant memories surfaced for this context.
5945
+
5946
+ [Latency: ${result.latency_ms.toFixed(1)}ms]`
5947
+ }
5948
+ ]
5949
+ };
5950
+ }
5951
+ const formatted = memories.map((m, i) => {
5952
+ const score = (m.relevance_score * 100).toFixed(0);
5953
+ const entityMatchStr = m.matched_entities && m.matched_entities.length > 0 ? `
5954
+ Entity matches: ${m.matched_entities.join(", ")}` : "";
5955
+ const semScore = (m.semantic_similarity * 100).toFixed(0);
5956
+ return `${i + 1}. [${score}% relevant] ${m.content.slice(0, 100)}${m.content.length > 100 ? "..." : ""}
5957
+ Type: ${m.memory_type} | semantic=${semScore}% | reason: ${m.relevance_reason}${entityMatchStr}`;
5958
+ }).join(`
5959
+
5960
+ `);
5961
+ const entitySummary = entities.length > 0 ? `
5962
+
5963
+ Detected entities: ${entities.map((e) => `"${e.text}" (${e.entity_type})`).join(", ")}` : "";
5964
+ return {
5965
+ content: [
5966
+ {
5967
+ type: "text",
5968
+ text: `Surfaced ${memories.length} relevant memories:
5969
+
5970
+ ${formatted}${entitySummary}
5971
+
5972
+ [Latency: ${result.latency_ms.toFixed(1)}ms | Threshold: ${(semantic_threshold * 100).toFixed(0)}%]`
5973
+ }
5974
+ ]
5975
+ };
5976
+ }
5977
+ case "streaming_status": {
5978
+ const wsState = streamSocket?.readyState;
5979
+ const stateNames = ["CONNECTING", "OPEN", "CLOSING", "CLOSED"];
5980
+ const stateName = wsState !== undefined ? stateNames[wsState] || "UNKNOWN" : "NULL";
5981
+ const status = {
5982
+ enabled: STREAM_ENABLED,
5983
+ ws_url: WS_URL,
5984
+ socket_state: stateName,
5985
+ handshake_complete: streamHandshakeComplete,
5986
+ buffer_size: streamBuffer.length,
5987
+ connecting: streamConnecting,
5988
+ reconnect_pending: streamReconnectTimer !== null
5989
+ };
5990
+ if (!streamSocket || streamSocket.readyState !== WebSocket.OPEN) {
5991
+ connectStream().catch(() => {});
5992
+ }
5993
+ return {
5994
+ content: [
5995
+ {
5996
+ type: "text",
5997
+ text: `Streaming Status:
5998
+
5999
+ ` + `Enabled: ${status.enabled}
6000
+ ` + `WebSocket URL: ${status.ws_url}
6001
+ ` + `Socket State: ${status.socket_state}
6002
+ ` + `Handshake Complete: ${status.handshake_complete}
6003
+ ` + `Buffer Size: ${status.buffer_size}
6004
+ ` + `Currently Connecting: ${status.connecting}
6005
+ ` + `Reconnect Pending: ${status.reconnect_pending}
6006
+
6007
+ ` + (status.handshake_complete ? "✓ Streaming is ACTIVE" : "✗ Streaming is NOT ACTIVE - attempting reconnect...")
6008
+ }
6009
+ ]
6010
+ };
6011
+ }
6012
+ case "consolidation_report": {
6013
+ const { since, until } = args;
6014
+ const result = await apiCall("/api/consolidation/report", "POST", {
6015
+ user_id: USER_ID,
6016
+ since,
6017
+ until
6018
+ });
6019
+ const stats = result.statistics;
6020
+ const sections = [];
6021
+ const eventCount = result.strengthened_memories.length + result.decayed_memories.length + result.formed_associations.length + result.strengthened_associations.length + result.potentiated_associations.length + result.pruned_associations.length + result.extracted_facts.length + result.reinforced_facts.length;
6022
+ sections.push(`CONSOLIDATION REPORT (${eventCount} events)`);
6023
+ sections.push(`Period: ${result.period.start} to ${result.period.end}`);
6024
+ sections.push("=".repeat(50));
6025
+ if (stats.memories_strengthened > 0 || stats.memories_decayed > 0 || stats.memories_at_risk > 0) {
6026
+ const memoryLines = [];
6027
+ if (stats.memories_strengthened > 0)
6028
+ memoryLines.push(` + ${stats.memories_strengthened} memories strengthened`);
6029
+ if (stats.memories_decayed > 0)
6030
+ memoryLines.push(` - ${stats.memories_decayed} memories decayed`);
6031
+ if (stats.memories_at_risk > 0)
6032
+ memoryLines.push(` ! ${stats.memories_at_risk} memories at risk of forgetting`);
6033
+ sections.push(`MEMORY CHANGES:
6034
+ ${memoryLines.join(`
6035
+ `)}`);
6036
+ }
6037
+ if (stats.edges_formed > 0 || stats.edges_strengthened > 0 || stats.edges_potentiated > 0 || stats.edges_pruned > 0) {
6038
+ const edgeLines = [];
6039
+ if (stats.edges_formed > 0)
6040
+ edgeLines.push(` + ${stats.edges_formed} new associations formed`);
6041
+ if (stats.edges_strengthened > 0)
6042
+ edgeLines.push(` + ${stats.edges_strengthened} associations strengthened`);
6043
+ if (stats.edges_potentiated > 0)
6044
+ edgeLines.push(` * ${stats.edges_potentiated} associations became permanent (LTP)`);
6045
+ if (stats.edges_pruned > 0)
6046
+ edgeLines.push(` - ${stats.edges_pruned} weak associations pruned`);
6047
+ sections.push(`ASSOCIATIONS (Hebbian Learning):
6048
+ ${edgeLines.join(`
6049
+ `)}`);
6050
+ }
6051
+ if (stats.facts_extracted > 0 || stats.facts_reinforced > 0) {
6052
+ const factLines = [];
6053
+ if (stats.facts_extracted > 0)
6054
+ factLines.push(` + ${stats.facts_extracted} facts extracted`);
6055
+ if (stats.facts_reinforced > 0)
6056
+ factLines.push(` + ${stats.facts_reinforced} facts reinforced`);
6057
+ sections.push(`FACTS:
6058
+ ${factLines.join(`
6059
+ `)}`);
6060
+ }
6061
+ if (stats.maintenance_cycles > 0) {
6062
+ const durationSec = (stats.total_maintenance_duration_ms / 1000).toFixed(2);
6063
+ sections.push(`MAINTENANCE: ${stats.maintenance_cycles} cycle(s) completed (${durationSec}s total)`);
6064
+ }
6065
+ if (eventCount === 0) {
6066
+ sections.push("No consolidation activity in this period. Store and access memories to trigger learning.");
6067
+ }
6068
+ return {
6069
+ content: [
6070
+ {
6071
+ type: "text",
6072
+ text: sections.join(`
6073
+
6074
+ `)
6075
+ }
6076
+ ]
6077
+ };
6078
+ }
5475
6079
  default:
5476
6080
  throw new Error(`Unknown tool: ${name}`);
5477
6081
  }
6082
+ };
6083
+ try {
6084
+ const result = await executeTool();
6085
+ const resultText = result.content.map((c) => c.text).join(`
6086
+ `);
6087
+ streamToolCall(name, args, resultText);
6088
+ if (PROACTIVE_SURFACING && !["remember", "recall", "forget", "list_memories", "proactive_context", "context_summary", "memory_stats"].includes(name)) {
6089
+ const contextParts = [];
6090
+ if (args && typeof args === "object") {
6091
+ for (const [key, value] of Object.entries(args)) {
6092
+ if (typeof value === "string" && value.length > 10) {
6093
+ contextParts.push(value);
6094
+ }
6095
+ }
6096
+ }
6097
+ const context = contextParts.join(" ").slice(0, 1000);
6098
+ if (context.length >= PROACTIVE_MIN_CONTEXT_LENGTH) {
6099
+ const surfaced = await surfaceRelevant(context, 3);
6100
+ if (surfaced && surfaced.length > 0) {
6101
+ const surfacedText = formatSurfacedMemories(surfaced);
6102
+ result.content[result.content.length - 1].text += surfacedText;
6103
+ }
6104
+ }
6105
+ }
6106
+ return result;
5478
6107
  } catch (error) {
5479
6108
  const message = error instanceof Error ? error.message : String(error);
5480
6109
  let helpText = "";
@@ -5554,11 +6183,98 @@ ID: ${memory.id}`
5554
6183
  throw new Error(`Failed to read memory: ${message}`);
5555
6184
  }
5556
6185
  });
6186
// Auto-spawn of the bundled backend is on by default; opt out by setting
// the environment variable SHODH_NO_AUTO_SPAWN to the string "true".
var AUTO_SPAWN_ENABLED = process.env.SHODH_NO_AUTO_SPAWN !== "true";
// Handle to the backend child process spawned by ensureServerRunning();
// stays null when the server was already running or spawning was skipped.
var serverProcess = null;
6188
// Locate the backend server executable under <package>/bin.
// Prefers the platform wrapper script (shodh-memory[.bat]) and falls back
// to the raw binary (shodh-memory-server[.exe]).
// Returns the first existing path, or null when neither file is present.
function getBinaryPath() {
  const binDir = path.join(__dirname2, "..", "bin");
  const isWindows = process.platform === "win32";
  const candidates = isWindows
    ? ["shodh-memory.bat", "shodh-memory-server.exe"]
    : ["shodh-memory", "shodh-memory-server"];
  for (const name of candidates) {
    const candidate = path.join(binDir, name);
    if (fs.existsSync(candidate)) {
      return candidate;
    }
  }
  return null;
}
6210
// Probe the backend /health endpoint to see whether a server is already
// listening at API_URL. Resolves true only on an HTTP 2xx response; any
// network error, non-2xx status, or the 2-second timeout yields false.
async function isServerRunning() {
  const controller = new AbortController();
  // Abort the probe after 2s so MCP startup is never blocked for long.
  const timeout = setTimeout(() => controller.abort(), 2000);
  try {
    const response = await fetch(`${API_URL}/health`, {
      signal: controller.signal
    });
    return response.ok;
  } catch {
    // Connection refused / aborted / DNS failure all mean "not running".
    return false;
  } finally {
    // Always clear the timer. The original only cleared it on success,
    // so every failed probe left a live 2s timer behind.
    clearTimeout(timeout);
  }
}
6223
// Poll the backend until it responds healthy, or give up.
// Performs up to `maxAttempts` health checks with a 500ms pause between
// them; returns true as soon as one succeeds, false after exhausting all.
async function waitForServer(maxAttempts = 30) {
  let attempt = 0;
  while (attempt < maxAttempts) {
    const up = await isServerRunning();
    if (up) {
      return true;
    }
    await new Promise((resolve) => setTimeout(resolve, 500));
    attempt += 1;
  }
  return false;
}
6232
// Ensure a Shodh backend is reachable before the MCP server starts.
// Order of guards: already running -> auto-spawn allowed -> binary found.
// Spawns the binary detached with ignored stdio so it can outlive this
// process, then polls the health endpoint until it answers or we give up.
async function ensureServerRunning() {
  const alreadyUp = await isServerRunning();
  if (alreadyUp) {
    console.error("[shodh-memory] Backend server already running at", API_URL);
    return;
  }
  if (!AUTO_SPAWN_ENABLED) {
    console.error("[shodh-memory] Auto-spawn disabled. Please start the server manually.");
    return;
  }
  const serverBinary = getBinaryPath();
  if (!serverBinary) {
    console.error("[shodh-memory] Server binary not found. Please run: npx @shodh/memory-mcp");
    console.error("[shodh-memory] Or download from: https://github.com/varun29ankuS/shodh-memory/releases");
    return;
  }
  console.error("[shodh-memory] Starting backend server...");
  serverProcess = spawn(serverBinary, [], {
    detached: true,
    stdio: "ignore",
    env: {
      ...process.env,
      // Forward our API key so the spawned server accepts our requests.
      SHODH_DEV_API_KEY: API_KEY
    }
  });
  // Drop our reference so the child does not keep this process alive.
  serverProcess.unref();
  console.error("[shodh-memory] Waiting for server to start...");
  const started = await waitForServer();
  console.error(
    started
      ? "[shodh-memory] Backend server started successfully"
      : "[shodh-memory] Warning: Server may not have started properly"
  );
}
6265
// Best-effort cleanup: terminate the spawned backend when this process
// exits. NOTE(review): the child is spawned detached and unref'd, which
// suggests it is meant to survive — this handler only kills it when we
// still hold a live, unkilled handle; confirm the intended lifecycle.
process.on("exit", () => {
  const child = serverProcess;
  if (child !== null && !child.killed) {
    child.kill();
  }
});
5557
6270
// Entry point: ensure a backend is available, then serve MCP over stdio.
// Startup diagnostics go to stderr (stdout carries the MCP protocol).
async function main() {
  await ensureServerRunning();
  const transport = new StdioServerTransport();
  await server.connect(transport);
  // Keep this banner in sync with package.json ("version": "0.1.6").
  // The previous build logged a stale "v0.1.51".
  console.error("Shodh-Memory MCP server v0.1.6 running");
  console.error(`Connecting to: ${API_URL}`);
  console.error(`User ID: ${USER_ID}`);
  console.error(`Streaming: ${STREAM_ENABLED ? "enabled" : "disabled"}`);
  console.error(`Proactive surfacing: ${PROACTIVE_SURFACING ? "enabled" : "disabled (SHODH_PROACTIVE=false)"}`);
}
main().catch(console.error);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@shodh/memory-mcp",
3
- "version": "0.1.4",
3
+ "version": "0.1.6",
4
4
  "mcpName": "io.github.varun29ankuS/shodh-memory",
5
5
  "description": "MCP server for persistent AI memory - store and recall context across sessions",
6
6
  "type": "module",
@@ -11,10 +11,12 @@
11
11
  "scripts": {
12
12
  "start": "bun run index.ts",
13
13
  "build": "bun build index.ts --outdir dist --target node",
14
+ "postinstall": "node scripts/postinstall.cjs",
14
15
  "prepublishOnly": "npm run build"
15
16
  },
16
17
  "files": [
17
18
  "dist",
19
+ "scripts",
18
20
  "README.md"
19
21
  ],
20
22
  "dependencies": {
@@ -0,0 +1,132 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * Postinstall script for @shodh/memory-mcp
4
+ *
5
+ * Downloads the appropriate shodh-memory-server binary for the current platform
6
+ * from GitHub releases.
7
+ */
8
+
9
+ const fs = require('fs');
10
+ const path = require('path');
11
+ const https = require('https');
12
+ const { execSync } = require('child_process');
13
+
14
+ const VERSION = '0.1.6';
15
+ const REPO = 'varun29ankuS/shodh-memory';
16
+ const BIN_DIR = path.join(__dirname, '..', 'bin');
17
+
18
// Platform detection: map (process.platform, process.arch) to the matching
// GitHub release artifact. Returns { name, ext, binary } for supported
// targets (linux-x64, macos-x64/arm64, windows-x64), or null otherwise.
function getPlatformInfo() {
  const targets = {
    'linux-x64': { name: 'shodh-memory-linux-x64', ext: '.tar.gz', binary: 'shodh-memory-server' },
    'darwin-x64': { name: 'shodh-memory-macos-x64', ext: '.tar.gz', binary: 'shodh-memory-server' },
    'darwin-arm64': { name: 'shodh-memory-macos-arm64', ext: '.tar.gz', binary: 'shodh-memory-server' },
    'win32-x64': { name: 'shodh-memory-windows-x64', ext: '.zip', binary: 'shodh-memory-server.exe' },
  };
  return targets[`${process.platform}-${process.arch}`] || null;
}
35
+
36
// Download `url` to local path `dest`, following HTTP 301/302 redirects.
// Resolves once the file is fully flushed to disk; rejects on any HTTP or
// I/O failure, removing the partial file so a retry starts clean.
function download(url, dest) {
  return new Promise((resolve, reject) => {
    // Remove whatever was partially written, then reject.
    const fail = (err) => {
      fs.unlink(dest, () => reject(err));
    };

    const request = (target) => {
      https.get(target, (response) => {
        if (response.statusCode === 302 || response.statusCode === 301) {
          // Drain the redirect body so the socket can be reused.
          response.resume();
          if (!response.headers.location) {
            // Original recursed with `undefined` here.
            fail(new Error('Redirect response missing Location header'));
            return;
          }
          request(response.headers.location);
          return;
        }

        if (response.statusCode !== 200) {
          response.resume();
          // Original left the write stream open and an empty file on disk
          // for this path.
          fail(new Error(`Failed to download: ${response.statusCode}`));
          return;
        }

        // Only open the destination once we have a real 200 body.
        const file = fs.createWriteStream(dest);
        file.on('error', fail);
        file.on('finish', () => {
          file.close();
          resolve();
        });
        response.pipe(file);
      }).on('error', fail);
    };

    request(url);
  });
}
68
+
69
// Extract archive: unpack a downloaded release archive into `dest` using
// platform tools — `tar` for .tar.gz, PowerShell Expand-Archive for .zip.
// NOTE(review): paths are interpolated into a shell command line; both are
// built from __dirname here, but quoting breaks on paths containing quotes.
function extract(archive, dest, platformInfo) {
  const isZip = platformInfo.ext === '.zip';
  if (isZip) {
    // Use PowerShell on Windows
    execSync(`powershell -Command "Expand-Archive -Path '${archive}' -DestinationPath '${dest}' -Force"`, { stdio: 'inherit' });
  } else if (platformInfo.ext === '.tar.gz') {
    execSync(`tar -xzf "${archive}" -C "${dest}"`, { stdio: 'inherit' });
  }
}
78
+
79
/**
 * Postinstall entry point: fetch and install the platform-specific
 * shodh-memory-server binary from GitHub releases.
 *
 * Flow: detect platform -> bail if unsupported or already installed ->
 * download the release archive -> extract into bin/ -> delete the archive
 * -> chmod the binary on Unix. Failures are logged but deliberately do not
 * fail `npm install`.
 */
async function main() {
  const platformInfo = getPlatformInfo();

  if (!platformInfo) {
    // Unsupported OS/arch combination: degrade gracefully and let the
    // user run the server some other way.
    console.log('[shodh-memory] Unsupported platform:', process.platform, process.arch);
    console.log('[shodh-memory] You will need to run the server manually.');
    return;
  }

  console.log('[shodh-memory] Installing server binary for', process.platform, process.arch);

  // Create bin directory (recursive: parents may not exist either)
  if (!fs.existsSync(BIN_DIR)) {
    fs.mkdirSync(BIN_DIR, { recursive: true });
  }

  const binaryPath = path.join(BIN_DIR, platformInfo.binary);

  // Check if already installed — skip the download on reinstall.
  // NOTE(review): no version check here; a stale binary from an earlier
  // package version is kept as-is. Confirm that is intended.
  if (fs.existsSync(binaryPath)) {
    console.log('[shodh-memory] Binary already installed at', binaryPath);
    return;
  }

  // Download URL pinned to this package's release tag (v0.1.6).
  const downloadUrl = `https://github.com/${REPO}/releases/download/v${VERSION}/${platformInfo.name}${platformInfo.ext}`;
  const archivePath = path.join(BIN_DIR, `${platformInfo.name}${platformInfo.ext}`);

  console.log('[shodh-memory] Downloading from', downloadUrl);

  try {
    await download(downloadUrl, archivePath);
    console.log('[shodh-memory] Downloaded archive');

    // Extract into bin/; assumes the archive's top level contains the
    // binary named platformInfo.binary.
    extract(archivePath, BIN_DIR, platformInfo);
    console.log('[shodh-memory] Extracted binary');

    // Clean up archive
    fs.unlinkSync(archivePath);

    // Make executable (Unix) — zip/tar extraction may not preserve mode.
    if (process.platform !== 'win32') {
      fs.chmodSync(binaryPath, 0o755);
    }

    console.log('[shodh-memory] Server binary installed at', binaryPath);
  } catch (err) {
    // Swallow the error on purpose: a failed binary install must not
    // break `npm install`; point the user at the manual download instead.
    console.error('[shodh-memory] Failed to install binary:', err.message);
    console.log('[shodh-memory] You can manually download from:', `https://github.com/${REPO}/releases`);
  }
}

// NOTE(review): floating promise — rejections inside main() are handled by
// its own try/catch, but a synchronous throw before the try block would
// surface as an unhandled rejection.
main();