neural-memory 0.1.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neural_memory/__init__.py +38 -0
- neural_memory/cli/__init__.py +15 -0
- neural_memory/cli/__main__.py +6 -0
- neural_memory/cli/config.py +176 -0
- neural_memory/cli/main.py +2702 -0
- neural_memory/cli/storage.py +169 -0
- neural_memory/cli/tui.py +471 -0
- neural_memory/core/__init__.py +52 -0
- neural_memory/core/brain.py +301 -0
- neural_memory/core/brain_mode.py +273 -0
- neural_memory/core/fiber.py +236 -0
- neural_memory/core/memory_types.py +331 -0
- neural_memory/core/neuron.py +168 -0
- neural_memory/core/project.py +257 -0
- neural_memory/core/synapse.py +215 -0
- neural_memory/engine/__init__.py +15 -0
- neural_memory/engine/activation.py +335 -0
- neural_memory/engine/encoder.py +391 -0
- neural_memory/engine/retrieval.py +440 -0
- neural_memory/extraction/__init__.py +42 -0
- neural_memory/extraction/entities.py +547 -0
- neural_memory/extraction/parser.py +337 -0
- neural_memory/extraction/router.py +396 -0
- neural_memory/extraction/temporal.py +428 -0
- neural_memory/mcp/__init__.py +9 -0
- neural_memory/mcp/__main__.py +6 -0
- neural_memory/mcp/server.py +621 -0
- neural_memory/py.typed +0 -0
- neural_memory/safety/__init__.py +31 -0
- neural_memory/safety/freshness.py +238 -0
- neural_memory/safety/sensitive.py +304 -0
- neural_memory/server/__init__.py +5 -0
- neural_memory/server/app.py +99 -0
- neural_memory/server/dependencies.py +33 -0
- neural_memory/server/models.py +138 -0
- neural_memory/server/routes/__init__.py +7 -0
- neural_memory/server/routes/brain.py +221 -0
- neural_memory/server/routes/memory.py +169 -0
- neural_memory/server/routes/sync.py +387 -0
- neural_memory/storage/__init__.py +17 -0
- neural_memory/storage/base.py +441 -0
- neural_memory/storage/factory.py +329 -0
- neural_memory/storage/memory_store.py +896 -0
- neural_memory/storage/shared_store.py +650 -0
- neural_memory/storage/sqlite_store.py +1613 -0
- neural_memory/sync/__init__.py +5 -0
- neural_memory/sync/client.py +435 -0
- neural_memory/unified_config.py +315 -0
- neural_memory/utils/__init__.py +5 -0
- neural_memory/utils/config.py +98 -0
- neural_memory-0.1.0.dist-info/METADATA +314 -0
- neural_memory-0.1.0.dist-info/RECORD +55 -0
- neural_memory-0.1.0.dist-info/WHEEL +4 -0
- neural_memory-0.1.0.dist-info/entry_points.txt +4 -0
- neural_memory-0.1.0.dist-info/licenses/LICENSE +21 -0
neural_memory/mcp/server.py
ADDED

@@ -0,0 +1,621 @@
"""MCP server implementation for NeuralMemory.

Exposes NeuralMemory as tools via Model Context Protocol (MCP),
allowing Claude Code, Cursor, AntiGravity and other MCP clients to
store and recall memories.

All tools share the same SQLite database at ~/.neuralmemory/brains/<brain>.db
This enables seamless memory sharing between different AI tools.

Usage:
    # Run directly
    python -m neural_memory.mcp

    # Or in Claude Code's mcp_servers.json:
    {
        "neural-memory": {
            "command": "python",
            "args": ["-m", "neural_memory.mcp"]
        }
    }

    # Or set NEURALMEMORY_BRAIN to use a specific brain:
    NEURALMEMORY_BRAIN=myproject python -m neural_memory.mcp
"""

from __future__ import annotations

import asyncio
import json
import sys
from datetime import datetime
from typing import TYPE_CHECKING, Any

from neural_memory.core.memory_types import MemoryType, Priority, TypedMemory, suggest_memory_type
from neural_memory.engine.encoder import MemoryEncoder
from neural_memory.engine.retrieval import DepthLevel, ReflexPipeline
from neural_memory.unified_config import get_config, get_shared_storage

if TYPE_CHECKING:
    from neural_memory.storage.sqlite_store import SQLiteStorage
    from neural_memory.unified_config import UnifiedConfig


class MCPServer:
    """MCP server that exposes NeuralMemory tools.

    Uses shared SQLite storage for cross-tool memory sharing.
    Configuration from ~/.neuralmemory/config.toml
    """

    def __init__(self) -> None:
        self.config: UnifiedConfig = get_config()
        self._storage: SQLiteStorage | None = None

    async def get_storage(self) -> SQLiteStorage:
        """Get or create shared SQLite storage instance."""
        if self._storage is None:
            self._storage = await get_shared_storage()
        return self._storage

    def get_tools(self) -> list[dict[str, Any]]:
        """Return list of available MCP tools."""
        return [
            {
                "name": "nmem_remember",
                "description": "Store a memory in NeuralMemory. Use this to remember facts, decisions, insights, todos, errors, and other information that should persist across sessions.",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "content": {"type": "string", "description": "The content to remember"},
                        "type": {
                            "type": "string",
                            "enum": [
                                "fact",
                                "decision",
                                "preference",
                                "todo",
                                "insight",
                                "context",
                                "instruction",
                                "error",
                                "workflow",
                                "reference",
                            ],
                            "description": "Memory type (auto-detected if not specified)",
                        },
                        "priority": {
                            "type": "integer",
                            "minimum": 0,
                            "maximum": 10,
                            "description": "Priority 0-10 (5=normal, 10=critical)",
                        },
                        "tags": {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "Tags for categorization",
                        },
                        "expires_days": {
                            "type": "integer",
                            "description": "Days until memory expires",
                        },
                    },
                    "required": ["content"],
                },
            },
            {
                "name": "nmem_recall",
                "description": "Query memories from NeuralMemory. Use this to recall past information, decisions, patterns, or context relevant to the current task.",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "The query to search memories"},
                        "depth": {
                            "type": "integer",
                            "minimum": 0,
                            "maximum": 3,
                            "description": "Search depth: 0=instant, 1=context, 2=habit, 3=deep",
                        },
                        "max_tokens": {
                            "type": "integer",
                            "description": "Maximum tokens in response (default: 500)",
                        },
                        "min_confidence": {
                            "type": "number",
                            "minimum": 0,
                            "maximum": 1,
                            "description": "Minimum confidence threshold",
                        },
                    },
                    "required": ["query"],
                },
            },
            {
                "name": "nmem_context",
                "description": "Get recent context from NeuralMemory. Use this at the start of tasks to inject relevant recent memories.",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "limit": {
                            "type": "integer",
                            "description": "Number of recent memories (default: 10)",
                        },
                        "fresh_only": {
                            "type": "boolean",
                            "description": "Only include memories < 30 days old",
                        },
                    },
                },
            },
            {
                "name": "nmem_todo",
                "description": "Quick shortcut to add a TODO memory with 30-day expiry.",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "task": {"type": "string", "description": "The task to remember"},
                        "priority": {
                            "type": "integer",
                            "minimum": 0,
                            "maximum": 10,
                            "description": "Priority 0-10 (default: 5)",
                        },
                    },
                    "required": ["task"],
                },
            },
            {
                "name": "nmem_stats",
                "description": "Get brain statistics including memory counts and freshness.",
                "inputSchema": {"type": "object", "properties": {}},
            },
            {
                "name": "nmem_auto",
                "description": "Control auto-capture settings and analyze text for auto-save. When enabled, NeuralMemory automatically detects and saves decisions, errors, todos, and important facts from conversations.",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "action": {
                            "type": "string",
                            "enum": ["status", "enable", "disable", "analyze"],
                            "description": "Action to perform",
                        },
                        "text": {
                            "type": "string",
                            "description": "Text to analyze for auto-capture (required for 'analyze' action)",
                        },
                        "save": {
                            "type": "boolean",
                            "description": "If true, save detected memories (default: false for analyze)",
                        },
                    },
                    "required": ["action"],
                },
            },
        ]

    async def call_tool(self, name: str, arguments: dict[str, Any]) -> dict[str, Any]:
        """Execute an MCP tool call."""
        if name == "nmem_remember":
            return await self._remember(arguments)
        elif name == "nmem_recall":
            return await self._recall(arguments)
        elif name == "nmem_context":
            return await self._context(arguments)
        elif name == "nmem_todo":
            return await self._todo(arguments)
        elif name == "nmem_stats":
            return await self._stats(arguments)
        elif name == "nmem_auto":
            return await self._auto(arguments)
        else:
            return {"error": f"Unknown tool: {name}"}

    async def _remember(self, args: dict[str, Any]) -> dict[str, Any]:
        """Store a memory."""
        storage = await self.get_storage()
        brain = await storage.get_brain(storage._current_brain_id)
        if not brain:
            return {"error": "No brain configured"}

        content = args["content"]

        # Determine memory type
        if "type" in args:
            mem_type = MemoryType(args["type"])
        else:
            mem_type = suggest_memory_type(content)

        # Determine priority
        priority = Priority.from_int(args.get("priority", 5))

        # Encode memory
        encoder = MemoryEncoder(storage, brain.config)
        storage.disable_auto_save()

        tags = set(args.get("tags", []))
        result = await encoder.encode(
            content=content,
            timestamp=datetime.now(),
            tags=tags if tags else None,
        )

        # Create typed memory
        typed_mem = TypedMemory.create(
            fiber_id=result.fiber.id,
            memory_type=mem_type,
            priority=priority,
            source="mcp_tool",
            expires_in_days=args.get("expires_days"),
            tags=tags if tags else None,
        )
        await storage.add_typed_memory(typed_mem)
        await storage.batch_save()

        return {
            "success": True,
            "fiber_id": result.fiber.id,
            "memory_type": mem_type.value,
            "neurons_created": len(result.neurons_created),
            "message": f"Remembered: {content[:50]}{'...' if len(content) > 50 else ''}",
        }

    async def _recall(self, args: dict[str, Any]) -> dict[str, Any]:
        """Query memories."""
        storage = await self.get_storage()
        brain = await storage.get_brain(storage._current_brain_id)
        if not brain:
            return {"error": "No brain configured"}

        query = args["query"]
        depth = DepthLevel(args.get("depth", 1))
        max_tokens = args.get("max_tokens", 500)
        min_confidence = args.get("min_confidence", 0.0)

        pipeline = ReflexPipeline(storage, brain.config)
        result = await pipeline.query(
            query=query,
            depth=depth,
            max_tokens=max_tokens,
            reference_time=datetime.now(),
        )

        if result.confidence < min_confidence:
            return {
                "answer": None,
                "message": f"No memories found with confidence >= {min_confidence}",
                "confidence": result.confidence,
            }

        return {
            "answer": result.context or "No relevant memories found.",
            "confidence": result.confidence,
            "neurons_activated": result.neurons_activated,
            "fibers_matched": result.fibers_matched,
            "depth_used": result.depth_used.value,
        }

    async def _context(self, args: dict[str, Any]) -> dict[str, Any]:
        """Get recent context."""
        storage = await self.get_storage()

        limit = args.get("limit", 10)
        fresh_only = args.get("fresh_only", False)

        fibers = await storage.get_fibers(limit=limit * 2 if fresh_only else limit)

        if not fibers:
            return {"context": "No memories stored yet.", "count": 0}

        # Filter by freshness if requested
        if fresh_only:
            from neural_memory.safety.freshness import FreshnessLevel, evaluate_freshness

            now = datetime.now()
            fresh_fibers = []
            for fiber in fibers:
                freshness = evaluate_freshness(fiber.created_at, now)
                if freshness.level in (FreshnessLevel.FRESH, FreshnessLevel.RECENT):
                    fresh_fibers.append(fiber)
            fibers = fresh_fibers[:limit]

        # Build context
        context_parts = []
        for fiber in fibers:
            content = fiber.summary
            if not content and fiber.anchor_neuron_id:
                anchor = await storage.get_neuron(fiber.anchor_neuron_id)
                if anchor:
                    content = anchor.content
            if content:
                context_parts.append(f"- {content}")

        return {
            "context": "\n".join(context_parts) if context_parts else "No context available.",
            "count": len(context_parts),
        }

    async def _todo(self, args: dict[str, Any]) -> dict[str, Any]:
        """Add a TODO."""
        return await self._remember(
            {
                "content": args["task"],
                "type": "todo",
                "priority": args.get("priority", 5),
                "expires_days": 30,
            }
        )

    async def _stats(self, args: dict[str, Any]) -> dict[str, Any]:
        """Get brain statistics."""
        storage = await self.get_storage()
        brain = await storage.get_brain(storage._current_brain_id)
        if not brain:
            return {"error": "No brain configured"}

        stats = await storage.get_stats(brain.id)

        return {
            "brain": brain.name,
            "neuron_count": stats["neuron_count"],
            "synapse_count": stats["synapse_count"],
            "fiber_count": stats["fiber_count"],
        }

    async def _auto(self, args: dict[str, Any]) -> dict[str, Any]:
        """Handle auto-capture settings and analysis."""
        action = args.get("action", "status")

        if action == "status":
            return {
                "enabled": self.config.auto.enabled,
                "capture_decisions": self.config.auto.capture_decisions,
                "capture_errors": self.config.auto.capture_errors,
                "capture_todos": self.config.auto.capture_todos,
                "capture_facts": self.config.auto.capture_facts,
                "min_confidence": self.config.auto.min_confidence,
            }

        elif action == "enable":
            self.config.auto.enabled = True
            self.config.save()
            return {"enabled": True, "message": "Auto-capture enabled"}

        elif action == "disable":
            self.config.auto.enabled = False
            self.config.save()
            return {"enabled": False, "message": "Auto-capture disabled"}

        elif action == "analyze":
            text = args.get("text", "")
            if not text:
                return {"error": "Text required for analyze action"}

            # Analyze text for potential memories
            detected = self._analyze_text_for_memories(text)

            if not detected:
                return {"detected": [], "message": "No memorable content detected"}

            # Optionally save detected memories
            if args.get("save", False) and self.config.auto.enabled:
                saved = []
                for item in detected:
                    if item["confidence"] >= self.config.auto.min_confidence:
                        result = await self._remember(
                            {
                                "content": item["content"],
                                "type": item["type"],
                                "priority": item.get("priority", 5),
                            }
                        )
                        if "error" not in result:
                            saved.append(item["content"][:50])
                return {
                    "detected": detected,
                    "saved": saved,
                    "message": f"Analyzed and saved {len(saved)} memories",
                }

            return {
                "detected": detected,
                "message": f"Detected {len(detected)} potential memories (not saved)",
            }

        return {"error": f"Unknown action: {action}"}

    def _analyze_text_for_memories(self, text: str) -> list[dict[str, Any]]:
        """Analyze text and detect potential memories.

        Returns list of detected memories with type, content, and confidence.
        """
        import re

        detected: list[dict[str, Any]] = []
        text_lower = text.lower()

        # Decision patterns
        decision_patterns = [
            r"(?:we |I )(?:decided|chose|selected|picked|opted)(?: to)?[:\s]+(.+?)(?:\.|$)",
            r"(?:the )?decision(?: is)?[:\s]+(.+?)(?:\.|$)",
            r"(?:we\'re |I\'m )going (?:to|with)[:\s]+(.+?)(?:\.|$)",
            r"let\'s (?:go with|use|choose)[:\s]+(.+?)(?:\.|$)",
        ]
        if self.config.auto.capture_decisions:
            for pattern in decision_patterns:
                matches = re.findall(pattern, text_lower, re.IGNORECASE)
                for match in matches:
                    if len(match) > 10:
                        detected.append(
                            {
                                "type": "decision",
                                "content": f"Decision: {match.strip()}",
                                "confidence": 0.8,
                                "priority": 6,
                            }
                        )

        # Error patterns
        error_patterns = [
            r"error[:\s]+(.+?)(?:\.|$)",
            r"failed[:\s]+(.+?)(?:\.|$)",
            r"bug[:\s]+(.+?)(?:\.|$)",
            r"(?:the )?issue (?:is|was)[:\s]+(.+?)(?:\.|$)",
            r"problem[:\s]+(.+?)(?:\.|$)",
        ]
        if self.config.auto.capture_errors:
            for pattern in error_patterns:
                matches = re.findall(pattern, text_lower, re.IGNORECASE)
                for match in matches:
                    if len(match) > 10:
                        detected.append(
                            {
                                "type": "error",
                                "content": f"Error: {match.strip()}",
                                "confidence": 0.85,
                                "priority": 7,
                            }
                        )

        # TODO patterns
        todo_patterns = [
            r"(?:TODO|FIXME|HACK|XXX)[:\s]+(.+?)(?:\.|$)",
            r"(?:we |I )?(?:need to|should|must|have to)[:\s]+(.+?)(?:\.|$)",
            r"(?:remember to|don\'t forget to)[:\s]+(.+?)(?:\.|$)",
            r"(?:later|next)[:\s]+(.+?)(?:\.|$)",
        ]
        if self.config.auto.capture_todos:
            for pattern in todo_patterns:
                matches = re.findall(pattern, text, re.IGNORECASE)
                for match in matches:
                    if len(match) > 5:
                        detected.append(
                            {
                                "type": "todo",
                                "content": f"TODO: {match.strip()}",
                                "confidence": 0.75,
                                "priority": 5,
                            }
                        )

        # Fact patterns (more conservative)
        fact_patterns = [
            r"(?:the |a )?(?:answer|solution|fix) (?:is|was)[:\s]+(.+?)(?:\.|$)",
            r"(?:it |this )(?:works|worked) because[:\s]+(.+?)(?:\.|$)",
            r"(?:the )?(?:key|important|note)[:\s]+(.+?)(?:\.|$)",
            r"(?:learned|discovered|found out)[:\s]+(.+?)(?:\.|$)",
        ]
        if self.config.auto.capture_facts:
            for pattern in fact_patterns:
                matches = re.findall(pattern, text_lower, re.IGNORECASE)
                for match in matches:
                    if len(match) > 15:
                        detected.append(
                            {
                                "type": "fact",
                                "content": match.strip(),
                                "confidence": 0.7,
                                "priority": 5,
                            }
                        )

        # Remove duplicates
        seen = set()
        unique_detected = []
        for item in detected:
            content_key = item["content"][:50].lower()
            if content_key not in seen:
                seen.add(content_key)
                unique_detected.append(item)

        return unique_detected


def create_mcp_server() -> MCPServer:
    """Create an MCP server instance."""
    return MCPServer()


async def handle_message(server: MCPServer, message: dict[str, Any]) -> dict[str, Any]:
    """Handle a single MCP message."""
    method = message.get("method", "")
    msg_id = message.get("id")
    params = message.get("params", {})

    if method == "initialize":
        return {
            "jsonrpc": "2.0",
            "id": msg_id,
            "result": {
                "protocolVersion": "2024-11-05",
                "serverInfo": {"name": "neural-memory", "version": "0.1.0"},
                "capabilities": {"tools": {}},
            },
        }

    elif method == "tools/list":
        return {"jsonrpc": "2.0", "id": msg_id, "result": {"tools": server.get_tools()}}

    elif method == "tools/call":
        tool_name = params.get("name", "")
        tool_args = params.get("arguments", {})

        try:
            result = await server.call_tool(tool_name, tool_args)
            return {
                "jsonrpc": "2.0",
                "id": msg_id,
                "result": {"content": [{"type": "text", "text": json.dumps(result, indent=2)}]},
            }
        except Exception as e:
            return {"jsonrpc": "2.0", "id": msg_id, "error": {"code": -32000, "message": str(e)}}

    elif method == "notifications/initialized":
        # No response needed for notifications
        return None  # type: ignore

    else:
        return {
            "jsonrpc": "2.0",
            "id": msg_id,
            "error": {"code": -32601, "message": f"Method not found: {method}"},
        }


async def run_mcp_server() -> None:
    """Run the MCP server over stdio."""
    server = create_mcp_server()

    # Read from stdin, write to stdout
    while True:
        try:
            line = await asyncio.get_event_loop().run_in_executor(None, sys.stdin.readline)

            if not line:
                break

            line = line.strip()
            if not line:
                continue

            message = json.loads(line)
            response = await handle_message(server, message)

            if response is not None:
                print(json.dumps(response), flush=True)

        except json.JSONDecodeError:
            continue
        except EOFError:
            break
        except KeyboardInterrupt:
            break


def main() -> None:
    """Main entry point for MCP server."""
    asyncio.run(run_mcp_server())


if __name__ == "__main__":
    main()
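The stdio loop above reads one JSON-RPC 2.0 message per line and prints one response line per request. A minimal smoke test like the following sketch should exercise it, assuming only what the file itself shows (newline-delimited JSON-RPC over stdin/stdout and the "initialize" and "tools/list" methods); the rpc helper is our own, not part of the package.

# Hypothetical smoke test for the stdio JSON-RPC loop in server.py; not
# shipped with the package.
import json
import subprocess

# Launch the server exactly as the module docstring suggests.
proc = subprocess.Popen(
    ["python", "-m", "neural_memory.mcp"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)

def rpc(method: str, params: dict | None = None, msg_id: int = 1) -> dict:
    """Write one newline-delimited request and read one response line."""
    request = {"jsonrpc": "2.0", "id": msg_id, "method": method, "params": params or {}}
    proc.stdin.write(json.dumps(request) + "\n")
    proc.stdin.flush()
    return json.loads(proc.stdout.readline())

init = rpc("initialize", msg_id=1)
print(init["result"]["serverInfo"])  # {'name': 'neural-memory', 'version': '0.1.0'}

tools = rpc("tools/list", msg_id=2)
print([t["name"] for t in tools["result"]["tools"]])  # nmem_remember, nmem_recall, ...

proc.terminate()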
neural_memory/py.typed
ADDED

File without changes
neural_memory/safety/__init__.py
ADDED

@@ -0,0 +1,31 @@
"""Safety utilities for Neural Memory.

This module provides tools for:
- Sensitive content detection
- Memory freshness evaluation
- Privacy protection
"""

from neural_memory.safety.freshness import (
    FreshnessLevel,
    evaluate_freshness,
    get_freshness_warning,
)
from neural_memory.safety.sensitive import (
    SensitiveMatch,
    SensitivePattern,
    check_sensitive_content,
    filter_sensitive_content,
    get_default_patterns,
)

__all__ = [
    "FreshnessLevel",
    "SensitiveMatch",
    "SensitivePattern",
    "check_sensitive_content",
    "evaluate_freshness",
    "filter_sensitive_content",
    "get_default_patterns",
    "get_freshness_warning",
]
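For orientation, a sketch of how this public API is consumed: the evaluate_freshness(created_at, now) call and the .level attribute match the usage in mcp/server.py above, while the single-argument check_sensitive_content(text) signature is an assumption, not confirmed by this diff.

# Sketch only; evaluate_freshness usage mirrors mcp/server.py, the
# check_sensitive_content signature is assumed.
from datetime import datetime, timedelta

from neural_memory.safety import FreshnessLevel, check_sensitive_content, evaluate_freshness

now = datetime.now()
freshness = evaluate_freshness(now - timedelta(days=45), now)
if freshness.level not in (FreshnessLevel.FRESH, FreshnessLevel.RECENT):
    print("memory may be stale")

matches = check_sensitive_content("my api key is sk-...")  # assumed signature
if matches:
    print("sensitive content detected")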