neural-memory 0.1.0-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neural_memory/__init__.py +1 -1
- neural_memory/cli/main.py +83 -0
- neural_memory/mcp/prompt.py +138 -0
- neural_memory/mcp/server.py +94 -21
- {neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/METADATA +1 -1
- {neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/RECORD +9 -8
- {neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/WHEEL +0 -0
- {neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/entry_points.txt +0 -0
- {neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/licenses/LICENSE +0 -0
neural_memory/__init__.py
CHANGED

@@ -13,7 +13,7 @@ from neural_memory.core.synapse import Direction, Synapse, SynapseType
 from neural_memory.engine.encoder import EncodingResult, MemoryEncoder
 from neural_memory.engine.retrieval import DepthLevel, ReflexPipeline, RetrievalResult

-__version__ = "0.1.0"
+__version__ = "0.2.0"

 __all__ = [
     "__version__",

neural_memory/cli/main.py
CHANGED

@@ -2685,6 +2685,89 @@ def show_today() -> None:
     asyncio.run(_today())


+@app.command(name="mcp-config")
+def mcp_config(
+    with_prompt: Annotated[
+        bool, typer.Option("--with-prompt", "-p", help="Include system prompt in config")
+    ] = False,
+    compact_prompt: Annotated[
+        bool, typer.Option("--compact", "-c", help="Use compact prompt (if --with-prompt)")
+    ] = False,
+) -> None:
+    """Generate MCP server configuration for Claude Code/Cursor.
+
+    Outputs JSON configuration that can be added to your MCP settings.
+
+    Examples:
+        nmem mcp-config                # Basic config
+        nmem mcp-config --with-prompt  # Include system prompt
+        nmem mcp-config -p -c          # Include compact prompt
+    """
+    import shutil
+    import sys
+
+    from neural_memory.mcp.prompt import get_system_prompt
+
+    # Find nmem executable path
+    nmem_path = shutil.which("nmem") or shutil.which("nmem-mcp") or sys.executable
+
+    config = {
+        "neural-memory": {
+            "command": nmem_path if "python" not in nmem_path.lower() else "python",
+            "args": ["-m", "neural_memory.mcp"] if "python" in nmem_path.lower() else ["mcp"],
+        }
+    }
+
+    # Simplify if nmem is available
+    if shutil.which("nmem-mcp"):
+        config["neural-memory"] = {"command": "nmem-mcp", "args": []}
+
+    typer.echo("Add this to your MCP configuration:\n")
+    typer.echo(json.dumps(config, indent=2))
+
+    if with_prompt:
+        typer.echo("\n" + "=" * 60)
+        typer.echo("System prompt to add to your AI assistant:\n")
+        typer.echo(get_system_prompt(compact=compact_prompt))
+
+
+@app.command()
+def prompt(
+    compact: Annotated[
+        bool, typer.Option("--compact", "-c", help="Show compact version")
+    ] = False,
+    copy: Annotated[
+        bool, typer.Option("--copy", help="Copy to clipboard (requires pyperclip)")
+    ] = False,
+) -> None:
+    """Show system prompt for AI tools.
+
+    This prompt instructs AI assistants (Claude, GPT, etc.) on when and how
+    to use NeuralMemory for persistent memory across sessions.
+
+    Examples:
+        nmem prompt            # Show full prompt
+        nmem prompt --compact  # Show shorter version
+        nmem prompt --copy     # Copy to clipboard
+    """
+    from neural_memory.mcp.prompt import get_system_prompt
+
+    text = get_system_prompt(compact=compact)
+
+    if copy:
+        try:
+            import pyperclip
+
+            pyperclip.copy(text)
+            typer.echo("System prompt copied to clipboard!")
+        except ImportError:
+            typer.echo("Install pyperclip for clipboard support: pip install pyperclip")
+            typer.echo("")
+            typer.echo(text)
+    else:
+        typer.echo(text)
+
+
 @app.command()
 def version() -> None:
     """Show version information."""

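For orientation, a sketch of what `nmem mcp-config` prints when the `nmem-mcp` console script is on PATH (the simplified branch above); when only a Python interpreter is found, `command`/`args` fall back to `python -m neural_memory.mcp` instead:

```
Add this to your MCP configuration:

{
  "neural-memory": {
    "command": "nmem-mcp",
    "args": []
  }
}
```
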
neural_memory/mcp/prompt.py
ADDED

@@ -0,0 +1,138 @@
+"""System prompt for AI tools using NeuralMemory.
+
+This prompt instructs AI assistants on when and how to use NeuralMemory
+for persistent memory across sessions.
+"""
+
+SYSTEM_PROMPT = """# NeuralMemory - Persistent Memory System
+
+You have access to NeuralMemory, a persistent memory system that survives across sessions.
+Use it to remember important information and recall past context.
+
+## When to REMEMBER (nmem_remember)
+
+Automatically save these to memory:
+- **Decisions**: "We decided to use PostgreSQL" -> remember as decision
+- **User preferences**: "I prefer dark mode" -> remember as preference
+- **Project context**: "This is a React app using TypeScript" -> remember as context
+- **Important facts**: "The API key is stored in .env" -> remember as fact
+- **Errors & solutions**: "Fixed by adding await" -> remember as error
+- **TODOs**: "Need to add tests later" -> remember as todo
+- **Workflows**: "Deploy process: build -> test -> push" -> remember as workflow
+
+## When to RECALL (nmem_recall)
+
+Query memory when:
+- Starting a new session on an existing project
+- User asks about past decisions or context
+- You need information from previous conversations
+- Before making decisions that might conflict with past choices
+
+## When to get CONTEXT (nmem_context)
+
+Use at session start to:
+- Load recent memories relevant to current task
+- Understand project state from previous sessions
+- Avoid asking questions already answered before
+
+## Auto-Capture (nmem_auto)
+
+After important conversations, call nmem_auto to automatically capture memories:
+
+```
+# Simple: process and save in one call
+nmem_auto(action="process", text="<conversation or response text>")
+
+# Preview first: see what would be captured
+nmem_auto(action="analyze", text="<text>")
+
+# Force save (even if auto-capture disabled)
+nmem_auto(action="analyze", text="<text>", save=true)
+```
+
+Auto-capture detects:
+- **Decisions**: "We decided...", "Let's use...", "Going with..."
+- **Errors**: "Error:", "The issue was...", "Bug:", "Failed to..."
+- **TODOs**: "TODO:", "Need to...", "Remember to...", "Later:"
+- **Facts**: "The solution is...", "It works because...", "Learned that..."
+
+**When to call nmem_auto(action="process")**:
+- After making important decisions
+- After solving bugs or errors
+- After learning something new about the project
+- At the end of a productive session
+
+## Best Practices
+
+1. **Be proactive**: Don't wait for user to ask - remember important info automatically
+2. **Be concise**: Store essence, not full conversations
+3. **Use types**: Categorize memories (fact/decision/todo/error/etc.)
+4. **Set priority**: Critical info = high priority (7-10), routine = normal (5)
+5. **Add tags**: Help organize memories by project/topic
+6. **Check first**: Recall before asking questions user may have answered before
+
+## Examples
+
+```
+# User mentions a preference
+User: "I always use 4-space indentation"
+-> nmem_remember(content="User prefers 4-space indentation", type="preference", priority=6)
+
+# Starting work on existing project
+-> nmem_context(limit=10, fresh_only=true)
+-> nmem_recall(query="project setup and decisions")
+
+# Made an important decision
+"Let's use Redis for caching"
+-> nmem_remember(content="Decision: Use Redis for caching", type="decision", priority=7)
+
+# Found a bug fix
+"The issue was missing await - fixed by adding await before fetch()"
+-> nmem_remember(content="Bug fix: Missing await before fetch() caused race condition", type="error", priority=7)
+```
+
+## Memory Types
+
+- `fact`: Objective information
+- `decision`: Choices made
+- `preference`: User preferences
+- `todo`: Tasks to do
+- `insight`: Learned patterns
+- `context`: Project/session context
+- `instruction`: User instructions
+- `error`: Bugs and fixes
+- `workflow`: Processes/procedures
+- `reference`: Links/resources
+"""
+
+COMPACT_PROMPT = """You have NeuralMemory for persistent memory across sessions.
+
+**Remember** (nmem_remember): Save decisions, preferences, facts, errors, todos.
+**Recall** (nmem_recall): Query past context before making decisions.
+**Context** (nmem_context): Load recent memories at session start.
+**Auto-capture** (nmem_auto): Call `nmem_auto(action="process", text="...")` after important conversations to auto-save decisions, errors, and todos.
+
+Be proactive: remember important info without being asked. Use auto-capture after solving bugs or making decisions."""
+
+
+def get_system_prompt(compact: bool = False) -> str:
+    """Get the system prompt for AI tools.
+
+    Args:
+        compact: If True, return shorter version for limited context
+
+    Returns:
+        System prompt string
+    """
+    return COMPACT_PROMPT if compact else SYSTEM_PROMPT
+
+
+def get_prompt_for_mcp() -> dict:
+    """Get prompt formatted for MCP resources."""
+    return {
+        "uri": "neuralmemory://prompt/system",
+        "name": "NeuralMemory System Prompt",
+        "description": "Instructions for AI assistants on using NeuralMemory",
+        "mimeType": "text/plain",
+        "text": SYSTEM_PROMPT,
+    }

neural_memory/mcp/server.py
CHANGED

@@ -34,6 +34,7 @@ from typing import TYPE_CHECKING, Any
 from neural_memory.core.memory_types import MemoryType, Priority, TypedMemory, suggest_memory_type
 from neural_memory.engine.encoder import MemoryEncoder
 from neural_memory.engine.retrieval import DepthLevel, ReflexPipeline
+from neural_memory.mcp.prompt import get_prompt_for_mcp, get_system_prompt
 from neural_memory.unified_config import get_config, get_shared_storage

 if TYPE_CHECKING:

@@ -58,6 +59,31 @@ class MCPServer:
         self._storage = await get_shared_storage()
         return self._storage

+    def get_resources(self) -> list[dict[str, Any]]:
+        """Return list of available MCP resources."""
+        return [
+            {
+                "uri": "neuralmemory://prompt/system",
+                "name": "NeuralMemory System Prompt",
+                "description": "Instructions for AI on when/how to use NeuralMemory",
+                "mimeType": "text/plain",
+            },
+            {
+                "uri": "neuralmemory://prompt/compact",
+                "name": "NeuralMemory Compact Prompt",
+                "description": "Short version of system prompt for limited context",
+                "mimeType": "text/plain",
+            },
+        ]
+
+    def get_resource_content(self, uri: str) -> str | None:
+        """Get content of a resource by URI."""
+        if uri == "neuralmemory://prompt/system":
+            return get_system_prompt(compact=False)
+        elif uri == "neuralmemory://prompt/compact":
+            return get_system_prompt(compact=True)
+        return None
+
     def get_tools(self) -> list[dict[str, Any]]:
         """Return list of available MCP tools."""
         return [

@@ -171,22 +197,22 @@
             },
             {
                 "name": "nmem_auto",
-                "description": "
+                "description": "Auto-capture memories from text. Use 'process' to analyze and save in one call. Call this after important conversations to capture decisions, errors, todos, and facts automatically.",
                 "inputSchema": {
                     "type": "object",
                     "properties": {
                         "action": {
                             "type": "string",
-                            "enum": ["status", "enable", "disable", "analyze"],
-                            "description": "Action
+                            "enum": ["status", "enable", "disable", "analyze", "process"],
+                            "description": "Action: 'process' analyzes and saves, 'analyze' only detects",
                         },
                         "text": {
                             "type": "string",
-                            "description": "Text to analyze
+                            "description": "Text to analyze (required for 'analyze' and 'process')",
                         },
                         "save": {
                             "type": "boolean",
-                            "description": "
+                            "description": "Force save even if auto-capture disabled (for 'analyze')",
                         },
                     },
                     "required": ["action"],

@@ -397,20 +423,10 @@
             if not detected:
                 return {"detected": [], "message": "No memorable content detected"}

-            # Optionally save detected memories
-
-
-
-                if item["confidence"] >= self.config.auto.min_confidence:
-                    result = await self._remember(
-                        {
-                            "content": item["content"],
-                            "type": item["type"],
-                            "priority": item.get("priority", 5),
-                        }
-                    )
-                    if "error" not in result:
-                        saved.append(item["content"][:50])
+            # Optionally save detected memories (save=true forces save regardless of auto setting)
+            should_save = args.get("save", False)
+            if should_save:
+                saved = await self._save_detected_memories(detected)
             return {
                 "detected": detected,
                 "saved": saved,

@@ -422,8 +438,47 @@
                 "message": f"Detected {len(detected)} potential memories (not saved)",
             }

+        elif action == "process":
+            # Process = analyze + auto-save (simpler API for AI tools)
+            text = args.get("text", "")
+            if not text:
+                return {"error": "Text required for process action"}
+
+            detected = self._analyze_text_for_memories(text)
+
+            if not detected:
+                return {"saved": 0, "message": "No memorable content detected"}
+
+            # Auto-save all detected memories above confidence threshold
+            saved = await self._save_detected_memories(detected)
+
+            return {
+                "saved": len(saved),
+                "memories": saved,
+                "message": f"Auto-captured {len(saved)} memories" if saved else "No memories met confidence threshold",
+            }
+
         return {"error": f"Unknown action: {action}"}

+    async def _save_detected_memories(self, detected: list[dict[str, Any]]) -> list[str]:
+        """Save detected memories that meet confidence threshold.
+
+        Returns list of saved memory summaries.
+        """
+        saved = []
+        for item in detected:
+            if item["confidence"] >= self.config.auto.min_confidence:
+                result = await self._remember(
+                    {
+                        "content": item["content"],
+                        "type": item["type"],
+                        "priority": item.get("priority", 5),
+                    }
+                )
+                if "error" not in result:
+                    saved.append(item["content"][:50])
+        return saved
+
     def _analyze_text_for_memories(self, text: str) -> list[dict[str, Any]]:
         """Analyze text and detect potential memories.

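To make the new `process` action concrete: a hypothetical result payload when two detections clear the `auto.min_confidence` threshold. Per `_save_detected_memories` above, each `memories` entry is the first 50 characters of the saved content; the contents here are invented for illustration:

```
{
  "saved": 2,
  "memories": [
    "Decision: Use Redis for caching",
    "Bug fix: Missing await before fetch() caused race"
  ],
  "message": "Auto-captured 2 memories"
}
```
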
@@ -548,14 +603,32 @@ async def handle_message(server: MCPServer, message: dict[str, Any]) -> dict[str
             "id": msg_id,
             "result": {
                 "protocolVersion": "2024-11-05",
-                "serverInfo": {"name": "neural-memory", "version": "0.1.0"},
-                "capabilities": {"tools": {}},
+                "serverInfo": {"name": "neural-memory", "version": "0.2.0"},
+                "capabilities": {"tools": {}, "resources": {}},
             },
         }

     elif method == "tools/list":
         return {"jsonrpc": "2.0", "id": msg_id, "result": {"tools": server.get_tools()}}

+    elif method == "resources/list":
+        return {"jsonrpc": "2.0", "id": msg_id, "result": {"resources": server.get_resources()}}
+
+    elif method == "resources/read":
+        uri = params.get("uri", "")
+        content = server.get_resource_content(uri)
+        if content is None:
+            return {
+                "jsonrpc": "2.0",
+                "id": msg_id,
+                "error": {"code": -32002, "message": f"Resource not found: {uri}"},
+            }
+        return {
+            "jsonrpc": "2.0",
+            "id": msg_id,
+            "result": {"contents": [{"uri": uri, "mimeType": "text/plain", "text": content}]},
+        }
+
     elif method == "tools/call":
         tool_name = params.get("name", "")
         tool_args = params.get("arguments", {})

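End to end, the new resource plumbing should look roughly like this on the wire; a sketch of a `resources/read` exchange for the compact prompt, with the response shape taken from the handler above (the `id` is arbitrary and the `text` value is elided):

```
{"jsonrpc": "2.0", "id": 3, "method": "resources/read", "params": {"uri": "neuralmemory://prompt/compact"}}
```

```
{
  "jsonrpc": "2.0",
  "id": 3,
  "result": {
    "contents": [
      {
        "uri": "neuralmemory://prompt/compact",
        "mimeType": "text/plain",
        "text": "You have NeuralMemory for persistent memory across sessions. ..."
      }
    ]
  }
}
```
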
{neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: neural-memory
-Version: 0.1.0
+Version: 0.2.0
 Summary: Reflex-based memory system for AI agents - retrieval through activation, not search
 Project-URL: Homepage, https://github.com/nhadaututtheky/neural-memory
 Project-URL: Documentation, https://github.com/nhadaututtheky/neural-memory#readme

{neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/RECORD
CHANGED

@@ -1,10 +1,10 @@
-neural_memory/__init__.py,sha256
+neural_memory/__init__.py,sha256=m2XAEkY23Kd3_yNBOcQRIl7kcDPJ4MO6fYrFI79qt_E,970
 neural_memory/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 neural_memory/unified_config.py,sha256=BySYLNJmnBcz9A83kKztJ53ShkScJ5qOVqBNGQls0Rs,10208
 neural_memory/cli/__init__.py,sha256=4srWgYwiGcs-mG6dm91gRbYCN-om2hfkuxd-fYkgIGI,409
 neural_memory/cli/__main__.py,sha256=TnhrDVCqsaagmqua7QH1yaP9YXLNRvLxQ0pFrMxDYRw,132
 neural_memory/cli/config.py,sha256=3kuYeWyTJms_2H_xAgx0qXEN1FbaYwvcU0oKPdiGO2Y,5612
-neural_memory/cli/main.py,sha256=
+neural_memory/cli/main.py,sha256=wA4qLKzP2-H9PfyYHBVqw_OVFYQrxmgCVHDyGoI1Hxc,95060
 neural_memory/cli/storage.py,sha256=xiIk9Nmygw2G-nBDuRaSh6ZY6Ccu3zX_1l_eyeOlIsM,6044
 neural_memory/cli/tui.py,sha256=3D0uhQD2LpP7caBOns7EeOsx-JsORl4-DyzSsS1HN64,16335
 neural_memory/core/__init__.py,sha256=j9oPfDRg6n3Jr1OwwjY1OufH1zEPcL5aZol23R8vik0,1181

@@ -26,7 +26,8 @@ neural_memory/extraction/router.py,sha256=PGN0Z5uwmtYhARajVhb-q01PDG2cuxMUp_xqwZ
 neural_memory/extraction/temporal.py,sha256=uESwr-NWTMNiRJHDryKBdSS3swnkp_FBwjUwiHAnIf0,15930
 neural_memory/mcp/__init__.py,sha256=8qo502ZfwryBv5htRaUJWsk3q9tbZX6TBtjmO6H0iTA,332
 neural_memory/mcp/__main__.py,sha256=BETnISNHCDaqFhvOo51fSV1DcB2yMu9iefu-b6mA0bk,137
-neural_memory/mcp/
+neural_memory/mcp/prompt.py,sha256=PTTWJ5ZxSPFq42ojU7UH8DKqgJxepg8ZMZmj9xYcaKQ,5003
+neural_memory/mcp/server.py,sha256=MITUs-5DXieZ2Wr5n9f9MTlOWoanmK10L6FvCm_HPCk,26121
 neural_memory/safety/__init__.py,sha256=25_pdeXu-UI3dNsPYiCsC3Rvq26ma-tCd3h1ow7Ny3w,679
 neural_memory/safety/freshness.py,sha256=nwUdybjsDK_8uwa_mJRPflx0XGZXVwDwJCw-LEVyh9c,6748
 neural_memory/safety/sensitive.py,sha256=3dCv951D1JOMo6V4vu11d0OxUFhxCITZ2w05HhJ3CtA,9359

@@ -48,8 +49,8 @@ neural_memory/sync/__init__.py,sha256=epKpdAsHzwUYXrWawlHPckSEgK5CZYb-B29AAcdTnV
 neural_memory/sync/client.py,sha256=kiKqRlteXVe9P-IzgayV7bwdT9Ovu2UgV_XJtEUIbxQ,13421
 neural_memory/utils/__init__.py,sha256=vZM_jKQ_BUUd6jXtz9BGq5A1d-kFRMcMXt96XVhRv2M,135
 neural_memory/utils/config.py,sha256=jgDaHksEibusC2gub1I_USUBOJyP6gBHcfgS7RkL4dc,3086
-neural_memory-0.
-neural_memory-0.
-neural_memory-0.
-neural_memory-0.
-neural_memory-0.
+neural_memory-0.2.0.dist-info/METADATA,sha256=wdXskPmp9hpl_wVG0yQl8sik8IoSUjFDpe8zTVRWp88,10305
+neural_memory-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+neural_memory-0.2.0.dist-info/entry_points.txt,sha256=u0ZGaAD6uU98g0Wheyj1WjrSchm_zlAkyzz1HRbKOnE,121
+neural_memory-0.2.0.dist-info/licenses/LICENSE,sha256=uuCGDPgkW8shclBRpQNK5I0T97ZQy9HHolwo9Qr3Bbc,1082
+neural_memory-0.2.0.dist-info/RECORD,,

{neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/WHEEL
File without changes

{neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/entry_points.txt
File without changes

{neural_memory-0.1.0.dist-info → neural_memory-0.2.0.dist-info}/licenses/LICENSE
File without changes