neural-memory 0.1.0-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
neural_memory/__init__.py CHANGED
@@ -13,7 +13,7 @@ from neural_memory.core.synapse import Direction, Synapse, SynapseType
  from neural_memory.engine.encoder import EncodingResult, MemoryEncoder
  from neural_memory.engine.retrieval import DepthLevel, ReflexPipeline, RetrievalResult
 
- __version__ = "0.1.0"
+ __version__ = "0.3.0"
 
  __all__ = [
      "__version__",
neural_memory/cli/main.py CHANGED
@@ -2685,6 +2685,233 @@ def show_today() -> None:
      asyncio.run(_today())
 
 
+ @app.command(name="mcp-config")
+ def mcp_config(
+     with_prompt: Annotated[
+         bool, typer.Option("--with-prompt", "-p", help="Include system prompt in config")
+     ] = False,
+     compact_prompt: Annotated[
+         bool, typer.Option("--compact", "-c", help="Use compact prompt (if --with-prompt)")
+     ] = False,
+ ) -> None:
+     """Generate MCP server configuration for Claude Code/Cursor.
+
+     Outputs JSON configuration that can be added to your MCP settings.
+
+     Examples:
+         nmem mcp-config                # Basic config
+         nmem mcp-config --with-prompt  # Include system prompt
+         nmem mcp-config -p -c          # Include compact prompt
+     """
+     import shutil
+     import sys
+
+     from neural_memory.mcp.prompt import get_system_prompt
+
+     # Find nmem executable path
+     nmem_path = shutil.which("nmem") or shutil.which("nmem-mcp") or sys.executable
+
+     config = {
+         "neural-memory": {
+             "command": nmem_path if "python" not in nmem_path.lower() else "python",
+             "args": ["-m", "neural_memory.mcp"] if "python" in nmem_path.lower() else ["mcp"],
+         }
+     }
+
+     # Simplify if nmem-mcp is available
+     if shutil.which("nmem-mcp"):
+         config["neural-memory"] = {"command": "nmem-mcp", "args": []}
+
+     typer.echo("Add this to your MCP configuration:\n")
+     typer.echo(json.dumps(config, indent=2))
+
+     if with_prompt:
+         typer.echo("\n" + "=" * 60)
+         typer.echo("System prompt to add to your AI assistant:\n")
+         typer.echo(get_system_prompt(compact=compact_prompt))
+
+
+ @app.command()
+ def prompt(
+     compact: Annotated[
+         bool, typer.Option("--compact", "-c", help="Show compact version")
+     ] = False,
+     copy: Annotated[
+         bool, typer.Option("--copy", help="Copy to clipboard (requires pyperclip)")
+     ] = False,
+ ) -> None:
+     """Show system prompt for AI tools.
+
+     This prompt instructs AI assistants (Claude, GPT, etc.) on when and how
+     to use NeuralMemory for persistent memory across sessions.
+
+     Examples:
+         nmem prompt            # Show full prompt
+         nmem prompt --compact  # Show shorter version
+         nmem prompt --copy     # Copy to clipboard
+     """
+     from neural_memory.mcp.prompt import get_system_prompt
+
+     text = get_system_prompt(compact=compact)
+
+     if copy:
+         try:
+             import pyperclip
+
+             pyperclip.copy(text)
+             typer.echo("System prompt copied to clipboard!")
+         except ImportError:
+             typer.echo("Install pyperclip for clipboard support: pip install pyperclip")
+             typer.echo("")
+             typer.echo(text)
+     else:
+         typer.echo(text)
+
+
+ @app.command(name="export")
+ def export_brain_cmd(
+     output: Annotated[
+         str, typer.Argument(help="Output file path (e.g., my-brain.json)")
+     ],
+     brain: Annotated[
+         str | None, typer.Option("--brain", "-b", help="Brain to export (default: current)")
+     ] = None,
+ ) -> None:
+     """Export brain to JSON file for backup or sharing.
+
+     Examples:
+         nmem export backup.json        # Export current brain
+         nmem export work.json -b work  # Export specific brain
+     """
+     from pathlib import Path
+
+     from neural_memory.unified_config import get_config, get_shared_storage
+
+     async def _export() -> None:
+         config = get_config()
+         brain_name = brain or config.current_brain
+         storage = await get_shared_storage(brain_name)
+
+         snapshot = await storage.export_brain(brain_name)
+
+         output_path = Path(output)
+         export_data = {
+             "brain_id": snapshot.brain_id,
+             "brain_name": snapshot.brain_name,
+             "exported_at": snapshot.exported_at.isoformat(),
+             "version": snapshot.version,
+             "neurons": snapshot.neurons,
+             "synapses": snapshot.synapses,
+             "fibers": snapshot.fibers,
+             "config": snapshot.config,
+             "metadata": snapshot.metadata,
+         }
+
+         output_path.write_text(json.dumps(export_data, indent=2, default=str))
+
+         typer.echo(f"Exported brain '{brain_name}' to {output_path}")
+         typer.echo(f"  Neurons: {len(snapshot.neurons)}")
+         typer.echo(f"  Synapses: {len(snapshot.synapses)}")
+         typer.echo(f"  Fibers: {len(snapshot.fibers)}")
+
+     asyncio.run(_export())
+
+
+ @app.command(name="import")
+ def import_brain_cmd(
+     input_file: Annotated[
+         str, typer.Argument(help="Input file path (e.g., my-brain.json)")
+     ],
+     brain: Annotated[
+         str | None, typer.Option("--brain", "-b", help="Target brain name (default: from file)")
+     ] = None,
+     merge: Annotated[
+         bool, typer.Option("--merge", "-m", help="Merge with existing brain")
+     ] = False,
+ ) -> None:
+     """Import brain from JSON file.
+
+     Examples:
+         nmem import backup.json          # Import as original brain name
+         nmem import backup.json -b new   # Import as 'new' brain
+         nmem import backup.json --merge  # Merge into existing brain
+     """
+     from pathlib import Path
+
+     from neural_memory.core.brain import BrainSnapshot
+     from neural_memory.unified_config import get_shared_storage
+
+     async def _import() -> None:
+         input_path = Path(input_file)
+         if not input_path.exists():
+             typer.echo(f"Error: File not found: {input_path}", err=True)
+             raise typer.Exit(1)
+
+         data = json.loads(input_path.read_text())
+
+         brain_name = brain or data.get("brain_name", "imported")
+         storage = await get_shared_storage(brain_name)
+
+         snapshot = BrainSnapshot(
+             brain_id=data.get("brain_id", brain_name),
+             brain_name=data["brain_name"],
+             exported_at=datetime.fromisoformat(data["exported_at"]),
+             version=data["version"],
+             neurons=data["neurons"],
+             synapses=data["synapses"],
+             fibers=data["fibers"],
+             config=data["config"],
+             metadata=data.get("metadata", {}),
+         )
+
+         imported_id = await storage.import_brain(snapshot, brain_name)
+
+         typer.echo(f"Imported brain '{brain_name}' from {input_path}")
+         typer.echo(f"  Neurons: {len(snapshot.neurons)}")
+         typer.echo(f"  Synapses: {len(snapshot.synapses)}")
+         typer.echo(f"  Fibers: {len(snapshot.fibers)}")
+
+     asyncio.run(_import())
+
+
+ @app.command()
+ def serve(
+     host: Annotated[
+         str, typer.Option("--host", "-h", help="Host to bind to")
+     ] = "127.0.0.1",
+     port: Annotated[
+         int, typer.Option("--port", "-p", help="Port to bind to")
+     ] = 8000,
+     reload: Annotated[
+         bool, typer.Option("--reload", "-r", help="Enable auto-reload for development")
+     ] = False,
+ ) -> None:
+     """Run the NeuralMemory API server.
+
+     Examples:
+         nmem serve                 # Run on localhost:8000
+         nmem serve -p 9000         # Run on port 9000
+         nmem serve --host 0.0.0.0  # Expose to network
+         nmem serve --reload        # Development mode
+     """
+     try:
+         import uvicorn
+     except ImportError:
+         typer.echo("Error: uvicorn not installed. Run: pip install neural-memory[server]", err=True)
+         raise typer.Exit(1)
+
+     typer.echo(f"Starting NeuralMemory API server on http://{host}:{port}")
+     typer.echo(f"API docs: http://{host}:{port}/docs")
+
+     uvicorn.run(
+         "neural_memory.server.app:create_app",
+         host=host,
+         port=port,
+         reload=reload,
+         factory=True,
+     )
+
+
  @app.command()
  def version() -> None:
      """Show version information."""
neural_memory/mcp/prompt.py ADDED
@@ -0,0 +1,138 @@
+ """System prompt for AI tools using NeuralMemory.
+
+ This prompt instructs AI assistants on when and how to use NeuralMemory
+ for persistent memory across sessions.
+ """
+
+ SYSTEM_PROMPT = """# NeuralMemory - Persistent Memory System
+
+ You have access to NeuralMemory, a persistent memory system that survives across sessions.
+ Use it to remember important information and recall past context.
+
+ ## When to REMEMBER (nmem_remember)
+
+ Automatically save these to memory:
+ - **Decisions**: "We decided to use PostgreSQL" -> remember as decision
+ - **User preferences**: "I prefer dark mode" -> remember as preference
+ - **Project context**: "This is a React app using TypeScript" -> remember as context
+ - **Important facts**: "The API key is stored in .env" -> remember as fact
+ - **Errors & solutions**: "Fixed by adding await" -> remember as error
+ - **TODOs**: "Need to add tests later" -> remember as todo
+ - **Workflows**: "Deploy process: build -> test -> push" -> remember as workflow
+
+ ## When to RECALL (nmem_recall)
+
+ Query memory when:
+ - Starting a new session on an existing project
+ - User asks about past decisions or context
+ - You need information from previous conversations
+ - Before making decisions that might conflict with past choices
+
+ ## When to get CONTEXT (nmem_context)
+
+ Use at session start to:
+ - Load recent memories relevant to current task
+ - Understand project state from previous sessions
+ - Avoid asking questions already answered before
+
+ ## Auto-Capture (nmem_auto)
+
+ After important conversations, call nmem_auto to automatically capture memories:
+
+ ```
+ # Simple: process and save in one call
+ nmem_auto(action="process", text="<conversation or response text>")
+
+ # Preview first: see what would be captured
+ nmem_auto(action="analyze", text="<text>")
+
+ # Force save (even if auto-capture disabled)
+ nmem_auto(action="analyze", text="<text>", save=true)
+ ```
+
+ Auto-capture detects:
+ - **Decisions**: "We decided...", "Let's use...", "Going with..."
+ - **Errors**: "Error:", "The issue was...", "Bug:", "Failed to..."
+ - **TODOs**: "TODO:", "Need to...", "Remember to...", "Later:"
+ - **Facts**: "The solution is...", "It works because...", "Learned that..."
+
+ **When to call nmem_auto(action="process")**:
+ - After making important decisions
+ - After solving bugs or errors
+ - After learning something new about the project
+ - At the end of a productive session
+
+ ## Best Practices
+
+ 1. **Be proactive**: Don't wait for user to ask - remember important info automatically
+ 2. **Be concise**: Store essence, not full conversations
+ 3. **Use types**: Categorize memories (fact/decision/todo/error/etc.)
+ 4. **Set priority**: Critical info = high priority (7-10), routine = normal (5)
+ 5. **Add tags**: Help organize memories by project/topic
+ 6. **Check first**: Recall before asking questions user may have answered before
+
+ ## Examples
+
+ ```
+ # User mentions a preference
+ User: "I always use 4-space indentation"
+ -> nmem_remember(content="User prefers 4-space indentation", type="preference", priority=6)
+
+ # Starting work on existing project
+ -> nmem_context(limit=10, fresh_only=true)
+ -> nmem_recall(query="project setup and decisions")
+
+ # Made an important decision
+ "Let's use Redis for caching"
+ -> nmem_remember(content="Decision: Use Redis for caching", type="decision", priority=7)
+
+ # Found a bug fix
+ "The issue was missing await - fixed by adding await before fetch()"
+ -> nmem_remember(content="Bug fix: Missing await before fetch() caused race condition", type="error", priority=7)
+ ```
+
+ ## Memory Types
+
+ - `fact`: Objective information
+ - `decision`: Choices made
+ - `preference`: User preferences
+ - `todo`: Tasks to do
+ - `insight`: Learned patterns
+ - `context`: Project/session context
+ - `instruction`: User instructions
+ - `error`: Bugs and fixes
+ - `workflow`: Processes/procedures
+ - `reference`: Links/resources
+ """
+
+ COMPACT_PROMPT = """You have NeuralMemory for persistent memory across sessions.
+
+ **Remember** (nmem_remember): Save decisions, preferences, facts, errors, todos.
+ **Recall** (nmem_recall): Query past context before making decisions.
+ **Context** (nmem_context): Load recent memories at session start.
+ **Auto-capture** (nmem_auto): Call `nmem_auto(action="process", text="...")` after important conversations to auto-save decisions, errors, and todos.
+
+ Be proactive: remember important info without being asked. Use auto-capture after solving bugs or making decisions."""
+
+
+ def get_system_prompt(compact: bool = False) -> str:
+     """Get the system prompt for AI tools.
+
+     Args:
+         compact: If True, return shorter version for limited context
+
+     Returns:
+         System prompt string
+     """
+     return COMPACT_PROMPT if compact else SYSTEM_PROMPT
+
+
+ def get_prompt_for_mcp() -> dict:
+     """Get prompt formatted for MCP resources."""
+     return {
+         "uri": "neuralmemory://prompt/system",
+         "name": "NeuralMemory System Prompt",
+         "description": "Instructions for AI assistants on using NeuralMemory",
+         "mimeType": "text/plain",
+         "text": SYSTEM_PROMPT,
+     }
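The new module is usable on its own as well as through the CLI and MCP server. A minimal sketch of its two public functions, based only on the definitions above:

```python
# Exercise the two entry points defined in neural_memory/mcp/prompt.py.
from neural_memory.mcp.prompt import get_prompt_for_mcp, get_system_prompt

full = get_system_prompt()               # returns SYSTEM_PROMPT
short = get_system_prompt(compact=True)  # returns COMPACT_PROMPT
assert len(short) < len(full)

resource = get_prompt_for_mcp()          # MCP resource descriptor
print(resource["uri"])                   # neuralmemory://prompt/system
print(resource["mimeType"])              # text/plain
```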
neural_memory/mcp/server.py CHANGED
@@ -34,6 +34,7 @@ from typing import TYPE_CHECKING, Any
  from neural_memory.core.memory_types import MemoryType, Priority, TypedMemory, suggest_memory_type
  from neural_memory.engine.encoder import MemoryEncoder
  from neural_memory.engine.retrieval import DepthLevel, ReflexPipeline
+ from neural_memory.mcp.prompt import get_prompt_for_mcp, get_system_prompt
  from neural_memory.unified_config import get_config, get_shared_storage
 
  if TYPE_CHECKING:
@@ -58,6 +59,31 @@ class MCPServer:
          self._storage = await get_shared_storage()
          return self._storage
 
+     def get_resources(self) -> list[dict[str, Any]]:
+         """Return list of available MCP resources."""
+         return [
+             {
+                 "uri": "neuralmemory://prompt/system",
+                 "name": "NeuralMemory System Prompt",
+                 "description": "Instructions for AI on when/how to use NeuralMemory",
+                 "mimeType": "text/plain",
+             },
+             {
+                 "uri": "neuralmemory://prompt/compact",
+                 "name": "NeuralMemory Compact Prompt",
+                 "description": "Short version of system prompt for limited context",
+                 "mimeType": "text/plain",
+             },
+         ]
+
+     def get_resource_content(self, uri: str) -> str | None:
+         """Get content of a resource by URI."""
+         if uri == "neuralmemory://prompt/system":
+             return get_system_prompt(compact=False)
+         elif uri == "neuralmemory://prompt/compact":
+             return get_system_prompt(compact=True)
+         return None
+
      def get_tools(self) -> list[dict[str, Any]]:
          """Return list of available MCP tools."""
          return [
@@ -171,22 +197,22 @@
              },
              {
                  "name": "nmem_auto",
-                 "description": "Control auto-capture settings and analyze text for auto-save. When enabled, NeuralMemory automatically detects and saves decisions, errors, todos, and important facts from conversations.",
+                 "description": "Auto-capture memories from text. Use 'process' to analyze and save in one call. Call this after important conversations to capture decisions, errors, todos, and facts automatically.",
                  "inputSchema": {
                      "type": "object",
                      "properties": {
                          "action": {
                              "type": "string",
-                             "enum": ["status", "enable", "disable", "analyze"],
-                             "description": "Action to perform",
+                             "enum": ["status", "enable", "disable", "analyze", "process"],
+                             "description": "Action: 'process' analyzes and saves, 'analyze' only detects",
                          },
                          "text": {
                              "type": "string",
-                             "description": "Text to analyze for auto-capture (required for 'analyze' action)",
+                             "description": "Text to analyze (required for 'analyze' and 'process')",
                          },
                          "save": {
                              "type": "boolean",
-                             "description": "If true, save detected memories (default: false for analyze)",
+                             "description": "Force save even if auto-capture disabled (for 'analyze')",
                          },
                      },
                      "required": ["action"],
@@ -397,20 +423,10 @@
              if not detected:
                  return {"detected": [], "message": "No memorable content detected"}
 
-             # Optionally save detected memories
-             if args.get("save", False) and self.config.auto.enabled:
-                 saved = []
-                 for item in detected:
-                     if item["confidence"] >= self.config.auto.min_confidence:
-                         result = await self._remember(
-                             {
-                                 "content": item["content"],
-                                 "type": item["type"],
-                                 "priority": item.get("priority", 5),
-                             }
-                         )
-                         if "error" not in result:
-                             saved.append(item["content"][:50])
+             # Optionally save detected memories (save=true forces save regardless of auto setting)
+             should_save = args.get("save", False)
+             if should_save:
+                 saved = await self._save_detected_memories(detected)
                  return {
                      "detected": detected,
                      "saved": saved,
@@ -422,8 +438,47 @@
                  "message": f"Detected {len(detected)} potential memories (not saved)",
              }
 
+         elif action == "process":
+             # Process = analyze + auto-save (simpler API for AI tools)
+             text = args.get("text", "")
+             if not text:
+                 return {"error": "Text required for process action"}
+
+             detected = self._analyze_text_for_memories(text)
+
+             if not detected:
+                 return {"saved": 0, "message": "No memorable content detected"}
+
+             # Auto-save all detected memories above confidence threshold
+             saved = await self._save_detected_memories(detected)
+
+             return {
+                 "saved": len(saved),
+                 "memories": saved,
+                 "message": f"Auto-captured {len(saved)} memories" if saved else "No memories met confidence threshold",
+             }
+
          return {"error": f"Unknown action: {action}"}
 
+     async def _save_detected_memories(self, detected: list[dict[str, Any]]) -> list[str]:
+         """Save detected memories that meet confidence threshold.
+
+         Returns list of saved memory summaries.
+         """
+         saved = []
+         for item in detected:
+             if item["confidence"] >= self.config.auto.min_confidence:
+                 result = await self._remember(
+                     {
+                         "content": item["content"],
+                         "type": item["type"],
+                         "priority": item.get("priority", 5),
+                     }
+                 )
+                 if "error" not in result:
+                     saved.append(item["content"][:50])
+         return saved
+
      def _analyze_text_for_memories(self, text: str) -> list[dict[str, Any]]:
          """Analyze text and detect potential memories.
 
@@ -548,14 +603,32 @@ async def handle_message(server: MCPServer, message: dict[str, Any]) -> dict[str, Any]:
              "id": msg_id,
              "result": {
                  "protocolVersion": "2024-11-05",
-                 "serverInfo": {"name": "neural-memory", "version": "0.1.0"},
-                 "capabilities": {"tools": {}},
+                 "serverInfo": {"name": "neural-memory", "version": "0.3.0"},
+                 "capabilities": {"tools": {}, "resources": {}},
              },
          }
 
      elif method == "tools/list":
          return {"jsonrpc": "2.0", "id": msg_id, "result": {"tools": server.get_tools()}}
 
+     elif method == "resources/list":
+         return {"jsonrpc": "2.0", "id": msg_id, "result": {"resources": server.get_resources()}}
+
+     elif method == "resources/read":
+         uri = params.get("uri", "")
+         content = server.get_resource_content(uri)
+         if content is None:
+             return {
+                 "jsonrpc": "2.0",
+                 "id": msg_id,
+                 "error": {"code": -32002, "message": f"Resource not found: {uri}"},
+             }
+         return {
+             "jsonrpc": "2.0",
+             "id": msg_id,
+             "result": {"contents": [{"uri": uri, "mimeType": "text/plain", "text": content}]},
+         }
+
      elif method == "tools/call":
          tool_name = params.get("name", "")
          tool_args = params.get("arguments", {})
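For reference, the message shapes the new `resources/list` and `resources/read` branches exchange, written out as Python dicts (the `id` values are arbitrary and the resource text is truncated; the shapes follow the handlers above):

```python
# JSON-RPC messages for the new resource methods in handle_message.
list_request = {"jsonrpc": "2.0", "id": 1, "method": "resources/list", "params": {}}

read_request = {
    "jsonrpc": "2.0",
    "id": 2,
    "method": "resources/read",
    "params": {"uri": "neuralmemory://prompt/compact"},
}

# Success shape returned by the resources/read branch:
read_response = {
    "jsonrpc": "2.0",
    "id": 2,
    "result": {
        "contents": [
            {
                "uri": "neuralmemory://prompt/compact",
                "mimeType": "text/plain",
                "text": "You have NeuralMemory for persistent memory across sessions. ...",
            }
        ]
    },
}

# An unknown URI instead yields:
# {"jsonrpc": "2.0", "id": 2, "error": {"code": -32002, "message": "Resource not found: <uri>"}}
```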
neural_memory-0.1.0.dist-info/METADATA → neural_memory-0.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: neural-memory
- Version: 0.1.0
+ Version: 0.3.0
  Summary: Reflex-based memory system for AI agents - retrieval through activation, not search
  Project-URL: Homepage, https://github.com/nhadaututtheky/neural-memory
  Project-URL: Documentation, https://github.com/nhadaututtheky/neural-memory#readme
neural_memory-0.1.0.dist-info/RECORD → neural_memory-0.3.0.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
- neural_memory/__init__.py,sha256=-PzpjmZU17qTgPuZJE6VgFijzpefAdr2HlRpP3wSTeM,970
+ neural_memory/__init__.py,sha256=98W3ehXo6Pf4-bL0DpVt3W_aTIZPbyDqp4nXnI_uj0U,970
  neural_memory/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  neural_memory/unified_config.py,sha256=BySYLNJmnBcz9A83kKztJ53ShkScJ5qOVqBNGQls0Rs,10208
  neural_memory/cli/__init__.py,sha256=4srWgYwiGcs-mG6dm91gRbYCN-om2hfkuxd-fYkgIGI,409
  neural_memory/cli/__main__.py,sha256=TnhrDVCqsaagmqua7QH1yaP9YXLNRvLxQ0pFrMxDYRw,132
  neural_memory/cli/config.py,sha256=3kuYeWyTJms_2H_xAgx0qXEN1FbaYwvcU0oKPdiGO2Y,5612
- neural_memory/cli/main.py,sha256=DsHZDnYXFaWRqJzOFo_IK0bMNEmTtDxDpeJSGCriTJU,92396
+ neural_memory/cli/main.py,sha256=rQra4oGAmd18yJ0e3xbw18IKg3--FpQSqvdyZGRO8tM,99873
  neural_memory/cli/storage.py,sha256=xiIk9Nmygw2G-nBDuRaSh6ZY6Ccu3zX_1l_eyeOlIsM,6044
  neural_memory/cli/tui.py,sha256=3D0uhQD2LpP7caBOns7EeOsx-JsORl4-DyzSsS1HN64,16335
  neural_memory/core/__init__.py,sha256=j9oPfDRg6n3Jr1OwwjY1OufH1zEPcL5aZol23R8vik0,1181
@@ -26,7 +26,8 @@ neural_memory/extraction/router.py,sha256=PGN0Z5uwmtYhARajVhb-q01PDG2cuxMUp_xqwZ
  neural_memory/extraction/temporal.py,sha256=uESwr-NWTMNiRJHDryKBdSS3swnkp_FBwjUwiHAnIf0,15930
  neural_memory/mcp/__init__.py,sha256=8qo502ZfwryBv5htRaUJWsk3q9tbZX6TBtjmO6H0iTA,332
  neural_memory/mcp/__main__.py,sha256=BETnISNHCDaqFhvOo51fSV1DcB2yMu9iefu-b6mA0bk,137
- neural_memory/mcp/server.py,sha256=TwTIc60xCSDSdGOHSVEr7tPi-DUHAABdBHS74F1ADZw,23291
+ neural_memory/mcp/prompt.py,sha256=PTTWJ5ZxSPFq42ojU7UH8DKqgJxepg8ZMZmj9xYcaKQ,5003
+ neural_memory/mcp/server.py,sha256=lQ9kfNOU8nkBd8mza506h9N9b08uyBqNkp8AoUT_6dc,26121
  neural_memory/safety/__init__.py,sha256=25_pdeXu-UI3dNsPYiCsC3Rvq26ma-tCd3h1ow7Ny3w,679
  neural_memory/safety/freshness.py,sha256=nwUdybjsDK_8uwa_mJRPflx0XGZXVwDwJCw-LEVyh9c,6748
  neural_memory/safety/sensitive.py,sha256=3dCv951D1JOMo6V4vu11d0OxUFhxCITZ2w05HhJ3CtA,9359
@@ -48,8 +49,8 @@ neural_memory/sync/__init__.py,sha256=epKpdAsHzwUYXrWawlHPckSEgK5CZYb-B29AAcdTnV
  neural_memory/sync/client.py,sha256=kiKqRlteXVe9P-IzgayV7bwdT9Ovu2UgV_XJtEUIbxQ,13421
  neural_memory/utils/__init__.py,sha256=vZM_jKQ_BUUd6jXtz9BGq5A1d-kFRMcMXt96XVhRv2M,135
  neural_memory/utils/config.py,sha256=jgDaHksEibusC2gub1I_USUBOJyP6gBHcfgS7RkL4dc,3086
- neural_memory-0.1.0.dist-info/METADATA,sha256=VWQ5A-nyNmq28EGUP1AWf-kduWj5-4xyIA-6QXBp9K8,10305
- neural_memory-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- neural_memory-0.1.0.dist-info/entry_points.txt,sha256=u0ZGaAD6uU98g0Wheyj1WjrSchm_zlAkyzz1HRbKOnE,121
- neural_memory-0.1.0.dist-info/licenses/LICENSE,sha256=uuCGDPgkW8shclBRpQNK5I0T97ZQy9HHolwo9Qr3Bbc,1082
- neural_memory-0.1.0.dist-info/RECORD,,
+ neural_memory-0.3.0.dist-info/METADATA,sha256=UarxhykY-BWQ-PhREpptBF9MUDnD5ejnHmpxKNGAmKI,10305
+ neural_memory-0.3.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ neural_memory-0.3.0.dist-info/entry_points.txt,sha256=u0ZGaAD6uU98g0Wheyj1WjrSchm_zlAkyzz1HRbKOnE,121
+ neural_memory-0.3.0.dist-info/licenses/LICENSE,sha256=uuCGDPgkW8shclBRpQNK5I0T97ZQy9HHolwo9Qr3Bbc,1082
+ neural_memory-0.3.0.dist-info/RECORD,,