neural-memory 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. neural_memory/__init__.py +38 -0
  2. neural_memory/cli/__init__.py +15 -0
  3. neural_memory/cli/__main__.py +6 -0
  4. neural_memory/cli/config.py +176 -0
  5. neural_memory/cli/main.py +2702 -0
  6. neural_memory/cli/storage.py +169 -0
  7. neural_memory/cli/tui.py +471 -0
  8. neural_memory/core/__init__.py +52 -0
  9. neural_memory/core/brain.py +301 -0
  10. neural_memory/core/brain_mode.py +273 -0
  11. neural_memory/core/fiber.py +236 -0
  12. neural_memory/core/memory_types.py +331 -0
  13. neural_memory/core/neuron.py +168 -0
  14. neural_memory/core/project.py +257 -0
  15. neural_memory/core/synapse.py +215 -0
  16. neural_memory/engine/__init__.py +15 -0
  17. neural_memory/engine/activation.py +335 -0
  18. neural_memory/engine/encoder.py +391 -0
  19. neural_memory/engine/retrieval.py +440 -0
  20. neural_memory/extraction/__init__.py +42 -0
  21. neural_memory/extraction/entities.py +547 -0
  22. neural_memory/extraction/parser.py +337 -0
  23. neural_memory/extraction/router.py +396 -0
  24. neural_memory/extraction/temporal.py +428 -0
  25. neural_memory/mcp/__init__.py +9 -0
  26. neural_memory/mcp/__main__.py +6 -0
  27. neural_memory/mcp/server.py +621 -0
  28. neural_memory/py.typed +0 -0
  29. neural_memory/safety/__init__.py +31 -0
  30. neural_memory/safety/freshness.py +238 -0
  31. neural_memory/safety/sensitive.py +304 -0
  32. neural_memory/server/__init__.py +5 -0
  33. neural_memory/server/app.py +99 -0
  34. neural_memory/server/dependencies.py +33 -0
  35. neural_memory/server/models.py +138 -0
  36. neural_memory/server/routes/__init__.py +7 -0
  37. neural_memory/server/routes/brain.py +221 -0
  38. neural_memory/server/routes/memory.py +169 -0
  39. neural_memory/server/routes/sync.py +387 -0
  40. neural_memory/storage/__init__.py +17 -0
  41. neural_memory/storage/base.py +441 -0
  42. neural_memory/storage/factory.py +329 -0
  43. neural_memory/storage/memory_store.py +896 -0
  44. neural_memory/storage/shared_store.py +650 -0
  45. neural_memory/storage/sqlite_store.py +1613 -0
  46. neural_memory/sync/__init__.py +5 -0
  47. neural_memory/sync/client.py +435 -0
  48. neural_memory/unified_config.py +315 -0
  49. neural_memory/utils/__init__.py +5 -0
  50. neural_memory/utils/config.py +98 -0
  51. neural_memory-0.1.0.dist-info/METADATA +314 -0
  52. neural_memory-0.1.0.dist-info/RECORD +55 -0
  53. neural_memory-0.1.0.dist-info/WHEEL +4 -0
  54. neural_memory-0.1.0.dist-info/entry_points.txt +4 -0
  55. neural_memory-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,440 @@
1
+ """Reflex retrieval pipeline - the main memory retrieval mechanism."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import time
6
+ from dataclasses import dataclass, field
7
+ from datetime import datetime
8
+ from enum import IntEnum
9
+ from typing import TYPE_CHECKING, Any
10
+
11
+ from neural_memory.core.neuron import NeuronType
12
+ from neural_memory.engine.activation import ActivationResult, SpreadingActivation
13
+ from neural_memory.extraction.parser import QueryIntent, QueryParser, Stimulus
14
+
15
+ if TYPE_CHECKING:
16
+ from neural_memory.core.brain import BrainConfig
17
+ from neural_memory.core.fiber import Fiber
18
+ from neural_memory.storage.base import NeuralStorage
19
+
20
+
21
class DepthLevel(IntEnum):
    """
    Depth levels for retrieval queries.

    Ordered from cheapest to most expensive: a higher value means the
    pipeline explores more of the graph but retrieval is slower.
    """

    # Simple fact retrieval: who/where/what, a single hop.
    INSTANT = 0
    # Contextual information: before/after questions, 2-3 hops.
    CONTEXT = 1
    # Recurring patterns discovered across time.
    HABIT = 2
    # Deep analysis: emotions and causality, full exploration.
    DEEP = 3
33
+
34
@dataclass
class Subgraph:
    """
    The slice of the neural graph pulled out by an activation pass.

    Attributes:
        neuron_ids: IDs of the neurons that belong to the subgraph
        synapse_ids: IDs of the synapses connecting those neurons
        anchor_ids: IDs of the anchor neurons the activation started from
    """

    neuron_ids: list[str]
    synapse_ids: list[str]
    anchor_ids: list[str]
49
+
50
@dataclass
class RetrievalResult:
    """
    Outcome of a single retrieval query.

    Attributes:
        answer: Reconstructed answer text, or None if none could be built
        confidence: Confidence in the answer (0.0 - 1.0)
        depth_used: Depth level the pipeline actually used
        neurons_activated: How many neurons were activated
        fibers_matched: IDs of fibers that matched the query
        subgraph: The relevant subgraph that was extracted
        context: Formatted context ready for injection into agent prompts
        latency_ms: Wall-clock retrieval time in milliseconds
        metadata: Additional retrieval metadata (fresh dict per instance)
    """

    answer: str | None
    confidence: float
    depth_used: DepthLevel
    neurons_activated: int
    fibers_matched: list[str]
    subgraph: Subgraph
    context: str
    latency_ms: float
    metadata: dict[str, Any] = field(default_factory=dict)
77
+
78
class ReflexPipeline:
    """
    Main retrieval engine - the "consciousness" of the memory system.

    The reflex pipeline:
    1. Decomposes queries into activation signals (Stimulus)
    2. Finds anchor neurons matching signals
    3. Spreads activation through the graph
    4. Finds intersection points
    5. Extracts relevant subgraph
    6. Reconstitutes answer/context

    This mimics human memory retrieval - associative recall through
    spreading activation rather than database search.
    """

    def __init__(
        self,
        storage: NeuralStorage,
        config: BrainConfig,
        parser: QueryParser | None = None,
    ) -> None:
        """
        Initialize the retrieval pipeline.

        Args:
            storage: Storage backend
            config: Brain configuration
            parser: Custom query parser (creates default if None)
        """
        self._storage = storage
        self._config = config
        self._parser = parser or QueryParser()
        # The spreading-activation engine shares this pipeline's
        # storage backend and brain configuration.
        self._activator = SpreadingActivation(storage, config)
113
    async def query(
        self,
        query: str,
        depth: DepthLevel | None = None,
        max_tokens: int | None = None,
        reference_time: datetime | None = None,
    ) -> RetrievalResult:
        """
        Execute the full retrieval pipeline for a raw query string.

        Args:
            query: The query text
            depth: Retrieval depth (auto-detected from intent if None)
            max_tokens: Maximum tokens in the formatted context
                (falls back to the brain config's limit if None)
            reference_time: Reference time for temporal parsing
                (defaults to the current local time)

        Returns:
            RetrievalResult with answer, confidence, subgraph and context
        """
        # perf_counter is monotonic, so the latency measurement is not
        # affected by wall-clock adjustments.
        start_time = time.perf_counter()

        if max_tokens is None:
            max_tokens = self._config.max_context_tokens

        if reference_time is None:
            # NOTE(review): naive local time - confirm stored timestamps
            # use the same convention so temporal anchors line up.
            reference_time = datetime.now()

        # 1. Parse query into stimulus
        stimulus = self._parser.parse(query, reference_time)

        # 2. Auto-detect depth if not specified
        if depth is None:
            depth = self._detect_depth(stimulus)

        # 3. Find anchor neurons (one id set per signal type)
        anchor_sets = await self._find_anchors(stimulus)

        # 4. Spread activation; hop budget is derived from the depth level
        activations, intersections = await self._activator.activate_from_multiple(
            anchor_sets,
            max_hops=self._depth_to_hops(depth),
        )

        # 5. Find matching fibers
        fibers_matched = await self._find_matching_fibers(activations)

        # 6. Extract subgraph above the configured activation threshold
        neuron_ids, synapse_ids = await self._activator.get_activated_subgraph(
            activations,
            min_activation=self._config.activation_threshold,
            max_neurons=50,
        )

        subgraph = Subgraph(
            neuron_ids=neuron_ids,
            synapse_ids=synapse_ids,
            # Flatten the per-signal anchor sets into a single id list.
            anchor_ids=[a for anchors in anchor_sets for a in anchors],
        )

        # 7. Reconstitute answer and context
        answer, confidence = await self._reconstitute_answer(
            activations,
            intersections,
            stimulus,
        )

        context = await self._format_context(
            activations,
            fibers_matched,
            max_tokens,
        )

        latency_ms = (time.perf_counter() - start_time) * 1000

        return RetrievalResult(
            answer=answer,
            confidence=confidence,
            depth_used=depth,
            neurons_activated=len(activations),
            fibers_matched=[f.id for f in fibers_matched],
            subgraph=subgraph,
            context=context,
            latency_ms=latency_ms,
            metadata={
                "query_intent": stimulus.intent.value,
                "anchors_found": sum(len(a) for a in anchor_sets),
                "intersections": len(intersections),
            },
        )
203
+ def _detect_depth(self, stimulus: Stimulus) -> DepthLevel:
204
+ """Auto-detect required depth from query intent."""
205
+ # Deep questions need full exploration
206
+ if stimulus.intent in (QueryIntent.ASK_WHY, QueryIntent.ASK_FEELING):
207
+ return DepthLevel.DEEP
208
+
209
+ # Pattern questions need cross-time analysis
210
+ if stimulus.intent == QueryIntent.ASK_PATTERN:
211
+ return DepthLevel.HABIT
212
+
213
+ # Contextual questions need some exploration
214
+ if stimulus.intent in (QueryIntent.ASK_HOW, QueryIntent.COMPARE):
215
+ return DepthLevel.CONTEXT
216
+
217
+ # Check for context keywords
218
+ context_words = {"before", "after", "then", "trước", "sau", "rồi"}
219
+ query_words = set(stimulus.raw_query.lower().split())
220
+ if query_words & context_words:
221
+ return DepthLevel.CONTEXT
222
+
223
+ # Simple queries use instant retrieval
224
+ return DepthLevel.INSTANT
225
+
226
+ def _depth_to_hops(self, depth: DepthLevel) -> int:
227
+ """Convert depth level to maximum hops."""
228
+ mapping = {
229
+ DepthLevel.INSTANT: 1,
230
+ DepthLevel.CONTEXT: 3,
231
+ DepthLevel.HABIT: 4,
232
+ DepthLevel.DEEP: self._config.max_spread_hops,
233
+ }
234
+ return mapping.get(depth, 2)
235
+
236
+ async def _find_anchors(self, stimulus: Stimulus) -> list[list[str]]:
237
+ """Find anchor neurons for each signal type."""
238
+ anchor_sets: list[list[str]] = []
239
+
240
+ # Time anchors
241
+ time_anchors: list[str] = []
242
+ for hint in stimulus.time_hints:
243
+ neurons = await self._storage.find_neurons(
244
+ type=NeuronType.TIME,
245
+ time_range=(hint.absolute_start, hint.absolute_end),
246
+ limit=5,
247
+ )
248
+ time_anchors.extend(n.id for n in neurons)
249
+
250
+ if time_anchors:
251
+ anchor_sets.append(time_anchors)
252
+
253
+ # Entity anchors
254
+ entity_anchors: list[str] = []
255
+ for entity in stimulus.entities:
256
+ neurons = await self._storage.find_neurons(
257
+ content_contains=entity.text,
258
+ limit=3,
259
+ )
260
+ entity_anchors.extend(n.id for n in neurons)
261
+
262
+ if entity_anchors:
263
+ anchor_sets.append(entity_anchors)
264
+
265
+ # Keyword anchors
266
+ keyword_anchors: list[str] = []
267
+ for keyword in stimulus.keywords[:5]: # Limit keywords
268
+ neurons = await self._storage.find_neurons(
269
+ content_contains=keyword,
270
+ limit=2,
271
+ )
272
+ keyword_anchors.extend(n.id for n in neurons)
273
+
274
+ if keyword_anchors:
275
+ anchor_sets.append(keyword_anchors)
276
+
277
+ return anchor_sets
278
+
279
+ async def _find_matching_fibers(
280
+ self,
281
+ activations: dict[str, ActivationResult],
282
+ ) -> list[Fiber]:
283
+ """Find fibers that contain activated neurons."""
284
+ fibers: list[Fiber] = []
285
+ seen_fiber_ids: set[str] = set()
286
+
287
+ # Get highly activated neurons
288
+ top_neurons = sorted(
289
+ activations.values(),
290
+ key=lambda a: a.activation_level,
291
+ reverse=True,
292
+ )[:20]
293
+
294
+ for activation in top_neurons:
295
+ matching = await self._storage.find_fibers(
296
+ contains_neuron=activation.neuron_id,
297
+ limit=3,
298
+ )
299
+
300
+ for fiber in matching:
301
+ if fiber.id not in seen_fiber_ids:
302
+ fibers.append(fiber)
303
+ seen_fiber_ids.add(fiber.id)
304
+
305
+ # Sort by salience
306
+ fibers.sort(key=lambda f: f.salience, reverse=True)
307
+
308
+ return fibers[:10] # Limit to top 10
309
+
310
+ async def _reconstitute_answer(
311
+ self,
312
+ activations: dict[str, ActivationResult],
313
+ intersections: list[str],
314
+ stimulus: Stimulus,
315
+ ) -> tuple[str | None, float]:
316
+ """
317
+ Attempt to reconstitute an answer from activated neurons.
318
+
319
+ Returns (answer_text, confidence)
320
+ """
321
+ if not activations:
322
+ return None, 0.0
323
+
324
+ # Find the most relevant neurons
325
+ candidates: list[tuple[str, float]] = []
326
+
327
+ # Prioritize intersection neurons
328
+ for neuron_id in intersections:
329
+ if neuron_id in activations:
330
+ candidates.append((neuron_id, activations[neuron_id].activation_level * 1.5))
331
+
332
+ # Add highly activated neurons
333
+ for neuron_id, result in activations.items():
334
+ if neuron_id not in intersections:
335
+ candidates.append((neuron_id, result.activation_level))
336
+
337
+ # Sort by score
338
+ candidates.sort(key=lambda x: x[1], reverse=True)
339
+
340
+ if not candidates:
341
+ return None, 0.0
342
+
343
+ # Get the top neuron's content as answer
344
+ top_neuron_id = candidates[0][0]
345
+ top_neuron = await self._storage.get_neuron(top_neuron_id)
346
+
347
+ if top_neuron is None:
348
+ return None, 0.0
349
+
350
+ # Confidence based on activation and intersection count
351
+ confidence = min(1.0, candidates[0][1])
352
+ if intersections:
353
+ confidence = min(1.0, confidence + 0.1 * len(intersections))
354
+
355
+ return top_neuron.content, confidence
356
+
357
    async def _format_context(
        self,
        activations: dict[str, ActivationResult],
        fibers: list[Fiber],
        max_tokens: int,
    ) -> str:
        """Format activated memories into context for agent injection.

        The token budget is approximated by whitespace word count; the
        section headers themselves are not counted against the budget.
        """
        lines: list[str] = []
        token_estimate = 0

        # Add fiber summaries first
        if fibers:
            lines.append("## Relevant Memories\n")

            for fiber in fibers[:5]:
                if fiber.summary:
                    line = f"- {fiber.summary}"
                else:
                    # No summary: fall back to the fiber's anchor neuron.
                    anchor = await self._storage.get_neuron(fiber.anchor_neuron_id)
                    if anchor:
                        line = f"- {anchor.content}"
                    else:
                        continue

                # Budget check happens before append: a line that would
                # overflow is counted but never emitted.
                token_estimate += len(line.split())
                if token_estimate > max_tokens:
                    break

                lines.append(line)

        # Add individual activated neurons
        if token_estimate < max_tokens:
            # NOTE(review): this header is emitted even when every candidate
            # below ends up skipped - confirm that output is acceptable.
            lines.append("\n## Related Information\n")

            sorted_activations = sorted(
                activations.values(),
                key=lambda a: a.activation_level,
                reverse=True,
            )

            # Cap at the 20 most strongly activated neurons.
            for result in sorted_activations[:20]:
                neuron = await self._storage.get_neuron(result.neuron_id)
                if neuron is None:
                    continue

                # Skip time neurons in context (they're implicit)
                if neuron.type == NeuronType.TIME:
                    continue

                line = f"- [{neuron.type.value}] {neuron.content}"
                token_estimate += len(line.split())

                if token_estimate > max_tokens:
                    break

                lines.append(line)

        return "\n".join(lines)
416
    async def query_with_stimulus(
        self,
        stimulus: Stimulus,
        depth: DepthLevel | None = None,
        max_tokens: int | None = None,
    ) -> RetrievalResult:
        """
        Execute retrieval with a pre-parsed stimulus.

        Useful when you want to control the parsing or reuse a stimulus.

        Args:
            stimulus: Pre-parsed stimulus
            depth: Retrieval depth
            max_tokens: Maximum tokens in context

        Returns:
            RetrievalResult
        """
        # NOTE(review): only the raw query text is forwarded, so query()
        # re-parses it from scratch (with a fresh reference time) and any
        # other pre-parsed data carried by the stimulus is discarded.
        # Confirm this is intended before relying on custom-parsed stimuli.
        return await self.query(
            stimulus.raw_query,
            depth=depth,
            max_tokens=max_tokens,
        )
@@ -0,0 +1,42 @@
1
+ """Extraction modules for parsing queries and content."""
2
+
3
+ from neural_memory.extraction.entities import Entity, EntityExtractor
4
+ from neural_memory.extraction.parser import (
5
+ Perspective,
6
+ QueryIntent,
7
+ QueryParser,
8
+ Stimulus,
9
+ )
10
+ from neural_memory.extraction.router import (
11
+ QueryRouter,
12
+ QueryType,
13
+ RouteConfidence,
14
+ RouteDecision,
15
+ route_query,
16
+ )
17
+ from neural_memory.extraction.temporal import (
18
+ TemporalExtractor,
19
+ TimeGranularity,
20
+ TimeHint,
21
+ )
22
+
23
+ __all__ = [
24
+ # Temporal
25
+ "TimeHint",
26
+ "TimeGranularity",
27
+ "TemporalExtractor",
28
+ # Parser
29
+ "Stimulus",
30
+ "QueryIntent",
31
+ "Perspective",
32
+ "QueryParser",
33
+ # Router (MemoCore integration)
34
+ "QueryRouter",
35
+ "QueryType",
36
+ "RouteConfidence",
37
+ "RouteDecision",
38
+ "route_query",
39
+ # Entities
40
+ "Entity",
41
+ "EntityExtractor",
42
+ ]