neural-memory 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neural_memory/__init__.py +38 -0
- neural_memory/cli/__init__.py +15 -0
- neural_memory/cli/__main__.py +6 -0
- neural_memory/cli/config.py +176 -0
- neural_memory/cli/main.py +2702 -0
- neural_memory/cli/storage.py +169 -0
- neural_memory/cli/tui.py +471 -0
- neural_memory/core/__init__.py +52 -0
- neural_memory/core/brain.py +301 -0
- neural_memory/core/brain_mode.py +273 -0
- neural_memory/core/fiber.py +236 -0
- neural_memory/core/memory_types.py +331 -0
- neural_memory/core/neuron.py +168 -0
- neural_memory/core/project.py +257 -0
- neural_memory/core/synapse.py +215 -0
- neural_memory/engine/__init__.py +15 -0
- neural_memory/engine/activation.py +335 -0
- neural_memory/engine/encoder.py +391 -0
- neural_memory/engine/retrieval.py +440 -0
- neural_memory/extraction/__init__.py +42 -0
- neural_memory/extraction/entities.py +547 -0
- neural_memory/extraction/parser.py +337 -0
- neural_memory/extraction/router.py +396 -0
- neural_memory/extraction/temporal.py +428 -0
- neural_memory/mcp/__init__.py +9 -0
- neural_memory/mcp/__main__.py +6 -0
- neural_memory/mcp/server.py +621 -0
- neural_memory/py.typed +0 -0
- neural_memory/safety/__init__.py +31 -0
- neural_memory/safety/freshness.py +238 -0
- neural_memory/safety/sensitive.py +304 -0
- neural_memory/server/__init__.py +5 -0
- neural_memory/server/app.py +99 -0
- neural_memory/server/dependencies.py +33 -0
- neural_memory/server/models.py +138 -0
- neural_memory/server/routes/__init__.py +7 -0
- neural_memory/server/routes/brain.py +221 -0
- neural_memory/server/routes/memory.py +169 -0
- neural_memory/server/routes/sync.py +387 -0
- neural_memory/storage/__init__.py +17 -0
- neural_memory/storage/base.py +441 -0
- neural_memory/storage/factory.py +329 -0
- neural_memory/storage/memory_store.py +896 -0
- neural_memory/storage/shared_store.py +650 -0
- neural_memory/storage/sqlite_store.py +1613 -0
- neural_memory/sync/__init__.py +5 -0
- neural_memory/sync/client.py +435 -0
- neural_memory/unified_config.py +315 -0
- neural_memory/utils/__init__.py +5 -0
- neural_memory/utils/config.py +98 -0
- neural_memory-0.1.0.dist-info/METADATA +314 -0
- neural_memory-0.1.0.dist-info/RECORD +55 -0
- neural_memory-0.1.0.dist-info/WHEEL +4 -0
- neural_memory-0.1.0.dist-info/entry_points.txt +4 -0
- neural_memory-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
"""Spreading activation algorithm for memory retrieval."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import heapq
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from neural_memory.core.brain import BrainConfig
|
|
12
|
+
from neural_memory.storage.base import NeuralStorage
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class ActivationResult:
    """
    Result of activating a neuron through spreading activation.

    One instance is produced per reached neuron; when a neuron is
    reachable along several paths, the strongest activation wins.

    Attributes:
        neuron_id: The activated neuron's ID
        activation_level: Final activation level (0.0 - 1.0)
        hop_distance: Number of hops from the nearest anchor
        path: List of neuron IDs showing how we reached this neuron
        source_anchor: The anchor neuron that led to this activation
    """

    neuron_id: str
    activation_level: float
    hop_distance: int
    path: list[str]
    source_anchor: str
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class ActivationState:
    """Bookkeeping record for one frontier entry while activation spreads."""

    neuron_id: str
    level: float
    hops: int
    path: list[str]
    source: str

    def __lt__(self, other: ActivationState) -> bool:
        """Invert natural ordering so heapq pops the strongest activation first."""
        return other.level < self.level
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class SpreadingActivation:
    """
    Spreading activation algorithm for neural memory retrieval.

    Starting from anchor neurons, activation is pushed outward along
    synapses and decays with every hop; neurons that end up with the
    highest activation are the memories most related to the query.
    """

    def __init__(
        self,
        storage: NeuralStorage,
        config: BrainConfig,
    ) -> None:
        """
        Initialize the activation system.

        Args:
            storage: Storage backend to read graph from
            config: Brain configuration for parameters
        """
        # Kept as private attributes; the engine only ever reads from them.
        self._config = config
        self._storage = storage
|
|
73
|
+
|
|
74
|
+
    async def activate(
        self,
        anchor_neurons: list[str],
        max_hops: int | None = None,
        decay_factor: float = 0.5,
        min_activation: float | None = None,
    ) -> dict[str, ActivationResult]:
        """
        Spread activation from anchor neurons through the graph.

        The activation spreads through synapses, with the level
        decaying at each hop:
            activation(hop) = initial * decay_factor^hop * synapse_weight

        Args:
            anchor_neurons: Starting neurons with activation = 1.0;
                IDs not found in storage are silently skipped
            max_hops: Maximum number of hops (default: from config's
                ``max_spread_hops``)
            decay_factor: How much activation decays per hop
            min_activation: Minimum activation to continue spreading
                (default: from config's ``activation_threshold``)

        Returns:
            Dict mapping neuron_id to ActivationResult, including the
            anchors themselves at activation 1.0. For neurons reachable
            along several paths, the strongest activation is kept.
        """
        # Fall back to brain-level configuration for unspecified limits.
        if max_hops is None:
            max_hops = self._config.max_spread_hops

        if min_activation is None:
            min_activation = self._config.activation_threshold

        # Best activation recorded so far for each neuron.
        results: dict[str, ActivationResult] = {}

        # Max-heap frontier (ActivationState.__lt__ inverts the order) so
        # the strongest pending activation is always expanded first.
        queue: list[ActivationState] = []

        # Seed the frontier: each anchor that exists in storage starts at
        # full activation with a single-element path.
        for anchor_id in anchor_neurons:
            neuron = await self._storage.get_neuron(anchor_id)
            if neuron is None:
                continue

            state = ActivationState(
                neuron_id=anchor_id,
                level=1.0,
                hops=0,
                path=[anchor_id],
                source=anchor_id,
            )
            heapq.heappush(queue, state)

            # Anchors are part of the result set at full activation.
            results[anchor_id] = ActivationResult(
                neuron_id=anchor_id,
                activation_level=1.0,
                hop_distance=0,
                path=[anchor_id],
                source_anchor=anchor_id,
            )

        # Visited is keyed by (neuron_id, source anchor) rather than just
        # neuron_id, so the same neuron may still be expanded once per
        # distinct anchor it was reached from.
        visited: set[tuple[str, str]] = set()

        # Best-first expansion of the frontier.
        while queue:
            current = heapq.heappop(queue)

            # Each (neuron, source) pair is expanded at most once; because
            # the heap pops the highest level first, the first expansion is
            # also the strongest one for that pair.
            visit_key = (current.neuron_id, current.source)
            if visit_key in visited:
                continue
            visited.add(visit_key)

            # Hop budget exhausted: record-only node, do not expand further.
            if current.hops >= max_hops:
                continue

            # Traverse synapses in both directions; very weak links
            # (weight < 0.1) are pruned at the storage layer.
            neighbors = await self._storage.get_neighbors(
                current.neuron_id,
                direction="both",
                min_weight=0.1,
            )

            for neighbor_neuron, synapse in neighbors:
                # One hop of decay, scaled by the synapse strength.
                new_level = current.level * decay_factor * synapse.weight

                # Too faint to matter — stop spreading along this edge.
                if new_level < min_activation:
                    continue

                new_path = [*current.path, neighbor_neuron.id]

                # Keep only the strongest activation per neuron.
                existing = results.get(neighbor_neuron.id)
                if existing is None or new_level > existing.activation_level:
                    results[neighbor_neuron.id] = ActivationResult(
                        neuron_id=neighbor_neuron.id,
                        activation_level=new_level,
                        hop_distance=current.hops + 1,
                        path=new_path,
                        source_anchor=current.source,
                    )

                # Queue the neighbor for further spreading from the same
                # source anchor (the visited set deduplicates on pop).
                new_state = ActivationState(
                    neuron_id=neighbor_neuron.id,
                    level=new_level,
                    hops=current.hops + 1,
                    path=new_path,
                    source=current.source,
                )
                heapq.heappush(queue, new_state)

        return results
|
|
189
|
+
|
|
190
|
+
    async def activate_from_multiple(
        self,
        anchor_sets: list[list[str]],
        max_hops: int | None = None,
    ) -> tuple[dict[str, ActivationResult], list[str]]:
        """
        Activate from multiple anchor sets and find intersections.

        This is useful when a query has multiple constraints (e.g.,
        time + entity). Neurons activated by multiple anchor sets
        are likely to be more relevant, and their combined activation
        is boosted accordingly.

        Args:
            anchor_sets: List of anchor neuron lists; empty inner lists
                are ignored
            max_hops: Maximum hops for each activation

        Returns:
            Tuple of (combined activations, intersection neuron IDs)
        """
        if not anchor_sets:
            return {}, []

        # Run a full spreading-activation pass per non-empty anchor set.
        activation_results: list[dict[str, ActivationResult]] = []
        for anchors in anchor_sets:
            if anchors:
                result = await self.activate(anchors, max_hops)
                activation_results.append(result)

        if not activation_results:
            return {}, []

        # With a single set there is nothing to intersect; every activated
        # neuron trivially satisfies all (one) constraints.
        if len(activation_results) == 1:
            return activation_results[0], list(activation_results[0].keys())

        # Neurons reached by more than one anchor set.
        intersection = self._find_intersection(activation_results)

        # Merge per-set results; intersection neurons get a boosted level.
        combined: dict[str, ActivationResult] = {}

        for result_set in activation_results:
            for neuron_id, activation in result_set.items():
                existing = combined.get(neuron_id)

                if existing is None:
                    combined[neuron_id] = activation
                else:
                    # Combine activations: take the max, but boost
                    # intersection neurons additively.
                    if neuron_id in intersection:
                        # Boost: add half of the new activation on top of
                        # the existing one, capped at 1.0.
                        new_level = min(
                            1.0, existing.activation_level + activation.activation_level * 0.5
                        )
                    else:
                        new_level = max(existing.activation_level, activation.activation_level)

                    # Keep the shortest hop distance and the path that
                    # achieved it; the first-seen source anchor is retained.
                    combined[neuron_id] = ActivationResult(
                        neuron_id=neuron_id,
                        activation_level=new_level,
                        hop_distance=min(existing.hop_distance, activation.hop_distance),
                        path=existing.path
                        if existing.hop_distance <= activation.hop_distance
                        else activation.path,
                        source_anchor=existing.source_anchor,
                    )

        return combined, intersection
|
|
258
|
+
|
|
259
|
+
def _find_intersection(
|
|
260
|
+
self,
|
|
261
|
+
activation_sets: list[dict[str, ActivationResult]],
|
|
262
|
+
) -> list[str]:
|
|
263
|
+
"""
|
|
264
|
+
Find neurons activated by multiple anchor sets.
|
|
265
|
+
|
|
266
|
+
Args:
|
|
267
|
+
activation_sets: List of activation results from different anchor sets
|
|
268
|
+
|
|
269
|
+
Returns:
|
|
270
|
+
List of neuron IDs appearing in multiple sets, sorted by
|
|
271
|
+
combined activation level
|
|
272
|
+
"""
|
|
273
|
+
if not activation_sets:
|
|
274
|
+
return []
|
|
275
|
+
|
|
276
|
+
# Count appearances and sum activations
|
|
277
|
+
appearances: dict[str, int] = defaultdict(int)
|
|
278
|
+
total_activation: dict[str, float] = defaultdict(float)
|
|
279
|
+
|
|
280
|
+
for result_set in activation_sets:
|
|
281
|
+
for neuron_id, activation in result_set.items():
|
|
282
|
+
appearances[neuron_id] += 1
|
|
283
|
+
total_activation[neuron_id] += activation.activation_level
|
|
284
|
+
|
|
285
|
+
# Find neurons in multiple sets
|
|
286
|
+
multi_set_neurons = [
|
|
287
|
+
(neuron_id, total_activation[neuron_id], count)
|
|
288
|
+
for neuron_id, count in appearances.items()
|
|
289
|
+
if count > 1
|
|
290
|
+
]
|
|
291
|
+
|
|
292
|
+
# Sort by count (descending) then activation (descending)
|
|
293
|
+
multi_set_neurons.sort(key=lambda x: (x[2], x[1]), reverse=True)
|
|
294
|
+
|
|
295
|
+
return [n[0] for n in multi_set_neurons]
|
|
296
|
+
|
|
297
|
+
async def get_activated_subgraph(
|
|
298
|
+
self,
|
|
299
|
+
activations: dict[str, ActivationResult],
|
|
300
|
+
min_activation: float = 0.2,
|
|
301
|
+
max_neurons: int = 50,
|
|
302
|
+
) -> tuple[list[str], list[str]]:
|
|
303
|
+
"""
|
|
304
|
+
Get the subgraph of activated neurons and their connections.
|
|
305
|
+
|
|
306
|
+
Args:
|
|
307
|
+
activations: Activation results
|
|
308
|
+
min_activation: Minimum activation to include
|
|
309
|
+
max_neurons: Maximum neurons to include
|
|
310
|
+
|
|
311
|
+
Returns:
|
|
312
|
+
Tuple of (neuron_ids, synapse_ids) in the subgraph
|
|
313
|
+
"""
|
|
314
|
+
# Filter and sort by activation
|
|
315
|
+
filtered = [
|
|
316
|
+
(neuron_id, result)
|
|
317
|
+
for neuron_id, result in activations.items()
|
|
318
|
+
if result.activation_level >= min_activation
|
|
319
|
+
]
|
|
320
|
+
filtered.sort(key=lambda x: x[1].activation_level, reverse=True)
|
|
321
|
+
|
|
322
|
+
# Take top neurons
|
|
323
|
+
selected_neurons = [n[0] for n in filtered[:max_neurons]]
|
|
324
|
+
selected_set = set(selected_neurons)
|
|
325
|
+
|
|
326
|
+
# Find synapses connecting selected neurons
|
|
327
|
+
synapse_ids: list[str] = []
|
|
328
|
+
|
|
329
|
+
for neuron_id in selected_neurons:
|
|
330
|
+
synapses = await self._storage.get_synapses(source_id=neuron_id)
|
|
331
|
+
for synapse in synapses:
|
|
332
|
+
if synapse.target_id in selected_set:
|
|
333
|
+
synapse_ids.append(synapse.id)
|
|
334
|
+
|
|
335
|
+
return selected_neurons, synapse_ids
|