neural-memory 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- neural_memory/__init__.py +38 -0
- neural_memory/cli/__init__.py +15 -0
- neural_memory/cli/__main__.py +6 -0
- neural_memory/cli/config.py +176 -0
- neural_memory/cli/main.py +2702 -0
- neural_memory/cli/storage.py +169 -0
- neural_memory/cli/tui.py +471 -0
- neural_memory/core/__init__.py +52 -0
- neural_memory/core/brain.py +301 -0
- neural_memory/core/brain_mode.py +273 -0
- neural_memory/core/fiber.py +236 -0
- neural_memory/core/memory_types.py +331 -0
- neural_memory/core/neuron.py +168 -0
- neural_memory/core/project.py +257 -0
- neural_memory/core/synapse.py +215 -0
- neural_memory/engine/__init__.py +15 -0
- neural_memory/engine/activation.py +335 -0
- neural_memory/engine/encoder.py +391 -0
- neural_memory/engine/retrieval.py +440 -0
- neural_memory/extraction/__init__.py +42 -0
- neural_memory/extraction/entities.py +547 -0
- neural_memory/extraction/parser.py +337 -0
- neural_memory/extraction/router.py +396 -0
- neural_memory/extraction/temporal.py +428 -0
- neural_memory/mcp/__init__.py +9 -0
- neural_memory/mcp/__main__.py +6 -0
- neural_memory/mcp/server.py +621 -0
- neural_memory/py.typed +0 -0
- neural_memory/safety/__init__.py +31 -0
- neural_memory/safety/freshness.py +238 -0
- neural_memory/safety/sensitive.py +304 -0
- neural_memory/server/__init__.py +5 -0
- neural_memory/server/app.py +99 -0
- neural_memory/server/dependencies.py +33 -0
- neural_memory/server/models.py +138 -0
- neural_memory/server/routes/__init__.py +7 -0
- neural_memory/server/routes/brain.py +221 -0
- neural_memory/server/routes/memory.py +169 -0
- neural_memory/server/routes/sync.py +387 -0
- neural_memory/storage/__init__.py +17 -0
- neural_memory/storage/base.py +441 -0
- neural_memory/storage/factory.py +329 -0
- neural_memory/storage/memory_store.py +896 -0
- neural_memory/storage/shared_store.py +650 -0
- neural_memory/storage/sqlite_store.py +1613 -0
- neural_memory/sync/__init__.py +5 -0
- neural_memory/sync/client.py +435 -0
- neural_memory/unified_config.py +315 -0
- neural_memory/utils/__init__.py +5 -0
- neural_memory/utils/config.py +98 -0
- neural_memory-0.1.0.dist-info/METADATA +314 -0
- neural_memory-0.1.0.dist-info/RECORD +55 -0
- neural_memory-0.1.0.dist-info/WHEEL +4 -0
- neural_memory-0.1.0.dist-info/entry_points.txt +4 -0
- neural_memory-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
"""Memory freshness evaluation for Neural Memory.
|
|
2
|
+
|
|
3
|
+
Evaluates how fresh/stale memories are and provides warnings
|
|
4
|
+
for potentially outdated information.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from enum import StrEnum
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class FreshnessLevel(StrEnum):
    """Freshness levels for memories.

    Ordered from newest to oldest; the day ranges in the member comments
    reflect the module's DEFAULT_THRESHOLDS as used by evaluate_freshness.
    """

    FRESH = "fresh"  # < 7 days
    RECENT = "recent"  # 7-30 days
    AGING = "aging"  # 30-90 days
    STALE = "stale"  # 90-365 days
    ANCIENT = "ancient"  # > 365 days
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class FreshnessResult:
    """Result of freshness evaluation for a single memory."""

    # Bucket the memory falls into (fresh .. ancient).
    level: FreshnessLevel
    # Whole days elapsed since creation (clamped to >= 0 by the evaluator).
    age_days: int
    # Human-readable caution message; None for fresh/recent memories.
    warning: str | None
    # True when the memory is old enough that it should be re-verified.
    should_verify: bool
    score: float  # 0.0 (ancient) to 1.0 (fresh)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
# Default thresholds in days. Each level maps to the *exclusive upper bound*
# of its age range; ANCIENT is unbounded and therefore has no entry.
DEFAULT_THRESHOLDS = {
    FreshnessLevel.FRESH: 7,
    FreshnessLevel.RECENT: 30,
    FreshnessLevel.AGING: 90,
    FreshnessLevel.STALE: 365,
}
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def evaluate_freshness(
    created_at: datetime,
    reference_time: datetime | None = None,
    thresholds: dict[FreshnessLevel, int] | None = None,
) -> FreshnessResult:
    """
    Evaluate the freshness of a memory.

    Args:
        created_at: When the memory was created (naive or timezone-aware).
        reference_time: Reference time for comparison (default: now, with
            the same timezone-awareness as ``created_at``).
        thresholds: Custom upper bounds in days for FRESH/RECENT/AGING/STALE.

    Returns:
        FreshnessResult with level, age, warning, verify flag, and score.
    """
    if reference_time is None:
        # Bug fix: a naive datetime.now() raises TypeError when subtracted
        # from a timezone-aware created_at. Mirroring created_at.tzinfo keeps
        # naive inputs behaving exactly as before while supporting aware ones.
        reference_time = datetime.now(tz=created_at.tzinfo)

    if thresholds is None:
        thresholds = DEFAULT_THRESHOLDS

    # Clamp at zero so future-dated memories count as brand new.
    age_days = max(0, (reference_time - created_at).days)

    # Walk the buckets from newest to oldest; the first threshold the age
    # falls under wins. Score is a fixed per-bucket value, not interpolated.
    if age_days < thresholds[FreshnessLevel.FRESH]:
        level = FreshnessLevel.FRESH
        score = 1.0
        warning = None
        should_verify = False
    elif age_days < thresholds[FreshnessLevel.RECENT]:
        level = FreshnessLevel.RECENT
        score = 0.8
        warning = None
        should_verify = False
    elif age_days < thresholds[FreshnessLevel.AGING]:
        level = FreshnessLevel.AGING
        score = 0.5
        warning = f"[~] This memory is {age_days} days old - information may have changed"
        should_verify = True
    elif age_days < thresholds[FreshnessLevel.STALE]:
        level = FreshnessLevel.STALE
        score = 0.3
        warning = f"[!] STALE: This memory is {age_days} days old - verify before using"
        should_verify = True
    else:
        level = FreshnessLevel.ANCIENT
        score = 0.1
        warning = f"[!!] ANCIENT: This memory is {age_days} days old ({age_days // 365} years) - likely outdated"
        should_verify = True

    return FreshnessResult(
        level=level,
        age_days=age_days,
        warning=warning,
        should_verify=should_verify,
        score=score,
    )
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def get_freshness_warning(
    created_at: datetime,
    reference_time: datetime | None = None,
) -> str | None:
    """Return a staleness warning for a memory, or None when it is fresh.

    Args:
        created_at: When the memory was created.
        reference_time: Reference time for comparison (defaults to now).

    Returns:
        Warning message or None if fresh.
    """
    # Thin convenience wrapper: delegate the full evaluation and surface
    # only the warning text.
    return evaluate_freshness(created_at, reference_time).warning
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def format_age(age_days: int) -> str:
    """Render an age in days as a human-readable relative phrase.

    Examples: "today", "yesterday", "3 days ago", "2 weeks ago",
    "1 month ago", "4 years ago".
    """

    def _ago(count: int, unit: str) -> str:
        # Pluralize only when the count exceeds one ("1 week", "2 weeks").
        suffix = "s" if count > 1 else ""
        return f"{count} {unit}{suffix} ago"

    if age_days == 0:
        return "today"
    if age_days == 1:
        return "yesterday"
    if age_days < 7:
        return f"{age_days} days ago"
    if age_days < 30:
        return _ago(age_days // 7, "week")
    if age_days < 365:
        return _ago(age_days // 30, "month")
    return _ago(age_days // 365, "year")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def get_freshness_indicator(level: FreshnessLevel, use_ascii: bool = True) -> str:
    """Get a visual indicator for freshness level.

    Args:
        level: Freshness level
        use_ascii: Use ASCII characters instead of emojis (for Windows compatibility)

    Returns:
        A short marker string; "[ ]" for unknown levels.
    """
    ascii_marks = {
        FreshnessLevel.FRESH: "[+]",
        FreshnessLevel.RECENT: "[+]",
        FreshnessLevel.AGING: "[~]",
        FreshnessLevel.STALE: "[!]",
        FreshnessLevel.ANCIENT: "[!!]",
    }
    letter_marks = {
        FreshnessLevel.FRESH: "G",
        FreshnessLevel.RECENT: "G",
        FreshnessLevel.AGING: "Y",
        FreshnessLevel.STALE: "O",
        FreshnessLevel.ANCIENT: "R",
    }
    table = ascii_marks if use_ascii else letter_marks
    return table.get(level, "[ ]")
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
@dataclass
class MemoryFreshnessReport:
    """Report on memory freshness for a set of memories."""

    # Number of memories analyzed.
    total: int
    # Per-bucket counts.
    fresh: int
    recent: int
    aging: int
    stale: int
    ancient: int
    # Aggregate age statistics in days.
    average_age_days: float
    oldest_days: int
    newest_days: int

    def summary(self) -> str:
        """Render the bucket counts and average age as a multi-line string."""
        buckets = [
            (" 🟢 Fresh (<7d):", self.fresh),
            (" 🟢 Recent (7-30d):", self.recent),
            (" 🟡 Aging (30-90d):", self.aging),
            (" 🟠 Stale (90-365d):", self.stale),
            (" 🔴 Ancient (>365d):", self.ancient),
        ]
        out = [f"Total memories: {self.total}"]
        out.extend(f"{label} {count}" for label, count in buckets)
        out.append(f"Average age: {self.average_age_days:.1f} days")
        return "\n".join(out)
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def analyze_freshness(
    created_dates: list[datetime],
    reference_time: datetime | None = None,
) -> MemoryFreshnessReport:
    """
    Analyze freshness of a collection of memories.

    Args:
        created_dates: List of creation timestamps.
        reference_time: Reference time for comparison.

    Returns:
        MemoryFreshnessReport with per-bucket counts and age statistics.
    """
    if not created_dates:
        # Empty input: an all-zero report, which also sidesteps the
        # division by len(ages) below.
        return MemoryFreshnessReport(
            total=0,
            fresh=0,
            recent=0,
            aging=0,
            stale=0,
            ancient=0,
            average_age_days=0,
            oldest_days=0,
            newest_days=0,
        )

    evaluations = [evaluate_freshness(ts, reference_time) for ts in created_dates]
    levels = [e.level for e in evaluations]
    ages = [e.age_days for e in evaluations]

    return MemoryFreshnessReport(
        total=len(evaluations),
        fresh=levels.count(FreshnessLevel.FRESH),
        recent=levels.count(FreshnessLevel.RECENT),
        aging=levels.count(FreshnessLevel.AGING),
        stale=levels.count(FreshnessLevel.STALE),
        ancient=levels.count(FreshnessLevel.ANCIENT),
        average_age_days=sum(ages) / len(ages),
        oldest_days=max(ages),
        newest_days=min(ages),
    )
|
|
@@ -0,0 +1,304 @@
|
|
|
1
|
+
"""Sensitive content detection for Neural Memory.
|
|
2
|
+
|
|
3
|
+
Detects potentially sensitive information like:
|
|
4
|
+
- API keys and secrets
|
|
5
|
+
- Passwords and tokens
|
|
6
|
+
- Personal identifiable information (PII)
|
|
7
|
+
- Credit card numbers
|
|
8
|
+
- Private keys
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import re
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
from enum import StrEnum
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class SensitiveType(StrEnum):
    """Types of sensitive content."""

    # Credentials and secrets
    API_KEY = "api_key"
    PASSWORD = "password"
    SECRET = "secret"
    TOKEN = "token"
    PRIVATE_KEY = "private_key"
    # Personally identifiable / financial information
    CREDIT_CARD = "credit_card"
    SSN = "ssn"
    EMAIL = "email"
    PHONE = "phone"
    # Provider- and format-specific credentials
    AWS_KEY = "aws_key"
    DATABASE_URL = "database_url"
    JWT = "jwt"
    # Heuristic catch-all (long base64/hex strings etc.)
    GENERIC_SECRET = "generic_secret"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@dataclass(frozen=True)
class SensitivePattern:
    """A pattern for detecting sensitive content."""

    # Human-readable name shown in warnings (e.g. "AWS Access Key").
    name: str
    # Regular-expression source string; compiled by the checker at scan time.
    pattern: str
    # Category of sensitive content this pattern detects.
    type: SensitiveType
    # Short explanation of what the pattern matches.
    description: str
    severity: int = 1  # 1=low, 2=medium, 3=high
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class SensitiveMatch:
    """A match found in content."""

    # Name of the pattern that fired.
    pattern_name: str
    # Exact text that matched (the raw secret).
    matched_text: str
    type: SensitiveType
    severity: int
    # Character offsets of the match within the scanned content.
    start: int
    end: int

    def redacted(self) -> str:
        """Return the matched text with its middle masked by asterisks.

        Matches of eight characters or fewer are fully masked; longer
        matches keep the first and last four characters visible so the
        secret stays identifiable without being disclosed.
        """
        text = self.matched_text
        if len(text) <= 8:
            return "*" * len(text)
        mask = "*" * (len(text) - 8)
        return f"{text[:4]}{mask}{text[-4:]}"
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_default_patterns() -> list[SensitivePattern]:
    """Get default sensitive content patterns.

    Returns:
        Built-in patterns covering credentials (API keys, secrets,
        passwords, tokens, AWS keys), database connection strings,
        private-key headers, JWTs, basic PII (credit cards, SSNs), and
        low-severity heuristics for long base64/hex strings.
    """
    return [
        # API Keys and Secrets — key/value assignments like `api_key = "..."`.
        SensitivePattern(
            name="Generic API Key",
            pattern=r"(?i)(api[_-]?key|apikey)\s*[=:]\s*['\"]?([a-zA-Z0-9_\-]{16,})['\"]?",
            type=SensitiveType.API_KEY,
            description="Generic API key assignment",
            severity=3,
        ),
        SensitivePattern(
            name="Generic Secret",
            pattern=r"(?i)(secret|secret[_-]?key)\s*[=:]\s*['\"]?([a-zA-Z0-9_\-]{16,})['\"]?",
            type=SensitiveType.SECRET,
            description="Generic secret assignment",
            severity=3,
        ),
        SensitivePattern(
            name="Generic Password",
            # Short minimum length (4) — intentionally aggressive for passwords.
            pattern=r"(?i)(password|passwd|pwd)\s*[=:]\s*['\"]?([^\s'\"]{4,})['\"]?",
            type=SensitiveType.PASSWORD,
            description="Password assignment",
            severity=3,
        ),
        SensitivePattern(
            name="Generic Token",
            pattern=r"(?i)(token|auth[_-]?token|access[_-]?token|bearer)\s*[=:]\s*['\"]?([a-zA-Z0-9_\-\.]{16,})['\"]?",
            type=SensitiveType.TOKEN,
            description="Auth token assignment",
            severity=3,
        ),
        # AWS — fixed-format access key IDs (AKIA...) and 40-char secret keys.
        SensitivePattern(
            name="AWS Access Key",
            pattern=r"(?i)aws[_-]?access[_-]?key[_-]?id\s*[=:]\s*['\"]?(AKIA[0-9A-Z]{16})['\"]?",
            type=SensitiveType.AWS_KEY,
            description="AWS Access Key ID",
            severity=3,
        ),
        SensitivePattern(
            name="AWS Secret Key",
            pattern=r"(?i)aws[_-]?secret[_-]?access[_-]?key\s*[=:]\s*['\"]?([a-zA-Z0-9/+=]{40})['\"]?",
            type=SensitiveType.AWS_KEY,
            description="AWS Secret Access Key",
            severity=3,
        ),
        # Database — scheme://user:password@host URLs with embedded credentials.
        SensitivePattern(
            name="Database URL",
            pattern=r"(?i)(postgres|mysql|mongodb|redis)://[^\s]+:[^\s]+@[^\s]+",
            type=SensitiveType.DATABASE_URL,
            description="Database connection string with credentials",
            severity=3,
        ),
        # Private Keys — only the PEM header is matched, not the key body.
        SensitivePattern(
            name="Private Key",
            pattern=r"-----BEGIN\s+(RSA|DSA|EC|OPENSSH|PGP)?\s*PRIVATE KEY-----",
            type=SensitiveType.PRIVATE_KEY,
            description="Private key header",
            severity=3,
        ),
        # JWT — three dot-separated base64url segments, first two starting "eyJ".
        SensitivePattern(
            name="JWT Token",
            pattern=r"eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*",
            type=SensitiveType.JWT,
            description="JSON Web Token",
            severity=2,
        ),
        # Credit Card (basic pattern) — Visa/Mastercard/Amex/Discover prefixes;
        # no Luhn check, so false positives are possible.
        SensitivePattern(
            name="Credit Card",
            pattern=r"\b(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|3[47][0-9]{13}|6(?:011|5[0-9]{2})[0-9]{12})\b",
            type=SensitiveType.CREDIT_CARD,
            description="Credit card number",
            severity=3,
        ),
        # SSN (US) — dashed 3-2-4 digit layout only.
        SensitivePattern(
            name="SSN",
            pattern=r"\b\d{3}-\d{2}-\d{4}\b",
            type=SensitiveType.SSN,
            description="Social Security Number format",
            severity=3,
        ),
        # Long random strings (potential secrets) — heuristic, low severity.
        SensitivePattern(
            name="Long Base64 String",
            pattern=r"\b[A-Za-z0-9+/]{40,}={0,2}\b",
            type=SensitiveType.GENERIC_SECRET,
            description="Long base64-encoded string (potential secret)",
            severity=1,
        ),
        # Hex strings (potential keys) — e.g. MD5/SHA digests, hex-encoded keys.
        SensitivePattern(
            name="Long Hex String",
            pattern=r"\b[a-fA-F0-9]{32,}\b",
            type=SensitiveType.GENERIC_SECRET,
            description="Long hexadecimal string (potential key)",
            severity=1,
        ),
    ]
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def check_sensitive_content(
    content: str,
    patterns: list[SensitivePattern] | None = None,
    min_severity: int = 1,
) -> list[SensitiveMatch]:
    """
    Check content for sensitive information.

    Args:
        content: Text to check
        patterns: Patterns to use (default: get_default_patterns())
        min_severity: Minimum severity level to report (1-3)

    Returns:
        Matches found, highest severity first, then by position.
    """
    active = get_default_patterns() if patterns is None else patterns

    found: list[SensitiveMatch] = []
    for pat in active:
        if pat.severity < min_severity:
            continue

        try:
            compiled = re.compile(pat.pattern)
        except re.error:
            # Skip invalid patterns rather than failing the whole scan.
            continue

        found.extend(
            SensitiveMatch(
                pattern_name=pat.name,
                matched_text=hit.group(0),
                type=pat.type,
                severity=pat.severity,
                start=hit.start(),
                end=hit.end(),
            )
            for hit in compiled.finditer(content)
        )

    # De-duplicate by span, keeping the first match reported for each
    # (start, end) position (dicts preserve insertion order).
    unique: dict[tuple[int, int], SensitiveMatch] = {}
    for m in found:
        unique.setdefault((m.start, m.end), m)

    return sorted(unique.values(), key=lambda m: (-m.severity, m.start))
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def filter_sensitive_content(
    content: str,
    patterns: list[SensitivePattern] | None = None,
    replacement: str = "[REDACTED]",
) -> tuple[str, list[SensitiveMatch]]:
    """
    Filter sensitive content by replacing matches.

    Overlapping matches (e.g. a long hex string inside a token assignment)
    are merged into a single span before replacement, so each sensitive
    region is replaced exactly once.

    Args:
        content: Text to filter
        patterns: Patterns to use
        replacement: Replacement text

    Returns:
        Tuple of (filtered_content, matches_found); match offsets refer to
        the original content.
    """
    matches = check_sensitive_content(content, patterns)

    if not matches:
        return content, []

    # Bug fix: matches are only de-duplicated on identical (start, end),
    # so overlapping spans from different patterns could each be replaced,
    # splicing replacement text into an already-redacted region. Merge
    # overlapping spans first.
    spans = sorted((m.start, m.end) for m in matches)
    merged: list[tuple[int, int]] = [spans[0]]
    for start, end in spans[1:]:
        last_start, last_end = merged[-1]
        if start < last_end:
            merged[-1] = (last_start, max(last_end, end))
        else:
            merged.append((start, end))

    # Replace from the end so earlier offsets remain valid.
    filtered = content
    for start, end in reversed(merged):
        filtered = filtered[:start] + replacement + filtered[end:]

    return filtered, matches
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def format_sensitive_warning(matches: list[SensitiveMatch], use_ascii: bool = False) -> str:
    """Format a warning message for sensitive content.

    Args:
        matches: List of sensitive matches
        use_ascii: Use ASCII characters instead of emojis (for Windows compatibility)

    Returns:
        Multi-line warning text, or "" when there are no matches.
    """
    if not matches:
        return ""

    # Pick the icon set for the requested rendering mode.
    if use_ascii:
        warn_icon, high_icon, medium_icon, low_icon = "[!]", "[!!!]", "[!!]", "[!]"
    else:
        warn_icon, high_icon, medium_icon, low_icon = "<!>", "[HIGH]", "[MED]", "[LOW]"

    high = [m for m in matches if m.severity == 3]
    medium = [m for m in matches if m.severity == 2]
    low = [m for m in matches if m.severity == 1]

    lines = [f"{warn_icon} SENSITIVE CONTENT DETECTED:"]

    if high:
        lines.append(f"\n {high_icon} HIGH RISK:")
        lines.extend(f" - {m.pattern_name}: {m.redacted()}" for m in high)

    if medium:
        lines.append(f"\n {medium_icon} MEDIUM RISK:")
        lines.extend(f" - {m.pattern_name}: {m.redacted()}" for m in medium)

    if low:
        lines.append(f"\n {low_icon} LOW RISK:")
        # Low-risk findings are noisy heuristics: show at most three.
        lines.extend(f" - {m.pattern_name}" for m in low[:3])
        if len(low) > 3:
            lines.append(f" ... and {len(low) - 3} more")

    lines.append("\n Use --force to store anyway, or --redact to auto-redact.")

    return "\n".join(lines)
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
"""FastAPI application factory."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import AsyncGenerator
|
|
6
|
+
from contextlib import asynccontextmanager
|
|
7
|
+
|
|
8
|
+
from fastapi import FastAPI
|
|
9
|
+
from fastapi.middleware.cors import CORSMiddleware
|
|
10
|
+
|
|
11
|
+
from neural_memory import __version__
|
|
12
|
+
from neural_memory.server.models import HealthResponse
|
|
13
|
+
from neural_memory.server.routes import brain_router, memory_router, sync_router
|
|
14
|
+
from neural_memory.storage.memory_store import InMemoryStorage
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Application lifespan handler.

    On startup, attaches a fresh in-memory storage backend to the app
    state; there is nothing to tear down on shutdown.
    """
    app.state.storage = InMemoryStorage()
    yield
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def create_app(
    title: str = "NeuralMemory",
    description: str = "Reflex-based memory system for AI agents",
    cors_origins: list[str] | None = None,
) -> FastAPI:
    """
    Create and configure the FastAPI application.

    Wires CORS, routers (memory, brain, sync), the storage dependency
    override, and the health/root endpoints.

    Args:
        title: API title
        description: API description
        cors_origins: Allowed CORS origins (default: ["*"])

    Returns:
        Configured FastAPI application
    """
    app = FastAPI(
        title=title,
        description=description,
        version=__version__,
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
    )

    # CORS middleware
    if cors_origins is None:
        cors_origins = ["*"]

    # NOTE(review): allow_credentials=True combined with the default
    # wildcard origin is disallowed by the CORS spec and handled
    # specially by Starlette — confirm whether credentials are needed.
    app.add_middleware(
        CORSMiddleware,
        allow_origins=cors_origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Override storage dependency using the shared module.
    # Imported locally to avoid a circular import at module load time —
    # TODO confirm against the dependencies module.
    from neural_memory.server.dependencies import get_storage as shared_get_storage

    # Closure over `app` so routes resolve storage from the live app state
    # (populated by the lifespan handler).
    async def get_storage() -> InMemoryStorage:
        return app.state.storage

    app.dependency_overrides[shared_get_storage] = get_storage

    # Include routers
    app.include_router(memory_router)
    app.include_router(brain_router)
    app.include_router(sync_router)

    # Health check endpoint
    @app.get("/health", response_model=HealthResponse, tags=["health"])
    async def health_check() -> HealthResponse:
        """Health check endpoint."""
        return HealthResponse(status="healthy", version=__version__)

    # Root endpoint
    @app.get("/", tags=["health"])
    async def root() -> dict[str, str]:
        """Root endpoint with API info."""
        return {
            "name": title,
            "description": description,
            "version": __version__,
            "docs": "/docs",
            "health": "/health",
        }

    return app
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
# Create default app instance for uvicorn (e.g.
# `uvicorn neural_memory.server.app:app`). Importing this module therefore
# builds a fully configured application with default settings.
app = create_app()
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Shared dependencies for API routes."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Annotated
|
|
6
|
+
|
|
7
|
+
from fastapi import Depends, Header, HTTPException
|
|
8
|
+
|
|
9
|
+
from neural_memory.core.brain import Brain
|
|
10
|
+
from neural_memory.storage.base import NeuralStorage
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
async def get_storage() -> NeuralStorage:
    """
    Dependency to get storage instance.

    This is a placeholder: the application replaces it via FastAPI's
    ``dependency_overrides`` at startup, so calling it directly always
    fails.

    Raises:
        NotImplementedError: always, until overridden by the application.
    """
    raise NotImplementedError("Storage not configured")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
async def get_brain(
    brain_id: Annotated[str, Header(alias="X-Brain-ID")],
    storage: Annotated[NeuralStorage, Depends(get_storage)],
) -> Brain:
    """Dependency to get and validate brain from header.

    Reads the brain id from the ``X-Brain-ID`` request header, looks it up
    in storage, and raises a 404 HTTPException when no such brain exists.
    On success the storage's current-brain context is switched to this id
    before the brain is returned.
    """
    brain = await storage.get_brain(brain_id)
    if brain is None:
        raise HTTPException(status_code=404, detail=f"Brain {brain_id} not found")

    # Set brain context. NOTE(review): set_brain is not part of the
    # NeuralStorage base annotation here (hence the ignore) — presumably
    # every concrete backend provides it; verify against the storage layer.
    storage.set_brain(brain_id)  # type: ignore
    return brain
|