memorisdk-2.0.0-py3-none-any.whl → memorisdk-2.1.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in the supported public registries, and is provided for informational purposes only.
Potentially problematic release: this version of memorisdk might be problematic.
- memori/__init__.py +3 -3
- memori/agents/conscious_agent.py +289 -77
- memori/agents/memory_agent.py +19 -9
- memori/agents/retrieval_agent.py +138 -63
- memori/config/manager.py +7 -7
- memori/config/memory_manager.py +25 -25
- memori/config/settings.py +13 -6
- memori/core/conversation.py +15 -15
- memori/core/database.py +14 -13
- memori/core/memory.py +438 -123
- memori/core/providers.py +25 -25
- memori/database/__init__.py +11 -0
- memori/database/adapters/__init__.py +11 -0
- memori/database/adapters/mongodb_adapter.py +739 -0
- memori/database/adapters/mysql_adapter.py +8 -8
- memori/database/adapters/postgresql_adapter.py +6 -6
- memori/database/adapters/sqlite_adapter.py +6 -6
- memori/database/auto_creator.py +8 -9
- memori/database/connection_utils.py +5 -5
- memori/database/connectors/__init__.py +11 -0
- memori/database/connectors/base_connector.py +18 -19
- memori/database/connectors/mongodb_connector.py +527 -0
- memori/database/connectors/mysql_connector.py +13 -15
- memori/database/connectors/postgres_connector.py +12 -12
- memori/database/connectors/sqlite_connector.py +11 -11
- memori/database/models.py +2 -2
- memori/database/mongodb_manager.py +1402 -0
- memori/database/queries/base_queries.py +3 -4
- memori/database/queries/chat_queries.py +3 -5
- memori/database/queries/entity_queries.py +3 -5
- memori/database/queries/memory_queries.py +3 -5
- memori/database/query_translator.py +11 -11
- memori/database/schema_generators/__init__.py +11 -0
- memori/database/schema_generators/mongodb_schema_generator.py +666 -0
- memori/database/schema_generators/mysql_schema_generator.py +2 -4
- memori/database/search/__init__.py +11 -0
- memori/database/search/mongodb_search_adapter.py +653 -0
- memori/database/search/mysql_search_adapter.py +8 -8
- memori/database/search/sqlite_search_adapter.py +6 -6
- memori/database/search_service.py +218 -66
- memori/database/sqlalchemy_manager.py +72 -25
- memori/integrations/__init__.py +1 -1
- memori/integrations/anthropic_integration.py +1 -3
- memori/integrations/litellm_integration.py +23 -6
- memori/integrations/openai_integration.py +31 -3
- memori/tools/memory_tool.py +104 -13
- memori/utils/exceptions.py +58 -58
- memori/utils/helpers.py +11 -12
- memori/utils/input_validator.py +10 -12
- memori/utils/logging.py +4 -4
- memori/utils/pydantic_models.py +57 -57
- memori/utils/query_builder.py +20 -20
- memori/utils/security_audit.py +28 -28
- memori/utils/security_integration.py +9 -9
- memori/utils/transaction_manager.py +20 -19
- memori/utils/validators.py +6 -6
- {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/METADATA +36 -20
- memorisdk-2.1.0.dist-info/RECORD +71 -0
- memori/scripts/llm_text.py +0 -50
- memorisdk-2.0.0.dist-info/RECORD +0 -67
- {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/WHEEL +0 -0
- {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/licenses/LICENSE +0 -0
- {memorisdk-2.0.0.dist-info → memorisdk-2.1.0.dist-info}/top_level.txt +0 -0
memori/utils/pydantic_models.py
CHANGED
@@ -4,7 +4,7 @@ Pydantic Models for Structured Memory Processing
 
 from datetime import datetime
 from enum import Enum
-from typing import Annotated,
+from typing import Annotated, Literal
 
 from pydantic import BaseModel, Field
 
@@ -85,7 +85,7 @@ class ExtractedEntity(BaseModel):
     relevance_score: RelevanceScore = Field(
         description="How relevant this entity is to the memory"
     )
-    context:
+    context: str | None = Field(
         default=None, description="Additional context about this entity"
     )
 
@@ -93,28 +93,28 @@ class ExtractedEntity(BaseModel):
 class ExtractedEntities(BaseModel):
     """All entities extracted from a conversation"""
 
-    people:
+    people: list[str] = Field(
         default_factory=list, description="Names of people mentioned"
     )
-    technologies:
+    technologies: list[str] = Field(
         default_factory=list, description="Technologies, tools, libraries mentioned"
     )
-    topics:
+    topics: list[str] = Field(
         default_factory=list, description="Main topics or subjects discussed"
     )
-    skills:
+    skills: list[str] = Field(
         default_factory=list, description="Skills, abilities, or competencies mentioned"
     )
-    projects:
+    projects: list[str] = Field(
         default_factory=list,
         description="Projects, repositories, or initiatives mentioned",
     )
-    keywords:
+    keywords: list[str] = Field(
         default_factory=list, description="Important keywords for search"
     )
 
     # Structured entities with metadata
-    structured_entities:
+    structured_entities: list[ExtractedEntity] = Field(
         default_factory=list, description="Detailed entity extraction"
     )
 
@@ -150,13 +150,13 @@ class MemorySearchQuery(BaseModel):
     intent: str = Field(description="Interpreted intent of the query")
 
     # Search parameters
-    entity_filters:
+    entity_filters: list[str] = Field(
         default_factory=list, description="Specific entities to search for"
     )
-    category_filters:
+    category_filters: list[MemoryCategoryType] = Field(
         default_factory=list, description="Memory categories to include"
     )
-    time_range:
+    time_range: str | None = Field(
         default=None, description="Time range for search (e.g., 'last_week')"
     )
     min_importance: ImportanceScore = Field(
@@ -164,10 +164,10 @@ class MemorySearchQuery(BaseModel):
     )
 
     # Search strategy
-    search_strategy:
+    search_strategy: list[str] = Field(
         default_factory=list, description="Recommended search strategies"
     )
-    expected_result_types:
+    expected_result_types: list[str] = Field(
         default_factory=list, description="Expected types of results"
     )
 
@@ -190,7 +190,7 @@ class UserRule(BaseModel):
     rule_text: str = Field(description="The rule or preference in natural language")
     rule_type: Literal["preference", "instruction", "constraint", "goal"]
     priority: PriorityLevel = Field(default=5, description="Priority level (1-10)")
-    context:
+    context: str | None = Field(default=None, description="When this rule applies")
     active: bool = Field(
         default=True, description="Whether this rule is currently active"
     )
@@ -201,29 +201,29 @@ class ConversationContext(BaseModel):
 
     model_config = {"protected_namespaces": ()}
 
-    user_id:
+    user_id: str | None = Field(default=None)
     session_id: str
     conversation_id: str
     model_used: str
 
     # User context
-    user_preferences:
-    current_projects:
-    relevant_skills:
+    user_preferences: list[str] = Field(default_factory=list)
+    current_projects: list[str] = Field(default_factory=list)
+    relevant_skills: list[str] = Field(default_factory=list)
 
     # Conversation metadata
     conversation_length: int = Field(
         default=1, description="Number of exchanges in this conversation"
     )
-    topic_thread:
+    topic_thread: str | None = Field(
         default=None, description="Main topic thread being discussed"
     )
 
     # Memory context
-    recent_memories:
+    recent_memories: list[str] = Field(
         default_factory=list, description="IDs of recently accessed memories"
     )
-    applied_rules:
+    applied_rules: list[str] = Field(
         default_factory=list, description="Rules that were applied"
     )
 
@@ -239,7 +239,7 @@ class ProcessedMemory(BaseModel):
         description="Why this memory should or shouldn't be stored"
     )
     timestamp: datetime = Field(default_factory=datetime.now)
-    processing_metadata:
+    processing_metadata: dict[str, str] | None = Field(default=None)
 
 
 class ProcessedLongTermMemory(BaseModel):
@@ -252,11 +252,11 @@ class ProcessedLongTermMemory(BaseModel):
     importance: MemoryImportanceLevel = Field(description="Importance level")
 
     # Context Information
-    topic:
-    entities:
+    topic: str | None = Field(default=None, description="Main topic/subject")
+    entities: list[str] = Field(
         default_factory=list, description="People, places, technologies mentioned"
     )
-    keywords:
+    keywords: list[str] = Field(
         default_factory=list, description="Key terms for search"
     )
 
@@ -271,13 +271,13 @@ class ProcessedLongTermMemory(BaseModel):
     is_current_project: bool = Field(default=False, description="Current work context")
 
     # Memory Management
-    duplicate_of:
+    duplicate_of: str | None = Field(
         default=None, description="Links to original if duplicate"
     )
-    supersedes:
+    supersedes: list[str] = Field(
         default_factory=list, description="Previous memories this replaces"
     )
-    related_memories:
+    related_memories: list[str] = Field(
         default_factory=list, description="Connected memory IDs"
     )
 
@@ -287,7 +287,7 @@ class ProcessedLongTermMemory(BaseModel):
         default=0.8, description="AI confidence in extraction"
     )
     extraction_timestamp: datetime = Field(default_factory=datetime.now)
-    last_accessed:
+    last_accessed: datetime | None = Field(default=None)
     access_count: int = Field(default=0)
 
     # Classification Reasoning
@@ -308,38 +308,38 @@ class UserContextProfile(BaseModel):
     """Permanent user context for conscious ingestion"""
 
     # Core Identity
-    name:
-    pronouns:
-    location:
-    timezone:
+    name: str | None = None
+    pronouns: str | None = None
+    location: str | None = None
+    timezone: str | None = None
 
     # Professional Context
-    job_title:
-    company:
-    industry:
-    experience_level:
-    specializations:
+    job_title: str | None = None
+    company: str | None = None
+    industry: str | None = None
+    experience_level: str | None = None
+    specializations: list[str] = Field(default_factory=list)
 
     # Technical Stack
-    primary_languages:
-    frameworks:
-    tools:
-    environment:
+    primary_languages: list[str] = Field(default_factory=list)
+    frameworks: list[str] = Field(default_factory=list)
+    tools: list[str] = Field(default_factory=list)
+    environment: str | None = None
 
     # Behavioral Preferences
-    communication_style:
-    technical_depth:
-    response_preference:
+    communication_style: str | None = None
+    technical_depth: str | None = None
+    response_preference: str | None = None
 
     # Current Context
-    active_projects:
-    learning_goals:
-    domain_expertise:
+    active_projects: list[str] = Field(default_factory=list)
+    learning_goals: list[str] = Field(default_factory=list)
+    domain_expertise: list[str] = Field(default_factory=list)
 
     # Values & Constraints
-    code_standards:
-    time_constraints:
-    technology_preferences:
+    code_standards: list[str] = Field(default_factory=list)
+    time_constraints: str | None = None
+    technology_preferences: list[str] = Field(default_factory=list)
 
     # Metadata
     last_updated: datetime = Field(default_factory=datetime.now)
@@ -350,11 +350,11 @@ class MemoryStats(BaseModel):
     """Statistics about stored memories"""
 
     total_memories: int
-    memories_by_category:
-    memories_by_retention:
+    memories_by_category: dict[str, int]
+    memories_by_retention: dict[str, int]
     average_importance: float
     total_entities: int
-    most_common_entities:
+    most_common_entities: list[tuple[str, int]]
     storage_size_mb: float
-    oldest_memory_date:
-    newest_memory_date:
+    oldest_memory_date: datetime | None
+    newest_memory_date: datetime | None
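For orientation, the modernized annotations in this file are drop-in changes at the call site. A minimal sketch, assuming (as the hunks above suggest) that the remaining ExtractedEntities fields also carry defaults and that the module is importable at this path:

from memori.utils.pydantic_models import ExtractedEntities

# All list fields default to empty lists, so only the populated ones are passed.
entities = ExtractedEntities(
    people=["Ada"],
    technologies=["Pydantic", "PostgreSQL"],
    keywords=["memory", "search"],
)
print(entities.model_dump())  # plain dict with list[str] values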
memori/utils/query_builder.py
CHANGED
@@ -4,7 +4,7 @@ Provides consistent parameter handling across SQLite, PostgreSQL, and MySQL
 """
 
 from enum import Enum
-from typing import Any
+from typing import Any
 
 from loguru import logger
 
@@ -43,14 +43,14 @@ class QueryBuilder:
 
     def build_search_query(
         self,
-        tables:
-        search_columns:
+        tables: list[str],
+        search_columns: list[str],
         query_text: str,
         namespace: str,
-        category_filter:
+        category_filter: list[str] | None = None,
         limit: int = 10,
         use_fts: bool = False,
-    ) ->
+    ) -> tuple[str, list[Any]]:
         """Build a database-specific search query with proper parameter binding"""
 
         try:
@@ -148,8 +148,8 @@ class QueryBuilder:
         return query, params
 
     def build_insert_query(
-        self, table: str, data:
-    ) ->
+        self, table: str, data: dict[str, Any], on_conflict: str = "REPLACE"
+    ) -> tuple[str, list[Any]]:
         """Build database-specific insert query with proper parameter binding"""
 
         try:
@@ -210,8 +210,8 @@ class QueryBuilder:
         return query, values
 
     def build_update_query(
-        self, table: str, data:
-    ) ->
+        self, table: str, data: dict[str, Any], where_conditions: dict[str, Any]
+    ) -> tuple[str, list[Any]]:
         """Build database-specific update query"""
 
         try:
@@ -248,8 +248,8 @@ class QueryBuilder:
         return query, params
 
     def build_delete_query(
-        self, table: str, where_conditions:
-    ) ->
+        self, table: str, where_conditions: dict[str, Any]
+    ) -> tuple[str, list[Any]]:
         """Build database-specific delete query"""
 
         try:
@@ -276,9 +276,9 @@ class QueryBuilder:
         self,
         query_text: str,
         namespace: str,
-        category_filter:
+        category_filter: list[str] | None = None,
         limit: int = 10,
-    ) ->
+    ) -> tuple[str, list[Any]]:
         """Build database-specific full-text search query"""
 
         try:
@@ -420,7 +420,7 @@ class QueryBuilder:
         params.append(limit)
         return query, params
 
-    def _get_primary_key_column(self, columns:
+    def _get_primary_key_column(self, columns: list[str]) -> str | None:
         """Detect likely primary key column from column names"""
         pk_candidates = [
             "id",
@@ -454,10 +454,10 @@ class DatabaseQueryExecutor:
         self,
         query_text: str,
         namespace: str = "default",
-        category_filter:
+        category_filter: list[str] | None = None,
         limit: int = 10,
         use_fts: bool = True,
-    ) ->
+    ) -> list[dict[str, Any]]:
         """Execute search with proper error handling"""
         try:
             if use_fts:
@@ -493,8 +493,8 @@ class DatabaseQueryExecutor:
             return []
 
     def execute_safe_insert(
-        self, table: str, data:
-    ) ->
+        self, table: str, data: dict[str, Any], on_conflict: str = "REPLACE"
+    ) -> str | None:
         """Execute insert with proper error handling"""
         try:
             sql_query, params = self.query_builder.build_insert_query(
@@ -506,7 +506,7 @@ class DatabaseQueryExecutor:
             raise DatabaseError(f"Failed to insert into {table}: {e}")
 
     def execute_safe_update(
-        self, table: str, data:
+        self, table: str, data: dict[str, Any], where_conditions: dict[str, Any]
     ) -> int:
         """Execute update with proper error handling"""
         try:
@@ -518,7 +518,7 @@ class DatabaseQueryExecutor:
             logger.error(f"Update execution failed: {e}")
             raise DatabaseError(f"Failed to update {table}: {e}")
 
-    def execute_safe_delete(self, table: str, where_conditions:
+    def execute_safe_delete(self, table: str, where_conditions: dict[str, Any]) -> int:
         """Execute delete with proper error handling"""
         try:
             sql_query, params = self.query_builder.build_delete_query(
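The QueryBuilder changes above only tighten type annotations; the signatures otherwise keep the same parameters. A hedged sketch of calling the updated build_search_query, where the builder argument is assumed to be an already-constructed QueryBuilder (its constructor is not part of this diff) and the table and column names are illustrative only:

from typing import Any

from memori.utils.query_builder import QueryBuilder


def render_memory_search(builder: QueryBuilder) -> tuple[str, list[Any]]:
    # Returns the SQL string plus its bound parameters, matching the new
    # tuple[str, list[Any]] return annotation.
    return builder.build_search_query(
        tables=["long_term_memory"],             # illustrative table name
        search_columns=["summary", "content"],   # illustrative columns
        query_text="connection pooling",
        namespace="default",
        category_filter=None,
        limit=10,
        use_fts=False,
    )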
memori/utils/security_audit.py
CHANGED
@@ -7,7 +7,7 @@ import re
 import time
 from dataclasses import dataclass
 from enum import Enum
-from typing import Any
+from typing import Any
 
 from .exceptions import SecurityError
 from .input_validator import InputValidator
@@ -44,15 +44,15 @@ class SecurityFinding:
     description: str
     location: str
     recommendation: str
-    evidence:
-    remediation_code:
+    evidence: str | None = None
+    remediation_code: str | None = None
 
 
 @dataclass
 class SecurityAuditReport:
     """Security audit report"""
 
-    findings:
+    findings: list[SecurityFinding]
     total_queries_audited: int
     critical_count: int
     high_count: int
@@ -143,9 +143,9 @@ class DatabaseSecurityAuditor:
     def audit_query(
         self,
         query: str,
-        params:
-        context:
-    ) ->
+        params: list[Any] | None = None,
+        context: str | None = None,
+    ) -> list[SecurityFinding]:
         """Audit a single database query for security vulnerabilities"""
         findings = []
         self.queries_audited += 1
@@ -169,8 +169,8 @@ class DatabaseSecurityAuditor:
         return findings
 
     def _audit_sql_injection(
-        self, query: str, params:
-    ) ->
+        self, query: str, params: list[Any] | None, context: str | None
+    ) -> list[SecurityFinding]:
         """Audit for SQL injection vulnerabilities"""
         findings = []
         query_lower = query.lower()
@@ -219,8 +219,8 @@ class DatabaseSecurityAuditor:
         return findings
 
     def _audit_parameter_validation(
-        self, query: str, params:
-    ) ->
+        self, query: str, params: list[Any] | None, context: str | None
+    ) -> list[SecurityFinding]:
         """Audit parameter validation"""
         findings = []
 
@@ -285,8 +285,8 @@ class DatabaseSecurityAuditor:
         return findings
 
     def _audit_privilege_operations(
-        self, query: str, context:
-    ) ->
+        self, query: str, context: str | None
+    ) -> list[SecurityFinding]:
         """Audit for privilege escalation attempts"""
         findings = []
         query_upper = query.upper().strip()
@@ -334,8 +334,8 @@ class DatabaseSecurityAuditor:
         return findings
 
     def _audit_data_exposure(
-        self, query: str, context:
-    ) ->
+        self, query: str, context: str | None
+    ) -> list[SecurityFinding]:
         """Audit for potential data exposure issues"""
         findings = []
         query_upper = query.upper()
@@ -397,10 +397,10 @@ class DatabaseSecurityAuditor:
     def validate_query_safety(
         self,
         query: str,
-        params:
-        context:
+        params: list[Any] | None = None,
+        context: str | None = None,
         strict_mode: bool = True,
-    ) ->
+    ) -> tuple[bool, list[SecurityFinding]]:
         """Validate if a query is safe to execute"""
         findings = self.audit_query(query, params, context)
 
@@ -444,7 +444,7 @@ class DatabaseSecurityAuditor:
             overall_risk_score=risk_score,
         )
 
-    def get_remediation_suggestions(self) ->
+    def get_remediation_suggestions(self) -> dict[VulnerabilityType, list[str]]:
         """Get remediation suggestions grouped by vulnerability type"""
         suggestions = {
             VulnerabilityType.SQL_INJECTION: [
@@ -503,10 +503,10 @@ class SecureQueryBuilder:
     def build_safe_select(
         self,
         table: str,
-        columns:
-        where_conditions:
-        limit:
-    ) ->
+        columns: list[str],
+        where_conditions: dict[str, Any],
+        limit: int | None = None,
+    ) -> tuple[str, list[Any]]:
         """Build a safe SELECT query"""
         # Validate inputs
         table = InputValidator.sanitize_sql_identifier(table)
@@ -541,8 +541,8 @@ class SecureQueryBuilder:
         return query, params
 
     def build_safe_insert(
-        self, table: str, data:
-    ) ->
+        self, table: str, data: dict[str, Any]
+    ) -> tuple[str, list[Any]]:
         """Build a safe INSERT query"""
         # Validate inputs
         table = InputValidator.sanitize_sql_identifier(table)
@@ -580,14 +580,14 @@ def get_security_auditor() -> DatabaseSecurityAuditor:
 
 
 def audit_query(
-    query: str, params:
-) ->
+    query: str, params: list[Any] | None = None, context: str | None = None
+) -> list[SecurityFinding]:
     """Convenience function to audit a single query"""
     return _global_auditor.audit_query(query, params, context)
 
 
 def validate_query_safety(
-    query: str, params:
+    query: str, params: list[Any] | None = None, context: str | None = None
 ) -> bool:
     """Convenience function to validate query safety"""
     is_safe, _ = _global_auditor.validate_query_safety(query, params, context)
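The module-level convenience functions keep their names; only their annotations are now spelled out. A hedged usage sketch against the signatures shown above, with an illustrative query and parameters:

from memori.utils.security_audit import audit_query, validate_query_safety

query = "SELECT memory_id FROM long_term_memory WHERE namespace = ?"

if validate_query_safety(query, params=["default"], context="memory search"):
    print("query passed the audit")
else:
    # audit_query returns list[SecurityFinding]; description and recommendation
    # are fields visible in the SecurityFinding hunk above.
    for finding in audit_query(query, params=["default"], context="memory search"):
        print(finding.description, "->", finding.recommendation)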
memori/utils/security_integration.py
CHANGED
@@ -3,7 +3,7 @@ Security Integration Module for Memori
 Integrates all security components into a unified system
 """
 
-from typing import Any
+from typing import Any
 
 from loguru import logger
 
@@ -29,10 +29,10 @@ class SecureMemoriDatabase:
         self,
         query_text: str,
         namespace: str = "default",
-        category_filter:
+        category_filter: list[str] | None = None,
         limit: int = 10,
         use_fts: bool = True,
-    ) ->
+    ) -> list[dict[str, Any]]:
         """Execute secure search with comprehensive validation"""
         try:
             # Phase 1: Input validation
@@ -96,8 +96,8 @@ class SecureMemoriDatabase:
             return []
 
     def secure_insert(
-        self, table: str, data:
-    ) ->
+        self, table: str, data: dict[str, Any], on_conflict: str = "REPLACE"
+    ) -> str | None:
         """Execute secure insert with comprehensive validation"""
         try:
             # Phase 1: Input validation
@@ -135,7 +135,7 @@ class SecureMemoriDatabase:
             raise DatabaseError(f"Secure insert failed: {e}")
 
     def secure_update(
-        self, table: str, data:
+        self, table: str, data: dict[str, Any], where_conditions: dict[str, Any]
     ) -> int:
         """Execute secure update with comprehensive validation"""
         try:
@@ -188,7 +188,7 @@ class SecureMemoriDatabase:
             logger.error(f"Secure update failed: {e}")
             raise DatabaseError(f"Secure update failed: {e}")
 
-    def secure_delete(self, table: str, where_conditions:
+    def secure_delete(self, table: str, where_conditions: dict[str, Any]) -> int:
         """Execute secure delete with comprehensive validation"""
         try:
             # Phase 1: Input validation
@@ -235,7 +235,7 @@ class SecureMemoriDatabase:
            logger.error(f"Secure delete failed: {e}")
            raise DatabaseError(f"Secure delete failed: {e}")
 
-    def get_security_report(self) ->
+    def get_security_report(self) -> dict[str, Any]:
         """Get comprehensive security report"""
         audit_report = self.security_auditor.generate_audit_report()
 
@@ -279,7 +279,7 @@ def create_secure_database(connector, dialect: DatabaseDialect) -> SecureMemoriDatabase:
     return SecureMemoriDatabase(connector, dialect)
 
 
-def validate_memori_security_config() ->
+def validate_memori_security_config() -> dict[str, Any]:
     """Validate that all security components are properly configured"""
     validation_results = {
         "input_validator": False,