mcp-sqlite-memory-bank 1.6.0__py3-none-any.whl → 1.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_sqlite_memory_bank/__init__.py +2 -2
- mcp_sqlite_memory_bank/__main__.py +4 -12
- mcp_sqlite_memory_bank/database.py +25 -73
- mcp_sqlite_memory_bank/prompts.py +12 -20
- mcp_sqlite_memory_bank/resources.py +13 -39
- mcp_sqlite_memory_bank/semantic.py +10 -31
- mcp_sqlite_memory_bank/server.py +27 -57
- mcp_sqlite_memory_bank/tools/analytics.py +18 -55
- mcp_sqlite_memory_bank/tools/basic.py +23 -20
- mcp_sqlite_memory_bank/tools/discovery.py +34 -97
- mcp_sqlite_memory_bank/tools/search.py +6 -17
- mcp_sqlite_memory_bank/utils.py +4 -13
- {mcp_sqlite_memory_bank-1.6.0.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/METADATA +1 -1
- mcp_sqlite_memory_bank-1.6.2.dist-info/RECORD +21 -0
- mcp_sqlite_memory_bank-1.6.0.dist-info/RECORD +0 -21
- {mcp_sqlite_memory_bank-1.6.0.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/WHEEL +0 -0
- {mcp_sqlite_memory_bank-1.6.0.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/entry_points.txt +0 -0
- {mcp_sqlite_memory_bank-1.6.0.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/licenses/LICENSE +0 -0
- {mcp_sqlite_memory_bank-1.6.0.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/top_level.txt +0 -0
mcp_sqlite_memory_bank/server.py
CHANGED
@@ -43,14 +43,25 @@ Each tool is designed for explicit, discoverable use by LLMs and agents:
 Author: Robert Meisner
 """
 
+from .tools.discovery import (
+    intelligent_discovery as intelligent_discovery_impl,
+    discovery_templates as discovery_templates_impl,
+    discover_relationships as discover_relationships_impl,
+)
+from .tools.search import (
+    search_content as search_content_impl,
+    explore_tables as explore_tables_impl,
+    add_embeddings as add_embeddings_impl,
+    auto_semantic_search as auto_semantic_search_impl,
+    auto_smart_search as auto_smart_search_impl,
+    embedding_stats as embedding_stats_impl,
+)
 import os
-import re
 import logging
 from typing import Dict, Optional, List, cast, Any
 from fastmcp import FastMCP
 
 from .database import get_database
-from .semantic import is_semantic_search_available
 from .types import (
     ToolResponse,
     CreateTableResponse,
@@ -124,9 +135,7 @@ def create_table(table_name: str, columns: List[Dict[str, str]]) -> ToolResponse
     - Creates table if it doesn't exist (idempotent)
     - Raises appropriate errors for invalid input
     """
-    return cast(
-        CreateTableResponse, get_database(DB_PATH).create_table(table_name, columns)
-    )
+    return cast(CreateTableResponse, get_database(DB_PATH).create_table(table_name, columns))
 
 
 @mcp.tool
@@ -238,9 +247,7 @@ def rename_table(old_name: str, new_name: str) -> ToolResponse:
     - Validates both old and new table names
     - Confirms old table exists and new name doesn't conflict
     """
-    return cast(
-        RenameTableResponse, get_database(DB_PATH).rename_table(old_name, new_name)
-    )
+    return cast(RenameTableResponse, get_database(DB_PATH).rename_table(old_name, new_name))
 
 
 @mcp.tool
@@ -271,9 +278,7 @@ def create_row(table_name: str, data: Dict[str, Any]) -> ToolResponse:
 
 @mcp.tool
 @catch_errors
-def upsert_memory(
-    table_name: str, data: Dict[str, Any], match_columns: List[str]
-) -> ToolResponse:
+def upsert_memory(table_name: str, data: Dict[str, Any], match_columns: List[str]) -> ToolResponse:
     """
     🔄 **SMART MEMORY UPSERT** - Prevent duplicates and maintain data consistency!
 
@@ -358,16 +363,12 @@ def update_rows(
     - Parameterizes all queries for safety
     - Where clause is optional (omitting it updates all rows!)
     """
-    return cast(
-        UpdateRowsResponse, get_database(DB_PATH).update_rows(table_name, data, where)
-    )
+    return cast(UpdateRowsResponse, get_database(DB_PATH).update_rows(table_name, data, where))
 
 
 @mcp.tool
 @catch_errors
-def delete_rows(
-    table_name: str, where: Optional[Dict[str, Any]] = None
-) -> ToolResponse:
+def delete_rows(table_name: str, where: Optional[Dict[str, Any]] = None) -> ToolResponse:
     """
     Delete rows from any table in the SQLite Memory Bank for Copilot/AI agents, matching the WHERE clause.
 
@@ -389,9 +390,7 @@ def delete_rows(
     - Parameterizes all queries for safety
     - Where clause is optional (omitting it deletes all rows!)
     """
-    return cast(
-        DeleteRowsResponse, get_database(DB_PATH).delete_rows(table_name, where)
-    )
+    return cast(DeleteRowsResponse, get_database(DB_PATH).delete_rows(table_name, where))
 
 
 @mcp.tool
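Both docstrings above warn that the `where` clause is optional and that omitting it touches every row. A hedged usage sketch of that contract (the table name and filter here are illustrative, not from the package):

```python
# Hypothetical calls against the delete_rows tool described above.
delete_rows("notes", where={"id": 42})  # deletes only the row with id 42
delete_rows("notes")                    # where omitted: deletes ALL rows in `notes`
```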
@@ -454,21 +453,8 @@ def list_all_columns() -> ToolResponse:
 
 
 # Import the implementation functions from tools modules
-from .tools.search import (
-    search_content as search_content_impl,
-    explore_tables as explore_tables_impl,
-    add_embeddings as add_embeddings_impl,
-    auto_semantic_search as auto_semantic_search_impl,
-    auto_smart_search as auto_smart_search_impl,
-    embedding_stats as embedding_stats_impl,
-)
 
 # Import the implementation functions from discovery module
-from .tools.discovery import (
-    intelligent_discovery as intelligent_discovery_impl,
-    discovery_templates as discovery_templates_impl,
-    discover_relationships as discover_relationships_impl,
-)
 
 # --- MCP Tool Definitions (Required in main server.py for FastMCP) ---
 
@@ -635,9 +621,7 @@ def auto_semantic_search(
     - Supports fuzzy matching and concept discovery
     - Perfect for agents - just search and it works!
     """
-    return auto_semantic_search_impl(
-        query, tables, similarity_threshold, limit, model_name
-    )
+    return auto_semantic_search_impl(query, tables, similarity_threshold, limit, model_name)
 
 
 @mcp.tool
@@ -684,9 +668,7 @@ def auto_smart_search(
     - Optimal for both exploratory and precise searches
     - Perfect for agents - ultimate search tool that just works!
     """
-    return auto_smart_search_impl(
-        query, tables, semantic_weight, text_weight, limit, model_name
-    )
+    return auto_smart_search_impl(query, tables, semantic_weight, text_weight, limit, model_name)
 
 
 @mcp.tool
@@ -820,9 +802,7 @@ def smart_search(
     - Optimal for both exploratory and precise searches
     - Perfect for agents - ultimate search tool that just works!
     """
-    return _smart_search_impl(
-        query, tables, semantic_weight, text_weight, limit, model_name
-    )
+    return _smart_search_impl(query, tables, semantic_weight, text_weight, limit, model_name)
 
 
 @mcp.tool
@@ -864,9 +844,7 @@ def find_related(
     - Can reveal patterns and themes across your knowledge base
     - Enables serendipitous discovery of relevant information
     """
-    return _find_related_impl(
-        table_name, row_id, similarity_threshold, limit, model_name
-    )
+    return _find_related_impl(table_name, row_id, similarity_threshold, limit, model_name)
 
 
 # --- Advanced Discovery Tools for SQLite Memory Bank ---
@@ -1011,9 +989,7 @@ def discover_relationships(
     - **ACTIONABLE INSIGHTS**: Suggests how to leverage discovered relationships
     - **PERFECT FOR EXPLORATION**: Reveals hidden data organization patterns
     """
-    return discover_relationships_impl(
-        table_name, relationship_types, similarity_threshold
-    )
+    return discover_relationships_impl(table_name, relationship_types, similarity_threshold)
 
 
 # Export the FastMCP app for use in other modules and server runners
@@ -1095,15 +1071,11 @@ def main():
     import uvicorn
     import argparse
 
-    parser = argparse.ArgumentParser(
-        description="Run MCP SQLite Memory Bank Server in HTTP mode"
-    )
+    parser = argparse.ArgumentParser(description="Run MCP SQLite Memory Bank Server in HTTP mode")
     parser.add_argument("--host", default="127.0.0.1", help="Host to bind to")
     parser.add_argument("--port", type=int, default=8000, help="Port to bind to")
     parser.add_argument("--db-path", help="Path to SQLite database file")
-    parser.add_argument(
-        "--reload", action="store_true", help="Enable auto-reload for development"
-    )
+    parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development")
 
     args = parser.parse_args()
 
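For reference, the consolidated argument parsing above can be exercised on its own. This sketch mirrors the four flags shown in the hunk (the uvicorn startup itself is omitted, and the sample argv is hypothetical):

```python
import argparse

# Mirrors the CLI flags defined in main() above; parse a sample argv.
parser = argparse.ArgumentParser(description="Run MCP SQLite Memory Bank Server in HTTP mode")
parser.add_argument("--host", default="127.0.0.1", help="Host to bind to")
parser.add_argument("--port", type=int, default=8000, help="Port to bind to")
parser.add_argument("--db-path", help="Path to SQLite database file")
parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development")

args = parser.parse_args(["--port", "9000", "--db-path", "./memory.db"])
print(args.host, args.port, args.db_path, args.reload)  # 127.0.0.1 9000 ./memory.db False
```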
@@ -1113,9 +1085,7 @@ def main():
         DB_PATH = args.db_path
         os.environ["DB_PATH"] = args.db_path
 
-    print(
-        f"Starting MCP SQLite Memory Bank server in HTTP mode on {args.host}:{args.port}"
-    )
+    print(f"Starting MCP SQLite Memory Bank server in HTTP mode on {args.host}:{args.port}")
     print(f"Database path: {DB_PATH}")
     print("Available at: http://localhost:8000/docs")
 
mcp_sqlite_memory_bank/tools/analytics.py
CHANGED
@@ -7,8 +7,7 @@ and providing insights for better knowledge organization.
 """
 
 import logging
-from typing import
-from fastmcp import FastMCP
+from typing import cast
 
 from ..database import get_database
 from ..semantic import is_semantic_search_available
@@ -80,19 +79,14 @@ def analyze_memory_patterns() -> ToolResponse:
             schema_result = db.describe_table(table_name)
             if schema_result.get("success"):
                 columns = schema_result.get("columns", [])
-                text_columns = [
-                    col for col in columns if "TEXT" in col.get("type", "").upper()
-                ]
+                text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
 
                 analysis["schema_analysis"][table_name] = {
                     "total_columns": len(columns),
                     "text_columns": len(text_columns),
-                    "has_id_column": any(
-                        col.get("name") == "id" for col in columns
-                    ),
+                    "has_id_column": any(col.get("name") == "id" for col in columns),
                     "has_timestamp": any(
-                        "timestamp" in col.get("name", "").lower()
-                        for col in columns
+                        "timestamp" in col.get("name", "").lower() for col in columns
                     ),
                 }
 
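The per-table summary assembled above can be reproduced in isolation. A minimal sketch, assuming a hypothetical `describe_table()` result for one table:

```python
# Hypothetical column metadata used only to illustrate the summary above.
columns = [
    {"name": "id", "type": "INTEGER"},
    {"name": "content", "type": "TEXT"},
    {"name": "created_timestamp", "type": "TEXT"},
]

text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
summary = {
    "total_columns": len(columns),
    "text_columns": len(text_columns),
    "has_id_column": any(col.get("name") == "id" for col in columns),
    "has_timestamp": any("timestamp" in col.get("name", "").lower() for col in columns),
}
print(summary)
# {'total_columns': 3, 'text_columns': 2, 'has_id_column': True, 'has_timestamp': True}
```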
@@ -106,9 +100,7 @@ def analyze_memory_patterns() -> ToolResponse:
                     text_content_lengths.append(len(str(content)))
 
                 if text_content_lengths:
-                    avg_length = sum(text_content_lengths) / len(
-                        text_content_lengths
-                    )
+                    avg_length = sum(text_content_lengths) / len(text_content_lengths)
                     if avg_length > 500:
                         analysis["text_density"]["high"].append(table_name)
                     elif avg_length > 100:
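The density buckets above hinge on average text length. A standalone restatement of the thresholds taken from the hunk (>500 high, >100 medium, otherwise low), with hypothetical content lengths:

```python
def classify_density(avg_length: float) -> str:
    # Thresholds copied from the hunk above.
    if avg_length > 500:
        return "high"
    elif avg_length > 100:
        return "medium"
    return "low"

lengths = [120, 480, 90]  # hypothetical text lengths for one table
print(classify_density(sum(lengths) / len(lengths)))  # avg 230.0 -> "medium"
```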
@@ -126,9 +118,7 @@ def analyze_memory_patterns() -> ToolResponse:
                 elif coverage > 0:
                     analysis["semantic_readiness"]["partial"].append(table_name)
                 else:
-                    analysis["semantic_readiness"]["needs_setup"].append(
-                        table_name
-                    )
+                    analysis["semantic_readiness"]["needs_setup"].append(table_name)
 
         except Exception as e:
             logging.warning(f"Error analyzing table {table_name}: {e}")
@@ -142,8 +132,7 @@ def analyze_memory_patterns() -> ToolResponse:
     high_value_tables = [
         t
         for t in analysis["semantic_readiness"]["needs_setup"]
-        if t
-        in analysis["text_density"]["high"] + analysis["text_density"]["medium"]
+        if t in analysis["text_density"]["high"] + analysis["text_density"]["medium"]
     ]
     if high_value_tables:
         recommendations.append(
@@ -151,9 +140,7 @@ def analyze_memory_patterns() -> ToolResponse:
     )
 
     # Content organization recommendations
-    large_tables = [
-        t for t, count in analysis["content_distribution"].items() if count > 50
-    ]
+    large_tables = [t for t, count in analysis["content_distribution"].items() if count > 50]
     if large_tables:
         recommendations.append(
             f"Large tables detected: {', '.join(large_tables)}. Consider organizing with categories or tags."
@@ -161,9 +148,7 @@ def analyze_memory_patterns() -> ToolResponse:
 
     # Empty or sparse tables
     sparse_tables = [
-        t
-        for t, count in analysis["content_distribution"].items()
-        if count < 5 and count > 0
+        t for t, count in analysis["content_distribution"].items() if count < 5 and count > 0
     ]
     if sparse_tables:
         recommendations.append(
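Both comprehensions above bucket tables by row count. A hypothetical `content_distribution` makes the cutoffs concrete (>50 rows large, 1-4 rows sparse):

```python
# Hypothetical row counts per table, mirroring analysis["content_distribution"].
content_distribution = {"notes": 120, "tags": 3, "sessions": 40}

large_tables = [t for t, count in content_distribution.items() if count > 50]
sparse_tables = [t for t, count in content_distribution.items() if count < 5 and count > 0]
print(large_tables, sparse_tables)  # ['notes'] ['tags']
```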
@@ -282,19 +267,14 @@ def get_content_health_score() -> ToolResponse:
             schema_result = db.describe_table(table_name)
             if schema_result.get("success"):
                 columns = schema_result.get("columns", [])
-                text_columns = [
-                    col for col in columns if "TEXT" in col.get("type", "").upper()
-                ]
+                text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
 
                 analysis["schema_analysis"][table_name] = {
                     "total_columns": len(columns),
                     "text_columns": len(text_columns),
-                    "has_id_column": any(
-                        col.get("name") == "id" for col in columns
-                    ),
+                    "has_id_column": any(col.get("name") == "id" for col in columns),
                     "has_timestamp": any(
-                        "timestamp" in col.get("name", "").lower()
-                        for col in columns
+                        "timestamp" in col.get("name", "").lower() for col in columns
                     ),
                 }
 
@@ -308,9 +288,7 @@ def get_content_health_score() -> ToolResponse:
                     text_content_lengths.append(len(str(content)))
 
                 if text_content_lengths:
-                    avg_length = sum(text_content_lengths) / len(
-                        text_content_lengths
-                    )
+                    avg_length = sum(text_content_lengths) / len(text_content_lengths)
                     if avg_length > 500:
                         analysis["text_density"]["high"].append(table_name)
                     elif avg_length > 100:
@@ -328,9 +306,7 @@ def get_content_health_score() -> ToolResponse:
                 elif coverage > 0:
                     analysis["semantic_readiness"]["partial"].append(table_name)
                 else:
-                    analysis["semantic_readiness"]["needs_setup"].append(
-                        table_name
-                    )
+                    analysis["semantic_readiness"]["needs_setup"].append(table_name)
 
         except Exception as e:
             logging.warning(f"Error analyzing table {table_name}: {e}")
@@ -362,9 +338,7 @@ def get_content_health_score() -> ToolResponse:
     else:
         metrics["content_volume"] = 10.0
 
-    metrics["content_quality"] = min(
-        10.0, (high_quality_tables / total_tables) * 10 + 3
-    )
+    metrics["content_quality"] = min(10.0, (high_quality_tables / total_tables) * 10 + 3)
 
     # 2. Organization Score (based on schema quality)
     schema_analysis = analysis.get("schema_analysis", {})
@@ -383,32 +357,21 @@ def get_content_health_score() -> ToolResponse:
         organization_factors.append(table_score)
 
     metrics["organization"] = (
-        (sum(organization_factors) / len(organization_factors))
-        if organization_factors
-        else 5.0
+        (sum(organization_factors) / len(organization_factors)) if organization_factors else 5.0
     )
 
     # 3. Semantic Readiness Score
     semantic_ready = len(analysis.get("semantic_readiness", {}).get("ready", []))
-    semantic_partial = len(
-        analysis.get("semantic_readiness", {}).get("partial", [])
-    )
-    semantic_needed = len(
-        analysis.get("semantic_readiness", {}).get("needs_setup", [])
-    )
-
+    semantic_partial = len(analysis.get("semantic_readiness", {}).get("partial", []))
     if not is_semantic_search_available():
         metrics["semantic_readiness"] = 5.0  # Neutral score if not available
         metrics["semantic_note"] = "Semantic search dependencies not available"
     else:
-        semantic_score = (
-            (semantic_ready * 2 + semantic_partial) / (total_tables * 2)
-        ) * 10
+        semantic_score = ((semantic_ready * 2 + semantic_partial) / (total_tables * 2)) * 10
         metrics["semantic_readiness"] = min(10.0, semantic_score)
 
     # 4. Accessibility Score (how easy it is to find and use content)
     medium_density = len(analysis.get("text_density", {}).get("medium", []))
-    low_density = len(analysis.get("text_density", {}).get("low", []))
 
     # Prefer medium density (not too verbose, not too sparse)
     if total_tables == 0:
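A worked instance of the readiness formula above, where fully embedded tables count double and partially embedded tables count once before normalizing to a 0-10 scale:

```python
# Hypothetical counts: 3 tables fully embedded, 2 partially, 5 total.
semantic_ready, semantic_partial, total_tables = 3, 2, 5

semantic_score = ((semantic_ready * 2 + semantic_partial) / (total_tables * 2)) * 10
print(min(10.0, semantic_score))  # (6 + 2) / 10 * 10 = 8.0
```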
mcp_sqlite_memory_bank/tools/basic.py
CHANGED
@@ -8,7 +8,7 @@ data operations, and core functionality.
 from typing import Any, Dict, List, Optional, cast
 
 from ..database import get_database
-from ..types import
+from ..types import ToolResponse
 from ..utils import catch_errors
 
 
@@ -20,9 +20,7 @@ def create_table(
     """Create a new table in the SQLite memory bank."""
     from .. import server
 
-    return cast(
-        ToolResponse, get_database(server.DB_PATH).create_table(table_name, columns)
-    )
+    return cast(ToolResponse, get_database(server.DB_PATH).create_table(table_name, columns))
 
 
 @catch_errors
@@ -54,9 +52,7 @@ def rename_table(old_name: str, new_name: str) -> ToolResponse:
     """Rename a table in the SQLite memory bank."""
     from .. import server
 
-    return cast(
-        ToolResponse, get_database(server.DB_PATH).rename_table(old_name, new_name)
-    )
+    return cast(ToolResponse, get_database(server.DB_PATH).rename_table(old_name, new_name))
 
 
 @catch_errors
@@ -90,9 +86,7 @@ def update_rows(
     """Update rows in any table in the SQLite Memory Bank, matching the WHERE clause."""
     from .. import server
 
-    return cast(
-        ToolResponse, get_database(server.DB_PATH).update_rows(table_name, data, where)
-    )
+    return cast(ToolResponse, get_database(server.DB_PATH).update_rows(table_name, data, where))
 
 
 @catch_errors
@@ -103,9 +97,7 @@ def delete_rows(
     """Delete rows from any table in the SQLite Memory Bank, matching the WHERE clause."""
     from .. import server
 
-    return cast(
-        ToolResponse, get_database(server.DB_PATH).delete_rows(table_name, where)
-    )
+    return cast(ToolResponse, get_database(server.DB_PATH).delete_rows(table_name, where))
 
 
 @catch_errors
@@ -133,9 +125,7 @@ def list_all_columns() -> ToolResponse:
 
 
 @catch_errors
-def upsert_memory(
-    table_name: str, data: Dict[str, Any], match_columns: List[str]
-) -> ToolResponse:
+def upsert_memory(table_name: str, data: Dict[str, Any], match_columns: List[str]) -> ToolResponse:
     """
     Smart memory upsert: Update existing records or create new ones based on matching columns.
 
@@ -148,7 +138,8 @@ def upsert_memory(
         match_columns (List[str]): Columns to use for finding existing records
 
     Returns:
-        ToolResponse: {"success": True, "action": "updated"
+        ToolResponse: For updates: {"success": True, "action": "updated", "id": rowid, "updated_fields": {...}}
+        For creates: {"success": True, "action": "created", "id": rowid}
     """
     import os
 
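A hedged usage sketch of the two documented response shapes; the table and column names here are illustrative, not taken from the package:

```python
# Hypothetical call against an existing `decisions` table.
result = upsert_memory(
    "decisions",
    {"decision_name": "database", "chosen_approach": "sqlite"},
    match_columns=["decision_name"],
)
if result.get("action") == "updated":
    # e.g. {"chosen_approach": {"old": "postgres", "new": "sqlite"}}
    print(result.get("updated_fields"))
else:
    print(result.get("id"))  # id of the newly created row
```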
@@ -174,8 +165,21 @@ def upsert_memory(
         # Update the first matching record
         row_id = existing_rows[0].get("id")
         if row_id:
+            # Get the original record to compare changes
+            original_record = existing_rows[0]
+
             update_result = db.update_rows(table_name, data, {"id": row_id})
             if update_result.get("success"):
+                # Determine which fields were actually updated
+                updated_fields = {}
+                for key, new_value in data.items():
+                    original_value = original_record.get(key)
+                    if original_value != new_value:
+                        updated_fields[key] = {
+                            "old": original_value,
+                            "new": new_value
+                        }
+
                 return cast(
                     ToolResponse,
                     {
@@ -183,6 +187,7 @@ def upsert_memory(
                         "action": "updated",
                         "id": row_id,
                         "rows_affected": update_result.get("rows_affected", 1),
+                        "updated_fields": updated_fields,
                     },
                 )
         return cast(ToolResponse, update_result)
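The field-diff introduced above, restated as a self-contained sketch with hypothetical data:

```python
# Hypothetical original row and incoming upsert payload.
original_record = {"id": 7, "status": "draft", "owner": "alice"}
data = {"status": "final", "owner": "alice"}

updated_fields = {}
for key, new_value in data.items():
    original_value = original_record.get(key)
    if original_value != new_value:
        updated_fields[key] = {"old": original_value, "new": new_value}

print(updated_fields)  # {'status': {'old': 'draft', 'new': 'final'}}
```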
@@ -308,9 +313,7 @@ def batch_create_memories(
 
         except Exception as e:
             failed_count += 1
-            results.append(
-                {"index": i, "action": "failed", "error": str(e), "success": False}
-            )
+            results.append({"index": i, "action": "failed", "error": str(e), "success": False})
 
     return cast(
         ToolResponse,
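The failure record appended above has a fixed shape. A small illustration (the exception here is a hypothetical stand-in for a real batch failure):

```python
results = []
i = 2  # position of the failing item in the submitted batch
try:
    raise ValueError("missing required column")  # stand-in for a real failure
except Exception as e:
    results.append({"index": i, "action": "failed", "error": str(e), "success": False})

print(results)
# [{'index': 2, 'action': 'failed', 'error': 'missing required column', 'success': False}]
```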