mcp-sqlite-memory-bank 1.5.1__py3-none-any.whl → 1.6.0__py3-none-any.whl

@@ -21,7 +21,9 @@ except ImportError:
     SENTENCE_TRANSFORMERS_AVAILABLE = False
     SentenceTransformer = None  # type: ignore
     util = None  # type: ignore
-    logging.warning("sentence-transformers not available. Install with: pip install sentence-transformers")
+    logging.warning(
+        "sentence-transformers not available. Install with: pip install sentence-transformers"
+    )
 
 try:
     import torch
@@ -58,7 +60,7 @@ class SemanticSearchEngine:
         )
 
     @property
-    def model(self):
+    def model(self) -> Any:
         """Lazy load the sentence transformer model."""
         if self._model is None:
             if not SENTENCE_TRANSFORMERS_AVAILABLE or SentenceTransformer is None:
@@ -67,7 +69,9 @@ class SemanticSearchEngine:
                 self._model = SentenceTransformer(self.model_name)
                 logging.info(f"Loaded semantic search model: {self.model_name}")
             except Exception as e:
-                raise DatabaseError(f"Failed to load semantic search model {self.model_name}: {e}")
+                raise DatabaseError(
+                    f"Failed to load semantic search model {self.model_name}: {e}"
+                )
         return self._model
 
     def get_embedding_dimensions(self) -> Optional[int]:
@@ -124,13 +128,17 @@ class SemanticSearchEngine:
 
         try:
             embeddings = self.model.encode(
-                valid_texts, convert_to_tensor=False, show_progress_bar=len(valid_texts) > 10
+                valid_texts,
+                convert_to_tensor=False,
+                show_progress_bar=len(valid_texts) > 10,
             )
             return [emb.tolist() for emb in embeddings]
         except Exception as e:
             raise DatabaseError(f"Failed to generate batch embeddings: {e}")
 
-    def calculate_similarity(self, embedding1: List[float], embedding2: List[float]) -> float:
+    def calculate_similarity(
+        self, embedding1: List[float], embedding2: List[float]
+    ) -> float:
         """
         Calculate cosine similarity between two embeddings.
 
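Note: the rewrapped `calculate_similarity` signature above computes cosine similarity over two embedding lists. The method body is not part of this hunk, so as an illustration only, the numpy-only form the fallback path relies on can be sketched as:

    import numpy as np
    from typing import List

    def cosine_similarity(embedding1: List[float], embedding2: List[float]) -> float:
        # cos(a, b) = dot(a, b) / (||a|| * ||b||)
        a, b = np.asarray(embedding1), np.asarray(embedding2)
        denom = float(np.linalg.norm(a) * np.linalg.norm(b))
        return float(np.dot(a, b) / denom) if denom else 0.0  # guard zero vectors
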
@@ -193,7 +201,12 @@ class SemanticSearchEngine:
             return []
 
         # Use efficient torch/sentence-transformers if available
-        if TORCH_AVAILABLE and torch is not None and SENTENCE_TRANSFORMERS_AVAILABLE and util is not None:
+        if (
+            TORCH_AVAILABLE
+            and torch is not None
+            and SENTENCE_TRANSFORMERS_AVAILABLE
+            and util is not None
+        ):
             try:
                 # Convert to tensors
                 query_tensor = torch.tensor(query_embedding).unsqueeze(0)
@@ -213,7 +226,9 @@ class SemanticSearchEngine:
                 results.sort(key=lambda x: x[1], reverse=True)
                 return results[:top_k]
             except Exception as e:
-                logging.warning(f"Torch similarity search failed, using numpy fallback: {e}")
+                logging.warning(
+                    f"Torch similarity search failed, using numpy fallback: {e}"
+                )
 
         # Fallback to numpy implementation
         results = []
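Note: the torch branch above converts the query embedding to a tensor and, per the import of `util` from sentence-transformers, batches the similarity computation. A sketch of that approach with toy vectors (assumes torch and sentence-transformers are installed; the values are illustrative, not from the package):

    import torch
    from sentence_transformers import util

    # Toy data: one query embedding and two candidate embeddings (dimension 3)
    query_tensor = torch.tensor([0.1, 0.2, 0.3]).unsqueeze(0)      # shape (1, 3)
    candidates = torch.tensor([[0.1, 0.2, 0.3], [0.9, 0.1, 0.0]])  # shape (2, 3)

    scores = util.cos_sim(query_tensor, candidates)[0]             # shape (2,)
    top = torch.topk(scores, k=2)
    print(top.indices.tolist(), [round(v, 3) for v in top.values.tolist()])
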
@@ -291,6 +306,10 @@ class SemanticSearchEngine:
                 original_idx = valid_indices[candidate_idx]
                 row = content_data[original_idx].copy()
 
+                # Remove embedding data to avoid polluting LLM responses
+                if embedding_column in row:
+                    del row[embedding_column]
+
                 # Add similarity score
                 row["similarity_score"] = round(similarity_score, 3)
 
@@ -298,7 +317,11 @@ class SemanticSearchEngine:
                 if content_columns:
                     matched_content = []
                     for col in content_columns:
-                        if col in row and row[col] and query.lower() in str(row[col]).lower():
+                        if (
+                            col in row
+                            and row[col]
+                            and query.lower() in str(row[col]).lower()
+                        ):
                             matched_content.append(f"{col}: {row[col]}")
                     if matched_content:
                         row["matched_content"] = matched_content
@@ -346,7 +369,11 @@ class SemanticSearchEngine:
 
         # Get semantic search results
         semantic_results = self.semantic_search(
-            query, content_data, embedding_column, similarity_threshold=0.3, top_k=top_k * 2  # Get more for reranking
+            query,
+            content_data,
+            embedding_column,
+            similarity_threshold=0.3,
+            top_k=top_k * 2,  # Get more for reranking
         )
 
         # Add text matching scores
@@ -360,11 +387,15 @@ class SemanticSearchEngine:
                     content = str(result[col]).lower()
                     if query_lower in content:
                         # Simple frequency-based text scoring
-                        text_score += content.count(query_lower) / len(content.split())
+                        text_score += content.count(query_lower) / len(
+                            content.split()
+                        )
 
             # Combine scores
             semantic_score = result.get("similarity_score", 0.0)
-            combined_score = (semantic_score * semantic_weight) + (text_score * text_weight)
+            combined_score = (semantic_score * semantic_weight) + (
+                text_score * text_weight
+            )
             result["combined_score"] = round(combined_score, 3)
             result["text_score"] = round(text_score, 3)
 
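Note: the reformatting leaves the hybrid scoring rule unchanged — a weighted sum of the semantic similarity and the keyword-frequency score. A quick worked example with illustrative numbers (not taken from the package):

    semantic_score, semantic_weight = 0.82, 0.7  # cosine similarity, semantic weight
    text_score, text_weight = 0.50, 0.3          # frequency-based text score, text weight

    combined_score = (semantic_score * semantic_weight) + (text_score * text_weight)
    print(round(combined_score, 3))  # 0.724
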
@@ -373,7 +404,7 @@ class SemanticSearchEngine:
 
         return semantic_results[:top_k]
 
-    def clear_cache(self):
+    def clear_cache(self) -> None:
         """Clear the embedding cache."""
         self._embedding_cache.clear()
         logging.info("Semantic search cache cleared")
@@ -390,15 +421,17 @@ def get_semantic_engine(model_name: str = "all-MiniLM-L6-v2") -> SemanticSearchE
     try:
         if _semantic_engine is None or _semantic_engine.model_name != model_name:
             if not SENTENCE_TRANSFORMERS_AVAILABLE:
-                raise ValueError("Sentence transformers not available for semantic search")
+                raise ValueError(
+                    "Sentence transformers not available for semantic search"
+                )
             _semantic_engine = SemanticSearchEngine(model_name)
-
+
         # Verify the engine is properly initialized
-        if not hasattr(_semantic_engine, 'hybrid_search'):
+        if not hasattr(_semantic_engine, "hybrid_search"):
             raise ValueError("Semantic engine missing hybrid_search method")
-
+
         return _semantic_engine
-
+
     except Exception as e:
         raise DatabaseError(f"Failed to initialize semantic engine: {e}")
 
@@ -124,7 +124,9 @@ def create_table(table_name: str, columns: List[Dict[str, str]]) -> ToolResponse
     - Creates table if it doesn't exist (idempotent)
     - Raises appropriate errors for invalid input
     """
-    return cast(CreateTableResponse, get_database(DB_PATH).create_table(table_name, columns))
+    return cast(
+        CreateTableResponse, get_database(DB_PATH).create_table(table_name, columns)
+    )
 
 
 @mcp.tool
@@ -236,7 +238,9 @@ def rename_table(old_name: str, new_name: str) -> ToolResponse:
     - Validates both old and new table names
     - Confirms old table exists and new name doesn't conflict
     """
-    return cast(RenameTableResponse, get_database(DB_PATH).rename_table(old_name, new_name))
+    return cast(
+        RenameTableResponse, get_database(DB_PATH).rename_table(old_name, new_name)
+    )
 
 
 @mcp.tool
@@ -265,6 +269,42 @@ def create_row(table_name: str, data: Dict[str, Any]) -> ToolResponse:
     return cast(CreateRowResponse, get_database(DB_PATH).insert_row(table_name, data))
 
 
+@mcp.tool
+@catch_errors
+def upsert_memory(
+    table_name: str, data: Dict[str, Any], match_columns: List[str]
+) -> ToolResponse:
+    """
+    🔄 **SMART MEMORY UPSERT** - Prevent duplicates and maintain data consistency!
+
+    Update existing records or create new ones based on matching columns.
+    This is the preferred method for memory management as it prevents duplicates.
+
+    Args:
+        table_name (str): Table to upsert into
+        data (Dict[str, Any]): Data to upsert (column-value pairs)
+        match_columns (List[str]): Columns to use for finding existing records
+
+    Returns:
+        ToolResponse: On success: {"success": True, "action": "updated"|"created", "id": rowid}
+                      On error: {"success": False, "error": str, "category": str, "details": dict}
+
+    Examples:
+        >>> upsert_memory('technical_decisions',
+        ...     {'decision_name': 'API Design', 'chosen_approach': 'REST'},
+        ...     ['decision_name'])
+        {"success": True, "action": "updated", "id": 15, "rows_affected": 1}
+
+    FastMCP Tool Info:
+        - **PREVENTS DUPLICATES**: Automatically updates existing records instead of creating duplicates
+        - **SMART MATCHING**: Uses specified columns to find existing records
+        - **EFFICIENT MEMORY MANAGEMENT**: Ideal for agent memory patterns
+        - **CLEAR FEEDBACK**: Returns whether record was created or updated
+        - **PERFECT FOR AGENTS**: Handles the common "update or create" pattern automatically
+    """
+    return basic.upsert_memory(table_name, data, match_columns)
+
+
 @mcp.tool
 @catch_errors
 def read_rows(table_name: str, where: Optional[Dict[str, Any]] = None) -> ToolResponse:
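Note: the new `upsert_memory` tool delegates to `basic.upsert_memory`, whose implementation is not part of this diff. The docstring describes the usual "find by match columns, then update or insert" pattern, which in plain sqlite3 terms looks roughly like this (hypothetical helper; assumes an `id` primary key and pre-validated identifiers):

    import sqlite3
    from typing import Any, Dict, List

    def upsert(conn: sqlite3.Connection, table: str, data: Dict[str, Any],
               match_columns: List[str]) -> Dict[str, Any]:
        # Table/column names are assumed validated; values are always parameterized
        where = " AND ".join(f"{col} = ?" for col in match_columns)
        row = conn.execute(
            f"SELECT id FROM {table} WHERE {where}",
            [data[col] for col in match_columns],
        ).fetchone()
        if row:  # match found: update in place
            assignments = ", ".join(f"{col} = ?" for col in data)
            conn.execute(
                f"UPDATE {table} SET {assignments} WHERE id = ?",
                [*data.values(), row[0]],
            )
            return {"success": True, "action": "updated", "id": row[0]}
        # No match: insert a new record
        cols = ", ".join(data)
        marks = ", ".join("?" for _ in data)
        cur = conn.execute(f"INSERT INTO {table} ({cols}) VALUES ({marks})",
                           list(data.values()))
        return {"success": True, "action": "created", "id": cur.lastrowid}
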
@@ -293,7 +333,9 @@ def read_rows(table_name: str, where: Optional[Dict[str, Any]] = None) -> ToolRe
 
 @mcp.tool
 @catch_errors
-def update_rows(table_name: str, data: Dict[str, Any], where: Optional[Dict[str, Any]] = None) -> ToolResponse:
+def update_rows(
+    table_name: str, data: Dict[str, Any], where: Optional[Dict[str, Any]] = None
+) -> ToolResponse:
     """
     Update rows in any table in the SQLite Memory Bank for Copilot/AI agents, matching the WHERE clause.
 
@@ -316,12 +358,16 @@ def update_rows(table_name: str, data: Dict[str, Any], where: Optional[Dict[str,
     - Parameterizes all queries for safety
     - Where clause is optional (omitting it updates all rows!)
     """
-    return cast(UpdateRowsResponse, get_database(DB_PATH).update_rows(table_name, data, where))
+    return cast(
+        UpdateRowsResponse, get_database(DB_PATH).update_rows(table_name, data, where)
+    )
 
 
 @mcp.tool
 @catch_errors
-def delete_rows(table_name: str, where: Optional[Dict[str, Any]] = None) -> ToolResponse:
+def delete_rows(
+    table_name: str, where: Optional[Dict[str, Any]] = None
+) -> ToolResponse:
     """
     Delete rows from any table in the SQLite Memory Bank for Copilot/AI agents, matching the WHERE clause.
 
@@ -343,13 +389,18 @@ def delete_rows(table_name: str, where: Optional[Dict[str, Any]] = None) -> Tool
     - Parameterizes all queries for safety
     - Where clause is optional (omitting it deletes all rows!)
     """
-    return cast(DeleteRowsResponse, get_database(DB_PATH).delete_rows(table_name, where))
+    return cast(
+        DeleteRowsResponse, get_database(DB_PATH).delete_rows(table_name, where)
+    )
 
 
 @mcp.tool
 @catch_errors
 def run_select_query(
-    table_name: str, columns: Optional[List[str]] = None, where: Optional[Dict[str, Any]] = None, limit: int = 100
+    table_name: str,
+    columns: Optional[List[str]] = None,
+    where: Optional[Dict[str, Any]] = None,
+    limit: int = 100,
 ) -> ToolResponse:
     """
     Run a safe SELECT query on a table in the SQLite memory bank.
@@ -374,7 +425,10 @@ def run_select_query(
     - Only SELECT queries are allowed (no arbitrary SQL)
     - Default limit of 100 rows prevents memory issues
     """
-    return cast(SelectQueryResponse, get_database(DB_PATH).select_query(table_name, columns, where, limit))
+    return cast(
+        SelectQueryResponse,
+        get_database(DB_PATH).select_query(table_name, columns, where, limit),
+    )
 
 
 @mcp.tool
@@ -402,14 +456,14 @@ def list_all_columns() -> ToolResponse:
 # Import the implementation functions from tools modules
 from .tools.search import (
     search_content as search_content_impl,
-    explore_tables as explore_tables_impl, 
+    explore_tables as explore_tables_impl,
     add_embeddings as add_embeddings_impl,
     auto_semantic_search as auto_semantic_search_impl,
     auto_smart_search as auto_smart_search_impl,
     embedding_stats as embedding_stats_impl,
 )
 
-# Import the implementation functions from discovery module 
+# Import the implementation functions from discovery module
 from .tools.discovery import (
     intelligent_discovery as intelligent_discovery_impl,
     discovery_templates as discovery_templates_impl,
@@ -418,6 +472,7 @@ from .tools.discovery import (
 
 # --- MCP Tool Definitions (Required in main server.py for FastMCP) ---
 
+
 @mcp.tool
 @catch_errors
 def search_content(
@@ -455,7 +510,7 @@ def search_content(
 
 
 @mcp.tool
-@catch_errors 
+@catch_errors
 def explore_tables(
     pattern: Optional[str] = None,
     include_row_counts: bool = True,
@@ -580,7 +635,9 @@ def auto_semantic_search(
     - Supports fuzzy matching and concept discovery
     - Perfect for agents - just search and it works!
     """
-    return auto_semantic_search_impl(query, tables, similarity_threshold, limit, model_name)
+    return auto_semantic_search_impl(
+        query, tables, similarity_threshold, limit, model_name
+    )
 
 
 @mcp.tool
@@ -627,7 +684,9 @@ def auto_smart_search(
     - Optimal for both exploratory and precise searches
     - Perfect for agents - ultimate search tool that just works!
     """
-    return auto_smart_search_impl(query, tables, semantic_weight, text_weight, limit, model_name)
+    return auto_smart_search_impl(
+        query, tables, semantic_weight, text_weight, limit, model_name
+    )
 
 
 @mcp.tool
@@ -656,7 +715,7 @@ def embedding_stats(
 
     FastMCP Tool Info:
     - Shows how much content is ready for semantic search
-    - Helps identify tables that need embedding generation 
+    - Helps identify tables that need embedding generation
     - Provides embedding dimension info for debugging
     - Useful for monitoring semantic search capabilities
     """
@@ -761,7 +820,9 @@ def smart_search(
     - Optimal for both exploratory and precise searches
     - Perfect for agents - ultimate search tool that just works!
     """
-    return _smart_search_impl(query, tables, semantic_weight, text_weight, limit, model_name)
+    return _smart_search_impl(
+        query, tables, semantic_weight, text_weight, limit, model_name
+    )
 
 
 @mcp.tool
@@ -803,11 +864,14 @@ def find_related(
     - Can reveal patterns and themes across your knowledge base
     - Enables serendipitous discovery of relevant information
     """
-    return _find_related_impl(table_name, row_id, similarity_threshold, limit, model_name)
+    return _find_related_impl(
+        table_name, row_id, similarity_threshold, limit, model_name
+    )
 
 
 # --- Advanced Discovery Tools for SQLite Memory Bank ---
 
+
 @mcp.tool
 @catch_errors
 def intelligent_discovery(
@@ -825,14 +889,14 @@ def intelligent_discovery(
     Args:
         discovery_goal (str): What you want to achieve
             - "understand_content": Learn what data is available and how it's organized
-            - "find_patterns": Discover themes, relationships, and content patterns 
+            - "find_patterns": Discover themes, relationships, and content patterns
             - "explore_structure": Understand database schema and organization
             - "assess_quality": Evaluate content quality and completeness
            - "prepare_search": Get ready for effective content searching
         focus_area (Optional[str]): Specific table or topic to focus on (default: all)
         depth (str): How thorough the discovery should be
             - "quick": Fast overview with key insights
-            - "moderate": Balanced analysis with actionable recommendations 
+            - "moderate": Balanced analysis with actionable recommendations
             - "comprehensive": Deep dive with detailed analysis
         agent_id (Optional[str]): Agent identifier for learning discovery patterns
 
@@ -861,8 +925,7 @@
 @mcp.tool
 @catch_errors
 def discovery_templates(
-    template_type: str = "first_time_exploration",
-    customize_for: Optional[str] = None
+    template_type: str = "first_time_exploration", customize_for: Optional[str] = None
 ) -> ToolResponse:
     """
     📋 **DISCOVERY TEMPLATES** - Pre-built exploration workflows for common scenarios!
@@ -895,7 +958,7 @@ def discovery_templates(
     }}
 
     FastMCP Tool Info:
-    - **PROVEN WORKFLOWS**: Battle-tested discovery sequences 
+    - **PROVEN WORKFLOWS**: Battle-tested discovery sequences
     - **STEP-BY-STEP GUIDANCE**: Exact tools and parameters to use
     - **CUSTOMIZABLE**: Adapt templates to your specific needs
     - **LEARNING-OPTIMIZED**: Based on successful discovery patterns
@@ -907,8 +970,12 @@
 @catch_errors
 def discover_relationships(
     table_name: Optional[str] = None,
-    relationship_types: List[str] = ["foreign_keys", "semantic_similarity", "temporal_patterns"],
-    similarity_threshold: float = 0.6
+    relationship_types: List[str] = [
+        "foreign_keys",
+        "semantic_similarity",
+        "temporal_patterns",
+    ],
+    similarity_threshold: float = 0.6,
 ) -> ToolResponse:
     """
     🔗 **RELATIONSHIP DISCOVERY** - Find hidden connections in your data!
@@ -944,7 +1011,9 @@ def discover_relationships(
     - **ACTIONABLE INSIGHTS**: Suggests how to leverage discovered relationships
     - **PERFECT FOR EXPLORATION**: Reveals hidden data organization patterns
     """
-    return discover_relationships_impl(table_name, relationship_types, similarity_threshold)
+    return discover_relationships_impl(
+        table_name, relationship_types, similarity_threshold
+    )
 
 
 # Export the FastMCP app for use in other modules and server runners
@@ -983,12 +1052,13 @@ __all__ = [
     "app",
     "mcp",
     "create_table",
-    "drop_table", 
+    "drop_table",
     "rename_table",
     "list_tables",
     "describe_table",
     "list_all_columns",
     "create_row",
+    "upsert_memory",
     "read_rows",
     "update_rows",
     "delete_rows",
@@ -1008,7 +1078,10 @@ __all__ = [
 def mcp_server():
     """Entry point for MCP stdio server (for uvx and package installations)."""
     # Configure logging for MCP server
-    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    )
 
     # Log startup information
     logging.info(f"Starting SQLite Memory Bank MCP server with database at {DB_PATH}")
@@ -1022,11 +1095,15 @@ def main():
     import uvicorn
     import argparse
 
-    parser = argparse.ArgumentParser(description="Run MCP SQLite Memory Bank Server in HTTP mode")
+    parser = argparse.ArgumentParser(
+        description="Run MCP SQLite Memory Bank Server in HTTP mode"
+    )
     parser.add_argument("--host", default="127.0.0.1", help="Host to bind to")
     parser.add_argument("--port", type=int, default=8000, help="Port to bind to")
     parser.add_argument("--db-path", help="Path to SQLite database file")
-    parser.add_argument("--reload", action="store_true", help="Enable auto-reload for development")
+    parser.add_argument(
+        "--reload", action="store_true", help="Enable auto-reload for development"
+    )
 
     args = parser.parse_args()
 
@@ -1036,16 +1113,26 @@ def main():
         DB_PATH = args.db_path
         os.environ["DB_PATH"] = args.db_path
 
-    print(f"Starting MCP SQLite Memory Bank server in HTTP mode on {args.host}:{args.port}")
+    print(
+        f"Starting MCP SQLite Memory Bank server in HTTP mode on {args.host}:{args.port}"
+    )
     print(f"Database path: {DB_PATH}")
     print("Available at: http://localhost:8000/docs")
 
-    uvicorn.run("mcp_sqlite_memory_bank.server:app", host=args.host, port=args.port, reload=args.reload)
+    uvicorn.run(
+        "mcp_sqlite_memory_bank.server:app",
+        host=args.host,
+        port=args.port,
+        reload=args.reload,
+    )
 
 
 if __name__ == "__main__":
     # Configure logging
-    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+    )
 
     # Log startup information
     logging.info(f"Starting SQLite Memory Bank with database at {DB_PATH}")
@@ -1085,3 +1172,88 @@ _get_content_health_score_impl = analytics.get_content_health_score
 _intelligent_discovery_impl = intelligent_discovery_impl
 _discovery_templates_impl = discovery_templates_impl
 _discover_relationships_impl = discover_relationships_impl
+
+
+@mcp.tool
+@catch_errors
+def batch_create_memories(
+    table_name: str,
+    data_list: List[Dict[str, Any]],
+    match_columns: Optional[List[str]] = None,
+    use_upsert: bool = True,
+) -> ToolResponse:
+    """
+    🚀 **BATCH MEMORY CREATION** - Efficiently add multiple memories at once!
+
+    Create multiple memory records in a single operation with optional duplicate prevention.
+    Much faster than creating records one by one.
+
+    Args:
+        table_name (str): Table to insert records into
+        data_list (List[Dict[str, Any]]): List of memory records to create
+        match_columns (Optional[List[str]]): Columns to use for duplicate detection (if use_upsert=True)
+        use_upsert (bool): Whether to use upsert logic to prevent duplicates (default: True)
+
+    Returns:
+        ToolResponse: On success: {"success": True, "created": int, "updated": int, "failed": int}
+                      On error: {"success": False, "error": str, "category": str, "details": dict}
+
+    Examples:
+        >>> batch_create_memories('technical_decisions', [
+        ...     {'decision_name': 'API Design', 'chosen_approach': 'REST'},
+        ...     {'decision_name': 'Database Choice', 'chosen_approach': 'SQLite'},
+        ...     {'decision_name': 'Frontend Framework', 'chosen_approach': 'React'}
+        ... ], match_columns=['decision_name'])
+        {"success": True, "created": 2, "updated": 1, "failed": 0, "total_processed": 3}
+
+    FastMCP Tool Info:
+        - **EFFICIENT**: Process multiple records in one operation
+        - **SMART DEDUPLICATION**: Optional upsert logic prevents duplicates
+        - **DETAILED FEEDBACK**: Returns counts for created, updated, and failed records
+        - **PARTIAL SUCCESS**: Continues processing even if some records fail
+        - **PERFECT FOR BULK IMPORTS**: Ideal for importing knowledge bases or datasets
+    """
+    return basic.batch_create_memories(table_name, data_list, match_columns, use_upsert)
+
+
+@mcp.tool
+@catch_errors
+def batch_delete_memories(
+    table_name: str, where_conditions: List[Dict[str, Any]], match_all: bool = False
+) -> ToolResponse:
+    """
+    🗑️ **BATCH MEMORY DELETION** - Efficiently delete multiple memories at once!
+
+    Delete multiple memory records in a single operation with flexible matching conditions.
+    Much faster than deleting records one by one.
+
+    Args:
+        table_name (str): Table to delete records from
+        where_conditions (List[Dict[str, Any]]): List of WHERE conditions for deletion
+        match_all (bool): If True, delete records matching ALL conditions; if False, delete records matching ANY condition (default: False)
+
+    Returns:
+        ToolResponse: On success: {"success": True, "deleted": int, "failed": int}
+                      On error: {"success": False, "error": str, "category": str, "details": dict}
+
+    Examples:
+        >>> batch_delete_memories('technical_decisions', [
+        ...     {'decision_name': 'Old Decision 1'},
+        ...     {'decision_name': 'Old Decision 2'},
+        ...     {'id': 42}
+        ... ])
+        {"success": True, "deleted": 3, "failed": 0, "total_conditions": 3}
+
+        >>> batch_delete_memories('notes', [
+        ...     {'category': 'temp', 'created_date': '2024-01-01'}
+        ... ], match_all=True)
+        {"success": True, "deleted": 15, "failed": 0}  # Deletes notes that are BOTH temp AND from that date
+
+    FastMCP Tool Info:
+        - **EFFICIENT**: Process multiple deletions in one operation
+        - **FLEXIBLE MATCHING**: Support both OR logic (any condition) and AND logic (all conditions)
+        - **DETAILED FEEDBACK**: Returns counts and per-condition results
+        - **PARTIAL SUCCESS**: Continues processing even if some deletions fail
+        - **SAFE**: Uses parameterized queries to prevent SQL injection
+    """
+    return basic.batch_delete_memories(table_name, where_conditions, match_all)
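Note: as with `upsert_memory`, the `basic.batch_create_memories` and `basic.batch_delete_memories` implementations are not shown in this diff. The created/updated/failed accounting the docstrings promise reduces to a simple partial-success loop; a generic sketch with a hypothetical `apply_one` callback (not the package's API):

    from typing import Any, Callable, Dict, List

    def batch_apply(records: List[Dict[str, Any]],
                    apply_one: Callable[[Dict[str, Any]], str]) -> Dict[str, Any]:
        # apply_one returns "created" or "updated"; any exception counts as a failure
        counts = {"created": 0, "updated": 0, "failed": 0}
        for record in records:
            try:
                counts[apply_one(record)] += 1
            except Exception:
                counts["failed"] += 1  # partial success: keep processing the rest
        return {"success": True, **counts, "total_processed": len(records)}
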
@@ -3,7 +3,7 @@ Tools module for SQLite Memory Bank MCP server.
 
 This module organizes the various MCP tools into logical categories:
 - analytics: Content analysis and health assessment tools
-- search: Intelligent search and discovery tools 
+- search: Intelligent search and discovery tools
 - discovery: Advanced exploration and relationship discovery tools
 - basic: Core CRUD operations and table management
 """
@@ -45,35 +45,32 @@ from .basic import (
 __all__ = [
     # Analytics tools
-    'analyze_memory_patterns',
-    'get_content_health_score',
-
+    "analyze_memory_patterns",
+    "get_content_health_score",
     # Search tools
-    'search_content',
-    'explore_tables',
-    'add_embeddings',
-    'semantic_search',
-    'find_related',
-    'smart_search',
-    'embedding_stats',
-    'auto_semantic_search',
-    'auto_smart_search',
-
+    "search_content",
+    "explore_tables",
+    "add_embeddings",
+    "semantic_search",
+    "find_related",
+    "smart_search",
+    "embedding_stats",
+    "auto_semantic_search",
+    "auto_smart_search",
     # Discovery tools
-    'intelligent_discovery',
-    'discovery_templates',
-    'discover_relationships',
-
+    "intelligent_discovery",
+    "discovery_templates",
+    "discover_relationships",
     # Basic tools
-    'create_table',
-    'list_tables',
-    'describe_table',
-    'drop_table',
-    'rename_table',
-    'create_row',
-    'read_rows',
-    'update_rows',
-    'delete_rows',
-    'run_select_query',
-    'list_all_columns',
+    "create_table",
+    "list_tables",
+    "describe_table",
+    "drop_table",
+    "rename_table",
+    "create_row",
+    "read_rows",
+    "update_rows",
+    "delete_rows",
+    "run_select_query",
+    "list_all_columns",
 ]
 