mcp-sqlite-memory-bank 1.6.0__tar.gz → 1.6.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {mcp_sqlite_memory_bank-1.6.0/src/mcp_sqlite_memory_bank.egg-info → mcp_sqlite_memory_bank-1.6.2}/PKG-INFO +1 -1
  2. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/pyproject.toml +1 -1
  3. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/__init__.py +2 -2
  4. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/__main__.py +4 -12
  5. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/database.py +25 -73
  6. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/prompts.py +12 -20
  7. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/resources.py +13 -39
  8. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/semantic.py +10 -31
  9. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/server.py +27 -57
  10. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/tools/analytics.py +18 -55
  11. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/tools/basic.py +23 -20
  12. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/tools/discovery.py +34 -97
  13. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/tools/search.py +6 -17
  14. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/utils.py +4 -13
  15. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2/src/mcp_sqlite_memory_bank.egg-info}/PKG-INFO +1 -1
  16. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/conftest.py +91 -81
  17. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_api.py +1103 -984
  18. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_discovery_tools.py +296 -262
  19. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_edge_cases.py +105 -93
  20. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_mocks.py +43 -69
  21. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_performance.py +95 -75
  22. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/test_server.py +185 -179
  23. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/LICENSE +0 -0
  24. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/MANIFEST.in +0 -0
  25. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/README.md +0 -0
  26. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/setup.cfg +0 -0
  27. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/py.typed +0 -0
  28. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/tools/__init__.py +0 -0
  29. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank/types.py +0 -0
  30. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank.egg-info/SOURCES.txt +0 -0
  31. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank.egg-info/dependency_links.txt +0 -0
  32. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank.egg-info/entry_points.txt +0 -0
  33. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank.egg-info/requires.txt +0 -0
  34. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/src/mcp_sqlite_memory_bank.egg-info/top_level.txt +0 -0
  35. {mcp_sqlite_memory_bank-1.6.0 → mcp_sqlite_memory_bank-1.6.2}/tests/README.md +0 -0
src/mcp_sqlite_memory_bank.egg-info/PKG-INFO → PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp_sqlite_memory_bank
-Version: 1.6.0
+Version: 1.6.2
 Summary: A dynamic, agent/LLM-friendly SQLite memory bank for MCP servers with semantic search capabilities.
 Author-email: Robert Meisner <robert@catchit.pl>
 License-Expression: MIT
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mcp_sqlite_memory_bank"
-version = "1.6.0"
+version = "1.6.2"
 description = "A dynamic, agent/LLM-friendly SQLite memory bank for MCP servers with semantic search capabilities."
 authors = [
     { name="Robert Meisner", email="robert@catchit.pl" }
src/mcp_sqlite_memory_bank/__init__.py
@@ -8,7 +8,7 @@ Cursor, and other LLM-powered tools to interact with structured data in a
 safe, explicit, and extensible way.
 
 Author: Robert Meisner
-Version: 1.6.0
+Version: 1.6.2
 License: MIT
 """
 
@@ -73,7 +73,7 @@ from .types import (
 )
 
 # Package metadata
-__version__ = "0.1.0"
+__version__ = "1.6.2"
 __author__ = "Robert Meisner"
 __all__ = [
     # Core tools
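Note: the substantive fix in the __init__.py hunk above is that __version__ had been left at a stale "0.1.0" while the distribution metadata already said 1.6.x; 1.6.2 brings the two back in sync. A minimal sketch (not this package's code) of deriving the value from installed metadata so they cannot drift apart again:

    # Hypothetical alternative: resolve __version__ from the installed
    # distribution instead of hard-coding it next to pyproject.toml.
    from importlib.metadata import PackageNotFoundError, version

    try:
        __version__ = version("mcp_sqlite_memory_bank")
    except PackageNotFoundError:
        # Source checkout that was never pip-installed.
        __version__ = "0.0.0.dev0"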
src/mcp_sqlite_memory_bank/__main__.py
@@ -11,9 +11,7 @@ import sys
 import os
 
 # Add the project root to Python path to avoid import issues
-project_root = os.path.dirname(
-    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-)
+project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 if project_root not in sys.path:
     sys.path.insert(0, project_root)
 
@@ -34,12 +32,8 @@ def main() -> None:
     print("SQLite Memory Bank MCP Server")
     print("Usage: python -m src.mcp_sqlite_memory_bank")
     print("")
-    print(
-        "This starts the SQLite Memory Bank as an MCP (Model Context Protocol) server."
-    )
-    print(
-        "The server communicates via STDIO and provides memory management tools"
-    )
+    print("This starts the SQLite Memory Bank as an MCP (Model Context Protocol) server.")
+    print("The server communicates via STDIO and provides memory management tools")
     print("for LLMs and AI agents.")
     print("")
     print(f"Database location: {DB_PATH}")
@@ -49,9 +43,7 @@ def main() -> None:
         return
 
     # Log startup information
-    logging.info(
-        f"Starting SQLite Memory Bank MCP server with database at {DB_PATH}"
-    )
+    logging.info(f"Starting SQLite Memory Bank MCP server with database at {DB_PATH}")
 
     # Run the FastMCP app in stdio mode for MCP clients
     app.run(transport="stdio")
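Note: the __main__.py hunks are pure formatting reflows; the project_root expression still climbs three directory levels from src/mcp_sqlite_memory_bank/__main__.py to the repository root before prepending it to sys.path. An equivalent pathlib sketch (illustrative only; the package itself uses os.path):

    from pathlib import Path

    # __main__.py sits at <root>/src/mcp_sqlite_memory_bank/__main__.py,
    # so parents[2] is the project root, matching the three dirname() calls.
    project_root = str(Path(__file__).resolve().parents[2])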
src/mcp_sqlite_memory_bank/database.py
@@ -116,8 +116,7 @@ class SQLiteMemoryDatabase:
         for col_name in column_names:
             if col_name not in valid_columns:
                 raise ValidationError(
-                    f"Invalid column '{col_name}' for table "
-                    f"'{table.name}' in {context}"
+                    f"Invalid column '{col_name}' for table " f"'{table.name}' in {context}"
                 )
 
     def _build_where_conditions(self, table: Table, where: Dict[str, Any]) -> List:
@@ -152,9 +151,7 @@ class SQLiteMemoryDatabase:
 
         return decorator
 
-    def create_table(
-        self, table_name: str, columns: List[Dict[str, str]]
-    ) -> ToolResponse:
+    def create_table(self, table_name: str, columns: List[Dict[str, str]]) -> ToolResponse:
         """Create a new table with the specified columns."""
         # Input validation
         if not table_name or not table_name.isidentifier():
@@ -190,9 +187,7 @@ class SQLiteMemoryDatabase:
             with self.get_connection() as conn:
                 inspector = inspect(conn)
                 tables = [
-                    name
-                    for name in inspector.get_table_names()
-                    if not name.startswith("sqlite_")
+                    name for name in inspector.get_table_names() if not name.startswith("sqlite_")
                 ]
                 return {"success": True, "tables": tables}
         except SQLAlchemyError as e:
@@ -252,9 +247,7 @@ class SQLiteMemoryDatabase:
         except (ValidationError, SQLAlchemyError) as e:
             if isinstance(e, ValidationError):
                 raise e
-            raise DatabaseError(
-                f"Failed to rename table from {old_name} to {new_name}: {str(e)}"
-            )
+            raise DatabaseError(f"Failed to rename table from {old_name} to {new_name}: {str(e)}")
 
     def insert_row(self, table_name: str, data: Dict[str, Any]) -> ToolResponse:
         """Insert a row into a table."""
@@ -330,9 +323,7 @@ class SQLiteMemoryDatabase:
                 raise e
             raise DatabaseError(f"Failed to update table {table_name}: {str(e)}")
 
-    def delete_rows(
-        self, table_name: str, where: Optional[Dict[str, Any]] = None
-    ) -> ToolResponse:
+    def delete_rows(self, table_name: str, where: Optional[Dict[str, Any]] = None) -> ToolResponse:
         """Delete rows from a table."""
         try:
             table = self._ensure_table_exists(table_name)
@@ -343,9 +334,7 @@ class SQLiteMemoryDatabase:
             if conditions:
                 stmt = stmt.where(and_(*conditions))
             else:
-                logging.warning(
-                    f"delete_rows called without WHERE clause on table {table_name}"
-                )
+                logging.warning(f"delete_rows called without WHERE clause on table {table_name}")
 
             result = self._execute_with_commit(stmt)
             return {"success": True, "rows_affected": result.rowcount}
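Note: the reflowed warning in the delete_rows hunk documents real behavior: with an empty where, no conditions are built and the DELETE runs unfiltered. Hypothetical calls for contrast (table and data invented):

    db.delete_rows("notes", where={"id": 42})  # deletes only the matching row
    db.delete_rows("notes")                    # logs the warning, then deletes ALL rows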
@@ -428,8 +417,7 @@ class SQLiteMemoryDatabase:
             text_columns = [
                 col
                 for col in table.columns
-                if "TEXT" in str(col.type).upper()
-                or "VARCHAR" in str(col.type).upper()
+                if "TEXT" in str(col.type).upper() or "VARCHAR" in str(col.type).upper()
             ]
 
             if not text_columns:
@@ -497,10 +485,7 @@ class SQLiteMemoryDatabase:
 
                             # Combined relevance score
                             col_relevance = (
-                                exact_score
-                                + term_score
-                                + position_bonus
-                                + column_bonus
+                                exact_score + term_score + position_bonus + column_bonus
                             )
                             relevance_scores.append(col_relevance)
 
@@ -510,9 +495,7 @@ class SQLiteMemoryDatabase:
                                 len(row_dict[col.name]),
                                 first_occurrence + len(query) + 50,
                             )
-                            snippet = str(row_dict[col.name])[
-                                snippet_start:snippet_end
-                            ]
+                            snippet = str(row_dict[col.name])[snippet_start:snippet_end]
                             if snippet_start > 0:
                                 snippet = "..." + snippet
                             if snippet_end < len(str(row_dict[col.name])):
@@ -532,9 +515,7 @@ class SQLiteMemoryDatabase:
                         "match_quality": (
                             "high"
                             if total_relevance > 0.5
-                            else (
-                                "medium" if total_relevance > 0.1 else "low"
-                            )
+                            else ("medium" if total_relevance > 0.1 else "low")
                         ),
                         "match_count": len(relevance_scores),
                     }
@@ -571,9 +552,7 @@ class SQLiteMemoryDatabase:
             table_names = list(self.metadata.tables.keys())
 
             if pattern:
-                table_names = [
-                    name for name in table_names if pattern.replace("%", "") in name
-                ]
+                table_names = [name for name in table_names if pattern.replace("%", "") in name]
 
             exploration: Dict[str, Any] = {
                 "tables": [],
@@ -599,10 +578,7 @@ class SQLiteMemoryDatabase:
                     }
                     columns.append(col_data)
 
-                    if (
-                        "TEXT" in str(col.type).upper()
-                        or "VARCHAR" in str(col.type).upper()
-                    ):
+                    if "TEXT" in str(col.type).upper() or "VARCHAR" in str(col.type).upper():
                         text_columns.append(col.name)
 
                 table_info: Dict[str, Any] = {
@@ -613,27 +589,21 @@ class SQLiteMemoryDatabase:
                 }
 
                 # Add row count if requested
-                count_result = conn.execute(
-                    select(text("COUNT(*)")).select_from(table)
-                )
+                count_result = conn.execute(select(text("COUNT(*)")).select_from(table))
                 row_count = count_result.scalar()
                 table_info["row_count"] = row_count
                 exploration["total_rows"] += row_count
 
                 # Add sample data
                 sample_result = conn.execute(select(table).limit(3))
-                sample_rows = [
-                    dict(row._mapping) for row in sample_result.fetchall()
-                ]
+                sample_rows = [dict(row._mapping) for row in sample_result.fetchall()]
                 if sample_rows:
                     table_info["sample_data"] = sample_rows
 
                 # Add content preview for text columns
                 if text_columns:
                     content_preview: Dict[str, List[Any]] = {}
-                    for col_name in text_columns[
-                        :3
-                    ]:  # Limit to first 3 text columns
+                    for col_name in text_columns[:3]:  # Limit to first 3 text columns
                         col = table.c[col_name]
                         preview_result = conn.execute(
                             select(col).distinct().where(col.isnot(None)).limit(5)
@@ -671,9 +641,7 @@ class SQLiteMemoryDatabase:
 
             # Add embedding column as TEXT (JSON storage)
             with self.get_connection() as conn:
-                conn.execute(
-                    text(f"ALTER TABLE {table_name} ADD COLUMN {embedding_column} TEXT")
-                )
+                conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {embedding_column} TEXT"))
                 conn.commit()
 
             self._refresh_metadata()
@@ -709,9 +677,7 @@ class SQLiteMemoryDatabase:
             table_columns = [col.name for col in table.columns]
             for col in text_columns:
                 if col not in table_columns:
-                    raise ValidationError(
-                        f"Column '{col}' not found in table '{table_name}'"
-                    )
+                    raise ValidationError(f"Column '{col}' not found in table '{table_name}'")
 
             # Add embedding column if it doesn't exist
             if embedding_column not in table_columns:
@@ -757,9 +723,7 @@ class SQLiteMemoryDatabase:
                     combined_text = " ".join(text_parts)
 
                     # Generate embedding
-                    embedding = semantic_engine.generate_embedding(
-                        combined_text
-                    )
+                    embedding = semantic_engine.generate_embedding(combined_text)
                     embedding_json = json.dumps(embedding)
 
                     # Update row with embedding
@@ -782,8 +746,7 @@ class SQLiteMemoryDatabase:
                 "message": f"Generated embeddings for {processed} rows",
                 "processed": processed,
                 "model": model_name,
-                "embedding_dimension": semantic_engine.get_embedding_dimensions()
-                or 0,
+                "embedding_dimension": semantic_engine.get_embedding_dimensions() or 0,
             }
 
         except (ValidationError, SQLAlchemyError) as e:
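Note: these hunks also show the storage convention for embeddings: an added TEXT column holding JSON-encoded float lists built from the concatenated text columns. A self-contained sketch of that round trip (values invented):

    import json

    embedding = [0.12, -0.34, 0.56]  # output of semantic_engine.generate_embedding()
    stored = json.dumps(embedding)   # value written to the TEXT embedding column
    restored = json.loads(stored)    # parsed back for similarity calculations
    assert restored == embedding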
@@ -852,8 +815,7 @@ class SQLiteMemoryDatabase:
                 text_cols = [
                     col.name
                     for col in table.columns
-                    if "TEXT" in str(col.type).upper()
-                    or "VARCHAR" in str(col.type).upper()
+                    if "TEXT" in str(col.type).upper() or "VARCHAR" in str(col.type).upper()
                 ]
             else:
                 text_cols = text_columns
@@ -923,9 +885,7 @@ class SQLiteMemoryDatabase:
                 target_row = conn.execute(target_stmt).fetchone()
 
                 if not target_row:
-                    raise ValidationError(
-                        f"Row with id {row_id} not found in table '{table_name}'"
-                    )
+                    raise ValidationError(f"Row with id {row_id} not found in table '{table_name}'")
 
                 target_dict = dict(target_row._mapping)
 
@@ -1099,9 +1059,7 @@ class SQLiteMemoryDatabase:
             if not hasattr(semantic_engine, "hybrid_search") or not callable(
                 getattr(semantic_engine, "hybrid_search")
             ):
-                raise DatabaseError(
-                    "Semantic engine hybrid_search method is not callable"
-                )
+                raise DatabaseError("Semantic engine hybrid_search method is not callable")
 
             enhanced_results = semantic_engine.hybrid_search(
                 query,
@@ -1146,10 +1104,7 @@ class SQLiteMemoryDatabase:
             total_count = 0
             with self.get_connection() as conn:
                 total_count = (
-                    conn.execute(
-                        select(text("COUNT(*)")).select_from(table)
-                    ).scalar()
-                    or 0
+                    conn.execute(select(text("COUNT(*)")).select_from(table)).scalar() or 0
                 )
 
             return {
@@ -1165,8 +1120,7 @@ class SQLiteMemoryDatabase:
             with self.get_connection() as conn:
                 # Count total rows
                 total_count = (
-                    conn.execute(select(text("COUNT(*)")).select_from(table)).scalar()
-                    or 0
+                    conn.execute(select(text("COUNT(*)")).select_from(table)).scalar() or 0
                 )
 
                 # Count rows with embeddings
@@ -1207,9 +1161,7 @@ class SQLiteMemoryDatabase:
                     except json.JSONDecodeError:
                         pass
 
-            coverage_percent = (
-                (embedded_count / total_count * 100) if total_count > 0 else 0.0
-            )
+            coverage_percent = (embedded_count / total_count * 100) if total_count > 0 else 0.0
 
             return {
                 "success": True,
src/mcp_sqlite_memory_bank/prompts.py
@@ -37,7 +37,9 @@ class MemoryBankPrompts:
             # Analyze specific table
             result = cast(Dict[str, Any], db.read_rows(table_name, {}))
             if not result.get("success"):
-                return f"Error: Could not access table '{table_name}'. Please check if it exists."
+                return (
+                    f"Error: Could not access table '{table_name}'. Please check if it exists."
+                )
 
             rows = result.get("rows", [])
             prompt = f"""Please analyze the content in the '{table_name}' table from the memory bank.
@@ -91,18 +93,16 @@ Focus on high-level strategic insights about the memory bank's utility and organization.
             return prompt
 
         @self.mcp.prompt("search-and-summarize")
-        async def search_and_summarize(
-            query: str, max_results: Optional[int] = 10
-        ) -> str:
+        async def search_and_summarize(query: str, max_results: Optional[int] = 10) -> str:
             """Search memory content and create a summary prompt."""
             db = get_database(self.db_path)
 
             # Perform search
-            result = cast(
-                Dict[str, Any], db.search_content(query, None, max_results or 10)
-            )
+            result = cast(Dict[str, Any], db.search_content(query, None, max_results or 10))
             if not result.get("success"):
-                return f"Error: Could not search for '{query}'. {result.get('error', 'Unknown error')}"
+                return (
+                    f"Error: Could not search for '{query}'. {result.get('error', 'Unknown error')}"
+                )
 
             search_results = result.get("results", [])
             if not search_results:
@@ -111,15 +111,11 @@ Focus on high-level strategic insights about the memory bank's utility and organization.
             # Format results for prompt
             formatted_results = []
             for i, result in enumerate(search_results[: max_results or 10], 1):
-                formatted_results.append(
-                    f"{i}. Table: {result.get('table', 'unknown')}"
-                )
+                formatted_results.append(f"{i}. Table: {result.get('table', 'unknown')}")
                 formatted_results.append(
                     f" Content: {result.get('content', 'No content')[:200]}..."
                 )
-                formatted_results.append(
-                    f" Relevance: {result.get('relevance', 'N/A')}"
-                )
+                formatted_results.append(f" Relevance: {result.get('relevance', 'N/A')}")
                 formatted_results.append("")
 
             prompt = f"""Based on the search query "{query}", here are the most relevant results from the memory bank:
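Note: the loop above renders each search hit as a short numbered block before embedding it in the summary prompt. Illustrative output for one made-up result:

    result = {"table": "notes", "content": "Fixed WAL checkpointing...", "relevance": 0.91}
    print(f"1. Table: {result.get('table', 'unknown')}")
    print(f" Content: {result.get('content', 'No content')[:200]}...")
    print(f" Relevance: {result.get('relevance', 'N/A')}")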
@@ -189,12 +185,8 @@ The table should include fields like: decision_name, chosen_approach, rationale,
                 formatted_decisions.append(
                     f" Rationale: {decision.get('rationale', 'Not provided')}"
                 )
                 if decision.get("alternatives"):
-                    formatted_decisions.append(
-                        f" Alternatives: {decision.get('alternatives')}"
-                    )
-                formatted_decisions.append(
-                    f" Date: {decision.get('timestamp', 'Unknown')}"
-                )
+                    formatted_decisions.append(f" Alternatives: {decision.get('alternatives')}")
+                formatted_decisions.append(f" Date: {decision.get('timestamp', 'Unknown')}")
                 formatted_decisions.append("")
 
             prompt = f"""Please analyze these technical decisions from the memory bank:
src/mcp_sqlite_memory_bank/resources.py
@@ -36,9 +36,7 @@ class MemoryBankResources:
             result = cast(Dict[str, Any], db.list_tables())
 
             if not result.get("success"):
-                return json.dumps(
-                    {"error": "Failed to fetch tables", "details": result}
-                )
+                return json.dumps({"error": "Failed to fetch tables", "details": result})
 
             resource_content = {
                 "resource_type": "table_list",
@@ -110,9 +108,7 @@ class MemoryBankResources:
             )  # Search all tables, limit to 50 results
 
             if not result.get("success"):
-                return json.dumps(
-                    {"error": f"Failed to search for '{query}'", "details": result}
-                )
+                return json.dumps({"error": f"Failed to search for '{query}'", "details": result})
 
             search_results = result.get("results", [])
             resource_content = {
@@ -167,11 +163,7 @@ class MemoryBankResources:
             max_rows = 0
             for table_name, stats in table_stats.items():
                 row_count_obj = stats.get("row_count", 0)
-                row_count = (
-                    int(row_count_obj)
-                    if isinstance(row_count_obj, (int, str))
-                    else 0
-                )
+                row_count = int(row_count_obj) if isinstance(row_count_obj, (int, str)) else 0
                 if row_count > max_rows:
                     max_rows = row_count
                     largest_table = table_name
@@ -198,9 +190,7 @@ class MemoryBankResources:
             # Get tables with timestamp columns for activity tracking
             tables_result = cast(Dict[str, Any], db.list_tables())
             if not tables_result.get("success"):
-                return json.dumps(
-                    {"error": "Failed to get tables", "details": tables_result}
-                )
+                return json.dumps({"error": "Failed to get tables", "details": tables_result})
 
             recent_activity = []
             tables = tables_result.get("tables", [])
@@ -214,16 +204,12 @@ class MemoryBankResources:
 
                     columns = schema_result.get("columns", [])
                     timestamp_cols = [
-                        col
-                        for col in columns
-                        if "timestamp" in col.get("name", "").lower()
+                        col for col in columns if "timestamp" in col.get("name", "").lower()
                     ]
 
                     if timestamp_cols:
                         # Get recent entries (last 10)
-                        recent_result = cast(
-                            Dict[str, Any], db.read_rows(table_name, None, 10)
-                        )
+                        recent_result = cast(Dict[str, Any], db.read_rows(table_name, None, 10))
                         if recent_result.get("success"):
                             rows = recent_result.get("rows", [])
                             for row in rows:
@@ -240,7 +226,7 @@ class MemoryBankResources:
                                 }
                                 recent_activity.append(activity_entry)
 
-                except Exception as e:
+                except Exception:
                     continue
 
             # Sort by timestamp (most recent first)
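Note: dropping `as e` here (and in the later hunk) is a lint-style cleanup; the exception binding was never used because the loop deliberately skips tables that fail. The pattern, reduced to a sketch (helper name hypothetical):

    for table_name in tables:
        try:
            collect_activity(table_name)  # hypothetical per-table work
        except Exception:                 # binding removed; errors are deliberately swallowed
            continue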
@@ -308,9 +294,7 @@ class MemoryBankResources:
                         embedding_stats.get("success")
                         and embedding_stats.get("coverage_percent", 0) == 0
                     ):
-                        schema_result = cast(
-                            Dict[str, Any], db.describe_table(table_name)
-                        )
+                        schema_result = cast(Dict[str, Any], db.describe_table(table_name))
                         if schema_result.get("success"):
                             text_cols = [
                                 col
@@ -354,7 +338,7 @@ class MemoryBankResources:
                             }
                         )
 
-            except Exception as e:
+            except Exception:
                 continue
 
             # Prioritize suggestions
@@ -407,9 +391,7 @@ class MemoryBankResources:
         try:
             tables_result = cast(Dict[str, Any], db.list_tables())
             if not tables_result.get("success"):
-                return json.dumps(
-                    {"error": "Failed to get insights", "details": tables_result}
-                )
+                return json.dumps({"error": "Failed to get insights", "details": tables_result})
 
             tables = tables_result.get("tables", [])
             total_rows = 0
@@ -433,13 +415,9 @@ class MemoryBankResources:
                             total_content_length += len(value)
 
                     avg_content_length = (
-                        total_content_length / sample_size
-                        if sample_size > 0
-                        else 0
+                        total_content_length / sample_size if sample_size > 0 else 0
                     )
-                    quality_score = min(
-                        10, avg_content_length / 50
-                    )  # Normalize to 0-10
+                    quality_score = min(10, avg_content_length / 50)  # Normalize to 0-10
                     content_quality_scores.append(quality_score)
 
                     insights["usage_patterns"][table_name] = {
@@ -449,11 +427,7 @@ class MemoryBankResources:
                         "category": (
                             "high_value"
                             if quality_score > 7
-                            else (
-                                "medium_value"
-                                if quality_score > 3
-                                else "low_value"
-                            )
+                            else ("medium_value" if quality_score > 3 else "low_value")
                         ),
                     }
 
src/mcp_sqlite_memory_bank/semantic.py
@@ -69,9 +69,7 @@ class SemanticSearchEngine:
                 self._model = SentenceTransformer(self.model_name)
                 logging.info(f"Loaded semantic search model: {self.model_name}")
             except Exception as e:
-                raise DatabaseError(
-                    f"Failed to load semantic search model {self.model_name}: {e}"
-                )
+                raise DatabaseError(f"Failed to load semantic search model {self.model_name}: {e}")
         return self._model
 
     def get_embedding_dimensions(self) -> Optional[int]:
@@ -136,9 +134,7 @@ class SemanticSearchEngine:
         except Exception as e:
             raise DatabaseError(f"Failed to generate batch embeddings: {e}")
 
-    def calculate_similarity(
-        self, embedding1: List[float], embedding2: List[float]
-    ) -> float:
+    def calculate_similarity(self, embedding1: List[float], embedding2: List[float]) -> float:
         """
         Calculate cosine similarity between two embeddings.
 
@@ -226,9 +222,7 @@ class SemanticSearchEngine:
                 results.sort(key=lambda x: x[1], reverse=True)
                 return results[:top_k]
             except Exception as e:
-                logging.warning(
-                    f"Torch similarity search failed, using numpy fallback: {e}"
-                )
+                logging.warning(f"Torch similarity search failed, using numpy fallback: {e}")
 
         # Fallback to numpy implementation
         results = []
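Note: the reflowed warning marks where the torch-accelerated path gives up and the numpy implementation takes over. A minimal numpy cosine-similarity sketch of what such a fallback computes (not the package's exact code):

    import numpy as np

    def cosine_sim(a, b) -> float:
        a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))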
@@ -317,11 +311,7 @@ class SemanticSearchEngine:
             if content_columns:
                 matched_content = []
                 for col in content_columns:
-                    if (
-                        col in row
-                        and row[col]
-                        and query.lower() in str(row[col]).lower()
-                    ):
+                    if col in row and row[col] and query.lower() in str(row[col]).lower():
                         matched_content.append(f"{col}: {row[col]}")
                 if matched_content:
                     row["matched_content"] = matched_content
@@ -367,14 +357,9 @@ class SemanticSearchEngine:
             semantic_weight /= total_weight
             text_weight /= total_weight
 
-        # Get semantic search results
-        semantic_results = self.semantic_search(
-            query,
-            content_data,
-            embedding_column,
-            similarity_threshold=0.3,
-            top_k=top_k * 2,  # Get more for reranking
-        )
+        # Use the provided content_data as semantic results (already filtered by database)
+        # The content_data passed here should already be the semantic search results
+        semantic_results = content_data.copy() if content_data else []
 
         # Add text matching scores
         query_lower = query.lower()
@@ -387,15 +372,11 @@ class SemanticSearchEngine:
                     content = str(result[col]).lower()
                     if query_lower in content:
                         # Simple frequency-based text scoring
-                        text_score += content.count(query_lower) / len(
-                            content.split()
-                        )
+                        text_score += content.count(query_lower) / len(content.split())
 
             # Combine scores
             semantic_score = result.get("similarity_score", 0.0)
-            combined_score = (semantic_score * semantic_weight) + (
-                text_score * text_weight
-            )
+            combined_score = (semantic_score * semantic_weight) + (text_score * text_weight)
             result["combined_score"] = round(combined_score, 3)
             result["text_score"] = round(text_score, 3)
 
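Note: the @@ -367 hunk above is the one real behavioral change in this release: hybrid_search no longer re-runs semantic_search itself and instead re-ranks the content_data the database layer already filtered. The scoring math in this hunk is unchanged; a worked example with invented numbers:

    semantic_weight, text_weight = 0.7, 0.3  # already normalized to sum to 1.0
    semantic_score = 0.82                    # result.get("similarity_score", 0.0)
    text_score = 2 / 120                     # query occurrences / words in content
    combined_score = (semantic_score * semantic_weight) + (text_score * text_weight)
    print(round(combined_score, 3))          # 0.579 -> result["combined_score"]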
@@ -421,9 +402,7 @@ def get_semantic_engine(model_name: str = "all-MiniLM-L6-v2") -> SemanticSearchEngine:
     try:
         if _semantic_engine is None or _semantic_engine.model_name != model_name:
             if not SENTENCE_TRANSFORMERS_AVAILABLE:
-                raise ValueError(
-                    "Sentence transformers not available for semantic search"
-                )
+                raise ValueError("Sentence transformers not available for semantic search")
             _semantic_engine = SemanticSearchEngine(model_name)
 
         # Verify the engine is properly initialized