mcp-sqlite-memory-bank 1.3.1__py3-none-any.whl → 1.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -429,7 +429,12 @@ class SQLiteMemoryDatabase:
429
429
  )
430
430
 
431
431
  # Sort by relevance and limit results
432
- results.sort(key=lambda x: x["relevance"], reverse=True)
432
+ def get_relevance(x: Dict[str, Any]) -> float:
433
+ rel = x.get("relevance", 0)
434
+ if isinstance(rel, (int, float)):
435
+ return float(rel)
436
+ return 0.0
437
+ results.sort(key=get_relevance, reverse=True)
433
438
  results = results[:limit]
434
439
 
435
440
  return {
@@ -453,7 +458,7 @@ class SQLiteMemoryDatabase:
453
458
  if pattern:
454
459
  table_names = [name for name in table_names if pattern.replace("%", "") in name]
455
460
 
456
- exploration = {"tables": [], "total_tables": len(table_names), "total_rows": 0}
461
+ exploration: Dict[str, Any] = {"tables": [], "total_tables": len(table_names), "total_rows": 0}
457
462
 
458
463
  with self.get_connection() as conn:
459
464
  for table_name in table_names:
@@ -461,7 +466,7 @@ class SQLiteMemoryDatabase:
461
466
 
462
467
  # Build column info and identify text columns
463
468
  columns = []
464
- text_columns = []
469
+ text_columns: List[str] = []
465
470
 
466
471
  for col in table.columns:
467
472
  col_data = {
@@ -476,7 +481,7 @@ class SQLiteMemoryDatabase:
476
481
  if "TEXT" in str(col.type).upper() or "VARCHAR" in str(col.type).upper():
477
482
  text_columns.append(col.name)
478
483
 
479
- table_info = {"name": table_name, "columns": columns, "text_columns": text_columns}
484
+ table_info: Dict[str, Any] = {"name": table_name, "columns": columns, "text_columns": text_columns}
480
485
 
481
486
  # Add row count if requested
482
487
  if include_row_counts:
@@ -493,11 +498,11 @@ class SQLiteMemoryDatabase:
493
498
 
494
499
  # Add content preview for text columns
495
500
  if text_columns:
496
- content_preview = {}
501
+ content_preview: Dict[str, List[Any]] = {}
497
502
  for col_name in text_columns[:3]: # Limit to first 3 text columns
498
503
  col = table.c[col_name]
499
504
  preview_result = conn.execute(select(col).distinct().where(col.isnot(None)).limit(5))
500
- unique_values = [row[0] for row in preview_result.fetchall() if row[0]]
505
+ unique_values: List[Any] = [row[0] for row in preview_result.fetchall() if row[0]]
501
506
  if unique_values:
502
507
  content_preview[col_name] = unique_values
503
508
 
@@ -1001,6 +1006,9 @@ def get_database(db_path: Optional[str] = None) -> SQLiteMemoryDatabase:
1001
1006
  global _db_instance
1002
1007
 
1003
1008
  actual_path = db_path or os.environ.get("DB_PATH", "./test.db")
1009
+ if actual_path is None:
1010
+ actual_path = "./test.db"
1011
+
1004
1012
  if _db_instance is None or (db_path and db_path != _db_instance.db_path):
1005
1013
  # Close previous instance if it exists
1006
1014
  if _db_instance is not None:
@@ -0,0 +1,256 @@
1
+ """
2
+ MCP Prompts Support for SQLite Memory Bank
3
+ =========================================
4
+
5
+ This module adds MCP Prompts support, providing templated prompts and workflows
6
+ that leverage the memory bank content for enhanced AI interactions.
7
+
8
+ Prompts provide reusable, contextual templates that can dynamically incorporate
9
+ stored memory content into LLM conversations.
10
+
11
+ Author: Robert Meisner
12
+ """
13
+
14
+ from typing import Optional, Dict, List, Any, cast
15
+ from fastmcp import FastMCP
16
+ from .database import get_database
17
+ import json
18
+
19
+
20
class MemoryBankPrompts:
    """Manages MCP Prompts for the SQLite Memory Bank.

    Registers a set of templated prompts on the given FastMCP app. Each
    prompt pulls live content from the memory bank database at call time,
    so the text handed to the LLM reflects current stored state.
    """

    def __init__(self, mcp_app: FastMCP, db_path: str):
        self.mcp = mcp_app
        self.db_path = db_path
        self._register_prompts()

    def _register_prompts(self):
        """Register MCP prompts with the FastMCP app."""

        @self.mcp.prompt("analyze-memory-content")
        async def analyze_memory_content(table_name: Optional[str] = None) -> str:
            """Analyze memory bank content and provide insights."""
            db = get_database(self.db_path)

            if table_name:
                # Analyze specific table
                result = cast(Dict[str, Any], db.read_rows(table_name, {}))
                if not result.get("success"):
                    return f"Error: Could not access table '{table_name}'. Please check if it exists."

                rows = result.get("rows", [])
                prompt = f"""Please analyze the content in the '{table_name}' table from the memory bank.

Table: {table_name}
Row count: {len(rows)}
Sample data: {json.dumps(rows[:3], indent=2) if rows else "No data"}

Please provide:
1. A summary of the content patterns
2. Key insights or themes
3. Suggestions for better organization
4. Potential use cases for this data

Focus on actionable insights that could help improve how this information is stored and retrieved."""
            else:
                # Analyze all tables
                tables_result = cast(Dict[str, Any], db.list_tables())
                if not tables_result.get("success"):
                    return "Error: Could not access memory bank tables."

                tables = tables_result.get("tables", [])
                overview: Dict[str, Any] = {"tables": len(tables), "total_content": []}

                for table in tables[:5]:  # Limit to first 5 tables
                    rows_result = cast(Dict[str, Any], db.read_rows(table, {}))
                    if rows_result.get("success"):
                        rows = rows_result.get("rows", [])
                        total_content = cast(List[Any], overview["total_content"])
                        total_content.append({
                            "table": table,
                            "rows": len(rows),
                            "sample": rows[:2] if rows else []
                        })

                prompt = f"""Please analyze the overall content in this memory bank.

Memory Bank Overview:
{json.dumps(overview, indent=2)}

Please provide:
1. Assessment of content organization
2. Identification of content patterns across tables
3. Recommendations for improving memory structure
4. Suggestions for leveraging this content more effectively

Focus on high-level strategic insights about the memory bank's utility and organization."""

            return prompt

        @self.mcp.prompt("search-and-summarize")
        async def search_and_summarize(query: str, max_results: Optional[int] = 10) -> str:
            """Search memory content and create a summary prompt."""
            db = get_database(self.db_path)

            # Perform search
            result = cast(Dict[str, Any], db.search_content(query, None, max_results or 10))
            if not result.get("success"):
                return f"Error: Could not search for '{query}'. {result.get('error', 'Unknown error')}"

            search_results = result.get("results", [])
            if not search_results:
                return f"No results found for query: '{query}'. Please try different search terms or check if relevant content exists in the memory bank."

            # Format results for prompt.
            # NOTE: loop variable renamed from `result` to `item` so it no
            # longer shadows the search-response dict above.
            formatted_results = []
            for i, item in enumerate(search_results[:max_results or 10], 1):
                formatted_results.append(f"{i}. Table: {item.get('table', 'unknown')}")
                formatted_results.append(f" Content: {item.get('content', 'No content')[:200]}...")
                formatted_results.append(f" Relevance: {item.get('relevance', 'N/A')}")
                formatted_results.append("")

            prompt = f"""Based on the search query "{query}", here are the most relevant results from the memory bank:

Search Results:
{chr(10).join(formatted_results)}

Please provide:
1. A comprehensive summary of the key information found
2. Common themes or patterns across the results
3. Any gaps or additional information that might be needed
4. Actionable insights based on this content

Use this information to provide a thorough, well-organized response that synthesizes the search results."""

            return prompt

        @self.mcp.prompt("technical-decision-analysis")
        async def technical_decision_analysis(decision_topic: Optional[str] = None) -> str:
            """Analyze technical decisions from the memory bank."""
            db = get_database(self.db_path)

            # Try to find technical_decisions table
            tables_result = cast(Dict[str, Any], db.list_tables())
            if not tables_result.get("success"):
                return "Error: Could not access memory bank."

            tables = tables_result.get("tables", [])
            if "technical_decisions" not in tables:
                return """No technical decisions table found in the memory bank.

To use this prompt effectively, please:
1. Create a 'technical_decisions' table
2. Store your technical decisions with context
3. Try this prompt again

The table should include fields like: decision_name, chosen_approach, rationale, alternatives, timestamp."""

            # Get technical decisions
            where_clause: Dict[str, Any] = {}
            if decision_topic:
                # This is a simplified search - in practice you'd want semantic search
                where_clause = {"decision_name": decision_topic}

            result = cast(Dict[str, Any], db.read_rows("technical_decisions", where_clause))
            if not result.get("success"):
                return "Error: Could not read technical decisions."

            decisions = result.get("rows", [])
            if not decisions:
                topic_msg = f" related to '{decision_topic}'" if decision_topic else ""
                return f"No technical decisions found{topic_msg}. Consider adding some decisions to the memory bank first."

            # Format decisions for analysis
            formatted_decisions = []
            decisions_list = cast(List[Dict[str, Any]], decisions)
            for i, decision in enumerate(decisions_list, 1):
                formatted_decisions.append(f"{i}. Decision: {decision.get('decision_name', 'Unknown')}")
                formatted_decisions.append(f" Approach: {decision.get('chosen_approach', 'Not specified')}")
                formatted_decisions.append(f" Rationale: {decision.get('rationale', 'Not provided')}")
                if decision.get('alternatives'):
                    formatted_decisions.append(f" Alternatives: {decision.get('alternatives')}")
                formatted_decisions.append(f" Date: {decision.get('timestamp', 'Unknown')}")
                formatted_decisions.append("")

            prompt = f"""Please analyze these technical decisions from the memory bank:

Technical Decisions{f" (filtered by: {decision_topic})" if decision_topic else ""}:
{chr(10).join(formatted_decisions)}

Please provide:
1. Analysis of decision-making patterns
2. Assessment of the rationale quality
3. Identification of any decision dependencies or conflicts
4. Recommendations for future decisions
5. Suggestions for improving decision documentation

Focus on actionable insights that can improve technical decision-making processes."""

            return prompt

        @self.mcp.prompt("memory-bank-context")
        async def memory_bank_context(context_type: str = "full") -> str:
            """Provide memory bank context for AI conversations."""
            db = get_database(self.db_path)

            # Get overview
            tables_result = cast(Dict[str, Any], db.list_tables())
            if not tables_result.get("success"):
                return "Error: Could not access memory bank for context."

            tables = tables_result.get("tables", [])
            context_info = {
                "available_tables": tables,
                "capabilities": [
                    "Full-text search across all content",
                    "Semantic search (if embeddings are available)",
                    "Structured data queries",
                    "Content analytics and insights"
                ],
                "usage_suggestions": [
                    "Use search_content() for finding specific information",
                    "Use semantic_search() for conceptual queries",
                    "Use read_rows() for structured data access",
                    "Use explore_tables() to discover available content"
                ]
            }

            if context_type == "brief":
                tables_list = cast(List[str], tables)
                prompt = f"""Memory Bank Context (Brief):
Available tables: {', '.join(tables_list)}
Total tables: {len(tables_list)}

This memory bank contains structured information that can be searched and analyzed. Use the available tools to access specific content as needed."""
            else:
                # Get sample content from a few tables
                sample_content: Dict[str, Any] = {}
                tables_list = cast(List[str], tables)
                for table in tables_list[:3]:  # Sample from first 3 tables
                    try:
                        result = cast(Dict[str, Any], db.read_rows(table, {}))
                        if result.get("success"):
                            rows = cast(List[Any], result.get("rows", []))
                            sample_content[table] = {
                                "row_count": len(rows),
                                "sample_row": rows[0] if rows else None
                            }
                    except Exception:
                        continue

                prompt = f"""Memory Bank Context (Full):

{json.dumps(context_info, indent=2)}

Sample Content:
{json.dumps(sample_content, indent=2)}

This memory bank contains structured information that can be searched, analyzed, and leveraged for various tasks. The content is organized in tables with different types of information. Use the available search and query tools to access specific content as needed for your current task."""

            return prompt
252
+
253
+
254
def setup_mcp_prompts(mcp_app: FastMCP, db_path: str) -> MemoryBankPrompts:
    """Set up MCP Prompts for the memory bank.

    Instantiating MemoryBankPrompts registers all prompt templates on the
    given FastMCP app; the instance is returned for optional later use.
    """
    prompts = MemoryBankPrompts(mcp_app, db_path)
    return prompts
@@ -0,0 +1,176 @@
1
+ """
2
+ MCP Resources Support for SQLite Memory Bank
3
+ ==========================================
4
+
5
+ This module adds MCP Resources support, allowing the memory bank to expose
6
+ stored content as MCP resources that can be consumed by LLM applications.
7
+
8
+ Resources provide context and data that can be accessed by AI models through
9
+ the standardized MCP protocol.
10
+
11
+ Author: Robert Meisner
12
+ """
13
+
14
+ from typing import Dict, Any, cast
15
+ from fastmcp import FastMCP
16
+ from .database import get_database
17
+ import json
18
+
19
+
20
class MemoryBankResources:
    """Manages MCP Resources for the SQLite Memory Bank.

    Registers read-only resources (table list, schemas, data, search,
    analytics) on the given FastMCP app; each resource serializes live
    database content to a JSON string.
    """

    def __init__(self, mcp_app: FastMCP, db_path: str):
        self.mcp = mcp_app
        self.db_path = db_path
        self._register_resources()

    def _register_resources(self):
        """Register MCP resources with the FastMCP app."""

        @self.mcp.resource("memory://tables/list")
        async def get_tables_list() -> str:
            """Provide a list of all available tables as an MCP resource."""
            db = get_database(self.db_path)
            result = cast(Dict[str, Any], db.list_tables())

            if not result.get("success"):
                return json.dumps({"error": "Failed to fetch tables", "details": result})

            resource_content = {
                "resource_type": "table_list",
                "description": "List of all available tables in the memory bank",
                "tables": result.get("tables", []),
                "total_count": len(result.get("tables", [])),
                "last_updated": "dynamic"
            }

            return json.dumps(resource_content, indent=2)

        @self.mcp.resource("memory://tables/{table_name}/schema")
        async def get_table_schema(table_name: str) -> str:
            """Provide table schema information as an MCP resource."""
            db = get_database(self.db_path)
            result = cast(Dict[str, Any], db.describe_table(table_name))

            if not result.get("success"):
                return json.dumps({"error": f"Failed to fetch schema for table '{table_name}'", "details": result})

            resource_content = {
                "resource_type": "table_schema",
                "table_name": table_name,
                "description": f"Schema definition for table '{table_name}'",
                "columns": result.get("columns", []),
                "column_count": len(result.get("columns", [])),
                "last_updated": "dynamic"
            }

            return json.dumps(resource_content, indent=2)

        @self.mcp.resource("memory://tables/{table_name}/data")
        async def get_table_data(table_name: str) -> str:
            """Provide table data as an MCP resource."""
            db = get_database(self.db_path)
            result = cast(Dict[str, Any], db.read_rows(table_name, {}))

            if not result.get("success"):
                return json.dumps({"error": f"Failed to fetch data for table '{table_name}'", "details": result})

            rows = result.get("rows", [])
            resource_content = {
                "resource_type": "table_data",
                "table_name": table_name,
                "description": f"All data from table '{table_name}'",
                "rows": rows,
                "row_count": len(rows),
                "last_updated": "dynamic"
            }

            return json.dumps(resource_content, indent=2)

        @self.mcp.resource("memory://search/{query}")
        async def search_memory_content(query: str) -> str:
            """Provide search results as an MCP resource."""
            db = get_database(self.db_path)
            result = cast(Dict[str, Any], db.search_content(query, None, 50))  # Search all tables, limit to 50 results

            if not result.get("success"):
                return json.dumps({"error": f"Failed to search for '{query}'", "details": result})

            search_results = result.get("results", [])
            resource_content = {
                "resource_type": "search_results",
                "query": query,
                "description": f"Search results for query: '{query}'",
                "results": search_results,
                "result_count": len(search_results),
                "last_updated": "dynamic"
            }

            return json.dumps(resource_content, indent=2)

        @self.mcp.resource("memory://analytics/overview")
        async def get_memory_overview() -> str:
            """Provide memory bank overview analytics as an MCP resource."""
            db = get_database(self.db_path)

            # Get table list
            tables_result = cast(Dict[str, Any], db.list_tables())
            if not tables_result.get("success"):
                return json.dumps({"error": "Failed to fetch memory overview", "details": tables_result})

            tables = tables_result.get("tables", [])
            total_rows = 0
            table_stats: Dict[str, Dict[str, Any]] = {}

            # Get row counts for each table
            for table in tables:
                try:
                    rows_result = cast(Dict[str, Any], db.read_rows(table, {}))
                    if rows_result.get("success"):
                        row_count = len(rows_result.get("rows", []))
                        table_stats[table] = {
                            "row_count": row_count,
                            "status": "accessible"
                        }
                        total_rows += row_count
                    else:
                        table_stats[table] = {
                            "row_count": 0,
                            "status": "error"
                        }
                except Exception as e:
                    table_stats[table] = {
                        "row_count": 0,
                        "status": f"error: {str(e)}"
                    }

            # Find largest table.
            # FIX: row_count is always an int here; the previous version also
            # accepted str and called int() on it, which could raise ValueError
            # for non-numeric strings. Non-int values now simply count as 0.
            largest_table = None
            if table_stats:
                max_rows = 0
                for stats_table_name, stats in table_stats.items():
                    row_count_obj = stats.get("row_count", 0)
                    row_count = row_count_obj if isinstance(row_count_obj, int) else 0
                    if row_count > max_rows:
                        max_rows = row_count
                        largest_table = stats_table_name

            resource_content = {
                "resource_type": "memory_overview",
                "description": "Overview of memory bank contents and usage",
                "summary": {
                    "total_tables": len(tables),
                    "total_rows": total_rows,
                    "largest_table": largest_table
                },
                "table_statistics": table_stats,
                "last_updated": "dynamic"
            }

            return json.dumps(resource_content, indent=2)
172
+
173
+
174
def setup_mcp_resources(mcp_app: FastMCP, db_path: str) -> MemoryBankResources:
    """Set up MCP Resources for the memory bank.

    Instantiating MemoryBankResources registers all resource endpoints on
    the given FastMCP app; the instance is returned for optional later use.
    """
    resources = MemoryBankResources(mcp_app, db_path)
    return resources
@@ -19,8 +19,8 @@ try:
19
19
  SENTENCE_TRANSFORMERS_AVAILABLE = True
20
20
  except ImportError:
21
21
  SENTENCE_TRANSFORMERS_AVAILABLE = False
22
- SentenceTransformer = None
23
- util = None
22
+ SentenceTransformer = None # type: ignore
23
+ util = None # type: ignore
24
24
  logging.warning("sentence-transformers not available. Install with: pip install sentence-transformers")
25
25
 
26
26
  try:
@@ -29,7 +29,7 @@ try:
29
29
  TORCH_AVAILABLE = True
30
30
  except ImportError:
31
31
  TORCH_AVAILABLE = False
32
- torch = None
32
+ torch = None # type: ignore
33
33
  logging.warning("torch not available. Install with: pip install torch")
34
34
 
35
35
  from .types import ValidationError, DatabaseError
@@ -50,7 +50,7 @@ class SemanticSearchEngine:
50
50
  """Initialize the semantic search engine."""
51
51
  self.model_name = model_name
52
52
  self._model = None
53
- self._embedding_cache = {}
53
+ self._embedding_cache: Dict[str, Any] = {}
54
54
 
55
55
  if not SENTENCE_TRANSFORMERS_AVAILABLE:
56
56
  raise ValueError(
@@ -65,6 +65,8 @@ from .types import (
65
65
  SelectQueryResponse,
66
66
  )
67
67
  from .utils import catch_errors
68
+ from .resources import setup_mcp_resources
69
+ from .prompts import setup_mcp_prompts
68
70
 
69
71
  # Initialize FastMCP app with explicit name
70
72
  mcp: FastMCP = FastMCP("SQLite Memory Bank for Copilot/AI Agents")
@@ -78,6 +80,12 @@ os.makedirs(os.path.dirname(os.path.abspath(DB_PATH)), exist_ok=True)
78
80
  # Initialize database
79
81
  db = get_database(DB_PATH)
80
82
 
83
+ # Set up MCP Resources for enhanced context provision
84
+ setup_mcp_resources(mcp, DB_PATH)
85
+
86
+ # Set up MCP Prompts for enhanced workflow support
87
+ setup_mcp_prompts(mcp, DB_PATH)
88
+
81
89
 
82
90
  # --- Schema Management Tools for SQLite Memory Bank ---
83
91
 
@@ -462,7 +470,12 @@ def add_embeddings(
462
470
  table_name: str, text_columns: List[str], embedding_column: str = "embedding", model_name: str = "all-MiniLM-L6-v2"
463
471
  ) -> ToolResponse:
464
472
  """
473
+ ⚠️ **ADVANCED TOOL** - Most agents should use auto_smart_search() instead!
474
+
465
475
  Generate and store vector embeddings for semantic search on table content.
476
+
477
+ **RECOMMENDATION**: Use auto_smart_search() or auto_semantic_search() for automatic setup.
478
+ This tool is for advanced users who need manual control over embedding generation.
466
479
 
467
480
  This tool enables intelligent knowledge discovery by creating vector representations
468
481
  of text content that can be searched semantically rather than just by exact keywords.
@@ -503,7 +516,12 @@ def semantic_search(
503
516
  model_name: str = "all-MiniLM-L6-v2",
504
517
  ) -> ToolResponse:
505
518
  """
519
+ ⚠️ **ADVANCED TOOL** - Most agents should use auto_smart_search() instead!
520
+
506
521
  Find content using natural language semantic similarity rather than exact keyword matching.
522
+
523
+ **RECOMMENDATION**: Use auto_smart_search() for automatic setup and hybrid search capabilities.
524
+ This tool requires manual embedding setup via add_embeddings() first.
507
525
 
508
526
  This enables intelligent knowledge discovery - find related concepts even when
509
527
  they use different terminology or phrasing.
@@ -602,7 +620,12 @@ def smart_search(
602
620
  model_name: str = "all-MiniLM-L6-v2",
603
621
  ) -> ToolResponse:
604
622
  """
623
+ ⚠️ **ADVANCED TOOL** - Most agents should use auto_smart_search() instead!
624
+
605
625
  Intelligent hybrid search combining semantic understanding with keyword matching.
626
+
627
+ **RECOMMENDATION**: Use auto_smart_search() for the same functionality with automatic setup.
628
+ This tool requires manual embedding setup via add_embeddings() first.
606
629
 
607
630
  Provides the best of both worlds - semantic similarity for concept discovery
608
631
  plus exact text matching for precise searches.
@@ -631,7 +654,7 @@ def smart_search(
631
654
  - Provides separate scores for transparency
632
655
  - Falls back gracefully if semantic search unavailable
633
656
  - Optimal for both exploratory and precise searches
634
- - Recommended for general-purpose knowledge discovery
657
+ - Perfect for agents - ultimate search tool that just works!
635
658
  """
636
659
  return cast(
637
660
  ToolResponse,
@@ -641,34 +664,306 @@ def smart_search(
641
664
  )
642
665
 
643
666
 
667
+ # --- Auto-Embedding Semantic Search Tools ---
668
+
669
+
644
670
  @mcp.tool
645
671
  @catch_errors
646
- def embedding_stats(table_name: str, embedding_column: str = "embedding") -> ToolResponse:
672
+ def auto_semantic_search(
673
+ query: str,
674
+ tables: Optional[List[str]] = None,
675
+ similarity_threshold: float = 0.5,
676
+ limit: int = 10,
677
+ model_name: str = "all-MiniLM-L6-v2",
678
+ ) -> ToolResponse:
647
679
  """
648
- Get statistics about semantic search readiness for a table.
680
+ 🚀 **ZERO-SETUP SEMANTIC SEARCH** - Just search, embeddings are handled automatically!
649
681
 
650
- Check which content has embeddings and can be searched semantically.
682
+ Find content using natural language semantic similarity. If embeddings don't exist,
683
+ they will be automatically generated for text columns. This is the easiest way to
684
+ do semantic search - no manual setup required!
651
685
 
652
686
  Args:
653
- table_name (str): Table to analyze
654
- embedding_column (str): Embedding column to check (default: "embedding")
687
+ query (str): Natural language search query
688
+ tables (Optional[List[str]]): Specific tables to search (default: all tables)
689
+ similarity_threshold (float): Minimum similarity score (0.0-1.0, default: 0.5)
690
+ limit (int): Maximum number of results to return (default: 10)
691
+ model_name (str): Model to use for embeddings (default: "all-MiniLM-L6-v2")
655
692
 
656
693
  Returns:
657
- ToolResponse: On success: {"success": True, "coverage_percent": float, "total_rows": int}
694
+ ToolResponse: On success: {"success": True, "results": List[...], "auto_embedded_tables": List[str]}
658
695
  On error: {"success": False, "error": str, "category": str, "details": dict}
659
696
 
660
697
  Examples:
661
- >>> embedding_stats("technical_decisions")
662
- {"success": True, "total_rows": 25, "embedded_rows": 25, "coverage_percent": 100.0,
663
- "embedding_dimensions": 384}
698
+ >>> auto_semantic_search("API design patterns")
699
+ {"success": True, "results": [
700
+ {"table_name": "technical_decisions", "similarity_score": 0.87, "decision_name": "REST API Structure", ...}
701
+ ], "auto_embedded_tables": ["technical_decisions"]}
702
+
703
+ >>> auto_semantic_search("machine learning concepts")
704
+ # Finds content about "ML", "AI", "neural networks", etc.
705
+ # Automatically creates embeddings if they don't exist!
664
706
 
665
707
  FastMCP Tool Info:
666
- - Shows how much content is ready for semantic search
667
- - Helps identify tables that need embedding generation
668
- - Provides embedding dimension info for debugging
669
- - Useful for monitoring semantic search capabilities
708
+ - **COMPLETELY AUTOMATIC**: No manual embedding setup required
709
+ - Auto-detects text columns and creates embeddings as needed
710
+ - Works across multiple tables simultaneously
711
+ - Finds conceptually similar content regardless of exact wording
712
+ - Returns relevance scores for ranking results
713
+ - Supports fuzzy matching and concept discovery
714
+ - Perfect for agents - just search and it works!
715
+ """
716
+ try:
717
+ db = get_database(DB_PATH)
718
+ auto_embedded_tables = []
719
+
720
+ # Get tables to search
721
+ if tables:
722
+ search_tables = tables
723
+ else:
724
+ tables_result = db.list_tables()
725
+ if not tables_result.get("success"):
726
+ return cast(ToolResponse, tables_result)
727
+ search_tables = tables_result.get("tables", [])
728
+
729
+ # Auto-embed text columns in tables that don't have embeddings
730
+ for table_name in search_tables:
731
+ try:
732
+ # Check if table has embeddings
733
+ stats_result = db.get_embedding_stats(table_name, "embedding")
734
+ if stats_result.get("success") and stats_result.get("coverage_percent", 0) > 0:
735
+ continue # Table already has embeddings
736
+
737
+ # Get table schema to find text columns
738
+ schema_result = db.describe_table(table_name)
739
+ if not schema_result.get("success"):
740
+ continue
741
+
742
+ # Find text columns
743
+ text_columns = []
744
+ for col in schema_result.get("columns", []):
745
+ if "TEXT" in col.get("type", "").upper():
746
+ text_columns.append(col["name"])
747
+
748
+ # Auto-embed text columns
749
+ if text_columns:
750
+ embed_result = db.generate_embeddings(table_name, text_columns, "embedding", model_name)
751
+ if embed_result.get("success"):
752
+ auto_embedded_tables.append(table_name)
753
+
754
+ except Exception:
755
+ # If auto-embedding fails, continue without it
756
+ continue
757
+
758
+ # Perform semantic search
759
+ search_result = db.semantic_search(
760
+ query, search_tables, "embedding", None, similarity_threshold, limit, model_name
761
+ )
762
+
763
+ # Add auto-embedding info to result
764
+ if isinstance(search_result, dict):
765
+ search_result["auto_embedded_tables"] = auto_embedded_tables
766
+ if auto_embedded_tables:
767
+ search_result["auto_embedding_note"] = f"Automatically generated embeddings for {len(auto_embedded_tables)} table(s)"
768
+
769
+ return cast(ToolResponse, search_result)
770
+
771
+ except Exception as e:
772
+ return cast(ToolResponse, {
773
+ "success": False,
774
+ "error": f"Auto semantic search failed: {str(e)}",
775
+ "category": "SEMANTIC_SEARCH_ERROR",
776
+ "details": {"query": query, "tables": tables}
777
+ })
778
+
779
+
780
+ @mcp.tool
781
+ @catch_errors
782
+ def auto_smart_search(
783
+ query: str,
784
+ tables: Optional[List[str]] = None,
785
+ semantic_weight: float = 0.7,
786
+ text_weight: float = 0.3,
787
+ limit: int = 10,
788
+ model_name: str = "all-MiniLM-L6-v2",
789
+ ) -> ToolResponse:
790
+ """
791
+ 🚀 **ZERO-SETUP HYBRID SEARCH** - Best of both worlds with automatic embedding!
792
+
793
+ Intelligent hybrid search combining semantic understanding with keyword matching.
794
+ Automatically generates embeddings for text columns when needed. This is the
795
+ ultimate search tool - no manual setup required!
796
+
797
+ Args:
798
+ query (str): Search query (natural language or keywords)
799
+ tables (Optional[List[str]]): Tables to search (default: all)
800
+ semantic_weight (float): Weight for semantic similarity (0.0-1.0, default: 0.7)
801
+ text_weight (float): Weight for keyword matching (0.0-1.0, default: 0.3)
802
+ limit (int): Maximum results (default: 10)
803
+ model_name (str): Semantic model to use (default: "all-MiniLM-L6-v2")
804
+
805
+ Returns:
806
+ ToolResponse: On success: {"success": True, "results": List[...], "search_type": "auto_hybrid"}
807
+ On error: {"success": False, "error": str, "category": str, "details": dict}
808
+
809
+ Examples:
810
+ >>> auto_smart_search("user authentication security")
811
+ {"success": True, "results": [
812
+ {"combined_score": 0.89, "semantic_score": 0.92, "text_score": 0.82, ...}
813
+ ], "search_type": "auto_hybrid", "auto_embedded_tables": ["user_data"]}
814
+
815
+ FastMCP Tool Info:
816
+ - **COMPLETELY AUTOMATIC**: No manual embedding setup required
817
+ - Automatically balances semantic and keyword search
818
+ - Auto-detects text columns and creates embeddings as needed
819
+ - Provides separate scores for transparency
820
+ - Falls back gracefully if semantic search unavailable
821
+ - Optimal for both exploratory and precise searches
822
+ - Perfect for agents - ultimate search tool that just works!
823
+ """
824
+ try:
825
+ # First try auto semantic search to ensure embeddings exist
826
+ auto_semantic_result = auto_semantic_search(query, tables, 0.3, limit, model_name)
827
+ auto_embedded_tables = []
828
+
829
+ if auto_semantic_result.get("success"):
830
+ auto_embedded_tables = auto_semantic_result.get("auto_embedded_tables", [])
831
+
832
+ # Now perform hybrid search
833
+ db = get_database(DB_PATH)
834
+ hybrid_result = db.hybrid_search(
835
+ query, tables, None, "embedding", semantic_weight, text_weight, limit, model_name
836
+ )
837
+
838
+ # Add auto-embedding info to result
839
+ if isinstance(hybrid_result, dict):
840
+ hybrid_result["search_type"] = "auto_hybrid"
841
+ hybrid_result["auto_embedded_tables"] = auto_embedded_tables
842
+ if auto_embedded_tables:
843
+ hybrid_result["auto_embedding_note"] = f"Automatically generated embeddings for {len(auto_embedded_tables)} table(s)"
844
+
845
+ return cast(ToolResponse, hybrid_result)
846
+
847
+ except Exception as e:
848
+ return cast(ToolResponse, {
849
+ "success": False,
850
+ "error": f"Auto smart search failed: {str(e)}",
851
+ "category": "HYBRID_SEARCH_ERROR",
852
+ "details": {"query": query, "tables": tables}
853
+ })
854
+
855
+
856
+ # --- Enhanced Tool Discovery and Categorization ---
857
+
858
+
859
+ @mcp.tool
860
+ @catch_errors
861
+ def list_tool_categories() -> ToolResponse:
862
+ """
863
+ List all available tool categories for better organization and discovery.
864
+
865
+ Returns organized view of available functionality for LLMs and agents.
866
+
867
+ Returns:
868
+ ToolResponse: {"success": True, "categories": {category: [tool_names]}}
869
+ """
870
+ categories = {
871
+ "schema_management": [
872
+ "create_table", "list_tables", "describe_table",
873
+ "drop_table", "rename_table", "list_all_columns"
874
+ ],
875
+ "data_operations": [
876
+ "create_row", "read_rows", "update_rows",
877
+ "delete_rows", "run_select_query"
878
+ ],
879
+ "search_discovery": [
880
+ "search_content", "explore_tables"
881
+ ],
882
+ "semantic_search": [
883
+ "add_embeddings", "semantic_search", "find_related",
884
+ "smart_search", "embedding_stats"
885
+ ],
886
+ "workflow_shortcuts": [
887
+ "quick_note", "remember_decision", "store_context"
888
+ ],
889
+ "analytics_insights": [
890
+ "memory_usage_stats", "content_analytics"
891
+ ]
892
+ }
893
+
894
+ return cast(ToolResponse, {
895
+ "success": True,
896
+ "categories": categories,
897
+ "total_tools": sum(len(tools) for tools in categories.values()),
898
+ "description": "Organized view of all available memory bank capabilities"
899
+ })
900
+
901
+
902
+ @mcp.tool
903
+ @catch_errors
904
+ def get_tools_by_category(category: str) -> ToolResponse:
905
+ """
906
+ Get detailed information about tools in a specific category.
907
+
908
+ Args:
909
+ category (str): Category name (schema_management, data_operations,
910
+ search_discovery, semantic_search, workflow_shortcuts, analytics_insights)
911
+
912
+ Returns:
913
+ ToolResponse: {"success": True, "tools": [{"name": str, "description": str, "usage": str}]}
670
914
  """
671
- return cast(ToolResponse, get_database(DB_PATH).get_embedding_stats(table_name, embedding_column))
915
+ tool_details = {
916
+ "schema_management": [
917
+ {"name": "create_table", "description": "Create new tables with custom schemas", "usage": "create_table('table_name', [{'name': 'col', 'type': 'TEXT'}])"},
918
+ {"name": "list_tables", "description": "List all available tables", "usage": "list_tables()"},
919
+ {"name": "describe_table", "description": "Get detailed schema for a table", "usage": "describe_table('table_name')"},
920
+ {"name": "drop_table", "description": "Delete a table permanently", "usage": "drop_table('table_name')"},
921
+ {"name": "rename_table", "description": "Rename an existing table", "usage": "rename_table('old_name', 'new_name')"},
922
+ {"name": "list_all_columns", "description": "Get all columns across all tables", "usage": "list_all_columns()"},
923
+ ],
924
+ "data_operations": [
925
+ {"name": "create_row", "description": "Insert new data into any table", "usage": "create_row('table', {'col': 'value'})"},
926
+ {"name": "read_rows", "description": "Query data with optional filtering", "usage": "read_rows('table', {'filter_col': 'value'})"},
927
+ {"name": "update_rows", "description": "Modify existing data", "usage": "update_rows('table', {'new_data': 'value'}, {'where_col': 'value'})"},
928
+ {"name": "delete_rows", "description": "Remove data from tables", "usage": "delete_rows('table', {'filter_col': 'value'})"},
929
+ {"name": "run_select_query", "description": "Execute safe SELECT queries", "usage": "run_select_query('table', ['col1', 'col2'], {'filter': 'value'})"},
930
+ ],
931
+ "search_discovery": [
932
+ {"name": "search_content", "description": "Full-text search across all content", "usage": "search_content('search query', ['table1', 'table2'])"},
933
+ {"name": "explore_tables", "description": "Discover table structures and sample data", "usage": "explore_tables('pattern*')"},
934
+ ],
935
+ "semantic_search": [
936
+ {"name": "add_embeddings", "description": "Enable semantic search on tables", "usage": "add_embeddings('table', ['text_col1', 'text_col2'])"},
937
+ {"name": "semantic_search", "description": "Natural language content discovery", "usage": "semantic_search('find ML algorithms')"},
938
+ {"name": "find_related", "description": "Discover similar content", "usage": "find_related('table', row_id, 0.5)"},
939
+ {"name": "smart_search", "description": "Hybrid keyword + semantic search", "usage": "smart_search('search query')"},
940
+ {"name": "embedding_stats", "description": "Check semantic search readiness", "usage": "embedding_stats('table')"},
941
+ ],
942
+ "workflow_shortcuts": [
943
+ {"name": "quick_note", "description": "Rapidly store notes and observations", "usage": "quick_note('content', 'category')"},
944
+ {"name": "remember_decision", "description": "Store technical decisions with context", "usage": "remember_decision('decision', 'approach', 'rationale')"},
945
+ {"name": "store_context", "description": "Save session context and progress", "usage": "store_context('topic', 'current_state', 'next_steps')"},
946
+ ],
947
+ "analytics_insights": [
948
+ {"name": "memory_usage_stats", "description": "Analyze memory bank usage patterns", "usage": "memory_usage_stats()"},
949
+ {"name": "content_analytics", "description": "Get insights on stored content", "usage": "content_analytics('table_name')"},
950
+ ],
951
+ }
952
+
953
+ if category not in tool_details:
954
+ return cast(ToolResponse, {
955
+ "success": False,
956
+ "error": f"Unknown category '{category}'. Available: {list(tool_details.keys())}",
957
+ "category": "VALIDATION",
958
+ "details": {"available_categories": list(tool_details.keys())},
959
+ })
960
+
961
+ return cast(ToolResponse, {
962
+ "success": True,
963
+ "category": category,
964
+ "tools": tool_details[category],
965
+ "tool_count": len(tool_details[category]),
966
+ })
672
967
 
673
968
 
674
969
  # Export the FastMCP app for use in other modules and server runners
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mcp_sqlite_memory_bank
3
- Version: 1.3.1
3
+ Version: 1.4.2
4
4
  Summary: A dynamic, agent/LLM-friendly SQLite memory bank for MCP servers with semantic search capabilities.
5
5
  Author-email: Robert Meisner <robert@catchit.pl>
6
6
  License-Expression: MIT
@@ -40,10 +40,14 @@ This project provides a robust, discoverable API for creating, exploring, and ma
40
40
  - Build and query knowledge graphs for semantic search and reasoning
41
41
  - Store, retrieve, and organize notes or structured data for LLM agents
42
42
  - Enable natural language workflows for database management and exploration
43
+ - Intelligent content discovery with semantic search capabilities
44
+ - Access memory content through standardized MCP Resources and Prompts
43
45
  - Integrate with FastMCP, Claude Desktop, and other agent platforms for seamless tool discovery
44
46
 
45
47
  **Why mcp_sqlite_memory_bank?**
46
- - Explicit, discoverable APIs for LLMs and agents
48
+ - **Full MCP Compliance:** Resources, Prompts, and 20+ organized tools
49
+ - **Semantic Search:** Natural language content discovery with AI-powered similarity matching
50
+ - **Explicit, discoverable APIs** for LLMs and agents with enhanced categorization
47
51
  - Safe, parameterized queries and schema management
48
52
  - Designed for extensibility and open source collaboration
49
53
 
@@ -108,18 +112,59 @@ Restart your IDE and try asking your AI assistant:
108
112
  - **Dynamic Table Management:** Create, list, describe, rename, and drop tables at runtime
109
113
  - **CRUD Operations:** Insert, read, update, and delete rows in any table
110
114
  - **Safe SQL:** Run parameterized SELECT queries with input validation
115
+ - **Semantic Search:** Natural language search using sentence-transformers for intelligent content discovery
116
+ - **MCP Resources:** Access memory content through standardized MCP resource URIs
117
+ - **MCP Prompts:** Built-in intelligent prompts for common memory analysis workflows
118
+ - **Tool Categorization:** Organized tool discovery with detailed usage examples for enhanced LLM integration
111
119
  - **Knowledge Graph Tools:** Built-in support for node/edge schemas and property graphs
112
120
  - **Agent/LLM Integration:** Explicit, tool-based APIs for easy discovery and automation
113
121
  - **Open Source:** MIT licensed, fully tested, and ready for community use
114
122
 
115
123
  ---
116
124
 
125
+ ## MCP Compliance & Enhanced Integration
126
+
127
+ SQLite Memory Bank v1.4.0+ provides full Model Context Protocol (MCP) compliance with advanced features for enhanced LLM and agent integration:
128
+
129
+ ### 🔧 MCP Tools (20 Available)
130
+ Organized into logical categories for easy discovery:
131
+ - **Schema Management** (6 tools): Table creation, modification, and inspection
132
+ - **Data Operations** (5 tools): CRUD operations with validation
133
+ - **Search & Discovery** (2 tools): Content search and exploration
134
+ - **Semantic Search** (5 tools): AI-powered natural language content discovery
135
+ - **Analytics** (2 tools): Memory bank insights and statistics
136
+
137
+ ### 📄 MCP Resources (5 Available)
138
+ Real-time access to memory content via standardized URIs:
139
+ - `memory://tables/list` - List of all available tables
140
+ - `memory://tables/{table_name}/schema` - Table schema information
141
+ - `memory://tables/{table_name}/data` - Table data content
142
+ - `memory://search/{query}` - Search results as resources
143
+ - `memory://analytics/overview` - Memory bank overview analytics
144
+
145
+ ### 💡 MCP Prompts (4 Available)
146
+ Intelligent prompts for common memory analysis workflows:
147
+ - `analyze-memory-content` - Analyze memory bank content and provide insights
148
+ - `search-and-summarize` - Search and create summary prompts
149
+ - `technical-decision-analysis` - Analyze technical decisions from memory
150
+ - `memory-bank-context` - Provide memory bank context for AI conversations
151
+
152
+ ### 🎯 Enhanced Discoverability
153
+ - **Tool Categorization:** `list_tool_categories()` for organized tool discovery
154
+ - **Usage Examples:** `get_tools_by_category()` with detailed examples for each tool
155
+ - **Semantic Search:** Natural language queries for intelligent content discovery
156
+ - **LLM-Friendly APIs:** Explicit, descriptive tool names and comprehensive documentation
157
+
158
+ ---
159
+
117
160
 
118
161
  ## Tools & API Reference
119
162
 
120
163
  All tools are designed for explicit, discoverable use by LLMs, agents, and developers. Each function is available as a direct Python import and as an MCP tool.
121
164
 
122
- ### Table Management Tools
165
+ **🔍 Tool Discovery:** Use `list_tool_categories()` to see all organized tool categories, or `get_tools_by_category(category)` for detailed information about specific tool groups with usage examples.
166
+
167
+ ### Schema Management Tools (6 tools)
123
168
 
124
169
  | Tool | Description | Required Parameters | Optional Parameters |
125
170
  |------|-------------|---------------------|---------------------|
@@ -130,7 +175,7 @@ All tools are designed for explicit, discoverable use by LLMs, agents, and devel
130
175
  | `describe_table` | Get schema details | `table_name` (str) | None |
131
176
  | `list_all_columns` | List all columns for all tables | None | None |
132
177
 
133
- ### Data Management Tools
178
+ ### Data Operations Tools (5 tools)
134
179
 
135
180
  | Tool | Description | Required Parameters | Optional Parameters |
136
181
  |------|-------------|---------------------|---------------------|
@@ -140,6 +185,30 @@ All tools are designed for explicit, discoverable use by LLMs, agents, and devel
140
185
  | `delete_rows` | Delete rows from table | `table_name` (str), `where` (dict) | None |
141
186
  | `run_select_query` | Run safe SELECT query | `table_name` (str) | `columns` (list[str]), `where` (dict), `limit` (int) |
142
187
 
188
+ ### Search & Discovery Tools (2 tools)
189
+
190
+ | Tool | Description | Required Parameters | Optional Parameters |
191
+ |------|-------------|---------------------|---------------------|
192
+ | `search_content` | Full-text search across table content | `query` (str) | `tables` (list[str]), `limit` (int) |
193
+ | `explore_tables` | Explore and discover table structures | None | `pattern` (str), `include_row_counts` (bool) |
194
+
195
+ ### Semantic Search Tools (5 tools)
196
+
197
+ | Tool | Description | Required Parameters | Optional Parameters |
198
+ |------|-------------|---------------------|---------------------|
199
+ | `add_embeddings` | Generate vector embeddings for semantic search | `table_name` (str), `text_columns` (list[str]) | `embedding_column` (str), `model_name` (str) |
200
+ | `semantic_search` | Natural language search using vector similarity | `query` (str) | `tables` (list[str]), `similarity_threshold` (float), `limit` (int) |
201
+ | `find_related` | Find content related to specific row by similarity | `table_name` (str), `row_id` (int) | `similarity_threshold` (float), `limit` (int) |
202
+ | `smart_search` | Hybrid keyword + semantic search | `query` (str) | `tables` (list[str]), `semantic_weight` (float), `text_weight` (float) |
203
+ | `embedding_stats` | Get statistics about semantic search readiness | `table_name` (str) | `embedding_column` (str) |
204
+
205
+ ### Tool Discovery & Organization (2 tools)
206
+
207
+ | Tool | Description | Required Parameters | Optional Parameters |
208
+ |------|-------------|---------------------|---------------------|
209
+ | `list_tool_categories` | List all available tool categories | None | None |
210
+ | `get_tools_by_category` | Get detailed tool information by category | `category` (str) | None |
211
+
143
212
  Each tool validates inputs and returns consistent response formats with success/error indicators and appropriate data payloads.
144
213
 
145
214
  ---
@@ -468,6 +537,97 @@ For a complete agent memory implementation example, see [examples/agent_memory_e
468
537
 
469
538
  ---
470
539
 
540
+ ## MCP Resources and Prompts Usage
541
+
542
+ ### Using MCP Resources
543
+
544
+ MCP Resources provide real-time access to memory content through standardized URIs:
545
+
546
+ ```python
547
+ # Access resource via MCP client
548
+ resource_uri = "memory://tables/list"
549
+ tables_resource = await client.read_resource(resource_uri)
550
+
551
+ # Get table schema
552
+ schema_uri = "memory://tables/user_preferences/schema"
553
+ schema_resource = await client.read_resource(schema_uri)
554
+
555
+ # Access table data
556
+ data_uri = "memory://tables/user_preferences/data"
557
+ data_resource = await client.read_resource(data_uri)
558
+
559
+ # Search as resource
560
+ search_uri = "memory://search/user preferences coding style"
561
+ search_resource = await client.read_resource(search_uri)
562
+
563
+ # Analytics overview
564
+ analytics_uri = "memory://analytics/overview"
565
+ analytics_resource = await client.read_resource(analytics_uri)
566
+ ```
567
+
568
+ ### Using MCP Prompts
569
+
570
+ MCP Prompts provide intelligent analysis workflows:
571
+
572
+ ```python
573
+ # Analyze memory content
574
+ analysis_prompt = await client.get_prompt("analyze-memory-content", {
575
+ "focus_area": "technical_decisions"
576
+ })
577
+
578
+ # Search and summarize
579
+ summary_prompt = await client.get_prompt("search-and-summarize", {
580
+ "query": "database performance optimization",
581
+ "max_results": 10
582
+ })
583
+
584
+ # Technical decision analysis
585
+ decision_analysis = await client.get_prompt("technical-decision-analysis", {
586
+ "decision_category": "architecture"
587
+ })
588
+
589
+ # Get memory context for conversations
590
+ context_prompt = await client.get_prompt("memory-bank-context", {
591
+ "conversation_topic": "API design patterns"
592
+ })
593
+ ```
594
+
595
+ ### Semantic Search Examples
596
+
597
+ ```python
598
+ # Enable semantic search on existing table
599
+ add_embeddings("technical_decisions", ["decision_name", "rationale"])
600
+
601
+ # Natural language search
602
+ results = semantic_search("machine learning algorithms",
603
+ similarity_threshold=0.4,
604
+ limit=5)
605
+
606
+ # Find related content
607
+ related = find_related("technical_decisions",
608
+ row_id=123,
609
+ similarity_threshold=0.5)
610
+
611
+ # Hybrid search (keyword + semantic)
612
+ hybrid_results = smart_search("API design patterns",
613
+ semantic_weight=0.7,
614
+ text_weight=0.3)
615
+ ```
616
+
617
+ ### Tool Organization Discovery
618
+
619
+ ```python
620
+ # Discover tool categories
621
+ categories = list_tool_categories()
622
+ # Returns: {"schema_management": 6, "data_operations": 5, ...}
623
+
624
+ # Get detailed tool information
625
+ schema_tools = get_tools_by_category("schema_management")
626
+ # Returns detailed info with usage examples for each tool
627
+ ```
628
+
629
+ ---
630
+
471
631
  ## Troubleshooting
472
632
 
473
633
  ### Common MCP Connection Issues
@@ -0,0 +1,15 @@
1
+ mcp_sqlite_memory_bank/__init__.py,sha256=6Y9_iSiQIWOPJYMcMkbrqmaWiM-ymRHdNR6W-8jHj-k,2403
2
+ mcp_sqlite_memory_bank/database.py,sha256=dQdl3QPqBBf_AQwMXHjRZ8rdget0UKQ2vz0V-Ik1o7g,42231
3
+ mcp_sqlite_memory_bank/prompts.py,sha256=nLY6rf08wU5TeSLoSxjTlwcU_OIiJeOIkJYDQM_PFpo,11762
4
+ mcp_sqlite_memory_bank/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ mcp_sqlite_memory_bank/resources.py,sha256=A55G44MIQdNiYlOa9fJqGVC7OL_Cx7sbE8oCF4JlnX8,7464
6
+ mcp_sqlite_memory_bank/semantic.py,sha256=wvabBqlThNN76feyEa8BNRachBZYxkZQaPPIweHgxV8,14855
7
+ mcp_sqlite_memory_bank/server.py,sha256=x4cHGEheG6XY54ErHjtRaPPsDjbQUdvmWZPgipKk7rU,48096
8
+ mcp_sqlite_memory_bank/types.py,sha256=2rNhd6dbvEFsey9QGHQ0VPGSB3U0RaXw8fKVfiBuUJw,6535
9
+ mcp_sqlite_memory_bank/utils.py,sha256=wHbR0cUlV-AWBk8ToI5ZgCrfrMp380ofyEc_GLB0l4g,6185
10
+ mcp_sqlite_memory_bank-1.4.2.dist-info/licenses/LICENSE,sha256=KPr7eFgCJqQIjeSAcwRafbjcgm-10zkrJ7MFoTOGJQg,1092
11
+ mcp_sqlite_memory_bank-1.4.2.dist-info/METADATA,sha256=D52NnWZDvyA9lVqgM-YZaev8X3lfAeQdo2udl3GHj_k,33094
12
+ mcp_sqlite_memory_bank-1.4.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
+ mcp_sqlite_memory_bank-1.4.2.dist-info/entry_points.txt,sha256=S9yGWiCe8f_rgcGCgbwEAX2FfJ9jXWxcc4K4Jenbcn8,150
14
+ mcp_sqlite_memory_bank-1.4.2.dist-info/top_level.txt,sha256=xQ8MTGECpWMR-9DV4H8mMqaSoZqE-C8EvpOg9E2U1wM,23
15
+ mcp_sqlite_memory_bank-1.4.2.dist-info/RECORD,,
@@ -1,13 +0,0 @@
1
- mcp_sqlite_memory_bank/__init__.py,sha256=6Y9_iSiQIWOPJYMcMkbrqmaWiM-ymRHdNR6W-8jHj-k,2403
2
- mcp_sqlite_memory_bank/database.py,sha256=kBHiibDV2ucm9alaDrNB-txm7v-hGt-uTNHIFKuFlKI,41873
3
- mcp_sqlite_memory_bank/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- mcp_sqlite_memory_bank/semantic.py,sha256=XVfM1C95TdZDtXOrwrGxR5JZnmeeO4O45gxniPWa6go,14791
5
- mcp_sqlite_memory_bank/server.py,sha256=lt_1LSXDDJoSATNd4T07GmfPqlYcdQlmwDGR_TySboM,33896
6
- mcp_sqlite_memory_bank/types.py,sha256=2rNhd6dbvEFsey9QGHQ0VPGSB3U0RaXw8fKVfiBuUJw,6535
7
- mcp_sqlite_memory_bank/utils.py,sha256=wHbR0cUlV-AWBk8ToI5ZgCrfrMp380ofyEc_GLB0l4g,6185
8
- mcp_sqlite_memory_bank-1.3.1.dist-info/licenses/LICENSE,sha256=KPr7eFgCJqQIjeSAcwRafbjcgm-10zkrJ7MFoTOGJQg,1092
9
- mcp_sqlite_memory_bank-1.3.1.dist-info/METADATA,sha256=xgs7hG0Uu1Sr6Qpj_HR6wHz48P4wxTQmD_CbSknsLus,26002
10
- mcp_sqlite_memory_bank-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
11
- mcp_sqlite_memory_bank-1.3.1.dist-info/entry_points.txt,sha256=S9yGWiCe8f_rgcGCgbwEAX2FfJ9jXWxcc4K4Jenbcn8,150
12
- mcp_sqlite_memory_bank-1.3.1.dist-info/top_level.txt,sha256=xQ8MTGECpWMR-9DV4H8mMqaSoZqE-C8EvpOg9E2U1wM,23
13
- mcp_sqlite_memory_bank-1.3.1.dist-info/RECORD,,