mcp-sqlite-memory-bank 1.5.1-py3-none-any.whl → 1.6.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -11,7 +11,7 @@ from typing import Dict, Optional, List, cast, Any
  from fastmcp import FastMCP
  
  from ..database import get_database
- from ..semantic import is_semantic_search_available
+ from ..semantic import is_semantic_search_available
  from ..types import ToolResponse
  from ..utils import catch_errors
  
@@ -20,14 +20,14 @@ from ..utils import catch_errors
  def analyze_memory_patterns() -> ToolResponse:
      """
      🔍 **MEMORY PATTERN ANALYSIS** - Discover insights in your memory bank!
-
+
      Analyzes content patterns, usage statistics, and identifies opportunities
      for better organization and knowledge discovery.
-
+
      Returns:
          ToolResponse: On success: {"success": True, "analysis": dict}
          On error: {"success": False, "error": str, "category": str, "details": dict}
-
+
      Examples:
          >>> analyze_memory_patterns()
          {"success": True, "analysis": {
@@ -36,7 +36,7 @@ def analyze_memory_patterns() -> ToolResponse:
              "semantic_readiness": {"ready": 2, "needs_setup": 1},
              "recommendations": ["Consider embedding setup for 'notes' table"]
          }}
-
+
      FastMCP Tool Info:
      - **CONTENT INSIGHTS**: Analyzes distribution and quality of stored content
      - **SEMANTIC READINESS**: Shows which tables are ready for semantic search
@@ -45,13 +45,14 @@ def analyze_memory_patterns() -> ToolResponse:
      """
      try:
          from .. import server
+
          db = get_database(server.DB_PATH)
-
+
          # Get all tables
          tables_result = db.list_tables()
          if not tables_result.get("success"):
              return cast(ToolResponse, tables_result)
-
+
          tables = tables_result.get("tables", [])
          analysis = {
              "content_distribution": {},
@@ -60,34 +61,41 @@
              "schema_analysis": {},
              "recommendations": [],
              "total_tables": len(tables),
-             "total_content_rows": 0
+             "total_content_rows": 0,
          }
-
+
          for table_name in tables:
              try:
                  # Get basic table info
                  rows_result = db.read_rows(table_name)
                  if not rows_result.get("success"):
                      continue
-
+
                  rows = rows_result.get("rows", [])
                  row_count = len(rows)
                  analysis["content_distribution"][table_name] = row_count
                  analysis["total_content_rows"] += row_count
-
+
                  # Analyze schema
                  schema_result = db.describe_table(table_name)
                  if schema_result.get("success"):
                      columns = schema_result.get("columns", [])
-                     text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
-
+                     text_columns = [
+                         col for col in columns if "TEXT" in col.get("type", "").upper()
+                     ]
+
                      analysis["schema_analysis"][table_name] = {
                          "total_columns": len(columns),
                          "text_columns": len(text_columns),
-                         "has_id_column": any(col.get("name") == "id" for col in columns),
-                         "has_timestamp": any("timestamp" in col.get("name", "").lower() for col in columns)
+                         "has_id_column": any(
+                             col.get("name") == "id" for col in columns
+                         ),
+                         "has_timestamp": any(
+                             "timestamp" in col.get("name", "").lower()
+                             for col in columns
+                         ),
                      }
-
+
                  # Analyze text density
                  if rows and text_columns:
                      text_content_lengths = []
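
Aside: both tools classify columns with the same heuristics shown in this hunk. A column counts as a text column when its declared type contains TEXT, and has_id_column/has_timestamp are plain name checks. A self-contained sketch of that logic (the column dicts below are invented for illustration, not taken from the package):

    # Sketch of the schema heuristics above; the input data is illustrative.
    columns = [
        {"name": "id", "type": "INTEGER"},
        {"name": "note", "type": "TEXT"},
        {"name": "created_timestamp", "type": "TEXT"},
    ]
    text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
    schema_info = {
        "total_columns": len(columns),  # 3
        "text_columns": len(text_columns),  # 2
        "has_id_column": any(col.get("name") == "id" for col in columns),  # True
        "has_timestamp": any(
            "timestamp" in col.get("name", "").lower() for col in columns
        ),  # True
    }
    print(schema_info)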
@@ -96,16 +104,18 @@ def analyze_memory_patterns() -> ToolResponse:
                          content = row.get(col["name"], "")
                          if content:
                              text_content_lengths.append(len(str(content)))
-
+
                      if text_content_lengths:
-                         avg_length = sum(text_content_lengths) / len(text_content_lengths)
+                         avg_length = sum(text_content_lengths) / len(
+                             text_content_lengths
+                         )
                          if avg_length > 500:
                              analysis["text_density"]["high"].append(table_name)
                          elif avg_length > 100:
                              analysis["text_density"]["medium"].append(table_name)
                          else:
                              analysis["text_density"]["low"].append(table_name)
-
+
                  # Check semantic readiness
                  if is_semantic_search_available():
                      embedding_stats = db.get_embedding_stats(table_name)
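
Aside: the density buckets are keyed off the average length of the text fields collected above. A table averaging over 500 characters is "high", over 100 is "medium", anything else is "low". A quick worked example with invented lengths:

    # Worked example of the density thresholds; sample lengths are invented.
    text_content_lengths = [120, 450, 90]
    avg_length = sum(text_content_lengths) / len(text_content_lengths)  # 220.0
    bucket = "high" if avg_length > 500 else "medium" if avg_length > 100 else "low"
    print(avg_length, bucket)  # 220.0 medium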
@@ -116,80 +126,109 @@ def analyze_memory_patterns() -> ToolResponse:
                          elif coverage > 0:
                              analysis["semantic_readiness"]["partial"].append(table_name)
                          else:
-                             analysis["semantic_readiness"]["needs_setup"].append(table_name)
-
+                             analysis["semantic_readiness"]["needs_setup"].append(
+                                 table_name
+                             )
+
              except Exception as e:
                  logging.warning(f"Error analyzing table {table_name}: {e}")
                  continue
-
+
          # Generate recommendations
          recommendations = []
-
+
          # Semantic search recommendations
          if len(analysis["semantic_readiness"]["needs_setup"]) > 0:
-             high_value_tables = [t for t in analysis["semantic_readiness"]["needs_setup"]
-                                  if t in analysis["text_density"]["high"] + analysis["text_density"]["medium"]]
+             high_value_tables = [
+                 t
+                 for t in analysis["semantic_readiness"]["needs_setup"]
+                 if t
+                 in analysis["text_density"]["high"] + analysis["text_density"]["medium"]
+             ]
              if high_value_tables:
-                 recommendations.append(f"Consider setting up semantic search for high-value tables: {', '.join(high_value_tables[:3])}")
-
+                 recommendations.append(
+                     f"Consider setting up semantic search for high-value tables: {', '.join(high_value_tables[:3])}"
+                 )
+
          # Content organization recommendations
-         large_tables = [t for t, count in analysis["content_distribution"].items() if count > 50]
+         large_tables = [
+             t for t, count in analysis["content_distribution"].items() if count > 50
+         ]
          if large_tables:
-             recommendations.append(f"Large tables detected: {', '.join(large_tables)}. Consider organizing with categories or tags.")
-
+             recommendations.append(
+                 f"Large tables detected: {', '.join(large_tables)}. Consider organizing with categories or tags."
+             )
+
          # Empty or sparse tables
-         sparse_tables = [t for t, count in analysis["content_distribution"].items() if count < 5 and count > 0]
+         sparse_tables = [
+             t
+             for t, count in analysis["content_distribution"].items()
+             if count < 5 and count > 0
+         ]
          if sparse_tables:
-             recommendations.append(f"Sparse tables found: {', '.join(sparse_tables)}. Consider consolidating or adding more content.")
-
+             recommendations.append(
+                 f"Sparse tables found: {', '.join(sparse_tables)}. Consider consolidating or adding more content."
+             )
+
          # Schema improvements
-         tables_without_timestamps = [t for t, schema in analysis["schema_analysis"].items()
-                                      if not schema.get("has_timestamp")]
+         tables_without_timestamps = [
+             t
+             for t, schema in analysis["schema_analysis"].items()
+             if not schema.get("has_timestamp")
+         ]
          if len(tables_without_timestamps) > 2:
-             recommendations.append("Consider adding timestamp columns to track when content was created/modified.")
-
+             recommendations.append(
+                 "Consider adding timestamp columns to track when content was created/modified."
+             )
+
          analysis["recommendations"] = recommendations
-
-         return cast(ToolResponse, {
-             "success": True,
-             "analysis": analysis,
-             "summary": {
-                 "tables_analyzed": len(tables),
-                 "total_rows": analysis["total_content_rows"],
-                 "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
-                 "high_value_content": len(analysis["text_density"]["high"]),
-                 "recommendations_count": len(recommendations)
-             }
-         })
-
+
+         return cast(
+             ToolResponse,
+             {
+                 "success": True,
+                 "analysis": analysis,
+                 "summary": {
+                     "tables_analyzed": len(tables),
+                     "total_rows": analysis["total_content_rows"],
+                     "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
+                     "high_value_content": len(analysis["text_density"]["high"]),
+                     "recommendations_count": len(recommendations),
+                 },
+             },
+         )
+
      except Exception as e:
-         return cast(ToolResponse, {
-             "success": False,
-             "error": f"Memory pattern analysis failed: {str(e)}",
-             "category": "ANALYSIS",
-             "details": {"exception": str(e)}
-         })
+         return cast(
+             ToolResponse,
+             {
+                 "success": False,
+                 "error": f"Memory pattern analysis failed: {str(e)}",
+                 "category": "ANALYSIS",
+                 "details": {"exception": str(e)},
+             },
+         )
  
  
  @catch_errors
  def get_content_health_score() -> ToolResponse:
      """
      📊 **CONTENT HEALTH ASSESSMENT** - Rate the quality of your memory bank!
-
+
      Provides a comprehensive health score based on content quality, organization,
      semantic search readiness, and usage patterns.
-
+
      Returns:
          ToolResponse: On success: {"success": True, "health_score": float, "metrics": dict}
          On error: {"success": False, "error": str, "category": str, "details": dict}
-
+
      Examples:
          >>> get_content_health_score()
          {"success": True, "health_score": 8.5, "metrics": {
              "content_quality": 9.0, "organization": 7.5, "semantic_readiness": 8.0,
              "accessibility": 9.0, "recommendations": [...]
          }}
-
+
      FastMCP Tool Info:
      - **OVERALL SCORE**: Single metric (0-10) indicating memory bank health
      - **DETAILED METRICS**: Breakdown by quality, organization, and readiness
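
Aside: the re-wrapping in the hunk above leaves the response shape of analyze_memory_patterns untouched, so 1.5.1 callers keep working. A minimal consumption sketch, assuming the tool is invoked in-process; the keys follow the docstring:

    # Hypothetical in-process caller of the tool above.
    result = analyze_memory_patterns()
    if result.get("success"):
        analysis = result["analysis"]
        for table, rows in analysis["content_distribution"].items():
            print(f"{table}: {rows} rows")
        for rec in analysis["recommendations"]:
            print("suggestion:", rec)
    else:
        print("analysis failed:", result.get("error"))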
@@ -199,20 +238,24 @@ def get_content_health_score() -> ToolResponse:
      try:
          # Get the pattern analysis first - call database methods directly
          from .. import server
+
          db = get_database(server.DB_PATH)
-
+
          # Get all tables
          tables_result = db.list_tables()
          if not tables_result.get("success"):
-             return cast(ToolResponse, {
-                 "success": False,
-                 "error": "Failed to get tables for health analysis",
-                 "category": "DATABASE",
-                 "details": tables_result
-             })
-
+             return cast(
+                 ToolResponse,
+                 {
+                     "success": False,
+                     "error": "Failed to get tables for health analysis",
+                     "category": "DATABASE",
+                     "details": tables_result,
+                 },
+             )
+
          tables = tables_result.get("tables", [])
-
+
          # Build basic analysis for health scoring
          analysis = {
              "content_distribution": {},
@@ -220,34 +263,41 @@ def get_content_health_score() -> ToolResponse:
              "semantic_readiness": {"ready": [], "partial": [], "needs_setup": []},
              "schema_analysis": {},
              "total_tables": len(tables),
-             "total_content_rows": 0
+             "total_content_rows": 0,
          }
-
+
          for table_name in tables:
              try:
                  # Get basic table info
                  rows_result = db.read_rows(table_name)
                  if not rows_result.get("success"):
                      continue
-
+
                  rows = rows_result.get("rows", [])
                  row_count = len(rows)
                  analysis["content_distribution"][table_name] = row_count
                  analysis["total_content_rows"] += row_count
-
+
                  # Analyze schema
                  schema_result = db.describe_table(table_name)
                  if schema_result.get("success"):
                      columns = schema_result.get("columns", [])
-                     text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
-
+                     text_columns = [
+                         col for col in columns if "TEXT" in col.get("type", "").upper()
+                     ]
+
                      analysis["schema_analysis"][table_name] = {
                          "total_columns": len(columns),
                          "text_columns": len(text_columns),
-                         "has_id_column": any(col.get("name") == "id" for col in columns),
-                         "has_timestamp": any("timestamp" in col.get("name", "").lower() for col in columns)
+                         "has_id_column": any(
+                             col.get("name") == "id" for col in columns
+                         ),
+                         "has_timestamp": any(
+                             "timestamp" in col.get("name", "").lower()
+                             for col in columns
+                         ),
                      }
-
+
                  # Analyze text density
                  if rows and text_columns:
                      text_content_lengths = []
@@ -256,16 +306,18 @@ def get_content_health_score() -> ToolResponse:
                          content = row.get(col["name"], "")
                          if content:
                              text_content_lengths.append(len(str(content)))
-
+
                      if text_content_lengths:
-                         avg_length = sum(text_content_lengths) / len(text_content_lengths)
+                         avg_length = sum(text_content_lengths) / len(
+                             text_content_lengths
+                         )
                          if avg_length > 500:
                              analysis["text_density"]["high"].append(table_name)
                          elif avg_length > 100:
                              analysis["text_density"]["medium"].append(table_name)
                          else:
                              analysis["text_density"]["low"].append(table_name)
-
+
                  # Check semantic readiness
                  if is_semantic_search_available():
                      embedding_stats = db.get_embedding_stats(table_name)
@@ -276,27 +328,29 @@ def get_content_health_score() -> ToolResponse:
                          elif coverage > 0:
                              analysis["semantic_readiness"]["partial"].append(table_name)
                          else:
-                             analysis["semantic_readiness"]["needs_setup"].append(table_name)
-
+                             analysis["semantic_readiness"]["needs_setup"].append(
+                                 table_name
+                             )
+
              except Exception as e:
                  logging.warning(f"Error analyzing table {table_name}: {e}")
                  continue
-
+
          summary = {
              "tables_analyzed": len(tables),
              "total_rows": analysis["total_content_rows"],
              "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
-             "high_value_content": len(analysis["text_density"]["high"])
+             "high_value_content": len(analysis["text_density"]["high"]),
          }
-
+
          # Calculate health metrics (0-10 scale)
          metrics = {}
-
+
          # 1. Content Quality Score (based on text density and volume)
          total_rows = summary.get("total_rows", 0)
          high_quality_tables = len(analysis.get("text_density", {}).get("high", []))
          total_tables = summary.get("tables_analyzed", 1)
-
+
          if total_rows == 0:
              metrics["content_volume"] = 0.0
          elif total_rows < 10:
@@ -307,13 +361,15 @@ def get_content_health_score() -> ToolResponse:
              metrics["content_volume"] = 8.0
          else:
              metrics["content_volume"] = 10.0
-
-         metrics["content_quality"] = min(10.0, (high_quality_tables / total_tables) * 10 + 3)
-
+
+         metrics["content_quality"] = min(
+             10.0, (high_quality_tables / total_tables) * 10 + 3
+         )
+
          # 2. Organization Score (based on schema quality)
          schema_analysis = analysis.get("schema_analysis", {})
          organization_factors = []
-
+
          for table_name, schema in schema_analysis.items():
              table_score = 0
              if schema.get("has_id_column"):
@@ -325,63 +381,87 @@ def get_content_health_score() -> ToolResponse:
              if 2 <= schema.get("total_columns", 0) <= 10:  # Good column count
                  table_score += 3
              organization_factors.append(table_score)
-
-         metrics["organization"] = (sum(organization_factors) / len(organization_factors)) if organization_factors else 5.0
-
+
+         metrics["organization"] = (
+             (sum(organization_factors) / len(organization_factors))
+             if organization_factors
+             else 5.0
+         )
+
          # 3. Semantic Readiness Score
          semantic_ready = len(analysis.get("semantic_readiness", {}).get("ready", []))
-         semantic_partial = len(analysis.get("semantic_readiness", {}).get("partial", []))
-         semantic_needed = len(analysis.get("semantic_readiness", {}).get("needs_setup", []))
-
+         semantic_partial = len(
+             analysis.get("semantic_readiness", {}).get("partial", [])
+         )
+         semantic_needed = len(
+             analysis.get("semantic_readiness", {}).get("needs_setup", [])
+         )
+
          if not is_semantic_search_available():
              metrics["semantic_readiness"] = 5.0  # Neutral score if not available
              metrics["semantic_note"] = "Semantic search dependencies not available"
          else:
-             semantic_score = ((semantic_ready * 2 + semantic_partial) / (total_tables * 2)) * 10
+             semantic_score = (
+                 (semantic_ready * 2 + semantic_partial) / (total_tables * 2)
+             ) * 10
              metrics["semantic_readiness"] = min(10.0, semantic_score)
-
+
          # 4. Accessibility Score (how easy it is to find and use content)
          medium_density = len(analysis.get("text_density", {}).get("medium", []))
          low_density = len(analysis.get("text_density", {}).get("low", []))
-
+
          # Prefer medium density (not too verbose, not too sparse)
          if total_tables == 0:
              metrics["accessibility"] = 5.0
          else:
-             accessibility_score = ((high_quality_tables + medium_density * 1.5) / total_tables) * 8 + 2
+             accessibility_score = (
+                 (high_quality_tables + medium_density * 1.5) / total_tables
+             ) * 8 + 2
              metrics["accessibility"] = min(10.0, accessibility_score)
-
+
          # 5. Overall Health Score (weighted average)
          weights = {
              "content_volume": 0.2,
              "content_quality": 0.3,
              "organization": 0.2,
              "semantic_readiness": 0.15,
-             "accessibility": 0.15
+             "accessibility": 0.15,
          }
-
+
          health_score = sum(metrics[key] * weights[key] for key in weights.keys())
-
+
          # Generate health-specific recommendations
          health_recommendations = []
-
+
          if metrics["content_volume"] < 5:
-             health_recommendations.append("🔴 LOW CONTENT: Add more valuable content to your memory bank")
+             health_recommendations.append(
+                 "🔴 LOW CONTENT: Add more valuable content to your memory bank"
+             )
          elif metrics["content_volume"] < 7:
-             health_recommendations.append("🟡 MODERATE CONTENT: Consider expanding your knowledge base")
-
+             health_recommendations.append(
+                 "🟡 MODERATE CONTENT: Consider expanding your knowledge base"
+             )
+
          if metrics["content_quality"] < 6:
-             health_recommendations.append("🔴 CONTENT QUALITY: Focus on adding more detailed, rich content")
-
+             health_recommendations.append(
+                 "🔴 CONTENT QUALITY: Focus on adding more detailed, rich content"
+             )
+
          if metrics["organization"] < 6:
-             health_recommendations.append("🔴 ORGANIZATION: Improve table schemas with timestamps and proper columns")
-
+             health_recommendations.append(
+                 "🔴 ORGANIZATION: Improve table schemas with timestamps and proper columns"
+             )
+
          if metrics["semantic_readiness"] < 5 and is_semantic_search_available():
-             health_recommendations.append("🟡 SEMANTIC SEARCH: Set up embeddings for better content discovery")
-
+             health_recommendations.append(
+                 "🟡 SEMANTIC SEARCH: Set up embeddings for better content discovery"
+             )
+
          if metrics["accessibility"] < 6:
-             health_recommendations.append("🔴 ACCESSIBILITY: Improve content structure for easier discovery")
-
+             health_recommendations.append(
+                 "🔴 ACCESSIBILITY: Improve content structure for easier discovery"
+             )
+
          # Health grade
          if health_score >= 9:
              grade = "A+ (Excellent)"
@@ -395,25 +475,31 @@ def get_content_health_score() -> ToolResponse:
              grade = "C (Needs Improvement)"
          else:
              grade = "D (Poor - Needs Attention)"
-
-         return cast(ToolResponse, {
-             "success": True,
-             "health_score": round(health_score, 1),
-             "grade": grade,
-             "metrics": {k: round(v, 1) for k, v in metrics.items()},
-             "recommendations": health_recommendations,
-             "detailed_analysis": analysis,
-             "improvement_priority": {
-                 "highest": [k for k, v in metrics.items() if v < 5],
-                 "medium": [k for k, v in metrics.items() if 5 <= v < 7],
-                 "good": [k for k, v in metrics.items() if v >= 7]
-             }
-         })
-
+
+         return cast(
+             ToolResponse,
+             {
+                 "success": True,
+                 "health_score": round(health_score, 1),
+                 "grade": grade,
+                 "metrics": {k: round(v, 1) for k, v in metrics.items()},
+                 "recommendations": health_recommendations,
+                 "detailed_analysis": analysis,
+                 "improvement_priority": {
+                     "highest": [k for k, v in metrics.items() if v < 5],
+                     "medium": [k for k, v in metrics.items() if 5 <= v < 7],
+                     "good": [k for k, v in metrics.items() if v >= 7],
+                 },
+             },
+         )
+
      except Exception as e:
-         return cast(ToolResponse, {
-             "success": False,
-             "error": f"Content health assessment failed: {str(e)}",
-             "category": "ANALYSIS",
-             "details": {"exception": str(e)}
-         })
+         return cast(
+             ToolResponse,
+             {
+                 "success": False,
+                 "error": f"Content health assessment failed: {str(e)}",
+                 "category": "ANALYSIS",
+                 "details": {"exception": str(e)},
+             },
+         )
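
Aside: a matching consumption sketch for get_content_health_score (hypothetical in-process caller; keys follow the return block above):

    # Hypothetical in-process caller of the health tool above.
    health = get_content_health_score()
    if health.get("success"):
        print(health["health_score"], health["grade"])
        for metric in health["improvement_priority"]["highest"]:
            print("improve first:", metric)
    else:
        print("health check failed:", health.get("error"))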