superlocalmemory 2.8.2 → 2.8.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/ATTRIBUTION.md +1 -1
  2. package/CHANGELOG.md +17 -0
  3. package/README.md +7 -5
  4. package/api_server.py +5 -0
  5. package/bin/slm +35 -0
  6. package/bin/slm.bat +3 -3
  7. package/docs/SECURITY-QUICK-REFERENCE.md +214 -0
  8. package/install.ps1 +11 -11
  9. package/mcp_server.py +78 -10
  10. package/package.json +2 -2
  11. package/requirements-core.txt +16 -18
  12. package/requirements-learning.txt +8 -8
  13. package/requirements.txt +9 -7
  14. package/scripts/prepack.js +33 -0
  15. package/scripts/verify-v27.ps1 +301 -0
  16. package/src/agent_registry.py +32 -28
  17. package/src/auto_backup.py +12 -6
  18. package/src/cache_manager.py +2 -2
  19. package/src/compression/__init__.py +25 -0
  20. package/src/compression/cli.py +150 -0
  21. package/src/compression/cold_storage.py +217 -0
  22. package/src/compression/config.py +72 -0
  23. package/src/compression/orchestrator.py +133 -0
  24. package/src/compression/tier2_compressor.py +228 -0
  25. package/src/compression/tier3_compressor.py +153 -0
  26. package/src/compression/tier_classifier.py +148 -0
  27. package/src/db_connection_manager.py +5 -5
  28. package/src/event_bus.py +24 -22
  29. package/src/hnsw_index.py +3 -3
  30. package/src/learning/__init__.py +5 -4
  31. package/src/learning/adaptive_ranker.py +14 -265
  32. package/src/learning/bootstrap/__init__.py +69 -0
  33. package/src/learning/bootstrap/constants.py +93 -0
  34. package/src/learning/bootstrap/db_queries.py +316 -0
  35. package/src/learning/bootstrap/sampling.py +82 -0
  36. package/src/learning/bootstrap/text_utils.py +71 -0
  37. package/src/learning/cross_project_aggregator.py +58 -57
  38. package/src/learning/db/__init__.py +40 -0
  39. package/src/learning/db/constants.py +44 -0
  40. package/src/learning/db/schema.py +279 -0
  41. package/src/learning/learning_db.py +15 -234
  42. package/src/learning/ranking/__init__.py +33 -0
  43. package/src/learning/ranking/constants.py +84 -0
  44. package/src/learning/ranking/helpers.py +278 -0
  45. package/src/learning/source_quality_scorer.py +66 -65
  46. package/src/learning/synthetic_bootstrap.py +28 -310
  47. package/src/memory/__init__.py +36 -0
  48. package/src/memory/cli.py +205 -0
  49. package/src/memory/constants.py +39 -0
  50. package/src/memory/helpers.py +28 -0
  51. package/src/memory/schema.py +166 -0
  52. package/src/memory-profiles.py +94 -86
  53. package/src/memory-reset.py +187 -185
  54. package/src/memory_compression.py +2 -2
  55. package/src/memory_store_v2.py +44 -354
  56. package/src/migrate_v1_to_v2.py +11 -10
  57. package/src/patterns/analyzers.py +104 -100
  58. package/src/patterns/learner.py +17 -13
  59. package/src/patterns/scoring.py +25 -21
  60. package/src/patterns/store.py +40 -38
  61. package/src/patterns/terminology.py +53 -51
  62. package/src/provenance_tracker.py +2 -2
  63. package/src/qualixar_attribution.py +1 -1
  64. package/src/search/engine.py +16 -14
  65. package/src/search/index_loader.py +13 -11
  66. package/src/setup_validator.py +160 -158
  67. package/src/subscription_manager.py +20 -18
  68. package/src/tree/builder.py +66 -64
  69. package/src/tree/nodes.py +103 -97
  70. package/src/tree/queries.py +142 -137
  71. package/src/tree/schema.py +46 -42
  72. package/src/webhook_dispatcher.py +3 -3
  73. package/ui_server.py +7 -4
@@ -41,59 +41,61 @@ class FrequencyAnalyzer:
41
41
  patterns = {}
42
42
 
43
43
  conn = sqlite3.connect(self.db_path)
44
- cursor = conn.cursor()
45
-
46
- for category, keywords in self.tech_categories.items():
47
- keyword_counts = Counter()
48
- evidence_memories = {} # {keyword: [memory_ids]}
49
-
50
- for memory_id in memory_ids:
51
- cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
52
- row = cursor.fetchone()
53
-
54
- if not row:
55
- continue
56
-
57
- content = row[0].lower()
58
-
59
- for keyword in keywords:
60
- # Count occurrences with word boundaries
61
- pattern = r'\b' + re.escape(keyword.replace('.', r'\.')) + r'\b'
62
- matches = re.findall(pattern, content, re.IGNORECASE)
63
- count = len(matches)
64
-
65
- if count > 0:
66
- keyword_counts[keyword] += count
67
-
68
- if keyword not in evidence_memories:
69
- evidence_memories[keyword] = []
70
- evidence_memories[keyword].append(memory_id)
71
-
72
- # Determine preference (most mentioned)
73
- if keyword_counts:
74
- top_keyword = keyword_counts.most_common(1)[0][0]
75
- total_mentions = sum(keyword_counts.values())
76
- top_count = keyword_counts[top_keyword]
77
-
78
- # Calculate confidence (% of mentions)
79
- confidence = top_count / total_mentions if total_mentions > 0 else 0
80
-
81
- # Only create pattern if confidence > 0.6 and at least 3 mentions
82
- if confidence > 0.6 and top_count >= 3:
83
- value = self._format_preference(top_keyword, keyword_counts)
84
- evidence_list = list(set(evidence_memories.get(top_keyword, [])))
85
-
86
- patterns[category] = {
87
- 'pattern_type': 'preference',
88
- 'key': category,
89
- 'value': value,
90
- 'confidence': round(confidence, 2),
91
- 'evidence_count': len(evidence_list),
92
- 'memory_ids': evidence_list,
93
- 'category': self._categorize_pattern(category)
94
- }
95
-
96
- conn.close()
44
+ try:
45
+ cursor = conn.cursor()
46
+
47
+ for category, keywords in self.tech_categories.items():
48
+ keyword_counts = Counter()
49
+ evidence_memories = {} # {keyword: [memory_ids]}
50
+
51
+ for memory_id in memory_ids:
52
+ cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
53
+ row = cursor.fetchone()
54
+
55
+ if not row:
56
+ continue
57
+
58
+ content = row[0].lower()
59
+
60
+ for keyword in keywords:
61
+ # Count occurrences with word boundaries
62
+ pattern = r'\b' + re.escape(keyword.replace('.', r'\.')) + r'\b'
63
+ matches = re.findall(pattern, content, re.IGNORECASE)
64
+ count = len(matches)
65
+
66
+ if count > 0:
67
+ keyword_counts[keyword] += count
68
+
69
+ if keyword not in evidence_memories:
70
+ evidence_memories[keyword] = []
71
+ evidence_memories[keyword].append(memory_id)
72
+
73
+ # Determine preference (most mentioned)
74
+ if keyword_counts:
75
+ top_keyword = keyword_counts.most_common(1)[0][0]
76
+ total_mentions = sum(keyword_counts.values())
77
+ top_count = keyword_counts[top_keyword]
78
+
79
+ # Calculate confidence (% of mentions)
80
+ confidence = top_count / total_mentions if total_mentions > 0 else 0
81
+
82
+ # Only create pattern if confidence > 0.6 and at least 3 mentions
83
+ if confidence > 0.6 and top_count >= 3:
84
+ value = self._format_preference(top_keyword, keyword_counts)
85
+ evidence_list = list(set(evidence_memories.get(top_keyword, [])))
86
+
87
+ patterns[category] = {
88
+ 'pattern_type': 'preference',
89
+ 'key': category,
90
+ 'value': value,
91
+ 'confidence': round(confidence, 2),
92
+ 'evidence_count': len(evidence_list),
93
+ 'memory_ids': evidence_list,
94
+ 'category': self._categorize_pattern(category)
95
+ }
96
+
97
+ finally:
98
+ conn.close()
97
99
  return patterns
98
100
 
99
101
  def _format_preference(self, top_keyword: str, all_counts: Counter) -> str:
@@ -171,53 +173,55 @@ class ContextAnalyzer:
171
173
  patterns = {}
172
174
 
173
175
  conn = sqlite3.connect(self.db_path)
174
- cursor = conn.cursor()
175
-
176
- for pattern_key, indicators in self.style_indicators.items():
177
- indicator_counts = Counter()
178
- evidence_memories = {} # {style_type: [memory_ids]}
179
-
180
- for memory_id in memory_ids:
181
- cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
182
- row = cursor.fetchone()
183
-
184
- if not row:
185
- continue
186
-
187
- content = row[0].lower()
188
-
189
- for style_type, keywords in indicators.items():
190
- for keyword in keywords:
191
- if keyword in content:
192
- indicator_counts[style_type] += 1
193
-
194
- if style_type not in evidence_memories:
195
- evidence_memories[style_type] = []
196
- evidence_memories[style_type].append(memory_id)
197
-
198
- # Determine dominant style
199
- if indicator_counts:
200
- top_style = indicator_counts.most_common(1)[0][0]
201
- total = sum(indicator_counts.values())
202
- top_count = indicator_counts[top_style]
203
- confidence = top_count / total if total > 0 else 0
204
-
205
- # Only create pattern if confidence > 0.65 and at least 3 mentions
206
- if confidence > 0.65 and top_count >= 3:
207
- value = self._format_style_value(pattern_key, top_style, indicator_counts)
208
- evidence_list = list(set(evidence_memories.get(top_style, [])))
209
-
210
- patterns[pattern_key] = {
211
- 'pattern_type': 'style',
212
- 'key': pattern_key,
213
- 'value': value,
214
- 'confidence': round(confidence, 2),
215
- 'evidence_count': len(evidence_list),
216
- 'memory_ids': evidence_list,
217
- 'category': 'general'
218
- }
219
-
220
- conn.close()
176
+ try:
177
+ cursor = conn.cursor()
178
+
179
+ for pattern_key, indicators in self.style_indicators.items():
180
+ indicator_counts = Counter()
181
+ evidence_memories = {} # {style_type: [memory_ids]}
182
+
183
+ for memory_id in memory_ids:
184
+ cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
185
+ row = cursor.fetchone()
186
+
187
+ if not row:
188
+ continue
189
+
190
+ content = row[0].lower()
191
+
192
+ for style_type, keywords in indicators.items():
193
+ for keyword in keywords:
194
+ if keyword in content:
195
+ indicator_counts[style_type] += 1
196
+
197
+ if style_type not in evidence_memories:
198
+ evidence_memories[style_type] = []
199
+ evidence_memories[style_type].append(memory_id)
200
+
201
+ # Determine dominant style
202
+ if indicator_counts:
203
+ top_style = indicator_counts.most_common(1)[0][0]
204
+ total = sum(indicator_counts.values())
205
+ top_count = indicator_counts[top_style]
206
+ confidence = top_count / total if total > 0 else 0
207
+
208
+ # Only create pattern if confidence > 0.65 and at least 3 mentions
209
+ if confidence > 0.65 and top_count >= 3:
210
+ value = self._format_style_value(pattern_key, top_style, indicator_counts)
211
+ evidence_list = list(set(evidence_memories.get(top_style, [])))
212
+
213
+ patterns[pattern_key] = {
214
+ 'pattern_type': 'style',
215
+ 'key': pattern_key,
216
+ 'value': value,
217
+ 'confidence': round(confidence, 2),
218
+ 'evidence_count': len(evidence_list),
219
+ 'memory_ids': evidence_list,
220
+ 'category': 'general'
221
+ }
222
+
223
+ finally:
224
+ conn.close()
221
225
  return patterns
222
226
 
223
227
  def _format_style_value(self, pattern_key: str, top_style: str, all_counts: Counter) -> str:
@@ -66,12 +66,14 @@ class PatternLearner:
66
66
 
67
67
  # Get memory IDs for active profile only
68
68
  conn = sqlite3.connect(self.db_path)
69
- cursor = conn.cursor()
70
- cursor.execute('SELECT id FROM memories WHERE profile = ? ORDER BY created_at',
71
- (active_profile,))
72
- all_memory_ids = [row[0] for row in cursor.fetchall()]
73
- total_memories = len(all_memory_ids)
74
- conn.close()
69
+ try:
70
+ cursor = conn.cursor()
71
+ cursor.execute('SELECT id FROM memories WHERE profile = ? ORDER BY created_at',
72
+ (active_profile,))
73
+ all_memory_ids = [row[0] for row in cursor.fetchall()]
74
+ total_memories = len(all_memory_ids)
75
+ finally:
76
+ conn.close()
75
77
 
76
78
  if total_memories == 0:
77
79
  print(f"No memories found for profile '{active_profile}'. Add memories first.")
@@ -142,16 +144,18 @@ class PatternLearner:
142
144
  """Incremental update when new memory is added."""
143
145
  active_profile = self._get_active_profile()
144
146
  conn = sqlite3.connect(self.db_path)
145
- cursor = conn.cursor()
146
- cursor.execute('SELECT COUNT(*) FROM memories WHERE profile = ?',
147
- (active_profile,))
148
- total = cursor.fetchone()[0]
149
- conn.close()
147
+ try:
148
+ cursor = conn.cursor()
149
+ cursor.execute('SELECT COUNT(*) FROM memories WHERE profile = ?',
150
+ (active_profile,))
151
+ total = cursor.fetchone()[0]
152
+ finally:
153
+ conn.close()
150
154
 
151
155
  # Only do incremental updates if we have many memories (>50)
152
156
  if total > 50:
153
- # TODO: Implement true incremental update
154
- print(f"New memory #{memory_id} added. Run weekly_pattern_update() to update patterns.")
157
+ # Deferred to batch update for efficiency (see weekly_pattern_update)
158
+ pass
155
159
  else:
156
160
  # For small memory counts, just do full update
157
161
  self.weekly_pattern_update()
@@ -83,18 +83,20 @@ class ConfidenceScorer:
83
83
  def _calculate_recency_bonus(self, memory_ids: List[int]) -> float:
84
84
  """Give bonus to patterns with recent evidence."""
85
85
  conn = sqlite3.connect(self.db_path)
86
- cursor = conn.cursor()
86
+ try:
87
+ cursor = conn.cursor()
87
88
 
88
- # Get timestamps
89
- placeholders = ','.join('?' * len(memory_ids))
90
- cursor.execute(f'''
91
- SELECT created_at FROM memories
92
- WHERE id IN ({placeholders})
93
- ORDER BY created_at DESC
94
- ''', memory_ids)
89
+ # Get timestamps
90
+ placeholders = ','.join('?' * len(memory_ids))
91
+ cursor.execute(f'''
92
+ SELECT created_at FROM memories
93
+ WHERE id IN ({placeholders})
94
+ ORDER BY created_at DESC
95
+ ''', memory_ids)
95
96
 
96
- timestamps = cursor.fetchall()
97
- conn.close()
97
+ timestamps = cursor.fetchall()
98
+ finally:
99
+ conn.close()
98
100
 
99
101
  if not timestamps:
100
102
  return 1.0
@@ -124,17 +126,19 @@ class ConfidenceScorer:
124
126
  return 0.8 # Penalize low sample size
125
127
 
126
128
  conn = sqlite3.connect(self.db_path)
127
- cursor = conn.cursor()
128
-
129
- placeholders = ','.join('?' * len(memory_ids))
130
- cursor.execute(f'''
131
- SELECT created_at FROM memories
132
- WHERE id IN ({placeholders})
133
- ORDER BY created_at
134
- ''', memory_ids)
135
-
136
- timestamps = [row[0] for row in cursor.fetchall()]
137
- conn.close()
129
+ try:
130
+ cursor = conn.cursor()
131
+
132
+ placeholders = ','.join('?' * len(memory_ids))
133
+ cursor.execute(f'''
134
+ SELECT created_at FROM memories
135
+ WHERE id IN ({placeholders})
136
+ ORDER BY created_at
137
+ ''', memory_ids)
138
+
139
+ timestamps = [row[0] for row in cursor.fetchall()]
140
+ finally:
141
+ conn.close()
138
142
 
139
143
  if len(timestamps) < 2:
140
144
  return 0.8
@@ -181,43 +181,45 @@ class PatternStore:
181
181
  profile: Optional[str] = None) -> List[Dict[str, Any]]:
182
182
  """Get patterns above confidence threshold, optionally filtered by profile."""
183
183
  conn = sqlite3.connect(self.db_path)
184
- cursor = conn.cursor()
185
-
186
- # Build query with optional filters
187
- conditions = ['confidence >= ?']
188
- params = [min_confidence]
189
-
190
- if pattern_type:
191
- conditions.append('pattern_type = ?')
192
- params.append(pattern_type)
193
-
194
- if profile:
195
- conditions.append('profile = ?')
196
- params.append(profile)
197
-
198
- where_clause = ' AND '.join(conditions)
199
- cursor.execute(f'''
200
- SELECT id, pattern_type, key, value, confidence, evidence_count,
201
- updated_at, created_at, category
202
- FROM identity_patterns
203
- WHERE {where_clause}
204
- ORDER BY confidence DESC, evidence_count DESC
205
- ''', params)
206
-
207
- patterns = []
208
- for row in cursor.fetchall():
209
- patterns.append({
210
- 'id': row[0],
211
- 'pattern_type': row[1],
212
- 'key': row[2],
213
- 'value': row[3],
214
- 'confidence': row[4],
215
- 'evidence_count': row[5],
216
- 'frequency': row[5],
217
- 'last_seen': row[6],
218
- 'created_at': row[7],
219
- 'category': row[8]
220
- })
184
+ try:
185
+ cursor = conn.cursor()
186
+
187
+ # Build query with optional filters
188
+ conditions = ['confidence >= ?']
189
+ params = [min_confidence]
190
+
191
+ if pattern_type:
192
+ conditions.append('pattern_type = ?')
193
+ params.append(pattern_type)
194
+
195
+ if profile:
196
+ conditions.append('profile = ?')
197
+ params.append(profile)
198
+
199
+ where_clause = ' AND '.join(conditions)
200
+ cursor.execute(f'''
201
+ SELECT id, pattern_type, key, value, confidence, evidence_count,
202
+ updated_at, created_at, category
203
+ FROM identity_patterns
204
+ WHERE {where_clause}
205
+ ORDER BY confidence DESC, evidence_count DESC
206
+ ''', params)
207
+
208
+ patterns = []
209
+ for row in cursor.fetchall():
210
+ patterns.append({
211
+ 'id': row[0],
212
+ 'pattern_type': row[1],
213
+ 'key': row[2],
214
+ 'value': row[3],
215
+ 'confidence': row[4],
216
+ 'evidence_count': row[5],
217
+ 'frequency': row[5],
218
+ 'last_seen': row[6],
219
+ 'created_at': row[7],
220
+ 'category': row[8]
221
+ })
221
222
 
222
- conn.close()
223
+ finally:
224
+ conn.close()
223
225
  return patterns
@@ -36,57 +36,59 @@ class TerminologyLearner:
36
36
  patterns = {}
37
37
 
38
38
  conn = sqlite3.connect(self.db_path)
39
- cursor = conn.cursor()
40
-
41
- for term in self.ambiguous_terms:
42
- contexts = []
43
-
44
- # Find all contexts where term appears
45
- for memory_id in memory_ids:
46
- cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
47
- row = cursor.fetchone()
48
-
49
- if not row:
50
- continue
51
-
52
- content = row[0]
53
-
54
- # Find term in content (case-insensitive)
55
- pattern = r'\b' + re.escape(term) + r'\b'
56
- for match in re.finditer(pattern, content, re.IGNORECASE):
57
- term_idx = match.start()
58
-
59
- # Extract 100-char window around term
60
- start = max(0, term_idx - 100)
61
- end = min(len(content), term_idx + len(term) + 100)
62
- context_window = content[start:end]
63
-
64
- contexts.append({
65
- 'memory_id': memory_id,
66
- 'context': context_window
67
- })
68
-
69
- # Analyze contexts to extract meaning (need at least 3 examples)
70
- if len(contexts) >= 3:
71
- definition = self._extract_definition(term, contexts)
72
-
73
- if definition:
74
- evidence_list = list(set([ctx['memory_id'] for ctx in contexts]))
75
-
76
- # Confidence increases with more examples, capped at 0.95
77
- confidence = min(0.95, 0.6 + (len(contexts) * 0.05))
78
-
79
- patterns[term] = {
80
- 'pattern_type': 'terminology',
81
- 'key': term,
82
- 'value': definition,
83
- 'confidence': round(confidence, 2),
84
- 'evidence_count': len(evidence_list),
85
- 'memory_ids': evidence_list,
86
- 'category': 'general'
87
- }
88
-
89
- conn.close()
39
+ try:
40
+ cursor = conn.cursor()
41
+
42
+ for term in self.ambiguous_terms:
43
+ contexts = []
44
+
45
+ # Find all contexts where term appears
46
+ for memory_id in memory_ids:
47
+ cursor.execute('SELECT content FROM memories WHERE id = ?', (memory_id,))
48
+ row = cursor.fetchone()
49
+
50
+ if not row:
51
+ continue
52
+
53
+ content = row[0]
54
+
55
+ # Find term in content (case-insensitive)
56
+ pattern = r'\b' + re.escape(term) + r'\b'
57
+ for match in re.finditer(pattern, content, re.IGNORECASE):
58
+ term_idx = match.start()
59
+
60
+ # Extract 100-char window around term
61
+ start = max(0, term_idx - 100)
62
+ end = min(len(content), term_idx + len(term) + 100)
63
+ context_window = content[start:end]
64
+
65
+ contexts.append({
66
+ 'memory_id': memory_id,
67
+ 'context': context_window
68
+ })
69
+
70
+ # Analyze contexts to extract meaning (need at least 3 examples)
71
+ if len(contexts) >= 3:
72
+ definition = self._extract_definition(term, contexts)
73
+
74
+ if definition:
75
+ evidence_list = list(set([ctx['memory_id'] for ctx in contexts]))
76
+
77
+ # Confidence increases with more examples, capped at 0.95
78
+ confidence = min(0.95, 0.6 + (len(contexts) * 0.05))
79
+
80
+ patterns[term] = {
81
+ 'pattern_type': 'terminology',
82
+ 'key': term,
83
+ 'value': definition,
84
+ 'confidence': round(confidence, 2),
85
+ 'evidence_count': len(evidence_list),
86
+ 'memory_ids': evidence_list,
87
+ 'category': 'general'
88
+ }
89
+
90
+ finally:
91
+ conn.close()
90
92
  return patterns
91
93
 
92
94
  def _extract_definition(self, term: str, contexts: List[Dict]) -> Optional[str]:
@@ -53,7 +53,7 @@ class ProvenanceTracker:
53
53
  return cls._instances[key]
54
54
 
55
55
  @classmethod
56
- def reset_instance(cls, db_path: Optional[Path] = None):
56
+ def reset_instance(cls, db_path: Optional[Path] = None) -> None:
57
57
  """Remove singleton. Used for testing."""
58
58
  with cls._instances_lock:
59
59
  if db_path is None:
@@ -154,7 +154,7 @@ class ProvenanceTracker:
154
154
  source_protocol: str = "cli",
155
155
  trust_score: float = 1.0,
156
156
  derived_from: Optional[int] = None,
157
- ):
157
+ ) -> None:
158
158
  """
159
159
  Record provenance metadata for a memory.
160
160
 
@@ -45,7 +45,7 @@ class QualixarSigner:
45
45
 
46
46
  Example::
47
47
 
48
- signer = QualixarSigner("superlocalmemory", "2.8.1")
48
+ signer = QualixarSigner("superlocalmemory", "2.8.3")
49
49
  signed = signer.sign({"memories": [...]})
50
50
  assert QualixarSigner.verify(signed) is True
51
51
  """
@@ -172,20 +172,22 @@ class HybridSearchEngine(IndexLoaderMixin, SearchMethodsMixin, FusionMixin):
172
172
  id_to_score = {mem_id: score for mem_id, score in raw_results}
173
173
 
174
174
  conn = sqlite3.connect(self.db_path)
175
- cursor = conn.cursor()
176
-
177
- # Fetch memories
178
- placeholders = ','.join(['?'] * len(memory_ids))
179
- cursor.execute(f'''
180
- SELECT id, content, summary, project_path, project_name, tags,
181
- category, parent_id, tree_path, depth, memory_type,
182
- importance, created_at, cluster_id, last_accessed, access_count
183
- FROM memories
184
- WHERE id IN ({placeholders})
185
- ''', memory_ids)
186
-
187
- rows = cursor.fetchall()
188
- conn.close()
175
+ try:
176
+ cursor = conn.cursor()
177
+
178
+ # Fetch memories
179
+ placeholders = ','.join(['?'] * len(memory_ids))
180
+ cursor.execute(f'''
181
+ SELECT id, content, summary, project_path, project_name, tags,
182
+ category, parent_id, tree_path, depth, memory_type,
183
+ importance, created_at, cluster_id, last_accessed, access_count
184
+ FROM memories
185
+ WHERE id IN ({placeholders})
186
+ ''', memory_ids)
187
+
188
+ rows = cursor.fetchall()
189
+ finally:
190
+ conn.close()
189
191
 
190
192
  # Build result dictionaries
191
193
  results = []
@@ -31,17 +31,19 @@ class IndexLoaderMixin:
31
31
  Load documents from database and build search indexes.
32
32
  """
33
33
  conn = sqlite3.connect(self.db_path)
34
- cursor = conn.cursor()
35
-
36
- # Fetch all memories
37
- cursor.execute('''
38
- SELECT id, content, summary, tags
39
- FROM memories
40
- ORDER BY id
41
- ''')
42
-
43
- rows = cursor.fetchall()
44
- conn.close()
34
+ try:
35
+ cursor = conn.cursor()
36
+
37
+ # Fetch all memories
38
+ cursor.execute('''
39
+ SELECT id, content, summary, tags
40
+ FROM memories
41
+ ORDER BY id
42
+ ''')
43
+
44
+ rows = cursor.fetchall()
45
+ finally:
46
+ conn.close()
45
47
 
46
48
  if not rows:
47
49
  return