basic-memory 0.13.7.dev1__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Note: the registry flagged this release as potentially problematic.
Files changed (30)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/api/routers/utils.py +1 -1
  3. basic_memory/cli/commands/db.py +8 -1
  4. basic_memory/cli/commands/mcp.py +1 -0
  5. basic_memory/cli/commands/project.py +3 -7
  6. basic_memory/config.py +6 -2
  7. basic_memory/db.py +5 -4
  8. basic_memory/markdown/utils.py +3 -1
  9. basic_memory/mcp/project_session.py +16 -1
  10. basic_memory/mcp/prompts/sync_status.py +0 -4
  11. basic_memory/mcp/server.py +0 -1
  12. basic_memory/mcp/tools/build_context.py +6 -3
  13. basic_memory/mcp/tools/move_note.py +155 -1
  14. basic_memory/mcp/tools/read_note.py +6 -3
  15. basic_memory/mcp/tools/search.py +115 -38
  16. basic_memory/mcp/tools/utils.py +27 -4
  17. basic_memory/mcp/tools/write_note.py +6 -2
  18. basic_memory/repository/entity_repository.py +46 -43
  19. basic_memory/repository/search_repository.py +153 -23
  20. basic_memory/schemas/memory.py +1 -1
  21. basic_memory/schemas/response.py +1 -1
  22. basic_memory/services/entity_service.py +10 -5
  23. basic_memory/services/initialization.py +11 -5
  24. basic_memory/services/project_service.py +18 -0
  25. basic_memory/services/sync_status_service.py +17 -0
  26. {basic_memory-0.13.7.dev1.dist-info → basic_memory-0.14.0.dist-info}/METADATA +26 -1
  27. {basic_memory-0.13.7.dev1.dist-info → basic_memory-0.14.0.dist-info}/RECORD +30 -30
  28. {basic_memory-0.13.7.dev1.dist-info → basic_memory-0.14.0.dist-info}/WHEEL +0 -0
  29. {basic_memory-0.13.7.dev1.dist-info → basic_memory-0.14.0.dist-info}/entry_points.txt +0 -0
  30. {basic_memory-0.13.7.dev1.dist-info → basic_memory-0.14.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/mcp/tools/utils.py
@@ -525,11 +525,16 @@ def check_migration_status() -> Optional[str]:
     return None


-async def wait_for_migration_or_return_status(timeout: float = 5.0) -> Optional[str]:
+async def wait_for_migration_or_return_status(
+    timeout: float = 5.0, project_name: Optional[str] = None
+) -> Optional[str]:
     """Wait briefly for sync/migration to complete, or return status message.

     Args:
         timeout: Maximum time to wait for sync completion
+        project_name: Optional project name to check specific project status.
+            If provided, only checks that project's readiness.
+            If None, uses global status check (legacy behavior).

     Returns:
         Status message if sync is still in progress, None if ready
@@ -538,18 +543,36 @@ async def wait_for_migration_or_return_status(timeout: float = 5.0) -> Optional[
         from basic_memory.services.sync_status_service import sync_status_tracker
         import asyncio

-        if sync_status_tracker.is_ready:
+        # Check if we should use project-specific or global status
+        def is_ready() -> bool:
+            if project_name:
+                return sync_status_tracker.is_project_ready(project_name)
+            return sync_status_tracker.is_ready
+
+        if is_ready():
             return None

         # Wait briefly for sync to complete
         start_time = asyncio.get_event_loop().time()
         while (asyncio.get_event_loop().time() - start_time) < timeout:
-            if sync_status_tracker.is_ready:
+            if is_ready():
                 return None
             await asyncio.sleep(0.1)  # Check every 100ms

         # Still not ready after timeout
-        return sync_status_tracker.get_summary()
+        if project_name:
+            # For project-specific checks, get project status details
+            project_status = sync_status_tracker.get_project_status(project_name)
+            if project_status and project_status.status.value == "failed":
+                error_msg = project_status.error or "Unknown sync error"
+                return f"❌ Sync failed for project '{project_name}': {error_msg}"
+            elif project_status:
+                return f"🔄 Project '{project_name}' is still syncing: {project_status.message}"
+            else:
+                return f"⚠️ Project '{project_name}' status unknown"
+        else:
+            # Fall back to global summary for legacy calls
+            return sync_status_tracker.get_summary()
     except Exception:  # pragma: no cover
         # If there's any error, assume ready
         return None
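The `project_name` parameter is additive, so existing callers keep the global check. A minimal usage sketch; the wrapper name and error handling here are illustrative, not part of the package:

```python
from typing import Optional

from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status


async def ensure_ready(project_name: Optional[str] = None) -> None:
    """Hypothetical guard: wait up to 5s for sync, then surface the status message."""
    # project_name=None keeps the legacy global check; passing a name scopes
    # the check to that project's tracker entry.
    status = await wait_for_migration_or_return_status(timeout=5.0, project_name=project_name)
    if status is not None:
        raise RuntimeError(status)  # status is a human-readable message, e.g. "🔄 ..."
```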
basic_memory/mcp/tools/write_note.py
@@ -72,10 +72,15 @@ async def write_note(
     """
     logger.info(f"MCP tool call tool=write_note folder={folder}, title={title}, tags={tags}")

+    # Get the active project first to check project-specific sync status
+    active_project = get_active_project(project)
+
     # Check migration status and wait briefly if needed
     from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status

-    migration_status = await wait_for_migration_or_return_status(timeout=5.0)
+    migration_status = await wait_for_migration_or_return_status(
+        timeout=5.0, project_name=active_project.name
+    )
     if migration_status:  # pragma: no cover
         return f"# System Status\n\n{migration_status}\n\nPlease wait for migration to complete before creating notes."

@@ -91,7 +96,6 @@ async def write_note(
         content=content,
         entity_metadata=metadata,
     )
-    active_project = get_active_project(project)
     project_url = active_project.project_url

     # Create or update via knowledge API
basic_memory/repository/entity_repository.py
@@ -102,14 +102,14 @@ class EntityRepository(Repository[Entity]):

     async def upsert_entity(self, entity: Entity) -> Entity:
         """Insert or update entity using a hybrid approach.
-
+
         This method provides a cleaner alternative to the try/catch approach
-        for handling permalink and file_path conflicts. It first tries direct
+        for handling permalink and file_path conflicts. It first tries direct
         insertion, then handles conflicts intelligently.
-
+
         Args:
             entity: The entity to insert or update
-
+
         Returns:
             The inserted or updated entity
         """
@@ -117,98 +117,102 @@ class EntityRepository(Repository[Entity]):
         async with db.scoped_session(self.session_maker) as session:
             # Set project_id if applicable and not already set
             self._set_project_id_if_needed(entity)
-
+
             # Check for existing entity with same file_path first
             existing_by_path = await session.execute(
                 select(Entity).where(
-                    Entity.file_path == entity.file_path,
-                    Entity.project_id == entity.project_id
+                    Entity.file_path == entity.file_path, Entity.project_id == entity.project_id
                 )
             )
             existing_path_entity = existing_by_path.scalar_one_or_none()
-
+
             if existing_path_entity:
                 # Update existing entity with same file path
                 for key, value in {
-                    'title': entity.title,
-                    'entity_type': entity.entity_type,
-                    'entity_metadata': entity.entity_metadata,
-                    'content_type': entity.content_type,
-                    'permalink': entity.permalink,
-                    'checksum': entity.checksum,
-                    'updated_at': entity.updated_at,
+                    "title": entity.title,
+                    "entity_type": entity.entity_type,
+                    "entity_metadata": entity.entity_metadata,
+                    "content_type": entity.content_type,
+                    "permalink": entity.permalink,
+                    "checksum": entity.checksum,
+                    "updated_at": entity.updated_at,
                 }.items():
                     setattr(existing_path_entity, key, value)
-
+
                 await session.flush()
                 # Return with relationships loaded
                 query = (
-                    select(Entity)
+                    self.select()
                     .where(Entity.file_path == entity.file_path)
                     .options(*self.get_load_options())
                 )
                 result = await session.execute(query)
                 found = result.scalar_one_or_none()
                 if not found:  # pragma: no cover
-                    raise RuntimeError(f"Failed to retrieve entity after update: {entity.file_path}")
+                    raise RuntimeError(
+                        f"Failed to retrieve entity after update: {entity.file_path}"
+                    )
                 return found
-
+
             # No existing entity with same file_path, try insert
             try:
                 # Simple insert for new entity
                 session.add(entity)
                 await session.flush()
-
+
                 # Return with relationships loaded
                 query = (
-                    select(Entity)
+                    self.select()
                     .where(Entity.file_path == entity.file_path)
                     .options(*self.get_load_options())
                 )
                 result = await session.execute(query)
                 found = result.scalar_one_or_none()
                 if not found:  # pragma: no cover
-                    raise RuntimeError(f"Failed to retrieve entity after insert: {entity.file_path}")
+                    raise RuntimeError(
+                        f"Failed to retrieve entity after insert: {entity.file_path}"
+                    )
                 return found
-
+
             except IntegrityError:
                 # Could be either file_path or permalink conflict
                 await session.rollback()
-
+
                 # Check if it's a file_path conflict (race condition)
                 existing_by_path_check = await session.execute(
                     select(Entity).where(
-                        Entity.file_path == entity.file_path,
-                        Entity.project_id == entity.project_id
+                        Entity.file_path == entity.file_path, Entity.project_id == entity.project_id
                     )
                 )
                 race_condition_entity = existing_by_path_check.scalar_one_or_none()
-
+
                 if race_condition_entity:
                     # Race condition: file_path conflict detected after our initial check
                     # Update the existing entity instead
                     for key, value in {
-                        'title': entity.title,
-                        'entity_type': entity.entity_type,
-                        'entity_metadata': entity.entity_metadata,
-                        'content_type': entity.content_type,
-                        'permalink': entity.permalink,
-                        'checksum': entity.checksum,
-                        'updated_at': entity.updated_at,
+                        "title": entity.title,
+                        "entity_type": entity.entity_type,
+                        "entity_metadata": entity.entity_metadata,
+                        "content_type": entity.content_type,
+                        "permalink": entity.permalink,
+                        "checksum": entity.checksum,
+                        "updated_at": entity.updated_at,
                     }.items():
                         setattr(race_condition_entity, key, value)
-
+
                     await session.flush()
                     # Return the updated entity with relationships loaded
                     query = (
-                        select(Entity)
+                        self.select()
                         .where(Entity.file_path == entity.file_path)
                         .options(*self.get_load_options())
                     )
                     result = await session.execute(query)
                     found = result.scalar_one_or_none()
                     if not found:  # pragma: no cover
-                        raise RuntimeError(f"Failed to retrieve entity after race condition update: {entity.file_path}")
+                        raise RuntimeError(
+                            f"Failed to retrieve entity after race condition update: {entity.file_path}"
+                        )
                     return found
                 else:
                     # Must be permalink conflict - generate unique permalink
@@ -218,14 +222,13 @@ class EntityRepository(Repository[Entity]):
         """Handle permalink conflicts by generating a unique permalink."""
         base_permalink = entity.permalink
         suffix = 1
-
+
         # Find a unique permalink
         while True:
             test_permalink = f"{base_permalink}-{suffix}"
             existing = await session.execute(
                 select(Entity).where(
-                    Entity.permalink == test_permalink,
-                    Entity.project_id == entity.project_id
+                    Entity.permalink == test_permalink, Entity.project_id == entity.project_id
                 )
             )
             if existing.scalar_one_or_none() is None:
@@ -233,14 +236,14 @@ class EntityRepository(Repository[Entity]):
                 entity.permalink = test_permalink
                 break
             suffix += 1
-
+
         # Insert with unique permalink (no conflict possible now)
         session.add(entity)
         await session.flush()
-
+
         # Return the inserted entity with relationships loaded
         query = (
-            select(Entity)
+            self.select()
             .where(Entity.file_path == entity.file_path)
             .options(*self.get_load_options())
        )
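The permalink de-duplication is the only part of the upsert with nontrivial control flow; in isolation it is just a counter loop. A self-contained sketch of that logic against an in-memory set, standing in for the `Entity` table query:

```python
def unique_permalink(base: str, taken: set[str]) -> str:
    """Mirror of the repository's suffix loop: try base-1, base-2, ... until one is free."""
    suffix = 1
    while True:
        candidate = f"{base}-{suffix}"
        if candidate not in taken:
            return candidate
        suffix += 1


# The conflicting entity holds "notes/idea", and "notes/idea-1" is also taken:
assert unique_permalink("notes/idea", {"notes/idea", "notes/idea-1"}) == "notes/idea-2"
```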
basic_memory/repository/search_repository.py
@@ -1,6 +1,7 @@
 """Repository for search operations."""

 import json
+import re
 import time
 from dataclasses import dataclass
 from datetime import datetime
@@ -120,23 +121,141 @@ class SearchRepository:
             logger.error(f"Error initializing search index: {e}")
             raise e

-    def _prepare_search_term(self, term: str, is_prefix: bool = True) -> str:
-        """Prepare a search term for FTS5 query.
+    def _prepare_boolean_query(self, query: str) -> str:
+        """Prepare a Boolean query by quoting individual terms while preserving operators.

         Args:
-            term: The search term to prepare
+            query: A Boolean query like "tier1-test AND unicode" or "(hello OR world) NOT test"
+
+        Returns:
+            A properly formatted Boolean query with quoted terms that need quoting
+        """
+        # Define Boolean operators and their boundaries
+        boolean_pattern = r"(\bAND\b|\bOR\b|\bNOT\b)"
+
+        # Split the query by Boolean operators, keeping the operators
+        parts = re.split(boolean_pattern, query)
+
+        processed_parts = []
+        for part in parts:
+            part = part.strip()
+            if not part:
+                continue
+
+            # If it's a Boolean operator, keep it as is
+            if part in ["AND", "OR", "NOT"]:
+                processed_parts.append(part)
+            else:
+                # Handle parentheses specially - they should be preserved for grouping
+                if "(" in part or ")" in part:
+                    # Parse parenthetical expressions carefully
+                    processed_part = self._prepare_parenthetical_term(part)
+                    processed_parts.append(processed_part)
+                else:
+                    # This is a search term - for Boolean queries, don't add prefix wildcards
+                    prepared_term = self._prepare_single_term(part, is_prefix=False)
+                    processed_parts.append(prepared_term)
+
+        return " ".join(processed_parts)
+
+    def _prepare_parenthetical_term(self, term: str) -> str:
+        """Prepare a term that contains parentheses, preserving the parentheses for grouping.
+
+        Args:
+            term: A term that may contain parentheses like "(hello" or "world)" or "(hello OR world)"
+
+        Returns:
+            A properly formatted term with parentheses preserved
+        """
+        # Handle terms that start/end with parentheses but may contain quotable content
+        result = ""
+        i = 0
+        while i < len(term):
+            if term[i] in "()":
+                # Preserve parentheses as-is
+                result += term[i]
+                i += 1
+            else:
+                # Find the next parenthesis or end of string
+                start = i
+                while i < len(term) and term[i] not in "()":
+                    i += 1
+
+                # Extract the content between parentheses
+                content = term[start:i].strip()
+                if content:
+                    # Only quote if it actually needs quoting (has hyphens, special chars, etc)
+                    # but don't quote if it's just simple words
+                    if self._needs_quoting(content):
+                        escaped_content = content.replace('"', '""')
+                        result += f'"{escaped_content}"'
+                    else:
+                        result += content
+
+        return result
+
+    def _needs_quoting(self, term: str) -> bool:
+        """Check if a term needs to be quoted for FTS5 safety.
+
+        Args:
+            term: The term to check
+
+        Returns:
+            True if the term should be quoted
+        """
+        if not term or not term.strip():
+            return False
+
+        # Characters that indicate we should quote (excluding parentheses which are valid syntax)
+        needs_quoting_chars = [
+            " ",
+            ".",
+            ":",
+            ";",
+            ",",
+            "<",
+            ">",
+            "?",
+            "/",
+            "-",
+            "'",
+            '"',
+            "[",
+            "]",
+            "{",
+            "}",
+            "+",
+            "!",
+            "@",
+            "#",
+            "$",
+            "%",
+            "^",
+            "&",
+            "=",
+            "|",
+            "\\",
+            "~",
+            "`",
+        ]
+
+        return any(c in term for c in needs_quoting_chars)
+
+    def _prepare_single_term(self, term: str, is_prefix: bool = True) -> str:
+        """Prepare a single search term (no Boolean operators).
+
+        Args:
+            term: A single search term
             is_prefix: Whether to add prefix search capability (* suffix)

-        For FTS5:
-        - Boolean operators (AND, OR, NOT) are preserved for complex queries
-        - Terms with FTS5 special characters are quoted to prevent syntax errors
-        - Simple terms get prefix wildcards for better matching
+        Returns:
+            A properly formatted single term
         """
-        # Check for explicit boolean operators - if present, return the term as is
-        boolean_operators = [" AND ", " OR ", " NOT "]
-        if any(op in f" {term} " for op in boolean_operators):
+        if not term or not term.strip():
             return term

+        term = term.strip()
+
         # Check if term is already a proper wildcard pattern (alphanumeric + *)
         # e.g., "hello*", "test*world" - these should be left alone
         if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
@@ -218,6 +337,26 @@ class SearchRepository:

         return term

+    def _prepare_search_term(self, term: str, is_prefix: bool = True) -> str:
+        """Prepare a search term for FTS5 query.
+
+        Args:
+            term: The search term to prepare
+            is_prefix: Whether to add prefix search capability (* suffix)
+
+        For FTS5:
+        - Boolean operators (AND, OR, NOT) are preserved for complex queries
+        - Terms with FTS5 special characters are quoted to prevent syntax errors
+        - Simple terms get prefix wildcards for better matching
+        """
+        # Check for explicit boolean operators - if present, process as Boolean query
+        boolean_operators = [" AND ", " OR ", " NOT "]
+        if any(op in f" {term} " for op in boolean_operators):
+            return self._prepare_boolean_query(term)
+
+        # For non-Boolean queries, use the single term preparation logic
+        return self._prepare_single_term(term, is_prefix)
+
     async def search(
         self,
         search_text: Optional[str] = None,
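The dispatcher's operator detection pads the query with spaces, so only whole, space-delimited AND/OR/NOT route to the Boolean path; words that merely contain an operator do not. The rule in isolation:

```python
def has_boolean_operators(term: str) -> bool:
    # Same test as _prepare_search_term: pad with spaces so only
    # standalone operators match.
    return any(op in f" {term} " for op in [" AND ", " OR ", " NOT "])


assert has_boolean_operators("tier1-test AND unicode")
assert not has_boolean_operators("ANDROID notes")   # substring, not an operator
assert not has_boolean_operators("apple-NOT-pear")  # hyphenated, no space delimiters
```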
@@ -242,19 +381,10 @@ class SearchRepository:
             # For wildcard searches, don't add any text conditions - return all results
             pass
         else:
-            # Check for explicit boolean operators - only detect them in proper boolean contexts
-            has_boolean = any(op in f" {search_text} " for op in [" AND ", " OR ", " NOT "])
-
-            if has_boolean:
-                # If boolean operators are present, use the raw query
-                # No need to prepare it, FTS5 will understand the operators
-                params["text"] = search_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
-            else:
-                # Standard search with term preparation
-                processed_text = self._prepare_search_term(search_text.strip())
-                params["text"] = processed_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+            # Use _prepare_search_term to handle both Boolean and non-Boolean queries
+            processed_text = self._prepare_search_term(search_text.strip())
+            params["text"] = processed_text
+            conditions.append("(title MATCH :text OR content_stems MATCH :text)")

         # Handle title match search
         if title:
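With the duplicated branch gone, Boolean and plain queries share one path into FTS5. A hedged usage sketch; `repo` stands for a constructed `SearchRepository`, other parameters elided:

```python
async def demo(repo) -> None:
    # Routed through _prepare_boolean_query (operators preserved, terms quoted):
    await repo.search(search_text="tier1-test AND unicode")
    # Routed through _prepare_single_term (quoted/prefix-matched as needed):
    await repo.search(search_text="tier1-test")
```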
basic_memory/schemas/memory.py
@@ -134,7 +134,7 @@ class RelationSummary(BaseModel):
     file_path: str
     permalink: str
     relation_type: str
-    from_entity: str
+    from_entity: Optional[str] = None
     to_entity: Optional[str] = None
     created_at: datetime

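Relaxing `from_entity` mirrors the already-optional `to_entity`, so a relation summary can serialize before both endpoints resolve. A minimal reproduction of the schema change, trimmed to the fields shown in this hunk:

```python
from datetime import datetime
from typing import Optional

from pydantic import BaseModel


class RelationSummary(BaseModel):
    file_path: str
    permalink: str
    relation_type: str
    from_entity: Optional[str] = None  # was: from_entity: str
    to_entity: Optional[str] = None
    created_at: datetime


# Under 0.13.x this raised a validation error because from_entity was required:
RelationSummary(
    file_path="notes/a.md",
    permalink="notes/a",
    relation_type="relates_to",
    created_at=datetime.now(),
)
```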
basic_memory/schemas/response.py
@@ -131,7 +131,7 @@ class EntityResponse(SQLAlchemyModel):
     }
     """

-    permalink: Permalink
+    permalink: Optional[Permalink]
     title: str
     file_path: str
     entity_type: EntityType
basic_memory/services/entity_service.py
@@ -302,7 +302,7 @@ class EntityService(BaseService[EntityModel]):

         Creates the entity with null checksum to indicate sync not complete.
         Relations will be added in second pass.
-
+
         Uses UPSERT approach to handle permalink/file_path conflicts cleanly.
         """
         logger.debug(f"Creating entity: {markdown.frontmatter.title} file_path: {file_path}")
@@ -310,7 +310,7 @@ class EntityService(BaseService[EntityModel]):

         # Mark as incomplete because we still need to add relations
         model.checksum = None
-
+
         # Use UPSERT to handle conflicts cleanly
         try:
             return await self.repository.upsert_entity(model)
@@ -682,8 +682,8 @@ class EntityService(BaseService[EntityModel]):
         # 6. Prepare database updates
         updates = {"file_path": destination_path}

-        # 7. Update permalink if configured
-        if app_config.update_permalinks_on_move:
+        # 7. Update permalink if configured or if entity has null permalink
+        if app_config.update_permalinks_on_move or old_permalink is None:
             # Generate new permalink from destination path
             new_permalink = await self.resolve_permalink(destination_path)

@@ -693,7 +693,12 @@ class EntityService(BaseService[EntityModel]):
             )

             updates["permalink"] = new_permalink
-            logger.info(f"Updated permalink: {old_permalink} -> {new_permalink}")
+            if old_permalink is None:
+                logger.info(
+                    f"Generated permalink for entity with null permalink: {new_permalink}"
+                )
+            else:
+                logger.info(f"Updated permalink: {old_permalink} -> {new_permalink}")

         # 8. Recalculate checksum
         new_checksum = await self.file_service.compute_checksum(destination_path)
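The new condition means a move now backfills permalinks for entities that never had one, regardless of configuration. Reduced to a pure function with the three cases spelled out:

```python
from typing import Optional


def should_regenerate_permalink(update_on_move: bool, old_permalink: Optional[str]) -> bool:
    # Regenerate when the config opts in, or when there is no permalink to preserve.
    return update_on_move or old_permalink is None


assert should_regenerate_permalink(False, None)            # backfill a missing permalink
assert should_regenerate_permalink(True, "old/permalink")  # config-driven rename
assert not should_regenerate_permalink(False, "old/permalink")  # existing permalink kept
```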
basic_memory/services/initialization.py
@@ -21,9 +21,9 @@ async def initialize_database(app_config: BasicMemoryConfig) -> None:

     Args:
         app_config: The Basic Memory project configuration
-
+
     Note:
-        Database migrations are now handled automatically when the database
+        Database migrations are now handled automatically when the database
         connection is first established via get_or_create_db().
     """
     # Trigger database initialization and migrations by getting the database connection
@@ -50,7 +50,9 @@ async def reconcile_projects_with_config(app_config: BasicMemoryConfig):

     # Get database session - migrations handled centrally
     _, session_maker = await db.get_or_create_db(
-        db_path=app_config.database_path, db_type=db.DatabaseType.FILESYSTEM, ensure_migrations=False
+        db_path=app_config.database_path,
+        db_type=db.DatabaseType.FILESYSTEM,
+        ensure_migrations=False,
     )
     project_repository = ProjectRepository(session_maker)

@@ -71,7 +73,9 @@ async def reconcile_projects_with_config(app_config: BasicMemoryConfig):
 async def migrate_legacy_projects(app_config: BasicMemoryConfig):
     # Get database session - migrations handled centrally
     _, session_maker = await db.get_or_create_db(
-        db_path=app_config.database_path, db_type=db.DatabaseType.FILESYSTEM, ensure_migrations=False
+        db_path=app_config.database_path,
+        db_type=db.DatabaseType.FILESYSTEM,
+        ensure_migrations=False,
     )
     logger.info("Migrating legacy projects...")
     project_repository = ProjectRepository(session_maker)
@@ -140,7 +144,9 @@ async def initialize_file_sync(

     # Load app configuration - migrations handled centrally
     _, session_maker = await db.get_or_create_db(
-        db_path=app_config.database_path, db_type=db.DatabaseType.FILESYSTEM, ensure_migrations=False
+        db_path=app_config.database_path,
+        db_type=db.DatabaseType.FILESYSTEM,
+        ensure_migrations=False,
     )
     project_repository = ProjectRepository(session_maker)

basic_memory/services/project_service.py
@@ -154,6 +154,15 @@ class ProjectService:

         logger.info(f"Project '{name}' set as default in configuration and database")

+        # Refresh MCP session to pick up the new default project
+        try:
+            from basic_memory.mcp.project_session import session
+
+            session.refresh_from_config()
+        except ImportError:  # pragma: no cover
+            # MCP components might not be available in all contexts (e.g., CLI-only usage)
+            logger.debug("MCP session not available, skipping session refresh")
+
     async def _ensure_single_default_project(self) -> None:
         """Ensure only one project has is_default=True.

@@ -274,6 +283,15 @@ class ProjectService:

         logger.info("Project synchronization complete")

+        # Refresh MCP session to ensure it's in sync with current config
+        try:
+            from basic_memory.mcp.project_session import session
+
+            session.refresh_from_config()
+        except ImportError:
+            # MCP components might not be available in all contexts
+            logger.debug("MCP session not available, skipping session refresh")
+
     async def update_project(  # pragma: no cover
         self, name: str, updated_path: Optional[str] = None, is_active: Optional[bool] = None
     ) -> None:
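The same guarded import appears in both places above. A hypothetical shared helper (the name, module, and logging setup are ours, not the package's) would capture the pattern once:

```python
import logging

logger = logging.getLogger(__name__)


def refresh_mcp_session_if_available() -> None:
    """Best-effort refresh: MCP components may be absent (e.g., CLI-only usage)."""
    try:
        from basic_memory.mcp.project_session import session
    except ImportError:
        logger.debug("MCP session not available, skipping session refresh")
        return
    session.refresh_from_config()
```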
basic_memory/services/sync_status_service.py
@@ -131,6 +131,23 @@ class SyncStatusTracker:
         """Check if system is ready (no sync in progress)."""
         return self._global_status in (SyncStatus.IDLE, SyncStatus.COMPLETED)

+    def is_project_ready(self, project_name: str) -> bool:
+        """Check if a specific project is ready for operations.
+
+        Args:
+            project_name: Name of the project to check
+
+        Returns:
+            True if the project is ready (completed, watching, or not tracked),
+            False if the project is syncing, scanning, or failed
+        """
+        project_status = self._project_statuses.get(project_name)
+        if not project_status:
+            # Project not tracked = ready (likely hasn't been synced yet)
+            return True
+
+        return project_status.status in (SyncStatus.COMPLETED, SyncStatus.WATCHING, SyncStatus.IDLE)
+
     def get_project_status(self, project_name: str) -> Optional[ProjectSyncStatus]:
         """Get status for a specific project."""
         return self._project_statuses.get(project_name)
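Note the asymmetry with the global `is_ready` (which accepts only IDLE and COMPLETED): per-project readiness also accepts WATCHING, and an untracked project counts as ready. An illustrative mini-model of those semantics; enum values other than "failed" are assumptions:

```python
from enum import Enum
from typing import Dict, Optional


class SyncStatus(Enum):
    IDLE = "idle"            # assumed value
    SYNCING = "syncing"      # assumed value
    COMPLETED = "completed"  # assumed value
    WATCHING = "watching"    # assumed value
    FAILED = "failed"        # confirmed by the status.value == "failed" check earlier


READY_STATES = {SyncStatus.COMPLETED, SyncStatus.WATCHING, SyncStatus.IDLE}


def is_project_ready(statuses: Dict[str, SyncStatus], project_name: str) -> bool:
    status: Optional[SyncStatus] = statuses.get(project_name)
    # Untracked project = ready (likely hasn't been synced yet).
    return status is None or status in READY_STATES


assert is_project_ready({}, "brand-new-project")
assert not is_project_ready({"main": SyncStatus.SYNCING}, "main")
assert not is_project_ready({"main": SyncStatus.FAILED}, "main")
```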