basic-memory 0.14.2__py3-none-any.whl → 0.14.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of basic-memory might be problematic.

Files changed (69)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/env.py +3 -1
  3. basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py +53 -0
  4. basic_memory/api/app.py +4 -1
  5. basic_memory/api/routers/management_router.py +3 -1
  6. basic_memory/api/routers/project_router.py +21 -13
  7. basic_memory/api/routers/resource_router.py +3 -3
  8. basic_memory/cli/app.py +3 -3
  9. basic_memory/cli/commands/__init__.py +1 -2
  10. basic_memory/cli/commands/db.py +5 -5
  11. basic_memory/cli/commands/import_chatgpt.py +3 -2
  12. basic_memory/cli/commands/import_claude_conversations.py +3 -1
  13. basic_memory/cli/commands/import_claude_projects.py +3 -1
  14. basic_memory/cli/commands/import_memory_json.py +5 -2
  15. basic_memory/cli/commands/mcp.py +3 -15
  16. basic_memory/cli/commands/project.py +46 -6
  17. basic_memory/cli/commands/status.py +4 -1
  18. basic_memory/cli/commands/sync.py +10 -2
  19. basic_memory/cli/main.py +0 -1
  20. basic_memory/config.py +61 -34
  21. basic_memory/db.py +2 -6
  22. basic_memory/deps.py +3 -2
  23. basic_memory/file_utils.py +65 -0
  24. basic_memory/importers/chatgpt_importer.py +20 -10
  25. basic_memory/importers/memory_json_importer.py +22 -7
  26. basic_memory/importers/utils.py +2 -2
  27. basic_memory/markdown/entity_parser.py +2 -2
  28. basic_memory/markdown/markdown_processor.py +2 -2
  29. basic_memory/markdown/plugins.py +42 -26
  30. basic_memory/markdown/utils.py +1 -1
  31. basic_memory/mcp/async_client.py +22 -2
  32. basic_memory/mcp/project_session.py +6 -4
  33. basic_memory/mcp/prompts/__init__.py +0 -2
  34. basic_memory/mcp/server.py +8 -71
  35. basic_memory/mcp/tools/build_context.py +12 -2
  36. basic_memory/mcp/tools/move_note.py +24 -12
  37. basic_memory/mcp/tools/project_management.py +22 -7
  38. basic_memory/mcp/tools/read_content.py +16 -0
  39. basic_memory/mcp/tools/read_note.py +17 -2
  40. basic_memory/mcp/tools/sync_status.py +3 -2
  41. basic_memory/mcp/tools/write_note.py +9 -1
  42. basic_memory/models/knowledge.py +13 -2
  43. basic_memory/models/project.py +3 -3
  44. basic_memory/repository/entity_repository.py +2 -2
  45. basic_memory/repository/project_repository.py +19 -1
  46. basic_memory/repository/search_repository.py +7 -3
  47. basic_memory/schemas/base.py +40 -10
  48. basic_memory/schemas/importer.py +1 -0
  49. basic_memory/schemas/memory.py +23 -11
  50. basic_memory/services/context_service.py +12 -2
  51. basic_memory/services/directory_service.py +7 -0
  52. basic_memory/services/entity_service.py +56 -10
  53. basic_memory/services/initialization.py +0 -75
  54. basic_memory/services/project_service.py +93 -36
  55. basic_memory/sync/background_sync.py +4 -3
  56. basic_memory/sync/sync_service.py +53 -4
  57. basic_memory/sync/watch_service.py +31 -8
  58. basic_memory/utils.py +234 -71
  59. {basic_memory-0.14.2.dist-info → basic_memory-0.14.4.dist-info}/METADATA +21 -92
  60. {basic_memory-0.14.2.dist-info → basic_memory-0.14.4.dist-info}/RECORD +63 -68
  61. basic_memory/cli/commands/auth.py +0 -136
  62. basic_memory/mcp/auth_provider.py +0 -270
  63. basic_memory/mcp/external_auth_provider.py +0 -321
  64. basic_memory/mcp/prompts/sync_status.py +0 -112
  65. basic_memory/mcp/supabase_auth_provider.py +0 -463
  66. basic_memory/services/migration_service.py +0 -168
  67. {basic_memory-0.14.2.dist-info → basic_memory-0.14.4.dist-info}/WHEEL +0 -0
  68. {basic_memory-0.14.2.dist-info → basic_memory-0.14.4.dist-info}/entry_points.txt +0 -0
  69. {basic_memory-0.14.2.dist-info → basic_memory-0.14.4.dist-info}/licenses/LICENSE +0 -0
basic_memory/schemas/base.py

@@ -22,6 +22,8 @@ from dateparser import parse
 
 from pydantic import BaseModel, BeforeValidator, Field, model_validator
 
+from basic_memory.config import ConfigManager
+from basic_memory.file_utils import sanitize_for_filename
 from basic_memory.utils import generate_permalink
 
 
@@ -53,22 +55,28 @@ def parse_timeframe(timeframe: str) -> datetime:
         timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
 
     Returns:
-        datetime: The parsed datetime for the start of the timeframe
+        datetime: The parsed datetime for the start of the timeframe, timezone-aware in local system timezone
 
     Examples:
-        parse_timeframe('today') -> 2025-06-05 00:00:00 (start of today)
-        parse_timeframe('1d') -> 2025-06-04 14:50:00 (24 hours ago)
-        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00 (1 week ago)
+        parse_timeframe('today') -> 2025-06-05 00:00:00-07:00 (start of today with local timezone)
+        parse_timeframe('1d') -> 2025-06-04 14:50:00-07:00 (24 hours ago with local timezone)
+        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00-07:00 (1 week ago with local timezone)
     """
     if timeframe.lower() == "today":
-        # Return start of today (00:00:00)
-        return datetime.combine(datetime.now().date(), time.min)
+        # Return start of today (00:00:00) in local timezone
+        naive_dt = datetime.combine(datetime.now().date(), time.min)
+        return naive_dt.astimezone()
     else:
         # Use dateparser for other formats
         parsed = parse(timeframe)
         if not parsed:
             raise ValueError(f"Could not parse timeframe: {timeframe}")
-        return parsed
+
+        # If the parsed datetime is naive, make it timezone-aware in local system timezone
+        if parsed.tzinfo is None:
+            return parsed.astimezone()
+        else:
+            return parsed
 
 
 def validate_timeframe(timeframe: str) -> str:
@@ -85,7 +93,7 @@ def validate_timeframe(timeframe: str) -> str:
     parsed = parse_timeframe(timeframe)
 
     # Convert to duration
-    now = datetime.now()
+    now = datetime.now().astimezone()
     if parsed > now:
         raise ValueError("Timeframe cannot be in the future")
 
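Both hunks above hang on one stdlib detail: calling astimezone() with no argument on a naive datetime interprets the value as local time and attaches the local system timezone. A minimal standalone illustration (not part of the diff):

    from datetime import datetime, time, timezone

    # Naive value: midnight today, no tzinfo attached.
    naive = datetime.combine(datetime.now().date(), time.min)
    assert naive.tzinfo is None

    # astimezone() with no argument assumes the naive value is local
    # time and attaches the local system timezone.
    aware = naive.astimezone()
    assert aware.tzinfo is not None

    # Aware values compare safely with other aware values, which is why
    # validate_timeframe also switches to datetime.now().astimezone().
    print(aware <= datetime.now(timezone.utc))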
@@ -184,13 +192,35 @@ class Entity(BaseModel):
         default="text/markdown",
     )
 
+    @property
+    def safe_title(self) -> str:
+        """
+        A sanitized version of the title, which is safe for use on the filesystem. For example,
+        a title of "Coupon Enable/Disable Feature" should create the file as "Coupon Enable-Disable Feature.md"
+        instead of creating a file named "Disable Feature.md" beneath the "Coupon Enable" directory.
+
+        Replaces POSIX and/or Windows style slashes as well as a few other characters that are not safe for filenames.
+        If kebab_filenames is True, the behavior is consistent with the transformation used when generating permalink
+        strings (e.g. "Coupon Enable/Disable Feature" -> "coupon-enable-disable-feature").
+        """
+        fixed_title = sanitize_for_filename(self.title)
+
+        app_config = ConfigManager().config
+        use_kebab_case = app_config.kebab_filenames
+
+        if use_kebab_case:
+            fixed_title = generate_permalink(file_path=fixed_title, split_extension=False)
+
+        return fixed_title
+
     @property
     def file_path(self):
         """Get the file path for this entity based on its permalink."""
+        safe_title = self.safe_title
         if self.content_type == "text/markdown":
-            return f"{self.folder}/{self.title}.md" if self.folder else f"{self.title}.md"
+            return f"{self.folder}/{safe_title}.md" if self.folder else f"{safe_title}.md"
         else:
-            return f"{self.folder}/{self.title}" if self.folder else self.title
+            return f"{self.folder}/{safe_title}" if self.folder else safe_title
 
     @property
     def permalink(self) -> Permalink:
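sanitize_for_filename comes from basic_memory/file_utils.py, which gains 65 lines in this release but whose hunks are not shown here. Going only by the docstring above, a rough sketch of the slash handling might look like the following; the character set and stripping behavior are assumptions, not the released implementation:

    import re

    def sanitize_for_filename_sketch(title: str) -> str:
        # Hypothetical: replace path separators and other characters that
        # are unsafe in filenames with hyphens, so one title maps to one file.
        return re.sub(r'[/\\:*?"<>|]', "-", title).strip()

    print(sanitize_for_filename_sketch("Coupon Enable/Disable Feature"))
    # Coupon Enable-Disable Feature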
basic_memory/schemas/importer.py

@@ -32,3 +32,4 @@ class EntityImportResult(ImportResult):
 
     entities: int = 0
     relations: int = 0
+    skipped_entities: int = 0
basic_memory/schemas/memory.py

@@ -1,10 +1,10 @@
 """Schemas for memory context."""
 
 from datetime import datetime
-from typing import List, Optional, Annotated, Sequence
+from typing import List, Optional, Annotated, Sequence, Literal, Union
 
 from annotated_types import MinLen, MaxLen
-from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter
+from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter, ConfigDict
 
 from basic_memory.schemas.search import SearchItemType
 
@@ -117,8 +117,10 @@ def memory_url_path(url: memory_url) -> str:  # pyright: ignore
 
 class EntitySummary(BaseModel):
     """Simplified entity representation."""
+
+    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
-    type: str = "entity"
+    type: Literal["entity"] = "entity"
     permalink: Optional[str]
     title: str
     content: Optional[str] = None
@@ -128,8 +130,10 @@ class EntitySummary(BaseModel):
 
 class RelationSummary(BaseModel):
     """Simplified relation representation."""
+
+    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
-    type: str = "relation"
+    type: Literal["relation"] = "relation"
     title: str
     file_path: str
     permalink: str
@@ -141,8 +145,10 @@ class RelationSummary(BaseModel):
 
 class ObservationSummary(BaseModel):
     """Simplified observation representation."""
+
+    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
-    type: str = "observation"
+    type: Literal["observation"] = "observation"
     title: str
     file_path: str
     permalink: str
@@ -153,6 +159,8 @@ class ObservationSummary(BaseModel):
 
 class MemoryMetadata(BaseModel):
     """Simplified response metadata."""
+
+    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
     uri: Optional[str] = None
     types: Optional[List[SearchItemType]] = None
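Each summary model now pins datetime fields to ISO 8601 output. Pydantic v2 already serializes datetimes as ISO strings in JSON mode, and the v1-style json_encoders hook is deprecated there but still honored, so the line mainly makes the contract explicit. A minimal illustration:

    from datetime import datetime

    from pydantic import BaseModel, ConfigDict

    class Stamped(BaseModel):
        model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
        created_at: datetime

    print(Stamped(created_at=datetime(2025, 6, 5, 14, 50)).model_dump_json())
    # {"created_at":"2025-06-05T14:50:00"}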
@@ -169,17 +177,21 @@ class MemoryMetadata(BaseModel):
 class ContextResult(BaseModel):
     """Context result containing a primary item with its observations and related items."""
 
-    primary_result: EntitySummary | RelationSummary | ObservationSummary = Field(
-        description="Primary item"
-    )
+    primary_result: Annotated[
+        Union[EntitySummary, RelationSummary, ObservationSummary],
+        Field(discriminator="type", description="Primary item")
+    ]
 
     observations: Sequence[ObservationSummary] = Field(
         description="Observations belonging to this entity", default_factory=list
     )
 
-    related_results: Sequence[EntitySummary | RelationSummary | ObservationSummary] = Field(
-        description="Related items", default_factory=list
-    )
+    related_results: Sequence[
+        Annotated[
+            Union[EntitySummary, RelationSummary, ObservationSummary],
+            Field(discriminator="type")
+        ]
+    ] = Field(description="Related items", default_factory=list)
 
 
 class GraphContext(BaseModel):
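Field(discriminator="type") turns these plain unions into Pydantic discriminated (tagged) unions: together with the Literal type tags added above, validation reads the "type" value and dispatches to exactly one model instead of trying each union member in turn. A self-contained illustration of the mechanism with stub models:

    from typing import Annotated, Literal, Union

    from pydantic import BaseModel, Field, TypeAdapter

    class EntityStub(BaseModel):
        type: Literal["entity"] = "entity"
        title: str

    class RelationStub(BaseModel):
        type: Literal["relation"] = "relation"
        title: str

    # The discriminator tells Pydantic to dispatch on the "type" tag.
    Item = Annotated[Union[EntityStub, RelationStub], Field(discriminator="type")]

    item = TypeAdapter(Item).validate_python({"type": "relation", "title": "links to"})
    assert isinstance(item, RelationStub)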
basic_memory/services/context_service.py

@@ -245,8 +245,8 @@ class ContextService:
         # For compatibility with the old query, we still need this for filtering
         values = ", ".join([f"('{t}', {i})" for t, i in type_id_pairs])
 
-        # Parameters for bindings
-        params = {"max_depth": max_depth, "max_results": max_results}
+        # Parameters for bindings - include project_id for security filtering
+        params = {"max_depth": max_depth, "max_results": max_results, "project_id": self.search_repository.project_id}
 
         # Build date and timeframe filters conditionally based on since parameter
         if since:
@@ -258,6 +258,10 @@ class ContextService:
             date_filter = ""
             relation_date_filter = ""
             timeframe_condition = ""
+
+        # Add project filtering for security - ensure all entities and relations belong to the same project
+        project_filter = "AND e.project_id = :project_id"
+        relation_project_filter = "AND e_from.project_id = :project_id"
 
         # Use a CTE that operates directly on entity and relation tables
         # This avoids the overhead of the search_index virtual table
@@ -284,6 +288,7 @@ class ContextService:
                 FROM entity e
                 WHERE e.id IN ({entity_id_values})
                 {date_filter}
+                {project_filter}
 
                 UNION ALL
 
@@ -314,8 +319,12 @@ class ContextService:
                 JOIN entity e_from ON (
                     r.from_id = e_from.id
                     {relation_date_filter}
+                    {relation_project_filter}
                 )
+                LEFT JOIN entity e_to ON (r.to_id = e_to.id)
                 WHERE eg.depth < :max_depth
+                -- Ensure to_entity (if exists) also belongs to same project
+                AND (r.to_id IS NULL OR e_to.project_id = :project_id)
 
                 UNION ALL
 
@@ -348,6 +357,7 @@ class ContextService:
                         ELSE eg.from_id
                     END
                     {date_filter}
+                    {project_filter}
                 )
                 WHERE eg.depth < :max_depth
                 -- Only include entities connected by relations within timeframe if specified
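The interesting part of the relation traversal is the new LEFT JOIN: the predicate (r.to_id IS NULL OR e_to.project_id = :project_id) keeps forward references whose target entity does not exist yet, while dropping relations that resolve into a different project. A toy demonstration of that predicate against an invented two-table schema:

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.executescript("""
        CREATE TABLE entity (id INTEGER PRIMARY KEY, project_id INTEGER);
        CREATE TABLE relation (id INTEGER PRIMARY KEY, from_id INTEGER, to_id INTEGER);
        INSERT INTO entity VALUES (1, 1), (2, 2);              -- entity 2 is in another project
        INSERT INTO relation VALUES (10, 1, 2), (11, 1, NULL); -- cross-project + unresolved
    """)

    rows = con.execute(
        """
        SELECT r.id FROM relation r
        JOIN entity e_from ON r.from_id = e_from.id AND e_from.project_id = :project_id
        LEFT JOIN entity e_to ON r.to_id = e_to.id
        WHERE r.to_id IS NULL OR e_to.project_id = :project_id
        """,
        {"project_id": 1},
    ).fetchall()
    print(rows)  # [(11,)] - only the unresolved relation survives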
basic_memory/services/directory_service.py

@@ -106,8 +106,15 @@ class DirectoryService:
             List of DirectoryNode objects matching the criteria
         """
         # Normalize directory path
+        # Strip ./ prefix if present (handles relative path notation)
+        if dir_name.startswith("./"):
+            dir_name = dir_name[2:]  # Remove "./" prefix
+
+        # Ensure path starts with "/"
         if not dir_name.startswith("/"):
             dir_name = f"/{dir_name}"
+
+        # Remove trailing slashes except for root
         if dir_name != "/" and dir_name.endswith("/"):
             dir_name = dir_name.rstrip("/")
 
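Restating the three normalization steps as a standalone function (illustrative only) makes the resulting behavior easy to check:

    def normalize_dir_name(dir_name: str) -> str:
        # 1. Strip relative-path notation.
        if dir_name.startswith("./"):
            dir_name = dir_name[2:]
        # 2. Anchor the path at the root.
        if not dir_name.startswith("/"):
            dir_name = f"/{dir_name}"
        # 3. Drop trailing slashes, but leave the root itself intact.
        if dir_name != "/" and dir_name.endswith("/"):
            dir_name = dir_name.rstrip("/")
        return dir_name

    assert normalize_dir_name("./notes/") == "/notes"
    assert normalize_dir_name("notes") == "/notes"
    assert normalize_dir_name("/") == "/"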
basic_memory/services/entity_service.py

@@ -9,12 +9,13 @@ from loguru import logger
 from sqlalchemy.exc import IntegrityError
 
 from basic_memory.config import ProjectConfig, BasicMemoryConfig
-from basic_memory.file_utils import has_frontmatter, parse_frontmatter, remove_frontmatter
+from basic_memory.file_utils import has_frontmatter, parse_frontmatter, remove_frontmatter, dump_frontmatter
 from basic_memory.markdown import EntityMarkdown
 from basic_memory.markdown.entity_parser import EntityParser
 from basic_memory.markdown.utils import entity_model_from_markdown, schema_to_markdown
 from basic_memory.models import Entity as EntityModel
 from basic_memory.models import Observation, Relation
+from basic_memory.models.knowledge import Entity
 from basic_memory.repository import ObservationRepository, RelationRepository
 from basic_memory.repository.entity_repository import EntityRepository
 from basic_memory.schemas import Entity as EntitySchema
@@ -44,6 +45,39 @@ class EntityService(BaseService[EntityModel]):
         self.file_service = file_service
         self.link_resolver = link_resolver
 
+    async def detect_file_path_conflicts(self, file_path: str) -> List[Entity]:
+        """Detect potential file path conflicts for a given file path.
+
+        This checks for entities with similar file paths that might cause conflicts:
+        - Case sensitivity differences (Finance/file.md vs finance/file.md)
+        - Character encoding differences
+        - Hyphen vs space differences
+        - Unicode normalization differences
+
+        Args:
+            file_path: The file path to check for conflicts
+
+        Returns:
+            List of entities that might conflict with the given file path
+        """
+        from basic_memory.utils import detect_potential_file_conflicts
+
+        conflicts = []
+
+        # Get all existing file paths
+        all_entities = await self.repository.find_all()
+        existing_paths = [entity.file_path for entity in all_entities]
+
+        # Use the enhanced conflict detection utility
+        conflicting_paths = detect_potential_file_conflicts(file_path, existing_paths)
+
+        # Find the entities corresponding to conflicting paths
+        for entity in all_entities:
+            if entity.file_path in conflicting_paths:
+                conflicts.append(entity)
+
+        return conflicts
+
     async def resolve_permalink(
         self, file_path: Permalink | Path, markdown: Optional[EntityMarkdown] = None
     ) -> str:
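detect_potential_file_conflicts lives in basic_memory/utils.py (+234 -71 in this release, hunks not shown). Judging from the conflict categories in the docstring, it plausibly compares normalized forms of each path; the following sketch is an assumption-laden stand-in, not the released code:

    import unicodedata

    def _normalize_for_comparison(path: str) -> str:
        # Assumed heuristics: fold case, normalize Unicode, and treat
        # spaces and hyphens as equivalent so near-identical paths collide.
        folded = unicodedata.normalize("NFC", path).casefold()
        return folded.replace(" ", "-")

    def detect_potential_file_conflicts_sketch(file_path: str, existing_paths: list[str]) -> list[str]:
        target = _normalize_for_comparison(file_path)
        return [
            p for p in existing_paths
            if p != file_path and _normalize_for_comparison(p) == target
        ]

    print(detect_potential_file_conflicts_sketch(
        "finance/Q3 Report.md",
        ["Finance/q3-report.md", "notes/todo.md"],
    ))
    # ['Finance/q3-report.md']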
@@ -54,18 +88,30 @@ class EntityService(BaseService[EntityModel]):
         2. If markdown has permalink but it's used by another file -> make unique
         3. For existing files, keep current permalink from db
         4. Generate new unique permalink from file path
+
+        Enhanced to detect and handle character-related conflicts.
         """
+        file_path_str = Path(file_path).as_posix()
+
+        # Check for potential file path conflicts before resolving permalink
+        conflicts = await self.detect_file_path_conflicts(file_path_str)
+        if conflicts:
+            logger.warning(
+                f"Detected potential file path conflicts for '{file_path_str}': "
+                f"{[entity.file_path for entity in conflicts]}"
+            )
+
         # If markdown has explicit permalink, try to validate it
         if markdown and markdown.frontmatter.permalink:
             desired_permalink = markdown.frontmatter.permalink
             existing = await self.repository.get_by_permalink(desired_permalink)
 
             # If no conflict or it's our own file, use as is
-            if not existing or existing.file_path == str(file_path):
+            if not existing or existing.file_path == file_path_str:
                 return desired_permalink
 
         # For existing files, try to find current permalink
-        existing = await self.repository.get_by_file_path(str(file_path))
+        existing = await self.repository.get_by_file_path(file_path_str)
         if existing:
             return existing.permalink
 
@@ -73,9 +119,9 @@ class EntityService(BaseService[EntityModel]):
         if markdown and markdown.frontmatter.permalink:
             desired_permalink = markdown.frontmatter.permalink
         else:
-            desired_permalink = generate_permalink(file_path)
+            desired_permalink = generate_permalink(file_path_str)
 
-        # Make unique if needed
+        # Make unique if needed - enhanced to handle character conflicts
         permalink = desired_permalink
         suffix = 1
         while await self.repository.get_by_permalink(permalink):
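The recurring str(file_path) → Path(file_path).as_posix() change makes stored paths platform-independent: str() on a Windows path yields backslashes, which would never match a database row written with forward slashes. For example:

    from pathlib import PurePosixPath, PureWindowsPath

    p = PureWindowsPath("folder", "note.md")
    print(str(p))        # folder\note.md  (backslashes on Windows-style paths)
    print(p.as_posix())  # folder/note.md  (stable forward-slash form)

    # as_posix() is a no-op for paths that are already POSIX-style:
    print(PurePosixPath("folder/note.md").as_posix())  # folder/note.md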
@@ -150,7 +196,7 @@ class EntityService(BaseService[EntityModel]):
         post = await schema_to_markdown(schema)
 
         # write file
-        final_content = frontmatter.dumps(post, sort_keys=False)
+        final_content = dump_frontmatter(post)
         checksum = await self.file_service.write_file(file_path, final_content)
 
         # parse entity from file
@@ -227,7 +273,7 @@ class EntityService(BaseService[EntityModel]):
         merged_post = frontmatter.Post(post.content, **existing_markdown.frontmatter.metadata)
 
         # write file
-        final_content = frontmatter.dumps(merged_post, sort_keys=False)
+        final_content = dump_frontmatter(merged_post)
         checksum = await self.file_service.write_file(file_path, final_content)
 
         # parse entity from file
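dump_frontmatter is another of the basic_memory/file_utils.py additions not shown in this diff. Since it replaces frontmatter.dumps(post, sort_keys=False) at both call sites, a reasonable reading is a thin wrapper that keeps frontmatter keys in insertion order; a sketch under that assumption:

    import frontmatter

    def dump_frontmatter_sketch(post: frontmatter.Post) -> str:
        # Assumed behavior: serialize without sorting keys, so the YAML
        # frontmatter keeps the order in which fields were written.
        return frontmatter.dumps(post, sort_keys=False)

    post = frontmatter.Post("note body", title="My Note", type="note", tags=["a", "b"])
    print(dump_frontmatter_sketch(post))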
@@ -237,7 +283,7 @@ class EntityService(BaseService[EntityModel]):
         entity = await self.update_entity_and_observations(file_path, entity_markdown)
 
         # add relations
-        await self.update_entity_relations(str(file_path), entity_markdown)
+        await self.update_entity_relations(file_path.as_posix(), entity_markdown)
 
         # Set final checksum to match file
         entity = await self.repository.update(entity.id, {"checksum": checksum})
@@ -328,7 +374,7 @@ class EntityService(BaseService[EntityModel]):
         """
         logger.debug(f"Updating entity and observations: {file_path}")
 
-        db_entity = await self.repository.get_by_file_path(str(file_path))
+        db_entity = await self.repository.get_by_file_path(file_path.as_posix())
 
         # Clear observations for entity
         await self.observation_repository.delete_by_fields(entity_id=db_entity.id)
@@ -452,7 +498,7 @@ class EntityService(BaseService[EntityModel]):
 
         # Update entity and its relationships
         entity = await self.update_entity_and_observations(file_path, entity_markdown)
-        await self.update_entity_relations(str(file_path), entity_markdown)
+        await self.update_entity_relations(file_path.as_posix(), entity_markdown)
 
         # Set final checksum to match file
         entity = await self.repository.update(entity.id, {"checksum": checksum})
basic_memory/services/initialization.py

@@ -5,14 +5,12 @@ to ensure consistent application startup across all entry points.
 """
 
 import asyncio
-import shutil
 from pathlib import Path
 
 from loguru import logger
 
 from basic_memory import db
 from basic_memory.config import BasicMemoryConfig
-from basic_memory.models import Project
 from basic_memory.repository import ProjectRepository
 
 
@@ -70,63 +68,6 @@ async def reconcile_projects_with_config(app_config: BasicMemoryConfig):
         logger.info("Continuing with initialization despite synchronization error")
 
 
-async def migrate_legacy_projects(app_config: BasicMemoryConfig):
-    # Get database session - migrations handled centrally
-    _, session_maker = await db.get_or_create_db(
-        db_path=app_config.database_path,
-        db_type=db.DatabaseType.FILESYSTEM,
-        ensure_migrations=False,
-    )
-    logger.info("Migrating legacy projects...")
-    project_repository = ProjectRepository(session_maker)
-
-    # For each project in config.json, check if it has a .basic-memory dir
-    for project_name, project_path in app_config.projects.items():
-        legacy_dir = Path(project_path) / ".basic-memory"
-        if not legacy_dir.exists():
-            continue
-        logger.info(f"Detected legacy project directory: {legacy_dir}")
-        project = await project_repository.get_by_name(project_name)
-        if not project:  # pragma: no cover
-            logger.error(f"Project {project_name} not found in database, skipping migration")
-            continue
-
-        logger.info(f"Starting migration for project: {project_name} (id: {project.id})")
-        await migrate_legacy_project_data(project, legacy_dir)
-        logger.info(f"Completed migration for project: {project_name}")
-    logger.info("Legacy projects successfully migrated")
-
-
-async def migrate_legacy_project_data(project: Project, legacy_dir: Path) -> bool:
-    """Check if project has legacy .basic-memory dir and migrate if needed.
-
-    Args:
-        project: The project to check and potentially migrate
-
-    Returns:
-        True if migration occurred, False otherwise
-    """
-
-    # avoid circular imports
-    from basic_memory.cli.commands.sync import get_sync_service
-
-    sync_service = await get_sync_service(project)
-    sync_dir = Path(project.path)
-
-    logger.info(f"Sync starting project: {project.name}")
-    await sync_service.sync(sync_dir, project_name=project.name)
-    logger.info(f"Sync completed successfully for project: {project.name}")
-
-    # After successful sync, remove the legacy directory
-    try:
-        logger.info(f"Removing legacy directory: {legacy_dir}")
-        shutil.rmtree(legacy_dir)
-        return True
-    except Exception as e:
-        logger.error(f"Error removing legacy directory: {e}")
-        return False
-
-
 async def initialize_file_sync(
     app_config: BasicMemoryConfig,
 ):
@@ -186,16 +127,6 @@ async def initialize_file_sync(
             sync_status_tracker.fail_project_sync(project.name, str(e))
             # Continue with other projects even if one fails
 
-    # Mark migration complete if it was in progress
-    try:
-        from basic_memory.services.migration_service import migration_manager
-
-        if not migration_manager.is_ready:  # pragma: no cover
-            migration_manager.mark_completed("Migration completed with file sync")
-            logger.info("Marked migration as completed after file sync")
-    except Exception as e:  # pragma: no cover
-        logger.warning(f"Could not update migration status: {e}")
-
     # Then start the watch service in the background
     logger.info("Starting watch service for all projects")
     # run the watch service
@@ -229,13 +160,7 @@ async def initialize_app(
     # Reconcile projects from config.json with projects table
     await reconcile_projects_with_config(app_config)
 
-    # Start background migration for legacy project data (non-blocking)
-    from basic_memory.services.migration_service import migration_manager
-
-    await migration_manager.start_background_migration(app_config)
-
     logger.info("App initialization completed (migration running in background if needed)")
-    return migration_manager
 
 
 def ensure_initialization(app_config: BasicMemoryConfig) -> None: