basic-memory 0.14.4__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of basic-memory has been flagged as potentially problematic.
Files changed (84)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py +5 -9
  3. basic_memory/api/app.py +10 -4
  4. basic_memory/api/routers/directory_router.py +23 -2
  5. basic_memory/api/routers/knowledge_router.py +25 -8
  6. basic_memory/api/routers/project_router.py +100 -4
  7. basic_memory/cli/app.py +9 -28
  8. basic_memory/cli/auth.py +277 -0
  9. basic_memory/cli/commands/cloud/__init__.py +5 -0
  10. basic_memory/cli/commands/cloud/api_client.py +112 -0
  11. basic_memory/cli/commands/cloud/bisync_commands.py +818 -0
  12. basic_memory/cli/commands/cloud/core_commands.py +288 -0
  13. basic_memory/cli/commands/cloud/mount_commands.py +295 -0
  14. basic_memory/cli/commands/cloud/rclone_config.py +288 -0
  15. basic_memory/cli/commands/cloud/rclone_installer.py +198 -0
  16. basic_memory/cli/commands/command_utils.py +43 -0
  17. basic_memory/cli/commands/import_memory_json.py +0 -4
  18. basic_memory/cli/commands/mcp.py +77 -60
  19. basic_memory/cli/commands/project.py +154 -152
  20. basic_memory/cli/commands/status.py +25 -22
  21. basic_memory/cli/commands/sync.py +45 -228
  22. basic_memory/cli/commands/tool.py +87 -16
  23. basic_memory/cli/main.py +1 -0
  24. basic_memory/config.py +131 -21
  25. basic_memory/db.py +104 -3
  26. basic_memory/deps.py +27 -8
  27. basic_memory/file_utils.py +37 -13
  28. basic_memory/ignore_utils.py +295 -0
  29. basic_memory/markdown/plugins.py +9 -7
  30. basic_memory/mcp/async_client.py +124 -14
  31. basic_memory/mcp/project_context.py +141 -0
  32. basic_memory/mcp/prompts/ai_assistant_guide.py +49 -4
  33. basic_memory/mcp/prompts/continue_conversation.py +17 -16
  34. basic_memory/mcp/prompts/recent_activity.py +116 -32
  35. basic_memory/mcp/prompts/search.py +13 -12
  36. basic_memory/mcp/prompts/utils.py +11 -4
  37. basic_memory/mcp/resources/ai_assistant_guide.md +211 -341
  38. basic_memory/mcp/resources/project_info.py +27 -11
  39. basic_memory/mcp/server.py +0 -37
  40. basic_memory/mcp/tools/__init__.py +5 -6
  41. basic_memory/mcp/tools/build_context.py +67 -56
  42. basic_memory/mcp/tools/canvas.py +38 -26
  43. basic_memory/mcp/tools/chatgpt_tools.py +187 -0
  44. basic_memory/mcp/tools/delete_note.py +81 -47
  45. basic_memory/mcp/tools/edit_note.py +155 -138
  46. basic_memory/mcp/tools/list_directory.py +112 -99
  47. basic_memory/mcp/tools/move_note.py +181 -101
  48. basic_memory/mcp/tools/project_management.py +113 -277
  49. basic_memory/mcp/tools/read_content.py +91 -74
  50. basic_memory/mcp/tools/read_note.py +152 -115
  51. basic_memory/mcp/tools/recent_activity.py +471 -68
  52. basic_memory/mcp/tools/search.py +105 -92
  53. basic_memory/mcp/tools/sync_status.py +136 -130
  54. basic_memory/mcp/tools/utils.py +4 -0
  55. basic_memory/mcp/tools/view_note.py +44 -33
  56. basic_memory/mcp/tools/write_note.py +151 -90
  57. basic_memory/models/knowledge.py +12 -6
  58. basic_memory/models/project.py +6 -2
  59. basic_memory/repository/entity_repository.py +89 -82
  60. basic_memory/repository/relation_repository.py +13 -0
  61. basic_memory/repository/repository.py +18 -5
  62. basic_memory/repository/search_repository.py +46 -2
  63. basic_memory/schemas/__init__.py +6 -0
  64. basic_memory/schemas/base.py +39 -11
  65. basic_memory/schemas/cloud.py +46 -0
  66. basic_memory/schemas/memory.py +90 -21
  67. basic_memory/schemas/project_info.py +9 -10
  68. basic_memory/schemas/sync_report.py +48 -0
  69. basic_memory/services/context_service.py +25 -11
  70. basic_memory/services/directory_service.py +124 -3
  71. basic_memory/services/entity_service.py +100 -48
  72. basic_memory/services/initialization.py +30 -11
  73. basic_memory/services/project_service.py +101 -24
  74. basic_memory/services/search_service.py +16 -8
  75. basic_memory/sync/sync_service.py +173 -34
  76. basic_memory/sync/watch_service.py +101 -40
  77. basic_memory/utils.py +14 -4
  78. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/METADATA +57 -9
  79. basic_memory-0.15.1.dist-info/RECORD +146 -0
  80. basic_memory/mcp/project_session.py +0 -120
  81. basic_memory-0.14.4.dist-info/RECORD +0 -133
  82. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/WHEEL +0 -0
  83. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/entry_points.txt +0 -0
  84. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/licenses/LICENSE +0 -0
basic_memory/repository/search_repository.py

@@ -62,7 +62,7 @@ class SearchIndexRow:
 
         # Normalize path separators to handle both Windows (\) and Unix (/) paths
         normalized_path = Path(self.file_path).as_posix()
-
+
         # Split the path by slashes
         parts = normalized_path.split("/")
 
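The normalization relies on pathlib converting native separators to forward slashes. A minimal sketch of the behavior, using the Pure* path classes so it is reproducible on any host (the package itself uses the platform-native Path):

from pathlib import PurePosixPath, PureWindowsPath

# On Windows, Path(...).as_posix() rewrites backslashes to forward slashes;
# PureWindowsPath reproduces that behavior on any platform.
assert PureWindowsPath(r"projects\notes\todo.md").as_posix() == "projects/notes/todo.md"
# Unix-style paths pass through unchanged.
assert PurePosixPath("projects/notes/todo.md").as_posix() == "projects/notes/todo.md"

parts = "projects/notes/todo.md".split("/")  # ['projects', 'notes', 'todo.md']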
@@ -527,7 +527,9 @@ class SearchRepository:
         async with db.scoped_session(self.session_maker) as session:
             # Delete existing record if any
             await session.execute(
-                text("DELETE FROM search_index WHERE permalink = :permalink AND project_id = :project_id"),
+                text(
+                    "DELETE FROM search_index WHERE permalink = :permalink AND project_id = :project_id"
+                ),
                 {"permalink": search_index_row.permalink, "project_id": self.project_id},
             )
 
@@ -557,6 +559,48 @@ class SearchRepository:
             logger.debug(f"indexed row {search_index_row}")
             await session.commit()
 
+    async def bulk_index_items(self, search_index_rows: List[SearchIndexRow]):
+        """Index multiple items in a single batch operation.
+
+        Note: This method assumes that any existing records for the entity_id
+        have already been deleted (typically via delete_by_entity_id).
+
+        Args:
+            search_index_rows: List of SearchIndexRow objects to index
+        """
+        if not search_index_rows:
+            return
+
+        async with db.scoped_session(self.session_maker) as session:
+            # Prepare all insert data with project_id
+            insert_data_list = []
+            for row in search_index_rows:
+                insert_data = row.to_insert()
+                insert_data["project_id"] = self.project_id
+                insert_data_list.append(insert_data)
+
+            # Batch insert all records using executemany
+            await session.execute(
+                text("""
+                    INSERT INTO search_index (
+                        id, title, content_stems, content_snippet, permalink, file_path, type, metadata,
+                        from_id, to_id, relation_type,
+                        entity_id, category,
+                        created_at, updated_at,
+                        project_id
+                    ) VALUES (
+                        :id, :title, :content_stems, :content_snippet, :permalink, :file_path, :type, :metadata,
+                        :from_id, :to_id, :relation_type,
+                        :entity_id, :category,
+                        :created_at, :updated_at,
+                        :project_id
+                    )
+                """),
+                insert_data_list,
+            )
+            logger.debug(f"Bulk indexed {len(search_index_rows)} rows")
+            await session.commit()
+
     async def delete_by_entity_id(self, entity_id: int):
         """Delete an item from the search index by entity_id."""
         async with db.scoped_session(self.session_maker) as session:
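A hedged usage sketch of the new batch API. The caller name reindex_entity is hypothetical; the pairing with delete_by_entity_id follows the method's own docstring note:

from typing import List

async def reindex_entity(repo, entity_id: int, rows: List["SearchIndexRow"]) -> None:
    # Per the docstring, stale rows must be removed before bulk indexing.
    await repo.delete_by_entity_id(entity_id)
    # One executemany INSERT instead of a round-trip per row.
    await repo.bulk_index_items(rows)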
basic_memory/schemas/__init__.py

@@ -48,6 +48,10 @@ from basic_memory.schemas.directory import (
     DirectoryNode,
 )
 
+from basic_memory.schemas.sync_report import (
+    SyncReportResponse,
+)
+
 # For convenient imports, export all models
 __all__ = [
     # Base
@@ -77,4 +81,6 @@ __all__ = [
     "ProjectInfoResponse",
     # Directory
     "DirectoryNode",
+    # Sync
+    "SyncReportResponse",
 ]
basic_memory/schemas/base.py

@@ -11,9 +11,10 @@ Key Concepts:
 4. Everything is stored in both SQLite and markdown files
 """
 
+import os
 import mimetypes
 import re
-from datetime import datetime, time
+from datetime import datetime, timedelta
 from pathlib import Path
 from typing import List, Optional, Annotated, Dict
 
@@ -23,7 +24,7 @@ from dateparser import parse
 from pydantic import BaseModel, BeforeValidator, Field, model_validator
 
 from basic_memory.config import ConfigManager
-from basic_memory.file_utils import sanitize_for_filename
+from basic_memory.file_utils import sanitize_for_filename, sanitize_for_folder
 from basic_memory.utils import generate_permalink
 
 
@@ -51,30 +52,47 @@ def to_snake_case(name: str) -> str:
 def parse_timeframe(timeframe: str) -> datetime:
     """Parse timeframe with special handling for 'today' and other natural language expressions.
 
+    Enforces a minimum 1-day lookback to handle timezone differences in distributed deployments.
+
     Args:
         timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
 
     Returns:
         datetime: The parsed datetime for the start of the timeframe, timezone-aware in local system timezone
+            Always returns at least 1 day ago to handle timezone differences.
 
     Examples:
-        parse_timeframe('today') -> 2025-06-05 00:00:00-07:00 (start of today with local timezone)
+        parse_timeframe('today') -> 2025-06-04 14:50:00-07:00 (1 day ago, not start of today)
+        parse_timeframe('1h') -> 2025-06-04 14:50:00-07:00 (1 day ago, not 1 hour ago)
         parse_timeframe('1d') -> 2025-06-04 14:50:00-07:00 (24 hours ago with local timezone)
         parse_timeframe('1 week ago') -> 2025-05-29 14:50:00-07:00 (1 week ago with local timezone)
     """
     if timeframe.lower() == "today":
-        # Return start of today (00:00:00) in local timezone
-        naive_dt = datetime.combine(datetime.now().date(), time.min)
-        return naive_dt.astimezone()
+        # For "today", return 1 day ago to ensure we capture recent activity across timezones
+        # This handles the case where client and server are in different timezones
+        now = datetime.now()
+        one_day_ago = now - timedelta(days=1)
+        return one_day_ago.astimezone()
     else:
         # Use dateparser for other formats
         parsed = parse(timeframe)
         if not parsed:
             raise ValueError(f"Could not parse timeframe: {timeframe}")
-
+
         # If the parsed datetime is naive, make it timezone-aware in local system timezone
         if parsed.tzinfo is None:
-            return parsed.astimezone()
+            parsed = parsed.astimezone()
+        else:
+            parsed = parsed
+
+        # Enforce minimum 1-day lookback to handle timezone differences
+        # This ensures we don't miss recent activity due to client/server timezone mismatches
+        now = datetime.now().astimezone()
+        one_day_ago = now - timedelta(days=1)
+
+        # If the parsed time is more recent than 1 day ago, use 1 day ago instead
+        if parsed > one_day_ago:
+            return one_day_ago
         else:
             return parsed
 
@@ -179,6 +197,7 @@ class Entity(BaseModel):
     """
 
     # private field to override permalink
+    # Use empty string "" as sentinel to indicate permalinks are explicitly disabled
    _permalink: Optional[str] = None
 
     title: str
@@ -192,6 +211,10 @@ class Entity(BaseModel):
         default="text/markdown",
     )
 
+    def __init__(self, **data):
+        data["folder"] = sanitize_for_folder(data.get("folder", ""))
+        super().__init__(**data)
+
     @property
     def safe_title(self) -> str:
         """
@@ -218,13 +241,18 @@ class Entity(BaseModel):
         """Get the file path for this entity based on its permalink."""
         safe_title = self.safe_title
         if self.content_type == "text/markdown":
-            return f"{self.folder}/{safe_title}.md" if self.folder else f"{safe_title}.md"
+            return (
+                os.path.join(self.folder, f"{safe_title}.md") if self.folder else f"{safe_title}.md"
+            )
         else:
-            return f"{self.folder}/{safe_title}" if self.folder else safe_title
+            return os.path.join(self.folder, safe_title) if self.folder else safe_title
 
     @property
-    def permalink(self) -> Permalink:
+    def permalink(self) -> Optional[Permalink]:
         """Get a url-friendly path."""
+        # Empty string is a sentinel value indicating permalinks are disabled
+        if self._permalink == "":
+            return None
         return self._permalink or generate_permalink(self.file_path)
 
     @model_validator(mode="after")
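The three-way contract ("" disables permalinks, None means derive one, anything else overrides) reduces to this resolution logic. A sketch with a stand-in for generate_permalink; only the branching mirrors the diff:

from typing import Optional

def permalink_stub(file_path: str) -> str:
    # stand-in for basic_memory.utils.generate_permalink
    return file_path.rsplit(".", 1)[0].lower().replace(" ", "-")

def resolve_permalink(override: Optional[str], file_path: str) -> Optional[str]:
    if override == "":  # sentinel: permalinks explicitly disabled
        return None
    return override or permalink_stub(file_path)

assert resolve_permalink("", "Notes/My Idea.md") is None
assert resolve_permalink(None, "Notes/My Idea.md") == "notes/my-idea"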
basic_memory/schemas/cloud.py (new file)

@@ -0,0 +1,46 @@
+"""Schemas for cloud-related API responses."""
+
+from pydantic import BaseModel, Field
+
+
+class TenantMountInfo(BaseModel):
+    """Response from /tenant/mount/info endpoint."""
+
+    tenant_id: str = Field(..., description="Unique identifier for the tenant")
+    bucket_name: str = Field(..., description="S3 bucket name for the tenant")
+
+
+class MountCredentials(BaseModel):
+    """Response from /tenant/mount/credentials endpoint."""
+
+    access_key: str = Field(..., description="S3 access key for mount")
+    secret_key: str = Field(..., description="S3 secret key for mount")
+
+
+class CloudProject(BaseModel):
+    """Representation of a cloud project."""
+
+    name: str = Field(..., description="Project name")
+    path: str = Field(..., description="Project path on cloud")
+
+
+class CloudProjectList(BaseModel):
+    """Response from /proxy/projects/projects endpoint."""
+
+    projects: list[CloudProject] = Field(default_factory=list, description="List of cloud projects")
+
+
+class CloudProjectCreateRequest(BaseModel):
+    """Request to create a new cloud project."""
+
+    name: str = Field(..., description="Project name")
+    path: str = Field(..., description="Project path (permalink)")
+    set_default: bool = Field(default=False, description="Set as default project")
+
+
+class CloudProjectCreateResponse(BaseModel):
+    """Response from creating a cloud project."""
+
+    name: str = Field(..., description="Created project name")
+    path: str = Field(..., description="Created project path")
+    message: str = Field(default="", description="Success message")
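These are plain Pydantic v2 models, so a response round-trip looks like this (payload values invented for illustration):

payload = {"projects": [{"name": "research", "path": "research"}]}
project_list = CloudProjectList.model_validate(payload)
print(project_list.projects[0].name)  # research

req = CloudProjectCreateRequest(name="research", path="research", set_default=True)
print(req.model_dump_json())  # {"name":"research","path":"research","set_default":true}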
basic_memory/schemas/memory.py

@@ -1,10 +1,10 @@
 """Schemas for memory context."""
 
 from datetime import datetime
-from typing import List, Optional, Annotated, Sequence, Literal, Union
+from typing import List, Optional, Annotated, Sequence, Literal, Union, Dict
 
 from annotated_types import MinLen, MaxLen
-from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter, ConfigDict
+from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter, field_serializer
 
 from basic_memory.schemas.search import SearchItemType
 
@@ -26,6 +26,7 @@ def validate_memory_url_path(path: str) -> bool:
     >>> validate_memory_url_path("invalid://test")  # Contains protocol
     False
     """
+    # Empty paths are not valid
     if not path or not path.strip():
         return False
 
@@ -68,7 +69,13 @@ def normalize_memory_url(url: str | None) -> str:
         ValueError: Invalid memory URL path: 'memory//test' contains double slashes
     """
     if not url:
-        return ""
+        raise ValueError("Memory URL cannot be empty")
+
+    # Strip whitespace for consistency
+    url = url.strip()
+
+    if not url:
+        raise ValueError("Memory URL cannot be empty or whitespace")
 
     clean_path = url.removeprefix("memory://")
 
@@ -79,8 +86,6 @@ def normalize_memory_url(url: str | None) -> str:
         raise ValueError(f"Invalid memory URL path: '{clean_path}' contains protocol scheme")
     elif "//" in clean_path:
         raise ValueError(f"Invalid memory URL path: '{clean_path}' contains double slashes")
-    elif not clean_path.strip():
-        raise ValueError("Memory URL path cannot be empty or whitespace")
     else:
         raise ValueError(f"Invalid memory URL path: '{clean_path}' contains invalid characters")
 
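The contract change is visible at the call boundary: inputs that 0.14.x silently turned into "" now raise. A minimal check:

for bad in (None, "", "   "):
    try:
        normalize_memory_url(bad)
    except ValueError as exc:
        print(exc)
# Memory URL cannot be empty
# Memory URL cannot be empty
# Memory URL cannot be empty or whitespace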
@@ -117,21 +122,23 @@ def memory_url_path(url: memory_url) -> str:  # pyright: ignore
 
 class EntitySummary(BaseModel):
     """Simplified entity representation."""
-
-    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
     type: Literal["entity"] = "entity"
     permalink: Optional[str]
     title: str
     content: Optional[str] = None
     file_path: str
-    created_at: datetime
+    created_at: Annotated[
+        datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})
+    ]
+
+    @field_serializer("created_at")
+    def serialize_created_at(self, dt: datetime) -> str:
+        return dt.isoformat()
 
 
 class RelationSummary(BaseModel):
     """Simplified relation representation."""
-
-    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
     type: Literal["relation"] = "relation"
     title: str
@@ -140,13 +147,17 @@ class RelationSummary(BaseModel):
     relation_type: str
     from_entity: Optional[str] = None
     to_entity: Optional[str] = None
-    created_at: datetime
+    created_at: Annotated[
+        datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})
+    ]
+
+    @field_serializer("created_at")
+    def serialize_created_at(self, dt: datetime) -> str:
+        return dt.isoformat()
 
 
 class ObservationSummary(BaseModel):
     """Simplified observation representation."""
-
-    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
     type: Literal["observation"] = "observation"
     title: str
@@ -154,32 +165,42 @@ class ObservationSummary(BaseModel):
     permalink: str
     category: str
     content: str
-    created_at: datetime
+    created_at: Annotated[
+        datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})
+    ]
+
+    @field_serializer("created_at")
+    def serialize_created_at(self, dt: datetime) -> str:
+        return dt.isoformat()
 
 
 class MemoryMetadata(BaseModel):
     """Simplified response metadata."""
-
-    model_config = ConfigDict(json_encoders={datetime: lambda dt: dt.isoformat()})
 
     uri: Optional[str] = None
     types: Optional[List[SearchItemType]] = None
     depth: int
     timeframe: Optional[str] = None
-    generated_at: datetime
+    generated_at: Annotated[
+        datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})
+    ]
     primary_count: Optional[int] = None  # Changed field name
     related_count: Optional[int] = None  # Changed field name
     total_results: Optional[int] = None  # For backward compatibility
     total_relations: Optional[int] = None
     total_observations: Optional[int] = None
 
+    @field_serializer("generated_at")
+    def serialize_generated_at(self, dt: datetime) -> str:
+        return dt.isoformat()
+
 
 class ContextResult(BaseModel):
     """Context result containing a primary item with its observations and related items."""
 
     primary_result: Annotated[
-        Union[EntitySummary, RelationSummary, ObservationSummary],
-        Field(discriminator="type", description="Primary item")
+        Union[EntitySummary, RelationSummary, ObservationSummary],
+        Field(discriminator="type", description="Primary item"),
     ]
 
     observations: Sequence[ObservationSummary] = Field(
@@ -188,8 +209,7 @@ class ContextResult(BaseModel):
 
     related_results: Sequence[
         Annotated[
-            Union[EntitySummary, RelationSummary, ObservationSummary],
-            Field(discriminator="type")
+            Union[EntitySummary, RelationSummary, ObservationSummary], Field(discriminator="type")
         ]
     ] = Field(description="Related items", default_factory=list)
 
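The pattern above replaces Pydantic v1's json_encoders config, which is deprecated in Pydantic v2, with per-field serializers. A minimal standalone sketch of the same technique:

from datetime import datetime, timezone
from pydantic import BaseModel, field_serializer

class Stamped(BaseModel):
    created_at: datetime

    @field_serializer("created_at")
    def serialize_created_at(self, dt: datetime) -> str:
        return dt.isoformat()

stamp = Stamped(created_at=datetime(2025, 6, 4, tzinfo=timezone.utc))
print(stamp.model_dump_json())  # {"created_at":"2025-06-04T00:00:00+00:00"}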
@@ -207,3 +227,52 @@ class GraphContext(BaseModel):
 
     page: Optional[int] = None
     page_size: Optional[int] = None
+
+
+class ActivityStats(BaseModel):
+    """Statistics about activity across all projects."""
+
+    total_projects: int
+    active_projects: int = Field(description="Projects with activity in timeframe")
+    most_active_project: Optional[str] = None
+    total_items: int = Field(description="Total items across all projects")
+    total_entities: int = 0
+    total_relations: int = 0
+    total_observations: int = 0
+
+
+class ProjectActivity(BaseModel):
+    """Activity summary for a single project."""
+
+    project_name: str
+    project_path: str
+    activity: GraphContext = Field(description="The actual activity data for this project")
+    item_count: int = Field(description="Total items in this project's activity")
+    last_activity: Optional[
+        Annotated[datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})]
+    ] = Field(default=None, description="Most recent activity timestamp")
+    active_folders: List[str] = Field(default_factory=list, description="Most active folders")
+
+    @field_serializer("last_activity")
+    def serialize_last_activity(self, dt: Optional[datetime]) -> Optional[str]:
+        return dt.isoformat() if dt else None
+
+
+class ProjectActivitySummary(BaseModel):
+    """Summary of activity across all projects."""
+
+    projects: Dict[str, ProjectActivity] = Field(
+        description="Activity per project, keyed by project name"
+    )
+    summary: ActivityStats
+    timeframe: str = Field(description="The timeframe used for the query")
+    generated_at: Annotated[
+        datetime, Field(json_schema_extra={"type": "string", "format": "date-time"})
+    ]
+    guidance: Optional[str] = Field(
+        default=None, description="Assistant guidance for project selection and session management"
+    )
+
+    @field_serializer("generated_at")
+    def serialize_generated_at(self, dt: datetime) -> str:
+        return dt.isoformat()
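ActivityStats is a plain value object; a hypothetical assembly (all numbers invented) shows the shape consumers of the recent-activity tools receive:

stats = ActivityStats(
    total_projects=3,
    active_projects=1,
    most_active_project="research",
    total_items=42,
    total_entities=30,
    total_relations=8,
    total_observations=4,
)
print(stats.model_dump())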
basic_memory/schemas/project_info.py

@@ -2,6 +2,7 @@
 
 import os
 from datetime import datetime
+from pathlib import Path
 from typing import Dict, List, Optional, Any
 
 from pydantic import Field, BaseModel
@@ -78,16 +79,6 @@ class SystemStatus(BaseModel):
     timestamp: datetime = Field(description="Timestamp when the information was collected")
 
 
-class ProjectDetail(BaseModel):
-    """Detailed information about a project."""
-
-    path: str = Field(description="Path to the project directory")
-    active: bool = Field(description="Whether the project is active")
-    id: Optional[int] = Field(description="Database ID of the project if available")
-    is_default: bool = Field(description="Whether this is the default project")
-    permalink: str = Field(description="URL-friendly identifier for the project")
-
-
 class ProjectInfoResponse(BaseModel):
     """Response for the project_info tool."""
 
@@ -190,6 +181,14 @@ class ProjectItem(BaseModel):
     def permalink(self) -> str:  # pragma: no cover
         return generate_permalink(self.name)
 
+    @property
+    def home(self) -> Path:  # pragma: no cover
+        return Path(self.path).expanduser()
+
+    @property
+    def project_url(self) -> str:  # pragma: no cover
+        return f"/{generate_permalink(self.name)}"
+
 
 class ProjectList(BaseModel):
     """Response model for listing projects."""
basic_memory/schemas/sync_report.py (new file)

@@ -0,0 +1,48 @@
+"""Pydantic schemas for sync report responses."""
+
+from typing import TYPE_CHECKING, Dict, Set
+
+from pydantic import BaseModel, Field
+
+# avoid circular imports
+if TYPE_CHECKING:
+    from basic_memory.sync.sync_service import SyncReport
+
+
+class SyncReportResponse(BaseModel):
+    """Report of file changes found compared to database state.
+
+    Used for API responses when scanning or syncing files.
+    """
+
+    new: Set[str] = Field(default_factory=set, description="Files on disk but not in database")
+    modified: Set[str] = Field(default_factory=set, description="Files with different checksums")
+    deleted: Set[str] = Field(default_factory=set, description="Files in database but not on disk")
+    moves: Dict[str, str] = Field(
+        default_factory=dict, description="Files moved (old_path -> new_path)"
+    )
+    checksums: Dict[str, str] = Field(
+        default_factory=dict, description="Current file checksums (path -> checksum)"
+    )
+    total: int = Field(description="Total number of changes")
+
+    @classmethod
+    def from_sync_report(cls, report: "SyncReport") -> "SyncReportResponse":
+        """Convert SyncReport dataclass to Pydantic model.
+
+        Args:
+            report: SyncReport dataclass from sync service
+
+        Returns:
+            SyncReportResponse with same data
+        """
+        return cls(
+            new=report.new,
+            modified=report.modified,
+            deleted=report.deleted,
+            moves=report.moves,
+            checksums=report.checksums,
+            total=report.total,
+        )
+
+    model_config = {"from_attributes": True}
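from_sync_report only reads the six attributes shown, so any object with matching fields converts. A sketch with a stand-in dataclass (the real SyncReport lives in basic_memory.sync.sync_service):

from dataclasses import dataclass, field
from typing import Dict, Set

@dataclass
class StubSyncReport:  # stand-in mirroring the fields accessed above
    new: Set[str] = field(default_factory=set)
    modified: Set[str] = field(default_factory=set)
    deleted: Set[str] = field(default_factory=set)
    moves: Dict[str, str] = field(default_factory=dict)
    checksums: Dict[str, str] = field(default_factory=dict)
    total: int = 0

resp = SyncReportResponse.from_sync_report(StubSyncReport(new={"notes/todo.md"}, total=1))
print(resp.model_dump_json())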
basic_memory/services/context_service.py

@@ -100,20 +100,30 @@ class ContextService:
             f"Building context for URI: '{memory_url}' depth: '{depth}' since: '{since}' limit: '{limit}' offset: '{offset}' max_related: '{max_related}'"
         )
 
+        normalized_path: Optional[str] = None
         if memory_url:
             path = memory_url_path(memory_url)
-            # Pattern matching - use search
-            if "*" in path:
-                logger.debug(f"Pattern search for '{path}'")
+            # Check for wildcards before normalization
+            has_wildcard = "*" in path
+
+            if has_wildcard:
+                # For wildcard patterns, normalize each segment separately to preserve the *
+                parts = path.split("*")
+                normalized_parts = [
+                    generate_permalink(part, split_extension=False) if part else ""
+                    for part in parts
+                ]
+                normalized_path = "*".join(normalized_parts)
+                logger.debug(f"Pattern search for '{normalized_path}'")
                 primary = await self.search_repository.search(
-                    permalink_match=path, limit=limit, offset=offset
+                    permalink_match=normalized_path, limit=limit, offset=offset
                 )
-
-            # Direct lookup for exact path
             else:
-                logger.debug(f"Direct lookup for '{path}'")
+                # For exact paths, normalize the whole thing
+                normalized_path = generate_permalink(path, split_extension=False)
+                logger.debug(f"Direct lookup for '{normalized_path}'")
                 primary = await self.search_repository.search(
-                    permalink=path, limit=limit, offset=offset
+                    permalink=normalized_path, limit=limit, offset=offset
                 )
         else:
             logger.debug(f"Build context for '{types}'")
@@ -151,7 +161,7 @@ class ContextService:
 
         # Create metadata dataclass
         metadata = ContextMetadata(
-            uri=memory_url_path(memory_url) if memory_url else None,
+            uri=normalized_path if memory_url else None,
             types=types,
             depth=depth,
             timeframe=since.isoformat() if since else None,
@@ -246,7 +256,11 @@ class ContextService:
         values = ", ".join([f"('{t}', {i})" for t, i in type_id_pairs])
 
         # Parameters for bindings - include project_id for security filtering
-        params = {"max_depth": max_depth, "max_results": max_results, "project_id": self.search_repository.project_id}
+        params = {
+            "max_depth": max_depth,
+            "max_results": max_results,
+            "project_id": self.search_repository.project_id,
+        }
 
         # Build date and timeframe filters conditionally based on since parameter
         if since:
@@ -258,7 +272,7 @@ class ContextService:
         date_filter = ""
         relation_date_filter = ""
         timeframe_condition = ""
-
+
         # Add project filtering for security - ensure all entities and relations belong to the same project
         project_filter = "AND e.project_id = :project_id"
         relation_project_filter = "AND e_from.project_id = :project_id"