basic-memory 0.12.3__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of basic-memory might be problematic.
Files changed (116)
  1. basic_memory/__init__.py +2 -1
  2. basic_memory/alembic/env.py +1 -1
  3. basic_memory/alembic/versions/5fe1ab1ccebe_add_projects_table.py +108 -0
  4. basic_memory/alembic/versions/647e7a75e2cd_project_constraint_fix.py +104 -0
  5. basic_memory/alembic/versions/cc7172b46608_update_search_index_schema.py +0 -6
  6. basic_memory/api/app.py +43 -13
  7. basic_memory/api/routers/__init__.py +4 -2
  8. basic_memory/api/routers/directory_router.py +63 -0
  9. basic_memory/api/routers/importer_router.py +152 -0
  10. basic_memory/api/routers/knowledge_router.py +139 -37
  11. basic_memory/api/routers/management_router.py +78 -0
  12. basic_memory/api/routers/memory_router.py +6 -62
  13. basic_memory/api/routers/project_router.py +234 -0
  14. basic_memory/api/routers/prompt_router.py +260 -0
  15. basic_memory/api/routers/search_router.py +3 -21
  16. basic_memory/api/routers/utils.py +130 -0
  17. basic_memory/api/template_loader.py +292 -0
  18. basic_memory/cli/app.py +20 -21
  19. basic_memory/cli/commands/__init__.py +2 -1
  20. basic_memory/cli/commands/auth.py +136 -0
  21. basic_memory/cli/commands/db.py +3 -3
  22. basic_memory/cli/commands/import_chatgpt.py +31 -207
  23. basic_memory/cli/commands/import_claude_conversations.py +16 -142
  24. basic_memory/cli/commands/import_claude_projects.py +33 -143
  25. basic_memory/cli/commands/import_memory_json.py +26 -83
  26. basic_memory/cli/commands/mcp.py +71 -18
  27. basic_memory/cli/commands/project.py +102 -70
  28. basic_memory/cli/commands/status.py +19 -9
  29. basic_memory/cli/commands/sync.py +44 -58
  30. basic_memory/cli/commands/tool.py +6 -6
  31. basic_memory/cli/main.py +1 -5
  32. basic_memory/config.py +143 -87
  33. basic_memory/db.py +6 -4
  34. basic_memory/deps.py +227 -30
  35. basic_memory/importers/__init__.py +27 -0
  36. basic_memory/importers/base.py +79 -0
  37. basic_memory/importers/chatgpt_importer.py +222 -0
  38. basic_memory/importers/claude_conversations_importer.py +172 -0
  39. basic_memory/importers/claude_projects_importer.py +148 -0
  40. basic_memory/importers/memory_json_importer.py +93 -0
  41. basic_memory/importers/utils.py +58 -0
  42. basic_memory/markdown/entity_parser.py +5 -2
  43. basic_memory/mcp/auth_provider.py +270 -0
  44. basic_memory/mcp/external_auth_provider.py +321 -0
  45. basic_memory/mcp/project_session.py +103 -0
  46. basic_memory/mcp/prompts/__init__.py +2 -0
  47. basic_memory/mcp/prompts/continue_conversation.py +18 -68
  48. basic_memory/mcp/prompts/recent_activity.py +20 -4
  49. basic_memory/mcp/prompts/search.py +14 -140
  50. basic_memory/mcp/prompts/sync_status.py +116 -0
  51. basic_memory/mcp/prompts/utils.py +3 -3
  52. basic_memory/mcp/{tools → resources}/project_info.py +6 -2
  53. basic_memory/mcp/server.py +86 -13
  54. basic_memory/mcp/supabase_auth_provider.py +463 -0
  55. basic_memory/mcp/tools/__init__.py +24 -0
  56. basic_memory/mcp/tools/build_context.py +43 -8
  57. basic_memory/mcp/tools/canvas.py +17 -3
  58. basic_memory/mcp/tools/delete_note.py +168 -5
  59. basic_memory/mcp/tools/edit_note.py +303 -0
  60. basic_memory/mcp/tools/list_directory.py +154 -0
  61. basic_memory/mcp/tools/move_note.py +299 -0
  62. basic_memory/mcp/tools/project_management.py +332 -0
  63. basic_memory/mcp/tools/read_content.py +15 -6
  64. basic_memory/mcp/tools/read_note.py +26 -7
  65. basic_memory/mcp/tools/recent_activity.py +11 -2
  66. basic_memory/mcp/tools/search.py +189 -8
  67. basic_memory/mcp/tools/sync_status.py +254 -0
  68. basic_memory/mcp/tools/utils.py +184 -12
  69. basic_memory/mcp/tools/view_note.py +66 -0
  70. basic_memory/mcp/tools/write_note.py +24 -17
  71. basic_memory/models/__init__.py +3 -2
  72. basic_memory/models/knowledge.py +16 -4
  73. basic_memory/models/project.py +78 -0
  74. basic_memory/models/search.py +8 -5
  75. basic_memory/repository/__init__.py +2 -0
  76. basic_memory/repository/entity_repository.py +8 -3
  77. basic_memory/repository/observation_repository.py +35 -3
  78. basic_memory/repository/project_info_repository.py +3 -2
  79. basic_memory/repository/project_repository.py +85 -0
  80. basic_memory/repository/relation_repository.py +8 -2
  81. basic_memory/repository/repository.py +107 -15
  82. basic_memory/repository/search_repository.py +192 -54
  83. basic_memory/schemas/__init__.py +6 -0
  84. basic_memory/schemas/base.py +33 -5
  85. basic_memory/schemas/directory.py +30 -0
  86. basic_memory/schemas/importer.py +34 -0
  87. basic_memory/schemas/memory.py +84 -13
  88. basic_memory/schemas/project_info.py +112 -2
  89. basic_memory/schemas/prompt.py +90 -0
  90. basic_memory/schemas/request.py +56 -2
  91. basic_memory/schemas/search.py +1 -1
  92. basic_memory/services/__init__.py +2 -1
  93. basic_memory/services/context_service.py +208 -95
  94. basic_memory/services/directory_service.py +167 -0
  95. basic_memory/services/entity_service.py +399 -6
  96. basic_memory/services/exceptions.py +6 -0
  97. basic_memory/services/file_service.py +14 -15
  98. basic_memory/services/initialization.py +170 -66
  99. basic_memory/services/link_resolver.py +35 -12
  100. basic_memory/services/migration_service.py +168 -0
  101. basic_memory/services/project_service.py +671 -0
  102. basic_memory/services/search_service.py +77 -2
  103. basic_memory/services/sync_status_service.py +181 -0
  104. basic_memory/sync/background_sync.py +25 -0
  105. basic_memory/sync/sync_service.py +102 -21
  106. basic_memory/sync/watch_service.py +63 -39
  107. basic_memory/templates/prompts/continue_conversation.hbs +110 -0
  108. basic_memory/templates/prompts/search.hbs +101 -0
  109. {basic_memory-0.12.3.dist-info → basic_memory-0.13.0.dist-info}/METADATA +24 -2
  110. basic_memory-0.13.0.dist-info/RECORD +138 -0
  111. basic_memory/api/routers/project_info_router.py +0 -274
  112. basic_memory/mcp/main.py +0 -24
  113. basic_memory-0.12.3.dist-info/RECORD +0 -100
  114. {basic_memory-0.12.3.dist-info → basic_memory-0.13.0.dist-info}/WHEEL +0 -0
  115. {basic_memory-0.12.3.dist-info → basic_memory-0.13.0.dist-info}/entry_points.txt +0 -0
  116. {basic_memory-0.12.3.dist-info → basic_memory-0.13.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/repository/search_repository.py

@@ -19,6 +19,7 @@ from basic_memory.schemas.search import SearchItemType
 class SearchIndexRow:
     """Search result with score and metadata."""
 
+    project_id: int
     id: int
     type: str
     file_path: str
@@ -47,6 +48,27 @@ class SearchIndexRow:
     def content(self):
         return self.content_snippet
 
+    @property
+    def directory(self) -> str:
+        """Extract directory part from file_path.
+
+        For a file at "projects/notes/ideas.md", returns "/projects/notes"
+        For a file at root level "README.md", returns "/"
+        """
+        if not self.type == SearchItemType.ENTITY.value and not self.file_path:
+            return ""
+
+        # Split the path by slashes
+        parts = self.file_path.split("/")
+
+        # If there's only one part (e.g., "README.md"), it's at the root
+        if len(parts) <= 1:
+            return "/"
+
+        # Join all parts except the last one (filename)
+        directory_path = "/".join(parts[:-1])
+        return f"/{directory_path}"
+
     def to_insert(self):
         return {
             "id": self.id,
@@ -64,14 +86,28 @@ class SearchIndexRow:
             "category": self.category,
             "created_at": self.created_at if self.created_at else None,
             "updated_at": self.updated_at if self.updated_at else None,
+            "project_id": self.project_id,
         }
 
 
 class SearchRepository:
     """Repository for search index operations."""
 
-    def __init__(self, session_maker: async_sessionmaker[AsyncSession]):
+    def __init__(self, session_maker: async_sessionmaker[AsyncSession], project_id: int):
+        """Initialize with session maker and project_id filter.
+
+        Args:
+            session_maker: SQLAlchemy session maker
+            project_id: Project ID to filter all operations by
+
+        Raises:
+            ValueError: If project_id is None or invalid
+        """
+        if project_id is None or project_id <= 0:  # pragma: no cover
+            raise ValueError("A valid project_id is required for SearchRepository")
+
         self.session_maker = session_maker
+        self.project_id = project_id
 
     async def init_search_index(self):
         """Create or recreate the search index."""
@@ -92,28 +128,93 @@ class SearchRepository:
             is_prefix: Whether to add prefix search capability (* suffix)
 
         For FTS5:
-        - Special characters and phrases need to be quoted
-        - Terms with spaces or special chars need quotes
-        - Boolean operators (AND, OR, NOT) and parentheses are preserved
+        - Boolean operators (AND, OR, NOT) are preserved for complex queries
+        - Terms with FTS5 special characters are quoted to prevent syntax errors
+        - Simple terms get prefix wildcards for better matching
        """
-        if "*" in term:
-            return term
-
-        # Check for boolean operators - if present, return the term as is
-        boolean_operators = [" AND ", " OR ", " NOT ", "(", ")"]
+        # Check for explicit boolean operators - if present, return the term as is
+        boolean_operators = [" AND ", " OR ", " NOT "]
         if any(op in f" {term} " for op in boolean_operators):
            return term
 
-        # List of special characters that need quoting (excluding *)
-        special_chars = ["/", "-", ".", " ", "(", ")", "[", "]", '"', "'"]
+        # Check if term is already a proper wildcard pattern (alphanumeric + *)
+        # e.g., "hello*", "test*world" - these should be left alone
+        if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
+            return term
 
-        # Check if term contains any special characters
-        needs_quotes = any(c in term for c in special_chars)
+        # Characters that can cause FTS5 syntax errors when used as operators
+        # We're more conservative here - only quote when we detect problematic patterns
+        problematic_chars = [
+            '"',
+            "'",
+            "(",
+            ")",
+            "[",
+            "]",
+            "{",
+            "}",
+            "+",
+            "!",
+            "@",
+            "#",
+            "$",
+            "%",
+            "^",
+            "&",
+            "=",
+            "|",
+            "\\",
+            "~",
+            "`",
+        ]
 
-        if needs_quotes:
-            # If the term already contains quotes, escape them and add a wildcard
-            term = term.replace('"', '""')
-            term = f'"{term}"*'
+        # Characters that indicate we should quote (spaces, dots, colons, etc.)
+        # Adding hyphens here because FTS5 can have issues with hyphens followed by wildcards
+        needs_quoting_chars = [" ", ".", ":", ";", ",", "<", ">", "?", "/", "-"]
+
+        # Check if term needs quoting
+        has_problematic = any(c in term for c in problematic_chars)
+        has_spaces_or_special = any(c in term for c in needs_quoting_chars)
+
+        if has_problematic or has_spaces_or_special:
+            # Handle multi-word queries differently from special character queries
+            if " " in term and not any(c in term for c in problematic_chars):
+                # Check if any individual word contains special characters that need quoting
+                words = term.strip().split()
+                has_special_in_words = any(
+                    any(c in word for c in needs_quoting_chars if c != " ") for word in words
+                )
+
+                if not has_special_in_words:
+                    # For multi-word queries with simple words (like "emoji unicode"),
+                    # use boolean AND to handle word order variations
+                    if is_prefix:
+                        # Add prefix wildcard to each word for better matching
+                        prepared_words = [f"{word}*" for word in words if word]
+                    else:
+                        prepared_words = words
+                    term = " AND ".join(prepared_words)
+                else:
+                    # If any word has special characters, quote the entire phrase
+                    escaped_term = term.replace('"', '""')
+                    if is_prefix and not ("/" in term and term.endswith(".md")):
+                        term = f'"{escaped_term}"*'
+                    else:
+                        term = f'"{escaped_term}"'
+            else:
+                # For terms with problematic characters or file paths, use exact phrase matching
+                # Escape any existing quotes by doubling them
+                escaped_term = term.replace('"', '""')
+                # Quote the entire term to handle special characters safely
+                if is_prefix and not ("/" in term and term.endswith(".md")):
+                    # For search terms (not file paths), add prefix matching
+                    term = f'"{escaped_term}"*'
+                else:
+                    # For file paths, use exact matching
+                    term = f'"{escaped_term}"'
+        elif is_prefix:
+            # Only add wildcard for simple terms without special characters
+            term = f"{term}*"
 
         return term
 
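To make the branching above concrete, here is a small usage sketch. It assumes basic-memory 0.13.0 is installed together with its SQLAlchemy/aiosqlite dependencies; _prepare_search_term is an internal method, and the values in the comments are read off the branches above rather than captured from a run.

from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
from basic_memory.repository.search_repository import SearchRepository

engine = create_async_engine("sqlite+aiosqlite:///:memory:")
repo = SearchRepository(async_sessionmaker(engine), project_id=1)

print(repo._prepare_search_term("hello"))             # hello*              (simple term gets a prefix wildcard)
print(repo._prepare_search_term("emoji unicode"))     # emoji* AND unicode* (simple words are AND-joined)
print(repo._prepare_search_term("hello AND world"))   # hello AND world     (explicit boolean query passes through)
print(repo._prepare_search_term("C++"))               # "C++"*              (problematic characters are quoted)
print(repo._prepare_search_term("notes/meeting.md"))  # "notes/meeting.md"  (file paths are quoted exactly, no wildcard)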
@@ -125,7 +226,7 @@ class SearchRepository:
         title: Optional[str] = None,
         types: Optional[List[str]] = None,
         after_date: Optional[datetime] = None,
-        entity_types: Optional[List[SearchItemType]] = None,
+        search_item_types: Optional[List[SearchItemType]] = None,
         limit: int = 10,
         offset: int = 0,
     ) -> List[SearchIndexRow]:
@@ -136,26 +237,30 @@ class SearchRepository:
 
         # Handle text search for title and content
         if search_text:
-            has_boolean = any(
-                op in f" {search_text} " for op in [" AND ", " OR ", " NOT ", "(", ")"]
-            )
-
-            if has_boolean:
-                # If boolean operators are present, use the raw query
-                # No need to prepare it, FTS5 will understand the operators
-                params["text"] = search_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+            # Skip FTS for wildcard-only queries that would cause "unknown special query" errors
+            if search_text.strip() == "*" or search_text.strip() == "":
+                # For wildcard searches, don't add any text conditions - return all results
+                pass
             else:
-                # Standard search with term preparation
-                processed_text = self._prepare_search_term(search_text.strip())
-                params["text"] = processed_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+                # Check for explicit boolean operators - only detect them in proper boolean contexts
+                has_boolean = any(op in f" {search_text} " for op in [" AND ", " OR ", " NOT "])
+
+                if has_boolean:
+                    # If boolean operators are present, use the raw query
+                    # No need to prepare it, FTS5 will understand the operators
+                    params["text"] = search_text
+                    conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+                else:
+                    # Standard search with term preparation
+                    processed_text = self._prepare_search_term(search_text.strip())
+                    params["text"] = processed_text
+                    conditions.append("(title MATCH :text OR content_stems MATCH :text)")
 
         # Handle title match search
         if title:
-            title_text = self._prepare_search_term(title.strip())
-            params["text"] = title_text
-            conditions.append("title MATCH :text")
+            title_text = self._prepare_search_term(title.strip(), is_prefix=False)
+            params["title_text"] = title_text
+            conditions.append("title MATCH :title_text")
 
         # Handle permalink exact search
         if permalink:
@@ -164,19 +269,25 @@ class SearchRepository:
 
         # Handle permalink match search, supports *
         if permalink_match:
-            # Clean and prepare permalink for FTS5 GLOB match
-            permalink_text = self._prepare_search_term(
-                permalink_match.lower().strip(), is_prefix=False
-            )
+            # For GLOB patterns, don't use _prepare_search_term as it will quote slashes
+            # GLOB patterns need to preserve their syntax
+            permalink_text = permalink_match.lower().strip()
            params["permalink"] = permalink_text
            if "*" in permalink_match:
                conditions.append("permalink GLOB :permalink")
            else:
-                conditions.append("permalink MATCH :permalink")
+                # For exact matches without *, we can use FTS5 MATCH
+                # but only prepare the term if it doesn't look like a path
+                if "/" in permalink_text:
+                    conditions.append("permalink = :permalink")
+                else:
+                    permalink_text = self._prepare_search_term(permalink_text, is_prefix=False)
+                    params["permalink"] = permalink_text
+                    conditions.append("permalink MATCH :permalink")
 
         # Handle entity type filter
-        if entity_types:
-            type_list = ", ".join(f"'{t.value}'" for t in entity_types)
+        if search_item_types:
+            type_list = ", ".join(f"'{t.value}'" for t in search_item_types)
             conditions.append(f"type IN ({type_list})")
 
         # Handle type filter
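The permalink handling above chooses one of three SQL operators depending on the shape of the value. A minimal standalone sketch of that decision (hypothetical helper, not part of the package):

def permalink_condition(permalink_match: str) -> str:
    """Pick the SQL operator the repository would use for a permalink_match value."""
    p = permalink_match.lower().strip()
    if "*" in p:
        return "permalink GLOB :permalink"   # wildcard patterns keep their GLOB syntax
    if "/" in p:
        return "permalink = :permalink"      # path-like values use exact equality
    return "permalink MATCH :permalink"      # single tokens go through FTS5 MATCH

assert permalink_condition("specs/*") == "permalink GLOB :permalink"
assert permalink_condition("specs/search") == "permalink = :permalink"
assert permalink_condition("search") == "permalink MATCH :permalink"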
@@ -192,6 +303,10 @@ class SearchRepository:
            # order by most recent first
            order_by_clause = ", updated_at DESC"
 
+        # Always filter by project_id
+        params["project_id"] = self.project_id
+        conditions.append("project_id = :project_id")
+
        # set limit on search query
        params["limit"] = limit
        params["offset"] = offset
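Every query is now scoped to the repository's project. A simplified illustration of how the accumulated conditions and params end up in the final statement (column list and ordering shortened; not the exact SQL used by the package):

conditions = [
    "(title MATCH :text OR content_stems MATCH :text)",
    "project_id = :project_id",
]
params = {"text": "hello*", "project_id": 1, "limit": 10, "offset": 0}

sql = (
    "SELECT project_id, id, title, permalink FROM search_index "
    f"WHERE {' AND '.join(conditions)} "
    "LIMIT :limit OFFSET :offset"
)
print(sql)  # ... WHERE (title MATCH :text OR content_stems MATCH :text) AND project_id = :project_id ...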
@@ -201,6 +316,7 @@ class SearchRepository:
 
        sql = f"""
            SELECT
+                project_id,
                id,
                title,
                permalink,
@@ -224,12 +340,24 @@ class SearchRepository:
        """
 
        logger.trace(f"Search {sql} params: {params}")
-        async with db.scoped_session(self.session_maker) as session:
-            result = await session.execute(text(sql), params)
-            rows = result.fetchall()
+        try:
+            async with db.scoped_session(self.session_maker) as session:
+                result = await session.execute(text(sql), params)
+                rows = result.fetchall()
+        except Exception as e:
+            # Handle FTS5 syntax errors and provide user-friendly feedback
+            if "fts5: syntax error" in str(e).lower():  # pragma: no cover
+                logger.warning(f"FTS5 syntax error for search term: {search_text}, error: {e}")
+                # Return empty results rather than crashing
+                return []
+            else:
+                # Re-raise other database errors
+                logger.error(f"Database error during search: {e}")
+                raise
 
        results = [
            SearchIndexRow(
+                project_id=self.project_id,
                id=row.id,
                title=row.title,
                permalink=row.permalink,
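The try/except above swallows only FTS5 syntax errors and re-raises everything else. The same pattern in a standalone sketch using plain sqlite3 instead of async SQLAlchemy (assuming your SQLite build ships FTS5, which CPython's bundled SQLite normally does):

import sqlite3

def safe_fts_search(conn: sqlite3.Connection, query: str) -> list:
    """Run an FTS5 MATCH query, returning [] on FTS5 syntax errors instead of raising."""
    try:
        return conn.execute(
            "SELECT title FROM search_index WHERE search_index MATCH ?", (query,)
        ).fetchall()
    except sqlite3.OperationalError as e:
        if "fts5: syntax error" in str(e).lower():
            return []  # degrade gracefully, mirroring the repository's behavior
        raise  # anything else is a real database error

conn = sqlite3.connect(":memory:")
conn.execute("CREATE VIRTUAL TABLE search_index USING fts5(title)")
conn.execute("INSERT INTO search_index(title) VALUES ('hello world')")
print(safe_fts_search(conn, "hello*"))     # [('hello world',)]
print(safe_fts_search(conn, "hello AND"))  # [] -- malformed boolean query is swallowed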
@@ -249,10 +377,10 @@ class SearchRepository:
            for row in rows
        ]
 
-        logger.debug(f"Found {len(results)} search results")
+        logger.trace(f"Found {len(results)} search results")
        for r in results:
-            logger.debug(
-                f"Search result: type:{r.type} title: {r.title} permalink: {r.permalink} score: {r.score}"
+            logger.trace(
+                f"Search result: project_id: {r.project_id} type:{r.type} title: {r.title} permalink: {r.permalink} score: {r.score}"
            )
 
        return results
@@ -269,6 +397,10 @@ class SearchRepository:
                {"permalink": search_index_row.permalink},
            )
 
+            # Prepare data for insert with project_id
+            insert_data = search_index_row.to_insert()
+            insert_data["project_id"] = self.project_id
+
            # Insert new record
            await session.execute(
                text("""
@@ -276,15 +408,17 @@ class SearchRepository:
                    id, title, content_stems, content_snippet, permalink, file_path, type, metadata,
                    from_id, to_id, relation_type,
                    entity_id, category,
-                    created_at, updated_at
+                    created_at, updated_at,
+                    project_id
                ) VALUES (
                    :id, :title, :content_stems, :content_snippet, :permalink, :file_path, :type, :metadata,
                    :from_id, :to_id, :relation_type,
                    :entity_id, :category,
-                    :created_at, :updated_at
+                    :created_at, :updated_at,
+                    :project_id
                )
                """),
-                search_index_row.to_insert(),
+                insert_data,
            )
            logger.debug(f"indexed row {search_index_row}")
            await session.commit()
@@ -293,8 +427,10 @@ class SearchRepository:
        """Delete an item from the search index by entity_id."""
        async with db.scoped_session(self.session_maker) as session:
            await session.execute(
-                text("DELETE FROM search_index WHERE entity_id = :entity_id"),
-                {"entity_id": entity_id},
+                text(
+                    "DELETE FROM search_index WHERE entity_id = :entity_id AND project_id = :project_id"
+                ),
+                {"entity_id": entity_id, "project_id": self.project_id},
            )
            await session.commit()
 
@@ -302,8 +438,10 @@ class SearchRepository:
        """Delete an item from the search index."""
        async with db.scoped_session(self.session_maker) as session:
            await session.execute(
-                text("DELETE FROM search_index WHERE permalink = :permalink"),
-                {"permalink": permalink},
+                text(
+                    "DELETE FROM search_index WHERE permalink = :permalink AND project_id = :project_id"
+                ),
+                {"permalink": permalink, "project_id": self.project_id},
            )
            await session.commit()
 
basic_memory/schemas/__init__.py

@@ -44,6 +44,10 @@ from basic_memory.schemas.project_info import (
    ProjectInfoResponse,
 )
 
+from basic_memory.schemas.directory import (
+    DirectoryNode,
+)
+
 # For convenient imports, export all models
 __all__ = [
    # Base
@@ -71,4 +75,6 @@ __all__ = [
    "ActivityMetrics",
    "SystemStatus",
    "ProjectInfoResponse",
+    # Directory
+    "DirectoryNode",
 ]
basic_memory/schemas/base.py

@@ -13,7 +13,7 @@ Key Concepts:
 
 import mimetypes
 import re
-from datetime import datetime
+from datetime import datetime, time
 from pathlib import Path
 from typing import List, Optional, Annotated, Dict
 
@@ -46,15 +46,43 @@ def to_snake_case(name: str) -> str:
    return s2.lower()
 
 
+def parse_timeframe(timeframe: str) -> datetime:
+    """Parse timeframe with special handling for 'today' and other natural language expressions.
+
+    Args:
+        timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
+
+    Returns:
+        datetime: The parsed datetime for the start of the timeframe
+
+    Examples:
+        parse_timeframe('today') -> 2025-06-05 00:00:00 (start of today)
+        parse_timeframe('1d') -> 2025-06-04 14:50:00 (24 hours ago)
+        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00 (1 week ago)
+    """
+    if timeframe.lower() == "today":
+        # Return start of today (00:00:00)
+        return datetime.combine(datetime.now().date(), time.min)
+    else:
+        # Use dateparser for other formats
+        parsed = parse(timeframe)
+        if not parsed:
+            raise ValueError(f"Could not parse timeframe: {timeframe}")
+        return parsed
+
+
 def validate_timeframe(timeframe: str) -> str:
    """Convert human readable timeframes to a duration relative to the current time."""
    if not isinstance(timeframe, str):
        raise ValueError("Timeframe must be a string")
 
-    # Parse relative time expression
-    parsed = parse(timeframe)
-    if not parsed:
-        raise ValueError(f"Could not parse timeframe: {timeframe}")
+    # Preserve special timeframe strings that need custom handling
+    special_timeframes = ["today"]
+    if timeframe.lower() in special_timeframes:
+        return timeframe.lower()
+
+    # Parse relative time expression using our enhanced parser
+    parsed = parse_timeframe(timeframe)
 
    # Convert to duration
    now = datetime.now()
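A quick usage sketch of the new timeframe helpers (assuming basic-memory 0.13.0 is installed; "today" snaps to local midnight, everything else still goes through dateparser):

from datetime import datetime
from basic_memory.schemas.base import parse_timeframe, validate_timeframe

assert parse_timeframe("today").time().isoformat() == "00:00:00"   # start of today
assert parse_timeframe("1 week ago") < datetime.now()              # dateparser handles relative phrases

# validate_timeframe now passes "today" through unchanged instead of converting it to a duration
assert validate_timeframe("today") == "today"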
basic_memory/schemas/directory.py (new file)

@@ -0,0 +1,30 @@
+"""Schemas for directory tree operations."""
+
+from datetime import datetime
+from typing import List, Optional, Literal
+
+from pydantic import BaseModel
+
+
+class DirectoryNode(BaseModel):
+    """Directory node in file system."""
+
+    name: str
+    file_path: Optional[str] = None  # Original path without leading slash (matches DB)
+    directory_path: str  # Path with leading slash for directory navigation
+    type: Literal["directory", "file"]
+    children: List["DirectoryNode"] = []  # Default to empty list
+    title: Optional[str] = None
+    permalink: Optional[str] = None
+    entity_id: Optional[int] = None
+    entity_type: Optional[str] = None
+    content_type: Optional[str] = None
+    updated_at: Optional[datetime] = None
+
+    @property
+    def has_children(self) -> bool:
+        return bool(self.children)
+
+
+# Support for recursive model
+DirectoryNode.model_rebuild()
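A small construction sketch for the new recursive model (field values are invented for illustration; nothing here validates that the paths are consistent):

from basic_memory.schemas.directory import DirectoryNode

ideas = DirectoryNode(
    name="ideas.md",
    file_path="projects/notes/ideas.md",        # stored without a leading slash, matching the DB
    directory_path="/projects/notes/ideas.md",
    type="file",
    title="Ideas",
)
notes = DirectoryNode(name="notes", directory_path="/projects/notes", type="directory", children=[ideas])

assert notes.has_children
assert not ideas.has_children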
basic_memory/schemas/importer.py (new file)

@@ -0,0 +1,34 @@
+"""Schemas for import services."""
+
+from typing import Dict, Optional
+
+from pydantic import BaseModel
+
+
+class ImportResult(BaseModel):
+    """Common import result schema."""
+
+    import_count: Dict[str, int]
+    success: bool
+    error_message: Optional[str] = None
+
+
+class ChatImportResult(ImportResult):
+    """Result schema for chat imports."""
+
+    conversations: int = 0
+    messages: int = 0
+
+
+class ProjectImportResult(ImportResult):
+    """Result schema for project imports."""
+
+    documents: int = 0
+    prompts: int = 0
+
+
+class EntityImportResult(ImportResult):
+    """Result schema for entity imports."""
+
+    entities: int = 0
+    relations: int = 0
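These models are plain pydantic result envelopes; a minimal usage sketch (values invented):

from basic_memory.schemas.importer import ChatImportResult

result = ChatImportResult(
    import_count={"conversations": 2, "messages": 40},
    success=True,
    conversations=2,
    messages=40,
)
assert result.error_message is None
print(result.model_dump())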
basic_memory/schemas/memory.py

@@ -9,8 +9,44 @@ from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter
 from basic_memory.schemas.search import SearchItemType
 
 
+def validate_memory_url_path(path: str) -> bool:
+    """Validate that a memory URL path is well-formed.
+
+    Args:
+        path: The path part of a memory URL (without memory:// prefix)
+
+    Returns:
+        True if the path is valid, False otherwise
+
+    Examples:
+        >>> validate_memory_url_path("specs/search")
+        True
+        >>> validate_memory_url_path("memory//test")  # Double slash
+        False
+        >>> validate_memory_url_path("invalid://test")  # Contains protocol
+        False
+    """
+    if not path or not path.strip():
+        return False
+
+    # Check for invalid protocol schemes within the path first (more specific)
+    if "://" in path:
+        return False
+
+    # Check for double slashes (except at the beginning for absolute paths)
+    if "//" in path:
+        return False
+
+    # Check for invalid characters (excluding * which is used for pattern matching)
+    invalid_chars = {"<", ">", '"', "|", "?"}
+    if any(char in path for char in invalid_chars):
+        return False
+
+    return True
+
+
 def normalize_memory_url(url: str | None) -> str:
-    """Normalize a MemoryUrl string.
+    """Normalize a MemoryUrl string with validation.
 
    Args:
        url: A path like "specs/search" or "memory://specs/search"
@@ -18,22 +54,43 @@ def normalize_memory_url(url: str | None) -> str:
    Returns:
        Normalized URL starting with memory://
 
+    Raises:
+        ValueError: If the URL path is malformed
+
    Examples:
        >>> normalize_memory_url("specs/search")
        'memory://specs/search'
        >>> normalize_memory_url("memory://specs/search")
        'memory://specs/search'
+        >>> normalize_memory_url("memory//test")
+        Traceback (most recent call last):
+        ...
+        ValueError: Invalid memory URL path: 'memory//test' contains double slashes
    """
    if not url:
        return ""
 
    clean_path = url.removeprefix("memory://")
+
+    # Validate the extracted path
+    if not validate_memory_url_path(clean_path):
+        # Provide specific error messages for common issues
+        if "://" in clean_path:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains protocol scheme")
+        elif "//" in clean_path:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains double slashes")
+        elif not clean_path.strip():
+            raise ValueError("Memory URL path cannot be empty or whitespace")
+        else:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains invalid characters")
+
    return f"memory://{clean_path}"
 
 
 MemoryUrl = Annotated[
    str,
    BeforeValidator(str.strip),  # Clean whitespace
+    BeforeValidator(normalize_memory_url),  # Validate and normalize the URL
    MinLen(1),
    MaxLen(2028),
 ]
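Because normalize_memory_url is now attached to MemoryUrl as a BeforeValidator, malformed paths are rejected at validation time instead of passing through silently. A short sketch (assuming basic-memory 0.13.0 is installed):

from basic_memory.schemas.memory import normalize_memory_url, validate_memory_url_path

assert normalize_memory_url("specs/search") == "memory://specs/search"
assert validate_memory_url_path("memory//test") is False

try:
    normalize_memory_url("memory//test")
except ValueError as exc:
    print(exc)  # Invalid memory URL path: 'memory//test' contains double slashes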
@@ -100,27 +157,41 @@ class MemoryMetadata(BaseModel):
    uri: Optional[str] = None
    types: Optional[List[SearchItemType]] = None
    depth: int
-    timeframe: str
+    timeframe: Optional[str] = None
    generated_at: datetime
-    total_results: int
-    total_relations: int
+    primary_count: Optional[int] = None  # Changed field name
+    related_count: Optional[int] = None  # Changed field name
+    total_results: Optional[int] = None  # For backward compatibility
+    total_relations: Optional[int] = None
+    total_observations: Optional[int] = None
 
 
-class GraphContext(BaseModel):
-    """Complete context response."""
+class ContextResult(BaseModel):
+    """Context result containing a primary item with its observations and related items."""
+
+    primary_result: EntitySummary | RelationSummary | ObservationSummary = Field(
+        description="Primary item"
+    )
 
-    # Direct matches
-    primary_results: Sequence[EntitySummary | RelationSummary | ObservationSummary] = Field(
-        description="results directly matching URI"
+    observations: Sequence[ObservationSummary] = Field(
+        description="Observations belonging to this entity", default_factory=list
    )
 
-    # Related entities
    related_results: Sequence[EntitySummary | RelationSummary | ObservationSummary] = Field(
-        description="related results"
+        description="Related items", default_factory=list
+    )
+
+
+class GraphContext(BaseModel):
+    """Complete context response."""
+
+    # hierarchical results
+    results: Sequence[ContextResult] = Field(
+        description="Hierarchical results with related items nested", default_factory=list
    )
 
    # Context metadata
    metadata: MemoryMetadata
 
-    page: int = 1
-    page_size: int = 1
+    page: Optional[int] = None
+    page_size: Optional[int] = None
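The practical effect of this change is easiest to see in the payload shape: 0.12.3 returned flat primary_results/related_results lists, while 0.13.0 nests observations and related items under each primary result. A hypothetical, JSON-shaped sketch with invented values:

# 0.12.3-style context payload (flat lists)
old_context = {
    "primary_results": [{"permalink": "specs/search", "type": "entity"}],
    "related_results": [{"permalink": "specs/indexing", "type": "entity"}],
    "metadata": {"depth": 1, "total_results": 1, "total_relations": 1},
}

# 0.13.0-style context payload (hierarchical): related items hang off each primary result
new_context = {
    "results": [
        {
            "primary_result": {"permalink": "specs/search", "type": "entity"},
            "observations": [],
            "related_results": [{"permalink": "specs/indexing", "type": "entity"}],
        }
    ],
    "metadata": {"depth": 1, "primary_count": 1, "related_count": 1},
}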