basic-memory 0.13.0b4__py3-none-any.whl → 0.13.0b5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this release of basic-memory as potentially problematic.

Files changed (39)
  1. basic_memory/__init__.py +1 -7
  2. basic_memory/api/routers/knowledge_router.py +13 -0
  3. basic_memory/api/routers/memory_router.py +3 -4
  4. basic_memory/api/routers/project_router.py +6 -5
  5. basic_memory/api/routers/prompt_router.py +2 -2
  6. basic_memory/cli/commands/project.py +2 -2
  7. basic_memory/cli/commands/status.py +1 -1
  8. basic_memory/cli/commands/sync.py +1 -1
  9. basic_memory/mcp/prompts/__init__.py +2 -0
  10. basic_memory/mcp/prompts/sync_status.py +116 -0
  11. basic_memory/mcp/server.py +6 -6
  12. basic_memory/mcp/tools/__init__.py +4 -0
  13. basic_memory/mcp/tools/build_context.py +32 -7
  14. basic_memory/mcp/tools/canvas.py +2 -1
  15. basic_memory/mcp/tools/delete_note.py +159 -4
  16. basic_memory/mcp/tools/edit_note.py +17 -11
  17. basic_memory/mcp/tools/move_note.py +252 -40
  18. basic_memory/mcp/tools/project_management.py +35 -3
  19. basic_memory/mcp/tools/read_note.py +9 -2
  20. basic_memory/mcp/tools/search.py +180 -8
  21. basic_memory/mcp/tools/sync_status.py +254 -0
  22. basic_memory/mcp/tools/utils.py +47 -0
  23. basic_memory/mcp/tools/view_note.py +66 -0
  24. basic_memory/mcp/tools/write_note.py +13 -2
  25. basic_memory/repository/search_repository.py +99 -26
  26. basic_memory/schemas/base.py +33 -5
  27. basic_memory/schemas/memory.py +58 -1
  28. basic_memory/services/entity_service.py +4 -4
  29. basic_memory/services/initialization.py +32 -5
  30. basic_memory/services/link_resolver.py +20 -5
  31. basic_memory/services/migration_service.py +168 -0
  32. basic_memory/services/project_service.py +97 -47
  33. basic_memory/services/sync_status_service.py +181 -0
  34. basic_memory/sync/sync_service.py +55 -2
  35. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b5.dist-info}/METADATA +2 -2
  36. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b5.dist-info}/RECORD +39 -34
  37. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b5.dist-info}/WHEEL +0 -0
  38. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b5.dist-info}/entry_points.txt +0 -0
  39. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b5.dist-info}/licenses/LICENSE +0 -0

basic_memory/mcp/tools/sync_status.py (new file)
@@ -0,0 +1,254 @@
+"""Sync status tool for Basic Memory MCP server."""
+
+from typing import Optional
+
+from loguru import logger
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.project_session import get_active_project
+
+
+def _get_all_projects_status() -> list[str]:
+    """Get status lines for all configured projects."""
+    status_lines = []
+
+    try:
+        from basic_memory.config import app_config
+        from basic_memory.services.sync_status_service import sync_status_tracker
+
+        if app_config.projects:
+            status_lines.extend(["", "---", "", "**All Projects Status:**"])
+
+            for project_name, project_path in app_config.projects.items():
+                # Check if this project has sync status
+                project_sync_status = sync_status_tracker.get_project_status(project_name)
+
+                if project_sync_status:
+                    # Project has tracked sync activity
+                    if project_sync_status.status.value == "watching":
+                        # Project is actively watching for changes (steady state)
+                        status_icon = "👁️"
+                        status_text = "Watching for changes"
+                    elif project_sync_status.status.value == "completed":
+                        # Sync completed but not yet watching - transitional state
+                        status_icon = "✅"
+                        status_text = "Sync completed"
+                    elif project_sync_status.status.value in ["scanning", "syncing"]:
+                        status_icon = "🔄"
+                        status_text = "Sync in progress"
+                        if project_sync_status.files_total > 0:
+                            progress_pct = (
+                                project_sync_status.files_processed
+                                / project_sync_status.files_total
+                            ) * 100
+                            status_text += f" ({project_sync_status.files_processed}/{project_sync_status.files_total}, {progress_pct:.0f}%)"
+                    elif project_sync_status.status.value == "failed":
+                        status_icon = "❌"
+                        status_text = f"Sync error: {project_sync_status.error or 'Unknown error'}"
+                    else:
+                        status_icon = "⏸️"
+                        status_text = project_sync_status.status.value.title()
+                else:
+                    # Project has no tracked sync activity - will be synced automatically
+                    status_icon = "⏳"
+                    status_text = "Pending sync"
+
+                status_lines.append(f"- {status_icon} **{project_name}**: {status_text}")
+
+    except Exception as e:
+        logger.debug(f"Could not get project config for comprehensive status: {e}")
+
+    return status_lines
+
+
+@mcp.tool(
+    description="""Check the status of file synchronization and background operations.
+
+    Use this tool to:
+    - Check if file sync is in progress or completed
+    - Get detailed sync progress information
+    - Understand if your files are fully indexed
+    - Get specific error details if sync operations failed
+    - Monitor initial project setup and legacy migration
+
+    This covers all sync operations including:
+    - Initial project setup and file indexing
+    - Legacy project migration to unified database
+    - Ongoing file monitoring and updates
+    - Background processing of knowledge graphs
+    """,
+)
+async def sync_status(project: Optional[str] = None) -> str:
+    """Get current sync status and system readiness information.
+
+    This tool provides detailed information about any ongoing or completed
+    sync operations, helping users understand when their files are ready.
+
+    Args:
+        project: Optional project name to get project-specific context
+
+    Returns:
+        Formatted sync status with progress, readiness, and guidance
+    """
+    logger.info("MCP tool call tool=sync_status")
+
+    status_lines = []
+
+    try:
+        from basic_memory.services.sync_status_service import sync_status_tracker
+
+        # Get overall summary
+        summary = sync_status_tracker.get_summary()
+        is_ready = sync_status_tracker.is_ready
+
+        # Header
+        status_lines.extend(
+            [
+                "# Basic Memory Sync Status",
+                "",
+                f"**Current Status**: {summary}",
+                f"**System Ready**: {'✅ Yes' if is_ready else '🔄 Processing'}",
+                "",
+            ]
+        )
+
+        if is_ready:
+            status_lines.extend(
+                [
+                    "✅ **All sync operations completed**",
+                    "",
+                    "- File indexing is complete",
+                    "- Knowledge graphs are up to date",
+                    "- All Basic Memory tools are fully operational",
+                    "",
+                    "Your knowledge base is ready for use!",
+                ]
+            )
+
+            # Show all projects status even when ready
+            status_lines.extend(_get_all_projects_status())
+        else:
+            # System is still processing - show both active and all projects
+            all_sync_projects = sync_status_tracker.get_all_projects()
+
+            active_projects = [
+                p for p in all_sync_projects.values() if p.status.value in ["scanning", "syncing"]
+            ]
+            failed_projects = [p for p in all_sync_projects.values() if p.status.value == "failed"]
+
+            if active_projects:
+                status_lines.extend(
+                    [
+                        "🔄 **File synchronization in progress**",
+                        "",
+                        "Basic Memory is automatically processing all configured projects and building knowledge graphs.",
+                        "This typically takes 1-3 minutes depending on the amount of content.",
+                        "",
+                        "**Currently Processing:**",
+                    ]
+                )
+
+                for project_status in active_projects:
+                    progress = ""
+                    if project_status.files_total > 0:
+                        progress_pct = (
+                            project_status.files_processed / project_status.files_total
+                        ) * 100
+                        progress = f" ({project_status.files_processed}/{project_status.files_total}, {progress_pct:.0f}%)"
+
+                    status_lines.append(
+                        f"- **{project_status.project_name}**: {project_status.message}{progress}"
+                    )
+
+                status_lines.extend(
+                    [
+                        "",
+                        "**What's happening:**",
+                        "- Scanning and indexing markdown files",
+                        "- Building entity and relationship graphs",
+                        "- Setting up full-text search indexes",
+                        "- Processing file changes and updates",
+                        "",
+                        "**What you can do:**",
+                        "- Wait for automatic processing to complete - no action needed",
+                        "- Use this tool again to check progress",
+                        "- Simple operations may work already",
+                        "- All projects will be available once sync finishes",
+                    ]
+                )
+
+            # Handle failed projects (independent of active projects)
+            if failed_projects:
+                status_lines.extend(["", "❌ **Some projects failed to sync:**", ""])
+
+                for project_status in failed_projects:
+                    status_lines.append(
+                        f"- **{project_status.project_name}**: {project_status.error or 'Unknown error'}"
+                    )
+
+                status_lines.extend(
+                    [
+                        "",
+                        "**Next steps:**",
+                        "1. Check the logs for detailed error information",
+                        "2. Ensure file permissions allow read/write access",
+                        "3. Try restarting the MCP server",
+                        "4. If issues persist, consider filing a support issue",
+                    ]
+                )
+            elif not active_projects:
+                # No active or failed projects - must be pending
+                status_lines.extend(
+                    [
+                        "⏳ **Sync operations pending**",
+                        "",
+                        "File synchronization has been queued but hasn't started yet.",
+                        "This usually resolves automatically within a few seconds.",
+                    ]
+                )
+
+            # Add comprehensive project status for all configured projects
+            all_projects_status = _get_all_projects_status()
+            if all_projects_status:
+                status_lines.extend(all_projects_status)
+
+                # Add explanation about automatic syncing if there are unsynced projects
+                unsynced_count = sum(1 for line in all_projects_status if "⏳" in line)
+                if unsynced_count > 0 and not is_ready:
+                    status_lines.extend(
+                        [
+                            "",
+                            "**Note**: All configured projects will be automatically synced during startup.",
+                            "You don't need to manually switch projects - Basic Memory handles this for you.",
+                        ]
+                    )
+
+        # Add project context if provided
+        if project:
+            try:
+                active_project = get_active_project(project)
+                status_lines.extend(
+                    [
+                        "",
+                        "---",
+                        "",
+                        f"**Active Project**: {active_project.name}",
+                        f"**Project Path**: {active_project.home}",
+                    ]
+                )
+            except Exception as e:
+                logger.debug(f"Could not get project info: {e}")
+
+        return "\n".join(status_lines)
+
+    except Exception as e:
+        return f"""# Sync Status - Error
+
+❌ **Unable to check sync status**: {str(e)}
+
+**Troubleshooting:**
+- The system may still be starting up
+- Try waiting a few seconds and checking again
+- Check logs for detailed error information
+- Consider restarting if the issue persists
+"""
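
Aside (not part of the diff): the tool above consumes only a small surface of the shared tracker. A minimal sketch of reading that same surface from other code, assuming nothing beyond the members used above (get_project_status, status.value, files_processed, files_total, error); the helper name is hypothetical:

    from basic_memory.services.sync_status_service import sync_status_tracker

    def one_line_status(project_name: str) -> str:
        """Condense one project's tracked sync state into a single line."""
        state = sync_status_tracker.get_project_status(project_name)
        if state is None:
            return f"{project_name}: pending sync"
        if state.status.value in ("scanning", "syncing") and state.files_total > 0:
            return f"{project_name}: syncing {state.files_processed}/{state.files_total} files"
        if state.status.value == "failed":
            return f"{project_name}: failed ({state.error or 'unknown error'})"
        return f"{project_name}: {state.status.value}"
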
basic_memory/mcp/tools/utils.py
@@ -506,3 +506,50 @@ async def call_delete(
 
     except HTTPStatusError as e:
         raise ToolError(error_message) from e
+
+
+def check_migration_status() -> Optional[str]:
+    """Check if sync/migration is in progress and return status message if so.
+
+    Returns:
+        Status message if sync is in progress, None if system is ready
+    """
+    try:
+        from basic_memory.services.sync_status_service import sync_status_tracker
+
+        if not sync_status_tracker.is_ready:
+            return sync_status_tracker.get_summary()
+        return None
+    except Exception:
+        # If there's any error checking sync status, assume ready
+        return None
+
+
+async def wait_for_migration_or_return_status(timeout: float = 5.0) -> Optional[str]:
+    """Wait briefly for sync/migration to complete, or return status message.
+
+    Args:
+        timeout: Maximum time to wait for sync completion
+
+    Returns:
+        Status message if sync is still in progress, None if ready
+    """
+    try:
+        from basic_memory.services.sync_status_service import sync_status_tracker
+        import asyncio
+
+        if sync_status_tracker.is_ready:
+            return None
+
+        # Wait briefly for sync to complete
+        start_time = asyncio.get_event_loop().time()
+        while (asyncio.get_event_loop().time() - start_time) < timeout:
+            if sync_status_tracker.is_ready:
+                return None
+            await asyncio.sleep(0.1)  # Check every 100ms
+
+        # Still not ready after timeout
+        return sync_status_tracker.get_summary()
+    except Exception:  # pragma: no cover
+        # If there's any error, assume ready
+        return None
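
Aside (not part of the diff): a sketch of how a tool can use the helper added above as a guard. It mirrors the pattern write_note adopts later in this diff; the function name here is hypothetical:

    from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status

    async def guarded_tool_body() -> str:
        """Return a status report while sync is still running, otherwise proceed."""
        status = await wait_for_migration_or_return_status(timeout=5.0)
        if status:
            return f"# System Status\n\n{status}\n\nPlease retry once sync completes."
        return "ready"  # placeholder for the real tool logic
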
basic_memory/mcp/tools/view_note.py (new file)
@@ -0,0 +1,66 @@
+"""View note tool for Basic Memory MCP server."""
+
+from textwrap import dedent
+from typing import Optional
+
+from loguru import logger
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.tools.read_note import read_note
+
+
+@mcp.tool(
+    description="View a note as a formatted artifact for better readability.",
+)
+async def view_note(
+    identifier: str, page: int = 1, page_size: int = 10, project: Optional[str] = None
+) -> str:
+    """View a markdown note as a formatted artifact.
+
+    This tool reads a note using the same logic as read_note but displays the content
+    as a markdown artifact for better viewing experience in Claude Desktop.
+
+    After calling this tool, create an artifact using the returned content to display
+    the note in a readable format. The tool returns the note content that should be
+    used to create a markdown artifact.
+
+    Args:
+        identifier: The title or permalink of the note to view
+        page: Page number for paginated results (default: 1)
+        page_size: Number of items per page (default: 10)
+        project: Optional project name to read from. If not provided, uses current active project.
+
+    Returns:
+        The note content as a markdown artifact with a confirmation message.
+    """
+
+    logger.info(f"Viewing note: {identifier}")
+
+    # Call the existing read_note logic
+    content = await read_note(identifier, page, page_size, project)
+
+    # Check if this is an error message (note not found)
+    if "# Note Not Found:" in content:
+        return content  # Return error message directly instead of creating artifact
+
+    # Extract title from content if possible
+    title = identifier
+    lines = content.split("\n")
+    for line in lines:
+        if line.startswith("title:"):
+            # Extract title from frontmatter
+            title = line.split("title:", 1)[1].strip().strip('"').strip("'")
+            break
+        elif line.startswith("# "):
+            # Extract title from first heading
+            title = line[2:].strip()
+            break
+
+    # Return the artifact XML as part of the response
+    artifact = f'<artifact identifier="note-{hash(identifier) & 0x7FFFFFFF}" type="text/markdown" title="{title}">\n{content}\n</artifact>'
+
+    return dedent(f"""
+        <instructions>
+        Create an artifact using the returned artifact content to display the note in a readable format.
+        </instructions>
+        {artifact}\n\n✅ Note displayed as artifact: **{title}**""")
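
Aside (not part of the diff): the title-extraction loop above can be read as a small standalone helper. A sketch with the same behavior, under a hypothetical name (frontmatter title wins, otherwise the first heading, otherwise the identifier):

    def extract_title(content: str, fallback: str) -> str:
        """Mirror view_note's title lookup: frontmatter title, then first markdown heading."""
        for line in content.split("\n"):
            if line.startswith("title:"):
                return line.split("title:", 1)[1].strip().strip('"').strip("'")
            if line.startswith("# "):
                return line[2:].strip()
        return fallback
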
basic_memory/mcp/tools/write_note.py
@@ -54,7 +54,8 @@ async def write_note(
     Args:
         title: The title of the note
         content: Markdown content for the note, can include observations and relations
-        folder: the folder where the file should be saved
+        folder: Folder path relative to project root where the file should be saved.
+            Use forward slashes (/) as separators. Examples: "notes", "projects/2025", "research/ml"
         tags: Tags to categorize the note. Can be a list of strings, a comma-separated string, or None.
             Note: If passing from external MCP clients, use a string format (e.g. "tag1,tag2,tag3")
         project: Optional project name to write to. If not provided, uses current active project.
@@ -69,6 +70,13 @@ async def write_note(
     """
     logger.info(f"MCP tool call tool=write_note folder={folder}, title={title}, tags={tags}")
 
+    # Check migration status and wait briefly if needed
+    from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status
+
+    migration_status = await wait_for_migration_or_return_status(timeout=5.0)
+    if migration_status:  # pragma: no cover
+        return f"# System Status\n\n{migration_status}\n\nPlease wait for migration to complete before creating notes."
+
     # Process tags using the helper function
     tag_list = parse_tags(tags)
     # Create the entity request
@@ -120,7 +128,10 @@ async def write_note(
         summary.append(f"- Resolved: {resolved}")
     if unresolved:
         summary.append(f"- Unresolved: {unresolved}")
-        summary.append("\nUnresolved relations will be retried on next sync.")
+        summary.append("\nNote: Unresolved relations point to entities that don't exist yet.")
+        summary.append(
+            "They will be automatically resolved when target entities are created or during sync operations."
+        )
 
     if tag_list:
         summary.append(f"\n## Tags\n- {', '.join(tag_list)}")
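
Aside (not part of the diff): the docstring change above mainly affects how callers pass folder and tags. An illustrative call shape only, based on the documented parameters; real invocations normally go through an MCP client rather than a direct import, so this is not a confirmed invocation path:

    await write_note(
        title="Release Notes",
        content="# Release Notes\n\nShipped 0.13.0b5.",
        folder="projects/2025",   # forward-slash path relative to the project root
        tags="planning,release",  # comma-separated string form for external MCP clients
    )
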
basic_memory/repository/search_repository.py
@@ -128,34 +128,90 @@ class SearchRepository:
             is_prefix: Whether to add prefix search capability (* suffix)
 
         For FTS5:
-        - Special characters and phrases need to be quoted
-        - Terms with spaces or special chars need quotes
         - Boolean operators (AND, OR, NOT) are preserved for complex queries
+        - Terms with FTS5 special characters are quoted to prevent syntax errors
+        - Simple terms get prefix wildcards for better matching
         """
-        if "*" in term:
-            return term
-
         # Check for explicit boolean operators - if present, return the term as is
         boolean_operators = [" AND ", " OR ", " NOT "]
         if any(op in f" {term} " for op in boolean_operators):
            return term
 
-        # List of FTS5 special characters that need escaping/quoting
-        special_chars = ["/", "-", ".", " ", "(", ")", "[", "]", '"', "'"]
+        # Check if term is already a proper wildcard pattern (alphanumeric + *)
+        # e.g., "hello*", "test*world" - these should be left alone
+        if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
+            return term
 
-        # Check if term contains any special characters
-        needs_quotes = any(c in term for c in special_chars)
+        # Characters that can cause FTS5 syntax errors when used as operators
+        # We're more conservative here - only quote when we detect problematic patterns
+        problematic_chars = [
+            '"',
+            "'",
+            "(",
+            ")",
+            "[",
+            "]",
+            "{",
+            "}",
+            "+",
+            "!",
+            "@",
+            "#",
+            "$",
+            "%",
+            "^",
+            "&",
+            "=",
+            "|",
+            "\\",
+            "~",
+            "`",
+        ]
 
-        if needs_quotes:
-            # Escape any existing quotes by doubling them
-            escaped_term = term.replace('"', '""')
-            # Quote the entire term to handle special characters safely
-            if is_prefix and not ("/" in term and term.endswith(".md")):
-                # For search terms (not file paths), add prefix matching
-                term = f'"{escaped_term}"*'
+        # Characters that indicate we should quote (spaces, dots, colons, etc.)
+        # Adding hyphens here because FTS5 can have issues with hyphens followed by wildcards
+        needs_quoting_chars = [" ", ".", ":", ";", ",", "<", ">", "?", "/", "-"]
+
+        # Check if term needs quoting
+        has_problematic = any(c in term for c in problematic_chars)
+        has_spaces_or_special = any(c in term for c in needs_quoting_chars)
+
+        if has_problematic or has_spaces_or_special:
+            # Handle multi-word queries differently from special character queries
+            if " " in term and not any(c in term for c in problematic_chars):
+                # Check if any individual word contains special characters that need quoting
+                words = term.strip().split()
+                has_special_in_words = any(
+                    any(c in word for c in needs_quoting_chars if c != " ") for word in words
+                )
+
+                if not has_special_in_words:
+                    # For multi-word queries with simple words (like "emoji unicode"),
+                    # use boolean AND to handle word order variations
+                    if is_prefix:
+                        # Add prefix wildcard to each word for better matching
+                        prepared_words = [f"{word}*" for word in words if word]
+                    else:
+                        prepared_words = words
+                    term = " AND ".join(prepared_words)
+                else:
+                    # If any word has special characters, quote the entire phrase
+                    escaped_term = term.replace('"', '""')
+                    if is_prefix and not ("/" in term and term.endswith(".md")):
+                        term = f'"{escaped_term}"*'
+                    else:
+                        term = f'"{escaped_term}"'
            else:
-                # For file paths, use exact matching
-                term = f'"{escaped_term}"'
+                # For terms with problematic characters or file paths, use exact phrase matching
+                # Escape any existing quotes by doubling them
+                escaped_term = term.replace('"', '""')
+                # Quote the entire term to handle special characters safely
+                if is_prefix and not ("/" in term and term.endswith(".md")):
+                    # For search terms (not file paths), add prefix matching
+                    term = f'"{escaped_term}"*'
+                else:
+                    # For file paths, use exact matching
+                    term = f'"{escaped_term}"'
         elif is_prefix:
             # Only add wildcard for simple terms without special characters
             term = f"{term}*"
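
Aside (not part of the diff): expected routing of the rewritten _prepare_search_term, traced from the branches above (illustrative; assumes the default is_prefix=True):

    # "hello"           -> "hello*"                  simple term, prefix wildcard added
    # "hello*"          -> "hello*"                  already a wildcard pattern, left alone
    # "emoji unicode"   -> "emoji* AND unicode*"     simple multi-word query, AND of prefixed words
    # "config.json"     -> '"config.json"*'          dot triggers quoting, prefix kept
    # "docs/setup.md"   -> '"docs/setup.md"'         markdown file path, exact quoted match
    # "foo AND bar"     -> "foo AND bar"             explicit boolean query preserved
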
@@ -208,15 +264,21 @@ class SearchRepository:
 
         # Handle permalink match search, supports *
         if permalink_match:
-            # Clean and prepare permalink for FTS5 GLOB match
-            permalink_text = self._prepare_search_term(
-                permalink_match.lower().strip(), is_prefix=False
-            )
+            # For GLOB patterns, don't use _prepare_search_term as it will quote slashes
+            # GLOB patterns need to preserve their syntax
+            permalink_text = permalink_match.lower().strip()
             params["permalink"] = permalink_text
             if "*" in permalink_match:
                 conditions.append("permalink GLOB :permalink")
             else:
-                conditions.append("permalink MATCH :permalink")
+                # For exact matches without *, we can use FTS5 MATCH
+                # but only prepare the term if it doesn't look like a path
+                if "/" in permalink_text:
+                    conditions.append("permalink = :permalink")
+                else:
+                    permalink_text = self._prepare_search_term(permalink_text, is_prefix=False)
+                    params["permalink"] = permalink_text
+                    conditions.append("permalink MATCH :permalink")
 
         # Handle entity type filter
         if search_item_types:
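
Aside (not part of the diff): the permalink branch above now routes to three different SQL conditions, traced from the code (illustrative inputs):

    # permalink_match = "specs/*"       -> permalink GLOB :permalink   wildcard pattern kept verbatim
    # permalink_match = "specs/search"  -> permalink = :permalink      exact match for path-like values, no FTS5 quoting
    # permalink_match = "search"        -> permalink MATCH :permalink  term passed through _prepare_search_term first
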
@@ -273,9 +335,20 @@ class SearchRepository:
         """
 
         logger.trace(f"Search {sql} params: {params}")
-        async with db.scoped_session(self.session_maker) as session:
-            result = await session.execute(text(sql), params)
-            rows = result.fetchall()
+        try:
+            async with db.scoped_session(self.session_maker) as session:
+                result = await session.execute(text(sql), params)
+                rows = result.fetchall()
+        except Exception as e:
+            # Handle FTS5 syntax errors and provide user-friendly feedback
+            if "fts5: syntax error" in str(e).lower():  # pragma: no cover
+                logger.warning(f"FTS5 syntax error for search term: {search_text}, error: {e}")
+                # Return empty results rather than crashing
+                return []
+            else:
+                # Re-raise other database errors
+                logger.error(f"Database error during search: {e}")
+                raise
 
         results = [
             SearchIndexRow(

basic_memory/schemas/base.py
@@ -13,7 +13,7 @@ Key Concepts:
 
 import mimetypes
 import re
-from datetime import datetime
+from datetime import datetime, time
 from pathlib import Path
 from typing import List, Optional, Annotated, Dict
 
@@ -46,15 +46,43 @@ def to_snake_case(name: str) -> str:
     return s2.lower()
 
 
+def parse_timeframe(timeframe: str) -> datetime:
+    """Parse timeframe with special handling for 'today' and other natural language expressions.
+
+    Args:
+        timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
+
+    Returns:
+        datetime: The parsed datetime for the start of the timeframe
+
+    Examples:
+        parse_timeframe('today') -> 2025-06-05 00:00:00 (start of today)
+        parse_timeframe('1d') -> 2025-06-04 14:50:00 (24 hours ago)
+        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00 (1 week ago)
+    """
+    if timeframe.lower() == "today":
+        # Return start of today (00:00:00)
+        return datetime.combine(datetime.now().date(), time.min)
+    else:
+        # Use dateparser for other formats
+        parsed = parse(timeframe)
+        if not parsed:
+            raise ValueError(f"Could not parse timeframe: {timeframe}")
+        return parsed
+
+
 def validate_timeframe(timeframe: str) -> str:
     """Convert human readable timeframes to a duration relative to the current time."""
     if not isinstance(timeframe, str):
         raise ValueError("Timeframe must be a string")
 
-    # Parse relative time expression
-    parsed = parse(timeframe)
-    if not parsed:
-        raise ValueError(f"Could not parse timeframe: {timeframe}")
+    # Preserve special timeframe strings that need custom handling
+    special_timeframes = ["today"]
+    if timeframe.lower() in special_timeframes:
+        return timeframe.lower()
+
+    # Parse relative time expression using our enhanced parser
+    parsed = parse_timeframe(timeframe)
 
     # Convert to duration
     now = datetime.now()
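
Aside (not part of the diff): the behavioral difference introduced above, in brief (results paraphrased from the docstring examples; dates are illustrative):

    from basic_memory.schemas.base import parse_timeframe, validate_timeframe

    validate_timeframe("today")    # -> "today", preserved as a special string rather than converted to a duration
    parse_timeframe("today")       # -> today's date at 00:00:00 (start of day), not the current clock time
    parse_timeframe("1 week ago")  # -> a datetime roughly seven days before now, via dateparser
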