basic-memory 0.13.0b4__py3-none-any.whl → 0.13.0b6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of basic-memory might be problematic.

Files changed (41)
  1. basic_memory/__init__.py +2 -7
  2. basic_memory/api/routers/knowledge_router.py +13 -0
  3. basic_memory/api/routers/memory_router.py +3 -4
  4. basic_memory/api/routers/project_router.py +6 -5
  5. basic_memory/api/routers/prompt_router.py +2 -2
  6. basic_memory/cli/commands/project.py +3 -3
  7. basic_memory/cli/commands/status.py +1 -1
  8. basic_memory/cli/commands/sync.py +1 -1
  9. basic_memory/cli/commands/tool.py +6 -6
  10. basic_memory/mcp/prompts/__init__.py +2 -0
  11. basic_memory/mcp/prompts/recent_activity.py +1 -1
  12. basic_memory/mcp/prompts/sync_status.py +116 -0
  13. basic_memory/mcp/server.py +6 -6
  14. basic_memory/mcp/tools/__init__.py +4 -0
  15. basic_memory/mcp/tools/build_context.py +32 -7
  16. basic_memory/mcp/tools/canvas.py +2 -1
  17. basic_memory/mcp/tools/delete_note.py +159 -4
  18. basic_memory/mcp/tools/edit_note.py +17 -11
  19. basic_memory/mcp/tools/move_note.py +252 -40
  20. basic_memory/mcp/tools/project_management.py +35 -3
  21. basic_memory/mcp/tools/read_note.py +11 -4
  22. basic_memory/mcp/tools/search.py +180 -8
  23. basic_memory/mcp/tools/sync_status.py +254 -0
  24. basic_memory/mcp/tools/utils.py +47 -0
  25. basic_memory/mcp/tools/view_note.py +66 -0
  26. basic_memory/mcp/tools/write_note.py +13 -2
  27. basic_memory/repository/search_repository.py +116 -38
  28. basic_memory/schemas/base.py +33 -5
  29. basic_memory/schemas/memory.py +58 -1
  30. basic_memory/services/entity_service.py +18 -5
  31. basic_memory/services/initialization.py +32 -5
  32. basic_memory/services/link_resolver.py +20 -5
  33. basic_memory/services/migration_service.py +168 -0
  34. basic_memory/services/project_service.py +121 -50
  35. basic_memory/services/sync_status_service.py +181 -0
  36. basic_memory/sync/sync_service.py +91 -13
  37. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b6.dist-info}/METADATA +2 -2
  38. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b6.dist-info}/RECORD +41 -36
  39. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b6.dist-info}/WHEEL +0 -0
  40. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b6.dist-info}/entry_points.txt +0 -0
  41. {basic_memory-0.13.0b4.dist-info → basic_memory-0.13.0b6.dist-info}/licenses/LICENSE +0 -0
basic_memory/repository/search_repository.py
@@ -128,34 +128,90 @@ class SearchRepository:
             is_prefix: Whether to add prefix search capability (* suffix)
 
         For FTS5:
-        - Special characters and phrases need to be quoted
-        - Terms with spaces or special chars need quotes
         - Boolean operators (AND, OR, NOT) are preserved for complex queries
+        - Terms with FTS5 special characters are quoted to prevent syntax errors
+        - Simple terms get prefix wildcards for better matching
         """
-        if "*" in term:
-            return term
-
         # Check for explicit boolean operators - if present, return the term as is
         boolean_operators = [" AND ", " OR ", " NOT "]
         if any(op in f" {term} " for op in boolean_operators):
            return term
 
-        # List of FTS5 special characters that need escaping/quoting
-        special_chars = ["/", "-", ".", " ", "(", ")", "[", "]", '"', "'"]
+        # Check if term is already a proper wildcard pattern (alphanumeric + *)
+        # e.g., "hello*", "test*world" - these should be left alone
+        if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
+            return term
 
-        # Check if term contains any special characters
-        needs_quotes = any(c in term for c in special_chars)
+        # Characters that can cause FTS5 syntax errors when used as operators
+        # We're more conservative here - only quote when we detect problematic patterns
+        problematic_chars = [
+            '"',
+            "'",
+            "(",
+            ")",
+            "[",
+            "]",
+            "{",
+            "}",
+            "+",
+            "!",
+            "@",
+            "#",
+            "$",
+            "%",
+            "^",
+            "&",
+            "=",
+            "|",
+            "\\",
+            "~",
+            "`",
+        ]
 
-        if needs_quotes:
-            # Escape any existing quotes by doubling them
-            escaped_term = term.replace('"', '""')
-            # Quote the entire term to handle special characters safely
-            if is_prefix and not ("/" in term and term.endswith(".md")):
-                # For search terms (not file paths), add prefix matching
-                term = f'"{escaped_term}"*'
+        # Characters that indicate we should quote (spaces, dots, colons, etc.)
+        # Adding hyphens here because FTS5 can have issues with hyphens followed by wildcards
+        needs_quoting_chars = [" ", ".", ":", ";", ",", "<", ">", "?", "/", "-"]
+
+        # Check if term needs quoting
+        has_problematic = any(c in term for c in problematic_chars)
+        has_spaces_or_special = any(c in term for c in needs_quoting_chars)
+
+        if has_problematic or has_spaces_or_special:
+            # Handle multi-word queries differently from special character queries
+            if " " in term and not any(c in term for c in problematic_chars):
+                # Check if any individual word contains special characters that need quoting
+                words = term.strip().split()
+                has_special_in_words = any(
+                    any(c in word for c in needs_quoting_chars if c != " ") for word in words
+                )
+
+                if not has_special_in_words:
+                    # For multi-word queries with simple words (like "emoji unicode"),
+                    # use boolean AND to handle word order variations
+                    if is_prefix:
+                        # Add prefix wildcard to each word for better matching
+                        prepared_words = [f"{word}*" for word in words if word]
+                    else:
+                        prepared_words = words
+                    term = " AND ".join(prepared_words)
+                else:
+                    # If any word has special characters, quote the entire phrase
+                    escaped_term = term.replace('"', '""')
+                    if is_prefix and not ("/" in term and term.endswith(".md")):
+                        term = f'"{escaped_term}"*'
+                    else:
+                        term = f'"{escaped_term}"'
             else:
-                # For file paths, use exact matching
-                term = f'"{escaped_term}"'
+                # For terms with problematic characters or file paths, use exact phrase matching
+                # Escape any existing quotes by doubling them
+                escaped_term = term.replace('"', '""')
+                # Quote the entire term to handle special characters safely
+                if is_prefix and not ("/" in term and term.endswith(".md")):
+                    # For search terms (not file paths), add prefix matching
+                    term = f'"{escaped_term}"*'
+                else:
+                    # For file paths, use exact matching
+                    term = f'"{escaped_term}"'
         elif is_prefix:
             # Only add wildcard for simple terms without special characters
             term = f"{term}*"
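The new rules in one place: booleans pass through, clean wildcards pass through, simple multi-word queries become AND-ed prefix terms, and anything with operator characters gets phrase-quoted. A minimal standalone sketch (prepare_search_term is a hypothetical copy of SearchRepository._prepare_search_term, condensed for illustration):

def prepare_search_term(term: str, is_prefix: bool = True) -> str:
    """Hypothetical standalone copy of the logic added above."""
    if any(op in f" {term} " for op in (" AND ", " OR ", " NOT ")):
        return term  # explicit boolean queries pass through untouched
    if "*" in term and all(c.isalnum() or c in "*_-" for c in term):
        return term  # already a clean wildcard pattern
    problematic = set("\"'()[]{}+!@#$%^&=|\\~`")
    quoting = set(" .:;,<>?/-")
    if any(c in problematic for c in term) or any(c in quoting for c in term):
        words = term.strip().split()
        if " " in term and not any(c in problematic for c in term) and not any(
            c in (quoting - {" "}) for w in words for c in w
        ):
            # simple multi-word query: AND the words, optionally with prefix wildcards
            return " AND ".join(f"{w}*" if is_prefix else w for w in words)
        escaped = term.replace('"', '""')
        if is_prefix and not ("/" in term and term.endswith(".md")):
            return f'"{escaped}"*'  # quoted phrase with prefix matching
        return f'"{escaped}"'  # file paths: exact quoted match
    return f"{term}*" if is_prefix else term

assert prepare_search_term("hello") == "hello*"
assert prepare_search_term("emoji unicode") == "emoji* AND unicode*"
assert prepare_search_term("C++ basics") == '"C++ basics"*'
assert prepare_search_term("docs/setup.md") == '"docs/setup.md"'
assert prepare_search_term("foo AND bar") == "foo AND bar"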
@@ -181,19 +237,24 @@ class SearchRepository:
 
         # Handle text search for title and content
         if search_text:
-            # Check for explicit boolean operators - only detect them in proper boolean contexts
-            has_boolean = any(op in f" {search_text} " for op in [" AND ", " OR ", " NOT "])
-
-            if has_boolean:
-                # If boolean operators are present, use the raw query
-                # No need to prepare it, FTS5 will understand the operators
-                params["text"] = search_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+            # Skip FTS for wildcard-only queries that would cause "unknown special query" errors
+            if search_text.strip() == "*" or search_text.strip() == "":
+                # For wildcard searches, don't add any text conditions - return all results
+                pass
             else:
-                # Standard search with term preparation
-                processed_text = self._prepare_search_term(search_text.strip())
-                params["text"] = processed_text
-                conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+                # Check for explicit boolean operators - only detect them in proper boolean contexts
+                has_boolean = any(op in f" {search_text} " for op in [" AND ", " OR ", " NOT "])
+
+                if has_boolean:
+                    # If boolean operators are present, use the raw query
+                    # No need to prepare it, FTS5 will understand the operators
+                    params["text"] = search_text
+                    conditions.append("(title MATCH :text OR content_stems MATCH :text)")
+                else:
+                    # Standard search with term preparation
+                    processed_text = self._prepare_search_term(search_text.strip())
+                    params["text"] = processed_text
+                    conditions.append("(title MATCH :text OR content_stems MATCH :text)")
 
         # Handle title match search
         if title:
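Why the bare-* guard matters: FTS5 reserves a leading * for special queries, so MATCH '*' raises an error instead of matching everything. A small demo, assuming a Python build whose sqlite3 module has FTS5 compiled in (the table and rows are invented):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE VIRTUAL TABLE notes USING fts5(title, content_stems)")
conn.execute("INSERT INTO notes VALUES ('search spec', 'fts5 quoting rules')")

try:
    conn.execute("SELECT * FROM notes WHERE notes MATCH ?", ("*",)).fetchall()
except sqlite3.OperationalError as e:
    print(f"bare wildcard fails: {e}")

# The new branch adds no MATCH condition for "*" or "", so a wildcard search
# degrades to "return all rows" instead of raising.
print(conn.execute("SELECT title FROM notes").fetchall())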
@@ -208,15 +269,21 @@ class SearchRepository:
 
         # Handle permalink match search, supports *
         if permalink_match:
-            # Clean and prepare permalink for FTS5 GLOB match
-            permalink_text = self._prepare_search_term(
-                permalink_match.lower().strip(), is_prefix=False
-            )
+            # For GLOB patterns, don't use _prepare_search_term as it will quote slashes
+            # GLOB patterns need to preserve their syntax
+            permalink_text = permalink_match.lower().strip()
             params["permalink"] = permalink_text
             if "*" in permalink_match:
                 conditions.append("permalink GLOB :permalink")
             else:
-                conditions.append("permalink MATCH :permalink")
+                # For exact matches without *, we can use FTS5 MATCH
+                # but only prepare the term if it doesn't look like a path
+                if "/" in permalink_text:
+                    conditions.append("permalink = :permalink")
+                else:
+                    permalink_text = self._prepare_search_term(permalink_text, is_prefix=False)
+                    params["permalink"] = permalink_text
+                    conditions.append("permalink MATCH :permalink")
 
         # Handle entity type filter
         if search_item_types:
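The three-way dispatch above, restated as a pure function (permalink_condition is a made-up helper, shown only to make the decision table explicit):

def permalink_condition(permalink_match: str) -> tuple[str, str]:
    """Pick the SQL operator the way the hunk above does."""
    text = permalink_match.lower().strip()
    if "*" in permalink_match:
        return "permalink GLOB :permalink", text  # keep GLOB syntax intact
    if "/" in text:
        return "permalink = :permalink", text  # path-like: exact equality, no FTS5
    # single segment: FTS5 MATCH (the real code first runs
    # _prepare_search_term(text, is_prefix=False) on the parameter)
    return "permalink MATCH :permalink", text

print(permalink_condition("specs/search/*"))  # GLOB for wildcard patterns
print(permalink_condition("specs/search"))    # equality for paths
print(permalink_condition("search"))          # MATCH for single segments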
@@ -273,9 +340,20 @@ class SearchRepository:
         """
 
         logger.trace(f"Search {sql} params: {params}")
-        async with db.scoped_session(self.session_maker) as session:
-            result = await session.execute(text(sql), params)
-            rows = result.fetchall()
+        try:
+            async with db.scoped_session(self.session_maker) as session:
+                result = await session.execute(text(sql), params)
+                rows = result.fetchall()
+        except Exception as e:
+            # Handle FTS5 syntax errors and provide user-friendly feedback
+            if "fts5: syntax error" in str(e).lower():  # pragma: no cover
+                logger.warning(f"FTS5 syntax error for search term: {search_text}, error: {e}")
+                # Return empty results rather than crashing
+                return []
+            else:
+                # Re-raise other database errors
+                logger.error(f"Database error during search: {e}")
+                raise
 
         results = [
             SearchIndexRow(
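The same degrade-to-empty pattern, condensed to synchronous sqlite3 so it runs standalone (table name, query, and helper are invented for the demo; assumes FTS5 is available):

import sqlite3

def safe_search(conn: sqlite3.Connection, match_expr: str) -> list:
    try:
        return conn.execute(
            "SELECT title FROM notes WHERE notes MATCH ?", (match_expr,)
        ).fetchall()
    except sqlite3.OperationalError as e:
        if "syntax error" in str(e).lower():
            return []  # swallow FTS5 syntax errors, mirroring the hunk above
        raise  # any other database error still propagates

conn = sqlite3.connect(":memory:")
conn.execute("CREATE VIRTUAL TABLE notes USING fts5(title)")
conn.execute("INSERT INTO notes VALUES ('hello world')")
print(safe_search(conn, "hello"))  # [('hello world',)]
print(safe_search(conn, "(("))     # [] instead of an unhandled exception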
basic_memory/schemas/base.py
@@ -13,7 +13,7 @@ Key Concepts:
 
 import mimetypes
 import re
-from datetime import datetime
+from datetime import datetime, time
 from pathlib import Path
 from typing import List, Optional, Annotated, Dict
 
@@ -46,15 +46,43 @@ def to_snake_case(name: str) -> str:
     return s2.lower()
 
 
+def parse_timeframe(timeframe: str) -> datetime:
+    """Parse timeframe with special handling for 'today' and other natural language expressions.
+
+    Args:
+        timeframe: Natural language timeframe like 'today', '1d', '1 week ago', etc.
+
+    Returns:
+        datetime: The parsed datetime for the start of the timeframe
+
+    Examples:
+        parse_timeframe('today') -> 2025-06-05 00:00:00 (start of today)
+        parse_timeframe('1d') -> 2025-06-04 14:50:00 (24 hours ago)
+        parse_timeframe('1 week ago') -> 2025-05-29 14:50:00 (1 week ago)
+    """
+    if timeframe.lower() == "today":
+        # Return start of today (00:00:00)
+        return datetime.combine(datetime.now().date(), time.min)
+    else:
+        # Use dateparser for other formats
+        parsed = parse(timeframe)
+        if not parsed:
+            raise ValueError(f"Could not parse timeframe: {timeframe}")
+        return parsed
+
+
 def validate_timeframe(timeframe: str) -> str:
     """Convert human readable timeframes to a duration relative to the current time."""
     if not isinstance(timeframe, str):
         raise ValueError("Timeframe must be a string")
 
-    # Parse relative time expression
-    parsed = parse(timeframe)
-    if not parsed:
-        raise ValueError(f"Could not parse timeframe: {timeframe}")
+    # Preserve special timeframe strings that need custom handling
+    special_timeframes = ["today"]
+    if timeframe.lower() in special_timeframes:
+        return timeframe.lower()
+
+    # Parse relative time expression using our enhanced parser
+    parsed = parse_timeframe(timeframe)
 
     # Convert to duration
     now = datetime.now()
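The practical difference: 'today' now anchors to midnight rather than "24 hours ago", so notes written earlier today stop being filtered out. A runnable sketch that re-declares the new helper, assuming the dateparser package (which supplies the parse used in this module):

from datetime import datetime, time
from dateparser import parse

def parse_timeframe(timeframe: str) -> datetime:
    if timeframe.lower() == "today":
        return datetime.combine(datetime.now().date(), time.min)  # midnight today
    parsed = parse(timeframe)
    if not parsed:
        raise ValueError(f"Could not parse timeframe: {timeframe}")
    return parsed

print(parse_timeframe("today"))       # 00:00:00 today, not 24 hours ago
print(parse_timeframe("1d"))          # one day before "now"
print(parse_timeframe("1 week ago"))  # one week before "now"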
basic_memory/schemas/memory.py
@@ -9,8 +9,44 @@ from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter
 from basic_memory.schemas.search import SearchItemType
 
 
+def validate_memory_url_path(path: str) -> bool:
+    """Validate that a memory URL path is well-formed.
+
+    Args:
+        path: The path part of a memory URL (without memory:// prefix)
+
+    Returns:
+        True if the path is valid, False otherwise
+
+    Examples:
+        >>> validate_memory_url_path("specs/search")
+        True
+        >>> validate_memory_url_path("memory//test")  # Double slash
+        False
+        >>> validate_memory_url_path("invalid://test")  # Contains protocol
+        False
+    """
+    if not path or not path.strip():
+        return False
+
+    # Check for invalid protocol schemes within the path first (more specific)
+    if "://" in path:
+        return False
+
+    # Check for double slashes (except at the beginning for absolute paths)
+    if "//" in path:
+        return False
+
+    # Check for invalid characters (excluding * which is used for pattern matching)
+    invalid_chars = {"<", ">", '"', "|", "?"}
+    if any(char in path for char in invalid_chars):
+        return False
+
+    return True
+
+
 def normalize_memory_url(url: str | None) -> str:
-    """Normalize a MemoryUrl string.
+    """Normalize a MemoryUrl string with validation.
 
     Args:
         url: A path like "specs/search" or "memory://specs/search"
@@ -18,22 +54,43 @@ def normalize_memory_url(url: str | None) -> str:
     Returns:
         Normalized URL starting with memory://
 
+    Raises:
+        ValueError: If the URL path is malformed
+
     Examples:
         >>> normalize_memory_url("specs/search")
         'memory://specs/search'
         >>> normalize_memory_url("memory://specs/search")
         'memory://specs/search'
+        >>> normalize_memory_url("memory//test")
+        Traceback (most recent call last):
+            ...
+        ValueError: Invalid memory URL path: 'memory//test' contains double slashes
     """
     if not url:
         return ""
 
     clean_path = url.removeprefix("memory://")
+
+    # Validate the extracted path
+    if not validate_memory_url_path(clean_path):
+        # Provide specific error messages for common issues
+        if "://" in clean_path:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains protocol scheme")
+        elif "//" in clean_path:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains double slashes")
+        elif not clean_path.strip():
+            raise ValueError("Memory URL path cannot be empty or whitespace")
+        else:
+            raise ValueError(f"Invalid memory URL path: '{clean_path}' contains invalid characters")
+
     return f"memory://{clean_path}"
 
 
 MemoryUrl = Annotated[
     str,
     BeforeValidator(str.strip),  # Clean whitespace
+    BeforeValidator(normalize_memory_url),  # Validate and normalize the URL
     MinLen(1),
     MaxLen(2028),
 ]
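What the new BeforeValidator buys at the model boundary: malformed URLs are rejected during validation instead of flowing through to lookups. A sketch assuming pydantic v2 and annotated-types, with a simplified stand-in for the validator defined above:

from typing import Annotated
from annotated_types import MaxLen, MinLen
from pydantic import BeforeValidator, TypeAdapter

def normalize_memory_url(url: str) -> str:
    # condensed stand-in for the full validator above
    path = url.removeprefix("memory://")
    if not path.strip() or "://" in path or "//" in path:
        raise ValueError(f"Invalid memory URL path: '{path}'")
    return f"memory://{path}"

MemoryUrl = Annotated[
    str,
    BeforeValidator(str.strip),
    BeforeValidator(normalize_memory_url),
    MinLen(1),
    MaxLen(2028),
]

adapter = TypeAdapter(MemoryUrl)
print(adapter.validate_python("specs/search"))  # 'memory://specs/search'
try:
    adapter.validate_python("memory//test")
except Exception as exc:
    print(type(exc).__name__)  # ValidationError wrapping the ValueError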
basic_memory/services/entity_service.py
@@ -299,7 +299,20 @@ class EntityService(BaseService[EntityModel]):
         # Mark as incomplete because we still need to add relations
         model.checksum = None
         # Repository will set project_id automatically
-        return await self.repository.add(model)
+        try:
+            return await self.repository.add(model)
+        except IntegrityError as e:
+            # Handle race condition where entity was created by another process
+            if "UNIQUE constraint failed: entity.file_path" in str(
+                e
+            ) or "UNIQUE constraint failed: entity.permalink" in str(e):
+                logger.info(
+                    f"Entity already exists for file_path={file_path} (file_path or permalink conflict), updating instead of creating"
+                )
+                return await self.update_entity_and_observations(file_path, markdown)
+            else:
+                # Re-raise if it's a different integrity error
+                raise
 
     async def update_entity_and_observations(
         self, file_path: Path, markdown: EntityMarkdown
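The create-then-fall-back-to-update pattern above, reduced to plain sqlite3 so the race handling is visible (schema and helper are invented for the demo):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE entity (file_path TEXT PRIMARY KEY, title TEXT)")

def create_or_update(file_path: str, title: str) -> None:
    try:
        conn.execute("INSERT INTO entity VALUES (?, ?)", (file_path, title))
    except sqlite3.IntegrityError as e:
        # another writer got there first: fall back to updating in place
        if "UNIQUE constraint failed" in str(e):
            conn.execute(
                "UPDATE entity SET title = ? WHERE file_path = ?", (title, file_path)
            )
        else:
            raise

create_or_update("notes/a.md", "first write")
create_or_update("notes/a.md", "concurrent second write")  # survives the conflict
print(conn.execute("SELECT * FROM entity").fetchall())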
@@ -413,8 +426,8 @@ class EntityService(BaseService[EntityModel]):
         """
         logger.debug(f"Editing entity: {identifier}, operation: {operation}")
 
-        # Find the entity using the link resolver
-        entity = await self.link_resolver.resolve_link(identifier)
+        # Find the entity using the link resolver with strict mode for destructive operations
+        entity = await self.link_resolver.resolve_link(identifier, strict=True)
         if not entity:
             raise EntityNotFoundError(f"Entity not found: {identifier}")
 
@@ -630,8 +643,8 @@ class EntityService(BaseService[EntityModel]):
         """
         logger.debug(f"Moving entity: {identifier} to {destination_path}")
 
-        # 1. Resolve identifier to entity
-        entity = await self.link_resolver.resolve_link(identifier)
+        # 1. Resolve identifier to entity with strict mode for destructive operations
+        entity = await self.link_resolver.resolve_link(identifier, strict=True)
         if not entity:
             raise EntityNotFoundError(f"Entity not found: {identifier}")
 
basic_memory/services/initialization.py
@@ -83,7 +83,9 @@ async def migrate_legacy_projects(app_config: BasicMemoryConfig):
             logger.error(f"Project {project_name} not found in database, skipping migration")
             continue
 
+        logger.info(f"Starting migration for project: {project_name} (id: {project.id})")
         await migrate_legacy_project_data(project, legacy_dir)
+        logger.info(f"Completed migration for project: {project_name}")
     logger.info("Legacy projects successfully migrated")
 
 
@@ -104,7 +106,7 @@ async def migrate_legacy_project_data(project: Project, legacy_dir: Path) -> boo
     sync_dir = Path(project.path)
 
     logger.info(f"Sync starting project: {project.name}")
-    await sync_service.sync(sync_dir)
+    await sync_service.sync(sync_dir, project_name=project.name)
    logger.info(f"Sync completed successfully for project: {project.name}")
 
     # After successful sync, remove the legacy directory
@@ -158,12 +160,32 @@ async def initialize_file_sync(
         sync_dir = Path(project.path)
 
         try:
-            await sync_service.sync(sync_dir)
+            await sync_service.sync(sync_dir, project_name=project.name)
             logger.info(f"Sync completed successfully for project: {project.name}")
+
+            # Mark project as watching for changes after successful sync
+            from basic_memory.services.sync_status_service import sync_status_tracker
+
+            sync_status_tracker.start_project_watch(project.name)
+            logger.info(f"Project {project.name} is now watching for changes")
         except Exception as e:  # pragma: no cover
             logger.error(f"Error syncing project {project.name}: {e}")
+            # Mark sync as failed for this project
+            from basic_memory.services.sync_status_service import sync_status_tracker
+
+            sync_status_tracker.fail_project_sync(project.name, str(e))
             # Continue with other projects even if one fails
 
+    # Mark migration complete if it was in progress
+    try:
+        from basic_memory.services.migration_service import migration_manager
+
+        if not migration_manager.is_ready:  # pragma: no cover
+            migration_manager.mark_completed("Migration completed with file sync")
+            logger.info("Marked migration as completed after file sync")
+    except Exception as e:  # pragma: no cover
+        logger.warning(f"Could not update migration status: {e}")
+
     # Then start the watch service in the background
     logger.info("Starting watch service for all projects")
     # run the watch service
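A minimal stand-in for the tracker used above, assuming only the two calls visible in this hunk (start_project_watch / fail_project_sync); the shipped service in basic_memory/services/sync_status_service.py is larger:

from dataclasses import dataclass, field

@dataclass
class SyncStatusTracker:
    watching: set[str] = field(default_factory=set)
    failed: dict[str, str] = field(default_factory=dict)

    def start_project_watch(self, project: str) -> None:
        # a successful sync clears any earlier failure for this project
        self.failed.pop(project, None)
        self.watching.add(project)

    def fail_project_sync(self, project: str, error: str) -> None:
        self.watching.discard(project)
        self.failed[project] = error

tracker = SyncStatusTracker()
tracker.start_project_watch("main")
tracker.fail_project_sync("docs", "permission denied")
print(tracker.watching, tracker.failed)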
@@ -185,7 +207,7 @@ async def initialize_app(
     - Running database migrations
     - Reconciling projects from config.json with projects table
     - Setting up file synchronization
-    - Migrating legacy project data
+    - Starting background migration for legacy project data
 
     Args:
         app_config: The Basic Memory project configuration
@@ -197,8 +219,13 @@ async def initialize_app(
     # Reconcile projects from config.json with projects table
     await reconcile_projects_with_config(app_config)
 
-    # migrate legacy project data
-    await migrate_legacy_projects(app_config)
+    # Start background migration for legacy project data (non-blocking)
+    from basic_memory.services.migration_service import migration_manager
+
+    await migration_manager.start_background_migration(app_config)
+
+    logger.info("App initialization completed (migration running in background if needed)")
+    return migration_manager
 
 
 def ensure_initialization(app_config: BasicMemoryConfig) -> None:
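One way a non-blocking start_background_migration can work: kick the coroutine off with asyncio.create_task and expose an is_ready flag for callers to poll. This MigrationManager is a hypothetical sketch, not the one shipped in migration_service.py:

import asyncio

class MigrationManager:
    def __init__(self) -> None:
        self.is_ready = False
        self._task: asyncio.Task | None = None

    async def start_background_migration(self, app_config) -> None:
        # returns immediately; the migration keeps running in the background
        self._task = asyncio.create_task(self._run(app_config))

    async def _run(self, app_config) -> None:
        await asyncio.sleep(0.1)  # stands in for the legacy data migration
        self.mark_completed("Migration completed")

    def mark_completed(self, reason: str) -> None:
        self.is_ready = True

async def main() -> None:
    manager = MigrationManager()
    await manager.start_background_migration(app_config=None)
    print("init returned, ready =", manager.is_ready)  # False: still running
    await asyncio.sleep(0.2)
    print("later, ready =", manager.is_ready)          # True

asyncio.run(main())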
basic_memory/services/link_resolver.py
@@ -26,8 +26,16 @@ class LinkResolver:
         self.entity_repository = entity_repository
         self.search_service = search_service
 
-    async def resolve_link(self, link_text: str, use_search: bool = True) -> Optional[Entity]:
-        """Resolve a markdown link to a permalink."""
+    async def resolve_link(
+        self, link_text: str, use_search: bool = True, strict: bool = False
+    ) -> Optional[Entity]:
+        """Resolve a markdown link to a permalink.
+
+        Args:
+            link_text: The link text to resolve
+            use_search: Whether to use search-based fuzzy matching as fallback
+            strict: If True, only exact matches are allowed (no fuzzy search fallback)
+        """
         logger.trace(f"Resolving link: {link_text}")
 
         # Clean link text and extract any alias
@@ -41,7 +49,8 @@ class LinkResolver:
 
         # 2. Try exact title match
         found = await self.entity_repository.get_by_title(clean_text)
-        if found and len(found) == 1:
+        if found:
+            # Return first match if there are duplicates (consistent behavior)
             entity = found[0]
             logger.debug(f"Found title match: {entity.title}")
             return entity
@@ -60,9 +69,12 @@ class LinkResolver:
             logger.debug(f"Found entity with path (with .md): {found_path_md.file_path}")
             return found_path_md
 
-        # search if indicated
+        # In strict mode, don't try fuzzy search - return None if no exact match found
+        if strict:
+            return None
+
+        # 5. Fall back to search for fuzzy matching (only if not in strict mode)
         if use_search and "*" not in clean_text:
-            # 5. Fall back to search for fuzzy matching on title (use text search for prefix matching)
             results = await self.search_service.search(
                 query=SearchQuery(text=clean_text, entity_types=[SearchItemType.ENTITY]),
             )
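The resolution ladder with the new strict flag, collapsed to pure functions so the behavior difference is obvious (the lookup dicts stand in for the repository and FTS-backed search):

from typing import Optional

entities_by_permalink = {"specs/search": "Search Spec"}
entities_by_title = {"Search Spec": "Search Spec"}

def fuzzy_search(text: str) -> Optional[str]:
    # stands in for the search-service fallback
    return next((t for t in entities_by_title if text.lower() in t.lower()), None)

def resolve_link(link_text: str, strict: bool = False) -> Optional[str]:
    exact = entities_by_permalink.get(link_text) or entities_by_title.get(link_text)
    if exact:
        return exact
    if strict:
        return None  # destructive callers (edit/move) stop here
    return fuzzy_search(link_text)

print(resolve_link("search spec"))               # fuzzy match found
print(resolve_link("search spec", strict=True))  # None: no exact match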
@@ -101,5 +113,8 @@ class LinkResolver:
             text, alias = text.split("|", 1)
             text = text.strip()
             alias = alias.strip()
+        else:
+            # Strip whitespace from text even if no alias
+            text = text.strip()
 
         return text, alias
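The alias handling that the new else-branch completes, as a standalone copy of the split logic (split_alias is a made-up name for illustration):

def split_alias(text: str) -> tuple[str, str | None]:
    alias = None
    if "|" in text:
        text, alias = text.split("|", 1)
        text = text.strip()
        alias = alias.strip()
    else:
        # Strip whitespace from text even if no alias
        text = text.strip()
    return text, alias

print(split_alias("Search Spec | the spec"))  # ('Search Spec', 'the spec')
print(split_alias("  Search Spec  "))         # ('Search Spec', None) - now stripped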