basic-memory 0.14.3__py3-none-any.whl → 0.14.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of basic-memory might be problematic; see the registry's advisory page for more details.

Files changed (34):
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py +53 -0
  3. basic_memory/api/routers/resource_router.py +3 -3
  4. basic_memory/cli/commands/project.py +9 -10
  5. basic_memory/config.py +20 -8
  6. basic_memory/file_utils.py +65 -0
  7. basic_memory/importers/chatgpt_importer.py +1 -1
  8. basic_memory/importers/utils.py +2 -2
  9. basic_memory/markdown/entity_parser.py +2 -2
  10. basic_memory/markdown/markdown_processor.py +2 -2
  11. basic_memory/markdown/plugins.py +42 -26
  12. basic_memory/markdown/utils.py +1 -1
  13. basic_memory/mcp/tools/build_context.py +12 -2
  14. basic_memory/mcp/tools/project_management.py +22 -7
  15. basic_memory/mcp/tools/read_note.py +16 -13
  16. basic_memory/models/knowledge.py +13 -2
  17. basic_memory/models/project.py +2 -2
  18. basic_memory/repository/entity_repository.py +2 -2
  19. basic_memory/repository/project_repository.py +1 -1
  20. basic_memory/repository/search_repository.py +7 -3
  21. basic_memory/schemas/base.py +40 -10
  22. basic_memory/schemas/memory.py +23 -11
  23. basic_memory/services/context_service.py +12 -2
  24. basic_memory/services/directory_service.py +7 -0
  25. basic_memory/services/entity_service.py +8 -8
  26. basic_memory/services/project_service.py +11 -11
  27. basic_memory/sync/sync_service.py +3 -3
  28. basic_memory/sync/watch_service.py +31 -8
  29. basic_memory/utils.py +169 -107
  30. {basic_memory-0.14.3.dist-info → basic_memory-0.14.4.dist-info}/METADATA +20 -91
  31. {basic_memory-0.14.3.dist-info → basic_memory-0.14.4.dist-info}/RECORD +34 -33
  32. {basic_memory-0.14.3.dist-info → basic_memory-0.14.4.dist-info}/WHEEL +0 -0
  33. {basic_memory-0.14.3.dist-info → basic_memory-0.14.4.dist-info}/entry_points.txt +0 -0
  34. {basic_memory-0.14.3.dist-info → basic_memory-0.14.4.dist-info}/licenses/LICENSE +0 -0
basic_memory/__init__.py CHANGED
@@ -1,7 +1,7 @@
1
1
  """basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
2
2
 
3
3
  # Package version - updated by release automation
4
- __version__ = "0.14.3"
4
+ __version__ = "0.14.4"
5
5
 
6
6
  # API version for FastAPI - independent of package version
7
7
  __api_version__ = "v0"
@@ -0,0 +1,53 @@
1
"""fix project foreign keys

Revision ID: a1b2c3d4e5f6
Revises: 647e7a75e2cd
Create Date: 2025-08-19 22:06:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op


# revision identifiers, used by Alembic.
revision: str = "a1b2c3d4e5f6"
down_revision: Union[str, None] = "647e7a75e2cd"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Re-establish foreign key constraints that were lost during project table recreation.

    The migration 647e7a75e2cd recreated the project table but did not re-establish
    the foreign key constraint from entity.project_id to project.id, causing
    foreign key constraint failures when trying to delete projects with related entities.
    """
    # NOTE: batch operations are *deferred* — nothing executes until the
    # `with` block exits, so wrapping batch_op.drop_constraint() in
    # try/except cannot catch a missing-constraint error.  Inspect the
    # live schema up front and only drop the constraint if it exists.
    bind = op.get_bind()
    inspector = sa.inspect(bind)
    existing_fk_names = {
        fk.get("name") for fk in inspector.get_foreign_keys("entity")
    }

    with op.batch_alter_table("entity", schema=None) as batch_op:
        # Drop a stale constraint only when it is actually present.
        if "fk_entity_project_id" in existing_fk_names:
            batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")

        # Add the foreign key constraint with CASCADE DELETE.
        # This ensures that when a project is deleted, all related entities
        # are also deleted.
        batch_op.create_foreign_key(
            "fk_entity_project_id",
            "project",
            ["project_id"],
            ["id"],
            ondelete="CASCADE",
        )


def downgrade() -> None:
    """Remove the foreign key constraint."""
    with op.batch_alter_table("entity", schema=None) as batch_op:
        batch_op.drop_constraint("fk_entity_project_id", type_="foreignkey")
@@ -188,7 +188,7 @@ async def write_resource(
188
188
  "content_type": content_type,
189
189
  "file_path": file_path,
190
190
  "checksum": checksum,
191
- "updated_at": datetime.fromtimestamp(file_stats.st_mtime),
191
+ "updated_at": datetime.fromtimestamp(file_stats.st_mtime).astimezone(),
192
192
  },
193
193
  )
194
194
  status_code = 200
@@ -200,8 +200,8 @@ async def write_resource(
200
200
  content_type=content_type,
201
201
  file_path=file_path,
202
202
  checksum=checksum,
203
- created_at=datetime.fromtimestamp(file_stats.st_ctime),
204
- updated_at=datetime.fromtimestamp(file_stats.st_mtime),
203
+ created_at=datetime.fromtimestamp(file_stats.st_ctime).astimezone(),
204
+ updated_at=datetime.fromtimestamp(file_stats.st_mtime).astimezone(),
205
205
  )
206
206
  entity = await entity_repository.add(entity)
207
207
  status_code = 201
@@ -23,8 +23,8 @@ from basic_memory.mcp.tools.utils import call_post
23
23
  from basic_memory.schemas.project_info import ProjectStatusResponse
24
24
  from basic_memory.mcp.tools.utils import call_delete
25
25
  from basic_memory.mcp.tools.utils import call_put
26
- from basic_memory.mcp.tools.utils import call_patch
27
26
  from basic_memory.utils import generate_permalink
27
+ from basic_memory.mcp.tools.utils import call_patch
28
28
 
29
29
  console = Console()
30
30
 
@@ -74,7 +74,7 @@ def add_project(
74
74
  ) -> None:
75
75
  """Add a new project."""
76
76
  # Resolve to absolute path
77
- resolved_path = os.path.abspath(os.path.expanduser(path))
77
+ resolved_path = Path(os.path.abspath(os.path.expanduser(path))).as_posix()
78
78
 
79
79
  try:
80
80
  data = {"name": name, "path": resolved_path, "set_default": set_default}
@@ -100,8 +100,8 @@ def remove_project(
100
100
  ) -> None:
101
101
  """Remove a project from configuration."""
102
102
  try:
103
- project_name = generate_permalink(name)
104
- response = asyncio.run(call_delete(client, f"/projects/{project_name}"))
103
+ project_permalink = generate_permalink(name)
104
+ response = asyncio.run(call_delete(client, f"/projects/{project_permalink}"))
105
105
  result = ProjectStatusResponse.model_validate(response.json())
106
106
 
107
107
  console.print(f"[green]{result.message}[/green]")
@@ -119,9 +119,8 @@ def set_default_project(
119
119
  ) -> None:
120
120
  """Set the default project and activate it for the current session."""
121
121
  try:
122
- project_name = generate_permalink(name)
123
-
124
- response = asyncio.run(call_put(client, f"/projects/{project_name}/default"))
122
+ project_permalink = generate_permalink(name)
123
+ response = asyncio.run(call_put(client, f"/projects/{project_permalink}/default"))
125
124
  result = ProjectStatusResponse.model_validate(response.json())
126
125
 
127
126
  console.print(f"[green]{result.message}[/green]")
@@ -156,15 +155,15 @@ def move_project(
156
155
  ) -> None:
157
156
  """Move a project to a new location."""
158
157
  # Resolve to absolute path
159
- resolved_path = os.path.abspath(os.path.expanduser(new_path))
158
+ resolved_path = Path(os.path.abspath(os.path.expanduser(new_path))).as_posix()
160
159
 
161
160
  try:
162
161
  data = {"path": resolved_path}
163
- project_name = generate_permalink(name)
164
162
 
163
+ project_permalink = generate_permalink(name)
165
164
  current_project = session.get_current_project()
166
165
  response = asyncio.run(
167
- call_patch(client, f"/{current_project}/project/{project_name}", json=data)
166
+ call_patch(client, f"/{current_project}/project/{project_permalink}", json=data)
168
167
  )
169
168
  result = ProjectStatusResponse.model_validate(response.json())
170
169
 
basic_memory/config.py CHANGED
@@ -46,7 +46,7 @@ class BasicMemoryConfig(BaseSettings):
46
46
 
47
47
  projects: Dict[str, str] = Field(
48
48
  default_factory=lambda: {
49
- "main": str(Path(os.getenv("BASIC_MEMORY_HOME", Path.home() / "basic-memory")))
49
+ "main": Path(os.getenv("BASIC_MEMORY_HOME", Path.home() / "basic-memory")).as_posix()
50
50
  },
51
51
  description="Mapping of project names to their filesystem paths",
52
52
  )
@@ -74,6 +74,11 @@ class BasicMemoryConfig(BaseSettings):
74
74
  description="Whether to sync changes in real time. default (True)",
75
75
  )
76
76
 
77
+ kebab_filenames: bool = Field(
78
+ default=False,
79
+ description="Format for generated filenames. False preserves spaces and special chars, True converts them to hyphens for consistency with permalinks",
80
+ )
81
+
77
82
  # API connection configuration
78
83
  api_url: Optional[str] = Field(
79
84
  default=None,
@@ -100,9 +105,9 @@ class BasicMemoryConfig(BaseSettings):
100
105
  """Ensure configuration is valid after initialization."""
101
106
  # Ensure main project exists
102
107
  if "main" not in self.projects: # pragma: no cover
103
- self.projects["main"] = str(
108
+ self.projects["main"] = (
104
109
  Path(os.getenv("BASIC_MEMORY_HOME", Path.home() / "basic-memory"))
105
- )
110
+ ).as_posix()
106
111
 
107
112
  # Ensure default project is valid
108
113
  if self.default_project not in self.projects: # pragma: no cover
@@ -215,7 +220,7 @@ class ConfigManager:
215
220
 
216
221
  # Load config, modify it, and save it
217
222
  config = self.load_config()
218
- config.projects[name] = str(project_path)
223
+ config.projects[name] = project_path.as_posix()
219
224
  self.save_config(config)
220
225
  return ProjectConfig(name=name, home=project_path)
221
226
 
@@ -242,7 +247,7 @@ class ConfigManager:
242
247
 
243
248
  # Load config, modify, and save
244
249
  config = self.load_config()
245
- config.default_project = name
250
+ config.default_project = project_name
246
251
  self.save_config(config)
247
252
 
248
253
  def get_project(self, name: str) -> Tuple[str, str] | Tuple[None, None]:
@@ -351,15 +356,22 @@ def setup_basic_memory_logging(): # pragma: no cover
351
356
  # print("Skipping duplicate logging setup")
352
357
  return
353
358
 
354
- # Check for console logging environment variable
355
- console_logging = os.getenv("BASIC_MEMORY_CONSOLE_LOGGING", "false").lower() == "true"
359
+ # Check for console logging environment variable - accept more truthy values
360
+ console_logging_env = os.getenv("BASIC_MEMORY_CONSOLE_LOGGING", "false").lower()
361
+ console_logging = console_logging_env in ("true", "1", "yes", "on")
362
+
363
+ # Check for log level environment variable first, fall back to config
364
+ log_level = os.getenv("BASIC_MEMORY_LOG_LEVEL")
365
+ if not log_level:
366
+ config_manager = ConfigManager()
367
+ log_level = config_manager.config.log_level
356
368
 
357
369
  config_manager = ConfigManager()
358
370
  config = get_project_config()
359
371
  setup_logging(
360
372
  env=config_manager.config.env,
361
373
  home_dir=user_home, # Use user home for logs
362
- log_level=config_manager.config.log_level,
374
+ log_level=log_level,
363
375
  log_file=f"{DATA_DIR_NAME}/basic-memory-{process_name}.log",
364
376
  console=console_logging,
365
377
  )
@@ -2,9 +2,11 @@
2
2
 
3
3
  import hashlib
4
4
  from pathlib import Path
5
+ import re
5
6
  from typing import Any, Dict, Union
6
7
 
7
8
  import yaml
9
+ import frontmatter
8
10
  from loguru import logger
9
11
 
10
12
  from basic_memory.utils import FilePath
@@ -233,3 +235,66 @@ async def update_frontmatter(path: FilePath, updates: Dict[str, Any]) -> str:
233
235
  error=str(e),
234
236
  )
235
237
  raise FileError(f"Failed to update frontmatter: {e}")
238
+
239
+
240
def dump_frontmatter(post: frontmatter.Post) -> str:
    """
    Render a frontmatter.Post as markdown with Obsidian-friendly YAML.

    Lists in the metadata (e.g. tags) are emitted in YAML block style:

        ---
        tags:
          - system
          - overview
          - reference
        ---

    rather than as inline JSON-style arrays (``tags: ["system", ...]``),
    which Obsidian does not parse as tags.

    Args:
        post: frontmatter.Post object to serialize

    Returns:
        Markdown text with block-style YAML frontmatter, or the bare
        content when the post carries no metadata.
    """
    metadata = post.metadata
    if not metadata:
        # Nothing to put between the --- markers; emit the content alone.
        return post.content

    # default_flow_style=False forces block-style sequences and mappings.
    rendered_yaml = yaml.dump(
        metadata,
        sort_keys=False,
        allow_unicode=True,
        default_flow_style=False,
    )

    header = f"---\n{rendered_yaml}---\n"
    return f"{header}\n{post.content}" if post.content else header
282
+
283
+
284
def sanitize_for_filename(text: str, replacement: str = "-") -> str:
    """
    Sanitize a string so it is safe to use as a note title.

    Path separators and other characters that are problematic in
    filenames are replaced (by hyphens, by default), runs of the
    replacement are collapsed, and any leading/trailing replacement
    characters are trimmed.
    """
    # POSIX and Windows path separators first...
    sanitized = re.sub(r"[/\\]", replacement, text)

    # ...then characters that are illegal or awkward in filenames.
    sanitized = re.sub(r'[<>:"|?*]', replacement, sanitized)

    # Collapse consecutive replacement characters into one.
    escaped = re.escape(replacement)
    sanitized = re.sub(f"{escaped}+", replacement, sanitized)

    return sanitized.strip(replacement)
300
+
@@ -93,7 +93,7 @@ class ChatGPTImporter(Importer[ChatImportResult]):
93
93
  break
94
94
 
95
95
  # Generate permalink
96
- date_prefix = datetime.fromtimestamp(created_at).strftime("%Y%m%d")
96
+ date_prefix = datetime.fromtimestamp(created_at).astimezone().strftime("%Y%m%d")
97
97
  clean_title = clean_filename(conversation["title"])
98
98
 
99
99
  # Format content
@@ -43,13 +43,13 @@ def format_timestamp(timestamp: Any) -> str: # pragma: no cover
43
43
  except ValueError:
44
44
  try:
45
45
  # Try unix timestamp as string
46
- timestamp = datetime.fromtimestamp(float(timestamp))
46
+ timestamp = datetime.fromtimestamp(float(timestamp)).astimezone()
47
47
  except ValueError:
48
48
  # Return as is if we can't parse it
49
49
  return timestamp
50
50
  elif isinstance(timestamp, (int, float)):
51
51
  # Unix timestamp
52
- timestamp = datetime.fromtimestamp(timestamp)
52
+ timestamp = datetime.fromtimestamp(timestamp).astimezone()
53
53
 
54
54
  if isinstance(timestamp, datetime):
55
55
  return timestamp.strftime("%Y-%m-%d %H:%M:%S")
@@ -130,6 +130,6 @@ class EntityParser:
130
130
  content=post.content,
131
131
  observations=entity_content.observations,
132
132
  relations=entity_content.relations,
133
- created=datetime.fromtimestamp(file_stats.st_ctime),
134
- modified=datetime.fromtimestamp(file_stats.st_mtime),
133
+ created=datetime.fromtimestamp(file_stats.st_ctime).astimezone(),
134
+ modified=datetime.fromtimestamp(file_stats.st_mtime).astimezone(),
135
135
  )
@@ -2,11 +2,11 @@ from pathlib import Path
2
2
  from typing import Optional
3
3
  from collections import OrderedDict
4
4
 
5
- import frontmatter
6
5
  from frontmatter import Post
7
6
  from loguru import logger
8
7
 
9
8
  from basic_memory import file_utils
9
+ from basic_memory.file_utils import dump_frontmatter
10
10
  from basic_memory.markdown.entity_parser import EntityParser
11
11
  from basic_memory.markdown.schemas import EntityMarkdown, Observation, Relation
12
12
 
@@ -115,7 +115,7 @@ class MarkdownProcessor:
115
115
 
116
116
  # Create Post object for frontmatter
117
117
  post = Post(content, **frontmatter_dict)
118
- final_content = frontmatter.dumps(post, sort_keys=False)
118
+ final_content = dump_frontmatter(post)
119
119
 
120
120
  logger.debug(f"writing file {path} with content:\n{final_content}")
121
121
 
@@ -8,35 +8,49 @@ from markdown_it.token import Token
8
8
  # Observation handling functions
9
9
  def is_observation(token: Token) -> bool:
10
10
  """Check if token looks like our observation format."""
11
+ import re
11
12
  if token.type != "inline": # pragma: no cover
12
13
  return False
13
-
14
- content = token.content.strip()
14
+ # Use token.tag which contains the actual content for test tokens, fallback to content
15
+ content = (token.tag or token.content).strip()
15
16
  if not content: # pragma: no cover
16
17
  return False
17
-
18
18
  # if it's a markdown_task, return false
19
19
  if content.startswith("[ ]") or content.startswith("[x]") or content.startswith("[-]"):
20
20
  return False
21
-
22
- has_category = content.startswith("[") and "]" in content
21
+
22
+ # Exclude markdown links: [text](url)
23
+ if re.match(r"^\[.*?\]\(.*?\)$", content):
24
+ return False
25
+
26
+ # Exclude wiki links: [[text]]
27
+ if re.match(r"^\[\[.*?\]\]$", content):
28
+ return False
29
+
30
+ # Check for proper observation format: [category] content
31
+ match = re.match(r"^\[([^\[\]()]+)\]\s+(.+)", content)
23
32
  has_tags = "#" in content
24
- return has_category or has_tags
33
+ return bool(match) or has_tags
25
34
 
26
35
 
27
36
  def parse_observation(token: Token) -> Dict[str, Any]:
28
37
  """Extract observation parts from token."""
29
- # Strip bullet point if present
30
- content = token.content.strip()
31
-
32
- # Parse [category]
38
+ import re
39
+ # Use token.tag which contains the actual content for test tokens, fallback to content
40
+ content = (token.tag or token.content).strip()
41
+
42
+ # Parse [category] with regex
43
+ match = re.match(r"^\[([^\[\]()]+)\]\s+(.+)", content)
33
44
  category = None
34
- if content.startswith("["):
35
- end = content.find("]")
36
- if end != -1:
37
- category = content[1:end].strip() or None # Convert empty to None
38
- content = content[end + 1 :].strip()
39
-
45
+ if match:
46
+ category = match.group(1).strip()
47
+ content = match.group(2).strip()
48
+ else:
49
+ # Handle empty brackets [] followed by content
50
+ empty_match = re.match(r"^\[\]\s+(.+)", content)
51
+ if empty_match:
52
+ content = empty_match.group(1).strip()
53
+
40
54
  # Parse (context)
41
55
  context = None
42
56
  if content.endswith(")"):
@@ -44,20 +58,18 @@ def parse_observation(token: Token) -> Dict[str, Any]:
44
58
  if start != -1:
45
59
  context = content[start + 1 : -1].strip()
46
60
  content = content[:start].strip()
47
-
61
+
48
62
  # Extract tags and keep original content
49
63
  tags = []
50
64
  parts = content.split()
51
65
  for part in parts:
52
66
  if part.startswith("#"):
53
- # Handle multiple #tags stuck together
54
67
  if "#" in part[1:]:
55
- # Split on # but keep non-empty tags
56
68
  subtags = [t for t in part.split("#") if t]
57
69
  tags.extend(subtags)
58
70
  else:
59
71
  tags.append(part[1:])
60
-
72
+
61
73
  return {
62
74
  "category": category,
63
75
  "content": content,
@@ -72,14 +84,16 @@ def is_explicit_relation(token: Token) -> bool:
72
84
  if token.type != "inline": # pragma: no cover
73
85
  return False
74
86
 
75
- content = token.content.strip()
87
+ # Use token.tag which contains the actual content for test tokens, fallback to content
88
+ content = (token.tag or token.content).strip()
76
89
  return "[[" in content and "]]" in content
77
90
 
78
91
 
79
92
  def parse_relation(token: Token) -> Dict[str, Any] | None:
80
93
  """Extract relation parts from token."""
81
94
  # Remove bullet point if present
82
- content = token.content.strip()
95
+ # Use token.tag which contains the actual content for test tokens, fallback to content
96
+ content = (token.tag or token.content).strip()
83
97
 
84
98
  # Extract [[target]]
85
99
  target = None
@@ -213,10 +227,12 @@ def relation_plugin(md: MarkdownIt) -> None:
213
227
  token.meta["relations"] = [rel]
214
228
 
215
229
  # Always check for inline links in any text
216
- elif "[[" in token.content:
217
- rels = parse_inline_relations(token.content)
218
- if rels:
219
- token.meta["relations"] = token.meta.get("relations", []) + rels
230
+ else:
231
+ content = token.tag or token.content
232
+ if "[[" in content:
233
+ rels = parse_inline_relations(content)
234
+ if rels:
235
+ token.meta["relations"] = token.meta.get("relations", []) + rels
220
236
 
221
237
  # Add the rule after inline processing
222
238
  md.core.ruler.after("inline", "relations", relation_rule)
@@ -41,7 +41,7 @@ def entity_model_from_markdown(
41
41
  # Only update permalink if it exists in frontmatter, otherwise preserve existing
42
42
  if markdown.frontmatter.permalink is not None:
43
43
  model.permalink = markdown.frontmatter.permalink
44
- model.file_path = str(file_path)
44
+ model.file_path = file_path.as_posix()
45
45
  model.content_type = "text/markdown"
46
46
  model.created_at = markdown.created
47
47
  model.updated_at = markdown.modified
@@ -15,6 +15,7 @@ from basic_memory.schemas.memory import (
15
15
  memory_url_path,
16
16
  )
17
17
 
18
+ type StringOrInt = str | int
18
19
 
19
20
  @mcp.tool(
20
21
  description="""Build context from a memory:// URI to continue conversations naturally.
@@ -35,7 +36,7 @@ from basic_memory.schemas.memory import (
35
36
  )
36
37
  async def build_context(
37
38
  url: MemoryUrl,
38
- depth: Optional[int] = 1,
39
+ depth: Optional[StringOrInt] = 1,
39
40
  timeframe: Optional[TimeFrame] = "7d",
40
41
  page: int = 1,
41
42
  page_size: int = 10,
@@ -80,6 +81,15 @@ async def build_context(
80
81
  build_context("memory://specs/search", project="work-project")
81
82
  """
82
83
  logger.info(f"Building context from {url}")
84
+
85
+ # Convert string depth to integer if needed
86
+ if isinstance(depth, str):
87
+ try:
88
+ depth = int(depth)
89
+ except ValueError:
90
+ from mcp.server.fastmcp.exceptions import ToolError
91
+ raise ToolError(f"Invalid depth parameter: '{depth}' is not a valid integer")
92
+
83
93
  # URL is already validated and normalized by MemoryUrl type annotation
84
94
 
85
95
  # Get the active project first to check project-specific sync status
@@ -101,7 +111,7 @@ async def build_context(
101
111
  metadata=MemoryMetadata(
102
112
  depth=depth or 1,
103
113
  timeframe=timeframe,
104
- generated_at=datetime.now(),
114
+ generated_at=datetime.now().astimezone(),
105
115
  primary_count=0,
106
116
  related_count=0,
107
117
  uri=migration_status, # Include status in metadata
@@ -221,8 +221,10 @@ async def set_default_project(project_name: str, ctx: Context | None = None) ->
221
221
  if ctx: # pragma: no cover
222
222
  await ctx.info(f"Setting default project to: {project_name}")
223
223
 
224
- # Call API to set default project
225
- response = await call_put(client, f"/projects/{project_name}/default")
224
+ # Call API to set default project using URL encoding for special characters
225
+ from urllib.parse import quote
226
+ encoded_name = quote(project_name, safe='')
227
+ response = await call_put(client, f"/projects/{encoded_name}/default")
226
228
  status_response = ProjectStatusResponse.model_validate(response.json())
227
229
 
228
230
  result = f"✓ {status_response.message}\n\n"
@@ -323,16 +325,29 @@ async def delete_project(project_name: str, ctx: Context | None = None) -> str:
323
325
  response = await call_get(client, "/projects/projects")
324
326
  project_list = ProjectList.model_validate(response.json())
325
327
 
326
- # Check if project exists
327
- project_exists = any(p.name == project_name for p in project_list.projects)
328
- if not project_exists:
328
+ # Find the project by name (case-insensitive) or permalink - same logic as switch_project
329
+ project_permalink = generate_permalink(project_name)
330
+ target_project = None
331
+ for p in project_list.projects:
332
+ # Match by permalink (handles case-insensitive input)
333
+ if p.permalink == project_permalink:
334
+ target_project = p
335
+ break
336
+ # Also match by name comparison (case-insensitive)
337
+ if p.name.lower() == project_name.lower():
338
+ target_project = p
339
+ break
340
+
341
+ if not target_project:
329
342
  available_projects = [p.name for p in project_list.projects]
330
343
  raise ValueError(
331
344
  f"Project '{project_name}' not found. Available projects: {', '.join(available_projects)}"
332
345
  )
333
346
 
334
- # Call API to delete project
335
- response = await call_delete(client, f"/projects/{project_name}")
347
+ # Call API to delete project using URL encoding for special characters
348
+ from urllib.parse import quote
349
+ encoded_name = quote(target_project.name, safe='')
350
+ response = await call_delete(client, f"/projects/{encoded_name}")
336
351
  status_response = ProjectStatusResponse.model_validate(response.json())
337
352
 
338
353
  result = f"✓ {status_response.message}\n\n"
@@ -56,6 +56,20 @@ async def read_note(
56
56
  # Get the active project first to check project-specific sync status
57
57
  active_project = get_active_project(project)
58
58
 
59
+ # Validate identifier to prevent path traversal attacks
60
+ # We need to check both the raw identifier and the processed path
61
+ processed_path = memory_url_path(identifier)
62
+ project_path = active_project.home
63
+
64
+ if not validate_project_path(identifier, project_path) or not validate_project_path(processed_path, project_path):
65
+ logger.warning(
66
+ "Attempted path traversal attack blocked",
67
+ identifier=identifier,
68
+ processed_path=processed_path,
69
+ project=active_project.name,
70
+ )
71
+ return f"# Error\n\nIdentifier '{identifier}' is not allowed - paths must stay within project boundaries"
72
+
59
73
  # Check migration status and wait briefly if needed
60
74
  from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status
61
75
 
@@ -68,17 +82,6 @@ async def read_note(
68
82
 
69
83
  # Get the file via REST API - first try direct permalink lookup
70
84
  entity_path = memory_url_path(identifier)
71
-
72
- # Validate path to prevent path traversal attacks
73
- project_path = active_project.home
74
- if not validate_project_path(entity_path, project_path):
75
- logger.warning(
76
- "Attempted path traversal attack blocked",
77
- identifier=identifier,
78
- entity_path=entity_path,
79
- project=active_project.name,
80
- )
81
- return f"# Error\n\nPath '{identifier}' is not allowed - paths must stay within project boundaries"
82
85
  path = f"{project_url}/resource/{entity_path}"
83
86
  logger.info(f"Attempting to read note from URL: {path}")
84
87
 
@@ -136,7 +139,7 @@ def format_not_found_message(identifier: str) -> str:
136
139
  return dedent(f"""
137
140
  # Note Not Found: "{identifier}"
138
141
 
139
- I searched for "{identifier}" using multiple methods (direct lookup, title search, and text search) but couldn't find any matching notes. Here are some suggestions:
142
+ I couldn't find any notes matching "{identifier}". Here are some suggestions:
140
143
 
141
144
  ## Check Identifier Type
142
145
  - If you provided a title, try using the exact permalink instead
@@ -182,7 +185,7 @@ def format_related_results(identifier: str, results) -> str:
182
185
  message = dedent(f"""
183
186
  # Note Not Found: "{identifier}"
184
187
 
185
- I searched for "{identifier}" using direct lookup and title search but couldn't find an exact match. However, I found some related notes through text search:
188
+ I couldn't find an exact match for "{identifier}", but I found some related notes:
186
189
 
187
190
  """)
188
191