basic-memory 0.7.0-py3-none-any.whl → 0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (58)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/alembic.ini +119 -0
  3. basic_memory/alembic/env.py +23 -1
  4. basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py +51 -0
  5. basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py +44 -0
  6. basic_memory/api/app.py +0 -4
  7. basic_memory/api/routers/knowledge_router.py +1 -1
  8. basic_memory/api/routers/memory_router.py +16 -16
  9. basic_memory/api/routers/resource_router.py +105 -4
  10. basic_memory/cli/app.py +0 -2
  11. basic_memory/cli/commands/status.py +9 -21
  12. basic_memory/cli/commands/sync.py +12 -16
  13. basic_memory/cli/commands/tools.py +36 -13
  14. basic_memory/cli/main.py +0 -1
  15. basic_memory/config.py +15 -1
  16. basic_memory/file_utils.py +6 -4
  17. basic_memory/markdown/entity_parser.py +3 -3
  18. basic_memory/mcp/async_client.py +1 -1
  19. basic_memory/mcp/main.py +25 -0
  20. basic_memory/mcp/prompts/__init__.py +15 -0
  21. basic_memory/mcp/prompts/ai_assistant_guide.py +28 -0
  22. basic_memory/mcp/prompts/continue_conversation.py +172 -0
  23. basic_memory/mcp/prompts/json_canvas_spec.py +25 -0
  24. basic_memory/mcp/prompts/recent_activity.py +46 -0
  25. basic_memory/mcp/prompts/search.py +127 -0
  26. basic_memory/mcp/prompts/utils.py +98 -0
  27. basic_memory/mcp/server.py +3 -7
  28. basic_memory/mcp/tools/__init__.py +6 -4
  29. basic_memory/mcp/tools/canvas.py +99 -0
  30. basic_memory/mcp/tools/memory.py +12 -5
  31. basic_memory/mcp/tools/notes.py +1 -2
  32. basic_memory/mcp/tools/resource.py +192 -0
  33. basic_memory/mcp/tools/utils.py +2 -1
  34. basic_memory/models/knowledge.py +27 -11
  35. basic_memory/repository/repository.py +1 -1
  36. basic_memory/repository/search_repository.py +14 -4
  37. basic_memory/schemas/__init__.py +0 -11
  38. basic_memory/schemas/base.py +4 -1
  39. basic_memory/schemas/memory.py +11 -2
  40. basic_memory/schemas/search.py +2 -1
  41. basic_memory/services/entity_service.py +19 -12
  42. basic_memory/services/file_service.py +69 -2
  43. basic_memory/services/link_resolver.py +12 -9
  44. basic_memory/services/search_service.py +56 -12
  45. basic_memory/sync/__init__.py +3 -2
  46. basic_memory/sync/sync_service.py +294 -123
  47. basic_memory/sync/watch_service.py +125 -129
  48. basic_memory/utils.py +24 -9
  49. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/METADATA +2 -1
  50. basic_memory-0.8.0.dist-info/RECORD +91 -0
  51. basic_memory/alembic/README +0 -1
  52. basic_memory/schemas/discovery.py +0 -28
  53. basic_memory/sync/file_change_scanner.py +0 -158
  54. basic_memory/sync/utils.py +0 -31
  55. basic_memory-0.7.0.dist-info/RECORD +0 -82
  56. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/WHEEL +0 -0
  57. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/entry_points.txt +0 -0
  58. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/licenses/LICENSE +0 -0

basic_memory/mcp/prompts/utils.py
@@ -0,0 +1,98 @@
+ """Utility functions for formatting prompt responses.
+
+ These utilities help format data from various tools into consistent,
+ user-friendly markdown summaries.
+ """
+
+ from basic_memory.schemas.memory import GraphContext
+
+
+ def format_context_summary(header: str, context: GraphContext) -> str:
+     """Format GraphContext as a helpful markdown summary.
+
+     This creates a user-friendly markdown response that explains the context
+     and provides guidance on how to explore further.
+
+     Args:
+         header: The title to use for the summary
+         context: The GraphContext object to format
+
+     Returns:
+         Formatted markdown string with the context summary
+     """
+     summary = []
+
+     # Extract URI for reference
+     uri = context.metadata.uri or "a/permalink-value"
+
+     # Add header
+     summary.append(f"{header}")
+     summary.append("")
+
+     # Primary document section
+     if context.primary_results:
+         summary.append(f"## Primary Documents ({len(context.primary_results)})")
+
+         for primary in context.primary_results:
+             summary.append(f"### {primary.title}")
+             summary.append(f"- **Type**: {primary.type}")
+             summary.append(f"- **Path**: {primary.file_path}")
+             summary.append(f"- **Created**: {primary.created_at.strftime('%Y-%m-%d %H:%M')}")
+             summary.append("")
+             summary.append(
+                 f'To view this document\'s content: `read_note("{primary.permalink}")` or `read_note("{primary.title}")` '
+             )
+             summary.append("")
+     else:
+         summary.append("\nNo primary documents found.")
+
+     # Related documents section
+     if context.related_results:
+         summary.append(f"## Related Documents ({len(context.related_results)})")
+
+         # Group by relation type for better organization
+         relation_types = {}
+         for rel in context.related_results:
+             if hasattr(rel, "relation_type"):
+                 rel_type = rel.relation_type  # pyright: ignore
+                 if rel_type not in relation_types:
+                     relation_types[rel_type] = []
+                 relation_types[rel_type].append(rel)
+
+         # Display relations grouped by type
+         for rel_type, relations in relation_types.items():
+             summary.append(f"### {rel_type.replace('_', ' ').title()} ({len(relations)})")
+
+             for rel in relations:
+                 if hasattr(rel, "to_id") and rel.to_id:
+                     summary.append(f"- **{rel.to_id}**")
+                     summary.append(f'  - View document: `read_note("{rel.to_id}")` ')
+                     summary.append(
+                         f'  - Explore connections: `build_context("memory://{rel.to_id}")` '
+                     )
+                 else:
+                     summary.append(f"- **Unresolved relation**: {rel.permalink}")
+             summary.append("")
+
+     # Next steps section
+     summary.append("## Next Steps")
+     summary.append("Here are some ways to explore further:")
+
+     search_term = uri.split("/")[-1]
+     summary.append(f'- **Search related topics**: `search({{"text": "{search_term}"}})`')
+
+     summary.append('- **Check recent changes**: `recent_activity(timeframe="3 days")`')
+     summary.append(f'- **Explore all relations**: `build_context("memory://{uri}/*")`')
+
+     # Tips section
+     summary.append("")
+     summary.append("## Tips")
+     summary.append(
+         f'- For more specific context, increase depth: `build_context("memory://{uri}", depth=2)`'
+     )
+     summary.append(
+         "- You can follow specific relation types using patterns like: `memory://document/relation-type/*`"
+     )
+     summary.append("- Look for connected documents by checking relations between them")
+
+     return "\n".join(summary)
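
For orientation, a runnable sketch (not part of the package) of the grouping idiom in `format_context_summary`, using `SimpleNamespace` stand-ins for `context.related_results`; all data here is invented for illustration:

```python
from collections import defaultdict
from types import SimpleNamespace

# Hypothetical stand-ins for context.related_results (illustration only)
related = [
    SimpleNamespace(relation_type="implements", to_id="features/search-ui"),
    SimpleNamespace(relation_type="implements", to_id="features/search-api"),
    SimpleNamespace(relation_type="relates_to", to_id="specs/search"),
]

# Same grouping as the manual loop above, via defaultdict
relation_types = defaultdict(list)
for rel in related:
    if hasattr(rel, "relation_type"):
        relation_types[rel.relation_type].append(rel)

for rel_type, relations in relation_types.items():
    print(f"### {rel_type.replace('_', ' ').title()} ({len(relations)})")
# -> "### Implements (2)" then "### Relates To (1)"
```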

basic_memory/mcp/server.py
@@ -1,15 +1,11 @@
  """Enhanced FastMCP server instance for Basic Memory."""
 
  from mcp.server.fastmcp import FastMCP
-
- from basic_memory.utils import setup_logging
+ from mcp.server.fastmcp.utilities.logging import configure_logging
 
  # mcp console logging
- # configure_logging(level='INFO')
-
+ configure_logging(level="INFO")
 
- # start our out file logging
- setup_logging(log_file=".basic-memory/basic-memory.log")
 
  # Create the shared server instance
- mcp = FastMCP("Basic Memory")
+ mcp = FastMCP("Basic Memory")

basic_memory/mcp/tools/__init__.py
@@ -6,11 +6,11 @@ all tools with the MCP server.
  """
 
  # Import tools to register them with MCP
+ from basic_memory.mcp.tools.resource import read_resource
  from basic_memory.mcp.tools.memory import build_context, recent_activity
-
- # from basic_memory.mcp.tools.ai_edit import ai_edit
  from basic_memory.mcp.tools.notes import read_note, write_note
  from basic_memory.mcp.tools.search import search
+ from basic_memory.mcp.tools.canvas import canvas
 
  from basic_memory.mcp.tools.knowledge import (
      delete_entities,
@@ -31,6 +31,8 @@ __all__ = [
      # notes
      "read_note",
      "write_note",
-     # file edit
-     # "ai_edit",
+     # files
+     "read_resource",
+     # canvas
+     "canvas",
  ]

basic_memory/mcp/tools/canvas.py
@@ -0,0 +1,99 @@
+ """Canvas creation tool for Basic Memory MCP server.
+
+ This tool creates Obsidian canvas files (.canvas) using the JSON Canvas 1.0 spec.
+ """
+
+ import json
+ from typing import Dict, List, Any
+
+ import logfire
+ from loguru import logger
+
+ from basic_memory.mcp.async_client import client
+ from basic_memory.mcp.server import mcp
+ from basic_memory.mcp.tools.utils import call_put
+
+
+ @mcp.tool(
+     description="Create an Obsidian canvas file to visualize concepts and connections.",
+ )
+ async def canvas(
+     nodes: List[Dict[str, Any]],
+     edges: List[Dict[str, Any]],
+     title: str,
+     folder: str,
+ ) -> str:
+     """Create an Obsidian canvas file with the provided nodes and edges.
+
+     This tool creates a .canvas file compatible with Obsidian's Canvas feature,
+     allowing visualization of relationships between concepts or documents.
+
+     For the full JSON Canvas 1.0 specification, see the 'spec://canvas' resource.
+
+     Args:
+         nodes: List of node objects following JSON Canvas 1.0 spec
+         edges: List of edge objects following JSON Canvas 1.0 spec
+         title: The title of the canvas (will be saved as title.canvas)
+         folder: The folder where the file should be saved
+
+     Returns:
+         A summary of the created canvas file
+
+     Important Notes:
+     - When referencing files, use the exact file path as shown in Obsidian
+       Example: "folder/Document Name.md" (not permalink format)
+     - For file nodes, the "file" attribute must reference an existing file
+     - Nodes require id, type, x, y, width, height properties
+     - Edges require id, fromNode, toNode properties
+     - Position nodes in a logical layout (x,y coordinates in pixels)
+     - Use color attributes ("1"-"6" or hex) for visual organization
+
+     Basic Structure:
+     ```json
+     {
+       "nodes": [
+         {
+           "id": "node1",
+           "type": "file",  // Options: "file", "text", "link", "group"
+           "file": "folder/Document.md",
+           "x": 0,
+           "y": 0,
+           "width": 400,
+           "height": 300
+         }
+       ],
+       "edges": [
+         {
+           "id": "edge1",
+           "fromNode": "node1",
+           "toNode": "node2",
+           "label": "connects to"
+         }
+       ]
+     }
+     ```
+     """
+     with logfire.span("Creating canvas", folder=folder, title=title):  # type: ignore
+         # Ensure path has .canvas extension
+         file_title = title if title.endswith(".canvas") else f"{title}.canvas"
+         file_path = f"{folder}/{file_title}"
+
+         # Create canvas data structure
+         canvas_data = {"nodes": nodes, "edges": edges}
+
+         # Convert to JSON
+         canvas_json = json.dumps(canvas_data, indent=2)
+
+         # Write the file using the resource API
+         logger.info(f"Creating canvas file: {file_path}")
+         response = await call_put(client, f"/resource/{file_path}", json=canvas_json)
+
+         # Parse response
+         result = response.json()
+         logger.debug(result)
+
+         # Build summary
+         action = "Created" if response.status_code == 201 else "Updated"
+         summary = [f"# {action}: {file_path}", "\nThe canvas is ready to open in Obsidian."]
+
+         return "\n".join(summary)
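
For reference, a hypothetical call to the new `canvas` tool using the node and edge shapes from its docstring; the folder, file paths, and title below are invented, and the call itself is shown commented out since it needs a running MCP client:

```python
# Hypothetical payload following the JSON Canvas 1.0 shapes documented above
nodes = [
    {"id": "node1", "type": "file", "file": "specs/Search.md",
     "x": 0, "y": 0, "width": 400, "height": 300},
    {"id": "node2", "type": "text", "text": "Search UI ideas",
     "x": 500, "y": 0, "width": 300, "height": 200, "color": "4"},
]
edges = [
    {"id": "edge1", "fromNode": "node1", "toNode": "node2", "label": "informs"},
]

# Inside an async context with the MCP server available:
# summary = await canvas(nodes=nodes, edges=edges, title="search-design", folder="canvases")
```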

basic_memory/mcp/tools/memory.py
@@ -1,6 +1,6 @@
  """Discussion context tools for Basic Memory MCP server."""
 
- from typing import Optional, Literal, List
+ from typing import Optional, List
 
  from loguru import logger
  import logfire
@@ -15,6 +15,7 @@ from basic_memory.schemas.memory import (
      normalize_memory_url,
  )
  from basic_memory.schemas.base import TimeFrame
+ from basic_memory.schemas.search import SearchItemType
 
 
  @mcp.tool(
@@ -100,7 +101,7 @@
      """,
  )
  async def recent_activity(
-     type: List[Literal["entity", "observation", "relation"]] = [],
+     type: Optional[List[SearchItemType]] = None,
      depth: Optional[int] = 1,
      timeframe: Optional[TimeFrame] = "7d",
      page: int = 1,
@@ -153,14 +154,20 @@
          f"Getting recent activity from {type}, depth={depth}, timeframe={timeframe}, page={page}, page_size={page_size}, max_related={max_related}"
      )
      params = {
-         "depth": depth,
-         "timeframe": timeframe,
          "page": page,
          "page_size": page_size,
          "max_related": max_related,
      }
+     if depth:
+         params["depth"] = depth
+     if timeframe:
+         params["timeframe"] = timeframe  # pyright: ignore
+
+     # send enum values if we have an enum, else send string value
      if type:
-         params["type"] = type
+         params["type"] = [  # pyright: ignore
+             type.value if isinstance(type, SearchItemType) else type for type in type
+         ]
 
      response = await call_get(
          client,
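
The `recent_activity` signature change swaps string literals for `SearchItemType` while still accepting either form at the call site. A self-contained sketch of that normalization, with a stand-in enum whose values are taken from the removed `Literal["entity", "observation", "relation"]`:

```python
from enum import Enum
from typing import List, Union

class SearchItemType(str, Enum):
    """Stand-in for basic_memory.schemas.search.SearchItemType (values assumed)."""
    ENTITY = "entity"
    OBSERVATION = "observation"
    RELATION = "relation"

def normalize_types(types: List[Union[SearchItemType, str]]) -> List[str]:
    # Send enum values if we have an enum, else the raw string
    return [t.value if isinstance(t, SearchItemType) else t for t in types]

assert normalize_types([SearchItemType.ENTITY, "relation"]) == ["entity", "relation"]
```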

basic_memory/mcp/tools/notes.py
@@ -84,9 +84,8 @@
 
      # Format semantic summary based on status code
      action = "Created" if response.status_code == 201 else "Updated"
-     assert result.checksum is not None
      summary = [
-         f"# {action} {result.file_path} ({result.checksum[:8]})",
+         f"# {action} {result.file_path} ({result.checksum[:8] if result.checksum else 'unknown'})",
          f"permalink: {result.permalink}",
      ]
 
basic_memory/mcp/tools/resource.py
@@ -0,0 +1,192 @@
+ from loguru import logger
+
+ from basic_memory.mcp.server import mcp
+ from basic_memory.mcp.async_client import client
+ from basic_memory.mcp.tools.utils import call_get
+ from basic_memory.schemas.memory import memory_url_path
+
+ import base64
+ import io
+ from PIL import Image as PILImage
+
+
+ def calculate_target_params(content_length):
+     """Calculate initial quality and size based on input file size"""
+     target_size = 350000  # Reduced target for more safety margin
+     ratio = content_length / target_size
+
+     logger.debug(
+         "Calculating target parameters",
+         content_length=content_length,
+         ratio=ratio,
+         target_size=target_size,
+     )
+
+     if ratio > 4:
+         # Very large images - start very aggressive
+         return 50, 600  # Lower initial quality and size
+     elif ratio > 2:
+         return 60, 800
+     else:
+         return 70, 1000
+
+
+ def resize_image(img, max_size):
+     """Resize image maintaining aspect ratio"""
+     original_dimensions = {"width": img.width, "height": img.height}
+
+     if img.width > max_size or img.height > max_size:
+         ratio = min(max_size / img.width, max_size / img.height)
+         new_size = (int(img.width * ratio), int(img.height * ratio))
+         logger.debug("Resizing image", original=original_dimensions, target=new_size, ratio=ratio)
+         return img.resize(new_size, PILImage.Resampling.LANCZOS)
+
+     logger.debug("No resize needed", dimensions=original_dimensions)
+     return img
+
+
+ def optimize_image(img, content_length, max_output_bytes=350000):
+     """Iteratively optimize image with aggressive size reduction"""
+     stats = {
+         "dimensions": {"width": img.width, "height": img.height},
+         "mode": img.mode,
+         "estimated_memory": (img.width * img.height * len(img.getbands())),
+     }
+
+     initial_quality, initial_size = calculate_target_params(content_length)
+
+     logger.debug(
+         "Starting optimization",
+         image_stats=stats,
+         content_length=content_length,
+         initial_quality=initial_quality,
+         initial_size=initial_size,
+         max_output_bytes=max_output_bytes,
+     )
+
+     quality = initial_quality
+     size = initial_size
+
+     # Convert to RGB if needed
+     if img.mode in ("RGBA", "LA") or (img.mode == "P" and "transparency" in img.info):
+         img = img.convert("RGB")
+         logger.debug("Converted to RGB mode")
+
+     iteration = 0
+     min_size = 300  # Absolute minimum size
+     min_quality = 20  # Absolute minimum quality
+
+     while True:
+         iteration += 1
+         buf = io.BytesIO()
+         resized = resize_image(img, size)
+
+         resized.save(
+             buf,
+             format="JPEG",
+             quality=quality,
+             optimize=True,
+             progressive=True,
+             subsampling="4:2:0",
+         )
+
+         output_size = buf.getbuffer().nbytes
+         reduction_ratio = output_size / content_length
+
+         logger.debug(
+             "Optimization attempt",
+             iteration=iteration,
+             quality=quality,
+             size=size,
+             output_bytes=output_size,
+             target_bytes=max_output_bytes,
+             reduction_ratio=f"{reduction_ratio:.2f}",
+         )
+
+         if output_size < max_output_bytes:
+             logger.info(
+                 "Image optimization complete",
+                 final_size=output_size,
+                 quality=quality,
+                 dimensions={"width": resized.width, "height": resized.height},
+                 reduction_ratio=f"{reduction_ratio:.2f}",
+             )
+             return buf.getvalue()
+
+         # Very aggressive reduction for large files
+         if content_length > 2000000:  # 2MB+  # pragma: no cover
+             quality = max(min_quality, quality - 20)
+             size = max(min_size, int(size * 0.6))
+         elif content_length > 1000000:  # 1MB+  # pragma: no cover
+             quality = max(min_quality, quality - 15)
+             size = max(min_size, int(size * 0.7))
+         else:
+             quality = max(min_quality, quality - 10)  # pragma: no cover
+             size = max(min_size, int(size * 0.8))  # pragma: no cover
+
+         logger.debug("Reducing parameters", new_quality=quality, new_size=size)  # pragma: no cover
+
+         # If we've hit minimum values and still too big
+         if quality <= min_quality and size <= min_size:  # pragma: no cover
+             logger.warning(
+                 "Reached minimum parameters",
+                 final_size=output_size,
+                 over_limit_by=output_size - max_output_bytes,
+             )
+             return buf.getvalue()
+
+
+ @mcp.tool(description="Read a single file's content by path or permalink")
+ async def read_resource(path: str) -> dict:
+     """Get a file's raw content."""
+     logger.info("Reading resource", path=path)
+
+     url = memory_url_path(path)
+     response = await call_get(client, f"/resource/{url}")
+     content_type = response.headers.get("content-type", "application/octet-stream")
+     content_length = int(response.headers.get("content-length", 0))
+
+     logger.debug("Resource metadata", content_type=content_type, size=content_length, path=path)
+
+     # Handle text or json
+     if content_type.startswith("text/") or content_type == "application/json":
+         logger.debug("Processing text resource")
+         return {
+             "type": "text",
+             "text": response.text,
+             "content_type": content_type,
+             "encoding": "utf-8",
+         }
+
+     # Handle images
+     elif content_type.startswith("image/"):
+         logger.debug("Processing image")
+         img = PILImage.open(io.BytesIO(response.content))
+         img_bytes = optimize_image(img, content_length)
+
+         return {
+             "type": "image",
+             "source": {
+                 "type": "base64",
+                 "media_type": "image/jpeg",
+                 "data": base64.b64encode(img_bytes).decode("utf-8"),
+             },
+         }
+
+     # Handle other file types
+     else:
+         logger.debug(f"Processing binary resource content_type {content_type}")
+         if content_length > 350000:
+             logger.warning("Document too large for response", size=content_length)
+             return {
+                 "type": "error",
+                 "error": f"Document size {content_length} bytes exceeds maximum allowed size",
+             }
+         return {
+             "type": "document",
+             "source": {
+                 "type": "base64",
+                 "media_type": content_type,
+                 "data": base64.b64encode(response.content).decode("utf-8"),
+             },
+         }
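
A sketch of exercising the `optimize_image` helper above on its own, assuming basic-memory 0.8.0 and Pillow are installed and a local `screenshot.png` exists; the helper iteratively re-encodes to JPEG until the output fits the ~350 KB budget used by `read_resource` (only at minimum quality and size can it come in over budget):

```python
import io
from pathlib import Path

from PIL import Image as PILImage
from basic_memory.mcp.tools.resource import optimize_image

raw = Path("screenshot.png").read_bytes()          # hypothetical input file
img = PILImage.open(io.BytesIO(raw))
jpeg_bytes = optimize_image(img, content_length=len(raw))
print(f"optimized {len(raw)} -> {len(jpeg_bytes)} bytes")
```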

basic_memory/mcp/tools/utils.py
@@ -44,7 +44,7 @@ async def call_get(
          response.raise_for_status()
          return response
      except HTTPStatusError as e:
-         logger.error(f"Error calling GET {url}: {e}")
+         logger.exception(f"Error calling GET {url}: {e}")
          raise ToolError(f"Error calling tool: {e}.") from e
 
 
@@ -79,6 +79,7 @@ async def call_put(
              timeout=timeout,
              extensions=extensions,
          )
+         logger.debug(response)
          response.raise_for_status()
          return response
      except HTTPStatusError as e:

basic_memory/models/knowledge.py
@@ -12,6 +12,7 @@ from sqlalchemy import (
      DateTime,
      Index,
      JSON,
+     text,
  )
  from sqlalchemy.orm import Mapped, mapped_column, relationship
 
@@ -32,11 +33,18 @@ class Entity(Base):
 
      __tablename__ = "entity"
      __table_args__ = (
-         UniqueConstraint("permalink", name="uix_entity_permalink"),  # Make permalink unique
+         # Regular indexes
          Index("ix_entity_type", "entity_type"),
          Index("ix_entity_title", "title"),
          Index("ix_entity_created_at", "created_at"),  # For timeline queries
          Index("ix_entity_updated_at", "updated_at"),  # For timeline queries
+         # Unique index only for markdown files with non-null permalinks
+         Index(
+             "uix_entity_permalink",
+             "permalink",
+             unique=True,
+             sqlite_where=text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
+         ),
      )
 
      # Core identity
@@ -46,8 +54,8 @@
      entity_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
      content_type: Mapped[str] = mapped_column(String)
 
-     # Normalized path for URIs
-     permalink: Mapped[str] = mapped_column(String, unique=True, index=True)
+     # Normalized path for URIs - required for markdown files only
+     permalink: Mapped[Optional[str]] = mapped_column(String, nullable=True, index=True)
      # Actual filesystem relative path
      file_path: Mapped[str] = mapped_column(String, unique=True, index=True)
      # checksum of file
@@ -79,6 +87,11 @@
          """Get all relations (incoming and outgoing) for this entity."""
          return self.incoming_relations + self.outgoing_relations
 
+     @property
+     def is_markdown(self):
+         """Check if the entity is a markdown file."""
+         return self.content_type == "text/markdown"
+
      def __repr__(self) -> str:
          return f"Entity(id={self.id}, name='{self.title}', type='{self.entity_type}'"
 
@@ -127,7 +140,10 @@ class Relation(Base):
 
      __tablename__ = "relation"
      __table_args__ = (
-         UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation"),
+         UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation_from_id_to_id"),
+         UniqueConstraint(
+             "from_id", "to_name", "relation_type", name="uix_relation_from_id_to_name"
+         ),
          Index("ix_relation_type", "relation_type"),
          Index("ix_relation_from_id", "from_id"),  # Add FK indexes
          Index("ix_relation_to_id", "to_id"),
@@ -155,13 +171,13 @@
          Format: source/relation_type/target
          Example: "specs/search/implements/features/search-ui"
          """
+         # Only create permalinks when both source and target have permalinks
+         from_permalink = self.from_entity.permalink or self.from_entity.file_path
+
          if self.to_entity:
-             return generate_permalink(
-                 f"{self.from_entity.permalink}/{self.relation_type}/{self.to_entity.permalink}"
-             )
-         return generate_permalink(
-             f"{self.from_entity.permalink}/{self.relation_type}/{self.to_name}"
-         )
+             to_permalink = self.to_entity.permalink or self.to_entity.file_path
+             return generate_permalink(f"{from_permalink}/{self.relation_type}/{to_permalink}")
+         return generate_permalink(f"{from_permalink}/{self.relation_type}/{self.to_name}")
 
      def __repr__(self) -> str:
-         return f"Relation(id={self.id}, from_id={self.from_id}, to_id={self.to_id}, to_name={self.to_name}, type='{self.relation_type}')"
+         return f"Relation(id={self.id}, from_id={self.from_id}, to_id={self.to_id}, to_name={self.to_name}, type='{self.relation_type}')"  # pragma: no cover
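
Permalink uniqueness moves from a table-wide `UniqueConstraint` to a SQLite partial unique index, so non-markdown entities and NULL permalinks no longer collide. A minimal standalone sketch of the same pattern with a stand-in model (not the package's):

```python
from sqlalchemy import Column, Index, Integer, String, create_engine, text
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class EntitySketch(Base):  # stand-in model, for illustration only
    __tablename__ = "entity_sketch"
    id = Column(Integer, primary_key=True)
    permalink = Column(String, nullable=True, index=True)
    content_type = Column(String)
    __table_args__ = (
        # Uniqueness applies only where the predicate holds; other rows never collide
        Index(
            "uix_entity_sketch_permalink",
            "permalink",
            unique=True,
            sqlite_where=text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
        ),
    )

engine = create_engine("sqlite://")
# On SQLite this emits CREATE UNIQUE INDEX ... WHERE content_type = 'text/markdown' ...
Base.metadata.create_all(engine)
```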

basic_memory/repository/repository.py
@@ -97,7 +97,7 @@ class Repository[T: Base]:
          entities = (self.Model,)
          return select(*entities)
 
-     async def find_all(self, skip: int = 0, limit: Optional[int] = 0) -> Sequence[T]:
+     async def find_all(self, skip: int = 0, limit: Optional[int] = None) -> Sequence[T]:
          """Fetch records from the database with pagination."""
          logger.debug(f"Finding all {self.Model.__name__} (skip={skip}, limit={limit})")
 
basic_memory/repository/search_repository.py
@@ -21,13 +21,14 @@
 
      id: int
      type: str
-     permalink: str
      file_path: str
-     metadata: Optional[dict] = None
 
      # date values
-     created_at: Optional[datetime] = None
-     updated_at: Optional[datetime] = None
+     created_at: datetime
+     updated_at: datetime
+
+     permalink: Optional[str] = None
+     metadata: Optional[dict] = None
 
      # assigned in result
      score: Optional[float] = None
@@ -265,6 +266,15 @@
              logger.debug(f"indexed row {search_index_row}")
              await session.commit()
 
+     async def delete_by_entity_id(self, entity_id: int):
+         """Delete an item from the search index by entity_id."""
+         async with db.scoped_session(self.session_maker) as session:
+             await session.execute(
+                 text("DELETE FROM search_index WHERE entity_id = :entity_id"),
+                 {"entity_id": entity_id},
+             )
+             await session.commit()
+
      async def delete_by_permalink(self, permalink: str):
          """Delete an item from the search index."""
          async with db.scoped_session(self.session_maker) as session:

basic_memory/schemas/__init__.py
@@ -37,13 +37,6 @@ from basic_memory.schemas.response import (
      DeleteEntitiesResponse,
  )
 
- # Discovery and analytics models
- from basic_memory.schemas.discovery import (
-     EntityTypeList,
-     ObservationCategoryList,
-     TypedEntityList,
- )
-
  # For convenient imports, export all models
  __all__ = [
      # Base
@@ -66,8 +59,4 @@ __all__ = [
      "DeleteEntitiesResponse",
      # Delete Operations
      "DeleteEntitiesRequest",
-     # Discovery and Analytics
-     "EntityTypeList",
-     "ObservationCategoryList",
-     "TypedEntityList",
  ]

basic_memory/schemas/base.py
@@ -159,7 +159,10 @@ class Entity(BaseModel):
      @property
      def file_path(self):
          """Get the file path for this entity based on its permalink."""
-         return f"{self.folder}/{self.title}.md" if self.folder else f"{self.title}.md"
+         if self.content_type == "text/markdown":
+             return f"{self.folder}/{self.title}.md" if self.folder else f"{self.title}.md"
+         else:
+             return f"{self.folder}/{self.title}" if self.folder else self.title
 
      @property
      def permalink(self) -> Permalink:
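
A quick sketch of the new `file_path` behavior using a stand-in dataclass that mirrors the property above (the real `Entity` is a Pydantic model; this is illustration only):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class EntitySchemaSketch:
    title: str
    folder: Optional[str]
    content_type: str

    @property
    def file_path(self) -> str:
        # Markdown entities get a .md suffix; other content types keep the raw title
        if self.content_type == "text/markdown":
            return f"{self.folder}/{self.title}.md" if self.folder else f"{self.title}.md"
        return f"{self.folder}/{self.title}" if self.folder else self.title

assert EntitySchemaSketch("Design", "specs", "text/markdown").file_path == "specs/Design.md"
assert EntitySchemaSketch("logo.png", "assets", "image/png").file_path == "assets/logo.png"
```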