basic_memory-0.7.0-py3-none-any.whl → basic_memory-0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (58)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/alembic.ini +119 -0
  3. basic_memory/alembic/env.py +23 -1
  4. basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py +51 -0
  5. basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py +44 -0
  6. basic_memory/api/app.py +0 -4
  7. basic_memory/api/routers/knowledge_router.py +1 -1
  8. basic_memory/api/routers/memory_router.py +16 -16
  9. basic_memory/api/routers/resource_router.py +105 -4
  10. basic_memory/cli/app.py +0 -2
  11. basic_memory/cli/commands/status.py +9 -21
  12. basic_memory/cli/commands/sync.py +12 -16
  13. basic_memory/cli/commands/tools.py +36 -13
  14. basic_memory/cli/main.py +0 -1
  15. basic_memory/config.py +15 -1
  16. basic_memory/file_utils.py +6 -4
  17. basic_memory/markdown/entity_parser.py +3 -3
  18. basic_memory/mcp/async_client.py +1 -1
  19. basic_memory/mcp/main.py +25 -0
  20. basic_memory/mcp/prompts/__init__.py +15 -0
  21. basic_memory/mcp/prompts/ai_assistant_guide.py +28 -0
  22. basic_memory/mcp/prompts/continue_conversation.py +172 -0
  23. basic_memory/mcp/prompts/json_canvas_spec.py +25 -0
  24. basic_memory/mcp/prompts/recent_activity.py +46 -0
  25. basic_memory/mcp/prompts/search.py +127 -0
  26. basic_memory/mcp/prompts/utils.py +98 -0
  27. basic_memory/mcp/server.py +3 -7
  28. basic_memory/mcp/tools/__init__.py +6 -4
  29. basic_memory/mcp/tools/canvas.py +99 -0
  30. basic_memory/mcp/tools/memory.py +12 -5
  31. basic_memory/mcp/tools/notes.py +1 -2
  32. basic_memory/mcp/tools/resource.py +192 -0
  33. basic_memory/mcp/tools/utils.py +2 -1
  34. basic_memory/models/knowledge.py +27 -11
  35. basic_memory/repository/repository.py +1 -1
  36. basic_memory/repository/search_repository.py +14 -4
  37. basic_memory/schemas/__init__.py +0 -11
  38. basic_memory/schemas/base.py +4 -1
  39. basic_memory/schemas/memory.py +11 -2
  40. basic_memory/schemas/search.py +2 -1
  41. basic_memory/services/entity_service.py +19 -12
  42. basic_memory/services/file_service.py +69 -2
  43. basic_memory/services/link_resolver.py +12 -9
  44. basic_memory/services/search_service.py +56 -12
  45. basic_memory/sync/__init__.py +3 -2
  46. basic_memory/sync/sync_service.py +294 -123
  47. basic_memory/sync/watch_service.py +125 -129
  48. basic_memory/utils.py +24 -9
  49. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/METADATA +2 -1
  50. basic_memory-0.8.0.dist-info/RECORD +91 -0
  51. basic_memory/alembic/README +0 -1
  52. basic_memory/schemas/discovery.py +0 -28
  53. basic_memory/sync/file_change_scanner.py +0 -158
  54. basic_memory/sync/utils.py +0 -31
  55. basic_memory-0.7.0.dist-info/RECORD +0 -82
  56. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/WHEEL +0 -0
  57. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/entry_points.txt +0 -0
  58. {basic_memory-0.7.0.dist-info → basic_memory-0.8.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/cli/commands/tools.py CHANGED
@@ -1,9 +1,10 @@
-"""Database management commands."""
+"""CLI tool commands for Basic Memory."""
 
 import asyncio
 from typing import Optional, List, Annotated
 
 import typer
+from loguru import logger
 from rich import print as rprint
 
 from basic_memory.cli.app import app
@@ -13,9 +14,15 @@ from basic_memory.mcp.tools import read_note as mcp_read_note
 from basic_memory.mcp.tools import recent_activity as mcp_recent_activity
 from basic_memory.mcp.tools import search as mcp_search
 from basic_memory.mcp.tools import write_note as mcp_write_note
+
+# Import prompts
+from basic_memory.mcp.prompts.continue_conversation import (
+    continue_conversation as mcp_continue_conversation,
+)
+
 from basic_memory.schemas.base import TimeFrame
 from basic_memory.schemas.memory import MemoryUrl
-from basic_memory.schemas.search import SearchQuery
+from basic_memory.schemas.search import SearchQuery, SearchItemType
 
 tool_app = typer.Typer()
 app.add_typer(tool_app, name="tools", help="cli versions mcp tools")
@@ -72,7 +79,7 @@ def build_context(
                 max_related=max_related,
             )
         )
-        rprint(context.model_dump())
+        rprint(context.model_dump_json(indent=2))
     except Exception as e:  # pragma: no cover
         if not isinstance(e, typer.Exit):
             typer.echo(f"Error during build_context: {e}", err=True)
@@ -82,18 +89,13 @@ def build_context(
 
 @tool_app.command()
 def recent_activity(
-    type: Annotated[Optional[List[str]], typer.Option()] = ["entity", "observation", "relation"],
+    type: Annotated[Optional[List[SearchItemType]], typer.Option()] = None,
     depth: Optional[int] = 1,
     timeframe: Optional[TimeFrame] = "7d",
     page: int = 1,
     page_size: int = 10,
    max_related: int = 10,
 ):
-    assert type is not None, "type is required"
-    if any(t not in ["entity", "observation", "relation"] for t in type):  # pragma: no cover
-        print("type must be one of ['entity', 'observation', 'relation']")
-        raise typer.Abort()
-
     try:
         context = asyncio.run(
             mcp_recent_activity(
@@ -105,7 +107,7 @@ def recent_activity(
                 max_related=max_related,
             )
         )
-        rprint(context.model_dump())
+        rprint(context.model_dump_json(indent=2))
     except Exception as e:  # pragma: no cover
         if not isinstance(e, typer.Exit):
             typer.echo(f"Error during build_context: {e}", err=True)
@@ -132,14 +134,15 @@ def search(
     try:
         search_query = SearchQuery(
             permalink_match=query if permalink else None,
-            text=query if query else None,
+            text=query if not (permalink or title) else None,
             title=query if title else None,
             after_date=after_date,
         )
         results = asyncio.run(mcp_search(query=search_query, page=page, page_size=page_size))
-        rprint(results.model_dump())
+        rprint(results.model_dump_json(indent=2))
     except Exception as e:  # pragma: no cover
         if not isinstance(e, typer.Exit):
+            logger.exception("Error during search", e)
             typer.echo(f"Error during search: {e}", err=True)
             raise typer.Exit(1)
         raise
@@ -149,9 +152,29 @@ def search(
 def get_entity(identifier: str):
     try:
         entity = asyncio.run(mcp_get_entity(identifier=identifier))
-        rprint(entity.model_dump())
+        rprint(entity.model_dump_json(indent=2))
     except Exception as e:  # pragma: no cover
         if not isinstance(e, typer.Exit):
             typer.echo(f"Error during get_entity: {e}", err=True)
             raise typer.Exit(1)
         raise
+
+
+@tool_app.command(name="continue-conversation")
+def continue_conversation(
+    topic: Annotated[Optional[str], typer.Option(help="Topic or keyword to search for")] = None,
+    timeframe: Annotated[
+        Optional[str], typer.Option(help="How far back to look for activity")
+    ] = None,
+):
+    """Continue a previous conversation or work session."""
+    try:
+        # Prompt functions return formatted strings directly
+        session = asyncio.run(mcp_continue_conversation(topic=topic, timeframe=timeframe))
+        rprint(session)
+    except Exception as e:  # pragma: no cover
+        if not isinstance(e, typer.Exit):
+            logger.exception("Error continuing conversation", e)
+            typer.echo(f"Error continuing conversation: {e}", err=True)
+            raise typer.Exit(1)
+        raise
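The new `continue-conversation` command registers on the existing `tools` Typer group, so it can be driven the same way as the other subcommands. A minimal sketch using Typer's test runner (the command and option names come from the diff above; the topic value and the use of CliRunner are illustrative assumptions, not part of the package):

```python
# Sketch: exercising the new command without a shell via Typer's test runner.
from typer.testing import CliRunner

import basic_memory.cli.main  # noqa: F401  # importing main registers the command groups
from basic_memory.cli.app import app

runner = CliRunner()
result = runner.invoke(app, ["tools", "continue-conversation", "--topic", "search design"])
print(result.output)
```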
basic_memory/cli/main.py CHANGED
@@ -15,6 +15,5 @@ from basic_memory.cli.commands import (  # noqa: F401  # pragma: no cover
     tools,
 )
 
-
 if __name__ == "__main__":  # pragma: no cover
     app()
basic_memory/config.py CHANGED
@@ -3,9 +3,13 @@
 from pathlib import Path
 from typing import Literal
 
+from loguru import logger
 from pydantic import Field, field_validator
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
+import basic_memory
+from basic_memory.utils import setup_logging
+
 DATABASE_NAME = "memory.db"
 DATA_DIR_NAME = ".basic-memory"
 
@@ -31,7 +35,7 @@ class ProjectConfig(BaseSettings):
         default=500, description="Milliseconds to wait after changes before syncing", gt=0
     )
 
-    log_level: str = "INFO"
+    log_level: str = "DEBUG"
 
     model_config = SettingsConfigDict(
         env_prefix="BASIC_MEMORY_",
@@ -60,3 +64,13 @@ class ProjectConfig(BaseSettings):
 
 # Load project config
 config = ProjectConfig()
+
+# setup logging
+setup_logging(
+    env=config.env,
+    home_dir=config.home,
+    log_level=config.log_level,
+    log_file=".basic-memory/basic-memory.log",
+    console=False,
+)
+logger.info(f"Starting Basic Memory {basic_memory.__version__}")
basic_memory/file_utils.py CHANGED
@@ -2,7 +2,7 @@
 
 import hashlib
 from pathlib import Path
-from typing import Dict, Any
+from typing import Dict, Any, Union
 
 import yaml
 from loguru import logger
@@ -26,12 +26,12 @@ class ParseError(FileError):
     pass
 
 
-async def compute_checksum(content: str) -> str:
+async def compute_checksum(content: Union[str, bytes]) -> str:
     """
     Compute SHA-256 checksum of content.
 
     Args:
-        content: Text content to hash
+        content: Content to hash (either text string or bytes)
 
     Returns:
         SHA-256 hex digest
@@ -40,7 +40,9 @@ async def compute_checksum(content: str) -> str:
         FileError: If checksum computation fails
     """
     try:
-        return hashlib.sha256(content.encode()).hexdigest()
+        if isinstance(content, str):
+            content = content.encode()
+        return hashlib.sha256(content).hexdigest()
     except Exception as e:  # pragma: no cover
         logger.error(f"Failed to compute checksum: {e}")
         raise FileError(f"Failed to compute checksum: {e}")
basic_memory/markdown/entity_parser.py CHANGED
@@ -88,10 +88,10 @@ class EntityParser:
             return parsed
         return None
 
-    async def parse_file(self, file_path: Path) -> EntityMarkdown:
+    async def parse_file(self, path: Path | str) -> EntityMarkdown:
         """Parse markdown file into EntityMarkdown."""
 
-        absolute_path = self.base_path / file_path
+        absolute_path = self.base_path / path
         # Parse frontmatter and content using python-frontmatter
         post = frontmatter.load(str(absolute_path))
 
@@ -99,7 +99,7 @@ class EntityParser:
         file_stats = absolute_path.stat()
 
         metadata = post.metadata
-        metadata["title"] = post.metadata.get("title", file_path.name)
+        metadata["title"] = post.metadata.get("title", absolute_path.name)
         metadata["type"] = post.metadata.get("type", "note")
         metadata["tags"] = parse_tags(post.metadata.get("tags", []))
 
basic_memory/mcp/async_client.py CHANGED
@@ -2,7 +2,7 @@ from httpx import ASGITransport, AsyncClient
 
 from basic_memory.api.app import app as fastapi_app
 
-BASE_URL = "memory://"
+BASE_URL = "http://test"
 
 # Create shared async client
 client = AsyncClient(transport=ASGITransport(app=fastapi_app), base_url=BASE_URL)
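The shared client still routes every request to the FastAPI app in-process through `ASGITransport`; swapping `memory://` for `http://test` only gives httpx a well-formed placeholder origin. A minimal usage sketch (the `/health` path is an assumption, not a documented route):

```python
# Minimal sketch: requests go straight to the ASGI app, no network involved.
# The /health path is a placeholder; substitute a real API route.
import asyncio

from basic_memory.mcp.async_client import client


async def main() -> None:
    response = await client.get("/health")
    print(response.status_code)


asyncio.run(main())
```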
basic_memory/mcp/main.py ADDED
@@ -0,0 +1,25 @@
+"""Main MCP entrypoint for Basic Memory.
+
+Creates and configures the shared MCP instance and handles server startup.
+"""
+
+from loguru import logger  # pragma: no cover
+
+from basic_memory.config import config  # pragma: no cover
+
+# Import shared mcp instance
+from basic_memory.mcp.server import mcp  # pragma: no cover
+
+# Import tools to register them
+import basic_memory.mcp.tools  # noqa: F401  # pragma: no cover
+
+# Import prompts to register them
+import basic_memory.mcp.prompts  # noqa: F401  # pragma: no cover
+
+
+
+if __name__ == "__main__":  # pragma: no cover
+    home_dir = config.home
+    logger.info("Starting Basic Memory MCP server")
+    logger.info(f"Home directory: {home_dir}")
+    mcp.run()
basic_memory/mcp/prompts/__init__.py ADDED
@@ -0,0 +1,15 @@
+"""Basic Memory MCP prompts.
+
+Prompts are a special type of tool that returns a string response
+formatted for a user to read, typically invoking one or more tools
+and transforming their results into user-friendly text.
+"""
+
+# Import individual prompt modules to register them with the MCP server
+from basic_memory.mcp.prompts import continue_conversation
+from basic_memory.mcp.prompts import recent_activity
+from basic_memory.mcp.prompts import search
+from basic_memory.mcp.prompts import ai_assistant_guide
+from basic_memory.mcp.prompts import json_canvas_spec
+
+__all__ = ["ai_assistant_guide", "continue_conversation", "json_canvas_spec", "recent_activity", "search"]
basic_memory/mcp/prompts/ai_assistant_guide.py ADDED
@@ -0,0 +1,28 @@
+from pathlib import Path
+
+import logfire
+from loguru import logger
+
+from basic_memory.mcp.server import mcp
+
+
+@mcp.resource(
+    uri="memory://ai_assistant_guide",
+    name="ai_assistant_guide",
+    description="Give an AI assistant guidance on how to use Basic Memory tools effectively",
+)
+def ai_assistant_guide() -> str:
+    """Return a concise guide on Basic Memory tools and how to use them.
+
+    Args:
+        focus: Optional area to focus on ("writing", "context", "search", etc.)
+
+    Returns:
+        A focused guide on Basic Memory usage.
+    """
+    with logfire.span("Getting Basic Memory guide"):  # pyright: ignore
+        logger.info("Loading AI assistant guide resource")
+        guide_doc = Path(__file__).parent.parent.parent.parent.parent / "data/ai_assistant_guide.md"
+        content = guide_doc.read_text()
+        logger.info(f"Loaded AI assistant guide ({len(content)} chars)")
+        return content
basic_memory/mcp/prompts/continue_conversation.py ADDED
@@ -0,0 +1,172 @@
+"""Session continuation prompts for Basic Memory MCP server.
+
+These prompts help users continue conversations and work across sessions,
+providing context from previous interactions to maintain continuity.
+"""
+
+from textwrap import dedent
+from typing import Optional, List, Annotated
+
+from loguru import logger
+import logfire
+from pydantic import Field
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.tools.memory import build_context, recent_activity
+from basic_memory.mcp.tools.search import search
+from basic_memory.schemas.base import TimeFrame
+from basic_memory.schemas.memory import GraphContext
+from basic_memory.schemas.search import SearchQuery
+
+
+@mcp.prompt(
+    name="continue_conversation",
+    description="Continue a previous conversation",
+)
+async def continue_conversation(
+    topic: Annotated[Optional[str], Field(description="Topic or keyword to search for")] = None,
+    timeframe: Annotated[
+        Optional[TimeFrame],
+        Field(description="How far back to look for activity (e.g. '1d', '1 week')"),
+    ] = None,
+) -> str:
+    """Continue a previous conversation or work session.
+
+    This prompt helps you pick up where you left off by finding recent context
+    about a specific topic or showing general recent activity.
+
+    Args:
+        topic: Topic or keyword to search for (optional)
+        timeframe: How far back to look for activity
+
+    Returns:
+        Context from previous sessions on this topic
+    """
+    with logfire.span("Continuing session", topic=topic, timeframe=timeframe):  # pyright: ignore
+        logger.info(f"Continuing session, topic: {topic}, timeframe: {timeframe}")
+
+        # If topic provided, search for it
+        if topic:
+            search_results = await search(SearchQuery(text=topic, after_date=timeframe))
+
+            # Build context from top results
+            contexts = []
+            for result in search_results.results[:3]:
+                if hasattr(result, "permalink") and result.permalink:
+                    context = await build_context(f"memory://{result.permalink}")
+                    contexts.append(context)
+
+            return format_continuation_context(topic, contexts, timeframe)
+
+        # If no topic, get recent activity
+        recent = await recent_activity(timeframe=timeframe)
+        return format_continuation_context("Recent Activity", [recent], timeframe)
+
+
+def format_continuation_context(
+    topic: str, contexts: List[GraphContext], timeframe: TimeFrame | None
+) -> str:
+    """Format continuation context into a helpful summary.
+
+    Args:
+        topic: The topic or focus of continuation
+        contexts: List of context graphs
+        timeframe: How far back to look for activity
+
+    Returns:
+        Formatted continuation summary
+    """
+    if not contexts or all(not context.primary_results for context in contexts):
+        return dedent(f"""
+            # Continuing conversation on: {topic}
+
+            This is a memory retrieval session.
+            Please use the available basic-memory tools to gather relevant context before responding.
+            Start by executing one of the suggested commands below to retrieve content.
+
+            I couldn't find any recent work specifically on this topic.
+
+            ## Suggestions
+            - Try a different search term
+            - Check recent activity with `recent_activity(timeframe="1w")`
+            - Start a new topic with `write_note(...)`
+            """)
+
+    # Start building our summary with header
+    summary = dedent(f"""
+        # Continuing conversation on: {topic}
+
+        This is a memory retrieval session.
+        Please use the available basic-memory tools to gather relevant context before responding.
+        Start by executing one of the suggested commands below to retrieve content.
+
+        Here's what I found about the previous conversation:
+        """)
+
+    # Track what we've added to avoid duplicates
+    added_permalinks = set()
+    sections = []
+
+    # Process each context
+    for context in contexts:
+        # Add primary results
+        for primary in context.primary_results:
+            if hasattr(primary, "permalink") and primary.permalink not in added_permalinks:
+                added_permalinks.add(primary.permalink)
+
+                section = dedent(f"""
+                    ## {primary.title}
+                    - **Type**: {primary.type}
+                    """)
+
+                # Add creation date if available
+                if hasattr(primary, "created_at"):
+                    section += f"- **Created**: {primary.created_at.strftime('%Y-%m-%d %H:%M')}\n"
+
+                section += dedent(f"""
+
+                    You can read this document with: `read_note("{primary.permalink}")`
+                    """)
+
+                # Add related documents if available
+                related_by_type = {}
+                if context.related_results:
+                    for related in context.related_results:
+                        if hasattr(related, "relation_type") and related.relation_type:  # pyright: ignore
+                            if related.relation_type not in related_by_type:  # pyright: ignore
+                                related_by_type[related.relation_type] = []  # pyright: ignore
+                            related_by_type[related.relation_type].append(related)  # pyright: ignore
+
+                if related_by_type:
+                    section += dedent("""
+                        ### Related Documents
+                        """)
+                    for rel_type, relations in related_by_type.items():
+                        display_type = rel_type.replace("_", " ").title()
+                        section += f"- **{display_type}**:\n"
+                        for rel in relations[:3]:  # Limit to avoid overwhelming
+                            if hasattr(rel, "to_id") and rel.to_id:
+                                section += f" - `{rel.to_id}`\n"
+
+                sections.append(section)
+
+    # Add all sections
+    summary += "\n".join(sections)
+
+    # Add next steps
+    next_steps = dedent(f"""
+        ## Next Steps
+
+        You can:
+        - Explore more with: `search({{"text": "{topic}"}})`
+        - See what's changed: `recent_activity(timeframe="{timeframe}")`
+        """)
+
+    # Add specific exploration based on what we found
+    if added_permalinks:
+        first_permalink = next(iter(added_permalinks))
+        next_steps += dedent(f"""
+            - Continue the conversation: `build_context("memory://{first_permalink}")`
+            """)
+
+    return summary + next_steps
basic_memory/mcp/prompts/json_canvas_spec.py ADDED
@@ -0,0 +1,25 @@
+from pathlib import Path
+
+import logfire
+from loguru import logger
+
+from basic_memory.mcp.server import mcp
+
+
+@mcp.resource(
+    uri="memory://json_canvas_spec",
+    name="json_canvas_spec",
+    description="JSON Canvas specification for visualizing knowledge graphs in Obsidian"
+)
+def json_canvas_spec() -> str:
+    """Return the JSON Canvas specification for Obsidian visualizations.
+
+    Returns:
+        The JSON Canvas specification document.
+    """
+    with logfire.span("Getting JSON Canvas spec"):  # pyright: ignore
+        logger.info("Loading JSON Canvas spec resource")
+        canvas_spec = Path(__file__).parent.parent.parent.parent.parent / "data/json_canvas_spec_1_0.md"
+        content = canvas_spec.read_text()
+        logger.info(f"Loaded JSON Canvas spec ({len(content)} chars)")
+        return content
basic_memory/mcp/prompts/recent_activity.py ADDED
@@ -0,0 +1,46 @@
+"""Recent activity prompts for Basic Memory MCP server.
+
+These prompts help users see what has changed in their knowledge base recently.
+"""
+
+from typing import Annotated, Optional
+
+from loguru import logger
+import logfire
+from pydantic import Field
+
+from basic_memory.mcp.prompts.utils import format_context_summary
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.tools.memory import recent_activity as recent_activity_tool
+from basic_memory.schemas.base import TimeFrame
+
+
+@mcp.prompt(
+    name="recent_activity",
+    description="Get recent activity from across the knowledge base",
+)
+async def recent_activity_prompt(
+    timeframe: Annotated[
+        Optional[TimeFrame],
+        Field(description="How far back to look for activity (e.g. '1d', '1 week')"),
+    ] = None,
+) -> str:
+    """Get recent activity from across the knowledge base.
+
+    This prompt helps you see what's changed recently in the knowledge base,
+    showing new or updated documents and related information.
+
+    Args:
+        timeframe: How far back to look for activity (e.g. '1d', '1 week')
+
+    Returns:
+        Formatted summary of recent activity
+    """
+    with logfire.span("Getting recent activity", timeframe=timeframe):  # pyright: ignore
+        logger.info(f"Getting recent activity, timeframe: {timeframe}")
+
+        results = await recent_activity_tool(timeframe=timeframe)
+
+        time_display = f" ({timeframe})" if timeframe else ""
+        header = f"# Recent Activity{time_display}"
+        return format_context_summary(header, results)
basic_memory/mcp/prompts/search.py ADDED
@@ -0,0 +1,127 @@
+"""Search prompts for Basic Memory MCP server.
+
+These prompts help users search and explore their knowledge base.
+"""
+
+from textwrap import dedent
+from typing import Annotated, Optional
+
+from loguru import logger
+import logfire
+from pydantic import Field
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.tools.search import search as search_tool
+from basic_memory.schemas.search import SearchQuery, SearchResponse
+from basic_memory.schemas.base import TimeFrame
+
+
+@mcp.prompt(
+    name="search",
+    description="Search across all content in basic-memory",
+)
+async def search_prompt(
+    query: str,
+    timeframe: Annotated[
+        Optional[TimeFrame],
+        Field(description="How far back to search (e.g. '1d', '1 week')"),
+    ] = None,
+) -> str:
+    """Search across all content in basic-memory.
+
+    This prompt helps search for content in the knowledge base and
+    provides helpful context about the results.
+
+    Args:
+        query: The search text to look for
+        timeframe: Optional timeframe to limit results (e.g. '1d', '1 week')
+
+    Returns:
+        Formatted search results with context
+    """
+    with logfire.span("Searching knowledge base", query=query, timeframe=timeframe):  # pyright: ignore
+        logger.info(f"Searching knowledge base, query: {query}, timeframe: {timeframe}")
+
+        search_results = await search_tool(SearchQuery(text=query, after_date=timeframe))
+        return format_search_results(query, search_results, timeframe)
+
+
+def format_search_results(
+    query: str, results: SearchResponse, timeframe: Optional[TimeFrame] = None
+) -> str:
+    """Format search results into a helpful summary.
+
+    Args:
+        query: The search query
+        results: Search results object
+        timeframe: How far back results were searched
+
+    Returns:
+        Formatted search results summary
+    """
+    if not results.results:
+        return dedent(f"""
+            # Search Results for: "{query}"
+
+            I couldn't find any results for this query.
+
+            ## Suggestions
+            - Try a different search term
+            - Broaden your search criteria
+            - Check recent activity with `recent_activity(timeframe="1w")`
+            - Create new content with `write_note(...)`
+            """)
+
+    # Start building our summary with header
+    time_info = f" (after {timeframe})" if timeframe else ""
+    summary = dedent(f"""
+        # Search Results for: "{query}"{time_info}
+
+        This is a memory search session.
+        Please use the available basic-memory tools to gather relevant context before responding.
+        I found {len(results.results)} results that match your query.
+
+        Here are the most relevant results:
+        """)
+
+    # Add each search result
+    for i, result in enumerate(results.results[:5]):  # Limit to top 5 results
+        summary += dedent(f"""
+            ## {i + 1}. {result.title}
+            - **Type**: {result.type}
+            """)
+
+        # Add creation date if available in metadata
+        if hasattr(result, "metadata") and result.metadata and "created_at" in result.metadata:
+            created_at = result.metadata["created_at"]
+            if hasattr(created_at, "strftime"):
+                summary += f"- **Created**: {created_at.strftime('%Y-%m-%d %H:%M')}\n"
+            elif isinstance(created_at, str):
+                summary += f"- **Created**: {created_at}\n"
+
+        # Add score and excerpt
+        summary += f"- **Relevance Score**: {result.score:.2f}\n"
+        # Add excerpt if available in metadata
+        if hasattr(result, "metadata") and result.metadata and "excerpt" in result.metadata:
+            summary += f"- **Excerpt**: {result.metadata['excerpt']}\n"
+
+        # Add permalink for retrieving content
+        if hasattr(result, "permalink") and result.permalink:
+            summary += dedent(f"""
+
+                You can view this content with: `read_note("{result.permalink}")`
+                Or explore its context with: `build_context("memory://{result.permalink}")`
+                """)
+
+    # Add next steps
+    summary += dedent(f"""
+        ## Next Steps
+
+        You can:
+        - Refine your search: `search("{query} AND additional_term")`
+        - Exclude terms: `search("{query} NOT exclude_term")`
+        - View more results: `search("{query}", after_date=None)`
+        - Check recent activity: `recent_activity()`
+        """)
+
+    return summary
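Since the decorated prompt functions remain plain async callables (the CLI in tools.py calls `continue_conversation` the same way), the search prompt can also be exercised directly; a small sketch with arbitrary example values:

```python
# Sketch: invoking the registered search prompt directly; the query text and
# timeframe are arbitrary example values.
import asyncio

from basic_memory.mcp.prompts.search import search_prompt

print(asyncio.run(search_prompt("sync service", timeframe="1 week")))
```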