basic-memory 0.14.4__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of basic-memory might be problematic.
Files changed (84)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/versions/a1b2c3d4e5f6_fix_project_foreign_keys.py +5 -9
  3. basic_memory/api/app.py +10 -4
  4. basic_memory/api/routers/directory_router.py +23 -2
  5. basic_memory/api/routers/knowledge_router.py +25 -8
  6. basic_memory/api/routers/project_router.py +100 -4
  7. basic_memory/cli/app.py +9 -28
  8. basic_memory/cli/auth.py +277 -0
  9. basic_memory/cli/commands/cloud/__init__.py +5 -0
  10. basic_memory/cli/commands/cloud/api_client.py +112 -0
  11. basic_memory/cli/commands/cloud/bisync_commands.py +818 -0
  12. basic_memory/cli/commands/cloud/core_commands.py +288 -0
  13. basic_memory/cli/commands/cloud/mount_commands.py +295 -0
  14. basic_memory/cli/commands/cloud/rclone_config.py +288 -0
  15. basic_memory/cli/commands/cloud/rclone_installer.py +198 -0
  16. basic_memory/cli/commands/command_utils.py +43 -0
  17. basic_memory/cli/commands/import_memory_json.py +0 -4
  18. basic_memory/cli/commands/mcp.py +77 -60
  19. basic_memory/cli/commands/project.py +154 -152
  20. basic_memory/cli/commands/status.py +25 -22
  21. basic_memory/cli/commands/sync.py +45 -228
  22. basic_memory/cli/commands/tool.py +87 -16
  23. basic_memory/cli/main.py +1 -0
  24. basic_memory/config.py +131 -21
  25. basic_memory/db.py +104 -3
  26. basic_memory/deps.py +27 -8
  27. basic_memory/file_utils.py +37 -13
  28. basic_memory/ignore_utils.py +295 -0
  29. basic_memory/markdown/plugins.py +9 -7
  30. basic_memory/mcp/async_client.py +124 -14
  31. basic_memory/mcp/project_context.py +141 -0
  32. basic_memory/mcp/prompts/ai_assistant_guide.py +49 -4
  33. basic_memory/mcp/prompts/continue_conversation.py +17 -16
  34. basic_memory/mcp/prompts/recent_activity.py +116 -32
  35. basic_memory/mcp/prompts/search.py +13 -12
  36. basic_memory/mcp/prompts/utils.py +11 -4
  37. basic_memory/mcp/resources/ai_assistant_guide.md +211 -341
  38. basic_memory/mcp/resources/project_info.py +27 -11
  39. basic_memory/mcp/server.py +0 -37
  40. basic_memory/mcp/tools/__init__.py +5 -6
  41. basic_memory/mcp/tools/build_context.py +67 -56
  42. basic_memory/mcp/tools/canvas.py +38 -26
  43. basic_memory/mcp/tools/chatgpt_tools.py +187 -0
  44. basic_memory/mcp/tools/delete_note.py +81 -47
  45. basic_memory/mcp/tools/edit_note.py +155 -138
  46. basic_memory/mcp/tools/list_directory.py +112 -99
  47. basic_memory/mcp/tools/move_note.py +181 -101
  48. basic_memory/mcp/tools/project_management.py +113 -277
  49. basic_memory/mcp/tools/read_content.py +91 -74
  50. basic_memory/mcp/tools/read_note.py +152 -115
  51. basic_memory/mcp/tools/recent_activity.py +471 -68
  52. basic_memory/mcp/tools/search.py +105 -92
  53. basic_memory/mcp/tools/sync_status.py +136 -130
  54. basic_memory/mcp/tools/utils.py +4 -0
  55. basic_memory/mcp/tools/view_note.py +44 -33
  56. basic_memory/mcp/tools/write_note.py +151 -90
  57. basic_memory/models/knowledge.py +12 -6
  58. basic_memory/models/project.py +6 -2
  59. basic_memory/repository/entity_repository.py +89 -82
  60. basic_memory/repository/relation_repository.py +13 -0
  61. basic_memory/repository/repository.py +18 -5
  62. basic_memory/repository/search_repository.py +46 -2
  63. basic_memory/schemas/__init__.py +6 -0
  64. basic_memory/schemas/base.py +39 -11
  65. basic_memory/schemas/cloud.py +46 -0
  66. basic_memory/schemas/memory.py +90 -21
  67. basic_memory/schemas/project_info.py +9 -10
  68. basic_memory/schemas/sync_report.py +48 -0
  69. basic_memory/services/context_service.py +25 -11
  70. basic_memory/services/directory_service.py +124 -3
  71. basic_memory/services/entity_service.py +100 -48
  72. basic_memory/services/initialization.py +30 -11
  73. basic_memory/services/project_service.py +101 -24
  74. basic_memory/services/search_service.py +16 -8
  75. basic_memory/sync/sync_service.py +173 -34
  76. basic_memory/sync/watch_service.py +101 -40
  77. basic_memory/utils.py +14 -4
  78. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/METADATA +57 -9
  79. basic_memory-0.15.1.dist-info/RECORD +146 -0
  80. basic_memory/mcp/project_session.py +0 -120
  81. basic_memory-0.14.4.dist-info/RECORD +0 -133
  82. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/WHEEL +0 -0
  83. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/entry_points.txt +0 -0
  84. {basic_memory-0.14.4.dist-info → basic_memory-0.15.1.dist-info}/licenses/LICENSE +0 -0
basic_memory/mcp/resources/project_info.py

@@ -1,19 +1,24 @@
 """Project info tool for Basic Memory MCP server."""
 
+from typing import Optional
+
 from loguru import logger
+from fastmcp import Context
 
-from basic_memory.mcp.project_session import get_active_project
-from basic_memory.mcp.async_client import client
+from basic_memory.mcp.async_client import get_client
+from basic_memory.mcp.project_context import get_active_project
 from basic_memory.mcp.server import mcp
 from basic_memory.mcp.tools.utils import call_get
 from basic_memory.schemas import ProjectInfoResponse
 
 
 @mcp.resource(
-    uri="memory://project_info",
+    uri="memory://{project}/info",
     description="Get information and statistics about the current Basic Memory project.",
 )
-async def project_info() -> ProjectInfoResponse:
+async def project_info(
+    project: Optional[str] = None, context: Context | None = None
+) -> ProjectInfoResponse:
     """Get comprehensive information about the current Basic Memory project.
 
     This tool provides detailed statistics and status information about your
@@ -31,13 +36,22 @@ async def project_info() -> ProjectInfoResponse:
     - Monitor growth and activity over time
     - Identify potential issues like unresolved relations
 
+    Args:
+        project: Optional project name. If not provided, uses default_project
+            (if default_project_mode=true) or CLI constraint. If unknown,
+            use list_memory_projects() to discover available projects.
+        context: Optional FastMCP context for performance caching.
+
     Returns:
         Detailed project information and statistics
 
     Examples:
-        # Get information about the current project
+        # Get information about the current/default project
         info = await project_info()
 
+        # Get information about a specific project
+        info = await project_info(project="my-project")
+
         # Check entity counts
         print(f"Total entities: {info.statistics.total_entities}")
 
@@ -45,11 +59,13 @@ async def project_info() -> ProjectInfoResponse:
         print(f"Basic Memory version: {info.system.version}")
     """
     logger.info("Getting project info")
-    project_config = get_active_project()
-    project_url = project_config.project_url
 
-    # Call the API endpoint
-    response = await call_get(client, f"{project_url}/project/info")
+    async with get_client() as client:
+        project_config = await get_active_project(client, project, context)
+        project_url = project_config.permalink
+
+        # Call the API endpoint
+        response = await call_get(client, f"{project_url}/project/info")
 
-    # Convert response to ProjectInfoResponse
-    return ProjectInfoResponse.model_validate(response.json())
+        # Convert response to ProjectInfoResponse
+        return ProjectInfoResponse.model_validate(response.json())
basic_memory/mcp/server.py

@@ -2,45 +2,8 @@
 Basic Memory FastMCP server.
 """
 
-import asyncio
-from contextlib import asynccontextmanager
-from dataclasses import dataclass
-from typing import AsyncIterator, Optional, Any
-
 from fastmcp import FastMCP
 
-from basic_memory.config import ConfigManager
-from basic_memory.services.initialization import initialize_app
-
-
-@dataclass
-class AppContext:
-    watch_task: Optional[asyncio.Task]
-    migration_manager: Optional[Any] = None
-
-
-@asynccontextmanager
-async def app_lifespan(server: FastMCP) -> AsyncIterator[AppContext]:  # pragma: no cover
-    """ """
-    # defer import so tests can monkeypatch
-    from basic_memory.mcp.project_session import session
-
-    app_config = ConfigManager().config
-    # Initialize on startup (now returns migration_manager)
-    migration_manager = await initialize_app(app_config)
-
-    # Initialize project session with default project
-    session.initialize(app_config.default_project)
-
-    try:
-        yield AppContext(watch_task=None, migration_manager=migration_manager)
-    finally:
-        # Cleanup on shutdown - migration tasks will be cancelled automatically
-        pass
-
-
-# Create the shared server instance with custom Stytch auth
 mcp = FastMCP(
     name="Basic Memory",
-    lifespan=app_lifespan,
 )
basic_memory/mcp/tools/__init__.py

@@ -21,13 +21,13 @@ from basic_memory.mcp.tools.move_note import move_note
 from basic_memory.mcp.tools.sync_status import sync_status
 from basic_memory.mcp.tools.project_management import (
     list_memory_projects,
-    switch_project,
-    get_current_project,
-    set_default_project,
     create_memory_project,
     delete_project,
 )
 
+# ChatGPT-compatible tools
+from basic_memory.mcp.tools.chatgpt_tools import search, fetch
+
 __all__ = [
     "build_context",
     "canvas",
@@ -35,16 +35,15 @@ __all__ = [
     "delete_note",
     "delete_project",
     "edit_note",
-    "get_current_project",
+    "fetch",
     "list_directory",
     "list_memory_projects",
     "move_note",
     "read_content",
     "read_note",
     "recent_activity",
+    "search",
     "search_notes",
-    "set_default_project",
-    "switch_project",
     "sync_status",
     "view_note",
     "write_note",
basic_memory/mcp/tools/build_context.py

@@ -3,11 +3,12 @@
 from typing import Optional
 
 from loguru import logger
+from fastmcp import Context
 
-from basic_memory.mcp.async_client import client
+from basic_memory.mcp.async_client import get_client
+from basic_memory.mcp.project_context import get_active_project
 from basic_memory.mcp.server import mcp
 from basic_memory.mcp.tools.utils import call_get
-from basic_memory.mcp.project_session import get_active_project
 from basic_memory.schemas.base import TimeFrame
 from basic_memory.schemas.memory import (
     GraphContext,
@@ -17,18 +18,19 @@ from basic_memory.schemas.memory import (
 
 type StringOrInt = str | int
 
+
 @mcp.tool(
     description="""Build context from a memory:// URI to continue conversations naturally.
-
+
     Use this to follow up on previous discussions or explore related topics.
-
+
     Memory URL Format:
-    - Use paths like "folder/note" or "memory://folder/note"
+    - Use paths like "folder/note" or "memory://folder/note"
     - Pattern matching: "folder/*" matches all notes in folder
     - Valid characters: letters, numbers, hyphens, underscores, forward slashes
     - Avoid: double slashes (//), angle brackets (<>), quotes, pipes (|)
     - Examples: "specs/search", "projects/basic-memory", "notes/*"
-
+
     Timeframes support natural language like:
     - "2 days ago", "last week", "today", "3 months ago"
     - Or standard formats like "7d", "24h"
@@ -36,27 +38,34 @@ type StringOrInt = str | int
 )
 async def build_context(
     url: MemoryUrl,
+    project: Optional[str] = None,
     depth: Optional[StringOrInt] = 1,
     timeframe: Optional[TimeFrame] = "7d",
     page: int = 1,
     page_size: int = 10,
     max_related: int = 10,
-    project: Optional[str] = None,
+    context: Context | None = None,
 ) -> GraphContext:
-    """Get context needed to continue a discussion.
+    """Get context needed to continue a discussion within a specific project.
 
     This tool enables natural continuation of discussions by loading relevant context
     from memory:// URIs. It uses pattern matching to find relevant content and builds
     a rich context graph of related information.
 
+    Project Resolution:
+        Server resolves projects in this order: Single Project Mode → project parameter → default project.
+        If project unknown, use list_memory_projects() or recent_activity() first.
+
     Args:
+        project: Project name to build context from. Optional - server will resolve using hierarchy.
+            If unknown, use list_memory_projects() to discover available projects.
         url: memory:// URI pointing to discussion content (e.g. memory://specs/search)
         depth: How many relation hops to traverse (1-3 recommended for performance)
         timeframe: How far back to look. Supports natural language like "2 days ago", "last week"
         page: Page number of results to return (default: 1)
         page_size: Number of results to return per page (default: 10)
        max_related: Maximum number of related results to return (default: 10)
-        project: Optional project name to build context from. If not provided, uses current active project.
+        context: Optional FastMCP context for performance caching.
 
     Returns:
         GraphContext containing:
@@ -66,68 +75,70 @@ async def build_context(
 
     Examples:
         # Continue a specific discussion
-        build_context("memory://specs/search")
+        build_context("my-project", "memory://specs/search")
 
         # Get deeper context about a component
-        build_context("memory://components/memory-service", depth=2)
+        build_context("work-docs", "memory://components/memory-service", depth=2)
 
         # Look at recent changes to a specification
-        build_context("memory://specs/document-format", timeframe="today")
+        build_context("research", "memory://specs/document-format", timeframe="today")
 
         # Research the history of a feature
-        build_context("memory://features/knowledge-graph", timeframe="3 months ago")
+        build_context("dev-notes", "memory://features/knowledge-graph", timeframe="3 months ago")
 
-        # Build context from specific project
-        build_context("memory://specs/search", project="work-project")
+    Raises:
+        ToolError: If project doesn't exist or depth parameter is invalid
     """
-    logger.info(f"Building context from {url}")
-
+    logger.info(f"Building context from {url} in project {project}")
+
     # Convert string depth to integer if needed
     if isinstance(depth, str):
         try:
             depth = int(depth)
         except ValueError:
             from mcp.server.fastmcp.exceptions import ToolError
+
             raise ToolError(f"Invalid depth parameter: '{depth}' is not a valid integer")
-
+
     # URL is already validated and normalized by MemoryUrl type annotation
 
-    # Get the active project first to check project-specific sync status
-    active_project = get_active_project(project)
-
-    # Check migration status and wait briefly if needed
-    from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status
-
-    migration_status = await wait_for_migration_or_return_status(
-        timeout=5.0, project_name=active_project.name
-    )
-    if migration_status:  # pragma: no cover
-        # Return a proper GraphContext with status message
-        from basic_memory.schemas.memory import MemoryMetadata
-        from datetime import datetime
-
-        return GraphContext(
-            results=[],
-            metadata=MemoryMetadata(
-                depth=depth or 1,
-                timeframe=timeframe,
-                generated_at=datetime.now().astimezone(),
-                primary_count=0,
-                related_count=0,
-                uri=migration_status,  # Include status in metadata
-            ),
+    async with get_client() as client:
+        # Get the active project using the new stateless approach
+        active_project = await get_active_project(client, project, context)
+
+        # Check migration status and wait briefly if needed
+        from basic_memory.mcp.tools.utils import wait_for_migration_or_return_status
+
+        migration_status = await wait_for_migration_or_return_status(
+            timeout=5.0, project_name=active_project.name
+        )
+        if migration_status:  # pragma: no cover
+            # Return a proper GraphContext with status message
+            from basic_memory.schemas.memory import MemoryMetadata
+            from datetime import datetime
+
+            return GraphContext(
+                results=[],
+                metadata=MemoryMetadata(
+                    depth=depth or 1,
+                    timeframe=timeframe,
+                    generated_at=datetime.now().astimezone(),
+                    primary_count=0,
+                    related_count=0,
+                    uri=migration_status,  # Include status in metadata
+                ),
+            )
+        project_url = active_project.project_url
+
+        response = await call_get(
+            client,
+            f"{project_url}/memory/{memory_url_path(url)}",
+            params={
+                "depth": depth,
+                "timeframe": timeframe,
+                "page": page,
+                "page_size": page_size,
+                "max_related": max_related,
+            },
         )
-    project_url = active_project.project_url
-
-    response = await call_get(
-        client,
-        f"{project_url}/memory/{memory_url_path(url)}",
-        params={
-            "depth": depth,
-            "timeframe": timeframe,
-            "page": page,
-            "page_size": page_size,
-            "max_related": max_related,
-        },
-    )
-    return GraphContext.model_validate(response.json())
+        return GraphContext.model_validate(response.json())
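
For orientation, an illustrative call against the new build_context signature; keyword arguments make the parameter order explicit. "main" is a hypothetical project name, and the `.fn` accessor mirrors how chatgpt_tools.py (later in this diff) invokes `search_notes.fn`:

    # Sketch only: assumes an async context and an existing "main" project.
    graph = await build_context.fn(
        url="memory://specs/search",
        project="main",
        depth=2,
        timeframe="last week",
    )
    print(graph.metadata.primary_count, graph.metadata.related_count)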
basic_memory/mcp/tools/canvas.py

@@ -7,11 +7,12 @@ import json
 from typing import Dict, List, Any, Optional
 
 from loguru import logger
+from fastmcp import Context
 
-from basic_memory.mcp.async_client import client
+from basic_memory.mcp.async_client import get_client
+from basic_memory.mcp.project_context import get_active_project
 from basic_memory.mcp.server import mcp
 from basic_memory.mcp.tools.utils import call_put
-from basic_memory.mcp.project_session import get_active_project
 
 
 @mcp.tool(
@@ -23,21 +24,28 @@ async def canvas(
     title: str,
     folder: str,
     project: Optional[str] = None,
+    context: Context | None = None,
 ) -> str:
     """Create an Obsidian canvas file with the provided nodes and edges.
 
     This tool creates a .canvas file compatible with Obsidian's Canvas feature,
     allowing visualization of relationships between concepts or documents.
 
+    Project Resolution:
+        Server resolves projects in this order: Single Project Mode → project parameter → default project.
+        If project unknown, use list_memory_projects() or recent_activity() first.
+
     For the full JSON Canvas 1.0 specification, see the 'spec://canvas' resource.
 
     Args:
+        project: Project name to create canvas in. Optional - server will resolve using hierarchy.
+            If unknown, use list_memory_projects() to discover available projects.
        nodes: List of node objects following JSON Canvas 1.0 spec
        edges: List of edge objects following JSON Canvas 1.0 spec
        title: The title of the canvas (will be saved as title.canvas)
        folder: Folder path relative to project root where the canvas should be saved.
            Use forward slashes (/) as separators. Examples: "diagrams", "projects/2025", "visual/maps"
-        project: Optional project name to create canvas in. If not provided, uses current active project.
+        context: Optional FastMCP context for performance caching.
 
     Returns:
         A summary of the created canvas file
@@ -77,35 +85,39 @@
     ```
 
     Examples:
-        # Create canvas in current project
-        canvas(nodes=[...], edges=[...], title="My Canvas", folder="diagrams")
+        # Create canvas in project
+        canvas("my-project", nodes=[...], edges=[...], title="My Canvas", folder="diagrams")
+
+        # Create canvas in work project
+        canvas("work-project", nodes=[...], edges=[...], title="Process Flow", folder="visual/maps")
 
-        # Create canvas in specific project
-        canvas(nodes=[...], edges=[...], title="My Canvas", folder="diagrams", project="work-project")
+    Raises:
+        ToolError: If project doesn't exist or folder path is invalid
     """
-    active_project = get_active_project(project)
-    project_url = active_project.project_url
+    async with get_client() as client:
+        active_project = await get_active_project(client, project, context)
+        project_url = active_project.project_url
 
-    # Ensure path has .canvas extension
-    file_title = title if title.endswith(".canvas") else f"{title}.canvas"
-    file_path = f"{folder}/{file_title}"
+        # Ensure path has .canvas extension
+        file_title = title if title.endswith(".canvas") else f"{title}.canvas"
+        file_path = f"{folder}/{file_title}"
 
-    # Create canvas data structure
-    canvas_data = {"nodes": nodes, "edges": edges}
+        # Create canvas data structure
+        canvas_data = {"nodes": nodes, "edges": edges}
 
-    # Convert to JSON
-    canvas_json = json.dumps(canvas_data, indent=2)
+        # Convert to JSON
+        canvas_json = json.dumps(canvas_data, indent=2)
 
-    # Write the file using the resource API
-    logger.info(f"Creating canvas file: {file_path}")
-    response = await call_put(client, f"{project_url}/resource/{file_path}", json=canvas_json)
+        # Write the file using the resource API
+        logger.info(f"Creating canvas file: {file_path} in project {project}")
+        response = await call_put(client, f"{project_url}/resource/{file_path}", json=canvas_json)
 
-    # Parse response
-    result = response.json()
-    logger.debug(result)
+        # Parse response
+        result = response.json()
+        logger.debug(result)
 
-    # Build summary
-    action = "Created" if response.status_code == 201 else "Updated"
-    summary = [f"# {action}: {file_path}", "\nThe canvas is ready to open in Obsidian."]
+        # Build summary
+        action = "Created" if response.status_code == 201 else "Updated"
+        summary = [f"# {action}: {file_path}", "\nThe canvas is ready to open in Obsidian."]
 
-    return "\n".join(summary)
+        return "\n".join(summary)
basic_memory/mcp/tools/chatgpt_tools.py

@@ -0,0 +1,187 @@
+"""ChatGPT-compatible MCP tools for Basic Memory.
+
+These adapters expose Basic Memory's search/fetch functionality using the exact
+tool names and response structure OpenAI's MCP clients expect: each call returns
+a list containing a single `{"type": "text", "text": "{...json...}"}` item.
+"""
+
+import json
+from typing import Any, Dict, List, Optional
+from loguru import logger
+from fastmcp import Context
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.tools.search import search_notes
+from basic_memory.mcp.tools.read_note import read_note
+from basic_memory.schemas.search import SearchResponse
+from basic_memory.config import ConfigManager
+
+
+def _format_search_results_for_chatgpt(results: SearchResponse) -> List[Dict[str, Any]]:
+    """Format search results according to ChatGPT's expected schema.
+
+    Returns a list of result objects with id, title, and url fields.
+    """
+    formatted_results = []
+
+    for result in results.results:
+        formatted_result = {
+            "id": result.permalink or f"doc-{len(formatted_results)}",
+            "title": result.title if result.title and result.title.strip() else "Untitled",
+            "url": result.permalink or "",
+        }
+        formatted_results.append(formatted_result)
+
+    return formatted_results
+
+
+def _format_document_for_chatgpt(
+    content: str, identifier: str, title: Optional[str] = None
+) -> Dict[str, Any]:
+    """Format document content according to ChatGPT's expected schema.
+
+    Returns a document object with id, title, text, url, and metadata fields.
+    """
+    # Extract title from markdown content if not provided
+    if not title and isinstance(content, str):
+        lines = content.split("\n")
+        if lines and lines[0].startswith("# "):
+            title = lines[0][2:].strip()
+        else:
+            title = identifier.split("/")[-1].replace("-", " ").title()
+
+    # Ensure title is never None
+    if not title:
+        title = "Untitled Document"
+
+    # Handle error cases
+    if isinstance(content, str) and content.startswith("# Note Not Found"):
+        return {
+            "id": identifier,
+            "title": title or "Document Not Found",
+            "text": content,
+            "url": identifier,
+            "metadata": {"error": "Document not found"},
+        }
+
+    return {
+        "id": identifier,
+        "title": title or "Untitled Document",
+        "text": content,
+        "url": identifier,
+        "metadata": {"format": "markdown"},
+    }
+
+
+@mcp.tool(description="Search for content across the knowledge base")
+async def search(
+    query: str,
+    context: Context | None = None,
+) -> List[Dict[str, Any]]:
+    """ChatGPT/OpenAI MCP search adapter returning a single text content item.
+
+    Args:
+        query: Search query (full-text syntax supported by `search_notes`)
+        context: Optional FastMCP context passed through for auth/session data
+
+    Returns:
+        List with one dict: `{ "type": "text", "text": "{...JSON...}" }`
+        where the JSON body contains `results`, `total_count`, and echo of `query`.
+    """
+    logger.info(f"ChatGPT search request: query='{query}'")
+
+    try:
+        # ChatGPT tools don't expose project parameter, so use default project
+        config = ConfigManager().config
+        default_project = config.default_project
+
+        # Call underlying search_notes with sensible defaults for ChatGPT
+        results = await search_notes.fn(
+            query=query,
+            project=default_project,  # Use default project for ChatGPT
+            page=1,
+            page_size=10,  # Reasonable default for ChatGPT consumption
+            search_type="text",  # Default to full-text search
+            context=context,
+        )
+
+        # Handle string error responses from search_notes
+        if isinstance(results, str):
+            logger.warning(f"Search failed with error: {results[:100]}...")
+            search_results = {
+                "results": [],
+                "error": "Search failed",
+                "error_details": results[:500],  # Truncate long error messages
+            }
+        else:
+            # Format successful results for ChatGPT
+            formatted_results = _format_search_results_for_chatgpt(results)
+            search_results = {
+                "results": formatted_results,
+                "total_count": len(results.results),  # Use actual count from results
+                "query": query,
+            }
+            logger.info(f"Search completed: {len(formatted_results)} results returned")
+
+        # Return in MCP content array format as required by OpenAI
+        return [{"type": "text", "text": json.dumps(search_results, ensure_ascii=False)}]
+
+    except Exception as e:
+        logger.error(f"ChatGPT search failed for query '{query}': {e}")
+        error_results = {
+            "results": [],
+            "error": "Internal search error",
+            "error_message": str(e)[:200],
+        }
+        return [{"type": "text", "text": json.dumps(error_results, ensure_ascii=False)}]
+
+
+@mcp.tool(description="Fetch the full contents of a search result document")
+async def fetch(
+    id: str,
+    context: Context | None = None,
+) -> List[Dict[str, Any]]:
+    """ChatGPT/OpenAI MCP fetch adapter returning a single text content item.
+
+    Args:
+        id: Document identifier (permalink, title, or memory URL)
+        context: Optional FastMCP context passed through for auth/session data
+
+    Returns:
+        List with one dict: `{ "type": "text", "text": "{...JSON...}" }`
+        where the JSON body includes `id`, `title`, `text`, `url`, and metadata.
+    """
+    logger.info(f"ChatGPT fetch request: id='{id}'")
+
+    try:
+        # ChatGPT tools don't expose project parameter, so use default project
+        config = ConfigManager().config
+        default_project = config.default_project
+
+        # Call underlying read_note function
+        content = await read_note.fn(
+            identifier=id,
+            project=default_project,  # Use default project for ChatGPT
+            page=1,
+            page_size=10,  # Default pagination
+            context=context,
+        )
+
+        # Format the document for ChatGPT
+        document = _format_document_for_chatgpt(content, id)
+
+        logger.info(f"Fetch completed: id='{id}', content_length={len(document.get('text', ''))}")
+
+        # Return in MCP content array format as required by OpenAI
+        return [{"type": "text", "text": json.dumps(document, ensure_ascii=False)}]
+
+    except Exception as e:
+        logger.error(f"ChatGPT fetch failed for id '{id}': {e}")
+        error_document = {
+            "id": id,
+            "title": "Fetch Error",
+            "text": f"Failed to fetch document: {str(e)[:200]}",
+            "url": id,
+            "metadata": {"error": "Fetch failed"},
+        }
+        return [{"type": "text", "text": json.dumps(error_document, ensure_ascii=False)}]
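
Both adapters wrap their JSON payload in a single text content item, so a consumer has to unwrap it before using the results. A small client-side sketch of that decoding (the helper name is ours, not part of the package):

    import json
    from typing import Any, Dict, List


    def decode_adapter_payload(content: List[Dict[str, Any]]) -> Dict[str, Any]:
        # Each adapter returns [{"type": "text", "text": "<json>"}]; the useful data
        # is the JSON document serialized into the "text" field.
        first = content[0]
        assert first["type"] == "text"
        return json.loads(first["text"])


    # Example (inside an async context):
    #   payload = decode_adapter_payload(await search.fn(query="alembic migration"))
    #   payload["results"] -> list of {"id", "title", "url"} dicts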