basic-memory 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- basic_memory/__init__.py +1 -1
- basic_memory/alembic/alembic.ini +119 -0
- basic_memory/alembic/env.py +23 -1
- basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py +51 -0
- basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py +44 -0
- basic_memory/api/app.py +0 -4
- basic_memory/api/routers/knowledge_router.py +1 -9
- basic_memory/api/routers/memory_router.py +41 -25
- basic_memory/api/routers/resource_router.py +119 -12
- basic_memory/api/routers/search_router.py +17 -9
- basic_memory/cli/app.py +0 -2
- basic_memory/cli/commands/db.py +11 -8
- basic_memory/cli/commands/import_chatgpt.py +31 -27
- basic_memory/cli/commands/import_claude_conversations.py +29 -27
- basic_memory/cli/commands/import_claude_projects.py +30 -29
- basic_memory/cli/commands/import_memory_json.py +28 -26
- basic_memory/cli/commands/status.py +16 -26
- basic_memory/cli/commands/sync.py +11 -12
- basic_memory/cli/commands/tools.py +180 -0
- basic_memory/cli/main.py +1 -1
- basic_memory/config.py +16 -2
- basic_memory/db.py +1 -0
- basic_memory/deps.py +5 -1
- basic_memory/file_utils.py +6 -4
- basic_memory/markdown/entity_parser.py +3 -3
- basic_memory/mcp/async_client.py +1 -1
- basic_memory/mcp/main.py +25 -0
- basic_memory/mcp/prompts/__init__.py +15 -0
- basic_memory/mcp/prompts/ai_assistant_guide.py +28 -0
- basic_memory/mcp/prompts/continue_conversation.py +172 -0
- basic_memory/mcp/prompts/json_canvas_spec.py +25 -0
- basic_memory/mcp/prompts/recent_activity.py +46 -0
- basic_memory/mcp/prompts/search.py +127 -0
- basic_memory/mcp/prompts/utils.py +98 -0
- basic_memory/mcp/server.py +3 -7
- basic_memory/mcp/tools/__init__.py +6 -4
- basic_memory/mcp/tools/canvas.py +99 -0
- basic_memory/mcp/tools/knowledge.py +26 -14
- basic_memory/mcp/tools/memory.py +57 -31
- basic_memory/mcp/tools/notes.py +65 -72
- basic_memory/mcp/tools/resource.py +192 -0
- basic_memory/mcp/tools/search.py +13 -4
- basic_memory/mcp/tools/utils.py +2 -1
- basic_memory/models/knowledge.py +27 -11
- basic_memory/repository/repository.py +1 -1
- basic_memory/repository/search_repository.py +17 -4
- basic_memory/schemas/__init__.py +0 -11
- basic_memory/schemas/base.py +4 -1
- basic_memory/schemas/memory.py +14 -2
- basic_memory/schemas/request.py +1 -1
- basic_memory/schemas/search.py +4 -1
- basic_memory/services/context_service.py +14 -6
- basic_memory/services/entity_service.py +19 -12
- basic_memory/services/file_service.py +69 -2
- basic_memory/services/link_resolver.py +12 -9
- basic_memory/services/search_service.py +59 -13
- basic_memory/sync/__init__.py +3 -2
- basic_memory/sync/sync_service.py +287 -107
- basic_memory/sync/watch_service.py +125 -129
- basic_memory/utils.py +27 -15
- {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/METADATA +3 -2
- basic_memory-0.8.0.dist-info/RECORD +91 -0
- basic_memory/alembic/README +0 -1
- basic_memory/schemas/discovery.py +0 -28
- basic_memory/sync/file_change_scanner.py +0 -158
- basic_memory/sync/utils.py +0 -31
- basic_memory-0.6.0.dist-info/RECORD +0 -81
- {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/WHEEL +0 -0
- {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/entry_points.txt +0 -0
- {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/mcp/tools/notes.py
CHANGED

@@ -7,6 +7,7 @@ while leveraging the underlying knowledge graph structure.
 from typing import Optional, List
 
 from loguru import logger
+import logfire
 
 from basic_memory.mcp.server import mcp
 from basic_memory.mcp.async_client import client
@@ -60,75 +61,61 @@ async def write_note(
     - Observation counts by category
     - Relation counts (resolved/unresolved)
     - Tags if present
-
-    Examples:
-        write_note(
-            title="Search Implementation",
-            content="# Search Component\n\n"
-            "Implementation of the search feature, building on [[Core Search]].\n\n"
-            "## Observations\n"
-            "- [tech] Using FTS5 for full-text search #implementation\n"
-            "- [design] Need pagination support #todo\n\n"
-            "## Relations\n"
-            "- implements [[Search Spec]]\n"
-            "- depends_on [[Database Schema]]",
-            folder="docs/components"
-        )
     """
-    … (50 removed lines not shown)
+    with logfire.span("Writing note", title=title, folder=folder):  # pyright: ignore [reportGeneralTypeIssues]
+        logger.info(f"Writing note folder:'{folder}' title: '{title}'")
+
+        # Create the entity request
+        metadata = {"tags": [f"#{tag}" for tag in tags]} if tags else None
+        entity = Entity(
+            title=title,
+            folder=folder,
+            entity_type="note",
+            content_type="text/markdown",
+            content=content,
+            entity_metadata=metadata,
+        )
+
+        # Create or update via knowledge API
+        logger.info(f"Creating {entity.permalink}")
+        url = f"/knowledge/entities/{entity.permalink}"
+        response = await call_put(client, url, json=entity.model_dump())
+        result = EntityResponse.model_validate(response.json())
+
+        # Format semantic summary based on status code
+        action = "Created" if response.status_code == 201 else "Updated"
+        summary = [
+            f"# {action} {result.file_path} ({result.checksum[:8] if result.checksum else 'unknown'})",
+            f"permalink: {result.permalink}",
+        ]
+
+        if result.observations:
+            categories = {}
+            for obs in result.observations:
+                categories[obs.category] = categories.get(obs.category, 0) + 1
+
+            summary.append("\n## Observations")
+            for category, count in sorted(categories.items()):
+                summary.append(f"- {category}: {count}")
+
+        if result.relations:
+            unresolved = sum(1 for r in result.relations if not r.to_id)
+            resolved = len(result.relations) - unresolved
+
+            summary.append("\n## Relations")
+            summary.append(f"- Resolved: {resolved}")
+            if unresolved:
+                summary.append(f"- Unresolved: {unresolved}")
+                summary.append("\nUnresolved relations will be retried on next sync.")
+
+        if tags:
+            summary.append(f"\n## Tags\n- {', '.join(tags)}")
+
+        return "\n".join(summary)
 
 
 @mcp.tool(description="Read note content by title, permalink, relation, or pattern")
-async def read_note(identifier: str) -> str:
+async def read_note(identifier: str, page: int = 1, page_size: int = 10) -> str:
     """Get note content in unified diff format.
 
     The content is returned in a unified diff inspired format:
@@ -146,6 +133,8 @@ async def read_note(identifier: str) -> str:
             - Note permalink ("docs/example")
             - Relation path ("docs/example/depends-on/other-doc")
             - Pattern match ("docs/*-architecture")
+        page: the page number of results to return (default 1)
+        page_size: the number of results to return per page (default 10)
 
     Returns:
         Document content in unified diff format. For single documents, returns
@@ -180,10 +169,13 @@ async def read_note(identifier: str) -> str:
     - Last modified timestamp
     - Content checksum
     """
-    … (4 removed lines not shown)
+    with logfire.span("Reading note", identifier=identifier):  # pyright: ignore [reportGeneralTypeIssues]
+        logger.info(f"Reading note {identifier}")
+        url = memory_url_path(identifier)
+        response = await call_get(
+            client, f"/resource/{url}", params={"page": page, "page_size": page_size}
+        )
+        return response.text
 
 
 @mcp.tool(description="Delete a note by title or permalink")
@@ -203,6 +195,7 @@ async def delete_note(identifier: str) -> bool:
     # Delete by permalink
     delete_note("notes/project-planning")
     """
-    … (3 removed lines not shown)
+    with logfire.span("Deleting note", identifier=identifier):  # pyright: ignore [reportGeneralTypeIssues]
+        response = await call_delete(client, f"/knowledge/entities/{identifier}")
+        result = DeleteEntitiesResponse.model_validate(response.json())
+        return result.deleted
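Taken together, `write_note` now returns a compact markdown summary (action, file path, checksum prefix, observation and relation counts) instead of a full entity payload, and `read_note` pages its results. A minimal sketch of the new call shapes; the permalink and tag values are hypothetical, and it assumes the decorated tool functions remain directly awaitable, as the project's own tests use them:

import asyncio

from basic_memory.mcp.tools.notes import read_note, write_note


async def demo() -> None:
    # Returns a markdown summary like "# Created <file_path> (<checksum8>)".
    summary = await write_note(
        title="Search Implementation",
        content="# Search Component\n\n- [tech] Using FTS5 #implementation\n",
        folder="docs/components",
        tags=["search"],  # stored as {"tags": ["#search"]} in entity_metadata
    )
    print(summary)

    # New in 0.8.0: page/page_size are forwarded to GET /resource/{path},
    # so pattern identifiers like "docs/*" no longer return unbounded output.
    print(await read_note("docs/components/search-implementation", page=1, page_size=10))


asyncio.run(demo())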
basic_memory/mcp/tools/resource.py
ADDED

@@ -0,0 +1,192 @@
+from loguru import logger
+
+from basic_memory.mcp.server import mcp
+from basic_memory.mcp.async_client import client
+from basic_memory.mcp.tools.utils import call_get
+from basic_memory.schemas.memory import memory_url_path
+
+import base64
+import io
+from PIL import Image as PILImage
+
+
+def calculate_target_params(content_length):
+    """Calculate initial quality and size based on input file size"""
+    target_size = 350000  # Reduced target for more safety margin
+    ratio = content_length / target_size
+
+    logger.debug(
+        "Calculating target parameters",
+        content_length=content_length,
+        ratio=ratio,
+        target_size=target_size,
+    )
+
+    if ratio > 4:
+        # Very large images - start very aggressive
+        return 50, 600  # Lower initial quality and size
+    elif ratio > 2:
+        return 60, 800
+    else:
+        return 70, 1000
+
+
+def resize_image(img, max_size):
+    """Resize image maintaining aspect ratio"""
+    original_dimensions = {"width": img.width, "height": img.height}
+
+    if img.width > max_size or img.height > max_size:
+        ratio = min(max_size / img.width, max_size / img.height)
+        new_size = (int(img.width * ratio), int(img.height * ratio))
+        logger.debug("Resizing image", original=original_dimensions, target=new_size, ratio=ratio)
+        return img.resize(new_size, PILImage.Resampling.LANCZOS)
+
+    logger.debug("No resize needed", dimensions=original_dimensions)
+    return img
+
+
+def optimize_image(img, content_length, max_output_bytes=350000):
+    """Iteratively optimize image with aggressive size reduction"""
+    stats = {
+        "dimensions": {"width": img.width, "height": img.height},
+        "mode": img.mode,
+        "estimated_memory": (img.width * img.height * len(img.getbands())),
+    }
+
+    initial_quality, initial_size = calculate_target_params(content_length)
+
+    logger.debug(
+        "Starting optimization",
+        image_stats=stats,
+        content_length=content_length,
+        initial_quality=initial_quality,
+        initial_size=initial_size,
+        max_output_bytes=max_output_bytes,
+    )
+
+    quality = initial_quality
+    size = initial_size
+
+    # Convert to RGB if needed
+    if img.mode in ("RGBA", "LA") or (img.mode == "P" and "transparency" in img.info):
+        img = img.convert("RGB")
+        logger.debug("Converted to RGB mode")
+
+    iteration = 0
+    min_size = 300  # Absolute minimum size
+    min_quality = 20  # Absolute minimum quality
+
+    while True:
+        iteration += 1
+        buf = io.BytesIO()
+        resized = resize_image(img, size)
+
+        resized.save(
+            buf,
+            format="JPEG",
+            quality=quality,
+            optimize=True,
+            progressive=True,
+            subsampling="4:2:0",
+        )
+
+        output_size = buf.getbuffer().nbytes
+        reduction_ratio = output_size / content_length
+
+        logger.debug(
+            "Optimization attempt",
+            iteration=iteration,
+            quality=quality,
+            size=size,
+            output_bytes=output_size,
+            target_bytes=max_output_bytes,
+            reduction_ratio=f"{reduction_ratio:.2f}",
+        )
+
+        if output_size < max_output_bytes:
+            logger.info(
+                "Image optimization complete",
+                final_size=output_size,
+                quality=quality,
+                dimensions={"width": resized.width, "height": resized.height},
+                reduction_ratio=f"{reduction_ratio:.2f}",
+            )
+            return buf.getvalue()
+
+        # Very aggressive reduction for large files
+        if content_length > 2000000:  # 2MB+  # pragma: no cover
+            quality = max(min_quality, quality - 20)
+            size = max(min_size, int(size * 0.6))
+        elif content_length > 1000000:  # 1MB+  # pragma: no cover
+            quality = max(min_quality, quality - 15)
+            size = max(min_size, int(size * 0.7))
+        else:
+            quality = max(min_quality, quality - 10)  # pragma: no cover
+            size = max(min_size, int(size * 0.8))  # pragma: no cover
+
+        logger.debug("Reducing parameters", new_quality=quality, new_size=size)  # pragma: no cover
+
+        # If we've hit minimum values and still too big
+        if quality <= min_quality and size <= min_size:  # pragma: no cover
+            logger.warning(
+                "Reached minimum parameters",
+                final_size=output_size,
+                over_limit_by=output_size - max_output_bytes,
+            )
+            return buf.getvalue()
+
+
+@mcp.tool(description="Read a single file's content by path or permalink")
+async def read_resource(path: str) -> dict:
+    """Get a file's raw content."""
+    logger.info("Reading resource", path=path)
+
+    url = memory_url_path(path)
+    response = await call_get(client, f"/resource/{url}")
+    content_type = response.headers.get("content-type", "application/octet-stream")
+    content_length = int(response.headers.get("content-length", 0))
+
+    logger.debug("Resource metadata", content_type=content_type, size=content_length, path=path)
+
+    # Handle text or json
+    if content_type.startswith("text/") or content_type == "application/json":
+        logger.debug("Processing text resource")
+        return {
+            "type": "text",
+            "text": response.text,
+            "content_type": content_type,
+            "encoding": "utf-8",
+        }
+
+    # Handle images
+    elif content_type.startswith("image/"):
+        logger.debug("Processing image")
+        img = PILImage.open(io.BytesIO(response.content))
+        img_bytes = optimize_image(img, content_length)
+
+        return {
+            "type": "image",
+            "source": {
+                "type": "base64",
+                "media_type": "image/jpeg",
+                "data": base64.b64encode(img_bytes).decode("utf-8"),
+            },
+        }
+
+    # Handle other file types
+    else:
+        logger.debug(f"Processing binary resource content_type {content_type}")
+        if content_length > 350000:
+            logger.warning("Document too large for response", size=content_length)
+            return {
+                "type": "error",
+                "error": f"Document size {content_length} bytes exceeds maximum allowed size",
+            }
+        return {
+            "type": "document",
+            "source": {
+                "type": "base64",
+                "media_type": content_type,
+                "data": base64.b64encode(response.content).decode("utf-8"),
+            },
+        }
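The optimizer's starting point follows directly from the arithmetic in `calculate_target_params`: the input size is compared against the fixed 350,000-byte target. A worked example with illustrative sizes:

# 1.5 MB input: 1_500_000 / 350_000 ≈ 4.29 > 4, so encoding starts
# aggressively at JPEG quality 50 inside a 600 px bounding box.
assert calculate_target_params(1_500_000) == (50, 600)

# 500 kB input: ratio ≈ 1.43 (neither > 4 nor > 2), so the gentler
# (quality 70, 1000 px) starting point is used instead.
assert calculate_target_params(500_000) == (70, 1000)

# On each failed pass, optimize_image then lowers quality and shrinks the
# bounding box (e.g. quality - 15 and size * 0.7 for 1-2 MB inputs) until
# the encoded JPEG fits under max_output_bytes or the (20, 300) floor is hit.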
basic_memory/mcp/tools/search.py
CHANGED

@@ -1,5 +1,6 @@
 """Search tools for Basic Memory MCP server."""
 
+import logfire
 from loguru import logger
 
 from basic_memory.mcp.server import mcp
@@ -11,7 +12,7 @@ from basic_memory.mcp.async_client import client
 @mcp.tool(
     description="Search across all content in basic-memory, including documents and entities",
 )
-async def search(query: SearchQuery) -> SearchResponse:
+async def search(query: SearchQuery, page: int = 1, page_size: int = 10) -> SearchResponse:
     """Search across all content in basic-memory.
 
     Args:
@@ -20,10 +21,18 @@ async def search(query: SearchQuery) -> SearchResponse:
         - types: Optional list of content types to search ("document" or "entity")
         - entity_types: Optional list of entity types to filter by
         - after_date: Optional date filter for recent content
+        page: the page number of results to return (default 1)
+        page_size: the number of results to return per page (default 10)
 
     Returns:
         SearchResponse with search results and metadata
     """
-    … (3 removed lines not shown)
+    with logfire.span("Searching for {query}", query=query):  # pyright: ignore [reportGeneralTypeIssues]
+        logger.info(f"Searching for {query}")
+        response = await call_post(
+            client,
+            "/search/",
+            json=query.model_dump(),
+            params={"page": page, "page_size": page_size},
+        )
+        return SearchResponse.model_validate(response.json())
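For callers, pagination is now handled server-side: the tool forwards `page`/`page_size` as query parameters while the `SearchQuery` body is unchanged. A sketch of the new call; the `text` field on `SearchQuery` and the `results` attribute on `SearchResponse` are assumptions, not confirmed by this diff:

import asyncio

from basic_memory.mcp.tools.search import search
from basic_memory.schemas.search import SearchQuery


async def demo() -> None:
    # Second page of ten hits; equivalent to POST /search/?page=2&page_size=10
    # with the serialized query as the JSON body.
    response = await search(SearchQuery(text="fts5 pagination"), page=2, page_size=10)
    for row in response.results:  # attribute names assumed
        print(row.permalink, row.score)


asyncio.run(demo())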
basic_memory/mcp/tools/utils.py
CHANGED

@@ -44,7 +44,7 @@ async def call_get(
         response.raise_for_status()
         return response
     except HTTPStatusError as e:
-        logger.…
+        logger.exception(f"Error calling GET {url}: {e}")
         raise ToolError(f"Error calling tool: {e}.") from e
 
 
@@ -79,6 +79,7 @@ async def call_put(
             timeout=timeout,
            extensions=extensions,
         )
+        logger.debug(response)
         response.raise_for_status()
         return response
     except HTTPStatusError as e:
basic_memory/models/knowledge.py
CHANGED

@@ -12,6 +12,7 @@ from sqlalchemy import (
     DateTime,
     Index,
     JSON,
+    text,
 )
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
@@ -32,11 +33,18 @@ class Entity(Base):
 
     __tablename__ = "entity"
     __table_args__ = (
-        … (1 removed line not shown)
+        # Regular indexes
         Index("ix_entity_type", "entity_type"),
         Index("ix_entity_title", "title"),
         Index("ix_entity_created_at", "created_at"),  # For timeline queries
         Index("ix_entity_updated_at", "updated_at"),  # For timeline queries
+        # Unique index only for markdown files with non-null permalinks
+        Index(
+            "uix_entity_permalink",
+            "permalink",
+            unique=True,
+            sqlite_where=text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
+        ),
     )
 
     # Core identity
@@ -46,8 +54,8 @@ class Entity(Base):
     entity_metadata: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)
     content_type: Mapped[str] = mapped_column(String)
 
-    # Normalized path for URIs
-    permalink: Mapped[str] = mapped_column(String, …
+    # Normalized path for URIs - required for markdown files only
+    permalink: Mapped[Optional[str]] = mapped_column(String, nullable=True, index=True)
     # Actual filesystem relative path
     file_path: Mapped[str] = mapped_column(String, unique=True, index=True)
     # checksum of file
@@ -79,6 +87,11 @@ class Entity(Base):
         """Get all relations (incoming and outgoing) for this entity."""
         return self.incoming_relations + self.outgoing_relations
 
+    @property
+    def is_markdown(self):
+        """Check if the entity is a markdown file."""
+        return self.content_type == "text/markdown"
+
     def __repr__(self) -> str:
         return f"Entity(id={self.id}, name='{self.title}', type='{self.entity_type}'"
 
@@ -127,7 +140,10 @@ class Relation(Base):
 
     __tablename__ = "relation"
     __table_args__ = (
-        UniqueConstraint("from_id", "to_id", "relation_type", name="…
+        UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation_from_id_to_id"),
+        UniqueConstraint(
+            "from_id", "to_name", "relation_type", name="uix_relation_from_id_to_name"
+        ),
         Index("ix_relation_type", "relation_type"),
         Index("ix_relation_from_id", "from_id"),  # Add FK indexes
         Index("ix_relation_to_id", "to_id"),
@@ -155,13 +171,13 @@
         Format: source/relation_type/target
         Example: "specs/search/implements/features/search-ui"
         """
+        # Only create permalinks when both source and target have permalinks
+        from_permalink = self.from_entity.permalink or self.from_entity.file_path
+
         if self.to_entity:
-            … (3 removed lines not shown)
-            return generate_permalink(
-                f"{self.from_entity.permalink}/{self.relation_type}/{self.to_name}"
-            )
+            to_permalink = self.to_entity.permalink or self.to_entity.file_path
+            return generate_permalink(f"{from_permalink}/{self.relation_type}/{to_permalink}")
+        return generate_permalink(f"{from_permalink}/{self.relation_type}/{self.to_name}")
 
     def __repr__(self) -> str:
-        return f"Relation(id={self.id}, from_id={self.from_id}, to_id={self.to_id}, to_name={self.to_name}, type='{self.relation_type}')"
+        return f"Relation(id={self.id}, from_id={self.from_id}, to_id={self.to_id}, to_name={self.to_name}, type='{self.relation_type}')"  # pragma: no cover
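The `sqlite_where` clause turns `uix_entity_permalink` into a partial index, which is what lets `permalink` become nullable: uniqueness is enforced only for markdown rows. A standalone sqlite3 sketch of the behavior the model definition requests, with the table reduced to the two relevant columns:

import sqlite3

con = sqlite3.connect(":memory:")
con.executescript(
    """
    CREATE TABLE entity (permalink TEXT, content_type TEXT);
    -- the partial unique index the SQLAlchemy Index() above asks for
    CREATE UNIQUE INDEX uix_entity_permalink ON entity (permalink)
        WHERE content_type = 'text/markdown' AND permalink IS NOT NULL;
    """
)

# Binary files may omit permalinks entirely without tripping the index...
con.execute("INSERT INTO entity VALUES (NULL, 'image/png')")
con.execute("INSERT INTO entity VALUES (NULL, 'image/png')")

# ...but two markdown entities can never share one.
con.execute("INSERT INTO entity VALUES ('docs/a', 'text/markdown')")
try:
    con.execute("INSERT INTO entity VALUES ('docs/a', 'text/markdown')")
except sqlite3.IntegrityError as e:
    print("rejected:", e)  # UNIQUE constraint failed: entity.permalink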
basic_memory/repository/repository.py
CHANGED

@@ -97,7 +97,7 @@ class Repository[T: Base]:
         entities = (self.Model,)
         return select(*entities)
 
-    async def find_all(self, skip: int = 0, limit: Optional[int] = …
+    async def find_all(self, skip: int = 0, limit: Optional[int] = None) -> Sequence[T]:
         """Fetch records from the database with pagination."""
         logger.debug(f"Finding all {self.Model.__name__} (skip={skip}, limit={limit})")
 
basic_memory/repository/search_repository.py
CHANGED

@@ -21,13 +21,14 @@ class SearchIndexRow:
 
     id: int
     type: str
-    permalink: str
     file_path: str
-    metadata: Optional[dict] = None
 
     # date values
-    created_at: …
-    updated_at: …
+    created_at: datetime
+    updated_at: datetime
+
+    permalink: Optional[str] = None
+    metadata: Optional[dict] = None
 
     # assigned in result
     score: Optional[float] = None
@@ -114,6 +115,7 @@ class SearchRepository:
         after_date: Optional[datetime] = None,
         entity_types: Optional[List[str]] = None,
         limit: int = 10,
+        offset: int = 0,
     ) -> List[SearchIndexRow]:
         """Search across all indexed content with fuzzy matching."""
         conditions = []
@@ -169,6 +171,7 @@
 
         # set limit on search query
         params["limit"] = limit
+        params["offset"] = offset
 
         # Build WHERE clause
         where_clause = " AND ".join(conditions) if conditions else "1=1"
@@ -194,6 +197,7 @@
             WHERE {where_clause}
             ORDER BY score ASC {order_by_clause}
             LIMIT :limit
+            OFFSET :offset
         """
 
         logger.debug(f"Search {sql} params: {params}")
@@ -262,6 +266,15 @@
             logger.debug(f"indexed row {search_index_row}")
             await session.commit()
 
+    async def delete_by_entity_id(self, entity_id: int):
+        """Delete an item from the search index by entity_id."""
+        async with db.scoped_session(self.session_maker) as session:
+            await session.execute(
+                text("DELETE FROM search_index WHERE entity_id = :entity_id"),
+                {"entity_id": entity_id},
+            )
+            await session.commit()
+
     async def delete_by_permalink(self, permalink: str):
         """Delete an item from the search index."""
         async with db.scoped_session(self.session_maker) as session:
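The new `offset` pairs with the existing `limit` to complete LIMIT/OFFSET pagination; the API layer presumably maps the `page`/`page_size` parameters added to the routers and MCP tools onto it roughly as follows (hypothetical helper, not code from this release):

def page_to_limit_offset(page: int, page_size: int) -> tuple[int, int]:
    """Map 1-based page/page_size onto SQL LIMIT/OFFSET values."""
    if page < 1 or page_size < 1:
        raise ValueError("page and page_size must be >= 1")
    return page_size, (page - 1) * page_size


# page 1 -> LIMIT 10 OFFSET 0; page 3 -> LIMIT 10 OFFSET 20
assert page_to_limit_offset(1, 10) == (10, 0)
assert page_to_limit_offset(3, 10) == (10, 20)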
basic_memory/schemas/__init__.py
CHANGED

@@ -37,13 +37,6 @@ from basic_memory.schemas.response import (
     DeleteEntitiesResponse,
 )
 
-# Discovery and analytics models
-from basic_memory.schemas.discovery import (
-    EntityTypeList,
-    ObservationCategoryList,
-    TypedEntityList,
-)
-
 # For convenient imports, export all models
 __all__ = [
     # Base
@@ -66,8 +59,4 @@ __all__ = [
     "DeleteEntitiesResponse",
     # Delete Operations
     "DeleteEntitiesRequest",
-    # Discovery and Analytics
-    "EntityTypeList",
-    "ObservationCategoryList",
-    "TypedEntityList",
 ]
basic_memory/schemas/base.py
CHANGED

@@ -159,7 +159,10 @@ class Entity(BaseModel):
     @property
     def file_path(self):
         """Get the file path for this entity based on its permalink."""
-        … (1 removed line not shown)
+        if self.content_type == "text/markdown":
+            return f"{self.folder}/{self.title}.md" if self.folder else f"{self.title}.md"
+        else:
+            return f"{self.folder}/{self.title}" if self.folder else self.title
 
     @property
     def permalink(self) -> Permalink:
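The branch matters because non-markdown entities keep their original file names, while notes derive theirs from the title. Assuming the remaining `Entity` fields are optional or defaulted (they are not shown in this hunk), the mapping looks like:

from basic_memory.schemas.base import Entity

# Markdown notes get a .md suffix derived from the title...
note = Entity(title="Search Spec", folder="specs", content_type="text/markdown")
assert note.file_path == "specs/Search Spec.md"

# ...while binary files keep the title verbatim as the file name.
image = Entity(title="diagram.png", folder="assets", content_type="image/png")
assert image.file_path == "assets/diagram.png"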
basic_memory/schemas/memory.py
CHANGED

@@ -9,7 +9,7 @@ from pydantic import BaseModel, Field, BeforeValidator, TypeAdapter
 from basic_memory.schemas.search import SearchItemType
 
 
-def normalize_memory_url(url: str) -> str:
+def normalize_memory_url(url: str | None) -> str:
     """Normalize a MemoryUrl string.
 
     Args:
@@ -24,6 +24,9 @@ def normalize_memory_url(url: str) -> str:
     >>> normalize_memory_url("memory://specs/search")
     'memory://specs/search'
     """
+    if not url:
+        return ""
+
     clean_path = url.removeprefix("memory://")
     return f"memory://{clean_path}"
 
@@ -59,7 +62,7 @@ class EntitySummary(BaseModel):
     """Simplified entity representation."""
 
     type: str = "entity"
-    permalink: str
+    permalink: Optional[str]
     title: str
     file_path: str
     created_at: datetime
@@ -69,19 +72,25 @@ class RelationSummary(BaseModel):
     """Simplified relation representation."""
 
     type: str = "relation"
+    title: str
+    file_path: str
     permalink: str
     relation_type: str
     from_id: str
     to_id: Optional[str] = None
+    created_at: datetime
 
 
 class ObservationSummary(BaseModel):
     """Simplified observation representation."""
 
     type: str = "observation"
+    title: str
+    file_path: str
     permalink: str
     category: str
     content: str
+    created_at: datetime
 
 
 class MemoryMetadata(BaseModel):
@@ -111,3 +120,6 @@ class GraphContext(BaseModel):
 
     # Context metadata
     metadata: MemoryMetadata
+
+    page: int = 1
+    page_size: int = 1
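The new guard makes `normalize_memory_url` total over optional permalinks, which can now be `None`. Its behavior, per the docstring examples plus the added branch:

from basic_memory.schemas.memory import normalize_memory_url

assert normalize_memory_url("memory://specs/search") == "memory://specs/search"
assert normalize_memory_url("specs/search") == "memory://specs/search"
assert normalize_memory_url(None) == ""  # new: falsy input yields "" instead of raising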