basic-memory 0.0.0 (basic_memory-0.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of basic-memory might be problematic.
- basic_memory/__init__.py +3 -0
- basic_memory/api/__init__.py +4 -0
- basic_memory/api/app.py +42 -0
- basic_memory/api/routers/__init__.py +8 -0
- basic_memory/api/routers/knowledge_router.py +168 -0
- basic_memory/api/routers/memory_router.py +123 -0
- basic_memory/api/routers/resource_router.py +34 -0
- basic_memory/api/routers/search_router.py +34 -0
- basic_memory/cli/__init__.py +1 -0
- basic_memory/cli/app.py +4 -0
- basic_memory/cli/commands/__init__.py +9 -0
- basic_memory/cli/commands/init.py +38 -0
- basic_memory/cli/commands/status.py +152 -0
- basic_memory/cli/commands/sync.py +254 -0
- basic_memory/cli/main.py +48 -0
- basic_memory/config.py +53 -0
- basic_memory/db.py +135 -0
- basic_memory/deps.py +182 -0
- basic_memory/file_utils.py +248 -0
- basic_memory/markdown/__init__.py +19 -0
- basic_memory/markdown/entity_parser.py +137 -0
- basic_memory/markdown/markdown_processor.py +153 -0
- basic_memory/markdown/plugins.py +236 -0
- basic_memory/markdown/schemas.py +73 -0
- basic_memory/markdown/utils.py +144 -0
- basic_memory/mcp/__init__.py +1 -0
- basic_memory/mcp/async_client.py +10 -0
- basic_memory/mcp/main.py +21 -0
- basic_memory/mcp/server.py +39 -0
- basic_memory/mcp/tools/__init__.py +34 -0
- basic_memory/mcp/tools/ai_edit.py +84 -0
- basic_memory/mcp/tools/knowledge.py +56 -0
- basic_memory/mcp/tools/memory.py +142 -0
- basic_memory/mcp/tools/notes.py +122 -0
- basic_memory/mcp/tools/search.py +28 -0
- basic_memory/mcp/tools/utils.py +154 -0
- basic_memory/models/__init__.py +12 -0
- basic_memory/models/base.py +9 -0
- basic_memory/models/knowledge.py +204 -0
- basic_memory/models/search.py +34 -0
- basic_memory/repository/__init__.py +7 -0
- basic_memory/repository/entity_repository.py +156 -0
- basic_memory/repository/observation_repository.py +40 -0
- basic_memory/repository/relation_repository.py +78 -0
- basic_memory/repository/repository.py +303 -0
- basic_memory/repository/search_repository.py +259 -0
- basic_memory/schemas/__init__.py +73 -0
- basic_memory/schemas/base.py +216 -0
- basic_memory/schemas/delete.py +38 -0
- basic_memory/schemas/discovery.py +25 -0
- basic_memory/schemas/memory.py +111 -0
- basic_memory/schemas/request.py +77 -0
- basic_memory/schemas/response.py +220 -0
- basic_memory/schemas/search.py +117 -0
- basic_memory/services/__init__.py +11 -0
- basic_memory/services/context_service.py +274 -0
- basic_memory/services/entity_service.py +281 -0
- basic_memory/services/exceptions.py +15 -0
- basic_memory/services/file_service.py +213 -0
- basic_memory/services/link_resolver.py +126 -0
- basic_memory/services/search_service.py +218 -0
- basic_memory/services/service.py +36 -0
- basic_memory/sync/__init__.py +5 -0
- basic_memory/sync/file_change_scanner.py +162 -0
- basic_memory/sync/sync_service.py +140 -0
- basic_memory/sync/utils.py +66 -0
- basic_memory/sync/watch_service.py +197 -0
- basic_memory/utils.py +78 -0
- basic_memory-0.0.0.dist-info/METADATA +71 -0
- basic_memory-0.0.0.dist-info/RECORD +73 -0
- basic_memory-0.0.0.dist-info/WHEEL +4 -0
- basic_memory-0.0.0.dist-info/entry_points.txt +2 -0
- basic_memory-0.0.0.dist-info/licenses/LICENSE +661 -0
basic_memory/markdown/plugins.py ADDED

@@ -0,0 +1,236 @@
+"""Markdown-it plugins for Basic Memory markdown parsing."""
+
+from typing import List, Any, Dict
+from markdown_it import MarkdownIt
+from markdown_it.token import Token
+
+
+# Observation handling functions
+def is_observation(token: Token) -> bool:
+    """Check if token looks like our observation format."""
+    if token.type != 'inline':
+        return False
+
+    content = token.content.strip()
+    if not content:
+        return False
+
+    # if it's a markdown_task, return false
+    if content.startswith('[ ]') or content.startswith('[x]') or content.startswith('[-]'):
+        return False
+
+    has_category = content.startswith('[') and ']' in content
+    has_tags = '#' in content
+    return has_category or has_tags
+
+
+def parse_observation(token: Token) -> Dict[str, Any]:
+    """Extract observation parts from token."""
+    # Strip bullet point if present
+    content = token.content.strip()
+    if content.startswith('- '):
+        content = content[2:].strip()
+    elif content.startswith('-'):
+        content = content[1:].strip()
+
+    # Parse [category]
+    category = None
+    if content.startswith('['):
+        end = content.find(']')
+        if end != -1:
+            category = content[1:end].strip() or None # Convert empty to None
+            content = content[end + 1:].strip()
+
+    # Parse (context)
+    context = None
+    if content.endswith(')'):
+        start = content.rfind('(')
+        if start != -1:
+            context = content[start + 1:-1].strip()
+            content = content[:start].strip()
+
+    # Parse #tags and content
+    parts = content.split()
+    content_parts = []
+    tags = set() # Use set to avoid duplicates
+
+    for part in parts:
+        if part.startswith('#'):
+            # Handle multiple #tags stuck together
+            if '#' in part[1:]:
+                # Split on # but keep non-empty tags
+                subtags = [t for t in part.split('#') if t]
+                tags.update(subtags)
+            else:
+                tags.add(part[1:])
+        else:
+            content_parts.append(part)
+
+    return {
+        'category': category,
+        'content': ' '.join(content_parts).strip(),
+        'tags': list(tags) if tags else None,
+        'context': context
+    }
+
+
+# Relation handling functions
+def is_explicit_relation(token: Token) -> bool:
+    """Check if token looks like our relation format."""
+    if token.type != 'inline':
+        return False
+
+    content = token.content.strip()
+    return '[[' in content and ']]' in content
+
+
+def parse_relation(token: Token) -> Dict[str, Any]:
+    """Extract relation parts from token."""
+    # Remove bullet point if present
+    content = token.content.strip()
+    if content.startswith('- '):
+        content = content[2:].strip()
+    elif content.startswith('-'):
+        content = content[1:].strip()
+
+    # Extract [[target]]
+    target = None
+    rel_type = 'relates_to' # default
+    context = None
+
+    start = content.find('[[')
+    end = content.find(']]')
+
+    if start != -1 and end != -1:
+        # Get text before link as relation type
+        before = content[:start].strip()
+        if before:
+            rel_type = before
+
+        # Get target
+        target = content[start + 2:end].strip()
+
+        # Look for context after
+        after = content[end + 2:].strip()
+        if after.startswith('(') and after.endswith(')'):
+            context = after[1:-1].strip() or None
+
+    if not target:
+        return None
+
+    return {
+        'type': rel_type,
+        'target': target,
+        'context': context
+    }
+
+
+def parse_inline_relations(content: str) -> List[Dict[str, Any]]:
+    """Find wiki-style links in regular content."""
+    relations = []
+
+    import re
+    pattern = r'\[\[([^\]]+)\]\]'
+
+    for match in re.finditer(pattern, content):
+        target = match.group(1).strip()
+        if target and not target.startswith('[['): # Avoid nested matches
+            relations.append({
+                'type': 'links to',
+                'target': target,
+                'context': None
+            })
+
+    return relations
+
+
+def observation_plugin(md: MarkdownIt) -> None:
+    """Plugin for parsing observation format:
+    - [category] Content text #tag1 #tag2 (context)
+    - Content text #tag1 (context) # No category is also valid
+    """
+
+    def observation_rule(state: Any) -> None:
+        """Process observations in token stream."""
+        tokens = state.tokens
+        current_section = None
+        in_list_item = False
+
+        for idx in range(len(tokens)):
+            token = tokens[idx]
+
+            # Track current section by headings
+            if token.type == 'heading_open':
+                next_token = tokens[idx + 1] if idx + 1 < len(tokens) else None
+                if next_token and next_token.type == 'inline':
+                    current_section = next_token.content.lower()
+
+            # Track list nesting
+            elif token.type == 'list_item_open':
+                in_list_item = True
+            elif token.type == 'list_item_close':
+                in_list_item = False
+
+            # Initialize meta for all tokens
+            token.meta = token.meta or {}
+
+            # Parse observations in list items
+            if token.type == 'inline' and is_observation(token):
+                obs = parse_observation(token)
+                if obs['content']: # Only store if we have content
+                    token.meta['observation'] = obs
+
+    # Add the rule after inline processing
+    md.core.ruler.after('inline', 'observations', observation_rule)
+
+
+def relation_plugin(md: MarkdownIt) -> None:
+    """Plugin for parsing relation formats:
+
+    Explicit relations:
+    - relation_type [[target]] (context)
+
+    Implicit relations (links in content):
+    Some text with [[target]] reference
+    """
+
+    def relation_rule(state: Any) -> None:
+        """Process relations in token stream."""
+        tokens = state.tokens
+        current_section = None
+        in_list_item = False
+
+        for idx in range(len(tokens)):
+            token = tokens[idx]
+
+            # Track current section by headings
+            if token.type == 'heading_open':
+                next_token = tokens[idx + 1] if idx + 1 < len(tokens) else None
+                if next_token and next_token.type == 'inline':
+                    current_section = next_token.content.lower()
+
+            # Track list nesting
+            elif token.type == 'list_item_open':
+                in_list_item = True
+            elif token.type == 'list_item_close':
+                in_list_item = False
+
+            # Initialize meta for all tokens
+            token.meta = token.meta or {}
+
+            # Only process inline tokens
+            if token.type == 'inline':
+                # Check for explicit relations in list items
+                if in_list_item and is_explicit_relation(token):
+                    rel = parse_relation(token)
+                    if rel:
+                        token.meta['relations'] = [rel]
+
+                # Always check for inline links in any text
+                elif '[[' in token.content:
+                    rels = parse_inline_relations(token.content)
+                    if rels:
+                        token.meta['relations'] = token.meta.get('relations', []) + rels
+
+    # Add the rule after inline processing
+    md.core.ruler.after('inline', 'relations', relation_rule)
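
Note: the two plugins above only annotate the token stream; they attach parsed data to `token.meta` rather than changing rendering. A minimal usage sketch (not part of the diff), assuming the package is installed and behaves as the code above suggests:

```python
from markdown_it import MarkdownIt

from basic_memory.markdown.plugins import observation_plugin, relation_plugin

# Register both core rules; they run after inline parsing and only add metadata.
md = MarkdownIt().use(observation_plugin).use(relation_plugin)

note = (
    "- [idea] Try a graph view #ui (from design review)\n"
    "- implements [[Graph View]]\n"
)

for token in md.parse(note):
    if "observation" in token.meta:
        print("observation:", token.meta["observation"])
    for relation in token.meta.get("relations", []):
        print("relation:", relation)
```

Expected output, roughly: one observation dict with category `idea`, tags `['ui']`, and context `from design review`, plus one relation dict with type `implements` and target `Graph View`.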

basic_memory/markdown/schemas.py ADDED

@@ -0,0 +1,73 @@
+"""Schema models for entity markdown files."""
+
+from datetime import datetime
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+
+class Observation(BaseModel):
+    """An observation about an entity."""
+
+    category: Optional[str] = None
+    content: str
+    tags: Optional[List[str]] = None
+    context: Optional[str] = None
+
+    def __str__(self) -> str:
+        obs_string = f"- [{self.category}] {self.content}"
+        if self.tags:
+            obs_string += " " + " ".join(f"#{tag}" for tag in sorted(self.tags))
+        if self.context:
+            obs_string += f" ({self.context})"
+        return obs_string
+
+
+class Relation(BaseModel):
+    """A relation between entities."""
+
+    type: str
+    target: str
+    context: Optional[str] = None
+
+    def __str__(self) -> str:
+        rel_string = f"- {self.type} [[{self.target}]]"
+        if self.context:
+            rel_string += f" ({self.context})"
+        return rel_string
+
+
+class EntityFrontmatter(BaseModel):
+    """Required frontmatter fields for an entity."""
+
+    metadata: Optional[dict] = None
+
+    @property
+    def tags(self) -> List[str]:
+        return self.metadata.get("tags") if self.metadata else []
+
+    @property
+    def title(self) -> str:
+        return self.metadata.get("title") if self.metadata else None
+
+    @property
+    def type(self) -> str:
+        return self.metadata.get("type", "note") if self.metadata else "note"
+
+    @property
+    def permalink(self) -> str:
+        return self.metadata.get("permalink") if self.metadata else None
+
+
+
+class EntityMarkdown(BaseModel):
+    """Complete entity combining frontmatter, content, and metadata."""
+
+    frontmatter: EntityFrontmatter
+    content: Optional[str] = None
+    observations: List[Observation] = []
+    relations: List[Relation] = []
+
+    # created, updated will have values after a read
+    created: Optional[datetime] = None
+    modified: Optional[datetime] = None
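
The `__str__` methods above define the on-disk Markdown form of observations and relations. A small illustrative sketch (not part of the diff):

```python
from basic_memory.markdown.schemas import Observation, Relation

obs = Observation(category="idea", content="Render relations as a graph", tags=["ui", "graph"])
rel = Relation(type="implements", target="Graph View", context="planned for v0.1")

print(obs)  # - [idea] Render relations as a graph #graph #ui
print(rel)  # - implements [[Graph View]] (planned for v0.1)
```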

basic_memory/markdown/utils.py ADDED

@@ -0,0 +1,144 @@
+from pathlib import Path
+from typing import Optional
+
+from frontmatter import Post
+
+from basic_memory.markdown import EntityMarkdown, EntityFrontmatter, Observation, Relation
+from basic_memory.markdown.entity_parser import parse
+from basic_memory.models import Entity, ObservationCategory, Observation as ObservationModel
+from basic_memory.utils import generate_permalink
+
+
+def entity_model_to_markdown(entity: Entity, content: Optional[str] = None) -> EntityMarkdown:
+    """
+    Converts an entity model to its Markdown representation, including metadata,
+    observations, relations, and content. Ensures that observations and relations
+    from the provided content are synchronized with the entity model. Removes
+    duplicate or unmatched observations and relations from the content to maintain
+    consistency.
+
+    :param entity: An instance of the Entity class containing metadata, observations,
+        relations, and other properties of the entity.
+    :type entity: Entity
+    :param content: Optional raw Markdown-formatted content to be parsed for semantic
+        information like observations or relations.
+    :type content: Optional[str]
+    :return: An instance of the EntityMarkdown class containing the entity's
+        frontmatter, observations, relations, and sanitized content formatted
+        in Markdown.
+    :rtype: EntityMarkdown
+    """
+    metadata = entity.entity_metadata or {}
+    metadata["type"] = entity.entity_type or "note"
+    metadata["title"] = entity.title
+    metadata["permalink"] = entity.permalink
+
+    # convert model to markdown
+    entity_observations = [
+        Observation(
+            category=obs.category,
+            content=obs.content,
+            tags=obs.tags if obs.tags else None,
+            context=obs.context,
+        )
+        for obs in entity.observations
+    ]
+
+    entity_relations = [
+        Relation(
+            type=r.relation_type,
+            target=r.to_entity.title if r.to_entity else r.to_name,
+            context=r.context,
+        )
+        for r in entity.outgoing_relations
+    ]
+
+    observations = entity_observations
+    relations = entity_relations
+
+    # parse the content to see if it has semantic info (observations/relations)
+    entity_content = parse(content) if content else None
+
+    if entity_content:
+        # remove if they are already in the content
+        observations = [o for o in entity_observations if o not in entity_content.observations]
+        relations = [r for r in entity_relations if r not in entity_content.relations]
+
+        # remove from the content if not present in the db entity
+        for o in entity_content.observations:
+            if o not in entity_observations:
+                content = content.replace(str(o), "")
+
+        for r in entity_content.relations:
+            if r not in entity_relations:
+                content = content.replace(str(r), "")
+
+    return EntityMarkdown(
+        frontmatter=EntityFrontmatter(metadata=metadata),
+        content=content,
+        observations=observations,
+        relations=relations,
+        created = entity.created_at,
+        modified = entity.updated_at,
+    )
+
+
+def entity_model_from_markdown(file_path: Path, markdown: EntityMarkdown, entity: Optional[Entity] = None) -> Entity:
+    """
+    Convert markdown entity to model.
+    Does not include relations.
+
+    Args:
+        markdown: Parsed markdown entity
+        include_relations: Whether to include relations. Set False for first sync pass.
+    """
+
+    # Validate/default category
+    def get_valid_category(obs):
+        if not obs.category or obs.category not in [c.value for c in ObservationCategory]:
+            return ObservationCategory.NOTE.value
+        return obs.category
+
+    permalink = markdown.frontmatter.permalink or generate_permalink(file_path)
+    model = entity or Entity()
+
+    model.title=markdown.frontmatter.title
+    model.entity_type=markdown.frontmatter.type
+    model.permalink=permalink
+    model.file_path=str(file_path)
+    model.content_type="text/markdown"
+    model.created_at=markdown.created
+    model.updated_at=markdown.modified
+    model.entity_metadata={k:str(v) for k,v in markdown.frontmatter.metadata.items()}
+    model.observations=[
+        ObservationModel(
+            content=obs.content,
+            category=get_valid_category(obs),
+            context=obs.context,
+            tags=obs.tags,
+        )
+        for obs in markdown.observations
+    ]
+
+    return model
+
+async def schema_to_markdown(schema):
+    """
+    Convert schema to markdown.
+    :param schema: the schema to convert
+    :return: Post
+    """
+    # Create Post object
+    content = schema.content or ""
+    frontmatter_metadata = schema.entity_metadata or {}
+
+    # remove from map so we can define ordering in frontmatter
+    if "type" in frontmatter_metadata:
+        del frontmatter_metadata["type"]
+    if "title" in frontmatter_metadata:
+        del frontmatter_metadata["title"]
+    if "permalink" in frontmatter_metadata:
+        del frontmatter_metadata["permalink"]
+
+    post = Post(content, title=schema.title, type=schema.entity_type, permalink=schema.permalink, **frontmatter_metadata)
+    return post
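
A hedged sketch of `entity_model_from_markdown` in isolation, using only the models that appear in this diff; the attribute names read back from the returned `Entity` are taken from the assignments above, and the file path and values are hypothetical:

```python
from pathlib import Path

from basic_memory.markdown import EntityFrontmatter, EntityMarkdown, Observation
from basic_memory.markdown.utils import entity_model_from_markdown

markdown = EntityMarkdown(
    frontmatter=EntityFrontmatter(
        metadata={"title": "Graph View", "type": "note", "permalink": "graph-view"}
    ),
    content="# Graph View\n",
    observations=[Observation(category="idea", content="Render relations as a graph")],
)

# Builds (or updates) an Entity model in memory, without touching the database.
entity = entity_model_from_markdown(Path("notes/graph-view.md"), markdown)
print(entity.permalink, entity.entity_type, len(entity.observations))
```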

basic_memory/mcp/__init__.py ADDED

@@ -0,0 +1 @@
+"""MCP server for basic-memory."""

basic_memory/mcp/main.py ADDED

@@ -0,0 +1,21 @@
+"""Main MCP entrypoint for Basic Memory.
+
+Creates and configures the shared MCP instance and handles server startup.
+"""
+
+from loguru import logger
+
+from basic_memory.config import config
+
+# Import shared mcp instance
+from basic_memory.mcp.server import mcp
+
+# Import tools to register them
+import basic_memory.mcp.tools # noqa: F401
+
+
+if __name__ == "__main__":
+    home_dir = config.home
+    logger.info("Starting Basic Memory MCP server")
+    logger.info(f"Home directory: {home_dir}")
+    mcp.run()

basic_memory/mcp/server.py ADDED

@@ -0,0 +1,39 @@
+"""Enhanced FastMCP server instance for Basic Memory."""
+import sys
+
+from loguru import logger
+from mcp.server.fastmcp import FastMCP
+from mcp.server.fastmcp.utilities.logging import configure_logging
+
+from basic_memory.config import config
+
+# mcp console logging
+configure_logging(level="INFO")
+
+
+def setup_logging(home_dir: str = config.home, log_file: str = ".basic-memory/basic-memory.log"):
+    """Configure file logging to the basic-memory home directory."""
+    log = f"{home_dir}/{log_file}"
+
+    # Add file handler with rotation
+    logger.add(
+        log,
+        rotation="100 MB",
+        retention="10 days",
+        backtrace=True,
+        diagnose=True,
+        enqueue=True,
+        colorize=False,
+    )
+
+    # Add stderr handler
+    logger.add(
+        sys.stderr,
+        colorize=True,
+    )
+
+# start our out file logging
+setup_logging()
+
+# Create the shared server instance
+mcp = FastMCP("Basic Memory")
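
The module above builds a single shared `FastMCP` instance at import time, and the bundled tools register themselves against it with the `@mcp.tool()` decorator (see `basic_memory/mcp/tools/ai_edit.py` below). A hedged sketch of that pattern with a hypothetical extra tool:

```python
from basic_memory.mcp.server import mcp


@mcp.tool()
async def ping() -> str:
    """Hypothetical example tool, not part of the package."""
    return "pong"


if __name__ == "__main__":
    # Roughly what basic_memory/mcp/main.py does after importing the bundled tools.
    mcp.run()
```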

basic_memory/mcp/tools/__init__.py ADDED

@@ -0,0 +1,34 @@
+"""MCP tools for Basic Memory.
+
+This package provides the complete set of tools for interacting with
+Basic Memory through the MCP protocol. Importing this module registers
+all tools with the MCP server.
+"""
+
+# Import tools to register them with MCP
+from basic_memory.mcp.tools.memory import build_context, recent_activity
+#from basic_memory.mcp.tools.ai_edit import ai_edit
+from basic_memory.mcp.tools.notes import read_note, write_note
+
+from basic_memory.mcp.tools.knowledge import (
+    delete_entities,
+    get_entity,
+    get_entities,
+)
+
+__all__ = [
+    # Knowledge graph tools
+    "delete_entities",
+    "get_entity",
+    "get_entities",
+    # Search tools
+    "search",
+    # memory tools
+    "build_context",
+    "recent_activity",
+    #notes
+    "read_note",
+    "write_note",
+    # file edit
+    #"ai_edit",
+]

basic_memory/mcp/tools/ai_edit.py ADDED

@@ -0,0 +1,84 @@
+"""Tool for AI-assisted file editing."""
+
+from pathlib import Path
+from typing import List, Dict, Any
+
+from basic_memory.mcp.server import mcp
+
+
+def _detect_indent(text: str, match_pos: int) -> int:
+    """Get indentation level at a position in text."""
+    # Find start of line containing the match
+    line_start = text.rfind("\n", 0, match_pos)
+    if line_start < 0:
+        line_start = 0
+    else:
+        line_start += 1 # Skip newline char
+
+    # Count leading spaces
+    pos = line_start
+    while pos < len(text) and text[pos].isspace():
+        pos += 1
+    return pos - line_start
+
+
+def _apply_indent(text: str, spaces: int) -> str:
+    """Apply indentation to text."""
+    prefix = " " * spaces
+    return "\n".join(prefix + line if line.strip() else line for line in text.split("\n"))
+
+
+@mcp.tool()
+async def ai_edit(path: str, edits: List[Dict[str, Any]]) -> bool:
+    """AI-assisted file editing tool.
+
+    Args:
+        path: Path to file to edit
+        edits: List of edits to apply. Each edit is a dict with:
+            oldText: Text to replace
+            newText: New content
+            options: Optional dict with:
+                indent: Number of spaces to indent
+                preserveIndentation: Keep existing indent (default: true)
+
+    Returns:
+        bool: True if edits were applied successfully
+    """
+    try:
+        # Read file
+        content = Path(path).read_text()
+        original = content
+        success = True
+
+        # Apply each edit
+        for edit in edits:
+            old_text = edit["oldText"]
+            new_text = edit["newText"]
+            options = edit.get("options", {})
+
+            # Find text to replace
+            match_pos = content.find(old_text)
+            if match_pos < 0:
+                success = False
+                continue
+
+            # Handle indentation
+            if not options.get("preserveIndentation", True):
+                # Use existing indentation
+                indent = _detect_indent(content, match_pos)
+                new_text = _apply_indent(new_text, indent)
+            elif "indent" in options:
+                # Use specified indentation
+                new_text = _apply_indent(new_text, options["indent"])
+
+            # Apply the edit
+            content = content.replace(old_text, new_text)
+
+        # Write back if changed
+        if content != original:
+            Path(path).write_text(content)
+        return success
+
+    except Exception as e:
+        print(f"Error applying edits: {e}")
+        return False
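
For reference, a hedged sketch of the edit payload `ai_edit` expects; the file path and snippets are hypothetical, and it assumes the `@mcp.tool()` decorator leaves the coroutine directly callable:

```python
import asyncio

from basic_memory.mcp.tools.ai_edit import ai_edit

edits = [
    # Straight text replacement
    {"oldText": "status: draft", "newText": "status: published"},
    # Replacement re-indented to 2 spaces via the optional options dict
    {"oldText": "- old item", "newText": "- new item", "options": {"indent": 2}},
]

# Returns False if any oldText snippet is not found or an exception occurs.
ok = asyncio.run(ai_edit("notes/project.md", edits))
print("applied" if ok else "some edits were skipped or failed")
```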