basic-memory 0.11.0__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of basic-memory might be problematic. Click here for more details.

@@ -1,17 +1,27 @@
1
1
  """Search tools for Basic Memory MCP server."""
2
2
 
3
+ from typing import List, Optional
4
+
3
5
  from loguru import logger
4
6
 
7
+ from basic_memory.mcp.async_client import client
5
8
  from basic_memory.mcp.server import mcp
6
9
  from basic_memory.mcp.tools.utils import call_post
7
- from basic_memory.schemas.search import SearchQuery, SearchResponse
8
- from basic_memory.mcp.async_client import client
10
+ from basic_memory.schemas.search import SearchItemType, SearchQuery, SearchResponse
9
11
 
10
12
 
11
13
  @mcp.tool(
12
14
  description="Search across all content in the knowledge base.",
13
15
  )
14
- async def search_notes(query: SearchQuery, page: int = 1, page_size: int = 10) -> SearchResponse:
16
+ async def search_notes(
17
+ query: str,
18
+ page: int = 1,
19
+ page_size: int = 10,
20
+ search_type: str = "text",
21
+ types: Optional[List[str]] = None,
22
+ entity_types: Optional[List[str]] = None,
23
+ after_date: Optional[str] = None,
24
+ ) -> SearchResponse:
15
25
  """Search across all content in the knowledge base.
16
26
 
17
27
  This tool searches the knowledge base using full-text search, pattern matching,
@@ -19,59 +29,85 @@ async def search_notes(query: SearchQuery, page: int = 1, page_size: int = 10) -
19
29
  and date.
20
30
 
21
31
  Args:
22
- query: SearchQuery object with search parameters including:
23
- - text: Full-text search (e.g., "project planning")
24
- Supports boolean operators: AND, OR, NOT and parentheses for grouping
25
- - title: Search only in titles (e.g., "Meeting notes")
26
- - permalink: Exact permalink match (e.g., "docs/meeting-notes")
27
- - permalink_match: Pattern matching for permalinks (e.g., "docs/*-notes")
28
- - types: Optional list of content types to search (e.g., ["entity", "observation"])
29
- - entity_types: Optional list of entity types to filter by (e.g., ["note", "person"])
30
- - after_date: Optional date filter for recent content (e.g., "1 week", "2d")
32
+ query: The search query string
31
33
  page: The page number of results to return (default 1)
32
34
  page_size: The number of results to return per page (default 10)
35
+ search_type: Type of search to perform, one of: "text", "title", "permalink" (default: "text")
36
+ types: Optional list of note types to search (e.g., ["note", "person"])
37
+ entity_types: Optional list of entity types to filter by (e.g., ["entity", "observation"])
38
+ after_date: Optional date filter for recent content (e.g., "1 week", "2d")
33
39
 
34
40
  Returns:
35
41
  SearchResponse with results and pagination info
36
42
 
37
43
  Examples:
38
44
  # Basic text search
39
- results = await search_notes(SearchQuery(text="project planning"))
45
+ results = await search_notes("project planning")
40
46
 
41
47
  # Boolean AND search (both terms must be present)
42
- results = await search_notes(SearchQuery(text="project AND planning"))
48
+ results = await search_notes("project AND planning")
43
49
 
44
50
  # Boolean OR search (either term can be present)
45
- results = await search_notes(SearchQuery(text="project OR meeting"))
51
+ results = await search_notes("project OR meeting")
46
52
 
47
53
  # Boolean NOT search (exclude terms)
48
- results = await search_notes(SearchQuery(text="project NOT meeting"))
54
+ results = await search_notes("project NOT meeting")
49
55
 
50
56
  # Boolean search with grouping
51
- results = await search_notes(SearchQuery(text="(project OR planning) AND notes"))
57
+ results = await search_notes("(project OR planning) AND notes")
52
58
 
53
59
  # Search with type filter
54
- results = await search_notes(SearchQuery(
55
- text="meeting notes",
60
+ results = await search_notes(
61
+ query="meeting notes",
62
+ types=["entity"],
63
+ )
64
+
65
+ # Search with entity type filter, e.g., note vs person
66
+ results = await search_notes(
67
+ query="meeting notes",
56
68
  types=["entity"],
57
- ))
69
+ )
58
70
 
59
71
  # Search for recent content
60
- results = await search_notes(SearchQuery(
61
- text="bug report",
72
+ results = await search_notes(
73
+ query="bug report",
62
74
  after_date="1 week"
63
- ))
75
+ )
64
76
 
65
77
  # Pattern matching on permalinks
66
- results = await search_notes(SearchQuery(
67
- permalink_match="docs/meeting-*"
68
- ))
78
+ results = await search_notes(
79
+ query="docs/meeting-*",
80
+ search_type="permalink"
81
+ )
69
82
  """
70
- logger.info(f"Searching for {query}")
83
+ # Create a SearchQuery object based on the parameters
84
+ search_query = SearchQuery()
85
+
86
+ # Set the appropriate search field based on search_type
87
+ if search_type == "text":
88
+ search_query.text = query
89
+ elif search_type == "title":
90
+ search_query.title = query
91
+ elif search_type == "permalink" and "*" in query:
92
+ search_query.permalink_match = query
93
+ elif search_type == "permalink":
94
+ search_query.permalink = query
95
+ else:
96
+ search_query.text = query # Default to text search
97
+
98
+ # Add optional filters if provided
99
+ if entity_types:
100
+ search_query.entity_types = [SearchItemType(t) for t in entity_types]
101
+ if types:
102
+ search_query.types = types
103
+ if after_date:
104
+ search_query.after_date = after_date
105
+
106
+ logger.info(f"Searching for {search_query}")
71
107
  response = await call_post(
72
108
  client,
73
109
  "/search/",
74
- json=query.model_dump(),
110
+ json=search_query.model_dump(),
75
111
  params={"page": page, "page_size": page_size},
76
112
  )
77
113
  return SearchResponse.model_validate(response.json())
@@ -88,8 +88,10 @@ async def write_note(
88
88
  # Format semantic summary based on status code
89
89
  action = "Created" if response.status_code == 201 else "Updated"
90
90
  summary = [
91
- f"# {action} {result.file_path} ({result.checksum[:8] if result.checksum else 'unknown'})",
91
+ f"# {action} note",
92
+ f"file_path: {result.file_path}",
92
93
  f"permalink: {result.permalink}",
94
+ f"checksum: {result.checksum[:8] if result.checksum else 'unknown'}",
93
95
  ]
94
96
 
95
97
  # Count observations by category
@@ -4,10 +4,10 @@ import json
4
4
  import time
5
5
  from dataclasses import dataclass
6
6
  from datetime import datetime
7
- from typing import List, Optional, Any, Dict
7
+ from typing import Any, Dict, List, Optional
8
8
 
9
9
  from loguru import logger
10
- from sqlalchemy import text, Executable, Result
10
+ from sqlalchemy import Executable, Result, text
11
11
  from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
12
12
 
13
13
  from basic_memory import db
@@ -123,9 +123,9 @@ class SearchRepository:
123
123
  permalink: Optional[str] = None,
124
124
  permalink_match: Optional[str] = None,
125
125
  title: Optional[str] = None,
126
- types: Optional[List[SearchItemType]] = None,
126
+ types: Optional[List[str]] = None,
127
127
  after_date: Optional[datetime] = None,
128
- entity_types: Optional[List[str]] = None,
128
+ entity_types: Optional[List[SearchItemType]] = None,
129
129
  limit: int = 10,
130
130
  offset: int = 0,
131
131
  ) -> List[SearchIndexRow]:
@@ -174,15 +174,15 @@ class SearchRepository:
174
174
  else:
175
175
  conditions.append("permalink MATCH :permalink")
176
176
 
177
- # Handle type filter
178
- if types:
179
- type_list = ", ".join(f"'{t.value}'" for t in types)
180
- conditions.append(f"type IN ({type_list})")
181
-
182
177
  # Handle entity type filter
183
178
  if entity_types:
184
- entity_type_list = ", ".join(f"'{t}'" for t in entity_types)
185
- conditions.append(f"json_extract(metadata, '$.entity_type') IN ({entity_type_list})")
179
+ type_list = ", ".join(f"'{t.value}'" for t in entity_types)
180
+ conditions.append(f"type IN ({type_list})")
181
+
182
+ # Handle type filter
183
+ if types:
184
+ type_list = ", ".join(f"'{t}'" for t in types)
185
+ conditions.append(f"json_extract(metadata, '$.entity_type') IN ({type_list})")
186
186
 
187
187
  # Handle date filter using datetime() for proper comparison
188
188
  if after_date:
@@ -49,8 +49,8 @@ class SearchQuery(BaseModel):
49
49
  title: Optional[str] = None # title only search
50
50
 
51
51
  # Optional filters
52
- types: Optional[List[SearchItemType]] = None # Filter by item type
53
- entity_types: Optional[List[str]] = None # Filter by entity type
52
+ types: Optional[List[str]] = None # Filter by type
53
+ entity_types: Optional[List[SearchItemType]] = None # Filter by entity type
54
54
  after_date: Optional[Union[datetime, str]] = None # Time-based filter
55
55
 
56
56
  @field_validator("after_date")
@@ -81,7 +81,7 @@ class ContextService:
81
81
  else:
82
82
  logger.debug(f"Build context for '{types}'")
83
83
  primary = await self.search_repository.search(
84
- types=types, after_date=since, limit=limit, offset=offset
84
+ entity_types=types, after_date=since, limit=limit, offset=offset
85
85
  )
86
86
 
87
87
  # Get type_id pairs for traversal
@@ -1,24 +1,24 @@
1
1
  """Service for managing entities in the database."""
2
2
 
3
3
  from pathlib import Path
4
- from typing import Sequence, List, Optional, Tuple, Union
4
+ from typing import List, Optional, Sequence, Tuple, Union
5
5
 
6
6
  import frontmatter
7
7
  from loguru import logger
8
8
  from sqlalchemy.exc import IntegrityError
9
9
 
10
10
  from basic_memory.markdown import EntityMarkdown
11
+ from basic_memory.markdown.entity_parser import EntityParser
11
12
  from basic_memory.markdown.utils import entity_model_from_markdown, schema_to_markdown
12
- from basic_memory.models import Entity as EntityModel, Observation, Relation
13
+ from basic_memory.models import Entity as EntityModel
14
+ from basic_memory.models import Observation, Relation
13
15
  from basic_memory.repository import ObservationRepository, RelationRepository
14
16
  from basic_memory.repository.entity_repository import EntityRepository
15
17
  from basic_memory.schemas import Entity as EntitySchema
16
18
  from basic_memory.schemas.base import Permalink
17
- from basic_memory.services.exceptions import EntityNotFoundError, EntityCreationError
18
- from basic_memory.services import FileService
19
- from basic_memory.services import BaseService
19
+ from basic_memory.services import BaseService, FileService
20
+ from basic_memory.services.exceptions import EntityCreationError, EntityNotFoundError
20
21
  from basic_memory.services.link_resolver import LinkResolver
21
- from basic_memory.markdown.entity_parser import EntityParser
22
22
  from basic_memory.utils import generate_permalink
23
23
 
24
24
 
@@ -89,7 +89,7 @@ class EntityService(BaseService[EntityModel]):
89
89
  logger.debug(f"Creating or updating entity: {schema}")
90
90
 
91
91
  # Try to find existing entity using smart resolution
92
- existing = await self.link_resolver.resolve_link(schema.permalink)
92
+ existing = await self.link_resolver.resolve_link(schema.permalink or schema.file_path)
93
93
 
94
94
  if existing:
95
95
  logger.debug(f"Found existing entity: {existing.permalink}")
@@ -100,7 +100,7 @@ class EntityService(BaseService[EntityModel]):
100
100
 
101
101
  async def create_entity(self, schema: EntitySchema) -> EntityModel:
102
102
  """Create a new entity and write to filesystem."""
103
- logger.debug(f"Creating entity: {schema.permalink}")
103
+ logger.debug(f"Creating entity: {schema.title}")
104
104
 
105
105
  # Get file path and ensure it's a Path object
106
106
  file_path = Path(schema.file_path)
@@ -230,7 +230,7 @@ class EntityService(BaseService[EntityModel]):
230
230
  Creates the entity with null checksum to indicate sync not complete.
231
231
  Relations will be added in second pass.
232
232
  """
233
- logger.debug(f"Creating entity: {markdown.frontmatter.title}")
233
+ logger.debug(f"Creating entity: {markdown.frontmatter.title} file_path: {file_path}")
234
234
  model = entity_model_from_markdown(file_path, markdown)
235
235
 
236
236
  # Mark as incomplete because we still need to add relations
@@ -315,7 +315,7 @@ class EntityService(BaseService[EntityModel]):
315
315
  except IntegrityError:
316
316
  # Unique constraint violation - relation already exists
317
317
  logger.debug(
318
- f"Skipping duplicate relation {rel.type} from {db_entity.permalink} target: {rel.target}, type: {rel.type}"
318
+ f"Skipping duplicate relation {rel.type} from {db_entity.permalink} target: {rel.target}"
319
319
  )
320
320
  continue
321
321
 
@@ -46,10 +46,17 @@ class LinkResolver:
46
46
  logger.debug(f"Found title match: {entity.title}")
47
47
  return entity
48
48
 
49
+ # 3. Try file path
50
+ found_path = await self.entity_repository.get_by_file_path(clean_text)
51
+ if found_path:
52
+ logger.debug(f"Found entity with path: {found_path.file_path}")
53
+ return found_path
54
+
55
+ # search if indicated
49
56
  if use_search and "*" not in clean_text:
50
57
  # 3. Fall back to search for fuzzy matching on title
51
58
  results = await self.search_service.search(
52
- query=SearchQuery(title=clean_text, types=[SearchItemType.ENTITY]),
59
+ query=SearchQuery(title=clean_text, entity_types=[SearchItemType.ENTITY]),
53
60
  )
54
61
 
55
62
  if results:
@@ -181,17 +181,6 @@ class SearchService:
181
181
  Each type gets its own row in the search index with appropriate metadata.
182
182
  """
183
183
 
184
- if entity.permalink is None: # pragma: no cover
185
- logger.error(
186
- "Missing permalink for markdown entity",
187
- entity_id=entity.id,
188
- title=entity.title,
189
- file_path=entity.file_path,
190
- )
191
- raise ValueError(
192
- f"Entity permalink should not be None for markdown entity: {entity.id} ({entity.title})"
193
- )
194
-
195
184
  content_stems = []
196
185
  content_snippet = ""
197
186
  title_variants = self._generate_variants(entity.title)
@@ -202,22 +191,13 @@ class SearchService:
202
191
  content_stems.append(content)
203
192
  content_snippet = f"{content[:250]}"
204
193
 
205
- content_stems.extend(self._generate_variants(entity.permalink))
194
+ if entity.permalink:
195
+ content_stems.extend(self._generate_variants(entity.permalink))
196
+
206
197
  content_stems.extend(self._generate_variants(entity.file_path))
207
198
 
208
199
  entity_content_stems = "\n".join(p for p in content_stems if p and p.strip())
209
200
 
210
- if entity.permalink is None: # pragma: no cover
211
- logger.error(
212
- "Missing permalink for markdown entity",
213
- entity_id=entity.id,
214
- title=entity.title,
215
- file_path=entity.file_path,
216
- )
217
- raise ValueError(
218
- f"Entity permalink should not be None for markdown entity: {entity.id} ({entity.title})"
219
- )
220
-
221
201
  # Index entity
222
202
  await self.repository.index_item(
223
203
  SearchIndexRow(
@@ -11,6 +11,8 @@ from typing import Dict, Optional, Set, Tuple
11
11
  from loguru import logger
12
12
  from sqlalchemy.exc import IntegrityError
13
13
 
14
+ from basic_memory.config import ProjectConfig
15
+ from basic_memory.file_utils import has_frontmatter
14
16
  from basic_memory.markdown import EntityParser
15
17
  from basic_memory.models import Entity
16
18
  from basic_memory.repository import EntityRepository, RelationRepository
@@ -65,6 +67,7 @@ class SyncService:
65
67
 
66
68
  def __init__(
67
69
  self,
70
+ config: ProjectConfig,
68
71
  entity_service: EntityService,
69
72
  entity_parser: EntityParser,
70
73
  entity_repository: EntityRepository,
@@ -72,6 +75,7 @@ class SyncService:
72
75
  search_service: SearchService,
73
76
  file_service: FileService,
74
77
  ):
78
+ self.config = config
75
79
  self.entity_service = entity_service
76
80
  self.entity_parser = entity_parser
77
81
  self.entity_repository = entity_repository
@@ -327,43 +331,52 @@ class SyncService:
327
331
  """
328
332
  # Parse markdown first to get any existing permalink
329
333
  logger.debug("Parsing markdown file", path=path)
330
- entity_markdown = await self.entity_parser.parse_file(path)
331
334
 
332
- # Resolve permalink - this handles all the cases including conflicts
333
- permalink = await self.entity_service.resolve_permalink(path, markdown=entity_markdown)
335
+ file_path = self.entity_parser.base_path / path
336
+ file_content = file_path.read_text()
337
+ file_contains_frontmatter = has_frontmatter(file_content)
334
338
 
335
- # If permalink changed, update the file
336
- if permalink != entity_markdown.frontmatter.permalink:
337
- logger.info(
338
- "Updating permalink",
339
- path=path,
340
- old_permalink=entity_markdown.frontmatter.permalink,
341
- new_permalink=permalink,
342
- )
339
+ # entity markdown will always contain front matter, so it can be used to create/update the entity
340
+ entity_markdown = await self.entity_parser.parse_file(path)
343
341
 
344
- entity_markdown.frontmatter.metadata["permalink"] = permalink
345
- checksum = await self.file_service.update_frontmatter(path, {"permalink": permalink})
346
- else:
347
- checksum = await self.file_service.compute_checksum(path)
342
+ # if the file contains frontmatter, resolve a permalink
343
+ if file_contains_frontmatter:
344
+ # Resolve permalink - this handles all the cases including conflicts
345
+ permalink = await self.entity_service.resolve_permalink(path, markdown=entity_markdown)
346
+
347
+ # If permalink changed, update the file
348
+ if permalink != entity_markdown.frontmatter.permalink:
349
+ logger.info(
350
+ "Updating permalink",
351
+ path=path,
352
+ old_permalink=entity_markdown.frontmatter.permalink,
353
+ new_permalink=permalink,
354
+ )
355
+
356
+ entity_markdown.frontmatter.metadata["permalink"] = permalink
357
+ await self.file_service.update_frontmatter(path, {"permalink": permalink})
348
358
 
349
359
  # if the file is new, create an entity
350
360
  if new:
351
361
  # Create entity with final permalink
352
- logger.debug("Creating new entity from markdown", path=path, permalink=permalink)
353
-
362
+ logger.debug("Creating new entity from markdown", path=path)
354
363
  await self.entity_service.create_entity_from_markdown(Path(path), entity_markdown)
355
364
 
356
365
  # otherwise we need to update the entity and observations
357
366
  else:
358
- logger.debug("Updating entity from markdown", path=path, permalink=permalink)
359
-
367
+ logger.debug("Updating entity from markdown", path=path)
360
368
  await self.entity_service.update_entity_and_observations(Path(path), entity_markdown)
361
369
 
362
370
  # Update relations and search index
363
371
  entity = await self.entity_service.update_entity_relations(path, entity_markdown)
364
372
 
373
+ # After updating relations, we need to compute the checksum again
374
+ # This is necessary for files with wikilinks to ensure consistent checksums
375
+ # after relation processing is complete
376
+ final_checksum = await self.file_service.compute_checksum(path)
377
+
365
378
  # set checksum
366
- await self.entity_repository.update(entity.id, {"checksum": checksum})
379
+ await self.entity_repository.update(entity.id, {"checksum": final_checksum})
367
380
 
368
381
  logger.debug(
369
382
  "Markdown sync completed",
@@ -371,9 +384,11 @@ class SyncService:
371
384
  entity_id=entity.id,
372
385
  observation_count=len(entity.observations),
373
386
  relation_count=len(entity.relations),
387
+ checksum=final_checksum,
374
388
  )
375
389
 
376
- return entity, checksum
390
+ # Return the final checksum to ensure everything is consistent
391
+ return entity, final_checksum
377
392
 
378
393
  async def sync_regular_file(self, path: str, new: bool = True) -> Tuple[Optional[Entity], str]:
379
394
  """Sync a non-markdown file with basic tracking.
@@ -468,8 +483,30 @@ class SyncService:
468
483
 
469
484
  entity = await self.entity_repository.get_by_file_path(old_path)
470
485
  if entity:
471
- # Update file_path but keep the same permalink for link stability
472
- updated = await self.entity_repository.update(entity.id, {"file_path": new_path})
486
+ # Update file_path in all cases
487
+ updates = {"file_path": new_path}
488
+
489
+ # If configured, also update permalink to match new path
490
+ if self.config.update_permalinks_on_move:
491
+ # generate new permalink value
492
+ new_permalink = await self.entity_service.resolve_permalink(new_path)
493
+
494
+ # write to file and get new checksum
495
+ new_checksum = await self.file_service.update_frontmatter(
496
+ new_path, {"permalink": new_permalink}
497
+ )
498
+
499
+ updates["permalink"] = new_permalink
500
+ updates["checksum"] = new_checksum
501
+
502
+ logger.info(
503
+ "Updating permalink on move",
504
+ old_permalink=entity.permalink,
505
+ new_permalink=new_permalink,
506
+ new_checksum=new_checksum,
507
+ )
508
+
509
+ updated = await self.entity_repository.update(entity.id, updates)
473
510
 
474
511
  if updated is None: # pragma: no cover
475
512
  logger.error(
basic_memory/utils.py CHANGED
@@ -138,15 +138,23 @@ def parse_tags(tags: Union[List[str], str, None]) -> List[str]:
138
138
 
139
139
  Returns:
140
140
  A list of tag strings, or an empty list if no tags
141
+
142
+ Note:
143
+ This function strips leading '#' characters from tags to prevent
144
+ their accumulation when tags are processed multiple times.
141
145
  """
142
146
  if tags is None:
143
147
  return []
144
148
 
149
+ # Process list of tags
145
150
  if isinstance(tags, list):
146
- return tags
151
+ # First strip whitespace, then strip leading '#' characters to prevent accumulation
152
+ return [tag.strip().lstrip("#") for tag in tags if tag and tag.strip()]
147
153
 
154
+ # Process comma-separated string of tags
148
155
  if isinstance(tags, str):
149
- return [tag.strip() for tag in tags.split(",") if tag.strip()]
156
+ # Split by comma, strip whitespace, then strip leading '#' characters
157
+ return [tag.strip().lstrip("#") for tag in tags.split(",") if tag and tag.strip()]
150
158
 
151
159
  # For any other type, try to convert to string and parse
152
160
  try: # pragma: no cover
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: basic-memory
3
- Version: 0.11.0
3
+ Version: 0.12.0
4
4
  Summary: Local-first knowledge management combining Zettelkasten with knowledge graphs
5
5
  Project-URL: Homepage, https://github.com/basicmachines-co/basic-memory
6
6
  Project-URL: Repository, https://github.com/basicmachines-co/basic-memory
@@ -47,8 +47,8 @@ Basic Memory lets you build persistent knowledge through natural conversations w
47
47
  Claude, while keeping everything in simple Markdown files on your computer. It uses the Model Context Protocol (MCP) to
48
48
  enable any compatible LLM to read and write to your local knowledge base.
49
49
 
50
- - Website: http://basicmachines.co
51
- - Documentation: http://memory.basicmachines.co
50
+ - Website: https://basicmachines.co
51
+ - Documentation: https://memory.basicmachines.co
52
52
 
53
53
  ## Pick up your conversation right where you left off
54
54
 
@@ -296,6 +296,43 @@ Examples of relations:
296
296
  - documented_in [[Coffee Journal]]
297
297
  ```
298
298
 
299
+ ## Using with VS Code
300
+ For one-click installation, click one of the install buttons below...
301
+
302
+ [![Install with UV in VS Code](https://img.shields.io/badge/VS_Code-UV-0098FF?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=basic-memory&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22basic-memory%22%2C%22mcp%22%5D%7D) [![Install with UV in VS Code Insiders](https://img.shields.io/badge/VS_Code_Insiders-UV-24bfa5?style=flat-square&logo=visualstudiocode&logoColor=white)](https://insiders.vscode.dev/redirect/mcp/install?name=basic-memory&config=%7B%22command%22%3A%22uvx%22%2C%22args%22%3A%5B%22basic-memory%22%2C%22mcp%22%5D%7D&quality=insiders)
303
+
304
+ You can use Basic Memory with VS Code to easily retrieve and store information while coding. Click the installation buttons above for one-click setup, or follow the manual installation instructions below.
305
+
306
+ ### Manual Installation
307
+
308
+ Add the following JSON block to your User Settings (JSON) file in VS Code. You can do this by pressing `Ctrl + Shift + P` and typing `Preferences: Open User Settings (JSON)`.
309
+
310
+ ```json
311
+ {
312
+ "mcp": {
313
+ "servers": {
314
+ "basic-memory": {
315
+ "command": "uvx",
316
+ "args": ["basic-memory", "mcp"]
317
+ }
318
+ }
319
+ }
320
+ }
321
+ ```
322
+
323
+ Optionally, you can add it to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
324
+
325
+ ```json
326
+ {
327
+ "servers": {
328
+ "basic-memory": {
329
+ "command": "uvx",
330
+ "args": ["basic-memory", "mcp"]
331
+ }
332
+ }
333
+ }
334
+ ```
335
+
299
336
  ## Using with Claude Desktop
300
337
 
301
338
  Basic Memory is built using the MCP (Model Context Protocol) and works with the Claude desktop app (https://claude.ai/):