basic-memory 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (35)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/api/routers/knowledge_router.py +0 -8
  3. basic_memory/api/routers/memory_router.py +26 -10
  4. basic_memory/api/routers/resource_router.py +14 -8
  5. basic_memory/api/routers/search_router.py +17 -9
  6. basic_memory/cli/app.py +1 -1
  7. basic_memory/cli/commands/db.py +11 -8
  8. basic_memory/cli/commands/import_chatgpt.py +31 -27
  9. basic_memory/cli/commands/import_claude_conversations.py +29 -27
  10. basic_memory/cli/commands/import_claude_projects.py +30 -29
  11. basic_memory/cli/commands/import_memory_json.py +28 -26
  12. basic_memory/cli/commands/status.py +8 -6
  13. basic_memory/cli/commands/sync.py +6 -3
  14. basic_memory/cli/commands/tools.py +157 -0
  15. basic_memory/cli/main.py +1 -0
  16. basic_memory/config.py +1 -1
  17. basic_memory/db.py +1 -0
  18. basic_memory/deps.py +5 -1
  19. basic_memory/mcp/tools/knowledge.py +26 -14
  20. basic_memory/mcp/tools/memory.py +48 -29
  21. basic_memory/mcp/tools/notes.py +66 -72
  22. basic_memory/mcp/tools/search.py +13 -4
  23. basic_memory/repository/search_repository.py +3 -0
  24. basic_memory/schemas/memory.py +3 -0
  25. basic_memory/schemas/request.py +1 -1
  26. basic_memory/schemas/search.py +2 -0
  27. basic_memory/services/context_service.py +14 -6
  28. basic_memory/services/search_service.py +3 -1
  29. basic_memory/sync/sync_service.py +98 -89
  30. basic_memory/utils.py +4 -7
  31. {basic_memory-0.6.0.dist-info → basic_memory-0.7.0.dist-info}/METADATA +2 -2
  32. {basic_memory-0.6.0.dist-info → basic_memory-0.7.0.dist-info}/RECORD +35 -34
  33. {basic_memory-0.6.0.dist-info → basic_memory-0.7.0.dist-info}/WHEEL +0 -0
  34. {basic_memory-0.6.0.dist-info → basic_memory-0.7.0.dist-info}/entry_points.txt +0 -0
  35. {basic_memory-0.6.0.dist-info → basic_memory-0.7.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/__init__.py CHANGED
@@ -1,3 +1,3 @@
 """basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
 
-__version__ = "0.6.0"
+__version__ = "0.7.0"
basic_memory/api/routers/knowledge_router.py CHANGED
@@ -94,11 +94,8 @@ async def get_entity(
     try:
         entity = await entity_service.get_by_permalink(permalink)
         result = EntityResponse.model_validate(entity)
-
-        logger.info(f"response: get_entity with result={result}")
         return result
     except EntityNotFoundError:
-        logger.error(f"Error: Entity with {permalink} not found")
         raise HTTPException(status_code=404, detail=f"Entity with {permalink} not found")
 
 
@@ -114,8 +111,6 @@ async def get_entities(
     result = EntityListResponse(
         entities=[EntityResponse.model_validate(entity) for entity in entities]
     )
-
-    logger.info(f"response: get_entities with result={result}")
     return result
 
 
@@ -135,7 +130,6 @@ async def delete_entity(
 
     entity = await link_resolver.resolve_link(identifier)
     if entity is None:
-        logger.info("response: delete_entity with result=DeleteEntitiesResponse(deleted=False)")
        return DeleteEntitiesResponse(deleted=False)
 
     # Delete the entity
@@ -145,7 +139,6 @@ async def delete_entity(
     background_tasks.add_task(search_service.delete_by_permalink, entity.permalink)
 
     result = DeleteEntitiesResponse(deleted=deleted)
-    logger.info(f"response: delete_entity with result={result}")
     return result
 
 
@@ -166,5 +159,4 @@ async def delete_entities(
         background_tasks.add_task(search_service.delete_by_permalink, permalink)
 
     result = DeleteEntitiesResponse(deleted=deleted)
-    logger.info(f"response: delete_entities with result={result}")
     return result
basic_memory/api/routers/memory_router.py CHANGED
@@ -24,7 +24,7 @@ from basic_memory.services.context_service import ContextResultRow
 router = APIRouter(prefix="/memory", tags=["memory"])
 
 
-async def to_graph_context(context, entity_repository: EntityRepository):
+async def to_graph_context(context, entity_repository: EntityRepository, page: int, page_size: int):
     # return results
     async def to_summary(item: SearchIndexRow | ContextResultRow):
         match item.type:
@@ -66,7 +66,11 @@ async def to_graph_context(context, entity_repository: EntityRepository):
     metadata = MemoryMetadata.model_validate(context["metadata"])
     # Transform to GraphContext
     return GraphContext(
-        primary_results=primary_results, related_results=related_results, metadata=metadata
+        primary_results=primary_results,
+        related_results=related_results,
+        metadata=metadata,
+        page=page,
+        page_size=page_size,
     )
 
 
@@ -77,7 +81,9 @@ async def recent(
     type: Annotated[list[SearchItemType] | None, Query()] = None,
     depth: int = 1,
     timeframe: TimeFrame = "7d",
-    max_results: int = 10,
+    page: int = 1,
+    page_size: int = 10,
+    max_related: int = 10,
 ) -> GraphContext:
     # return all types by default
     types = (
@@ -87,16 +93,20 @@ async def recent(
     )
 
     logger.debug(
-        f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` max_results: `{max_results}`"
+        f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
     )
     # Parse timeframe
     since = parse(timeframe)
+    limit = page_size
+    offset = (page - 1) * page_size
 
     # Build context
     context = await context_service.build_context(
-        types=types, depth=depth, since=since, max_results=max_results
+        types=types, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
+    )
+    return await to_graph_context(
+        context, entity_repository=entity_repository, page=page, page_size=page_size
     )
-    return await to_graph_context(context, entity_repository=entity_repository)
 
 
 # get_memory_context needs to be declared last so other paths can match
@@ -109,21 +119,27 @@ async def get_memory_context(
     uri: str,
     depth: int = 1,
     timeframe: TimeFrame = "7d",
-    max_results: int = 10,
+    page: int = 1,
+    page_size: int = 10,
+    max_related: int = 10,
 ) -> GraphContext:
     """Get rich context from memory:// URI."""
     # add the project name from the config to the url as the "host
     # Parse URI
     logger.debug(
-        f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` max_results: `{max_results}`"
+        f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
     )
     memory_url = normalize_memory_url(uri)
 
     # Parse timeframe
     since = parse(timeframe)
+    limit = page_size
+    offset = (page - 1) * page_size
 
     # Build context
     context = await context_service.build_context(
-        memory_url, depth=depth, since=since, max_results=max_results
+        memory_url, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
+    )
+    return await to_graph_context(
+        context, entity_repository=entity_repository, page=page, page_size=page_size
     )
-    return await to_graph_context(context, entity_repository=entity_repository)
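
Note: every paginated endpoint in this release converts the 1-based page/page_size pair into a SQL-style limit/offset before calling into the service layer. A minimal sketch of that shared arithmetic (the helper name is illustrative, not part of the package):

    def to_limit_offset(page: int, page_size: int) -> tuple[int, int]:
        # page is 1-based: page 1 -> offset 0, page 2 -> offset page_size, ...
        if page < 1 or page_size < 1:
            raise ValueError("page and page_size must be >= 1")
        return page_size, (page - 1) * page_size

    # e.g. page=3, page_size=10 -> limit=10, offset=20 (rows 21-30)
    assert to_limit_offset(3, 10) == (10, 20)
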
basic_memory/api/routers/resource_router.py CHANGED
@@ -21,16 +21,16 @@ from basic_memory.schemas.search import SearchQuery, SearchItemType
 router = APIRouter(prefix="/resource", tags=["resources"])
 
 
-def get_entity_ids(item: SearchIndexRow) -> list[int]:
+def get_entity_ids(item: SearchIndexRow) -> set[int]:
     match item.type:
         case SearchItemType.ENTITY:
-            return [item.id]
+            return {item.id}
         case SearchItemType.OBSERVATION:
-            return [item.entity_id]  # pyright: ignore [reportReturnType]
+            return {item.entity_id}  # pyright: ignore [reportReturnType]
         case SearchItemType.RELATION:
             from_entity = item.from_id
             to_entity = item.to_id  # pyright: ignore [reportReturnType]
-            return [from_entity, to_entity] if to_entity else [from_entity]  # pyright: ignore [reportReturnType]
+            return {from_entity, to_entity} if to_entity else {from_entity}  # pyright: ignore [reportReturnType]
         case _:  # pragma: no cover
             raise ValueError(f"Unexpected type: {item.type}")
 
@@ -44,6 +44,8 @@ async def get_resource_content(
     file_service: FileServiceDep,
     background_tasks: BackgroundTasks,
     identifier: str,
+    page: int = 1,
+    page_size: int = 10,
 ) -> FileResponse:
     """Get resource content by identifier: name or permalink."""
     logger.debug(f"Getting content for: {identifier}")
@@ -52,6 +54,10 @@ async def get_resource_content(
     entity = await link_resolver.resolve_link(identifier)
     results = [entity] if entity else []
 
+    # pagination for multiple results
+    limit = page_size
+    offset = (page - 1) * page_size
+
     # search using the identifier as a permalink
     if not results:
         # if the identifier contains a wildcard, use GLOB search
@@ -60,13 +66,13 @@ async def get_resource_content(
             if "*" in identifier
             else SearchQuery(permalink=identifier)
         )
-        search_results = await search_service.search(query)
+        search_results = await search_service.search(query, limit, offset)
         if not search_results:
             raise HTTPException(status_code=404, detail=f"Resource not found: {identifier}")
 
-        # get the entities related to the search results
-        entity_ids = [id for result in search_results for id in get_entity_ids(result)]
-        results = await entity_service.get_entities_by_id(entity_ids)
+        # get the deduplicated entities related to the search results
+        entity_ids = {id for result in search_results for id in get_entity_ids(result)}
+        results = await entity_service.get_entities_by_id(list(entity_ids))
 
     # return single response
     if len(results) == 1:
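
Note: returning set[int] from get_entity_ids lets the caller collapse duplicate hits with a set comprehension, so an entity matched by several search rows (say, directly and via one of its relations) is only loaded once. A small illustration with made-up IDs:

    # Three hypothetical search hits; entities 7 and 12 each appear twice.
    hits = [{7}, {7, 12}, {12}]

    # The set comprehension from the diff collapses the duplicates.
    entity_ids = {id for result in hits for id in result}
    assert entity_ids == {7, 12}

    # get_entities_by_id still receives a list, as above.
    print(list(entity_ids))
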
basic_memory/api/routers/search_router.py CHANGED
@@ -2,27 +2,35 @@
 
 from dataclasses import asdict
 
-from fastapi import APIRouter, Depends, BackgroundTasks
+from fastapi import APIRouter, BackgroundTasks
 
-from basic_memory.services.search_service import SearchService
 from basic_memory.schemas.search import SearchQuery, SearchResult, SearchResponse
-from basic_memory.deps import get_search_service
+from basic_memory.deps import SearchServiceDep
 
 router = APIRouter(prefix="/search", tags=["search"])
 
 
 @router.post("/", response_model=SearchResponse)
-async def search(query: SearchQuery, search_service: SearchService = Depends(get_search_service)):
+async def search(
+    query: SearchQuery,
+    search_service: SearchServiceDep,
+    page: int = 1,
+    page_size: int = 10,
+):
     """Search across all knowledge and documents."""
-    results = await search_service.search(query)
+    limit = page_size
+    offset = (page - 1) * page_size
+    results = await search_service.search(query, limit=limit, offset=offset)
     search_results = [SearchResult.model_validate(asdict(r)) for r in results]
-    return SearchResponse(results=search_results)
+    return SearchResponse(
+        results=search_results,
+        current_page=page,
+        page_size=page_size,
+    )
 
 
 @router.post("/reindex")
-async def reindex(
-    background_tasks: BackgroundTasks, search_service: SearchService = Depends(get_search_service)
-):
+async def reindex(background_tasks: BackgroundTasks, search_service: SearchServiceDep):
     """Recreate and populate the search index."""
     await search_service.reindex_all(background_tasks=background_tasks)
     return {"status": "ok", "message": "Reindex initiated"}
basic_memory/cli/app.py CHANGED
@@ -6,7 +6,7 @@ from basic_memory import db
 from basic_memory.config import config
 from basic_memory.utils import setup_logging
 
-setup_logging(log_file=".basic-memory/basic-memory-cli.log")  # pragma: no cover
+setup_logging(log_file=".basic-memory/basic-memory-cli.log", console=False)  # pragma: no cover
 
 asyncio.run(db.run_migrations(config))
 
basic_memory/cli/commands/db.py CHANGED
@@ -1,6 +1,8 @@
 """Database management commands."""
 
 import asyncio
+
+import logfire
 import typer
 from loguru import logger
 
@@ -13,13 +15,14 @@ def reset(
     reindex: bool = typer.Option(False, "--reindex", help="Rebuild indices from filesystem"),
 ):  # pragma: no cover
     """Reset database (drop all tables and recreate)."""
-    if typer.confirm("This will delete all data. Are you sure?"):
-        logger.info("Resetting database...")
-        asyncio.run(migrations.reset_database())
+    with logfire.span("reset"):  # pyright: ignore [reportGeneralTypeIssues]
+        if typer.confirm("This will delete all data in your db. Are you sure?"):
+            logger.info("Resetting database...")
+            asyncio.run(migrations.reset_database())
 
-    if reindex:
-        # Import and run sync
-        from basic_memory.cli.commands.sync import sync
+        if reindex:
+            # Import and run sync
+            from basic_memory.cli.commands.sync import sync
 
-        logger.info("Rebuilding search index from filesystem...")
-        sync(watch=False)  # pyright: ignore
+            logger.info("Rebuilding search index from filesystem...")
+            sync(watch=False)  # pyright: ignore
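
Note: logfire is Pydantic's observability SDK; each CLI command body in this release is wrapped in a logfire.span(...) context manager so the whole run is recorded as one trace span. A minimal sketch of the pattern, with a placeholder span name and body:

    import logfire

    logfire.configure()  # picks up credentials/settings from the environment


    def do_work() -> None:
        print("command body runs here")  # placeholder for the real command


    with logfire.span("example-command"):
        # Everything inside executes as one traced span; nested spans and
        # log records emitted here are attached to it automatically.
        do_work()
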
basic_memory/cli/commands/import_chatgpt.py CHANGED
@@ -6,6 +6,7 @@ from datetime import datetime
 from pathlib import Path
 from typing import Dict, Any, List, Annotated, Set, Optional
 
+import logfire
 import typer
 from loguru import logger
 from rich.console import Console
@@ -209,7 +210,7 @@ async def get_markdown_processor() -> MarkdownProcessor:
 @import_app.command(name="chatgpt", help="Import conversations from ChatGPT JSON export.")
 def import_chatgpt(
     conversations_json: Annotated[
-        Path, typer.Option(..., help="Path to ChatGPT conversations.json file")
+        Path, typer.Argument(help="Path to ChatGPT conversations.json file")
     ] = Path("conversations.json"),
     folder: Annotated[
         str, typer.Option(help="The folder to place the files in.")
@@ -225,35 +226,38 @@ def import_chatgpt(
     After importing, run 'basic-memory sync' to index the new files.
     """
 
-    try:
-        if conversations_json:
-            if not conversations_json.exists():
-                typer.echo(f"Error: File not found: {conversations_json}", err=True)
-                raise typer.Exit(1)
+    with logfire.span("import chatgpt"):  # pyright: ignore [reportGeneralTypeIssues]
+        try:
+            if conversations_json:
+                if not conversations_json.exists():
+                    typer.echo(f"Error: File not found: {conversations_json}", err=True)
+                    raise typer.Exit(1)
 
-            # Get markdown processor
-            markdown_processor = asyncio.run(get_markdown_processor())
+                # Get markdown processor
+                markdown_processor = asyncio.run(get_markdown_processor())
 
-            # Process the file
-            base_path = config.home / folder
-            console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
-            results = asyncio.run(
-                process_chatgpt_json(conversations_json, folder, markdown_processor)
-            )
+                # Process the file
+                base_path = config.home / folder
+                console.print(
+                    f"\nImporting chats from {conversations_json}...writing to {base_path}"
+                )
+                results = asyncio.run(
+                    process_chatgpt_json(conversations_json, folder, markdown_processor)
+                )
 
-            # Show results
-            console.print(
-                Panel(
-                    f"[green]Import complete![/green]\n\n"
-                    f"Imported {results['conversations']} conversations\n"
-                    f"Containing {results['messages']} messages",
-                    expand=False,
+                # Show results
+                console.print(
+                    Panel(
+                        f"[green]Import complete![/green]\n\n"
+                        f"Imported {results['conversations']} conversations\n"
+                        f"Containing {results['messages']} messages",
+                        expand=False,
+                    )
                 )
-            )
 
-            console.print("\nRun 'basic-memory sync' to index the new files.")
+                console.print("\nRun 'basic-memory sync' to index the new files.")
 
-    except Exception as e:
-        logger.error("Import failed")
-        typer.echo(f"Error during import: {e}", err=True)
-        raise typer.Exit(1)
+        except Exception as e:
+            logger.error("Import failed")
+            typer.echo(f"Error during import: {e}", err=True)
+            raise typer.Exit(1)
basic_memory/cli/commands/import_claude_conversations.py CHANGED
@@ -6,6 +6,7 @@ from datetime import datetime
 from pathlib import Path
 from typing import Dict, Any, List, Annotated
 
+import logfire
 import typer
 from loguru import logger
 from rich.console import Console
@@ -178,34 +179,35 @@ def import_claude(
     After importing, run 'basic-memory sync' to index the new files.
     """
 
-    try:
-        if not conversations_json.exists():
-            typer.echo(f"Error: File not found: {conversations_json}", err=True)
-            raise typer.Exit(1)
+    with logfire.span("import claude conversations"):  # pyright: ignore [reportGeneralTypeIssues]
+        try:
+            if not conversations_json.exists():
+                typer.echo(f"Error: File not found: {conversations_json}", err=True)
+                raise typer.Exit(1)
+
+            # Get markdown processor
+            markdown_processor = asyncio.run(get_markdown_processor())
 
-        # Get markdown processor
-        markdown_processor = asyncio.run(get_markdown_processor())
-
-        # Process the file
-        base_path = config.home / folder
-        console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
-        results = asyncio.run(
-            process_conversations_json(conversations_json, base_path, markdown_processor)
-        )
-
-        # Show results
-        console.print(
-            Panel(
-                f"[green]Import complete![/green]\n\n"
-                f"Imported {results['conversations']} conversations\n"
-                f"Containing {results['messages']} messages",
-                expand=False,
+            # Process the file
+            base_path = config.home / folder
+            console.print(f"\nImporting chats from {conversations_json}...writing to {base_path}")
+            results = asyncio.run(
+                process_conversations_json(conversations_json, base_path, markdown_processor)
             )
-        )
 
-        console.print("\nRun 'basic-memory sync' to index the new files.")
+            # Show results
+            console.print(
+                Panel(
+                    f"[green]Import complete![/green]\n\n"
+                    f"Imported {results['conversations']} conversations\n"
+                    f"Containing {results['messages']} messages",
+                    expand=False,
+                )
+            )
 
-    except Exception as e:
-        logger.error("Import failed")
-        typer.echo(f"Error during import: {e}", err=True)
-        raise typer.Exit(1)
+            console.print("\nRun 'basic-memory sync' to index the new files.")
+
+        except Exception as e:
+            logger.error("Import failed")
+            typer.echo(f"Error during import: {e}", err=True)
+            raise typer.Exit(1)
basic_memory/cli/commands/import_claude_projects.py CHANGED
@@ -5,6 +5,7 @@ import json
 from pathlib import Path
 from typing import Dict, Any, Annotated, Optional
 
+import logfire
 import typer
 from loguru import logger
 from rich.console import Console
@@ -160,36 +161,36 @@ def import_projects(
 
     After importing, run 'basic-memory sync' to index the new files.
     """
+    with logfire.span("import claude projects"):  # pyright: ignore [reportGeneralTypeIssues]
+        try:
+            if projects_json:
+                if not projects_json.exists():
+                    typer.echo(f"Error: File not found: {projects_json}", err=True)
+                    raise typer.Exit(1)
+
+            # Get markdown processor
+            markdown_processor = asyncio.run(get_markdown_processor())
+
+            # Process the file
+            base_path = config.home / base_folder if base_folder else config.home
+            console.print(f"\nImporting projects from {projects_json}...writing to {base_path}")
+            results = asyncio.run(
+                process_projects_json(projects_json, base_path, markdown_processor)
+            )
 
-    try:
-        if projects_json:
-            if not projects_json.exists():
-                typer.echo(f"Error: File not found: {projects_json}", err=True)
-                raise typer.Exit(1)
-
-        # Get markdown processor
-        markdown_processor = asyncio.run(get_markdown_processor())
-
-        # Process the file
-        base_path = config.home / base_folder if base_folder else config.home
-        console.print(f"\nImporting projects from {projects_json}...writing to {base_path}")
-        results = asyncio.run(
-            process_projects_json(projects_json, base_path, markdown_processor)
-        )
-
-        # Show results
-        console.print(
-            Panel(
-                f"[green]Import complete![/green]\n\n"
-                f"Imported {results['documents']} project documents\n"
-                f"Imported {results['prompts']} prompt templates",
-                expand=False,
+            # Show results
+            console.print(
+                Panel(
+                    f"[green]Import complete![/green]\n\n"
+                    f"Imported {results['documents']} project documents\n"
+                    f"Imported {results['prompts']} prompt templates",
+                    expand=False,
+                )
             )
-        )
 
-        console.print("\nRun 'basic-memory sync' to index the new files.")
+            console.print("\nRun 'basic-memory sync' to index the new files.")
 
-    except Exception as e:
-        logger.error("Import failed")
-        typer.echo(f"Error during import: {e}", err=True)
-        raise typer.Exit(1)
+        except Exception as e:
+            logger.error("Import failed")
+            typer.echo(f"Error during import: {e}", err=True)
+            raise typer.Exit(1)
basic_memory/cli/commands/import_memory_json.py CHANGED
@@ -5,6 +5,7 @@ import json
 from pathlib import Path
 from typing import Dict, Any, List, Annotated
 
+import logfire
 import typer
 from loguru import logger
 from rich.console import Console
@@ -113,32 +114,33 @@ def memory_json(
     After importing, run 'basic-memory sync' to index the new files.
     """
 
-    if not json_path.exists():
-        typer.echo(f"Error: File not found: {json_path}", err=True)
-        raise typer.Exit(1)
-
-    try:
-        # Get markdown processor
-        markdown_processor = asyncio.run(get_markdown_processor())
-
-        # Process the file
-        base_path = config.home
-        console.print(f"\nImporting from {json_path}...writing to {base_path}")
-        results = asyncio.run(process_memory_json(json_path, base_path, markdown_processor))
-
-        # Show results
-        console.print(
-            Panel(
-                f"[green]Import complete![/green]\n\n"
-                f"Created {results['entities']} entities\n"
-                f"Added {results['relations']} relations",
-                expand=False,
+    with logfire.span("import memory_json"):  # pyright: ignore [reportGeneralTypeIssues]
+        if not json_path.exists():
+            typer.echo(f"Error: File not found: {json_path}", err=True)
+            raise typer.Exit(1)
+
+        try:
+            # Get markdown processor
+            markdown_processor = asyncio.run(get_markdown_processor())
+
+            # Process the file
+            base_path = config.home
+            console.print(f"\nImporting from {json_path}...writing to {base_path}")
+            results = asyncio.run(process_memory_json(json_path, base_path, markdown_processor))
+
+            # Show results
+            console.print(
+                Panel(
+                    f"[green]Import complete![/green]\n\n"
+                    f"Created {results['entities']} entities\n"
+                    f"Added {results['relations']} relations",
+                    expand=False,
+                )
             )
-        )
 
-        console.print("\nRun 'basic-memory sync' to index the new files.")
+            console.print("\nRun 'basic-memory sync' to index the new files.")
 
-    except Exception as e:
-        logger.error("Import failed")
-        typer.echo(f"Error during import: {e}", err=True)
-        raise typer.Exit(1)
+        except Exception as e:
+            logger.error("Import failed")
+            typer.echo(f"Error during import: {e}", err=True)
+            raise typer.Exit(1)
basic_memory/cli/commands/status.py CHANGED
@@ -3,6 +3,7 @@
 import asyncio
 from typing import Set, Dict
 
+import logfire
 import typer
 from loguru import logger
 from rich.console import Console
@@ -146,9 +147,10 @@ def status(
     verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed file information"),
 ):
     """Show sync status between files and database."""
-    try:
-        sync_service = asyncio.run(get_file_change_scanner())
-        asyncio.run(run_status(sync_service, verbose))  # pragma: no cover
-    except Exception as e:
-        logger.exception(f"Error checking status: {e}")
-        raise typer.Exit(code=1)  # pragma: no cover
+    with logfire.span("status"):  # pyright: ignore [reportGeneralTypeIssues]
+        try:
+            sync_service = asyncio.run(get_file_change_scanner())
+            asyncio.run(run_status(sync_service, verbose))  # pragma: no cover
+        except Exception as e:
+            logger.exception(f"Error checking status: {e}")
+            raise typer.Exit(code=1)  # pragma: no cover
basic_memory/cli/commands/sync.py CHANGED
@@ -151,7 +151,7 @@ def display_detailed_sync_results(knowledge: SyncReport):
     console.print(knowledge_tree)
 
 
-async def run_sync(verbose: bool = False, watch: bool = False):
+async def run_sync(verbose: bool = False, watch: bool = False, console_status: bool = False):
     """Run sync operation."""
 
     sync_service = await get_sync_service()
@@ -164,7 +164,7 @@ async def run_sync(verbose: bool = False, watch: bool = False):
             config=config,
         )
         await watch_service.handle_changes(config.home)
-        await watch_service.run()  # pragma: no cover
+        await watch_service.run(console_status=console_status)  # pragma: no cover
     else:
         # one time sync
         knowledge_changes = await sync_service.sync(config.home)
@@ -189,11 +189,14 @@ def sync(
         "-w",
         help="Start watching for changes after sync.",
     ),
+    console_status: bool = typer.Option(
+        False, "--console-status", "-c", help="Show live console status"
+    ),
 ) -> None:
     """Sync knowledge files with the database."""
     try:
         # Run sync
-        asyncio.run(run_sync(verbose=verbose, watch=watch))
+        asyncio.run(run_sync(verbose=verbose, watch=watch, console_status=console_status))
 
     except Exception as e:  # pragma: no cover
         if not isinstance(e, typer.Exit):