basic-memory 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.

Potentially problematic release: this version of basic-memory has been flagged for review.

Files changed (70)
  1. basic_memory/__init__.py +1 -1
  2. basic_memory/alembic/alembic.ini +119 -0
  3. basic_memory/alembic/env.py +23 -1
  4. basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py +51 -0
  5. basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py +44 -0
  6. basic_memory/api/app.py +0 -4
  7. basic_memory/api/routers/knowledge_router.py +1 -9
  8. basic_memory/api/routers/memory_router.py +41 -25
  9. basic_memory/api/routers/resource_router.py +119 -12
  10. basic_memory/api/routers/search_router.py +17 -9
  11. basic_memory/cli/app.py +0 -2
  12. basic_memory/cli/commands/db.py +11 -8
  13. basic_memory/cli/commands/import_chatgpt.py +31 -27
  14. basic_memory/cli/commands/import_claude_conversations.py +29 -27
  15. basic_memory/cli/commands/import_claude_projects.py +30 -29
  16. basic_memory/cli/commands/import_memory_json.py +28 -26
  17. basic_memory/cli/commands/status.py +16 -26
  18. basic_memory/cli/commands/sync.py +11 -12
  19. basic_memory/cli/commands/tools.py +180 -0
  20. basic_memory/cli/main.py +1 -1
  21. basic_memory/config.py +16 -2
  22. basic_memory/db.py +1 -0
  23. basic_memory/deps.py +5 -1
  24. basic_memory/file_utils.py +6 -4
  25. basic_memory/markdown/entity_parser.py +3 -3
  26. basic_memory/mcp/async_client.py +1 -1
  27. basic_memory/mcp/main.py +25 -0
  28. basic_memory/mcp/prompts/__init__.py +15 -0
  29. basic_memory/mcp/prompts/ai_assistant_guide.py +28 -0
  30. basic_memory/mcp/prompts/continue_conversation.py +172 -0
  31. basic_memory/mcp/prompts/json_canvas_spec.py +25 -0
  32. basic_memory/mcp/prompts/recent_activity.py +46 -0
  33. basic_memory/mcp/prompts/search.py +127 -0
  34. basic_memory/mcp/prompts/utils.py +98 -0
  35. basic_memory/mcp/server.py +3 -7
  36. basic_memory/mcp/tools/__init__.py +6 -4
  37. basic_memory/mcp/tools/canvas.py +99 -0
  38. basic_memory/mcp/tools/knowledge.py +26 -14
  39. basic_memory/mcp/tools/memory.py +57 -31
  40. basic_memory/mcp/tools/notes.py +65 -72
  41. basic_memory/mcp/tools/resource.py +192 -0
  42. basic_memory/mcp/tools/search.py +13 -4
  43. basic_memory/mcp/tools/utils.py +2 -1
  44. basic_memory/models/knowledge.py +27 -11
  45. basic_memory/repository/repository.py +1 -1
  46. basic_memory/repository/search_repository.py +17 -4
  47. basic_memory/schemas/__init__.py +0 -11
  48. basic_memory/schemas/base.py +4 -1
  49. basic_memory/schemas/memory.py +14 -2
  50. basic_memory/schemas/request.py +1 -1
  51. basic_memory/schemas/search.py +4 -1
  52. basic_memory/services/context_service.py +14 -6
  53. basic_memory/services/entity_service.py +19 -12
  54. basic_memory/services/file_service.py +69 -2
  55. basic_memory/services/link_resolver.py +12 -9
  56. basic_memory/services/search_service.py +59 -13
  57. basic_memory/sync/__init__.py +3 -2
  58. basic_memory/sync/sync_service.py +287 -107
  59. basic_memory/sync/watch_service.py +125 -129
  60. basic_memory/utils.py +27 -15
  61. {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/METADATA +3 -2
  62. basic_memory-0.8.0.dist-info/RECORD +91 -0
  63. basic_memory/alembic/README +0 -1
  64. basic_memory/schemas/discovery.py +0 -28
  65. basic_memory/sync/file_change_scanner.py +0 -158
  66. basic_memory/sync/utils.py +0 -31
  67. basic_memory-0.6.0.dist-info/RECORD +0 -81
  68. {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/WHEEL +0 -0
  69. {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/entry_points.txt +0 -0
  70. {basic_memory-0.6.0.dist-info → basic_memory-0.8.0.dist-info}/licenses/LICENSE +0 -0
basic_memory/__init__.py CHANGED
@@ -1,3 +1,3 @@
 """basic-memory - Local-first knowledge management combining Zettelkasten with knowledge graphs"""
 
-__version__ = "0.6.0"
+__version__ = "0.8.0"
basic_memory/alembic/alembic.ini ADDED
@@ -0,0 +1,119 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+# Use forward slashes (/) also on windows to provide an os agnostic path
+script_location = .
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
+# Any required deps can installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to migrations/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:migrations/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+# version_path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+version_path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
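Note: the packaged alembic.ini keeps the stock placeholder URL; at runtime env.py (next file) overwrites sqlalchemy.url with the app's SQLite path, so the bundled migrations can also be driven programmatically. A minimal sketch, assuming the installed package layout (the Config options, not an ini file, carry the settings here):

    # Sketch only: apply the bundled migrations from Python. env.py overrides
    # sqlalchemy.url from basic_memory.config, so only script_location matters.
    from alembic import command
    from alembic.config import Config

    cfg = Config()  # no ini file needed when options are set directly
    cfg.set_main_option("script_location", "basic_memory/alembic")  # assumed install-relative path
    command.upgrade(cfg, "head")  # runs env.py, which supplies the real SQLite URL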
basic_memory/alembic/env.py CHANGED
@@ -1,5 +1,6 @@
 """Alembic environment configuration."""
 
+import os
 from logging.config import fileConfig
 
 from sqlalchemy import engine_from_config
@@ -8,6 +9,10 @@ from sqlalchemy import pool
 from alembic import context
 
 from basic_memory.models import Base
+
+# set config.env to "test" for pytest to prevent logging to file in utils.setup_logging()
+os.environ["BASIC_MEMORY_ENV"] = "test"
+
 from basic_memory.config import config as app_config
 
 # this is the Alembic Config object, which provides
@@ -18,6 +23,8 @@ config = context.config
 sqlalchemy_url = f"sqlite:///{app_config.database_path}"
 config.set_main_option("sqlalchemy.url", sqlalchemy_url)
 
+# print(f"Using SQLAlchemy URL: {sqlalchemy_url}")
+
 # Interpret the config file for Python logging.
 if config.config_file_name is not None:
     fileConfig(config.config_file_name)
@@ -27,6 +34,14 @@ if config.config_file_name is not None:
 target_metadata = Base.metadata
 
 
+# Add this function to tell Alembic what to include/exclude
+def include_object(object, name, type_, reflected, compare_to):
+    # Ignore SQLite FTS tables
+    if type_ == "table" and name.startswith("search_index"):
+        return False
+    return True
+
+
 def run_migrations_offline() -> None:
     """Run migrations in 'offline' mode.
 
@@ -44,6 +59,8 @@ def run_migrations_offline() -> None:
         target_metadata=target_metadata,
         literal_binds=True,
         dialect_opts={"paramstyle": "named"},
+        include_object=include_object,
+        render_as_batch=True,
     )
 
     with context.begin_transaction():
@@ -63,7 +80,12 @@ def run_migrations_online() -> None:
     )
 
     with connectable.connect() as connection:
-        context.configure(connection=connection, target_metadata=target_metadata)
+        context.configure(
+            connection=connection,
+            target_metadata=target_metadata,
+            include_object=include_object,
+            render_as_batch=True,
+        )
 
         with context.begin_transaction():
             context.run_migrations()
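Two details worth noting here: the include_object hook keeps autogenerate away from the SQLite FTS shadow tables that back search (any table named search_index*), and render_as_batch=True makes Alembic emit batch operations, which SQLite needs since it cannot alter constraints in place. Illustrating the hook (Alembic passes object, name, type_, reflected, compare_to for each schema object):

    assert include_object(None, "search_index_data", "table", True, None) is False  # FTS shadow table: skipped
    assert include_object(None, "entity", "table", True, None) is True              # ordinary table: kept
    assert include_object(None, "search_index", "index", True, None) is True        # only tables are filtered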
basic_memory/alembic/versions/502b60eaa905_remove_required_from_entity_permalink.py ADDED
@@ -0,0 +1,51 @@
+"""remove required from entity.permalink
+
+Revision ID: 502b60eaa905
+Revises: b3c3938bacdb
+Create Date: 2025-02-24 13:33:09.790951
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "502b60eaa905"
+down_revision: Union[str, None] = "b3c3938bacdb"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("entity", schema=None) as batch_op:
+        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=True)
+        batch_op.drop_index("ix_entity_permalink")
+        batch_op.create_index(batch_op.f("ix_entity_permalink"), ["permalink"], unique=False)
+        batch_op.drop_constraint("uix_entity_permalink", type_="unique")
+        batch_op.create_index(
+            "uix_entity_permalink",
+            ["permalink"],
+            unique=True,
+            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
+        )
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("entity", schema=None) as batch_op:
+        batch_op.drop_index(
+            "uix_entity_permalink",
+            sqlite_where=sa.text("content_type = 'text/markdown' AND permalink IS NOT NULL"),
+        )
+        batch_op.create_unique_constraint("uix_entity_permalink", ["permalink"])
+        batch_op.drop_index(batch_op.f("ix_entity_permalink"))
+        batch_op.create_index("ix_entity_permalink", ["permalink"], unique=1)
+        batch_op.alter_column("permalink", existing_type=sa.VARCHAR(), nullable=False)
+
+    # ### end Alembic commands ###
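The net effect: permalink uniqueness is now enforced only for markdown entities that actually have one, so any number of non-markdown entities can carry a NULL permalink. A standalone sqlite3 sketch of the same partial-index behavior, with the table reduced to two columns:

    import sqlite3

    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE entity (permalink TEXT, content_type TEXT)")
    con.execute(
        "CREATE UNIQUE INDEX uix_entity_permalink ON entity (permalink) "
        "WHERE content_type = 'text/markdown' AND permalink IS NOT NULL"
    )
    # Any number of non-markdown rows with NULL permalinks is fine:
    con.execute("INSERT INTO entity VALUES (NULL, 'image/png')")
    con.execute("INSERT INTO entity VALUES (NULL, 'image/png')")
    # ...but duplicate markdown permalinks are rejected:
    con.execute("INSERT INTO entity VALUES ('notes/foo', 'text/markdown')")
    try:
        con.execute("INSERT INTO entity VALUES ('notes/foo', 'text/markdown')")
    except sqlite3.IntegrityError as e:
        print(e)  # UNIQUE constraint failed: entity.permalink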
basic_memory/alembic/versions/b3c3938bacdb_relation_to_name_unique_index.py ADDED
@@ -0,0 +1,44 @@
+"""relation to_name unique index
+
+Revision ID: b3c3938bacdb
+Revises: 3dae7c7b1564
+Create Date: 2025-02-22 14:59:30.668466
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision: str = "b3c3938bacdb"
+down_revision: Union[str, None] = "3dae7c7b1564"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # SQLite doesn't support constraint changes through ALTER
+    # Need to recreate table with desired constraints
+    with op.batch_alter_table("relation") as batch_op:
+        # Drop existing unique constraint
+        batch_op.drop_constraint("uix_relation", type_="unique")
+
+        # Add new constraints
+        batch_op.create_unique_constraint(
+            "uix_relation_from_id_to_id", ["from_id", "to_id", "relation_type"]
+        )
+        batch_op.create_unique_constraint(
+            "uix_relation_from_id_to_name", ["from_id", "to_name", "relation_type"]
+        )
+
+
+def downgrade() -> None:
+    with op.batch_alter_table("relation") as batch_op:
+        # Drop new constraints
+        batch_op.drop_constraint("uix_relation_from_id_to_name", type_="unique")
+        batch_op.drop_constraint("uix_relation_from_id_to_id", type_="unique")
+
+        # Restore original constraint
+        batch_op.create_unique_constraint("uix_relation", ["from_id", "to_id", "relation_type"])
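SQLite cannot change constraints through ALTER TABLE, so batch_alter_table recreates the relation table. The single (from_id, to_id, relation_type) constraint becomes two: one on the resolved to_id, one on the raw to_name, presumably so forward references whose target note does not exist yet (to_id still NULL) are deduplicated by name. A hypothetical model-side equivalent (the real Relation model lives in basic_memory/models/knowledge.py and may differ):

    from sqlalchemy import Column, Integer, String, UniqueConstraint
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class Relation(Base):
        __tablename__ = "relation"
        id = Column(Integer, primary_key=True)
        from_id = Column(Integer, nullable=False)
        to_id = Column(Integer, nullable=True)  # NULL until the link target exists
        to_name = Column(String, nullable=False)
        relation_type = Column(String, nullable=False)
        __table_args__ = (
            UniqueConstraint("from_id", "to_id", "relation_type", name="uix_relation_from_id_to_id"),
            UniqueConstraint("from_id", "to_name", "relation_type", name="uix_relation_from_id_to_name"),
        )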
basic_memory/api/app.py CHANGED
@@ -7,18 +7,14 @@ from fastapi import FastAPI, HTTPException
 from fastapi.exception_handlers import http_exception_handler
 from loguru import logger
 
-import basic_memory
 from basic_memory import db
 from basic_memory.config import config as app_config
 from basic_memory.api.routers import knowledge, search, memory, resource
-from basic_memory.utils import setup_logging
 
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):  # pragma: no cover
     """Lifecycle manager for the FastAPI app."""
-    setup_logging(log_file=".basic-memory/basic-memory.log")
-    logger.info(f"Starting Basic Memory API {basic_memory.__version__}")
     await db.run_migrations(app_config)
     yield
     logger.info("Shutting down Basic Memory API")
basic_memory/api/routers/knowledge_router.py CHANGED
@@ -94,11 +94,8 @@ async def get_entity(
     try:
         entity = await entity_service.get_by_permalink(permalink)
         result = EntityResponse.model_validate(entity)
-
-        logger.info(f"response: get_entity with result={result}")
         return result
     except EntityNotFoundError:
-        logger.error(f"Error: Entity with {permalink} not found")
         raise HTTPException(status_code=404, detail=f"Entity with {permalink} not found")
 
 
@@ -114,8 +111,6 @@ async def get_entities(
     result = EntityListResponse(
         entities=[EntityResponse.model_validate(entity) for entity in entities]
     )
-
-    logger.info(f"response: get_entities with result={result}")
     return result
 
 
@@ -135,17 +130,15 @@ async def delete_entity(
 
     entity = await link_resolver.resolve_link(identifier)
     if entity is None:
-        logger.info("response: delete_entity with result=DeleteEntitiesResponse(deleted=False)")
        return DeleteEntitiesResponse(deleted=False)
 
     # Delete the entity
-    deleted = await entity_service.delete_entity(entity.permalink)
+    deleted = await entity_service.delete_entity(entity.permalink or entity.id)
 
     # Remove from search index
     background_tasks.add_task(search_service.delete_by_permalink, entity.permalink)
 
     result = DeleteEntitiesResponse(deleted=deleted)
-    logger.info(f"response: delete_entity with result={result}")
     return result
 
 
@@ -166,5 +159,4 @@ async def delete_entities(
         background_tasks.add_task(search_service.delete_by_permalink, permalink)
 
     result = DeleteEntitiesResponse(deleted=deleted)
-    logger.info(f"response: delete_entities with result={result}")
     return result
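The delete_entity change pairs with migration 502b60eaa905 above: with permalink now nullable, entity.permalink or entity.id falls back to the numeric id for entities that have no permalink. In miniature (toy Entity; the real one is a SQLAlchemy model):

    from dataclasses import dataclass

    @dataclass
    class Entity:
        id: int
        permalink: str | None

    note, image = Entity(1, "notes/foo"), Entity(2, None)
    assert (note.permalink or note.id) == "notes/foo"  # markdown note: permalink wins
    assert (image.permalink or image.id) == 2          # raw file: falls back to the id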
basic_memory/api/routers/memory_router.py CHANGED
@@ -24,39 +24,37 @@ from basic_memory.services.context_service import ContextResultRow
 router = APIRouter(prefix="/memory", tags=["memory"])
 
 
-async def to_graph_context(context, entity_repository: EntityRepository):
+async def to_graph_context(context, entity_repository: EntityRepository, page: int, page_size: int):
     # return results
     async def to_summary(item: SearchIndexRow | ContextResultRow):
         match item.type:
             case SearchItemType.ENTITY:
-                assert item.title is not None
-                assert item.created_at is not None
-
                 return EntitySummary(
-                    title=item.title,
+                    title=item.title,  # pyright: ignore
                     permalink=item.permalink,
                     file_path=item.file_path,
                     created_at=item.created_at,
                 )
             case SearchItemType.OBSERVATION:
-                assert item.category is not None
-                assert item.content is not None
-
                 return ObservationSummary(
-                    category=item.category, content=item.content, permalink=item.permalink
+                    title=item.title,  # pyright: ignore
+                    file_path=item.file_path,
+                    category=item.category,  # pyright: ignore
+                    content=item.content,  # pyright: ignore
+                    permalink=item.permalink,  # pyright: ignore
+                    created_at=item.created_at,
                 )
             case SearchItemType.RELATION:
-                assert item.from_id is not None
-                from_entity = await entity_repository.find_by_id(item.from_id)
-                assert from_entity is not None
-
+                from_entity = await entity_repository.find_by_id(item.from_id)  # pyright: ignore
                 to_entity = await entity_repository.find_by_id(item.to_id) if item.to_id else None
-
                 return RelationSummary(
-                    permalink=item.permalink,
+                    title=item.title,  # pyright: ignore
+                    file_path=item.file_path,
+                    permalink=item.permalink,  # pyright: ignore
                     relation_type=item.type,
-                    from_id=from_entity.permalink,
+                    from_id=from_entity.permalink,  # pyright: ignore
                     to_id=to_entity.permalink if to_entity else None,
+                    created_at=item.created_at,
                 )
             case _:  # pragma: no cover
                 raise ValueError(f"Unexpected type: {item.type}")
@@ -66,7 +64,11 @@ async def to_graph_context(context, entity_repository: EntityRepository):
     metadata = MemoryMetadata.model_validate(context["metadata"])
     # Transform to GraphContext
     return GraphContext(
-        primary_results=primary_results, related_results=related_results, metadata=metadata
+        primary_results=primary_results,
+        related_results=related_results,
+        metadata=metadata,
+        page=page,
+        page_size=page_size,
     )
 
 
@@ -77,7 +79,9 @@ async def recent(
     type: Annotated[list[SearchItemType] | None, Query()] = None,
     depth: int = 1,
     timeframe: TimeFrame = "7d",
-    max_results: int = 10,
+    page: int = 1,
+    page_size: int = 10,
+    max_related: int = 10,
 ) -> GraphContext:
     # return all types by default
     types = (
@@ -87,16 +91,22 @@
     )
 
     logger.debug(
-        f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` max_results: `{max_results}`"
+        f"Getting recent context: `{types}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
     )
     # Parse timeframe
     since = parse(timeframe)
+    limit = page_size
+    offset = (page - 1) * page_size
 
     # Build context
     context = await context_service.build_context(
-        types=types, depth=depth, since=since, max_results=max_results
+        types=types, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
     )
-    return await to_graph_context(context, entity_repository=entity_repository)
+    recent_context = await to_graph_context(
+        context, entity_repository=entity_repository, page=page, page_size=page_size
+    )
+    logger.debug(f"Recent context: {recent_context.model_dump_json()}")
+    return recent_context
 
 
 # get_memory_context needs to be declared last so other paths can match
@@ -109,21 +119,27 @@ async def get_memory_context(
     uri: str,
     depth: int = 1,
     timeframe: TimeFrame = "7d",
-    max_results: int = 10,
+    page: int = 1,
+    page_size: int = 10,
+    max_related: int = 10,
 ) -> GraphContext:
     """Get rich context from memory:// URI."""
     # add the project name from the config to the url as the "host
     # Parse URI
     logger.debug(
-        f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` max_results: `{max_results}`"
+        f"Getting context for URI: `{uri}` depth: `{depth}` timeframe: `{timeframe}` page: `{page}` page_size: `{page_size}` max_related: `{max_related}`"
     )
     memory_url = normalize_memory_url(uri)
 
     # Parse timeframe
     since = parse(timeframe)
+    limit = page_size
+    offset = (page - 1) * page_size
 
     # Build context
     context = await context_service.build_context(
-        memory_url, depth=depth, since=since, max_results=max_results
+        memory_url, depth=depth, since=since, limit=limit, offset=offset, max_related=max_related
+    )
+    return await to_graph_context(
+        context, entity_repository=entity_repository, page=page, page_size=page_size
     )
-    return await to_graph_context(context, entity_repository=entity_repository)
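Both endpoints replace the flat max_results cap with page/page_size pagination (plus a separate max_related bound for related results), translated internally into a limit/offset pair. The arithmetic, with a hypothetical request against a local instance:

    # page is 1-based; page 3 with page_size 10 selects rows 21-30
    page, page_size = 3, 10
    limit, offset = page_size, (page - 1) * page_size
    assert (limit, offset) == (10, 20)

    # e.g. (base URL assumed): GET /memory/recent?timeframe=7d&page=3&page_size=10&max_related=10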
basic_memory/api/routers/resource_router.py CHANGED
@@ -2,9 +2,10 @@
 
 import tempfile
 from pathlib import Path
+from typing import Annotated
 
-from fastapi import APIRouter, HTTPException, BackgroundTasks
-from fastapi.responses import FileResponse
+from fastapi import APIRouter, HTTPException, BackgroundTasks, Body
+from fastapi.responses import FileResponse, JSONResponse
 from loguru import logger
 
 from basic_memory.deps import (
@@ -13,24 +14,27 @@ from basic_memory.deps import (
     SearchServiceDep,
     EntityServiceDep,
     FileServiceDep,
+    EntityRepositoryDep,
 )
 from basic_memory.repository.search_repository import SearchIndexRow
 from basic_memory.schemas.memory import normalize_memory_url
 from basic_memory.schemas.search import SearchQuery, SearchItemType
+from basic_memory.models.knowledge import Entity as EntityModel
+from datetime import datetime
 
 router = APIRouter(prefix="/resource", tags=["resources"])
 
 
-def get_entity_ids(item: SearchIndexRow) -> list[int]:
+def get_entity_ids(item: SearchIndexRow) -> set[int]:
     match item.type:
         case SearchItemType.ENTITY:
-            return [item.id]
+            return {item.id}
         case SearchItemType.OBSERVATION:
-            return [item.entity_id]  # pyright: ignore [reportReturnType]
+            return {item.entity_id}  # pyright: ignore [reportReturnType]
         case SearchItemType.RELATION:
            from_entity = item.from_id
            to_entity = item.to_id  # pyright: ignore [reportReturnType]
-            return [from_entity, to_entity] if to_entity else [from_entity]  # pyright: ignore [reportReturnType]
+            return {from_entity, to_entity} if to_entity else {from_entity}  # pyright: ignore [reportReturnType]
         case _:  # pragma: no cover
             raise ValueError(f"Unexpected type: {item.type}")
 
@@ -44,6 +48,8 @@ async def get_resource_content(
     file_service: FileServiceDep,
     background_tasks: BackgroundTasks,
     identifier: str,
+    page: int = 1,
+    page_size: int = 10,
 ) -> FileResponse:
     """Get resource content by identifier: name or permalink."""
     logger.debug(f"Getting content for: {identifier}")
@@ -52,6 +58,10 @@ async def get_resource_content(
     entity = await link_resolver.resolve_link(identifier)
     results = [entity] if entity else []
 
+    # pagination for multiple results
+    limit = page_size
+    offset = (page - 1) * page_size
+
     # search using the identifier as a permalink
     if not results:
         # if the identifier contains a wildcard, use GLOB search
@@ -60,13 +70,13 @@ async def get_resource_content(
             if "*" in identifier
             else SearchQuery(permalink=identifier)
         )
-        search_results = await search_service.search(query)
+        search_results = await search_service.search(query, limit, offset)
         if not search_results:
             raise HTTPException(status_code=404, detail=f"Resource not found: {identifier}")
 
-        # get the entities related to the search results
-        entity_ids = [id for result in search_results for id in get_entity_ids(result)]
-        results = await entity_service.get_entities_by_id(entity_ids)
+        # get the deduplicated entities related to the search results
+        entity_ids = {id for result in search_results for id in get_entity_ids(result)}
+        results = await entity_service.get_entities_by_id(list(entity_ids))
 
     # return single response
     if len(results) == 1:
@@ -88,8 +98,7 @@ async def get_resource_content(
         content = await file_service.read_entity_content(result)
         memory_url = normalize_memory_url(result.permalink)
         modified_date = result.updated_at.isoformat()
-        assert result.checksum
-        checksum = result.checksum[:8]
+        checksum = result.checksum[:8] if result.checksum else ""
 
         # Prepare the delimited content
         response_content = f"--- {memory_url} {modified_date} {checksum}\n"
@@ -116,3 +125,101 @@ def cleanup_temp_file(file_path: str):
         logger.debug(f"Temporary file deleted: {file_path}")
     except Exception as e:  # pragma: no cover
         logger.error(f"Error deleting temporary file {file_path}: {e}")
+
+
+@router.put("/{file_path:path}")
+async def write_resource(
+    config: ProjectConfigDep,
+    file_service: FileServiceDep,
+    entity_repository: EntityRepositoryDep,
+    search_service: SearchServiceDep,
+    file_path: str,
+    content: Annotated[str, Body()],
+) -> JSONResponse:
+    """Write content to a file in the project.
+
+    This endpoint allows writing content directly to a file in the project.
+    Also creates an entity record and indexes the file for search.
+
+    Args:
+        file_path: Path to write to, relative to project root
+        request: Contains the content to write
+
+    Returns:
+        JSON response with file information
+    """
+    try:
+        # Get content from request body
+
+        # Ensure it's UTF-8 string content
+        if isinstance(content, bytes):  # pragma: no cover
+            content_str = content.decode("utf-8")
+        else:
+            content_str = str(content)
+
+        # Get full file path
+        full_path = Path(f"{config.home}/{file_path}")
+
+        # Ensure parent directory exists
+        full_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Write content to file
+        checksum = await file_service.write_file(full_path, content_str)
+
+        # Get file info
+        file_stats = file_service.file_stats(full_path)
+
+        # Determine file details
+        file_name = Path(file_path).name
+        content_type = file_service.content_type(full_path)
+
+        entity_type = "canvas" if file_path.endswith(".canvas") else "file"
+
+        # Check if entity already exists
+        existing_entity = await entity_repository.get_by_file_path(file_path)
+
+        if existing_entity:
+            # Update existing entity
+            entity = await entity_repository.update(
+                existing_entity.id,
+                {
+                    "title": file_name,
+                    "entity_type": entity_type,
+                    "content_type": content_type,
+                    "file_path": file_path,
+                    "checksum": checksum,
+                    "updated_at": datetime.fromtimestamp(file_stats.st_mtime),
+                },
+            )
+            status_code = 200
+        else:
+            # Create a new entity model
+            entity = EntityModel(
+                title=file_name,
+                entity_type=entity_type,
+                content_type=content_type,
+                file_path=file_path,
+                checksum=checksum,
+                created_at=datetime.fromtimestamp(file_stats.st_ctime),
+                updated_at=datetime.fromtimestamp(file_stats.st_mtime),
+            )
+            entity = await entity_repository.add(entity)
+            status_code = 201
+
+        # Index the file for search
+        await search_service.index_entity(entity)  # pyright: ignore
+
+        # Return success response
+        return JSONResponse(
+            status_code=status_code,
+            content={
+                "file_path": file_path,
+                "checksum": checksum,
+                "size": file_stats.st_size,
+                "created_at": file_stats.st_ctime,
+                "modified_at": file_stats.st_mtime,
+            },
+        )
+    except Exception as e:  # pragma: no cover
+        logger.error(f"Error writing resource {file_path}: {e}")
+        raise HTTPException(status_code=500, detail=f"Failed to write resource: {str(e)}")
+ raise HTTPException(status_code=500, detail=f"Failed to write resource: {str(e)}")