memu-py 1.1.2__cp313-abi3-win_amd64.whl → 1.3.0__cp313-abi3-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. memu/_core.pyd +0 -0
  2. memu/app/crud.py +100 -1
  3. memu/app/service.py +67 -2
  4. memu/app/settings.py +28 -5
  5. memu/database/__init__.py +1 -0
  6. memu/database/factory.py +10 -0
  7. memu/database/inmemory/repositories/memory_category_repo.py +9 -0
  8. memu/database/inmemory/repositories/memory_item_repo.py +9 -0
  9. memu/database/inmemory/repositories/resource_repo.py +9 -0
  10. memu/database/inmemory/vector.py +30 -9
  11. memu/database/models.py +4 -2
  12. memu/database/postgres/models.py +4 -1
  13. memu/database/postgres/repositories/memory_category_repo.py +25 -0
  14. memu/database/postgres/repositories/memory_item_repo.py +25 -0
  15. memu/database/postgres/repositories/resource_repo.py +25 -0
  16. memu/database/repositories/memory_category.py +2 -0
  17. memu/database/repositories/memory_item.py +2 -0
  18. memu/database/repositories/resource.py +2 -0
  19. memu/database/sqlite/__init__.py +36 -0
  20. memu/database/sqlite/models.py +237 -0
  21. memu/database/sqlite/repositories/__init__.py +15 -0
  22. memu/database/sqlite/repositories/base.py +128 -0
  23. memu/database/sqlite/repositories/category_item_repo.py +180 -0
  24. memu/database/sqlite/repositories/memory_category_repo.py +260 -0
  25. memu/database/sqlite/repositories/memory_item_repo.py +311 -0
  26. memu/database/sqlite/repositories/resource_repo.py +195 -0
  27. memu/database/sqlite/schema.py +106 -0
  28. memu/database/sqlite/session.py +48 -0
  29. memu/database/sqlite/sqlite.py +145 -0
  30. memu/integrations/__init__.py +3 -0
  31. memu/integrations/langgraph.py +163 -0
  32. memu/llm/backends/__init__.py +3 -1
  33. memu/llm/backends/grok.py +11 -0
  34. memu/llm/backends/openrouter.py +70 -0
  35. memu/llm/http_client.py +19 -0
  36. memu/llm/lazyllm_client.py +134 -0
  37. memu/llm/openai_sdk.py +1 -1
  38. memu/prompts/category_summary/category.py +1 -1
  39. memu/workflow/__init__.py +8 -0
  40. memu/workflow/interceptor.py +218 -0
  41. memu/workflow/runner.py +7 -2
  42. memu/workflow/step.py +39 -2
  43. memu_py-1.3.0.dist-info/METADATA +634 -0
  44. {memu_py-1.1.2.dist-info → memu_py-1.3.0.dist-info}/RECORD +47 -30
  45. {memu_py-1.1.2.dist-info → memu_py-1.3.0.dist-info}/WHEEL +1 -1
  46. memu_py-1.1.2.dist-info/METADATA +0 -416
  47. {memu_py-1.1.2.dist-info → memu_py-1.3.0.dist-info}/entry_points.txt +0 -0
  48. {memu_py-1.1.2.dist-info → memu_py-1.3.0.dist-info}/licenses/LICENSE.txt +0 -0
memu/_core.pyd CHANGED
Binary file
memu/app/crud.py CHANGED
@@ -76,6 +76,27 @@ class CRUDMixin:
             raise RuntimeError(msg)
         return response
 
+    async def clear_memory(
+        self,
+        where: dict[str, Any] | None = None,
+    ) -> dict[str, Any]:
+        ctx = self._get_context()
+        store = self._get_database()
+        where_filters = self._normalize_where(where)
+
+        state: WorkflowState = {
+            "ctx": ctx,
+            "store": store,
+            "where": where_filters,
+        }
+
+        result = await self._run_workflow("crud_clear_memory", state)
+        response = cast(dict[str, Any] | None, result.get("response"))
+        if response is None:
+            msg = "Clear memory workflow failed to produce a response"
+            raise RuntimeError(msg)
+        return response
+
     def _build_list_memory_items_workflow(self) -> list[WorkflowStep]:
         steps = [
             WorkflowStep(
@@ -98,7 +119,7 @@ class CRUDMixin:
         return steps
 
     @staticmethod
-    def _list_list_memory_items_initial_keys() -> set[str]:
+    def _list_list_memories_initial_keys() -> set[str]:
         return {
             "ctx",
             "store",
@@ -126,6 +147,51 @@ class CRUDMixin:
         ]
         return steps
 
+    def _build_clear_memory_workflow(self) -> list[WorkflowStep]:
+        steps = [
+            WorkflowStep(
+                step_id="clear_memory_categories",
+                role="delete_memories",
+                handler=self._crud_clear_memory_categories,
+                requires={"ctx", "store", "where"},
+                produces={"deleted_categories"},
+                capabilities={"db"},
+            ),
+            WorkflowStep(
+                step_id="clear_memory_items",
+                role="delete_memories",
+                handler=self._crud_clear_memory_items,
+                requires={"ctx", "store", "where"},
+                produces={"deleted_items"},
+                capabilities={"db"},
+            ),
+            WorkflowStep(
+                step_id="clear_memory_resources",
+                role="delete_memories",
+                handler=self._crud_clear_memory_resources,
+                requires={"ctx", "store", "where"},
+                produces={"deleted_resources"},
+                capabilities={"db"},
+            ),
+            WorkflowStep(
+                step_id="build_response",
+                role="emit",
+                handler=self._crud_build_clear_memory_response,
+                requires={"ctx", "store", "deleted_categories", "deleted_items", "deleted_resources"},
+                produces={"response"},
+                capabilities=set(),
+            ),
+        ]
+        return steps
+
+    @staticmethod
+    def _list_clear_memories_initial_keys() -> set[str]:
+        return {
+            "ctx",
+            "store",
+            "where",
+        }
+
     def _normalize_where(self, where: Mapping[str, Any] | None) -> dict[str, Any]:
         """Validate and clean the `where` scope filters against the configured user model."""
         if not where:
@@ -177,6 +243,39 @@ class CRUDMixin:
         state["response"] = response
         return state
 
+    def _crud_clear_memory_categories(self, state: WorkflowState, step_context: Any) -> WorkflowState:
+        where_filters = state.get("where") or {}
+        store = state["store"]
+        deleted = store.memory_category_repo.clear_categories(where_filters)
+        state["deleted_categories"] = deleted
+        return state
+
+    def _crud_clear_memory_items(self, state: WorkflowState, step_context: Any) -> WorkflowState:
+        where_filters = state.get("where") or {}
+        store = state["store"]
+        deleted = store.memory_item_repo.clear_items(where_filters)
+        state["deleted_items"] = deleted
+        return state
+
+    def _crud_clear_memory_resources(self, state: WorkflowState, step_context: Any) -> WorkflowState:
+        where_filters = state.get("where") or {}
+        store = state["store"]
+        deleted = store.resource_repo.clear_resources(where_filters)
+        state["deleted_resources"] = deleted
+        return state
+
+    def _crud_build_clear_memory_response(self, state: WorkflowState, step_context: Any) -> WorkflowState:
+        deleted_categories = state.get("deleted_categories", {})
+        deleted_items = state.get("deleted_items", {})
+        deleted_resources = state.get("deleted_resources", {})
+        response = {
+            "deleted_categories": [self._model_dump_without_embeddings(cat) for cat in deleted_categories.values()],
+            "deleted_items": [self._model_dump_without_embeddings(item) for item in deleted_items.values()],
+            "deleted_resources": [self._model_dump_without_embeddings(res) for res in deleted_resources.values()],
+        }
+        state["response"] = response
+        return state
+
     async def create_memory_item(
         self,
         *,
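
The new clear_memory API runs the four-step crud_clear_memory workflow and returns the deleted records with embeddings stripped. A minimal usage sketch, assuming an already-constructed MemoryService named service and a user model that defines a user_id scope field (both stand-ins, not shown in this diff):

    import asyncio

    async def main() -> None:
        # Pass `where` to scope the deletion; omit it (or pass None) to clear everything.
        response = await service.clear_memory(where={"user_id": "alice"})
        print(len(response["deleted_categories"]), "categories deleted")
        print(len(response["deleted_items"]), "items deleted")
        print(len(response["deleted_resources"]), "resources deleted")

    asyncio.run(main())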
memu/app/service.py CHANGED
@@ -30,6 +30,7 @@ from memu.llm.wrapper import (
     LLMInterceptorHandle,
     LLMInterceptorRegistry,
 )
+from memu.workflow.interceptor import WorkflowInterceptorHandle, WorkflowInterceptorRegistry
 from memu.workflow.pipeline import PipelineManager
 from memu.workflow.runner import WorkflowRunner, resolve_workflow_runner
 from memu.workflow.step import WorkflowState, WorkflowStep
@@ -83,6 +84,7 @@ class MemoryService(MemorizeMixin, RetrieveMixin, CRUDMixin):
         # Initialize client caches (lazy creation on first use)
         self._llm_clients: dict[str, Any] = {}
         self._llm_interceptors = LLMInterceptorRegistry()
+        self._workflow_interceptors = WorkflowInterceptorRegistry()
 
         self._workflow_runner = resolve_workflow_runner(workflow_runner)
 
@@ -115,6 +117,19 @@ class MemoryService(MemorizeMixin, RetrieveMixin, CRUDMixin):
                 endpoint_overrides=cfg.endpoint_overrides,
                 embed_model=cfg.embed_model,
             )
+        elif backend == "lazyllm_backend":
+            from memu.llm.lazyllm_client import LazyLLMClient
+
+            return LazyLLMClient(
+                llm_source=cfg.lazyllm_source.llm_source or cfg.lazyllm_source.source,
+                vlm_source=cfg.lazyllm_source.vlm_source or cfg.lazyllm_source.source,
+                embed_source=cfg.lazyllm_source.embed_source or cfg.lazyllm_source.source,
+                stt_source=cfg.lazyllm_source.stt_source or cfg.lazyllm_source.source,
+                chat_model=cfg.chat_model,
+                embed_model=cfg.embed_model,
+                vlm_model=cfg.lazyllm_source.vlm_model,
+                stt_model=cfg.lazyllm_source.stt_model,
+            )
         else:
             msg = f"Unknown llm_client_backend '{cfg.client_backend}'"
             raise ValueError(msg)
@@ -240,6 +255,45 @@ class MemoryService(MemorizeMixin, RetrieveMixin, CRUDMixin):
     ) -> LLMInterceptorHandle:
         return self._llm_interceptors.register_on_error(fn, name=name, priority=priority, where=where)
 
+    def intercept_before_workflow_step(
+        self,
+        fn: Callable[..., Any],
+        *,
+        name: str | None = None,
+    ) -> WorkflowInterceptorHandle:
+        """
+        Register an interceptor to be called before each workflow step.
+
+        The interceptor receives (step_context: WorkflowStepContext, state: WorkflowState).
+        """
+        return self._workflow_interceptors.register_before(fn, name=name)
+
+    def intercept_after_workflow_step(
+        self,
+        fn: Callable[..., Any],
+        *,
+        name: str | None = None,
+    ) -> WorkflowInterceptorHandle:
+        """
+        Register an interceptor to be called after each workflow step.
+
+        The interceptor receives (step_context: WorkflowStepContext, state: WorkflowState).
+        """
+        return self._workflow_interceptors.register_after(fn, name=name)
+
+    def intercept_on_error_workflow_step(
+        self,
+        fn: Callable[..., Any],
+        *,
+        name: str | None = None,
+    ) -> WorkflowInterceptorHandle:
+        """
+        Register an interceptor to be called when a workflow step raises an exception.
+
+        The interceptor receives (step_context: WorkflowStepContext, state: WorkflowState, error: Exception).
+        """
+        return self._workflow_interceptors.register_on_error(fn, name=name)
+
     def _get_context(self) -> Context:
         return self._context
 
@@ -277,7 +331,7 @@ class MemoryService(MemorizeMixin, RetrieveMixin, CRUDMixin):
         patch_delete_initial_keys = CRUDMixin._list_delete_memory_item_initial_keys()
         self._pipelines.register("patch_delete", patch_delete_workflow, initial_state_keys=patch_delete_initial_keys)
         crud_list_items_workflow = self._build_list_memory_items_workflow()
-        crud_list_memories_initial_keys = CRUDMixin._list_list_memory_items_initial_keys()
+        crud_list_memories_initial_keys = CRUDMixin._list_list_memories_initial_keys()
         self._pipelines.register(
             "crud_list_memory_items", crud_list_items_workflow, initial_state_keys=crud_list_memories_initial_keys
         )
@@ -287,12 +341,23 @@ class MemoryService(MemorizeMixin, RetrieveMixin, CRUDMixin):
             crud_list_categories_workflow,
             initial_state_keys=crud_list_memories_initial_keys,
         )
+        crud_clear_memory_workflow = self._build_clear_memory_workflow()
+        crud_clear_memory_initial_keys = CRUDMixin._list_clear_memories_initial_keys()
+        self._pipelines.register(
+            "crud_clear_memory", crud_clear_memory_workflow, initial_state_keys=crud_clear_memory_initial_keys
+        )
 
     async def _run_workflow(self, workflow_name: str, initial_state: WorkflowState) -> WorkflowState:
         """Execute a workflow through the configured runner backend."""
         steps = self._pipelines.build(workflow_name)
         runner_context = {"workflow_name": workflow_name}
-        return await self._workflow_runner.run(workflow_name, steps, initial_state, runner_context)
+        return await self._workflow_runner.run(
+            workflow_name,
+            steps,
+            initial_state,
+            runner_context,
+            interceptor_registry=self._workflow_interceptors,
+        )
 
     @staticmethod
     def _extract_json_blob(raw: str) -> str:
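
The three intercept_*_workflow_step methods mirror the existing LLM interceptor API and feed the registry that _run_workflow now passes to the runner. A registration sketch, assuming a MemoryService instance named service; the exact WorkflowStepContext shape lives in memu/workflow/interceptor.py, which is not shown here:

    def log_before(step_context, state):
        # Runs before every workflow step with the step context and current state.
        print("entering step:", step_context)

    def log_error(step_context, state, error):
        # Runs when a step raises; the exception arrives as `error`.
        print("step failed:", repr(error))

    before_handle = service.intercept_before_workflow_step(log_before, name="step-logger")
    error_handle = service.intercept_on_error_workflow_step(log_error, name="step-error-logger")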
memu/app/settings.py CHANGED
@@ -36,7 +36,7 @@ def _default_memory_type_prompts() -> "dict[str, str | CustomPrompt]":
 
 
 class PromptBlock(BaseModel):
-    lable: str | None = None
+    label: str | None = None
     ordinal: int = Field(default=0)
     prompt: str | None = None
 
@@ -89,6 +89,16 @@ def _default_memory_categories() -> list[CategoryConfig]:
     ]
 
 
+class LazyLLMSource(BaseModel):
+    source: str | None = Field(default=None, description="Default source for lazyllm client backend")
+    llm_source: str | None = Field(default=None, description="LLM source for lazyllm client backend")
+    embed_source: str | None = Field(default=None, description="Embedding source for lazyllm client backend")
+    vlm_source: str | None = Field(default=None, description="VLM source for lazyllm client backend")
+    stt_source: str | None = Field(default=None, description="STT source for lazyllm client backend")
+    vlm_model: str = Field(default="qwen-vl-plus", description="Vision language model for lazyllm client backend")
+    stt_model: str = Field(default="qwen-audio-turbo", description="Speech-to-text model for lazyllm client backend")
+
+
 class LLMConfig(BaseModel):
     provider: str = Field(
         default="openai",
@@ -99,8 +109,9 @@ class LLMConfig(BaseModel):
     chat_model: str = Field(default="gpt-4o-mini")
     client_backend: str = Field(
         default="sdk",
-        description="Which LLM client backend to use: 'httpx' (httpx) or 'sdk' (official OpenAI).",
+        description="Which LLM client backend to use: 'httpx' (httpx), 'sdk' (official OpenAI), or 'lazyllm_backend' (for more LLM sources like Qwen, Doubao, Siliconflow, etc.).",
     )
+    lazyllm_source: LazyLLMSource = Field(default=LazyLLMSource())
     endpoint_overrides: dict[str, str] = Field(
         default_factory=dict,
         description="Optional overrides for HTTP endpoints (keys: 'chat'/'summary').",
@@ -110,10 +121,22 @@ class LLMConfig(BaseModel):
         description="Default embedding model used for vectorization.",
     )
     embed_batch_size: int = Field(
-        default=25,
+        default=1,
         description="Maximum batch size for embedding API calls (used by SDK client backends).",
     )
 
+    @model_validator(mode="after")
+    def set_provider_defaults(self) -> "LLMConfig":
+        if self.provider == "grok":
+            # If values match the OpenAI defaults, switch them to Grok defaults
+            if self.base_url == "https://api.openai.com/v1":
+                self.base_url = "https://api.x.ai/v1"
+            if self.api_key == "OPENAI_API_KEY":
+                self.api_key = "XAI_API_KEY"
+            if self.chat_model == "gpt-4o-mini":
+                self.chat_model = "grok-2-latest"
+        return self
+
 
 class BlobConfig(BaseModel):
     provider: str = Field(default="local")
@@ -248,9 +271,9 @@ class LLMProfilesConfig(RootModel[dict[Key, LLMConfig]]):
 
 
 class MetadataStoreConfig(BaseModel):
-    provider: Annotated[Literal["inmemory", "postgres"], Normalize] = "inmemory"
+    provider: Annotated[Literal["inmemory", "postgres", "sqlite"], Normalize] = "inmemory"
     ddl_mode: Annotated[Literal["create", "validate"], Normalize] = "create"
-    dsn: str | None = Field(default=None, description="Postgres connection string when provider=postgres.")
+    dsn: str | None = Field(default=None, description="Database connection string (required for postgres/sqlite).")
 
 
 class VectorIndexConfig(BaseModel):
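
Taken together, the settings changes introduce the lazyllm_backend client backend with per-modality sources, Grok provider defaults, and the sqlite metadata store. A configuration sketch using only classes from this file; the "qwen" source name is illustrative, not a default from this diff:

    from memu.app.settings import LazyLLMSource, LLMConfig, MetadataStoreConfig

    # Route chat/VLM/STT/embedding through a single LazyLLM source.
    llm = LLMConfig(
        client_backend="lazyllm_backend",
        lazyllm_source=LazyLLMSource(source="qwen"),  # per-modality *_source fields override this
    )

    # The new validator swaps untouched OpenAI defaults for Grok equivalents.
    grok = LLMConfig(provider="grok")
    assert grok.chat_model == "grok-2-latest"

    # SQLite is now a valid metadata store provider.
    store = MetadataStoreConfig(provider="sqlite", dsn="sqlite:///memu.db")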
memu/database/__init__.py CHANGED
@@ -24,4 +24,5 @@ __all__ = [
     "inmemory",
     "postgres",
     "schema",
+    "sqlite",
 ]
memu/database/factory.py CHANGED
@@ -19,6 +19,11 @@ def build_database(
 ) -> Database:
     """
     Initialize a database backend for the configured provider.
+
+    Supported providers:
+    - "inmemory": In-memory storage (default, no persistence)
+    - "postgres": PostgreSQL with optional pgvector support
+    - "sqlite": SQLite file-based storage (lightweight, portable)
     """
     provider = config.metadata_store.provider
     if provider == "inmemory":
@@ -28,6 +33,11 @@ def build_database(
         from memu.database.postgres import build_postgres_database
 
         return build_postgres_database(config=config, user_model=user_model)
+    elif provider == "sqlite":
+        # Lazy import to avoid loading SQLite dependencies when not needed
+        from memu.database.sqlite import build_sqlite_database
+
+        return build_sqlite_database(config=config, user_model=user_model)
     else:
         msg = f"Unsupported metadata_store provider: {provider}"
        raise ValueError(msg)
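
A dispatch sketch for the extended factory; app_config and UserScope stand in for the caller's DatabaseConfig and scope model, which are defined elsewhere:

    from memu.database.factory import build_database

    # provider="sqlite" triggers the lazy import above, so the SQLite
    # dependencies are only loaded when that backend is selected.
    db = build_database(config=app_config, user_model=UserScope)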
memu/database/inmemory/repositories/memory_category_repo.py CHANGED
@@ -23,6 +23,15 @@ class InMemoryMemoryCategoryRepository(MemoryCategoryRepoProtocol):
             return dict(self.categories)
         return {cid: cat for cid, cat in self.categories.items() if matches_where(cat, where)}
 
+    def clear_categories(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryCategory]:
+        if not where:
+            matches = self.categories.copy()
+            self.categories.clear()
+            return matches
+        matches = {cid: cat for cid, cat in self.categories.items() if matches_where(cat, where)}
+        self.categories = {cid: cat for cid, cat in self.categories.items() if cid not in matches}
+        return matches
+
     def get_or_create_category(
         self, *, name: str, description: str, embedding: list[float], user_data: dict[str, Any]
     ) -> MemoryCategory:
memu/database/inmemory/repositories/memory_item_repo.py CHANGED
@@ -22,6 +22,15 @@ class InMemoryMemoryItemRepository(MemoryItemRepo):
             return dict(self.items)
         return {mid: item for mid, item in self.items.items() if matches_where(item, where)}
 
+    def clear_items(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryItem]:
+        if not where:
+            matches = self.items.copy()
+            self.items.clear()
+            return matches
+        matches = {mid: item for mid, item in self.items.items() if matches_where(item, where)}
+        self.items = {mid: item for mid, item in self.items.items() if mid not in matches}
+        return matches
+
     def create_item(
         self,
         *,
memu/database/inmemory/repositories/resource_repo.py CHANGED
@@ -21,6 +21,15 @@ class InMemoryResourceRepository(ResourceRepoProtocol):
             return dict(self.resources)
         return {rid: res for rid, res in self.resources.items() if matches_where(res, where)}
 
+    def clear_resources(self, where: Mapping[str, Any] | None = None) -> dict[str, Resource]:
+        if not where:
+            matches = self.resources.copy()
+            self.resources.clear()
+            return matches
+        matches = {rid: res for rid, res in self.resources.items() if matches_where(res, where)}
+        self.resources = {rid: res for rid, res in self.resources.items() if rid not in matches}
+        return matches
+
     def create_resource(
         self,
         *,
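
All three in-memory clear_* methods share one contract: they return the removed records keyed by id, and a falsy `where` clears the whole table. A behavior sketch, assuming a populated InMemoryResourceRepository named repo (its construction is outside this diff):

    deleted = repo.clear_resources({"user_id": "alice"})  # removes and returns only matching rows
    remaining = repo.list_resources()                     # non-matching rows are untouched
    repo.clear_resources()                                # no filter: drop everything
    assert repo.list_resources() == {}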
memu/database/inmemory/vector.py CHANGED
@@ -16,16 +16,37 @@ def cosine_topk(
     corpus: Iterable[tuple[str, list[float] | None]],
     k: int = 5,
 ) -> list[tuple[str, float]]:
-    q = np.array(query_vec, dtype=np.float32)
-    scored: list[tuple[str, float]] = []
+    # Filter out None vectors and collect valid entries
+    ids: list[str] = []
+    vecs: list[list[float]] = []
     for _id, vec in corpus:
-        if vec is None:
-            continue
-        vec_list = cast(list[float], vec)
-        v = np.array(vec_list, dtype=np.float32)
-        scored.append((_id, _cosine(q, v)))
-    scored.sort(key=lambda x: x[1], reverse=True)
-    return scored[:k]
+        if vec is not None:
+            ids.append(_id)
+            vecs.append(cast(list[float], vec))
+
+    if not vecs:
+        return []
+
+    # Vectorized computation: stack all vectors into a matrix
+    q = np.array(query_vec, dtype=np.float32)
+    matrix = np.array(vecs, dtype=np.float32)  # shape: (n, dim)
+
+    # Compute all cosine similarities at once
+    q_norm = np.linalg.norm(q)
+    vec_norms = np.linalg.norm(matrix, axis=1)
+    scores = matrix @ q / (vec_norms * q_norm + 1e-9)
+
+    # Use argpartition for O(n) top-k selection instead of O(n log n) sort
+    n = len(scores)
+    actual_k = min(k, n)
+    if actual_k == n:
+        topk_indices = np.argsort(scores)[::-1]
+    else:
+        # Get indices of top k elements (unordered), then sort only those
+        topk_indices = np.argpartition(scores, -actual_k)[-actual_k:]
+        topk_indices = topk_indices[np.argsort(scores[topk_indices])[::-1]]
+
+    return [(ids[i], float(scores[i])) for i in topk_indices]
 
 
 def query_cosine(query_vec: list[float], vecs: list[list[float]]) -> list[tuple[int, float]]:
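
The rewrite computes every similarity in one matrix product (the 1e-9 term guards against zero-norm vectors) and only fully sorts the k survivors. A small worked example of the argpartition step:

    import numpy as np

    scores = np.array([0.1, 0.9, 0.4, 0.7, 0.2], dtype=np.float32)
    k = 2
    idx = np.argpartition(scores, -k)[-k:]    # indices of the 2 largest scores, in arbitrary order
    idx = idx[np.argsort(scores[idx])[::-1]]  # order just those 2 descending
    print(idx, scores[idx])                   # [1 3] [0.9 0.7]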
memu/database/models.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import uuid
 from datetime import datetime
-from typing import Literal
+from typing import Any, Literal
 
 import pendulum
 from pydantic import BaseModel, ConfigDict, Field
@@ -28,9 +28,11 @@ class Resource(BaseRecord):
 
 class MemoryItem(BaseRecord):
     resource_id: str | None
-    memory_type: MemoryType
+    memory_type: str
     summary: str
     embedding: list[float] | None = None
+    happened_at: datetime | None = None
+    extra: dict[str, Any] = {}
 
 
 class MemoryCategory(BaseRecord):
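
MemoryItem now takes a plain string memory_type plus optional happened_at and free-form extra metadata. A construction sketch, assuming BaseRecord's own fields (id, timestamps, scope data) carry defaults, which this diff does not show:

    from datetime import datetime, timezone

    from memu.database.models import MemoryItem

    item = MemoryItem(
        resource_id=None,
        memory_type="episodic",  # any string, no longer constrained to MemoryType
        summary="Met Alice at the conference",
        happened_at=datetime(2024, 5, 1, tzinfo=timezone.utc),
        extra={"location": "Berlin"},  # arbitrary JSON-compatible metadata
    )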
memu/database/postgres/models.py CHANGED
@@ -14,6 +14,7 @@ except ImportError as exc:
 
 from pydantic import BaseModel
 from sqlalchemy import ForeignKey, MetaData, String, Text
+from sqlalchemy.dialects.postgresql import JSONB
 from sqlmodel import Column, DateTime, Field, Index, SQLModel, func
 
 from memu.database.models import CategoryItem, MemoryCategory, MemoryItem, MemoryType, Resource
@@ -51,10 +52,12 @@ class ResourceModel(BaseModelMixin, Resource):
 
 
 class MemoryItemModel(BaseModelMixin, MemoryItem):
-    resource_id: str = Field(sa_column=Column(ForeignKey("resources.id", ondelete="CASCADE"), nullable=True))
+    resource_id: str | None = Field(sa_column=Column(ForeignKey("resources.id", ondelete="CASCADE"), nullable=True))
     memory_type: MemoryType = Field(sa_column=Column(String, nullable=False))
     summary: str = Field(sa_column=Column(Text, nullable=False))
     embedding: list[float] | None = Field(default=None, sa_column=Column(Vector(), nullable=True))
+    happened_at: datetime | None = Field(default=None, sa_column=Column(DateTime, nullable=True))
+    extra: dict[str, Any] = Field(default={}, sa_column=Column(JSONB, nullable=True))
 
 
 class MemoryCategoryModel(BaseModelMixin, MemoryCategory):
memu/database/postgres/repositories/memory_category_repo.py CHANGED
@@ -37,6 +37,31 @@ class PostgresMemoryCategoryRepo(PostgresRepoBase, MemoryCategoryRepo):
             result[cat.id] = cat
         return result
 
+    def clear_categories(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryCategory]:
+        from sqlmodel import delete, select
+
+        filters = self._build_filters(self._sqla_models.MemoryCategory, where)
+        with self._sessions.session() as session:
+            # First get the objects to delete
+            rows = session.scalars(select(self._sqla_models.MemoryCategory).where(*filters)).all()
+            deleted: dict[str, MemoryCategory] = {}
+            for row in rows:
+                row.embedding = self._normalize_embedding(row.embedding)
+                deleted[row.id] = row
+
+            if not deleted:
+                return {}
+
+            # Delete from database
+            session.exec(delete(self._sqla_models.MemoryCategory).where(*filters))
+            session.commit()
+
+        # Clean up cache
+        for cat_id in deleted:
+            self.categories.pop(cat_id, None)
+
+        return deleted
+
     def get_or_create_category(
         self,
         *,
memu/database/postgres/repositories/memory_item_repo.py CHANGED
@@ -51,6 +51,31 @@ class PostgresMemoryItemRepo(PostgresRepoBase):
             result[item.id] = item
         return result
 
+    def clear_items(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryItem]:
+        from sqlmodel import delete, select
+
+        filters = self._build_filters(self._sqla_models.MemoryItem, where)
+        with self._sessions.session() as session:
+            # First get the objects to delete
+            rows = session.scalars(select(self._sqla_models.MemoryItem).where(*filters)).all()
+            deleted: dict[str, MemoryItem] = {}
+            for row in rows:
+                row.embedding = self._normalize_embedding(row.embedding)
+                deleted[row.id] = row
+
+            if not deleted:
+                return {}
+
+            # Delete from database
+            session.exec(delete(self._sqla_models.MemoryItem).where(*filters))
+            session.commit()
+
+        # Clean up cache
+        for item_id in deleted:
+            self.items.pop(item_id, None)
+
+        return deleted
+
     def create_item(
         self,
         *,
memu/database/postgres/repositories/resource_repo.py CHANGED
@@ -37,6 +37,31 @@ class PostgresResourceRepo(PostgresRepoBase, ResourceRepo):
             result[res.id] = res
         return result
 
+    def clear_resources(self, where: Mapping[str, Any] | None = None) -> dict[str, Resource]:
+        from sqlmodel import delete, select
+
+        filters = self._build_filters(self._sqla_models.Resource, where)
+        with self._sessions.session() as session:
+            # First get the objects to delete
+            rows = session.scalars(select(self._sqla_models.Resource).where(*filters)).all()
+            deleted: dict[str, Resource] = {}
+            for row in rows:
+                row.embedding = self._normalize_embedding(row.embedding)
+                deleted[row.id] = row
+
+            if not deleted:
+                return {}
+
+            # Delete from database
+            session.exec(delete(self._sqla_models.Resource).where(*filters))
+            session.commit()
+
+        # Clean up cache
+        for res_id in deleted:
+            self.resources.pop(res_id, None)
+
+        return deleted
+
     def create_resource(
         self,
         *,
memu/database/repositories/memory_category.py CHANGED
@@ -14,6 +14,8 @@ class MemoryCategoryRepo(Protocol):
 
     def list_categories(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryCategory]: ...
 
+    def clear_categories(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryCategory]: ...
+
     def get_or_create_category(
         self, *, name: str, description: str, embedding: list[float], user_data: dict[str, Any]
     ) -> MemoryCategory: ...
memu/database/repositories/memory_item.py CHANGED
@@ -16,6 +16,8 @@ class MemoryItemRepo(Protocol):
 
     def list_items(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryItem]: ...
 
+    def clear_items(self, where: Mapping[str, Any] | None = None) -> dict[str, MemoryItem]: ...
+
     def create_item(
         self,
         *,
memu/database/repositories/resource.py CHANGED
@@ -14,6 +14,8 @@ class ResourceRepo(Protocol):
 
     def list_resources(self, where: Mapping[str, Any] | None = None) -> dict[str, Resource]: ...
 
+    def clear_resources(self, where: Mapping[str, Any] | None = None) -> dict[str, Resource]: ...
+
     def create_resource(
         self,
         *,
memu/database/sqlite/__init__.py ADDED
@@ -0,0 +1,36 @@
+"""SQLite database backend for MemU."""
+
+from __future__ import annotations
+
+from pydantic import BaseModel
+
+from memu.app.settings import DatabaseConfig
+from memu.database.sqlite.sqlite import SQLiteStore
+
+
+def build_sqlite_database(
+    *,
+    config: DatabaseConfig,
+    user_model: type[BaseModel],
+) -> SQLiteStore:
+    """Build a SQLite database store instance.
+
+    Args:
+        config: Database configuration containing metadata_store settings.
+        user_model: Pydantic model for user scope fields.
+
+    Returns:
+        Configured SQLiteStore instance.
+    """
+    dsn = config.metadata_store.dsn
+    if not dsn:
+        # Default to a local file if no DSN provided
+        dsn = "sqlite:///memu.db"
+
+    return SQLiteStore(
+        dsn=dsn,
+        scope_model=user_model,
+    )
+
+
+__all__ = ["SQLiteStore", "build_sqlite_database"]
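
A usage sketch for the new entry point; UserScope is a stand-in for whatever scope model the application defines, and app_config is an existing DatabaseConfig:

    from pydantic import BaseModel

    from memu.database.sqlite import build_sqlite_database

    class UserScope(BaseModel):
        user_id: str

    # With metadata_store.dsn unset, the store falls back to sqlite:///memu.db
    # in the current working directory.
    store = build_sqlite_database(config=app_config, user_model=UserScope)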