agent-memory-engine 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. agent_memory/__init__.py +33 -0
  2. agent_memory/cli.py +142 -0
  3. agent_memory/client.py +355 -0
  4. agent_memory/config.py +28 -0
  5. agent_memory/controller/__init__.py +15 -0
  6. agent_memory/controller/conflict.py +95 -0
  7. agent_memory/controller/consolidation.py +136 -0
  8. agent_memory/controller/forgetting.py +29 -0
  9. agent_memory/controller/router.py +62 -0
  10. agent_memory/controller/trust.py +31 -0
  11. agent_memory/embedding/__init__.py +5 -0
  12. agent_memory/embedding/base.py +11 -0
  13. agent_memory/embedding/local_provider.py +38 -0
  14. agent_memory/embedding/openai_provider.py +11 -0
  15. agent_memory/extraction/__init__.py +5 -0
  16. agent_memory/extraction/entity_extractor.py +13 -0
  17. agent_memory/extraction/pipeline.py +123 -0
  18. agent_memory/extraction/prompts.py +40 -0
  19. agent_memory/governance/__init__.py +6 -0
  20. agent_memory/governance/audit.py +14 -0
  21. agent_memory/governance/export.py +72 -0
  22. agent_memory/governance/health.py +40 -0
  23. agent_memory/interfaces/__init__.py +14 -0
  24. agent_memory/interfaces/mcp_server.py +128 -0
  25. agent_memory/interfaces/rest_api.py +71 -0
  26. agent_memory/llm/__init__.py +5 -0
  27. agent_memory/llm/base.py +23 -0
  28. agent_memory/llm/ollama_client.py +64 -0
  29. agent_memory/llm/openai_client.py +94 -0
  30. agent_memory/models.py +149 -0
  31. agent_memory/storage/__init__.py +4 -0
  32. agent_memory/storage/base.py +59 -0
  33. agent_memory/storage/schema.sql +125 -0
  34. agent_memory/storage/sqlite_backend.py +762 -0
  35. agent_memory_engine-0.1.0.dist-info/METADATA +228 -0
  36. agent_memory_engine-0.1.0.dist-info/RECORD +39 -0
  37. agent_memory_engine-0.1.0.dist-info/WHEEL +4 -0
  38. agent_memory_engine-0.1.0.dist-info/entry_points.txt +2 -0
  39. agent_memory_engine-0.1.0.dist-info/licenses/LICENSE +22 -0
@@ -0,0 +1,33 @@
1
"""Public package surface for ``agent_memory``.

Re-exports the client facade and the data-model types so callers can write
``from agent_memory import MemoryClient, MemoryItem`` without reaching into
submodules.
"""

from agent_memory.client import MemoryClient
from agent_memory.models import (
    ConflictRecord,
    ConflictResolution,
    ConversationTurn,
    HealthReport,
    MaintenanceReport,
    MemoryDraft,
    MemoryItem,
    MemoryLayer,
    MemoryType,
    QueryIntent,
    RelationEdge,
    SearchResult,
    TraceReport,
)

# Names re-exported as the package's public API (kept alphabetical).
__all__ = [
    "ConflictRecord",
    "ConflictResolution",
    "ConversationTurn",
    "HealthReport",
    "MaintenanceReport",
    "MemoryClient",
    "MemoryDraft",
    "MemoryItem",
    "MemoryLayer",
    "MemoryType",
    "QueryIntent",
    "RelationEdge",
    "SearchResult",
    "TraceReport",
]
agent_memory/cli.py ADDED
@@ -0,0 +1,142 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ from dataclasses import asdict, is_dataclass, replace
5
+ from enum import Enum
6
+ import json
7
+ from pathlib import Path
8
+ from typing import Any
9
+
10
+ from agent_memory.client import MemoryClient
11
+ from agent_memory.config import AgentMemoryConfig
12
+
13
+
14
+ def _json_default(value: Any) -> Any:
15
+ if isinstance(value, Enum):
16
+ return value.value
17
+ if hasattr(value, "isoformat"):
18
+ return value.isoformat()
19
+ if is_dataclass(value):
20
+ return asdict(value)
21
+ raise TypeError(f"Object of type {type(value)!r} is not JSON serializable")
22
+
23
+
24
+ def _print_json(payload: Any) -> None:
25
+ print(json.dumps(payload, ensure_ascii=False, indent=2, default=_json_default))
26
+
27
+
28
def build_parser() -> argparse.ArgumentParser:
    """Construct the ``agent-memory`` command-line parser and its subcommands."""
    parser = argparse.ArgumentParser(
        prog="agent-memory",
        description="Zero-config local memory engine for agents.",
    )
    parser.add_argument("--db", dest="database_path", help="Path to the SQLite database file.")
    commands = parser.add_subparsers(dest="command", required=True)

    def sub(name: str, help_text: str) -> argparse.ArgumentParser:
        # Small helper so each subcommand registration reads uniformly.
        return commands.add_parser(name, help=help_text)

    store = sub("store", "Store a memory.")
    store.add_argument("content")
    store.add_argument("--source-id", default="cli")
    store.add_argument("--memory-type", default="semantic")
    store.add_argument("--causal-parent-id")
    store.add_argument("--tag", action="append", default=[])

    search = sub("search", "Search memories.")
    search.add_argument("query")
    search.add_argument("--limit", type=int, default=5)

    trace = sub("trace", "Trace a memory graph.")
    trace.add_argument("memory_id")
    trace.add_argument("--max-depth", type=int, default=10)

    evolution = sub("evolution", "Show memory evolution history.")
    evolution.add_argument("memory_id")
    evolution.add_argument("--limit", type=int, default=20)

    audit = sub("audit", "Show recent audit events.")
    audit.add_argument("--limit", type=int, default=20)

    sub("health", "Show memory health report.")
    sub("maintain", "Run maintenance cycle.")

    # Both transfer commands take a single positional path argument.
    for name, help_text in (("export", "Export to JSONL."), ("import", "Import from JSONL.")):
        sub(name, help_text).add_argument("path")

    return parser
66
+
67
+
68
def _build_client(database_path: str | None) -> MemoryClient:
    """Build a MemoryClient from env config, overriding the DB path when given."""
    config = AgentMemoryConfig.from_env()
    effective = replace(config, database_path=database_path) if database_path else config
    return MemoryClient(config=effective)
73
+
74
+
75
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: parse arguments, dispatch to the client, print JSON.

    Returns a process exit code (0 on success); the client is always closed,
    even when a handler raises.
    """
    parser = build_parser()
    args = parser.parse_args(argv)
    client = _build_client(args.database_path)
    try:
        def _store() -> object:
            item = client.add(
                args.content,
                source_id=args.source_id,
                memory_type=args.memory_type,
                causal_parent_id=args.causal_parent_id,
                tags=list(args.tag),
            )
            return {"id": item.id, "content": item.content, "trust_score": item.trust_score}

        def _search() -> object:
            return [
                {
                    "id": result.item.id,
                    "content": result.item.content,
                    "score": result.score,
                    "matched_by": result.matched_by,
                }
                for result in client.search(args.query, limit=args.limit)
            ]

        def _export() -> object:
            path = str(Path(args.path))
            return {"path": path, "exported": client.export_jsonl(path)}

        def _import() -> object:
            path = str(Path(args.path))
            return {"path": path, "imported": client.import_jsonl(path)}

        # Each handler returns the payload to serialize; dataclass reports are
        # converted to dicts so the JSON printer sees plain structures.
        handlers = {
            "store": _store,
            "search": _search,
            "trace": lambda: asdict(client.trace_graph(args.memory_id, max_depth=args.max_depth)),
            "evolution": lambda: client.evolution_events(memory_id=args.memory_id, limit=args.limit),
            "audit": lambda: client.audit_events(limit=args.limit),
            "health": lambda: asdict(client.health()),
            "maintain": lambda: asdict(client.maintain()),
            "export": _export,
            "import": _import,
        }
        handler = handlers.get(args.command)
        if handler is None:
            # Defensive: subparsers are required, so argparse normally rejects
            # unknown commands before we get here.
            parser.error(f"Unsupported command: {args.command}")
            return 2
        _print_json(handler())
        return 0
    finally:
        client.close()


if __name__ == "__main__":
    raise SystemExit(main())
agent_memory/client.py ADDED
@@ -0,0 +1,355 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from dataclasses import replace
5
+ from datetime import datetime, timezone
6
+ import uuid
7
+
8
+ from agent_memory.controller.conflict import ConflictDetector
9
+ from agent_memory.controller.consolidation import ConsolidationPlanner
10
+ from agent_memory.controller.forgetting import ForgettingPolicy
11
+ from agent_memory.config import AgentMemoryConfig
12
+ from agent_memory.controller.router import IntentRouter, reciprocal_rank_fusion, strip_intent_markers
13
+ from agent_memory.controller.trust import TrustScorer
14
+ from agent_memory.embedding.local_provider import LocalEmbeddingProvider
15
+ from agent_memory.governance.audit import AuditLogReader
16
+ from agent_memory.governance.export import MemoryExporter, MemoryImporter
17
+ from agent_memory.governance.health import MemoryHealthMonitor
18
+ from agent_memory.extraction.entity_extractor import EntityExtractor
19
+ from agent_memory.extraction.pipeline import ConversationMemoryPipeline
20
+ from agent_memory.llm.base import LLMClient
21
+ from agent_memory.models import (
22
+ ConflictRecord,
23
+ ConversationTurn,
24
+ MaintenanceReport,
25
+ MemoryDraft,
26
+ MemoryItem,
27
+ MemoryLayer,
28
+ MemoryType,
29
+ RelationEdge,
30
+ RelationType,
31
+ SearchResult,
32
+ TraceReport,
33
+ )
34
+ from agent_memory.storage.sqlite_backend import SQLiteBackend
35
+
36
+
37
class MemoryClient:
    """High-level facade wiring storage, embeddings, extraction, conflict
    handling, forgetting, and governance into a single client.

    All components are injectable for testing; by default the client builds a
    SQLite backend and local embedding provider from ``AgentMemoryConfig``.
    """

    def __init__(
        self,
        config: AgentMemoryConfig | None = None,
        backend: SQLiteBackend | None = None,
        embedding_provider: LocalEmbeddingProvider | None = None,
        entity_extractor: EntityExtractor | None = None,
        llm_client: LLMClient | None = None,
    ) -> None:
        self.config = config or AgentMemoryConfig.from_env()
        self.backend = backend or SQLiteBackend(
            self.config.database_path,
            prefer_sqlite_vec=self.config.enable_sqlite_vec,
        )
        self.embedding_provider = embedding_provider or LocalEmbeddingProvider()
        self.entity_extractor = entity_extractor or EntityExtractor()
        self.router = IntentRouter()
        # LLM is optional throughout: pipeline, conflict detection, and
        # consolidation all degrade to heuristics when it is None.
        self.llm_client = llm_client
        self.pipeline = ConversationMemoryPipeline(entity_extractor=self.entity_extractor, llm_client=self.llm_client)
        self.turn_model = ConversationTurn
        self.forgetting_policy = ForgettingPolicy()
        self.trust_scorer = TrustScorer()
        self.conflict_detector = ConflictDetector(self.backend, llm_client=self.llm_client)
        self.consolidation_planner = ConsolidationPlanner()
        self.health_monitor = MemoryHealthMonitor(self.backend)
        self.audit_reader = AuditLogReader(self.backend)
        self.exporter = MemoryExporter(self.backend)
        self.importer = MemoryImporter(self.backend)

    def close(self) -> None:
        """Close the underlying storage backend."""
        self.backend.close()

    def add(
        self,
        content: str,
        *,
        source_id: str,
        memory_type: MemoryType | str = MemoryType.SEMANTIC,
        importance: float = 0.5,
        trust_score: float = 0.75,
        tags: list[str] | None = None,
        entity_refs: list[str] | None = None,
        causal_parent_id: str | None = None,
        supersedes_id: str | None = None,
    ) -> MemoryItem:
        """Embed and persist a new memory.

        Structural relations (derived-from / supersedes) are attached, then
        conflicts against existing memories are detected and applied, which
        may adjust the stored trust score.  Returns the final stored item.
        """
        memory_type = MemoryType(memory_type)
        embedding = self.embedding_provider.embed([content])[0]
        now = datetime.now(timezone.utc)
        # Incoming trust_score is treated as source reliability, not the
        # stored trust: the scorer derives the persisted value from it.
        base_trust = self.trust_scorer.score(source_reliability=trust_score)
        item = MemoryItem(
            id=str(uuid.uuid4()),
            content=content,
            memory_type=memory_type,
            embedding=embedding,
            created_at=now,
            last_accessed=now,
            importance=importance,
            trust_score=base_trust,
            source_id=source_id,
            entity_refs=entity_refs or self.entity_extractor.extract(content),
            tags=tags or [],
            causal_parent_id=causal_parent_id,
            supersedes_id=supersedes_id,
        )
        item = self.backend.add_memory(item)
        self._attach_structural_relations(item)
        conflicts = self.detect_conflicts(item)
        if conflicts:
            item, _ = self._apply_conflicts(item, conflicts)
        return item

    def get(self, memory_id: str) -> MemoryItem | None:
        """Fetch a memory by id, or None when absent."""
        return self.backend.get_memory(memory_id)

    def delete(self, memory_id: str) -> bool:
        """Soft-delete a memory; returns True when a row was affected."""
        return self.backend.soft_delete_memory(memory_id)

    def search(self, query: str, limit: int | None = None) -> list[SearchResult]:
        """Hybrid retrieval over the memory store.

        The intent router selects strategies (semantic / full-text / entity /
        causal-trace); per-strategy rankings are fused with reciprocal rank
        fusion, optionally re-sorted for recency, and each returned memory is
        "touched" to update its last-accessed timestamp.
        """
        search_limit = limit or self.config.default_search_limit
        plan = self.router.plan(query)
        rankings: dict[str, list[str]] = {}
        results_by_id: dict[str, MemoryItem] = {}
        matched_by: dict[str, set[str]] = defaultdict(set)
        memory_type = plan.filters.get("memory_type")
        # Strip router markers from the query; fall back to the raw query if
        # stripping leaves an empty string.
        normalized_query = strip_intent_markers(query) or query

        if "semantic" in plan.strategies:
            embedding = self.embedding_provider.embed([normalized_query])[0]
            semantic_results = self.backend.search_by_vector(embedding, limit=self.config.semantic_limit, memory_type=memory_type)
            rankings["semantic"] = [item.id for item, _ in semantic_results]
            for item, _ in semantic_results:
                results_by_id[item.id] = item
                matched_by[item.id].add("semantic")

        if "full_text" in plan.strategies:
            lexical_results = self.backend.search_full_text(normalized_query, limit=self.config.lexical_limit, memory_type=memory_type)
            rankings["full_text"] = [item.id for item, _ in lexical_results]
            for item, _ in lexical_results:
                results_by_id[item.id] = item
                matched_by[item.id].add("full_text")

        if "entity" in plan.strategies:
            entities = self.entity_extractor.extract(normalized_query)
            entity_results = self.backend.search_by_entities(entities, limit=self.config.entity_limit, memory_type=memory_type)
            rankings["entity"] = [item.id for item, _ in entity_results]
            for item, _ in entity_results:
                results_by_id[item.id] = item
                matched_by[item.id].add("entity")

        if "causal_trace" in plan.strategies:
            # Seed the trace from the top-2 semantic hits, else top-2 lexical.
            seed_ids = rankings.get("semantic", [])[:2] or rankings.get("full_text", [])[:2]
            trace_ids: list[str] = []
            for seed_id in seed_ids:
                for ancestor in self.backend.trace_ancestors(seed_id, max_depth=5):
                    results_by_id[ancestor.id] = ancestor
                    matched_by[ancestor.id].add("causal_trace")
                    trace_ids.append(ancestor.id)
            if trace_ids:
                rankings["causal_trace"] = trace_ids

        fused = reciprocal_rank_fusion(rankings, k=self.config.rrf_k)
        final_ids = list(fused.keys())
        if plan.filters.get("sort") == "recency":
            # Fused score still dominates; created_at only breaks ties.
            final_ids.sort(
                key=lambda item_id: (
                    fused.get(item_id, 0.0),
                    results_by_id[item_id].created_at,
                ),
                reverse=True,
            )

        output: list[SearchResult] = []
        for memory_id in final_ids[:search_limit]:
            self.backend.touch_memory(memory_id)
            refreshed = self.backend.get_memory(memory_id)
            if refreshed is None:
                # Memory may have been deleted between ranking and fetch.
                continue
            output.append(
                SearchResult(
                    item=refreshed,
                    score=fused.get(memory_id, 0.0),
                    matched_by=sorted(matched_by[memory_id]),
                )
            )
        return output

    def ingest_conversation(self, turns: list[ConversationTurn], source_id: str) -> list[MemoryItem]:
        """Extract memory drafts from conversation turns and store each one."""
        drafts = self.pipeline.extract(turns, source_id=source_id)
        created: list[MemoryItem] = []
        for draft in drafts:
            created.append(self.add_from_draft(draft))
        return created

    def add_from_draft(self, draft: MemoryDraft) -> MemoryItem:
        """Persist a draft by delegating to :meth:`add` with its fields."""
        return self.add(
            draft.content,
            source_id=draft.source_id,
            memory_type=draft.memory_type,
            importance=draft.importance,
            trust_score=draft.trust_score,
            tags=list(draft.tags),
            entity_refs=list(draft.entity_refs),
            causal_parent_id=draft.causal_parent_id,
            supersedes_id=draft.supersedes_id,
        )

    def update(self, item: MemoryItem, **changes: object) -> MemoryItem:
        """Apply field changes to a memory and persist them.

        Re-runs conflict detection only when content or entity references
        changed, since other fields cannot introduce new contradictions.
        """
        if "memory_type" in changes and isinstance(changes["memory_type"], str):
            changes["memory_type"] = MemoryType(changes["memory_type"])
        updated = replace(item, **changes)
        updated = self.backend.update_memory(updated)
        self._attach_structural_relations(updated)
        if "content" in changes or "entity_refs" in changes:
            conflicts = self.detect_conflicts(updated)
            if conflicts:
                updated, _ = self._apply_conflicts(updated, conflicts)
        return updated

    def trace(self, memory_id: str, max_depth: int = 10) -> list[MemoryItem]:
        """Return the causal ancestors of a memory up to ``max_depth``."""
        return self.backend.trace_ancestors(memory_id, max_depth=max_depth)

    def trace_graph(self, memory_id: str, max_depth: int = 10) -> TraceReport:
        """Build a full trace report (ancestors, descendants, relations,
        evolution events) around one memory.

        Raises ValueError when the focus memory does not exist.
        """
        focus = self.get(memory_id)
        if focus is None:
            raise ValueError(f"Memory {memory_id} not found")
        return TraceReport(
            focus=focus,
            ancestors=self.backend.trace_ancestors(memory_id, max_depth=max_depth),
            descendants=self.backend.trace_descendants(memory_id, max_depth=max_depth),
            relations=self.backend.list_relations(memory_id),
            evolution_events=self.backend.get_evolution_events(memory_id=memory_id, limit=100),
        )

    def detect_conflicts(self, item: MemoryItem) -> list[ConflictRecord]:
        """Delegate conflict detection for ``item`` to the configured detector."""
        return self.conflict_detector.detect(item)

    def health(self):
        """Generate a health report via the health monitor."""
        return self.health_monitor.generate()

    def audit_events(self, limit: int = 50) -> list[dict[str, object]]:
        """Return the most recent audit-log events."""
        return self.audit_reader.recent(limit=limit)

    def evolution_events(self, memory_id: str | None = None, limit: int = 50) -> list[dict[str, object]]:
        """Return evolution events, optionally filtered to one memory."""
        return self.backend.get_evolution_events(memory_id=memory_id, limit=limit)

    def export_jsonl(self, path: str) -> int:
        """Export memories to a JSONL file; returns the exported count."""
        return self.exporter.export_jsonl(path)

    def import_jsonl(self, path: str) -> int:
        """Import memories from a JSONL file; returns the imported count."""
        return self.importer.import_jsonl(path)

    def maintain(self) -> MaintenanceReport:
        """Run a full maintenance cycle.

        Pass 1: for every memory, recompute forgetting strength and layer;
        decay (soft-delete) very weak, stale memories, and persist layer
        moves.  Pass 2: sweep for conflicts.  Finally run consolidation.
        """
        report = MaintenanceReport()
        now = datetime.now(timezone.utc)
        for memory in self.backend.list_memories():
            age_days = max((now - memory.last_accessed).total_seconds() / 86400.0, 0.0)
            # NOTE(review): assumes effective_strength is in [0, 1] — confirm
            # against ForgettingPolicy before tuning the 0.1 threshold below.
            strength = self.forgetting_policy.effective_strength(memory, age_days=age_days)
            next_layer = self.forgetting_policy.next_layer(memory, age_days=age_days)
            updated = memory
            if next_layer is not memory.layer:
                updated = replace(updated, layer=next_layer)
                if next_layer is MemoryLayer.LONG_TERM:
                    report.promoted += 1
                else:
                    report.demoted += 1
            # Decay takes precedence over a layer move: a deleted memory is
            # never written back with its new layer.
            if strength < 0.1 and age_days > 60:
                if self.backend.soft_delete_memory(memory.id):
                    report.decayed += 1
                continue
            if updated is not memory:
                self.backend.update_memory(updated)
        for memory in self.backend.list_memories():
            conflicts = self.detect_conflicts(memory)
            report.conflicts_found += len(conflicts)
            if conflicts:
                # _apply_conflicts returns (item, inserted_relation_count).
                report.conflicts_resolved += self._apply_conflicts(memory, conflicts)[1]
        report.consolidated = self.consolidate()
        return report

    def consolidate(self) -> int:
        """Merge groups of near-duplicate memories.

        For each group the planner proposes, create one merged memory that
        supersedes the group's primary (highest importance / trust / newest),
        and link the merged memory to every other group member.  Groups whose
        primary is already superseded by a consolidated memory are skipped.
        Returns the number of groups consolidated.
        """
        memories = self.backend.list_memories()
        groups = self.consolidation_planner.find_merge_groups(memories)
        consolidated = 0
        for index, group in enumerate(groups, start=1):
            primary = max(group, key=lambda item: (item.importance, item.trust_score, item.created_at))
            if any(
                memory.supersedes_id == primary.id and "consolidated" in memory.tags
                for memory in memories
            ):
                continue
            draft = self.consolidation_planner.create_merged_draft(
                group,
                source_id=f"consolidation:{index}",
                llm_client=self.llm_client,
            )
            merged = self.add_from_draft(
                replace(
                    draft,
                    supersedes_id=primary.id,
                    causal_parent_id=primary.causal_parent_id,
                )
            )
            for memory in group:
                if memory.id == primary.id:
                    # The primary is already linked via supersedes_id above.
                    continue
                self.backend.add_relation(
                    RelationEdge(
                        source_id=merged.id,
                        target_id=memory.id,
                        relation_type=RelationType.SUPERSEDES,
                    )
                )
            consolidated += 1
        return consolidated

    def _attach_structural_relations(self, item: MemoryItem) -> None:
        """Mirror causal_parent_id / supersedes_id fields as relation edges."""
        if item.causal_parent_id:
            self.backend.add_relation(
                RelationEdge(
                    source_id=item.id,
                    target_id=item.causal_parent_id,
                    relation_type=RelationType.DERIVED_FROM,
                )
            )
        if item.supersedes_id:
            self.backend.add_relation(
                RelationEdge(
                    source_id=item.id,
                    target_id=item.supersedes_id,
                    relation_type=RelationType.SUPERSEDES,
                )
            )

    def _apply_conflicts(self, item: MemoryItem, conflicts: list[ConflictRecord]) -> tuple[MemoryItem, int]:
        """Record conflict relations and re-score the item's trust.

        Returns the persisted item with adjusted trust plus the number of
        relation edges actually inserted by the backend.
        """
        contradiction_count = 0
        inserted_relations = 0
        for conflict in conflicts:
            contradiction_count += 1
            inserted_relations += int(
                self.backend.add_relation(
                    RelationEdge(
                        source_id=item.id,
                        target_id=conflict.existing_id,
                        relation_type=(
                            RelationType.SUPERSEDES
                            if conflict.resolution.value == "supersede"
                            else RelationType.CONTRADICTS
                        ),
                    )
                )
            )
        age_days = max((datetime.now(timezone.utc) - item.created_at).total_seconds() / 86400.0, 0.0)
        # Feed the current trust back in as source reliability so repeated
        # conflicts compound the penalty.
        adjusted_trust = self.trust_scorer.score(
            source_reliability=item.trust_score,
            contradiction_count=contradiction_count,
            age_days=age_days,
        )
        updated = replace(item, trust_score=adjusted_trust)
        return self.backend.update_memory(updated), inserted_relations
agent_memory/config.py ADDED
@@ -0,0 +1,28 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ import os
5
+ from pathlib import Path
6
+
7
+
8
+ @dataclass(slots=True)
9
+ class AgentMemoryConfig:
10
+ database_path: str = str(Path.cwd() / "agent-memory.db")
11
+ semantic_limit: int = 10
12
+ lexical_limit: int = 10
13
+ entity_limit: int = 10
14
+ default_search_limit: int = 5
15
+ rrf_k: int = 60
16
+ enable_sqlite_vec: bool = True
17
+
18
+ @classmethod
19
+ def from_env(cls) -> "AgentMemoryConfig":
20
+ return cls(
21
+ database_path=os.getenv("AGENT_MEMORY_DB_PATH", str(Path.cwd() / "agent-memory.db")),
22
+ semantic_limit=int(os.getenv("AGENT_MEMORY_SEMANTIC_LIMIT", "10")),
23
+ lexical_limit=int(os.getenv("AGENT_MEMORY_LEXICAL_LIMIT", "10")),
24
+ entity_limit=int(os.getenv("AGENT_MEMORY_ENTITY_LIMIT", "10")),
25
+ default_search_limit=int(os.getenv("AGENT_MEMORY_DEFAULT_SEARCH_LIMIT", "5")),
26
+ rrf_k=int(os.getenv("AGENT_MEMORY_RRF_K", "60")),
27
+ enable_sqlite_vec=os.getenv("AGENT_MEMORY_ENABLE_SQLITE_VEC", "true").lower() not in {"0", "false", "no"},
28
+ )
@@ -0,0 +1,15 @@
1
"""Controller subpackage: decision-making components of the memory engine.

Re-exports the detector / planner / policy classes plus the rank-fusion
helper so callers can import them from ``agent_memory.controller`` directly.
"""

from agent_memory.controller.conflict import ConflictDetector
from agent_memory.controller.consolidation import ConsolidationPlanner
from agent_memory.controller.forgetting import ForgettingPolicy
from agent_memory.controller.router import IntentRouter, reciprocal_rank_fusion
from agent_memory.controller.trust import TrustScorer

# Public API of the controller subpackage.
__all__ = [
    "ConflictDetector",
    "ConsolidationPlanner",
    "ForgettingPolicy",
    "IntentRouter",
    "TrustScorer",
    "reciprocal_rank_fusion",
]
@@ -0,0 +1,95 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from difflib import SequenceMatcher
5
+ import re
6
+ from typing import Any
7
+
8
+ from agent_memory.extraction.prompts import CONFLICT_JUDGE_PROMPT
9
+ from agent_memory.llm.base import LLMClient
10
+ from agent_memory.models import ConflictRecord, ConflictResolution, MemoryItem
11
+ from agent_memory.storage.sqlite_backend import SQLiteBackend
12
+
13
+
14
+ NEGATION_MARKERS = ("不", "没", "不是", "不会", "never", "not", "no ")
15
+ PREFERENCE_MARKERS = ("喜欢", "偏好", "prefer", "prefers", "using", "uses", "选择", "selected")
16
+ CONFLICT_SCHEMA: dict[str, Any] = {
17
+ "type": "object",
18
+ "properties": {
19
+ "label": {"type": "string", "enum": ["contradicts", "supersedes", "supports", "related", "none"]},
20
+ "confidence": {"type": "number"},
21
+ "reason": {"type": "string"},
22
+ },
23
+ "required": ["label", "confidence", "reason"],
24
+ "additionalProperties": False,
25
+ }
26
+
27
+
28
+ @dataclass(slots=True)
29
+ class ConflictDetector:
30
+ backend: SQLiteBackend
31
+ llm_client: LLMClient | None = None
32
+
33
+ def detect(self, candidate: MemoryItem, limit: int = 10) -> list[ConflictRecord]:
34
+ vector_hits = self.backend.search_by_vector(candidate.embedding, limit=limit)
35
+ conflicts: list[ConflictRecord] = []
36
+ for existing, similarity in vector_hits:
37
+ if existing.id == candidate.id:
38
+ continue
39
+ if self.backend.relation_exists_between(
40
+ candidate.id,
41
+ existing.id,
42
+ relation_types=["contradicts", "supersedes"],
43
+ ):
44
+ continue
45
+ label, confidence, reason = self._judge_relationship(candidate, existing, similarity)
46
+ if label not in {"contradicts", "supersedes"} or confidence < 0.55:
47
+ continue
48
+ conflicts.append(
49
+ ConflictRecord(
50
+ existing_id=existing.id,
51
+ candidate_id=candidate.id,
52
+ confidence=confidence,
53
+ resolution=ConflictResolution.SUPERSEDE if label == "supersedes" else ConflictResolution.KEEP_BOTH,
54
+ reason=reason,
55
+ )
56
+ )
57
+ conflicts.sort(key=lambda item: item.confidence, reverse=True)
58
+ return conflicts
59
+
60
+ def _judge_relationship(self, candidate: MemoryItem, existing: MemoryItem, similarity: float) -> tuple[str, float, str]:
61
+ heuristic_confidence = self._contradiction_confidence(candidate.content, existing.content, similarity)
62
+ heuristic_label = "contradicts" if heuristic_confidence >= 0.55 else "none"
63
+ heuristic_reason = "Heuristic semantic overlap and polarity mismatch."
64
+ if self.llm_client is None or similarity < 0.4:
65
+ return heuristic_label, heuristic_confidence, heuristic_reason
66
+ try:
67
+ response = self.llm_client.generate_json(
68
+ prompt=(
69
+ f"Memory A: {existing.content}\n"
70
+ f"Memory B: {candidate.content}\n"
71
+ "Decide the relationship."
72
+ ),
73
+ schema=CONFLICT_SCHEMA,
74
+ schema_name="memory_conflict_judgement",
75
+ system_prompt=CONFLICT_JUDGE_PROMPT,
76
+ )
77
+ except Exception:
78
+ return heuristic_label, heuristic_confidence, heuristic_reason
79
+ label = str(response.get("label", heuristic_label))
80
+ confidence = float(response.get("confidence", heuristic_confidence))
81
+ reason = str(response.get("reason", heuristic_reason))
82
+ return label, confidence, reason
83
+
84
+ def _contradiction_confidence(self, left: str, right: str, similarity: float) -> float:
85
+ left_norm = self._normalize(left)
86
+ right_norm = self._normalize(right)
87
+ ratio = SequenceMatcher(None, left_norm, right_norm).ratio()
88
+ left_negative = any(marker in left_norm for marker in NEGATION_MARKERS)
89
+ right_negative = any(marker in right_norm for marker in NEGATION_MARKERS)
90
+ polarity_bonus = 0.25 if left_negative != right_negative else 0.0
91
+ preference_bonus = 0.15 if any(marker in left_norm or marker in right_norm for marker in PREFERENCE_MARKERS) else 0.0
92
+ return min(1.0, similarity * 0.45 + ratio * 0.25 + polarity_bonus + preference_bonus)
93
+
94
+ def _normalize(self, text: str) -> str:
95
+ return " ".join(re.findall(r"[\w\u4e00-\u9fff]+", text.lower()))