alma-memory 0.5.0-py3-none-any.whl → 0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. alma/__init__.py +296 -194
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -322
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -264
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -81
  26. alma/graph/backends/__init__.py +32 -18
  27. alma/graph/backends/kuzu.py +624 -0
  28. alma/graph/backends/memgraph.py +432 -0
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -432
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -511
  55. alma/observability/__init__.py +91 -0
  56. alma/observability/config.py +302 -0
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -0
  59. alma/observability/metrics.py +583 -0
  60. alma/observability/tracing.py +440 -0
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -366
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -61
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1048
  80. alma/storage/base.py +1083 -525
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -0
  83. alma/storage/file_based.py +614 -619
  84. alma/storage/migrations/__init__.py +21 -0
  85. alma/storage/migrations/base.py +321 -0
  86. alma/storage/migrations/runner.py +323 -0
  87. alma/storage/migrations/version_stores.py +337 -0
  88. alma/storage/migrations/versions/__init__.py +11 -0
  89. alma/storage/migrations/versions/v1_0_0.py +373 -0
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1452
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1358
  95. alma/testing/__init__.py +46 -0
  96. alma/testing/factories.py +301 -0
  97. alma/testing/mocks.py +389 -0
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/METADATA +244 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.0.dist-info/RECORD +0 -76
  110. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/storage/base.py CHANGED
@@ -1,525 +1,1083 @@
- """
- ALMA Storage Backend Interface.
-
- Abstract base class that all storage backends must implement.
- """
-
- from abc import ABC, abstractmethod
- from datetime import datetime
- from typing import Any, Dict, List, Optional
-
- from alma.types import (
- AntiPattern,
- DomainKnowledge,
- Heuristic,
- Outcome,
- UserPreference,
- )
-
-
- class StorageBackend(ABC):
- """
- Abstract base class for ALMA storage backends.
-
- Implementations:
- - FileBasedStorage: JSON files (testing/fallback)
- - SQLiteStorage: Local SQLite + FAISS vectors
- - AzureCosmosStorage: Production Azure Cosmos DB
- """
-
- # ==================== WRITE OPERATIONS ====================
-
- @abstractmethod
- def save_heuristic(self, heuristic: Heuristic) -> str:
- """Save a heuristic, return its ID."""
- pass
-
- @abstractmethod
- def save_outcome(self, outcome: Outcome) -> str:
- """Save an outcome, return its ID."""
- pass
-
- @abstractmethod
- def save_user_preference(self, preference: UserPreference) -> str:
- """Save a user preference, return its ID."""
- pass
-
- @abstractmethod
- def save_domain_knowledge(self, knowledge: DomainKnowledge) -> str:
- """Save domain knowledge, return its ID."""
- pass
-
- @abstractmethod
- def save_anti_pattern(self, anti_pattern: AntiPattern) -> str:
- """Save an anti-pattern, return its ID."""
- pass
-
- # ==================== BATCH WRITE OPERATIONS ====================
-
- def save_heuristics(self, heuristics: List[Heuristic]) -> List[str]:
- """Save multiple heuristics in a batch. Default implementation calls save_heuristic in a loop."""
- return [self.save_heuristic(h) for h in heuristics]
-
- def save_outcomes(self, outcomes: List[Outcome]) -> List[str]:
- """Save multiple outcomes in a batch. Default implementation calls save_outcome in a loop."""
- return [self.save_outcome(o) for o in outcomes]
-
- def save_domain_knowledge_batch(
- self, knowledge_items: List[DomainKnowledge]
- ) -> List[str]:
- """Save multiple domain knowledge items in a batch. Default implementation calls save_domain_knowledge in a loop."""
- return [self.save_domain_knowledge(k) for k in knowledge_items]
-
- # ==================== READ OPERATIONS ====================
-
- @abstractmethod
- def get_heuristics(
- self,
- project_id: str,
- agent: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- min_confidence: float = 0.0,
- ) -> List[Heuristic]:
- """
- Get heuristics, optionally filtered by agent and similarity.
-
- Args:
- project_id: Project to query
- agent: Filter by agent name
- embedding: Query embedding for semantic search
- top_k: Max results to return
- min_confidence: Minimum confidence threshold
-
- Returns:
- List of matching heuristics
- """
- pass
-
- @abstractmethod
- def get_outcomes(
- self,
- project_id: str,
- agent: Optional[str] = None,
- task_type: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- success_only: bool = False,
- ) -> List[Outcome]:
- """
- Get outcomes, optionally filtered.
-
- Args:
- project_id: Project to query
- agent: Filter by agent name
- task_type: Filter by task type
- embedding: Query embedding for semantic search
- top_k: Max results
- success_only: Only return successful outcomes
-
- Returns:
- List of matching outcomes
- """
- pass
-
- @abstractmethod
- def get_user_preferences(
- self,
- user_id: str,
- category: Optional[str] = None,
- ) -> List[UserPreference]:
- """
- Get user preferences.
-
- Args:
- user_id: User to query
- category: Optional category filter
-
- Returns:
- List of user preferences
- """
- pass
-
- @abstractmethod
- def get_domain_knowledge(
- self,
- project_id: str,
- agent: Optional[str] = None,
- domain: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- ) -> List[DomainKnowledge]:
- """
- Get domain knowledge.
-
- Args:
- project_id: Project to query
- agent: Filter by agent
- domain: Filter by domain
- embedding: Query embedding for semantic search
- top_k: Max results
-
- Returns:
- List of domain knowledge items
- """
- pass
-
- @abstractmethod
- def get_anti_patterns(
- self,
- project_id: str,
- agent: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- ) -> List[AntiPattern]:
- """
- Get anti-patterns.
-
- Args:
- project_id: Project to query
- agent: Filter by agent
- embedding: Query embedding for semantic search
- top_k: Max results
-
- Returns:
- List of anti-patterns
- """
- pass
-
- # ==================== UPDATE OPERATIONS ====================
-
- @abstractmethod
- def update_heuristic(
- self,
- heuristic_id: str,
- updates: Dict[str, Any],
- ) -> bool:
- """
- Update a heuristic's fields.
-
- Args:
- heuristic_id: ID of heuristic to update
- updates: Dict of field->value updates
-
- Returns:
- True if updated, False if not found
- """
- pass
-
- @abstractmethod
- def increment_heuristic_occurrence(
- self,
- heuristic_id: str,
- success: bool,
- ) -> bool:
- """
- Increment heuristic occurrence count.
-
- Args:
- heuristic_id: ID of heuristic
- success: Whether this occurrence was successful
-
- Returns:
- True if updated
- """
- pass
-
- @abstractmethod
- def update_heuristic_confidence(
- self,
- heuristic_id: str,
- new_confidence: float,
- ) -> bool:
- """
- Update a heuristic's confidence value.
-
- Args:
- heuristic_id: ID of heuristic to update
- new_confidence: New confidence value (0.0 - 1.0)
-
- Returns:
- True if updated, False if not found
- """
- pass
-
- @abstractmethod
- def update_knowledge_confidence(
- self,
- knowledge_id: str,
- new_confidence: float,
- ) -> bool:
- """
- Update domain knowledge confidence value.
-
- Args:
- knowledge_id: ID of knowledge to update
- new_confidence: New confidence value (0.0 - 1.0)
-
- Returns:
- True if updated, False if not found
- """
- pass
-
- # ==================== DELETE OPERATIONS ====================
-
- @abstractmethod
- def delete_heuristic(self, heuristic_id: str) -> bool:
- """
- Delete a heuristic by ID.
-
- Args:
- heuristic_id: ID of heuristic to delete
-
- Returns:
- True if deleted, False if not found
- """
- pass
-
- @abstractmethod
- def delete_outcome(self, outcome_id: str) -> bool:
- """
- Delete an outcome by ID.
-
- Args:
- outcome_id: ID of outcome to delete
-
- Returns:
- True if deleted, False if not found
- """
- pass
-
- @abstractmethod
- def delete_domain_knowledge(self, knowledge_id: str) -> bool:
- """
- Delete domain knowledge by ID.
-
- Args:
- knowledge_id: ID of knowledge to delete
-
- Returns:
- True if deleted, False if not found
- """
- pass
-
- @abstractmethod
- def delete_anti_pattern(self, anti_pattern_id: str) -> bool:
- """
- Delete an anti-pattern by ID.
-
- Args:
- anti_pattern_id: ID of anti-pattern to delete
-
- Returns:
- True if deleted, False if not found
- """
- pass
-
- @abstractmethod
- def delete_outcomes_older_than(
- self,
- project_id: str,
- older_than: datetime,
- agent: Optional[str] = None,
- ) -> int:
- """
- Delete old outcomes.
-
- Args:
- project_id: Project to prune
- older_than: Delete outcomes older than this
- agent: Optional agent filter
-
- Returns:
- Number of items deleted
- """
- pass
-
- @abstractmethod
- def delete_low_confidence_heuristics(
- self,
- project_id: str,
- below_confidence: float,
- agent: Optional[str] = None,
- ) -> int:
- """
- Delete low-confidence heuristics.
-
- Args:
- project_id: Project to prune
- below_confidence: Delete below this threshold
- agent: Optional agent filter
-
- Returns:
- Number of items deleted
- """
- pass
-
- # ==================== MULTI-AGENT MEMORY SHARING ====================
-
- def get_heuristics_for_agents(
- self,
- project_id: str,
- agents: List[str],
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- min_confidence: float = 0.0,
- ) -> List[Heuristic]:
- """
- Get heuristics from multiple agents in one call.
-
- This enables multi-agent memory sharing where an agent can
- read memories from agents it inherits from.
-
- Args:
- project_id: Project to query
- agents: List of agent names to query
- embedding: Query embedding for semantic search
- top_k: Max results to return per agent
- min_confidence: Minimum confidence threshold
-
- Returns:
- List of matching heuristics from all specified agents
- """
- # Default implementation: query each agent individually
- results = []
- for agent in agents:
- agent_heuristics = self.get_heuristics(
- project_id=project_id,
- agent=agent,
- embedding=embedding,
- top_k=top_k,
- min_confidence=min_confidence,
- )
- results.extend(agent_heuristics)
- return results
-
- def get_outcomes_for_agents(
- self,
- project_id: str,
- agents: List[str],
- task_type: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- success_only: bool = False,
- ) -> List[Outcome]:
- """
- Get outcomes from multiple agents in one call.
-
- Args:
- project_id: Project to query
- agents: List of agent names to query
- task_type: Filter by task type
- embedding: Query embedding for semantic search
- top_k: Max results to return per agent
- success_only: Only return successful outcomes
-
- Returns:
- List of matching outcomes from all specified agents
- """
- results = []
- for agent in agents:
- agent_outcomes = self.get_outcomes(
- project_id=project_id,
- agent=agent,
- task_type=task_type,
- embedding=embedding,
- top_k=top_k,
- success_only=success_only,
- )
- results.extend(agent_outcomes)
- return results
-
- def get_domain_knowledge_for_agents(
- self,
- project_id: str,
- agents: List[str],
- domain: Optional[str] = None,
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- ) -> List[DomainKnowledge]:
- """
- Get domain knowledge from multiple agents in one call.
-
- Args:
- project_id: Project to query
- agents: List of agent names to query
- domain: Filter by domain
- embedding: Query embedding for semantic search
- top_k: Max results to return per agent
-
- Returns:
- List of matching domain knowledge from all specified agents
- """
- results = []
- for agent in agents:
- agent_knowledge = self.get_domain_knowledge(
- project_id=project_id,
- agent=agent,
- domain=domain,
- embedding=embedding,
- top_k=top_k,
- )
- results.extend(agent_knowledge)
- return results
-
- def get_anti_patterns_for_agents(
- self,
- project_id: str,
- agents: List[str],
- embedding: Optional[List[float]] = None,
- top_k: int = 5,
- ) -> List[AntiPattern]:
- """
- Get anti-patterns from multiple agents in one call.
-
- Args:
- project_id: Project to query
- agents: List of agent names to query
- embedding: Query embedding for semantic search
- top_k: Max results to return per agent
-
- Returns:
- List of matching anti-patterns from all specified agents
- """
- results = []
- for agent in agents:
- agent_patterns = self.get_anti_patterns(
- project_id=project_id,
- agent=agent,
- embedding=embedding,
- top_k=top_k,
- )
- results.extend(agent_patterns)
- return results
-
- # ==================== STATS ====================
-
- @abstractmethod
- def get_stats(
- self,
- project_id: str,
- agent: Optional[str] = None,
- ) -> Dict[str, Any]:
- """
- Get memory statistics.
-
- Returns:
- Dict with counts per memory type, total size, etc.
- """
- pass
-
- # ==================== UTILITY ====================
-
- @classmethod
- @abstractmethod
- def from_config(cls, config: Dict[str, Any]) -> "StorageBackend":
- """
- Create instance from configuration dict.
-
- Args:
- config: Configuration dictionary
-
- Returns:
- Configured storage backend instance
- """
- pass
+ """
+ ALMA Storage Backend Interface.
+
+ Abstract base class that all storage backends must implement.
+
+ v0.6.0 adds workflow context support:
+ - Checkpoint CRUD operations
+ - WorkflowOutcome storage and retrieval
+ - ArtifactRef linking
+ - scope_filter parameter for workflow-scoped queries
+ """
+
+ from abc import ABC, abstractmethod
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ from alma.types import (
+ AntiPattern,
+ DomainKnowledge,
+ Heuristic,
+ Outcome,
+ UserPreference,
+ )
+
+ if TYPE_CHECKING:
+ from alma.session import SessionHandoff
+ from alma.workflow import ArtifactRef, Checkpoint, WorkflowOutcome
+
+
+ class StorageBackend(ABC):
+ """
+ Abstract base class for ALMA storage backends.
+
+ Implementations:
+ - FileBasedStorage: JSON files (testing/fallback)
+ - SQLiteStorage: Local SQLite + FAISS vectors
+ - AzureCosmosStorage: Production Azure Cosmos DB
+ """
+
+ # ==================== WRITE OPERATIONS ====================
+
+ @abstractmethod
+ def save_heuristic(self, heuristic: Heuristic) -> str:
+ """Save a heuristic, return its ID."""
+ pass
+
+ @abstractmethod
+ def save_outcome(self, outcome: Outcome) -> str:
+ """Save an outcome, return its ID."""
+ pass
+
+ @abstractmethod
+ def save_user_preference(self, preference: UserPreference) -> str:
+ """Save a user preference, return its ID."""
+ pass
+
+ @abstractmethod
+ def save_domain_knowledge(self, knowledge: DomainKnowledge) -> str:
+ """Save domain knowledge, return its ID."""
+ pass
+
+ @abstractmethod
+ def save_anti_pattern(self, anti_pattern: AntiPattern) -> str:
+ """Save an anti-pattern, return its ID."""
+ pass
+
+ # ==================== BATCH WRITE OPERATIONS ====================
+
+ def save_heuristics(self, heuristics: List[Heuristic]) -> List[str]:
+ """Save multiple heuristics in a batch. Default implementation calls save_heuristic in a loop."""
+ return [self.save_heuristic(h) for h in heuristics]
+
+ def save_outcomes(self, outcomes: List[Outcome]) -> List[str]:
+ """Save multiple outcomes in a batch. Default implementation calls save_outcome in a loop."""
+ return [self.save_outcome(o) for o in outcomes]
+
+ def save_domain_knowledge_batch(
+ self, knowledge_items: List[DomainKnowledge]
+ ) -> List[str]:
+ """Save multiple domain knowledge items in a batch. Default implementation calls save_domain_knowledge in a loop."""
+ return [self.save_domain_knowledge(k) for k in knowledge_items]
+
+ # ==================== READ OPERATIONS ====================
+
+ @abstractmethod
+ def get_heuristics(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ min_confidence: float = 0.0,
+ scope_filter: Optional[Dict[str, Any]] = None,
+ ) -> List[Heuristic]:
+ """
+ Get heuristics, optionally filtered by agent and similarity.
+
+ Args:
+ project_id: Project to query
+ agent: Filter by agent name
+ embedding: Query embedding for semantic search
+ top_k: Max results to return
+ min_confidence: Minimum confidence threshold
+ scope_filter: Optional workflow scope filter (v0.6.0+)
+ Keys: tenant_id, workflow_id, run_id, node_id
+
+ Returns:
+ List of matching heuristics
+ """
+ pass
+
+ @abstractmethod
+ def get_outcomes(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ task_type: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ success_only: bool = False,
+ scope_filter: Optional[Dict[str, Any]] = None,
+ ) -> List[Outcome]:
+ """
+ Get outcomes, optionally filtered.
+
+ Args:
+ project_id: Project to query
+ agent: Filter by agent name
+ task_type: Filter by task type
+ embedding: Query embedding for semantic search
+ top_k: Max results
+ success_only: Only return successful outcomes
+ scope_filter: Optional workflow scope filter (v0.6.0+)
+ Keys: tenant_id, workflow_id, run_id, node_id
+
+ Returns:
+ List of matching outcomes
+ """
+ pass
+
+ @abstractmethod
+ def get_user_preferences(
+ self,
+ user_id: str,
+ category: Optional[str] = None,
+ ) -> List[UserPreference]:
+ """
+ Get user preferences.
+
+ Args:
+ user_id: User to query
+ category: Optional category filter
+
+ Returns:
+ List of user preferences
+ """
+ pass
+
+ @abstractmethod
+ def get_domain_knowledge(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ domain: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ scope_filter: Optional[Dict[str, Any]] = None,
+ ) -> List[DomainKnowledge]:
+ """
+ Get domain knowledge.
+
+ Args:
+ project_id: Project to query
+ agent: Filter by agent
+ domain: Filter by domain
+ embedding: Query embedding for semantic search
+ top_k: Max results
+ scope_filter: Optional workflow scope filter (v0.6.0+)
+ Keys: tenant_id, workflow_id, run_id, node_id
+
+ Returns:
+ List of domain knowledge items
+ """
+ pass
+
+ @abstractmethod
+ def get_anti_patterns(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ scope_filter: Optional[Dict[str, Any]] = None,
+ ) -> List[AntiPattern]:
+ """
+ Get anti-patterns.
+
+ Args:
+ project_id: Project to query
+ agent: Filter by agent
+ embedding: Query embedding for semantic search
+ top_k: Max results
+ scope_filter: Optional workflow scope filter (v0.6.0+)
+ Keys: tenant_id, workflow_id, run_id, node_id
+
+ Returns:
+ List of anti-patterns
+ """
+ pass
+
+ # ==================== UPDATE OPERATIONS ====================
+
+ @abstractmethod
+ def update_heuristic(
+ self,
+ heuristic_id: str,
+ updates: Dict[str, Any],
+ ) -> bool:
+ """
+ Update a heuristic's fields.
+
+ Args:
+ heuristic_id: ID of heuristic to update
+ updates: Dict of field->value updates
+
+ Returns:
+ True if updated, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def increment_heuristic_occurrence(
+ self,
+ heuristic_id: str,
+ success: bool,
+ ) -> bool:
+ """
+ Increment heuristic occurrence count.
+
+ Args:
+ heuristic_id: ID of heuristic
+ success: Whether this occurrence was successful
+
+ Returns:
+ True if updated
+ """
+ pass
+
+ @abstractmethod
+ def update_heuristic_confidence(
+ self,
+ heuristic_id: str,
+ new_confidence: float,
+ ) -> bool:
+ """
+ Update a heuristic's confidence value.
+
+ Args:
+ heuristic_id: ID of heuristic to update
+ new_confidence: New confidence value (0.0 - 1.0)
+
+ Returns:
+ True if updated, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def update_knowledge_confidence(
+ self,
+ knowledge_id: str,
+ new_confidence: float,
+ ) -> bool:
+ """
+ Update domain knowledge confidence value.
+
+ Args:
+ knowledge_id: ID of knowledge to update
+ new_confidence: New confidence value (0.0 - 1.0)
+
+ Returns:
+ True if updated, False if not found
+ """
+ pass
+
+ # ==================== DELETE OPERATIONS ====================
+
+ @abstractmethod
+ def delete_heuristic(self, heuristic_id: str) -> bool:
+ """
+ Delete a heuristic by ID.
+
+ Args:
+ heuristic_id: ID of heuristic to delete
+
+ Returns:
+ True if deleted, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def delete_outcome(self, outcome_id: str) -> bool:
+ """
+ Delete an outcome by ID.
+
+ Args:
+ outcome_id: ID of outcome to delete
+
+ Returns:
+ True if deleted, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def delete_domain_knowledge(self, knowledge_id: str) -> bool:
+ """
+ Delete domain knowledge by ID.
+
+ Args:
+ knowledge_id: ID of knowledge to delete
+
+ Returns:
+ True if deleted, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def delete_anti_pattern(self, anti_pattern_id: str) -> bool:
+ """
+ Delete an anti-pattern by ID.
+
+ Args:
+ anti_pattern_id: ID of anti-pattern to delete
+
+ Returns:
+ True if deleted, False if not found
+ """
+ pass
+
+ @abstractmethod
+ def delete_outcomes_older_than(
+ self,
+ project_id: str,
+ older_than: datetime,
+ agent: Optional[str] = None,
+ ) -> int:
+ """
+ Delete old outcomes.
+
+ Args:
+ project_id: Project to prune
+ older_than: Delete outcomes older than this
+ agent: Optional agent filter
+
+ Returns:
+ Number of items deleted
+ """
+ pass
+
+ @abstractmethod
+ def delete_low_confidence_heuristics(
+ self,
+ project_id: str,
+ below_confidence: float,
+ agent: Optional[str] = None,
+ ) -> int:
+ """
+ Delete low-confidence heuristics.
+
+ Args:
+ project_id: Project to prune
+ below_confidence: Delete below this threshold
+ agent: Optional agent filter
+
+ Returns:
+ Number of items deleted
+ """
+ pass
+
+ # ==================== MULTI-AGENT MEMORY SHARING ====================
+
+ def get_heuristics_for_agents(
+ self,
+ project_id: str,
+ agents: List[str],
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ min_confidence: float = 0.0,
+ ) -> List[Heuristic]:
+ """
+ Get heuristics from multiple agents in one call.
+
+ This enables multi-agent memory sharing where an agent can
+ read memories from agents it inherits from.
+
+ Args:
+ project_id: Project to query
+ agents: List of agent names to query
+ embedding: Query embedding for semantic search
+ top_k: Max results to return per agent
+ min_confidence: Minimum confidence threshold
+
+ Returns:
+ List of matching heuristics from all specified agents
+ """
+ # Default implementation: query each agent individually
+ results = []
+ for agent in agents:
+ agent_heuristics = self.get_heuristics(
+ project_id=project_id,
+ agent=agent,
+ embedding=embedding,
+ top_k=top_k,
+ min_confidence=min_confidence,
+ )
+ results.extend(agent_heuristics)
+ return results
+
+ def get_outcomes_for_agents(
+ self,
+ project_id: str,
+ agents: List[str],
+ task_type: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ success_only: bool = False,
+ ) -> List[Outcome]:
+ """
+ Get outcomes from multiple agents in one call.
+
+ Args:
+ project_id: Project to query
+ agents: List of agent names to query
+ task_type: Filter by task type
+ embedding: Query embedding for semantic search
+ top_k: Max results to return per agent
+ success_only: Only return successful outcomes
+
+ Returns:
+ List of matching outcomes from all specified agents
+ """
+ results = []
+ for agent in agents:
+ agent_outcomes = self.get_outcomes(
+ project_id=project_id,
+ agent=agent,
+ task_type=task_type,
+ embedding=embedding,
+ top_k=top_k,
+ success_only=success_only,
+ )
+ results.extend(agent_outcomes)
+ return results
+
+ def get_domain_knowledge_for_agents(
+ self,
+ project_id: str,
+ agents: List[str],
+ domain: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ ) -> List[DomainKnowledge]:
+ """
+ Get domain knowledge from multiple agents in one call.
+
+ Args:
+ project_id: Project to query
+ agents: List of agent names to query
+ domain: Filter by domain
+ embedding: Query embedding for semantic search
+ top_k: Max results to return per agent
+
+ Returns:
+ List of matching domain knowledge from all specified agents
+ """
+ results = []
+ for agent in agents:
+ agent_knowledge = self.get_domain_knowledge(
+ project_id=project_id,
+ agent=agent,
+ domain=domain,
+ embedding=embedding,
+ top_k=top_k,
+ )
+ results.extend(agent_knowledge)
+ return results
+
+ def get_anti_patterns_for_agents(
+ self,
+ project_id: str,
+ agents: List[str],
+ embedding: Optional[List[float]] = None,
+ top_k: int = 5,
+ ) -> List[AntiPattern]:
+ """
+ Get anti-patterns from multiple agents in one call.
+
+ Args:
+ project_id: Project to query
+ agents: List of agent names to query
+ embedding: Query embedding for semantic search
+ top_k: Max results to return per agent
+
+ Returns:
+ List of matching anti-patterns from all specified agents
+ """
+ results = []
+ for agent in agents:
+ agent_patterns = self.get_anti_patterns(
+ project_id=project_id,
+ agent=agent,
+ embedding=embedding,
+ top_k=top_k,
+ )
+ results.extend(agent_patterns)
+ return results
+
+ # ==================== STATS ====================
+
+ @abstractmethod
+ def get_stats(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ """
+ Get memory statistics.
+
+ Returns:
+ Dict with counts per memory type, total size, etc.
+ """
+ pass
+
+ # ==================== MIGRATION SUPPORT ====================
+
+ def get_schema_version(self) -> Optional[str]:
+ """
+ Get the current schema version.
+
+ Returns:
+ Current schema version string, or None if not tracked
+ """
+ # Default implementation returns None (no version tracking)
+ return None
+
+ def get_migration_status(self) -> Dict[str, Any]:
+ """
+ Get migration status information.
+
+ Returns:
+ Dict with current version, pending migrations, etc.
+ """
+ return {
+ "current_version": self.get_schema_version(),
+ "target_version": None,
+ "pending_count": 0,
+ "pending_versions": [],
+ "needs_migration": False,
+ "migration_supported": False,
+ }
+
+ def migrate(
+ self, target_version: Optional[str] = None, dry_run: bool = False
+ ) -> List[str]:
+ """
+ Apply pending schema migrations.
+
+ Args:
+ target_version: Optional target version (applies all if not specified)
+ dry_run: If True, show what would be done without making changes
+
+ Returns:
+ List of applied migration versions
+ """
+ # Default implementation does nothing
+ return []
+
+ def rollback(self, target_version: str, dry_run: bool = False) -> List[str]:
+ """
+ Roll back schema to a previous version.
+
+ Args:
+ target_version: Version to roll back to
+ dry_run: If True, show what would be done without making changes
+
+ Returns:
+ List of rolled back migration versions
+ """
+ # Default implementation does nothing
+ return []
+
+ # ==================== CHECKPOINT OPERATIONS (v0.6.0+) ====================
+
+ def save_checkpoint(self, checkpoint: "Checkpoint") -> str:
+ """
+ Save a workflow checkpoint.
+
+ Args:
+ checkpoint: Checkpoint to save
+
+ Returns:
+ The checkpoint ID
+
+ Note: Default implementation raises NotImplementedError.
+ Backends should override for workflow support.
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support checkpoints. "
+ "Use SQLiteStorage or PostgreSQLStorage for workflow features."
+ )
+
+ def get_checkpoint(self, checkpoint_id: str) -> Optional["Checkpoint"]:
+ """
+ Get a checkpoint by ID.
+
+ Args:
+ checkpoint_id: The checkpoint ID
+
+ Returns:
+ The checkpoint, or None if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support checkpoints."
+ )
+
+ def get_latest_checkpoint(
+ self,
+ run_id: str,
+ branch_id: Optional[str] = None,
+ ) -> Optional["Checkpoint"]:
+ """
+ Get the most recent checkpoint for a workflow run.
+
+ Args:
+ run_id: The workflow run identifier
+ branch_id: Optional branch to filter by
+
+ Returns:
+ The latest checkpoint, or None if no checkpoints exist
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support checkpoints."
+ )
+
+ def get_checkpoints_for_run(
+ self,
+ run_id: str,
+ branch_id: Optional[str] = None,
+ limit: int = 100,
+ ) -> List["Checkpoint"]:
+ """
+ Get all checkpoints for a workflow run.
+
+ Args:
+ run_id: The workflow run identifier
+ branch_id: Optional branch filter
+ limit: Maximum checkpoints to return
+
+ Returns:
+ List of checkpoints ordered by sequence number
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support checkpoints."
+ )
+
+ def cleanup_checkpoints(
+ self,
+ run_id: str,
+ keep_latest: int = 1,
+ ) -> int:
+ """
+ Clean up old checkpoints for a completed run.
+
+ Args:
+ run_id: The workflow run identifier
+ keep_latest: Number of latest checkpoints to keep
+
+ Returns:
+ Number of checkpoints deleted
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support checkpoints."
+ )
+
+ # ==================== WORKFLOW OUTCOME OPERATIONS (v0.6.0+) ====================
+
+ def save_workflow_outcome(self, outcome: "WorkflowOutcome") -> str:
+ """
+ Save a workflow outcome.
+
+ Args:
+ outcome: WorkflowOutcome to save
+
+ Returns:
+ The outcome ID
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support workflow outcomes."
+ )
+
+ def get_workflow_outcome(self, outcome_id: str) -> Optional["WorkflowOutcome"]:
+ """
+ Get a workflow outcome by ID.
+
+ Args:
+ outcome_id: The outcome ID
+
+ Returns:
+ The workflow outcome, or None if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support workflow outcomes."
+ )
+
+ def get_workflow_outcomes(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ workflow_id: Optional[str] = None,
+ embedding: Optional[List[float]] = None,
+ top_k: int = 10,
+ scope_filter: Optional[Dict[str, Any]] = None,
+ ) -> List["WorkflowOutcome"]:
+ """
+ Get workflow outcomes with optional filtering.
+
+ Args:
+ project_id: Project to query
+ agent: Filter by agent
+ workflow_id: Filter by workflow definition
+ embedding: Query embedding for semantic search
+ top_k: Max results
+ scope_filter: Optional workflow scope filter
+
+ Returns:
+ List of matching workflow outcomes
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support workflow outcomes."
+ )
+
+ # ==================== ARTIFACT LINK OPERATIONS (v0.6.0+) ====================
+
+ def save_artifact_link(self, artifact_ref: "ArtifactRef") -> str:
+ """
+ Save an artifact reference linked to a memory.
+
+ Args:
+ artifact_ref: ArtifactRef to save
+
+ Returns:
+ The artifact reference ID
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support artifact links."
+ )
+
+ def get_artifact_links(
+ self,
+ memory_id: str,
+ ) -> List["ArtifactRef"]:
+ """
+ Get all artifact references linked to a memory.
+
+ Args:
+ memory_id: The memory ID to get artifacts for
+
+ Returns:
+ List of artifact references
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support artifact links."
+ )
+
+ def delete_artifact_link(self, artifact_id: str) -> bool:
+ """
+ Delete an artifact reference.
+
+ Args:
+ artifact_id: The artifact reference ID
+
+ Returns:
+ True if deleted, False if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support artifact links."
+ )
+
+ # ==================== SESSION HANDOFFS ====================
+
+ def save_session_handoff(self, handoff: "SessionHandoff") -> str:
+ """
+ Save a session handoff for persistence across restarts.
+
+ Args:
+ handoff: SessionHandoff to save
+
+ Returns:
+ The handoff ID
+
+ Note: Default implementation raises NotImplementedError.
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support session handoffs."
+ )
+
+ def get_session_handoffs(
+ self,
+ project_id: str,
+ agent: str,
+ limit: int = 50,
+ ) -> List["SessionHandoff"]:
+ """
+ Get session handoffs for an agent, most recent first.
+
+ Args:
+ project_id: Project identifier
+ agent: Agent identifier
+ limit: Maximum number of handoffs to return
+
+ Returns:
+ List of SessionHandoff, most recent first
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support session handoffs."
+ )
+
+ def get_latest_session_handoff(
+ self,
+ project_id: str,
+ agent: str,
+ ) -> Optional["SessionHandoff"]:
+ """
+ Get the most recent session handoff for an agent.
+
+ Args:
+ project_id: Project identifier
+ agent: Agent identifier
+
+ Returns:
+ Most recent SessionHandoff or None
+ """
+ handoffs = self.get_session_handoffs(project_id, agent, limit=1)
+ return handoffs[0] if handoffs else None
+
+ def delete_session_handoffs(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ ) -> int:
+ """
+ Delete session handoffs.
+
+ Args:
+ project_id: Project identifier
+ agent: If provided, only delete for this agent
+
+ Returns:
+ Number of handoffs deleted
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support session handoffs."
+ )
+
+ # ==================== MEMORY STRENGTH OPERATIONS (v0.7.0+) ====================
+
+ def save_memory_strength(self, strength: Any) -> str:
+ """
+ Save or update a memory strength record.
+
+ Args:
+ strength: MemoryStrength instance to save
+
+ Returns:
+ The memory ID
+
+ Note: Default implementation raises NotImplementedError.
+ Backends should override for decay-based forgetting support.
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory strength tracking. "
+ "Use SQLiteStorage for decay-based forgetting features."
+ )
+
+ def get_memory_strength(self, memory_id: str) -> Optional[Any]:
+ """
+ Get a memory strength record by memory ID.
+
+ Args:
+ memory_id: The memory ID to look up
+
+ Returns:
+ MemoryStrength instance, or None if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory strength tracking."
+ )
+
+ def get_all_memory_strengths(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ ) -> List[Any]:
+ """
+ Get all memory strength records for a project/agent.
+
+ Args:
+ project_id: Project to query
+ agent: Optional agent filter
+
+ Returns:
+ List of MemoryStrength instances
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory strength tracking."
+ )
+
+ def delete_memory_strength(self, memory_id: str) -> bool:
+ """
+ Delete a memory strength record.
+
+ Args:
+ memory_id: The memory ID
+
+ Returns:
+ True if deleted, False if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory strength tracking."
+ )
+
+ # ==================== ARCHIVE OPERATIONS (v0.7.0+) ====================
+
+ def archive_memory(
+ self,
+ memory_id: str,
+ memory_type: str,
+ reason: str,
+ final_strength: float,
+ ) -> Any:
+ """
+ Archive a memory before deletion.
+
+ Captures full memory data including content, embedding, and metadata
+ for potential future recovery or compliance auditing.
+
+ Args:
+ memory_id: ID of the memory to archive
+ memory_type: Type of memory (heuristic, outcome, etc.)
+ reason: Why being archived (decay, manual, consolidation, etc.)
+ final_strength: Memory strength at time of archival
+
+ Returns:
+ ArchivedMemory instance
+
+ Note: Default implementation raises NotImplementedError.
+ Backends should override for archive support.
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving. "
+ "Use SQLiteStorage for archive features."
+ )
+
+ def get_archive(self, archive_id: str) -> Optional[Any]:
+ """
+ Get an archived memory by its archive ID.
+
+ Args:
+ archive_id: The archive ID
+
+ Returns:
+ ArchivedMemory instance, or None if not found
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving."
+ )
+
+ def list_archives(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ reason: Optional[str] = None,
+ memory_type: Optional[str] = None,
+ older_than: Optional[datetime] = None,
+ younger_than: Optional[datetime] = None,
+ include_restored: bool = False,
+ limit: int = 100,
+ ) -> List[Any]:
+ """
+ List archived memories with filtering.
+
+ Args:
+ project_id: Project to query
+ agent: Optional agent filter
+ reason: Optional archive reason filter
+ memory_type: Optional memory type filter
+ older_than: Optional filter for archives older than this time
+ younger_than: Optional filter for archives younger than this time
+ include_restored: Whether to include archives that have been restored
+ limit: Maximum number of archives to return
+
+ Returns:
+ List of ArchivedMemory instances
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving."
+ )
+
+ def restore_from_archive(self, archive_id: str) -> str:
+ """
+ Restore an archived memory, creating a new memory from archive data.
+
+ The original archive is marked as restored but retained for audit purposes.
+
+ Args:
+ archive_id: The archive ID to restore
+
+ Returns:
+ New memory ID of the restored memory
+
+ Raises:
+ ValueError: If archive not found or already restored
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving."
+ )
+
+ def purge_archives(
+ self,
+ older_than: datetime,
+ project_id: Optional[str] = None,
+ reason: Optional[str] = None,
+ ) -> int:
+ """
+ Permanently delete archived memories.
+
+ This is a destructive operation - archives cannot be recovered after purging.
+
+ Args:
+ older_than: Delete archives older than this datetime
+ project_id: Optional project filter
+ reason: Optional reason filter
+
+ Returns:
+ Number of archives permanently deleted
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving."
+ )
+
+ def get_archive_stats(
+ self,
+ project_id: str,
+ agent: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ """
+ Get statistics about archived memories.
+
+ Args:
+ project_id: Project to query
+ agent: Optional agent filter
+
+ Returns:
+ Dict with archive statistics (counts, by reason, by type, etc.)
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} does not support memory archiving."
+ )
+
+ # ==================== UTILITY ====================
+
+ @classmethod
+ @abstractmethod
+ def from_config(cls, config: Dict[str, Any]) -> "StorageBackend":
+ """
+ Create instance from configuration dict.
+
+ Args:
+ config: Configuration dictionary
+
+ Returns:
+ Configured storage backend instance
+ """
+ pass
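
The new surface area is easiest to see from the call side. The following is a minimal sketch, not part of the package: it assumes a concrete backend instance (`storage`) of some StorageBackend implementation, the agent name "planner" and the run ID are invented, and the scope_filter keys follow the docstrings above (tenant_id, workflow_id, run_id, node_id).

# Sketch: exercising the v0.6.0+ scope_filter and checkpoint hooks.
# `storage` is any concrete StorageBackend; IDs and values are hypothetical.
from alma.storage.base import StorageBackend


def latest_state_for_run(storage: StorageBackend, project_id: str, run_id: str) -> dict:
    # Workflow-scoped read: only memories tagged with this run are returned.
    heuristics = storage.get_heuristics(
        project_id=project_id,
        agent="planner",                  # hypothetical agent name
        top_k=5,
        scope_filter={"run_id": run_id},  # keys per the docstrings above
    )

    # Checkpoint read: backends without workflow support raise
    # NotImplementedError by default, so guard for it.
    try:
        checkpoint = storage.get_latest_checkpoint(run_id=run_id)
    except NotImplementedError:
        checkpoint = None

    return {"heuristics": heuristics, "checkpoint": checkpoint}

On backends that do implement the hooks (the NotImplementedError messages point at SQLiteStorage and PostgreSQLStorage), the same call returns the stored checkpoint instead of falling back to None.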