alma_memory-0.5.0-py3-none-any.whl → alma_memory-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. alma/__init__.py +296 -194
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -322
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -264
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -81
  26. alma/graph/backends/__init__.py +32 -18
  27. alma/graph/backends/kuzu.py +624 -0
  28. alma/graph/backends/memgraph.py +432 -0
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -432
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -511
  55. alma/observability/__init__.py +91 -0
  56. alma/observability/config.py +302 -0
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -0
  59. alma/observability/metrics.py +583 -0
  60. alma/observability/tracing.py +440 -0
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -366
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -61
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1048
  80. alma/storage/base.py +1083 -525
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -0
  83. alma/storage/file_based.py +614 -619
  84. alma/storage/migrations/__init__.py +21 -0
  85. alma/storage/migrations/base.py +321 -0
  86. alma/storage/migrations/runner.py +323 -0
  87. alma/storage/migrations/version_stores.py +337 -0
  88. alma/storage/migrations/versions/__init__.py +11 -0
  89. alma/storage/migrations/versions/v1_0_0.py +373 -0
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1452
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1358
  95. alma/testing/__init__.py +46 -0
  96. alma/testing/factories.py +301 -0
  97. alma/testing/mocks.py +389 -0
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/METADATA +244 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.0.dist-info/RECORD +0 -76
  110. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/core.py CHANGED
@@ -1,322 +1,1189 @@
- """
- ALMA Core - Main interface for the Agent Learning Memory Architecture.
- """
-
- import logging
- from typing import Any, Dict, Optional
-
- from alma.config.loader import ConfigLoader
- from alma.learning.protocols import LearningProtocol
- from alma.retrieval.engine import RetrievalEngine
- from alma.storage.base import StorageBackend
- from alma.types import (
-     DomainKnowledge,
-     MemoryScope,
-     MemorySlice,
-     UserPreference,
- )
-
- logger = logging.getLogger(__name__)
-
-
- class ALMA:
-     """
-     Agent Learning Memory Architecture - Main Interface.
-
-     Provides methods for:
-     - Retrieving relevant memories for a task
-     - Learning from task outcomes
-     - Managing agent memory scopes
-     """
-
-     def __init__(
-         self,
-         storage: StorageBackend,
-         retrieval_engine: RetrievalEngine,
-         learning_protocol: LearningProtocol,
-         scopes: Dict[str, MemoryScope],
-         project_id: str,
-     ):
-         self.storage = storage
-         self.retrieval = retrieval_engine
-         self.learning = learning_protocol
-         self.scopes = scopes
-         self.project_id = project_id
-
-     @classmethod
-     def from_config(cls, config_path: str) -> "ALMA":
-         """
-         Initialize ALMA from a configuration file.
-
-         Args:
-             config_path: Path to .alma/config.yaml
-
-         Returns:
-             Configured ALMA instance
-         """
-         config = ConfigLoader.load(config_path)
-
-         # Initialize storage backend based on config
-         storage = cls._create_storage(config)
-
-         # Initialize retrieval engine
-         retrieval = RetrievalEngine(
-             storage=storage,
-             embedding_provider=config.get("embedding_provider", "local"),
-         )
-
-         # Initialize learning protocol
-         learning = LearningProtocol(
-             storage=storage,
-             scopes={
-                 name: MemoryScope(
-                     agent_name=name,
-                     can_learn=scope.get("can_learn", []),
-                     cannot_learn=scope.get("cannot_learn", []),
-                     min_occurrences_for_heuristic=scope.get(
-                         "min_occurrences_for_heuristic", 3
-                     ),
-                 )
-                 for name, scope in config.get("agents", {}).items()
-             },
-         )
-
-         # Build scopes dict
-         scopes = {
-             name: MemoryScope(
-                 agent_name=name,
-                 can_learn=scope.get("can_learn", []),
-                 cannot_learn=scope.get("cannot_learn", []),
-                 min_occurrences_for_heuristic=scope.get(
-                     "min_occurrences_for_heuristic", 3
-                 ),
-             )
-             for name, scope in config.get("agents", {}).items()
-         }
-
-         return cls(
-             storage=storage,
-             retrieval_engine=retrieval,
-             learning_protocol=learning,
-             scopes=scopes,
-             project_id=config.get("project_id", "default"),
-         )
-
-     @staticmethod
-     def _create_storage(config: Dict[str, Any]) -> StorageBackend:
-         """Create appropriate storage backend based on config."""
-         storage_type = config.get("storage", "file")
-
-         if storage_type == "azure":
-             from alma.storage.azure_cosmos import AzureCosmosStorage
-
-             return AzureCosmosStorage.from_config(config)
-         elif storage_type == "postgres":
-             from alma.storage.postgresql import PostgreSQLStorage
-
-             return PostgreSQLStorage.from_config(config)
-         elif storage_type == "sqlite":
-             from alma.storage.sqlite_local import SQLiteStorage
-
-             return SQLiteStorage.from_config(config)
-         else:
-             from alma.storage.file_based import FileBasedStorage
-
-             return FileBasedStorage.from_config(config)
-
-     def retrieve(
-         self,
-         task: str,
-         agent: str,
-         user_id: Optional[str] = None,
-         top_k: int = 5,
-     ) -> MemorySlice:
-         """
-         Retrieve relevant memories for a task.
-
-         Args:
-             task: Description of the task to perform
-             agent: Name of the agent requesting memories
-             user_id: Optional user ID for preference retrieval
-             top_k: Maximum items per memory type
-
-         Returns:
-             MemorySlice with relevant memories for context injection
-         """
-         # Validate agent has a defined scope
-         if agent not in self.scopes:
-             logger.warning(f"Agent '{agent}' has no defined scope, using defaults")
-
-         return self.retrieval.retrieve(
-             query=task,
-             agent=agent,
-             project_id=self.project_id,
-             user_id=user_id,
-             top_k=top_k,
-             scope=self.scopes.get(agent),
-         )
-
-     def learn(
-         self,
-         agent: str,
-         task: str,
-         outcome: str,  # "success" or "failure"
-         strategy_used: str,
-         task_type: Optional[str] = None,
-         duration_ms: Optional[int] = None,
-         error_message: Optional[str] = None,
-         feedback: Optional[str] = None,
-     ) -> bool:
-         """
-         Learn from a task outcome.
-
-         Validates that learning is within agent's scope before committing.
-         Invalidates cache after learning to ensure fresh retrieval results.
-
-         Args:
-             agent: Name of the agent that executed the task
-             task: Description of the task
-             outcome: "success" or "failure"
-             strategy_used: What approach was taken
-             task_type: Category of task (for grouping)
-             duration_ms: How long the task took
-             error_message: Error details if failed
-             feedback: User feedback if provided
-
-         Returns:
-             True if learning was accepted, False if rejected (scope violation)
-         """
-         result = self.learning.learn(
-             agent=agent,
-             project_id=self.project_id,
-             task=task,
-             outcome=outcome == "success",
-             strategy_used=strategy_used,
-             task_type=task_type,
-             duration_ms=duration_ms,
-             error_message=error_message,
-             feedback=feedback,
-         )
-
-         # Invalidate cache for this agent/project after learning
-         if result:
-             self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
-
-         return result
-
-     def add_user_preference(
-         self,
-         user_id: str,
-         category: str,
-         preference: str,
-         source: str = "explicit_instruction",
-     ) -> UserPreference:
-         """
-         Add a user preference to memory.
-
-         Args:
-             user_id: User identifier
-             category: Category (communication, code_style, workflow)
-             preference: The preference text
-             source: How this was learned
-
-         Returns:
-             The created UserPreference
-         """
-         result = self.learning.add_preference(
-             user_id=user_id,
-             category=category,
-             preference=preference,
-             source=source,
-         )
-
-         # Invalidate cache for project (user preferences affect all agents)
-         self.retrieval.invalidate_cache(project_id=self.project_id)
-
-         return result
-
-     def add_domain_knowledge(
-         self,
-         agent: str,
-         domain: str,
-         fact: str,
-         source: str = "user_stated",
-     ) -> Optional[DomainKnowledge]:
-         """
-         Add domain knowledge within agent's scope.
-
-         Args:
-             agent: Agent this knowledge belongs to
-             domain: Knowledge domain
-             fact: The fact to remember
-             source: How this was learned
-
-         Returns:
-             The created DomainKnowledge or None if scope violation
-         """
-         # Check scope
-         scope = self.scopes.get(agent)
-         if scope and not scope.is_allowed(domain):
-             logger.warning(f"Agent '{agent}' not allowed to learn in domain '{domain}'")
-             return None
-
-         result = self.learning.add_domain_knowledge(
-             agent=agent,
-             project_id=self.project_id,
-             domain=domain,
-             fact=fact,
-             source=source,
-         )
-
-         # Invalidate cache for this agent/project after adding knowledge
-         if result:
-             self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
-
-         return result
-
-     def forget(
-         self,
-         agent: Optional[str] = None,
-         older_than_days: int = 90,
-         below_confidence: float = 0.3,
-     ) -> int:
-         """
-         Prune stale or low-confidence memories.
-
-         Invalidates cache after pruning to ensure fresh retrieval results.
-
-         Args:
-             agent: Specific agent to prune, or None for all
-             older_than_days: Remove outcomes older than this
-             below_confidence: Remove heuristics below this confidence
-
-         Returns:
-             Number of items pruned
-         """
-         count = self.learning.forget(
-             project_id=self.project_id,
-             agent=agent,
-             older_than_days=older_than_days,
-             below_confidence=below_confidence,
-         )
-
-         # Invalidate cache after forgetting (memories were removed)
-         if count > 0:
-             self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
-
-         return count
-
-     def get_stats(self, agent: Optional[str] = None) -> Dict[str, Any]:
-         """
-         Get memory statistics.
-
-         Args:
-             agent: Specific agent or None for all
-
-         Returns:
-             Dict with counts and metadata
-         """
-         return self.storage.get_stats(
-             project_id=self.project_id,
-             agent=agent,
-         )
+ """
+ ALMA Core - Main interface for the Agent Learning Memory Architecture.
+
+ API Return Type Conventions:
+     - Create operations: Return created object or raise exception
+     - Update operations: Return updated object or raise exception
+     - Delete operations: Return bool (success) or int (count), raise on failure
+     - Query operations: Return list (empty if none) or object
+
+ All scope violations raise ScopeViolationError for consistent error handling.
+
+ Async API:
+     ALMA provides both synchronous and asynchronous APIs. The async variants
+     (async_retrieve, async_learn, etc.) use asyncio.to_thread() to run
+     blocking storage operations in a thread pool, enabling better concurrency
+     in async applications without blocking the event loop.
+
+ Workflow Integration (v0.6.0):
+     ALMA supports AGtestari workflow integration with:
+     - Checkpoints: Crash recovery and state persistence
+     - Workflow Outcomes: Learning from completed workflows
+     - Artifact Links: Connecting external files to memories
+     - Scoped Retrieval: Filtering by workflow context
+ """
+
+ import asyncio
+ import logging
+ import time
+ from typing import Any, Dict, List, Optional
+
+ from alma.config.loader import ConfigLoader
+ from alma.exceptions import ScopeViolationError
+ from alma.learning.protocols import LearningProtocol
+ from alma.observability.logging import get_logger
+ from alma.observability.metrics import get_metrics
+ from alma.observability.tracing import SpanKind, get_tracer, trace_method
+ from alma.retrieval.engine import RetrievalEngine
+ from alma.storage.base import StorageBackend
+ from alma.types import (
+     DomainKnowledge,
+     MemoryScope,
+     MemorySlice,
+     Outcome,
+     UserPreference,
+ )
+ from alma.workflow import (
+     ArtifactRef,
+     ArtifactType,
+     Checkpoint,
+     CheckpointManager,
+     ReducerConfig,
+     RetrievalScope,
+     StateMerger,
+     WorkflowContext,
+     WorkflowOutcome,
+     WorkflowResult,
+ )
+
+ logger = logging.getLogger(__name__)
+ structured_logger = get_logger(__name__)
+ tracer = get_tracer(__name__)
+
+
+ class ALMA:
+     """
+     Agent Learning Memory Architecture - Main Interface.
+
+     Provides methods for:
+     - Retrieving relevant memories for a task
+     - Learning from task outcomes
+     - Managing agent memory scopes
+     """
+
+     def __init__(
+         self,
+         storage: StorageBackend,
+         retrieval_engine: RetrievalEngine,
+         learning_protocol: LearningProtocol,
+         scopes: Dict[str, MemoryScope],
+         project_id: str,
+     ):
+         self.storage = storage
+         self.retrieval = retrieval_engine
+         self.learning = learning_protocol
+         self.scopes = scopes
+         self.project_id = project_id
+
+     @classmethod
+     def from_config(cls, config_path: str) -> "ALMA":
+         """
+         Initialize ALMA from a configuration file.
+
+         Args:
+             config_path: Path to .alma/config.yaml
+
+         Returns:
+             Configured ALMA instance
+         """
+         config = ConfigLoader.load(config_path)
+
+         # Initialize storage backend based on config
+         storage = cls._create_storage(config)
+
+         # Initialize retrieval engine
+         retrieval = RetrievalEngine(
+             storage=storage,
+             embedding_provider=config.get("embedding_provider", "local"),
+         )
+
+         # Initialize learning protocol
+         learning = LearningProtocol(
+             storage=storage,
+             scopes={
+                 name: MemoryScope(
+                     agent_name=name,
+                     can_learn=scope.get("can_learn", []),
+                     cannot_learn=scope.get("cannot_learn", []),
+                     min_occurrences_for_heuristic=scope.get(
+                         "min_occurrences_for_heuristic", 3
+                     ),
+                 )
+                 for name, scope in config.get("agents", {}).items()
+             },
+         )
+
+         # Build scopes dict
+         scopes = {
+             name: MemoryScope(
+                 agent_name=name,
+                 can_learn=scope.get("can_learn", []),
+                 cannot_learn=scope.get("cannot_learn", []),
+                 min_occurrences_for_heuristic=scope.get(
+                     "min_occurrences_for_heuristic", 3
+                 ),
+             )
+             for name, scope in config.get("agents", {}).items()
+         }
+
+         return cls(
+             storage=storage,
+             retrieval_engine=retrieval,
+             learning_protocol=learning,
+             scopes=scopes,
+             project_id=config.get("project_id", "default"),
+         )
+
+     @staticmethod
+     def _create_storage(config: Dict[str, Any]) -> StorageBackend:
+         """Create appropriate storage backend based on config."""
+         storage_type = config.get("storage", "file")
+
+         if storage_type == "azure":
+             from alma.storage.azure_cosmos import AzureCosmosStorage
+
+             return AzureCosmosStorage.from_config(config)
+         elif storage_type == "postgres":
+             from alma.storage.postgresql import PostgreSQLStorage
+
+             return PostgreSQLStorage.from_config(config)
+         elif storage_type == "sqlite":
+             from alma.storage.sqlite_local import SQLiteStorage
+
+             return SQLiteStorage.from_config(config)
+         else:
+             from alma.storage.file_based import FileBasedStorage
+
+             return FileBasedStorage.from_config(config)
+
+     @trace_method(name="ALMA.retrieve", kind=SpanKind.INTERNAL)
+     def retrieve(
+         self,
+         task: str,
+         agent: str,
+         user_id: Optional[str] = None,
+         top_k: int = 5,
+     ) -> MemorySlice:
+         """
+         Retrieve relevant memories for a task.
+
+         Args:
+             task: Description of the task to perform
+             agent: Name of the agent requesting memories
+             user_id: Optional user ID for preference retrieval
+             top_k: Maximum items per memory type
+
+         Returns:
+             MemorySlice with relevant memories for context injection
+         """
+         start_time = time.time()
+         metrics = get_metrics()
+
+         # Validate agent has a defined scope
+         if agent not in self.scopes:
+             structured_logger.warning(
+                 f"Agent '{agent}' has no defined scope, using defaults",
+                 agent=agent,
+                 project_id=self.project_id,
+             )
+
+         result = self.retrieval.retrieve(
+             query=task,
+             agent=agent,
+             project_id=self.project_id,
+             user_id=user_id,
+             top_k=top_k,
+             scope=self.scopes.get(agent),
+         )
+
+         # Record metrics
+         duration_ms = (time.time() - start_time) * 1000
+         cache_hit = result.retrieval_time_ms < 10  # Approximate cache hit detection
+         metrics.record_retrieve_latency(
+             duration_ms=duration_ms,
+             agent=agent,
+             project_id=self.project_id,
+             cache_hit=cache_hit,
+             items_returned=result.total_items,
+         )
+
+         structured_logger.info(
+             "Memory retrieval completed",
+             agent=agent,
+             project_id=self.project_id,
+             task_preview=task[:50] if task else "",
+             items_returned=result.total_items,
+             duration_ms=duration_ms,
+             cache_hit=cache_hit,
+         )
+
+         return result
+
+     @trace_method(name="ALMA.learn", kind=SpanKind.INTERNAL)
+     def learn(
+         self,
+         agent: str,
+         task: str,
+         outcome: str,  # "success" or "failure"
+         strategy_used: str,
+         task_type: Optional[str] = None,
+         duration_ms: Optional[int] = None,
+         error_message: Optional[str] = None,
+         feedback: Optional[str] = None,
+     ) -> Outcome:
+         """
+         Learn from a task outcome.
+
+         Validates that learning is within agent's scope before committing.
+         Invalidates cache after learning to ensure fresh retrieval results.
+
+         Args:
+             agent: Name of the agent that executed the task
+             task: Description of the task
+             outcome: "success" or "failure"
+             strategy_used: What approach was taken
+             task_type: Category of task (for grouping)
+             duration_ms: How long the task took
+             error_message: Error details if failed
+             feedback: User feedback if provided
+
+         Returns:
+             The created Outcome record
+
+         Raises:
+             ScopeViolationError: If learning is outside agent's scope
+         """
+         start_time = time.time()
+         metrics = get_metrics()
+
+         outcome_record = self.learning.learn(
+             agent=agent,
+             project_id=self.project_id,
+             task=task,
+             outcome=outcome == "success",
+             strategy_used=strategy_used,
+             task_type=task_type,
+             duration_ms=duration_ms,
+             error_message=error_message,
+             feedback=feedback,
+         )
+
+         # Invalidate cache for this agent/project after learning
+         self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
+
+         # Record metrics
+         learn_duration_ms = (time.time() - start_time) * 1000
+         metrics.record_learn_operation(
+             duration_ms=learn_duration_ms,
+             agent=agent,
+             project_id=self.project_id,
+             memory_type="outcome",
+             success=True,
+         )
+
+         structured_logger.info(
+             "Learning operation completed",
+             agent=agent,
+             project_id=self.project_id,
+             task_type=task_type,
+             outcome=outcome,
+             duration_ms=learn_duration_ms,
+         )
+
+         return outcome_record
+
+     def add_user_preference(
+         self,
+         user_id: str,
+         category: str,
+         preference: str,
+         source: str = "explicit_instruction",
+     ) -> UserPreference:
+         """
+         Add a user preference to memory.
+
+         Args:
+             user_id: User identifier
+             category: Category (communication, code_style, workflow)
+             preference: The preference text
+             source: How this was learned
+
+         Returns:
+             The created UserPreference
+         """
+         result = self.learning.add_preference(
+             user_id=user_id,
+             category=category,
+             preference=preference,
+             source=source,
+         )
+
+         # Invalidate cache for project (user preferences affect all agents)
+         self.retrieval.invalidate_cache(project_id=self.project_id)
+
+         return result
+
+     def add_domain_knowledge(
+         self,
+         agent: str,
+         domain: str,
+         fact: str,
+         source: str = "user_stated",
+     ) -> DomainKnowledge:
+         """
+         Add domain knowledge within agent's scope.
+
+         Args:
+             agent: Agent this knowledge belongs to
+             domain: Knowledge domain
+             fact: The fact to remember
+             source: How this was learned
+
+         Returns:
+             The created DomainKnowledge
+
+         Raises:
+             ScopeViolationError: If agent is not allowed to learn in this domain
+         """
+         # Check scope
+         scope = self.scopes.get(agent)
+         if scope and not scope.is_allowed(domain):
+             raise ScopeViolationError(
+                 f"Agent '{agent}' is not allowed to learn in domain '{domain}'"
+             )
+
+         result = self.learning.add_domain_knowledge(
+             agent=agent,
+             project_id=self.project_id,
+             domain=domain,
+             fact=fact,
+             source=source,
+         )
+
+         # Invalidate cache for this agent/project after adding knowledge
+         self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
+
+         return result
+
+     @trace_method(name="ALMA.forget", kind=SpanKind.INTERNAL)
+     def forget(
+         self,
+         agent: Optional[str] = None,
+         older_than_days: int = 90,
+         below_confidence: float = 0.3,
+     ) -> int:
+         """
+         Prune stale or low-confidence memories.
+
+         This is a delete operation that invalidates cache after pruning
+         to ensure fresh retrieval results.
+
+         Args:
+             agent: Specific agent to prune, or None for all
+             older_than_days: Remove outcomes older than this
+             below_confidence: Remove heuristics below this confidence
+
+         Returns:
+             Number of items pruned (0 if nothing was pruned)
+
+         Raises:
+             StorageError: If the delete operation fails
+         """
+         start_time = time.time()
+         metrics = get_metrics()
+
+         count = self.learning.forget(
+             project_id=self.project_id,
+             agent=agent,
+             older_than_days=older_than_days,
+             below_confidence=below_confidence,
+         )
+
+         # Invalidate cache after forgetting (memories were removed)
+         if count > 0:
+             self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
+
+         # Record metrics
+         duration_ms = (time.time() - start_time) * 1000
+         metrics.record_forget_operation(
+             duration_ms=duration_ms,
+             agent=agent,
+             project_id=self.project_id,
+             items_removed=count,
+         )
+
+         structured_logger.info(
+             "Forget operation completed",
+             agent=agent or "all",
+             project_id=self.project_id,
+             items_removed=count,
+             older_than_days=older_than_days,
+             below_confidence=below_confidence,
+             duration_ms=duration_ms,
+         )
+
+         return count
+
+     def get_stats(self, agent: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Get memory statistics.
+
+         This is a query operation that returns statistics about stored memories.
+
+         Args:
+             agent: Specific agent or None for all
+
+         Returns:
+             Dict with counts and metadata (always returns a dict, may be empty)
+
+         Raises:
+             StorageError: If the query operation fails
+         """
+         return self.storage.get_stats(
+             project_id=self.project_id,
+             agent=agent,
+         )
+
+     # ==================== WORKFLOW INTEGRATION (v0.6.0) ====================
+     #
+     # Methods for AGtestari workflow integration: checkpointing, scoped
+     # retrieval, learning from workflows, and artifact linking.
+
+     def _get_checkpoint_manager(self) -> CheckpointManager:
+         """Get or create the checkpoint manager."""
+         if not hasattr(self, "_checkpoint_manager"):
+             self._checkpoint_manager = CheckpointManager(storage=self.storage)
+         return self._checkpoint_manager
+
+     @trace_method(name="ALMA.checkpoint", kind=SpanKind.INTERNAL)
+     def checkpoint(
+         self,
+         run_id: str,
+         node_id: str,
+         state: Dict[str, Any],
+         branch_id: Optional[str] = None,
+         parent_checkpoint_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         skip_if_unchanged: bool = True,
+     ) -> Optional[Checkpoint]:
+         """
+         Create a checkpoint for crash recovery.
+
+         Checkpoints persist workflow state at key points during execution,
+         enabling recovery after crashes or failures.
+
+         Args:
+             run_id: The workflow run identifier.
+             node_id: The node creating this checkpoint.
+             state: The state to persist.
+             branch_id: Optional branch identifier for parallel execution.
+             parent_checkpoint_id: Previous checkpoint in the chain.
+             metadata: Additional checkpoint metadata.
+             skip_if_unchanged: If True, skip creating checkpoint if state
+                 hasn't changed from the last checkpoint.
+
+         Returns:
+             The created Checkpoint, or None if skipped due to no changes.
+
+         Raises:
+             ValueError: If state exceeds max_state_size (1MB by default).
+         """
+         manager = self._get_checkpoint_manager()
+         checkpoint = manager.create_checkpoint(
+             run_id=run_id,
+             node_id=node_id,
+             state=state,
+             branch_id=branch_id,
+             parent_checkpoint_id=parent_checkpoint_id,
+             metadata=metadata,
+             skip_if_unchanged=skip_if_unchanged,
+         )
+
+         if checkpoint:
+             structured_logger.info(
+                 "Checkpoint created",
+                 run_id=run_id,
+                 node_id=node_id,
+                 checkpoint_id=checkpoint.id,
+                 sequence_number=checkpoint.sequence_number,
+             )
+
+         return checkpoint
+
+     def get_resume_point(
+         self,
+         run_id: str,
+         branch_id: Optional[str] = None,
+     ) -> Optional[Checkpoint]:
+         """
+         Get the checkpoint to resume from after a crash.
+
+         Args:
+             run_id: The workflow run identifier.
+             branch_id: Optional branch to filter by.
+
+         Returns:
+             The checkpoint to resume from, or None if no checkpoints.
+         """
+         manager = self._get_checkpoint_manager()
+         return manager.get_latest_checkpoint(run_id, branch_id)
+
+     def merge_states(
+         self,
+         states: List[Dict[str, Any]],
+         reducer_config: Optional[Dict[str, str]] = None,
+     ) -> Dict[str, Any]:
+         """
+         Merge multiple branch states after parallel execution.
+
+         Uses configurable reducers to handle each key in the state.
+         Default reducer is 'last_value' which takes the value from
+         the last state.
+
+         Args:
+             states: List of state dicts from parallel branches.
+             reducer_config: Optional mapping of key -> reducer name.
+                 Available reducers: append, merge_dict,
+                 last_value, first_value, sum, max, min, union.
+
+         Returns:
+             Merged state dictionary.
+
+         Example:
+             >>> states = [
+             ...     {"count": 5, "items": ["a"]},
+             ...     {"count": 3, "items": ["b", "c"]},
+             ... ]
+             >>> alma.merge_states(states, {"count": "sum", "items": "append"})
+             {"count": 8, "items": ["a", "b", "c"]}
+         """
+         config = ReducerConfig(field_reducers=reducer_config or {})
+         merger = StateMerger(config)
+         return merger.merge(states)
+
+     @trace_method(name="ALMA.learn_from_workflow", kind=SpanKind.INTERNAL)
+     def learn_from_workflow(
+         self,
+         agent: str,
+         workflow_id: str,
+         run_id: str,
+         result: str,
+         summary: str,
+         strategies_used: Optional[List[str]] = None,
+         successful_patterns: Optional[List[str]] = None,
+         failed_patterns: Optional[List[str]] = None,
+         duration_seconds: Optional[float] = None,
+         node_count: Optional[int] = None,
+         error_message: Optional[str] = None,
+         tenant_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> WorkflowOutcome:
+         """
+         Record learnings from a completed workflow execution.
+
+         Captures what was learned from running a workflow, including
+         strategies used, what worked, what didn't, and error details.
+
+         Args:
+             agent: The agent that executed the workflow.
+             workflow_id: The workflow definition that was executed.
+             run_id: The specific run this outcome is from.
+             result: Result status ("success", "failure", "partial",
+                 "cancelled", "timeout").
+             summary: Human-readable summary of what happened.
+             strategies_used: List of strategies/approaches attempted.
+             successful_patterns: Patterns that worked well.
+             failed_patterns: Patterns that didn't work.
+             duration_seconds: How long the workflow took.
+             node_count: Number of nodes executed.
+             error_message: Error details if failed.
+             tenant_id: Multi-tenant isolation identifier.
+             metadata: Additional outcome metadata.
+
+         Returns:
+             The created WorkflowOutcome.
+         """
+         start_time = time.time()
+         metrics = get_metrics()
+
+         # Create the outcome
+         outcome = WorkflowOutcome(
+             tenant_id=tenant_id,
+             workflow_id=workflow_id,
+             run_id=run_id,
+             agent=agent,
+             project_id=self.project_id,
+             result=WorkflowResult(result),
+             summary=summary,
+             strategies_used=strategies_used or [],
+             successful_patterns=successful_patterns or [],
+             failed_patterns=failed_patterns or [],
+             duration_seconds=duration_seconds,
+             node_count=node_count,
+             error_message=error_message,
+             metadata=metadata or {},
+         )
+
+         # Validate
+         outcome.validate()
+
+         # Save to storage
+         self.storage.save_workflow_outcome(outcome)
+
+         # Invalidate cache
+         self.retrieval.invalidate_cache(agent=agent, project_id=self.project_id)
+
+         # Record metrics
+         learn_duration_ms = (time.time() - start_time) * 1000
+         metrics.record_learn_operation(
+             duration_ms=learn_duration_ms,
+             agent=agent,
+             project_id=self.project_id,
+             memory_type="workflow_outcome",
+             success=True,
+         )
+
+         structured_logger.info(
+             "Workflow outcome recorded",
+             agent=agent,
+             workflow_id=workflow_id,
+             run_id=run_id,
+             result=result,
+             duration_ms=learn_duration_ms,
+         )
+
+         return outcome
+
+     def link_artifact(
+         self,
+         memory_id: str,
+         artifact_type: str,
+         storage_url: str,
+         filename: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         size_bytes: Optional[int] = None,
+         checksum: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> ArtifactRef:
+         """
+         Link an external artifact to a memory.
+
+         Artifacts are stored externally (e.g., Cloudflare R2, S3) and
+         referenced by URL. This allows memories to reference large files
+         without bloating the memory database.
+
+         Args:
+             memory_id: The memory to link the artifact to.
+             artifact_type: Type of artifact ("screenshot", "log", "report",
+                 "file", "document", "image", etc.).
+             storage_url: URL or path to the artifact in storage.
+             filename: Original filename.
+             mime_type: MIME type.
+             size_bytes: Size in bytes.
+             checksum: SHA256 checksum for integrity verification.
+             metadata: Additional artifact metadata.
+
+         Returns:
+             The created ArtifactRef.
+         """
+         # Convert string to enum
+         try:
+             artifact_type_enum = ArtifactType(artifact_type)
+         except ValueError:
+             artifact_type_enum = ArtifactType.OTHER
+
+         artifact = ArtifactRef(
+             memory_id=memory_id,
+             artifact_type=artifact_type_enum,
+             storage_url=storage_url,
+             filename=filename,
+             mime_type=mime_type,
+             size_bytes=size_bytes,
+             checksum=checksum,
+             metadata=metadata or {},
+         )
+
+         # Validate
+         artifact.validate()
+
+         # Save to storage
+         self.storage.save_artifact_link(artifact)
+
+         structured_logger.info(
+             "Artifact linked",
+             memory_id=memory_id,
+             artifact_id=artifact.id,
+             artifact_type=artifact_type,
+             storage_url=storage_url[:50] if storage_url else None,
+         )
+
+         return artifact
+
+     def get_artifacts(self, memory_id: str) -> List[ArtifactRef]:
+         """
+         Get all artifacts linked to a memory.
+
+         Args:
+             memory_id: The memory to get artifacts for.
+
+         Returns:
+             List of ArtifactRef objects.
+         """
+         return self.storage.get_artifact_links(memory_id)
+
+     def cleanup_checkpoints(
+         self,
+         run_id: str,
+         keep_latest: int = 1,
+     ) -> int:
+         """
+         Clean up old checkpoints for a completed run.
+
+         Call this after a workflow completes to free up storage.
+
+         Args:
+             run_id: The workflow run identifier.
+             keep_latest: Number of latest checkpoints to keep.
+
+         Returns:
+             Number of checkpoints deleted.
+         """
+         manager = self._get_checkpoint_manager()
+         count = manager.cleanup_checkpoints(run_id, keep_latest)
+
+         if count > 0:
+             structured_logger.info(
+                 "Checkpoints cleaned up",
+                 run_id=run_id,
+                 deleted_count=count,
+                 kept=keep_latest,
+             )
+
+         return count
+
+     def retrieve_with_scope(
+         self,
+         task: str,
+         agent: str,
+         context: WorkflowContext,
+         scope: RetrievalScope = RetrievalScope.AGENT,
+         user_id: Optional[str] = None,
+         top_k: int = 5,
+     ) -> MemorySlice:
+         """
+         Retrieve memories with workflow scope filtering.
+
+         This is an enhanced version of retrieve() that supports
+         filtering by workflow context and scope level.
+
+         Args:
+             task: Description of the task to perform.
+             agent: Name of the agent requesting memories.
+             context: Workflow context for scoping.
+             scope: How broadly to search for memories.
+             user_id: Optional user ID for preference retrieval.
+             top_k: Maximum items per memory type.
+
+         Returns:
+             MemorySlice with relevant memories for context injection.
+         """
+         start_time = time.time()
+         metrics = get_metrics()
+
+         # Build scope filter from context
+         scope_filter = context.get_scope_filter(scope)
+
+         # For now, scope_filter is passed to the retrieval as metadata
+         # Future: pass to storage.get_* methods for proper filtering
+         result = self.retrieval.retrieve(
+             query=task,
+             agent=agent,
+             project_id=self.project_id,
+             user_id=user_id,
+             top_k=top_k,
+             scope=self.scopes.get(agent),
+         )
+
+         # Add scope context to result metadata
+         result.metadata = {
+             "scope": scope.value,
+             "scope_filter": scope_filter,
+             "context": context.to_dict(),
+         }
+
+         # Record metrics
+         duration_ms = (time.time() - start_time) * 1000
+         cache_hit = result.retrieval_time_ms < 10
+         metrics.record_retrieve_latency(
+             duration_ms=duration_ms,
+             agent=agent,
+             project_id=self.project_id,
+             cache_hit=cache_hit,
+             items_returned=result.total_items,
+         )
+
+         structured_logger.info(
+             "Scoped memory retrieval completed",
+             agent=agent,
+             project_id=self.project_id,
+             scope=scope.value,
+             workflow_id=context.workflow_id,
+             run_id=context.run_id,
+             items_returned=result.total_items,
+             duration_ms=duration_ms,
+         )
+
+         return result
+
+     # ==================== ASYNC API ====================
+     #
+     # Async variants of core methods for better concurrency support.
+     # These use asyncio.to_thread() to run blocking operations in a
+     # thread pool, preventing event loop blocking in async applications.
+
+     async def async_retrieve(
+         self,
+         task: str,
+         agent: str,
+         user_id: Optional[str] = None,
+         top_k: int = 5,
+     ) -> MemorySlice:
+         """
+         Async version of retrieve(). Retrieve relevant memories for a task.
+
+         Runs the blocking storage operations in a thread pool to avoid
+         blocking the event loop.
+
+         Args:
+             task: Description of the task to perform
+             agent: Name of the agent requesting memories
+             user_id: Optional user ID for preference retrieval
+             top_k: Maximum items per memory type
+
+         Returns:
+             MemorySlice with relevant memories for context injection
+         """
+         return await asyncio.to_thread(
+             self.retrieve,
+             task=task,
+             agent=agent,
+             user_id=user_id,
+             top_k=top_k,
+         )
+
+     async def async_learn(
+         self,
+         agent: str,
+         task: str,
+         outcome: str,
+         strategy_used: str,
+         task_type: Optional[str] = None,
+         duration_ms: Optional[int] = None,
+         error_message: Optional[str] = None,
+         feedback: Optional[str] = None,
+     ) -> Outcome:
+         """
+         Async version of learn(). Learn from a task outcome.
+
+         Validates that learning is within agent's scope before committing.
+         Invalidates cache after learning to ensure fresh retrieval results.
+
+         Args:
+             agent: Name of the agent that executed the task
+             task: Description of the task
+             outcome: "success" or "failure"
+             strategy_used: What approach was taken
+             task_type: Category of task (for grouping)
+             duration_ms: How long the task took
+             error_message: Error details if failed
+             feedback: User feedback if provided
+
+         Returns:
+             The created Outcome record
+
+         Raises:
+             ScopeViolationError: If learning is outside agent's scope
+         """
+         return await asyncio.to_thread(
+             self.learn,
+             agent=agent,
+             task=task,
+             outcome=outcome,
+             strategy_used=strategy_used,
+             task_type=task_type,
+             duration_ms=duration_ms,
+             error_message=error_message,
+             feedback=feedback,
+         )
+
+     async def async_add_user_preference(
+         self,
+         user_id: str,
+         category: str,
+         preference: str,
+         source: str = "explicit_instruction",
+     ) -> UserPreference:
+         """
+         Async version of add_user_preference(). Add a user preference to memory.
+
+         Args:
+             user_id: User identifier
+             category: Category (communication, code_style, workflow)
+             preference: The preference text
+             source: How this was learned
+
+         Returns:
+             The created UserPreference
+         """
+         return await asyncio.to_thread(
+             self.add_user_preference,
+             user_id=user_id,
+             category=category,
+             preference=preference,
+             source=source,
+         )
+
+     async def async_add_domain_knowledge(
+         self,
+         agent: str,
+         domain: str,
+         fact: str,
+         source: str = "user_stated",
+     ) -> DomainKnowledge:
+         """
+         Async version of add_domain_knowledge(). Add domain knowledge within agent's scope.
+
+         Args:
+             agent: Agent this knowledge belongs to
+             domain: Knowledge domain
+             fact: The fact to remember
+             source: How this was learned
+
+         Returns:
+             The created DomainKnowledge
+
+         Raises:
+             ScopeViolationError: If agent is not allowed to learn in this domain
+         """
+         return await asyncio.to_thread(
+             self.add_domain_knowledge,
+             agent=agent,
+             domain=domain,
+             fact=fact,
+             source=source,
+         )
+
+     async def async_forget(
+         self,
+         agent: Optional[str] = None,
+         older_than_days: int = 90,
+         below_confidence: float = 0.3,
+     ) -> int:
+         """
+         Async version of forget(). Prune stale or low-confidence memories.
+
+         This is a delete operation that invalidates cache after pruning
+         to ensure fresh retrieval results.
+
+         Args:
+             agent: Specific agent to prune, or None for all
+             older_than_days: Remove outcomes older than this
+             below_confidence: Remove heuristics below this confidence
+
+         Returns:
+             Number of items pruned (0 if nothing was pruned)
+
+         Raises:
+             StorageError: If the delete operation fails
+         """
+         return await asyncio.to_thread(
+             self.forget,
+             agent=agent,
+             older_than_days=older_than_days,
+             below_confidence=below_confidence,
+         )
+
+     async def async_get_stats(self, agent: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Async version of get_stats(). Get memory statistics.
+
+         Args:
+             agent: Specific agent or None for all
+
+         Returns:
+             Dict with counts and metadata (always returns a dict, may be empty)
+
+         Raises:
+             StorageError: If the query operation fails
+         """
+         return await asyncio.to_thread(
+             self.get_stats,
+             agent=agent,
+         )
+
+     # ==================== ASYNC WORKFLOW API ====================
+
+     async def async_checkpoint(
+         self,
+         run_id: str,
+         node_id: str,
+         state: Dict[str, Any],
+         branch_id: Optional[str] = None,
+         parent_checkpoint_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         skip_if_unchanged: bool = True,
+     ) -> Optional[Checkpoint]:
+         """Async version of checkpoint()."""
+         return await asyncio.to_thread(
+             self.checkpoint,
+             run_id=run_id,
+             node_id=node_id,
+             state=state,
+             branch_id=branch_id,
+             parent_checkpoint_id=parent_checkpoint_id,
+             metadata=metadata,
+             skip_if_unchanged=skip_if_unchanged,
+         )
+
+     async def async_get_resume_point(
+         self,
+         run_id: str,
+         branch_id: Optional[str] = None,
+     ) -> Optional[Checkpoint]:
+         """Async version of get_resume_point()."""
+         return await asyncio.to_thread(
+             self.get_resume_point,
+             run_id=run_id,
+             branch_id=branch_id,
+         )
+
+     async def async_learn_from_workflow(
+         self,
+         agent: str,
+         workflow_id: str,
+         run_id: str,
+         result: str,
+         summary: str,
+         strategies_used: Optional[List[str]] = None,
+         successful_patterns: Optional[List[str]] = None,
+         failed_patterns: Optional[List[str]] = None,
+         duration_seconds: Optional[float] = None,
+         node_count: Optional[int] = None,
+         error_message: Optional[str] = None,
+         tenant_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> WorkflowOutcome:
+         """Async version of learn_from_workflow()."""
+         return await asyncio.to_thread(
+             self.learn_from_workflow,
+             agent=agent,
+             workflow_id=workflow_id,
+             run_id=run_id,
+             result=result,
+             summary=summary,
+             strategies_used=strategies_used,
+             successful_patterns=successful_patterns,
+             failed_patterns=failed_patterns,
+             duration_seconds=duration_seconds,
+             node_count=node_count,
+             error_message=error_message,
+             tenant_id=tenant_id,
+             metadata=metadata,
+         )
+
+     async def async_link_artifact(
+         self,
+         memory_id: str,
+         artifact_type: str,
+         storage_url: str,
+         filename: Optional[str] = None,
+         mime_type: Optional[str] = None,
+         size_bytes: Optional[int] = None,
+         checksum: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> ArtifactRef:
+         """Async version of link_artifact()."""
+         return await asyncio.to_thread(
+             self.link_artifact,
+             memory_id=memory_id,
+             artifact_type=artifact_type,
+             storage_url=storage_url,
+             filename=filename,
+             mime_type=mime_type,
+             size_bytes=size_bytes,
+             checksum=checksum,
+             metadata=metadata,
+         )
+
+     async def async_retrieve_with_scope(
+         self,
+         task: str,
+         agent: str,
+         context: WorkflowContext,
+         scope: RetrievalScope = RetrievalScope.AGENT,
+         user_id: Optional[str] = None,
+         top_k: int = 5,
+     ) -> MemorySlice:
+         """Async version of retrieve_with_scope()."""
+         return await asyncio.to_thread(
+             self.retrieve_with_scope,
+             task=task,
+             agent=agent,
+             context=context,
+             scope=scope,
+             user_id=user_id,
+             top_k=top_k,
+         )
+
+     async def async_merge_states(
+         self,
+         states: List[Dict[str, Any]],
+         reducer_config: Optional[Dict[str, str]] = None,
+     ) -> Dict[str, Any]:
+         """
+         Async version of merge_states().
+
+         Merge multiple branch states after parallel execution.
+         """
+         return await asyncio.to_thread(
+             self.merge_states,
+             states=states,
+             reducer_config=reducer_config,
+         )
+
+     async def async_get_artifacts(self, memory_id: str) -> List[ArtifactRef]:
+         """
+         Async version of get_artifacts().
+
+         Get all artifacts linked to a memory.
+         """
+         return await asyncio.to_thread(
+             self.get_artifacts,
+             memory_id=memory_id,
+         )
+
+     async def async_cleanup_checkpoints(
+         self,
+         run_id: str,
+         keep_latest: int = 1,
+     ) -> int:
+         """
+         Async version of cleanup_checkpoints().
+
+         Clean up old checkpoints for a completed run.
+         """
+         return await asyncio.to_thread(
+             self.cleanup_checkpoints,
+             run_id=run_id,
+             keep_latest=keep_latest,
+         )
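
The diff above changes two contracts that 0.5.0 callers relied on: learn() now returns the created Outcome instead of a bool, and add_domain_knowledge() raises ScopeViolationError instead of returning None on a scope violation. It also adds workflow checkpointing, state merging, and asyncio.to_thread()-based async variants. The sketch below shows how calling code might look against the 0.7.0 API; it is illustrative only, and the config path contents, agent name ("coder"), task strings, and run/node identifiers are assumptions, not values taken from the package.

# Minimal usage sketch against the 0.7.0 API shown in the diff above.
# Agent name, task strings, and run/node identifiers are hypothetical.
import asyncio

from alma.core import ALMA
from alma.exceptions import ScopeViolationError

alma = ALMA.from_config(".alma/config.yaml")

# learn() now returns the created Outcome and raises on scope
# violations; 0.5.0 returned True/False instead.
try:
    outcome = alma.learn(
        agent="coder",  # hypothetical agent name
        task="refactor the retry logic",
        outcome="success",
        strategy_used="extract helper with exponential backoff",
    )
except ScopeViolationError as exc:
    print(f"learning rejected: {exc}")

# Workflow integration: persist a checkpoint, find the resume point,
# merge parallel branch states, then clean up completed-run checkpoints.
alma.checkpoint(run_id="run-1", node_id="plan", state={"step": 1})
resume = alma.get_resume_point(run_id="run-1")
merged = alma.merge_states(
    [{"count": 5, "items": ["a"]}, {"count": 3, "items": ["b", "c"]}],
    reducer_config={"count": "sum", "items": "append"},
)  # -> {"count": 8, "items": ["a", "b", "c"]}, per the merge_states docstring
alma.cleanup_checkpoints(run_id="run-1", keep_latest=1)

# Async variants wrap the sync methods via asyncio.to_thread(), so the
# event loop is not blocked by storage I/O.
async def main() -> None:
    memories = await alma.async_retrieve(task="fix flaky test", agent="coder")
    print(memories.total_items)

asyncio.run(main())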