alma-memory 0.5.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +296 -194
- alma/compression/__init__.py +33 -0
- alma/compression/pipeline.py +980 -0
- alma/confidence/__init__.py +47 -47
- alma/confidence/engine.py +540 -540
- alma/confidence/types.py +351 -351
- alma/config/loader.py +157 -157
- alma/consolidation/__init__.py +23 -23
- alma/consolidation/engine.py +678 -678
- alma/consolidation/prompts.py +84 -84
- alma/core.py +1189 -322
- alma/domains/__init__.py +30 -30
- alma/domains/factory.py +359 -359
- alma/domains/schemas.py +448 -448
- alma/domains/types.py +272 -272
- alma/events/__init__.py +75 -75
- alma/events/emitter.py +285 -284
- alma/events/storage_mixin.py +246 -246
- alma/events/types.py +126 -126
- alma/events/webhook.py +425 -425
- alma/exceptions.py +49 -49
- alma/extraction/__init__.py +31 -31
- alma/extraction/auto_learner.py +265 -264
- alma/extraction/extractor.py +420 -420
- alma/graph/__init__.py +106 -81
- alma/graph/backends/__init__.py +32 -18
- alma/graph/backends/kuzu.py +624 -0
- alma/graph/backends/memgraph.py +432 -0
- alma/graph/backends/memory.py +236 -236
- alma/graph/backends/neo4j.py +417 -417
- alma/graph/base.py +159 -159
- alma/graph/extraction.py +198 -198
- alma/graph/store.py +860 -860
- alma/harness/__init__.py +35 -35
- alma/harness/base.py +386 -386
- alma/harness/domains.py +705 -705
- alma/initializer/__init__.py +37 -37
- alma/initializer/initializer.py +418 -418
- alma/initializer/types.py +250 -250
- alma/integration/__init__.py +62 -62
- alma/integration/claude_agents.py +444 -432
- alma/integration/helena.py +423 -423
- alma/integration/victor.py +471 -471
- alma/learning/__init__.py +101 -86
- alma/learning/decay.py +878 -0
- alma/learning/forgetting.py +1446 -1446
- alma/learning/heuristic_extractor.py +390 -390
- alma/learning/protocols.py +374 -374
- alma/learning/validation.py +346 -346
- alma/mcp/__init__.py +123 -45
- alma/mcp/__main__.py +156 -156
- alma/mcp/resources.py +122 -122
- alma/mcp/server.py +955 -591
- alma/mcp/tools.py +3254 -511
- alma/observability/__init__.py +91 -0
- alma/observability/config.py +302 -0
- alma/observability/guidelines.py +170 -0
- alma/observability/logging.py +424 -0
- alma/observability/metrics.py +583 -0
- alma/observability/tracing.py +440 -0
- alma/progress/__init__.py +21 -21
- alma/progress/tracker.py +607 -607
- alma/progress/types.py +250 -250
- alma/retrieval/__init__.py +134 -53
- alma/retrieval/budget.py +525 -0
- alma/retrieval/cache.py +1304 -1061
- alma/retrieval/embeddings.py +202 -202
- alma/retrieval/engine.py +850 -366
- alma/retrieval/modes.py +365 -0
- alma/retrieval/progressive.py +560 -0
- alma/retrieval/scoring.py +344 -344
- alma/retrieval/trust_scoring.py +637 -0
- alma/retrieval/verification.py +797 -0
- alma/session/__init__.py +19 -19
- alma/session/manager.py +442 -399
- alma/session/types.py +288 -288
- alma/storage/__init__.py +101 -61
- alma/storage/archive.py +233 -0
- alma/storage/azure_cosmos.py +1259 -1048
- alma/storage/base.py +1083 -525
- alma/storage/chroma.py +1443 -1443
- alma/storage/constants.py +103 -0
- alma/storage/file_based.py +614 -619
- alma/storage/migrations/__init__.py +21 -0
- alma/storage/migrations/base.py +321 -0
- alma/storage/migrations/runner.py +323 -0
- alma/storage/migrations/version_stores.py +337 -0
- alma/storage/migrations/versions/__init__.py +11 -0
- alma/storage/migrations/versions/v1_0_0.py +373 -0
- alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
- alma/storage/pinecone.py +1080 -1080
- alma/storage/postgresql.py +1948 -1452
- alma/storage/qdrant.py +1306 -1306
- alma/storage/sqlite_local.py +3041 -1358
- alma/testing/__init__.py +46 -0
- alma/testing/factories.py +301 -0
- alma/testing/mocks.py +389 -0
- alma/types.py +292 -264
- alma/utils/__init__.py +19 -0
- alma/utils/tokenizer.py +521 -0
- alma/workflow/__init__.py +83 -0
- alma/workflow/artifacts.py +170 -0
- alma/workflow/checkpoint.py +311 -0
- alma/workflow/context.py +228 -0
- alma/workflow/outcomes.py +189 -0
- alma/workflow/reducers.py +393 -0
- {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/METADATA +244 -72
- alma_memory-0.7.0.dist-info/RECORD +112 -0
- alma_memory-0.5.0.dist-info/RECORD +0 -76
- {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
- {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/mcp/tools.py
CHANGED
|
@@ -1,511 +1,3254 @@
|
|
|
1
|
-
"""
|
|
2
|
-
ALMA MCP Tool Definitions.
|
|
3
|
-
|
|
4
|
-
Provides the tool functions that can be called via MCP protocol.
|
|
5
|
-
Each tool corresponds to an ALMA operation.
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
"
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
"
|
|
48
|
-
"
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
"
|
|
60
|
-
"
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
"
|
|
70
|
-
"
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
"
|
|
80
|
-
"
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
return {
|
|
131
|
-
"success":
|
|
132
|
-
"
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
"
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
)
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
)
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
return {
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
323
|
-
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
return {
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
return {
|
|
402
|
-
"success":
|
|
403
|
-
"
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
"""
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
return {
|
|
457
|
-
"success": False,
|
|
458
|
-
"error":
|
|
459
|
-
}
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
)
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
"
|
|
497
|
-
"
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
1
|
+
"""
|
|
2
|
+
ALMA MCP Tool Definitions.
|
|
3
|
+
|
|
4
|
+
Provides the tool functions that can be called via MCP protocol.
|
|
5
|
+
Each tool corresponds to an ALMA operation.
|
|
6
|
+
|
|
7
|
+
Both sync and async versions are provided:
|
|
8
|
+
- Sync tools: alma_retrieve, alma_learn, etc.
|
|
9
|
+
- Async tools: async_alma_retrieve, async_alma_learn, etc.
|
|
10
|
+
|
|
11
|
+
The async tools use ALMA's async API for better concurrency in
|
|
12
|
+
async MCP server implementations.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
from datetime import datetime, timezone
|
|
17
|
+
from typing import Any, Dict, Optional
|
|
18
|
+
|
|
19
|
+
from alma import ALMA
|
|
20
|
+
from alma.retrieval.modes import RetrievalMode
|
|
21
|
+
from alma.types import MemorySlice
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _serialize_memory_slice(memory_slice: MemorySlice) -> Dict[str, Any]:
|
|
27
|
+
"""Convert MemorySlice to JSON-serializable dict."""
|
|
28
|
+
result = {
|
|
29
|
+
"heuristics": [],
|
|
30
|
+
"outcomes": [],
|
|
31
|
+
"domain_knowledge": [],
|
|
32
|
+
"anti_patterns": [],
|
|
33
|
+
"preferences": [],
|
|
34
|
+
"query": memory_slice.query,
|
|
35
|
+
"agent": memory_slice.agent,
|
|
36
|
+
"retrieval_time_ms": memory_slice.retrieval_time_ms,
|
|
37
|
+
"total_items": memory_slice.total_items,
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
for h in memory_slice.heuristics:
|
|
41
|
+
result["heuristics"].append(
|
|
42
|
+
{
|
|
43
|
+
"id": h.id,
|
|
44
|
+
"condition": h.condition,
|
|
45
|
+
"strategy": h.strategy,
|
|
46
|
+
"confidence": h.confidence,
|
|
47
|
+
"occurrence_count": h.occurrence_count,
|
|
48
|
+
"success_rate": h.success_rate,
|
|
49
|
+
}
|
|
50
|
+
)
|
|
51
|
+
|
|
52
|
+
for o in memory_slice.outcomes:
|
|
53
|
+
result["outcomes"].append(
|
|
54
|
+
{
|
|
55
|
+
"id": o.id,
|
|
56
|
+
"task_type": o.task_type,
|
|
57
|
+
"task_description": o.task_description,
|
|
58
|
+
"success": o.success,
|
|
59
|
+
"strategy_used": o.strategy_used,
|
|
60
|
+
"duration_ms": o.duration_ms,
|
|
61
|
+
}
|
|
62
|
+
)
|
|
63
|
+
|
|
64
|
+
for dk in memory_slice.domain_knowledge:
|
|
65
|
+
result["domain_knowledge"].append(
|
|
66
|
+
{
|
|
67
|
+
"id": dk.id,
|
|
68
|
+
"domain": dk.domain,
|
|
69
|
+
"fact": dk.fact,
|
|
70
|
+
"confidence": dk.confidence,
|
|
71
|
+
}
|
|
72
|
+
)
|
|
73
|
+
|
|
74
|
+
for ap in memory_slice.anti_patterns:
|
|
75
|
+
result["anti_patterns"].append(
|
|
76
|
+
{
|
|
77
|
+
"id": ap.id,
|
|
78
|
+
"pattern": ap.pattern,
|
|
79
|
+
"why_bad": ap.why_bad,
|
|
80
|
+
"better_alternative": ap.better_alternative,
|
|
81
|
+
}
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
for p in memory_slice.preferences:
|
|
85
|
+
result["preferences"].append(
|
|
86
|
+
{
|
|
87
|
+
"id": p.id,
|
|
88
|
+
"category": p.category,
|
|
89
|
+
"preference": p.preference,
|
|
90
|
+
}
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
return result
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def alma_retrieve(
    alma: ALMA,
    task: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """
    Retrieve relevant memories for a task.

    Args:
        alma: ALMA instance
        task: Description of the task to perform
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Maximum items per memory type (must be >= 1)

    Returns:
        Dict containing the memory slice with relevant memories, or
        {"success": False, "error": ...} on invalid input or failure.
    """
    # Input validation
    if not task or not task.strip():
        return {"success": False, "error": "task cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}
    # Fix: top_k was previously unvalidated; a non-positive limit is
    # nonsensical and should be rejected like the other bad inputs.
    if top_k < 1:
        return {"success": False, "error": "top_k must be >= 1"}

    try:
        memories = alma.retrieve(
            task=task,
            agent=agent,
            user_id=user_id,
            top_k=top_k,
        )

        return {
            "success": True,
            "memories": _serialize_memory_slice(memories),
            "prompt_injection": memories.to_prompt(),
        }

    except Exception as e:
        # Lazy %-args: the message is only built if the record is emitted.
        logger.exception("Error in alma_retrieve: %s", e)
        return {
            "success": False,
            "error": str(e),
        }
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def alma_retrieve_for_mode(
    alma: ALMA,
    query: str,
    mode: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Retrieve memories using an explicitly chosen retrieval mode.

    The strategy adapts to the kind of task:
    - BROAD: planning/brainstorming - diverse, exploratory results
    - PRECISE: execution/implementation - high-confidence matches
    - DIAGNOSTIC: debugging/troubleshooting - prioritizes failures
    - LEARNING: pattern finding - similar memories for consolidation
    - RECALL: exact lookup - prioritizes exact matches

    Args:
        alma: ALMA instance
        query: Description of the task to perform
        mode: Retrieval mode (broad, precise, diagnostic, learning, recall)
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Override the mode's default top_k

    Returns:
        Dict containing memories, the mode used, and why it was selected.
    """
    # Reject blank required fields up front (same order as before: query,
    # agent, then mode).
    for field_name, value in (("query", query), ("agent", agent), ("mode", mode)):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    valid_modes = ["broad", "precise", "diagnostic", "learning", "recall"]
    normalized = mode.lower()
    if normalized not in valid_modes:
        return {
            "success": False,
            "error": f"mode must be one of: {', '.join(valid_modes)}",
        }

    try:
        # Map the validated string onto the RetrievalMode enum and delegate
        # to the retrieval engine's mode-aware entry point.
        selected = RetrievalMode(normalized)
        memories, used_mode, mode_reason = alma.retrieval.retrieve_with_mode(
            query=query,
            agent=agent,
            project_id=alma.project_id,
            mode=selected,
            user_id=user_id,
            top_k=top_k,
            scope=alma.scopes.get(agent),
        )

        return {
            "success": True,
            "memories": _serialize_memory_slice(memories),
            "prompt_injection": memories.to_prompt(),
            "mode": used_mode.value,
            "mode_reason": mode_reason,
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_for_mode: {e}")
        return {"success": False, "error": str(e)}
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def alma_retrieve_smart(
    alma: ALMA,
    query: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Retrieve memories while letting the engine pick the retrieval mode.

    The mode is inferred from the query text, e.g.:
    - "error", "bug", "fix", "debug" -> DIAGNOSTIC
    - "how should", "options", "plan" -> BROAD
    - "what was", "remember when" -> RECALL
    - "pattern", "similar" -> LEARNING
    - anything else -> PRECISE (implementation tasks)

    Args:
        alma: ALMA instance
        query: Description of the task to perform
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Override the inferred mode's default top_k

    Returns:
        Dict containing memories, the inferred mode, and the reason it
        was chosen.
    """
    # Guard clauses: blank inputs are rejected before any engine work.
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    try:
        # mode=None tells retrieve_with_mode to infer the mode itself.
        slice_result, inferred, reason = alma.retrieval.retrieve_with_mode(
            query=query,
            agent=agent,
            project_id=alma.project_id,
            mode=None,
            user_id=user_id,
            top_k=top_k,
            scope=alma.scopes.get(agent),
        )

        return {
            "success": True,
            "memories": _serialize_memory_slice(slice_result),
            "prompt_injection": slice_result.to_prompt(),
            "mode": inferred.value,
            "mode_reason": reason,
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_smart: {e}")
        return {"success": False, "error": str(e)}
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
def alma_learn(
    alma: ALMA,
    agent: str,
    task: str,
    outcome: str,
    strategy_used: str,
    task_type: Optional[str] = None,
    duration_ms: Optional[int] = None,
    error_message: Optional[str] = None,
    feedback: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Record a task outcome so ALMA can learn from it.

    Args:
        alma: ALMA instance
        agent: Name of the agent that executed the task
        task: Description of the task
        outcome: "success" or "failure"
        strategy_used: What approach was taken
        task_type: Category of task (for grouping)
        duration_ms: How long the task took
        error_message: Error details if failed
        feedback: User feedback if provided

    Returns:
        Dict describing the stored outcome, or an error payload.
    """
    # Table-driven validation keeps the four required-field checks in one
    # place; order matches the original (agent, task, outcome, strategy_used).
    required = (
        ("agent", agent),
        ("task", task),
        ("outcome", outcome),
        ("strategy_used", strategy_used),
    )
    for field_name, value in required:
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        record = alma.learn(
            agent=agent,
            task=task,
            outcome=outcome,
            strategy_used=strategy_used,
            task_type=task_type,
            duration_ms=duration_ms,
            error_message=error_message,
            feedback=feedback,
        )

        return {
            "success": True,
            "learned": True,
            "outcome": {
                "id": record.id,
                "agent": record.agent,
                "task_type": record.task_type,
                "success": record.success,
            },
            "message": "Outcome recorded successfully",
        }

    except Exception as e:
        logger.exception(f"Error in alma_learn: {e}")
        return {"success": False, "error": str(e)}
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def alma_add_preference(
    alma: ALMA,
    user_id: str,
    category: str,
    preference: str,
    source: str = "explicit_instruction",
) -> Dict[str, Any]:
    """
    Store a user preference in memory.

    Args:
        alma: ALMA instance
        user_id: User identifier
        category: Category (communication, code_style, workflow)
        preference: The preference text
        source: How this was learned

    Returns:
        Dict with the created preference, or an error payload.
    """
    # Blank required fields are rejected before touching storage; order
    # matches the original (user_id, category, preference).
    for field_name, value in (
        ("user_id", user_id),
        ("category", category),
        ("preference", preference),
    ):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        stored = alma.add_user_preference(
            user_id=user_id,
            category=category,
            preference=preference,
            source=source,
        )

        return {
            "success": True,
            "preference": {
                "id": stored.id,
                "user_id": stored.user_id,
                "category": stored.category,
                "preference": stored.preference,
                "source": stored.source,
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_add_preference: {e}")
        return {"success": False, "error": str(e)}
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
def alma_add_knowledge(
    alma: ALMA,
    agent: str,
    domain: str,
    fact: str,
    source: str = "user_stated",
) -> Dict[str, Any]:
    """
    Add domain knowledge within an agent's scope.

    Args:
        alma: ALMA instance
        agent: Agent this knowledge belongs to
        domain: Knowledge domain
        fact: The fact to remember
        source: How this was learned

    Returns:
        Dict with the created knowledge, or an error / rejection payload.
    """
    # Blank required fields are rejected before touching storage; order
    # matches the original (agent, domain, fact).
    for field_name, value in (("agent", agent), ("domain", domain), ("fact", fact)):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        stored = alma.add_domain_knowledge(
            agent=agent,
            domain=domain,
            fact=fact,
            source=source,
        )

        return {
            "success": True,
            "knowledge": {
                "id": stored.id,
                "agent": stored.agent,
                "domain": stored.domain,
                "fact": stored.fact,
                "source": stored.source,
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_add_knowledge: {e}")
        return {"success": False, "error": str(e)}
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
def alma_forget(
    alma: ALMA,
    agent: Optional[str] = None,
    older_than_days: int = 90,
    below_confidence: float = 0.3,
) -> Dict[str, Any]:
    """
    Prune stale or low-confidence memories.

    Args:
        alma: ALMA instance
        agent: Specific agent to prune, or None for all
        older_than_days: Remove outcomes older than this (must be >= 0)
        below_confidence: Remove heuristics below this confidence
            (must be between 0.0 and 1.0)

    Returns:
        Dict with number of items pruned, or an error payload.
    """
    # Fix: this tool previously did no input validation, unlike every other
    # alma_* tool. Reject nonsensical ranges before touching storage.
    if older_than_days < 0:
        return {"success": False, "error": "older_than_days must be >= 0"}
    if not 0.0 <= below_confidence <= 1.0:
        return {
            "success": False,
            "error": "below_confidence must be between 0.0 and 1.0",
        }

    try:
        count = alma.forget(
            agent=agent,
            older_than_days=older_than_days,
            below_confidence=below_confidence,
        )

        return {
            "success": True,
            "pruned_count": count,
            "message": f"Pruned {count} stale or low-confidence memories",
        }

    except Exception as e:
        # Lazy %-args: the message is only built if the record is emitted.
        logger.exception("Error in alma_forget: %s", e)
        return {
            "success": False,
            "error": str(e),
        }
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
def alma_stats(
    alma: ALMA,
    agent: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Report memory statistics.

    Args:
        alma: ALMA instance
        agent: Specific agent or None for all

    Returns:
        Dict with memory statistics, or an error payload.
    """
    try:
        snapshot = alma.get_stats(agent=agent)
    except Exception as e:
        logger.exception(f"Error in alma_stats: {e}")
        return {
            "success": False,
            "error": str(e),
        }
    else:
        # Success path lives in the else clause so only the fallible call
        # sits inside the try body.
        return {
            "success": True,
            "stats": snapshot,
        }
|
|
529
|
+
|
|
530
|
+
|
|
531
|
+
def alma_health(alma: ALMA) -> Dict[str, Any]:
    """
    Health check for the ALMA server.

    Args:
        alma: ALMA instance

    Returns:
        Dict with health status; "healthy" when basic stats can be read,
        "unhealthy" with the error otherwise.
    """
    try:
        # Reading stats doubles as a storage-connectivity probe.
        snapshot = alma.get_stats()

        report = {
            "success": True,
            "status": "healthy",
            "project_id": alma.project_id,
            "total_memories": snapshot.get("total_count", 0),
            "registered_agents": list(alma.scopes.keys()),
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
        return report

    except Exception as e:
        logger.exception(f"Error in alma_health: {e}")
        return {
            "success": False,
            "status": "unhealthy",
            "error": str(e),
        }
|
|
561
|
+
|
|
562
|
+
|
|
563
|
+
async def alma_consolidate(
    alma: ALMA,
    agent: str,
    memory_type: str = "heuristics",
    similarity_threshold: float = 0.85,
    dry_run: bool = True,
) -> Dict[str, Any]:
    """
    Merge near-duplicate memories to reduce redundancy.

    This is ALMA's take on Mem0-style deduplication: similar memories are
    grouped by embedding similarity and merged.

    Args:
        alma: ALMA instance
        agent: Agent whose memories to consolidate
        memory_type: Type of memory to consolidate
            ("heuristics", "outcomes", "domain_knowledge", "anti_patterns")
        similarity_threshold: Minimum cosine similarity to group (0.0-1.0);
            higher values are more conservative (fewer merges)
        dry_run: When True, only report what would be merged without
            modifying storage - recommended for a first run

    Returns:
        Dict with consolidation results: merged_count, groups_found,
        memories_processed, merge_details (planned operations when
        dry_run), and any errors.
    """
    # Validate in the same order as before: agent, memory_type, threshold.
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    allowed_types = ["heuristics", "outcomes", "domain_knowledge", "anti_patterns"]
    if memory_type not in allowed_types:
        return {
            "success": False,
            "error": f"memory_type must be one of: {', '.join(allowed_types)}",
        }

    if not 0.0 <= similarity_threshold <= 1.0:
        return {
            "success": False,
            "error": "similarity_threshold must be between 0.0 and 1.0",
        }

    try:
        from alma.consolidation import ConsolidationEngine

        consolidator = ConsolidationEngine(
            storage=alma.storage,
            embedder=None,  # falls back to the default LocalEmbedder
            llm_client=None,  # LLM-powered merging stays off by default
        )

        outcome = await consolidator.consolidate(
            agent=agent,
            project_id=alma.project_id,
            memory_type=memory_type,
            similarity_threshold=similarity_threshold,
            use_llm=False,  # merge by highest confidence, no LLM calls
            dry_run=dry_run,
        )

        # Cached retrievals go stale once memories were actually merged.
        if not dry_run and outcome.merged_count > 0:
            alma.retrieval.invalidate_cache(agent=agent, project_id=alma.project_id)

        verb = "Would merge" if dry_run else "Merged"
        return {
            "success": outcome.success,
            "dry_run": dry_run,
            "merged_count": outcome.merged_count,
            "groups_found": outcome.groups_found,
            "memories_processed": outcome.memories_processed,
            "merge_details": outcome.merge_details,
            "errors": outcome.errors,
            "message": (
                f"{verb} {outcome.merged_count} memories "
                f"from {outcome.groups_found} similar groups "
                f"(processed {outcome.memories_processed} total)"
            ),
        }

    except Exception as e:
        logger.exception(f"Error in alma_consolidate: {e}")
        return {"success": False, "error": str(e)}
|
|
656
|
+
|
|
657
|
+
|
|
658
|
+
# =============================================================================
|
|
659
|
+
# ASYNC MCP TOOLS
|
|
660
|
+
# =============================================================================
|
|
661
|
+
#
|
|
662
|
+
# Async versions of MCP tools for use in async MCP server implementations.
|
|
663
|
+
# These use ALMA's async_* methods to avoid blocking the event loop.
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
async def async_alma_retrieve(
    alma: ALMA,
    task: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """
    Async version of alma_retrieve. Retrieve relevant memories for a task.

    Args:
        alma: ALMA instance
        task: Description of the task to perform
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Maximum items per memory type

    Returns:
        Dict containing the memory slice with relevant memories
    """
    # Reject blank required fields up front, before touching storage.
    for field_name, field_value in (("task", task), ("agent", agent)):
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        result_slice = await alma.async_retrieve(
            task=task,
            agent=agent,
            user_id=user_id,
            top_k=top_k,
        )

        response: Dict[str, Any] = {
            "success": True,
            "memories": _serialize_memory_slice(result_slice),
            "prompt_injection": result_slice.to_prompt(),
        }
        return response

    except Exception as e:
        logger.exception(f"Error in async_alma_retrieve: {e}")
        return {"success": False, "error": str(e)}
|
|
712
|
+
|
|
713
|
+
|
|
714
|
+
async def async_alma_retrieve_for_mode(
    alma: ALMA,
    query: str,
    mode: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Async version of alma_retrieve_for_mode.

    Retrieve memories using a specific retrieval mode asynchronously.

    Args:
        alma: ALMA instance
        query: Description of the task to perform
        mode: Retrieval mode (broad, precise, diagnostic, learning, recall)
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Override mode's default top_k

    Returns:
        Dict containing memories, mode used, and reason for mode selection
    """
    # Input validation
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}
    if not mode or not mode.strip():
        return {"success": False, "error": "mode cannot be empty"}

    # Validate mode before touching the retrieval engine
    valid_modes = ["broad", "precise", "diagnostic", "learning", "recall"]
    mode_lower = mode.lower()
    if mode_lower not in valid_modes:
        return {
            "success": False,
            "error": f"mode must be one of: {', '.join(valid_modes)}",
        }

    try:
        # Convert string to RetrievalMode enum
        retrieval_mode = RetrievalMode(mode_lower)

        # retrieve_with_mode is synchronous; off-load it to the default
        # executor so we don't block the event loop.
        import asyncio

        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated here since Python 3.10.
        loop = asyncio.get_running_loop()
        memories, used_mode, mode_reason = await loop.run_in_executor(
            None,
            lambda: alma.retrieval.retrieve_with_mode(
                query=query,
                agent=agent,
                project_id=alma.project_id,
                mode=retrieval_mode,
                user_id=user_id,
                top_k=top_k,
                scope=alma.scopes.get(agent),
            ),
        )

        return {
            "success": True,
            "memories": _serialize_memory_slice(memories),
            "prompt_injection": memories.to_prompt(),
            "mode": used_mode.value,
            "mode_reason": mode_reason,
        }

    except Exception as e:
        logger.exception(f"Error in async_alma_retrieve_for_mode: {e}")
        return {
            "success": False,
            "error": str(e),
        }
|
|
791
|
+
|
|
792
|
+
|
|
793
|
+
async def async_alma_retrieve_smart(
    alma: ALMA,
    query: str,
    agent: str,
    user_id: Optional[str] = None,
    top_k: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Async version of alma_retrieve_smart.

    Retrieve memories with auto-inferred retrieval mode asynchronously.

    Args:
        alma: ALMA instance
        query: Description of the task to perform
        agent: Name of the agent requesting memories
        user_id: Optional user ID for preference retrieval
        top_k: Override mode's default top_k

    Returns:
        Dict containing memories, inferred mode, and reason for mode selection
    """
    # Input validation
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    try:
        # Call retrieve_with_mode with mode=None so the engine infers
        # the best mode from the query.  retrieve_with_mode is synchronous,
        # so run it in the default executor to keep the loop responsive.
        import asyncio

        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated here since Python 3.10.
        loop = asyncio.get_running_loop()
        memories, inferred_mode, mode_reason = await loop.run_in_executor(
            None,
            lambda: alma.retrieval.retrieve_with_mode(
                query=query,
                agent=agent,
                project_id=alma.project_id,
                mode=None,  # Auto-infer
                user_id=user_id,
                top_k=top_k,
                scope=alma.scopes.get(agent),
            ),
        )

        return {
            "success": True,
            "memories": _serialize_memory_slice(memories),
            "prompt_injection": memories.to_prompt(),
            "mode": inferred_mode.value,
            "mode_reason": mode_reason,
        }

    except Exception as e:
        logger.exception(f"Error in async_alma_retrieve_smart: {e}")
        return {
            "success": False,
            "error": str(e),
        }
|
|
854
|
+
|
|
855
|
+
|
|
856
|
+
async def async_alma_learn(
    alma: ALMA,
    agent: str,
    task: str,
    outcome: str,
    strategy_used: str,
    task_type: Optional[str] = None,
    duration_ms: Optional[int] = None,
    error_message: Optional[str] = None,
    feedback: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Async version of alma_learn. Record a task outcome for learning.

    Args:
        alma: ALMA instance
        agent: Name of the agent that executed the task
        task: Description of the task
        outcome: "success" or "failure"
        strategy_used: What approach was taken
        task_type: Category of task (for grouping)
        duration_ms: How long the task took
        error_message: Error details if failed
        feedback: User feedback if provided

    Returns:
        Dict with learning result
    """
    # All four required text fields must be non-blank.
    required_fields = (
        ("agent", agent),
        ("task", task),
        ("outcome", outcome),
        ("strategy_used", strategy_used),
    )
    for field_name, field_value in required_fields:
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        record = await alma.async_learn(
            agent=agent,
            task=task,
            outcome=outcome,
            strategy_used=strategy_used,
            task_type=task_type,
            duration_ms=duration_ms,
            error_message=error_message,
            feedback=feedback,
        )

        return {
            "success": True,
            "learned": True,
            "outcome": {
                "id": record.id,
                "agent": record.agent,
                "task_type": record.task_type,
                "success": record.success,
            },
            "message": "Outcome recorded successfully",
        }

    except Exception as e:
        logger.exception(f"Error in async_alma_learn: {e}")
        return {"success": False, "error": str(e)}
|
|
924
|
+
|
|
925
|
+
|
|
926
|
+
async def async_alma_add_preference(
    alma: ALMA,
    user_id: str,
    category: str,
    preference: str,
    source: str = "explicit_instruction",
) -> Dict[str, Any]:
    """
    Async version of alma_add_preference. Add a user preference to memory.

    Args:
        alma: ALMA instance
        user_id: User identifier
        category: Category (communication, code_style, workflow)
        preference: The preference text
        source: How this was learned

    Returns:
        Dict with the created preference
    """
    # Required fields may not be blank.
    for field_name, field_value in (
        ("user_id", user_id),
        ("category", category),
        ("preference", preference),
    ):
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        created = await alma.async_add_user_preference(
            user_id=user_id,
            category=category,
            preference=preference,
            source=source,
        )

        return {
            "success": True,
            "preference": {
                "id": created.id,
                "user_id": created.user_id,
                "category": created.category,
                "preference": created.preference,
                "source": created.source,
            },
        }

    except Exception as e:
        logger.exception(f"Error in async_alma_add_preference: {e}")
        return {"success": False, "error": str(e)}
|
|
979
|
+
|
|
980
|
+
|
|
981
|
+
async def async_alma_add_knowledge(
    alma: ALMA,
    agent: str,
    domain: str,
    fact: str,
    source: str = "user_stated",
) -> Dict[str, Any]:
    """
    Async version of alma_add_knowledge. Add domain knowledge within agent's scope.

    Args:
        alma: ALMA instance
        agent: Agent this knowledge belongs to
        domain: Knowledge domain
        fact: The fact to remember
        source: How this was learned

    Returns:
        Dict with the created knowledge or rejection reason
    """
    # Required fields may not be blank.
    for field_name, field_value in (
        ("agent", agent),
        ("domain", domain),
        ("fact", fact),
    ):
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        created = await alma.async_add_domain_knowledge(
            agent=agent,
            domain=domain,
            fact=fact,
            source=source,
        )

        return {
            "success": True,
            "knowledge": {
                "id": created.id,
                "agent": created.agent,
                "domain": created.domain,
                "fact": created.fact,
                "source": created.source,
            },
        }

    except Exception as e:
        logger.exception(f"Error in async_alma_add_knowledge: {e}")
        return {"success": False, "error": str(e)}
|
|
1034
|
+
|
|
1035
|
+
|
|
1036
|
+
async def async_alma_forget(
    alma: ALMA,
    agent: Optional[str] = None,
    older_than_days: int = 90,
    below_confidence: float = 0.3,
) -> Dict[str, Any]:
    """
    Async version of alma_forget. Prune stale or low-confidence memories.

    Args:
        alma: ALMA instance
        agent: Specific agent to prune, or None for all
        older_than_days: Remove outcomes older than this
        below_confidence: Remove heuristics below this confidence

    Returns:
        Dict with number of items pruned
    """
    try:
        removed = await alma.async_forget(
            agent=agent,
            older_than_days=older_than_days,
            below_confidence=below_confidence,
        )
        return {
            "success": True,
            "pruned_count": removed,
            "message": f"Pruned {removed} stale or low-confidence memories",
        }
    except Exception as e:
        logger.exception(f"Error in async_alma_forget: {e}")
        return {"success": False, "error": str(e)}
|
|
1073
|
+
|
|
1074
|
+
|
|
1075
|
+
async def async_alma_stats(
    alma: ALMA,
    agent: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Async version of alma_stats. Get memory statistics.

    Args:
        alma: ALMA instance
        agent: Specific agent or None for all

    Returns:
        Dict with memory statistics
    """
    try:
        collected = await alma.async_get_stats(agent=agent)
        return {"success": True, "stats": collected}
    except Exception as e:
        logger.exception(f"Error in async_alma_stats: {e}")
        return {"success": False, "error": str(e)}
|
|
1103
|
+
|
|
1104
|
+
|
|
1105
|
+
async def async_alma_health(alma: ALMA) -> Dict[str, Any]:
    """
    Async version of alma_health. Health check for ALMA server.

    Args:
        alma: ALMA instance

    Returns:
        Dict with health status
    """
    try:
        # Fetching stats doubles as a liveness probe for the storage layer.
        stats = await alma.async_get_stats()

        return {
            "success": True,
            "status": "healthy",
            "project_id": alma.project_id,
            "total_memories": stats.get("total_count", 0),
            "registered_agents": list(alma.scopes.keys()),
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
    except Exception as e:
        logger.exception(f"Error in async_alma_health: {e}")
        return {
            "success": False,
            "status": "unhealthy",
            "error": str(e),
        }
|
|
1135
|
+
|
|
1136
|
+
|
|
1137
|
+
# =============================================================================
|
|
1138
|
+
# WORKFLOW MCP TOOLS (v0.6.0)
|
|
1139
|
+
# =============================================================================
|
|
1140
|
+
#
|
|
1141
|
+
# Tools for workflow integration: checkpointing, scoped retrieval,
|
|
1142
|
+
# learning from workflows, and artifact linking.
|
|
1143
|
+
|
|
1144
|
+
|
|
1145
|
+
def alma_checkpoint(
    alma: ALMA,
    run_id: str,
    node_id: str,
    state: Dict[str, Any],
    branch_id: Optional[str] = None,
    parent_checkpoint_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    skip_if_unchanged: bool = True,
) -> Dict[str, Any]:
    """
    Create a checkpoint for crash recovery.

    Args:
        alma: ALMA instance
        run_id: The workflow run identifier
        node_id: The node creating this checkpoint
        state: The state to persist
        branch_id: Optional branch identifier for parallel execution
        parent_checkpoint_id: Previous checkpoint in the chain
        metadata: Additional checkpoint metadata
        skip_if_unchanged: If True, skip if state hasn't changed

    Returns:
        Dict with checkpoint info or skip notification
    """
    for field_name, field_value in (("run_id", run_id), ("node_id", node_id)):
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        cp = alma.checkpoint(
            run_id=run_id,
            node_id=node_id,
            state=state,
            branch_id=branch_id,
            parent_checkpoint_id=parent_checkpoint_id,
            metadata=metadata,
            skip_if_unchanged=skip_if_unchanged,
        )

        # A None return means dedup kicked in: the state hash matched the
        # previous checkpoint, so nothing was written.
        if cp is None:
            return {
                "success": True,
                "skipped": True,
                "message": "Checkpoint skipped - state unchanged",
            }

        return {
            "success": True,
            "checkpoint": {
                "id": cp.id,
                "run_id": cp.run_id,
                "node_id": cp.node_id,
                "sequence_number": cp.sequence_number,
                "branch_id": cp.branch_id,
                "state_hash": cp.state_hash,
                "created_at": cp.created_at.isoformat(),
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_checkpoint: {e}")
        return {"success": False, "error": str(e)}
|
|
1210
|
+
|
|
1211
|
+
|
|
1212
|
+
def alma_resume(
    alma: ALMA,
    run_id: str,
    branch_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Get the checkpoint to resume from after a crash.

    Args:
        alma: ALMA instance
        run_id: The workflow run identifier
        branch_id: Optional branch to filter by

    Returns:
        Dict with checkpoint info or None if no checkpoints
    """
    if not run_id or not run_id.strip():
        return {"success": False, "error": "run_id cannot be empty"}

    try:
        cp = alma.get_resume_point(run_id, branch_id)

        # No checkpoints exist for this run; still a successful lookup.
        if cp is None:
            return {
                "success": True,
                "checkpoint": None,
                "message": "No checkpoint found for this run",
            }

        return {
            "success": True,
            "checkpoint": {
                "id": cp.id,
                "run_id": cp.run_id,
                "node_id": cp.node_id,
                "state": cp.state,
                "sequence_number": cp.sequence_number,
                "branch_id": cp.branch_id,
                "parent_checkpoint_id": cp.parent_checkpoint_id,
                "created_at": cp.created_at.isoformat(),
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_resume: {e}")
        return {"success": False, "error": str(e)}
|
|
1258
|
+
|
|
1259
|
+
|
|
1260
|
+
def alma_merge_states(
    alma: ALMA,
    states: list,
    reducer_config: Optional[Dict[str, str]] = None,
) -> Dict[str, Any]:
    """
    Merge multiple branch states after parallel execution.

    Args:
        alma: ALMA instance
        states: List of state dicts from parallel branches
        reducer_config: Optional mapping of key -> reducer name.
            Available: append, merge_dict, last_value,
            first_value, sum, max, min, union

    Returns:
        Dict with merged state
    """
    # Nothing to merge: an empty state list merges to an empty dict.
    if not states:
        return {"success": True, "merged_state": {}}

    try:
        combined = alma.merge_states(states, reducer_config)
        return {
            "success": True,
            "merged_state": combined,
            "input_count": len(states),
        }
    except Exception as e:
        logger.exception(f"Error in alma_merge_states: {e}")
        return {"success": False, "error": str(e)}
|
|
1293
|
+
|
|
1294
|
+
|
|
1295
|
+
def alma_workflow_learn(
    alma: ALMA,
    agent: str,
    workflow_id: str,
    run_id: str,
    result: str,
    summary: str,
    strategies_used: Optional[list] = None,
    successful_patterns: Optional[list] = None,
    failed_patterns: Optional[list] = None,
    duration_seconds: Optional[float] = None,
    node_count: Optional[int] = None,
    error_message: Optional[str] = None,
    tenant_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Record learnings from a completed workflow execution.

    Args:
        alma: ALMA instance
        agent: The agent that executed the workflow
        workflow_id: The workflow definition that was executed
        run_id: The specific run this outcome is from
        result: Result status (success, failure, partial, cancelled, timeout)
        summary: Human-readable summary of what happened
        strategies_used: List of strategies/approaches attempted
        successful_patterns: Patterns that worked well
        failed_patterns: Patterns that didn't work
        duration_seconds: How long the workflow took
        node_count: Number of nodes executed
        error_message: Error details if failed
        tenant_id: Multi-tenant isolation identifier
        metadata: Additional outcome metadata

    Returns:
        Dict with workflow outcome info
    """
    # Validate required text fields in declaration order.
    required_fields = (
        ("agent", agent),
        ("workflow_id", workflow_id),
        ("run_id", run_id),
        ("result", result),
        ("summary", summary),
    )
    for field_name, field_value in required_fields:
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    valid_results = ["success", "failure", "partial", "cancelled", "timeout"]
    if result not in valid_results:
        return {
            "success": False,
            "error": f"result must be one of: {', '.join(valid_results)}",
        }

    try:
        recorded = alma.learn_from_workflow(
            agent=agent,
            workflow_id=workflow_id,
            run_id=run_id,
            result=result,
            summary=summary,
            strategies_used=strategies_used,
            successful_patterns=successful_patterns,
            failed_patterns=failed_patterns,
            duration_seconds=duration_seconds,
            node_count=node_count,
            error_message=error_message,
            tenant_id=tenant_id,
            metadata=metadata,
        )

        return {
            "success": True,
            "outcome": {
                "id": recorded.id,
                "workflow_id": recorded.workflow_id,
                "run_id": recorded.run_id,
                "result": recorded.result.value,
                "agent": recorded.agent,
                "created_at": recorded.created_at.isoformat(),
            },
            "message": "Workflow outcome recorded successfully",
        }

    except Exception as e:
        logger.exception(f"Error in alma_workflow_learn: {e}")
        return {"success": False, "error": str(e)}
|
|
1384
|
+
|
|
1385
|
+
|
|
1386
|
+
def alma_link_artifact(
    alma: ALMA,
    memory_id: str,
    artifact_type: str,
    storage_url: str,
    filename: Optional[str] = None,
    mime_type: Optional[str] = None,
    size_bytes: Optional[int] = None,
    checksum: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Link an external artifact to a memory.

    Args:
        alma: ALMA instance
        memory_id: The memory to link the artifact to
        artifact_type: Type (screenshot, log, report, file, etc.)
        storage_url: URL or path to the artifact in storage
        filename: Original filename
        mime_type: MIME type
        size_bytes: Size in bytes
        checksum: SHA256 checksum for integrity
        metadata: Additional artifact metadata

    Returns:
        Dict with artifact reference info
    """
    for field_name, field_value in (
        ("memory_id", memory_id),
        ("artifact_type", artifact_type),
        ("storage_url", storage_url),
    ):
        if not field_value or not field_value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        ref = alma.link_artifact(
            memory_id=memory_id,
            artifact_type=artifact_type,
            storage_url=storage_url,
            filename=filename,
            mime_type=mime_type,
            size_bytes=size_bytes,
            checksum=checksum,
            metadata=metadata,
        )

        return {
            "success": True,
            "artifact": {
                "id": ref.id,
                "memory_id": ref.memory_id,
                "artifact_type": ref.artifact_type.value,
                "storage_url": ref.storage_url,
                "filename": ref.filename,
                "created_at": ref.created_at.isoformat(),
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_link_artifact: {e}")
        return {"success": False, "error": str(e)}
|
|
1448
|
+
|
|
1449
|
+
|
|
1450
|
+
def alma_get_artifacts(
    alma: ALMA,
    memory_id: str,
) -> Dict[str, Any]:
    """
    Get all artifacts linked to a memory.

    Args:
        alma: ALMA instance
        memory_id: The memory to get artifacts for

    Returns:
        Dict with list of artifact references
    """
    if not memory_id or not memory_id.strip():
        return {"success": False, "error": "memory_id cannot be empty"}

    try:
        refs = alma.get_artifacts(memory_id)

        def _as_dict(ref) -> Dict[str, Any]:
            # Flatten one artifact reference into a JSON-friendly dict.
            return {
                "id": ref.id,
                "artifact_type": ref.artifact_type.value,
                "storage_url": ref.storage_url,
                "filename": ref.filename,
                "mime_type": ref.mime_type,
                "size_bytes": ref.size_bytes,
                "created_at": ref.created_at.isoformat(),
            }

        return {
            "success": True,
            "artifacts": [_as_dict(ref) for ref in refs],
            "count": len(refs),
        }

    except Exception as e:
        logger.exception(f"Error in alma_get_artifacts: {e}")
        return {"success": False, "error": str(e)}
|
|
1490
|
+
|
|
1491
|
+
|
|
1492
|
+
def alma_cleanup_checkpoints(
    alma: ALMA,
    run_id: str,
    keep_latest: int = 1,
) -> Dict[str, Any]:
    """
    Delete old checkpoints for a completed run, keeping the newest ones.

    Args:
        alma: ALMA instance
        run_id: The workflow run identifier
        keep_latest: How many of the most recent checkpoints to retain

    Returns:
        Dict describing how many checkpoints were removed
    """
    if not run_id or not run_id.strip():
        return {"success": False, "error": "run_id cannot be empty"}

    try:
        deleted = alma.cleanup_checkpoints(run_id, keep_latest)
    except Exception as e:
        logger.exception(f"Error in alma_cleanup_checkpoints: {e}")
        return {"success": False, "error": str(e)}

    return {
        "success": True,
        "deleted_count": deleted,
        "kept": keep_latest,
        "message": f"Deleted {deleted} checkpoints, kept {keep_latest}",
    }
|
|
1524
|
+
|
|
1525
|
+
|
|
1526
|
+
def alma_retrieve_scoped(
    alma: ALMA,
    task: str,
    agent: str,
    scope: str = "agent",
    tenant_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    run_id: Optional[str] = None,
    node_id: Optional[str] = None,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """
    Retrieve memories restricted to a workflow scope.

    Args:
        alma: ALMA instance
        task: Description of the task to perform
        agent: Name of the agent requesting memories
        scope: Scope level (node, run, workflow, agent, tenant, global)
        tenant_id: Tenant identifier for multi-tenant
        workflow_id: Workflow definition identifier
        run_id: Specific run identifier
        node_id: Current node identifier
        user_id: Optional user ID for preferences
        top_k: Maximum items per memory type

    Returns:
        Dict with the scoped memory slice and a prompt-ready rendering
    """
    # Reject blank required string inputs up front.
    for field_name, value in (("task", task), ("agent", agent)):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    allowed_scopes = ("node", "run", "workflow", "agent", "tenant", "global")
    if scope not in allowed_scopes:
        return {
            "success": False,
            "error": f"scope must be one of: {', '.join(allowed_scopes)}",
        }

    try:
        from alma.workflow import RetrievalScope, WorkflowContext

        ctx = WorkflowContext(
            tenant_id=tenant_id,
            workflow_id=workflow_id,
            run_id=run_id,
            node_id=node_id,
        )
        slice_ = alma.retrieve_with_scope(
            task=task,
            agent=agent,
            context=ctx,
            scope=RetrievalScope(scope),
            user_id=user_id,
            top_k=top_k,
        )
        return {
            "success": True,
            "memories": _serialize_memory_slice(slice_),
            "prompt_injection": slice_.to_prompt(),
            "scope": scope,
            "scope_filter": slice_.metadata.get("scope_filter", {}),
        }
    except Exception as e:
        logger.exception(f"Error in alma_retrieve_scoped: {e}")
        return {"success": False, "error": str(e)}
|
|
1598
|
+
|
|
1599
|
+
|
|
1600
|
+
# =============================================================================
|
|
1601
|
+
# ASYNC WORKFLOW MCP TOOLS
|
|
1602
|
+
# =============================================================================
|
|
1603
|
+
|
|
1604
|
+
|
|
1605
|
+
async def async_alma_checkpoint(
    alma: ALMA,
    run_id: str,
    node_id: str,
    state: Dict[str, Any],
    branch_id: Optional[str] = None,
    parent_checkpoint_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    skip_if_unchanged: bool = True,
) -> Dict[str, Any]:
    """Async version of alma_checkpoint."""
    # Validate the two required identifiers before touching storage.
    for field_name, value in (("run_id", run_id), ("node_id", node_id)):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        cp = await alma.async_checkpoint(
            run_id=run_id,
            node_id=node_id,
            state=state,
            branch_id=branch_id,
            parent_checkpoint_id=parent_checkpoint_id,
            metadata=metadata,
            skip_if_unchanged=skip_if_unchanged,
        )
    except Exception as e:
        logger.exception(f"Error in async_alma_checkpoint: {e}")
        return {"success": False, "error": str(e)}

    # None signals the dedupe path: the state hash matched the last checkpoint.
    if cp is None:
        return {
            "success": True,
            "skipped": True,
            "message": "Checkpoint skipped - state unchanged",
        }

    summary = {
        attr: getattr(cp, attr)
        for attr in (
            "id",
            "run_id",
            "node_id",
            "sequence_number",
            "branch_id",
            "state_hash",
        )
    }
    summary["created_at"] = cp.created_at.isoformat()
    return {"success": True, "checkpoint": summary}
|
|
1655
|
+
|
|
1656
|
+
|
|
1657
|
+
async def async_alma_resume(
    alma: ALMA,
    run_id: str,
    branch_id: Optional[str] = None,
) -> Dict[str, Any]:
    """Async version of alma_resume."""
    if not run_id or not run_id.strip():
        return {"success": False, "error": "run_id cannot be empty"}

    try:
        cp = await alma.async_get_resume_point(run_id, branch_id)
    except Exception as e:
        logger.exception(f"Error in async_alma_resume: {e}")
        return {"success": False, "error": str(e)}

    # A missing checkpoint is a normal outcome, not an error.
    if cp is None:
        return {
            "success": True,
            "checkpoint": None,
            "message": "No checkpoint found for this run",
        }

    return {
        "success": True,
        "checkpoint": {
            "id": cp.id,
            "run_id": cp.run_id,
            "node_id": cp.node_id,
            "state": cp.state,
            "sequence_number": cp.sequence_number,
            "branch_id": cp.branch_id,
            "parent_checkpoint_id": cp.parent_checkpoint_id,
            "created_at": cp.created_at.isoformat(),
        },
    }
|
|
1693
|
+
|
|
1694
|
+
|
|
1695
|
+
async def async_alma_workflow_learn(
    alma: ALMA,
    agent: str,
    workflow_id: str,
    run_id: str,
    result: str,
    summary: str,
    strategies_used: Optional[list] = None,
    successful_patterns: Optional[list] = None,
    failed_patterns: Optional[list] = None,
    duration_seconds: Optional[float] = None,
    node_count: Optional[int] = None,
    error_message: Optional[str] = None,
    tenant_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Async version of alma_workflow_learn."""
    # All five identifiers/descriptions are required and must be non-blank.
    required = (
        ("agent", agent),
        ("workflow_id", workflow_id),
        ("run_id", run_id),
        ("result", result),
        ("summary", summary),
    )
    for field_name, value in required:
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    allowed_results = ("success", "failure", "partial", "cancelled", "timeout")
    if result not in allowed_results:
        return {
            "success": False,
            "error": f"result must be one of: {', '.join(allowed_results)}",
        }

    try:
        recorded = await alma.async_learn_from_workflow(
            agent=agent,
            workflow_id=workflow_id,
            run_id=run_id,
            result=result,
            summary=summary,
            strategies_used=strategies_used,
            successful_patterns=successful_patterns,
            failed_patterns=failed_patterns,
            duration_seconds=duration_seconds,
            node_count=node_count,
            error_message=error_message,
            tenant_id=tenant_id,
            metadata=metadata,
        )
        return {
            "success": True,
            "outcome": {
                "id": recorded.id,
                "workflow_id": recorded.workflow_id,
                "run_id": recorded.run_id,
                "result": recorded.result.value,
                "agent": recorded.agent,
                "created_at": recorded.created_at.isoformat(),
            },
            "message": "Workflow outcome recorded successfully",
        }
    except Exception as e:
        logger.exception(f"Error in async_alma_workflow_learn: {e}")
        return {"success": False, "error": str(e)}
|
|
1763
|
+
|
|
1764
|
+
|
|
1765
|
+
async def async_alma_link_artifact(
    alma: ALMA,
    memory_id: str,
    artifact_type: str,
    storage_url: str,
    filename: Optional[str] = None,
    mime_type: Optional[str] = None,
    size_bytes: Optional[int] = None,
    checksum: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Async version of alma_link_artifact."""
    required = (
        ("memory_id", memory_id),
        ("artifact_type", artifact_type),
        ("storage_url", storage_url),
    )
    for field_name, value in required:
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    try:
        ref = await alma.async_link_artifact(
            memory_id=memory_id,
            artifact_type=artifact_type,
            storage_url=storage_url,
            filename=filename,
            mime_type=mime_type,
            size_bytes=size_bytes,
            checksum=checksum,
            metadata=metadata,
        )
        return {
            "success": True,
            "artifact": {
                "id": ref.id,
                "memory_id": ref.memory_id,
                "artifact_type": ref.artifact_type.value,
                "storage_url": ref.storage_url,
                "filename": ref.filename,
                "created_at": ref.created_at.isoformat(),
            },
        }
    except Exception as e:
        logger.exception(f"Error in async_alma_link_artifact: {e}")
        return {"success": False, "error": str(e)}
|
|
1811
|
+
|
|
1812
|
+
|
|
1813
|
+
async def async_alma_retrieve_scoped(
    alma: ALMA,
    task: str,
    agent: str,
    scope: str = "agent",
    tenant_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    run_id: Optional[str] = None,
    node_id: Optional[str] = None,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """Async version of alma_retrieve_scoped."""
    for field_name, value in (("task", task), ("agent", agent)):
        if not value or not value.strip():
            return {"success": False, "error": f"{field_name} cannot be empty"}

    allowed_scopes = ("node", "run", "workflow", "agent", "tenant", "global")
    if scope not in allowed_scopes:
        return {
            "success": False,
            "error": f"scope must be one of: {', '.join(allowed_scopes)}",
        }

    try:
        from alma.workflow import RetrievalScope, WorkflowContext

        ctx = WorkflowContext(
            tenant_id=tenant_id,
            workflow_id=workflow_id,
            run_id=run_id,
            node_id=node_id,
        )
        slice_ = await alma.async_retrieve_with_scope(
            task=task,
            agent=agent,
            context=ctx,
            scope=RetrievalScope(scope),
            user_id=user_id,
            top_k=top_k,
        )
        return {
            "success": True,
            "memories": _serialize_memory_slice(slice_),
            "prompt_injection": slice_.to_prompt(),
            "scope": scope,
            "scope_filter": slice_.metadata.get("scope_filter", {}),
        }
    except Exception as e:
        logger.exception(f"Error in async_alma_retrieve_scoped: {e}")
        return {"success": False, "error": str(e)}
|
|
1868
|
+
|
|
1869
|
+
|
|
1870
|
+
# =============================================================================
|
|
1871
|
+
# MEMORY WALL ENHANCEMENT TOOLS (v0.7.0)
|
|
1872
|
+
# =============================================================================
|
|
1873
|
+
#
|
|
1874
|
+
# Tools for Memory Wall enhancements: decay management, verified retrieval,
|
|
1875
|
+
# and compression pipeline.
|
|
1876
|
+
|
|
1877
|
+
|
|
1878
|
+
def alma_reinforce(
    alma: ALMA,
    memory_id: str,
    memory_type: str = "unknown",
) -> Dict[str, Any]:
    """
    Strengthen a memory so it is not forgotten by decay.

    Call this when a memory proved valuable, when a weak memory should be
    preserved, or during a periodic review to keep an entry active.

    Args:
        alma: ALMA instance
        memory_id: ID of the memory to reinforce
        memory_type: Type of memory (heuristic, outcome, knowledge, etc.)

    Returns:
        Dict with the new strength score (0.0-1.0; higher is stronger)
    """
    if not memory_id or not memory_id.strip():
        return {"success": False, "error": "memory_id cannot be empty"}

    try:
        from alma.learning.decay import DecayManager

        manager = DecayManager(alma.storage)
        strength = manager.reinforce_memory(memory_id, memory_type)
        # Re-read the record to report access/reinforcement counters.
        record = manager.get_strength(memory_id, memory_type)
        return {
            "success": True,
            "memory_id": memory_id,
            "new_strength": round(strength, 3),
            "access_count": record.access_count,
            "reinforcement_count": len(record.reinforcement_events),
            "status": "reinforced",
        }
    except Exception as e:
        logger.exception(f"Error in alma_reinforce: {e}")
        return {"success": False, "error": str(e)}
|
|
1921
|
+
|
|
1922
|
+
|
|
1923
|
+
def alma_get_weak_memories(
    alma: ALMA,
    project_id: Optional[str] = None,
    agent: Optional[str] = None,
    include_forgettable: bool = False,
) -> Dict[str, Any]:
    """
    List memories that have decayed and may soon be forgotten.

    Reports memories in the "recoverable" band (strength 0.1-0.3); these can
    be reinforced to save them or left to decay. Useful for periodic memory
    health checks.

    Args:
        alma: ALMA instance
        project_id: Project to check (defaults to ALMA's project)
        agent: Specific agent to check
        include_forgettable: Also include memories ready to forget (strength < 0.1)

    Returns:
        Dict with the weak-memory rows and counts
    """
    def _row(mid, mtype, strength) -> Dict[str, Any]:
        # Shared row shape for both the weak and forgettable listings.
        return {
            "memory_id": mid,
            "memory_type": mtype,
            "strength": round(strength, 3),
        }

    try:
        from alma.learning.decay import DecayManager

        manager = DecayManager(alma.storage)
        pid = project_id or alma.project_id

        weak = manager.get_weak_memories(project_id=pid, agent=agent)
        report: Dict[str, Any] = {
            "success": True,
            "weak_memories": [_row(*entry) for entry in weak],
            "count": len(weak),
        }

        if include_forgettable:
            forgettable = manager.get_forgettable_memories(
                project_id=pid, agent=agent
            )
            report["forgettable"] = [_row(*entry) for entry in forgettable]
            report["forgettable_count"] = len(forgettable)

        return report
    except Exception as e:
        logger.exception(f"Error in alma_get_weak_memories: {e}")
        return {"success": False, "error": str(e)}
|
|
1978
|
+
|
|
1979
|
+
|
|
1980
|
+
def alma_smart_forget(
    alma: ALMA,
    project_id: Optional[str] = None,
    agent: Optional[str] = None,
    threshold: float = 0.1,
    dry_run: bool = True,
) -> Dict[str, Any]:
    """
    Archive and remove memories whose strength fell below a threshold.

    Memories below ``threshold`` are archived (kept for recovery/compliance)
    and then removed from active memory. Intended for periodic cleanup or
    when memory gets noisy. Defaults to a dry run that only previews the
    affected memories.

    Args:
        alma: ALMA instance
        project_id: Project to clean (defaults to ALMA's project)
        agent: Specific agent to clean
        threshold: Strength threshold below which to forget (default 0.1)
        dry_run: If True, show what would be forgotten without doing it

    Returns:
        Dict with the forgotten memories or a dry-run preview
    """
    if not 0.0 <= threshold <= 1.0:
        return {
            "success": False,
            "error": "threshold must be between 0.0 and 1.0",
        }

    try:
        from alma.learning.decay import DecayManager

        manager = DecayManager(alma.storage)
        pid = project_id or alma.project_id

        outcome = manager.smart_forget(
            project_id=pid,
            agent=agent,
            threshold=threshold,
            archive=True,  # always archive so forgetting is reversible
            dry_run=dry_run,
        )
        affected = outcome.get("count", 0)

        if dry_run:
            return {
                "success": True,
                "dry_run": True,
                "would_forget": outcome.get("would_forget", []),
                "count": affected,
                "message": f"Would forget {affected} memories. "
                "Run with dry_run=False to execute.",
            }

        return {
            "success": True,
            "dry_run": False,
            "forgotten": outcome.get("forgotten", []),
            "archived": outcome.get("archived", []),
            "count": affected,
            "message": f"Forgot {affected} memories (archived for recovery)",
        }
    except Exception as e:
        logger.exception(f"Error in alma_smart_forget: {e}")
        return {"success": False, "error": str(e)}
|
|
2049
|
+
|
|
2050
|
+
|
|
2051
|
+
def alma_retrieve_verified(
    alma: ALMA,
    query: str,
    agent: str,
    project_id: Optional[str] = None,
    ground_truth: Optional[list] = None,
    cross_verify: bool = True,
    top_k: int = 5,
) -> Dict[str, Any]:
    """
    Retrieve memories with verification status.

    Two-stage retrieval:
    1. Semantic search for candidates
    2. Verify each candidate

    Results are categorized:
    - verified: Safe to use
    - uncertain: Use with caution
    - contradicted: Needs review (may be stale)

    Optionally provide ground_truth sources for fact-checking.

    Args:
        alma: ALMA instance
        query: Search query
        agent: Agent requesting retrieval
        project_id: Project context (defaults to ALMA's project)
        ground_truth: Optional list of authoritative source texts for verification
        cross_verify: Whether to cross-verify against other memories
        top_k: Number of results to return

    Returns:
        Dict with categorized verification results
    """
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    try:
        # Imported lazily so the module loads even if the optional
        # verification subsystem is unavailable.
        from alma.retrieval.verification import VerifiedRetriever

        pid = project_id or alma.project_id

        # llm_client is optional: getattr defaults to None when the ALMA
        # instance has no configured LLM.
        retriever = VerifiedRetriever(
            retrieval_engine=alma.retrieval,
            llm_client=getattr(alma, "llm", None),
        )

        results = retriever.retrieve_verified(
            query=query,
            agent=agent,
            project_id=pid,
            ground_truth_sources=ground_truth,
            cross_verify=cross_verify,
            top_k=top_k,
        )

        def serialize_verified_memory(vm):
            """Serialize a VerifiedMemory object."""
            memory_dict = {}
            memory = vm.memory
            # Handle different memory types
            # Duck-typed fallback chain: prefer an explicit to_dict(),
            # then the instance __dict__ (dropping private attrs),
            # and finally a plain string rendering.
            if hasattr(memory, "to_dict"):
                memory_dict = memory.to_dict()
            elif hasattr(memory, "__dict__"):
                memory_dict = {
                    k: v for k, v in memory.__dict__.items() if not k.startswith("_")
                }
            else:
                memory_dict = {"content": str(memory)}

            return {
                "memory": memory_dict,
                "confidence": round(vm.verification.confidence, 3),
                "reason": vm.verification.reason,
                "retrieval_score": round(vm.retrieval_score, 3),
            }

        return {
            "success": True,
            "verified": [serialize_verified_memory(vm) for vm in results.verified],
            "uncertain": [serialize_verified_memory(vm) for vm in results.uncertain],
            # Contradicted entries additionally carry the source that
            # contradicted them so callers can review the conflict.
            "contradicted": [
                {
                    **serialize_verified_memory(vm),
                    "contradiction": vm.verification.contradicting_source,
                }
                for vm in results.contradicted
            ],
            "unverifiable": [
                serialize_verified_memory(vm) for vm in results.unverifiable
            ],
            "summary": results.summary(),
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_verified: {e}")
        return {"success": False, "error": str(e)}
|
|
2151
|
+
|
|
2152
|
+
|
|
2153
|
+
def alma_compress_and_learn(
    alma: ALMA,
    content: str,
    agent: str,
    memory_type: str = "outcome",
    compression_level: str = "medium",
    project_id: Optional[str] = None,
    task_type: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Compress verbose content and store it as a memory.

    Extracts key facts, constraints, and patterns from lengthy content,
    achieving roughly 3-5x compression while preserving the essentials.
    Use this instead of alma_learn when storing verbose task outcomes or
    long documents.

    Args:
        alma: ALMA instance
        content: Verbose content to compress and store
        agent: Agent storing the memory
        memory_type: Type of memory (outcome, heuristic, knowledge)
        compression_level: Compression level (light, medium, aggressive)
        project_id: Project context (defaults to ALMA's project)
        task_type: Optional task type categorization

    Returns:
        Dict with the new memory ID and compression statistics
    """
    if not content or not content.strip():
        return {"success": False, "error": "content cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    valid_levels = ["light", "medium", "aggressive"]
    if compression_level not in valid_levels:
        return {
            "success": False,
            "error": f"compression_level must be one of: {', '.join(valid_levels)}",
        }

    valid_types = ["outcome", "heuristic", "knowledge"]
    if memory_type not in valid_types:
        return {
            "success": False,
            "error": f"memory_type must be one of: {', '.join(valid_types)}",
        }

    try:
        from alma.compression.pipeline import CompressionLevel, MemoryCompressor
        from uuid import uuid4

        pid = project_id or alma.project_id
        level = CompressionLevel(compression_level)

        # Create compressor with optional LLM (None when ALMA has no LLM).
        compressor = MemoryCompressor(llm_client=getattr(alma, "llm", None))

        # Compress the content
        compressed = compressor.compress_outcome(content, level)

        # Store based on memory type
        now = datetime.now(timezone.utc)
        metadata = compressed.to_metadata()
        # BUG FIX: a second-resolution timestamp alone collides when two
        # memories are stored within the same second, silently overwriting
        # the earlier record. Append a short random suffix to keep IDs
        # unique while preserving the sortable timestamp prefix.
        stamp = f"{now.strftime('%Y%m%d%H%M%S')}-{uuid4().hex[:8]}"

        if memory_type == "outcome":
            from alma.types import Outcome

            outcome = Outcome(
                id=f"out-compressed-{stamp}",
                agent=agent,
                project_id=pid,
                task_type=task_type or "compressed",
                task_description=compressed.summary,
                success=True,
                strategy_used="compressed learning",
                timestamp=now,
                metadata=metadata,
            )
            alma.storage.save_outcome(outcome)
            memory_id = outcome.id

        elif memory_type == "heuristic":
            from alma.types import Heuristic

            # Extract pattern from key facts if available
            condition = compressed.key_facts[0] if compressed.key_facts else "general"
            strategy = compressed.summary

            heuristic = Heuristic(
                id=f"heur-compressed-{stamp}",
                agent=agent,
                project_id=pid,
                condition=condition,
                strategy=strategy,
                confidence=0.7,
                occurrence_count=1,
                success_count=1,
                last_validated=now,
                created_at=now,
                metadata=metadata,
            )
            alma.storage.save_heuristic(heuristic)
            memory_id = heuristic.id

        else:  # knowledge
            from alma.types import DomainKnowledge

            knowledge = DomainKnowledge(
                id=f"dk-compressed-{stamp}",
                agent=agent,
                project_id=pid,
                domain=task_type or "general",
                fact=compressed.summary,
                source="compressed_learning",
                confidence=0.8,
                last_verified=now,
                metadata=metadata,
            )
            alma.storage.save_domain_knowledge(knowledge)
            memory_id = knowledge.id

        return {
            "success": True,
            "memory_id": memory_id,
            "memory_type": memory_type,
            "compression_ratio": round(compressed.compression_ratio, 2),
            "original_length": compressed.original_length,
            "compressed_length": compressed.compressed_length,
            "key_facts": compressed.key_facts,
            "constraints": compressed.constraints,
            # Truncate long summaries so the tool response stays compact.
            "summary_preview": (
                compressed.summary[:200] + "..."
                if len(compressed.summary) > 200
                else compressed.summary
            ),
        }

    except Exception as e:
        logger.exception(f"Error in alma_compress_and_learn: {e}")
        return {"success": False, "error": str(e)}
|
|
2294
|
+
|
|
2295
|
+
|
|
2296
|
+
def alma_extract_heuristic(
    alma: ALMA,
    experiences: list,
    agent: str,
    project_id: Optional[str] = None,
    auto_save: bool = True,
) -> Dict[str, Any]:
    """
    Extract a general rule from multiple similar experiences.

    Provide 3+ similar experiences and this tool will identify patterns
    and create a reusable heuristic rule. For example, three debugging
    experiences might yield: "When tests fail with timeout errors, check
    for async operations that aren't being awaited."

    Returns a null heuristic if no clear pattern is found.

    Args:
        alma: ALMA instance
        experiences: List of 3+ similar experience descriptions
        agent: Agent to attribute the heuristic to
        project_id: Project context (defaults to ALMA's project)
        auto_save: If True, automatically save the extracted heuristic

    Returns:
        Dict with the extracted heuristic or None when no pattern is found
    """
    if not experiences or len(experiences) < 3:
        return {
            "success": False,
            "error": "Need at least 3 experiences to extract a pattern",
            "provided": len(experiences) if experiences else 0,
        }
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    try:
        from alma.compression.pipeline import MemoryCompressor

        pid = project_id or alma.project_id

        compressor = MemoryCompressor(llm_client=getattr(alma, "llm", None))
        heuristic_text = compressor.extract_heuristic(experiences)

        # A falsy result means the compressor saw no shared pattern.
        if not heuristic_text:
            return {
                "success": True,
                "heuristic": None,
                "message": "No clear pattern found in these experiences",
            }

        result: Dict[str, Any] = {
            "success": True,
            "heuristic": heuristic_text,
            "source_count": len(experiences),
        }

        if auto_save:
            from alma.types import Heuristic
            from uuid import uuid4

            now = datetime.now(timezone.utc)
            # BUG FIX: the ID previously used only a second-resolution
            # timestamp, so two extractions within the same second produced
            # the same ID and could silently overwrite each other. A short
            # random suffix keeps IDs unique.
            heuristic = Heuristic(
                id=f"heur-extracted-{now.strftime('%Y%m%d%H%M%S')}-{uuid4().hex[:8]}",
                agent=agent,
                project_id=pid,
                condition="extracted from experiences",
                strategy=heuristic_text,
                confidence=0.7,
                occurrence_count=len(experiences),
                success_count=len(experiences),
                last_validated=now,
                created_at=now,
                metadata={"extracted_from_count": len(experiences)},
            )
            alma.storage.save_heuristic(heuristic)
            result["saved"] = True
            result["memory_id"] = heuristic.id
        else:
            result["saved"] = False

        return result

    except Exception as e:
        logger.exception(f"Error in alma_extract_heuristic: {e}")
        return {"success": False, "error": str(e)}
|
|
2383
|
+
|
|
2384
|
+
|
|
2385
|
+
# =============================================================================
|
|
2386
|
+
# ASYNC MEMORY WALL ENHANCEMENT TOOLS
|
|
2387
|
+
# =============================================================================
|
|
2388
|
+
|
|
2389
|
+
|
|
2390
|
+
async def async_alma_reinforce(
    alma: ALMA,
    memory_id: str,
    memory_type: str = "unknown",
) -> Dict[str, Any]:
    """Async version of alma_reinforce.

    Runs the synchronous implementation in the default thread-pool
    executor so the event loop is not blocked.

    Args:
        alma: ALMA instance
        memory_id: ID of the memory to reinforce
        memory_type: Type of the memory (defaults to "unknown")

    Returns:
        Dict produced by alma_reinforce
    """
    import asyncio

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_reinforce(alma, memory_id, memory_type),
    )
|
|
2403
|
+
|
|
2404
|
+
|
|
2405
|
+
async def async_alma_get_weak_memories(
    alma: ALMA,
    project_id: Optional[str] = None,
    agent: Optional[str] = None,
    include_forgettable: bool = False,
) -> Dict[str, Any]:
    """Async version of alma_get_weak_memories.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_get_weak_memories(alma, project_id, agent, include_forgettable),
    )
|
|
2419
|
+
|
|
2420
|
+
|
|
2421
|
+
async def async_alma_smart_forget(
    alma: ALMA,
    project_id: Optional[str] = None,
    agent: Optional[str] = None,
    threshold: float = 0.1,
    dry_run: bool = True,
) -> Dict[str, Any]:
    """Async version of alma_smart_forget.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_smart_forget(alma, project_id, agent, threshold, dry_run),
    )
|
|
2436
|
+
|
|
2437
|
+
|
|
2438
|
+
async def async_alma_retrieve_verified(
    alma: ALMA,
    query: str,
    agent: str,
    project_id: Optional[str] = None,
    ground_truth: Optional[list] = None,
    cross_verify: bool = True,
    top_k: int = 5,
) -> Dict[str, Any]:
    """Async version of alma_retrieve_verified.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_retrieve_verified(
            alma, query, agent, project_id, ground_truth, cross_verify, top_k
        ),
    )
|
|
2457
|
+
|
|
2458
|
+
|
|
2459
|
+
async def async_alma_compress_and_learn(
    alma: ALMA,
    content: str,
    agent: str,
    memory_type: str = "outcome",
    compression_level: str = "medium",
    project_id: Optional[str] = None,
    task_type: Optional[str] = None,
) -> Dict[str, Any]:
    """Async version of alma_compress_and_learn.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_compress_and_learn(
            alma, content, agent, memory_type, compression_level, project_id, task_type
        ),
    )
|
|
2478
|
+
|
|
2479
|
+
|
|
2480
|
+
async def async_alma_extract_heuristic(
    alma: ALMA,
    experiences: list,
    agent: str,
    project_id: Optional[str] = None,
    auto_save: bool = True,
) -> Dict[str, Any]:
    """Async version of alma_extract_heuristic.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_extract_heuristic(alma, experiences, agent, project_id, auto_save),
    )
|
|
2495
|
+
|
|
2496
|
+
|
|
2497
|
+
# =============================================================================
|
|
2498
|
+
# TRUST-INTEGRATED RETRIEVAL TOOLS (v0.8.0)
|
|
2499
|
+
# =============================================================================
|
|
2500
|
+
#
|
|
2501
|
+
# Tools for trust-aware scoring, token budget management, and progressive
|
|
2502
|
+
# disclosure for context engineering.
|
|
2503
|
+
|
|
2504
|
+
|
|
2505
|
+
def alma_retrieve_with_trust(
    alma: ALMA,
    query: str,
    agent: str,
    requesting_agent_id: str,
    requesting_agent_trust: float = 0.5,
    trust_behaviors: Optional[Dict[str, float]] = None,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """
    Retrieve memories with trust-integrated scoring.

    Adjusts retrieval based on the requesting agent's trust profile.
    Higher trust agents get access to more sensitive memories and
    higher scores for high-confidence heuristics.

    Anti-patterns and warnings are prioritized for lower-trust agents.

    Args:
        alma: ALMA instance
        query: Search query
        agent: Agent whose memories to search
        requesting_agent_id: ID of agent making the request
        requesting_agent_trust: Trust score (0.0-1.0) of requesting agent
        trust_behaviors: Optional per-behavior trust scores
        user_id: Optional user ID for preferences
        top_k: Number of results per type

    Returns:
        Dict with trust-scored memories and trust context
    """
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}
    if not 0.0 <= requesting_agent_trust <= 1.0:
        return {"success": False, "error": "requesting_agent_trust must be 0.0-1.0"}

    try:
        from alma.retrieval.trust_scoring import AgentTrustContext, TrustAwareScorer

        # Build trust context
        trust_context = AgentTrustContext(
            agent_id=requesting_agent_id,
            trust_score=requesting_agent_trust,
            trust_behaviors=trust_behaviors or {},
        )

        # Create trust-aware scorer
        scorer = TrustAwareScorer(embedder=alma.retrieval.embedder)

        # Get base memories
        memories = alma.retrieve(
            task=query,
            agent=agent,
            user_id=user_id,
            top_k=top_k * 2,  # Get more, then filter by trust
        )

        # BUGFIX: embed the query once, up front. Previously this was
        # computed only inside the heuristics branch, so an empty
        # heuristics list combined with non-empty anti-patterns raised
        # NameError on query_embedding.
        query_embedding = (
            scorer.embedder.embed(query)
            if (memories.heuristics or memories.anti_patterns)
            else None
        )

        # Apply trust scoring to heuristics
        if memories.heuristics:
            heuristic_embeddings = [
                scorer.embedder.embed(h.condition + " " + h.strategy)
                for h in memories.heuristics
            ]
            similarities = [
                scorer._cosine_similarity(query_embedding, e)
                for e in heuristic_embeddings
            ]

            trust_scored = scorer.score_heuristics_with_trust(
                memories.heuristics, similarities, trust_context
            )

            # Sort by trust-adjusted score and limit
            trust_scored.sort(key=lambda x: x.trust_adjusted_score, reverse=True)
            top_heuristics = trust_scored[:top_k]

            # Extract trust info
            heuristic_results = [
                {
                    "id": ts.item.id,
                    "condition": ts.item.condition,
                    "strategy": ts.item.strategy,
                    "confidence": ts.item.confidence,
                    "base_score": round(ts.base_score, 3),
                    "trust_adjusted_score": round(ts.trust_adjusted_score, 3),
                    "trust_factor": round(ts.trust_factor, 3),
                    "trust_explanation": ts.trust_explanation,
                }
                for ts in top_heuristics
            ]
        else:
            heuristic_results = []

        # Apply trust scoring to anti-patterns (prioritize for low-trust)
        if memories.anti_patterns:
            ap_embeddings = [
                scorer.embedder.embed(ap.pattern + " " + ap.why_bad)
                for ap in memories.anti_patterns
            ]
            ap_similarities = [
                scorer._cosine_similarity(query_embedding, e) for e in ap_embeddings
            ]

            trust_scored_ap = scorer.score_anti_patterns_with_trust(
                memories.anti_patterns, ap_similarities
            )
            trust_scored_ap.sort(key=lambda x: x.trust_adjusted_score, reverse=True)
            top_anti_patterns = trust_scored_ap[:top_k]

            anti_pattern_results = [
                {
                    "id": ts.item.id,
                    "pattern": ts.item.pattern,
                    "why_bad": ts.item.why_bad,
                    "better_alternative": ts.item.better_alternative,
                    "base_score": round(ts.base_score, 3),
                    "trust_adjusted_score": round(ts.trust_adjusted_score, 3),
                }
                for ts in top_anti_patterns
            ]
        else:
            anti_pattern_results = []

        # Trust-based retrieval summary
        trust_level = (
            "HIGH"
            if requesting_agent_trust >= 0.7
            else "MODERATE"
            if requesting_agent_trust >= 0.5
            else "LOW"
        )

        return {
            "success": True,
            "heuristics": heuristic_results,
            "anti_patterns": anti_pattern_results,
            "outcomes": _serialize_memory_slice(memories).get("outcomes", [])[:top_k],
            "trust_context": {
                "requesting_agent": requesting_agent_id,
                "trust_score": requesting_agent_trust,
                "trust_level": trust_level,
                "anti_patterns_prioritized": requesting_agent_trust < 0.5,
            },
            "retrieval_time_ms": memories.retrieval_time_ms,
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_with_trust: {e}")
        return {"success": False, "error": str(e)}
|
|
2658
|
+
|
|
2659
|
+
|
|
2660
|
+
def alma_retrieve_with_budget(
    alma: ALMA,
    query: str,
    agent: str,
    max_tokens: int = 4000,
    must_see_types: Optional[list] = None,
    should_see_types: Optional[list] = None,
    user_id: Optional[str] = None,
    top_k: int = 10,
) -> Dict[str, Any]:
    """
    Retrieve memories within a token budget.

    Memories are prioritized under the budget as follows:
    - MUST_SEE: always included (anti-patterns, critical warnings)
    - SHOULD_SEE: included if budget allows (heuristics, outcomes)
    - FETCH_ON_DEMAND: summaries only, full content on request
    - EXCLUDE: not included in this retrieval

    A budget report describing what was included/excluded accompanies
    the result.

    Args:
        alma: ALMA instance
        query: Search query
        agent: Agent whose memories to search
        max_tokens: Maximum token budget for context
        must_see_types: Memory types that must be included
        should_see_types: Memory types to include if space allows
        user_id: Optional user ID for preferences
        top_k: Maximum items per type before budget filtering

    Returns:
        Dict with budgeted memories and allocation report
    """
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}
    if max_tokens < 100:
        return {"success": False, "error": "max_tokens must be at least 100"}

    try:
        from alma.retrieval.budget import PriorityTier, RetrievalBudget

        # Fetch candidates first; the budget pass trims them afterwards.
        memories = alma.retrieve(
            task=query,
            agent=agent,
            user_id=user_id,
            top_k=top_k,
        )

        budget = RetrievalBudget(max_tokens=max_tokens)

        # Priority mapping: explicit caller lists win, otherwise sane
        # defaults (anti-patterns mandatory, heuristics/outcomes optional).
        tier_map = {
            t: PriorityTier.MUST_SEE for t in (must_see_types or ["anti_patterns"])
        }
        for t in should_see_types or ["heuristics", "outcomes"]:
            tier_map[t] = PriorityTier.SHOULD_SEE
        # Domain knowledge and preferences default to FETCH_ON_DEMAND
        tier_map.setdefault("domain_knowledge", PriorityTier.FETCH_ON_DEMAND)
        tier_map.setdefault("preferences", PriorityTier.FETCH_ON_DEMAND)

        trimmed_slice, alloc = budget.apply_budget(memories, tier_map)

        serialized = _serialize_memory_slice(trimmed_slice)

        return {
            "success": True,
            "memories": serialized,
            "prompt_injection": trimmed_slice.to_prompt(),
            "budget_report": {
                "max_tokens": max_tokens,
                "tokens_used": alloc.tokens_used,
                "tokens_remaining": alloc.tokens_remaining,
                "utilization": round(alloc.utilization, 3),
                "included_count": alloc.included_count,
                "excluded_count": alloc.excluded_count,
                "by_priority": {
                    tier.value: {
                        "included": n_included,
                        "tokens": alloc.tokens_by_priority.get(tier, 0),
                    }
                    for tier, n_included in alloc.included_by_priority.items()
                },
                "excluded_items": [
                    {
                        "type": entry.memory_type,
                        "id": entry.item.id,
                        "priority": entry.priority.value,
                    }
                    for entry in alloc.excluded_items[:5]  # Show first 5 excluded
                ],
            },
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_with_budget: {e}")
        return {"success": False, "error": str(e)}
|
|
2766
|
+
|
|
2767
|
+
|
|
2768
|
+
def alma_retrieve_progressive(
    alma: ALMA,
    query: str,
    agent: str,
    disclosure_level: str = "summary",
    max_summaries: int = 10,
    user_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Retrieve memory summaries with lazy-loading of full content.

    Returns compact summaries first, with IDs to fetch full details
    on demand. Implements progressive disclosure for context efficiency.

    Disclosure levels:
    - REFERENCE: Just IDs and one-line descriptions
    - SUMMARY: Brief summaries with key info
    - KEY_DETAILS: Important details without full content
    - FULL: Complete memory content

    Args:
        alma: ALMA instance
        query: Search query
        agent: Agent whose memories to search
        disclosure_level: Level of detail (reference, summary, key_details, full)
        max_summaries: Maximum number of summaries to return
        user_id: Optional user ID for preferences

    Returns:
        Dict with memory summaries and fetch instructions
    """
    if not query or not query.strip():
        return {"success": False, "error": "query cannot be empty"}
    if not agent or not agent.strip():
        return {"success": False, "error": "agent cannot be empty"}

    valid_levels = ["reference", "summary", "key_details", "full"]
    if disclosure_level not in valid_levels:
        return {
            "success": False,
            "error": f"disclosure_level must be one of: {', '.join(valid_levels)}",
        }

    try:
        from alma.retrieval.progressive import DisclosureLevel, ProgressiveRetrieval

        level = DisclosureLevel(disclosure_level)

        # Create progressive retriever
        progressive = ProgressiveRetrieval(
            retrieval_engine=alma.retrieval,
            storage=alma.storage,
        )

        # Get summaries
        progressive_slice = progressive.retrieve_summaries(
            query=query,
            agent=agent,
            project_id=alma.project_id,
            disclosure_level=level,
            max_items=max_summaries,
            user_id=user_id,
            scope=alma.scopes.get(agent),
        )

        # Format for context
        context_text = progressive.format_summaries_for_context(
            progressive_slice,
            include_fetch_hint=True,
        )

        # Build response; richer levels add more fields per summary.
        summaries = []
        for summary in progressive_slice.summaries:
            summary_dict = {
                "memory_id": summary.memory_id,
                "memory_type": summary.memory_type,
                "one_liner": summary.one_liner,
                "relevance_score": round(summary.relevance_score, 3),
            }
            if level in [
                DisclosureLevel.SUMMARY,
                DisclosureLevel.KEY_DETAILS,
                DisclosureLevel.FULL,
            ]:
                summary_dict["summary"] = summary.summary
            if level in [DisclosureLevel.KEY_DETAILS, DisclosureLevel.FULL]:
                summary_dict["key_details"] = summary.key_details
            summaries.append(summary_dict)

        return {
            "success": True,
            "disclosure_level": disclosure_level,
            "summaries": summaries,
            "context_injection": context_text,
            "total_available": progressive_slice.total_available,
            "can_fetch_more": progressive_slice.total_available > len(summaries),
            # BUGFIX: compare against the enum member, not the raw
            # string — `level != "full"` was always True for a plain
            # Enum, leaking fetch instructions at full disclosure.
            "fetch_instructions": (
                "Use alma_get_memory_full to retrieve complete content for any memory_id"
                if level != DisclosureLevel.FULL
                else None
            ),
        }

    except Exception as e:
        logger.exception(f"Error in alma_retrieve_progressive: {e}")
        return {"success": False, "error": str(e)}
|
|
2875
|
+
|
|
2876
|
+
|
|
2877
|
+
def alma_get_memory_full(
    alma: ALMA,
    memory_id: str,
    memory_type: str,
) -> Dict[str, Any]:
    """
    Get full content of a specific memory.

    Companion to progressive retrieval: after receiving summaries, call
    this with a memory_id to lazily load the complete record.

    Args:
        alma: ALMA instance
        memory_id: ID of the memory to fetch
        memory_type: Type (heuristic, outcome, domain_knowledge, anti_pattern, preference)

    Returns:
        Dict with full memory content
    """
    if not memory_id or not memory_id.strip():
        return {"success": False, "error": "memory_id cannot be empty"}
    if not memory_type or not memory_type.strip():
        return {"success": False, "error": "memory_type cannot be empty"}

    valid_types = [
        "heuristic",
        "outcome",
        "domain_knowledge",
        "anti_pattern",
        "preference",
    ]
    if memory_type not in valid_types:
        return {
            "success": False,
            "error": f"memory_type must be one of: {', '.join(valid_types)}",
        }

    try:
        from alma.retrieval.progressive import ProgressiveRetrieval

        fetcher = ProgressiveRetrieval(
            retrieval_engine=alma.retrieval,
            storage=alma.storage,
        )

        record = fetcher.get_full_item(memory_id, memory_type)

        if record is None:
            return {
                "success": False,
                "error": f"Memory not found: {memory_id}",
            }

        # One serializer per memory type; each maps the record onto the
        # plain-dict shape callers expect.
        serializers = {
            "heuristic": lambda m: {
                "id": m.id,
                "condition": m.condition,
                "strategy": m.strategy,
                "confidence": m.confidence,
                "occurrence_count": m.occurrence_count,
                "success_rate": getattr(m, "success_rate", None),
                "last_validated": (
                    m.last_validated.isoformat() if m.last_validated else None
                ),
            },
            "outcome": lambda m: {
                "id": m.id,
                "task_type": m.task_type,
                "task_description": m.task_description,
                "success": m.success,
                "strategy_used": m.strategy_used,
                "duration_ms": m.duration_ms,
                "error_message": getattr(m, "error_message", None),
            },
            "domain_knowledge": lambda m: {
                "id": m.id,
                "domain": m.domain,
                "fact": m.fact,
                "source": m.source,
                "confidence": m.confidence,
            },
            "anti_pattern": lambda m: {
                "id": m.id,
                "pattern": m.pattern,
                "why_bad": m.why_bad,
                "better_alternative": m.better_alternative,
                "severity": getattr(m, "severity", None),
            },
            "preference": lambda m: {
                "id": m.id,
                "category": m.category,
                "preference": m.preference,
                "source": m.source,
            },
        }
        content = serializers[memory_type](record)

        return {
            "success": True,
            "memory_type": memory_type,
            "content": content,
        }

    except Exception as e:
        logger.exception(f"Error in alma_get_memory_full: {e}")
        return {"success": False, "error": str(e)}
|
|
2986
|
+
|
|
2987
|
+
|
|
2988
|
+
def alma_store_trust_pattern(
    alma: ALMA,
    agent_id: str,
    pattern_type: str,
    task_type: str,
    description: str,
    evidence: Optional[str] = None,
    severity: str = "medium",
) -> Dict[str, Any]:
    """
    Store a trust-related pattern (violation or verification).

    Use this to record:
    - Trust violations (claims without evidence, silent failures)
    - Verification patterns (successful verification approaches)

    These patterns are retrieved as warnings for similar future tasks.

    Args:
        alma: ALMA instance
        agent_id: Agent the pattern is about
        pattern_type: Type (violation, verification)
        task_type: Category of task where pattern occurred
        description: Description of the pattern
        evidence: Optional evidence or context
        severity: Severity level (low, medium, high, critical)

    Returns:
        Dict with stored pattern info
    """
    if not agent_id or not agent_id.strip():
        return {"success": False, "error": "agent_id cannot be empty"}
    if not pattern_type or pattern_type not in ["violation", "verification"]:
        return {
            "success": False,
            "error": "pattern_type must be 'violation' or 'verification'",
        }
    if not task_type or not task_type.strip():
        return {"success": False, "error": "task_type cannot be empty"}
    if not description or not description.strip():
        return {"success": False, "error": "description cannot be empty"}

    valid_severities = ["low", "medium", "high", "critical"]
    if severity not in valid_severities:
        return {
            "success": False,
            "error": f"severity must be one of: {', '.join(valid_severities)}",
        }

    try:
        from alma.retrieval.trust_scoring import TrustPatternStore

        registry = TrustPatternStore(alma.storage)

        is_violation = pattern_type == "violation"
        if is_violation:
            # Violations carry a severity; verifications do not.
            new_id = registry.store_trust_violation(
                agent_id=agent_id,
                project_id=alma.project_id,
                violation_type=task_type,
                description=description,
                severity=severity,
                evidence=evidence,
            )
        else:
            new_id = registry.store_verification_pattern(
                agent_id=agent_id,
                project_id=alma.project_id,
                task_type=task_type,
                verification_approach=description,
                evidence=evidence,
            )

        return {
            "success": True,
            "pattern_id": new_id,
            "pattern_type": pattern_type,
            "agent_id": agent_id,
            "message": (
                "Trust violation recorded"
                if is_violation
                else "Verification pattern recorded"
            ),
        }

    except Exception as e:
        logger.exception(f"Error in alma_store_trust_pattern: {e}")
        return {"success": False, "error": str(e)}
|
|
3073
|
+
|
|
3074
|
+
|
|
3075
|
+
def alma_get_trust_warnings(
    alma: ALMA,
    task_description: str,
    agent_id: str,
) -> Dict[str, Any]:
    """
    Get trust warnings relevant to a task.

    Retrieves past violations and warnings that are semantically
    similar to the current task. Use this before starting a task
    to understand trust-related pitfalls.

    Args:
        alma: ALMA instance
        task_description: Description of the task to check
        agent_id: Agent to get warnings for

    Returns:
        Dict with list of relevant warnings
    """
    if not task_description or not task_description.strip():
        return {"success": False, "error": "task_description cannot be empty"}
    if not agent_id or not agent_id.strip():
        return {"success": False, "error": "agent_id cannot be empty"}

    try:
        from alma.retrieval.trust_scoring import TrustPatternStore

        hits = TrustPatternStore(alma.storage).retrieve_trust_warnings(
            task_description=task_description,
            agent_id=agent_id,
            project_id=alma.project_id,
        )

        hit_count = len(hits)
        return {
            "success": True,
            "warnings": hits,
            "count": hit_count,
            "has_warnings": hit_count > 0,
        }

    except Exception as e:
        logger.exception(f"Error in alma_get_trust_warnings: {e}")
        return {"success": False, "error": str(e)}
|
|
3121
|
+
|
|
3122
|
+
|
|
3123
|
+
# =============================================================================
|
|
3124
|
+
# ASYNC TRUST-INTEGRATED RETRIEVAL TOOLS
|
|
3125
|
+
# =============================================================================
|
|
3126
|
+
|
|
3127
|
+
|
|
3128
|
+
async def async_alma_retrieve_with_trust(
    alma: ALMA,
    query: str,
    agent: str,
    requesting_agent_id: str,
    requesting_agent_trust: float = 0.5,
    trust_behaviors: Optional[Dict[str, float]] = None,
    user_id: Optional[str] = None,
    top_k: int = 5,
) -> Dict[str, Any]:
    """Async version of alma_retrieve_with_trust.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_retrieve_with_trust(
            alma,
            query,
            agent,
            requesting_agent_id,
            requesting_agent_trust,
            trust_behaviors,
            user_id,
            top_k,
        ),
    )
|
|
3155
|
+
|
|
3156
|
+
|
|
3157
|
+
async def async_alma_retrieve_with_budget(
    alma: ALMA,
    query: str,
    agent: str,
    max_tokens: int = 4000,
    must_see_types: Optional[list] = None,
    should_see_types: Optional[list] = None,
    user_id: Optional[str] = None,
    top_k: int = 10,
) -> Dict[str, Any]:
    """Async version of alma_retrieve_with_budget.

    Delegates to the synchronous implementation on the default
    thread-pool executor so the event loop is not blocked.
    """
    import asyncio

    # get_running_loop() replaces the deprecated-in-coroutine
    # get_event_loop() (deprecated since Python 3.10).
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_retrieve_with_budget(
            alma,
            query,
            agent,
            max_tokens,
            must_see_types,
            should_see_types,
            user_id,
            top_k,
        ),
    )
|
|
3184
|
+
|
|
3185
|
+
|
|
3186
|
+
async def async_alma_retrieve_progressive(
    alma: ALMA,
    query: str,
    agent: str,
    disclosure_level: str = "summary",
    max_summaries: int = 10,
    user_id: Optional[str] = None,
) -> Dict[str, Any]:
    """Async version of alma_retrieve_progressive.

    Offloads the synchronous ``alma_retrieve_progressive`` to the default
    thread-pool executor so the event loop is not blocked.

    Args:
        alma: ALMA instance to query.
        query: Free-text retrieval query.
        agent: Name of the agent whose memory is searched.
        disclosure_level: Level of detail to return (default "summary").
        max_summaries: Maximum number of summaries to return.
        user_id: Optional user scope for the retrieval.

    Returns:
        The result dict produced by ``alma_retrieve_progressive``.
    """
    import asyncio

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_retrieve_progressive(
            alma, query, agent, disclosure_level, max_summaries, user_id
        ),
    )
|
|
3204
|
+
|
|
3205
|
+
|
|
3206
|
+
async def async_alma_get_memory_full(
    alma: ALMA,
    memory_id: str,
    memory_type: str,
) -> Dict[str, Any]:
    """Async version of alma_get_memory_full.

    Offloads the synchronous ``alma_get_memory_full`` to the default
    thread-pool executor so the event loop is not blocked.

    Args:
        alma: ALMA instance to query.
        memory_id: Identifier of the memory to fetch.
        memory_type: Type of the memory being fetched.

    Returns:
        The result dict produced by ``alma_get_memory_full``.
    """
    import asyncio

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_get_memory_full(alma, memory_id, memory_type),
    )
|
|
3219
|
+
|
|
3220
|
+
|
|
3221
|
+
async def async_alma_store_trust_pattern(
    alma: ALMA,
    agent_id: str,
    pattern_type: str,
    task_type: str,
    description: str,
    evidence: Optional[str] = None,
    severity: str = "medium",
) -> Dict[str, Any]:
    """Async version of alma_store_trust_pattern.

    Offloads the synchronous ``alma_store_trust_pattern`` to the default
    thread-pool executor so the event loop is not blocked.

    Args:
        alma: ALMA instance to write to.
        agent_id: Identifier of the agent the pattern concerns.
        pattern_type: Category of the trust pattern.
        task_type: Task context the pattern applies to.
        description: Human-readable description of the pattern.
        evidence: Optional supporting evidence for the pattern.
        severity: Pattern severity (default "medium").

    Returns:
        The result dict produced by ``alma_store_trust_pattern``.
    """
    import asyncio

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_store_trust_pattern(
            alma, agent_id, pattern_type, task_type, description, evidence, severity
        ),
    )
|
|
3240
|
+
|
|
3241
|
+
|
|
3242
|
+
async def async_alma_get_trust_warnings(
    alma: ALMA,
    task_description: str,
    agent_id: str,
) -> Dict[str, Any]:
    """Async version of alma_get_trust_warnings.

    Offloads the synchronous ``alma_get_trust_warnings`` to the default
    thread-pool executor so the event loop is not blocked.

    Args:
        alma: ALMA instance to query.
        task_description: Description of the task being evaluated.
        agent_id: Identifier of the agent to check warnings for.

    Returns:
        The result dict produced by ``alma_get_trust_warnings``.
    """
    import asyncio

    # get_running_loop() is the supported API inside a coroutine;
    # get_event_loop() here is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None,
        lambda: alma_get_trust_warnings(alma, task_description, agent_id),
    )
|